# To ensure that I/O can always make progress, even when there is no
# memory, we provide static buffers which are to be used when dynamic
# ones can't be allocated.  Each buffer is guarded by an atomic
# counter which records whether it is currently in use; atomic
# operations are needed because the buffers are claimed from the
# request path but released from the interrupt handler.
#
# There is an allocation failure emulation
# mechanism here - setting fail_start and fail_end will cause
# allocations in that range (fail_start <= allocations < fail_end) to
# fail, invoking the emergency mechanism.
# When this is happening, I/O requests proceed one at a time,
# essentially synchronously, until allocations start succeeding again.
#
# This currently doesn't handle the bitmap array, since that can be of
# any length, so we can't have a static version of it at this point.
Index: linux-2.6.17/arch/um/drivers/ubd_kern.c
===================================================================
--- linux-2.6.17.orig/arch/um/drivers/ubd_kern.c	2007-11-19 11:22:48.000000000 -0500
+++ linux-2.6.17/arch/um/drivers/ubd_kern.c	2007-11-19 11:24:01.000000000 -0500
@@ -513,6 +513,102 @@ struct ubd_aio {
 	void *bitmap_buf;
 };
 
+/*
+ * Technically, this should be locked, since it can be accessed
+ * concurrently.  However, since it's used only for testing and it
+ * doesn't matter if the value is off a bit, it'll stay unlocked.
+ */
+static int allocations;
+/*
+ * These are set in gdb to test the behavior of the driver under
+ * memory allocation failures.
+ */
+static int fail_start, fail_end;
+
+/*
+ * Static emergency buffers, used when kmalloc fails.  Each is
+ * guarded by an atomic counter: a claimant increments it and owns
+ * the buffer only if the new value is one, backing the increment
+ * out otherwise, so the counter always equals the number of owners
+ * (zero or one).  Atomics are needed because the buffers are
+ * claimed from the request path but released from interrupt
+ * context.
+ */
+static struct bitmap_io emergency_bitmap_io;
+static atomic_t bitmap_io_taken = ATOMIC_INIT(0);
+
+static struct bitmap_io *alloc_bitmap_io(void)
+{
+	struct bitmap_io *ret;
+
+	allocations++;
+	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
+
+	/* Emulate an allocation failure in the window set in gdb */
+	if((allocations >= fail_start) && (allocations < fail_end)){
+		kfree(ret);
+		ret = NULL;
+	}
+
+	if(ret != NULL)
+		return ret;
+
+	if(atomic_add_return(1, &bitmap_io_taken) > 1){
+		/* Someone else owns the buffer - back our increment
+		 * out so the counter stays an accurate owner count
+		 * and can never wrap.
+		 */
+		atomic_dec(&bitmap_io_taken);
+		return ERR_PTR(-EAGAIN);
+	}
+
+	return(&emergency_bitmap_io);
+}
+
+static void free_bitmap_io(struct bitmap_io *io)
+{
+	if(io == &emergency_bitmap_io)
+		atomic_dec(&bitmap_io_taken);
+	else kfree(io);
+}
+
+static struct ubd_aio emergency_aio;
+static atomic_t aio_taken = ATOMIC_INIT(0);
+
+static struct ubd_aio *alloc_ubd_aio(void)
+{
+	struct ubd_aio *ret;
+
+	allocations++;
+	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
+
+	/* Emulate an allocation failure in the window set in gdb */
+	if((allocations >= fail_start) && (allocations < fail_end)){
+		kfree(ret);
+		ret = NULL;
+	}
+
+	if(ret != NULL)
+		return ret;
+
+	if(atomic_add_return(1, &aio_taken) > 1){
+		/* See alloc_bitmap_io */
+		atomic_dec(&aio_taken);
+		return ERR_PTR(-EAGAIN);
+	}
+
+	return(&emergency_aio);
+}
+
+static void free_ubd_aio(struct ubd_aio *aio)
+{
+	if(aio == &emergency_aio)
+		atomic_dec(&aio_taken);
+	else kfree(aio);
+}
+
+/* Guards additions to and removals from the restart list below. */
+static DEFINE_SPINLOCK(restart_lock);
 static LIST_HEAD(restart);
 
 /* Called without dev->lock held, and only in interrupt context. */
@@ -522,6 +597,7 @@ static void ubd_intr(struct aio_context 
 	struct request *req;
 	struct ubd_aio *aio;
 	struct ubd *ubd;
+	LIST_HEAD(head);	/* local list the restart entries are spliced onto */
 	struct list_head *list, *next_ele;
 	unsigned long flags;
 	int n;
@@ -538,7 +614,7 @@ static void ubd_intr(struct aio_context 
 			   (atomic_dec_and_test(&aio->bitmap->count))){
 				aio->aio = aio->bitmap->aio;
 				aio->len = 0;
-				kfree(aio->bitmap);
+				free_bitmap_io(aio->bitmap);	/* may be the static emergency buffer */
 				aio->bitmap = NULL;
 				submit_aio(&aio->aio);
 			}
@@ -551,22 +627,26 @@ static void ubd_intr(struct aio_context 
 
 				if(aio->bitmap_buf != NULL)
 					kfree(aio->bitmap_buf);
-				kfree(aio);
+				free_ubd_aio(aio);	/* may release the static emergency aio */
 			}
 		}
 		else if(n < 0){
 			ubd_finish(req, n);
 			if(aio->bitmap != NULL)
-				kfree(aio->bitmap);
+				free_bitmap_io(aio->bitmap);
 			if(aio->bitmap_buf != NULL)
 				kfree(aio->bitmap_buf);
-			kfree(aio);
+			free_ubd_aio(aio);
 		}
 
 		context = next;
 	}
 
-	list_for_each_safe(list, next_ele, &restart){
+	spin_lock_irqsave(&restart_lock, flags);
+	list_splice_init(&restart, &head);	/* take the whole list so it can be walked unlocked */
+	spin_unlock_irqrestore(&restart_lock, flags);
+
+	list_for_each_safe(list, next_ele, &head){
 		ubd = container_of(list, struct ubd, restart);
 		list_del_init(&ubd->restart);
 		spin_lock_irqsave(&ubd->lock, flags);
@@ -1038,6 +1118,8 @@ static void do_ubd_request(struct reques
 {
 	struct io_thread_req *io_req;
 	struct request *req;
+	struct ubd *dev = q->queuedata;
+	unsigned long flags;
 	int last_sectors;
 
 	while(1){
@@ -1072,17 +1154,25 @@ static void do_ubd_request(struct reques
 
 			last_sectors = sg->length >> 9;
 
-			if(do_io(io_req, req, dev->cow.bitmap) == -EAGAIN){
-				if(list_empty(&dev->restart))
-					list_add(&dev->restart, &restart);
-				return;
-			}
+			if(do_io(io_req, req, dev->cow.bitmap) == -EAGAIN)
+				goto out_again;
 
 			dev->start_sg++;
 		}
 		dev->end_sg = 0;
 		dev->request = NULL;
 	}
+	return;
+
+out_again:
+	/* The restart list is also emptied by ubd_intr in interrupt
+	 * context, so additions must be made under restart_lock with
+	 * interrupts disabled.
+	 */
+	spin_lock_irqsave(&restart_lock, flags);
+	if(list_empty(&dev->restart))
+		list_add(&dev->restart, &restart);
+	spin_unlock_irqrestore(&restart_lock, flags);
 }
 
 static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -1325,6 +1408,11 @@ static int do_io(struct io_thread_req *r
 	if(req->bitmap_start != -1){
 		/* Round up to the nearest word */
 		int round = sizeof(unsigned long);
+
+		bitmap_io = alloc_bitmap_io();	/* may return the static emergency buffer */
+		if(IS_ERR(bitmap_io))
+			return PTR_ERR(bitmap_io);	/* -EAGAIN - the request will be restarted */
+
 		len = (req->bitmap_end - req->bitmap_start +
 		       round * 8 - 1) / (round * 8);
 		len *= round;
@@ -1332,18 +1420,11 @@ static int do_io(struct io_thread_req *r
 		off = req->bitmap_start / (8 * round);
 		off *= round;
 
-		bitmap_io = kmalloc(sizeof(*bitmap_io), GFP_KERNEL);
-		if(bitmap_io == NULL){
-			printk("Failed to kmalloc bitmap IO\n");
-			req->error = 1;
-			return -ENOMEM;
-		}
-
 		bitmap_buf = kmalloc(len, GFP_KERNEL);
 		if(bitmap_buf == NULL){
 			printk("do_io : kmalloc of bitmap chunk (len %ld)"
 			       "failed\n", len);
-			kfree(bitmap_io);
+			free_bitmap_io(bitmap_io);
 			req->error = 1;
 			return -ENOMEM;
 		}
@@ -1375,11 +1456,10 @@ static int do_io(struct io_thread_req *r
 			start * req->sectorsize;
 		len = (end - start) * req->sectorsize;
 		buf = &req->buffer[start * req->sectorsize];
-		aio = kmalloc(sizeof(*aio), GFP_KERNEL);
-		if(aio == NULL){
-			req->error = 1;
-			return -ENOMEM;
-		}
+
+		aio = alloc_ubd_aio();
+		if(IS_ERR(aio))
+			return PTR_ERR(aio);
 
 		*aio = ((struct ubd_aio)
 			{ .aio		= INIT_AIO(req->op, req->fds[bit], buf,