drivers/block/as-iosched.c |   75 +++++++++++++++------------------------------
 1 files changed, 26 insertions(+), 49 deletions(-)

diff -puN drivers/block/as-iosched.c~as-use-kblockd drivers/block/as-iosched.c
--- 25/drivers/block/as-iosched.c~as-use-kblockd	2003-03-16 14:35:31.000000000 -0800
+++ 25-akpm/drivers/block/as-iosched.c	2003-03-16 14:35:31.000000000 -0800
@@ -29,7 +29,7 @@ struct ant_stats {
 	int expired_read_batches;
 	int expired_write_batches;
 	int timeouts;
-	int bottom_halves;
+	int kblockd_calls;
 	int anticipate_hits;
 	int expired_fifo_reads;
 	int expired_fifo_writes;
@@ -145,7 +145,7 @@ struct as_data {
 	int antic_status;
 	unsigned long antic_start;	/* jiffies: when it started */
 	struct timer_list antic_timer;	/* anticipatory scheduling timer */
-	struct tasklet_struct tasklet;	/* Deferred unplugging */
+	struct work_struct antic_work;	/* Deferred unplugging */
 	struct as_io_context *as_io_context;/* Identify the expected process */
 	int aic_finished; /* IO associated with as_io_context finished */
 
@@ -914,79 +914,57 @@ static void as_antic_waitnext(struct as_
 }
 
 /*
- * This is executed in a "deferred" context.  Either in a timer handler or
- * in tasklet. It calls the driver's request_fn so the driver can submit
- * that request. IMPORTANT! This guy will reenter the elevator, so setup
- * all queue global state before calling, and don't rely on any state over
- * calls.
+ * This is executed in a "deferred" process context, by kblockd. It calls the
+ * driver's request_fn so the driver can submit that request.
+ *
+ * IMPORTANT! This guy will reenter the elevator, so set up all queue global
+ * state before calling, and don't rely on any state over calls.
  *
  * FIXME! dispatch queue is not a queue at all!
  * Andrew! as_queue_notready does not _try_ to move a request to dispatch
  * list, in fact it tries not to! Unfortunately it sometimes must in order
  * to guarantee elv_next_request will return !NULL after a ready indication.
  */
-static void __as_antic_stop(struct as_data *ad)
-{
-	struct request_queue *q = ad->q;
-
-	if (!as_queue_notready(q))
-		q->request_fn(q);
-}
-
-/*
- * Conveniently, this function is identical whether called via tasklet or via
- * timer.
- */
-static void __as_bottom_half(unsigned long data)
+static void as_work_handler(void *data)
 {
-	struct request_queue *q = (struct request_queue *)data;
-	struct as_data *ad = q->elevator.elevator_data;
+	struct request_queue *q = data;
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__as_antic_stop(ad);
+	if (!as_queue_notready(q))
+		q->request_fn(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
 /*
- * as_antic_timeout is the timer function set by as_antic_waitnext.
+ * This is called directly by the functions in this file to stop anticipation.
+ * We kill the timer and schedule a call to the request_fn asap.
  */
-static void as_antic_timeout(unsigned long data)
+static void as_antic_stop(struct as_data *ad)
 {
-	struct request_queue *q = (struct request_queue *)data;
-	struct as_data *ad = q->elevator.elevator_data;
 	int status = ad->antic_status;
 
-	ant_stats.timeouts++;
 	if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) {
+		if (status == ANTIC_WAIT_NEXT)
+			del_timer(&ad->antic_timer);
 		ad->antic_status = ANTIC_FINISHED;
-		__as_bottom_half(data);
+		kblockd_schedule_work(&ad->antic_work);
 	}
 }
 
 /*
- * This is the tasklet handler.  It is called from "bottom half" context
- * shortly after the functions in this file called as_antic_stop().
- */
-static void as_tasklet_handler(unsigned long data)
-{
-	ant_stats.bottom_halves++;
-	__as_bottom_half(data);
-}
-
-/*
- * This is called directly by the functions in this file to stop anticipation.
- * We kill the timer and schedule a call to the request_fn asap.
+ * as_antic_timeout is the timer function set by as_antic_waitnext.
  */
-static void as_antic_stop(struct as_data *ad)
+static void as_antic_timeout(unsigned long data)
 {
+	struct request_queue *q = (struct request_queue *)data;
+	struct as_data *ad = q->elevator.elevator_data;
 	int status = ad->antic_status;
 
+	ant_stats.timeouts++;
 	if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) {
-		if (status == ANTIC_WAIT_NEXT)
-			del_timer(&ad->antic_timer);
 		ad->antic_status = ANTIC_FINISHED;
-		tasklet_schedule(&ad->tasklet);
+		kblockd_schedule_work(&ad->antic_work);
 	}
 }
 
@@ -994,8 +972,7 @@ static void as_antic_stop(struct as_data
  * as_close_req decides if one request is considered "close" to the
  * previous one issued.
  */
-static int
-as_close_req(struct as_data *ad, struct as_rq *arq)
+static int as_close_req(struct as_data *ad, struct as_rq *arq)
 {
 	unsigned long delay;	/* milliseconds */
 	sector_t last = ad->last_sector[ad->batch_data_dir];
@@ -1475,7 +1452,7 @@ static void as_exit(request_queue_t *q, 
 	int i;
 
 	del_timer_sync(&ad->antic_timer);
-	tasklet_kill(&ad->tasklet);
+	kblockd_flush();
 
 	BUG_ON(!list_empty(&ad->fifo_list[READ]));
 	BUG_ON(!list_empty(&ad->fifo_list[WRITE]));
@@ -1531,7 +1508,7 @@ static int as_init(request_queue_t *q, e
 	ad->antic_timer.function = as_antic_timeout;
 	ad->antic_timer.data = (unsigned long)q;
 	init_timer(&ad->antic_timer);
-	tasklet_init(&ad->tasklet, as_tasklet_handler, (unsigned long)q);
+	INIT_WORK(&ad->antic_work, as_work_handler, q);
 
 	for (i = 0; i < AS_HASH_ENTRIES; i++)
 		INIT_LIST_HEAD(&ad->hash[i]);

_