v2.5.0.11 -> v2.5.1

- Al Viro: floppy_eject cleanup, mount cleanups
- Jens Axboe: bio updates
- Ingo Molnar: mempool fixes
- GOTO Masanori: Fix O_DIRECT error handling
diff --git a/Makefile b/Makefile
index a62e69d..a5aafb7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 5
 SUBLEVEL = 1
-EXTRAVERSION =-pre11
+EXTRAVERSION =
 
 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
 
diff --git a/arch/m68k/kernel/setup.c b/arch/m68k/kernel/setup.c
index 86007ef..57a954d 100644
--- a/arch/m68k/kernel/setup.c
+++ b/arch/m68k/kernel/setup.c
@@ -93,7 +93,6 @@
 long mach_max_dma_address = 0x00ffffff; /* default set to the lower 16MB */
 #if defined(CONFIG_AMIGA_FLOPPY) || defined(CONFIG_ATARI_FLOPPY) 
 void (*mach_floppy_setup) (char *, int *) __initdata = NULL;
-void (*mach_floppy_eject) (void) = NULL;
 #endif
 #ifdef CONFIG_HEARTBEAT
 void (*mach_heartbeat) (int) = NULL;
@@ -514,11 +513,6 @@
 		mach_floppy_setup (str, ints);
 }
 
-void floppy_eject(void)
-{
-	if (mach_floppy_eject)
-		mach_floppy_eject();
-}
 #endif
 
 /* for "kbd-reset" cmdline param */
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index 2369090..4989e67 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -36,7 +36,6 @@
 #include <asm/q40_master.h>
 #include <asm/keyboard.h>
 
-extern void floppy_eject(void);
 extern void floppy_setup(char *str, int *ints);
 
 extern int q40kbd_translate(unsigned char scancode, unsigned char *keycode,
diff --git a/arch/ppc/kernel/apus_setup.c b/arch/ppc/kernel/apus_setup.c
index bdbc452..c3fe77c 100644
--- a/arch/ppc/kernel/apus_setup.c
+++ b/arch/ppc/kernel/apus_setup.c
@@ -106,7 +106,6 @@
 long mach_max_dma_address = 0x00ffffff; /* default set to the lower 16MB */
 #if defined(CONFIG_AMIGA_FLOPPY)
 void (*mach_floppy_setup) (char *, int *) __initdata = NULL;
-void (*mach_floppy_eject) (void) = NULL;
 #endif
 #ifdef CONFIG_HEARTBEAT
 void (*mach_heartbeat) (int) = NULL;
@@ -404,12 +403,6 @@
 	if (mach_floppy_setup)
 		mach_floppy_setup (str, ints);
 }
-
-void floppy_eject(void)
-{
-	if (mach_floppy_eject)
-		mach_floppy_eject();
-}
 #endif
 
 /*********************************************************** MEMORY */
diff --git a/arch/sparc64/kernel/iommu_common.c b/arch/sparc64/kernel/iommu_common.c
index e9d4bea..134be5c 100644
--- a/arch/sparc64/kernel/iommu_common.c
+++ b/arch/sparc64/kernel/iommu_common.c
@@ -1,4 +1,4 @@
-/* $Id: iommu_common.c,v 1.6 2001/10/09 02:24:33 davem Exp $
+/* $Id: iommu_common.c,v 1.8 2001/12/11 11:13:06 davem Exp $
  * iommu_common.c: UltraSparc SBUS/PCI common iommu code.
  *
  * Copyright (C) 1999 David S. Miller (davem@redhat.com)
@@ -66,7 +66,9 @@
 
 	daddr = dma_sg->dma_address;
 	sglen = sg->length;
-	sgaddr = (unsigned long) sg->address;
+	sgaddr = (unsigned long) (sg->address ?
+				  sg->address :
+				  page_address(sg->page) + sg->offset);
 	while (dlen > 0) {
 		unsigned long paddr;
 
@@ -116,7 +118,9 @@
 		sg++;
 		if (--nents <= 0)
 			break;
-		sgaddr = (unsigned long) sg->address;
+		sgaddr = (unsigned long) (sg->address ?
+					  sg->address :
+					  page_address(sg->page) + sg->offset);
 		sglen = sg->length;
 	}
 	if (dlen < 0) {
@@ -197,14 +201,21 @@
 	unsigned long prev;
 	u32 dent_addr, dent_len;
 
-	prev  = (unsigned long) sg->address;
+	prev  = (unsigned long) (sg->address ?
+				 sg->address :
+				 page_address(sg->page) + sg->offset);
 	prev += (unsigned long) (dent_len = sg->length);
-	dent_addr = (u32) ((unsigned long)sg->address & (IO_PAGE_SIZE - 1UL));
+	dent_addr = (u32) ((unsigned long)(sg->address ?
+					   sg->address :
+					   page_address(sg->page) + sg->offset)
+			   & (IO_PAGE_SIZE - 1UL));
 	while (--nents) {
 		unsigned long addr;
 
 		sg++;
-		addr = (unsigned long) sg->address;
+		addr = (unsigned long) (sg->address ?
+					sg->address :
+					page_address(sg->page) + sg->offset);
 		if (! VCONTIG(prev, addr)) {
 			dma_sg->dma_address = dent_addr;
 			dma_sg->dma_length = dent_len;
diff --git a/arch/sparc64/kernel/iommu_common.h b/arch/sparc64/kernel/iommu_common.h
index 4d0795a..0397440 100644
--- a/arch/sparc64/kernel/iommu_common.h
+++ b/arch/sparc64/kernel/iommu_common.h
@@ -1,4 +1,4 @@
-/* $Id: iommu_common.h,v 1.4 2001/10/09 02:24:33 davem Exp $
+/* $Id: iommu_common.h,v 1.5 2001/12/11 09:41:01 davem Exp $
  * iommu_common.h: UltraSparc SBUS/PCI common iommu declarations.
  *
  * Copyright (C) 1999 David S. Miller (davem@redhat.com)
@@ -6,8 +6,9 @@
 
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
 
-#include <asm/page.h>
 #include <asm/iommu.h>
 #include <asm/scatterlist.h>
 
diff --git a/drivers/acorn/block/fd1772.c b/drivers/acorn/block/fd1772.c
index d9cf8b5..1667797 100644
--- a/drivers/acorn/block/fd1772.c
+++ b/drivers/acorn/block/fd1772.c
@@ -1620,7 +1620,3 @@
 
 	return 0;
 }
-
-void floppy_eject(void)
-{
-}
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 55d3fbe..0f0060d 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -1948,8 +1948,11 @@
   RequestQueue = BLK_DEFAULT_QUEUE(MajorNumber);
   blk_init_queue(RequestQueue, DAC960_RequestFunction);
   RequestQueue->queuedata = Controller;
-  RequestQueue->max_segments = Controller->DriverScatterGatherLimit;
-  RequestQueue->max_sectors = Controller->MaxBlocksPerCommand;
+  blk_queue_max_hw_segments(RequestQueue,
+			    Controller->DriverScatterGatherLimit);
+  blk_queue_max_phys_segments(RequestQueue, ~0);
+  blk_queue_max_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
+
   Controller->RequestQueue = RequestQueue;
   /*
     Initialize the Disk Partitions array, Partition Sizes array, Block Sizes
@@ -2889,7 +2892,7 @@
   Command->LogicalDriveNumber = DAC960_LogicalDriveNumber(Request->rq_dev);
   Command->BlockNumber = Request->sector;
   Command->BlockCount = Request->nr_sectors;
-  Command->SegmentCount = Request->nr_segments;
+  Command->SegmentCount = Request->nr_phys_segments;
   Command->BufferHeader = Request->bio;
   Command->RequestBuffer = Request->buffer;
   blkdev_dequeue_request(Request);
diff --git a/drivers/block/acsi.c b/drivers/block/acsi.c
index 916a192..28e5ae8 100644
--- a/drivers/block/acsi.c
+++ b/drivers/block/acsi.c
@@ -253,6 +253,8 @@
 static int				CurrentNSect;
 static char				*CurrentBuffer;
 
+static spinlock_t			acsi_lock = SPIN_LOCK_UNLOCKED;
+
 
 #define SET_TIMER()	mod_timer(&acsi_timer, jiffies + ACSI_TIMEOUT)
 #define CLEAR_TIMER()	del_timer(&acsi_timer)
@@ -1784,7 +1786,7 @@
 	phys_acsi_buffer = virt_to_phys( acsi_buffer );
 	STramMask = ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000 : 0xff000000;
 	
-	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &acsi_lock);
 	read_ahead[MAJOR_NR] = 8;		/* 8 sector (4kB) read-ahead */
 	add_gendisk(&acsi_gendisk);
 
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 45d30ce..c2e353b 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -174,6 +174,8 @@
 static int writefromint;
 static char *raw_buf;
 
+static spinlock_t amiflop_lock = SPIN_LOCK_UNLOCKED;
+
 #define RAW_BUF_SIZE 30000  /* size of raw disk data */
 
 /*
@@ -1855,7 +1857,7 @@
 	post_write_timer.data = 0;
 	post_write_timer.function = post_write;
   
-	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &amiflop_lock);
 	blksize_size[MAJOR_NR] = floppy_blocksizes;
 	blk_size[MAJOR_NR] = floppy_sizes;
 
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index eea26ff..1386f3e 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -156,6 +156,8 @@
 
 static int DriveType = TYPE_HD;
 
+static spinlock_t ataflop_lock = SPIN_LOCK_UNLOCKED;
+
 /* Array for translating minors into disk formats */
 static struct {
 	int 	 index;
@@ -2013,7 +2015,7 @@
 
 	blk_size[MAJOR_NR] = floppy_sizes;
 	blksize_size[MAJOR_NR] = floppy_blocksizes;
-	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &ataflop_lock);
 
 	printk(KERN_INFO "Atari floppy driver: max. %cD, %strack buffering\n",
 	       DriveType == 0 ? 'D' : DriveType == 1 ? 'H' : 'E',
diff --git a/drivers/block/block_ioctl.c b/drivers/block/block_ioctl.c
index a894888..75d71ca 100644
--- a/drivers/block/block_ioctl.c
+++ b/drivers/block/block_ioctl.c
@@ -76,5 +76,8 @@
 			err = -ENOTTY;
 	}
 
+#if 0
+	blk_put_queue(q);
+#endif
 	return err;
 }
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 74aca53..b038dde 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1219,7 +1219,7 @@
 		goto startio;
 
 	creq = elv_next_request(q);
-	if (creq->nr_segments > MAXSGENTRIES)
+	if (creq->nr_phys_segments > MAXSGENTRIES)
                 BUG();
 
         if (h->ctlr != MAJOR(creq->rq_dev)-MAJOR_NR )
@@ -1866,9 +1866,16 @@
 
 	q = BLK_DEFAULT_QUEUE(MAJOR_NR + i);
         q->queuedata = hba[i];
+	spin_lock_init(&hba[i]->lock);
         blk_init_queue(q, do_cciss_request, &hba[i]->lock);
 	blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
-	blk_queue_max_segments(q, MAXSGENTRIES);
+
+	/* This is a hardware imposed limit. */
+	blk_queue_max_hw_segments(q, MAXSGENTRIES);
+
+	/* This is a limit in the driver and could be eliminated. */
+	blk_queue_max_phys_segments(q, MAXSGENTRIES);
+
 	blk_queue_max_sectors(q, 512);
 
 	/* fill in the other Kernel structs */
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 5f85cb0..5f2298b 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -467,9 +467,16 @@
 
 		q = BLK_DEFAULT_QUEUE(MAJOR_NR + i);
 		q->queuedata = hba[i];
+		spin_lock_init(&hba[i]->lock);
 		blk_init_queue(q, do_ida_request, &hba[i]->lock);
 		blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
-		blk_queue_max_segments(q, SG_MAX);
+
+		/* This is a hardware imposed limit. */
+		blk_queue_max_hw_segments(q, SG_MAX);
+
+		/* This is a driver limit and could be eliminated. */
+		blk_queue_max_phys_segments(q, SG_MAX);
+
 		blksize_size[MAJOR_NR+i] = ida_blocksizes + (i*256);
 		read_ahead[MAJOR_NR+i] = READ_AHEAD;
 
@@ -864,7 +871,7 @@
 		goto startio;
 
 	creq = elv_next_request(q);
-	if (creq->nr_segments > SG_MAX)
+	if (creq->nr_phys_segments > SG_MAX)
 		BUG();
 
 	if (h->ctlr != MAJOR(creq->rq_dev)-MAJOR_NR || h->ctlr > nr_ctlr)
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 2417023..2fcdcc5 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -204,7 +204,7 @@
  * record each buffers capabilities
  */
 
-static spinlock_t floppy_lock;
+static spinlock_t floppy_lock = SPIN_LOCK_UNLOCKED;
 
 static unsigned short virtual_dma_port=0x3f0;
 void floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs);
@@ -4479,21 +4479,4 @@
 
 __setup ("floppy=", floppy_setup);
 module_init(floppy_init)
-
-/* eject the boot floppy (if we need the drive for a different root floppy) */
-/* This should only be called at boot time when we're sure that there's no
- * resource contention. */
-void floppy_eject(void)
-{
-	int dummy;
-	if (have_no_fdc)
-		return;
-	if(floppy_grab_irq_and_dma()==0)
-	{
-		lock_fdc(MAXTIMEOUT,0);
-		dummy=fd_eject(0);
-		process_fd_request();
-		floppy_release_irq_and_dma();
-	}
-}
 #endif
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 9849061f..e5c93889 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -144,7 +144,8 @@
 	/*
 	 * set defaults
 	 */
-	q->max_segments = MAX_SEGMENTS;
+	q->max_phys_segments = MAX_PHYS_SEGMENTS;
+	q->max_hw_segments = MAX_HW_SEGMENTS;
 	q->make_request_fn = mfn;
 	blk_queue_max_sectors(q, MAX_SECTORS);
 	blk_queue_hardsect_size(q, 512);
@@ -171,6 +172,18 @@
 	static request_queue_t *last_q;
 
 	/*
+	 * set appropriate bounce gfp mask -- unfortunately we don't have a
+	 * full 4GB zone, so we have to resort to low memory for any bounces.
+	 * ISA has its own < 16MB zone.
+	 */
+	if (dma_addr == BLK_BOUNCE_ISA) {
+		init_emergency_isa_pool();
+		q->bounce_gfp = GFP_NOIO | GFP_DMA;
+		printk("isa pfn %lu, max low %lu, max %lu\n", bounce_pfn, blk_max_low_pfn, blk_max_pfn);
+	} else
+		q->bounce_gfp = GFP_NOHIGHIO;
+
+	/*
 	 * keep this for debugging for now...
 	 */
 	if (dma_addr != BLK_BOUNCE_HIGH && q != last_q) {
@@ -178,7 +191,7 @@
 		if (dma_addr == BLK_BOUNCE_ANY)
 			printk("no I/O memory limit\n");
 		else
-			printk("I/O limit %luMb (mask 0x%Lx)\n", mb, (u64) dma_addr);
+			printk("I/O limit %luMb (mask 0x%Lx)\n", mb, (long long) dma_addr);
 	}
 
 	q->bounce_pfn = bounce_pfn;
@@ -201,17 +214,34 @@
 }
 
 /**
- * blk_queue_max_segments - set max segments for a request for this queue
+ * blk_queue_max_phys_segments - set max phys segments for a request for this queue
  * @q:  the request queue for the device
  * @max_segments:  max number of segments
  *
  * Description:
  *    Enables a low level driver to set an upper limit on the number of
- *    data segments in a request
+ *    physical data segments in a request.  This would be the largest sized
+ *    scatter list the driver could handle.
  **/
-void blk_queue_max_segments(request_queue_t *q, unsigned short max_segments)
+void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments)
 {
-	q->max_segments = max_segments;
+	q->max_phys_segments = max_segments;
+}
+
+/**
+ * blk_queue_max_hw_segments - set max hw segments for a request for this queue
+ * @q:  the request queue for the device
+ * @max_segments:  max number of segments
+ *
+ * Description:
+ *    Enables a low level driver to set an upper limit on the number of
+ *    hw data segments in a request.  This would be the largest number of
+ *    address/length pairs the host adapter can actually give at once
+ *    to the device.
+ **/
+void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments)
+{
+	q->max_hw_segments = max_segments;
 }
 
 /**
@@ -325,44 +355,78 @@
 void blk_recount_segments(request_queue_t *q, struct bio *bio)
 {
 	struct bio_vec *bv, *bvprv = NULL;
-	int i, nr_segs, seg_size, cluster;
+	int i, nr_phys_segs, nr_hw_segs, seg_size, cluster;
 
 	if (unlikely(!bio->bi_io_vec))
 		return;
 
 	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
-	seg_size = nr_segs = 0;
+	seg_size = nr_phys_segs = nr_hw_segs = 0;
 	bio_for_each_segment(bv, bio, i) {
 		if (bvprv && cluster) {
-			if (seg_size + bv->bv_len > q->max_segment_size)
+			int phys, seg;
+
+			if (seg_size + bv->bv_len > q->max_segment_size) {
+				nr_phys_segs++;
 				goto new_segment;
-			if (!BIOVEC_MERGEABLE(bvprv, bv))
+			}
+
+			phys = BIOVEC_PHYS_MERGEABLE(bvprv, bv);
+			seg = BIOVEC_SEG_BOUNDARY(q, bvprv, bv);
+			if (!phys || !seg)
+				nr_phys_segs++;
+			if (!seg)
 				goto new_segment;
-			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+
+			if (!BIOVEC_VIRT_MERGEABLE(bvprv, bv))
 				goto new_segment;
 
 			seg_size += bv->bv_len;
 			bvprv = bv;
 			continue;
+		} else {
+			nr_phys_segs++;
 		}
 new_segment:
-		nr_segs++;
+		nr_hw_segs++;
 		bvprv = bv;
-		seg_size = 0;
+		seg_size = bv->bv_len;
 	}
 
-	bio->bi_hw_seg = nr_segs;
+	bio->bi_phys_segments = nr_phys_segs;
+	bio->bi_hw_segments = nr_hw_segs;
 	bio->bi_flags |= (1 << BIO_SEG_VALID);
 }
 
 
-inline int blk_contig_segment(request_queue_t *q, struct bio *bio,
-			    struct bio *nxt)
+inline int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
+				   struct bio *nxt)
 {
 	if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
 		return 0;
 
-	if (!BIO_CONTIG(bio, nxt))
+	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+		return 0;
+	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
+		return 0;
+
+	/*
+	 * bio and nxt are contiguous in memory, check if the queue allows
+	 * these two to be merged into one
+	 */
+	if (BIO_SEG_BOUNDARY(q, bio, nxt))
+		return 1;
+
+	return 0;
+}
+
+inline int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
+				 struct bio *nxt)
+{
+	if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
+		return 0;
+
+	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
 		return 0;
 	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
 		return 0;
@@ -379,7 +443,7 @@
 
 /*
  * map a request to scatterlist, return number of sg entries setup. Caller
- * must make sure sg can hold rq->nr_segments entries
+ * must make sure sg can hold rq->nr_phys_segments entries
  */
 int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg)
 {
@@ -405,7 +469,7 @@
 				if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
 					goto new_segment;
 
-				if (!BIOVEC_MERGEABLE(bvprv, bvec))
+				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
 					goto new_segment;
 				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
 					goto new_segment;
@@ -413,11 +477,6 @@
 				sg[nsegs - 1].length += nbytes;
 			} else {
 new_segment:
-				if (nsegs >= q->max_segments) {
-					printk("map: %d >= %d, i %d, segs %d, size %ld\n", nsegs, q->max_segments, i, rq->nr_segments, rq->nr_sectors);
-					BUG();
-				}
-
 				sg[nsegs].address = NULL;
 				sg[nsegs].page = bvec->bv_page;
 				sg[nsegs].length = nbytes;
@@ -436,18 +495,44 @@
  * the standard queue merge functions, can be overridden with device
  * specific ones if so desired
  */
-static inline int ll_new_segment(request_queue_t *q, struct request *req,
-				 struct bio *bio)
-{
-	int nr_segs = bio_hw_segments(q, bio);
 
-	if (req->nr_segments + nr_segs <= q->max_segments) {
-		req->nr_segments += nr_segs;
-		return 1;
+static inline int ll_new_mergeable(request_queue_t *q,
+				   struct request *req,
+				   struct bio *bio)
+{
+	int nr_phys_segs = bio_phys_segments(q, bio);
+
+	if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+		req->flags |= REQ_NOMERGE;
+		return 0;
 	}
 
-	req->flags |= REQ_NOMERGE;
-	return 0;
+	/*
+	 * A hw segment is just getting larger, bump just the phys
+	 * counter.
+	 */
+	req->nr_phys_segments += nr_phys_segs;
+	return 1;
+}
+
+static inline int ll_new_hw_segment(request_queue_t *q,
+				    struct request *req,
+				    struct bio *bio)
+{
+	int nr_hw_segs = bio_hw_segments(q, bio);
+
+	if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments) {
+		req->flags |= REQ_NOMERGE;
+		return 0;
+	}
+
+	/*
+	 * This will form the start of a new hw segment.  Bump both
+	 * counters.
+	 */
+	req->nr_hw_segments += nr_hw_segs;
+	req->nr_phys_segments += bio_phys_segments(q, bio);
+	return 1;
 }
 
 static int ll_back_merge_fn(request_queue_t *q, struct request *req, 
@@ -458,7 +543,11 @@
 		return 0;
 	}
 
-	return ll_new_segment(q, req, bio);
+	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail),
+				  __BVEC_START(bio)))
+		return ll_new_mergeable(q, req, bio);
+
+	return ll_new_hw_segment(q, req, bio);
 }
 
 static int ll_front_merge_fn(request_queue_t *q, struct request *req, 
@@ -469,21 +558,49 @@
 		return 0;
 	}
 
-	return ll_new_segment(q, req, bio);
+	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio),
+				  __BVEC_START(req->bio)))
+		return ll_new_mergeable(q, req, bio);
+
+	return ll_new_hw_segment(q, req, bio);
 }
 
 static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
 				struct request *next)
 {
-	int total_segments = req->nr_segments + next->nr_segments;
+	int total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
+	int total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
 
-	if (blk_contig_segment(q, req->biotail, next->bio))
-		total_segments--;
-    
-	if (total_segments > q->max_segments)
+	/*
+	 * First check if the either of the requests are re-queued
+	 * requests.  Can't merge them if they are.
+	 */
+	if (req->special || next->special)
 		return 0;
 
-	req->nr_segments = total_segments;
+	/*
+	 * Will it become too large?
+	 */
+	if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
+		return 0;
+
+	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
+	if (blk_phys_contig_segment(q, req->biotail, next->bio))
+		total_phys_segments--;
+
+	if (total_phys_segments > q->max_phys_segments)
+		return 0;
+
+	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
+	if (blk_hw_contig_segment(q, req->biotail, next->bio))
+		total_hw_segments--;
+    
+	if (total_hw_segments > q->max_hw_segments)
+		return 0;
+
+	/* Merge is OK... */
+	req->nr_phys_segments = total_phys_segments;
+	req->nr_hw_segments = total_hw_segments;
 	return 1;
 }
 
@@ -1107,7 +1224,7 @@
 	req->hard_sector = req->sector = sector;
 	req->hard_nr_sectors = req->nr_sectors = nr_sectors;
 	req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors;
-	req->nr_segments = bio->bi_vcnt;
+	req->nr_phys_segments = bio_phys_segments(q, bio);
 	req->nr_hw_segments = bio_hw_segments(q, bio);
 	req->buffer = bio_data(bio);	/* see ->buffer comment above */
 	req->waiting = NULL;
@@ -1201,7 +1318,7 @@
 				printk(KERN_INFO "%s: rw=%ld, want=%ld, limit=%Lu\n",
 				       kdevname(bio->bi_dev), bio->bi_rw,
 				       (sector + nr_sectors)>>1,
-				       (u64) blk_size[major][minor]);
+				       (long long) blk_size[major][minor]);
 			}
 			set_bit(BIO_EOF, &bio->bi_flags);
 			goto end_io;
@@ -1221,7 +1338,7 @@
 		if (!q) {
 			printk(KERN_ERR
 			       "generic_make_request: Trying to access nonexistent block-device %s (%Lu)\n",
-			       kdevname(bio->bi_dev), (u64) bio->bi_sector);
+			       kdevname(bio->bi_dev), (long long) bio->bi_sector);
 end_io:
 			bio->bi_end_io(bio, nr_sectors);
 			break;
@@ -1433,7 +1550,27 @@
 extern int stram_device_init (void);
 #endif
 
-inline void blk_recalc_request(struct request *rq, int nsect)
+inline void blk_recalc_rq_segments(struct request *rq)
+{
+	struct bio *bio;
+	int nr_phys_segs, nr_hw_segs;
+
+	rq->buffer = bio_data(rq->bio);
+
+	nr_phys_segs = nr_hw_segs = 0;
+	rq_for_each_bio(bio, rq) {
+		/* Force bio hw/phys segs to be recalculated. */
+		bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+
+		nr_phys_segs += bio_phys_segments(rq->q, bio);
+		nr_hw_segs += bio_hw_segments(rq->q, bio);
+	}
+
+	rq->nr_phys_segments = nr_phys_segs;
+	rq->nr_hw_segments = nr_hw_segs;
+}
+
+inline void blk_recalc_rq_sectors(struct request *rq, int nsect)
 {
 	rq->hard_sector += nsect;
 	rq->hard_nr_sectors -= nsect;
@@ -1451,8 +1588,6 @@
 		printk("blk: request botched\n");
 		rq->nr_sectors = rq->current_nr_sectors;
 	}
-
-	rq->buffer = bio_data(rq->bio);
 }
 
 /**
@@ -1495,7 +1630,8 @@
 			bio->bi_size -= residual;
 			bio_iovec(bio)->bv_offset += residual;
 			bio_iovec(bio)->bv_len -= residual;
-			blk_recalc_request(req, nr_sectors);
+			blk_recalc_rq_sectors(req, nr_sectors);
+			blk_recalc_rq_segments(req);
 			return 1;
 		}
 
@@ -1518,13 +1654,15 @@
 		}
 
 		if ((bio = req->bio)) {
-			blk_recalc_request(req, nsect);
+			blk_recalc_rq_sectors(req, nsect);
 
 			/*
 			 * end more in this run, or just return 'not-done'
 			 */
-			if (unlikely(nr_sectors <= 0))
+			if (unlikely(nr_sectors <= 0)) {
+				blk_recalc_rq_segments(req);
 				return 1;
+			}
 		}
 	}
 
@@ -1605,7 +1743,8 @@
 EXPORT_SYMBOL(blk_attempt_remerge);
 EXPORT_SYMBOL(blk_max_low_pfn);
 EXPORT_SYMBOL(blk_queue_max_sectors);
-EXPORT_SYMBOL(blk_queue_max_segments);
+EXPORT_SYMBOL(blk_queue_max_phys_segments);
+EXPORT_SYMBOL(blk_queue_max_hw_segments);
 EXPORT_SYMBOL(blk_queue_max_segment_size);
 EXPORT_SYMBOL(blk_queue_hardsect_size);
 EXPORT_SYMBOL(blk_queue_segment_boundary);
@@ -1613,5 +1752,6 @@
 EXPORT_SYMBOL(blk_nohighio);
 EXPORT_SYMBOL(blk_dump_rq_flags);
 EXPORT_SYMBOL(submit_bio);
-EXPORT_SYMBOL(blk_contig_segment);
 EXPORT_SYMBOL(blk_queue_assign_lock);
+EXPORT_SYMBOL(blk_phys_contig_segment);
+EXPORT_SYMBOL(blk_hw_contig_segment);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index c16b6163..38b2514 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -62,7 +62,7 @@
 static struct nbd_device nbd_dev[MAX_NBD];
 static devfs_handle_t devfs_handle;
 
-static spinlock_t nbd_lock;
+static spinlock_t nbd_lock = SPIN_LOCK_UNLOCKED;
 
 #define DEBUG( s )
 /* #define DEBUG( s ) printk( s ) 
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 1430fcb..8e1374c 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -166,6 +166,8 @@
 
 #include <asm/uaccess.h>
 
+static spinlock_t pd_lock = SPIN_LOCK_UNLOCKED;
+
 #ifndef MODULE
 
 #include "setup.h"
@@ -394,7 +396,7 @@
                 return -1;
         }
 	q = BLK_DEFAULT_QUEUE(MAJOR_NR);
-	blk_init_queue(q, DEVICE_REQUEST);
+	blk_init_queue(q, DEVICE_REQUEST, &pd_lock);
 	blk_queue_max_sectors(q, cluster);
         read_ahead[MAJOR_NR] = 8;       /* 8 sector (4kB) read ahead */
         
@@ -875,9 +877,9 @@
 
 {	long	saved_flags;
 
-	spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+	spin_lock_irqsave(&pd_lock,saved_flags);
 	end_request(1);
-	if (!pd_run) {  spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+	if (!pd_run) {  spin_unlock_irqrestore(&pd_lock,saved_flags);
 			return; 
 	}
 	
@@ -893,7 +895,7 @@
 
 	pd_count = CURRENT->current_nr_sectors;
 	pd_buf = CURRENT->buffer;
-	spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+	spin_unlock_irqrestore(&pd_lock,saved_flags);
 }
 
 static void do_pd_read( void )
@@ -916,11 +918,11 @@
                         pi_do_claimed(PI,do_pd_read_start);
 			return;
                 }
-		spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+		spin_lock_irqsave(&pd_lock,saved_flags);
                 end_request(0);
                 pd_busy = 0;
 		do_pd_request(NULL);
-		spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+		spin_unlock_irqrestore(&pd_lock,saved_flags);
                 return;
         }
         pd_ide_command(unit,IDE_READ,pd_block,pd_run);
@@ -940,11 +942,11 @@
                         pi_do_claimed(PI,do_pd_read_start);
                         return;
                 }
-		spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+		spin_lock_irqsave(&pd_lock,saved_flags);
                 end_request(0);
                 pd_busy = 0;
 		do_pd_request(NULL);
-		spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+		spin_unlock_irqrestore(&pd_lock,saved_flags);
                 return;
             }
             pi_read_block(PI,pd_buf,512);
@@ -955,11 +957,11 @@
 	    if (!pd_count) pd_next_buf(unit);
         }
         pi_disconnect(PI);
-	spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+	spin_lock_irqsave(&pd_lock,saved_flags);
         end_request(1);
         pd_busy = 0;
 	do_pd_request(NULL);
-	spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+	spin_unlock_irqrestore(&pd_lock,saved_flags);
 }
 
 static void do_pd_write( void )
@@ -982,11 +984,11 @@
 			pi_do_claimed(PI,do_pd_write_start);
                         return;
                 }
-		spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+		spin_lock_irqsave(&pd_lock,saved_flags);
                 end_request(0);
                 pd_busy = 0;
 		do_pd_request(NULL);
-		spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+		spin_unlock_irqrestore(&pd_lock,saved_flags);
                 return;
         }
         pd_ide_command(unit,IDE_WRITE,pd_block,pd_run);
@@ -998,11 +1000,11 @@
                         pi_do_claimed(PI,do_pd_write_start);
                         return;
                 }
-		spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+		spin_lock_irqsave(&pd_lock,saved_flags);
                 end_request(0);
                 pd_busy = 0;
 		do_pd_request(NULL);
-                spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+                spin_unlock_irqrestore(&pd_lock,saved_flags);
 		return;
             }
             pi_write_block(PI,pd_buf,512);
@@ -1027,19 +1029,19 @@
                         pi_do_claimed(PI,do_pd_write_start);
                         return;
                 }
-		spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+		spin_lock_irqsave(&pd_lock,saved_flags);
                 end_request(0);
                 pd_busy = 0;
 		do_pd_request(NULL);
-		spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+		spin_unlock_irqrestore(&pd_lock,saved_flags);
                 return;
         }
         pi_disconnect(PI);
-	spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+	spin_lock_irqsave(&pd_lock,saved_flags);
         end_request(1);
         pd_busy = 0;
 	do_pd_request(NULL);
-	spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+	spin_unlock_irqrestore(&pd_lock,saved_flags);
 }
 
 /* end of pd.c */
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index e495654..c83901c 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -361,7 +361,8 @@
         }
 	q = BLK_DEFAULT_QUEUE(MAJOR_NR);
 	blk_init_queue(q, DEVICE_REQUEST, &pf_spin_lock);
-	blk_queue_max_segments(q, cluster);
+	blk_queue_max_phys_segments(q, cluster);
+	blk_queue_max_hw_segments(q, cluster);
         read_ahead[MAJOR_NR] = 8;       /* 8 sector (4kB) read ahead */
         
 	for (i=0;i<PF_UNITS;i++) pf_blocksizes[i] = 1024;
diff --git a/drivers/block/ps2esdi.c b/drivers/block/ps2esdi.c
index b248b43..dec02f0 100644
--- a/drivers/block/ps2esdi.c
+++ b/drivers/block/ps2esdi.c
@@ -66,8 +66,6 @@
 #define TYPE_0_CMD_BLK_LENGTH 2
 #define TYPE_1_CMD_BLK_LENGTH 4
 
-#define PS2ESDI_LOCK (&((BLK_DEFAULT_QUEUE(MAJOR_NR))->queue_lock))
-
 static void reset_ctrl(void);
 
 int ps2esdi_init(void);
@@ -130,6 +128,7 @@
 struct ps2esdi_i_struct {
 	unsigned int head, sect, cyl, wpcom, lzone, ctl;
 };
+static spinlock_t ps2esdi_lock = SPIN_LOCK_UNLOCKED;
 
 #if 0
 #if 0				/* try both - I don't know which one is better... UB */
@@ -180,7 +179,7 @@
 		return -1;
 	}
 	/* set up some global information - indicating device specific info */
-	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &ps2esdi_lock);
 	read_ahead[MAJOR_NR] = 8;	/* 8 sector (4kB) read ahead */
 
 	/* some minor housekeeping - setup the global gendisk structure */
@@ -954,10 +953,10 @@
 		break;
 	}
 	if(ending != -1) {
-		spin_lock_irqsave(PS2ESDI_LOCK, flags);
+		spin_lock_irqsave(&ps2esdi_lock, flags);
 		end_request(ending);
 		do_ps2esdi_request(BLK_DEFAULT_QUEUE(MAJOR_NR));
-		spin_unlock_irqrestore(PS2ESDI_LOCK, flags);
+		spin_unlock_irqrestore(&ps2esdi_lock, flags);
 	}
 }				/* handle interrupts */
 
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index f4dee49..ad3ead3 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -203,6 +203,7 @@
 
 static struct floppy_state floppy_states[MAX_FLOPPIES];
 static int floppy_count = 0;
+static spinlock_t swim3_lock = SPIN_LOCK_UNLOCKED;
 
 static unsigned short write_preamble[] = {
 	0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e,	/* gap field */
@@ -807,16 +808,6 @@
 	return err;
 }
 
-int swim3_fd_eject(int devnum)
-{
-	if (devnum >= floppy_count)
-		return -ENODEV;
-	/* Do not check this - this function should ONLY be called early
-	 * in the boot process! */
-	/* if (floppy_states[devnum].ref_count != 1) return -EBUSY; */
-	return fd_eject(&floppy_states[devnum]);
-}
-
 static struct floppy_struct floppy_type =
 	{ 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL };	/*  7 1.44MB 3.5"   */
 
@@ -1041,7 +1032,7 @@
 			       MAJOR_NR);
 			return -EBUSY;
 		}
-		blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+		blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST,&swim3_lock);
 		blksize_size[MAJOR_NR] = floppy_blocksizes;
 		blk_size[MAJOR_NR] = floppy_sizes;
 	}
diff --git a/drivers/block/swim_iop.c b/drivers/block/swim_iop.c
index 29b8f82..0fbe21e 100644
--- a/drivers/block/swim_iop.c
+++ b/drivers/block/swim_iop.c
@@ -84,6 +84,8 @@
 static int floppy_blocksizes[2] = {512,512};
 static int floppy_sizes[2] = {2880,2880};
 
+static spinlock_t swim_iop_lock = SPIN_LOCK_UNLOCKED;
+
 static char *drive_names[7] = {
 	"not installed",	/* DRV_NONE    */
 	"unknown (1)",		/* DRV_UNKNOWN */
@@ -147,7 +149,7 @@
 		       MAJOR_NR);
 		return -EBUSY;
 	}
-	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &swim_iop_lock);
 	blksize_size[MAJOR_NR] = floppy_blocksizes;
 	blk_size[MAJOR_NR] = floppy_sizes;
 
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 4357b31..55587b4 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -122,6 +122,8 @@
 static int xd_sizes[XD_MAXDRIVES << 6], xd_access[XD_MAXDRIVES];
 static int xd_blocksizes[XD_MAXDRIVES << 6];
 
+static spinlock_t xd_lock = SPIN_LOCK_UNLOCKED;
+
 extern struct block_device_operations xd_fops;
 
 static struct gendisk xd_gendisk = {
@@ -170,7 +172,7 @@
 		return -1;
 	}
 	devfs_handle = devfs_mk_dir (NULL, xd_gendisk.major_name, NULL);
-	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &xd_lock);
 	read_ahead[MAJOR_NR] = 8;	/* 8 sector (4kB) read ahead */
 	add_gendisk(&xd_gendisk);
 	xd_geninit();
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index e050e31..f7d35d7 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -68,6 +68,8 @@
 static int list_count       = 0;
 static int current_device   = -1;
 
+static spinlock_t z2ram_lock = SPIN_LOCK_UNLOCKED;
+
 static void
 do_z2_request( request_queue_t * q )
 {
@@ -364,7 +366,7 @@
 	    }
     }    
    
-    blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+    blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &z2ram_lock);
     blksize_size[ MAJOR_NR ] = z2_blocksizes;
     blk_size[ MAJOR_NR ] = z2_sizes;
 
diff --git a/drivers/fc4/fc.c b/drivers/fc4/fc.c
index 6a128292..9068ede2 100644
--- a/drivers/fc4/fc.c
+++ b/drivers/fc4/fc.c
@@ -767,8 +767,12 @@
 
 static void fcp_scsi_done (Scsi_Cmnd *SCpnt)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&SCpnt->host->host_lock, flags);
 	if (FCP_CMND(SCpnt)->done)
 		FCP_CMND(SCpnt)->done(SCpnt);
+	spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
 }
 
 static int fcp_scsi_queue_it(fc_channel *fc, Scsi_Cmnd *SCpnt, fcp_cmnd *fcmd, int prepare)
@@ -913,8 +917,12 @@
 	 */
 
 	if (++fc->abort_count < (fc->can_queue >> 1)) {
+		unsigned long flags;
+
 		SCpnt->result = DID_ABORT;
+		spin_lock_irqsave(&SCpnt->host->host_lock, flags);
 		fcmd->done(SCpnt);
+		spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
 		printk("FC: soft abort\n");
 		return SUCCESS;
 	} else {
diff --git a/drivers/fc4/soc.c b/drivers/fc4/soc.c
index 924de1c..19aee06 100644
--- a/drivers/fc4/soc.c
+++ b/drivers/fc4/soc.c
@@ -341,14 +341,14 @@
 	unsigned long flags;
 	register struct soc *s = (struct soc *)dev_id;
 
-	spin_lock_irqsave(&io_request_lock, flags);
+	spin_lock_irqsave(&s->lock, flags);
 	cmd = sbus_readl(s->regs + CMD);
 	for (; (cmd = SOC_INTR (s, cmd)); cmd = sbus_readl(s->regs + CMD)) {
 		if (cmd & SOC_CMD_RSP_Q1) soc_unsolicited (s);
 		if (cmd & SOC_CMD_RSP_Q0) soc_solicited (s);
 		if (cmd & SOC_CMD_REQ_QALL) soc_request (s, cmd);
 	}
-	spin_unlock_irqrestore(&io_request_lock, flags);
+	spin_unlock_irqrestore(&s->lock, flags);
 }
 
 #define TOKEN(proto, port, token) (((proto)<<12)|(token)|(port))
@@ -559,6 +559,7 @@
 	if (s == NULL)
 		return;
 	memset (s, 0, sizeof(struct soc));
+	spin_lock_init(&s->lock);
 	s->soc_no = no;
 
 	SOD(("socs %08lx soc_intr %08lx soc_hw_enque %08x\n",
diff --git a/drivers/fc4/soc.h b/drivers/fc4/soc.h
index 740e1a3..c9c6d1d 100644
--- a/drivers/fc4/soc.h
+++ b/drivers/fc4/soc.h
@@ -265,6 +265,7 @@
 } soc_cq;
 
 struct soc {
+	spinlock_t		lock;
 	soc_port		port[2]; /* Every SOC has one or two FC ports */
 	soc_cq			req[2]; /* Request CQs */
 	soc_cq			rsp[2]; /* Response CQs */
diff --git a/drivers/fc4/socal.c b/drivers/fc4/socal.c
index bec5167..447a4de 100644
--- a/drivers/fc4/socal.c
+++ b/drivers/fc4/socal.c
@@ -411,7 +411,7 @@
 	unsigned long flags;
 	register struct socal *s = (struct socal *)dev_id;
 
-	spin_lock_irqsave(&io_request_lock, flags);
+	spin_lock_irqsave(&s->lock, flags);
 	cmd = sbus_readl(s->regs + CMD);
 	for (; (cmd = SOCAL_INTR (s, cmd)); cmd = sbus_readl(s->regs + CMD)) {
 #ifdef SOCALDEBUG
@@ -428,7 +428,7 @@
 		if (cmd & SOCAL_CMD_REQ_QALL)
 			socal_request (s, cmd);
 	}
-	spin_unlock_irqrestore(&io_request_lock, flags);
+	spin_unlock_irqrestore(&s->lock, flags);
 }
 
 #define TOKEN(proto, port, token) (((proto)<<12)|(token)|(port))
@@ -667,6 +667,7 @@
 	s = kmalloc (sizeof (struct socal), GFP_KERNEL);
 	if (!s) return;
 	memset (s, 0, sizeof(struct socal));
+	spin_lock_init(&s->lock);
 	s->socal_no = no;
 
 	SOD(("socals %08lx socal_intr %08lx socal_hw_enque %08lx\n",
diff --git a/drivers/fc4/socal.h b/drivers/fc4/socal.h
index 8e8c7f4..a853fad 100644
--- a/drivers/fc4/socal.h
+++ b/drivers/fc4/socal.h
@@ -290,6 +290,7 @@
 } socal_cq;
 
 struct socal {
+	spinlock_t		lock;
 	socal_port		port[2]; /* Every SOCAL has one or two FC ports */
 	socal_cq		req[4]; /* Request CQs */
 	socal_cq		rsp[4]; /* Response CQs */
diff --git a/drivers/ide/hd.c b/drivers/ide/hd.c
index 38c0777..08485cf 100644
--- a/drivers/ide/hd.c
+++ b/drivers/ide/hd.c
@@ -62,6 +62,8 @@
 #define HD_IRQ IRQ_HARDDISK
 #endif
 
+static spinlock_t hd_lock = SPIN_LOCK_UNLOCKED;
+
 static int revalidate_hddisk(kdev_t, int);
 
 #define	HD_DELAY	0
@@ -106,7 +108,7 @@
 static struct hd_struct hd[MAX_HD<<6];
 static int hd_sizes[MAX_HD<<6];
 static int hd_blocksizes[MAX_HD<<6];
-static int hd_hardsectsizes[MAX_HD<<6];
+
 
 static struct timer_list device_timer;
 
@@ -464,7 +466,7 @@
 	i = --CURRENT->nr_sectors;
 	--CURRENT->current_nr_sectors;
 	CURRENT->buffer += 512;
-	if (!i || (CURRENT->bh && !SUBSECTOR(i)))
+	if (!i || (CURRENT->bio && !SUBSECTOR(i)))
 		end_request(1);
 	if (i > 0) {
 		SET_INTR(&write_intr);
@@ -586,24 +588,29 @@
 		dev+'a', (CURRENT->cmd == READ)?"read":"writ",
 		cyl, head, sec, nsect, (unsigned long) CURRENT->buffer);
 #endif
-	if (CURRENT->cmd == READ) {
-		hd_out(dev,nsect,sec,head,cyl,WIN_READ,&read_intr);
-		if (reset)
-			goto repeat;
-		return;
-	}
-	if (CURRENT->cmd == WRITE) {
-		hd_out(dev,nsect,sec,head,cyl,WIN_WRITE,&write_intr);
-		if (reset)
-			goto repeat;
-		if (wait_DRQ()) {
-			bad_rw_intr();
-			goto repeat;
+	if(CURRENT->flags & REQ_CMD) {
+		switch (rq_data_dir(CURRENT)) {
+		case READ:
+			hd_out(dev,nsect,sec,head,cyl,WIN_READ,&read_intr);
+			if (reset)
+				goto repeat;
+			break;
+		case WRITE:
+			hd_out(dev,nsect,sec,head,cyl,WIN_WRITE,&write_intr);
+			if (reset)
+				goto repeat;
+			if (wait_DRQ()) {
+				bad_rw_intr();
+				goto repeat;
+			}
+			outsw(HD_DATA,CURRENT->buffer,256);
+			break;
+		default:
+			printk("unknown hd-command\n");
+			end_request(0);
+			break;
 		}
-		outsw(HD_DATA,CURRENT->buffer,256);
-		return;
 	}
-	panic("unknown hd-command");
 }
 
 static void do_hd_request (request_queue_t * q)
@@ -723,12 +730,11 @@
 {
 	int drive;
 
-	for(drive=0; drive < (MAX_HD << 6); drive++) {
+	for(drive=0; drive < (MAX_HD << 6); drive++)
 		hd_blocksizes[drive] = 1024;
-		hd_hardsectsizes[drive] = 512;
-	}
+
 	blksize_size[MAJOR_NR] = hd_blocksizes;
-	hardsect_size[MAJOR_NR] = hd_hardsectsizes;
+	blk_queue_hardsect_size(QUEUE, 512);
 
 #ifdef __i386__
 	if (!NR_HD) {
@@ -830,7 +836,7 @@
 		printk("hd: unable to get major %d for hard disk\n",MAJOR_NR);
 		return -1;
 	}
-	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &hd_lock);
 	blk_queue_max_sectors(BLK_DEFAULT_QUEUE(MAJOR_NR), 255);
 	read_ahead[MAJOR_NR] = 8;		/* 8 sector (4kB) read-ahead */
 	add_gendisk(&hd_gendisk);
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index ecdcd85..fb638ca3 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -232,8 +232,8 @@
 
 	nents = blk_rq_map_sg(q, rq, hwif->sg_table);
 
-	if (rq->q && nents > rq->nr_segments)
-		printk("ide-dma: received %d segments, build %d\n", rq->nr_segments, nents);
+	if (rq->q && nents > rq->nr_phys_segments)
+		printk("ide-dma: received %d phys segments, build %d\n", rq->nr_phys_segments, nents);
 
 	if (rq_data_dir(rq) == READ)
 		hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 6201c2d..3f93dcc 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -608,8 +608,11 @@
 #endif
 	blk_queue_max_sectors(q, max_sectors);
 
-	/* IDE DMA can do PRD_ENTRIES number of segments */
-	q->max_segments = PRD_ENTRIES;
+	/* IDE DMA can do PRD_ENTRIES number of segments. */
+	blk_queue_max_hw_segments(q, PRD_ENTRIES);
+
+	/* This is a driver limit and could be eliminated. */
+	blk_queue_max_phys_segments(q, PRD_ENTRIES);
 }
 
 /*
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index c1b19e1..c4eb0a4 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -3686,6 +3686,7 @@
  */
 devfs_handle_t ide_devfs_handle;
 
+EXPORT_SYMBOL(ide_lock);
 EXPORT_SYMBOL(ide_probe);
 EXPORT_SYMBOL(drive_is_flashcard);
 EXPORT_SYMBOL(ide_timer_expiry);
@@ -3718,6 +3719,7 @@
 EXPORT_SYMBOL(ide_end_drive_cmd);
 EXPORT_SYMBOL(ide_end_request);
 EXPORT_SYMBOL(__ide_end_request);
+EXPORT_SYMBOL(ide_revalidate_drive);
 EXPORT_SYMBOL(ide_revalidate_disk);
 EXPORT_SYMBOL(ide_cmd);
 EXPORT_SYMBOL(ide_wait_cmd);
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 372583a..6044b05 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -69,6 +69,7 @@
 #include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/major.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/slab.h>
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 1e7c77b..9b6d48e 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -65,7 +65,7 @@
 #include <linux/errno.h>
 #include <linux/kdev_t.h>
 #include <linux/blkdev.h>
-#include <linux/blk.h>		/* for io_request_lock (spinlock) decl */
+#include <linux/blk.h>
 #include "../../scsi/scsi.h"
 #include "../../scsi/hosts.h"
 #include "../../scsi/sd.h"
@@ -246,9 +246,9 @@
 		mf_chk = search_taskQ(1,sc,MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK);
 		if (mf_chk != NULL) {
 			sc->result = DID_ABORT << 16;
-			spin_lock_irqsave(&io_request_lock, flags);
+			spin_lock_irqsave(&sc->host->host_lock, flags);
 			sc->scsi_done(sc);
-			spin_unlock_irqrestore(&io_request_lock, flags);
+			spin_unlock_irqrestore(&sc->host->host_lock, flags);
 			return 1;
 		}
 	}
@@ -426,9 +426,9 @@
 					 scsi_to_pci_dma_dir(sc->sc_data_direction));
 		}
 
-		spin_lock_irqsave(&io_request_lock, flags);
+		spin_lock_irqsave(&sc->host->host_lock, flags);
 		sc->scsi_done(sc);
-		spin_unlock_irqrestore(&io_request_lock, flags);
+		spin_unlock_irqrestore(&sc->host->host_lock, flags);
 	}
 
 	return 1;
@@ -928,9 +928,9 @@
 			}
 			SCpnt->resid = SCpnt->request_bufflen - mpt_sdev->sense_sz;
 			SCpnt->result = 0;
-/*			spin_lock(&io_request_lock);	*/
+/*			spin_lock(&SCpnt->host->host_lock);	*/
 			SCpnt->scsi_done(SCpnt);
-/*			spin_unlock(&io_request_lock);	*/
+/*			spin_unlock(&SCpnt->host->host_lock);	*/
 			return 0;
 		}
 	}
@@ -1333,9 +1333,9 @@
 	if (ctx2abort == -1) {
 		printk(KERN_ERR MYNAM ": ERROR - ScsiLookup fail(#2) for SCpnt=%p\n", SCpnt);
 		SCpnt->result = DID_SOFT_ERROR << 16;
-		spin_lock_irqsave(&io_request_lock, flags);
+		spin_lock_irqsave(&SCpnt->host->host_lock, flags);
 		SCpnt->scsi_done(SCpnt);
-		spin_unlock_irqrestore(&io_request_lock, flags);
+		spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
 		mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf);
 	} else {
 		dprintk((KERN_INFO MYNAM ":DbG: ctx2abort = %08x\n", ctx2abort));
@@ -1352,9 +1352,9 @@
 					": WARNING[2] - IOC error (%d) processing TaskMgmt request (mf=%p:sc=%p)\n",
 					i, mf, SCpnt);
 			SCpnt->result = DID_SOFT_ERROR << 16;
-			spin_lock_irqsave(&io_request_lock, flags);
+			spin_lock_irqsave(&SCpnt->host->host_lock, flags);
 			SCpnt->scsi_done(SCpnt);
-			spin_unlock_irqrestore(&io_request_lock, flags);
+			spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
 			mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf);
 		}
 	}
@@ -1428,9 +1428,9 @@
 				": WARNING[3] - IOC error (%d) processing TaskMgmt request (mf=%p:sc=%p)\n",
 				i, mf, SCpnt);
 		SCpnt->result = DID_SOFT_ERROR << 16;
-		spin_lock_irqsave(&io_request_lock, flags);
+		spin_lock_irqsave(&SCpnt->host->host_lock, flags);
 		SCpnt->scsi_done(SCpnt);
-		spin_unlock_irqrestore(&io_request_lock, flags);
+		spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
 		mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf);
 	}
 
@@ -1502,9 +1502,9 @@
 				": WARNING[4] - IOC error (%d) processing TaskMgmt request (mf=%p:sc=%p)\n",
 				i, mf, SCpnt);
 		SCpnt->result = DID_SOFT_ERROR << 16;
-		spin_lock_irqsave(&io_request_lock, flags);
+		spin_lock_irqsave(&SCpnt->host->host_lock, flags);
 		SCpnt->scsi_done(SCpnt);
-		spin_unlock_irqrestore(&io_request_lock, flags);
+		spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
 		mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf);
 	}
 
@@ -1748,9 +1748,9 @@
 			if (ctx2abort == -1) {
 				printk(KERN_ERR MYNAM ": ERROR - ScsiLookup fail(#1) for SCpnt=%p\n", SCpnt);
 				SCpnt->result = DID_SOFT_ERROR << 16;
-				spin_lock_irqsave(&io_request_lock, flags);
+				spin_lock_irqsave(&SCpnt->host->host_lock, flags);
 				SCpnt->scsi_done(SCpnt);
-				spin_unlock_irqrestore(&io_request_lock, flags);
+				spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
 				mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf);
 				continue;
 			}
@@ -1797,9 +1797,9 @@
 		    != 0) {
 			printk(KERN_WARNING MYNAM ": WARNING[1] - IOC error (%d) processing TaskMgmt request (mf=%p:sc=%p)\n", i, mf, SCpnt);
 			SCpnt->result = DID_SOFT_ERROR << 16;
-			spin_lock_irqsave(&io_request_lock, flags);
+			spin_lock_irqsave(&SCpnt->host->host_lock, flags);
 			SCpnt->scsi_done(SCpnt);
-			spin_unlock_irqrestore(&io_request_lock, flags);
+			spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
 			mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf);
 		} else {
 			/* Spin-Wait for TaskMgmt complete!!! */
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index bdf5259..c64b739 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -1301,7 +1301,8 @@
 		request_queue_t *q = i2ob_dev[unit].req_queue;
 
 		blk_queue_max_sectors(q, 256);
-		blk_queue_max_segments(q, (d->controller->status_block->inbound_frame_size - 8)/2);
+		blk_queue_max_phys_segments(q, (d->controller->status_block->inbound_frame_size - 8)/2);
+		blk_queue_max_hw_segments(q, (d->controller->status_block->inbound_frame_size - 8)/2);
 
 		if(d->controller->type == I2O_TYPE_PCI && d->controller->bus.pci.queue_buggy == 2)
 			i2ob_dev[i].depth = 32;
@@ -1309,14 +1310,16 @@
 		if(d->controller->type == I2O_TYPE_PCI && d->controller->bus.pci.queue_buggy == 1)
 		{
 			blk_queue_max_sectors(q, 32);
-			blk_queue_max_segments(q, 8);
+			blk_queue_max_phys_segments(q, 8);
+			blk_queue_max_hw_segments(q, 8);
 			i2ob_dev[i].depth = 4;
 		}
 
 		if(d->controller->type == I2O_TYPE_PCI && d->controller->bus.pci.short_req)
 		{
 			blk_queue_max_sectors(q, 8);
-			blk_queue_max_segments(q, 8);
+			blk_queue_max_phys_segments(q, 8);
+			blk_queue_max_hw_segments(q, 8);
 		}
 	}
 
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index f3528a2..2e51889 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -135,7 +135,7 @@
 list-multi	:= scsi_mod.o sd_mod.o sr_mod.o initio.o a100u2w.o cpqfc.o
 scsi_mod-objs	:= scsi.o hosts.o scsi_ioctl.o constants.o scsicam.o \
 			scsi_proc.o scsi_error.o scsi_queue.o scsi_lib.o \
-			scsi_merge.o scsi_dma.o scsi_scan.o scsi_syms.o
+			scsi_merge.o scsi_scan.o scsi_syms.o
 sd_mod-objs	:= sd.o
 sr_mod-objs	:= sr.o sr_ioctl.o sr_vendor.o
 initio-objs	:= ini9100u.o i91uscsi.o
diff --git a/drivers/scsi/README.ncr53c8xx b/drivers/scsi/README.ncr53c8xx
index 206233d..514c2be 100644
--- a/drivers/scsi/README.ncr53c8xx
+++ b/drivers/scsi/README.ncr53c8xx
@@ -1,6 +1,6 @@
 The Linux NCR53C8XX/SYM53C8XX drivers README file
 
-Written by Gerard Roudier <groudier@club-internet.fr>
+Written by Gerard Roudier <groudier@free.fr>
 21 Rue Carnot
 95170 DEUIL LA BARRE - FRANCE
 
@@ -87,7 +87,7 @@
 
 The initial Linux ncr53c8xx driver has been a port of the ncr driver from 
 FreeBSD that has been achieved in November 1995 by:
-          Gerard Roudier              <groudier@club-internet.fr>
+          Gerard Roudier              <groudier@free.fr>
 
 The original driver has been written for 386bsd and FreeBSD by:
           Wolfgang Stanglmeier        <wolf@cologne.de>
@@ -1287,7 +1287,7 @@
 be sure I will receive it.  Obviously, a bug in the driver code is
 possible.
 
-     My email address: Gerard Roudier <groudier@club-internet.fr>
+     My email address: Gerard Roudier <groudier@free.fr>
 
 Allowing disconnections is important if you use several devices on
 your SCSI bus but often causes problems with buggy devices.
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index be4495fd..9177efb 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -3084,7 +3084,7 @@
        * we check data_cmnd[0].  This catches the conditions for st.c, but
        * I'm still not sure if request.cmd is valid for sg devices.
        */
-      if ( (cmd->request.cmd == WRITE) || (cmd->data_cmnd[0] == WRITE_6) ||
+      if ( (rq_data_dir(&cmd->request) == WRITE) || (cmd->data_cmnd[0] == WRITE_6) ||
            (cmd->data_cmnd[0] == WRITE_FILEMARKS) )
       {
         sp->w_total++;
@@ -4294,7 +4294,7 @@
       {
         printk(INFO_LEAD "Underflow - Wanted %u, %s %u, residual SG "
           "count %d.\n", p->host_no, CTL_OF_SCB(scb), cmd->underflow,
-          (cmd->request.cmd == WRITE) ? "wrote" : "read", actual,
+          (rq_data_dir(&cmd->request) == WRITE) ? "wrote" : "read", actual,
           hscb->residual_SG_segment_count);
         printk(INFO_LEAD "status 0x%x.\n", p->host_no, CTL_OF_SCB(scb),
           hscb->target_status);
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c
index 0ec9d56..3f5bbb0 100644
--- a/drivers/scsi/esp.c
+++ b/drivers/scsi/esp.c
@@ -1,4 +1,4 @@
-/* $Id: esp.c,v 1.99 2001/02/13 01:17:01 davem Exp $
+/* $Id: esp.c,v 1.100 2001/12/11 04:55:48 davem Exp $
  * esp.c:  EnhancedScsiProcessor Sun SCSI driver code.
  *
  * Copyright (C) 1995, 1998 David S. Miller (davem@caip.rutgers.edu)
@@ -1035,9 +1035,6 @@
 {
 	int i;
 
-	/* Driver spinlock... */
-	spin_lock_init(&esp->lock);
-
 	/* Command queues... */
 	esp->current_SC = NULL;
 	esp->disconnected_SC = NULL;
@@ -1816,7 +1813,6 @@
 int esp_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
 {
 	struct esp *esp;
-	unsigned long flags;
 
 	/* Set up func ptr and initial driver cmd-phase. */
 	SCpnt->scsi_done = done;
@@ -1834,8 +1830,6 @@
 	SCpnt->SCp.Message          = 0xff;
 	SCpnt->SCp.sent_command     = 0;
 
-	spin_lock_irqsave(&esp->lock, flags);
-
 	/* Place into our queue. */
 	if (SCpnt->cmnd[0] == REQUEST_SENSE) {
 		ESPQUEUE(("RQSENSE\n"));
@@ -1849,8 +1843,6 @@
 	if (!esp->current_SC && !esp->resetting_bus)
 		esp_exec_cmd(esp);
 
-	spin_unlock_irqrestore(&esp->lock, flags);
-
 	return 0;
 }
 
@@ -1926,7 +1918,7 @@
 	unsigned long flags;
 	int don;
 
-	spin_lock_irqsave(&esp->lock, flags);
+	spin_lock_irqsave(&esp->ehost->host_lock, flags);
 
 	ESPLOG(("esp%d: Aborting command\n", esp->esp_id));
 	esp_dump_state(esp);
@@ -1942,7 +1934,7 @@
 		esp->msgout_len = 1;
 		esp->msgout_ctr = 0;
 		esp_cmd(esp, ESP_CMD_SATN);
-		spin_unlock_irqrestore(&esp->lock, flags);
+		spin_unlock_irqrestore(&esp->ehost->host_lock, flags);
 		return SCSI_ABORT_PENDING;
 	}
 
@@ -1964,14 +1956,14 @@
 				*prev = (Scsi_Cmnd *) this->host_scribble;
 				this->host_scribble = NULL;
 
-				spin_unlock_irqrestore(&esp->lock, flags);
-
 				esp_release_dmabufs(esp, this);
 				this->result = DID_ABORT << 16;
 				this->scsi_done(this);
+
 				if (don)
 					ESP_INTSON(esp->dregs);
 
+				spin_unlock_irqrestore(&esp->ehost->host_lock, flags);
 				return SCSI_ABORT_SUCCESS;
 			}
 		}
@@ -1985,7 +1977,7 @@
 	if (esp->current_SC) {
 		if (don)
 			ESP_INTSON(esp->dregs);
-		spin_unlock_irqrestore(&esp->lock, flags);
+		spin_unlock_irqrestore(&esp->ehost->host_lock, flags);
 		return SCSI_ABORT_BUSY;
 	}
 
@@ -1998,7 +1990,7 @@
 
 	if (don)
 		ESP_INTSON(esp->dregs);
-	spin_unlock_irqrestore(&esp->lock, flags);
+	spin_unlock_irqrestore(&esp->ehost->host_lock, flags);
 	return SCSI_ABORT_SNOOZE;
 }
 
@@ -2014,16 +2006,11 @@
 	/* Clean up currently executing command, if any. */
 	if (sp != NULL) {
 		esp->current_SC = NULL;
-		spin_unlock(&esp->lock);
 
 		esp_release_dmabufs(esp, sp);
 		sp->result = (DID_RESET << 16);
 
-		spin_lock(&io_request_lock);
 		sp->scsi_done(sp);
-		spin_unlock(&io_request_lock);
-
-		spin_lock(&esp->lock);
 	}
 
 	/* Clean up disconnected queue, they have been invalidated
@@ -2031,16 +2018,10 @@
 	 */
 	if (esp->disconnected_SC) {
 		while ((sp = remove_first_SC(&esp->disconnected_SC)) != NULL) {
-			spin_unlock(&esp->lock);
-
 			esp_release_dmabufs(esp, sp);
 			sp->result = (DID_RESET << 16);
 
-			spin_lock(&io_request_lock);
 			sp->scsi_done(sp);
-			spin_unlock(&io_request_lock);
-
-			spin_lock(&esp->lock);
 		}
 	}
 
@@ -2071,9 +2052,9 @@
 	struct esp *esp = (struct esp *) SCptr->host->hostdata;
 	unsigned long flags;
 
-	spin_lock_irqsave(&esp->lock, flags);
+	spin_lock_irqsave(&esp->ehost->host_lock, flags);
 	(void) esp_do_resetbus(esp);
-	spin_unlock_irqrestore(&esp->lock, flags);
+	spin_unlock_irqrestore(&esp->ehost->host_lock, flags);
 
 	return SCSI_RESET_PENDING;
 }
@@ -2085,16 +2066,12 @@
 
 	esp->current_SC = NULL;
 
-	spin_unlock(&esp->lock);
 	esp_release_dmabufs(esp, done_SC);
 	done_SC->result = error;
 
-	spin_lock(&io_request_lock);
 	done_SC->scsi_done(done_SC);
-	spin_unlock(&io_request_lock);
 
 	/* Bus is free, issue any commands in the queue. */
-	spin_lock(&esp->lock);
 	if (esp->issue_SC && !esp->current_SC)
 		esp_exec_cmd(esp);
 
@@ -4344,7 +4321,7 @@
 	struct esp *esp = dev_id;
 	unsigned long flags;
 
-	spin_lock_irqsave(&esp->lock, flags);
+	spin_lock_irqsave(&esp->ehost->host_lock, flags);
 	if (ESP_IRQ_P(esp->dregs)) {
 		ESP_INTSOFF(esp->dregs);
 
@@ -4354,7 +4331,7 @@
 
 		ESP_INTSON(esp->dregs);
 	}
-	spin_unlock_irqrestore(&esp->lock, flags);
+	spin_unlock_irqrestore(&esp->ehost->host_lock, flags);
 }
 
 int esp_revoke(Scsi_Device* SDptr)
diff --git a/drivers/scsi/esp.h b/drivers/scsi/esp.h
index 0cc5e37..70f1a7c 100644
--- a/drivers/scsi/esp.h
+++ b/drivers/scsi/esp.h
@@ -1,4 +1,4 @@
-/* $Id: esp.h,v 1.28 2000/03/30 01:33:17 davem Exp $
+/* $Id: esp.h,v 1.29 2001/12/11 04:55:47 davem Exp $
  * esp.h:  Defines and structures for the Sparc ESP (Enhanced SCSI
  *         Processor) driver under Linux.
  *
@@ -64,7 +64,6 @@
 
 /* We get one of these for each ESP probed. */
 struct esp {
-	spinlock_t		lock;
 	unsigned long		eregs;		/* ESP controller registers */
 	unsigned long		dregs;		/* DMA controller registers */
 	struct sbus_dma		*dma;		/* DMA controller sw state */
@@ -416,6 +415,7 @@
 		sg_tablesize:   SG_ALL,				\
 		cmd_per_lun:    1,				\
 		use_clustering: ENABLE_CLUSTERING,		\
+		highmem_io:	1,				\
 }
 
 /* For our interrupt engine. */
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index a33868d..cf32a8a 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -130,7 +130,8 @@
  * pain to reverse this, so we try to avoid it 
  */
 extern int blk_nohighio;
-struct Scsi_Host * scsi_register(Scsi_Host_Template * tpnt, int j){
+struct Scsi_Host * scsi_register(Scsi_Host_Template * tpnt, int j)
+{
     struct Scsi_Host * retval, *shpnt, *o_shp;
     Scsi_Host_Name *shn, *shn2;
     int flag_new = 1;
diff --git a/drivers/scsi/hosts.h b/drivers/scsi/hosts.h
index 08f3ea2..9045cc4 100644
--- a/drivers/scsi/hosts.h
+++ b/drivers/scsi/hosts.h
@@ -334,7 +334,6 @@
     int resetting; /* if set, it means that last_reset is a valid value */
     unsigned long last_reset;
 
-
     /*
      *	These three parameters can be used to allow for wide scsi,
      *	and for host adapters that support multiple busses
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 263e182..ce2e2e0 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -22,7 +22,7 @@
 **  This driver has been ported to Linux from the FreeBSD NCR53C8XX driver
 **  and is currently maintained by
 **
-**          Gerard Roudier              <groudier@club-internet.fr>
+**          Gerard Roudier              <groudier@free.fr>
 **
 **  Being given that this driver originates from the FreeBSD version, and
 **  in order to keep synergy on both, any suggested enhancements and corrections
@@ -63,7 +63,7 @@
 **  August 18 1997 by Cort <cort@cs.nmt.edu>:
 **     Support for Power/PC (Big Endian).
 **
-**  June 20 1998 by Gerard Roudier <groudier@club-internet.fr>:
+**  June 20 1998 by Gerard Roudier
 **     Support for up to 64 tags per lun.
 **     O(1) everywhere (C and SCRIPTS) for normal cases.
 **     Low PCI traffic for command handling when on-chip RAM is present.
@@ -8127,10 +8127,14 @@
 			segment = 1;
 		}
 	}
-	else if (use_sg <= MAX_SCATTER) {
+	else {
 		struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
 
 		use_sg = map_scsi_sg_data(np, cmd);
+		if (use_sg > MAX_SCATTER) {
+			unmap_scsi_data(np, cmd);
+			return -1;
+		}
 		data = &data[MAX_SCATTER - use_sg];
 
 		while (segment < use_sg) {
@@ -8143,9 +8147,6 @@
 			++segment;
 		}
 	}
-	else {
-		return -1;
-	}
 
 	return segment;
 }
diff --git a/drivers/scsi/ncr53c8xx.h b/drivers/scsi/ncr53c8xx.h
index ac3f3b9..ac4e795 100644
--- a/drivers/scsi/ncr53c8xx.h
+++ b/drivers/scsi/ncr53c8xx.h
@@ -22,7 +22,7 @@
 **  This driver has been ported to Linux from the FreeBSD NCR53C8XX driver
 **  and is currently maintained by
 **
-**          Gerard Roudier              <groudier@club-internet.fr>
+**          Gerard Roudier              <groudier@free.fr>
 **
 **  Being given that this driver originates from the FreeBSD version, and
 **  in order to keep synergy on both, any suggested enhancements and corrections
diff --git a/drivers/scsi/qlogicfc.c b/drivers/scsi/qlogicfc.c
index efe6d60..0a67952 100644
--- a/drivers/scsi/qlogicfc.c
+++ b/drivers/scsi/qlogicfc.c
@@ -1375,7 +1375,7 @@
 	hostdata->explore_timer.data = 0;
 	del_timer(&hostdata->explore_timer);
 
-	spin_lock_irqsave(&io_request_lock, flags);
+	spin_lock_irqsave(&host->host_lock, flags);
 
 	if (hostdata->adapter_state & AS_REDO_FABRIC_PORTDB || hostdata->adapter_state & AS_REDO_LOOP_PORTDB) {
 		isp2x00_make_portdb(host);
@@ -1422,7 +1422,7 @@
 		hostdata->adapter_state = AS_LOOP_GOOD;
 	}
 
-	spin_unlock_irqrestore(&io_request_lock, flags);
+	spin_unlock_irqrestore(&host->host_lock, flags);
 
 }
 
@@ -1430,11 +1430,12 @@
 
 void do_isp2x00_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
 {
+	struct Scsi_Host *host = dev_id;
 	unsigned long flags;
 
-	spin_lock_irqsave(&io_request_lock, flags);
+	spin_lock_irqsave(&host->host_lock, flags);
 	isp2x00_intr_handler(irq, dev_id, regs);
-	spin_unlock_irqrestore(&io_request_lock, flags);
+	spin_unlock_irqrestore(&host->host_lock, flags);
 }
 
 void isp2x00_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
diff --git a/drivers/scsi/qlogicisp.c b/drivers/scsi/qlogicisp.c
index 6677a0f..bc76121 100644
--- a/drivers/scsi/qlogicisp.c
+++ b/drivers/scsi/qlogicisp.c
@@ -970,11 +970,12 @@
 
 void do_isp1020_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
 {
+	struct Scsi_Host *host = dev_id;
 	unsigned long flags;
 
-	spin_lock_irqsave(&io_request_lock, flags);
+	spin_lock_irqsave(&host->host_lock, flags);
 	isp1020_intr_handler(irq, dev_id, regs);
-	spin_unlock_irqrestore(&io_request_lock, flags);
+	spin_unlock_irqrestore(&host->host_lock, flags);
 }
 
 void isp1020_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 8561843..1b8acd2 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1445,7 +1445,7 @@
 	spin_unlock(&qpti->lock);
 
 	if (dq != NULL) {
-		spin_lock(&io_request_lock);
+		spin_lock(&qpti->qhost->host_lock);
 		do {
 			Scsi_Cmnd *next;
 
@@ -1453,7 +1453,7 @@
 			dq->scsi_done(dq);
 			dq = next;
 		} while (dq != NULL);
-		spin_unlock(&io_request_lock);
+		spin_unlock(&qpti->qhost->host_lock);
 	}
 	__restore_flags(flags);
 }
diff --git a/drivers/scsi/qlogicpti.h b/drivers/scsi/qlogicpti.h
index aad9347..6c49ea1 100644
--- a/drivers/scsi/qlogicpti.h
+++ b/drivers/scsi/qlogicpti.h
@@ -524,6 +524,7 @@
 	sg_tablesize:	QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN), \
 	cmd_per_lun:	1,					   \
 	use_clustering:	ENABLE_CLUSTERING,			   \
+	highmem_io:	1,			   		   \
 }
 
 /* For our interrupt engine. */
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 656766c..98a4780 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -55,6 +55,7 @@
 #include <linux/init.h>
 #include <linux/smp_lock.h>
 #include <linux/completion.h>
+#include <linux/mempool.h>
 
 #define __KERNEL_SYSCALLS__
 
@@ -83,6 +84,18 @@
 static void scsi_dump_status(int level);
 #endif
 
+#define SG_MEMPOOL_NR		5
+#define SG_MEMPOOL_SIZE		32
+
+struct scsi_host_sg_pool {
+	int size;
+	kmem_cache_t *slab;
+	mempool_t *pool;
+};
+
+static const int scsi_host_sg_pool_sizes[SG_MEMPOOL_NR] = { 8, 16, 32, 64, MAX_PHYS_SEGMENTS };
+struct scsi_host_sg_pool scsi_sg_pools[SG_MEMPOOL_NR];
+
 /*
    static const char RCSid[] = "$Header: /vger/u4/cvs/linux/drivers/scsi/scsi.c,v 1.38 1997/01/19 23:07:18 davem Exp $";
  */
@@ -181,23 +194,22 @@
 void  scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt)
 {
 	request_queue_t *q = &SDpnt->request_queue;
-	int max_segments = SHpnt->sg_tablesize;
 
 	blk_init_queue(q, scsi_request_fn, &SHpnt->host_lock);
 	q->queuedata = (void *) SDpnt;
 
-#ifdef DMA_CHUNK_SIZE
-	if (max_segments > 64)
-		max_segments = 64;
-#endif
+	/* Hardware imposed limit. */
+	blk_queue_max_hw_segments(q, SHpnt->sg_tablesize);
 
-	blk_queue_max_segments(q, max_segments);
+	/*
+	 * When we remove scsi_malloc soonish, this can die too
+	 */
+	blk_queue_max_phys_segments(q, PAGE_SIZE / sizeof(struct scatterlist));
+
 	blk_queue_max_sectors(q, SHpnt->max_sectors);
 
 	if (!SHpnt->use_clustering)
 		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
-	if (SHpnt->unchecked_isa_dma)
-		blk_queue_segment_boundary(q, ISA_DMA_THRESHOLD);
 }
 
 #ifdef MODULE
@@ -1955,13 +1967,6 @@
 				}
 		}
 
-		/*
-		 * Now that we have all of the devices, resize the DMA pool,
-		 * as required.  */
-		if (!out_of_space)
-			scsi_resize_dma_pool();
-
-
 		/* This does any final handling that is required. */
 		for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
 			if (sdtpnt->finish && sdtpnt->nr_dev) {
@@ -2160,14 +2165,6 @@
 		tpnt->present--;
 	}
 
-	/*
-	 * If there are absolutely no more hosts left, it is safe
-	 * to completely nuke the DMA pool.  The resize operation will
-	 * do the right thing and free everything.
-	 */
-	if (!scsi_hosts)
-		scsi_resize_dma_pool();
-
 	if (pcount0 != next_scsi_host)
 		printk(KERN_INFO "scsi : %d host%s left.\n", next_scsi_host,
 		       (next_scsi_host == 1) ? "" : "s");
@@ -2268,8 +2265,6 @@
 	 */
 	if (tpnt->finish && tpnt->nr_dev)
 		(*tpnt->finish) ();
-	if (!out_of_space)
-		scsi_resize_dma_pool();
 	MOD_INC_USE_COUNT;
 
 	if (out_of_space) {
@@ -2535,16 +2530,81 @@
 __setup("scsihosts=", scsi_setup);
 #endif
 
+static void *scsi_pool_alloc(int gfp_mask, void *data)
+{
+	return kmem_cache_alloc(data, gfp_mask);
+}
+
+static void scsi_pool_free(void *ptr, void *data)
+{
+	kmem_cache_free(data, ptr);
+}
+
+struct scatterlist *scsi_alloc_sgtable(Scsi_Cmnd *SCpnt, int gfp_mask)
+{
+	struct scsi_host_sg_pool *sgp;
+	struct scatterlist *sgl;
+
+	BUG_ON(!SCpnt->use_sg);
+
+	switch (SCpnt->use_sg) {
+		case 1 ... 8			: SCpnt->sglist_len = 0; break;
+		case 9 ... 16			: SCpnt->sglist_len = 1; break;
+		case 17 ... 32			: SCpnt->sglist_len = 2; break;
+		case 33 ... 64			: SCpnt->sglist_len = 3; break;
+		case 65 ... MAX_PHYS_SEGMENTS	: SCpnt->sglist_len = 4; break;
+		default: return NULL;
+	}
+
+	sgp = scsi_sg_pools + SCpnt->sglist_len;
+
+	sgl = mempool_alloc(sgp->pool, gfp_mask);
+	if (sgl) {
+		memset(sgl, 0, sgp->size);
+		return sgl;
+	}
+
+	return sgl;
+}
+
+void scsi_free_sgtable(struct scatterlist *sgl, int index)
+{
+	struct scsi_host_sg_pool *sgp = scsi_sg_pools + index;
+
+	if (unlikely(index > SG_MEMPOOL_NR)) {
+		printk("scsi_free_sgtable: mempool %d\n", index);
+		BUG();
+	}
+
+	mempool_free(sgl, sgp->pool);
+}
+
 static int __init init_scsi(void)
 {
 	struct proc_dir_entry *generic;
+	char name[16];
+	int i;
 
 	printk(KERN_INFO "SCSI subsystem driver " REVISION "\n");
 
-        if( scsi_init_minimal_dma_pool() != 0 )
-        {
-                return 1;
-        }
+	/*
+	 * setup sg memory pools
+	 */
+	for (i = 0; i < SG_MEMPOOL_NR; i++) {
+		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
+		int size = scsi_host_sg_pool_sizes[i] * sizeof(struct scatterlist);
+
+		snprintf(name, sizeof(name) - 1, "sgpool-%d", scsi_host_sg_pool_sizes[i]);
+		sgp->slab = kmem_cache_create(name, size, 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+		if (!sgp->slab)
+			panic("SCSI: can't init sg slab\n");
+
+		sgp->pool = mempool_create(SG_MEMPOOL_SIZE, scsi_pool_alloc, scsi_pool_free, sgp->slab);
+		if (!sgp->pool)
+			panic("SCSI: can't init sg mempool\n");
+
+		sgp->size = size;
+	}
 
 	/*
 	 * This makes /proc/scsi and /proc/scsi/scsi visible.
@@ -2580,6 +2640,7 @@
 static void __exit exit_scsi(void)
 {
 	Scsi_Host_Name *shn, *shn2 = NULL;
+	int i;
 
 	remove_bh(SCSI_BH);
 
@@ -2600,11 +2661,13 @@
 	remove_proc_entry ("scsi", 0);
 #endif
 	
-	/*
-	 * Free up the DMA pool.
-	 */
-	scsi_resize_dma_pool();
-
+	for (i = 0; i < SG_MEMPOOL_NR; i++) {
+		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
+		mempool_destroy(sgp->pool);
+		kmem_cache_destroy(sgp->slab);
+		sgp->pool = NULL;
+		sgp->slab = NULL;
+	}
 }
 
 module_init(init_scsi);
diff --git a/drivers/scsi/scsi.h b/drivers/scsi/scsi.h
index 3e6b1c3..b8ad3f4 100644
--- a/drivers/scsi/scsi.h
+++ b/drivers/scsi/scsi.h
@@ -439,6 +439,12 @@
                     unsigned int *secs);
 
 /*
+ * sg list allocations
+ */
+struct scatterlist *scsi_alloc_sgtable(Scsi_Cmnd *SCpnt, int gfp_mask);
+void scsi_free_sgtable(struct scatterlist *sgl, int index);
+
+/*
  * Prototypes for functions in scsi_dma.c
  */
 void scsi_resize_dma_pool(void);
@@ -449,8 +455,8 @@
 /*
  * Prototypes for functions in scsi_merge.c
  */
-extern void recount_segments(Scsi_Cmnd * SCpnt);
-extern void initialize_merge_fn(Scsi_Device * SDpnt);
+extern void scsi_initialize_merge_fn(Scsi_Device *SDpnt);
+extern int scsi_init_io(Scsi_Cmnd *SCpnt);
 
 /*
  * Prototypes for functions in scsi_queue.c
@@ -555,8 +561,6 @@
 	request_queue_t request_queue;
         atomic_t                device_active; /* commands checked out for device */
 	volatile unsigned short device_busy;	/* commands actually active on low-level */
-	int (*scsi_init_io_fn) (Scsi_Cmnd *);	/* Used to initialize
-						   new request */
 	Scsi_Cmnd *device_queue;	/* queue of SCSI Command structures */
 
 /* public: */
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 9b947ee..fda3c65 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -182,7 +182,6 @@
 	};
 	printk("\n");
 #endif
-	printk("DMA free %d sectors.\n", scsi_dma_free_sectors);
 }
 
 int scsi_debug_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
@@ -653,7 +652,6 @@
 
 int scsi_debug_biosparam(Disk * disk, kdev_t dev, int *info)
 {
-	int size = disk->capacity;
 	info[0] = N_HEAD;
 	info[1] = N_SECTOR;
 	info[2] = N_CYLINDER;
diff --git a/drivers/scsi/scsi_dma.c b/drivers/scsi/scsi_dma.c
deleted file mode 100644
index 3de8351..0000000
--- a/drivers/scsi/scsi_dma.c
+++ /dev/null
@@ -1,450 +0,0 @@
-/*
- *  scsi_dma.c Copyright (C) 2000 Eric Youngdale
- *
- *  mid-level SCSI DMA bounce buffer allocator
- *
- */
-
-#define __NO_VERSION__
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/blk.h>
-
-
-#include "scsi.h"
-#include "hosts.h"
-#include "constants.h"
-
-#ifdef CONFIG_KMOD
-#include <linux/kmod.h>
-#endif
-
-/*
- * PAGE_SIZE must be a multiple of the sector size (512).  True
- * for all reasonably recent architectures (even the VAX...).
- */
-#define SECTOR_SIZE		512
-#define SECTORS_PER_PAGE	(PAGE_SIZE/SECTOR_SIZE)
-
-#if SECTORS_PER_PAGE <= 8
-typedef unsigned char FreeSectorBitmap;
-#elif SECTORS_PER_PAGE <= 32
-typedef unsigned int FreeSectorBitmap;
-#else
-#error You lose.
-#endif
-
-/*
- * Used for access to internal allocator used for DMA safe buffers.
- */
-static spinlock_t allocator_request_lock = SPIN_LOCK_UNLOCKED;
-
-static FreeSectorBitmap *dma_malloc_freelist = NULL;
-static int need_isa_bounce_buffers;
-static unsigned int dma_sectors = 0;
-unsigned int scsi_dma_free_sectors = 0;
-unsigned int scsi_need_isa_buffer = 0;
-static unsigned char **dma_malloc_pages = NULL;
-
-/*
- * Function:    scsi_malloc
- *
- * Purpose:     Allocate memory from the DMA-safe pool.
- *
- * Arguments:   len       - amount of memory we need.
- *
- * Lock status: No locks assumed to be held.  This function is SMP-safe.
- *
- * Returns:     Pointer to memory block.
- *
- * Notes:       Prior to the new queue code, this function was not SMP-safe.
- *              This function can only allocate in units of sectors
- *              (i.e. 512 bytes).
- *
- *              We cannot use the normal system allocator becuase we need
- *              to be able to guarantee that we can process a complete disk
- *              I/O request without touching the system allocator.  Think
- *              about it - if the system were heavily swapping, and tried to
- *              write out a block of memory to disk, and the SCSI code needed
- *              to allocate more memory in order to be able to write the
- *              data to disk, you would wedge the system.
- */
-void *scsi_malloc(unsigned int len)
-{
-	unsigned int nbits, mask;
-	unsigned long flags;
-
-	int i, j;
-	if (len % SECTOR_SIZE != 0 || len > PAGE_SIZE)
-		return NULL;
-
-	nbits = len >> 9;
-	mask = (1 << nbits) - 1;
-
-	spin_lock_irqsave(&allocator_request_lock, flags);
-
-	for (i = 0; i < dma_sectors / SECTORS_PER_PAGE; i++)
-		for (j = 0; j <= SECTORS_PER_PAGE - nbits; j++) {
-			if ((dma_malloc_freelist[i] & (mask << j)) == 0) {
-				dma_malloc_freelist[i] |= (mask << j);
-				scsi_dma_free_sectors -= nbits;
-#ifdef DEBUG
-				SCSI_LOG_MLQUEUE(3, printk("SMalloc: %d %p [From:%p]\n", len, dma_malloc_pages[i] + (j << 9)));
-				printk("SMalloc: %d %p [From:%p]\n", len, dma_malloc_pages[i] + (j << 9));
-#endif
-				spin_unlock_irqrestore(&allocator_request_lock, flags);
-				return (void *) ((unsigned long) dma_malloc_pages[i] + (j << 9));
-			}
-		}
-	spin_unlock_irqrestore(&allocator_request_lock, flags);
-	return NULL;		/* Nope.  No more */
-}
-
-/*
- * Function:    scsi_free
- *
- * Purpose:     Free memory into the DMA-safe pool.
- *
- * Arguments:   ptr       - data block we are freeing.
- *              len       - size of block we are freeing.
- *
- * Lock status: No locks assumed to be held.  This function is SMP-safe.
- *
- * Returns:     Nothing
- *
- * Notes:       This function *must* only be used to free memory
- *              allocated from scsi_malloc().
- *
- *              Prior to the new queue code, this function was not SMP-safe.
- *              This function can only allocate in units of sectors
- *              (i.e. 512 bytes).
- */
-int scsi_free(void *obj, unsigned int len)
-{
-	unsigned int page, sector, nbits, mask;
-	unsigned long flags;
-
-#ifdef DEBUG
-	unsigned long ret = 0;
-
-#ifdef __mips__
-	__asm__ __volatile__("move\t%0,$31":"=r"(ret));
-#else
-	ret = __builtin_return_address(0);
-#endif
-	printk("scsi_free %p %d\n", obj, len);
-	SCSI_LOG_MLQUEUE(3, printk("SFree: %p %d\n", obj, len));
-#endif
-
-	spin_lock_irqsave(&allocator_request_lock, flags);
-
-	for (page = 0; page < dma_sectors / SECTORS_PER_PAGE; page++) {
-		unsigned long page_addr = (unsigned long) dma_malloc_pages[page];
-		if ((unsigned long) obj >= page_addr &&
-		    (unsigned long) obj < page_addr + PAGE_SIZE) {
-			sector = (((unsigned long) obj) - page_addr) >> 9;
-
-			nbits = len >> 9;
-			mask = (1 << nbits) - 1;
-
-			if (sector + nbits > SECTORS_PER_PAGE)
-				panic("scsi_free:Bad memory alignment");
-
-			if ((dma_malloc_freelist[page] &
-			     (mask << sector)) != (mask << sector)) {
-#ifdef DEBUG
-				printk("scsi_free(obj=%p, len=%d) called from %08lx\n",
-				       obj, len, ret);
-#endif
-				panic("scsi_free:Trying to free unused memory");
-			}
-			scsi_dma_free_sectors += nbits;
-			dma_malloc_freelist[page] &= ~(mask << sector);
-			spin_unlock_irqrestore(&allocator_request_lock, flags);
-			return 0;
-		}
-	}
-	panic("scsi_free:Bad offset");
-}
-
-
-/*
- * Function:    scsi_resize_dma_pool
- *
- * Purpose:     Ensure that the DMA pool is sufficiently large to be
- *              able to guarantee that we can always process I/O requests
- *              without calling the system allocator.
- *
- * Arguments:   None.
- *
- * Lock status: No locks assumed to be held.  This function is SMP-safe.
- *
- * Returns:     Nothing
- *
- * Notes:       Prior to the new queue code, this function was not SMP-safe.
- *              Go through the device list and recompute the most appropriate
- *              size for the dma pool.  Then grab more memory (as required).
- */
-void scsi_resize_dma_pool(void)
-{
-	int i, k;
-	unsigned long size;
-	unsigned long flags;
-	struct Scsi_Host *shpnt;
-	struct Scsi_Host *host = NULL;
-	Scsi_Device *SDpnt;
-	FreeSectorBitmap *new_dma_malloc_freelist = NULL;
-	unsigned int new_dma_sectors = 0;
-	unsigned int new_need_isa_buffer = 0;
-	unsigned char **new_dma_malloc_pages = NULL;
-	int out_of_space = 0;
-
-	spin_lock_irqsave(&allocator_request_lock, flags);
-
-	if (!scsi_hostlist) {
-		/*
-		 * Free up the DMA pool.
-		 */
-		if (scsi_dma_free_sectors != dma_sectors)
-			panic("SCSI DMA pool memory leak %d %d\n", scsi_dma_free_sectors, dma_sectors);
-
-		for (i = 0; i < dma_sectors / SECTORS_PER_PAGE; i++)
-			free_pages((unsigned long) dma_malloc_pages[i], 0);
-		if (dma_malloc_pages)
-			kfree((char *) dma_malloc_pages);
-		dma_malloc_pages = NULL;
-		if (dma_malloc_freelist)
-			kfree((char *) dma_malloc_freelist);
-		dma_malloc_freelist = NULL;
-		dma_sectors = 0;
-		scsi_dma_free_sectors = 0;
-		spin_unlock_irqrestore(&allocator_request_lock, flags);
-		return;
-	}
-	/* Next, check to see if we need to extend the DMA buffer pool */
-
-	new_dma_sectors = 2 * SECTORS_PER_PAGE;		/* Base value we use */
-
-	if (__pa(high_memory) - 1 > ISA_DMA_THRESHOLD)
-		need_isa_bounce_buffers = 1;
-	else
-		need_isa_bounce_buffers = 0;
-
-	if (scsi_devicelist)
-		for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
-			new_dma_sectors += SECTORS_PER_PAGE;	/* Increment for each host */
-
-	for (host = scsi_hostlist; host; host = host->next) {
-		for (SDpnt = host->host_queue; SDpnt; SDpnt = SDpnt->next) {
-			/*
-			 * sd and sr drivers allocate scatterlists.
-			 * sr drivers may allocate for each command 1x2048 or 2x1024 extra
-			 * buffers for 2k sector size and 1k fs.
-			 * sg driver allocates buffers < 4k.
-			 * st driver does not need buffers from the dma pool.
-			 * estimate 4k buffer/command for devices of unknown type (should panic).
-			 */
-			if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM ||
-			    SDpnt->type == TYPE_DISK || SDpnt->type == TYPE_MOD) {
-				int nents = host->sg_tablesize;
-#ifdef DMA_CHUNK_SIZE
-				/* If the architecture does DMA sg merging, make sure
-				   we count with at least 64 entries even for HBAs
-				   which handle very few sg entries.  */
-				if (nents < 64) nents = 64;
-#endif
-				new_dma_sectors += ((nents *
-				sizeof(struct scatterlist) + 511) >> 9) *
-				 SDpnt->queue_depth;
-				if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM)
-					new_dma_sectors += (2048 >> 9) * SDpnt->queue_depth;
-			} else if (SDpnt->type == TYPE_SCANNER ||
-				   SDpnt->type == TYPE_PROCESSOR ||
-				   SDpnt->type == TYPE_COMM ||
-				   SDpnt->type == TYPE_MEDIUM_CHANGER ||
-				   SDpnt->type == TYPE_ENCLOSURE) {
-				new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth;
-			} else {
-				if (SDpnt->type != TYPE_TAPE) {
-					printk("resize_dma_pool: unknown device type %d\n", SDpnt->type);
-					new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth;
-				}
-			}
-
-			if (host->unchecked_isa_dma &&
-			    need_isa_bounce_buffers &&
-			    SDpnt->type != TYPE_TAPE) {
-				new_dma_sectors += (PAGE_SIZE >> 9) * host->sg_tablesize *
-				    SDpnt->queue_depth;
-				new_need_isa_buffer++;
-			}
-		}
-	}
-
-#ifdef DEBUG_INIT
-	printk("resize_dma_pool: needed dma sectors = %d\n", new_dma_sectors);
-#endif
-
-	/* limit DMA memory to 32MB: */
-	new_dma_sectors = (new_dma_sectors + 15) & 0xfff0;
-
-	/*
-	 * We never shrink the buffers - this leads to
-	 * race conditions that I would rather not even think
-	 * about right now.
-	 */
-#if 0				/* Why do this? No gain and risks out_of_space */
-	if (new_dma_sectors < dma_sectors)
-		new_dma_sectors = dma_sectors;
-#endif
-	if (new_dma_sectors <= dma_sectors) {
-		spin_unlock_irqrestore(&allocator_request_lock, flags);
-		return;		/* best to quit while we are in front */
-        }
-
-	for (k = 0; k < 20; ++k) {	/* just in case */
-		out_of_space = 0;
-		size = (new_dma_sectors / SECTORS_PER_PAGE) *
-		    sizeof(FreeSectorBitmap);
-		new_dma_malloc_freelist = (FreeSectorBitmap *)
-		    kmalloc(size, GFP_ATOMIC);
-		if (new_dma_malloc_freelist) {
-                        memset(new_dma_malloc_freelist, 0, size);
-			size = (new_dma_sectors / SECTORS_PER_PAGE) *
-			    sizeof(*new_dma_malloc_pages);
-			new_dma_malloc_pages = (unsigned char **)
-			    kmalloc(size, GFP_ATOMIC);
-			if (!new_dma_malloc_pages) {
-				size = (new_dma_sectors / SECTORS_PER_PAGE) *
-				    sizeof(FreeSectorBitmap);
-				kfree((char *) new_dma_malloc_freelist);
-				out_of_space = 1;
-			} else {
-                                memset(new_dma_malloc_pages, 0, size);
-                        }
-		} else
-			out_of_space = 1;
-
-		if ((!out_of_space) && (new_dma_sectors > dma_sectors)) {
-			for (i = dma_sectors / SECTORS_PER_PAGE;
-			   i < new_dma_sectors / SECTORS_PER_PAGE; i++) {
-				new_dma_malloc_pages[i] = (unsigned char *)
-				    __get_free_pages(GFP_ATOMIC | GFP_DMA, 0);
-				if (!new_dma_malloc_pages[i])
-					break;
-			}
-			if (i != new_dma_sectors / SECTORS_PER_PAGE) {	/* clean up */
-				int k = i;
-
-				out_of_space = 1;
-				for (i = 0; i < k; ++i)
-					free_pages((unsigned long) new_dma_malloc_pages[i], 0);
-			}
-		}
-		if (out_of_space) {	/* try scaling down new_dma_sectors request */
-			printk("scsi::resize_dma_pool: WARNING, dma_sectors=%u, "
-			       "wanted=%u, scaling\n", dma_sectors, new_dma_sectors);
-			if (new_dma_sectors < (8 * SECTORS_PER_PAGE))
-				break;	/* pretty well hopeless ... */
-			new_dma_sectors = (new_dma_sectors * 3) / 4;
-			new_dma_sectors = (new_dma_sectors + 15) & 0xfff0;
-			if (new_dma_sectors <= dma_sectors)
-				break;	/* stick with what we have got */
-		} else
-			break;	/* found space ... */
-	}			/* end of for loop */
-	if (out_of_space) {
-		spin_unlock_irqrestore(&allocator_request_lock, flags);
-		scsi_need_isa_buffer = new_need_isa_buffer;	/* some useful info */
-		printk("      WARNING, not enough memory, pool not expanded\n");
-		return;
-	}
-	/* When we dick with the actual DMA list, we need to
-	 * protect things
-	 */
-	if (dma_malloc_freelist) {
-		size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(FreeSectorBitmap);
-		memcpy(new_dma_malloc_freelist, dma_malloc_freelist, size);
-		kfree((char *) dma_malloc_freelist);
-	}
-	dma_malloc_freelist = new_dma_malloc_freelist;
-
-	if (dma_malloc_pages) {
-		size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(*dma_malloc_pages);
-		memcpy(new_dma_malloc_pages, dma_malloc_pages, size);
-		kfree((char *) dma_malloc_pages);
-	}
-	scsi_dma_free_sectors += new_dma_sectors - dma_sectors;
-	dma_malloc_pages = new_dma_malloc_pages;
-	dma_sectors = new_dma_sectors;
-	scsi_need_isa_buffer = new_need_isa_buffer;
-
-	spin_unlock_irqrestore(&allocator_request_lock, flags);
-
-#ifdef DEBUG_INIT
-	printk("resize_dma_pool: dma free sectors   = %d\n", scsi_dma_free_sectors);
-	printk("resize_dma_pool: dma sectors        = %d\n", dma_sectors);
-	printk("resize_dma_pool: need isa buffers   = %d\n", scsi_need_isa_buffer);
-#endif
-}
-
-/*
- * Function:    scsi_init_minimal_dma_pool
- *
- * Purpose:     Allocate a minimal (1-page) DMA pool.
- *
- * Arguments:   None.
- *
- * Lock status: No locks assumed to be held.  This function is SMP-safe.
- *
- * Returns:     Nothing
- *
- * Notes:       
- */
-int scsi_init_minimal_dma_pool(void)
-{
-	unsigned long size;
-	unsigned long flags;
-	int has_space = 0;
-
-	spin_lock_irqsave(&allocator_request_lock, flags);
-
-	dma_sectors = PAGE_SIZE / SECTOR_SIZE;
-	scsi_dma_free_sectors = dma_sectors;
-	/*
-	 * Set up a minimal DMA buffer list - this will be used during scan_scsis
-	 * in some cases.
-	 */
-
-	/* One bit per sector to indicate free/busy */
-	size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(FreeSectorBitmap);
-	dma_malloc_freelist = (FreeSectorBitmap *)
-	    kmalloc(size, GFP_ATOMIC);
-	if (dma_malloc_freelist) {
-                memset(dma_malloc_freelist, 0, size);
-		/* One pointer per page for the page list */
-		dma_malloc_pages = (unsigned char **) kmalloc(
-                        (dma_sectors / SECTORS_PER_PAGE) * sizeof(*dma_malloc_pages),
-							     GFP_ATOMIC);
-		if (dma_malloc_pages) {
-                        memset(dma_malloc_pages, 0, size);
-			dma_malloc_pages[0] = (unsigned char *)
-			    __get_free_pages(GFP_ATOMIC | GFP_DMA, 0);
-			if (dma_malloc_pages[0])
-				has_space = 1;
-		}
-	}
-	if (!has_space) {
-		if (dma_malloc_freelist) {
-			kfree((char *) dma_malloc_freelist);
-			if (dma_malloc_pages)
-				kfree((char *) dma_malloc_pages);
-		}
-		spin_unlock_irqrestore(&allocator_request_lock, flags);
-		printk("scsi::init_module: failed, out of memory\n");
-		return 1;
-	}
-
-	spin_unlock_irqrestore(&allocator_request_lock, flags);
-	return 0;
-}
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index dc4681c..f64d200 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -78,8 +78,7 @@
  * *(char *) ((int *) arg)[2] the actual command byte.   
  * 
  * Note that if more than MAX_BUF bytes are requested to be transferred,
- * the ioctl will fail with error EINVAL.  MAX_BUF can be increased in
- * the future by increasing the size that scsi_malloc will accept.
+ * the ioctl will fail with error EINVAL.
  * 
  * This size *does not* include the initial lengths that were passed.
  * 
@@ -197,10 +196,14 @@
 	unsigned int inlen, outlen, cmdlen;
 	unsigned int needed, buf_needed;
 	int timeout, retries, result;
-	int data_direction;
+	int data_direction, gfp_mask = GFP_KERNEL;
 
 	if (!sic)
 		return -EINVAL;
+
+	if (dev->host->unchecked_isa_dma)
+		gfp_mask |= GFP_DMA;
+
 	/*
 	 * Verify that we can read at least this much.
 	 */
@@ -232,7 +235,7 @@
 		buf_needed = (buf_needed + 511) & ~511;
 		if (buf_needed > MAX_BUF)
 			buf_needed = MAX_BUF;
-		buf = (char *) scsi_malloc(buf_needed);
+		buf = (char *) kmalloc(buf_needed, gfp_mask);
 		if (!buf)
 			return -ENOMEM;
 		memset(buf, 0, buf_needed);
@@ -341,7 +344,7 @@
 
 error:
 	if (buf)
-		scsi_free(buf, buf_needed);
+		kfree(buf);
 
 
 	return result;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d7cc000b..317f218 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -82,7 +82,7 @@
 	rq->special = data;
 	rq->q = NULL;
 	rq->bio = rq->biotail = NULL;
-	rq->nr_segments = 0;
+	rq->nr_phys_segments = 0;
 	rq->elevator_sequence = 0;
 
 	/*
@@ -461,13 +461,13 @@
 		if (bbpnt) {
 			for (i = 0; i < SCpnt->use_sg; i++) {
 				if (bbpnt[i])
-					scsi_free(sgpnt[i].address, sgpnt[i].length);
+					kfree(sgpnt[i].address);
 			}
 		}
-		scsi_free(SCpnt->request_buffer, SCpnt->sglist_len);
+		scsi_free_sgtable(SCpnt->request_buffer, SCpnt->sglist_len);
 	} else {
 		if (SCpnt->request_buffer != req->buffer)
-			scsi_free(SCpnt->request_buffer,SCpnt->request_bufflen);
+			kfree(SCpnt->request_buffer);
 	}
 
 	/*
@@ -541,11 +541,11 @@
 						       sgpnt[i].address,
 						       sgpnt[i].length);
 					}
-					scsi_free(sgpnt[i].address, sgpnt[i].length);
+					kfree(sgpnt[i].address);
 				}
 			}
 		}
-		scsi_free(SCpnt->buffer, SCpnt->sglist_len);
+		scsi_free_sgtable(SCpnt->buffer, SCpnt->sglist_len);
 	} else {
 		if (SCpnt->buffer != req->buffer) {
 			if (rq_data_dir(req) == READ) {
@@ -555,7 +555,7 @@
 				memcpy(to, SCpnt->buffer, SCpnt->bufflen);
 				bio_kunmap_irq(to, &flags);
 			}
-			scsi_free(SCpnt->buffer, SCpnt->bufflen);
+			kfree(SCpnt->buffer);
 		}
 	}
 
@@ -922,15 +922,6 @@
 			 */
 			if (req->special) {
 				SCpnt = (Scsi_Cmnd *) req->special;
-				/*
-				 * We need to recount the number of
-				 * scatter-gather segments here - the
-				 * normal case code assumes this to be
-				 * correct, as it would be a performance
-				 * loss to always recount.  Handling
-				 * errors is always unusual, of course.
-				 */
-				recount_segments(SCpnt);
 			} else {
 				SCpnt = scsi_allocate_device(SDpnt, FALSE, FALSE);
 			}
@@ -1003,7 +994,7 @@
 			 * required).  Hosts that need bounce buffers will also
 			 * get those allocated here.  
 			 */
-			if (!SDpnt->scsi_init_io_fn(SCpnt)) {
+			if (!scsi_init_io(SCpnt)) {
 				SCpnt = __scsi_end_request(SCpnt, 0, 
 							   SCpnt->request.nr_sectors, 0, 0);
 				if( SCpnt != NULL )
diff --git a/drivers/scsi/scsi_merge.c b/drivers/scsi/scsi_merge.c
index 89def7c..72ac525 100644
--- a/drivers/scsi/scsi_merge.c
+++ b/drivers/scsi/scsi_merge.c
@@ -11,26 +11,8 @@
 
 /*
  * This file contains queue management functions that are used by SCSI.
- * Typically this is used for several purposes.   First, we need to ensure
- * that commands do not grow so large that they cannot be handled all at
- * once by a host adapter.   The various flavors of merge functions included
- * here serve this purpose.
- *
- * Note that it would be quite trivial to allow the low-level driver the
- * flexibility to define it's own queue handling functions.  For the time
- * being, the hooks are not present.   Right now we are just using the
- * data in the host template as an indicator of how we should be handling
- * queues, and we select routines that are optimized for that purpose.
- *
- * Some hosts do not impose any restrictions on the size of a request.
- * In such cases none of the merge functions in this file are called,
- * and we allow ll_rw_blk to merge requests in the default manner.
- * This isn't guaranteed to be optimal, but it should be pretty darned
- * good.   If someone comes up with ideas of better ways of managing queues
- * to improve on the default behavior, then certainly fit it into this
- * scheme in whatever manner makes the most sense.   Please note that
- * since each device has it's own queue, we have considerable flexibility
- * in queue management.
+ * We need to ensure that commands do not grow so large that they cannot
+ * be handled all at once by a host adapter.
  */
 
 #define __NO_VERSION__
@@ -65,430 +47,28 @@
 #include <scsi/scsi_ioctl.h>
 
 /*
- * This means that bounce buffers cannot be allocated in chunks > PAGE_SIZE.
- * Ultimately we should get away from using a dedicated DMA bounce buffer
- * pool, and we should instead try and use kmalloc() instead.  If we can
- * eliminate this pool, then this restriction would no longer be needed.
- */
-#define DMA_SEGMENT_SIZE_LIMITED
-
-static void dma_exhausted(Scsi_Cmnd * SCpnt, int i)
-{
-	int jj;
-	struct scatterlist *sgpnt;
-	void **bbpnt;
-	int consumed = 0;
-
-	sgpnt = (struct scatterlist *) SCpnt->request_buffer;
-	bbpnt = SCpnt->bounce_buffers;
-
-	/*
-	 * Now print out a bunch of stats.  First, start with the request
-	 * size.
-	 */
-	printk("dma_free_sectors:%d\n", scsi_dma_free_sectors);
-	printk("use_sg:%d\ti:%d\n", SCpnt->use_sg, i);
-	printk("request_bufflen:%d\n", SCpnt->request_bufflen);
-	/*
-	 * Now dump the scatter-gather table, up to the point of failure.
-	 */
-	for(jj=0; jj < SCpnt->use_sg; jj++)
-	{
-		printk("[%d]\tlen:%d\taddr:%p\tbounce:%p\n",
-		       jj,
-		       sgpnt[jj].length,
-		       sgpnt[jj].address,
-		       (bbpnt ? bbpnt[jj] : NULL));
-		if (bbpnt && bbpnt[jj])
-			consumed += sgpnt[jj].length;
-	}
-	printk("Total %d sectors consumed\n", consumed);
-	panic("DMA pool exhausted");
-}
-
-/*
- * This entire source file deals with the new queueing code.
- */
-
-/*
- * Function:    __count_segments()
+ * Function:    scsi_init_io()
  *
- * Purpose:     Prototype for queue merge function.
- *
- * Arguments:   q       - Queue for which we are merging request.
- *              req     - request into which we wish to merge.
- *              dma_host - 1 if this host has ISA DMA issues (bus doesn't
- *                      expose all of the address lines, so that DMA cannot
- *                      be done from an arbitrary address).
- *		remainder - used to track the residual size of the last
- *			segment.  Comes in handy when we want to limit the 
- *			size of bounce buffer segments to PAGE_SIZE.
- *
- * Returns:     Count of the number of SG segments for the request.
- *
- * Lock status: 
- *
- * Notes:       This is only used for diagnostic purposes.
- */
-__inline static int __count_segments(struct request *req,
-				     int dma_host,
-				     int * remainder)
-{
-	int ret = 1;
-	int reqsize = 0;
-	int i;
-	struct bio *bio;
-	struct bio_vec *bvec;
-
-	if (remainder)
-		reqsize = *remainder;
-
-	/*
-	 * Add in the size increment for the first buffer.
-	 */
-	bio = req->bio;
-#ifdef DMA_SEGMENT_SIZE_LIMITED
-	if (reqsize + bio->bi_size > PAGE_SIZE)
-		ret++;
-#endif
-
-	rq_for_each_bio(bio, req) {
-		bio_for_each_segment(bvec, bio, i)
-			ret++;
-
-		reqsize += bio->bi_size;
-	}
-
-	if (remainder)
-		*remainder = reqsize;
-
-	return ret;
-}
-
-/*
- * Function:    recount_segments()
- *
- * Purpose:     Recount the number of scatter-gather segments for this request.
- *
- * Arguments:   req     - request that needs recounting.
- *
- * Returns:     Count of the number of SG segments for the request.
- *
- * Lock status: Irrelevant.
- *
- * Notes:	This is only used when we have partially completed requests
- *		and the bit that is leftover is of an indeterminate size.
- *		This can come up if you get a MEDIUM_ERROR, for example,
- *		as we will have "completed" all of the sectors up to and
- *		including the bad sector, and the leftover bit is what
- *		we have to do now.  This tends to be a rare occurrence, so
- *		we aren't busting our butts to instantiate separate versions
- *		of this function for the 4 different flag values.  We
- *		probably should, however.
- */
-void
-recount_segments(Scsi_Cmnd * SCpnt)
-{
-	struct request *req = &SCpnt->request;
-	struct Scsi_Host *SHpnt = SCpnt->host;
-
-	req->nr_segments = __count_segments(req, SHpnt->unchecked_isa_dma,NULL);
-}
-
-/*
- * IOMMU hackery for sparc64
- */
-#ifdef DMA_CHUNK_SIZE
-
-#define MERGEABLE_BUFFERS(X,Y) \
-	((((bvec_to_phys(__BVEC_END((X))) + __BVEC_END((X))->bv_len) | bio_to_phys((Y))) & (DMA_CHUNK_SIZE - 1)) == 0)
-
-static inline int scsi_new_mergeable(request_queue_t * q,
-				     struct request * req,
-				     struct bio *bio)
-{
-	int nr_segs = bio_hw_segments(q, bio);
-
-	/*
-	 * pci_map_sg will be able to merge these two
-	 * into a single hardware sg entry, check if
-	 * we'll have enough memory for the sg list.
-	 * scsi.c allocates for this purpose
-	 * min(64,sg_tablesize) entries.
-	 */
-	if (req->nr_segments + nr_segs > q->max_segments)
-		return 0;
-
-	req->nr_segments += nr_segs;
-	return 1;
-}
-
-static inline int scsi_new_segment(request_queue_t * q,
-				   struct request * req,
-				   struct bio *bio)
-{
-	int nr_segs = bio_hw_segments(q, bio);
-	/*
-	 * pci_map_sg won't be able to map these two
-	 * into a single hardware sg entry, so we have to
-	 * check if things fit into sg_tablesize.
-	 */
-	if (req->nr_hw_segments + nr_segs > q->max_segments)
-		return 0;
-	else if (req->nr_segments + nr_segs > q->max_segments)
-		return 0;
-
-	req->nr_hw_segments += nr_segs;
-	req->nr_segments += nr_segs;
-	return 1;
-}
-
-#else /* DMA_CHUNK_SIZE */
-
-static inline int scsi_new_segment(request_queue_t * q,
-				   struct request * req,
-				   struct bio *bio)
-{
-	int nr_segs = bio_hw_segments(q, bio);
-
-	if (req->nr_segments + nr_segs > q->max_segments) {
-		req->flags |= REQ_NOMERGE;
-		return 0;
-	}
-
-	/*
-	 * This will form the start of a new segment.  Bump the 
-	 * counter.
-	 */
-	req->nr_segments += nr_segs;
-	return 1;
-}
-#endif /* DMA_CHUNK_SIZE */
-
-/*
- * Function:    __scsi_merge_fn()
- *
- * Purpose:     Prototype for queue merge function.
- *
- * Arguments:   q       - Queue for which we are merging request.
- *              req     - request into which we wish to merge.
- *              bio     - Block which we may wish to merge into request
- *              dma_host - 1 if this host has ISA DMA issues (bus doesn't
- *                      expose all of the address lines, so that DMA cannot
- *                      be done from an arbitrary address).
- *
- * Returns:     1 if it is OK to merge the block into the request.  0
- *              if it is not OK.
- *
- * Lock status: queue lock is assumed to be held here.
- *
- * Notes:       Some drivers have limited scatter-gather table sizes, and
- *              thus they cannot queue an infinitely large command.  This
- *              function is called from ll_rw_blk before it attempts to merge
- *              a new block into a request to make sure that the request will
- *              not become too large.
- *
- *              This function is not designed to be directly called.  Instead
- *              it should be referenced from other functions where the
- *              dma_host parameter should be an integer constant. The
- *              compiler should thus be able to properly optimize the code,
- *              eliminating stuff that is irrelevant.
- *              It is more maintainable to do this way with a single function
- *              than to have 4 separate functions all doing roughly the
- *              same thing.
- */
-__inline static int __scsi_back_merge_fn(request_queue_t * q,
-					 struct request *req,
-					 struct bio *bio)
-{
-	if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
-		req->flags |= REQ_NOMERGE;
-		return 0;
-	}
-
-#ifdef DMA_CHUNK_SIZE
-	if (MERGEABLE_BUFFERS(req->biotail, bio))
-		return scsi_new_mergeable(q, req, bio);
-#endif
-
-	return scsi_new_segment(q, req, bio);
-}
-
-__inline static int __scsi_front_merge_fn(request_queue_t * q,
-					  struct request *req,
-					  struct bio *bio)
-{
-	if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
-		req->flags |= REQ_NOMERGE;
-		return 0;
-	}
-
-#ifdef DMA_CHUNK_SIZE
-	if (MERGEABLE_BUFFERS(bio, req->bio))
-		return scsi_new_mergeable(q, req, bio);
-#endif
-	return scsi_new_segment(q, req, bio);
-}
-
-/*
- * Function:    scsi_merge_fn_()
- *
- * Purpose:     queue merge function.
- *
- * Arguments:   q       - Queue for which we are merging request.
- *              req     - request into which we wish to merge.
- *              bio     - Block which we may wish to merge into request
- *
- * Returns:     1 if it is OK to merge the block into the request.  0
- *              if it is not OK.
- *
- * Lock status: queue lock is assumed to be held here.
- *
- * Notes:       Optimized for different cases depending upon whether
- *              ISA DMA is in use and whether clustering should be used.
- */
-#define MERGEFCT(_FUNCTION, _BACK_FRONT)				\
-static int _FUNCTION(request_queue_t * q,				\
-		     struct request * req,				\
-		     struct bio *bio)					\
-{									\
-    int ret;								\
-    ret =  __scsi_ ## _BACK_FRONT ## _merge_fn(q,			\
-					       req,			\
-					       bio);			\
-    return ret;								\
-}
-
-MERGEFCT(scsi_back_merge_fn, back)
-MERGEFCT(scsi_front_merge_fn, front)
-
-/*
- * Function:    scsi_merge_requests_fn_()
- *
- * Purpose:     queue merge function.
- *
- * Arguments:   q       - Queue for which we are merging request.
- *              req     - request into which we wish to merge.
- *              next    - Block which we may wish to merge into request
- *
- * Returns:     1 if it is OK to merge the block into the request.  0
- *              if it is not OK.
- *
- * Lock status: queue lock is assumed to be held here.
- *
- */
-inline static int scsi_merge_requests_fn(request_queue_t * q,
-					 struct request *req,
-					 struct request *next)
-{
-	int bio_segs;
-
-	/*
-	 * First check if the either of the requests are re-queued
-	 * requests.  Can't merge them if they are.
-	 */
-	if (req->special || next->special)
-		return 0;
-
-	/*
-	 * will become to large?
-	 */
-	if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
-		return 0;
-
-	bio_segs = req->nr_segments + next->nr_segments;
-	if (blk_contig_segment(q, req->biotail, next->bio))
-		bio_segs--;
-
-	/*
-	 * exceeds our max allowed segments?
-	 */
-	if (bio_segs > q->max_segments)
-		return 0;
-
-#ifdef DMA_CHUNK_SIZE
-	bio_segs = req->nr_hw_segments + next->nr_hw_segments;
-	if (blk_contig_segment(q, req->biotail, next->bio))
-		bio_segs--;
-
-	/* If dynamic DMA mapping can merge last segment in req with
-	 * first segment in next, then the check for hw segments was
-	 * done above already, so we can always merge.
-	 */
-	if (bio_segs > q->max_segments)
-		return 0;
-
-	req->nr_hw_segments = bio_segs;
-#endif
-
-	/*
-	 * This will form the start of a new segment.  Bump the 
-	 * counter.
-	 */
-	req->nr_segments = bio_segs;
-	return 1;
-}
-
-/*
- * Function:    __init_io()
- *
- * Purpose:     Prototype for io initialize function.
+ * Purpose:     SCSI I/O initialize function.
  *
  * Arguments:   SCpnt   - Command descriptor we wish to initialize
- *              sg_count_valid  - 1 if the sg count in the req is valid.
- *              dma_host - 1 if this host has ISA DMA issues (bus doesn't
- *                      expose all of the address lines, so that DMA cannot
- *                      be done from an arbitrary address).
  *
  * Returns:     1 on success.
  *
  * Lock status: 
- *
- * Notes:       Only the SCpnt argument should be a non-constant variable.
- *              This function is designed in such a way that it will be
- *              invoked from a series of small stubs, each of which would
- *              be optimized for specific circumstances.
- *
- *              The advantage of this is that hosts that don't do DMA
- *              get versions of the function that essentially don't have
- *              any of the DMA code.  Same goes for clustering - in the
- *              case of hosts with no need for clustering, there is no point
- *              in a whole bunch of overhead.
- *
- *              Finally, in the event that a host has set can_queue to SG_ALL
- *              implying that there is no limit to the length of a scatter
- *              gather list, the sg count in the request won't be valid
- *              (mainly because we don't need queue management functions
- *              which keep the tally uptodate.
  */
-__inline static int __init_io(Scsi_Cmnd * SCpnt, int dma_host)
+int scsi_init_io(Scsi_Cmnd *SCpnt)
 {
-	struct bio	   * bio;
-	char		   * buff;
-	int		     count;
-	int		     i;
-	struct request     * req;
-	int		     sectors;
-	struct scatterlist * sgpnt;
-	int		     this_count;
-	void		   ** bbpnt;
+	struct request     *req;
+	struct scatterlist *sgpnt;
+	int count, gfp_mask;
 
 	req = &SCpnt->request;
 
 	/*
 	 * First we need to know how many scatter gather segments are needed.
 	 */
-	count = req->nr_segments;
-
-	/*
-	 * If the dma pool is nearly empty, then queue a minimal request
-	 * with a single segment.  Typically this will satisfy a single
-	 * buffer.
-	 */
-	if (dma_host && scsi_dma_free_sectors <= 10) {
-		this_count = req->current_nr_sectors;
-		goto single_segment;
-	}
+	count = req->nr_phys_segments;
 
 	/*
 	 * we used to not use scatter-gather for single segment request,
@@ -497,50 +77,17 @@
 	 */
 	SCpnt->use_sg = count;
 
-	/* 
-	 * Allocate the actual scatter-gather table itself.
-	 */
-	SCpnt->sglist_len = (SCpnt->use_sg * sizeof(struct scatterlist));
+	gfp_mask = GFP_NOIO;
+	if (in_interrupt())
+		gfp_mask &= ~__GFP_WAIT;
 
-	/* If we could potentially require ISA bounce buffers, allocate
-	 * space for this array here.
-	 */
-	if (dma_host)
-		SCpnt->sglist_len += (SCpnt->use_sg * sizeof(void *));
+	sgpnt = scsi_alloc_sgtable(SCpnt, gfp_mask);
+	BUG_ON(!sgpnt);
 
-	/* scsi_malloc can only allocate in chunks of 512 bytes so
-	 * round it up.
-	 */
-	SCpnt->sglist_len = (SCpnt->sglist_len + 511) & ~511;
- 
-	sgpnt = (struct scatterlist *) scsi_malloc(SCpnt->sglist_len);
-
-	if (!sgpnt) {
-		struct Scsi_Host *SHpnt = SCpnt->host;
-
-		/*
-		 * If we cannot allocate the scatter-gather table, then
-		 * simply write the first buffer all by itself.
-		 */
-		printk("Warning - running *really* short on DMA buffers\n");
-		this_count = req->current_nr_sectors;
-		printk("SCSI: depth is %d, # segs %d, # hw segs %d\n", SHpnt->host_busy, req->nr_segments, req->nr_hw_segments);
-		goto single_segment;
-	}
-
-	memset(sgpnt, 0, SCpnt->sglist_len);
 	SCpnt->request_buffer = (char *) sgpnt;
 	SCpnt->request_bufflen = 0;
 	req->buffer = NULL;
 
-	if (dma_host)
-		bbpnt = (void **) ((char *)sgpnt +
-			 (SCpnt->use_sg * sizeof(struct scatterlist)));
-	else
-		bbpnt = NULL;
-
-	SCpnt->bounce_buffers = bbpnt;
-
 	/* 
 	 * Next, walk the list, and fill in the addresses and sizes of
 	 * each segment.
@@ -549,183 +96,22 @@
 	count = blk_rq_map_sg(req->q, req, SCpnt->request_buffer);
 
 	/*
-	 * Verify that the count is correct.
+	 * mapped well, send it off
 	 */
-	if (count > SCpnt->use_sg) {
-		printk("Incorrect number of segments after building list\n");
-		printk("counted %d, received %d\n", count, SCpnt->use_sg);
-		printk("req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors, req->current_nr_sectors);
-		scsi_free(SCpnt->request_buffer, SCpnt->sglist_len);
-		this_count = req->current_nr_sectors;
-		goto single_segment;
-	}
-
-	SCpnt->use_sg = count;
-
-	if (!dma_host)
+	if (count <= SCpnt->use_sg) {
+		SCpnt->use_sg = count;
 		return 1;
-
-	/*
-	 * Now allocate bounce buffers, if needed.
-	 */
-	SCpnt->request_bufflen = 0;
-	for (i = 0; i < count; i++) {
-		sectors = (sgpnt[i].length >> 9);
-		SCpnt->request_bufflen += sgpnt[i].length;
-		if (virt_to_phys(sgpnt[i].address) + sgpnt[i].length - 1 >
-                    ISA_DMA_THRESHOLD) {
-			if( scsi_dma_free_sectors - sectors <= 10  ) {
-				/*
-				 * If this would nearly drain the DMA
-				 * pool empty, then let's stop here.
-				 * Don't make this request any larger.
-				 * This is kind of a safety valve that
-				 * we use - we could get screwed later
-				 * on if we run out completely.  
-				 */
-				SCpnt->request_bufflen -= sgpnt[i].length;
-				SCpnt->use_sg = i;
-				if (i == 0) {
-					goto big_trouble;
-				}
-				break;
-			}
-
-			/*
-			 * this is not a dma host, so it will never
-			 * be a highmem page
-			 */
-			bbpnt[i] = page_address(sgpnt[i].page) +sgpnt[i].offset;
-			sgpnt[i].address = (char *)scsi_malloc(sgpnt[i].length);
-			/*
-			 * If we cannot allocate memory for this DMA bounce
-			 * buffer, then queue just what we have done so far.
-			 */
-			if (sgpnt[i].address == NULL) {
-				printk("Warning - running low on DMA memory\n");
-				SCpnt->request_bufflen -= sgpnt[i].length;
-				SCpnt->use_sg = i;
-				if (i == 0) {
-					goto big_trouble;
-				}
-				break;
-			}
-			if (rq_data_dir(req) == WRITE)
-				memcpy(sgpnt[i].address, bbpnt[i],
-				       sgpnt[i].length);
-		}
-	}
-	return 1;
-
-      big_trouble:
-	/*
-	 * We come here in the event that we get one humongous
-	 * request, where we need a bounce buffer, and the buffer is
-	 * more than we can allocate in a single call to
-	 * scsi_malloc().  In addition, we only come here when it is
-	 * the 0th element of the scatter-gather table that gets us
-	 * into this trouble.  As a fallback, we fall back to
-	 * non-scatter-gather, and ask for a single segment.  We make
-	 * a half-hearted attempt to pick a reasonably large request
-	 * size mainly so that we don't thrash the thing with
-	 * iddy-biddy requests.
-	 */
-
-	/*
-	 * The original number of sectors in the 0th element of the
-	 * scatter-gather table.  
-	 */
-	sectors = sgpnt[0].length >> 9;
-
-	/* 
-	 * Free up the original scatter-gather table.  Note that since
-	 * it was the 0th element that got us here, we don't have to
-	 * go in and free up memory from the other slots.  
-	 */
-	SCpnt->request_bufflen = 0;
-	SCpnt->use_sg = 0;
-	scsi_free(SCpnt->request_buffer, SCpnt->sglist_len);
-
-	/*
-	 * Make an attempt to pick up as much as we reasonably can.
-	 * Just keep adding sectors until the pool starts running kind of
-	 * low.  The limit of 30 is somewhat arbitrary - the point is that
-	 * it would kind of suck if we dropped down and limited ourselves to
-	 * single-block requests if we had hundreds of free sectors.
-	 */
-	if( scsi_dma_free_sectors > 30 ) {
-		for (this_count = 0, bio = req->bio; bio; bio = bio->bi_next) {
-			if( scsi_dma_free_sectors - this_count < 30 
-			    || this_count == sectors )
-			{
-				break;
-			}
-			this_count += bio_sectors(bio);
-		}
-
-	} else {
-		/*
-		 * Yow!   Take the absolute minimum here.
-		 */
-		this_count = req->current_nr_sectors;
 	}
 
-	/*
-	 * Now drop through into the single-segment case.
-	 */
-	
-      single_segment:
-	/*
-	 * Come here if for any reason we choose to do this as a single
-	 * segment.  Possibly the entire request, or possibly a small
-	 * chunk of the entire request.
-	 */
-
-	bio = req->bio;
-	buff = req->buffer = bio_data(bio);
-
-	if (dma_host || PageHighMem(bio_page(bio))) {
-		/*
-		 * Allocate a DMA bounce buffer.  If the allocation fails, fall
-		 * back and allocate a really small one - enough to satisfy
-		 * the first buffer.
-		 */
-		if (bio_to_phys(bio) + bio->bi_size - 1 > ISA_DMA_THRESHOLD) {
-			buff = (char *) scsi_malloc(this_count << 9);
-			if (!buff) {
-				printk("Warning - running low on DMA memory\n");
-				this_count = req->current_nr_sectors;
-				buff = (char *) scsi_malloc(this_count << 9);
-				if (!buff) {
-					dma_exhausted(SCpnt, 0);
-					return 0;
-				}
-			}
-			if (rq_data_dir(req) == WRITE) {
-				unsigned long flags;
-				char *buf = bio_kmap_irq(bio, &flags);
-				memcpy(buff, buf, this_count << 9);
-				bio_kunmap_irq(buf, &flags);
-			}
-		}
-	}
-	SCpnt->request_bufflen = this_count << 9;
-	SCpnt->request_buffer = buff;
-	SCpnt->use_sg = 0;
-	return 1;
+	printk("Incorrect number of segments after building list\n");
+	printk("counted %d, received %d\n", count, SCpnt->use_sg);
+	printk("req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors, req->current_nr_sectors);
+	BUG();
+	return 0; /* ahem */
 }
 
-#define INITIO(_FUNCTION, _DMA)			\
-static int _FUNCTION(Scsi_Cmnd * SCpnt)		\
-{						\
-    return __init_io(SCpnt, _DMA);		\
-}
-
-INITIO(scsi_init_io_v, 0)
-INITIO(scsi_init_io_vd, 1)
-
 /*
- * Function:    initialize_merge_fn()
+ * Function:    scsi_initialize_merge_fn()
  *
  * Purpose:     Initialize merge function for a host
  *
@@ -737,35 +123,15 @@
  *
  * Notes:
  */
-void initialize_merge_fn(Scsi_Device * SDpnt)
+void scsi_initialize_merge_fn(Scsi_Device * SDpnt)
 {
 	struct Scsi_Host *SHpnt = SDpnt->host;
 	request_queue_t *q = &SDpnt->request_queue;
 	dma64_addr_t bounce_limit;
 
 	/*
-	 * If this host has an unlimited tablesize, then don't bother with a
-	 * merge manager.  The whole point of the operation is to make sure
-	 * that requests don't grow too large, and this host isn't picky.
-	 *
-	 * Note that ll_rw_blk.c is effectively maintaining a segment
-	 * count which is only valid if clustering is used, and it obviously
-	 * doesn't handle the DMA case.   In the end, it
-	 * is simply easier to do it ourselves with our own functions
-	 * rather than rely upon the default behavior of ll_rw_blk.
-	 */
-	q->back_merge_fn = scsi_back_merge_fn;
-	q->front_merge_fn = scsi_front_merge_fn;
-	q->merge_requests_fn = scsi_merge_requests_fn;
-
-	if (SHpnt->unchecked_isa_dma == 0) {
-		SDpnt->scsi_init_io_fn = scsi_init_io_v;
-	} else {
-		SDpnt->scsi_init_io_fn = scsi_init_io_vd;
-	}
-
-	/*
-	 * now enable highmem I/O, if appropriate
+	 * The generic merging functions work just fine for us.
+	 * Enable highmem I/O, if appropriate.
 	 */
 	bounce_limit = BLK_BOUNCE_HIGH;
 	if (SHpnt->highmem_io && (SDpnt->type == TYPE_DISK)) {
@@ -777,6 +143,8 @@
 		else
 			bounce_limit = SHpnt->pci_dev->dma_mask;
 	}
+	if (SHpnt->unchecked_isa_dma)
+		bounce_limit = BLK_BOUNCE_ISA;
 
 	blk_queue_bounce_limit(q, bounce_limit);
 }
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index ad3e31a..9cd871b 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -320,7 +320,7 @@
 	SDpnt->host = shpnt;
 	SDpnt->online = TRUE;
 
-	initialize_merge_fn(SDpnt);
+	scsi_initialize_merge_fn(SDpnt);
 
         /*
          * Initialize the object that we will use to wait for command blocks.
@@ -390,8 +390,6 @@
 					}
 				}
 			}
-			scsi_resize_dma_pool();
-
 			for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
 				if (sdtpnt->finish && sdtpnt->nr_dev) {
 					(*sdtpnt->finish) ();
@@ -759,7 +757,7 @@
 	 */
 	scsi_initialize_queue(SDpnt, shpnt);
 	SDpnt->host = shpnt;
-	initialize_merge_fn(SDpnt);
+	scsi_initialize_merge_fn(SDpnt);
 
 	/*
 	 * Mark this device as online, or otherwise we won't be able to do much with it.
diff --git a/drivers/scsi/scsi_syms.c b/drivers/scsi/scsi_syms.c
index 7fb2469..dbe14ba 100644
--- a/drivers/scsi/scsi_syms.c
+++ b/drivers/scsi/scsi_syms.c
@@ -33,8 +33,6 @@
  */
 EXPORT_SYMBOL(scsi_register_module);
 EXPORT_SYMBOL(scsi_unregister_module);
-EXPORT_SYMBOL(scsi_free);
-EXPORT_SYMBOL(scsi_malloc);
 EXPORT_SYMBOL(scsi_register);
 EXPORT_SYMBOL(scsi_unregister);
 EXPORT_SYMBOL(scsicam_bios_param);
@@ -48,9 +46,7 @@
 EXPORT_SYMBOL(print_req_sense);
 EXPORT_SYMBOL(print_msg);
 EXPORT_SYMBOL(print_status);
-EXPORT_SYMBOL(scsi_dma_free_sectors);
 EXPORT_SYMBOL(kernel_scsi_ioctl);
-EXPORT_SYMBOL(scsi_need_isa_buffer);
 EXPORT_SYMBOL(scsi_release_command);
 EXPORT_SYMBOL(print_Scsi_Cmnd);
 EXPORT_SYMBOL(scsi_block_when_processing_errors);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index eb93a83..7bada9d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -765,7 +765,7 @@
 		return i;
 	}
 
-	buffer = (unsigned char *) scsi_malloc(512);
+	buffer = kmalloc(512, GFP_DMA);
 	if (!buffer) {
 		printk(KERN_WARNING "(sd_init_onedisk:) Memory allocation failure.\n");
 		scsi_release_request(SRpnt);
@@ -1042,7 +1042,7 @@
 	scsi_release_request(SRpnt);
 	SRpnt = NULL;
 
-	scsi_free(buffer, 512);
+	kfree(buffer);
 	return i;
 }
 
@@ -1111,7 +1111,7 @@
 		 * commands if they know what they're doing and they ask for it
 		 * explicitly via the SHpnt->max_sectors API.
 		 */
-		sd_max_sectors[i] = MAX_SEGMENTS*8;
+		sd_max_sectors[i] = MAX_PHYS_SEGMENTS*8;
 	}
 
 	for (i = 0; i < N_USED_SD_MAJORS; i++) {
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 8c63763..44a5075b 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2280,9 +2280,8 @@
             rqSz = num_sect * SG_SECTOR_SZ;
         }
         while (num_sect > 0) {
-            if ((num_sect <= sg_pool_secs_avail) &&
-                (scsi_dma_free_sectors > (SG_LOW_POOL_THRESHHOLD + num_sect))) {
-                resp = scsi_malloc(rqSz);
+            if ((num_sect <= sg_pool_secs_avail)) {
+                resp = kmalloc(rqSz, page_mask);
                 if (resp) {
                     if (retSzp) *retSzp = rqSz;
                     sg_pool_secs_avail -= num_sect;
@@ -2374,7 +2373,7 @@
 	{
 	    int num_sect = size / SG_SECTOR_SZ;
 
-	    scsi_free(buff, size);
+	    kfree(buff);
 	    sg_pool_secs_avail += num_sect;
 	}
 	break;
@@ -2681,9 +2680,8 @@
     max_dev = sg_last_dev();
     PRINT_PROC("dev_max(currently)=%d max_active_device=%d (origin 1)\n",
 	       sg_template.dev_max, max_dev);
-    PRINT_PROC(" scsi_dma_free_sectors=%u sg_pool_secs_aval=%d "
-	       "def_reserved_size=%d\n",
-	       scsi_dma_free_sectors, sg_pool_secs_avail, sg_big_buff);
+    PRINT_PROC(" sg_pool_secs_aval=%d def_reserved_size=%d\n",
+	       sg_pool_secs_avail, sg_big_buff);
     for (j = 0; j < max_dev; ++j) {
 	if ((sdp = sg_get_dev(j))) {
 	    Sg_fd * fp;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 1d1c271..530f893 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -258,112 +258,6 @@
 	return &scsi_CDs[MINOR(dev)].device->request_queue;
 }
 
-static int sr_scatter_pad(Scsi_Cmnd *SCpnt, int s_size)
-{
-	struct scatterlist *sg, *old_sg = NULL;
-	int i, fsize, bsize, sg_ent, sg_count;
-	char *front, *back;
-	void **bbpnt, **old_bbpnt = NULL;
-
-	back = front = NULL;
-	sg_ent = SCpnt->use_sg;
-	bsize = 0; /* gcc... */
-
-	/*
-	 * need front pad
-	 */
-	if ((fsize = SCpnt->request.sector % (s_size >> 9))) {
-		fsize <<= 9;
-		sg_ent++;
-		if ((front = scsi_malloc(fsize)) == NULL)
-			goto no_mem;
-	}
-	/*
-	 * need a back pad too
-	 */
-	if ((bsize = s_size - ((SCpnt->request_bufflen + fsize) % s_size))) {
-		sg_ent++;
-		if ((back = scsi_malloc(bsize)) == NULL)
-			goto no_mem;
-	}
-
-	/*
-	 * extend or allocate new scatter-gather table
-	 */
-	sg_count = SCpnt->use_sg;
-	if (sg_count) {
-		old_sg = (struct scatterlist *) SCpnt->request_buffer;
-		old_bbpnt = SCpnt->bounce_buffers;
-	} else {
-		sg_count = 1;
-		sg_ent++;
-	}
-
-	/* Get space for scatterlist and bounce buffer array. */
-	i  = sg_ent * sizeof(struct scatterlist);
-	i += sg_ent * sizeof(void *);
-	i  = (i + 511) & ~511;
-
-	if ((sg = scsi_malloc(i)) == NULL)
-		goto no_mem;
-
-	bbpnt = (void **)
-		((char *)sg + (sg_ent * sizeof(struct scatterlist)));
-
-	/*
-	 * no more failing memory allocs possible, we can safely assign
-	 * SCpnt values now
-	 */
-	SCpnt->sglist_len = i;
-	SCpnt->use_sg = sg_count;
-	memset(sg, 0, SCpnt->sglist_len);
-
-	i = 0;
-	if (fsize) {
-		sg[0].address = bbpnt[0] = front;
-		sg[0].length = fsize;
-		i++;
-	}
-	if (old_sg) {
-		memcpy(sg + i, old_sg, SCpnt->use_sg * sizeof(struct scatterlist));
-		if (old_bbpnt)
-			memcpy(bbpnt + i, old_bbpnt, SCpnt->use_sg * sizeof(void *));
-		scsi_free(old_sg, (((SCpnt->use_sg * sizeof(struct scatterlist)) +
-				    (SCpnt->use_sg * sizeof(void *))) + 511) & ~511);
-	} else {
-		sg[i].address = NULL;
-		sg[i].page = virt_to_page(SCpnt->request_buffer);
-		sg[i].offset = (unsigned long) SCpnt->request_buffer&~PAGE_MASK;
-		sg[i].length = SCpnt->request_bufflen;
-	}
-
-	SCpnt->request_bufflen += (fsize + bsize);
-	SCpnt->request_buffer = sg;
-	SCpnt->bounce_buffers = bbpnt;
-	SCpnt->use_sg += i;
-
-	if (bsize) {
-		sg[SCpnt->use_sg].address = NULL;
-		sg[SCpnt->use_sg].page = virt_to_page(back);
-		sg[SCpnt->use_sg].offset = (unsigned long) back & ~PAGE_MASK;
-		bbpnt[SCpnt->use_sg] = back;
-		sg[SCpnt->use_sg].length = bsize;
-		SCpnt->use_sg++;
-	}
-
-	return 0;
-
-no_mem:
-	printk("sr: ran out of mem for scatter pad\n");
-	if (front)
-		scsi_free(front, fsize);
-	if (back)
-		scsi_free(back, bsize);
-
-	return 1;
-}
-
-
 static int sr_init_command(Scsi_Cmnd * SCpnt)
 {
 	int dev, devm, block=0, this_count, s_size;
@@ -429,9 +323,10 @@
 	/*
 	 * request doesn't start on hw block boundary, add scatter pads
 	 */
-	if ((SCpnt->request.sector % (s_size >> 9)) || (SCpnt->request_bufflen % s_size))
-		if (sr_scatter_pad(SCpnt, s_size))
-			return 0;
+	if ((SCpnt->request.sector % (s_size >> 9)) || (SCpnt->request_bufflen % s_size)) {
+		printk("sr: unaligned transfer\n");
+		return 0;
+	}
 
 	this_count = (SCpnt->request_bufflen >> 9) / (s_size >> 9);
 
@@ -583,7 +478,7 @@
 	int sector_size;
 	Scsi_Request *SRpnt;
 
-	buffer = (unsigned char *) scsi_malloc(512);
+	buffer = (unsigned char *) kmalloc(512, GFP_DMA);
 	SRpnt = scsi_allocate_request(scsi_CDs[i].device);
 	
 	if(buffer == NULL || SRpnt == NULL)
@@ -592,7 +487,7 @@
 		sector_size = 2048;	/* A guess, just in case */
 		scsi_CDs[i].needs_sector_size = 1;
 		if(buffer)
-			scsi_free(buffer, 512);
+			kfree(buffer);
 		if(SRpnt)
 			scsi_release_request(SRpnt);
 		return;
@@ -673,7 +568,7 @@
 		sr_sizes[i] = scsi_CDs[i].capacity >> (BLOCK_SIZE_BITS - 9);
 	};
 	blk_queue_hardsect_size(blk_get_queue(MAJOR_NR), sector_size);
-	scsi_free(buffer, 512);
+	kfree(buffer);
 }
 
 void get_capabilities(int i)
@@ -694,7 +589,7 @@
 		""
 	};
 
-	buffer = (unsigned char *) scsi_malloc(512);
+	buffer = (unsigned char *) kmalloc(512, GFP_DMA);
 	if (!buffer)
 	{
 		printk(KERN_ERR "sr: out of memory.\n");
@@ -714,7 +609,7 @@
 		scsi_CDs[i].cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
 					 CDC_DVD | CDC_DVD_RAM |
 					 CDC_SELECT_DISC | CDC_SELECT_SPEED);
-		scsi_free(buffer, 512);
+		kfree(buffer);
 		printk("sr%i: scsi-1 drive\n", i);
 		return;
 	}
@@ -767,7 +662,7 @@
 	/*else    I don't think it can close its tray
 	   scsi_CDs[i].cdi.mask |= CDC_CLOSE_TRAY; */
 
-	scsi_free(buffer, 512);
+	kfree(buffer);
 }
 
 /*
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 3c3a53a..da3ec60 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -95,7 +95,7 @@
 	SRpnt->sr_request.buffer = buffer;
 	if (buffer && SRpnt->sr_host->unchecked_isa_dma &&
 	    (virt_to_phys(buffer) + buflength - 1 > ISA_DMA_THRESHOLD)) {
-		bounce_buffer = (char *) scsi_malloc((buflength + 511) & ~511);
+		bounce_buffer = (char *) kmalloc(buflength, GFP_DMA);
 		if (bounce_buffer == NULL) {
 			printk("SCSI DMA pool exhausted.");
 			return -ENOMEM;
@@ -114,7 +114,7 @@
 	req = &SRpnt->sr_request;
 	if (SRpnt->sr_buffer && req->buffer && SRpnt->sr_buffer != req->buffer) {
 		memcpy(req->buffer, SRpnt->sr_buffer, SRpnt->sr_bufflen);
-		scsi_free(SRpnt->sr_buffer, (SRpnt->sr_bufflen + 511) & ~511);
+		kfree(SRpnt->sr_buffer);
 		SRpnt->sr_buffer = req->buffer;
         }
 
@@ -519,7 +519,7 @@
 	if (!xa_test)
 		return 0;
 
-	raw_sector = (unsigned char *) scsi_malloc(2048 + 512);
+	raw_sector = (unsigned char *) kmalloc(2048, GFP_DMA | GFP_KERNEL);
 	if (!raw_sector)
 		return -ENOMEM;
 	if (0 == sr_read_sector(minor, scsi_CDs[minor].ms_offset + 16,
@@ -529,7 +529,7 @@
 		/* read a raw sector failed for some reason. */
 		is_xa = -1;
 	}
-	scsi_free(raw_sector, 2048 + 512);
+	kfree(raw_sector);
 #ifdef DEBUG
 	printk("sr%d: sr_is_xa: %d\n", minor, is_xa);
 #endif
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c
index 39bd3b6..a1d4a7d 100644
--- a/drivers/scsi/sr_vendor.c
+++ b/drivers/scsi/sr_vendor.c
@@ -115,7 +115,7 @@
 		density = (blocklength > 2048) ? 0x81 : 0x83;
 #endif
 
-	buffer = (unsigned char *) scsi_malloc(512);
+	buffer = (unsigned char *) kmalloc(512, GFP_KERNEL | GFP_DMA);
 	if (!buffer)
 		return -ENOMEM;
 
@@ -142,7 +142,7 @@
 		printk("sr%d: switching blocklength to %d bytes failed\n",
 		       minor, blocklength);
 #endif
-	scsi_free(buffer, 512);
+	kfree(buffer);
 	return rc;
 }
 
@@ -162,7 +162,7 @@
 	if (scsi_CDs[minor].cdi.mask & CDC_MULTI_SESSION)
 		return 0;
 
-	buffer = (unsigned char *) scsi_malloc(512);
+	buffer = (unsigned char *) kmalloc(512, GFP_KERNEL | GFP_DMA);
 	if (!buffer)
 		return -ENOMEM;
 
@@ -306,6 +306,6 @@
 		printk(KERN_DEBUG "sr%d: multisession offset=%lu\n",
 		       minor, sector);
 #endif
-	scsi_free(buffer, 512);
+	kfree(buffer);
 	return rc;
 }
diff --git a/drivers/scsi/sym53c8xx.c b/drivers/scsi/sym53c8xx.c
index 70d8a00..bc030dc 100644
--- a/drivers/scsi/sym53c8xx.c
+++ b/drivers/scsi/sym53c8xx.c
@@ -1,7 +1,7 @@
 /******************************************************************************
 **  High Performance device driver for the Symbios 53C896 controller.
 **
-**  Copyright (C) 1998-2000  Gerard Roudier <groudier@club-internet.fr>
+**  Copyright (C) 1998-2001  Gerard Roudier <groudier@free.fr>
 **
 **  This driver also supports all the Symbios 53C8XX controller family, 
 **  except 53C810 revisions < 16, 53C825 revisions < 16 and all 
@@ -32,7 +32,7 @@
 **  The Linux port of the FreeBSD ncr driver has been achieved in 
 **  november 1995 by:
 **
-**          Gerard Roudier              <groudier@club-internet.fr>
+**          Gerard Roudier              <groudier@free.fr>
 **
 **  Being given that this driver originates from the FreeBSD version, and
 **  in order to keep synergy on both, any suggested enhancements and corrections
@@ -12126,13 +12126,16 @@
 
 	if (!use_sg)
 		segn = ncr_scatter_no_sglist(np, cp, cmd);
-	else if (use_sg > MAX_SCATTER)
-		segn = -1;
 	else {
 		struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
 		struct scr_tblmove *data;
 
 		use_sg = map_scsi_sg_data(np, cmd);
+		if (use_sg > MAX_SCATTER) {
+			unmap_scsi_data(np, cmd);
+			return -1;
+		}
+
 		data = &cp->phys.data[MAX_SCATTER - use_sg];
 
 		for (segn = 0; segn < use_sg; segn++) {
@@ -12165,13 +12168,15 @@
 
 	if (!use_sg)
 		segment = ncr_scatter_no_sglist(np, cp, cmd);
-	else if (use_sg > MAX_SCATTER)
-		segment = -1;
 	else {
 		struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
 		struct scr_tblmove *data;
 
 		use_sg = map_scsi_sg_data(np, cmd);
+		if (use_sg > MAX_SCATTER) {
+			unmap_scsi_data(np, cmd);
+			return -1;
+		}
 		data = &cp->phys.data[MAX_SCATTER - use_sg];
 
 		for (segment = 0; segment < use_sg; segment++) {
diff --git a/drivers/scsi/sym53c8xx.h b/drivers/scsi/sym53c8xx.h
index 780a8df..256d34b 100644
--- a/drivers/scsi/sym53c8xx.h
+++ b/drivers/scsi/sym53c8xx.h
@@ -1,7 +1,7 @@
 /******************************************************************************
 **  High Performance device driver for the Symbios 53C896 controller.
 **
-**  Copyright (C) 1998-2000  Gerard Roudier <groudier@club-internet.fr>
+**  Copyright (C) 1998-2001  Gerard Roudier <groudier@free.fr>
 **
 **  This driver also supports all the Symbios 53C8XX controller family, 
 **  except 53C810 revisions < 16, 53C825 revisions < 16 and all 
@@ -32,7 +32,7 @@
 **  The Linux port of the FreeBSD ncr driver has been achieved in 
 **  november 1995 by:
 **
-**          Gerard Roudier              <groudier@club-internet.fr>
+**          Gerard Roudier              <groudier@free.fr>
 **
 **  Being given that this driver originates from the FreeBSD version, and
 **  in order to keep synergy on both, any suggested enhancements and corrections
@@ -96,7 +96,7 @@
 			this_id:        7,			\
 			sg_tablesize:   SCSI_NCR_SG_TABLESIZE,	\
 			cmd_per_lun:    SCSI_NCR_CMD_PER_LUN,	\
-			max_sectors:	MAX_SEGMENTS*8,		\
+			max_sectors:	MAX_HW_SEGMENTS*8,	\
 			use_clustering: DISABLE_CLUSTERING,	\
 			highmem_io:	1} 
 
diff --git a/drivers/scsi/sym53c8xx_2/ChangeLog.txt b/drivers/scsi/sym53c8xx_2/ChangeLog.txt
index c020492..6a4a3f8 100644
--- a/drivers/scsi/sym53c8xx_2/ChangeLog.txt
+++ b/drivers/scsi/sym53c8xx_2/ChangeLog.txt
@@ -128,3 +128,21 @@
 	* version sym-2.1.16-20011028
 	- Slightly simplify driver configuration.
 	- Prepare a new patch against linux-2.4.13.
+
+Sat Nov 17 10:00 2001 Gerard Roudier 
+	* version sym-2.1.17
+	- Fix a couple of gcc/gcc3 warnings.
+	- Allocate separately from the HCB the array for CCBs hashed by DSA.
+	  All driver memory allocations are now not greater than 1 PAGE 
+	  even on PPC64 / 4KB PAGE surprising setup.
+
+Sat Dec 01 18:00 2001 Gerard Roudier 
+	* version sym-2.1.17a
+	- Use u_long instead of U32 for the IO base cookie. This is more 
+	  consistent with what archs are expecting.
+	- Use MMIO per default for Power PC instead of some fake normal IO,
+	  as Paul Mackerras stated that MMIO works fine now on this arch.
+
+
+
+
diff --git a/drivers/scsi/sym53c8xx_2/sym53c8xx.h b/drivers/scsi/sym53c8xx_2/sym53c8xx.h
index 9c7ef02..0f6114b 100644
--- a/drivers/scsi/sym53c8xx_2/sym53c8xx.h
+++ b/drivers/scsi/sym53c8xx_2/sym53c8xx.h
@@ -130,17 +130,17 @@
 #if !defined(HOSTS_C)
 
 /*
- *  Use normal IO if configured. Forced for alpha and powerpc.
- *  Powerpc fails copying to on-chip RAM using memcpy_toio().
+ *  Use normal IO if configured.
+ *  Normal IO forced for alpha.
  *  Forced to MMIO for sparc.
  */
 #if defined(__alpha__)
 #define	SYM_CONF_IOMAPPED
-#elif defined(__powerpc__)
-#define	SYM_CONF_IOMAPPED
-#define SYM_OPT_NO_BUS_MEMORY_MAPPING
 #elif defined(__sparc__)
 #undef SYM_CONF_IOMAPPED
+/* #elif defined(__powerpc__) */
+/* #define	SYM_CONF_IOMAPPED */
+/* #define SYM_OPT_NO_BUS_MEMORY_MAPPING */
 #elif defined(CONFIG_SCSI_SYM53C8XX_IOMAPPED)
 #define	SYM_CONF_IOMAPPED
 #endif
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 9c9c3bc..d8c43a9 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -647,12 +647,15 @@
 
 	if (!use_sg)
 		segment = sym_scatter_no_sglist(np, cp, cmd);
-	else if (use_sg > SYM_CONF_MAX_SG)
-		segment = -1;
 	else if ((use_sg = map_scsi_sg_data(np, cmd)) > 0) {
 		struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
 		struct sym_tblmove *data;
 
+		if (use_sg > SYM_CONF_MAX_SG) {
+			unmap_scsi_data(np, cmd);
+			return -1;
+		}
+
 		data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg];
 
 		for (segment = 0; segment < use_sg; segment++) {
@@ -2452,8 +2455,8 @@
 	u_char pci_fix_up = SYM_SETUP_PCI_FIX_UP;
 	u_char revision;
 	u_int irq;
-	u_long base, base_2, io_port; 
-	u_long base_c, base_2_c; 
+	u_long base, base_2, base_io; 
+	u_long base_c, base_2_c, io_port; 
 	int i;
 	sym_chip *chip;
 
@@ -2470,7 +2473,7 @@
 	device_id = PciDeviceId(pdev);
 	irq	  = PciIrqLine(pdev);
 
-	i = pci_get_base_address(pdev, 0, &io_port);
+	i = pci_get_base_address(pdev, 0, &base_io);
 	io_port = pci_get_base_cookie(pdev, 0);
 
 	base_c = pci_get_base_cookie(pdev, i);
@@ -2488,9 +2491,9 @@
 	/*
 	 *  If user excluded this chip, donnot initialize it.
 	 */
-	if (io_port) {
+	if (base_io) {
 		for (i = 0 ; i < 8 ; i++) {
-			if (sym_driver_setup.excludes[i] == io_port)
+			if (sym_driver_setup.excludes[i] == base_io)
 				return -1;
 		}
 	}
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h
index c89832f..4db72ce 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.h
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.h
@@ -77,7 +77,6 @@
 #include <linux/errno.h>
 #include <linux/pci.h>
 #include <linux/string.h>
-#include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/ioport.h>
 #include <linux/time.h>
@@ -463,7 +462,7 @@
 
 	vm_offset_t	mmio_va;	/* MMIO kernel virtual address	*/
 	vm_offset_t	ram_va;		/* RAM  kernel virtual address	*/
-	u32		io_port;	/* IO port address		*/
+	u_long		io_port;	/* IO port address cookie	*/
 	u_short		io_ws;		/* IO window size		*/
 	int		irq;		/* IRQ number			*/
 
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index ce06b72..3ed0e9e 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -4689,8 +4689,9 @@
 	return;
 out_clrack:
 	OUTL_DSP (SCRIPTA_BA (np, clrack));
+	return;
 out_stuck:
-	;
+	return;
 }
 
 /*
@@ -5223,8 +5224,10 @@
 	 *  And accept tagged commands now.
 	 */
 	lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl));
+
+	return;
 fail:
-	;
+	return;
 }
 
 /*
@@ -5787,6 +5790,13 @@
 		goto attach_failed;
 
 	/*
+	 *  Allocate the array of lists of CCBs hashed by DSA.
+	 */
+	np->ccbh = sym_calloc(sizeof(ccb_p *)*CCB_HASH_SIZE, "CCBH");
+	if (!np->ccbh)
+		goto attach_failed;
+
+	/*
 	 *  Initialyze the CCB free and busy queues.
 	 */
 	sym_que_init(&np->free_ccbq);
@@ -5977,6 +5987,8 @@
 			sym_mfree_dma(cp, sizeof(*cp), "CCB");
 		}
 	}
+	if (np->ccbh)
+		sym_mfree(np->ccbh, sizeof(ccb_p *)*CCB_HASH_SIZE, "CCBH");
 
 	if (np->badluntbl)
 		sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL");
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h
index 62530d4..cd8d791 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.h
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h
@@ -1068,7 +1068,8 @@
 	/*
 	 *  CCB lists and queue.
 	 */
-	ccb_p ccbh[CCB_HASH_SIZE];	/* CCB hashed by DSA value	*/
+	ccb_p *ccbh;			/* CCBs hashed by DSA value	*/
+					/* CCB_HASH_SIZE lists of CCBs	*/
 	SYM_QUEHEAD	free_ccbq;	/* Queue of available CCBs	*/
 	SYM_QUEHEAD	busy_ccbq;	/* Queue of busy CCBs		*/
 
diff --git a/drivers/scsi/sym53c8xx_comm.h b/drivers/scsi/sym53c8xx_comm.h
index 57934ac..ec4f1cc 100644
--- a/drivers/scsi/sym53c8xx_comm.h
+++ b/drivers/scsi/sym53c8xx_comm.h
@@ -1,7 +1,7 @@
 /******************************************************************************
 **  High Performance device driver for the Symbios 53C896 controller.
 **
-**  Copyright (C) 1998-2000  Gerard Roudier <groudier@club-internet.fr>
+**  Copyright (C) 1998-2001  Gerard Roudier <groudier@free.fr>
 **
 **  This driver also supports all the Symbios 53C8XX controller family, 
 **  except 53C810 revisions < 16, 53C825 revisions < 16 and all 
@@ -32,7 +32,7 @@
 **  The Linux port of the FreeBSD ncr driver has been achieved in 
 **  november 1995 by:
 **
-**          Gerard Roudier              <groudier@club-internet.fr>
+**          Gerard Roudier              <groudier@free.fr>
 **
 **  Being given that this driver originates from the FreeBSD version, and
 **  in order to keep synergy on both, any suggested enhancements and corrections
diff --git a/drivers/scsi/sym53c8xx_defs.h b/drivers/scsi/sym53c8xx_defs.h
index 52bd0ea..82f3f11 100644
--- a/drivers/scsi/sym53c8xx_defs.h
+++ b/drivers/scsi/sym53c8xx_defs.h
@@ -1,7 +1,7 @@
 /******************************************************************************
 **  High Performance device driver for the Symbios 53C896 controller.
 **
-**  Copyright (C) 1998-2000  Gerard Roudier <groudier@club-internet.fr>
+**  Copyright (C) 1998-2001  Gerard Roudier <groudier@free.fr>
 **
 **  This driver also supports all the Symbios 53C8XX controller family, 
 **  except 53C810 revisions < 16, 53C825 revisions < 16 and all 
@@ -32,7 +32,7 @@
 **  The Linux port of the FreeBSD ncr driver has been achieved in 
 **  november 1995 by:
 **
-**          Gerard Roudier              <groudier@club-internet.fr>
+**          Gerard Roudier              <groudier@free.fr>
 **
 **  Being given that this driver originates from the FreeBSD version, and
 **  in order to keep synergy on both, any suggested enhancements and corrections
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index 30c400e..720b707 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -66,7 +66,7 @@
 
 /* Inode stuff */
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
-int adfs_get_block(struct inode *inode, long block,
+int adfs_get_block(struct inode *inode, sector_t block,
 		   struct buffer_head *bh, int create);
 #else
 int adfs_bmap(struct inode *inode, int block);
diff --git a/fs/adfs/dir_f.c b/fs/adfs/dir_f.c
index ef7715c..66a0c36 100644
--- a/fs/adfs/dir_f.c
+++ b/fs/adfs/dir_f.c
@@ -193,7 +193,7 @@
 			goto release_buffers;
 		}
 
-		dir->bh[blk] = bread(sb->s_dev, phys, sb->s_blocksize);
+		dir->bh[blk] = sb_bread(sb, phys);
 		if (!dir->bh[blk])
 			goto release_buffers;
 	}
diff --git a/fs/adfs/dir_fplus.c b/fs/adfs/dir_fplus.c
index 329bbd5..71064bc 100644
--- a/fs/adfs/dir_fplus.c
+++ b/fs/adfs/dir_fplus.c
@@ -35,7 +35,7 @@
 		goto out;
 	}
 
-	dir->bh[0] = bread(sb->s_dev, block, sb->s_blocksize);
+	dir->bh[0] = sb_bread(sb, block);
 	if (!dir->bh[0])
 		goto out;
 	dir->nr_buffers += 1;
@@ -60,7 +60,7 @@
 			goto out;
 		}
 
-		dir->bh[blk] = bread(sb->s_dev, block, sb->s_blocksize);
+		dir->bh[blk] = sb_bread(sb, block);
 		if (!dir->bh[blk])
 			goto out;
 		dir->nr_buffers = blk;
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 22d9bfd..9e402bc 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -27,7 +27,7 @@
  * not support creation of new blocks, so we return -EIO for this case.
  */
 int
-adfs_get_block(struct inode *inode, long block, struct buffer_head *bh, int create)
+adfs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh, int create)
 {
 	if (block < 0)
 		goto abort_negative;
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 00be08b..f1af563 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -263,7 +263,7 @@
 		dm[zone].dm_startbit = 0;
 		dm[zone].dm_endbit   = zone_size;
 		dm[zone].dm_startblk = zone * zone_size - ADFS_DR_SIZE_BITS;
-		dm[zone].dm_bh       = bread(sb->s_dev, map_addr, sb->s_blocksize);
+		dm[zone].dm_bh       = sb_bread(sb, map_addr);
 
 		if (!dm[zone].dm_bh) {
 			adfs_error(sb, "unable to read map");
@@ -319,8 +319,9 @@
 	if (parse_options(sb, data))
 		goto error;
 
+	sb->s_blocksize = BLOCK_SIZE;
 	set_blocksize(dev, BLOCK_SIZE);
-	if (!(bh = bread(dev, ADFS_DISCRECORD / BLOCK_SIZE, BLOCK_SIZE))) {
+	if (!(bh = sb_bread(sb, ADFS_DISCRECORD / BLOCK_SIZE))) {
 		adfs_error(sb, "unable to read superblock");
 		goto error;
 	}
@@ -354,7 +355,7 @@
 
 		brelse(bh);
 		set_blocksize(dev, sb->s_blocksize);
-		bh = bread(dev, ADFS_DISCRECORD / sb->s_blocksize, sb->s_blocksize);
+		bh = sb_bread(sb, ADFS_DISCRECORD / sb->s_blocksize);
 		if (!bh) {
 			adfs_error(sb, "couldn't read superblock on "
 				"2nd try.");
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 8a168f7..a542891 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -38,8 +38,6 @@
 static struct buffer_head *affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext);
 static inline struct buffer_head *affs_get_extblock(struct inode *inode, u32 ext);
 static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
-static int affs_get_block(struct inode *inode, long block, struct buffer_head *bh_result, int create);
-
 static ssize_t affs_file_write(struct file *filp, const char *buf, size_t count, loff_t *ppos);
 static int affs_file_open(struct inode *inode, struct file *filp);
 static int affs_file_release(struct inode *inode, struct file *filp);
@@ -332,7 +330,7 @@
 }
 
 static int
-affs_get_block(struct inode *inode, long block, struct buffer_head *bh_result, int create)
+affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
 {
 	struct super_block	*sb = inode->i_sb;
 	struct buffer_head	*ext_bh;
diff --git a/fs/affs/super.c b/fs/affs/super.c
index a2a034f..d65e44b 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -332,7 +332,7 @@
 			       blocksize == 2048 ? 11 : 12;
 
 	/* Find out which kind of FS we have */
-	boot_bh = bread(sb->s_dev, 0, sb->s_blocksize);
+	boot_bh = sb_bread(sb, 0);
 	if (!boot_bh) {
 		printk(KERN_ERR "AFFS: Cannot read boot block\n");
 		goto out_error;
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index 5caf04a..5a88748 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -41,7 +41,7 @@
 	while (f->f_pos < dir->i_size) {
 		offset = f->f_pos & (BFS_BSIZE-1);
 		block = dir->iu_sblock + (f->f_pos >> BFS_BSIZE_BITS);
-		bh = bread(dev, block, BFS_BSIZE);
+		bh = sb_bread(dir->i_sb, block);
 		if (!bh) {
 			f->f_pos += BFS_BSIZE - offset;
 			continue;
@@ -270,7 +270,7 @@
 	sblock = dir->iu_sblock;
 	eblock = dir->iu_eblock;
 	for (block=sblock; block<=eblock; block++) {
-		bh = bread(dev, block, BFS_BSIZE);
+		bh = sb_bread(dir->i_sb, block);
 		if(!bh) 
 			return -ENOSPC;
 		for (off=0; off<BFS_BSIZE; off+=BFS_DIRENT_SIZE) {
@@ -319,7 +319,7 @@
 	block = offset = 0;
 	while (block * BFS_BSIZE + offset < dir->i_size) {
 		if (!bh) {
-			bh = bread(dir->i_dev, dir->iu_sblock + block, BFS_BSIZE);
+			bh = sb_bread(dir->i_sb, dir->iu_sblock + block);
 			if (!bh) {
 				block++;
 				continue;
diff --git a/fs/bfs/file.c b/fs/bfs/file.c
index d7a2843..bb301b4 100644
--- a/fs/bfs/file.c
+++ b/fs/bfs/file.c
@@ -54,7 +54,7 @@
 	return 0;
 }
 
-static int bfs_get_block(struct inode * inode, long block, 
+static int bfs_get_block(struct inode * inode, sector_t block, 
 	struct buffer_head * bh_result, int create)
 {
 	long phys;
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 6a4a4c5..f83f13f 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -47,7 +47,7 @@
 	}
 
 	block = (ino - BFS_ROOT_INO)/BFS_INODES_PER_BLOCK + 1;
-	bh = bread(dev, block, BFS_BSIZE);
+	bh = sb_bread(inode->i_sb, block);
 	if (!bh) {
 		printf("Unable to read inode %s:%08lx\n", bdevname(dev), ino);
 		make_bad_inode(inode);
@@ -100,7 +100,7 @@
 
 	lock_kernel();
 	block = (ino - BFS_ROOT_INO)/BFS_INODES_PER_BLOCK + 1;
-	bh = bread(dev, block, BFS_BSIZE);
+	bh = sb_bread(inode->i_sb, block);
 	if (!bh) {
 		printf("Unable to read inode %s:%08lx\n", bdevname(dev), ino);
 		unlock_kernel();
@@ -153,7 +153,7 @@
 	lock_kernel();
 	mark_inode_dirty(inode);
 	block = (ino - BFS_ROOT_INO)/BFS_INODES_PER_BLOCK + 1;
-	bh = bread(dev, block, BFS_BSIZE);
+	bh = sb_bread(s, block);
 	if (!bh) {
 		printf("Unable to read inode %s:%08lx\n", bdevname(dev), ino);
 		unlock_kernel();
@@ -252,7 +252,7 @@
 	s->s_blocksize = BFS_BSIZE;
 	s->s_blocksize_bits = BFS_BSIZE_BITS;
 
-	bh = bread(dev, 0, BFS_BSIZE);
+	bh = sb_bread(s, 0);
 	if(!bh)
 		goto out;
 	bfs_sb = (struct bfs_super_block *)bh->b_data;
diff --git a/fs/bio.c b/fs/bio.c
index 36fe91f..555b7ac 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -111,7 +111,8 @@
 	bio->bi_rw = 0;
 	bio->bi_vcnt = 0;
 	bio->bi_idx = 0;
-	bio->bi_hw_seg = 0;
+	bio->bi_phys_segments = 0;
+	bio->bi_hw_segments = 0;
 	bio->bi_size = 0;
 	bio->bi_end_io = NULL;
 	atomic_set(&bio->bi_cnt, 1);
@@ -166,12 +167,20 @@
 	}
 }
 
-inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
+inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
 {
-	if (unlikely(!(bio->bi_flags & BIO_SEG_VALID)))
+	if (unlikely(!(bio->bi_flags & (1 << BIO_SEG_VALID))))
 		blk_recount_segments(q, bio);
 
-	return bio->bi_hw_seg;
+	return bio->bi_phys_segments;
+}
+
+inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
+{
+	if (unlikely(!(bio->bi_flags & (1 << BIO_SEG_VALID))))
+		blk_recount_segments(q, bio);
+
+	return bio->bi_hw_segments;
 }
 
 /**
@@ -199,7 +208,8 @@
 	bio->bi_vcnt = bio_src->bi_vcnt;
 	bio->bi_idx = bio_src->bi_idx;
 	if (bio_src->bi_flags & (1 << BIO_SEG_VALID)) {
-		bio->bi_hw_seg = bio_src->bi_hw_seg;
+		bio->bi_phys_segments = bio_src->bi_phys_segments;
+		bio->bi_hw_segments = bio_src->bi_hw_segments;
 		bio->bi_flags |= (1 << BIO_SEG_VALID);
 	}
 	bio->bi_size = bio_src->bi_size;
@@ -496,7 +506,7 @@
 	if (!bio_pool)
 		panic("bio: can't create mempool\n");
 
-	printk("BIO: pool of %d setup, %uKb (%d bytes/bio)\n", BIO_POOL_SIZE, BIO_POOL_SIZE * sizeof(struct bio) >> 10, sizeof(struct bio));
+	printk("BIO: pool of %d setup, %ZuKb (%Zd bytes/bio)\n", BIO_POOL_SIZE, BIO_POOL_SIZE * sizeof(struct bio) >> 10, sizeof(struct bio));
 
 	biovec_init_pool();
 
@@ -513,4 +523,5 @@
 EXPORT_SYMBOL(bio_copy);
 EXPORT_SYMBOL(__bio_clone);
 EXPORT_SYMBOL(bio_clone);
+EXPORT_SYMBOL(bio_phys_segments);
 EXPORT_SYMBOL(bio_hw_segments);
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 7582c9d..7f4afb3 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -153,7 +153,7 @@
 
 		bh = NULL;
 		if (blocknr + i < devsize) {
-			bh = getblk(sb->s_dev, blocknr + i, PAGE_CACHE_SIZE);
+			bh = sb_getblk(sb, blocknr + i);
 			if (!buffer_uptodate(bh))
 				read_array[unread++] = bh;
 		}
diff --git a/fs/efs/dir.c b/fs/efs/dir.c
index 9bba7a2..cc7df77 100644
--- a/fs/efs/dir.c
+++ b/fs/efs/dir.c
@@ -40,7 +40,7 @@
 	/* look at all blocks */
 	while (block < inode->i_blocks) {
 		/* read the dir block */
-		bh = bread(inode->i_dev, efs_bmap(inode, block), EFS_DIRBSIZE);
+		bh = sb_bread(inode->i_sb, efs_bmap(inode, block));
 
 		if (!bh) {
 			printk(KERN_ERR "EFS: readdir(): failed to read dir block %d\n", block);
diff --git a/fs/efs/file.c b/fs/efs/file.c
index 67f5898..faa5b9f 100644
--- a/fs/efs/file.c
+++ b/fs/efs/file.c
@@ -8,7 +8,7 @@
 
 #include <linux/efs_fs.h>
 
-int efs_get_block(struct inode *inode, long iblock,
+int efs_get_block(struct inode *inode, sector_t iblock,
 		  struct buffer_head *bh_result, int create)
 {
 	int error = -EROFS;
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index 39e503d..67d050f 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -12,7 +12,7 @@
 #include <linux/module.h>
 
 
-extern int efs_get_block(struct inode *, long, struct buffer_head *, int);
+extern int efs_get_block(struct inode *, sector_t, struct buffer_head *, int);
 static int efs_readpage(struct file *file, struct page *page)
 {
 	return block_read_full_page(page,efs_get_block);
@@ -77,7 +77,7 @@
 			(EFS_BLOCKSIZE / sizeof(struct efs_dinode))) *
 		sizeof(struct efs_dinode);
 
-	bh = bread(inode->i_dev, block, EFS_BLOCKSIZE);
+	bh = sb_bread(inode->i_sb, block);
 	if (!bh) {
 		printk(KERN_WARNING "EFS: bread() failed at block %d\n", block);
 		goto read_inode_error;
@@ -271,7 +271,7 @@
 		if (first || lastblock != iblock) {
 			if (bh) brelse(bh);
 
-			bh = bread(inode->i_dev, iblock, EFS_BLOCKSIZE);
+			bh = sb_bread(inode->i_sb, iblock);
 			if (!bh) {
 				printk(KERN_ERR "EFS: bread() failed at block %d\n", iblock);
 				return 0;
diff --git a/fs/efs/namei.c b/fs/efs/namei.c
index cc85f5d..cc06bc8 100644
--- a/fs/efs/namei.c
+++ b/fs/efs/namei.c
@@ -24,7 +24,7 @@
 
 	for(block = 0; block < inode->i_blocks; block++) {
 
-		bh = bread(inode->i_dev, efs_bmap(inode, block), EFS_DIRBSIZE);
+		bh = sb_bread(inode->i_sb, efs_bmap(inode, block));
 		if (!bh) {
 			printk(KERN_ERR "EFS: find_entry(): failed to read dir block %d\n", block);
 			return 0;
diff --git a/fs/efs/super.c b/fs/efs/super.c
index 79ee8d8..691f6df 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -137,11 +137,14 @@
 	struct buffer_head *bh;
 
  	sb = SUPER_INFO(s);
-
+ 
+	s->s_magic		= EFS_SUPER_MAGIC;
+	s->s_blocksize		= EFS_BLOCKSIZE;
+	s->s_blocksize_bits	= EFS_BLOCKSIZE_BITS;
 	set_blocksize(dev, EFS_BLOCKSIZE);
   
 	/* read the vh (volume header) block */
-	bh = bread(dev, 0, EFS_BLOCKSIZE);
+	bh = sb_bread(s, 0);
 
 	if (!bh) {
 		printk(KERN_ERR "EFS: cannot read volume header\n");
@@ -160,7 +163,7 @@
 		goto out_no_fs_ul;
 	}
 
-	bh = bread(dev, sb->fs_start + EFS_SUPER, EFS_BLOCKSIZE);
+	bh = sb_bread(s, sb->fs_start + EFS_SUPER);
 	if (!bh) {
 		printk(KERN_ERR "EFS: cannot read superblock\n");
 		goto out_no_fs_ul;
@@ -174,10 +177,6 @@
 		goto out_no_fs_ul;
 	}
 	brelse(bh);
- 
-	s->s_magic		= EFS_SUPER_MAGIC;
-	s->s_blocksize		= EFS_BLOCKSIZE;
-	s->s_blocksize_bits	= EFS_BLOCKSIZE_BITS;
 
 	if (!(s->s_flags & MS_RDONLY)) {
 #ifdef DEBUG
diff --git a/fs/efs/symlink.c b/fs/efs/symlink.c
index b5d17f3..5dd10f5 100644
--- a/fs/efs/symlink.c
+++ b/fs/efs/symlink.c
@@ -26,13 +26,13 @@
 	lock_kernel();
 	/* read first 512 bytes of link target */
 	err = -EIO;
-	bh = bread(inode->i_dev, efs_bmap(inode, 0), EFS_BLOCKSIZE);
+	bh = sb_bread(inode->i_sb, efs_bmap(inode, 0));
 	if (!bh)
 		goto fail;
 	memcpy(link, bh->b_data, (size > EFS_BLOCKSIZE) ? EFS_BLOCKSIZE : size);
 	brelse(bh);
 	if (size > EFS_BLOCKSIZE) {
-		bh = bread(inode->i_dev, efs_bmap(inode, 1), EFS_BLOCKSIZE);
+		bh = sb_bread(inode->i_sb, efs_bmap(inode, 1));
 		if (!bh)
 			goto fail;
 		memcpy(link + EFS_BLOCKSIZE, bh->b_data, size - EFS_BLOCKSIZE);
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 3d799f7..da07d94 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -88,7 +88,7 @@
 	if (!gdp)
 		goto error_out;
 	retval = 0;
-	bh = bread (sb->s_dev, le32_to_cpu(gdp->bg_block_bitmap), sb->s_blocksize);
+	bh = sb_bread(sb, le32_to_cpu(gdp->bg_block_bitmap));
 	if (!bh) {
 		ext2_error (sb, "read_block_bitmap",
 			    "Cannot read block bitmap - "
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index bcc088a..46e7f22 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -51,8 +51,7 @@
 	if (!desc)
 		goto error_out;
 
-	bh = bread(sb->s_dev, le32_to_cpu(desc->bg_inode_bitmap),
-			sb->s_blocksize);
+	bh = sb_bread(sb, le32_to_cpu(desc->bg_inode_bitmap));
 	if (!bh)
 		ext2_error (sb, "read_inode_bitmap",
 			    "Cannot read inode bitmap - "
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 3665f5e..e96e1a0 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -239,8 +239,7 @@
 				 Indirect chain[4],
 				 int *err)
 {
-	kdev_t dev = inode->i_dev;
-	int size = inode->i_sb->s_blocksize;
+	struct super_block *sb = inode->i_sb;
 	Indirect *p = chain;
 	struct buffer_head *bh;
 
@@ -250,7 +249,7 @@
 	if (!p->key)
 		goto no_block;
 	while (--depth) {
-		bh = bread(dev, le32_to_cpu(p->key), size);
+		bh = sb_bread(sb, le32_to_cpu(p->key));
 		if (!bh)
 			goto failure;
 		/* Reader: pointers */
@@ -399,7 +398,7 @@
 		 * Get buffer_head for parent block, zero it out and set 
 		 * the pointer to new one, then send parent to disk.
 		 */
-		bh = getblk(inode->i_dev, parent, blocksize);
+		bh = sb_getblk(inode->i_sb, parent);
 		lock_buffer(bh);
 		memset(bh->b_data, 0, blocksize);
 		branch[n].bh = bh;
@@ -763,7 +762,7 @@
 			if (!nr)
 				continue;
 			*p = 0;
-			bh = bread (inode->i_dev, nr, inode->i_sb->s_blocksize);
+			bh = sb_bread(inode->i_sb, nr);
 			/*
 			 * A read failure? Report error and clear slot
 			 * (should be rare).
@@ -921,7 +920,7 @@
 		EXT2_INODE_SIZE(inode->i_sb);
 	block = le32_to_cpu(gdp[desc].bg_inode_table) +
 		(offset >> EXT2_BLOCK_SIZE_BITS(inode->i_sb));
-	if (!(bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize))) {
+	if (!(bh = sb_bread(inode->i_sb, block))) {
 		ext2_error (inode->i_sb, "ext2_read_inode",
 			    "unable to read inode block - "
 			    "inode=%lu, block=%lu", inode->i_ino, block);
@@ -1063,7 +1062,7 @@
 		EXT2_INODE_SIZE(inode->i_sb);
 	block = le32_to_cpu(gdp[desc].bg_inode_table) +
 		(offset >> EXT2_BLOCK_SIZE_BITS(inode->i_sb));
-	if (!(bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize))) {
+	if (!(bh = sb_bread(inode->i_sb, block))) {
 		ext2_error (inode->i_sb, "ext2_write_inode",
 			    "unable to read inode block - "
 			    "inode=%lu, block=%lu", inode->i_ino, block);
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index ee386b0..c9c5444 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -432,6 +432,7 @@
 		printk ("EXT2-fs: unable to set blocksize %d\n", blocksize);
 		return NULL;
 	}
+	sb->s_blocksize = blocksize;
 
 	/*
 	 * If the superblock doesn't start on a sector boundary,
@@ -443,7 +444,7 @@
 		offset = (sb_block*BLOCK_SIZE) % blocksize;
 	}
 
-	if (!(bh = bread (dev, logic_sb_block, blocksize))) {
+	if (!(bh = sb_bread(sb, logic_sb_block))) {
 		printk ("EXT2-fs: unable to read superblock\n");
 		return NULL;
 	}
@@ -502,7 +503,7 @@
 
 		logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
 		offset = (sb_block*BLOCK_SIZE) % blocksize;
-		bh = bread (dev, logic_sb_block, blocksize);
+		bh = sb_bread(sb, logic_sb_block);
 		if(!bh) {
 			printk("EXT2-fs: Couldn't read superblock on "
 			       "2nd try.\n");
@@ -606,8 +607,7 @@
 		goto failed_mount;
 	}
 	for (i = 0; i < db_count; i++) {
-		sb->u.ext2_sb.s_group_desc[i] = bread (dev, logic_sb_block + i + 1,
-						       sb->s_blocksize);
+		sb->u.ext2_sb.s_group_desc[i] = sb_bread(sb, logic_sb_block + i + 1);
 		if (!sb->u.ext2_sb.s_group_desc[i]) {
 			for (j = 0; j < i; j++)
 				brelse (sb->u.ext2_sb.s_group_desc[j]);
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index db676c0..f4f87da 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -91,8 +91,7 @@
 	if (!gdp)
 		goto error_out;
 	retval = 0;
-	bh = bread (sb->s_dev,
-			le32_to_cpu(gdp->bg_block_bitmap), sb->s_blocksize);
+	bh = sb_bread(sb, le32_to_cpu(gdp->bg_block_bitmap));
 	if (!bh) {
 		ext3_error (sb, "read_block_bitmap",
 			    "Cannot read block bitmap - "
@@ -353,8 +352,7 @@
 #ifdef CONFIG_JBD_DEBUG
 		{
 			struct buffer_head *debug_bh;
-			debug_bh = get_hash_table(sb->s_dev, block + i,
-							sb->s_blocksize);
+			debug_bh = sb_get_hash_table(sb, block + i);
 			if (debug_bh) {
 				BUFFER_TRACE(debug_bh, "Deleted!");
 				if (!bh2jh(bitmap_bh)->b_committed_data)
@@ -702,7 +700,7 @@
 		struct buffer_head *debug_bh;
 
 		/* Record bitmap buffer state in the newly allocated block */
-		debug_bh = get_hash_table(sb->s_dev, tmp, sb->s_blocksize);
+		debug_bh = sb_get_hash_table(sb, tmp);
 		if (debug_bh) {
 			BUFFER_TRACE(debug_bh, "state when allocated");
 			BUFFER_TRACE2(debug_bh, bh, "bitmap state");
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 088f796..062ed93 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -60,8 +60,7 @@
 		retval = -EIO;
 		goto error_out;
 	}
-	bh = bread (sb->s_dev,
-			le32_to_cpu(gdp->bg_inode_bitmap), sb->s_blocksize);
+	bh = sb_bread(sb, le32_to_cpu(gdp->bg_inode_bitmap));
 	if (!bh) {
 		ext3_error (sb, "read_inode_bitmap",
 			    "Cannot read inode bitmap - "
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index ca171a4..b3e997f 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -389,8 +389,7 @@
 static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
 				 Indirect chain[4], int *err)
 {
-	kdev_t dev = inode->i_dev;
-	int blocksize = inode->i_sb->s_blocksize;
+	struct super_block *sb = inode->i_sb;
 	Indirect *p = chain;
 	struct buffer_head *bh;
 
@@ -400,7 +399,7 @@
 	if (!p->key)
 		goto no_block;
 	while (--depth) {
-		bh = bread(dev, le32_to_cpu(p->key), blocksize);
+		bh = sb_bread(sb, le32_to_cpu(p->key));
 		if (!bh)
 			goto failure;
 		/* Reader: pointers */
@@ -558,7 +557,7 @@
 			 * and set the pointer to new one, then send
 			 * parent to disk.  
 			 */
-			bh = getblk(inode->i_dev, parent, blocksize);
+			bh = sb_getblk(inode->i_sb, parent);
 			branch[n].bh = bh;
 			lock_buffer(bh);
 			BUFFER_TRACE(bh, "call get_create_access");
@@ -854,8 +853,7 @@
 	*errp = ext3_get_block_handle(handle, inode, block, &dummy, create);
 	if (!*errp && buffer_mapped(&dummy)) {
 		struct buffer_head *bh;
-		bh = getblk(dummy.b_dev, dummy.b_blocknr,
-					inode->i_sb->s_blocksize);
+		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
 		if (buffer_new(&dummy)) {
 			J_ASSERT(create != 0);
 			J_ASSERT(handle != 0);
@@ -1549,9 +1547,6 @@
 		u32 *first, u32 *last)
 {
 	u32 *p;
-	kdev_t dev = inode->i_sb->s_dev;
-	unsigned long blocksize = inode->i_sb->s_blocksize;
-
 	if (try_to_extend_transaction(handle, inode)) {
 		if (bh) {
 			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
@@ -1577,7 +1572,7 @@
 			struct buffer_head *bh;
 
 			*p = 0;
-			bh = get_hash_table(dev, nr, blocksize);
+			bh = sb_get_hash_table(inode->i_sb, nr);
 			ext3_forget(handle, 0, inode, bh, nr);
 		}
 	}
@@ -1690,7 +1685,7 @@
 				continue;		/* A hole */
 
 			/* Go read the buffer for the next level down */
-			bh = bread(inode->i_dev, nr, inode->i_sb->s_blocksize);
+			bh = sb_bread(inode->i_sb, nr);
 
 			/*
 			 * A read failure? Report error and clear slot
@@ -2003,7 +1998,7 @@
 		EXT3_INODE_SIZE(inode->i_sb);
 	block = le32_to_cpu(gdp[desc].bg_inode_table) +
 		(offset >> EXT3_BLOCK_SIZE_BITS(inode->i_sb));
-	if (!(bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize))) {
+	if (!(bh = sb_bread(inode->i_sb, block))) {
 		ext3_error (inode->i_sb, "ext3_get_inode_loc",
 			    "unable to read inode block - "
 			    "inode=%lu, block=%lu", inode->i_ino, block);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 9a5e189..d7ebe39 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -925,6 +925,7 @@
 		goto out_fail;
 	}
 
+	sb->s_blocksize = blocksize;
 	set_blocksize (dev, blocksize);
 
 	/*
@@ -936,7 +937,7 @@
 		offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
 	}
 
-	if (!(bh = bread (dev, logic_sb_block, blocksize))) {
+	if (!(bh = sb_bread(sb, logic_sb_block))) {
 		printk (KERN_ERR "EXT3-fs: unable to read superblock\n");
 		goto out_fail;
 	}
@@ -1009,7 +1010,7 @@
 		set_blocksize (dev, sb->s_blocksize);
 		logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize;
 		offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
-		bh = bread (dev, logic_sb_block, blocksize);
+		bh = sb_bread(sb, logic_sb_block);
 		if (!bh) {
 			printk(KERN_ERR 
 			       "EXT3-fs: Can't read superblock on 2nd try.\n");
@@ -1093,8 +1094,7 @@
 		goto failed_mount;
 	}
 	for (i = 0; i < db_count; i++) {
-		sbi->s_group_desc[i] = bread(dev, logic_sb_block + i + 1,
-					     blocksize);
+		sbi->s_group_desc[i] = sb_bread(sb, logic_sb_block + i + 1);
 		if (!sbi->s_group_desc[i]) {
 			printk (KERN_ERR "EXT3-fs: "
 				"can't read group descriptor %d\n", i);
diff --git a/fs/fat/buffer.c b/fs/fat/buffer.c
index d8a4d0b..117d85b 100644
--- a/fs/fat/buffer.c
+++ b/fs/fat/buffer.c
@@ -59,12 +59,12 @@
 
 struct buffer_head *default_fat_bread(struct super_block *sb, int block)
 {
-	return bread (sb->s_dev, block, sb->s_blocksize);
+	return sb_bread(sb, block);
 }
 
 struct buffer_head *default_fat_getblk(struct super_block *sb, int block)
 {
-	return getblk (sb->s_dev, block, sb->s_blocksize);
+	return sb_getblk(sb, block);
 }
 
 void default_fat_brelse(struct super_block *sb, struct buffer_head *bh)
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index bba65ef..5bbebb0 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -584,7 +584,7 @@
 
 	sb->s_blocksize = hard_blksize;
 	set_blocksize(sb->s_dev, hard_blksize);
-	bh = bread(sb->s_dev, 0, sb->s_blocksize);
+	bh = sb_bread(sb, 0);
 	if (bh == NULL) {
 		printk("FAT: unable to read boot sector\n");
 		goto out_fail;
@@ -656,7 +656,7 @@
 			(sbi->fsinfo_sector * logical_sector_size) % hard_blksize;
 		fsinfo_bh = bh;
 		if (fsinfo_block != 0) {
-			fsinfo_bh = bread(sb->s_dev, fsinfo_block, hard_blksize);
+			fsinfo_bh = sb_bread(sb, fsinfo_block);
 			if (fsinfo_bh == NULL) {
 				printk("FAT: bread failed, FSINFO block"
 				       " (blocknr = %d)\n", fsinfo_block);
diff --git a/fs/freevxfs/vxfs_bmap.c b/fs/freevxfs/vxfs_bmap.c
index 979bb37..fb3eeeb 100644
--- a/fs/freevxfs/vxfs_bmap.c
+++ b/fs/freevxfs/vxfs_bmap.c
@@ -137,9 +137,8 @@
 		struct vxfs_typed	*typ;
 		int64_t			off;
 
-		bp = bread(ip->i_dev,
-				indir + (i / VXFS_TYPED_PER_BLOCK(ip->i_sb)),
-				ip->i_sb->s_blocksize);
+		bp = sb_bread(ip->i_sb,
+				indir + (i / VXFS_TYPED_PER_BLOCK(ip->i_sb)));
 		if (!buffer_mapped(bp))
 			return 0;
 
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index a06f13f3..363e1ac 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -104,7 +104,7 @@
 
 	block = extent + ((ino * VXFS_ISIZE) / sbp->s_blocksize);
 	offset = ((ino % (sbp->s_blocksize / VXFS_ISIZE)) * VXFS_ISIZE);
-	bp = bread(sbp->s_dev, block, sbp->s_blocksize);
+	bp = sb_bread(sbp, block);
 
 	if (buffer_mapped(bp)) {
 		struct vxfs_inode_info	*vip;
diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c
index 7fe6688..341d5c1 100644
--- a/fs/freevxfs/vxfs_subr.c
+++ b/fs/freevxfs/vxfs_subr.c
@@ -114,7 +114,7 @@
 	daddr_t			pblock;
 
 	pblock = vxfs_bmap1(ip, block);
-	bp = bread(ip->i_dev, pblock, ip->i_sb->s_blocksize);
+	bp = sb_bread(ip->i_sb, pblock);
 
 	return (bp);
 }
@@ -135,7 +135,7 @@
  *   Zero on success, else a negativ error code (-EIO).
  */
 static int
-vxfs_getblk(struct inode *ip, long iblock,
+vxfs_getblk(struct inode *ip, sector_t iblock,
 	    struct buffer_head *bp, int create)
 {
 	daddr_t			pblock;
diff --git a/fs/hfs/file.c b/fs/hfs/file.c
index fbfdc1b..d2043ae 100644
--- a/fs/hfs/file.c
+++ b/fs/hfs/file.c
@@ -61,7 +61,7 @@
 struct buffer_head *hfs_getblk(struct hfs_fork *fork, int block, int create)
 {
 	int tmp;
-	kdev_t dev = fork->entry->mdb->sys_mdb->s_dev;
+	struct super_block *sb = fork->entry->mdb->sys_mdb;
 
 	tmp = hfs_extent_map(fork, block, create);
 
@@ -71,7 +71,7 @@
 		*/
 		if (tmp) {
 			hfs_cat_mark_dirty(fork->entry);
-			return getblk(dev, tmp, HFS_SECTOR_SIZE);
+			return sb_getblk(sb, tmp);
 		}
 		return NULL;
 	} else {
@@ -80,8 +80,7 @@
 		   we waited on the I/O in getblk to complete.
 		*/
 		do {
-			struct buffer_head *bh =
-					getblk(dev, tmp, HFS_SECTOR_SIZE);
+			struct buffer_head *bh = sb_getblk(sb, tmp);
 			int tmp2 = hfs_extent_map(fork, block, 0);
 
 			if (tmp2 == tmp) {
@@ -107,7 +106,7 @@
  * block number.  This function just calls hfs_extent_map() to do the
  * real work and then stuffs the appropriate info into the buffer_head.
  */
-int hfs_get_block(struct inode *inode, long iblock, struct buffer_head *bh_result, int create)
+int hfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
 {
 	unsigned long phys;
 
diff --git a/fs/hfs/hfs.h b/fs/hfs/hfs.h
index f266bc0..e328a14 100644
--- a/fs/hfs/hfs.h
+++ b/fs/hfs/hfs.h
@@ -495,7 +495,7 @@
 extern void hfs_extent_free(struct hfs_fork *);
 
 /* file.c */
-extern int hfs_get_block(struct inode *, long, struct buffer_head *, int);
+extern int hfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
 
 /* mdb.c */
 extern struct hfs_mdb *hfs_mdb_get(hfs_sysmdb, int, hfs_s32);
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index a39710f..6d69f71 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -402,6 +402,8 @@
 
 	/* set the device driver to 512-byte blocks */
 	set_blocksize(dev, HFS_SECTOR_SIZE);
+	s->s_blocksize_bits = HFS_SECTOR_SIZE_BITS;
+	s->s_blocksize = HFS_SECTOR_SIZE;
 
 #ifdef CONFIG_MAC_PARTITION
 	/* check to see if we're in a partition */
@@ -437,8 +439,6 @@
 	}
 
 	s->s_magic = HFS_SUPER_MAGIC;
-	s->s_blocksize_bits = HFS_SECTOR_SIZE_BITS;
-	s->s_blocksize = HFS_SECTOR_SIZE;
 	s->s_op = &hfs_super_operations;
 
 	/* try to get the root inode */
diff --git a/fs/hfs/sysdep.c b/fs/hfs/sysdep.c
index 4ce747a..c96107d 100644
--- a/fs/hfs/sysdep.c
+++ b/fs/hfs/sysdep.c
@@ -41,9 +41,9 @@
 	hfs_buffer tmp = HFS_BAD_BUFFER;
 
 	if (read) {
-		tmp = bread(sys_mdb->s_dev, block, HFS_SECTOR_SIZE);
+		tmp = sb_bread(sys_mdb, block);
 	} else {
-		tmp = getblk(sys_mdb->s_dev, block, HFS_SECTOR_SIZE);
+		tmp = sb_getblk(sys_mdb, block);
 		if (tmp) {
 			mark_buffer_uptodate(tmp, 1);
 		}
diff --git a/fs/hpfs/buffer.c b/fs/hpfs/buffer.c
index c7b63f3..66067c2 100644
--- a/fs/hpfs/buffer.c
+++ b/fs/hpfs/buffer.c
@@ -122,12 +122,9 @@
 void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp,
 		 int ahead)
 {
-	kdev_t dev = s->s_dev;
 	struct buffer_head *bh;
 
-	if (!ahead || secno + ahead >= s->s_hpfs_fs_size)
-		*bhp = bh = bread(dev, secno, 512);
-	else *bhp = bh = bread(dev, secno, 512);
+	*bhp = bh = sb_bread(s, secno);
 	if (bh != NULL)
 		return bh->b_data;
 	else {
@@ -143,7 +140,7 @@
 	struct buffer_head *bh;
 	/*return hpfs_map_sector(s, secno, bhp, 0);*/
 
-	if ((*bhp = bh = getblk(s->s_dev, secno, 512)) != NULL) {
+	if ((*bhp = bh = sb_getblk(s, secno)) != NULL) {
 		if (!buffer_uptodate(bh)) wait_on_buffer(bh);
 		mark_buffer_uptodate(bh, 1);
 		return bh->b_data;
@@ -158,7 +155,6 @@
 void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh,
 		   int ahead)
 {
-	kdev_t dev = s->s_dev;
 	struct buffer_head *bh;
 	char *data;
 
@@ -173,24 +169,22 @@
 		goto bail;
 	}
 
-	if (!ahead || secno + 4 + ahead > s->s_hpfs_fs_size)
-		qbh->bh[0] = bh = bread(dev, secno, 512);
-	else qbh->bh[0] = bh = bread(dev, secno, 512);
+	qbh->bh[0] = bh = sb_bread(s, secno);
 	if (!bh)
 		goto bail0;
 	memcpy(data, bh->b_data, 512);
 
-	qbh->bh[1] = bh = bread(dev, secno + 1, 512);
+	qbh->bh[1] = bh = sb_bread(s, secno + 1);
 	if (!bh)
 		goto bail1;
 	memcpy(data + 512, bh->b_data, 512);
 
-	qbh->bh[2] = bh = bread(dev, secno + 2, 512);
+	qbh->bh[2] = bh = sb_bread(s, secno + 2);
 	if (!bh)
 		goto bail2;
 	memcpy(data + 2 * 512, bh->b_data, 512);
 
-	qbh->bh[3] = bh = bread(dev, secno + 3, 512);
+	qbh->bh[3] = bh = sb_bread(s, secno + 3);
 	if (!bh)
 		goto bail3;
 	memcpy(data + 3 * 512, bh->b_data, 512);
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index cefb5b3..bce5d5d 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -68,7 +68,7 @@
 	hpfs_write_inode(i);
 }
 
-int hpfs_get_block(struct inode *inode, long iblock, struct buffer_head *bh_result, int create)
+int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
 {
 	secno s;
 	s = hpfs_bmap(inode, iblock);
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index e30e613..7fa8f74 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -259,7 +259,7 @@
 int hpfs_file_fsync(struct file *, struct dentry *, int);
 secno hpfs_bmap(struct inode *, unsigned);
 void hpfs_truncate(struct inode *);
-int hpfs_get_block(struct inode *inode, long iblock, struct buffer_head *bh_result, int create);
+int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create);
 ssize_t hpfs_file_write(struct file *file, const char *buf, size_t count, loff_t *ppos);
 
 /* inode.c */
diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c
index 48a358d..29db6b6 100644
--- a/fs/isofs/dir.c
+++ b/fs/isofs/dir.c
@@ -123,7 +123,7 @@
 		int de_len;
 
 		if (!bh) {
-			bh = isofs_bread(inode, bufsize, block);
+			bh = isofs_bread(inode, block);
 			if (!bh)
 				return 0;
 		}
@@ -158,7 +158,7 @@
 			brelse(bh);
 			bh = NULL;
 			if (offset) {
-				bh = isofs_bread(inode, bufsize, block);
+				bh = isofs_bread(inode, block);
 				if (!bh)
 					return 0;
 				memcpy((void *) tmpde + slop, bh->b_data, offset);
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 305bf8f..cbf4cab 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -527,6 +527,7 @@
 	}
 
 	set_blocksize(dev, opt.blocksize);
+	s->s_blocksize = opt.blocksize;
 
 	s->u.isofs_sb.s_high_sierra = high_sierra = 0; /* default is iso9660 */
 
@@ -540,8 +541,8 @@
 	    struct iso_volume_descriptor  * vdp;
 
 	    block = iso_blknum << (ISOFS_BLOCK_BITS-blocksize_bits);
-	    if (!(bh = bread(dev, block, opt.blocksize)))
-		goto out_no_read;		
+	    if (!(bh = sb_bread(s, block)))
+		goto out_no_read;
 
 	    vdp = (struct iso_volume_descriptor *)bh->b_data;
 	    hdp = (struct hs_volume_descriptor *)bh->b_data;
@@ -896,7 +897,6 @@
 	unsigned int firstext;
 	unsigned long nextino;
 	int section, rv;
-	unsigned int blocksize = inode->i_sb->s_blocksize;
 
 	lock_kernel();
 
@@ -957,7 +957,7 @@
 			(*bh_result)->b_blocknr  = firstext + b_off - offset;
 			(*bh_result)->b_state   |= (1UL << BH_Mapped);
 		} else {
-			*bh_result = getblk(inode->i_dev, firstext+b_off-offset, blocksize);
+			*bh_result = sb_getblk(inode->i_sb, firstext+b_off-offset);
 			if ( !*bh_result )
 				goto abort;
 		}
@@ -1000,12 +1000,12 @@
 	return 0;
 }
 
-struct buffer_head *isofs_bread(struct inode *inode, unsigned int bufsize, unsigned int block)
+struct buffer_head *isofs_bread(struct inode *inode, unsigned int block)
 {
 	unsigned int blknr = isofs_bmap(inode, block);
 	if (!blknr)
 		return NULL;
-	return bread(inode->i_dev, blknr, bufsize);
+	return sb_bread(inode->i_sb, blknr);
 }
 
 static int isofs_readpage(struct file *file, struct page *page)
@@ -1060,7 +1060,7 @@
 		unsigned int de_len;
 
 		if (!bh) {
-			bh = bread(inode->i_dev, block, bufsize);
+			bh = sb_bread(inode->i_sb, block);
 			if (!bh)
 				goto out_noread;
 		}
@@ -1092,7 +1092,7 @@
 			brelse(bh);
 			bh = NULL;
 			if (offset) {
-				bh = bread(inode->i_dev, block, bufsize);
+				bh = sb_bread(inode->i_sb, block);
 				if (!bh)
 					goto out_noread;
 				memcpy((void *) tmpde + slop, bh->b_data, offset);
@@ -1150,7 +1150,7 @@
 	unsigned long offset;
 	int volume_seq_no, i;
 
-	bh = bread(inode->i_dev, block, bufsize);
+	bh = sb_bread(inode->i_sb, block);
 	if (!bh)
 		goto out_badread;
 
@@ -1168,7 +1168,7 @@
 		}
 		memcpy(tmpde, bh->b_data + offset, frag1);
 		brelse(bh);
-		bh = bread(inode->i_dev, ++block, bufsize);
+		bh = sb_bread(inode->i_sb, ++block);
 		if (!bh)
 			goto out_badread;
 		memcpy((char *)tmpde+frag1, bh->b_data, de_len - frag1);
@@ -1345,7 +1345,7 @@
 #ifdef LEAK_CHECK
 #undef malloc
 #undef free_s
-#undef bread
+#undef sb_bread
 #undef brelse
 
 void * leak_check_malloc(unsigned int size){
@@ -1360,9 +1360,9 @@
   return kfree(obj);
 }
 
-struct buffer_head * leak_check_bread(int dev, int block, int size){
+struct buffer_head * leak_check_bread(struct super_block *sb, int block){
   check_bread++;
-  return bread(dev, block, size);
+  return sb_bread(sb, block);
 }
 
 void leak_check_brelse(struct buffer_head * bh){
diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c
index 87fe121..13e79d4 100644
--- a/fs/isofs/namei.c
+++ b/fs/isofs/namei.c
@@ -78,7 +78,7 @@
 		char *dpnt;
 
 		if (!bh) {
-			bh = isofs_bread(dir, bufsize, block);
+			bh = isofs_bread(dir, block);
 			if (!bh)
 				return 0;
 		}
@@ -108,7 +108,7 @@
 			brelse(bh);
 			bh = NULL;
 			if (offset) {
-				bh = isofs_bread(dir, bufsize, block);
+				bh = isofs_bread(dir, block);
 				if (!bh)
 					return 0;
 				memcpy((void *) tmpde + slop, bh->b_data, offset);
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index ee1413f..6906a8e 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -69,7 +69,7 @@
     block = cont_extent; \
     offset = cont_offset; \
     offset1 = 0; \
-    pbh = bread(DEV->i_dev, block, ISOFS_BUFFER_SIZE(DEV)); \
+    pbh = sb_bread(DEV->i_sb, block); \
     if(pbh){       \
       memcpy(buffer + offset1, pbh->b_data + offset, cont_size - offset1); \
       brelse(pbh); \
@@ -511,7 +511,7 @@
 
 	block = inode->i_ino >> bufbits;
 	lock_kernel();
-	bh = bread(inode->i_dev, block, bufsize);
+	bh = sb_bread(inode->i_sb, block);
 	if (!bh)
 		goto out_noread;
 
diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
index 491a7fd..6347bb1 100644
--- a/fs/minix/bitmap.c
+++ b/fs/minix/bitmap.c
@@ -133,7 +133,7 @@
 	ino--;
 	block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks +
 		 ino / MINIX_INODES_PER_BLOCK;
-	*bh = bread(sb->s_dev, block, BLOCK_SIZE);
+	*bh = sb_bread(sb, block);
 	if (!*bh) {
 		printk("unable to read i-node block\n");
 		return NULL;
@@ -158,7 +158,7 @@
 	ino--;
 	block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks +
 		 ino / MINIX2_INODES_PER_BLOCK;
-	*bh = bread(sb->s_dev, block, BLOCK_SIZE);
+	*bh = sb_bread(sb, block);
 	if (!*bh) {
 		printk("unable to read i-node block\n");
 		return NULL;
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index fe53b49..5525a48 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -143,15 +143,15 @@
 		goto out_bad_hblock;
 
 	set_blocksize(dev, BLOCK_SIZE);
-	if (!(bh = bread(dev,1,BLOCK_SIZE)))
+	s->s_blocksize = BLOCK_SIZE;
+	s->s_blocksize_bits = BLOCK_SIZE_BITS;
+	if (!(bh = sb_bread(s, 1)))
 		goto out_bad_sb;
 
 	ms = (struct minix_super_block *) bh->b_data;
 	sbi->s_ms = ms;
 	sbi->s_sbh = bh;
 	sbi->s_mount_state = ms->s_state;
-	s->s_blocksize = BLOCK_SIZE;
-	s->s_blocksize_bits = BLOCK_SIZE_BITS;
 	sbi->s_ninodes = ms->s_ninodes;
 	sbi->s_nzones = ms->s_nzones;
 	sbi->s_imap_blocks = ms->s_imap_blocks;
@@ -198,12 +198,12 @@
 
 	block=2;
 	for (i=0 ; i < sbi->s_imap_blocks ; i++) {
-		if (!(sbi->s_imap[i]=bread(dev,block,BLOCK_SIZE)))
+		if (!(sbi->s_imap[i]=sb_bread(s, block)))
 			goto out_no_bitmap;
 		block++;
 	}
 	for (i=0 ; i < sbi->s_zmap_blocks ; i++) {
-		if (!(sbi->s_zmap[i]=bread(dev,block,BLOCK_SIZE)))
+		if (!(sbi->s_zmap[i]=sb_bread(s, block)))
 			goto out_no_bitmap;
 		block++;
 	}
diff --git a/fs/minix/itree_common.c b/fs/minix/itree_common.c
index 0aee59b..373df98 100644
--- a/fs/minix/itree_common.c
+++ b/fs/minix/itree_common.c
@@ -30,7 +30,7 @@
 					Indirect chain[DEPTH],
 					int *err)
 {
-	kdev_t dev = inode->i_dev;
+	struct super_block *sb = inode->i_sb;
 	Indirect *p = chain;
 	struct buffer_head *bh;
 
@@ -40,7 +40,7 @@
 	if (!p->key)
 		goto no_block;
 	while (--depth) {
-		bh = bread(dev, block_to_cpu(p->key), BLOCK_SIZE);
+		bh = sb_bread(sb, block_to_cpu(p->key));
 		if (!bh)
 			goto failure;
 		/* Reader: pointers */
@@ -79,7 +79,7 @@
 		if (!nr)
 			break;
 		branch[n].key = cpu_to_block(nr);
-		bh = getblk(inode->i_dev, parent, BLOCK_SIZE);
+		bh = sb_getblk(inode->i_sb, parent);
 		lock_buffer(bh);
 		memset(bh->b_data, 0, BLOCK_SIZE);
 		branch[n].bh = bh;
@@ -277,7 +277,7 @@
 			if (!nr)
 				continue;
 			*p = 0;
-			bh = bread (inode->i_dev, nr, BLOCK_SIZE);
+			bh = sb_bread(inode->i_sb, nr);
 			if (!bh)
 				continue;
 			free_branches(inode, (block_t*)bh->b_data,
diff --git a/fs/namespace.c b/fs/namespace.c
index d790be36..bbe1258 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -517,9 +517,11 @@
 
 	if (mnt) {
 		err = graft_tree(mnt, nd);
-		if (err)
+		if (err) {
+			spin_lock(&dcache_lock);
 			umount_tree(mnt);
-		else
+			spin_unlock(&dcache_lock);
+		} else
 			mntput(mnt);
 	}
 
diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c
index 58f543c..b72f910 100644
--- a/fs/ncpfs/ncplib_kernel.c
+++ b/fs/ncpfs/ncplib_kernel.c
@@ -52,14 +52,6 @@
 	return;
 }
 
-static void ncp_add_mem_fromfs(struct ncp_server *server, const char *source, int size)
-{
-	assert_server_locked(server);
-	copy_from_user(&(server->packet[server->current_size]), source, size);
-	server->current_size += size;
-	return;
-}
-
 static void ncp_add_pstring(struct ncp_server *server, const char *s)
 {
 	int len = strlen(s);
diff --git a/fs/ntfs/fs.c b/fs/ntfs/fs.c
index 4533c63..5076c1d 100644
--- a/fs/ntfs/fs.c
+++ b/fs/ntfs/fs.c
@@ -1023,8 +1023,9 @@
 		ntfs_error("Unable to set blocksize %d.\n", blocksize);
 		goto ntfs_read_super_vol;
 	}
+	sb->s_blocksize = blocksize;
 	/* Read the super block (boot block). */
-	if (!(bh = bread(sb->s_dev, 0, blocksize))) {
+	if (!(bh = sb_bread(sb, 0))) {
 		ntfs_error("Reading super block failed\n");
 		goto ntfs_read_super_unl;
 	}
@@ -1071,8 +1072,7 @@
 	if (to_read < 1)
 		to_read = 1;
 	for (i = 0; i < to_read; i++) {
-		if (!(bh = bread(sb->s_dev, vol->mft_lcn + i,
-							  vol->cluster_size))) {
+		if (!(bh = sb_bread(sb, vol->mft_lcn + i))) {
 			ntfs_error("Could not read $Mft record 0\n");
 			goto ntfs_read_super_mft;
 		}
diff --git a/fs/ntfs/support.c b/fs/ntfs/support.c
index 2f290b4..d490f25 100644
--- a/fs/ntfs/support.c
+++ b/fs/ntfs/support.c
@@ -169,7 +169,7 @@
 		   buf->do_read ? "get" : "put", cluster, start_offs, length);
 	to_copy = vol->cluster_size - start_offs;
 	while (length) {
-		if (!(bh = bread(sb->s_dev, cluster, vol->cluster_size))) {
+		if (!(bh = sb_bread(sb, cluster))) {
 			ntfs_debug(DEBUG_OTHER, "%s failed\n",
 				   buf->do_read ? "Reading" : "Writing");
 			error = -EIO;
diff --git a/fs/qnx4/bitmap.c b/fs/qnx4/bitmap.c
index 09b9439..f544a43 100644
--- a/fs/qnx4/bitmap.c
+++ b/fs/qnx4/bitmap.c
@@ -69,7 +69,7 @@
 	struct buffer_head *bh;
 
 	while (total < size) {
-		if ((bh = bread(sb->s_dev, start + offset, QNX4_BLOCK_SIZE)) == NULL) {
+		if ((bh = sb_bread(sb, start + offset)) == NULL) {
 			printk("qnx4: I/O error in counting free blocks\n");
 			break;
 		}
@@ -96,7 +96,7 @@
 	QNX4DEBUG(("qnx4: is_free requesting block [%lu], bitmap in block [%lu]\n",
 		   (unsigned long) block, (unsigned long) start));
 	(void) size;		/* CHECKME */
-	bh = bread(sb->s_dev, start, QNX4_BLOCK_SIZE);
+	bh = sb_bread(sb, start);
 	if (bh == NULL) {
 		return -EIO;
 	}
@@ -124,7 +124,7 @@
 	QNX4DEBUG(("qnx4: set_bitmap requesting block [%lu], bitmap in block [%lu]\n",
 		   (unsigned long) block, (unsigned long) start));
 	(void) size;		/* CHECKME */
-	bh = bread(sb->s_dev, start, QNX4_BLOCK_SIZE);
+	bh = sb_bread(sb, start);
 	if (bh == NULL) {
 		return -EIO;
 	}
diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c
index ac5d09b..49df47d 100644
--- a/fs/qnx4/dir.c
+++ b/fs/qnx4/dir.c
@@ -36,7 +36,7 @@
 
 	while (filp->f_pos < inode->i_size) {
 		blknum = qnx4_block_map( inode, filp->f_pos >> QNX4_BLOCK_SIZE_BITS );
-		bh = bread(inode->i_dev, blknum, QNX4_BLOCK_SIZE);
+		bh = sb_bread(inode->i_sb, blknum);
 		if(bh==NULL) {
 			printk(KERN_ERR "qnx4_readdir: bread failed (%ld)\n", blknum);
 			break;
diff --git a/fs/qnx4/fsync.c b/fs/qnx4/fsync.c
index 26c8d77..4ef5de9 100644
--- a/fs/qnx4/fsync.c
+++ b/fs/qnx4/fsync.c
@@ -24,8 +24,6 @@
 #include <asm/segment.h>
 #include <asm/system.h>
 
-#define blocksize QNX4_BLOCK_SIZE
-
 /*
  * The functions for qnx4 fs file synchronization.
  */
@@ -40,7 +38,7 @@
 	if (!*block)
 		return 0;
 	tmp = *block;
-	bh = get_hash_table(inode->i_dev, *block, blocksize);
+	bh = sb_get_hash_table(inode->i_sb, *block);
 	if (!bh)
 		return 0;
 	if (*block != tmp) {
@@ -74,7 +72,7 @@
 	rc = sync_block(inode, iblock, wait);
 	if (rc)
 		return rc;
-	*bh = bread(inode->i_dev, tmp, blocksize);
+	*bh = sb_bread(inode->i_sb, tmp);
 	if (tmp != *iblock) {
 		brelse(*bh);
 		*bh = NULL;
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 75ad8a8..cfec8ed 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -95,7 +95,7 @@
 	QNX4DEBUG(("qnx4: write inode 2.\n"));
 	block = ino / QNX4_INODES_PER_BLOCK;
 	lock_kernel();
-	if (!(bh = bread(inode->i_dev, block, QNX4_BLOCK_SIZE))) {
+	if (!(bh = sb_bread(inode->i_sb, block))) {
 		printk("qnx4: major problem: unable to read inode from dev "
 		       "%s\n", kdevname(inode->i_dev));
 		unlock_kernel();
@@ -162,7 +162,7 @@
 	if ( nr >= 0 )
 		nr = qnx4_block_map( inode, nr );
 	if (nr) {
-		result = getblk(inode->i_dev, nr, QNX4_BLOCK_SIZE);
+		result = sb_getblk(inode->i_sb, nr);
 		return result;
 	}
 	if (!create) {
@@ -173,7 +173,7 @@
 	if (!tmp) {
 		return NULL;
 	}
-	result = getblk(inode->i_dev, tmp, QNX4_BLOCK_SIZE);
+	result = sb_getblk(inode->i_sb, tmp);
 	if (tst) {
 		qnx4_free_block(inode->i_sb, tmp);
 		brelse(result);
@@ -204,7 +204,7 @@
 	return NULL;
 }
 
-int qnx4_get_block( struct inode *inode, long iblock, struct buffer_head *bh, int create )
+int qnx4_get_block( struct inode *inode, sector_t iblock, struct buffer_head *bh, int create )
 {
 	unsigned long phys;
 
@@ -243,7 +243,7 @@
 		while ( --nxtnt > 0 ) {
 			if ( ix == 0 ) {
 				// read next xtnt block.
-				bh = bread( inode->i_dev, i_xblk - 1, QNX4_BLOCK_SIZE );
+				bh = sb_bread(inode->i_sb, i_xblk - 1);
 				if ( !bh ) {
 					QNX4DEBUG(("qnx4: I/O error reading xtnt block [%ld])\n", i_xblk - 1));
 					return -EIO;
@@ -307,7 +307,7 @@
 		rd = le32_to_cpu(sb->u.qnx4_sb.sb->RootDir.di_first_xtnt.xtnt_blk) - 1;
 		rl = le32_to_cpu(sb->u.qnx4_sb.sb->RootDir.di_first_xtnt.xtnt_size);
 		for (j = 0; j < rl; j++) {
-			bh = bread(sb->s_dev, rd + j, QNX4_BLOCK_SIZE);	/* root dir, first block */
+			bh = sb_bread(sb, rd + j);	/* root dir, first block */
 			if (bh == NULL) {
 				return "unable to read root entry.";
 			}
@@ -350,7 +350,7 @@
 	/* Check the boot signature. Since the qnx4 code is
 	   dangerous, we should leave as quickly as possible
 	   if we don't belong here... */
-	bh = bread(dev, 0, QNX4_BLOCK_SIZE);
+	bh = sb_bread(s, 0);
 	if (!bh) {
 		printk("qnx4: unable to read the boot sector\n");
 		goto outnobh;
@@ -362,7 +362,7 @@
 	}
 	brelse(bh);
 
-	bh = bread(dev, 1, QNX4_BLOCK_SIZE);
+	bh = sb_bread(s, 1);
 	if (!bh) {
 		printk("qnx4: unable to read the superblock\n");
 		goto outnobh;
@@ -457,7 +457,7 @@
 	}
 	block = ino / QNX4_INODES_PER_BLOCK;
 
-	if (!(bh = bread(inode->i_dev, block, QNX4_BLOCK_SIZE))) {
+	if (!(bh = sb_bread(inode->i_sb, block))) {
 		printk("qnx4: major problem: unable to read inode from dev "
 		       "%s\n", kdevname(inode->i_dev));
 		return;
diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c
index 4c61dfd..5f800a8 100644
--- a/fs/reiserfs/fix_node.c
+++ b/fs/reiserfs/fix_node.c
@@ -920,7 +920,7 @@
   /* Get left neighbor block number. */
   n_left_neighbor_blocknr = B_N_CHILD_NUM(p_s_tb->FL[n_h], n_left_neighbor_position);
   /* Look for the left neighbor in the cache. */
-  if ( (left = get_hash_table(p_s_sb->s_dev, n_left_neighbor_blocknr, p_s_sb->s_blocksize)) ) {
+  if ( (left = sb_get_hash_table(p_s_sb, n_left_neighbor_blocknr)) ) {
 
     RFALSE( buffer_uptodate (left) && ! B_IS_IN_TREE(left),
 	    "vs-8170: left neighbor (%b %z) is not in the tree", left, left);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 4214850..65ac678 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1963,7 +1963,7 @@
 //
 // this is exactly what 2.3.99-pre9's ext2_bmap is
 //
-static int reiserfs_aop_bmap(struct address_space *as, sector_t block) {
+static int reiserfs_aop_bmap(struct address_space *as, long block) {
   return generic_block_bmap(as, block, reiserfs_bmap) ;
 }
 
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 2c71fd4..3b70c98 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -683,7 +683,7 @@
   count = 0 ;
   for (i = 0 ; atomic_read(&(jl->j_commit_left)) > 1 && i < (jl->j_len + 1) ; i++) {  /* everything but commit_bh */
     bn = reiserfs_get_journal_block(s) + (jl->j_start+i) % JOURNAL_BLOCK_COUNT;
-    tbh = get_hash_table(s->s_dev, bn, s->s_blocksize) ;
+    tbh = sb_get_hash_table(s, bn) ;
 
 /* kill this sanity check */
 if (count > (orig_commit_left + 2)) {
@@ -712,7 +712,7 @@
     for (i = 0 ; atomic_read(&(jl->j_commit_left)) > 1 && 
                  i < (jl->j_len + 1) ; i++) {  /* everything but commit_bh */
       bn = reiserfs_get_journal_block(s) + (jl->j_start + i) % JOURNAL_BLOCK_COUNT  ;
-      tbh = get_hash_table(s->s_dev, bn, s->s_blocksize) ;
+      tbh = sb_get_hash_table(s, bn) ;
 
       wait_on_buffer(tbh) ;
       if (!buffer_uptodate(tbh)) {
@@ -1403,8 +1403,7 @@
     offset = d_bh->b_blocknr - reiserfs_get_journal_block(p_s_sb) ;
 
     /* ok, we have a journal description block, lets see if the transaction was valid */
-    c_bh = bread(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) + ((offset + le32_to_cpu(desc->j_len) + 1) % JOURNAL_BLOCK_COUNT), 
-    		p_s_sb->s_blocksize) ;
+    c_bh = sb_bread(p_s_sb, reiserfs_get_journal_block(p_s_sb) + ((offset + le32_to_cpu(desc->j_len) + 1) % JOURNAL_BLOCK_COUNT)) ;
     if (!c_bh)
       return 0 ;
     commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
@@ -1458,7 +1457,7 @@
   unsigned long trans_offset ;
   int i;
 
-  d_bh = bread(p_s_sb->s_dev, cur_dblock, p_s_sb->s_blocksize) ;
+  d_bh = sb_bread(p_s_sb, cur_dblock) ;
   if (!d_bh)
     return 1 ;
   desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
@@ -1482,8 +1481,7 @@
     brelse(d_bh) ;
     return 1 ;
   }
-  c_bh = bread(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) + ((trans_offset + le32_to_cpu(desc->j_len) + 1) % JOURNAL_BLOCK_COUNT), 
-    		p_s_sb->s_blocksize) ;
+  c_bh = sb_bread(p_s_sb, reiserfs_get_journal_block(p_s_sb) + ((trans_offset + le32_to_cpu(desc->j_len) + 1) % JOURNAL_BLOCK_COUNT)) ;
   if (!c_bh) {
     brelse(d_bh) ;
     return 1 ;
@@ -1512,11 +1510,11 @@
   }
   /* get all the buffer heads */
   for(i = 0 ; i < le32_to_cpu(desc->j_len) ; i++) {
-    log_blocks[i] = getblk(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) + (trans_offset + 1 + i) % JOURNAL_BLOCK_COUNT, p_s_sb->s_blocksize);
+    log_blocks[i] = sb_getblk(p_s_sb, reiserfs_get_journal_block(p_s_sb) + (trans_offset + 1 + i) % JOURNAL_BLOCK_COUNT);
     if (i < JOURNAL_TRANS_HALF) {
-      real_blocks[i] = getblk(p_s_sb->s_dev, le32_to_cpu(desc->j_realblock[i]), p_s_sb->s_blocksize) ;
+      real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(desc->j_realblock[i])) ;
     } else {
-      real_blocks[i] = getblk(p_s_sb->s_dev, le32_to_cpu(commit->j_realblock[i - JOURNAL_TRANS_HALF]), p_s_sb->s_blocksize) ;
+      real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(commit->j_realblock[i - JOURNAL_TRANS_HALF])) ;
     }
     if (real_blocks[i]->b_blocknr >= reiserfs_get_journal_block(p_s_sb) &&
         real_blocks[i]->b_blocknr < (reiserfs_get_journal_block(p_s_sb)+JOURNAL_BLOCK_COUNT)) {
@@ -1617,10 +1615,9 @@
   ** is the first unflushed, and if that transaction is not valid, 
   ** replay is done
   */
-  SB_JOURNAL(p_s_sb)->j_header_bh = bread(p_s_sb->s_dev, 
+  SB_JOURNAL(p_s_sb)->j_header_bh = sb_bread(p_s_sb, 
                                           reiserfs_get_journal_block(p_s_sb) + 
-					  JOURNAL_BLOCK_COUNT, 
-					  p_s_sb->s_blocksize) ;
+					  JOURNAL_BLOCK_COUNT) ;
   if (!SB_JOURNAL(p_s_sb)->j_header_bh) {
     return 1 ;
   }
@@ -1641,7 +1638,7 @@
     ** there is nothing more we can do, and it makes no sense to read 
     ** through the whole log.
     */
-    d_bh = bread(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) + le32_to_cpu(jh->j_first_unflushed_offset), p_s_sb->s_blocksize) ;
+    d_bh = sb_bread(p_s_sb, reiserfs_get_journal_block(p_s_sb) + le32_to_cpu(jh->j_first_unflushed_offset)) ;
     ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL) ;
     if (!ret) {
       continue_replay = 0 ;
@@ -1661,7 +1658,7 @@
   ** all the valid transactions, and pick out the oldest.
   */
   while(continue_replay && cur_dblock < (reiserfs_get_journal_block(p_s_sb) + JOURNAL_BLOCK_COUNT)) {
-    d_bh = bread(p_s_sb->s_dev, cur_dblock, p_s_sb->s_blocksize) ;
+    d_bh = sb_bread(p_s_sb, cur_dblock) ;
     ret = journal_transaction_is_valid(p_s_sb, d_bh, &oldest_invalid_trans_id, &newest_mount_id) ;
     if (ret == 1) {
       desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
@@ -2553,7 +2550,7 @@
   int cleaned = 0 ;
   
   if (reiserfs_dont_log(th->t_super)) {
-    bh = get_hash_table(p_s_sb->s_dev, blocknr, p_s_sb->s_blocksize) ;
+    bh = sb_get_hash_table(p_s_sb, blocknr) ;
     if (bh && buffer_dirty (bh)) {
       printk ("journal_mark_freed(dont_log): dirty buffer on hash list: %lx %ld\n", bh->b_state, blocknr);
       BUG ();
@@ -2561,7 +2558,7 @@
     brelse (bh);
     return 0 ;
   }
-  bh = get_hash_table(p_s_sb->s_dev, blocknr, p_s_sb->s_blocksize) ;
+  bh = sb_get_hash_table(p_s_sb, blocknr) ;
   /* if it is journal new, we just remove it from this transaction */
   if (bh && buffer_journal_new(bh)) {
     mark_buffer_notjournal_new(bh) ;
@@ -2768,7 +2765,7 @@
   
   rs = SB_DISK_SUPER_BLOCK(p_s_sb) ;
   /* setup description block */
-  d_bh = getblk(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) + SB_JOURNAL(p_s_sb)->j_start, p_s_sb->s_blocksize) ; 
+  d_bh = sb_getblk(p_s_sb, reiserfs_get_journal_block(p_s_sb) + SB_JOURNAL(p_s_sb)->j_start) ; 
   mark_buffer_uptodate(d_bh, 1) ;
   desc = (struct reiserfs_journal_desc *)(d_bh)->b_data ;
   memset(desc, 0, sizeof(struct reiserfs_journal_desc)) ;
@@ -2776,9 +2773,8 @@
   desc->j_trans_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_trans_id) ;
 
   /* setup commit block.  Don't write (keep it clean too) this one until after everyone else is written */
-  c_bh = getblk(p_s_sb->s_dev,  reiserfs_get_journal_block(p_s_sb) + 
-  				        ((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL(p_s_sb)->j_len + 1) % JOURNAL_BLOCK_COUNT), 
-					 p_s_sb->s_blocksize) ;
+  c_bh = sb_getblk(p_s_sb,  reiserfs_get_journal_block(p_s_sb) + 
+  				        ((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL(p_s_sb)->j_len + 1) % JOURNAL_BLOCK_COUNT)) ;
   commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
   memset(commit, 0, sizeof(struct reiserfs_journal_commit)) ;
   commit->j_trans_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_trans_id) ;
@@ -2866,9 +2862,8 @@
     /* copy all the real blocks into log area.  dirty log blocks */
     if (test_bit(BH_JDirty, &cn->bh->b_state)) {
       struct buffer_head *tmp_bh ;
-      tmp_bh = getblk(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) + 
-		     ((cur_write_start + jindex) % JOURNAL_BLOCK_COUNT), 
-				       p_s_sb->s_blocksize) ;
+      tmp_bh = sb_getblk(p_s_sb, reiserfs_get_journal_block(p_s_sb) + 
+		     ((cur_write_start + jindex) % JOURNAL_BLOCK_COUNT)) ;
       mark_buffer_uptodate(tmp_bh, 1) ;
       memcpy(tmp_bh->b_data, cn->bh->b_data, cn->bh->b_size) ;  
       jindex++ ;
diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c
index 9fed213..ab7a310 100644
--- a/fs/reiserfs/resize.c
+++ b/fs/reiserfs/resize.c
@@ -39,7 +39,7 @@
 	}
 
 	/* check the device size */
-	bh = bread(s->s_dev, block_count_new - 1, s->s_blocksize);
+	bh = sb_bread(s, block_count_new - 1);
 	if (!bh) {
 		printk("reiserfs_resize: can\'t read last block\n");
 		return -EINVAL;
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 993e6fe..327bd73 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1116,7 +1116,7 @@
 		    continue;
 		}
 		/* Search for the buffer in cache. */
-		p_s_un_bh = get_hash_table(p_s_sb->s_dev, get_block_num(p_n_unfm_pointer,0), n_blk_size);
+		p_s_un_bh = sb_get_hash_table(p_s_sb, get_block_num(p_n_unfm_pointer,0));
 
 		if (p_s_un_bh) {
 		    mark_buffer_clean(p_s_un_bh) ;
diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c
index 477d2eb..9c791cb 100644
--- a/fs/romfs/inode.c
+++ b/fs/romfs/inode.c
@@ -108,7 +108,7 @@
 	s->u.generic_sbp = (void *) 0;
 	s->s_maxbytes = 0xFFFFFFFF;
 
-	bh = bread(dev, 0, ROMBSIZE);
+	bh = sb_bread(s, 0);
 	if (!bh) {
 		/* XXX merge with other printk? */
                 printk ("romfs: unable to read superblock\n");
@@ -188,7 +188,7 @@
 	if (count > maxsize || offset+count > maxsize)
 		count = maxsize-offset;
 
-	bh = bread(i->i_dev, offset>>ROMBSBITS, ROMBSIZE);
+	bh = sb_bread(i->i_sb, offset>>ROMBSBITS);
 	if (!bh)
 		return -1;		/* error */
 
@@ -203,7 +203,7 @@
 	while (res < count) {
 		offset += maxsize;
 
-		bh = bread(i->i_dev, offset>>ROMBSBITS, ROMBSIZE);
+		bh = sb_bread(i->i_sb, offset>>ROMBSBITS);
 		if (!bh)
 			return -1;
 		maxsize = min_t(unsigned long, count - res, ROMBSIZE);
@@ -226,7 +226,7 @@
 	if (offset >= maxsize || count > maxsize || offset+count>maxsize)
 		return -1;
 
-	bh = bread(i->i_dev, offset>>ROMBSBITS, ROMBSIZE);
+	bh = sb_bread(i->i_sb, offset>>ROMBSBITS);
 	if (!bh)
 		return -1;		/* error */
 
@@ -241,7 +241,7 @@
 		offset += maxsize;
 		dest += maxsize;
 
-		bh = bread(i->i_dev, offset>>ROMBSBITS, ROMBSIZE);
+		bh = sb_bread(i->i_sb, offset>>ROMBSBITS);
 		if (!bh)
 			return -1;
 		maxsize = min_t(unsigned long, count - res, ROMBSIZE);
diff --git a/fs/sysv/balloc.c b/fs/sysv/balloc.c
index 1d76bb9..2f3df11 100644
--- a/fs/sysv/balloc.c
+++ b/fs/sysv/balloc.c
@@ -73,7 +73,7 @@
 	 */
 	if (count == sb->sv_flc_size || count == 0) {
 		block += sb->sv_block_base;
-		bh = getblk(sb->s_dev, block, sb->s_blocksize);
+		bh = sb_getblk(sb, block);
 		if (!bh) {
 			printk("sysv_free_block: getblk() failed\n");
 			unlock_super(sb);
@@ -125,7 +125,7 @@
 		unsigned count;
 
 		block += sb->sv_block_base;
-		if (!(bh = bread(sb->s_dev, block, sb->s_blocksize))) {
+		if (!(bh = sb_bread(sb, block))) {
 			printk("sysv_new_block: cannot read free-list block\n");
 			/* retry this same block next time */
 			*sb->sv_bcache_count = cpu_to_fs16(sb, 1);
@@ -196,7 +196,7 @@
 		if (block < sb->sv_firstdatazone || block >= sb->sv_nzones)
 			goto Einval;
 		block += sb->sv_block_base;
-		bh = bread(sb->s_dev, block, sb->s_blocksize);
+		bh = sb_bread(sb, block);
 		if (!bh)
 			goto Eio;
 		n = fs16_to_cpu(sb, *(u16*)bh->b_data);
diff --git a/fs/sysv/ialloc.c b/fs/sysv/ialloc.c
index a91224c..474e67e 100644
--- a/fs/sysv/ialloc.c
+++ b/fs/sysv/ialloc.c
@@ -55,7 +55,7 @@
 	struct sysv_inode *res;
 	int block = sb->sv_firstinodezone + sb->sv_block_base;
 	block += (ino-1) >> sb->sv_inodes_per_block_bits;
-	*bh = bread(sb->s_dev, block, sb->s_blocksize);
+	*bh = sb_bread(sb, block);
 	if (!*bh)
 		return NULL;
 	res = (struct sysv_inode *) (*bh)->b_data;
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index af27d22..5d4ef29 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -86,8 +86,7 @@
 			    Indirect chain[],
 			    int *err)
 {
-	kdev_t dev = inode->i_dev;
-	int size = inode->i_sb->s_blocksize;
+	struct super_block *sb = inode->i_sb;
 	Indirect *p = chain;
 	struct buffer_head *bh;
 
@@ -96,8 +95,8 @@
 	if (!p->key)
 		goto no_block;
 	while (--depth) {
-		int block = block_to_cpu(inode->i_sb, p->key);
-		bh = bread(dev, block, size);
+		int block = block_to_cpu(sb, p->key);
+		bh = sb_bread(sb, block);
 		if (!bh)
 			goto failure;
 		if (!verify_chain(chain, p))
@@ -139,7 +138,7 @@
 		 * the pointer to new one, then send parent to disk.
 		 */
 		parent = block_to_cpu(inode->i_sb, branch[n-1].key);
-		bh = getblk(inode->i_dev, parent, blocksize);
+		bh = sb_getblk(inode->i_sb, parent);
 		lock_buffer(bh);
 		memset(bh->b_data, 0, blocksize);
 		branch[n].bh = bh;
@@ -192,7 +191,7 @@
 	return -EAGAIN;
 }
 
-static int get_block(struct inode *inode, long iblock, struct buffer_head *bh_result, int create)
+static int get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
 {
 	int err = -EIO;
 	int offsets[DEPTH];
@@ -336,7 +335,7 @@
 				continue;
 			*p = 0;
 			block = block_to_cpu(sb, nr);
-			bh = bread(inode->i_dev, block, sb->s_blocksize);
+			bh = sb_bread(sb, block);
 			if (!bh)
 				continue;
 			free_branches(inode, (u32*)bh->b_data,
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
index 983f0be..2a38452 100644
--- a/fs/sysv/super.c
+++ b/fs/sysv/super.c
@@ -362,11 +362,12 @@
 	if (64 != sizeof (struct sysv_inode))
 		panic("sysv fs: bad i-node size");
 	set_blocksize(dev,BLOCK_SIZE);
+	sb->s_blocksize = BLOCK_SIZE;
 	sb->sv_block_base = 0;
 
 	for (i = 0; i < sizeof(flavours)/sizeof(flavours[0]) && !size; i++) {
 		brelse(bh);
-		bh = bread(dev, flavours[i].block, BLOCK_SIZE);
+		bh = sb_bread(sb, flavours[i].block);
 		if (!bh)
 			continue;
 		size = flavours[i].test(sb, bh);
@@ -380,8 +381,9 @@
 			blocknr = bh->b_blocknr << 1;
 			brelse(bh);
 			set_blocksize(dev, 512);
-			bh1 = bread(dev, blocknr, 512);
-			bh = bread(dev, blocknr + 1, 512);
+			sb->s_blocksize = 512;
+			bh1 = sb_bread(sb, blocknr);
+			bh = sb_bread(sb, blocknr + 1);
 			break;
 		case 2:
 			bh1 = bh;
@@ -390,7 +392,8 @@
 			blocknr = bh->b_blocknr >> 1;
 			brelse(bh);
 			set_blocksize(dev, 2048);
-			bh1 = bh = bread(dev, blocknr, 2048);
+			sb->s_blocksize = 2048;
+			bh1 = bh = sb_bread(sb, blocknr);
 			break;
 		default:
 			goto Ebadsize;
@@ -441,8 +444,9 @@
 	sb->sv_bytesex = BYTESEX_PDP;
 
 	set_blocksize(dev, 512);
+	sb->s_blocksize = 512;
 
-	if ((bh = bread(dev, 1, 512)) == NULL) {
+	if ((bh = sb_bread(sb, 1)) == NULL) {
 		if (!silent)
 			printk("VFS: unable to read V7 FS superblock on "
 			       "device %s.\n", bdevname(dev));
@@ -458,7 +462,7 @@
 
 	/* plausibility check on root inode: it is a directory,
 	   with a nonzero size that is a multiple of 16 */
-	if ((bh2 = bread(dev, 2, 512)) == NULL)
+	if ((bh2 = sb_bread(sb, 2)) == NULL)
 		goto failed;
 	v7i = (struct sysv_inode *)(bh2->b_data + 64);
 	if ((fs16_to_cpu(sb,v7i->i_mode) & ~0777) != S_IFDIR ||
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 982a7f8..8dc09bf 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -98,7 +98,7 @@
 	loc.logicalBlockNum = bitmap->s_extPosition;
 	loc.partitionReferenceNum = UDF_SB_PARTITION(sb);
 
-	bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block), sb->s_blocksize);
+	bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
 	if (!bh)
 	{
 		retval = -EIO;
@@ -463,7 +463,7 @@
 	elen = 0;
 	obloc = nbloc = UDF_I_LOCATION(table);
 
-	obh = nbh = udf_tread(sb, udf_get_lb_pblock(sb, nbloc, 0), sb->s_blocksize);
+	obh = nbh = udf_tread(sb, udf_get_lb_pblock(sb, nbloc, 0));
 	atomic_inc(&nbh->b_count);
 
 	while (count && (etype =
@@ -571,8 +571,7 @@
 			elen -= sb->s_blocksize;
 
 			if (!(nbh = udf_tread(sb,
-				udf_get_lb_pblock(sb, nbloc, 0),
-				sb->s_blocksize)))
+				udf_get_lb_pblock(sb, nbloc, 0))))
 			{
 				udf_release_data(obh);
 				goto error_return;
@@ -689,7 +688,7 @@
 	extoffset = sizeof(struct UnallocatedSpaceEntry);
 	bloc = UDF_I_LOCATION(table);
 
-	bh = udf_tread(sb, udf_get_lb_pblock(sb, bloc, 0), sb->s_blocksize);
+	bh = udf_tread(sb, udf_get_lb_pblock(sb, bloc, 0));
 	eloc.logicalBlockNum = 0xFFFFFFFF;
 
 	while (first_block != eloc.logicalBlockNum && (etype =
@@ -766,7 +765,7 @@
 	extoffset = sizeof(struct UnallocatedSpaceEntry);
 	bloc = UDF_I_LOCATION(table);
 
-	goal_bh = bh = udf_tread(sb, udf_get_lb_pblock(sb, bloc, 0), sb->s_blocksize);
+	goal_bh = bh = udf_tread(sb, udf_get_lb_pblock(sb, bloc, 0));
 	atomic_inc(&goal_bh->b_count);
 
 	while (spread && (etype =
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 7a13d86..f1dd42b 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -146,7 +146,7 @@
 		return -ENOENT;
 	}
 
-	if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+	if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
 	{
 		udf_release_data(bh);
 		return -EIO;
@@ -160,7 +160,7 @@
 		for (num=0; i>0; i--)
 		{
 			block = udf_get_lb_pblock(dir->i_sb, eloc, offset+i);
-			tmp = udf_tgetblk(dir->i_sb, block, dir->i_sb->s_blocksize);
+			tmp = udf_tgetblk(dir->i_sb, block);
 			if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
 				bha[num++] = tmp;
 			else
diff --git a/fs/udf/directory.c b/fs/udf/directory.c
index 33f5cf0..97ebc7e 100644
--- a/fs/udf/directory.c
+++ b/fs/udf/directory.c
@@ -60,7 +60,7 @@
 		block = udf_get_lb_pblock(dir->i_sb, fe_loc, ++*pos);
 		if (!block)
 			return NULL;
-		if (!(*bh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+		if (!(*bh = udf_tread(dir->i_sb, block)))
 			return NULL;
 	}
 	else if (*offset > dir->i_sb->s_blocksize)
@@ -74,7 +74,7 @@
 		block = udf_get_lb_pblock(dir->i_sb, fe_loc, ++*pos);
 		if (!block)
 			return NULL;
-		if (!((*bh) = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+		if (!((*bh) = udf_tread(dir->i_sb, block)))
 			return NULL;
 
 		memcpy((Uint8 *)ad + remainder, (*bh)->b_data, ad_size - remainder);
@@ -117,7 +117,7 @@
 			*extoffset = lextoffset;
 
 		udf_release_data(fibh->sbh);
-		if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+		if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block)))
 			return NULL;
 		fibh->soffset = fibh->eoffset = 0;
 
@@ -129,7 +129,7 @@
 			for (num=0; i>0; i--)
 			{
 				block = udf_get_lb_pblock(dir->i_sb, *eloc, *offset+i);
-				tmp = udf_tgetblk(dir->i_sb, block, dir->i_sb->s_blocksize);
+				tmp = udf_tgetblk(dir->i_sb, block);
 				if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
 					bha[num++] = tmp;
 				else
@@ -183,7 +183,7 @@
 		fibh->soffset -= dir->i_sb->s_blocksize;
 		fibh->eoffset -= dir->i_sb->s_blocksize;
 
-		if (!(fibh->ebh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+		if (!(fibh->ebh = udf_tread(dir->i_sb, block)))
 			return NULL;
 
 		if (sizeof(struct FileIdentDesc) > - fibh->soffset)
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 33ff393..72ce825 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -57,7 +57,7 @@
 	kaddr = kmap(page);
 	memset(kaddr, 0, PAGE_CACHE_SIZE);
 	block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
-	bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize);
+	bh = sb_bread(inode->i_sb, block);
 	memcpy(kaddr, bh->b_data + udf_ext0_offset(inode), inode->i_size);
 	brelse(bh);
 	flush_dcache_page(page);
@@ -80,7 +80,7 @@
 
 	kaddr = kmap(page);
 	block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
-	bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize);
+	bh = sb_bread(inode->i_sb, block);
 	memcpy(bh->b_data + udf_ext0_offset(inode), kaddr, inode->i_size);
 	mark_buffer_dirty(bh);
 	brelse(bh);
@@ -105,7 +105,7 @@
 	char *kaddr = page_address(page);
 
 	block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
-	bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize);
+	bh = sb_bread(inode->i_sb, block);
 	memcpy(bh->b_data + udf_file_entry_alloc_offset(inode) + offset,
 		kaddr + offset, to-offset);
 	mark_buffer_dirty(bh);
@@ -246,8 +246,7 @@
 
 	/* ok, we need to read the inode */
 	bh = udf_tread(inode->i_sb,
-		udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0),
-		inode->i_sb->s_blocksize);
+		udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
 
 	if (!bh)
 	{
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 48431ce..7b95bf9 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -184,7 +184,7 @@
 	}
 
 	block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
-	bh = udf_tread(inode->i_sb, block, inode->i_sb->s_blocksize);
+	bh = udf_tread(inode->i_sb, block);
 	if (!bh)
 		return;
 	page = grab_cache_page(inode->i_mapping, 0);
@@ -251,10 +251,10 @@
 		UDF_I_LOCATION(inode).partitionReferenceNum, 0);
 	if (!newblock)
 		return NULL;
-	sbh = udf_tread(inode->i_sb, inode->i_ino, inode->i_sb->s_blocksize);
+	sbh = udf_tread(inode->i_sb, inode->i_ino);
 	if (!sbh)
 		return NULL;
-	dbh = udf_tgetblk(inode->i_sb, newblock, inode->i_sb->s_blocksize);
+	dbh = udf_tgetblk(inode->i_sb, newblock);
 	if (!dbh)
 		return NULL;
 	lock_buffer(dbh);
@@ -382,7 +382,7 @@
 	if (!*err && buffer_mapped(&dummy))
 	{
 		struct buffer_head *bh;
-		bh = getblk(dummy.b_dev, dummy.b_blocknr, inode->i_sb->s_blocksize);
+		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
 		if (buffer_new(&dummy))
 		{
 			lock_buffer(bh);
@@ -886,8 +886,7 @@
 				udf_file_entry_alloc_offset(inode);
 
 			if ((bh = udf_tread(inode->i_sb,
-				udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0),
-				inode->i_sb->s_blocksize)))
+				udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0))))
 			{
 				memset(bh->b_data + offset, 0x00, inode->i_sb->s_blocksize - offset);
 				mark_buffer_dirty(bh);
@@ -1322,8 +1321,7 @@
 	int err = 0;
 
 	bh = udf_tread(inode->i_sb,
-		udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0),
-		inode->i_sb->s_blocksize);
+		udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
 
 	if (!bh)
 	{
@@ -1624,8 +1622,7 @@
 	if (!(*bh))
 	{
 		if (!(*bh = udf_tread(inode->i_sb,
-			udf_get_lb_pblock(inode->i_sb, *bloc, 0),
-			inode->i_sb->s_blocksize)))
+			udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
 		{
 			udf_debug("reading block %d failed!\n",
 				udf_get_lb_pblock(inode->i_sb, *bloc, 0));
@@ -1653,7 +1650,7 @@
 			return -1;
 		}
 		if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
-			*bloc, 0), inode->i_sb->s_blocksize)))
+			*bloc, 0))))
 		{
 			return -1;
 		}
@@ -1759,8 +1756,7 @@
 	if (!(bh))
 	{
 		if (!(bh = udf_tread(inode->i_sb,
-			udf_get_lb_pblock(inode->i_sb, bloc, 0),
-			inode->i_sb->s_blocksize)))
+			udf_get_lb_pblock(inode->i_sb, bloc, 0))))
 		{
 			udf_debug("reading block %d failed!\n",
 				udf_get_lb_pblock(inode->i_sb, bloc, 0));
@@ -1828,8 +1824,7 @@
 	if (!(*bh))
 	{
 		if (!(*bh = udf_tread(inode->i_sb,
-			udf_get_lb_pblock(inode->i_sb, *bloc, 0),
-			inode->i_sb->s_blocksize)))
+			udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
 		{
 			udf_debug("reading block %d failed!\n",
 				udf_get_lb_pblock(inode->i_sb, *bloc, 0));
@@ -1951,8 +1946,7 @@
 	if (!(*bh))
 	{
 		if (!(*bh = udf_tread(inode->i_sb,
-			udf_get_lb_pblock(inode->i_sb, *bloc, 0),
-			inode->i_sb->s_blocksize)))
+			udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
 		{
 			udf_debug("reading block %d failed!\n",
 				udf_get_lb_pblock(inode->i_sb, *bloc, 0));
@@ -2033,8 +2027,7 @@
 	if (!bh)
 	{
 		if (!(bh = udf_tread(inode->i_sb,
-			udf_get_lb_pblock(inode->i_sb, bloc, 0),
-			inode->i_sb->s_blocksize)))
+			udf_get_lb_pblock(inode->i_sb, bloc, 0))))
 		{
 			udf_debug("reading block %d failed!\n",
 				udf_get_lb_pblock(inode->i_sb, bloc, 0));
@@ -2068,8 +2061,7 @@
 	if (!(nbh))
 	{
 		if (!(nbh = udf_tread(inode->i_sb,
-			udf_get_lb_pblock(inode->i_sb, nbloc, 0),
-			inode->i_sb->s_blocksize)))
+			udf_get_lb_pblock(inode->i_sb, nbloc, 0))))
 		{
 			udf_debug("reading block %d failed!\n",
 				udf_get_lb_pblock(inode->i_sb, nbloc, 0));
diff --git a/fs/udf/misc.c b/fs/udf/misc.c
index 4b9cb0e..3cbebf7 100644
--- a/fs/udf/misc.c
+++ b/fs/udf/misc.c
@@ -67,21 +67,21 @@
 #if defined(__linux__) && defined(__KERNEL__)
 
 extern struct buffer_head *
-udf_tgetblk(struct super_block *sb, int block, int size)
+udf_tgetblk(struct super_block *sb, int block)
 {
 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV))
-		return getblk(sb->s_dev, udf_fixed_to_variable(block), size);
+		return sb_getblk(sb, udf_fixed_to_variable(block));
 	else
-		return getblk(sb->s_dev, block, size);
+		return sb_getblk(sb, block);
 }
 
 extern struct buffer_head *
-udf_tread(struct super_block *sb, int block, int size)
+udf_tread(struct super_block *sb, int block)
 {
 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV))
-		return bread(sb->s_dev, udf_fixed_to_variable(block), size);
+		return sb_bread(sb, udf_fixed_to_variable(block));
 	else
-		return bread(sb->s_dev, block, size);
+		return sb_bread(sb, block);
 }
 
 extern struct GenericAttrFormat *
@@ -92,7 +92,7 @@
 	long_ad eaicb;
 	int offset;
 
-	*bh = udf_tread(inode->i_sb, inode->i_ino, inode->i_sb->s_blocksize);
+	*bh = udf_tread(inode->i_sb, inode->i_ino);
 
 	if (UDF_I_EXTENDED_FE(inode) == 0)
 	{
@@ -208,7 +208,7 @@
 	long_ad eaicb;
 	Uint32 offset;
 
-	*bh = udf_tread(inode->i_sb, inode->i_ino, inode->i_sb->s_blocksize);
+	*bh = udf_tread(inode->i_sb, inode->i_ino);
 
 	if (UDF_I_EXTENDED_FE(inode) == 0)
 	{
@@ -273,7 +273,7 @@
 	struct buffer_head *bh = NULL;
 
 	/* Read the block */
-	bh = udf_tread(sb, block+offset, sb->s_blocksize);
+	bh = udf_tread(sb, block+offset);
 	if (!bh)
 	{
 		printk(KERN_ERR "udf: udf_read_untagged(%p,%d,%d) failed\n",
@@ -305,7 +305,7 @@
 	if (block == 0xFFFFFFFF)
 		return NULL;
 
-	bh = udf_tread(sb, block, sb->s_blocksize);
+	bh = udf_tread(sb, block);
 	if (!bh)
 	{
 		udf_debug("block=%d, location=%d: read failed\n", block, location);
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 7b6f0a6..b36093c 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -183,7 +183,7 @@
 		return NULL;
 	}
 
-	if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+	if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block)))
 	{
 		udf_release_data(bh);
 		return NULL;
@@ -404,7 +404,7 @@
 		else
 			offset = 0;
 
-		if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+		if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block)))
 		{
 			udf_release_data(bh);
 			*err = -EIO;
@@ -488,7 +488,7 @@
 		block = udf_get_lb_pblock(dir->i_sb, UDF_I_LOCATION(dir), 0);
 		if (UDF_I_ALLOCTYPE(dir) == ICB_FLAG_AD_IN_ICB)
 		{
-			fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize);
+			fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
 			fibh->soffset = fibh->eoffset = udf_file_entry_alloc_offset(dir);
 		}
 		else
@@ -803,7 +803,7 @@
 		return 0;
 	}
 
-	if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+	if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
 		return 0;
 
 	while ( (f_pos < size) )
@@ -964,7 +964,7 @@
 
 		block = udf_get_pblock(inode->i_sb, block,
 			UDF_I_LOCATION(inode).partitionReferenceNum, 0);
-		bh = udf_tread(inode->i_sb, block, inode->i_sb->s_blocksize);
+		bh = udf_tread(inode->i_sb, block);
 		lock_buffer(bh);
 		memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
 		mark_buffer_uptodate(bh, 1);
@@ -974,7 +974,7 @@
 	else
 	{
 		block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
-		bh = udf_tread(inode->i_sb, block, inode->i_sb->s_blocksize);
+		bh = udf_tread(inode->i_sb, block);
 	}
 	ea = bh->b_data + udf_ext0_offset(inode);
 
diff --git a/fs/udf/partition.c b/fs/udf/partition.c
index 933f2db..3938284 100644
--- a/fs/udf/partition.c
+++ b/fs/udf/partition.c
@@ -76,7 +76,7 @@
 
 	loc = udf_block_map(UDF_SB_VAT(sb), newblock);
 
-	if (!(bh = bread(sb->s_dev, loc, sb->s_blocksize)))
+	if (!(bh = sb_bread(sb, loc)))
 	{
 		udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n",
 			sb, block, partition, loc, index);
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 034064d..7afbe3a 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -412,7 +412,7 @@
 	for (;!nsr02 && !nsr03; sector += sectorsize)
 	{
 		/* Read a block */
-		bh = udf_tread(sb, sector >> sb->s_blocksize_bits, sb->s_blocksize);
+		bh = udf_tread(sb, sector >> sb->s_blocksize_bits);
 		if (!bh)
 			break;
 
@@ -525,7 +525,7 @@
 
 		for (i=0; (!lastblock && i<sizeof(last)/sizeof(int)); i++)
 		{
-			if (last[i] < 0 || !(bh = bread(sb->s_dev, last[i], sb->s_blocksize)))
+			if (last[i] < 0 || !(bh = sb_bread(sb, last[i])))
 			{
 				ident = location = 0;
 			}
@@ -560,7 +560,7 @@
 			}
 			else
 			{
-				if (last[i] < 256 || !(bh = bread(sb->s_dev, last[i] - 256, sb->s_blocksize)))
+				if (last[i] < 256 || !(bh = sb_bread(sb, last[i] - 256)))
 				{
 					ident = location = 0;
 				}
@@ -579,8 +579,7 @@
 				}
 				else
 				{
-					if (last[i] < 312 + UDF_SB_SESSION(sb) || !(bh = bread(sb->s_dev, last[i] - 312 - UDF_SB_SESSION(sb),
-						sb->s_blocksize)))
+					if (last[i] < 312 + UDF_SB_SESSION(sb) || !(bh = sb_bread(sb, last[i] - 312 - UDF_SB_SESSION(sb))))
 					{
 						ident = location = 0;
 					}
@@ -606,7 +605,7 @@
 	if (!lastblock)
 	{
 		/* We havn't found the lastblock. check 312 */
-		if ((bh = bread(sb->s_dev, 312 + UDF_SB_SESSION(sb), sb->s_blocksize)))
+		if ((bh = sb_bread(sb, 312 + UDF_SB_SESSION(sb))))
 		{
 			ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent);
 			location = le32_to_cpu(((tag *)bh->b_data)->tagLocation);
@@ -1258,7 +1257,7 @@
 					Uint32 pos;
 
 					pos = udf_block_map(UDF_SB_VAT(sb), 0);
-					bh = bread(sb->s_dev, pos, sb->s_blocksize);
+					bh = sb_bread(sb, pos);
 					UDF_SB_TYPEVIRT(sb,i).s_start_offset =
 						le16_to_cpu(((struct VirtualAllocationTable20 *)bh->b_data + udf_ext0_offset(UDF_SB_VAT(sb)))->lengthHeader) +
 							udf_ext0_offset(UDF_SB_VAT(sb));
@@ -1728,7 +1727,7 @@
 		{
 			udf_release_data(bh);
 			newblock = udf_get_lb_pblock(sb, loc, ++block);
-			bh = udf_tread(sb, newblock, sb->s_blocksize);
+			bh = udf_tread(sb, newblock);
 			if (!bh)
 			{
 				udf_debug("read failed\n");
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index 543e9b4..3254e53 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -88,7 +88,7 @@
 	lock_kernel();
 	if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_IN_ICB)
 	{
-		bh = udf_tread(inode->i_sb, inode->i_ino, inode->i_sb->s_blocksize);
+		bh = udf_tread(inode->i_sb, inode->i_ino);
 
 		if (!bh)
 			goto out;
@@ -97,8 +97,7 @@
 	}
 	else
 	{
-		bh = bread(inode->i_dev, udf_block_map(inode, 0),
-				inode->i_sb->s_blocksize);
+		bh = sb_bread(inode->i_sb, udf_block_map(inode, 0));
 
 		if (!bh)
 			goto out;
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 97dabb0..56d80d3 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -139,8 +139,8 @@
 
 /* misc.c */
 extern int udf_read_tagged_data(char *, int size, int fd, int block, int partref);
-extern struct buffer_head *udf_tgetblk(struct super_block *, int, int);
-extern struct buffer_head *udf_tread(struct super_block *, int, int);
+extern struct buffer_head *udf_tgetblk(struct super_block *, int);
+extern struct buffer_head *udf_tread(struct super_block *, int);
 extern struct GenericAttrFormat *udf_add_extendedattr(struct inode *, Uint32, Uint32, Uint8, struct buffer_head **);
 extern struct GenericAttrFormat *udf_get_extendedattr(struct inode *, Uint32, Uint8, struct buffer_head **);
 extern struct buffer_head *udf_read_tagged(struct super_block *, Uint32, Uint32, Uint16 *);
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 38083ea..31c1bdd 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -223,7 +223,7 @@
 
 #define NULLIFY_FRAGMENTS \
 	for (i = oldcount; i < newcount; i++) { \
-		bh = getblk (sb->s_dev, result + i, sb->s_blocksize); \
+		bh = sb_getblk(sb, result + i); \
 		memset (bh->b_data, 0, sb->s_blocksize); \
 		mark_buffer_uptodate(bh, 1); \
 		mark_buffer_dirty (bh); \
@@ -357,7 +357,7 @@
 	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
 	if (result) {
 		for (i = 0; i < oldcount; i++) {
-			bh = bread (sb->s_dev, tmp + i, sb->s_blocksize);
+			bh = sb_bread(sb, tmp + i);
 			if(bh)
 			{
 				mark_buffer_clean (bh);
diff --git a/fs/ufs/cylinder.c b/fs/ufs/cylinder.c
index 16aa991..97391b4 100644
--- a/fs/ufs/cylinder.c
+++ b/fs/ufs/cylinder.c
@@ -54,7 +54,7 @@
 	 */
 	UCPI_UBH->bh[0] = sb->u.ufs_sb.s_ucg[cgno];
 	for (i = 1; i < UCPI_UBH->count; i++)
-		if (!(UCPI_UBH->bh[i] = bread (sb->s_dev, UCPI_UBH->fragment + i, sb->s_blocksize)))
+		if (!(UCPI_UBH->bh[i] = sb_bread(sb, UCPI_UBH->fragment + i)))
 			goto failed;
 	sb->u.ufs_sb.s_cgno[bitmap_nr] = cgno;
 			
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index f333e5a..3dca14b 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -74,7 +74,7 @@
 	while (!error && !stored && filp->f_pos < inode->i_size) {
 		lblk = (filp->f_pos) >> sb->s_blocksize_bits;
 		blk = ufs_frag_map(inode, lblk);
-		if (!blk || !(bh = bread (sb->s_dev, blk, sb->s_blocksize))) {
+		if (!blk || !(bh = sb_bread(sb, blk))) {
 			/* XXX - error - skip to the next block */
 			printk("ufs_readdir: "
 			       "dir inode %lu has a hole at offset %lu\n",
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index cff561a..5c3bc8f 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -106,8 +106,7 @@
 		struct buffer_head *bh;
 		int n = *p++;
 
-		bh = bread(sb->s_dev, uspi->s_sbbase + fs32_to_cpu(sb, block)+(n>>shift),
-				sb->s_blocksize);
+		bh = sb_bread(sb, uspi->s_sbbase + fs32_to_cpu(sb, block)+(n>>shift));
 		if (!bh)
 			goto out;
 		block = ((u32*) bh->b_data)[n & mask];
@@ -147,8 +146,7 @@
 	lastfrag = inode->u.ufs_i.i_lastfrag;
 	if (tmp && fragment < lastfrag) {
 		if (metadata) {
-			result = getblk (sb->s_dev, uspi->s_sbbase + tmp + blockoff,
-					 sb->s_blocksize);
+			result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
 			if (tmp == fs32_to_cpu(sb, *p)) {
 				UFSD(("EXIT, result %u\n", tmp + blockoff))
 				return result;
@@ -216,7 +214,7 @@
 	 * now. -DaveM
 	 */
 	if (metadata) {
-		result = getblk (inode->i_dev, tmp + blockoff, sb->s_blocksize);
+		result = sb_getblk(inode->i_sb, tmp + blockoff);
 	} else {
 		*phys = tmp;
 		result = NULL;
@@ -264,8 +262,7 @@
 	tmp = fs32_to_cpu(sb, *p);
 	if (tmp) {
 		if (metadata) {
-			result = getblk (bh->b_dev, uspi->s_sbbase + tmp + blockoff,
-					 sb->s_blocksize);
+			result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
 			if (tmp == fs32_to_cpu(sb, *p))
 				goto out;
 			brelse (result);
@@ -292,7 +289,7 @@
 	 * now. -DaveM
 	 */
 	if (metadata) {
-		result = getblk (bh->b_dev, tmp + blockoff, sb->s_blocksize);
+		result = sb_getblk(sb, tmp + blockoff);
 	} else {
 		*phys = tmp;
 		*new = 1;
@@ -425,7 +422,7 @@
 	*err = error;
 	if (!error && buffer_mapped(&dummy)) {
 		struct buffer_head *bh;
-		bh = getblk(dummy.b_dev, dummy.b_blocknr, inode->i_sb->s_blocksize);
+		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
 		if (buffer_new(&dummy)) {
 			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
 			mark_buffer_uptodate(bh, 1);
@@ -500,7 +497,7 @@
 		return;
 	}
 	
-	bh = bread (sb->s_dev, uspi->s_sbbase + ufs_inotofsba(inode->i_ino), sb->s_blocksize);
+	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
 	if (!bh) {
 		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
 		return;
@@ -591,7 +588,7 @@
 		return -1;
 	}
 
-	bh = bread (sb->s_dev, ufs_inotofsba(inode->i_ino), sb->s_blocksize);
+	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
 	if (!bh) {
 		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
 		return -1;
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 8cdb4c8..9421f59 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -339,7 +339,7 @@
 		size = uspi->s_bsize;
 		if (i + uspi->s_fpb > blks)
 			size = (blks - i) * uspi->s_fsize;
-		ubh = ubh_bread(sb->s_dev, uspi->s_csaddr + i, size);
+		ubh = ubh_bread(sb, uspi->s_csaddr + i, size);
 		if (!ubh)
 			goto failed;
 		ubh_ubhcpymem (space, ubh, size);
@@ -363,7 +363,7 @@
 	}
 	for (i = 0; i < uspi->s_ncg; i++) {
 		UFSD(("read cg %u\n", i))
-		if (!(sb->u.ufs_sb.s_ucg[i] = bread (sb->s_dev, ufs_cgcmin(i), sb->s_blocksize)))
+		if (!(sb->u.ufs_sb.s_ucg[i] = sb_bread(sb, ufs_cgcmin(i))))
 			goto failed;
 		if (!ufs_cg_chkmagic (sb, (struct ufs_cylinder_group *) sb->u.ufs_sb.s_ucg[i]->b_data))
 			goto failed;
@@ -414,7 +414,7 @@
 		size = uspi->s_bsize;
 		if (i + uspi->s_fpb > blks)
 			size = (blks - i) * uspi->s_fsize;
-		ubh = ubh_bread (sb->s_dev, uspi->s_csaddr + i, size);
+		ubh = ubh_bread(sb, uspi->s_csaddr + i, size);
 		ubh_memcpyubh (ubh, space, size);
 		space += size;
 		ubh_mark_buffer_uptodate (ubh, 1);
@@ -597,11 +597,12 @@
 	
 again:	
 	set_blocksize (sb->s_dev, block_size);
+	sb->s_blocksize = block_size;
 
 	/*
 	 * read ufs super block from device
 	 */
-	ubh = ubh_bread_uspi (uspi, sb->s_dev, uspi->s_sbbase + UFS_SBLOCK/block_size, super_block_size);
+	ubh = ubh_bread_uspi (uspi, sb, uspi->s_sbbase + UFS_SBLOCK/block_size, super_block_size);
 	if (!ubh) 
 		goto failed;
 	
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index e90fa8f..fc4cb9c 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -114,7 +114,7 @@
 	frag1 = ufs_fragnum (frag1);
 	frag2 = ufs_fragnum (frag2);
 	for (j = frag1; j < frag2; j++) {
-		bh = get_hash_table (sb->s_dev, tmp + j, uspi->s_fsize);
+		bh = sb_get_hash_table (sb, tmp + j);
 		if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
 			retry = 1;
 			brelse (bh);
@@ -137,7 +137,7 @@
 		if (!tmp)
 			continue;
 		for (j = 0; j < uspi->s_fpb; j++) {
-			bh = get_hash_table (sb->s_dev, tmp + j, uspi->s_fsize);
+			bh = sb_get_hash_table(sb, tmp + j);
 			if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
 				retry = 1;
 				brelse (bh);
@@ -176,7 +176,7 @@
 		ufs_panic(sb, "ufs_truncate_direct", "internal error");
 	frag4 = ufs_fragnum (frag4);
 	for (j = 0; j < frag4; j++) {
-		bh = get_hash_table (sb->s_dev, tmp + j, uspi->s_fsize);
+		bh = sb_get_hash_table (sb, tmp + j);
 		if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
 			retry = 1;
 			brelse (bh);
@@ -218,7 +218,7 @@
 	tmp = fs32_to_cpu(sb, *p);
 	if (!tmp)
 		return 0;
-	ind_ubh = ubh_bread (sb->s_dev, tmp, uspi->s_bsize);
+	ind_ubh = ubh_bread(sb, tmp, uspi->s_bsize);
 	if (tmp != fs32_to_cpu(sb, *p)) {
 		ubh_brelse (ind_ubh);
 		return 1;
@@ -235,7 +235,7 @@
 		if (!tmp)
 			continue;
 		for (j = 0; j < uspi->s_fpb; j++) {
-			bh = get_hash_table (sb->s_dev, tmp + j, uspi->s_fsize);
+			bh = sb_get_hash_table(sb, tmp + j);
 			if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *ind)) {
 				retry = 1;
 				brelse (bh);
@@ -312,7 +312,7 @@
 	tmp = fs32_to_cpu(sb, *p);
 	if (!tmp)
 		return 0;
-	dind_bh = ubh_bread (inode->i_dev, tmp, uspi->s_bsize);
+	dind_bh = ubh_bread(sb, tmp, uspi->s_bsize);
 	if (tmp != fs32_to_cpu(sb, *p)) {
 		ubh_brelse (dind_bh);
 		return 1;
@@ -378,7 +378,7 @@
 	p = inode->u.ufs_i.i_u1.i_data + UFS_TIND_BLOCK;
 	if (!(tmp = fs32_to_cpu(sb, *p)))
 		return 0;
-	tind_bh = ubh_bread (sb->s_dev, tmp, uspi->s_bsize);
+	tind_bh = ubh_bread (sb, tmp, uspi->s_bsize);
 	if (tmp != fs32_to_cpu(sb, *p)) {
 		ubh_brelse (tind_bh);
 		return 1;
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
index 06f2cb8..2d94ed5 100644
--- a/fs/ufs/util.c
+++ b/fs/ufs/util.c
@@ -23,7 +23,7 @@
 
 
 struct ufs_buffer_head * _ubh_bread_ (struct ufs_sb_private_info * uspi,
-	kdev_t dev, unsigned fragment, unsigned size)
+	struct super_block *sb, unsigned fragment, unsigned size)
 {
 	struct ufs_buffer_head * ubh;
 	unsigned i, j, count;
@@ -39,7 +39,7 @@
 	ubh->fragment = fragment;
 	ubh->count = count;
 	for (i = 0; i < count; i++)
-		if (!(ubh->bh[i] = bread (dev, fragment + i, uspi->s_fsize)))
+		if (!(ubh->bh[i] = sb_bread(sb, fragment + i)))
 			goto failed;
 	for (; i < UFS_MAXFRAG; i++)
 		ubh->bh[i] = NULL;
@@ -51,7 +51,7 @@
 }
 
 struct ufs_buffer_head * ubh_bread_uspi (struct ufs_sb_private_info * uspi,
-	kdev_t dev, unsigned fragment, unsigned size)
+	struct super_block *sb, unsigned fragment, unsigned size)
 {
 	unsigned i, j, count;
 	if (size & ~uspi->s_fmask)
@@ -62,7 +62,7 @@
 	USPI_UBH->fragment = fragment;
 	USPI_UBH->count = count;
 	for (i = 0; i < count; i++)
-		if (!(USPI_UBH->bh[i] = bread (dev, fragment + i, uspi->s_fsize)))
+		if (!(USPI_UBH->bh[i] = sb_bread(sb, fragment + i)))
 			goto failed;
 	for (; i < UFS_MAXFRAG; i++)
 		USPI_UBH->bh[i] = NULL;
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 5ee0ecb..2e5d476 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -226,9 +226,9 @@
 /*
  * These functions manipulate ufs buffers
  */
-#define ubh_bread(dev,fragment,size) _ubh_bread_(uspi,dev,fragment,size)  
-extern struct ufs_buffer_head * _ubh_bread_(struct ufs_sb_private_info *, kdev_t, unsigned, unsigned);
-extern struct ufs_buffer_head * ubh_bread_uspi(struct ufs_sb_private_info *, kdev_t, unsigned, unsigned);
+#define ubh_bread(sb,fragment,size) _ubh_bread_(uspi,sb,fragment,size)  
+extern struct ufs_buffer_head * _ubh_bread_(struct ufs_sb_private_info *, struct super_block *, unsigned, unsigned);
+extern struct ufs_buffer_head * ubh_bread_uspi(struct ufs_sb_private_info *, struct super_block *, unsigned, unsigned);
 extern void ubh_brelse (struct ufs_buffer_head *);
 extern void ubh_brelse_uspi (struct ufs_sb_private_info *);
 extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *);
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index d8d68e8c..a140326 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -102,13 +102,6 @@
 #define page_to_bus page_to_phys
 
 /*
- * can the hardware map this into one segment or not, given no other
- * constraints.
- */
-#define BIOVEC_MERGEABLE(vec1, vec2)	\
-	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
-
-/*
  * readX/writeX() are used to access memory mapped devices. On some
  * architectures the memory mapped IO stuff needs to be accessed
  * differently. On the x86 architecture, we just read/write the
diff --git a/include/asm-m68k/machdep.h b/include/asm-m68k/machdep.h
index 82badf6..b7c3f7a 100644
--- a/include/asm-m68k/machdep.h
+++ b/include/asm-m68k/machdep.h
@@ -38,7 +38,6 @@
 extern void (*mach_hd_setup)(char *, int *);
 extern long mach_max_dma_address;
 extern void (*mach_floppy_setup)(char *, int *);
-extern void (*mach_floppy_eject)(void);
 extern void (*mach_heartbeat) (int);
 extern void (*mach_l2_flush) (int);
 extern int mach_sysrq_key;
diff --git a/include/asm-sparc64/dma.h b/include/asm-sparc64/dma.h
index fce01fc..b80f2f7a 100644
--- a/include/asm-sparc64/dma.h
+++ b/include/asm-sparc64/dma.h
@@ -1,4 +1,4 @@
-/* $Id: dma.h,v 1.19 2000/01/28 13:43:14 jj Exp $
+/* $Id: dma.h,v 1.21 2001/12/13 04:16:52 davem Exp $
  * include/asm-sparc64/dma.h
  *
  * Copyright 1996 (C) David S. Miller (davem@caip.rutgers.edu)
@@ -218,10 +218,4 @@
 #define isa_dma_bridge_buggy 	(0)
 #endif
 
-/* We support dynamic DMA remapping and adjacent SG entries
- * which have addresses modulo DMA_CHUNK_SIZE will be merged
- * by dma_prepare_sg().
- */
-#define DMA_CHUNK_SIZE 8192
-
 #endif /* !(_ASM_SPARC64_DMA_H) */
diff --git a/include/asm-sparc64/io.h b/include/asm-sparc64/io.h
index 258428a..8b8c056 100644
--- a/include/asm-sparc64/io.h
+++ b/include/asm-sparc64/io.h
@@ -1,4 +1,4 @@
-/* $Id: io.h,v 1.40 2001/11/10 09:24:56 davem Exp $ */
+/* $Id: io.h,v 1.46 2001/12/13 04:16:52 davem Exp $ */
 #ifndef __SPARC64_IO_H
 #define __SPARC64_IO_H
 
@@ -18,11 +18,10 @@
 extern unsigned long bus_to_virt_not_defined_use_pci_map(volatile void *addr);
 #define bus_to_virt bus_to_virt_not_defined_use_pci_map
 
+/* BIO layer definitions. */
 extern unsigned long phys_base;
 #define page_to_phys(page)	((((page) - mem_map) << PAGE_SHIFT)+phys_base)
-
-#define BIOVEC_MERGEABLE(vec1, vec2)	\
-	((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (DMA_CHUNK_SIZE - 1)) == 0)
+#define BIO_VMERGE_BOUNDARY	8192
 
 /* Different PCI controllers we support have their PCI MEM space
  * mapped to an either 2GB (Psycho) or 4GB (Sabre) aligned area,
@@ -258,6 +257,7 @@
 #define __raw_readb(__addr)		(_raw_readb((unsigned long)(__addr)))
 #define __raw_readw(__addr)		(_raw_readw((unsigned long)(__addr)))
 #define __raw_readl(__addr)		(_raw_readl((unsigned long)(__addr)))
+#define __raw_readq(__addr)		(_raw_readq((unsigned long)(__addr)))
 #define __raw_writeb(__b, __addr)	(_raw_writeb((u8)(__b), (unsigned long)(__addr)))
 #define __raw_writew(__w, __addr)	(_raw_writew((u16)(__w), (unsigned long)(__addr)))
 #define __raw_writel(__l, __addr)	(_raw_writel((u32)(__l), (unsigned long)(__addr)))
@@ -415,7 +415,7 @@
  */
 #define ioremap(__offset, __size)	((void *)(__offset))
 #define ioremap_nocache(X,Y)		ioremap((X),(Y))
-#define iounmap(__addr)			do { } while(0)
+#define iounmap(__addr)			do { (void)(__addr); } while(0)
 
 /* Similarly for SBUS. */
 #define sbus_ioremap(__res, __offset, __size, __name) \
diff --git a/include/linux/amigaffs.h b/include/linux/amigaffs.h
index 3a264a7..39bbdbb 100644
--- a/include/linux/amigaffs.h
+++ b/include/linux/amigaffs.h
@@ -31,7 +31,7 @@
 {
 	pr_debug(KERN_DEBUG "affs_bread: %d\n", block);
 	if (block >= AFFS_SB->s_reserved && block < AFFS_SB->s_partition_size)
-		return bread(sb->s_dev, block, sb->s_blocksize);
+		return sb_bread(sb, block);
 	return NULL;
 }
 static inline struct buffer_head *
@@ -39,7 +39,7 @@
 {
 	pr_debug(KERN_DEBUG "affs_getblk: %d\n", block);
 	if (block >= AFFS_SB->s_reserved && block < AFFS_SB->s_partition_size)
-		return getblk(sb->s_dev, block, sb->s_blocksize);
+		return sb_getblk(sb, block);
 	return NULL;
 }
 static inline struct buffer_head *
@@ -48,10 +48,11 @@
 	struct buffer_head *bh;
 	pr_debug(KERN_DEBUG "affs_getzeroblk: %d\n", block);
 	if (block >= AFFS_SB->s_reserved && block < AFFS_SB->s_partition_size) {
-		bh = getblk(sb->s_dev, block, sb->s_blocksize);
-		wait_on_buffer(bh);
+		bh = sb_getblk(sb, block);
+		lock_buffer(bh);
 		memset(bh->b_data, 0 , sb->s_blocksize);
 		mark_buffer_uptodate(bh, 1);
+		unlock_buffer(bh);
 		return bh;
 	}
 	return NULL;
@@ -62,7 +63,7 @@
 	struct buffer_head *bh;
 	pr_debug(KERN_DEBUG "affs_getemptyblk: %d\n", block);
 	if (block >= AFFS_SB->s_reserved && block < AFFS_SB->s_partition_size) {
-		bh = getblk(sb->s_dev, block, sb->s_blocksize);
+		bh = sb_getblk(sb, block);
 		wait_on_buffer(bh);
 		mark_buffer_uptodate(bh, 1);
 		return bh;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index a7c0c25..8bbacfe 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -20,6 +20,12 @@
 #ifndef __LINUX_BIO_H
 #define __LINUX_BIO_H
 
+/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
+#include <asm/io.h>
+#ifndef BIO_VMERGE_BOUNDARY
+#define BIO_VMERGE_BOUNDARY	0
+#endif
+
 #define BIO_DEBUG
 
 #ifdef BIO_DEBUG
@@ -61,7 +67,17 @@
 
 	unsigned short		bi_vcnt;	/* how many bio_vec's */
 	unsigned short		bi_idx;		/* current index into bvl_vec */
-	unsigned short		bi_hw_seg;	/* actual mapped segments */
+
+	/* Number of segments in this BIO after
+	 * physical address coalescing is performed.
+	 */
+	unsigned short		bi_phys_segments;
+
+	/* Number of segments after physical and DMA remapping
+	 * hardware coalescing is performed.
+	 */
+	unsigned short		bi_hw_segments;
+
 	unsigned int		bi_size;	/* residual I/O count */
 	unsigned int		bi_max;		/* max bvl_vecs we can hold,
 						   used as index into pool */
@@ -128,10 +144,13 @@
 /*
  * merge helpers etc
  */
+
 #define __BVEC_END(bio)		bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
 #define __BVEC_START(bio)	bio_iovec_idx((bio), 0)
-#define BIO_CONTIG(bio, nxt) \
-	BIOVEC_MERGEABLE(__BVEC_END((bio)), __BVEC_START((nxt)))
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
+	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
+#define BIOVEC_VIRT_MERGEABLE(vec1, vec2)	\
+	((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0)
 #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
 	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
@@ -174,6 +193,7 @@
 
 extern int bio_endio(struct bio *, int, int);
 struct request_queue;
+extern inline int bio_phys_segments(struct request_queue *, struct bio *);
 extern inline int bio_hw_segments(struct request_queue *, struct bio *);
 
 extern inline void __bio_clone(struct bio *, struct bio *);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index fad87a3..620b149 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -41,8 +41,19 @@
 					 * touch them
 					 */
 	unsigned long hard_nr_sectors;
-	unsigned short nr_segments;
+
+	/* Number of scatter-gather DMA addr+len pairs after
+	 * physical address coalescing is performed.
+	 */
+	unsigned short nr_phys_segments;
+
+	/* Number of scatter-gather addr+len pairs after
+	 * physical and DMA remapping hardware coalescing is performed.
+	 * This is the number of scatter-gather entries the driver
+	 * will actually have to deal with after DMA mapping is done.
+	 */
 	unsigned short nr_hw_segments;
+
 	unsigned int current_nr_sectors;
 	unsigned int hard_cur_sectors;
 	void *special;
@@ -146,6 +157,7 @@
 	 * queue needs bounce pages for pages above this limit
 	 */
 	unsigned long		bounce_pfn;
+	int			bounce_gfp;
 
 	/*
 	 * This is used to remove the plug when tq_disk runs.
@@ -166,7 +178,8 @@
 	 * queue settings
 	 */
 	unsigned short		max_sectors;
-	unsigned short		max_segments;
+	unsigned short		max_phys_segments;
+	unsigned short		max_hw_segments;
 	unsigned short		hardsect_size;
 	unsigned int		max_segment_size;
 
@@ -202,19 +215,22 @@
 
 #define BLK_BOUNCE_HIGH	(blk_max_low_pfn << PAGE_SHIFT)
 #define BLK_BOUNCE_ANY	(blk_max_pfn << PAGE_SHIFT)
+#define BLK_BOUNCE_ISA	(ISA_DMA_THRESHOLD)
 
 #ifdef CONFIG_HIGHMEM
 
-extern void create_bounce(unsigned long pfn, struct bio **bio_orig);
+extern void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig);
+extern void init_emergency_isa_pool(void);
 
 extern inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
 {
-	create_bounce(q->bounce_pfn, bio);
+	create_bounce(q->bounce_pfn, q->bounce_gfp, bio);
 }
 
 #else /* CONFIG_HIGHMEM */
 
 #define blk_queue_bounce(q, bio)	do { } while (0)
+#define init_emergency_isa_pool()	do { } while (0)
 
 #endif /* CONFIG_HIGHMEM */
 
@@ -257,7 +273,8 @@
 extern void blk_put_request(struct request *);
 extern void blk_plug_device(request_queue_t *);
 extern void blk_recount_segments(request_queue_t *, struct bio *);
-extern inline int blk_contig_segment(request_queue_t *q, struct bio *, struct bio *);
+extern inline int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
+extern inline int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
 extern void blk_queue_assign_lock(request_queue_t *q, spinlock_t *);
 
 extern int block_ioctl(kdev_t, unsigned int, unsigned long);
@@ -270,7 +287,8 @@
 extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
 extern void blk_queue_bounce_limit(request_queue_t *, u64);
 extern void blk_queue_max_sectors(request_queue_t *q, unsigned short);
-extern void blk_queue_max_segments(request_queue_t *q, unsigned short);
+extern void blk_queue_max_phys_segments(request_queue_t *q, unsigned short);
+extern void blk_queue_max_hw_segments(request_queue_t *q, unsigned short);
 extern void blk_queue_max_segment_size(request_queue_t *q, unsigned int);
 extern void blk_queue_hardsect_size(request_queue_t *q, unsigned short);
 extern void blk_queue_segment_boundary(request_queue_t *q, unsigned long);
@@ -284,7 +302,8 @@
 
 extern int * max_readahead[MAX_BLKDEV];
 
-#define MAX_SEGMENTS 128
+#define MAX_PHYS_SEGMENTS 128
+#define MAX_HW_SEGMENTS 128
 #define MAX_SECTORS 255
 
 #define MAX_SEGMENT_SIZE	65536
diff --git a/include/linux/blkdev.h.orig b/include/linux/blkdev.h.orig
new file mode 100644
index 0000000..620b149
--- /dev/null
+++ b/include/linux/blkdev.h.orig
@@ -0,0 +1,371 @@
+#ifndef _LINUX_BLKDEV_H
+#define _LINUX_BLKDEV_H
+
+#include <linux/major.h>
+#include <linux/sched.h>
+#include <linux/genhd.h>
+#include <linux/tqueue.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+
+#include <asm/scatterlist.h>
+
+struct request_queue;
+typedef struct request_queue request_queue_t;
+struct elevator_s;
+typedef struct elevator_s elevator_t;
+
+struct request_list {
+	unsigned int count;
+	struct list_head free;
+	wait_queue_head_t wait;
+};
+
+struct request {
+	struct list_head queuelist; /* looking for ->queue? you must _not_
+				     * access it directly, use
+				     * blkdev_dequeue_request! */
+	int elevator_sequence;
+
+	unsigned char cmd[16];
+
+	unsigned long flags;		/* see REQ_ bits below */
+
+	int rq_status;	/* should split this into a few status bits */
+	kdev_t rq_dev;
+	int errors;
+	sector_t sector;
+	unsigned long nr_sectors;
+	unsigned long hard_sector;	/* the hard_* are block layer
+					 * internals, no driver should
+					 * touch them
+					 */
+	unsigned long hard_nr_sectors;
+
+	/* Number of scatter-gather DMA addr+len pairs after
+	 * physical address coalescing is performed.
+	 */
+	unsigned short nr_phys_segments;
+
+	/* Number of scatter-gather addr+len pairs after
+	 * physical and DMA remapping hardware coalescing is performed.
+	 * This is the number of scatter-gather entries the driver
+	 * will actually have to deal with after DMA mapping is done.
+	 */
+	unsigned short nr_hw_segments;
+
+	unsigned int current_nr_sectors;
+	unsigned int hard_cur_sectors;
+	void *special;
+	char *buffer;
+	struct completion *waiting;
+	struct bio *bio, *biotail;
+	request_queue_t *q;
+	struct request_list *rl;
+};
+
+/*
+ * first three bits match BIO_RW* bits, important
+ */
+enum rq_flag_bits {
+	__REQ_RW,	/* not set, read. set, write */
+	__REQ_RW_AHEAD,	/* READA */
+	__REQ_BARRIER,	/* may not be passed */
+	__REQ_CMD,	/* is a regular fs rw request */
+	__REQ_NOMERGE,	/* don't touch this for merging */
+	__REQ_STARTED,	/* drive already may have started this one */
+	__REQ_DONTPREP,	/* don't call prep for this one */
+	/*
+	 * for IDE
+ 	*/
+	__REQ_DRIVE_CMD,
+	__REQ_DRIVE_TASK,
+
+	__REQ_PC,	/* packet command (special) */
+	__REQ_BLOCK_PC,	/* queued down pc from block layer */
+	__REQ_SENSE,	/* sense retrieval */
+
+	__REQ_SPECIAL,	/* driver special command */
+
+	__REQ_NR_BITS,	/* stops here */
+};
+
+#define REQ_RW		(1 << __REQ_RW)
+#define REQ_RW_AHEAD	(1 << __REQ_RW_AHEAD)
+#define REQ_BARRIER	(1 << __REQ_BARRIER)
+#define REQ_CMD		(1 << __REQ_CMD)
+#define REQ_NOMERGE	(1 << __REQ_NOMERGE)
+#define REQ_STARTED	(1 << __REQ_STARTED)
+#define REQ_DONTPREP	(1 << __REQ_DONTPREP)
+#define REQ_DRIVE_CMD	(1 << __REQ_DRIVE_CMD)
+#define REQ_DRIVE_TASK	(1 << __REQ_DRIVE_TASK)
+#define REQ_PC		(1 << __REQ_PC)
+#define REQ_SENSE	(1 << __REQ_SENSE)
+#define REQ_BLOCK_PC	(1 << __REQ_BLOCK_PC)
+#define REQ_SPECIAL	(1 << __REQ_SPECIAL)
+
+#include <linux/elevator.h>
+
+typedef int (merge_request_fn) (request_queue_t *, struct request *,
+				struct bio *);
+typedef int (merge_requests_fn) (request_queue_t *, struct request *,
+				 struct request *);
+typedef void (request_fn_proc) (request_queue_t *q);
+typedef request_queue_t * (queue_proc) (kdev_t dev);
+typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
+typedef int (prep_rq_fn) (request_queue_t *, struct request *);
+typedef void (unplug_device_fn) (void *q);
+
+enum blk_queue_state {
+	Queue_down,
+	Queue_up,
+};
+
+/*
+ * Default nr free requests per queue, ll_rw_blk will scale it down
+ * according to available RAM at init time
+ */
+#define QUEUE_NR_REQUESTS	8192
+
+struct request_queue
+{
+	/*
+	 * the queue request freelist, one for reads and one for writes
+	 */
+	struct request_list	rq[2];
+
+	/*
+	 * Together with queue_head for cacheline sharing
+	 */
+	struct list_head	queue_head;
+	elevator_t		elevator;
+
+	request_fn_proc		*request_fn;
+	merge_request_fn	*back_merge_fn;
+	merge_request_fn	*front_merge_fn;
+	merge_requests_fn	*merge_requests_fn;
+	make_request_fn		*make_request_fn;
+	prep_rq_fn		*prep_rq_fn;
+
+	/*
+	 * The queue owner gets to use this for whatever they like.
+	 * ll_rw_blk doesn't touch it.
+	 */
+	void			*queuedata;
+
+	/*
+	 * queue needs bounce pages for pages above this limit
+	 */
+	unsigned long		bounce_pfn;
+	int			bounce_gfp;
+
+	/*
+	 * This is used to remove the plug when tq_disk runs.
+	 */
+	struct tq_struct	plug_tq;
+
+	/*
+	 * various queue flags, see QUEUE_* below
+	 */
+	unsigned long		queue_flags;
+
+	/*
+	 * protects queue structures from reentrancy
+	 */
+	spinlock_t		*queue_lock;
+
+	/*
+	 * queue settings
+	 */
+	unsigned short		max_sectors;
+	unsigned short		max_phys_segments;
+	unsigned short		max_hw_segments;
+	unsigned short		hardsect_size;
+	unsigned int		max_segment_size;
+
+	unsigned long		seg_boundary_mask;
+
+	wait_queue_head_t	queue_wait;
+};
+
+#define RQ_INACTIVE		(-1)
+#define RQ_ACTIVE		1
+#define RQ_SCSI_BUSY		0xffff
+#define RQ_SCSI_DONE		0xfffe
+#define RQ_SCSI_DISCONNECTING	0xffe0
+
+#define QUEUE_FLAG_PLUGGED	0	/* queue is plugged */
+#define QUEUE_FLAG_NOSPLIT	1	/* can process bio over several goes */
+#define QUEUE_FLAG_CLUSTER	2	/* cluster several segments into 1 */
+
+#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
+#define blk_mark_plugged(q)	set_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
+#define blk_queue_empty(q)	elv_queue_empty(q)
+#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
+
+#define rq_data_dir(rq)		((rq)->flags & 1)
+
+/*
+ * noop, requests are automagically marked as active/inactive by I/O
+ * scheduler -- see elv_next_request
+ */
+#define blk_queue_headactive(q, head_active)
+
+extern unsigned long blk_max_low_pfn, blk_max_pfn;
+
+#define BLK_BOUNCE_HIGH	(blk_max_low_pfn << PAGE_SHIFT)
+#define BLK_BOUNCE_ANY	(blk_max_pfn << PAGE_SHIFT)
+#define BLK_BOUNCE_ISA	(ISA_DMA_THRESHOLD)
+
+#ifdef CONFIG_HIGHMEM
+
+extern void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig);
+extern void init_emergency_isa_pool(void);
+
+extern inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
+{
+	create_bounce(q->bounce_pfn, q->bounce_gfp, bio);
+}
+
+#else /* CONFIG_HIGHMEM */
+
+#define blk_queue_bounce(q, bio)	do { } while (0)
+#define init_emergency_isa_pool()	do { } while (0)
+
+#endif /* CONFIG_HIGHMEM */
+
+#define rq_for_each_bio(bio, rq)	\
+	if ((rq->bio))			\
+		for (bio = (rq)->bio; bio; bio = bio->bi_next)
+
+struct blk_dev_struct {
+	/*
+	 * queue_proc has to be atomic
+	 */
+	request_queue_t		request_queue;
+	queue_proc		*queue;
+	void			*data;
+};
+
+struct sec_size {
+	unsigned block_size;
+	unsigned block_size_bits;
+};
+
+/*
+ * Used to indicate the default queue for drivers that don't bother
+ * to implement multiple queues.  We have this access macro here
+ * so as to eliminate the need for each and every block device
+ * driver to know about the internal structure of blk_dev[].
+ */
+#define BLK_DEFAULT_QUEUE(_MAJOR)  &blk_dev[_MAJOR].request_queue
+
+extern struct sec_size * blk_sec[MAX_BLKDEV];
+extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
+extern void grok_partitions(kdev_t dev, long size);
+extern int wipe_partitions(kdev_t dev);
+extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size);
+extern void generic_make_request(struct bio *bio);
+extern inline request_queue_t *blk_get_queue(kdev_t dev);
+extern void blkdev_release_request(struct request *);
+extern void blk_attempt_remerge(request_queue_t *, struct request *);
+extern struct request *blk_get_request(request_queue_t *, int, int);
+extern void blk_put_request(struct request *);
+extern void blk_plug_device(request_queue_t *);
+extern void blk_recount_segments(request_queue_t *, struct bio *);
+extern inline int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
+extern inline int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
+extern void blk_queue_assign_lock(request_queue_t *q, spinlock_t *);
+
+extern int block_ioctl(kdev_t, unsigned int, unsigned long);
+
+/*
+ * Access functions for manipulating queue properties
+ */
+extern int blk_init_queue(request_queue_t *, request_fn_proc *, spinlock_t *);
+extern void blk_cleanup_queue(request_queue_t *);
+extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
+extern void blk_queue_bounce_limit(request_queue_t *, u64);
+extern void blk_queue_max_sectors(request_queue_t *q, unsigned short);
+extern void blk_queue_max_phys_segments(request_queue_t *q, unsigned short);
+extern void blk_queue_max_hw_segments(request_queue_t *q, unsigned short);
+extern void blk_queue_max_segment_size(request_queue_t *q, unsigned int);
+extern void blk_queue_hardsect_size(request_queue_t *q, unsigned short);
+extern void blk_queue_segment_boundary(request_queue_t *q, unsigned long);
+extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
+extern void blk_dump_rq_flags(struct request *, char *);
+extern void generic_unplug_device(void *);
+
+extern int * blk_size[MAX_BLKDEV];
+
+extern int * blksize_size[MAX_BLKDEV];
+
+extern int * max_readahead[MAX_BLKDEV];
+
+#define MAX_PHYS_SEGMENTS 128
+#define MAX_HW_SEGMENTS 128
+#define MAX_SECTORS 255
+
+#define MAX_SEGMENT_SIZE	65536
+
+/* read-ahead in pages.. */
+#define MAX_READAHEAD	31
+#define MIN_READAHEAD	3
+
+#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
+#define blkdev_entry_next_request(entry) blkdev_entry_to_request((entry)->next)
+#define blkdev_entry_prev_request(entry) blkdev_entry_to_request((entry)->prev)
+#define blkdev_next_request(req) blkdev_entry_to_request((req)->queuelist.next)
+#define blkdev_prev_request(req) blkdev_entry_to_request((req)->queuelist.prev)
+
+extern void drive_stat_acct(struct request *, int, int);
+
+extern inline void blk_clear(int major)
+{
+	blk_size[major] = NULL;
+#if 0
+	blk_size_in_bytes[major] = NULL;
+#endif
+	blksize_size[major] = NULL;
+	max_readahead[major] = NULL;
+	read_ahead[major] = 0;
+}
+
+extern inline int get_hardsect_size(kdev_t dev)
+{
+	request_queue_t *q = blk_get_queue(dev);
+	int retval = 512;
+
+	if (q && q->hardsect_size)
+		retval = q->hardsect_size;
+
+	return retval;
+}
+
+#define blk_finished_io(nsects)	do { } while (0)
+#define blk_started_io(nsects)	do { } while (0)
+
+extern inline unsigned int blksize_bits(unsigned int size)
+{
+	unsigned int bits = 8;
+	do {
+		bits++;
+		size >>= 1;
+	} while (size > 256);
+	return bits;
+}
+
+extern inline unsigned int block_size(kdev_t dev)
+{
+	int retval = BLOCK_SIZE;
+	int major = MAJOR(dev);
+
+	if (blksize_size[major]) {
+		int minor = MINOR(dev);
+		if (blksize_size[major][minor])
+			retval = blksize_size[major][minor];
+	}
+	return retval;
+}
+
+#endif
diff --git a/include/linux/fd.h b/include/linux/fd.h
index c0ed279..187785b 100644
--- a/include/linux/fd.h
+++ b/include/linux/fd.h
@@ -369,10 +369,4 @@
 #define FDEJECT _IO(2, 0x5a)
 /* eject the disk */
 
-
-#ifdef __KERNEL__
-/* eject the boot floppy (if we need the drive for a different root floppy) */
-void floppy_eject(void);
-#endif
-
 #endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7f52b46..b1e59c1 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -21,7 +21,6 @@
 #include <linux/cache.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
-#include <linux/bio.h>
 
 #include <asm/atomic.h>
 #include <asm/bitops.h>
@@ -1363,6 +1362,7 @@
 extern struct buffer_head * getblk(kdev_t, sector_t, int);
 extern void ll_rw_block(int, int, struct buffer_head * bh[]);
 extern int submit_bh(int, struct buffer_head *);
+struct bio;
 extern int submit_bio(int, struct bio *);
 extern int is_read_only(kdev_t);
 extern void __brelse(struct buffer_head *);
@@ -1379,6 +1379,18 @@
 }
 extern int set_blocksize(kdev_t, int);
 extern struct buffer_head * bread(kdev_t, int, int);
+static inline struct buffer_head * sb_bread(struct super_block *sb, int block)
+{
+	return bread(sb->s_dev, block, sb->s_blocksize);
+}
+static inline struct buffer_head * sb_getblk(struct super_block *sb, int block)
+{
+	return getblk(sb->s_dev, block, sb->s_blocksize);
+}
+static inline struct buffer_head * sb_get_hash_table(struct super_block *sb, int block)
+{
+	return get_hash_table(sb->s_dev, block, sb->s_blocksize);
+}
 extern void wakeup_bdflush(void);
 extern void put_unused_buffer_head(struct buffer_head * bh);
 extern struct buffer_head * get_unused_buffer_head(int async);
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 157c3b6..7aa92d2 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -2,6 +2,7 @@
 #define _LINUX_HIGHMEM_H
 
 #include <linux/config.h>
+#include <linux/bio.h>
 #include <asm/pgalloc.h>
 
 #ifdef CONFIG_HIGHMEM
@@ -13,7 +14,7 @@
 /* declarations for linux/mm/highmem.c */
 unsigned int nr_free_highpages(void);
 
-extern void create_bounce(unsigned long pfn, struct bio **bio_orig);
+extern void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig);
 
 static inline char *bh_kmap(struct buffer_head *bh)
 {
diff --git a/include/linux/iso_fs.h b/include/linux/iso_fs.h
index 82dde80..9cdfbae 100644
--- a/include/linux/iso_fs.h
+++ b/include/linux/iso_fs.h
@@ -219,7 +219,7 @@
 int get_acorn_filename(struct iso_directory_record *, char *, struct inode *);
 
 extern struct dentry *isofs_lookup(struct inode *, struct dentry *);
-extern struct buffer_head *isofs_bread(struct inode *, unsigned int, unsigned int);
+extern struct buffer_head *isofs_bread(struct inode *, unsigned int);
 extern int isofs_get_blocks(struct inode *, sector_t, struct buffer_head **, unsigned long);
 
 extern struct inode_operations isofs_dir_inode_operations;
@@ -230,11 +230,11 @@
 #ifdef LEAK_CHECK
 #define free_s leak_check_free_s
 #define malloc leak_check_malloc
-#define bread leak_check_bread
+#define sb_bread leak_check_bread
 #define brelse leak_check_brelse
 extern void * leak_check_malloc(unsigned int size);
 extern void leak_check_free_s(void * obj, int size);
-extern struct buffer_head * leak_check_bread(int dev, int block, int size);
+extern struct buffer_head * leak_check_bread(struct super_block *sb, int block);
 extern void leak_check_brelse(struct buffer_head * bh);
 #endif /* LEAK_CHECK */
 
diff --git a/include/linux/qnx4_fs.h b/include/linux/qnx4_fs.h
index dd9b7cb..55ba2f9 100644
--- a/include/linux/qnx4_fs.h
+++ b/include/linux/qnx4_fs.h
@@ -118,7 +118,7 @@
 extern int qnx4_rmdir(struct inode *dir, struct dentry *dentry);
 extern int qnx4_sync_file(struct file *file, struct dentry *dentry, int);
 extern int qnx4_sync_inode(struct inode *inode);
-extern int qnx4_get_block(struct inode *inode, long iblock, struct buffer_head *bh, int create);
+extern int qnx4_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh, int create);
 
 #endif				/* __KERNEL__ */
 
diff --git a/init/do_mounts.c b/init/do_mounts.c
index e6a9429..3aab59a 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -351,23 +351,20 @@
 	return sys_symlink(path + n + 5, name);
 }
 
-#ifdef CONFIG_MAC_FLOPPY
-int swim3_fd_eject(int devnum);
-#endif
 static void __init change_floppy(char *fmt, ...)
 {
 	extern void wait_for_keypress(void);
 	char buf[80];
+	int fd;
 	va_list args;
 	va_start(args, fmt);
 	vsprintf(buf, fmt, args);
 	va_end(args);
-#ifdef CONFIG_BLK_DEV_FD
-	floppy_eject();
-#endif
-#ifdef CONFIG_MAC_FLOPPY
-	swim3_fd_eject(MINOR(ROOT_DEV));
-#endif
+	fd = open("/dev/root", O_RDWR, 0);
+	if (fd >= 0) {
+		sys_ioctl(fd, FDEJECT, 0);
+		close(fd);
+	}
 	printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
 	wait_for_keypress();
 }
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index bd626a1..55a53c0 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -60,7 +60,7 @@
 extern void *sys_call_table;
 
 extern struct timezone sys_tz;
-extern int request_dma(unsigned int dmanr, char * deviceID);
+extern int request_dma(unsigned int dmanr, const char * deviceID);
 extern void free_dma(unsigned int dmanr);
 extern spinlock_t dma_spin_lock;
 
diff --git a/kernel/signal.c b/kernel/signal.c
index 44acecd..b695891 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -649,8 +649,10 @@
 /*
  * kill_something_info() interprets pid in interesting ways just like kill(2).
  *
- * POSIX specifies that kill(-1,sig) is unspecified, but what we have
- * is probably wrong.  Should make it like BSD or SYSV.
+ * POSIX (2001) specifies "If pid is -1, sig shall be sent to all processes
+ * (excluding an unspecified set of system processes) for which the process
+ * has permission to send that signal."
+ * So, probably the process should also signal itself.
  */
 
 static int kill_something_info(int sig, struct siginfo *info, int pid)
@@ -663,7 +665,7 @@
 
 		read_lock(&tasklist_lock);
 		for_each_task(p) {
-			if (p->pid > 1 && p != current) {
+			if (p->pid > 1) {
 				int err = send_sig_info(sig, info, p);
 				++count;
 				if (err != -EPERM)
diff --git a/mm/filemap.c b/mm/filemap.c
index bd53edf..0ae33bc 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2847,7 +2847,7 @@
 	unsigned long	limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
 	loff_t		pos;
 	struct page	*page, *cached_page;
-	unsigned long	written;
+	ssize_t		written;
 	long		status = 0;
 	int		err;
 	unsigned	bytes;
diff --git a/mm/highmem.c b/mm/highmem.c
index efdc8b7..72fd4e8 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -184,13 +184,14 @@
 		wake_up(&pkmap_map_wait);
 }
 
-#define POOL_SIZE 64
+#define POOL_SIZE	64
+#define ISA_POOL_SIZE	16
 
-static mempool_t *page_pool;
+static mempool_t *page_pool, *isa_page_pool;
 
-static void * page_pool_alloc(int gfp_mask, void *data)
+static void *page_pool_alloc(int gfp_mask, void *data)
 {
-	return alloc_page(gfp_mask & ~ __GFP_HIGHIO);
+	return alloc_page(gfp_mask);
 }
 
 static void page_pool_free(void *page, void *data)
@@ -215,6 +216,23 @@
 	return 0;
 }
 
+/*
+ * gets called "every" time someone initializes a queue with BLK_BOUNCE_ISA
+ * as the max address, so check if the pool has already been created.
+ */
+int init_emergency_isa_pool(void)
+{
+	if (isa_page_pool)
+		return 0;
+
+	isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc, page_pool_free, NULL);
+	if (!isa_page_pool)
+		BUG();
+
+	printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
+	return 0;
+}
+
 __initcall(init_emergency_pool);
 
 /*
@@ -248,7 +266,7 @@
 	}
 }
 
-static inline int bounce_end_io (struct bio *bio, int nr_sectors)
+static inline int bounce_end_io (struct bio *bio, int nr_sectors, mempool_t *pool)
 {
 	struct bio *bio_orig = bio->bi_private;
 	struct bio_vec *bvec, *org_vec;
@@ -267,7 +285,7 @@
 		if (bvec->bv_page == org_vec->bv_page)
 			continue;
 
-		mempool_free(bvec->bv_page, page_pool);	
+		mempool_free(bvec->bv_page, pool);	
 	}
 
 out_eio:
@@ -279,28 +297,53 @@
 
 static int bounce_end_io_write(struct bio *bio, int nr_sectors)
 {
-	return bounce_end_io(bio, nr_sectors);
+	return bounce_end_io(bio, nr_sectors, page_pool);
 }
 
-static int bounce_end_io_read (struct bio *bio, int nr_sectors)
+static int bounce_end_io_write_isa(struct bio *bio, int nr_sectors)
+{
+	return bounce_end_io(bio, nr_sectors, isa_page_pool);
+}
+
+static inline int __bounce_end_io_read(struct bio *bio, int nr_sectors,
+				       mempool_t *pool)
 {
 	struct bio *bio_orig = bio->bi_private;
 
 	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
 		copy_to_high_bio_irq(bio_orig, bio);
 
-	return bounce_end_io(bio, nr_sectors);
+	return bounce_end_io(bio, nr_sectors, pool);
 }
 
-void create_bounce(unsigned long pfn, struct bio **bio_orig)
+static int bounce_end_io_read(struct bio *bio, int nr_sectors)
+{
+	return __bounce_end_io_read(bio, nr_sectors, page_pool);
+}
+
+static int bounce_end_io_read_isa(struct bio *bio, int nr_sectors)
+{
+	return __bounce_end_io_read(bio, nr_sectors, isa_page_pool);
+}
+
+void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig)
 {
 	struct page *page;
 	struct bio *bio = NULL;
-	int i, rw = bio_data_dir(*bio_orig);
+	int i, rw = bio_data_dir(*bio_orig), bio_gfp;
 	struct bio_vec *to, *from;
+	mempool_t *pool;
 
 	BUG_ON((*bio_orig)->bi_idx);
 
+	if (!(gfp & GFP_DMA)) {
+		bio_gfp = GFP_NOHIGHIO;
+		pool = page_pool;
+	} else {
+		bio_gfp = GFP_NOIO;
+		pool = isa_page_pool;
+	}
+
 	bio_for_each_segment(from, *bio_orig, i) {
 		page = from->bv_page;
 
@@ -314,11 +357,11 @@
 		 * irk, bounce it
 		 */
 		if (!bio)
-			bio = bio_alloc(GFP_NOHIGHIO, (*bio_orig)->bi_vcnt);
+			bio = bio_alloc(bio_gfp, (*bio_orig)->bi_vcnt);
 
 		to = &bio->bi_io_vec[i];
 
-		to->bv_page = mempool_alloc(page_pool, GFP_NOHIGHIO);
+		to->bv_page = mempool_alloc(pool, gfp);
 		to->bv_len = from->bv_len;
 		to->bv_offset = from->bv_offset;
 
@@ -359,10 +402,17 @@
 	bio->bi_idx = 0;
 	bio->bi_size = (*bio_orig)->bi_size;
 
-	if (rw & WRITE)
-		bio->bi_end_io = bounce_end_io_write;
-	else
-		bio->bi_end_io = bounce_end_io_read;
+	if (pool == page_pool) {
+		if (rw & WRITE)
+			bio->bi_end_io = bounce_end_io_write;
+		else
+			bio->bi_end_io = bounce_end_io_read;
+	} else {
+		if (rw & WRITE)
+			bio->bi_end_io = bounce_end_io_write_isa;
+		else
+			bio->bi_end_io = bounce_end_io_read_isa;
+	}
 
 	bio->bi_private = *bio_orig;
 	*bio_orig = bio;
diff --git a/mm/mempool.c b/mm/mempool.c
index 0c0bf99..ecf1acc 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -176,7 +176,8 @@
  *
  * this function only sleeps if the alloc_fn function sleeps or
  * returns NULL. Note that due to preallocation, this function
- * *never* fails.
+ * *never* fails when called from process contexts. (it might
+ * fail if called from an IRQ context.)
  */
 void * mempool_alloc(mempool_t *pool, int gfp_mask)
 {
@@ -185,7 +186,7 @@
 	struct list_head *tmp;
 	int curr_nr;
 	DECLARE_WAITQUEUE(wait, current);
-	int gfp_nowait = gfp_mask & ~__GFP_WAIT;
+	int gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
 
 repeat_alloc:
 	element = pool->alloc(gfp_nowait, pool->pool_data);
@@ -196,15 +197,11 @@
 	 * If the pool is less than 50% full then try harder
 	 * to allocate an element:
 	 */
-	if (gfp_mask != gfp_nowait) {
-		if (pool->curr_nr <= pool->min_nr/2) {
-			element = pool->alloc(gfp_mask, pool->pool_data);
-			if (likely(element != NULL))
-				return element;
-		}
-	} else
-		/* we must not sleep */
-		return NULL;
+	if ((gfp_mask != gfp_nowait) && (pool->curr_nr <= pool->min_nr/2)) {
+		element = pool->alloc(gfp_mask, pool->pool_data);
+		if (likely(element != NULL))
+			return element;
+	}
 
 	/*
 	 * Kick the VM at this point.
@@ -218,19 +215,25 @@
 		element = tmp;
 		pool->curr_nr--;
 		spin_unlock_irqrestore(&pool->lock, flags);
-
 		return element;
 	}
+	spin_unlock_irqrestore(&pool->lock, flags);
+
+	/* We must not sleep in the GFP_ATOMIC case */
+	if (gfp_mask == gfp_nowait)
+		return NULL;
+
+	run_task_queue(&tq_disk);
+
 	add_wait_queue_exclusive(&pool->wait, &wait);
 	set_task_state(current, TASK_UNINTERRUPTIBLE);
 
+	spin_lock_irqsave(&pool->lock, flags);
 	curr_nr = pool->curr_nr;
 	spin_unlock_irqrestore(&pool->lock, flags);
 
-	if (!curr_nr) {
-		run_task_queue(&tq_disk);
+	if (!curr_nr)
 		schedule();
-	}
 
 	current->state = TASK_RUNNING;
 	remove_wait_queue(&pool->wait, &wait);