Introduce rq_for_each_segment replacing rq_for_each_bio

Every usage of rq_for_each_bio wraps a usage of
bio_for_each_segment, so these can be combined into
rq_for_each_segment.
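
In every driver converted below the change has the same shape (a
schematic sketch, not taken from any one file):

	/* before: one loop over the request's bios, one over each
	 * bio's segments */
	rq_for_each_bio(bio, rq) {
		bio_for_each_segment(bvec, bio, i) {
			/* operate on bvec */
		}
	}

	/* after: a single flat walk over every segment of the request */
	rq_for_each_segment(bvec, rq, iter) {
		/* operate on bvec */
	}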

We define "struct req_iterator" to hold the 'bio' and 'index' that
are needed for the double iteration.
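
For reference, the iterator and macros added to include/linux/blkdev.h
(that hunk is not excerpted below) look roughly like:

	struct req_iterator {
		int i;			/* segment index in the current bio */
		struct bio *bio;	/* bio currently being walked */
	};

	/* This should not be used directly - use rq_for_each_segment */
	#define __rq_for_each_bio(_bio, rq)	\
		if ((rq->bio))			\
			for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

	#define rq_for_each_segment(bvl, _rq, _iter)		\
		__rq_for_each_bio(_iter.bio, _rq)		\
			bio_for_each_segment(bvl, _iter.bio, _iter.i)

	/* true only for the last segment of the last bio; nbd uses
	 * this below to decide whether to set MSG_MORE */
	#define rq_iter_last(rq, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 _iter.i == _iter.bio->bi_vcnt - 1)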

Signed-off-by: Neil Brown <neilb@suse.de>

Various compile fixes by me...

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
NeilBrown authored on 2007-09-25 12:35:59 +02:00; committed by Jens Axboe
commit 5705f70217, parent 9dfa52831e
14 changed files with 131 additions and 162 deletions

diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c

@@ -2437,22 +2437,19 @@ static void rw_interrupt(void)
 /* Compute maximal contiguous buffer size. */
 static int buffer_chain_size(void)
 {
-	struct bio *bio;
 	struct bio_vec *bv;
-	int size, i;
+	int size;
+	struct req_iterator iter;
 	char *base;
 
 	base = bio_data(current_req->bio);
 	size = 0;
 
-	rq_for_each_bio(bio, current_req) {
-		bio_for_each_segment(bv, bio, i) {
-			if (page_address(bv->bv_page) + bv->bv_offset !=
-			    base + size)
-				break;
+	rq_for_each_segment(bv, current_req, iter) {
+		if (page_address(bv->bv_page) + bv->bv_offset != base + size)
+			break;
 
-			size += bv->bv_len;
-		}
+		size += bv->bv_len;
 	}
 
 	return size >> 9;
@@ -2479,9 +2476,9 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
 {
 	int remaining;		/* number of transferred 512-byte sectors */
 	struct bio_vec *bv;
-	struct bio *bio;
 	char *buffer, *dma_buffer;
-	int size, i;
+	int size;
+	struct req_iterator iter;
 
 	max_sector = transfer_size(ssize,
 				   min(max_sector, max_sector_2),
@@ -2514,43 +2511,41 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
 
 	size = current_req->current_nr_sectors << 9;
 
-	rq_for_each_bio(bio, current_req) {
-		bio_for_each_segment(bv, bio, i) {
-			if (!remaining)
-				break;
+	rq_for_each_segment(bv, current_req, iter) {
+		if (!remaining)
+			break;
 
-			size = bv->bv_len;
-			SUPBOUND(size, remaining);
+		size = bv->bv_len;
+		SUPBOUND(size, remaining);
 
-			buffer = page_address(bv->bv_page) + bv->bv_offset;
+		buffer = page_address(bv->bv_page) + bv->bv_offset;
 #ifdef FLOPPY_SANITY_CHECK
-			if (dma_buffer + size >
-			    floppy_track_buffer + (max_buffer_sectors << 10) ||
-			    dma_buffer < floppy_track_buffer) {
-				DPRINT("buffer overrun in copy buffer %d\n",
-				       (int)((floppy_track_buffer -
-					      dma_buffer) >> 9));
-				printk("fsector_t=%d buffer_min=%d\n",
-				       fsector_t, buffer_min);
-				printk("current_count_sectors=%ld\n",
-				       current_count_sectors);
-				if (CT(COMMAND) == FD_READ)
-					printk("read\n");
-				if (CT(COMMAND) == FD_WRITE)
-					printk("write\n");
-				break;
-			}
-			if (((unsigned long)buffer) % 512)
-				DPRINT("%p buffer not aligned\n", buffer);
+		if (dma_buffer + size >
+		    floppy_track_buffer + (max_buffer_sectors << 10) ||
+		    dma_buffer < floppy_track_buffer) {
+			DPRINT("buffer overrun in copy buffer %d\n",
+			       (int)((floppy_track_buffer -
+				      dma_buffer) >> 9));
+			printk("fsector_t=%d buffer_min=%d\n",
+			       fsector_t, buffer_min);
+			printk("current_count_sectors=%ld\n",
+			       current_count_sectors);
+			if (CT(COMMAND) == FD_READ)
+				printk("read\n");
+			if (CT(COMMAND) == FD_WRITE)
+				printk("write\n");
+			break;
+		}
+		if (((unsigned long)buffer) % 512)
+			DPRINT("%p buffer not aligned\n", buffer);
 #endif
-			if (CT(COMMAND) == FD_READ)
-				memcpy(buffer, dma_buffer, size);
-			else
-				memcpy(dma_buffer, buffer, size);
-			remaining -= size;
-			dma_buffer += size;
-		}
+		if (CT(COMMAND) == FD_READ)
+			memcpy(buffer, dma_buffer, size);
+		else
+			memcpy(dma_buffer, buffer, size);
+
+		remaining -= size;
+		dma_buffer += size;
 	}
 #ifdef FLOPPY_SANITY_CHECK
 	if (remaining) {

diff --git a/drivers/block/lguest_blk.c b/drivers/block/lguest_blk.c
--- a/drivers/block/lguest_blk.c
+++ b/drivers/block/lguest_blk.c

@@ -142,12 +142,11 @@ static irqreturn_t lgb_irq(int irq, void *_bd)
  * return the total length. */
 static unsigned int req_to_dma(struct request *req, struct lguest_dma *dma)
 {
-	unsigned int i = 0, idx, len = 0;
-	struct bio *bio;
+	unsigned int i = 0, len = 0;
+	struct req_iterator iter;
+	struct bio_vec *bvec;
 
-	rq_for_each_bio(bio, req) {
-		struct bio_vec *bvec;
-		bio_for_each_segment(bvec, bio, idx) {
+	rq_for_each_segment(bvec, req, iter) {
 			/* We told the block layer not to give us too many. */
 			BUG_ON(i == LGUEST_MAX_DMA_SECTIONS);
 			/* If we had a zero-length segment, it would look like
@@ -160,7 +159,6 @@ static unsigned int req_to_dma(struct request *req, struct lguest_dma *dma)
 			dma->len[i] = bvec->bv_len;
 			len += bvec->bv_len;
 			i++;
-		}
 	}
 	/* If the array isn't full, we mark the end with a 0 length */
 	if (i < LGUEST_MAX_DMA_SECTIONS)

diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c

@@ -180,7 +180,7 @@ static inline int sock_send_bvec(struct socket *sock, struct bio_vec *bvec,
 
 static int nbd_send_req(struct nbd_device *lo, struct request *req)
 {
-	int result, i, flags;
+	int result, flags;
 	struct nbd_request request;
 	unsigned long size = req->nr_sectors << 9;
 	struct socket *sock = lo->sock;
@@ -205,16 +205,15 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
 	}
 
 	if (nbd_cmd(req) == NBD_CMD_WRITE) {
-		struct bio *bio;
+		struct req_iterator iter;
+		struct bio_vec *bvec;
 		/*
 		 * we are really probing at internals to determine
 		 * whether to set MSG_MORE or not...
 		 */
-		rq_for_each_bio(bio, req) {
-			struct bio_vec *bvec;
-			bio_for_each_segment(bvec, bio, i) {
+		rq_for_each_segment(bvec, req, iter) {
 				flags = 0;
-				if ((i < (bio->bi_vcnt - 1)) || bio->bi_next)
+				if (!rq_iter_last(req, iter))
 					flags = MSG_MORE;
 				dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
 						lo->disk->disk_name, req,
@@ -226,7 +225,6 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
 						result);
 					goto error_out;
 				}
-			}
 		}
 	}
 	return 0;
@@ -321,11 +319,10 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
 	dprintk(DBG_RX, "%s: request %p: got reply\n",
 		lo->disk->disk_name, req);
 	if (nbd_cmd(req) == NBD_CMD_READ) {
-		int i;
-		struct bio *bio;
-		rq_for_each_bio(bio, req) {
-			struct bio_vec *bvec;
-			bio_for_each_segment(bvec, bio, i) {
+		struct req_iterator iter;
+		struct bio_vec *bvec;
+
+		rq_for_each_segment(bvec, req, iter) {
 			result = sock_recv_bvec(sock, bvec);
 			if (result <= 0) {
 				printk(KERN_ERR "%s: Receive data failed (result %d)\n",
@@ -336,7 +333,6 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
 			}
 			dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
 				lo->disk->disk_name, req, bvec->bv_len);
-			}
 		}
 	}
 	return req;

diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c

@@ -91,30 +91,30 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
 				   struct request *req, int gather)
 {
 	unsigned int offset = 0;
-	struct bio *bio;
-	sector_t sector;
+	struct req_iterator iter;
 	struct bio_vec *bvec;
-	unsigned int i = 0, j;
+	unsigned int i = 0;
 	size_t size;
 	void *buf;
 
-	rq_for_each_bio(bio, req) {
-		sector = bio->bi_sector;
+	rq_for_each_segment(bvec, req, iter) {
+		unsigned long flags;
+
 		dev_dbg(&dev->sbd.core,
 			"%s:%u: bio %u: %u segs %u sectors from %lu\n",
-			__func__, __LINE__, i, bio_segments(bio),
-			bio_sectors(bio), sector);
-		bio_for_each_segment(bvec, bio, j) {
+			__func__, __LINE__, i, bio_segments(iter.bio),
+			bio_sectors(iter.bio),
+			(unsigned long)iter.bio->bi_sector);
+
 			size = bvec->bv_len;
-			buf = __bio_kmap_atomic(bio, j, KM_IRQ0);
+			buf = bvec_kmap_irq(bvec, &flags);
 			if (gather)
 				memcpy(dev->bounce_buf+offset, buf, size);
 			else
 				memcpy(buf, dev->bounce_buf+offset, size);
 			offset += size;
-			flush_kernel_dcache_page(bio_iovec_idx(bio, j)->bv_page);
-			__bio_kunmap_atomic(bio, KM_IRQ0);
-		}
+			flush_kernel_dcache_page(bvec->bv_page);
+			bvec_kunmap_irq(bvec, &flags);
 		i++;
 	}
 }
@@ -130,12 +130,13 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
 
 #ifdef DEBUG
 	unsigned int n = 0;
-	struct bio *bio;
+	struct bio_vec *bv;
+	struct req_iterator iter;
 
-	rq_for_each_bio(bio, req)
+	rq_for_each_segment(bv, req, iter)
 		n++;
 	dev_dbg(&dev->sbd.core,
-		"%s:%u: %s req has %u bios for %lu sectors %lu hard sectors\n",
+		"%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n",
 		__func__, __LINE__, op, n, req->nr_sectors,
 		req->hard_nr_sectors);
 #endif

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c

@@ -150,9 +150,8 @@ static int blkif_queue_request(struct request *req)
 	struct blkfront_info *info = req->rq_disk->private_data;
 	unsigned long buffer_mfn;
 	struct blkif_request *ring_req;
-	struct bio *bio;
+	struct req_iterator iter;
 	struct bio_vec *bvec;
-	int idx;
 	unsigned long id;
 	unsigned int fsect, lsect;
 	int ref;
@@ -186,8 +185,7 @@ static int blkif_queue_request(struct request *req)
 		ring_req->operation = BLKIF_OP_WRITE_BARRIER;
 
 	ring_req->nr_segments = 0;
-	rq_for_each_bio (bio, req) {
-		bio_for_each_segment (bvec, bio, idx) {
+	rq_for_each_segment(bvec, req, iter) {
 			BUG_ON(ring_req->nr_segments
 			       == BLKIF_MAX_SEGMENTS_PER_REQUEST);
 			buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
@@ -213,7 +211,6 @@ static int blkif_queue_request(struct request *req)
 				.last_sect = lsect };
 
 			ring_req->nr_segments++;
-		}
 	}
 
 	info->ring.req_prod_pvt++;