Add wrapper to allow submitting sequences of BIOs

master
Vitaliy Filippov 2013-05-20 01:49:53 +04:00
parent ad11d0d38c
commit b814b86302
1 changed file with 170 additions and 34 deletions

sftl.c

@@ -79,11 +79,16 @@ static DEFINE_IDA(sftl_index_ida);
/* Our block device list, used in cleanup_module */
static LIST_HEAD(sftl_device_list);
static void sync_io(struct block_device *bdev, unsigned sector, void *buf, unsigned len, int rw);
static void sync_io(struct block_device *bdev, sector_t sector, void *buf, unsigned len, int rw);
static long bio_submit_kern_seq(
struct block_device *bdev, void *data, unsigned int len, gfp_t gfp_mask,
sector_t sector, void *private, bio_end_io_t *endio, int rw);
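/* Completion callback for a buffered segment write: bi_private carries the
 * original upper-level bio, which is completed with the same status before
 * the backing bio is released. */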
static void sftl_complete_seg(struct bio *bio, int err)
{
bio_endio((struct bio *)bio->bi_private, err);
bio_put(bio);
}
static void sftl_make_request(struct request_queue *q, struct bio *bio)
@@ -122,8 +127,8 @@ static void sftl_make_request(struct request_queue *q, struct bio *bio)
submit_bio(READ, bb);
if (!(bb->bi_flags & (1 << BIO_UPTODATE)))
{
bio_endio(bio, -EIO);
bio_put(bb);
bio_endio(bio, -EIO);
}
}
}
@@ -147,20 +152,10 @@ static void sftl_make_request(struct request_queue *q, struct bio *bio)
if (sftl->buf_size >= sftl->buf_max)
{
// Need to flush the buffer before completing this bio
struct request_queue *q = bdev_get_queue(sftl->blkdev);
struct bio *bb = bio_map_kern(q, sftl->buf, seg_clust*clust_sz+phy_sz, GFP_KERNEL);
if (IS_ERR(bb))
return;
bb->bi_sector = sftl->nextfreeseg*(seg_clust*clust_blocks+1);
bb->bi_bdev = sftl->blkdev;
bb->bi_private = bio;
bb->bi_end_io = sftl_complete_seg;
submit_bio(WRITE, bb);
if (!(bb->bi_flags & (1 << BIO_UPTODATE)))
{
bio_put(bb);
int err = bio_submit_kern_seq(sftl->blkdev, sftl->buf, seg_clust*clust_sz+phy_sz, GFP_KERNEL,
sftl->nextfreeseg*(seg_clust*clust_blocks+1), bio, sftl_complete_seg, WRITE);
if (err)
bio_endio(bio, -EIO);
}
// FIXME Is this correct? I think not...
sftl->buf_size = 0;
// FIXME Correctly adjust free segment address
@@ -168,7 +163,7 @@ static void sftl_make_request(struct request_queue *q, struct bio *bio)
}
else
{
bio_endio(bio, -EIO);
bio_endio(bio, 0);
}
}
}
@@ -221,6 +216,7 @@ static void sftl_free_device(struct sftl_dev *dev)
return;
if (dev->buf_size)
{
INFO("Flushing %d pending clusters", dev->buf_size);
sync_io(dev->blkdev, dev->nextfreeseg*(seg_clust*clust_blocks+1), dev->buf, seg_clust*clust_sz+phy_sz, WRITE);
dev->buf_size = 0;
// Don't care about adjusting nextfreeseg because we're freeing the device
@@ -254,10 +250,11 @@ static void __exit sftl_exit(void)
list_for_each_safe(pos, next, &sftl_device_list)
{
struct sftl_dev *dev = list_entry(pos, typeof(*dev), list);
sync_blockdev(dev->blkdev);
list_del(&dev->list);
struct block_device *bdev = dev->blkdev;
INFO("%s: removing", dev->gd->disk_name);
sftl_free_device(dev);
INFO("%s: removed", dev->gd->disk_name);
sync_blockdev(bdev);
list_del(&dev->list);
}
unregister_blkdev(major_num, "sftl");
}
@@ -265,35 +262,173 @@ static void __exit sftl_exit(void)
module_init(sftl_init);
module_exit(sftl_exit);
static void bio_map_kern_endio(struct bio *bio, int err)
{
bio_put(bio);
}
// Copy-pasted from fs/bio.c, where __bio_map_kern is static, so that we can set our own bi_end_io on the mapped bios
static struct bio *__bio_map_kern(struct request_queue *q, void *data,
unsigned int len, gfp_t gfp_mask)
{
unsigned long kaddr = (unsigned long)data;
unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
unsigned long start = kaddr >> PAGE_SHIFT;
const int nr_pages = end - start;
int offset, i;
struct bio *bio;
bio = bio_kmalloc(gfp_mask, nr_pages);
if (!bio)
return ERR_PTR(-ENOMEM);
offset = offset_in_page(kaddr);
for (i = 0; i < nr_pages; i++) {
unsigned int bytes = PAGE_SIZE - offset;
if (len <= 0)
break;
if (bytes > len)
bytes = len;
if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
offset) < bytes)
break;
data += bytes;
len -= bytes;
offset = 0;
}
bio->bi_end_io = bio_map_kern_endio;
return bio;
}
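/* Shared completion state for a sequence of bios: @count is set to the number
 * of submitted bios, and the last one to complete invokes the caller's @endio
 * exactly once, passing the first error seen (if any). */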
struct bio_seq
{
atomic_t count;
void *private;
bio_end_io_t *endio;
int err;
};
static void bio_map_kern_seq_endio(struct bio *bio, int err)
{
struct bio_seq *seq = bio->bi_private;
if (err)
{
INFO("I/O err %d", err);
seq->err = err;
}
if (atomic_dec_and_test(&seq->count))
{
bio->bi_private = seq->private;
seq->endio(bio, seq->err);
kfree(seq);
}
else
bio_put(bio);
}
/**
* Generates and submits a sequence of one or more BIOs. @endio
* will be called only after ALL of them have finished.
*
* @bdev: block device
* @data: data buffer
* @len: total length in bytes; may exceed the queue limit for a single bio
* @gfp_mask: mask to use when allocating memory
* @sector: starting sector to write at
* @private: @endio will see this value at bio->bi_private when called
* @endio: normal bio endio callback
* @rw: READ or WRITE
*
* Returns 0 on success or a negative error code; on failure nothing
* has been submitted and @endio will not be called.
*/
static long bio_submit_kern_seq(
struct block_device *bdev, void *data, unsigned int len, gfp_t gfp_mask,
sector_t sector, void *private, bio_end_io_t *endio, int rw)
{
struct request_queue *q = bdev_get_queue(bdev);
struct bio *bio = __bio_map_kern(q, data, len, gfp_mask);
if (IS_ERR(bio))
{
return PTR_ERR(bio);
}
bio->bi_sector = sector;
bio->bi_bdev = bdev;
if (bio->bi_size < len)
{
struct bio_seq *seq = kmalloc(sizeof(struct bio_seq), gfp_mask);
int n = 1;
if (!seq)
{
// Could not allocate the shared completion state
bio_put(bio);
return -ENOMEM;
}
seq->err = 0;
seq->endio = endio;
seq->private = private;
bio->bi_private = NULL;
bio->bi_end_io = bio_map_kern_seq_endio;
data += bio->bi_size;
len -= bio->bi_size;
sector += bio->bi_size >> 9;
while (len > 0)
{
struct bio *bio2 = __bio_map_kern(q, data, len, gfp_mask);
if (IS_ERR(bio2))
{
// Free previously allocated bio's
kfree(seq);
while (bio)
{
struct bio *t = bio->bi_private;
bio_put(bio);
bio = t;
}
return PTR_ERR(bio2);
}
bio2->bi_bdev = bdev;
// Assign the start sector before advancing it past this bio
bio2->bi_sector = sector;
bio2->bi_private = bio;
bio2->bi_end_io = bio_map_kern_seq_endio;
data += bio2->bi_size;
len -= bio2->bi_size;
sector += bio2->bi_size >> 9;
bio = bio2;
n++;
}
atomic_set(&seq->count, n);
while (bio)
{
struct bio *t = bio->bi_private;
bio->bi_private = seq;
submit_bio(rw, bio);
bio = t;
}
}
else
{
bio->bi_private = private;
bio->bi_end_io = endio;
submit_bio(rw, bio);
}
return 0;
}
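/*
 * A minimal usage sketch (hypothetical caller; sync_io() below and
 * sftl_make_request() above are the real users):
 *
 *	static void my_endio(struct bio *bio, int err)
 *	{
 *		// bi_private holds the value passed as @private
 *		complete((struct completion *)bio->bi_private);
 *		bio_put(bio);
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	if (bio_submit_kern_seq(bdev, buf, len, GFP_KERNEL,
 *			start_sector, &done, my_endio, WRITE))
 *		return; // nothing was submitted, my_endio will not run
 *	wait_for_completion(&done);
 */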
static void endFunc_tryKM2(struct bio *bb, int err)
{
if (bb->bi_private)
{
complete((struct completion*)(bb->bi_private));
}
bio_put(bb);
}
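/* Synchronous helper: submits @buf through bio_submit_kern_seq() and blocks
 * on an on-stack completion until endFunc_tryKM2 signals the last bio. */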
static void sync_io(struct block_device *bdev, unsigned sector, void *buf, unsigned len, int rw)
static void sync_io(struct block_device *bdev, sector_t sector, void *buf, unsigned len, int rw)
{
struct bio *bb;
struct request_queue *q;
DECLARE_COMPLETION_ONSTACK(waithandle);
q = bdev_get_queue(bdev);
bb = bio_map_kern(q, buf, len, GFP_KERNEL);
if (IS_ERR(bb))
return;
bb->bi_sector = sector;
bb->bi_bdev = bdev;
bb->bi_private = &waithandle;
bb->bi_end_io = endFunc_tryKM2;
submit_bio(rw, bb);
if (!(bb->bi_flags & (1 << BIO_UPTODATE)))
int err = bio_submit_kern_seq(bdev, buf, len, GFP_KERNEL, sector, &waithandle, endFunc_tryKM2, rw);
if (err)
{
bio_put(bb);
INFO("I/O error %d", err);
return;
}
wait_for_completion(&waithandle);
bio_put(bb);
}
static void read_maps(struct sftl_dev *dev)
@@ -358,6 +493,7 @@ static void read_maps(struct sftl_dev *dev)
}
// We'll start writing into a free segment
dev->nextfreeseg = (cur_free > max_free ? cur_first : max_first);
INFO("Next free segment = %d", dev->nextfreeseg);
kfree(buf);
}