Reading works! Using a stupid make_request_fn, but works :)
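For context, the "stupid make_request_fn" approach referred to above boils down to the wiring below: allocate a bare queue with blk_alloc_queue() and register a bio-level entry point with blk_queue_make_request(), so the driver sees raw bios instead of merged struct requests. This is only a minimal sketch against the circa-3.x block API used throughout this driver; my_dev, my_make_request and my_setup_queue are placeholder names, not part of sftl.c.

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Hypothetical per-device state, standing in for struct sftl_dev */
struct my_dev {
	struct request_queue *queue;
};

/* Bio-based entry point: the block layer hands every bio straight to us,
 * bypassing the elevator and request merging (void signature as in 3.x). */
static void my_make_request(struct request_queue *q, struct bio *bio)
{
	struct my_dev *dev = q->queuedata;

	(void)dev;          /* ...inspect bio->bi_sector, remap, resubmit... */
	bio_endio(bio, 0);  /* complete immediately in this toy sketch */
}

static int my_setup_queue(struct my_dev *dev)
{
	dev->queue = blk_alloc_queue(GFP_KERNEL);  /* bare queue, no request handling */
	if (!dev->queue)
		return -ENOMEM;
	blk_queue_make_request(dev->queue, my_make_request);
	dev->queue->queuedata = dev;               /* recovered in the callback */
	return 0;
}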

master
Vitaliy Filippov 2013-05-19 02:47:35 +04:00
parent bf609346c3
commit 8ca6ba42f9
1 changed file with 50 additions and 33 deletions

sftl.c

@@ -69,46 +69,57 @@ static DEFINE_IDA(sftl_index_ida);
 /* Our block device list, used in cleanup_module */
 static LIST_HEAD(sftl_device_list);
 /*
  * Handle an I/O request.
  */
-static void sftl_transfer(struct sftl_dev *dev, sector_t sector,
-	unsigned long nsect, char *buffer, int write)
+static void sftl_complete_seg(struct bio *bio, int err)
 {
-	unsigned long offset = sector * clust_blocks;
-	unsigned long nblocks = nsect * clust_blocks;
-	if ((offset + nblocks) > dev->size)
-	{
-		INFO("Beyond-end write (starting sector = %ld, count = %ld)", offset, nblocks);
-		return;
-	}
-	// TODO
+	bio_endio((struct bio *)bio->bi_private, err);
 }
-static void sftl_request(struct request_queue *q)
+static void sftl_make_request(struct request_queue *q, struct bio *bio)
 {
-	struct request *req;
-	req = blk_fetch_request(q);
-	while (req != NULL)
+	struct sftl_dev *sftl = (struct sftl_dev*)q->queuedata;
+	BUG_ON(bio->bi_vcnt > 1);
+	BUG_ON(bio->bi_sector % clust_blocks);
+	BUG_ON(bio->bi_size != clust_sz);
+	if (bio->bi_sector > sftl->size)
 	{
-		// blk_fs_request() was removed in 2.6.36 - many thanks to
-		// Christian Paro for the heads up and fix...
-		//if (!blk_fs_request(req)) {
-		if (req == NULL || (req->cmd_type != REQ_TYPE_FS))
+		INFO("Beyond-end i/o (starting sector = %ld)", (long)bio->bi_sector);
+		bio_endio(bio, -EIO);
+	}
+	else if (!bio_rw(bio))
+	{
+		if (!sftl->ver[bio->bi_sector/clust_blocks])
 		{
-			printk (KERN_NOTICE "Skip non-CMD request\n");
-			__blk_end_request_all(req, -EIO);
-			continue;
+			// version=0 => unallocated cluster
+			zero_fill_bio(bio);
+			bio_endio(bio, 0);
 		}
-		sftl_transfer((struct sftl_dev *)q->queuedata, blk_rq_pos(req),
-			blk_rq_cur_sectors(req), req->buffer, rq_data_dir(req));
-		if (!__blk_end_request_cur(req, 0))
+		else
 		{
-			req = blk_fetch_request(q);
+			struct block_device *bdev = sftl->blkdev;
+			struct request_queue *q = bdev_get_queue(bdev);
+			struct bio *bb = bio_alloc(GFP_KERNEL, 1);
+			u32 m;
+			if (IS_ERR(bb))
+				return;
+			bio_add_pc_page(q, bb, bio_page(bio), bio->bi_size, bio_offset(bio));
+			m = sftl->map[bio->bi_sector/clust_blocks];
+			bb->bi_sector = m/seg_sz * (seg_sz*clust_blocks + 1) + (m%seg_sz)*clust_blocks;
+			bb->bi_bdev = bdev;
+			bb->bi_private = bio;
+			bb->bi_end_io = sftl_complete_seg;
+			submit_bio(READ, bb);
+			if (!(bb->bi_flags & (1 << BIO_UPTODATE)))
+			{
+				bio_endio(bio, -EIO);
+				bio_put(bb);
+			}
 		}
 	}
+	else
+	{
+		INFO("Write request (starting sector = %ld, count = %ld)", (long)bio->bi_sector, (long)bio_sectors(bio));
+		bio_endio(bio, -EIO);
+	}
 }
 /*
@@ -234,7 +245,8 @@ static void read_maps(struct sftl_dev *dev)
 		sync_read(dev->blkdev, i*(seg_sz*clust_blocks+1), buf, phy_sz);
 		for (seg = 1, j = 0; j < seg_sz; j++)
 		{
-			if (buf[j].magic == magic && buf[j].checksum == sftl_map_checksum(buf[j]))
+			if (buf[j].magic == magic && buf[j].checksum == sftl_map_checksum(buf[j]) &&
+				dev->ver[i*seg_sz+j] < buf[j].ver)
 			{
 				dev->map[i*seg_sz+j] = buf[j].block;
 				dev->ver[i*seg_sz+j] = buf[j].ver;
@@ -293,13 +305,18 @@ static struct sftl_dev *add_device(char *devname)
 	dev->reserved_segs = seg_sz * (seg_sz+1);
 	dev->map = vmalloc(sizeof(u32) * dev->size);
 	dev->ver = vmalloc(sizeof(u32) * dev->size);
+	memset(dev->ver, 0, sizeof(u32) * dev->size);
 	/* Get a request queue */
 	spin_lock_init(&dev->spinlock);
-	dev->queue = blk_init_queue(sftl_request, &dev->spinlock);
-	if (dev->queue == NULL)
+	dev->queue = blk_alloc_queue(GFP_KERNEL);
+	if (!dev->queue)
 		goto devinit_err;
+	blk_queue_make_request(dev->queue, sftl_make_request);
 	dev->queue->queuedata = dev;
+	/* FIXME: It's OK when PAGE_SIZE==clust_sz==4096
+	   but we should ALWAYS support bio of size==PAGE_SIZE */
+	blk_queue_max_hw_sectors(dev->queue, clust_blocks);
 	blk_queue_logical_block_size(dev->queue, clust_sz);
 	/* Allocate index for the new disk */
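
For reference, the remapping done in sftl_make_request() above can be read as follows: the underlying device is divided into segments of seg_sz*clust_blocks data sectors plus one extra sector per segment (apparently the per-segment map/version records that read_maps() scans), and mapped cluster slot m lives in segment m/seg_sz at slot m%seg_sz. A sketch only; the helper name is mine, while seg_sz and clust_blocks are the driver's segment-size and cluster-size parameters.

#include <linux/types.h>

/* Physical start sector of mapped cluster slot m, mirroring the expression
 * bb->bi_sector = m/seg_sz * (seg_sz*clust_blocks + 1) + (m%seg_sz)*clust_blocks
 * from sftl_make_request(). */
static inline sector_t sftl_cluster_to_sector(u32 m, u32 seg_sz, u32 clust_blocks)
{
	sector_t seg_start = (sector_t)(m / seg_sz) * (seg_sz * clust_blocks + 1);
	return seg_start + (sector_t)(m % seg_sz) * clust_blocks;
}

With, say, seg_sz = 512 entries per segment and clust_blocks = 8 sectors per cluster (illustrative values, not the module defaults), mapped cluster 1000 falls into segment 1, which starts at sector 1*(512*8+1) = 4097, and the cluster itself begins 488*8 = 3904 sectors further in, at sector 8001.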