Begin writing code
parent cf155ce511
commit ad11d0d38c

sftl.c | 75

@@ -50,18 +50,24 @@ struct sftl_map {

/* The internal representation of our device */
struct sftl_dev {
	// Device parameters
	u32 size; // device size in physical blocks
	u32 segs; // device size in segments
	u32 reserved_segs; // segments reserved for defragmentation during write
	u32 *map; // virtual-to-real cluster map
	u32 *clust_map; // real-to-virtual cluster map
	u32 *ver; // cluster versions indexed by their virtual positions
	u32 nextfreeclust; // next free cluster pointer
	u32 freeclust, freesegs; // free cluster count, free segment count
	u32 nextfreeseg; // next free available segment

	// Buffer to hold pending writes - will hold up to a complete segment
	char *buf;
	u32 buf_max, buf_size;

	// Kernel objects
	struct gendisk *gd;
	struct block_device *blkdev;
	struct request_queue *queue;
	spinlock_t spinlock;
	struct mutex write_mutex;
	struct list_head list;
};
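
Taken together, the new fields turn sftl into a small log-structured translation layer: map answers "where does virtual cluster V live right now", clust_map is the reverse lookup needed when a segment has to be reclaimed, and ver lets read_maps() keep only the newest copy when several segments claim the same virtual cluster. Below is a minimal userspace sketch of that bookkeeping, mirroring the updates performed in sftl_make_request(); the struct ftl wrapper, the SEG_CLUST value and the remap_write() helper are illustrative assumptions, not part of the driver.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SEG_CLUST 64                    /* clusters per segment - illustrative value */

struct ftl {
	uint32_t *map;                  /* virtual -> real cluster */
	uint32_t *clust_map;            /* real -> virtual cluster */
	uint32_t *ver;                  /* per-virtual-cluster version */
	uint32_t nextfreeseg;           /* segment currently being filled */
	uint32_t buf_size;              /* clusters already buffered in that segment */
};

/* Remap virtual cluster `virt` to the next free slot of the segment being
 * filled, updating both directions of the map and the version counter. */
static uint32_t remap_write(struct ftl *f, uint32_t virt)
{
	uint32_t real = f->nextfreeseg * SEG_CLUST + f->buf_size;

	f->map[virt] = real;            /* forward map used by reads */
	f->clust_map[real] = virt;      /* reverse map used by cleaning */
	f->ver[virt]++;                 /* newer copy wins when maps are rebuilt */
	f->buf_size++;
	return real;
}

int main(void)
{
	struct ftl f = { .nextfreeseg = 2 };

	f.map = calloc(1024, sizeof(uint32_t));
	f.clust_map = calloc(1024, sizeof(uint32_t));
	f.ver = calloc(1024, sizeof(uint32_t));
	printf("virt 7 -> real %u\n", (unsigned)remap_write(&f, 7)); /* 2*64 + 0 = 128 */
	printf("virt 7 -> real %u\n", (unsigned)remap_write(&f, 7)); /* 2*64 + 1 = 129 */
	return 0;
}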

@@ -73,6 +79,8 @@ static DEFINE_IDA(sftl_index_ida);
/* Our block device list, used in cleanup_module */
static LIST_HEAD(sftl_device_list);

static void sync_io(struct block_device *bdev, unsigned sector, void *buf, unsigned len, int rw);

static void sftl_complete_seg(struct bio *bio, int err)
{
	bio_endio((struct bio *)bio->bi_private, err);

@@ -121,9 +129,47 @@ static void sftl_make_request(struct request_queue *q, struct bio *bio)
	}
	else
	{
		// FIXME Is concurrent writing OK? Do we need any write locks?
		struct sftl_map *buf_map = (struct sftl_map *)(sftl->buf + seg_clust*clust_sz) + sftl->buf_size;
		char *buffer = __bio_kmap_atomic(bio, 0, KM_USER0);
		memcpy(sftl->buf + clust_sz*sftl->buf_size, buffer, clust_sz);
		__bio_kunmap_atomic(bio, KM_USER0);
		buf_map->magic = magic;
		buf_map->block = bio->bi_sector/clust_blocks;
		buf_map->ver = sftl->ver[bio->bi_sector/clust_blocks]+1;
		buf_map->checksum = sftl_map_checksum(*buf_map);
		sftl->map[bio->bi_sector/clust_blocks] = sftl->nextfreeseg*seg_clust + sftl->buf_size;
		sftl->clust_map[sftl->nextfreeseg*seg_clust + sftl->buf_size] = bio->bi_sector/clust_blocks;
		sftl->ver[bio->bi_sector/clust_blocks] = buf_map->ver;
		sftl->buf_size++;
		INFO("Write request (starting sector = %lu, count = %lu)",
			(unsigned long)bio->bi_sector, (unsigned long)bio_sectors(bio));
		bio_endio(bio, -EIO);
		if (sftl->buf_size >= sftl->buf_max)
		{
			// Need to flush the buffer before completing this bio
			struct request_queue *q = bdev_get_queue(sftl->blkdev);
			struct bio *bb = bio_map_kern(q, sftl->buf, seg_clust*clust_sz+phy_sz, GFP_KERNEL);
			if (IS_ERR(bb))
				return;
			bb->bi_sector = sftl->nextfreeseg*(seg_clust*clust_blocks+1);
			bb->bi_bdev = sftl->blkdev;
			bb->bi_private = bio;
			bb->bi_end_io = sftl_complete_seg;
			submit_bio(WRITE, bb);
			if (!(bb->bi_flags & (1 << BIO_UPTODATE)))
			{
				bio_put(bb);
				bio_endio(bio, -EIO);
			}
			// FIXME Is it correct?.. I think no...
			sftl->buf_size = 0;
			// FIXME Correctly adjust free segment address
			sftl->nextfreeseg++;
		}
		else
		{
			bio_endio(bio, -EIO);
		}
	}
}
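
The write buffer has a fixed layout: seg_clust*clust_sz bytes of cluster data followed by one trailing physical sector of phy_sz bytes holding a struct sftl_map record per buffered cluster. That is why buf_map is computed as (struct sftl_map *)(sftl->buf + seg_clust*clust_sz) + sftl->buf_size and why the flush bio covers exactly seg_clust*clust_sz + phy_sz bytes. A minimal sketch of that arithmetic, assuming illustrative sizes and a four-u32 sftl_map record (the real field widths are not visible in this diff):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define SEG_CLUST 4                     /* clusters per segment (illustrative) */
#define CLUST_SZ  4096                  /* bytes per cluster (illustrative) */
#define PHY_SZ    512                   /* physical sector size (illustrative) */

struct sftl_map {                       /* assumed shape of the on-media record */
	uint32_t magic, block, ver, checksum;
};

/* Byte offset of the i-th buffered cluster's data inside the segment buffer. */
static size_t data_off(unsigned i)
{
	return (size_t)i * CLUST_SZ;
}

/* Byte offset of the i-th cluster's map record: all records live in the
 * final physical sector, right after the data area. */
static size_t map_off(unsigned i)
{
	return (size_t)SEG_CLUST * CLUST_SZ + i * sizeof(struct sftl_map);
}

int main(void)
{
	/* The whole buffer is seg_clust*clust_sz + phy_sz bytes - the size
	 * handed to bio_map_kern() when the segment is flushed. */
	assert(data_off(1) == CLUST_SZ);
	assert(map_off(SEG_CLUST - 1) + sizeof(struct sftl_map)
	       <= SEG_CLUST * CLUST_SZ + PHY_SZ);
	return 0;
}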

@@ -173,6 +219,12 @@ static void sftl_free_device(struct sftl_dev *dev)
{
	if (!dev)
		return;
	if (dev->buf_size)
	{
		sync_io(dev->blkdev, dev->nextfreeseg*(seg_clust*clust_blocks+1), dev->buf, seg_clust*clust_sz+phy_sz, WRITE);
		dev->buf_size = 0;
		// Don't care about adjusting nextfreeseg because we're freeing the device
	}
	if (dev->gd)
	{
		del_gendisk(dev->gd);

@@ -186,8 +238,12 @@ static void sftl_free_device(struct sftl_dev *dev)
	}
	if (dev->map)
		vfree(dev->map);
	if (dev->clust_map)
		vfree(dev->clust_map);
	if (dev->ver)
		vfree(dev->ver);
	if (dev->buf)
		kfree(dev->buf);
	kfree(dev);
}

@@ -217,7 +273,7 @@ static void endFunc_tryKM2(struct bio *bb, int err)
	}
}

-static void sync_read(struct block_device *bdev, unsigned sector, void *buf, unsigned len)
+static void sync_io(struct block_device *bdev, unsigned sector, void *buf, unsigned len, int rw)
{
	struct bio *bb;
	struct request_queue *q;

@@ -230,7 +286,7 @@ static void sync_read(struct block_device *bdev, unsigned sector, void *buf, uns
	bb->bi_bdev = bdev;
	bb->bi_private = &waithandle;
	bb->bi_end_io = endFunc_tryKM2;
-	submit_bio(READ, bb);
+	submit_bio(rw, bb);
	if (!(bb->bi_flags & (1 << BIO_UPTODATE)))
	{
		bio_put(bb);

@@ -250,7 +306,7 @@ static void read_maps(struct sftl_dev *dev)
	INFO("reading translation maps");
	for (i = 0; i < dev->segs; i++)
	{
-		sync_read(dev->blkdev, (i+1)*(seg_clust*clust_blocks+1) - 1, buf, phy_sz);
+		sync_io(dev->blkdev, (i+1)*(seg_clust*clust_blocks+1) - 1, buf, phy_sz, READ);
		for (seg = 1, j = 0; j < seg_clust; j++)
		{
			if (buf[j].magic == magic && buf[j].checksum == sftl_map_checksum(buf[j]) &&

@@ -301,7 +357,7 @@ static void read_maps(struct sftl_dev *dev)
		// FIXME: Need to defragment free space on the device...
	}
	// We'll start writing into a free segment
-	dev->nextfreeclust = (cur_free > max_free ? cur_first : max_first) * seg_clust;
+	dev->nextfreeseg = (cur_free > max_free ? cur_first : max_first);
	kfree(buf);
}
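
Both the flush path and read_maps() address the medium in strides of seg_clust*clust_blocks + 1 sectors per segment: the data clusters come first and the last sector of each segment carries the map records. Segment i therefore starts at sector i*(seg_clust*clust_blocks+1) and its map sector is (i+1)*(seg_clust*clust_blocks+1) - 1, matching the addresses used in sftl_make_request(), sftl_free_device() and the read_maps() loop above. A small sketch of that geometry with illustrative constants (not the driver's real parameters):

#include <stdint.h>
#include <stdio.h>

#define SEG_CLUST    4    /* clusters per segment (illustrative) */
#define CLUST_BLOCKS 8    /* 512-byte sectors per cluster (illustrative) */

/* Sectors occupied by one segment: data clusters plus one trailing map sector. */
#define SEG_SECTORS  (SEG_CLUST * CLUST_BLOCKS + 1)

/* First sector of segment i - where a flushed segment buffer is written. */
static uint64_t seg_start(uint32_t i)
{
	return (uint64_t)i * SEG_SECTORS;
}

/* Map sector of segment i - the sector read for every segment while
 * rebuilding the translation maps. */
static uint64_t seg_map_sector(uint32_t i)
{
	return (uint64_t)(i + 1) * SEG_SECTORS - 1;
}

int main(void)
{
	for (uint32_t i = 0; i < 3; i++)
		printf("segment %u: data starts at sector %llu, map sector is %llu\n",
		       (unsigned)i, (unsigned long long)seg_start(i),
		       (unsigned long long)seg_map_sector(i));
	return 0;
}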

@@ -357,11 +413,12 @@ static struct sftl_dev *add_device(char *devname)
	memset(dev->ver, 0, sizeof(u32) * (dev->segs-dev->reserved_segs) * seg_clust);
	dev->clust_map = vmalloc(sizeof(u32) * dev->segs * seg_clust);
	memset(dev->clust_map, 0, sizeof(u32) * dev->segs * seg_clust);

	allocated_memory = sizeof(u32) * seg_clust * (dev->segs*3 - dev->reserved_segs*2);

	dev->buf = kzalloc(seg_clust*clust_sz + phy_sz, GFP_KERNEL);
	dev->buf_max = seg_clust;

	/* Get a request queue */
	spin_lock_init(&dev->spinlock);
	dev->queue = blk_alloc_queue(GFP_KERNEL);
	if (!dev->queue)
		goto devinit_err;
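
The allocated_memory line is the sum of the three translation arrays: ver and (presumably) map are sized for the segs - reserved_segs user-visible segments, while clust_map covers all segs segments, which collapses to sizeof(u32) * seg_clust * (segs*3 - reserved_segs*2). A quick check of that arithmetic with made-up numbers:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int main(void)
{
	const size_t u = sizeof(uint32_t);
	const size_t seg_clust = 4, segs = 100, reserved_segs = 10;

	size_t map       = u * (segs - reserved_segs) * seg_clust; /* virtual -> real */
	size_t ver       = u * (segs - reserved_segs) * seg_clust; /* cluster versions */
	size_t clust_map = u * segs * seg_clust;                   /* real -> virtual */

	/* Matches the closed form used in add_device(). */
	assert(map + ver + clust_map == u * seg_clust * (segs * 3 - reserved_segs * 2));
	return 0;
}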