overwrite code...

master
Vitaliy Filippov 2013-06-02 21:13:09 +04:00
parent 6e333dd219
commit 1206aa80b0
1 changed file with 73 additions and 40 deletions

sftl.c (113 changed lines)

@@ -106,12 +106,11 @@ struct sftl_flush_info
struct bio *next_bio;
u32 random_free[seg_clust];
u32 random_found;
};
struct sftl_overwrite_info
{
struct sftl_flush_info *flush;
u32 cluster;
u32 overwrite_last_cluster;
u32 overwrite_total;
u32 overwrite_current;
char overwrite_do_write;
atomic_t overwrite_pending;
};
static void sftl_search_free_sequence(struct sftl_dev *sftl, u32 *out_cur_first, u32 *out_cur_free)
@@ -183,6 +182,69 @@ static void sftl_search_freeable_sequence(struct sftl_dev *sftl, struct sftl_flu
*out_min_freeable_start = min_freeable_start;
}
static void sftl_continue_overwrite(struct sftl_flush_info *info);
static void sftl_continue_overwrite_callback(struct bio *bio, int err)
{
struct sftl_flush_info *info = bio->bi_private;
bio_put(bio);
sftl_continue_overwrite(info);
}
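// Assemble the next portion of the segment being cleaned: read every live cluster
// from the freeable range into the segment buffer and update the maps; the
// write-back is issued from sftl_overwrite_one once all reads complete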
static void sftl_continue_overwrite(struct sftl_flush_info *info)
{
u32 i, j, k;
struct sftl_dev *sftl = info->sftl;
info->overwrite_total -= info->overwrite_current;
info->overwrite_current = info->overwrite_total < sftl->buf_max ? info->overwrite_total : sftl->buf_max;
atomic_set(&info->overwrite_pending, info->overwrite_current);
for (k = info->overwrite_last_cluster; k < sftl->next_free_end*seg_clust && sftl->buf_size < sftl->buf_max; k++)
{
if (sftl->clust_map[k])
{
// Modify maps
struct sftl_map *buf_map = (struct sftl_map *)(sftl->buf + seg_clust*clust_sz) + sftl->buf_size;
u32 cluster = sftl->clust_map[k]-1;
buf_map->magic[0] = magic[0];
buf_map->magic[1] = magic[1];
buf_map->magic[2] = magic[2];
buf_map->is_erased = 0;
buf_map->block = cluster;
buf_map->ver = sftl->ver[cluster]+1;
buf_map->checksum = sftl_map_checksum(*buf_map);
sftl->map[cluster] = sftl->free_start_seg*seg_clust + sftl->buf_size;
sftl->clust_map[sftl->map[cluster]] = 1 + cluster;
sftl->ver[cluster] = buf_map->ver;
// Read into buffer - will write back from a callback
sftl->buf_size++;
if (sftl->buf_size >= sftl->buf_max || k == sftl->next_free_end*seg_clust-1)
{
info->overwrite_last_cluster = k+1;
info->overwrite_do_write = sftl->buf_size >= sftl->buf_max;
}
bio_submit_kern_seq(sftl->blkdev, sftl->buf + (sftl->buf_size-1)*clust_sz, clust_sz, GFP_KERNEL,
(k/seg_clust)*(seg_clust*clust_blocks+1) + (k%seg_clust)*clust_blocks, info, sftl_overwrite_one, READ);
}
}
}
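// Completion callback for each cluster read issued by sftl_continue_overwrite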
static void sftl_overwrite_one(struct bio *bio, int err)
{
struct sftl_flush_info *info = bio->bi_private;
struct sftl_dev *sftl = info->sftl;
bio_put(bio);
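// Act only once the last pending read has completed (counter reaches zero)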
if (atomic_dec_and_test(&info->overwrite_pending))
{
if (info->overwrite_do_write)
{
bio_submit_kern_seq(sftl->blkdev, sftl->buf, seg_clust*clust_sz + phy_sz, GFP_KERNEL,
sftl->free_start_seg*(seg_clust*clust_blocks+1), info, sftl_continue_overwrite_callback, WRITE);
}
else
{
// If cleaning doesn't compose a full segment, we won't write it
}
}
}
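For reference, a minimal sketch of the segment addressing assumed by the cluster reads above, where each segment holds seg_clust clusters of clust_blocks blocks followed by one map block; the helper name is illustrative and not part of sftl.c:
/* Hypothetical helper, relying on the driver's seg_clust/clust_blocks
 * constants and u32 from <linux/types.h>: translate an index into
 * clust_map[] into the physical block address of that cluster. */
static inline u32 sftl_cluster_to_block(u32 clust_map_index)
{
u32 seg = clust_map_index / seg_clust; /* which segment */
u32 off = clust_map_index % seg_clust; /* cluster inside the segment */
return seg*(seg_clust*clust_blocks + 1) + off*clust_blocks;
}
With such a helper, the read submitted in sftl_continue_overwrite would simply target sftl_cluster_to_block(k).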
// Callback called after flushing buffer the first time during flush
static void sftl_continue_flush(struct bio *bio, int err)
{
@@ -207,6 +269,7 @@ static void sftl_continue_flush(struct bio *bio, int err)
sftl->next_free_start = 0;
sftl->next_free_end = 0;
}
// Finish flushing and complete next_bio
}
else if (sftl->free_end_seg - sftl->free_start_seg <= seg_clust-1)
{
@@ -229,42 +292,12 @@ static void sftl_continue_flush(struct bio *bio, int err)
{
// Best freeable sequence has at least 1 free segment in total
// Free it and continue writing
char *buf = sftl->buf;
struct sftl_overwrite_info *ow;
u32 cluster;
info->overwrite_total = min_freeable_cost;
info->overwrite_current = 0;
info->overwrite_last_cluster = min_freeable_start*seg_clust;
sftl->next_free_start = min_freeable_start;
sftl->next_free_end = min_freeable_start+seg_clust;
for (k = min_freeable_start*seg_clust, i = 0; i < seg_clust; i++)
{
for (j = 0; j < seg_clust; j++, k++)
{
if (sftl->clust_map[k])
{
// Modify maps
struct sftl_map *buf_map = (struct sftl_map *)(sftl->buf + seg_clust*clust_sz) + sftl->buf_size;
cluster = sftl->clust_map[k]-1;
buf_map->magic[0] = magic[0];
buf_map->magic[1] = magic[1];
buf_map->magic[2] = magic[2];
buf_map->is_erased = 0;
buf_map->block = cluster;
buf_map->ver = sftl->ver[cluster]+1;
buf_map->checksum = sftl_map_checksum(*buf_map);
sftl->map[cluster] = sftl->free_start_seg*seg_clust + sftl->buf_size;
sftl->clust_map[sftl->map[cluster]] = 1 + cluster;
sftl->ver[cluster] = buf_map->ver;
// Read into buffer
ow = kmalloc(sizeof(struct sftl_overwrite_info), GFP_KERNEL);
ow->flush = info;
ow->cluster = k;
bio_submit_kern_seq(sftl->blkdev, buf + sftl->buf_size*clust_sz, clust_sz, GFP_KERNEL,
min_freeable_start*(seg_clust*clust_blocks+1) + j*clust_blocks, ow, sftl_overwrite_one, READ);
sftl->buf_size++;
//// Then write back from a callback
//WRITE(sftl, sftl->clust_map[k]-1, buf);
}
}
}
sftl_continue_overwrite(info);
}
else
{