Use 1 R/W spinlock

master
Vitaliy Filippov 2013-06-01 02:49:24 +04:00
parent 403c0a5df6
commit ba21f509c2
1 changed file with 22 additions and 16 deletions

38
sftl.c
View File

@ -163,56 +163,62 @@ static void sftl_make_request(struct request_queue *q, struct bio *bio)
}
else
{
// R/W locking using 1 R/W spinlock and 1 event.
//
// Reading:
// * Take read lock
// * Check if requested cluster is mapped into buffer
// * If yes:
// ** Read from the buffer
// * If no:
// ** Initiate block read operation
// * Unlock
//
// Writing:
// (Start):
// * Take write lock
// * Check for free space in buffer
// * If sufficient:
// ** Take write lock on translation maps so readers won't get them partially modified
// ** Write current bio into buffer
// ** Modify translation maps
// ** Unlock translation maps
// * If insufficient:
// ** (Insufficient) Check flush flag (no need for atomic/etc as already within buffer lock)
// ** If someone is already flushing:
// *** Unlock
// *** Wait until flushing ends using an event
// *** Goto (Start)
// ** If no one is flushing yet:
// *** Set flush flag
// *** Remember current bio and initiate (Flush) operation
// * Unlock
//
// After (Flush) operation ends:
// * Take write lock (writers are already blocked, this is to block readers)
// * Clear buffer
// * If the free sequence pointer can be moved without cleaning:
// ** Move pointer
// ** Perform own remembered write operation
// ** Unset flush flag
// ** Unlock
// ** Wake up waiting writers
// * If not:
// ** Initiate cleaning process
// ** Unlock
//
// After cleaning operation ends:
// * Take write lock
// * Modify translation maps
// * Unlock translation maps
// * Take write lock on the buffer
// * Move free sequence pointer
// * If there are no more pending cleaning operations:
// ** Perform own remembered write operation:
// *** Take write lock on translation maps so readers won't get them partially modified
// *** Write current bio into buffer
// *** Modify translation maps
// *** Unlock translation maps
// ** Unset flush flag
// ** Unlock
// ** Wake up waiting writers
// * Else:
// ** Initiate next cleaning operation
// ** Unlock
struct sftl_map *buf_map = (struct sftl_map *)(sftl->buf + seg_clust*clust_sz) + sftl->buf_size;
char *buffer = __bio_kmap_atomic(bio, 0, KM_USER0);
memcpy(sftl->buf + clust_sz*sftl->buf_size, buffer, clust_sz);