diff --git a/fuseops/convert.go b/fuseops/convert.go
index 4191af4..a77a975 100644
--- a/fuseops/convert.go
+++ b/fuseops/convert.go
@@ -36,7 +36,10 @@ func Convert(r bazilfuse.Request, logger *log.Logger) (o Op) {
 
 	switch typed := r.(type) {
 	case *bazilfuse.InitRequest:
-		to := &InitOp{}
+		to := &InitOp{
+			maxReadahead: typed.MaxReadahead,
+		}
+
 		o = to
 		co = &to.commonOp
 
diff --git a/fuseops/ops.go b/fuseops/ops.go
index 93ea326..08f12da 100644
--- a/fuseops/ops.go
+++ b/fuseops/ops.go
@@ -48,6 +48,8 @@ type Op interface {
 // mount to succeed.
 type InitOp struct {
 	commonOp
+
+	maxReadahead uint32
 }
 
 func (o *InitOp) Respond(err error) {
@@ -56,10 +58,50 @@ func (o *InitOp) Respond(err error) {
 		return
 	}
 
-	resp := &bazilfuse.InitResponse{}
+	resp := bazilfuse.InitResponse{}
+
+	// Ask the Linux kernel for larger write requests.
+	//
+	// As of 2015-03-26, the behavior in the kernel is:
+	//
+	//  *  (http://goo.gl/jMKHMZ, http://goo.gl/XTF4ZH) Cap the max write size at
+	//     the maximum of 4096 and init_response->max_write.
+	//
+	//  *  (http://goo.gl/gEIvHZ) If FUSE_BIG_WRITES isn't set, don't return more
+	//     than one page.
+	//
+	//  *  (http://goo.gl/4RLhxZ, http://goo.gl/hi0Cm2) Never write more than
+	//     FUSE_MAX_PAGES_PER_REQ pages (128 KiB on x86).
+	//
+	// 4 KiB is crazy small. Ask for significantly more, and take what the kernel
+	// will give us.
+	const maxWrite = 1 << 21
+	resp.Flags |= bazilfuse.InitBigWrites
+	resp.MaxWrite = maxWrite
+
+	// Ask the Linux kernel for larger read requests.
+	//
+	// As of 2015-03-26, the behavior in the kernel is:
+	//
+	//  *  (http://goo.gl/bQ1f1i, http://goo.gl/HwBrR6) Set the local variable
+	//     ra_pages to be init_response->max_readahead divided by the page size.
+	//
+	//  *  (http://goo.gl/gcIsSh, http://goo.gl/LKV2vA) Set
+	//     backing_dev_info::ra_pages to the min of that value and what was sent
+	//     in the request's max_readahead field.
+	//
+	//  *  (http://goo.gl/u2SqzH) Use backing_dev_info::ra_pages when deciding
+	//     how much to read ahead.
+	//
+	//  *  (http://goo.gl/JnhbdL) Don't read ahead at all if that field is zero.
+	//
+	// Reading a page at a time is a drag. Ask for as much as the kernel is
+	// willing to give us.
+	resp.MaxReadahead = o.maxReadahead
+
+	// Respond.
 	o.commonOp.logger.Printf("Responding: %v", &resp)
-	o.r.(*bazilfuse.InitRequest).Respond(resp)
+	o.r.(*bazilfuse.InitRequest).Respond(&resp)
 }
 
 ////////////////////////////////////////////////////////////////////////
diff --git a/samples/memfs/memfs_test.go b/samples/memfs/memfs_test.go
index a5d9fd9..40f84cb 100644
--- a/samples/memfs/memfs_test.go
+++ b/samples/memfs/memfs_test.go
@@ -15,6 +15,7 @@
 package memfs_test
 
 import (
+	"bytes"
 	"io"
 	"io/ioutil"
 	"os"
@@ -825,6 +826,27 @@ func (t *MemFSTest) WriteAtDoesntChangeOffset_AppendMode() {
 	ExpectEq(4, offset)
 }
 
+func (t *MemFSTest) LargeFile() {
+	var err error
+
+	// Create a file.
+	f, err := os.Create(path.Join(t.Dir, "foo"))
+	t.ToClose = append(t.ToClose, f)
+	AssertEq(nil, err)
+
+	// Copy in large contents.
+	const size = 1 << 24
+	contents := bytes.Repeat([]byte{0x20}, size)
+
+	_, err = io.Copy(f, bytes.NewReader(contents))
+	AssertEq(nil, err)
+
+	// Read the full contents of the file.
+	contents, err = ioutil.ReadFile(f.Name())
+	AssertEq(nil, err)
+	ExpectEq(size, len(contents))
+}
+
 func (t *MemFSTest) AppendMode() {
 	var err error
 	var n int
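
Note (not part of the patch): the INIT-time negotiation that the ops.go comments describe can be sketched in isolation. The program below is a paraphrase of the cited kernel behavior, not code from this repository; the function names and the 128 KiB kernel offer are hypothetical, chosen only to illustrate why leaving MaxReadahead at zero disabled readahead before this change.

package main

import "fmt"

// negotiateReadahead paraphrases the readahead links in the patch: the
// kernel's effective readahead is the min of what it offered in the INIT
// request and what the server echoes back in init_response->max_readahead.
// A zero response disables readahead entirely.
func negotiateReadahead(kernelOffered, serverEchoed uint32) uint32 {
	if serverEchoed < kernelOffered {
		return serverEchoed
	}
	return kernelOffered
}

// negotiateMaxWrite paraphrases the write-size links: without
// FUSE_BIG_WRITES the kernel sends at most one page per write; with it,
// writes are capped below by max(4096, init_response->max_write) and above
// by FUSE_MAX_PAGES_PER_REQ pages (128 KiB on x86).
func negotiateMaxWrite(serverMaxWrite uint32, bigWrites bool, pageSize uint32) uint32 {
	if !bigWrites {
		return pageSize
	}
	if serverMaxWrite < 4096 {
		serverMaxWrite = 4096
	}
	const maxPagesPerReqBytes = 128 << 10 // FUSE_MAX_PAGES_PER_REQ pages on x86
	if serverMaxWrite > maxPagesPerReqBytes {
		serverMaxWrite = maxPagesPerReqBytes
	}
	return serverMaxWrite
}

func main() {
	// With the patch: MaxReadahead echoes the kernel's offer, so readahead
	// is whatever the kernel is willing to give (hypothetical 128 KiB offer).
	fmt.Println(negotiateReadahead(128<<10, 128<<10)) // 131072

	// Before the patch: the response field was left at zero, so the kernel
	// did no readahead at all.
	fmt.Println(negotiateReadahead(128<<10, 0)) // 0

	// The patch sets InitBigWrites and MaxWrite = 1<<21; the kernel still
	// clamps to its own per-request page limit.
	fmt.Println(negotiateMaxWrite(1<<21, true, 4096)) // 131072
}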