diff --git a/block.c b/block.c index d2dac3dce9..30d64e6ca5 100644 --- a/block.c +++ b/block.c @@ -2837,7 +2837,7 @@ bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs) { BlockDriverInfo bdi; - if (bs->backing || !(bs->open_flags & BDRV_O_UNMAP)) { + if (!(bs->open_flags & BDRV_O_UNMAP)) { return false; } diff --git a/block/dirty-bitmap.c b/block/dirty-bitmap.c index 4902ca557f..f2bfdcfdea 100644 --- a/block/dirty-bitmap.c +++ b/block/dirty-bitmap.c @@ -326,14 +326,14 @@ void bdrv_dirty_iter_init(BdrvDirtyBitmap *bitmap, HBitmapIter *hbi) } void bdrv_set_dirty_bitmap(BdrvDirtyBitmap *bitmap, - int64_t cur_sector, int nr_sectors) + int64_t cur_sector, int64_t nr_sectors) { assert(bdrv_dirty_bitmap_enabled(bitmap)); hbitmap_set(bitmap->bitmap, cur_sector, nr_sectors); } void bdrv_reset_dirty_bitmap(BdrvDirtyBitmap *bitmap, - int64_t cur_sector, int nr_sectors) + int64_t cur_sector, int64_t nr_sectors) { assert(bdrv_dirty_bitmap_enabled(bitmap)); hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors); @@ -361,7 +361,7 @@ void bdrv_undo_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *in) } void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, - int nr_sectors) + int64_t nr_sectors) { BdrvDirtyBitmap *bitmap; QLIST_FOREACH(bitmap, &bs->dirty_bitmaps, list) { diff --git a/block/gluster.c b/block/gluster.c index 406c1e6357..296bd9929e 100644 --- a/block/gluster.c +++ b/block/gluster.c @@ -11,7 +11,27 @@ #include #include "block/block_int.h" #include "qapi/error.h" +#include "qapi/qmp/qerror.h" #include "qemu/uri.h" +#include "qemu/error-report.h" + +#define GLUSTER_OPT_FILENAME "filename" +#define GLUSTER_OPT_VOLUME "volume" +#define GLUSTER_OPT_PATH "path" +#define GLUSTER_OPT_TYPE "type" +#define GLUSTER_OPT_SERVER_PATTERN "server." +#define GLUSTER_OPT_HOST "host" +#define GLUSTER_OPT_PORT "port" +#define GLUSTER_OPT_TO "to" +#define GLUSTER_OPT_IPV4 "ipv4" +#define GLUSTER_OPT_IPV6 "ipv6" +#define GLUSTER_OPT_SOCKET "socket" +#define GLUSTER_OPT_DEBUG "debug" +#define GLUSTER_DEFAULT_PORT 24007 +#define GLUSTER_DEBUG_DEFAULT 4 +#define GLUSTER_DEBUG_MAX 9 + +#define GERR_INDEX_HINT "hint: check in 'server' array index '%d'\n" typedef struct GlusterAIOCB { int64_t size; @@ -28,27 +48,141 @@ typedef struct BDRVGlusterState { int debug_level; } BDRVGlusterState; -typedef struct GlusterConf { - char *server; - int port; - char *volname; - char *image; - char *transport; - int debug_level; -} GlusterConf; +typedef struct BDRVGlusterReopenState { + struct glfs *glfs; + struct glfs_fd *fd; +} BDRVGlusterReopenState; -static void qemu_gluster_gconf_free(GlusterConf *gconf) -{ - if (gconf) { - g_free(gconf->server); - g_free(gconf->volname); - g_free(gconf->image); - g_free(gconf->transport); - g_free(gconf); + +static QemuOptsList qemu_gluster_create_opts = { + .name = "qemu-gluster-create-opts", + .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head), + .desc = { + { + .name = BLOCK_OPT_SIZE, + .type = QEMU_OPT_SIZE, + .help = "Virtual disk size" + }, + { + .name = BLOCK_OPT_PREALLOC, + .type = QEMU_OPT_STRING, + .help = "Preallocation mode (allowed values: off, full)" + }, + { + .name = GLUSTER_OPT_DEBUG, + .type = QEMU_OPT_NUMBER, + .help = "Gluster log level, valid range is 0-9", + }, + { /* end of list */ } } -} +}; -static int parse_volume_options(GlusterConf *gconf, char *path) +static QemuOptsList runtime_opts = { + .name = "gluster", + .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head), + .desc = { + { + .name = GLUSTER_OPT_FILENAME, + .type = QEMU_OPT_STRING, + 
.help = "URL to the gluster image", + }, + { + .name = GLUSTER_OPT_DEBUG, + .type = QEMU_OPT_NUMBER, + .help = "Gluster log level, valid range is 0-9", + }, + { /* end of list */ } + }, +}; + +static QemuOptsList runtime_json_opts = { + .name = "gluster_json", + .head = QTAILQ_HEAD_INITIALIZER(runtime_json_opts.head), + .desc = { + { + .name = GLUSTER_OPT_VOLUME, + .type = QEMU_OPT_STRING, + .help = "name of gluster volume where VM image resides", + }, + { + .name = GLUSTER_OPT_PATH, + .type = QEMU_OPT_STRING, + .help = "absolute path to image file in gluster volume", + }, + { + .name = GLUSTER_OPT_DEBUG, + .type = QEMU_OPT_NUMBER, + .help = "Gluster log level, valid range is 0-9", + }, + { /* end of list */ } + }, +}; + +static QemuOptsList runtime_type_opts = { + .name = "gluster_type", + .head = QTAILQ_HEAD_INITIALIZER(runtime_type_opts.head), + .desc = { + { + .name = GLUSTER_OPT_TYPE, + .type = QEMU_OPT_STRING, + .help = "tcp|unix", + }, + { /* end of list */ } + }, +}; + +static QemuOptsList runtime_unix_opts = { + .name = "gluster_unix", + .head = QTAILQ_HEAD_INITIALIZER(runtime_unix_opts.head), + .desc = { + { + .name = GLUSTER_OPT_SOCKET, + .type = QEMU_OPT_STRING, + .help = "socket file path)", + }, + { /* end of list */ } + }, +}; + +static QemuOptsList runtime_tcp_opts = { + .name = "gluster_tcp", + .head = QTAILQ_HEAD_INITIALIZER(runtime_tcp_opts.head), + .desc = { + { + .name = GLUSTER_OPT_TYPE, + .type = QEMU_OPT_STRING, + .help = "tcp|unix", + }, + { + .name = GLUSTER_OPT_HOST, + .type = QEMU_OPT_STRING, + .help = "host address (hostname/ipv4/ipv6 addresses)", + }, + { + .name = GLUSTER_OPT_PORT, + .type = QEMU_OPT_NUMBER, + .help = "port number on which glusterd is listening (default 24007)", + }, + { + .name = "to", + .type = QEMU_OPT_NUMBER, + .help = "max port number, not supported by gluster", + }, + { + .name = "ipv4", + .type = QEMU_OPT_BOOL, + .help = "ipv4 bool value, not supported by gluster", + }, + { + .name = "ipv6", + .type = QEMU_OPT_BOOL, + .help = "ipv6 bool value, not supported by gluster", + }, + { /* end of list */ } + }, +}; + +static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path) { char *p, *q; @@ -62,31 +196,29 @@ static int parse_volume_options(GlusterConf *gconf, char *path) if (*p == '\0') { return -EINVAL; } - gconf->volname = g_strndup(q, p - q); + gconf->volume = g_strndup(q, p - q); - /* image */ + /* path */ p += strspn(p, "/"); if (*p == '\0') { return -EINVAL; } - gconf->image = g_strdup(p); + gconf->path = g_strdup(p); return 0; } /* - * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...] + * file=gluster[+transport]://[host[:port]]/volume/path[?socket=...] * * 'gluster' is the protocol. * * 'transport' specifies the transport type used to connect to gluster * management daemon (glusterd). Valid transport types are - * tcp, unix and rdma. If a transport type isn't specified, then tcp - * type is assumed. + * tcp or unix. If a transport type isn't specified, then tcp type is assumed. * - * 'server' specifies the server where the volume file specification for - * the given volume resides. This can be either hostname, ipv4 address - * or ipv6 address. ipv6 address needs to be within square brackets [ ]. - * If transport type is 'unix', then 'server' field should not be specified. + * 'host' specifies the host where the volume file specification for + * the given volume resides. This can be either hostname or ipv4 address. + * If transport type is 'unix', then 'host' field should not be specified. 
* The 'socket' field needs to be populated with the path to unix domain * socket. * @@ -95,23 +227,22 @@ static int parse_volume_options(GlusterConf *gconf, char *path) * default port. If the transport type is unix, then 'port' should not be * specified. * - * 'volname' is the name of the gluster volume which contains the VM image. + * 'volume' is the name of the gluster volume which contains the VM image. * - * 'image' is the path to the actual VM image that resides on gluster volume. + * 'path' is the path to the actual VM image that resides on gluster volume. * * Examples: * * file=gluster://1.2.3.4/testvol/a.img * file=gluster+tcp://1.2.3.4/testvol/a.img * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img - * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img - * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img - * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img + * file=gluster+tcp://host.domain.com:24007/testvol/dir/a.img * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket - * file=gluster+rdma://1.2.3.4:24007/testvol/a.img */ -static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename) +static int qemu_gluster_parse_uri(BlockdevOptionsGluster *gconf, + const char *filename) { + GlusterServer *gsconf; URI *uri; QueryParams *qp = NULL; bool is_unix = false; @@ -122,16 +253,21 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename) return -EINVAL; } + gconf->server = g_new0(GlusterServerList, 1); + gconf->server->value = gsconf = g_new0(GlusterServer, 1); + /* transport */ if (!uri->scheme || !strcmp(uri->scheme, "gluster")) { - gconf->transport = g_strdup("tcp"); + gsconf->type = GLUSTER_TRANSPORT_TCP; } else if (!strcmp(uri->scheme, "gluster+tcp")) { - gconf->transport = g_strdup("tcp"); + gsconf->type = GLUSTER_TRANSPORT_TCP; } else if (!strcmp(uri->scheme, "gluster+unix")) { - gconf->transport = g_strdup("unix"); + gsconf->type = GLUSTER_TRANSPORT_UNIX; is_unix = true; } else if (!strcmp(uri->scheme, "gluster+rdma")) { - gconf->transport = g_strdup("rdma"); + gsconf->type = GLUSTER_TRANSPORT_TCP; + error_report("Warning: rdma feature is not supported, falling " + "back to tcp"); } else { ret = -EINVAL; goto out; @@ -157,10 +293,14 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename) ret = -EINVAL; goto out; } - gconf->server = g_strdup(qp->p[0].value); + gsconf->u.q_unix.path = g_strdup(qp->p[0].value); } else { - gconf->server = g_strdup(uri->server ? uri->server : "localhost"); - gconf->port = uri->port; + gsconf->u.tcp.host = g_strdup(uri->server ? 
uri->server : "localhost"); + if (uri->port) { + gsconf->u.tcp.port = g_strdup_printf("%d", uri->port); + } else { + gsconf->u.tcp.port = g_strdup_printf("%d", GLUSTER_DEFAULT_PORT); + } } out: @@ -171,30 +311,34 @@ out: return ret; } -static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename, - Error **errp) +static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf, + Error **errp) { - struct glfs *glfs = NULL; + struct glfs *glfs; int ret; int old_errno; + GlusterServerList *server; - ret = qemu_gluster_parseuri(gconf, filename); - if (ret < 0) { - error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/" - "volname/image[?socket=...]"); - errno = -ret; - goto out; - } - - glfs = glfs_new(gconf->volname); + glfs = glfs_new(gconf->volume); if (!glfs) { goto out; } - ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->server, - gconf->port); - if (ret < 0) { - goto out; + for (server = gconf->server; server; server = server->next) { + if (server->value->type == GLUSTER_TRANSPORT_UNIX) { + ret = glfs_set_volfile_server(glfs, + GlusterTransport_lookup[server->value->type], + server->value->u.q_unix.path, 0); + } else { + ret = glfs_set_volfile_server(glfs, + GlusterTransport_lookup[server->value->type], + server->value->u.tcp.host, + atoi(server->value->u.tcp.port)); + } + + if (ret < 0) { + goto out; + } } ret = glfs_set_logging(glfs, "-", gconf->debug_level); @@ -204,15 +348,25 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename, ret = glfs_init(glfs); if (ret) { - error_setg_errno(errp, errno, - "Gluster connection failed for server=%s port=%d " - "volume=%s image=%s transport=%s", gconf->server, - gconf->port, gconf->volname, gconf->image, - gconf->transport); + error_setg(errp, "Gluster connection for volume %s, path %s failed" + " to connect", gconf->volume, gconf->path); + for (server = gconf->server; server; server = server->next) { + if (server->value->type == GLUSTER_TRANSPORT_UNIX) { + error_append_hint(errp, "hint: failed on socket %s ", + server->value->u.q_unix.path); + } else { + error_append_hint(errp, "hint: failed on host %s and port %s ", + server->value->u.tcp.host, + server->value->u.tcp.port); + } + } + + error_append_hint(errp, "Please refer to gluster logs for more info\n"); /* glfs_init sometimes doesn't set errno although docs suggest that */ - if (errno == 0) + if (errno == 0) { errno = EINVAL; + } goto out; } @@ -227,6 +381,226 @@ out: return NULL; } +static int qapi_enum_parse(const char *opt) +{ + int i; + + if (!opt) { + return GLUSTER_TRANSPORT__MAX; + } + + for (i = 0; i < GLUSTER_TRANSPORT__MAX; i++) { + if (!strcmp(opt, GlusterTransport_lookup[i])) { + return i; + } + } + + return i; +} + +/* + * Convert the json formatted command line into qapi. 
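+ *
+ * As an illustration only (the key names come from the option lists above,
+ * the values are invented), the flat QDict this parser consumes for a
+ * two-server setup would look roughly like:
+ *
+ *   volume=testvol,path=/path/a.qcow2,
+ *   server.0.type=tcp,server.0.host=1.2.3.4,server.0.port=24007,
+ *   server.1.type=unix,server.1.socket=/var/run/glusterd.socket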
+*/ +static int qemu_gluster_parse_json(BlockdevOptionsGluster *gconf, + QDict *options, Error **errp) +{ + QemuOpts *opts; + GlusterServer *gsconf; + GlusterServerList *curr = NULL; + QDict *backing_options = NULL; + Error *local_err = NULL; + char *str = NULL; + const char *ptr; + size_t num_servers; + int i; + + /* create opts info from runtime_json_opts list */ + opts = qemu_opts_create(&runtime_json_opts, NULL, 0, &error_abort); + qemu_opts_absorb_qdict(opts, options, &local_err); + if (local_err) { + goto out; + } + + num_servers = qdict_array_entries(options, GLUSTER_OPT_SERVER_PATTERN); + if (num_servers < 1) { + error_setg(&local_err, QERR_MISSING_PARAMETER, "server"); + goto out; + } + + ptr = qemu_opt_get(opts, GLUSTER_OPT_VOLUME); + if (!ptr) { + error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_VOLUME); + goto out; + } + gconf->volume = g_strdup(ptr); + + ptr = qemu_opt_get(opts, GLUSTER_OPT_PATH); + if (!ptr) { + error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_PATH); + goto out; + } + gconf->path = g_strdup(ptr); + qemu_opts_del(opts); + + for (i = 0; i < num_servers; i++) { + str = g_strdup_printf(GLUSTER_OPT_SERVER_PATTERN"%d.", i); + qdict_extract_subqdict(options, &backing_options, str); + + /* create opts info from runtime_type_opts list */ + opts = qemu_opts_create(&runtime_type_opts, NULL, 0, &error_abort); + qemu_opts_absorb_qdict(opts, backing_options, &local_err); + if (local_err) { + goto out; + } + + ptr = qemu_opt_get(opts, GLUSTER_OPT_TYPE); + gsconf = g_new0(GlusterServer, 1); + gsconf->type = qapi_enum_parse(ptr); + if (!ptr) { + error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_TYPE); + error_append_hint(&local_err, GERR_INDEX_HINT, i); + goto out; + + } + if (gsconf->type == GLUSTER_TRANSPORT__MAX) { + error_setg(&local_err, QERR_INVALID_PARAMETER_VALUE, + GLUSTER_OPT_TYPE, "tcp or unix"); + error_append_hint(&local_err, GERR_INDEX_HINT, i); + goto out; + } + qemu_opts_del(opts); + + if (gsconf->type == GLUSTER_TRANSPORT_TCP) { + /* create opts info from runtime_tcp_opts list */ + opts = qemu_opts_create(&runtime_tcp_opts, NULL, 0, &error_abort); + qemu_opts_absorb_qdict(opts, backing_options, &local_err); + if (local_err) { + goto out; + } + + ptr = qemu_opt_get(opts, GLUSTER_OPT_HOST); + if (!ptr) { + error_setg(&local_err, QERR_MISSING_PARAMETER, + GLUSTER_OPT_HOST); + error_append_hint(&local_err, GERR_INDEX_HINT, i); + goto out; + } + gsconf->u.tcp.host = g_strdup(ptr); + ptr = qemu_opt_get(opts, GLUSTER_OPT_PORT); + if (!ptr) { + error_setg(&local_err, QERR_MISSING_PARAMETER, + GLUSTER_OPT_PORT); + error_append_hint(&local_err, GERR_INDEX_HINT, i); + goto out; + } + gsconf->u.tcp.port = g_strdup(ptr); + + /* defend for unsupported fields in InetSocketAddress, + * i.e. 
@ipv4, @ipv6 and @to + */ + ptr = qemu_opt_get(opts, GLUSTER_OPT_TO); + if (ptr) { + gsconf->u.tcp.has_to = true; + } + ptr = qemu_opt_get(opts, GLUSTER_OPT_IPV4); + if (ptr) { + gsconf->u.tcp.has_ipv4 = true; + } + ptr = qemu_opt_get(opts, GLUSTER_OPT_IPV6); + if (ptr) { + gsconf->u.tcp.has_ipv6 = true; + } + if (gsconf->u.tcp.has_to) { + error_setg(&local_err, "Parameter 'to' not supported"); + goto out; + } + if (gsconf->u.tcp.has_ipv4 || gsconf->u.tcp.has_ipv6) { + error_setg(&local_err, "Parameters 'ipv4/ipv6' not supported"); + goto out; + } + qemu_opts_del(opts); + } else { + /* create opts info from runtime_unix_opts list */ + opts = qemu_opts_create(&runtime_unix_opts, NULL, 0, &error_abort); + qemu_opts_absorb_qdict(opts, backing_options, &local_err); + if (local_err) { + goto out; + } + + ptr = qemu_opt_get(opts, GLUSTER_OPT_SOCKET); + if (!ptr) { + error_setg(&local_err, QERR_MISSING_PARAMETER, + GLUSTER_OPT_SOCKET); + error_append_hint(&local_err, GERR_INDEX_HINT, i); + goto out; + } + gsconf->u.q_unix.path = g_strdup(ptr); + qemu_opts_del(opts); + } + + if (gconf->server == NULL) { + gconf->server = g_new0(GlusterServerList, 1); + gconf->server->value = gsconf; + curr = gconf->server; + } else { + curr->next = g_new0(GlusterServerList, 1); + curr->next->value = gsconf; + curr = curr->next; + } + + qdict_del(backing_options, str); + g_free(str); + str = NULL; + } + + return 0; + +out: + error_propagate(errp, local_err); + qemu_opts_del(opts); + if (str) { + qdict_del(backing_options, str); + g_free(str); + } + errno = EINVAL; + return -errno; +} + +static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf, + const char *filename, + QDict *options, Error **errp) +{ + int ret; + if (filename) { + ret = qemu_gluster_parse_uri(gconf, filename); + if (ret < 0) { + error_setg(errp, "invalid URI"); + error_append_hint(errp, "Usage: file=gluster[+transport]://" + "[host[:port]]/volume/path[?socket=...]\n"); + errno = -ret; + return NULL; + } + } else { + ret = qemu_gluster_parse_json(gconf, options, errp); + if (ret < 0) { + error_append_hint(errp, "Usage: " + "-drive driver=qcow2,file.driver=gluster," + "file.volume=testvol,file.path=/path/a.qcow2" + "[,file.debug=9],file.server.0.type=tcp," + "file.server.0.host=1.2.3.4," + "file.server.0.port=24007," + "file.server.1.transport=unix," + "file.server.1.socket=/var/run/glusterd.socket ..." 
+ "\n"); + errno = -ret; + return NULL; + } + + } + + return qemu_gluster_glfs_init(gconf, errp); +} + static void qemu_gluster_complete_aio(void *opaque) { GlusterAIOCB *acb = (GlusterAIOCB *)opaque; @@ -255,30 +629,6 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg) qemu_bh_schedule(acb->bh); } -#define GLUSTER_OPT_FILENAME "filename" -#define GLUSTER_OPT_DEBUG "debug" -#define GLUSTER_DEBUG_DEFAULT 4 -#define GLUSTER_DEBUG_MAX 9 - -/* TODO Convert to fine grained options */ -static QemuOptsList runtime_opts = { - .name = "gluster", - .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head), - .desc = { - { - .name = GLUSTER_OPT_FILENAME, - .type = QEMU_OPT_STRING, - .help = "URL to the gluster image", - }, - { - .name = GLUSTER_OPT_DEBUG, - .type = QEMU_OPT_NUMBER, - .help = "Gluster log level, valid range is 0-9", - }, - { /* end of list */ } - }, -}; - static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags) { assert(open_flags != NULL); @@ -324,7 +674,7 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options, BDRVGlusterState *s = bs->opaque; int open_flags = 0; int ret = 0; - GlusterConf *gconf = g_new0(GlusterConf, 1); + BlockdevOptionsGluster *gconf = NULL; QemuOpts *opts; Error *local_err = NULL; const char *filename; @@ -347,8 +697,10 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options, s->debug_level = GLUSTER_DEBUG_MAX; } + gconf = g_new0(BlockdevOptionsGluster, 1); gconf->debug_level = s->debug_level; - s->glfs = qemu_gluster_init(gconf, filename, errp); + gconf->has_debug_level = true; + s->glfs = qemu_gluster_init(gconf, filename, options, errp); if (!s->glfs) { ret = -errno; goto out; @@ -373,7 +725,7 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options, qemu_gluster_parse_flags(bdrv_flags, &open_flags); - s->fd = glfs_open(s->glfs, gconf->image, open_flags); + s->fd = glfs_open(s->glfs, gconf->path, open_flags); if (!s->fd) { ret = -errno; } @@ -382,7 +734,7 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options, out: qemu_opts_del(opts); - qemu_gluster_gconf_free(gconf); + qapi_free_BlockdevOptionsGluster(gconf); if (!ret) { return ret; } @@ -395,19 +747,13 @@ out: return ret; } -typedef struct BDRVGlusterReopenState { - struct glfs *glfs; - struct glfs_fd *fd; -} BDRVGlusterReopenState; - - static int qemu_gluster_reopen_prepare(BDRVReopenState *state, BlockReopenQueue *queue, Error **errp) { int ret = 0; BDRVGlusterState *s; BDRVGlusterReopenState *reop_s; - GlusterConf *gconf = NULL; + BlockdevOptionsGluster *gconf; int open_flags = 0; assert(state != NULL); @@ -420,10 +766,10 @@ static int qemu_gluster_reopen_prepare(BDRVReopenState *state, qemu_gluster_parse_flags(state->flags, &open_flags); - gconf = g_new0(GlusterConf, 1); - + gconf = g_new0(BlockdevOptionsGluster, 1); gconf->debug_level = s->debug_level; - reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, errp); + gconf->has_debug_level = true; + reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, NULL, errp); if (reop_s->glfs == NULL) { ret = -errno; goto exit; @@ -439,7 +785,7 @@ static int qemu_gluster_reopen_prepare(BDRVReopenState *state, } #endif - reop_s->fd = glfs_open(reop_s->glfs, gconf->image, open_flags); + reop_s->fd = glfs_open(reop_s->glfs, gconf->path, open_flags); if (reop_s->fd == NULL) { /* reops->glfs will be cleaned up in _abort */ ret = -errno; @@ -448,7 +794,7 @@ static int qemu_gluster_reopen_prepare(BDRVReopenState *state, exit: /* state->opaque will be freed in either the 
_abort or _commit */ - qemu_gluster_gconf_free(gconf); + qapi_free_BlockdevOptionsGluster(gconf); return ret; } @@ -501,7 +847,9 @@ static void qemu_gluster_reopen_abort(BDRVReopenState *state) #ifdef CONFIG_GLUSTERFS_ZEROFILL static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int size, BdrvRequestFlags flags) + int64_t offset, + int size, + BdrvRequestFlags flags) { int ret; GlusterAIOCB acb; @@ -527,7 +875,7 @@ static inline bool gluster_supports_zerofill(void) } static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset, - int64_t size) + int64_t size) { return glfs_zerofill(fd, offset, size); } @@ -539,7 +887,7 @@ static inline bool gluster_supports_zerofill(void) } static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset, - int64_t size) + int64_t size) { return 0; } @@ -548,14 +896,15 @@ static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset, static int qemu_gluster_create(const char *filename, QemuOpts *opts, Error **errp) { + BlockdevOptionsGluster *gconf; struct glfs *glfs; struct glfs_fd *fd; int ret = 0; int prealloc = 0; int64_t total_size = 0; char *tmp = NULL; - GlusterConf *gconf = g_new0(GlusterConf, 1); + gconf = g_new0(BlockdevOptionsGluster, 1); gconf->debug_level = qemu_opt_get_number_del(opts, GLUSTER_OPT_DEBUG, GLUSTER_DEBUG_DEFAULT); if (gconf->debug_level < 0) { @@ -563,8 +912,9 @@ static int qemu_gluster_create(const char *filename, } else if (gconf->debug_level > GLUSTER_DEBUG_MAX) { gconf->debug_level = GLUSTER_DEBUG_MAX; } + gconf->has_debug_level = true; - glfs = qemu_gluster_init(gconf, filename, errp); + glfs = qemu_gluster_init(gconf, filename, NULL, errp); if (!glfs) { ret = -errno; goto out; @@ -576,19 +926,17 @@ static int qemu_gluster_create(const char *filename, tmp = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); if (!tmp || !strcmp(tmp, "off")) { prealloc = 0; - } else if (!strcmp(tmp, "full") && - gluster_supports_zerofill()) { + } else if (!strcmp(tmp, "full") && gluster_supports_zerofill()) { prealloc = 1; } else { error_setg(errp, "Invalid preallocation mode: '%s'" - " or GlusterFS doesn't support zerofill API", - tmp); + " or GlusterFS doesn't support zerofill API", tmp); ret = -EINVAL; goto out; } - fd = glfs_creat(glfs, gconf->image, - O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR); + fd = glfs_creat(glfs, gconf->path, + O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR); if (!fd) { ret = -errno; } else { @@ -606,7 +954,7 @@ static int qemu_gluster_create(const char *filename, } out: g_free(tmp); - qemu_gluster_gconf_free(gconf); + qapi_free_BlockdevOptionsGluster(gconf); if (glfs) { glfs_fini(glfs); } @@ -614,7 +962,8 @@ out: } static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs, - int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write) + int64_t sector_num, int nb_sectors, + QEMUIOVector *qiov, int write) { int ret; GlusterAIOCB acb; @@ -629,10 +978,10 @@ static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs, if (write) { ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0, - gluster_finish_aiocb, &acb); + gluster_finish_aiocb, &acb); } else { ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0, - gluster_finish_aiocb, &acb); + gluster_finish_aiocb, &acb); } if (ret < 0) { @@ -657,13 +1006,17 @@ static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset) } static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs, - int64_t sector_num, int nb_sectors, 
QEMUIOVector *qiov) + int64_t sector_num, + int nb_sectors, + QEMUIOVector *qiov) { return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0); } static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs, - int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) + int64_t sector_num, + int nb_sectors, + QEMUIOVector *qiov) { return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1); } @@ -725,7 +1078,8 @@ error: #ifdef CONFIG_GLUSTERFS_DISCARD static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs, - int64_t sector_num, int nb_sectors) + int64_t sector_num, + int nb_sectors) { int ret; GlusterAIOCB acb; @@ -934,34 +1288,11 @@ static int64_t coroutine_fn qemu_gluster_co_get_block_status( } -static QemuOptsList qemu_gluster_create_opts = { - .name = "qemu-gluster-create-opts", - .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head), - .desc = { - { - .name = BLOCK_OPT_SIZE, - .type = QEMU_OPT_SIZE, - .help = "Virtual disk size" - }, - { - .name = BLOCK_OPT_PREALLOC, - .type = QEMU_OPT_STRING, - .help = "Preallocation mode (allowed values: off, full)" - }, - { - .name = GLUSTER_OPT_DEBUG, - .type = QEMU_OPT_NUMBER, - .help = "Gluster log level, valid range is 0-9", - }, - { /* end of list */ } - } -}; - static BlockDriver bdrv_gluster = { .format_name = "gluster", .protocol_name = "gluster", .instance_size = sizeof(BDRVGlusterState), - .bdrv_needs_filename = true, + .bdrv_needs_filename = false, .bdrv_file_open = qemu_gluster_open, .bdrv_reopen_prepare = qemu_gluster_reopen_prepare, .bdrv_reopen_commit = qemu_gluster_reopen_commit, @@ -989,7 +1320,7 @@ static BlockDriver bdrv_gluster_tcp = { .format_name = "gluster", .protocol_name = "gluster+tcp", .instance_size = sizeof(BDRVGlusterState), - .bdrv_needs_filename = true, + .bdrv_needs_filename = false, .bdrv_file_open = qemu_gluster_open, .bdrv_reopen_prepare = qemu_gluster_reopen_prepare, .bdrv_reopen_commit = qemu_gluster_reopen_commit, @@ -1041,6 +1372,12 @@ static BlockDriver bdrv_gluster_unix = { .create_opts = &qemu_gluster_create_opts, }; +/* rdma is deprecated (actually never supported for volfile fetch). + * Let's maintain it for the protocol compatibility, to make sure things + * won't break immediately. For now, gluster+rdma will fall back to gluster+tcp + * protocol with a warning. 
+ * TODO: remove gluster+rdma interface support + */ static BlockDriver bdrv_gluster_rdma = { .format_name = "gluster", .protocol_name = "gluster+rdma", diff --git a/block/mirror.c b/block/mirror.c index b1e633ecad..836a5d0194 100644 --- a/block/mirror.c +++ b/block/mirror.c @@ -58,9 +58,10 @@ typedef struct MirrorBlockJob { QSIMPLEQ_HEAD(, MirrorBuffer) buf_free; int buf_free_count; + uint64_t last_pause_ns; unsigned long *in_flight_bitmap; int in_flight; - int sectors_in_flight; + int64_t sectors_in_flight; int ret; bool unmap; bool waiting_for_io; @@ -322,6 +323,7 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) int nb_chunks = 1; int64_t end = s->bdev_length / BDRV_SECTOR_SIZE; int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS; + bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target)); sector_num = hbitmap_iter_next(&s->hbi); if (sector_num < 0) { @@ -372,7 +374,7 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) bitmap_set(s->in_flight_bitmap, sector_num / sectors_per_chunk, nb_chunks); while (nb_chunks > 0 && sector_num < end) { int ret; - int io_sectors; + int io_sectors, io_sectors_acct; BlockDriverState *file; enum MirrorMethod { MIRROR_METHOD_COPY, @@ -405,16 +407,26 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) } } + while (s->in_flight >= MAX_IN_FLIGHT) { + trace_mirror_yield_in_flight(s, sector_num, s->in_flight); + mirror_wait_for_io(s); + } + mirror_clip_sectors(s, sector_num, &io_sectors); switch (mirror_method) { case MIRROR_METHOD_COPY: io_sectors = mirror_do_read(s, sector_num, io_sectors); + io_sectors_acct = io_sectors; break; case MIRROR_METHOD_ZERO: - mirror_do_zero_or_discard(s, sector_num, io_sectors, false); - break; case MIRROR_METHOD_DISCARD: - mirror_do_zero_or_discard(s, sector_num, io_sectors, true); + mirror_do_zero_or_discard(s, sector_num, io_sectors, + mirror_method == MIRROR_METHOD_DISCARD); + if (write_zeroes_ok) { + io_sectors_acct = 0; + } else { + io_sectors_acct = io_sectors; + } break; default: abort(); @@ -423,7 +435,7 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) sector_num += io_sectors; nb_chunks -= DIV_ROUND_UP(io_sectors, sectors_per_chunk); if (s->common.speed) { - delay_ns = ratelimit_calculate_delay(&s->limit, io_sectors); + delay_ns = ratelimit_calculate_delay(&s->limit, io_sectors_acct); } } return delay_ns; @@ -514,19 +526,94 @@ static void mirror_exit(BlockJob *job, void *opaque) bdrv_unref(src); } +static void mirror_throttle(MirrorBlockJob *s) +{ + int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); + + if (now - s->last_pause_ns > SLICE_TIME) { + s->last_pause_ns = now; + block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0); + } else { + block_job_pause_point(&s->common); + } +} + +static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s) +{ + int64_t sector_num, end; + BlockDriverState *base = s->base; + BlockDriverState *bs = blk_bs(s->common.blk); + BlockDriverState *target_bs = blk_bs(s->target); + int ret, n; + + end = s->bdev_length / BDRV_SECTOR_SIZE; + + if (base == NULL && !bdrv_has_zero_init(target_bs)) { + if (!bdrv_can_write_zeroes_with_unmap(target_bs)) { + bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, end); + return 0; + } + + for (sector_num = 0; sector_num < end; ) { + int nb_sectors = MIN(end - sector_num, + QEMU_ALIGN_DOWN(INT_MAX, s->granularity) >> BDRV_SECTOR_BITS); + + mirror_throttle(s); + + if (block_job_is_cancelled(&s->common)) { + return 0; + } + + if (s->in_flight >= MAX_IN_FLIGHT) { + 
trace_mirror_yield(s, s->in_flight, s->buf_free_count, -1); + mirror_wait_for_io(s); + continue; + } + + mirror_do_zero_or_discard(s, sector_num, nb_sectors, false); + sector_num += nb_sectors; + } + + mirror_drain(s); + } + + /* First part, loop on the sectors and initialize the dirty bitmap. */ + for (sector_num = 0; sector_num < end; ) { + /* Just to make sure we are not exceeding int limit. */ + int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS, + end - sector_num); + + mirror_throttle(s); + + if (block_job_is_cancelled(&s->common)) { + return 0; + } + + ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n); + if (ret < 0) { + return ret; + } + + assert(n > 0); + if (ret == 1) { + bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n); + } + sector_num += n; + } + return 0; +} + static void coroutine_fn mirror_run(void *opaque) { MirrorBlockJob *s = opaque; MirrorExitData *data; BlockDriverState *bs = blk_bs(s->common.blk); BlockDriverState *target_bs = blk_bs(s->target); - int64_t sector_num, end, length; - uint64_t last_pause_ns; + int64_t length; BlockDriverInfo bdi; char backing_filename[2]; /* we only need 2 characters because we are only checking for a NULL string */ int ret = 0; - int n; int target_cluster_size = BDRV_SECTOR_SIZE; if (block_job_is_cancelled(&s->common)) { @@ -568,7 +655,6 @@ static void coroutine_fn mirror_run(void *opaque) s->target_cluster_sectors = target_cluster_size >> BDRV_SECTOR_BITS; s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov); - end = s->bdev_length / BDRV_SECTOR_SIZE; s->buf = qemu_try_blockalign(bs, s->buf_size); if (s->buf == NULL) { ret = -ENOMEM; @@ -577,47 +663,18 @@ static void coroutine_fn mirror_run(void *opaque) mirror_free_init(s); - last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); + s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); if (!s->is_none_mode) { - /* First part, loop on the sectors and initialize the dirty bitmap. */ - BlockDriverState *base = s->base; - bool mark_all_dirty = s->base == NULL && !bdrv_has_zero_init(target_bs); - - for (sector_num = 0; sector_num < end; ) { - /* Just to make sure we are not exceeding int limit. */ - int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS, - end - sector_num); - int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); - - if (now - last_pause_ns > SLICE_TIME) { - last_pause_ns = now; - block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0); - } else { - block_job_pause_point(&s->common); - } - - if (block_job_is_cancelled(&s->common)) { - goto immediate_exit; - } - - ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n); - - if (ret < 0) { - goto immediate_exit; - } - - assert(n > 0); - if (ret == 1 || mark_all_dirty) { - bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n); - } - sector_num += n; + ret = mirror_dirty_init(s); + if (ret < 0 || block_job_is_cancelled(&s->common)) { + goto immediate_exit; } } bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi); for (;;) { uint64_t delay_ns = 0; - int64_t cnt; + int64_t cnt, delta; bool should_complete; if (s->ret < 0) { @@ -640,9 +697,10 @@ static void coroutine_fn mirror_run(void *opaque) * We do so every SLICE_TIME nanoseconds, or when there is an error, * or when the source is clean, whichever comes first. 
*/ - if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME && + delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns; + if (delta < SLICE_TIME && s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) { - if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 || + if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 || (cnt == 0 && s->in_flight > 0)) { trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt); mirror_wait_for_io(s); @@ -710,7 +768,7 @@ static void coroutine_fn mirror_run(void *opaque) s->common.cancelled = false; break; } - last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); + s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); } immediate_exit: diff --git a/include/block/block_int.h b/include/block/block_int.h index a6b13adb45..09be16f88c 100644 --- a/include/block/block_int.h +++ b/include/block/block_int.h @@ -783,7 +783,7 @@ void blk_dev_eject_request(BlockBackend *blk, bool force); bool blk_dev_is_tray_open(BlockBackend *blk); bool blk_dev_is_medium_locked(BlockBackend *blk); -void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors); +void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, int64_t nr_sect); bool bdrv_requests_pending(BlockDriverState *bs); void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out); diff --git a/include/block/dirty-bitmap.h b/include/block/dirty-bitmap.h index 80afe603f6..ee3388f90d 100644 --- a/include/block/dirty-bitmap.h +++ b/include/block/dirty-bitmap.h @@ -33,9 +33,9 @@ DirtyBitmapStatus bdrv_dirty_bitmap_status(BdrvDirtyBitmap *bitmap); int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector); void bdrv_set_dirty_bitmap(BdrvDirtyBitmap *bitmap, - int64_t cur_sector, int nr_sectors); + int64_t cur_sector, int64_t nr_sectors); void bdrv_reset_dirty_bitmap(BdrvDirtyBitmap *bitmap, - int64_t cur_sector, int nr_sectors); + int64_t cur_sector, int64_t nr_sectors); void bdrv_dirty_iter_init(BdrvDirtyBitmap *bitmap, struct HBitmapIter *hbi); void bdrv_set_dirty_iter(struct HBitmapIter *hbi, int64_t offset); int64_t bdrv_get_dirty_count(BdrvDirtyBitmap *bitmap); diff --git a/qapi/block-core.json b/qapi/block-core.json index f4f3ef970e..f462345ca3 100644 --- a/qapi/block-core.json +++ b/qapi/block-core.json @@ -1690,13 +1690,14 @@ # @host_device, @host_cdrom: Since 2.1 # # Since: 2.0 +# @gluster: Since 2.7 ## { 'enum': 'BlockdevDriver', 'data': [ 'archipelago', 'blkdebug', 'blkverify', 'bochs', 'cloop', - 'dmg', 'file', 'ftp', 'ftps', 'host_cdrom', 'host_device', - 'http', 'https', 'luks', 'null-aio', 'null-co', 'parallels', - 'qcow', 'qcow2', 'qed', 'quorum', 'raw', 'tftp', 'vdi', 'vhdx', - 'vmdk', 'vpc', 'vvfat' ] } + 'dmg', 'file', 'ftp', 'ftps', 'gluster', 'host_cdrom', + 'host_device', 'http', 'https', 'luks', 'null-aio', 'null-co', + 'parallels', 'qcow', 'qcow2', 'qed', 'quorum', 'raw', 'tftp', + 'vdi', 'vhdx', 'vmdk', 'vpc', 'vvfat' ] } ## # @BlockdevOptionsFile @@ -2088,6 +2089,63 @@ '*rewrite-corrupted': 'bool', '*read-pattern': 'QuorumReadPattern' } } +## +# @GlusterTransport +# +# An enumeration of Gluster transport types +# +# @tcp: TCP - Transmission Control Protocol +# +# @unix: UNIX - Unix domain socket +# +# Since: 2.7 +## +{ 'enum': 'GlusterTransport', + 'data': [ 'unix', 'tcp' ] } + + +## +# @GlusterServer +# +# Captures the address of a socket +# +# Details for connecting to a gluster server +# +# @type: Transport type used for gluster connection +# +# @unix: socket file +# +# @tcp: host address and port 
number
+#
+# Since: 2.7
+##
+{ 'union': 'GlusterServer',
+  'base': { 'type': 'GlusterTransport' },
+  'discriminator': 'type',
+  'data': { 'unix': 'UnixSocketAddress',
+            'tcp': 'InetSocketAddress' } }
+
+##
+# @BlockdevOptionsGluster
+#
+# Driver specific block device options for Gluster
+#
+# @volume: name of gluster volume where VM image resides
+#
+# @path: absolute path to image file in gluster volume
+#
+# @server: gluster server description
+#
+# @debug_level: #optional libgfapi log level (default '4' which is Error)
+#
+# Since: 2.7
+##
+{ 'struct': 'BlockdevOptionsGluster',
+  'data': { 'volume': 'str',
+            'path': 'str',
+            'server': ['GlusterServer'],
+            '*debug_level': 'int' } }
+
 ##
 # @BlockdevOptions
 #
@@ -2135,7 +2193,7 @@
       'file':       'BlockdevOptionsFile',
       'ftp':        'BlockdevOptionsFile',
       'ftps':       'BlockdevOptionsFile',
-# TODO gluster: Wait for structured options
+      'gluster':    'BlockdevOptionsGluster',
       'host_cdrom': 'BlockdevOptionsFile',
       'host_device':'BlockdevOptionsFile',
       'http':       'BlockdevOptionsFile',
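For orientation, a BlockdevOptionsGluster value as it could appear inside a
blockdev-add arguments object might look like the sketch below (node name,
volume, path and addresses are invented; the surrounding blockdev-add syntax
follows the generic BlockdevOptions union and is not shown in full here):

    { "driver": "gluster",
      "node-name": "gluster-node0",
      "volume": "testvol",
      "path": "/path/a.qcow2",
      "server": [ { "type": "tcp",
                    "host": "1.2.3.4",
                    "port": "24007" },
                  { "type": "unix",
                    "path": "/var/run/glusterd.socket" } ],
      "debug_level": 9 }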