Merge remote-tracking branch 'remotes/dgilbert/tags/pull-migration-20200828a' into staging

Migration and virtiofsd pull 2020-08-28

Migration:
   vsock support for migration
   minor fixes

virtiofsd:
   Disable remote POSIX locks by default, because the blocking
     variants were never supported and this breaks things
   Some prep work for unprivileged/less-privileged modes

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

# gpg: Signature made Fri 28 Aug 2020 13:43:18 BST
# gpg:                using RSA key 45F5C71B4A0CB7FB977A9FA90516331EBC5BFDE7
# gpg: Good signature from "Dr. David Alan Gilbert (RH2) <dgilbert@redhat.com>" [full]
# Primary key fingerprint: 45F5 C71B 4A0C B7FB 977A  9FA9 0516 331E BC5B FDE7

* remotes/dgilbert/tags/pull-migration-20200828a:
  virtiofsd: probe unshare(CLONE_FS) and print an error
  virtiofsd: drop CAP_DAC_READ_SEARCH
  virtiofsd: Remove "norace" from cmdline help and docs
  virtiofsd: Disable remote posix locks by default
  migration: tls: fix memory leak in migration_tls_get_creds
  migration: improve error reporting of block driver state name
  migration: add vsock as data channel support
  migration: unify the framework of socket-type channel

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
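
The new vsock channel is driven through the same URI scheme as the existing
socket transports. A minimal, hedged usage sketch (the CID and port are made
up for illustration; check the QEMU documentation for the exact syntax):

    # destination QEMU listens on a vsock address (CID 3, port 4321)
    qemu-system-x86_64 ... -incoming vsock:3:4321

    # source side, from the HMP monitor
    (qemu) migrate -d vsock:3:4321

tcp:host:port, unix:/path, exec:, rdma: and fd: URIs keep working as before;
tcp, unix and vsock now all share the common socket channel code.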
commit a4e236b7d4 (master)
Peter Maydell, 2020-08-28 18:37:49 +01:00
10 changed files with 54 additions and 92 deletions

@@ -63,11 +63,8 @@ Options
   Print only log messages matching LEVEL or more severe. LEVEL is one of
   ``err``, ``warn``, ``info``, or ``debug``. The default is ``info``.
 
-* norace -
-  Disable racy fallback. The default is false.
-
 * posix_lock|no_posix_lock -
-  Enable/disable remote POSIX locks. The default is ``posix_lock``.
+  Enable/disable remote POSIX locks. The default is ``no_posix_lock``.
 
 * readdirplus|no_readdirplus -
   Enable/disable readdirplus. The default is ``readdirplus``.
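
With the default flipped to ``no_posix_lock``, remote POSIX locks become
opt-in. A hedged example of turning them back on (socket path and shared
directory are placeholders, not taken from this pull):

    virtiofsd --socket-path=/tmp/vhostqemu -o source=/srv/vm-share -o posix_lock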

@@ -378,21 +378,21 @@ void migrate_add_address(SocketAddress *address)
 
 void qemu_start_incoming_migration(const char *uri, Error **errp)
 {
-    const char *p;
+    const char *p = NULL;
 
     qapi_event_send_migration(MIGRATION_STATUS_SETUP);
     if (!strcmp(uri, "defer")) {
         deferred_incoming_migration(errp);
-    } else if (strstart(uri, "tcp:", &p)) {
-        tcp_start_incoming_migration(p, errp);
+    } else if (strstart(uri, "tcp:", &p) ||
+               strstart(uri, "unix:", NULL) ||
+               strstart(uri, "vsock:", NULL)) {
+        socket_start_incoming_migration(p ? p : uri, errp);
 #ifdef CONFIG_RDMA
     } else if (strstart(uri, "rdma:", &p)) {
         rdma_start_incoming_migration(p, errp);
 #endif
     } else if (strstart(uri, "exec:", &p)) {
         exec_start_incoming_migration(p, errp);
-    } else if (strstart(uri, "unix:", &p)) {
-        unix_start_incoming_migration(p, errp);
     } else if (strstart(uri, "fd:", &p)) {
         fd_start_incoming_migration(p, errp);
     } else {
@@ -2094,7 +2094,7 @@ void qmp_migrate(const char *uri, bool has_blk, bool blk,
 {
     Error *local_err = NULL;
     MigrationState *s = migrate_get_current();
-    const char *p;
+    const char *p = NULL;
 
     if (!migrate_prepare(s, has_blk && blk, has_inc && inc,
                          has_resume && resume, errp)) {
@@ -2102,16 +2102,16 @@ void qmp_migrate(const char *uri, bool has_blk, bool blk,
         return;
     }
 
-    if (strstart(uri, "tcp:", &p)) {
-        tcp_start_outgoing_migration(s, p, &local_err);
+    if (strstart(uri, "tcp:", &p) ||
+        strstart(uri, "unix:", NULL) ||
+        strstart(uri, "vsock:", NULL)) {
+        socket_start_outgoing_migration(s, p ? p : uri, &local_err);
 #ifdef CONFIG_RDMA
     } else if (strstart(uri, "rdma:", &p)) {
         rdma_start_outgoing_migration(s, p, &local_err);
 #endif
     } else if (strstart(uri, "exec:", &p)) {
         exec_start_outgoing_migration(s, p, &local_err);
-    } else if (strstart(uri, "unix:", &p)) {
-        unix_start_outgoing_migration(s, p, &local_err);
     } else if (strstart(uri, "fd:", &p)) {
         fd_start_outgoing_migration(s, p, &local_err);
     } else {

@@ -2682,7 +2682,7 @@ int save_snapshot(const char *name, Error **errp)
 
     if (!bdrv_all_can_snapshot(&bs)) {
         error_setg(errp, "Device '%s' is writable but does not support "
-                   "snapshots", bdrv_get_device_name(bs));
+                   "snapshots", bdrv_get_device_or_node_name(bs));
         return ret;
     }
 
@@ -2691,7 +2691,7 @@ int save_snapshot(const char *name, Error **errp)
         ret = bdrv_all_delete_snapshot(name, &bs1, errp);
         if (ret < 0) {
             error_prepend(errp, "Error while deleting snapshot on device "
-                          "'%s': ", bdrv_get_device_name(bs1));
+                          "'%s': ", bdrv_get_device_or_node_name(bs1));
             return ret;
         }
     }
@@ -2766,7 +2766,7 @@ int save_snapshot(const char *name, Error **errp)
     ret = bdrv_all_create_snapshot(sn, bs, vm_state_size, &bs);
     if (ret < 0) {
         error_setg(errp, "Error while creating snapshot on '%s'",
-                   bdrv_get_device_name(bs));
+                   bdrv_get_device_or_node_name(bs));
         goto the_end;
     }
 
@@ -2884,14 +2884,14 @@ int load_snapshot(const char *name, Error **errp)
     if (!bdrv_all_can_snapshot(&bs)) {
         error_setg(errp,
                    "Device '%s' is writable but does not support snapshots",
-                   bdrv_get_device_name(bs));
+                   bdrv_get_device_or_node_name(bs));
         return -ENOTSUP;
     }
 
     ret = bdrv_all_find_snapshot(name, &bs);
     if (ret < 0) {
         error_setg(errp,
                    "Device '%s' does not have the requested snapshot '%s'",
-                   bdrv_get_device_name(bs), name);
+                   bdrv_get_device_or_node_name(bs), name);
         return ret;
     }
@@ -2920,7 +2920,7 @@ int load_snapshot(const char *name, Error **errp)
 
     ret = bdrv_all_goto_snapshot(name, &bs, errp);
     if (ret < 0) {
         error_prepend(errp, "Could not load snapshot '%s' on '%s': ",
-                      name, bdrv_get_device_name(bs));
+                      name, bdrv_get_device_or_node_name(bs));
         goto err_drain;
     }

@@ -50,34 +50,6 @@ int socket_send_channel_destroy(QIOChannel *send)
     return 0;
 }
 
-static SocketAddress *tcp_build_address(const char *host_port, Error **errp)
-{
-    SocketAddress *saddr;
-
-    saddr = g_new0(SocketAddress, 1);
-    saddr->type = SOCKET_ADDRESS_TYPE_INET;
-
-    if (inet_parse(&saddr->u.inet, host_port, errp)) {
-        qapi_free_SocketAddress(saddr);
-        return NULL;
-    }
-
-    return saddr;
-}
-
-static SocketAddress *unix_build_address(const char *path)
-{
-    SocketAddress *saddr;
-
-    saddr = g_new0(SocketAddress, 1);
-    saddr->type = SOCKET_ADDRESS_TYPE_UNIX;
-    saddr->u.q_unix.path = g_strdup(path);
-
-    return saddr;
-}
-
 struct SocketConnectData {
     MigrationState *s;
     char *hostname;
@@ -109,9 +81,10 @@ static void socket_outgoing_migration(QIOTask *task,
     object_unref(OBJECT(sioc));
 }
 
-static void socket_start_outgoing_migration(MigrationState *s,
-                                            SocketAddress *saddr,
-                                            Error **errp)
+static void
+socket_start_outgoing_migration_internal(MigrationState *s,
+                                         SocketAddress *saddr,
+                                         Error **errp)
 {
     QIOChannelSocket *sioc = qio_channel_socket_new();
     struct SocketConnectData *data = g_new0(struct SocketConnectData, 1);
@@ -135,27 +108,18 @@ static void socket_start_outgoing_migration(MigrationState *s,
                                      NULL);
 }
 
-void tcp_start_outgoing_migration(MigrationState *s,
-                                  const char *host_port,
-                                  Error **errp)
+void socket_start_outgoing_migration(MigrationState *s,
+                                     const char *str,
+                                     Error **errp)
 {
     Error *err = NULL;
-    SocketAddress *saddr = tcp_build_address(host_port, &err);
+    SocketAddress *saddr = socket_parse(str, &err);
     if (!err) {
-        socket_start_outgoing_migration(s, saddr, &err);
+        socket_start_outgoing_migration_internal(s, saddr, &err);
     }
     error_propagate(errp, err);
 }
 
-void unix_start_outgoing_migration(MigrationState *s,
-                                   const char *path,
-                                   Error **errp)
-{
-    SocketAddress *saddr = unix_build_address(path);
-
-    socket_start_outgoing_migration(s, saddr, errp);
-}
-
 static void socket_accept_incoming_migration(QIONetListener *listener,
                                              QIOChannelSocket *cioc,
                                              gpointer opaque)
@@ -173,8 +137,9 @@ static void socket_accept_incoming_migration(QIONetListener *listener,
     }
 }
 
-static void socket_start_incoming_migration(SocketAddress *saddr,
-                                            Error **errp)
+static void
+socket_start_incoming_migration_internal(SocketAddress *saddr,
+                                         Error **errp)
 {
     QIONetListener *listener = qio_net_listener_new();
     size_t i;
@@ -207,20 +172,13 @@ static void socket_start_incoming_migration(SocketAddress *saddr,
     }
 }
 
-void tcp_start_incoming_migration(const char *host_port, Error **errp)
+void socket_start_incoming_migration(const char *str, Error **errp)
 {
     Error *err = NULL;
-    SocketAddress *saddr = tcp_build_address(host_port, &err);
+    SocketAddress *saddr = socket_parse(str, &err);
     if (!err) {
-        socket_start_incoming_migration(saddr, &err);
+        socket_start_incoming_migration_internal(saddr, &err);
     }
     qapi_free_SocketAddress(saddr);
     error_propagate(errp, err);
 }
-
-void unix_start_incoming_migration(const char *path, Error **errp)
-{
-    SocketAddress *saddr = unix_build_address(path);
-    socket_start_incoming_migration(saddr, errp);
-    qapi_free_SocketAddress(saddr);
-}

@@ -23,13 +23,8 @@
 void socket_send_channel_create(QIOTaskFunc f, void *data);
 int socket_send_channel_destroy(QIOChannel *send);
 
-void tcp_start_incoming_migration(const char *host_port, Error **errp);
+void socket_start_incoming_migration(const char *str, Error **errp);
 
-void tcp_start_outgoing_migration(MigrationState *s, const char *host_port,
-                                  Error **errp);
-
-void unix_start_incoming_migration(const char *path, Error **errp);
-
-void unix_start_outgoing_migration(MigrationState *s, const char *path,
-                                   Error **errp);
+void socket_start_outgoing_migration(MigrationState *s, const char *str,
+                                     Error **errp);
 #endif

@@ -58,7 +58,6 @@ migration_tls_get_creds(MigrationState *s,
         return NULL;
     }
 
-    object_ref(OBJECT(ret));
     return ret;
 }
 

@@ -81,11 +81,11 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
 Testing: -blockdev driver=file,filename=TEST_DIR/t.IMGFMT,node-name=file
 QEMU X.Y.Z monitor - type 'help' for more information
 (qemu) savevm snap0
-Error: Device '' is writable but does not support snapshots
+Error: Device 'file' is writable but does not support snapshots
 (qemu) info snapshots
 No available block device supports snapshots
 (qemu) loadvm snap0
-Error: Device '' is writable but does not support snapshots
+Error: Device 'file' is writable but does not support snapshots
 (qemu) quit
 
 Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728

@@ -949,6 +949,22 @@ int virtio_session_mount(struct fuse_session *se)
 {
     int ret;
 
+    /*
+     * Test that unshare(CLONE_FS) works. fv_queue_worker() will need it. It's
+     * an unprivileged system call but some Docker/Moby versions are known to
+     * reject it via seccomp when CAP_SYS_ADMIN is not given.
+     *
+     * Note that the program is single-threaded here so this syscall has no
+     * visible effect and is safe to make.
+     */
+    ret = unshare(CLONE_FS);
+    if (ret == -1 && errno == EPERM) {
+        fuse_log(FUSE_LOG_ERR, "unshare(CLONE_FS) failed with EPERM. If "
+                 "running in a container please check that the container "
+                 "runtime seccomp policy allows unshare.\n");
+        return -1;
+    }
+
     ret = fv_create_listen_socket(se);
     if (ret < 0) {
         return ret;
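
The probe above only diagnoses the failure; the fix belongs in the container
configuration. A hedged way to confirm that seccomp is the culprit when
virtiofsd runs under Docker (image name and trailing arguments are
placeholders):

    # per the comment above, some Docker/Moby seccomp profiles reject unshare();
    # if an unconfined run succeeds, the seccomp policy is what needs adjusting
    docker run --rm --security-opt seccomp=unconfined my-virtiofsd-image ...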

@@ -159,8 +159,6 @@ void fuse_cmdline_help(void)
            "    -o max_idle_threads        the maximum number of idle worker "
            "threads\n"
            "                               allowed (default: 10)\n"
-           "    -o norace                  disable racy fallback\n"
-           "                               default: false\n"
            "    -o posix_lock|no_posix_lock\n"
            "                               enable/disable remote posix lock\n"
            "                               default: posix_lock\n"

@@ -2596,7 +2596,6 @@ static void setup_capabilities(char *modcaps_in)
     if (capng_updatev(CAPNG_ADD, CAPNG_PERMITTED | CAPNG_EFFECTIVE,
                       CAP_CHOWN,
                       CAP_DAC_OVERRIDE,
-                      CAP_DAC_READ_SEARCH,
                       CAP_FOWNER,
                       CAP_FSETID,
                       CAP_SETGID,
@@ -2823,7 +2822,7 @@ int main(int argc, char *argv[])
     struct lo_data lo = {
         .debug = 0,
         .writeback = 0,
-        .posix_lock = 1,
+        .posix_lock = 0,
         .proc_self_fd = -1,
     };
     struct lo_map_elem *root_elem;