migration/rdma: Dumb down remaining int error values to -1

This is just to make the error value more obvious.  Callers don't
mind.
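
The pattern applied throughout is the usual one of reporting failure as a
bare -1 instead of propagating whatever negative value a callee happened to
produce.  A minimal sketch of the before/after shape (hypothetical helper()
and do_step() names, not taken from this patch):

    int helper(void);

    /* before: the callee's negative value leaks through */
    int do_step_before(void)
    {
        int ret = helper();
        if (ret < 0) {
            return ret;     /* caller might see -EINVAL, -5, ... */
        }
        return 0;
    }

    /* after: any failure is reported uniformly as -1 */
    int do_step_after(void)
    {
        if (helper() < 0) {
            return -1;      /* callers only check for < 0 anyway */
        }
        return 0;
    }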

Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Li Zhijian <lizhijian@fujitsu.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Message-ID: <20230928132019.2544702-27-armbru@redhat.com>
Markus Armbruster 2023-09-28 15:19:52 +02:00 committed by Juan Quintela
parent 8c6513f750
commit ec48697453
1 changed file with 23 additions and 22 deletions


@@ -1434,7 +1434,7 @@ static int qemu_rdma_unregister_waiting(RDMAContext *rdma)
ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
&resp, NULL, NULL);
if (ret < 0) {
-return ret;
+return -1;
}
trace_qemu_rdma_unregister_waiting_complete(chunk);
@@ -1475,7 +1475,7 @@ static int qemu_rdma_poll(RDMAContext *rdma, struct ibv_cq *cq,
if (ret < 0) {
error_report("ibv_poll_cq failed");
-return ret;
+return -1;
}
wr_id = wc.wr_id & RDMA_WRID_TYPE_MASK;
@@ -1604,7 +1604,7 @@ static int qemu_rdma_wait_comp_channel(RDMAContext *rdma,
if (rdma->received_error) {
return -1;
}
-return rdma->error_state;
+return -!!rdma->error_state;
}
static struct ibv_comp_channel *to_channel(RDMAContext *rdma, uint64_t wrid)
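
One spot cannot become a plain return -1: in qemu_rdma_wait_comp_channel()
above, rdma->error_state may still be zero (no error recorded), so the patch
uses the -!!x idiom to map zero to 0 and any nonzero error to -1.  A quick
illustration of that idiom (values invented for the example):

    int error_state = -22;          /* some negative errno-style value */
    int r = -!!error_state;         /* !!(-22) == 1, so r == -1 */

    error_state = 0;                /* success, nothing recorded */
    r = -!!error_state;             /* !!0 == 0, so r == 0 */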
@@ -1649,7 +1649,7 @@ static int qemu_rdma_block_for_wrid(RDMAContext *rdma,
while (wr_id != wrid_requested) {
ret = qemu_rdma_poll(rdma, poll_cq, &wr_id_in, byte_len);
if (ret < 0) {
-return ret;
+return -1;
}
wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;
@@ -1723,7 +1723,7 @@ err_block_for_wrid:
}
rdma->error_state = ret;
-return ret;
+return -1;
}
/*
@@ -1778,9 +1778,10 @@ static int qemu_rdma_post_send_control(RDMAContext *rdma, uint8_t *buf,
ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_SEND_CONTROL, NULL);
if (ret < 0) {
error_report("rdma migration: send polling control error");
+return -1;
}
-return ret;
+return 0;
}
/*
@@ -1822,7 +1823,7 @@ static int qemu_rdma_exchange_get_response(RDMAContext *rdma,
if (ret < 0) {
error_report("rdma migration: recv polling control error!");
-return ret;
+return -1;
}
network_to_control((void *) rdma->wr_data[idx].control);
@@ -1902,7 +1903,7 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
RDMA_CONTROL_READY,
RDMA_WRID_READY);
if (ret < 0) {
-return ret;
+return -1;
}
}
@@ -1914,7 +1915,7 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
if (ret) {
error_report("rdma migration: error posting"
" extra control recv for anticipated result!");
-return ret;
+return -1;
}
}
@@ -1924,7 +1925,7 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
if (ret) {
error_report("rdma migration: error posting first control recv!");
-return ret;
+return -1;
}
/*
@@ -1934,7 +1935,7 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
if (ret < 0) {
error_report("Failed to send control buffer!");
-return ret;
+return -1;
}
/*
@@ -1945,7 +1946,7 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
trace_qemu_rdma_exchange_send_issue_callback();
ret = callback(rdma);
if (ret < 0) {
-return ret;
+return -1;
}
}
@@ -1954,7 +1955,7 @@ static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
resp->type, RDMA_WRID_DATA);
if (ret < 0) {
-return ret;
+return -1;
}
qemu_rdma_move_header(rdma, RDMA_WRID_DATA, resp);
@@ -1990,7 +1991,7 @@ static int qemu_rdma_exchange_recv(RDMAContext *rdma, RDMAControlHeader *head,
if (ret < 0) {
error_report("Failed to send control buffer!");
-return ret;
+return -1;
}
/*
@@ -2000,7 +2001,7 @@ static int qemu_rdma_exchange_recv(RDMAContext *rdma, RDMAControlHeader *head,
expecting, RDMA_WRID_READY);
if (ret < 0) {
-return ret;
+return -1;
}
qemu_rdma_move_header(rdma, RDMA_WRID_READY, head);
@@ -2011,7 +2012,7 @@ static int qemu_rdma_exchange_recv(RDMAContext *rdma, RDMAControlHeader *head,
ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
if (ret) {
error_report("rdma migration: error posting second control recv!");
-return ret;
+return -1;
}
return 0;
@@ -2084,7 +2085,7 @@ retry:
"block %d chunk %" PRIu64
" current %" PRIu64 " len %" PRIu64 " %d",
current_index, chunk, sge.addr, length, rdma->nb_sent);
-return ret;
+return -1;
}
}
@@ -2151,7 +2152,7 @@ retry:
ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
&resp, &reg_result_idx, NULL);
if (ret < 0) {
-return ret;
+return -1;
}
/* try to overlap this single registration with the one we sent. */
@@ -2225,7 +2226,7 @@ retry:
if (ret < 0) {
error_report("rdma migration: failed to make "
"room in full send queue!");
-return ret;
+return -1;
}
goto retry;
@@ -2276,7 +2277,7 @@ static int qemu_rdma_write_flush(RDMAContext *rdma)
rdma->current_index, rdma->current_addr, rdma->current_length);
if (ret < 0) {
-return ret;
+return -1;
}
if (ret == 0) {
@@ -2358,7 +2359,7 @@ static int qemu_rdma_write(RDMAContext *rdma,
if (!qemu_rdma_buffer_mergeable(rdma, current_addr, len)) {
ret = qemu_rdma_write_flush(rdma);
if (ret) {
-return ret;
+return -1;
}
rdma->current_length = 0;
rdma->current_addr = current_addr;
@@ -3524,7 +3525,7 @@ err_rdma_dest_wait:
rdma->error_state = ret;
qemu_rdma_cleanup(rdma);
g_free(rdma_return_path);
-return ret;
+return -1;
}
static int dest_ram_sort_func(const void *a, const void *b)