pc and misc cleanups and fixes, virtio optimizations

Included here:
 Refactoring and bugfix patches in PC/ACPI.
 New commands for ipmi.
 Virtio optimizations.
 
 Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQEcBAABAgAGBQJWtj8KAAoJECgfDbjSjVRpBIQIAJSB9xwTcBLXwD0+8z5lqjKC
 GTtuVbHU0+Y/eO8O3llN5l+SzaRtPHo18Ele20Oz7IQc0ompANY273K6TOlyILwB
 rOhrub71uqpOKbGlxXJflroEAXb78xVK02lohSUvOzCDpwV+6CS4ZaSer7yDCYkA
 MODZj7rrEuN0RmBWqxbs1R7Mj2CeQJzlgTUNTBGCLEstoZGFOJq8FjVdG5P1q8vI
 fnI9mGJ1JsDnmcUZe/bTFfB4VreqeQ7UuGyNAMMGnvIbr0D1a+CoaMdV7/HZ+KyT
 5TIs0siVdhZei60A/Cq2OtSVCbj5QdxPBLhZfwJCp6oU4lh2U5tSvva0mh7MwJ0=
 =D/cA
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging

pc and misc cleanups and fixes, virtio optimizations

Included here:
Refactoring and bugfix patches in PC/ACPI.
New commands for ipmi.
Virtio optimizations.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Sat 06 Feb 2016 18:44:26 GMT using RSA key ID D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>"

* remotes/mst/tags/for_upstream: (45 commits)
  net: set endianness on all backend devices
  fix MSI injection on Xen
  intel_iommu: large page support
  dimm: Correct type of MemoryHotplugState->base
  pc: set the OEM fields in the RSDT and the FADT from the SLIC
  acpi: add function to extract oem_id and oem_table_id from the user's SLIC
  acpi: expose oem_id and oem_table_id in build_rsdt()
  acpi: take oem_id in build_header(), optionally
  pc: Eliminate PcGuestInfo struct
  pc: Move APIC and NUMA data from PcGuestInfo to PCMachineState
  pc: Move PcGuestInfo.fw_cfg to PCMachineState
  pc: Remove PcGuestInfo.isapc_ram_fw field
  pc: Remove RAM size fields from PcGuestInfo
  pc: Remove compat fields from PcGuestInfo
  acpi: Don't save PcGuestInfo on AcpiBuildState
  acpi: Remove guest_info parameters from functions
  pc: Simplify xen_load_linux() signature
  pc: Simplify pc_memory_init() signature
  pc: Eliminate struct PcGuestInfoState
  pc: Move PcGuestInfo declaration to top of file
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
master
Peter Maydell 2016-02-08 11:25:31 +00:00
commit bdad0f3977
59 changed files with 1476 additions and 1122 deletions

View File

@ -1587,7 +1587,7 @@ static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
int read_count;
int64_t xattr_len;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
VirtQueueElement *elem = &v->elems[pdu->idx];
VirtQueueElement *elem = v->elems[pdu->idx];
xattr_len = fidp->fs.xattr.len;
read_count = xattr_len - off;

View File

@ -26,10 +26,12 @@ void virtio_9p_push_and_notify(V9fsPDU *pdu)
{
V9fsState *s = pdu->s;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
VirtQueueElement *elem = &v->elems[pdu->idx];
VirtQueueElement *elem = v->elems[pdu->idx];
/* push onto queue and notify */
virtqueue_push(v->vq, elem, pdu->size);
g_free(elem);
v->elems[pdu->idx] = NULL;
/* FIXME: we should batch these completions */
virtio_notify(VIRTIO_DEVICE(v), v->vq);
@ -48,10 +50,10 @@ static void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq)
uint8_t id;
uint16_t tag_le;
} QEMU_PACKED out;
VirtQueueElement *elem = &v->elems[pdu->idx];
VirtQueueElement *elem;
len = virtqueue_pop(vq, elem);
if (!len) {
elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
if (!elem) {
pdu_free(pdu);
break;
}
@ -59,6 +61,7 @@ static void handle_9p_output(VirtIODevice *vdev, VirtQueue *vq)
BUG_ON(elem->out_num == 0 || elem->in_num == 0);
QEMU_BUILD_BUG_ON(sizeof out != 7);
v->elems[pdu->idx] = elem;
len = iov_to_buf(elem->out_sg, elem->out_num, 0,
&out, sizeof out);
BUG_ON(len != sizeof out);
@ -141,7 +144,7 @@ ssize_t virtio_pdu_vmarshal(V9fsPDU *pdu, size_t offset,
{
V9fsState *s = pdu->s;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
VirtQueueElement *elem = &v->elems[pdu->idx];
VirtQueueElement *elem = v->elems[pdu->idx];
return v9fs_iov_vmarshal(elem->in_sg, elem->in_num, offset, 1, fmt, ap);
}
@ -151,7 +154,7 @@ ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
{
V9fsState *s = pdu->s;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
VirtQueueElement *elem = &v->elems[pdu->idx];
VirtQueueElement *elem = v->elems[pdu->idx];
return v9fs_iov_vunmarshal(elem->out_sg, elem->out_num, offset, 1, fmt, ap);
}
@ -161,7 +164,7 @@ void virtio_init_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
{
V9fsState *s = pdu->s;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
VirtQueueElement *elem = &v->elems[pdu->idx];
VirtQueueElement *elem = v->elems[pdu->idx];
if (is_write) {
*piov = elem->out_sg;

View File

@ -11,7 +11,7 @@ typedef struct V9fsVirtioState
VirtQueue *vq;
size_t config_size;
V9fsPDU pdus[MAX_REQ];
VirtQueueElement elems[MAX_REQ];
VirtQueueElement *elems[MAX_REQ];
V9fsState state;
} V9fsVirtioState;

View File

@ -1426,12 +1426,17 @@ Aml *aml_alias(const char *source_object, const char *alias_object)
void
build_header(GArray *linker, GArray *table_data,
AcpiTableHeader *h, const char *sig, int len, uint8_t rev,
const char *oem_table_id)
const char *oem_id, const char *oem_table_id)
{
memcpy(&h->signature, sig, 4);
h->length = cpu_to_le32(len);
h->revision = rev;
memcpy(h->oem_id, ACPI_BUILD_APPNAME6, 6);
if (oem_id) {
strncpy((char *)h->oem_id, oem_id, sizeof h->oem_id);
} else {
memcpy(h->oem_id, ACPI_BUILD_APPNAME6, 6);
}
if (oem_table_id) {
strncpy((char *)h->oem_table_id, oem_table_id, sizeof(h->oem_table_id));
@ -1487,7 +1492,8 @@ void acpi_build_tables_cleanup(AcpiBuildTables *tables, bool mfre)
/* Build rsdt table */
void
build_rsdt(GArray *table_data, GArray *linker, GArray *table_offsets)
build_rsdt(GArray *table_data, GArray *linker, GArray *table_offsets,
const char *oem_id, const char *oem_table_id)
{
AcpiRsdtDescriptorRev1 *rsdt;
size_t rsdt_len;
@ -1506,5 +1512,5 @@ build_rsdt(GArray *table_data, GArray *linker, GArray *table_offsets)
sizeof(uint32_t));
}
build_header(linker, table_data,
(void *)rsdt, "RSDT", rsdt_len, 1, NULL);
(void *)rsdt, "RSDT", rsdt_len, 1, oem_id, oem_table_id);
}

View File

@ -350,6 +350,22 @@ uint8_t *acpi_table_next(uint8_t *current)
}
}
int acpi_get_slic_oem(AcpiSlicOem *oem)
{
uint8_t *u;
for (u = acpi_table_first(); u; u = acpi_table_next(u)) {
struct acpi_table_header *hdr = (void *)(u - sizeof(hdr->_length));
if (memcmp(hdr->sig, "SLIC", 4) == 0) {
oem->id = hdr->oem_id;
oem->table_id = hdr->oem_table_id;
return 0;
}
}
return -1;
}
static void acpi_notify_wakeup(Notifier *notifier, void *data)
{
ACPIREGS *ar = container_of(notifier, ACPIREGS, wakeup);

View File

@ -366,7 +366,7 @@ static void nvdimm_build_nfit(GSList *device_list, GArray *table_offsets,
build_header(linker, table_data,
(void *)(table_data->data + header), "NFIT",
sizeof(NvdimmNfitHeader) + structures->len, 1, NULL);
sizeof(NvdimmNfitHeader) + structures->len, 1, NULL, NULL);
g_array_free(structures, true);
}
@ -471,7 +471,7 @@ static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len);
build_header(linker, table_data,
(void *)(table_data->data + table_data->len - ssdt->buf->len),
"SSDT", ssdt->buf->len, 1, "NVDIMM");
"SSDT", ssdt->buf->len, 1, NULL, "NVDIMM");
free_aml_allocator();
}

View File

@ -394,7 +394,7 @@ build_spcr(GArray *table_data, GArray *linker, VirtGuestInfo *guest_info)
spcr->pci_vendor_id = 0xffff; /* PCI Vendor ID: not a PCI device */
build_header(linker, table_data, (void *)spcr, "SPCR", sizeof(*spcr), 2,
NULL);
NULL, NULL);
}
static void
@ -413,7 +413,7 @@ build_mcfg(GArray *table_data, GArray *linker, VirtGuestInfo *guest_info)
mcfg->allocation[0].end_bus_number = (memmap[VIRT_PCIE_ECAM].size
/ PCIE_MMCFG_SIZE_MIN) - 1;
build_header(linker, table_data, (void *)mcfg, "MCFG", len, 1, NULL);
build_header(linker, table_data, (void *)mcfg, "MCFG", len, 1, NULL, NULL);
}
/* GTDT */
@ -439,7 +439,7 @@ build_gtdt(GArray *table_data, GArray *linker)
build_header(linker, table_data,
(void *)(table_data->data + gtdt_start), "GTDT",
table_data->len - gtdt_start, 2, NULL);
table_data->len - gtdt_start, 2, NULL, NULL);
}
/* MADT */
@ -498,7 +498,7 @@ build_madt(GArray *table_data, GArray *linker, VirtGuestInfo *guest_info)
build_header(linker, table_data,
(void *)(table_data->data + madt_start), "APIC",
table_data->len - madt_start, 3, NULL);
table_data->len - madt_start, 3, NULL, NULL);
}
/* FADT */
@ -523,7 +523,7 @@ build_fadt(GArray *table_data, GArray *linker, unsigned dsdt)
sizeof fadt->dsdt);
build_header(linker, table_data,
(void *)fadt, "FACP", sizeof(*fadt), 5, NULL);
(void *)fadt, "FACP", sizeof(*fadt), 5, NULL, NULL);
}
/* DSDT */
@ -562,7 +562,7 @@ build_dsdt(GArray *table_data, GArray *linker, VirtGuestInfo *guest_info)
g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len);
build_header(linker, table_data,
(void *)(table_data->data + table_data->len - dsdt->buf->len),
"DSDT", dsdt->buf->len, 2, NULL);
"DSDT", dsdt->buf->len, 2, NULL, NULL);
free_aml_allocator();
}
@ -623,7 +623,7 @@ void virt_acpi_build(VirtGuestInfo *guest_info, AcpiBuildTables *tables)
/* RSDT is pointed to by RSDP */
rsdt = tables_blob->len;
build_rsdt(tables_blob, tables->linker, table_offsets);
build_rsdt(tables_blob, tables->linker, table_offsets, NULL, NULL);
/* RSDP is in FSEG memory, so allocate it separately */
build_rsdp(tables->rsdp, tables->linker, rsdt);

View File

@ -100,20 +100,19 @@ static void handle_notify(EventNotifier *e)
blk_io_plug(s->conf->conf.blk);
for (;;) {
MultiReqBuffer mrb = {};
int ret;
/* Disable guest->host notifies to avoid unnecessary vmexits */
vring_disable_notification(s->vdev, &s->vring);
for (;;) {
VirtIOBlockReq *req = virtio_blk_alloc_request(vblk);
VirtIOBlockReq *req = vring_pop(s->vdev, &s->vring,
sizeof(VirtIOBlockReq));
ret = vring_pop(s->vdev, &s->vring, &req->elem);
if (ret < 0) {
virtio_blk_free_request(req);
if (req == NULL) {
break; /* no more requests */
}
virtio_blk_init_request(vblk, req);
trace_virtio_blk_data_plane_process_request(s, req->elem.out_num,
req->elem.in_num,
req->elem.index);
@ -125,7 +124,7 @@ static void handle_notify(EventNotifier *e)
virtio_blk_submit_multireq(s->conf->conf.blk, &mrb);
}
if (likely(ret == -EAGAIN)) { /* vring emptied */
if (likely(!vring_more_avail(s->vdev, &s->vring))) { /* vring emptied */
/* Re-enable guest->host notifies and stop processing the vring.
* But if the guest has snuck in more descriptors, keep processing.
*/

View File

@ -29,15 +29,13 @@
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
void virtio_blk_init_request(VirtIOBlock *s, VirtIOBlockReq *req)
{
VirtIOBlockReq *req = g_new(VirtIOBlockReq, 1);
req->dev = s;
req->qiov.size = 0;
req->in_len = 0;
req->next = NULL;
req->mr_next = NULL;
return req;
}
void virtio_blk_free_request(VirtIOBlockReq *req)
@ -193,13 +191,11 @@ out:
static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
{
VirtIOBlockReq *req = virtio_blk_alloc_request(s);
VirtIOBlockReq *req = virtqueue_pop(s->vq, sizeof(VirtIOBlockReq));
if (!virtqueue_pop(s->vq, &req->elem)) {
virtio_blk_free_request(req);
return NULL;
if (req) {
virtio_blk_init_request(s, req);
}
return req;
}
@ -812,8 +808,7 @@ static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
while (req) {
qemu_put_sbyte(f, 1);
qemu_put_buffer(f, (unsigned char *)&req->elem,
sizeof(VirtQueueElement));
qemu_put_virtqueue_element(f, &req->elem);
req = req->next;
}
qemu_put_sbyte(f, 0);
@ -836,13 +831,11 @@ static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
VirtIOBlock *s = VIRTIO_BLK(vdev);
while (qemu_get_sbyte(f)) {
VirtIOBlockReq *req = virtio_blk_alloc_request(s);
qemu_get_buffer(f, (unsigned char *)&req->elem,
sizeof(VirtQueueElement));
VirtIOBlockReq *req;
req = qemu_get_virtqueue_element(f, sizeof(VirtIOBlockReq));
virtio_blk_init_request(s, req);
req->next = s->rq;
s->rq = req;
virtqueue_map(&req->elem);
}
return 0;

View File

@ -83,7 +83,7 @@ static bool use_multiport(VirtIOSerial *vser)
static size_t write_to_port(VirtIOSerialPort *port,
const uint8_t *buf, size_t size)
{
VirtQueueElement elem;
VirtQueueElement *elem;
VirtQueue *vq;
size_t offset;
@ -96,15 +96,17 @@ static size_t write_to_port(VirtIOSerialPort *port,
while (offset < size) {
size_t len;
if (!virtqueue_pop(vq, &elem)) {
elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
if (!elem) {
break;
}
len = iov_from_buf(elem.in_sg, elem.in_num, 0,
len = iov_from_buf(elem->in_sg, elem->in_num, 0,
buf + offset, size - offset);
offset += len;
virtqueue_push(vq, &elem, len);
virtqueue_push(vq, elem, len);
g_free(elem);
}
virtio_notify(VIRTIO_DEVICE(port->vser), vq);
@ -113,13 +115,18 @@ static size_t write_to_port(VirtIOSerialPort *port,
static void discard_vq_data(VirtQueue *vq, VirtIODevice *vdev)
{
VirtQueueElement elem;
VirtQueueElement *elem;
if (!virtio_queue_ready(vq)) {
return;
}
while (virtqueue_pop(vq, &elem)) {
virtqueue_push(vq, &elem, 0);
for (;;) {
elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
if (!elem) {
break;
}
virtqueue_push(vq, elem, 0);
g_free(elem);
}
virtio_notify(vdev, vq);
}
@ -138,21 +145,22 @@ static void do_flush_queued_data(VirtIOSerialPort *port, VirtQueue *vq,
unsigned int i;
/* Pop an elem only if we haven't left off a previous one mid-way */
if (!port->elem.out_num) {
if (!virtqueue_pop(vq, &port->elem)) {
if (!port->elem) {
port->elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
if (!port->elem) {
break;
}
port->iov_idx = 0;
port->iov_offset = 0;
}
for (i = port->iov_idx; i < port->elem.out_num; i++) {
for (i = port->iov_idx; i < port->elem->out_num; i++) {
size_t buf_size;
ssize_t ret;
buf_size = port->elem.out_sg[i].iov_len - port->iov_offset;
buf_size = port->elem->out_sg[i].iov_len - port->iov_offset;
ret = vsc->have_data(port,
port->elem.out_sg[i].iov_base
port->elem->out_sg[i].iov_base
+ port->iov_offset,
buf_size);
if (port->throttled) {
@ -167,8 +175,9 @@ static void do_flush_queued_data(VirtIOSerialPort *port, VirtQueue *vq,
if (port->throttled) {
break;
}
virtqueue_push(vq, &port->elem, 0);
port->elem.out_num = 0;
virtqueue_push(vq, port->elem, 0);
g_free(port->elem);
port->elem = NULL;
}
virtio_notify(vdev, vq);
}
@ -185,22 +194,26 @@ static void flush_queued_data(VirtIOSerialPort *port)
static size_t send_control_msg(VirtIOSerial *vser, void *buf, size_t len)
{
VirtQueueElement elem;
VirtQueueElement *elem;
VirtQueue *vq;
vq = vser->c_ivq;
if (!virtio_queue_ready(vq)) {
return 0;
}
if (!virtqueue_pop(vq, &elem)) {
elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
if (!elem) {
return 0;
}
/* TODO: detect a buffer that's too short, set NEEDS_RESET */
iov_from_buf(elem.in_sg, elem.in_num, 0, buf, len);
iov_from_buf(elem->in_sg, elem->in_num, 0, buf, len);
virtqueue_push(vq, &elem, len);
virtqueue_push(vq, elem, len);
virtio_notify(VIRTIO_DEVICE(vser), vq);
g_free(elem);
return len;
}
@ -414,7 +427,7 @@ static void control_in(VirtIODevice *vdev, VirtQueue *vq)
static void control_out(VirtIODevice *vdev, VirtQueue *vq)
{
VirtQueueElement elem;
VirtQueueElement *elem;
VirtIOSerial *vser;
uint8_t *buf;
size_t len;
@ -423,10 +436,15 @@ static void control_out(VirtIODevice *vdev, VirtQueue *vq)
len = 0;
buf = NULL;
while (virtqueue_pop(vq, &elem)) {
for (;;) {
size_t cur_len;
cur_len = iov_size(elem.out_sg, elem.out_num);
elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
if (!elem) {
break;
}
cur_len = iov_size(elem->out_sg, elem->out_num);
/*
* Allocate a new buf only if we didn't have one previously or
* if the size of the buf differs
@ -437,10 +455,11 @@ static void control_out(VirtIODevice *vdev, VirtQueue *vq)
buf = g_malloc(cur_len);
len = cur_len;
}
iov_to_buf(elem.out_sg, elem.out_num, 0, buf, cur_len);
iov_to_buf(elem->out_sg, elem->out_num, 0, buf, cur_len);
handle_control_message(vser, buf, cur_len);
virtqueue_push(vq, &elem, 0);
virtqueue_push(vq, elem, 0);
g_free(elem);
}
g_free(buf);
virtio_notify(vdev, vq);
@ -620,16 +639,14 @@ static void virtio_serial_save_device(VirtIODevice *vdev, QEMUFile *f)
qemu_put_byte(f, port->host_connected);
elem_popped = 0;
if (port->elem.out_num) {
if (port->elem) {
elem_popped = 1;
}
qemu_put_be32s(f, &elem_popped);
if (elem_popped) {
qemu_put_be32s(f, &port->iov_idx);
qemu_put_be64s(f, &port->iov_offset);
qemu_put_buffer(f, (unsigned char *)&port->elem,
sizeof(port->elem));
qemu_put_virtqueue_element(f, port->elem);
}
}
}
@ -704,9 +721,8 @@ static int fetch_active_ports_list(QEMUFile *f, int version_id,
qemu_get_be32s(f, &port->iov_idx);
qemu_get_be64s(f, &port->iov_offset);
qemu_get_buffer(f, (unsigned char *)&port->elem,
sizeof(port->elem));
virtqueue_map(&port->elem);
port->elem =
qemu_get_virtqueue_element(f, sizeof(VirtQueueElement));
/*
* Port was throttled on source machine. Let's
@ -928,7 +944,7 @@ static void virtser_port_device_realize(DeviceState *dev, Error **errp)
return;
}
port->elem.out_num = 0;
port->elem = NULL;
}
static void virtser_port_device_plug(HotplugHandler *hotplug_dev,

View File

@ -804,16 +804,15 @@ static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
}
#endif
cmd = g_new(struct virtio_gpu_ctrl_command, 1);
while (virtqueue_pop(vq, &cmd->elem)) {
cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
while (cmd) {
cmd->vq = vq;
cmd->error = 0;
cmd->finished = false;
cmd->waiting = false;
QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
cmd = g_new(struct virtio_gpu_ctrl_command, 1);
cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
}
g_free(cmd);
virtio_gpu_process_cmdq(g);
@ -833,15 +832,20 @@ static void virtio_gpu_ctrl_bh(void *opaque)
static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOGPU *g = VIRTIO_GPU(vdev);
VirtQueueElement elem;
VirtQueueElement *elem;
size_t s;
struct virtio_gpu_update_cursor cursor_info;
if (!virtio_queue_ready(vq)) {
return;
}
while (virtqueue_pop(vq, &elem)) {
s = iov_to_buf(elem.out_sg, elem.out_num, 0,
for (;;) {
elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
if (!elem) {
break;
}
s = iov_to_buf(elem->out_sg, elem->out_num, 0,
&cursor_info, sizeof(cursor_info));
if (s != sizeof(cursor_info)) {
qemu_log_mask(LOG_GUEST_ERROR,
@ -850,8 +854,9 @@ static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
} else {
update_cursor(g, &cursor_info);
}
virtqueue_push(vq, &elem, 0);
virtqueue_push(vq, elem, 0);
virtio_notify(vdev, vq);
g_free(elem);
}
}

File diff suppressed because it is too large Load Diff

View File

@ -4,6 +4,6 @@
#include "qemu/typedefs.h"
void acpi_setup(PcGuestInfo *);
void acpi_setup(void);
#endif

View File

@ -153,14 +153,27 @@ static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value,
return entry->domain_id == domain_id;
}
/* The shift of an addr for a certain level of paging structure */
static inline uint32_t vtd_slpt_level_shift(uint32_t level)
{
return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS;
}
static inline uint64_t vtd_slpt_level_page_mask(uint32_t level)
{
return ~((1ULL << vtd_slpt_level_shift(level)) - 1);
}
static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
gpointer user_data)
{
VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
uint64_t gfn = info->gfn & info->mask;
uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask;
uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K;
return (entry->domain_id == info->domain_id) &&
((entry->gfn & info->mask) == gfn);
(((entry->gfn & info->mask) == gfn) ||
(entry->gfn == gfn_tlb));
}
/* Reset all the gen of VTDAddressSpace to zero and set the gen of
@ -194,24 +207,46 @@ static void vtd_reset_iotlb(IntelIOMMUState *s)
g_hash_table_remove_all(s->iotlb);
}
static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint8_t source_id,
uint32_t level)
{
return gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT) |
((uint64_t)(level) << VTD_IOTLB_LVL_SHIFT);
}
static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level)
{
return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K;
}
static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id,
hwaddr addr)
{
VTDIOTLBEntry *entry;
uint64_t key;
int level;
key = (addr >> VTD_PAGE_SHIFT_4K) |
((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT);
return g_hash_table_lookup(s->iotlb, &key);
for (level = VTD_SL_PT_LEVEL; level < VTD_SL_PML4_LEVEL; level++) {
key = vtd_get_iotlb_key(vtd_get_iotlb_gfn(addr, level),
source_id, level);
entry = g_hash_table_lookup(s->iotlb, &key);
if (entry) {
goto out;
}
}
out:
return entry;
}
static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
uint16_t domain_id, hwaddr addr, uint64_t slpte,
bool read_flags, bool write_flags)
bool read_flags, bool write_flags,
uint32_t level)
{
VTDIOTLBEntry *entry = g_malloc(sizeof(*entry));
uint64_t *key = g_malloc(sizeof(*key));
uint64_t gfn = addr >> VTD_PAGE_SHIFT_4K;
uint64_t gfn = vtd_get_iotlb_gfn(addr, level);
VTD_DPRINTF(CACHE, "update iotlb sid 0x%"PRIx16 " gpa 0x%"PRIx64
" slpte 0x%"PRIx64 " did 0x%"PRIx16, source_id, addr, slpte,
@ -226,7 +261,8 @@ static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
entry->slpte = slpte;
entry->read_flags = read_flags;
entry->write_flags = write_flags;
*key = gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT);
entry->mask = vtd_slpt_level_page_mask(level);
*key = vtd_get_iotlb_key(gfn, source_id, level);
g_hash_table_replace(s->iotlb, key, entry);
}
@ -501,12 +537,6 @@ static inline dma_addr_t vtd_get_slpt_base_from_context(VTDContextEntry *ce)
return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR;
}
/* The shift of an addr for a certain level of paging structure */
static inline uint32_t vtd_slpt_level_shift(uint32_t level)
{
return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS;
}
static inline uint64_t vtd_get_slpte_addr(uint64_t slpte)
{
return slpte & VTD_SL_PT_BASE_ADDR_MASK;
@ -762,7 +792,7 @@ static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
VTDContextEntry ce;
uint8_t bus_num = pci_bus_num(bus);
VTDContextCacheEntry *cc_entry = &vtd_as->context_cache_entry;
uint64_t slpte;
uint64_t slpte, page_mask;
uint32_t level;
uint16_t source_id = vtd_make_source_id(bus_num, devfn);
int ret_fr;
@ -802,6 +832,7 @@ static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
slpte = iotlb_entry->slpte;
reads = iotlb_entry->read_flags;
writes = iotlb_entry->write_flags;
page_mask = iotlb_entry->mask;
goto out;
}
/* Try to fetch context-entry from cache first */
@ -848,12 +879,13 @@ static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
return;
}
page_mask = vtd_slpt_level_page_mask(level);
vtd_update_iotlb(s, source_id, VTD_CONTEXT_ENTRY_DID(ce.hi), addr, slpte,
reads, writes);
reads, writes, level);
out:
entry->iova = addr & VTD_PAGE_MASK_4K;
entry->translated_addr = vtd_get_slpte_addr(slpte) & VTD_PAGE_MASK_4K;
entry->addr_mask = ~VTD_PAGE_MASK_4K;
entry->iova = addr & page_mask;
entry->translated_addr = vtd_get_slpte_addr(slpte) & page_mask;
entry->addr_mask = ~page_mask;
entry->perm = (writes ? 2 : 0) + (reads ? 1 : 0);
}
@ -991,7 +1023,7 @@ static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
assert(am <= VTD_MAMV);
info.domain_id = domain_id;
info.gfn = addr >> VTD_PAGE_SHIFT_4K;
info.addr = addr;
info.mask = ~((1 << am) - 1);
g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info);
}
@ -1917,7 +1949,7 @@ static void vtd_init(IntelIOMMUState *s)
s->iq_last_desc_type = VTD_INV_DESC_NONE;
s->next_frcd_reg = 0;
s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | VTD_CAP_MGAW |
VTD_CAP_SAGAW | VTD_CAP_MAMV | VTD_CAP_PSI;
VTD_CAP_SAGAW | VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS;
s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO;
vtd_reset_context_cache(s);

View File

@ -113,6 +113,7 @@
/* The shift of source_id in the key of IOTLB hash table */
#define VTD_IOTLB_SID_SHIFT 36
#define VTD_IOTLB_LVL_SHIFT 44
#define VTD_IOTLB_MAX_SIZE 1024 /* Max size of the hash table */
/* IOTLB_REG */
@ -185,9 +186,10 @@
#define VTD_CAP_ND (((VTD_DOMAIN_ID_SHIFT - 4) / 2) & 7ULL)
#define VTD_MGAW 39 /* Maximum Guest Address Width */
#define VTD_CAP_MGAW (((VTD_MGAW - 1) & 0x3fULL) << 16)
#define VTD_MAMV 9ULL
#define VTD_MAMV 18ULL
#define VTD_CAP_MAMV (VTD_MAMV << 48)
#define VTD_CAP_PSI (1ULL << 39)
#define VTD_CAP_SLLPS ((1ULL << 34) | (1ULL << 35))
/* Supported Adjusted Guest Address Widths */
#define VTD_CAP_SAGAW_SHIFT 8
@ -320,7 +322,7 @@ typedef struct VTDInvDesc VTDInvDesc;
/* Information about page-selective IOTLB invalidate */
struct VTDIOTLBPageInvInfo {
uint16_t domain_id;
uint64_t gfn;
uint64_t addr;
uint8_t mask;
};
typedef struct VTDIOTLBPageInvInfo VTDIOTLBPageInvInfo;

View File

@ -1156,18 +1156,12 @@ typedef struct PcRomPciInfo {
uint64_t w64_max;
} PcRomPciInfo;
typedef struct PcGuestInfoState {
PcGuestInfo info;
Notifier machine_done;
} PcGuestInfoState;
static
void pc_guest_info_machine_done(Notifier *notifier, void *data)
void pc_machine_done(Notifier *notifier, void *data)
{
PcGuestInfoState *guest_info_state = container_of(notifier,
PcGuestInfoState,
machine_done);
PCIBus *bus = PC_MACHINE(qdev_get_machine())->bus;
PCMachineState *pcms = container_of(notifier,
PCMachineState, machine_done);
PCIBus *bus = pcms->bus;
if (bus) {
int extra_hosts = 0;
@ -1178,51 +1172,46 @@ void pc_guest_info_machine_done(Notifier *notifier, void *data)
extra_hosts++;
}
}
if (extra_hosts && guest_info_state->info.fw_cfg) {
if (extra_hosts && pcms->fw_cfg) {
uint64_t *val = g_malloc(sizeof(*val));
*val = cpu_to_le64(extra_hosts);
fw_cfg_add_file(guest_info_state->info.fw_cfg,
fw_cfg_add_file(pcms->fw_cfg,
"etc/extra-pci-roots", val, sizeof(*val));
}
}
acpi_setup(&guest_info_state->info);
acpi_setup();
}
PcGuestInfo *pc_guest_info_init(PCMachineState *pcms)
void pc_guest_info_init(PCMachineState *pcms)
{
PcGuestInfoState *guest_info_state = g_malloc0(sizeof *guest_info_state);
PcGuestInfo *guest_info = &guest_info_state->info;
int i, j;
guest_info->ram_size_below_4g = pcms->below_4g_mem_size;
guest_info->ram_size = pcms->below_4g_mem_size + pcms->above_4g_mem_size;
guest_info->apic_id_limit = pc_apic_id_limit(max_cpus);
guest_info->apic_xrupt_override = kvm_allows_irq0_override();
guest_info->numa_nodes = nb_numa_nodes;
guest_info->node_mem = g_malloc0(guest_info->numa_nodes *
sizeof *guest_info->node_mem);
pcms->apic_id_limit = pc_apic_id_limit(max_cpus);
pcms->apic_xrupt_override = kvm_allows_irq0_override();
pcms->numa_nodes = nb_numa_nodes;
pcms->node_mem = g_malloc0(pcms->numa_nodes *
sizeof *pcms->node_mem);
for (i = 0; i < nb_numa_nodes; i++) {
guest_info->node_mem[i] = numa_info[i].node_mem;
pcms->node_mem[i] = numa_info[i].node_mem;
}
guest_info->node_cpu = g_malloc0(guest_info->apic_id_limit *
sizeof *guest_info->node_cpu);
pcms->node_cpu = g_malloc0(pcms->apic_id_limit *
sizeof *pcms->node_cpu);
for (i = 0; i < max_cpus; i++) {
unsigned int apic_id = x86_cpu_apic_id_from_index(i);
assert(apic_id < guest_info->apic_id_limit);
assert(apic_id < pcms->apic_id_limit);
for (j = 0; j < nb_numa_nodes; j++) {
if (test_bit(i, numa_info[j].node_cpu)) {
guest_info->node_cpu[apic_id] = j;
pcms->node_cpu[apic_id] = j;
break;
}
}
}
guest_info_state->machine_done.notify = pc_guest_info_machine_done;
qemu_add_machine_init_done_notifier(&guest_info_state->machine_done);
return guest_info;
pcms->machine_done.notify = pc_machine_done;
qemu_add_machine_init_done_notifier(&pcms->machine_done);
}
/* setup pci memory address space mapping into system address space */
@ -1262,8 +1251,7 @@ void pc_acpi_init(const char *default_dsdt)
}
}
FWCfgState *xen_load_linux(PCMachineState *pcms,
PcGuestInfo *guest_info)
void xen_load_linux(PCMachineState *pcms)
{
int i;
FWCfgState *fw_cfg;
@ -1279,15 +1267,13 @@ FWCfgState *xen_load_linux(PCMachineState *pcms,
!strcmp(option_rom[i].name, "multiboot.bin"));
rom_add_option(option_rom[i].name, option_rom[i].bootindex);
}
guest_info->fw_cfg = fw_cfg;
return fw_cfg;
pcms->fw_cfg = fw_cfg;
}
FWCfgState *pc_memory_init(PCMachineState *pcms,
MemoryRegion *system_memory,
MemoryRegion *rom_memory,
MemoryRegion **ram_memory,
PcGuestInfo *guest_info)
void pc_memory_init(PCMachineState *pcms,
MemoryRegion *system_memory,
MemoryRegion *rom_memory,
MemoryRegion **ram_memory)
{
int linux_boot, i;
MemoryRegion *ram, *option_rom_mr;
@ -1324,7 +1310,7 @@ FWCfgState *pc_memory_init(PCMachineState *pcms,
e820_add_entry(0x100000000ULL, pcms->above_4g_mem_size, E820_RAM);
}
if (!guest_info->has_reserved_memory &&
if (!pcmc->has_reserved_memory &&
(machine->ram_slots ||
(machine->maxram_size > machine->ram_size))) {
MachineClass *mc = MACHINE_GET_CLASS(machine);
@ -1335,7 +1321,7 @@ FWCfgState *pc_memory_init(PCMachineState *pcms,
}
/* initialize hotplug memory address space */
if (guest_info->has_reserved_memory &&
if (pcmc->has_reserved_memory &&
(machine->ram_size < machine->maxram_size)) {
ram_addr_t hotplug_mem_size =
machine->maxram_size - machine->ram_size;
@ -1375,7 +1361,7 @@ FWCfgState *pc_memory_init(PCMachineState *pcms,
}
/* Initialize PC system firmware */
pc_system_firmware_init(rom_memory, guest_info->isapc_ram_fw);
pc_system_firmware_init(rom_memory, !pcmc->pci_enabled);
option_rom_mr = g_malloc(sizeof(*option_rom_mr));
memory_region_init_ram(option_rom_mr, NULL, "pc.rom", PC_ROM_SIZE,
@ -1390,7 +1376,7 @@ FWCfgState *pc_memory_init(PCMachineState *pcms,
rom_set_fw(fw_cfg);
if (guest_info->has_reserved_memory && pcms->hotplug_memory.base) {
if (pcmc->has_reserved_memory && pcms->hotplug_memory.base) {
uint64_t *val = g_malloc(sizeof(*val));
PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
uint64_t res_mem_end = pcms->hotplug_memory.base;
@ -1409,8 +1395,7 @@ FWCfgState *pc_memory_init(PCMachineState *pcms,
for (i = 0; i < nb_option_roms; i++) {
rom_add_option(option_rom[i].name, option_rom[i].bootindex);
}
guest_info->fw_cfg = fw_cfg;
return fw_cfg;
pcms->fw_cfg = fw_cfg;
}
qemu_irq pc_allocate_cpu_irq(void)

View File

@ -85,7 +85,6 @@ static void pc_init1(MachineState *machine,
MemoryRegion *ram_memory;
MemoryRegion *pci_memory;
MemoryRegion *rom_memory;
PcGuestInfo *guest_info;
ram_addr_t lowmem;
/* Check whether RAM fits below 4G (leaving 1/2 GByte for IO memory).
@ -141,14 +140,7 @@ static void pc_init1(MachineState *machine,
rom_memory = system_memory;
}
guest_info = pc_guest_info_init(pcms);
guest_info->has_acpi_build = pcmc->has_acpi_build;
guest_info->legacy_acpi_table_size = pcmc->legacy_acpi_table_size;
guest_info->isapc_ram_fw = !pcmc->pci_enabled;
guest_info->has_reserved_memory = pcmc->has_reserved_memory;
guest_info->rsdp_in_ram = pcmc->rsdp_in_ram;
pc_guest_info_init(pcms);
if (pcmc->smbios_defaults) {
MachineClass *mc = MACHINE_GET_CLASS(machine);
@ -162,10 +154,10 @@ static void pc_init1(MachineState *machine,
/* allocate ram and load rom/bios */
if (!xen_enabled()) {
pc_memory_init(pcms, system_memory,
rom_memory, &ram_memory, guest_info);
rom_memory, &ram_memory);
} else if (machine->kernel_filename != NULL) {
/* For xen HVM direct kernel boot, load linux here */
xen_load_linux(pcms, guest_info);
xen_load_linux(pcms);
}
gsi_state = g_malloc0(sizeof(*gsi_state));

View File

@ -71,7 +71,6 @@ static void pc_q35_init(MachineState *machine)
int i;
ICH9LPCState *ich9_lpc;
PCIDevice *ahci;
PcGuestInfo *guest_info;
ram_addr_t lowmem;
DriveInfo *hd[MAX_SATA_PORTS];
MachineClass *mc = MACHINE_GET_CLASS(machine);
@ -134,16 +133,7 @@ static void pc_q35_init(MachineState *machine)
rom_memory = get_system_memory();
}
guest_info = pc_guest_info_init(pcms);
guest_info->isapc_ram_fw = false;
guest_info->has_acpi_build = pcmc->has_acpi_build;
guest_info->has_reserved_memory = pcmc->has_reserved_memory;
guest_info->rsdp_in_ram = pcmc->rsdp_in_ram;
/* Migration was not supported in 2.0 for Q35, so do not bother
* with this hack (see hw/i386/acpi-build.c).
*/
guest_info->legacy_acpi_table_size = 0;
pc_guest_info_init(pcms);
if (pcmc->smbios_defaults) {
/* These values are guest ABI, do not change */
@ -156,7 +146,7 @@ static void pc_q35_init(MachineState *machine)
/* allocate ram and load rom/bios */
if (!xen_enabled()) {
pc_memory_init(pcms, get_system_memory(),
rom_memory, &ram_memory, guest_info);
rom_memory, &ram_memory);
}
/* irq lines */

View File

@ -17,7 +17,7 @@
void virtio_input_send(VirtIOInput *vinput, virtio_input_event *event)
{
VirtQueueElement elem;
VirtQueueElement *elem;
unsigned have, need;
int i, len;
@ -50,14 +50,16 @@ void virtio_input_send(VirtIOInput *vinput, virtio_input_event *event)
/* ... and finally pass them to the guest */
for (i = 0; i < vinput->qindex; i++) {
if (!virtqueue_pop(vinput->evt, &elem)) {
elem = virtqueue_pop(vinput->evt, sizeof(VirtQueueElement));
if (!elem) {
/* should not happen, we've checked for space beforehand */
fprintf(stderr, "%s: Huh? No vq elem available ...\n", __func__);
return;
}
len = iov_from_buf(elem.in_sg, elem.in_num,
len = iov_from_buf(elem->in_sg, elem->in_num,
0, vinput->queue+i, sizeof(virtio_input_event));
virtqueue_push(vinput->evt, &elem, len);
virtqueue_push(vinput->evt, elem, len);
g_free(elem);
}
virtio_notify(VIRTIO_DEVICE(vinput), vinput->evt);
vinput->qindex = 0;
@ -73,17 +75,23 @@ static void virtio_input_handle_sts(VirtIODevice *vdev, VirtQueue *vq)
VirtIOInputClass *vic = VIRTIO_INPUT_GET_CLASS(vdev);
VirtIOInput *vinput = VIRTIO_INPUT(vdev);
virtio_input_event event;
VirtQueueElement elem;
VirtQueueElement *elem;
int len;
while (virtqueue_pop(vinput->sts, &elem)) {
for (;;) {
elem = virtqueue_pop(vinput->sts, sizeof(VirtQueueElement));
if (!elem) {
break;
}
memset(&event, 0, sizeof(event));
len = iov_to_buf(elem.out_sg, elem.out_num,
len = iov_to_buf(elem->out_sg, elem->out_num,
0, &event, sizeof(event));
if (vic->handle_status) {
vic->handle_status(vinput, &event);
}
virtqueue_push(vinput->sts, &elem, len);
virtqueue_push(vinput->sts, elem, len);
g_free(elem);
}
virtio_notify(vdev, vinput->sts);
}

View File

@ -23,32 +23,36 @@
*/
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "hw/ipmi/ipmi.h"
#include "qemu/error-report.h"
#define IPMI_NETFN_CHASSIS 0x00
#define IPMI_NETFN_CHASSIS_MAXCMD 0x03
#define IPMI_CMD_GET_CHASSIS_CAPABILITIES 0x00
#define IPMI_CMD_GET_CHASSIS_STATUS 0x01
#define IPMI_CMD_CHASSIS_CONTROL 0x02
#define IPMI_CMD_GET_SYS_RESTART_CAUSE 0x09
#define IPMI_NETFN_SENSOR_EVENT 0x04
#define IPMI_NETFN_SENSOR_EVENT_MAXCMD 0x2e
#define IPMI_CMD_SET_SENSOR_EVT_ENABLE 0x28
#define IPMI_CMD_GET_SENSOR_EVT_ENABLE 0x29
#define IPMI_CMD_REARM_SENSOR_EVTS 0x2a
#define IPMI_CMD_GET_SENSOR_EVT_STATUS 0x2b
#define IPMI_CMD_GET_SENSOR_READING 0x2d
#define IPMI_CMD_SET_SENSOR_TYPE 0x2e
#define IPMI_CMD_GET_SENSOR_TYPE 0x2f
/* #define IPMI_NETFN_APP 0x06 In ipmi.h */
#define IPMI_NETFN_APP_MAXCMD 0x36
#define IPMI_CMD_GET_DEVICE_ID 0x01
#define IPMI_CMD_COLD_RESET 0x02
#define IPMI_CMD_WARM_RESET 0x03
#define IPMI_CMD_SET_ACPI_POWER_STATE 0x06
#define IPMI_CMD_GET_ACPI_POWER_STATE 0x07
#define IPMI_CMD_GET_DEVICE_GUID 0x08
#define IPMI_CMD_RESET_WATCHDOG_TIMER 0x22
#define IPMI_CMD_SET_WATCHDOG_TIMER 0x24
#define IPMI_CMD_GET_WATCHDOG_TIMER 0x25
@ -61,7 +65,6 @@
#define IPMI_CMD_READ_EVT_MSG_BUF 0x35
#define IPMI_NETFN_STORAGE 0x0a
#define IPMI_NETFN_STORAGE_MAXCMD 0x4a
#define IPMI_CMD_GET_SDR_REP_INFO 0x20
#define IPMI_CMD_GET_SDR_REP_ALLOC_INFO 0x21
@ -197,6 +200,11 @@ struct IPMIBmcSim {
uint8_t mfg_id[3];
uint8_t product_id[2];
uint8_t restart_cause;
uint8_t acpi_power_state[2];
uint8_t uuid[16];
IPMISel sel;
IPMISdr sdr;
IPMISensor sensors[MAX_SENSORS];
@ -256,7 +264,7 @@ struct IPMIBmcSim {
do { \
if (*rsp_len >= max_rsp_len) { \
rsp[2] = IPMI_CC_REQUEST_DATA_TRUNCATED; \
goto out; \
return; \
} \
rsp[(*rsp_len)++] = (b); \
} while (0)
@ -265,7 +273,7 @@ struct IPMIBmcSim {
#define IPMI_CHECK_CMD_LEN(l) \
if (cmd_len < l) { \
rsp[2] = IPMI_CC_REQUEST_DATA_LENGTH_INVALID; \
goto out; \
return; \
}
/* Check that the reservation in the command is valid. */
@ -273,7 +281,7 @@ struct IPMIBmcSim {
do { \
if ((cmd[off] | (cmd[off + 1] << 8)) != r) { \
rsp[2] = IPMI_CC_INVALID_RESERVATION; \
goto out; \
return; \
} \
} while (0)
@ -322,14 +330,18 @@ static void sdr_inc_reservation(IPMISdr *sdr)
}
}
static int sdr_add_entry(IPMIBmcSim *ibs, const uint8_t *entry,
static int sdr_add_entry(IPMIBmcSim *ibs,
const struct ipmi_sdr_header *sdrh_entry,
unsigned int len, uint16_t *recid)
{
if ((len < 5) || (len > 255)) {
struct ipmi_sdr_header *sdrh =
(struct ipmi_sdr_header *) &ibs->sdr.sdr[ibs->sdr.next_free];
if ((len < IPMI_SDR_HEADER_SIZE) || (len > 255)) {
return 1;
}
if (entry[4] != len - 5) {
if (ipmi_sdr_length(sdrh_entry) != len) {
return 1;
}
@ -338,10 +350,10 @@ static int sdr_add_entry(IPMIBmcSim *ibs, const uint8_t *entry,
return 1;
}
memcpy(ibs->sdr.sdr + ibs->sdr.next_free, entry, len);
ibs->sdr.sdr[ibs->sdr.next_free] = ibs->sdr.next_rec_id & 0xff;
ibs->sdr.sdr[ibs->sdr.next_free+1] = (ibs->sdr.next_rec_id >> 8) & 0xff;
ibs->sdr.sdr[ibs->sdr.next_free+2] = 0x51; /* Conform to IPMI 1.5 spec */
memcpy(sdrh, sdrh_entry, len);
sdrh->rec_id[0] = ibs->sdr.next_rec_id & 0xff;
sdrh->rec_id[1] = (ibs->sdr.next_rec_id >> 8) & 0xff;
sdrh->sdr_version = 0x51; /* Conform to IPMI 1.5 spec */
if (recid) {
*recid = ibs->sdr.next_rec_id;
@ -359,8 +371,10 @@ static int sdr_find_entry(IPMISdr *sdr, uint16_t recid,
unsigned int pos = *retpos;
while (pos < sdr->next_free) {
uint16_t trec = sdr->sdr[pos] | (sdr->sdr[pos + 1] << 8);
unsigned int nextpos = pos + sdr->sdr[pos + 4];
struct ipmi_sdr_header *sdrh =
(struct ipmi_sdr_header *) &sdr->sdr[pos];
uint16_t trec = ipmi_sdr_recid(sdrh);
unsigned int nextpos = pos + ipmi_sdr_length(sdrh);
if (trec == recid) {
if (nextrec) {
@ -451,14 +465,12 @@ static void gen_event(IPMIBmcSim *ibs, unsigned int sens_num, uint8_t deassert,
}
if (ibs->msg_flags & IPMI_BMC_MSG_FLAG_EVT_BUF_FULL) {
goto out;
return;
}
memcpy(ibs->evtbuf, evt, 16);
ibs->msg_flags |= IPMI_BMC_MSG_FLAG_EVT_BUF_FULL;
k->set_atn(s, 1, attn_irq_enabled(ibs));
out:
return;
}
static void sensor_set_discrete_bit(IPMIBmcSim *ibs, unsigned int sensor,
@ -511,29 +523,32 @@ static void ipmi_init_sensors_from_sdrs(IPMIBmcSim *s)
pos = 0;
for (i = 0; !sdr_find_entry(&s->sdr, i, &pos, NULL); i++) {
uint8_t *sdr = s->sdr.sdr + pos;
unsigned int len = sdr[4];
struct ipmi_sdr_compact *sdr =
(struct ipmi_sdr_compact *) &s->sdr.sdr[pos];
unsigned int len = sdr->header.rec_length;
if (len < 20) {
continue;
}
if ((sdr[3] < 1) || (sdr[3] > 2)) {
if (sdr->header.rec_type != IPMI_SDR_COMPACT_TYPE) {
continue; /* Not a sensor SDR we set from */
}
if (sdr[7] > MAX_SENSORS) {
if (sdr->sensor_owner_number > MAX_SENSORS) {
continue;
}
sens = s->sensors + sdr[7];
sens = s->sensors + sdr->sensor_owner_number;
IPMI_SENSOR_SET_PRESENT(sens, 1);
IPMI_SENSOR_SET_SCAN_ON(sens, (sdr[10] >> 6) & 1);
IPMI_SENSOR_SET_EVENTS_ON(sens, (sdr[10] >> 5) & 1);
sens->assert_suppt = sdr[14] | (sdr[15] << 8);
sens->deassert_suppt = sdr[16] | (sdr[17] << 8);
sens->states_suppt = sdr[18] | (sdr[19] << 8);
sens->sensor_type = sdr[12];
sens->evt_reading_type_code = sdr[13] & 0x7f;
IPMI_SENSOR_SET_SCAN_ON(sens, (sdr->sensor_init >> 6) & 1);
IPMI_SENSOR_SET_EVENTS_ON(sens, (sdr->sensor_init >> 5) & 1);
sens->assert_suppt = sdr->assert_mask[0] | (sdr->assert_mask[1] << 8);
sens->deassert_suppt =
sdr->deassert_mask[0] | (sdr->deassert_mask[1] << 8);
sens->states_suppt =
sdr->discrete_mask[0] | (sdr->discrete_mask[1] << 8);
sens->sensor_type = sdr->sensor_type;
sens->evt_reading_type_code = sdr->reading_type & 0x7f;
/* Enable all the events that are supported. */
sens->assert_enable = sens->assert_suppt;
@ -579,6 +594,11 @@ static void ipmi_sim_handle_command(IPMIBmc *b,
/* Set up the response, set the low bit of NETFN. */
/* Note that max_rsp_len must be at least 3 */
if (max_rsp_len < 3) {
rsp[2] = IPMI_CC_REQUEST_DATA_TRUNCATED;
goto out;
}
IPMI_ADD_RSP_DATA(cmd[0] | 0x04);
IPMI_ADD_RSP_DATA(cmd[1]);
IPMI_ADD_RSP_DATA(0); /* Assume success */
@ -696,8 +716,6 @@ static void chassis_capabilities(IPMIBmcSim *ibs,
IPMI_ADD_RSP_DATA(ibs->parent.slave_addr);
IPMI_ADD_RSP_DATA(ibs->parent.slave_addr);
IPMI_ADD_RSP_DATA(ibs->parent.slave_addr);
out:
return;
}
static void chassis_status(IPMIBmcSim *ibs,
@ -709,8 +727,6 @@ static void chassis_status(IPMIBmcSim *ibs,
IPMI_ADD_RSP_DATA(0);
IPMI_ADD_RSP_DATA(0);
IPMI_ADD_RSP_DATA(0);
out:
return;
}
static void chassis_control(IPMIBmcSim *ibs,
@ -744,10 +760,17 @@ static void chassis_control(IPMIBmcSim *ibs,
break;
default:
rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
goto out;
return;
}
out:
return;
}
static void chassis_get_sys_restart_cause(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
uint8_t *rsp, unsigned int *rsp_len,
unsigned int max_rsp_len)
{
IPMI_ADD_RSP_DATA(ibs->restart_cause & 0xf); /* Restart Cause */
IPMI_ADD_RSP_DATA(0); /* Channel 0 */
}
static void get_device_id(IPMIBmcSim *ibs,
@ -766,8 +789,6 @@ static void get_device_id(IPMIBmcSim *ibs,
IPMI_ADD_RSP_DATA(ibs->mfg_id[2]);
IPMI_ADD_RSP_DATA(ibs->product_id[0]);
IPMI_ADD_RSP_DATA(ibs->product_id[1]);
out:
return;
}
static void set_global_enables(IPMIBmcSim *ibs, uint8_t val)
@ -812,6 +833,36 @@ static void warm_reset(IPMIBmcSim *ibs,
k->reset(s, false);
}
}
static void set_acpi_power_state(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
uint8_t *rsp, unsigned int *rsp_len,
unsigned int max_rsp_len)
{
IPMI_CHECK_CMD_LEN(4);
ibs->acpi_power_state[0] = cmd[2];
ibs->acpi_power_state[1] = cmd[3];
}
static void get_acpi_power_state(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
uint8_t *rsp, unsigned int *rsp_len,
unsigned int max_rsp_len)
{
IPMI_ADD_RSP_DATA(ibs->acpi_power_state[0]);
IPMI_ADD_RSP_DATA(ibs->acpi_power_state[1]);
}
static void get_device_guid(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
uint8_t *rsp, unsigned int *rsp_len,
unsigned int max_rsp_len)
{
unsigned int i;
for (i = 0; i < 16; i++) {
IPMI_ADD_RSP_DATA(ibs->uuid[i]);
}
}
static void set_bmc_global_enables(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
@ -820,8 +871,6 @@ static void set_bmc_global_enables(IPMIBmcSim *ibs,
{
IPMI_CHECK_CMD_LEN(3);
set_global_enables(ibs, cmd[2]);
out:
return;
}
static void get_bmc_global_enables(IPMIBmcSim *ibs,
@ -830,8 +879,6 @@ static void get_bmc_global_enables(IPMIBmcSim *ibs,
unsigned int max_rsp_len)
{
IPMI_ADD_RSP_DATA(ibs->bmc_global_enables);
out:
return;
}
static void clr_msg_flags(IPMIBmcSim *ibs,
@ -845,8 +892,6 @@ static void clr_msg_flags(IPMIBmcSim *ibs,
IPMI_CHECK_CMD_LEN(3);
ibs->msg_flags &= ~cmd[2];
k->set_atn(s, attn_set(ibs), attn_irq_enabled(ibs));
out:
return;
}
static void get_msg_flags(IPMIBmcSim *ibs,
@ -855,8 +900,6 @@ static void get_msg_flags(IPMIBmcSim *ibs,
unsigned int max_rsp_len)
{
IPMI_ADD_RSP_DATA(ibs->msg_flags);
out:
return;
}
static void read_evt_msg_buf(IPMIBmcSim *ibs,
@ -870,15 +913,13 @@ static void read_evt_msg_buf(IPMIBmcSim *ibs,
if (!(ibs->msg_flags & IPMI_BMC_MSG_FLAG_EVT_BUF_FULL)) {
rsp[2] = 0x80;
goto out;
return;
}
for (i = 0; i < 16; i++) {
IPMI_ADD_RSP_DATA(ibs->evtbuf[i]);
}
ibs->msg_flags &= ~IPMI_BMC_MSG_FLAG_EVT_BUF_FULL;
k->set_atn(s, attn_set(ibs), attn_irq_enabled(ibs));
out:
return;
}
static void get_msg(IPMIBmcSim *ibs,
@ -909,7 +950,7 @@ static void get_msg(IPMIBmcSim *ibs,
k->set_atn(s, attn_set(ibs), attn_irq_enabled(ibs));
}
out:
out:
qemu_mutex_unlock(&ibs->lock);
return;
}
@ -940,14 +981,14 @@ static void send_msg(IPMIBmcSim *ibs,
if (cmd[2] != 0) {
/* We only handle channel 0 with no options */
rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
goto out;
return;
}
IPMI_CHECK_CMD_LEN(10);
if (cmd[3] != 0x40) {
/* We only emulate a MC at address 0x40. */
rsp[2] = 0x83; /* NAK on write */
goto out;
return;
}
cmd += 3; /* Skip the header. */
@ -959,7 +1000,7 @@ static void send_msg(IPMIBmcSim *ibs,
*/
if (ipmb_checksum(cmd, cmd_len, 0) != 0 ||
cmd[3] != 0x20) { /* Improper response address */
goto out; /* No response */
return; /* No response */
}
netfn = cmd[1] >> 2;
@ -969,7 +1010,7 @@ static void send_msg(IPMIBmcSim *ibs,
if (rqLun != 2) {
/* We only support LUN 2 coming back to us. */
goto out;
return;
}
msg = g_malloc(sizeof(*msg));
@ -1009,9 +1050,6 @@ static void send_msg(IPMIBmcSim *ibs,
ibs->msg_flags |= IPMI_BMC_MSG_FLAG_RCV_MSG_QUEUE;
k->set_atn(s, 1, attn_irq_enabled(ibs));
qemu_mutex_unlock(&ibs->lock);
out:
return;
}
static void do_watchdog_reset(IPMIBmcSim *ibs)
@ -1040,11 +1078,9 @@ static void reset_watchdog_timer(IPMIBmcSim *ibs,
{
if (!ibs->watchdog_initialized) {
rsp[2] = 0x80;
goto out;
return;
}
do_watchdog_reset(ibs);
out:
return;
}
static void set_watchdog_timer(IPMIBmcSim *ibs,
@ -1060,7 +1096,7 @@ static void set_watchdog_timer(IPMIBmcSim *ibs,
val = cmd[2] & 0x7; /* Validate use */
if (val == 0 || val > 5) {
rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
goto out;
return;
}
val = cmd[3] & 0x7; /* Validate action */
switch (val) {
@ -1084,7 +1120,7 @@ static void set_watchdog_timer(IPMIBmcSim *ibs,
}
if (rsp[2]) {
rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
goto out;
return;
}
val = (cmd[3] >> 4) & 0x7; /* Validate preaction */
@ -1097,12 +1133,12 @@ static void set_watchdog_timer(IPMIBmcSim *ibs,
if (!k->do_hw_op(s, IPMI_SEND_NMI, 1)) {
/* NMI not supported. */
rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
goto out;
return;
}
default:
/* We don't support PRE_SMI */
rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
goto out;
return;
}
ibs->watchdog_initialized = 1;
@ -1116,8 +1152,6 @@ static void set_watchdog_timer(IPMIBmcSim *ibs,
} else {
ibs->watchdog_running = 0;
}
out:
return;
}
static void get_watchdog_timer(IPMIBmcSim *ibs,
@ -1139,8 +1173,6 @@ static void get_watchdog_timer(IPMIBmcSim *ibs,
IPMI_ADD_RSP_DATA(0);
IPMI_ADD_RSP_DATA(0);
}
out:
return;
}
static void get_sdr_rep_info(IPMIBmcSim *ibs,
@ -1163,8 +1195,6 @@ static void get_sdr_rep_info(IPMIBmcSim *ibs,
}
/* Only modal support, reserve supported */
IPMI_ADD_RSP_DATA((ibs->sdr.overflow << 7) | 0x22);
out:
return;
}
static void reserve_sdr_rep(IPMIBmcSim *ibs,
@ -1174,8 +1204,6 @@ static void reserve_sdr_rep(IPMIBmcSim *ibs,
{
IPMI_ADD_RSP_DATA(ibs->sdr.reservation & 0xff);
IPMI_ADD_RSP_DATA((ibs->sdr.reservation >> 8) & 0xff);
out:
return;
}
static void get_sdr(IPMIBmcSim *ibs,
@ -1185,6 +1213,7 @@ static void get_sdr(IPMIBmcSim *ibs,
{
unsigned int pos;
uint16_t nextrec;
struct ipmi_sdr_header *sdrh;
IPMI_CHECK_CMD_LEN(8);
if (cmd[6]) {
@ -1194,28 +1223,29 @@ static void get_sdr(IPMIBmcSim *ibs,
if (sdr_find_entry(&ibs->sdr, cmd[4] | (cmd[5] << 8),
&pos, &nextrec)) {
rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
goto out;
return;
}
if (cmd[6] > (ibs->sdr.sdr[pos + 4])) {
sdrh = (struct ipmi_sdr_header *) &ibs->sdr.sdr[pos];
if (cmd[6] > ipmi_sdr_length(sdrh)) {
rsp[2] = IPMI_CC_PARM_OUT_OF_RANGE;
goto out;
return;
}
IPMI_ADD_RSP_DATA(nextrec & 0xff);
IPMI_ADD_RSP_DATA((nextrec >> 8) & 0xff);
if (cmd[7] == 0xff) {
cmd[7] = ibs->sdr.sdr[pos + 4] - cmd[6];
cmd[7] = ipmi_sdr_length(sdrh) - cmd[6];
}
if ((cmd[7] + *rsp_len) > max_rsp_len) {
rsp[2] = IPMI_CC_CANNOT_RETURN_REQ_NUM_BYTES;
goto out;
return;
}
memcpy(rsp + *rsp_len, ibs->sdr.sdr + pos + cmd[6], cmd[7]);
*rsp_len += cmd[7];
out:
return;
}
static void add_sdr(IPMIBmcSim *ibs,
@ -1224,15 +1254,14 @@ static void add_sdr(IPMIBmcSim *ibs,
unsigned int max_rsp_len)
{
uint16_t recid;
struct ipmi_sdr_header *sdrh = (struct ipmi_sdr_header *) cmd + 2;
if (sdr_add_entry(ibs, cmd + 2, cmd_len - 2, &recid)) {
if (sdr_add_entry(ibs, sdrh, cmd_len - 2, &recid)) {
rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
goto out;
return;
}
IPMI_ADD_RSP_DATA(recid & 0xff);
IPMI_ADD_RSP_DATA((recid >> 8) & 0xff);
out:
return;
}
static void clear_sdr_rep(IPMIBmcSim *ibs,
@ -1244,7 +1273,7 @@ static void clear_sdr_rep(IPMIBmcSim *ibs,
IPMI_CHECK_RESERVATION(2, ibs->sdr.reservation);
if (cmd[4] != 'C' || cmd[5] != 'L' || cmd[6] != 'R') {
rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
goto out;
return;
}
if (cmd[7] == 0xaa) {
ibs->sdr.next_free = 0;
@ -1256,10 +1285,8 @@ static void clear_sdr_rep(IPMIBmcSim *ibs,
IPMI_ADD_RSP_DATA(1); /* Erasure complete */
} else {
rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
goto out;
return;
}
out:
return;
}
static void get_sel_info(IPMIBmcSim *ibs,
@ -1283,8 +1310,6 @@ static void get_sel_info(IPMIBmcSim *ibs,
}
/* Only support Reserve SEL */
IPMI_ADD_RSP_DATA((ibs->sel.overflow << 7) | 0x02);
out:
return;
}
static void reserve_sel(IPMIBmcSim *ibs,
@ -1294,8 +1319,6 @@ static void reserve_sel(IPMIBmcSim *ibs,
{
IPMI_ADD_RSP_DATA(ibs->sel.reservation & 0xff);
IPMI_ADD_RSP_DATA((ibs->sel.reservation >> 8) & 0xff);
out:
return;
}
static void get_sel_entry(IPMIBmcSim *ibs,
@ -1311,17 +1334,17 @@ static void get_sel_entry(IPMIBmcSim *ibs,
}
if (ibs->sel.next_free == 0) {
rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
goto out;
return;
}
if (cmd[6] > 15) {
rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
goto out;
return;
}
if (cmd[7] == 0xff) {
cmd[7] = 16;
} else if ((cmd[7] + cmd[6]) > 16) {
rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
goto out;
return;
} else {
cmd[7] += cmd[6];
}
@ -1331,7 +1354,7 @@ static void get_sel_entry(IPMIBmcSim *ibs,
val = ibs->sel.next_free - 1;
} else if (val >= ibs->sel.next_free) {
rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
goto out;
return;
}
if ((val + 1) == ibs->sel.next_free) {
IPMI_ADD_RSP_DATA(0xff);
@ -1343,8 +1366,6 @@ static void get_sel_entry(IPMIBmcSim *ibs,
for (; cmd[6] < cmd[7]; cmd[6]++) {
IPMI_ADD_RSP_DATA(ibs->sel.sel[val][cmd[6]]);
}
out:
return;
}
static void add_sel_entry(IPMIBmcSim *ibs,
@ -1355,13 +1376,11 @@ static void add_sel_entry(IPMIBmcSim *ibs,
IPMI_CHECK_CMD_LEN(18);
if (sel_add_event(ibs, cmd + 2)) {
rsp[2] = IPMI_CC_OUT_OF_SPACE;
goto out;
return;
}
/* sel_add_event fills in the record number. */
IPMI_ADD_RSP_DATA(cmd[2]);
IPMI_ADD_RSP_DATA(cmd[3]);
out:
return;
}
static void clear_sel(IPMIBmcSim *ibs,
@ -1373,7 +1392,7 @@ static void clear_sel(IPMIBmcSim *ibs,
IPMI_CHECK_RESERVATION(2, ibs->sel.reservation);
if (cmd[4] != 'C' || cmd[5] != 'L' || cmd[6] != 'R') {
rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
goto out;
return;
}
if (cmd[7] == 0xaa) {
ibs->sel.next_free = 0;
@ -1385,10 +1404,8 @@ static void clear_sel(IPMIBmcSim *ibs,
IPMI_ADD_RSP_DATA(1); /* Erasure complete */
} else {
rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
goto out;
return;
}
out:
return;
}
static void get_sel_time(IPMIBmcSim *ibs,
@ -1405,8 +1422,6 @@ static void get_sel_time(IPMIBmcSim *ibs,
IPMI_ADD_RSP_DATA((val >> 8) & 0xff);
IPMI_ADD_RSP_DATA((val >> 16) & 0xff);
IPMI_ADD_RSP_DATA((val >> 24) & 0xff);
out:
return;
}
static void set_sel_time(IPMIBmcSim *ibs,
@ -1421,8 +1436,6 @@ static void set_sel_time(IPMIBmcSim *ibs,
val = cmd[2] | (cmd[3] << 8) | (cmd[4] << 16) | (cmd[5] << 24);
ipmi_gettime(&now);
ibs->sel.time_offset = now.tv_sec - ((long) val);
out:
return;
}
static void set_sensor_evt_enable(IPMIBmcSim *ibs,
@ -1436,7 +1449,7 @@ static void set_sensor_evt_enable(IPMIBmcSim *ibs,
if ((cmd[2] > MAX_SENSORS) ||
!IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) {
rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
goto out;
return;
}
sens = ibs->sensors + cmd[2];
switch ((cmd[3] >> 4) & 0x3) {
@ -1472,11 +1485,9 @@ static void set_sensor_evt_enable(IPMIBmcSim *ibs,
break;
case 3:
rsp[2] = IPMI_CC_INVALID_DATA_FIELD;
goto out;
return;
}
IPMI_SENSOR_SET_RET_STATUS(sens, cmd[3]);
out:
return;
}
static void get_sensor_evt_enable(IPMIBmcSim *ibs,
@ -1490,7 +1501,7 @@ static void get_sensor_evt_enable(IPMIBmcSim *ibs,
if ((cmd[2] > MAX_SENSORS) ||
!IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) {
rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
goto out;
return;
}
sens = ibs->sensors + cmd[2];
IPMI_ADD_RSP_DATA(IPMI_SENSOR_GET_RET_STATUS(sens));
@ -1498,8 +1509,6 @@ static void get_sensor_evt_enable(IPMIBmcSim *ibs,
IPMI_ADD_RSP_DATA((sens->assert_enable >> 8) & 0xff);
IPMI_ADD_RSP_DATA(sens->deassert_enable & 0xff);
IPMI_ADD_RSP_DATA((sens->deassert_enable >> 8) & 0xff);
out:
return;
}
static void rearm_sensor_evts(IPMIBmcSim *ibs,
@ -1513,17 +1522,15 @@ static void rearm_sensor_evts(IPMIBmcSim *ibs,
if ((cmd[2] > MAX_SENSORS) ||
!IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) {
rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
goto out;
return;
}
sens = ibs->sensors + cmd[2];
if ((cmd[3] & 0x80) == 0) {
/* Just clear everything */
sens->states = 0;
goto out;
return;
}
out:
return;
}
static void get_sensor_evt_status(IPMIBmcSim *ibs,
@ -1537,7 +1544,7 @@ static void get_sensor_evt_status(IPMIBmcSim *ibs,
if ((cmd[2] > MAX_SENSORS) ||
!IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) {
rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
goto out;
return;
}
sens = ibs->sensors + cmd[2];
IPMI_ADD_RSP_DATA(sens->reading);
@ -1546,8 +1553,6 @@ static void get_sensor_evt_status(IPMIBmcSim *ibs,
IPMI_ADD_RSP_DATA((sens->assert_states >> 8) & 0xff);
IPMI_ADD_RSP_DATA(sens->deassert_states & 0xff);
IPMI_ADD_RSP_DATA((sens->deassert_states >> 8) & 0xff);
out:
return;
}
static void get_sensor_reading(IPMIBmcSim *ibs,
@ -1561,7 +1566,7 @@ static void get_sensor_reading(IPMIBmcSim *ibs,
if ((cmd[2] > MAX_SENSORS) ||
!IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) {
rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
goto out;
return;
}
sens = ibs->sensors + cmd[2];
IPMI_ADD_RSP_DATA(sens->reading);
@ -1570,37 +1575,79 @@ static void get_sensor_reading(IPMIBmcSim *ibs,
if (IPMI_SENSOR_IS_DISCRETE(sens)) {
IPMI_ADD_RSP_DATA((sens->states >> 8) & 0xff);
}
out:
return;
}
static const IPMICmdHandler chassis_cmds[IPMI_NETFN_CHASSIS_MAXCMD] = {
static void set_sensor_type(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
uint8_t *rsp, unsigned int *rsp_len,
unsigned int max_rsp_len)
{
IPMISensor *sens;
IPMI_CHECK_CMD_LEN(5);
if ((cmd[2] > MAX_SENSORS) ||
!IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) {
rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
return;
}
sens = ibs->sensors + cmd[2];
sens->sensor_type = cmd[3];
sens->evt_reading_type_code = cmd[4] & 0x7f;
}
static void get_sensor_type(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
uint8_t *rsp, unsigned int *rsp_len,
unsigned int max_rsp_len)
{
IPMISensor *sens;
IPMI_CHECK_CMD_LEN(3);
if ((cmd[2] > MAX_SENSORS) ||
!IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) {
rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
return;
}
sens = ibs->sensors + cmd[2];
IPMI_ADD_RSP_DATA(sens->sensor_type);
IPMI_ADD_RSP_DATA(sens->evt_reading_type_code);
}
static const IPMICmdHandler chassis_cmds[] = {
[IPMI_CMD_GET_CHASSIS_CAPABILITIES] = chassis_capabilities,
[IPMI_CMD_GET_CHASSIS_STATUS] = chassis_status,
[IPMI_CMD_CHASSIS_CONTROL] = chassis_control
[IPMI_CMD_CHASSIS_CONTROL] = chassis_control,
[IPMI_CMD_GET_SYS_RESTART_CAUSE] = chassis_get_sys_restart_cause
};
static const IPMINetfn chassis_netfn = {
.cmd_nums = IPMI_NETFN_CHASSIS_MAXCMD,
.cmd_nums = ARRAY_SIZE(chassis_cmds),
.cmd_handlers = chassis_cmds
};
static const IPMICmdHandler
sensor_event_cmds[IPMI_NETFN_SENSOR_EVENT_MAXCMD] = {
static const IPMICmdHandler sensor_event_cmds[] = {
[IPMI_CMD_SET_SENSOR_EVT_ENABLE] = set_sensor_evt_enable,
[IPMI_CMD_GET_SENSOR_EVT_ENABLE] = get_sensor_evt_enable,
[IPMI_CMD_REARM_SENSOR_EVTS] = rearm_sensor_evts,
[IPMI_CMD_GET_SENSOR_EVT_STATUS] = get_sensor_evt_status,
[IPMI_CMD_GET_SENSOR_READING] = get_sensor_reading
[IPMI_CMD_GET_SENSOR_READING] = get_sensor_reading,
[IPMI_CMD_SET_SENSOR_TYPE] = set_sensor_type,
[IPMI_CMD_GET_SENSOR_TYPE] = get_sensor_type,
};
static const IPMINetfn sensor_event_netfn = {
.cmd_nums = IPMI_NETFN_SENSOR_EVENT_MAXCMD,
.cmd_nums = ARRAY_SIZE(sensor_event_cmds),
.cmd_handlers = sensor_event_cmds
};
static const IPMICmdHandler app_cmds[IPMI_NETFN_APP_MAXCMD] = {
static const IPMICmdHandler app_cmds[] = {
[IPMI_CMD_GET_DEVICE_ID] = get_device_id,
[IPMI_CMD_COLD_RESET] = cold_reset,
[IPMI_CMD_WARM_RESET] = warm_reset,
[IPMI_CMD_SET_ACPI_POWER_STATE] = set_acpi_power_state,
[IPMI_CMD_GET_ACPI_POWER_STATE] = get_acpi_power_state,
[IPMI_CMD_GET_DEVICE_GUID] = get_device_guid,
[IPMI_CMD_SET_BMC_GLOBAL_ENABLES] = set_bmc_global_enables,
[IPMI_CMD_GET_BMC_GLOBAL_ENABLES] = get_bmc_global_enables,
[IPMI_CMD_CLR_MSG_FLAGS] = clr_msg_flags,
@ -1613,11 +1660,11 @@ static const IPMICmdHandler app_cmds[IPMI_NETFN_APP_MAXCMD] = {
[IPMI_CMD_GET_WATCHDOG_TIMER] = get_watchdog_timer,
};
static const IPMINetfn app_netfn = {
.cmd_nums = IPMI_NETFN_APP_MAXCMD,
.cmd_nums = ARRAY_SIZE(app_cmds),
.cmd_handlers = app_cmds
};
static const IPMICmdHandler storage_cmds[IPMI_NETFN_STORAGE_MAXCMD] = {
static const IPMICmdHandler storage_cmds[] = {
[IPMI_CMD_GET_SDR_REP_INFO] = get_sdr_rep_info,
[IPMI_CMD_RESERVE_SDR_REP] = reserve_sdr_rep,
[IPMI_CMD_GET_SDR] = get_sdr,
@ -1633,7 +1680,7 @@ static const IPMICmdHandler storage_cmds[IPMI_NETFN_STORAGE_MAXCMD] = {
};
static const IPMINetfn storage_netfn = {
.cmd_nums = IPMI_NETFN_STORAGE_MAXCMD,
.cmd_nums = ARRAY_SIZE(storage_cmds),
.cmd_handlers = storage_cmds
};
@ -1697,6 +1744,7 @@ static void ipmi_sim_init(Object *obj)
ibs->bmc_global_enables = (1 << IPMI_BMC_EVENT_LOG_BIT);
ibs->device_id = 0x20;
ibs->ipmi_version = 0x02; /* IPMI 2.0 */
ibs->restart_cause = 0;
for (i = 0; i < 4; i++) {
ibs->sel.last_addition[i] = 0xff;
ibs->sel.last_clear[i] = 0xff;
@ -1705,22 +1753,33 @@ static void ipmi_sim_init(Object *obj)
}
for (i = 0;;) {
struct ipmi_sdr_header *sdrh;
int len;
if ((i + 5) > sizeof(init_sdrs)) {
error_report("Problem with recid 0x%4.4x: \n", i);
if ((i + IPMI_SDR_HEADER_SIZE) > sizeof(init_sdrs)) {
error_report("Problem with recid 0x%4.4x", i);
return;
}
len = init_sdrs[i + 4];
recid = init_sdrs[i] | (init_sdrs[i + 1] << 8);
sdrh = (struct ipmi_sdr_header *) &init_sdrs[i];
len = ipmi_sdr_length(sdrh);
recid = ipmi_sdr_recid(sdrh);
if (recid == 0xffff) {
break;
}
if ((i + len + 5) > sizeof(init_sdrs)) {
error_report("Problem with recid 0x%4.4x\n", i);
if ((i + len) > sizeof(init_sdrs)) {
error_report("Problem with recid 0x%4.4x", i);
return;
}
sdr_add_entry(ibs, init_sdrs + i, len, NULL);
i += len + 5;
sdr_add_entry(ibs, sdrh, len, NULL);
i += len;
}
ibs->acpi_power_state[0] = 0;
ibs->acpi_power_state[1] = 0;
if (qemu_uuid_set) {
memcpy(&ibs->uuid, qemu_uuid, 16);
} else {
memset(&ibs->uuid, 0, 16);
}
ipmi_init_sensors_from_sdrs(ibs);

View File

@ -298,21 +298,19 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
VirtioBusState *vbus = VIRTIO_BUS(qbus);
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
int r, e, i;
int r, e, i, j;
if (!k->set_guest_notifiers) {
error_report("binding does not support guest notifiers");
r = -ENOSYS;
goto err;
return -ENOSYS;
}
r = vhost_net_set_vnet_endian(dev, ncs[0].peer, true);
if (r < 0) {
goto err;
}
for (i = 0; i < total_queues; i++) {
vhost_net_set_vq_index(get_vhost_net(ncs[i].peer), i * 2);
for (j = 0; j < total_queues; j++) {
r = vhost_net_set_vnet_endian(dev, ncs[j].peer, true);
if (r < 0) {
goto err_endian;
}
vhost_net_set_vq_index(get_vhost_net(ncs[j].peer), j * 2);
}
r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
@ -341,8 +339,9 @@ err_start:
fflush(stderr);
}
err_endian:
vhost_net_set_vnet_endian(dev, ncs[0].peer, false);
err:
while (--j >= 0) {
vhost_net_set_vnet_endian(dev, ncs[j].peer, false);
}
return r;
}

View File

@ -819,20 +819,24 @@ static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
VirtIONet *n = VIRTIO_NET(vdev);
struct virtio_net_ctrl_hdr ctrl;
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
VirtQueueElement elem;
VirtQueueElement *elem;
size_t s;
struct iovec *iov, *iov2;
unsigned int iov_cnt;
while (virtqueue_pop(vq, &elem)) {
if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
for (;;) {
elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
if (!elem) {
break;
}
if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
error_report("virtio-net ctrl missing headers");
exit(1);
}
iov_cnt = elem.out_num;
iov2 = iov = g_memdup(elem.out_sg, sizeof(struct iovec) * elem.out_num);
iov_cnt = elem->out_num;
iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num);
s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
if (s != sizeof(ctrl)) {
@ -851,12 +855,13 @@ static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
}
s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
assert(s == sizeof(status));
virtqueue_push(vq, &elem, sizeof(status));
virtqueue_push(vq, elem, sizeof(status));
virtio_notify(vdev, vq);
g_free(iov2);
g_free(elem);
}
}
@ -1045,13 +1050,14 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t
offset = i = 0;
while (offset < size) {
VirtQueueElement elem;
VirtQueueElement *elem;
int len, total;
const struct iovec *sg = elem.in_sg;
const struct iovec *sg;
total = 0;
if (virtqueue_pop(q->rx_vq, &elem) == 0) {
elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
if (!elem) {
if (i == 0)
return -1;
error_report("virtio-net unexpected empty queue: "
@ -1064,21 +1070,22 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t
exit(1);
}
if (elem.in_num < 1) {
if (elem->in_num < 1) {
error_report("virtio-net receive queue contains no in buffers");
exit(1);
}
sg = elem->in_sg;
if (i == 0) {
assert(offset == 0);
if (n->mergeable_rx_bufs) {
mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
sg, elem.in_num,
sg, elem->in_num,
offsetof(typeof(mhdr), num_buffers),
sizeof(mhdr.num_buffers));
}
receive_header(n, sg, elem.in_num, buf, size);
receive_header(n, sg, elem->in_num, buf, size);
offset = n->host_hdr_len;
total += n->guest_hdr_len;
guest_offset = n->guest_hdr_len;
@ -1087,7 +1094,7 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t
}
/* copy in packet. ugh */
len = iov_from_buf(sg, elem.in_num, guest_offset,
len = iov_from_buf(sg, elem->in_num, guest_offset,
buf + offset, size - offset);
total += len;
offset += len;
@ -1095,12 +1102,14 @@ static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t
* must have consumed the complete packet.
* Otherwise, drop it. */
if (!n->mergeable_rx_bufs && offset < size) {
virtqueue_discard(q->rx_vq, &elem, total);
virtqueue_discard(q->rx_vq, elem, total);
g_free(elem);
return size;
}
/* signal other side */
virtqueue_fill(q->rx_vq, &elem, total, i++);
virtqueue_fill(q->rx_vq, elem, total, i++);
g_free(elem);
}
if (mhdr_cnt) {
@ -1124,10 +1133,11 @@ static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
VirtIONetQueue *q = virtio_net_get_subqueue(nc);
VirtIODevice *vdev = VIRTIO_DEVICE(n);
virtqueue_push(q->tx_vq, &q->async_tx.elem, 0);
virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
virtio_notify(vdev, q->tx_vq);
q->async_tx.elem.out_num = 0;
g_free(q->async_tx.elem);
q->async_tx.elem = NULL;
virtio_queue_set_notification(q->tx_vq, 1);
virtio_net_flush_tx(q);
@ -1138,25 +1148,31 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
VirtIONet *n = q->n;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
VirtQueueElement elem;
VirtQueueElement *elem;
int32_t num_packets = 0;
int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
return num_packets;
}
if (q->async_tx.elem.out_num) {
if (q->async_tx.elem) {
virtio_queue_set_notification(q->tx_vq, 0);
return num_packets;
}
while (virtqueue_pop(q->tx_vq, &elem)) {
for (;;) {
ssize_t ret;
unsigned int out_num = elem.out_num;
struct iovec *out_sg = &elem.out_sg[0];
struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1];
unsigned int out_num;
struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
struct virtio_net_hdr_mrg_rxbuf mhdr;
elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
if (!elem) {
break;
}
out_num = elem->out_num;
out_sg = elem->out_sg;
if (out_num < 1) {
error_report("virtio-net header not in first element");
exit(1);
@ -1208,8 +1224,9 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
}
drop:
virtqueue_push(q->tx_vq, &elem, 0);
virtqueue_push(q->tx_vq, elem, 0);
virtio_notify(vdev, q->tx_vq);
g_free(elem);
if (++num_packets >= n->tx_burst) {
break;

View File

@ -302,6 +302,7 @@ static void pxb_dev_class_init(ObjectClass *klass, void *data)
dc->desc = "PCI Expander Bridge";
dc->props = pxb_dev_properties;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
}
static const TypeInfo pxb_dev_info = {
@ -334,6 +335,7 @@ static void pxb_pcie_dev_class_init(ObjectClass *klass, void *data)
dc->desc = "PCI Express Expander Bridge";
dc->props = pxb_dev_properties;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
}
static const TypeInfo pxb_pcie_dev_info = {

View File

@ -20,6 +20,7 @@
#include "qemu/osdep.h"
#include "hw/pci/msi.h"
#include "hw/xen/xen.h"
#include "qemu/range.h"
/* PCI_MSI_ADDRESS_LO */
@ -254,13 +255,19 @@ void msi_reset(PCIDevice *dev)
static bool msi_is_masked(const PCIDevice *dev, unsigned int vector)
{
uint16_t flags = pci_get_word(dev->config + msi_flags_off(dev));
uint32_t mask;
uint32_t mask, data;
bool msi64bit = flags & PCI_MSI_FLAGS_64BIT;
assert(vector < PCI_MSI_VECTORS_MAX);
if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
return false;
}
data = pci_get_word(dev->config + msi_data_off(dev, msi64bit));
if (xen_is_pirq_msi(data)) {
return false;
}
mask = pci_get_long(dev->config +
msi_mask_off(dev, flags & PCI_MSI_FLAGS_64BIT));
return mask & (1U << vector);

View File

@ -19,6 +19,7 @@
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/xen/xen.h"
#include "qemu/range.h"
#define MSIX_CAP_LENGTH 12
@ -78,8 +79,15 @@ static void msix_clr_pending(PCIDevice *dev, int vector)
static bool msix_vector_masked(PCIDevice *dev, unsigned int vector, bool fmask)
{
unsigned offset = vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
return fmask || dev->msix_table[offset] & PCI_MSIX_ENTRY_CTRL_MASKBIT;
unsigned offset = vector * PCI_MSIX_ENTRY_SIZE;
uint32_t *data = (uint32_t *)&dev->msix_table[offset + PCI_MSIX_ENTRY_DATA];
/* MSIs on Xen can be remapped into pirqs. In those cases, masking
* and unmasking go through the PV evtchn path. */
if (xen_is_pirq_msi(*data)) {
return false;
}
return fmask || dev->msix_table[offset + PCI_MSIX_ENTRY_VECTOR_CTRL] &
PCI_MSIX_ENTRY_CTRL_MASKBIT;
}
bool msix_is_masked(PCIDevice *dev, unsigned int vector)

View File

@ -851,6 +851,13 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, PCIBus *bus,
DeviceState *dev = DEVICE(pci_dev);
pci_dev->bus = bus;
/* Only pci bridges can be attached to extra PCI root buses */
if (pci_bus_is_root(bus) && bus->parent_dev && !pc->is_bridge) {
error_setg(errp,
"PCI: Only PCI/PCIe bridges can be plugged into %s",
bus->parent_dev->name);
return NULL;
}
if (devfn < 0) {
for(devfn = bus->devfn_min ; devfn < ARRAY_SIZE(bus->devices);

View File

@ -81,15 +81,16 @@ fail_vring:
VirtIOSCSIReq *virtio_scsi_pop_req_vring(VirtIOSCSI *s,
VirtIOSCSIVring *vring)
{
VirtIOSCSIReq *req = virtio_scsi_init_req(s, NULL);
int r;
VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
VirtIOSCSIReq *req;
req->vring = vring;
r = vring_pop((VirtIODevice *)s, &vring->vring, &req->elem);
if (r < 0) {
virtio_scsi_free_req(req);
req = NULL;
req = vring_pop((VirtIODevice *)s, &vring->vring,
sizeof(VirtIOSCSIReq) + vs->cdb_size);
if (!req) {
return NULL;
}
virtio_scsi_init_req(s, NULL, req);
req->vring = vring;
return req;
}

View File

@ -41,20 +41,15 @@ static inline SCSIDevice *virtio_scsi_device_find(VirtIOSCSI *s, uint8_t *lun)
return scsi_device_find(&s->bus, 0, lun[1], virtio_scsi_get_lun(lun));
}
VirtIOSCSIReq *virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq)
void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req)
{
VirtIOSCSIReq *req;
VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
const size_t zero_skip = offsetof(VirtIOSCSIReq, elem)
+ sizeof(VirtQueueElement);
const size_t zero_skip = offsetof(VirtIOSCSIReq, vring);
req = g_malloc(sizeof(*req) + vs->cdb_size);
req->vq = vq;
req->dev = s;
qemu_sglist_init(&req->qsgl, DEVICE(s), 8, &address_space_memory);
qemu_iovec_init(&req->resp_iov, 1);
memset((uint8_t *)req + zero_skip, 0, sizeof(*req) - zero_skip);
return req;
}
void virtio_scsi_free_req(VirtIOSCSIReq *req)
@ -175,11 +170,14 @@ static int virtio_scsi_parse_req(VirtIOSCSIReq *req,
static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq)
{
VirtIOSCSIReq *req = virtio_scsi_init_req(s, vq);
if (!virtqueue_pop(vq, &req->elem)) {
virtio_scsi_free_req(req);
VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
VirtIOSCSIReq *req;
req = virtqueue_pop(vq, sizeof(VirtIOSCSIReq) + vs->cdb_size);
if (!req) {
return NULL;
}
virtio_scsi_init_req(s, vq, req);
return req;
}
@ -191,7 +189,7 @@ static void virtio_scsi_save_request(QEMUFile *f, SCSIRequest *sreq)
assert(n < vs->conf.num_queues);
qemu_put_be32s(f, &n);
qemu_put_buffer(f, (unsigned char *)&req->elem, sizeof(req->elem));
qemu_put_virtqueue_element(f, &req->elem);
}
static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
@ -204,10 +202,8 @@ static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
qemu_get_be32s(f, &n);
assert(n < vs->conf.num_queues);
req = virtio_scsi_init_req(s, vs->cmd_vqs[n]);
qemu_get_buffer(f, (unsigned char *)&req->elem, sizeof(req->elem));
virtqueue_map(&req->elem);
req = qemu_get_virtqueue_element(f, sizeof(VirtIOSCSIReq) + vs->cdb_size);
virtio_scsi_init_req(s, vs->cmd_vqs[n], req);
if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
sizeof(VirtIOSCSICmdResp) + vs->sense_size) < 0) {

View File

@ -218,8 +218,14 @@ bool vring_should_notify(VirtIODevice *vdev, Vring *vring)
new, old);
}
/* Scratch state used while walking one descriptor chain in vring_pop().
 * Output (guest-readable) descriptors are collected first in addr[]/iov[],
 * followed by the input (guest-writable) ones; the result is then copied
 * into a properly sized VirtQueueElement. */
typedef struct VirtQueueCurrentElement {
unsigned in_num;   /* guest-writable descriptors collected so far */
unsigned out_num;  /* guest-readable descriptors collected so far */
hwaddr addr[VIRTQUEUE_MAX_SIZE];      /* guest physical address per entry */
struct iovec iov[VIRTQUEUE_MAX_SIZE]; /* mapped host iovec per entry */
} VirtQueueCurrentElement;
static int get_desc(Vring *vring, VirtQueueElement *elem,
static int get_desc(Vring *vring, VirtQueueCurrentElement *elem,
struct vring_desc *desc)
{
unsigned *num;
@ -230,12 +236,12 @@ static int get_desc(Vring *vring, VirtQueueElement *elem,
if (desc->flags & VRING_DESC_F_WRITE) {
num = &elem->in_num;
iov = &elem->in_sg[*num];
addr = &elem->in_addr[*num];
iov = &elem->iov[elem->out_num + *num];
addr = &elem->addr[elem->out_num + *num];
} else {
num = &elem->out_num;
iov = &elem->out_sg[*num];
addr = &elem->out_addr[*num];
iov = &elem->iov[*num];
addr = &elem->addr[*num];
/* If it's an output descriptor, they're all supposed
* to come before any input descriptors. */
@ -299,7 +305,8 @@ static bool read_vring_desc(VirtIODevice *vdev,
/* This is stolen from linux/drivers/vhost/vhost.c. */
static int get_indirect(VirtIODevice *vdev, Vring *vring,
VirtQueueElement *elem, struct vring_desc *indirect)
VirtQueueCurrentElement *cur_elem,
struct vring_desc *indirect)
{
struct vring_desc desc;
unsigned int i = 0, count, found = 0;
@ -351,7 +358,7 @@ static int get_indirect(VirtIODevice *vdev, Vring *vring,
return -EFAULT;
}
ret = get_desc(vring, elem, &desc);
ret = get_desc(vring, cur_elem, &desc);
if (ret < 0) {
vring->broken |= (ret == -EFAULT);
return ret;
@ -389,23 +396,23 @@ static void vring_unmap_element(VirtQueueElement *elem)
*
* Stolen from linux/drivers/vhost/vhost.c.
*/
int vring_pop(VirtIODevice *vdev, Vring *vring,
VirtQueueElement *elem)
void *vring_pop(VirtIODevice *vdev, Vring *vring, size_t sz)
{
struct vring_desc desc;
unsigned int i, head, found = 0, num = vring->vr.num;
uint16_t avail_idx, last_avail_idx;
VirtQueueCurrentElement cur_elem;
VirtQueueElement *elem = NULL;
int ret;
/* Initialize elem so it can be safely unmapped */
elem->in_num = elem->out_num = 0;
/* If there was a fatal error then refuse operation */
if (vring->broken) {
ret = -EFAULT;
goto out;
}
cur_elem.in_num = cur_elem.out_num = 0;
/* Check it isn't doing very strange things with descriptor numbers. */
last_avail_idx = vring->last_avail_idx;
avail_idx = vring_get_avail_idx(vdev, vring);
@ -431,8 +438,6 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
* the index we've seen. */
head = vring_get_avail_ring(vdev, vring, last_avail_idx % num);
elem->index = head;
/* If their number is silly, that's an error. */
if (unlikely(head >= num)) {
error_report("Guest says index %u > %u is available", head, num);
@ -459,14 +464,14 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
barrier();
if (desc.flags & VRING_DESC_F_INDIRECT) {
ret = get_indirect(vdev, vring, elem, &desc);
ret = get_indirect(vdev, vring, &cur_elem, &desc);
if (ret < 0) {
goto out;
}
continue;
}
ret = get_desc(vring, elem, &desc);
ret = get_desc(vring, &cur_elem, &desc);
if (ret < 0) {
goto out;
}
@ -481,15 +486,32 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
virtio_tswap16(vdev, vring->last_avail_idx);
}
return head;
/* Now copy what we have collected and mapped */
elem = virtqueue_alloc_element(sz, cur_elem.out_num, cur_elem.in_num);
elem->index = head;
for (i = 0; i < cur_elem.out_num; i++) {
elem->out_addr[i] = cur_elem.addr[i];
elem->out_sg[i] = cur_elem.iov[i];
}
for (i = 0; i < cur_elem.in_num; i++) {
elem->in_addr[i] = cur_elem.addr[cur_elem.out_num + i];
elem->in_sg[i] = cur_elem.iov[cur_elem.out_num + i];
}
return elem;
out:
assert(ret < 0);
if (ret == -EFAULT) {
vring->broken = true;
}
vring_unmap_element(elem);
return ret;
for (i = 0; i < cur_elem.out_num + cur_elem.in_num; i++) {
vring_unmap(cur_elem.iov[i].iov_base, false);
}
g_free(elem);
return NULL;
}
/* After we've used one of their buffers, we tell them about it.

View File

@ -107,8 +107,10 @@ static void balloon_stats_poll_cb(void *opaque)
return;
}
virtqueue_push(s->svq, &s->stats_vq_elem, s->stats_vq_offset);
virtqueue_push(s->svq, s->stats_vq_elem, s->stats_vq_offset);
virtio_notify(vdev, s->svq);
g_free(s->stats_vq_elem);
s->stats_vq_elem = NULL;
}
static void balloon_stats_get_all(Object *obj, struct Visitor *v,
@ -206,14 +208,18 @@ static void balloon_stats_set_poll_interval(Object *obj, struct Visitor *v,
static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
VirtQueueElement elem;
VirtQueueElement *elem;
MemoryRegionSection section;
while (virtqueue_pop(vq, &elem)) {
for (;;) {
size_t offset = 0;
uint32_t pfn;
elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
if (!elem) {
return;
}
while (iov_to_buf(elem.out_sg, elem.out_num, offset, &pfn, 4) == 4) {
while (iov_to_buf(elem->out_sg, elem->out_num, offset, &pfn, 4) == 4) {
ram_addr_t pa;
ram_addr_t addr;
int p = virtio_ldl_p(vdev, &pfn);
@ -236,20 +242,22 @@ static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
memory_region_unref(section.mr);
}
virtqueue_push(vq, &elem, offset);
virtqueue_push(vq, elem, offset);
virtio_notify(vdev, vq);
g_free(elem);
}
}
static void virtio_balloon_receive_stats(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
VirtQueueElement *elem = &s->stats_vq_elem;
VirtQueueElement *elem;
VirtIOBalloonStat stat;
size_t offset = 0;
qemu_timeval tv;
if (!virtqueue_pop(vq, elem)) {
s->stats_vq_elem = elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
if (!elem) {
goto out;
}

View File

@ -44,7 +44,7 @@ static void chr_read(void *opaque, const void *buf, size_t size)
{
VirtIORNG *vrng = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(vrng);
VirtQueueElement elem;
VirtQueueElement *elem;
size_t len;
int offset;
@ -56,15 +56,17 @@ static void chr_read(void *opaque, const void *buf, size_t size)
offset = 0;
while (offset < size) {
if (!virtqueue_pop(vrng->vq, &elem)) {
elem = virtqueue_pop(vrng->vq, sizeof(VirtQueueElement));
if (!elem) {
break;
}
len = iov_from_buf(elem.in_sg, elem.in_num,
len = iov_from_buf(elem->in_sg, elem->in_num,
0, buf + offset, size - offset);
offset += len;
virtqueue_push(vrng->vq, &elem, len);
virtqueue_push(vrng->vq, elem, len);
trace_virtio_rng_pushed(vrng, len);
g_free(elem);
}
virtio_notify(vdev, vrng->vq);
}

View File

@ -70,7 +70,15 @@ typedef struct VRing
struct VirtQueue
{
VRing vring;
/* Next head to pop */
uint16_t last_avail_idx;
/* Last avail_idx read from VQ. */
uint16_t shadow_avail_idx;
uint16_t used_idx;
/* Last used index value we have signalled on */
uint16_t signalled_used;
@ -107,35 +115,15 @@ void virtio_queue_update_rings(VirtIODevice *vdev, int n)
vring->align);
}
static inline uint64_t vring_desc_addr(VirtIODevice *vdev, hwaddr desc_pa,
int i)
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
hwaddr desc_pa, int i)
{
hwaddr pa;
pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
return virtio_ldq_phys(vdev, pa);
}
static inline uint32_t vring_desc_len(VirtIODevice *vdev, hwaddr desc_pa, int i)
{
hwaddr pa;
pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
return virtio_ldl_phys(vdev, pa);
}
static inline uint16_t vring_desc_flags(VirtIODevice *vdev, hwaddr desc_pa,
int i)
{
hwaddr pa;
pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
return virtio_lduw_phys(vdev, pa);
}
static inline uint16_t vring_desc_next(VirtIODevice *vdev, hwaddr desc_pa,
int i)
{
hwaddr pa;
pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
return virtio_lduw_phys(vdev, pa);
address_space_read(&address_space_memory, desc_pa + i * sizeof(VRingDesc),
MEMTXATTRS_UNSPECIFIED, (void *)desc, sizeof(VRingDesc));
virtio_tswap64s(vdev, &desc->addr);
virtio_tswap32s(vdev, &desc->len);
virtio_tswap16s(vdev, &desc->flags);
virtio_tswap16s(vdev, &desc->next);
}
static inline uint16_t vring_avail_flags(VirtQueue *vq)
@ -149,7 +137,8 @@ static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
hwaddr pa;
pa = vq->vring.avail + offsetof(VRingAvail, idx);
return virtio_lduw_phys(vq->vdev, pa);
vq->shadow_avail_idx = virtio_lduw_phys(vq->vdev, pa);
return vq->shadow_avail_idx;
}
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
@ -164,18 +153,15 @@ static inline uint16_t vring_get_used_event(VirtQueue *vq)
return vring_avail_ring(vq, vq->vring.num);
}
static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
int i)
{
hwaddr pa;
pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
virtio_stl_phys(vq->vdev, pa, val);
}
static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
hwaddr pa;
pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
virtio_stl_phys(vq->vdev, pa, val);
virtio_tswap32s(vq->vdev, &uelem->id);
virtio_tswap32s(vq->vdev, &uelem->len);
pa = vq->vring.used + offsetof(VRingUsed, ring[i]);
address_space_write(&address_space_memory, pa, MEMTXATTRS_UNSPECIFIED,
(void *)uelem, sizeof(VRingUsedElem));
}
static uint16_t vring_used_idx(VirtQueue *vq)
@ -190,6 +176,7 @@ static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
hwaddr pa;
pa = vq->vring.used + offsetof(VRingUsed, idx);
virtio_stw_phys(vq->vdev, pa, val);
vq->used_idx = val;
}
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
@ -239,8 +226,14 @@ int virtio_queue_ready(VirtQueue *vq)
return vq->vring.avail != 0;
}
/* Return nonzero iff no new buffers are pending on @vq.
 *
 * The cached shadow_avail_idx is checked first so that avail_idx is only
 * fetched from guest memory when the cache says the ring might be empty. */
int virtio_queue_empty(VirtQueue *vq)
{
    if (vq->shadow_avail_idx == vq->last_avail_idx) {
        /* Cache says empty; re-read the ring to be sure. */
        return vring_avail_idx(vq) == vq->last_avail_idx;
    }
    return 0;
}
@ -277,15 +270,17 @@ void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem,
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len, unsigned int idx)
{
VRingUsedElem uelem;
trace_virtqueue_fill(vq, elem, len, idx);
virtqueue_unmap_sg(vq, elem, len);
idx = (idx + vring_used_idx(vq)) % vq->vring.num;
idx = (idx + vq->used_idx) % vq->vring.num;
/* Get a pointer to the next entry in the used ring. */
vring_used_ring_id(vq, idx, elem->index);
vring_used_ring_len(vq, idx, len);
uelem.id = elem->index;
uelem.len = len;
vring_used_write(vq, &uelem, idx);
}
void virtqueue_flush(VirtQueue *vq, unsigned int count)
@ -294,7 +289,7 @@ void virtqueue_flush(VirtQueue *vq, unsigned int count)
/* Make sure buffer is written before we update index. */
smp_wmb();
trace_virtqueue_flush(vq, count);
old = vring_used_idx(vq);
old = vq->used_idx;
new = old + count;
vring_used_idx_set(vq, new);
vq->inuse -= count;
@ -316,7 +311,7 @@ static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
/* Check it isn't doing very strange things with descriptor numbers. */
if (num_heads > vq->vring.num) {
error_report("Guest moved used index from %u to %u",
idx, vring_avail_idx(vq));
idx, vq->shadow_avail_idx);
exit(1);
}
/* On success, callers read a descriptor at vq->last_avail_idx.
@ -345,18 +340,18 @@ static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
return head;
}
static unsigned virtqueue_next_desc(VirtIODevice *vdev, hwaddr desc_pa,
unsigned int i, unsigned int max)
static unsigned virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
hwaddr desc_pa, unsigned int max)
{
unsigned int next;
/* If this descriptor says it doesn't chain, we're done. */
if (!(vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_NEXT)) {
if (!(desc->flags & VRING_DESC_F_NEXT)) {
return max;
}
/* Check they're not leading us off end of descriptors. */
next = vring_desc_next(vdev, desc_pa, i);
next = desc->next;
/* Make sure compiler knows to grab that: we don't want it changing! */
smp_wmb();
@ -365,6 +360,7 @@ static unsigned virtqueue_next_desc(VirtIODevice *vdev, hwaddr desc_pa,
exit(1);
}
vring_desc_read(vdev, desc, desc_pa, next);
return next;
}
@ -381,6 +377,7 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
while (virtqueue_num_heads(vq, idx)) {
VirtIODevice *vdev = vq->vdev;
unsigned int max, num_bufs, indirect = 0;
VRingDesc desc;
hwaddr desc_pa;
int i;
@ -388,9 +385,10 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
num_bufs = total_bufs;
i = virtqueue_get_head(vq, idx++);
desc_pa = vq->vring.desc;
vring_desc_read(vdev, &desc, desc_pa, i);
if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_INDIRECT) {
if (vring_desc_len(vdev, desc_pa, i) % sizeof(VRingDesc)) {
if (desc.flags & VRING_DESC_F_INDIRECT) {
if (desc.len % sizeof(VRingDesc)) {
error_report("Invalid size for indirect buffer table");
exit(1);
}
@ -403,9 +401,10 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
/* loop over the indirect descriptor table */
indirect = 1;
max = vring_desc_len(vdev, desc_pa, i) / sizeof(VRingDesc);
desc_pa = vring_desc_addr(vdev, desc_pa, i);
max = desc.len / sizeof(VRingDesc);
desc_pa = desc.addr;
num_bufs = i = 0;
vring_desc_read(vdev, &desc, desc_pa, i);
}
do {
@ -415,15 +414,15 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
exit(1);
}
if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_WRITE) {
in_total += vring_desc_len(vdev, desc_pa, i);
if (desc.flags & VRING_DESC_F_WRITE) {
in_total += desc.len;
} else {
out_total += vring_desc_len(vdev, desc_pa, i);
out_total += desc.len;
}
if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
goto done;
}
} while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max);
} while ((i = virtqueue_read_next_desc(vdev, &desc, desc_pa, max)) != max);
if (!indirect)
total_bufs = num_bufs;
@ -448,6 +447,32 @@ int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
return in_bytes <= in_total && out_bytes <= out_total;
}
/*
 * Map the guest-physical range [pa, pa + sz) and append the resulting host
 * iovec entries (and their guest addresses) to iov[]/addr[].
 *
 * @p_num_sg:   in/out count of entries already used in iov[]/addr[]
 * @max_num_sg: capacity of iov[]/addr[]
 * @is_write:   true when mapping a guest-writable (device-to-guest) buffer
 *
 * A single descriptor may span several host memory regions, so one call can
 * produce multiple iovec entries.  Guest-triggerable errors are fatal here,
 * matching the error handling style of the rest of this file.
 */
static void virtqueue_map_desc(unsigned int *p_num_sg, hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            /* Report the direction actually being mapped, not always "write". */
            error_report("virtio: too many %s descriptors in indirect table",
                         is_write ? "write" : "read");
            exit(1);
        }

        iov[num_sg].iov_base = cpu_physical_memory_map(pa, &len, is_write);
        if (!iov[num_sg].iov_base || len == 0) {
            /* A failed or zero-length mapping would otherwise store a
             * NULL/empty iovec entry and, with len == 0, never make
             * progress in this loop. */
            error_report("virtio: bogus descriptor or out of resources");
            exit(1);
        }
        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    *p_num_sg = num_sg;
}
static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr,
unsigned int *num_sg, unsigned int max_size,
int is_write)
@ -474,44 +499,62 @@ static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr,
error_report("virtio: error trying to map MMIO memory");
exit(1);
}
if (len == sg[i].iov_len) {
continue;
}
if (*num_sg >= max_size) {
error_report("virtio: memory split makes iovec too large");
if (len != sg[i].iov_len) {
error_report("virtio: unexpected memory split");
exit(1);
}
memmove(sg + i + 1, sg + i, sizeof(*sg) * (*num_sg - i));
memmove(addr + i + 1, addr + i, sizeof(*addr) * (*num_sg - i));
assert(len < sg[i + 1].iov_len);
sg[i].iov_len = len;
addr[i + 1] += len;
sg[i + 1].iov_len -= len;
++*num_sg;
}
}
void virtqueue_map(VirtQueueElement *elem)
{
virtqueue_map_iovec(elem->in_sg, elem->in_addr, &elem->in_num,
MIN(ARRAY_SIZE(elem->in_sg), ARRAY_SIZE(elem->in_addr)),
1);
VIRTQUEUE_MAX_SIZE, 1);
virtqueue_map_iovec(elem->out_sg, elem->out_addr, &elem->out_num,
MIN(ARRAY_SIZE(elem->out_sg), ARRAY_SIZE(elem->out_addr)),
0);
VIRTQUEUE_MAX_SIZE, 0);
}
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
/* Allocate a VirtQueueElement (possibly embedded at the start of a larger,
 * caller-defined struct of size @sz) together with its four variable-size
 * arrays, all in one heap block laid out as:
 *
 *   [fixed part, sz bytes][in_addr][out_addr][in_sg][out_sg]
 *
 * Each array is placed at an offset aligned for its element type. */
void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t addr_in_off, addr_out_off, sg_in_off, sg_out_off, total;

    assert(sz >= sizeof(VirtQueueElement));

    addr_in_off  = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    addr_out_off = addr_in_off + in_num * sizeof(elem->in_addr[0]);
    sg_in_off    = QEMU_ALIGN_UP(addr_out_off +
                                 out_num * sizeof(elem->out_addr[0]),
                                 __alignof__(elem->in_sg[0]));
    sg_out_off   = sg_in_off + in_num * sizeof(elem->in_sg[0]);
    total        = sg_out_off + out_num * sizeof(elem->out_sg[0]);

    elem = g_malloc(total);
    elem->in_num   = in_num;
    elem->out_num  = out_num;
    elem->in_addr  = (void *)elem + addr_in_off;
    elem->out_addr = (void *)elem + addr_out_off;
    elem->in_sg    = (void *)elem + sg_in_off;
    elem->out_sg   = (void *)elem + sg_out_off;
    return elem;
}
void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
unsigned int i, head, max;
hwaddr desc_pa = vq->vring.desc;
VirtIODevice *vdev = vq->vdev;
VirtQueueElement *elem;
unsigned out_num, in_num;
hwaddr addr[VIRTQUEUE_MAX_SIZE];
struct iovec iov[VIRTQUEUE_MAX_SIZE];
VRingDesc desc;
if (!virtqueue_num_heads(vq, vq->last_avail_idx))
return 0;
if (virtio_queue_empty(vq)) {
return NULL;
}
/* Needed after virtio_queue_empty(), see comment in
* virtqueue_num_heads(). */
smp_rmb();
/* When we start there are none of either input nor output. */
elem->out_num = elem->in_num = 0;
out_num = in_num = 0;
max = vq->vring.num;
@ -520,56 +563,140 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
vring_set_avail_event(vq, vq->last_avail_idx);
}
if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_INDIRECT) {
if (vring_desc_len(vdev, desc_pa, i) % sizeof(VRingDesc)) {
vring_desc_read(vdev, &desc, desc_pa, i);
if (desc.flags & VRING_DESC_F_INDIRECT) {
if (desc.len % sizeof(VRingDesc)) {
error_report("Invalid size for indirect buffer table");
exit(1);
}
/* loop over the indirect descriptor table */
max = vring_desc_len(vdev, desc_pa, i) / sizeof(VRingDesc);
desc_pa = vring_desc_addr(vdev, desc_pa, i);
max = desc.len / sizeof(VRingDesc);
desc_pa = desc.addr;
i = 0;
vring_desc_read(vdev, &desc, desc_pa, i);
}
/* Collect all the descriptors */
do {
struct iovec *sg;
if (vring_desc_flags(vdev, desc_pa, i) & VRING_DESC_F_WRITE) {
if (elem->in_num >= ARRAY_SIZE(elem->in_sg)) {
error_report("Too many write descriptors in indirect table");
exit(1);
}
elem->in_addr[elem->in_num] = vring_desc_addr(vdev, desc_pa, i);
sg = &elem->in_sg[elem->in_num++];
if (desc.flags & VRING_DESC_F_WRITE) {
virtqueue_map_desc(&in_num, addr + out_num, iov + out_num,
VIRTQUEUE_MAX_SIZE - out_num, true, desc.addr, desc.len);
} else {
if (elem->out_num >= ARRAY_SIZE(elem->out_sg)) {
error_report("Too many read descriptors in indirect table");
if (in_num) {
error_report("Incorrect order for descriptors");
exit(1);
}
elem->out_addr[elem->out_num] = vring_desc_addr(vdev, desc_pa, i);
sg = &elem->out_sg[elem->out_num++];
virtqueue_map_desc(&out_num, addr, iov,
VIRTQUEUE_MAX_SIZE, false, desc.addr, desc.len);
}
sg->iov_len = vring_desc_len(vdev, desc_pa, i);
/* If we've got too many, that implies a descriptor loop. */
if ((elem->in_num + elem->out_num) > max) {
if ((in_num + out_num) > max) {
error_report("Looped descriptor");
exit(1);
}
} while ((i = virtqueue_next_desc(vdev, desc_pa, i, max)) != max);
/* Now map what we have collected */
virtqueue_map(elem);
} while ((i = virtqueue_read_next_desc(vdev, &desc, desc_pa, max)) != max);
/* Now copy what we have collected and mapped */
elem = virtqueue_alloc_element(sz, out_num, in_num);
elem->index = head;
for (i = 0; i < out_num; i++) {
elem->out_addr[i] = addr[i];
elem->out_sg[i] = iov[i];
}
for (i = 0; i < in_num; i++) {
elem->in_addr[i] = addr[out_num + i];
elem->in_sg[i] = iov[out_num + i];
}
vq->inuse++;
trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
return elem->in_num + elem->out_num;
return elem;
}
/* Reading and writing a structure directly to QEMUFile is *awful*, but
* it is what QEMU has always done by mistake. We can change it sooner
* or later by bumping the version number of the affected vm states.
* In the meanwhile, since the in-memory layout of VirtQueueElement
* has changed, we need to marshal to and from the layout that was
* used before the change.
*/
/* Fixed-size layout in which VirtQueueElement has always been marshalled to
 * the migration stream; retained for stream compatibility now that the
 * in-memory struct uses dynamically sized arrays. */
typedef struct VirtQueueElementOld {
unsigned int index;                      /* descriptor-chain head index */
unsigned int out_num;                    /* used entries in out_addr/out_sg */
unsigned int in_num;                     /* used entries in in_addr/in_sg */
hwaddr in_addr[VIRTQUEUE_MAX_SIZE];      /* guest addrs, writable buffers */
hwaddr out_addr[VIRTQUEUE_MAX_SIZE];     /* guest addrs, readable buffers */
struct iovec in_sg[VIRTQUEUE_MAX_SIZE];  /* host iovecs, writable buffers */
struct iovec out_sg[VIRTQUEUE_MAX_SIZE]; /* host iovecs, readable buffers */
} VirtQueueElementOld;
/* Read one VirtQueueElement, stored in the legacy fixed-size layout
 * (VirtQueueElementOld), from migration stream @f.  Returns a freshly
 * allocated element of size @sz (>= sizeof(VirtQueueElement)); the caller
 * owns it and frees it with g_free().  Host mappings are re-established
 * via virtqueue_map(). */
void *qemu_get_virtqueue_element(QEMUFile *f, size_t sz)
{
    VirtQueueElement *elem;
    VirtQueueElementOld data;
    int i;

    qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));

    /* The counts come from the migration stream and are untrusted: without
     * this check a corrupt stream would make the loops below read past the
     * fixed-size arrays inside 'data'. */
    if (data.in_num > VIRTQUEUE_MAX_SIZE || data.out_num > VIRTQUEUE_MAX_SIZE) {
        error_report("virtio: invalid element count in migration stream");
        exit(1);
    }

    elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
    elem->index = data.index;

    for (i = 0; i < elem->in_num; i++) {
        elem->in_addr[i] = data.in_addr[i];
        /* iov_base is overwritten by virtqueue_map(); only the length is
         * meaningful in the stream. */
        elem->in_sg[i].iov_base = 0;
        elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
    }
    for (i = 0; i < elem->out_num; i++) {
        elem->out_addr[i] = data.out_addr[i];
        /* Base rebuilt by virtqueue_map(), as above. */
        elem->out_sg[i].iov_base = 0;
        elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
    }

    virtqueue_map(elem);
    return elem;
}
/* Marshal @elem into the legacy fixed-size VirtQueueElementOld layout and
 * write it to migration stream @f.  The iov_base pointers are deliberately
 * left zeroed: they are host addresses, rebuilt on load by virtqueue_map(),
 * and saving them would leak the QEMU address space layout. */
void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem)
{
    VirtQueueElementOld data;
    int n;

    memset(&data, 0, sizeof(data));
    data.index = elem->index;
    data.in_num = elem->in_num;
    data.out_num = elem->out_num;

    for (n = 0; n < elem->in_num; n++) {
        data.in_addr[n] = elem->in_addr[n];
        data.in_sg[n].iov_len = elem->in_sg[n].iov_len;
    }
    for (n = 0; n < elem->out_num; n++) {
        data.out_addr[n] = elem->out_addr[n];
        data.out_sg[n].iov_len = elem->out_sg[n].iov_len;
    }

    qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
}
/* virtio device */
@ -673,6 +800,8 @@ void virtio_reset(void *opaque)
vdev->vq[i].vring.avail = 0;
vdev->vq[i].vring.used = 0;
vdev->vq[i].last_avail_idx = 0;
vdev->vq[i].shadow_avail_idx = 0;
vdev->vq[i].used_idx = 0;
virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
vdev->vq[i].signalled_used = 0;
vdev->vq[i].signalled_used_valid = false;
@ -1041,7 +1170,7 @@ static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
smp_mb();
/* Always notify when queue is empty (when feature acknowledge) */
if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
!vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx) {
!vq->inuse && virtio_queue_empty(vq)) {
return true;
}
@ -1052,7 +1181,7 @@ static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
v = vq->signalled_used_valid;
vq->signalled_used_valid = true;
old = vq->signalled_used;
new = vq->signalled_used = vring_used_idx(vq);
new = vq->signalled_used = vq->used_idx;
return !v || vring_need_event(vring_get_used_event(vq), new, old);
}
@ -1143,8 +1272,8 @@ static const VMStateDescription vmstate_virtio_virtqueues = {
.minimum_version_id = 1,
.needed = &virtio_virtqueue_needed,
.fields = (VMStateField[]) {
VMSTATE_STRUCT_VARRAY_KNOWN(vq, struct VirtIODevice, VIRTIO_QUEUE_MAX,
0, vmstate_virtqueue, VirtQueue),
VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
VMSTATE_END_OF_LIST()
}
};
@ -1165,8 +1294,8 @@ static const VMStateDescription vmstate_virtio_ringsize = {
.minimum_version_id = 1,
.needed = &virtio_ringsize_needed,
.fields = (VMStateField[]) {
VMSTATE_STRUCT_VARRAY_KNOWN(vq, struct VirtIODevice, VIRTIO_QUEUE_MAX,
0, vmstate_ringsize, VirtQueue),
VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
VMSTATE_END_OF_LIST()
}
};
@ -1464,6 +1593,8 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
vdev->vq[i].last_avail_idx, nheads);
return -1;
}
vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);
}
}
@ -1599,6 +1730,7 @@ uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
vdev->vq[n].last_avail_idx = idx;
vdev->vq[n].shadow_avail_idx = idx;
}
void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)

View File

@ -115,9 +115,7 @@ static int msi_msix_setup(XenPCIPassthroughState *s,
assert((!is_msix && msix_entry == 0) || is_msix);
if (gvec == 0) {
/* if gvec is 0, the guest is asking for a particular pirq that
* is passed as dest_id */
if (xen_is_pirq_msi(data)) {
*ppirq = msi_ext_dest_id(addr >> 32) | msi_dest_id(addr);
if (!*ppirq) {
/* this probably identifies an misconfiguration of the guest,

View File

@ -196,4 +196,11 @@ unsigned acpi_table_len(void *current);
void acpi_table_add(const QemuOpts *opts, Error **errp);
void acpi_table_add_builtin(const QemuOpts *opts, Error **errp);
/* OEM identification strings extracted from a user-provided SLIC ACPI
 * table, used to fill the OEM fields of other generated ACPI tables. */
typedef struct AcpiSlicOem AcpiSlicOem;
struct AcpiSlicOem {
char *id;        /* OEM ID for generated table headers */
char *table_id;  /* OEM Table ID for generated table headers */
};

/* Fill @oem from the user's SLIC table, if one was supplied.
 * NOTE(review): return value convention (0 on success?) not visible
 * here — confirm against the implementation. */
int acpi_get_slic_oem(AcpiSlicOem *oem);
#endif /* !QEMU_HW_ACPI_H */

View File

@ -357,13 +357,14 @@ Aml *aml_sizeof(Aml *arg);
void
build_header(GArray *linker, GArray *table_data,
AcpiTableHeader *h, const char *sig, int len, uint8_t rev,
const char *oem_table_id);
const char *oem_id, const char *oem_table_id);
void *acpi_data_push(GArray *table_data, unsigned size);
unsigned acpi_data_len(GArray *table);
void acpi_add_table(GArray *table_offsets, GArray *table_data);
void acpi_build_tables_init(AcpiBuildTables *tables);
void acpi_build_tables_cleanup(AcpiBuildTables *tables, bool mfre);
void
build_rsdt(GArray *table_data, GArray *linker, GArray *table_offsets);
build_rsdt(GArray *table_data, GArray *linker, GArray *table_offsets,
const char *oem_id, const char *oem_table_id);
#endif

View File

@ -83,6 +83,7 @@ struct VTDIOTLBEntry {
uint64_t gfn;
uint16_t domain_id;
uint64_t slpte;
uint64_t mask;
bool read_flags;
bool write_flags;
};

View File

@ -45,11 +45,13 @@ struct PCMachineState {
/* State for other subsystems/APIs: */
MemoryHotplugState hotplug_memory;
Notifier machine_done;
/* Pointers to devices and objects: */
HotplugHandler *acpi_dev;
ISADevice *rtc;
PCIBus *bus;
FWCfgState *fw_cfg;
/* Configuration options: */
uint64_t max_ram_below_4g;
@ -59,6 +61,15 @@ struct PCMachineState {
/* RAM information (sizes, addresses, configuration): */
ram_addr_t below_4g_mem_size, above_4g_mem_size;
/* CPU and apic information: */
bool apic_xrupt_override;
unsigned apic_id_limit;
/* NUMA information: */
uint64_t numa_nodes;
uint64_t *node_mem;
uint64_t *node_cpu;
};
#define PC_MACHINE_ACPI_DEVICE_PROP "acpi-device"
@ -151,21 +162,6 @@ typedef struct PcPciInfo {
#define ACPI_PM_PROP_GPE0_BLK_LEN "gpe0_blk_len"
#define ACPI_PM_PROP_TCO_ENABLED "enable_tco"
struct PcGuestInfo {
bool isapc_ram_fw;
hwaddr ram_size, ram_size_below_4g;
unsigned apic_id_limit;
bool apic_xrupt_override;
uint64_t numa_nodes;
uint64_t *node_mem;
uint64_t *node_cpu;
FWCfgState *fw_cfg;
int legacy_acpi_table_size;
bool has_acpi_build;
bool has_reserved_memory;
bool rsdp_in_ram;
};
/* parallel.c */
void parallel_hds_isa_init(ISABus *bus, int n);
@ -232,7 +228,7 @@ void pc_cpus_init(PCMachineState *pcms);
void pc_hot_add_cpu(const int64_t id, Error **errp);
void pc_acpi_init(const char *default_dsdt);
PcGuestInfo *pc_guest_info_init(PCMachineState *pcms);
void pc_guest_info_init(PCMachineState *pcms);
#define PCI_HOST_PROP_PCI_HOLE_START "pci-hole-start"
#define PCI_HOST_PROP_PCI_HOLE_END "pci-hole-end"
@ -245,13 +241,11 @@ PcGuestInfo *pc_guest_info_init(PCMachineState *pcms);
void pc_pci_as_mapping_init(Object *owner, MemoryRegion *system_memory,
MemoryRegion *pci_address_space);
FWCfgState *xen_load_linux(PCMachineState *pcms,
PcGuestInfo *guest_info);
FWCfgState *pc_memory_init(PCMachineState *pcms,
MemoryRegion *system_memory,
MemoryRegion *rom_memory,
MemoryRegion **ram_memory,
PcGuestInfo *guest_info);
void xen_load_linux(PCMachineState *pcms);
void pc_memory_init(PCMachineState *pcms,
MemoryRegion *system_memory,
MemoryRegion *rom_memory,
MemoryRegion **ram_memory);
qemu_irq pc_allocate_cpu_irq(void);
DeviceState *pc_vga_init(ISABus *isa_bus, PCIBus *pci_bus);
void pc_basic_device_init(ISABus *isa_bus, qemu_irq *gsi,

View File

@ -210,4 +210,49 @@ IPMIFwInfo *ipmi_next_fwinfo(IPMIFwInfo *current);
#define ipmi_debug(fs, ...)
#endif
struct ipmi_sdr_header {
uint8_t rec_id[2];
uint8_t sdr_version; /* 0x51 */
uint8_t rec_type;
uint8_t rec_length;
};
#define IPMI_SDR_HEADER_SIZE sizeof(struct ipmi_sdr_header)
#define ipmi_sdr_recid(sdr) ((sdr)->rec_id[0] | ((sdr)->rec_id[1] << 8))
#define ipmi_sdr_length(sdr) ((sdr)->rec_length + IPMI_SDR_HEADER_SIZE)
/*
* 43.2 SDR Type 02h. Compact Sensor Record
*/
#define IPMI_SDR_COMPACT_TYPE 2
struct ipmi_sdr_compact {
struct ipmi_sdr_header header;
uint8_t sensor_owner_id;
uint8_t sensor_owner_lun;
uint8_t sensor_owner_number; /* byte 8 */
uint8_t entity_id;
uint8_t entity_instance;
uint8_t sensor_init;
uint8_t sensor_caps;
uint8_t sensor_type;
uint8_t reading_type;
uint8_t assert_mask[2]; /* byte 16 */
uint8_t deassert_mask[2];
uint8_t discrete_mask[2];
uint8_t sensor_unit1;
uint8_t sensor_unit2;
uint8_t sensor_unit3;
uint8_t sensor_direction[2]; /* byte 24 */
uint8_t positive_threshold;
uint8_t negative_threshold;
uint8_t reserved[3];
uint8_t oem;
uint8_t id_str_len; /* byte 32 */
uint8_t id_string[16];
};
typedef uint8_t ipmi_sdr_compact_buffer[sizeof(struct ipmi_sdr_compact)];
#endif

View File

@ -77,7 +77,7 @@ typedef struct PCDIMMDeviceClass {
* @mr: hotplug memory address space container
*/
typedef struct MemoryHotplugState {
ram_addr_t base;
hwaddr base;
MemoryRegion mr;
} MemoryHotplugState;

View File

@ -44,7 +44,7 @@ void vring_teardown(Vring *vring, VirtIODevice *vdev, int n);
void vring_disable_notification(VirtIODevice *vdev, Vring *vring);
bool vring_enable_notification(VirtIODevice *vdev, Vring *vring);
bool vring_should_notify(VirtIODevice *vdev, Vring *vring);
int vring_pop(VirtIODevice *vdev, Vring *vring, VirtQueueElement *elem);
void *vring_pop(VirtIODevice *vdev, Vring *vring, size_t sz);
void vring_push(VirtIODevice *vdev, Vring *vring, VirtQueueElement *elem,
int len);

View File

@ -37,7 +37,7 @@ typedef struct VirtIOBalloon {
uint32_t num_pages;
uint32_t actual;
uint64_t stats[VIRTIO_BALLOON_S_NR];
VirtQueueElement stats_vq_elem;
VirtQueueElement *stats_vq_elem;
size_t stats_vq_offset;
QEMUTimer *stats_timer;
int64_t stats_last_update;

View File

@ -60,9 +60,9 @@ typedef struct VirtIOBlock {
} VirtIOBlock;
typedef struct VirtIOBlockReq {
VirtQueueElement elem;
int64_t sector_num;
VirtIOBlock *dev;
VirtQueueElement elem;
struct virtio_blk_inhdr *in;
struct virtio_blk_outhdr out;
QEMUIOVector qiov;
@ -80,8 +80,7 @@ typedef struct MultiReqBuffer {
bool is_write;
} MultiReqBuffer;
VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s);
void virtio_blk_init_request(VirtIOBlock *s, VirtIOBlockReq *req);
void virtio_blk_free_request(VirtIOBlockReq *req);
void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb);

View File

@ -47,7 +47,7 @@ typedef struct VirtIONetQueue {
QEMUBH *tx_bh;
int tx_waiting;
struct {
VirtQueueElement elem;
VirtQueueElement *elem;
} async_tx;
struct VirtIONet *n;
} VirtIONetQueue;

View File

@ -112,18 +112,17 @@ typedef struct VirtIOSCSI {
} VirtIOSCSI;
typedef struct VirtIOSCSIReq {
/* Note:
* - fields up to resp_iov are initialized by virtio_scsi_init_req;
* - fields starting at vring are zeroed by virtio_scsi_init_req.
* */
VirtQueueElement elem;
VirtIOSCSI *dev;
VirtQueue *vq;
QEMUSGList qsgl;
QEMUIOVector resp_iov;
/* Note:
* - fields before elem are initialized by virtio_scsi_init_req;
* - elem is uninitialized at the time of allocation.
* - fields after elem are zeroed by virtio_scsi_init_req.
* */
VirtQueueElement elem;
/* Set by dataplane code. */
VirtIOSCSIVring *vring;
@ -161,7 +160,7 @@ void virtio_scsi_common_unrealize(DeviceState *dev, Error **errp);
void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req);
bool virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req);
void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req);
VirtIOSCSIReq *virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq);
void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req);
void virtio_scsi_free_req(VirtIOSCSIReq *req);
void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
uint32_t event, uint32_t reason);

View File

@ -122,7 +122,7 @@ struct VirtIOSerialPort {
* element popped and continue consuming it once the backend
* becomes writable again.
*/
VirtQueueElement elem;
VirtQueueElement *elem;
/*
* The index and the offset into the iov buffer that was popped in

View File

@ -46,10 +46,10 @@ typedef struct VirtQueueElement
unsigned int index;
unsigned int out_num;
unsigned int in_num;
hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
hwaddr *in_addr;
hwaddr *out_addr;
struct iovec *in_sg;
struct iovec *out_sg;
} VirtQueueElement;
#define VIRTIO_QUEUE_MAX 1024
@ -143,6 +143,7 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
void virtio_del_queue(VirtIODevice *vdev, int n);
void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num);
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len);
void virtqueue_flush(VirtQueue *vq, unsigned int count);
@ -152,7 +153,9 @@ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len, unsigned int idx);
void virtqueue_map(VirtQueueElement *elem);
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem);
void *virtqueue_pop(VirtQueue *vq, size_t sz);
void *qemu_get_virtqueue_element(QEMUFile *f, size_t sz);
void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem);
int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
unsigned int out_bytes);
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,

View File

@ -33,6 +33,7 @@ int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num);
void xen_piix3_set_irq(void *opaque, int irq_num, int level);
void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len);
void xen_hvm_inject_msi(uint64_t addr, uint32_t data);
int xen_is_pirq_msi(uint32_t msi_data);
qemu_irq *xen_interrupt_controller_init(void);

View File

@ -386,19 +386,6 @@ extern const VMStateInfo vmstate_info_bitmap;
.offset = vmstate_offset_array(_state, _field, _type, _num),\
}
/* a variable length array (i.e. _type *_field) but we know the
* length
*/
#define VMSTATE_STRUCT_VARRAY_KNOWN(_field, _state, _num, _version, _vmsd, _type) { \
.name = (stringify(_field)), \
.num = (_num), \
.version_id = (_version), \
.vmsd = &(_vmsd), \
.size = sizeof(_type), \
.flags = VMS_STRUCT|VMS_ARRAY, \
.offset = offsetof(_state, _field), \
}
#define VMSTATE_STRUCT_VARRAY_UINT8(_field, _state, _field_num, _version, _vmsd, _type) { \
.name = (stringify(_field)), \
.num_offset = vmstate_offset_value(_state, _field_num, uint8_t), \
@ -409,6 +396,19 @@ extern const VMStateInfo vmstate_info_bitmap;
.offset = offsetof(_state, _field), \
}
/* a variable length array (i.e. _type *_field) but we know the
* length
*/
#define VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(_field, _state, _num, _version, _vmsd, _type) { \
.name = (stringify(_field)), \
.num = (_num), \
.version_id = (_version), \
.vmsd = &(_vmsd), \
.size = sizeof(_type), \
.flags = VMS_STRUCT|VMS_ARRAY|VMS_POINTER, \
.offset = offsetof(_state, _field), \
}
#define VMSTATE_STRUCT_VARRAY_POINTER_INT32(_field, _state, _field_num, _vmsd, _type) { \
.name = (stringify(_field)), \
.version_id = 0, \

View File

@ -1472,6 +1472,10 @@ ACPI headers (possible overridden by other options).
For data=, only data
portion of the table is used, all header information is specified in the
command line.
If a SLIC table is supplied to QEMU, then the SLIC's oem_id and oem_table_id
fields will override the same in the RSDT and the FADT (a.k.a. FACP), in order
to ensure the field matches required by the Microsoft SLIC spec and the ACPI
spec.
ETEXI
DEF("smbios", HAS_ARG, QEMU_OPTION_smbios,

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -35,11 +35,10 @@
"mem-path=%s,share=on -numa node,memdev=mem"
#define QEMU_CMD_CHR " -chardev socket,id=%s,path=%s"
#define QEMU_CMD_NETDEV " -netdev vhost-user,id=net0,chardev=%s,vhostforce"
#define QEMU_CMD_NET " -device virtio-net-pci,netdev=net0 "
#define QEMU_CMD_ROM " -option-rom ../pc-bios/pxe-virtio.rom"
#define QEMU_CMD_NET " -device virtio-net-pci,netdev=net0,romfile=./pc-bios/pxe-virtio.rom"
#define QEMU_CMD QEMU_CMD_ACCEL QEMU_CMD_MEM QEMU_CMD_CHR \
QEMU_CMD_NETDEV QEMU_CMD_NET QEMU_CMD_ROM
QEMU_CMD_NETDEV QEMU_CMD_NET
#define HUGETLBFS_MAGIC 0x958458f6

View File

@ -31,6 +31,11 @@ void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
{
}
int xen_is_pirq_msi(uint32_t msi_data)
{
return 0;
}
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
Error **errp)
{

View File

@ -13,6 +13,7 @@
#include "hw/pci/pci.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic-msidef.h"
#include "hw/xen/xen_common.h"
#include "hw/xen/xen_backend.h"
#include "qmp-commands.h"
@ -158,6 +159,14 @@ void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
}
}
int xen_is_pirq_msi(uint32_t msi_data)
{
/* If vector is 0, the msi is remapped into a pirq, passed as
* dest_id.
*/
return ((msi_data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT) == 0;
}
void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
{
xen_xc_hvm_inject_msi(xen_xc, xen_domid, addr, data);