diff --git a/dpdk/app/meson.build b/dpdk/app/meson.build index a9a026bb..fd5f701b 100644 --- a/dpdk/app/meson.build +++ b/dpdk/app/meson.build @@ -13,9 +13,6 @@ lib_execinfo = cc.find_library('execinfo', required: false) default_cflags = machine_args -# specify -D_GNU_SOURCE unconditionally -default_cflags += '-D_GNU_SOURCE' - foreach app:apps build = true name = app diff --git a/dpdk/app/test-crypto-perf/cperf_test_latency.c b/dpdk/app/test-crypto-perf/cperf_test_latency.c index c9c98dc5..ec229bed 100644 --- a/dpdk/app/test-crypto-perf/cperf_test_latency.c +++ b/dpdk/app/test-crypto-perf/cperf_test_latency.c @@ -128,7 +128,7 @@ cperf_latency_test_runner(void *arg) uint8_t burst_size_idx = 0; uint32_t imix_idx = 0; - static int only_once; + static rte_atomic16_t display_once = RTE_ATOMIC16_INIT(0); if (ctx == NULL) return 0; @@ -310,7 +310,7 @@ cperf_latency_test_runner(void *arg) time_min = tunit*(double)(tsc_min) / tsc_hz; if (ctx->options->csv) { - if (!only_once) + if (rte_atomic16_test_and_set(&display_once)) printf("\n# lcore, Buffer Size, Burst Size, Pakt Seq #, " "Packet Size, cycles, time (us)"); @@ -325,7 +325,6 @@ cperf_latency_test_runner(void *arg) / tsc_hz); } - only_once = 1; } else { printf("\n# Device %d on lcore %u\n", ctx->dev_id, ctx->lcore_id); diff --git a/dpdk/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/dpdk/app/test-crypto-perf/cperf_test_pmd_cyclecount.c index c8d16db6..1f700a3d 100644 --- a/dpdk/app/test-crypto-perf/cperf_test_pmd_cyclecount.c +++ b/dpdk/app/test-crypto-perf/cperf_test_pmd_cyclecount.c @@ -16,7 +16,7 @@ #define PRETTY_HDR_FMT "%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n" #define PRETTY_LINE_FMT "%12u%12u%12u%12u%12u%12u%12u%12.0f%12.0f%12.0f\n" #define CSV_HDR_FMT "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n" -#define CSV_LINE_FMT "%10u;%10u;%u;%u;%u;%u;%u;%.f3;%.f3;%.f3\n" +#define CSV_LINE_FMT "%10u;%10u;%u;%u;%u;%u;%u;%.3f;%.3f;%.3f\n" struct cperf_pmd_cyclecount_ctx { uint8_t dev_id; @@ -390,7 +390,7 @@ 
cperf_pmd_cyclecount_test_runner(void *test_ctx) state.lcore = rte_lcore_id(); state.linearize = 0; - static int only_once; + static rte_atomic16_t display_once = RTE_ATOMIC16_INIT(0); static bool warmup = true; /* @@ -436,13 +436,12 @@ cperf_pmd_cyclecount_test_runner(void *test_ctx) } if (!opts->csv) { - if (!only_once) + if (rte_atomic16_test_and_set(&display_once)) printf(PRETTY_HDR_FMT, "lcore id", "Buf Size", "Burst Size", "Enqueued", "Dequeued", "Enq Retries", "Deq Retries", "Cycles/Op", "Cycles/Enq", "Cycles/Deq"); - only_once = 1; printf(PRETTY_LINE_FMT, state.ctx->lcore_id, opts->test_buffer_size, test_burst_size, @@ -453,13 +452,12 @@ cperf_pmd_cyclecount_test_runner(void *test_ctx) state.cycles_per_enq, state.cycles_per_deq); } else { - if (!only_once) + if (rte_atomic16_test_and_set(&display_once)) printf(CSV_HDR_FMT, "# lcore id", "Buf Size", "Burst Size", "Enqueued", "Dequeued", "Enq Retries", "Deq Retries", "Cycles/Op", "Cycles/Enq", "Cycles/Deq"); - only_once = 1; printf(CSV_LINE_FMT, state.ctx->lcore_id, opts->test_buffer_size, test_burst_size, diff --git a/dpdk/app/test-crypto-perf/cperf_test_throughput.c b/dpdk/app/test-crypto-perf/cperf_test_throughput.c index 8766d6e9..5838f8cf 100644 --- a/dpdk/app/test-crypto-perf/cperf_test_throughput.c +++ b/dpdk/app/test-crypto-perf/cperf_test_throughput.c @@ -94,7 +94,7 @@ cperf_throughput_test_runner(void *test_ctx) uint8_t burst_size_idx = 0; uint32_t imix_idx = 0; - static int only_once; + static rte_atomic16_t display_once = RTE_ATOMIC16_INIT(0); struct rte_crypto_op *ops[ctx->options->max_burst_size]; struct rte_crypto_op *ops_processed[ctx->options->max_burst_size]; @@ -261,13 +261,12 @@ cperf_throughput_test_runner(void *test_ctx) ctx->options->total_ops); if (!ctx->options->csv) { - if (!only_once) + if (rte_atomic16_test_and_set(&display_once)) printf("%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n", "lcore id", "Buf Size", "Burst Size", "Enqueued", "Dequeued", "Failed Enq", "Failed Deq", "MOps", 
"Gbps", "Cycles/Buf"); - only_once = 1; printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64 "%12"PRIu64"%12.4f%12.4f%12.2f\n", @@ -282,12 +281,11 @@ cperf_throughput_test_runner(void *test_ctx) throughput_gbps, cycles_per_packet); } else { - if (!only_once) + if (rte_atomic16_test_and_set(&display_once)) printf("#lcore id,Buffer Size(B)," "Burst Size,Enqueued,Dequeued,Failed Enq," "Failed Deq,Ops(Millions),Throughput(Gbps)," "Cycles/Buf\n\n"); - only_once = 1; printf("%u;%u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";" "%.3f;%.3f;%.3f\n", diff --git a/dpdk/app/test-crypto-perf/cperf_test_verify.c b/dpdk/app/test-crypto-perf/cperf_test_verify.c index 9134b921..2f11b73a 100644 --- a/dpdk/app/test-crypto-perf/cperf_test_verify.c +++ b/dpdk/app/test-crypto-perf/cperf_test_verify.c @@ -232,7 +232,7 @@ cperf_verify_test_runner(void *test_ctx) uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0; uint64_t ops_failed = 0; - static int only_once; + static rte_atomic16_t display_once = RTE_ATOMIC16_INIT(0); uint64_t i; uint16_t ops_unused = 0; @@ -375,12 +375,11 @@ cperf_verify_test_runner(void *test_ctx) } if (!ctx->options->csv) { - if (!only_once) + if (rte_atomic16_test_and_set(&display_once)) printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n", "lcore id", "Buf Size", "Burst size", "Enqueued", "Dequeued", "Failed Enq", "Failed Deq", "Failed Ops"); - only_once = 1; printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64 "%12"PRIu64"%12"PRIu64"\n", @@ -393,11 +392,10 @@ cperf_verify_test_runner(void *test_ctx) ops_deqd_failed, ops_failed); } else { - if (!only_once) + if (rte_atomic16_test_and_set(&display_once)) printf("\n# lcore id, Buffer Size(B), " "Burst Size,Enqueued,Dequeued,Failed Enq," "Failed Deq,Failed Ops\n"); - only_once = 1; printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";" "%"PRIu64"\n", diff --git a/dpdk/app/test-crypto-perf/main.c b/dpdk/app/test-crypto-perf/main.c index 0aa0de8b..92139479 100644 --- a/dpdk/app/test-crypto-perf/main.c +++ 
b/dpdk/app/test-crypto-perf/main.c @@ -627,9 +627,12 @@ main(int argc, char **argv) if (i == total_nb_qps) break; - rte_eal_wait_lcore(lcore_id); + ret |= rte_eal_wait_lcore(lcore_id); i++; } + + if (ret != EXIT_SUCCESS) + goto err; } else { /* Get next size from range or list */ @@ -654,10 +657,13 @@ main(int argc, char **argv) if (i == total_nb_qps) break; - rte_eal_wait_lcore(lcore_id); + ret |= rte_eal_wait_lcore(lcore_id); i++; } + if (ret != EXIT_SUCCESS) + goto err; + /* Get next size from range or list */ if (opts.inc_buffer_size != 0) opts.test_buffer_size += opts.inc_buffer_size; diff --git a/dpdk/app/test-eventdev/test_order_common.c b/dpdk/app/test-eventdev/test_order_common.c index 8a342013..252e4a14 100644 --- a/dpdk/app/test-eventdev/test_order_common.c +++ b/dpdk/app/test-eventdev/test_order_common.c @@ -67,6 +67,11 @@ order_producer(void *arg) int order_opt_check(struct evt_options *opt) { + if (opt->prod_type != EVT_PROD_TYPE_SYNT) { + evt_err("Invalid producer type"); + return -EINVAL; + } + /* 1 producer + N workers + 1 master */ if (rte_lcore_count() < 3) { evt_err("test need minimum 3 lcores"); @@ -298,12 +303,23 @@ order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt, int ret; uint8_t port; struct test_order *t = evt_test_priv(test); + struct rte_event_dev_info dev_info; + + memset(&dev_info, 0, sizeof(struct rte_event_dev_info)); + ret = rte_event_dev_info_get(opt->dev_id, &dev_info); + if (ret) { + evt_err("failed to get eventdev info %d", opt->dev_id); + return ret; + } + + if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth) + opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth; /* port configuration */ - const struct rte_event_port_conf wkr_p_conf = { + const struct rte_event_port_conf p_conf = { .dequeue_depth = opt->wkr_deq_dep, - .enqueue_depth = 64, - .new_event_threshold = 4096, + .enqueue_depth = dev_info.max_event_port_dequeue_depth, + .new_event_threshold = dev_info.max_num_events, }; /* setup 
one port per worker, linking to all queues */ @@ -314,7 +330,7 @@ order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt, w->port_id = port; w->t = t; - ret = rte_event_port_setup(opt->dev_id, port, &wkr_p_conf); + ret = rte_event_port_setup(opt->dev_id, port, &p_conf); if (ret) { evt_err("failed to setup port %d", port); return ret; @@ -326,12 +342,6 @@ order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt, return -EINVAL; } } - /* port for producer, no links */ - const struct rte_event_port_conf prod_conf = { - .dequeue_depth = 8, - .enqueue_depth = 32, - .new_event_threshold = 1200, - }; struct prod_data *p = &t->prod; p->dev_id = opt->dev_id; @@ -339,7 +349,7 @@ order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt, p->queue_id = 0; p->t = t; - ret = rte_event_port_setup(opt->dev_id, port, &prod_conf); + ret = rte_event_port_setup(opt->dev_id, port, &p_conf); if (ret) { evt_err("failed to setup producer port %d", port); return ret; diff --git a/dpdk/app/test-eventdev/test_perf_common.c b/dpdk/app/test-eventdev/test_perf_common.c index f99a6a60..f93729a1 100644 --- a/dpdk/app/test-eventdev/test_perf_common.c +++ b/dpdk/app/test-eventdev/test_perf_common.c @@ -583,7 +583,8 @@ perf_opt_check(struct evt_options *opt, uint64_t nb_queues) return -1; } - if (opt->prod_type == EVT_PROD_TYPE_SYNT) { + if (opt->prod_type == EVT_PROD_TYPE_SYNT || + opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) { /* Validate producer lcores */ if (evt_lcores_has_overlap(opt->plcores, rte_get_master_lcore())) { diff --git a/dpdk/app/test-pmd/cmdline.c b/dpdk/app/test-pmd/cmdline.c index eeadb2de..8a6f246b 100644 --- a/dpdk/app/test-pmd/cmdline.c +++ b/dpdk/app/test-pmd/cmdline.c @@ -2042,6 +2042,7 @@ cmd_config_rx_mode_flag_parsed(void *parsed_result, { struct cmd_config_rx_mode_flag *res = parsed_result; portid_t pid; + int k; if (!all_ports_stopped()) { printf("Please stop all ports first\n"); @@ -2142,6 +2143,10 @@ 
cmd_config_rx_mode_flag_parsed(void *parsed_result, return; } port->dev_conf.rxmode.offloads = rx_offloads; + /* Apply Rx offloads configuration */ + for (k = 0; k < port->dev_info.max_rx_queues; k++) + port->rx_conf[k].offloads = + port->dev_conf.rxmode.offloads; } init_port_config(); @@ -4354,6 +4359,17 @@ csum_show(int port_id) } } +static void +cmd_config_queue_tx_offloads(struct rte_port *port) +{ + int k; + + /* Apply queue tx offloads configuration */ + for (k = 0; k < port->dev_info.max_rx_queues; k++) + port->tx_conf[k].offloads = + port->dev_conf.txmode.offloads; +} + static void cmd_csum_parsed(void *parsed_result, __attribute__((unused)) struct cmdline *cl, @@ -4438,6 +4454,7 @@ cmd_csum_parsed(void *parsed_result, ports[res->port_id].dev_conf.txmode.offloads &= (~csum_offloads); } + cmd_config_queue_tx_offloads(&ports[res->port_id]); } csum_show(res->port_id); @@ -4589,6 +4606,7 @@ cmd_tso_set_parsed(void *parsed_result, printf("TSO segment size for non-tunneled packets is %d\n", ports[res->port_id].tso_segsz); } + cmd_config_queue_tx_offloads(&ports[res->port_id]); /* display warnings if configuration is not supported by the NIC */ rte_eth_dev_info_get(res->port_id, &dev_info); @@ -4744,6 +4762,7 @@ cmd_tunnel_tso_set_parsed(void *parsed_result, "if outer L3 is IPv4; not necessary for IPv6\n"); } + cmd_config_queue_tx_offloads(&ports[res->port_id]); cmd_reconfig_device_queue(res->port_id, 1, 1); } @@ -8348,19 +8367,19 @@ cmd_set_vf_rxmode_parsed(void *parsed_result, __attribute__((unused)) void *data) { int ret = -ENOTSUP; - uint16_t rx_mode = 0; + uint16_t vf_rxmode = 0; struct cmd_set_vf_rxmode *res = parsed_result; int is_on = (strcmp(res->on, "on") == 0) ? 
1 : 0; if (!strcmp(res->what,"rxmode")) { if (!strcmp(res->mode, "AUPE")) - rx_mode |= ETH_VMDQ_ACCEPT_UNTAG; + vf_rxmode |= ETH_VMDQ_ACCEPT_UNTAG; else if (!strcmp(res->mode, "ROPE")) - rx_mode |= ETH_VMDQ_ACCEPT_HASH_UC; + vf_rxmode |= ETH_VMDQ_ACCEPT_HASH_UC; else if (!strcmp(res->mode, "BAM")) - rx_mode |= ETH_VMDQ_ACCEPT_BROADCAST; + vf_rxmode |= ETH_VMDQ_ACCEPT_BROADCAST; else if (!strncmp(res->mode, "MPE",3)) - rx_mode |= ETH_VMDQ_ACCEPT_MULTICAST; + vf_rxmode |= ETH_VMDQ_ACCEPT_MULTICAST; } RTE_SET_USED(is_on); @@ -8368,12 +8387,12 @@ cmd_set_vf_rxmode_parsed(void *parsed_result, #ifdef RTE_LIBRTE_IXGBE_PMD if (ret == -ENOTSUP) ret = rte_pmd_ixgbe_set_vf_rxmode(res->port_id, res->vf_id, - rx_mode, (uint8_t)is_on); + vf_rxmode, (uint8_t)is_on); #endif #ifdef RTE_LIBRTE_BNXT_PMD if (ret == -ENOTSUP) ret = rte_pmd_bnxt_set_vf_rxmode(res->port_id, res->vf_id, - rx_mode, (uint8_t)is_on); + vf_rxmode, (uint8_t)is_on); #endif if (ret < 0) printf("bad VF receive mode parameter, return code = %d \n", diff --git a/dpdk/app/test-pmd/cmdline_flow.c b/dpdk/app/test-pmd/cmdline_flow.c index d202566b..d1fd2732 100644 --- a/dpdk/app/test-pmd/cmdline_flow.c +++ b/dpdk/app/test-pmd/cmdline_flow.c @@ -3378,6 +3378,7 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token, { static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE); struct action_rss_data *action_rss_data; + const struct arg *arg; int ret; int i; @@ -3393,10 +3394,10 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token, } if (i >= ACTION_RSS_QUEUE_NUM) return -1; - if (push_args(ctx, - ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) + - i * sizeof(action_rss_data->queue[i]), - sizeof(action_rss_data->queue[i])))) + arg = ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) + + i * sizeof(action_rss_data->queue[i]), + sizeof(action_rss_data->queue[i])); + if (push_args(ctx, arg)) return -1; ret = parse_int(ctx, token, str, len, NULL, 0); if (ret < 0) { @@ 
-3746,6 +3747,8 @@ parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token, .src_addr = mplsogre_encap_conf.ipv4_src, .dst_addr = mplsogre_encap_conf.ipv4_dst, .next_proto_id = IPPROTO_GRE, + .version_ihl = IPV4_VHL_DEF, + .time_to_live = IPDEFTTL, }, }; struct rte_flow_item_ipv6 ipv6 = { @@ -3847,6 +3850,7 @@ parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token, struct rte_flow_item_ipv6 ipv6 = { .hdr = { .proto = IPPROTO_GRE, + .hop_limits = IPDEFTTL, }, }; struct rte_flow_item_gre gre = { @@ -3934,6 +3938,8 @@ parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token, .src_addr = mplsoudp_encap_conf.ipv4_src, .dst_addr = mplsoudp_encap_conf.ipv4_dst, .next_proto_id = IPPROTO_UDP, + .version_ihl = IPV4_VHL_DEF, + .time_to_live = IPDEFTTL, }, }; struct rte_flow_item_ipv6 ipv6 = { @@ -4038,6 +4044,7 @@ parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token, struct rte_flow_item_ipv6 ipv6 = { .hdr = { .proto = IPPROTO_UDP, + .hop_limits = IPDEFTTL, }, }; struct rte_flow_item_udp udp = { diff --git a/dpdk/app/test-pmd/config.c b/dpdk/app/test-pmd/config.c index 4004e3a4..4870b900 100644 --- a/dpdk/app/test-pmd/config.c +++ b/dpdk/app/test-pmd/config.c @@ -510,6 +510,10 @@ port_infos_display(portid_t port_id) printf("Min possible number of TXDs per queue: %hu\n", dev_info.tx_desc_lim.nb_min); printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); + printf("Max segment number per packet: %hu\n", + dev_info.tx_desc_lim.nb_seg_max); + printf("Max segment number per MTU/TSO: %hu\n", + dev_info.tx_desc_lim.nb_mtu_seg_max); /* Show switch info only if valid switch domain and port id is set */ if (dev_info.switch_info.domain_id != diff --git a/dpdk/app/test-pmd/testpmd.c b/dpdk/app/test-pmd/testpmd.c index cf983b16..83b5e42a 100644 --- a/dpdk/app/test-pmd/testpmd.c +++ b/dpdk/app/test-pmd/testpmd.c @@ -2290,7 +2290,7 @@ attach_port(char *identifier) return; } - if 
(rte_dev_probe(identifier) != 0) { + if (rte_dev_probe(identifier) < 0) { TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier); return; } @@ -2360,7 +2360,7 @@ detach_port_device(portid_t port_id) port_flow_flush(port_id); } - if (rte_dev_remove(dev) != 0) { + if (rte_dev_remove(dev) < 0) { TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name); return; } @@ -2735,9 +2735,13 @@ static void rxtx_port_config(struct rte_port *port) { uint16_t qid; + uint64_t offloads; for (qid = 0; qid < nb_rxq; qid++) { + offloads = port->rx_conf[qid].offloads; port->rx_conf[qid] = port->dev_info.default_rxconf; + if (offloads != 0) + port->rx_conf[qid].offloads = offloads; /* Check if any Rx parameters have been passed */ if (rx_pthresh != RTE_PMD_PARAM_UNSET) @@ -2759,7 +2763,10 @@ rxtx_port_config(struct rte_port *port) } for (qid = 0; qid < nb_txq; qid++) { + offloads = port->tx_conf[qid].offloads; port->tx_conf[qid] = port->dev_info.default_txconf; + if (offloads != 0) + port->tx_conf[qid].offloads = offloads; /* Check if any Tx parameters have been passed */ if (tx_pthresh != RTE_PMD_PARAM_UNSET) @@ -3071,7 +3078,8 @@ signal_handler(int signum) rte_pdump_uninit(); #endif #ifdef RTE_LIBRTE_LATENCY_STATS - rte_latencystats_uninit(); + if (latencystats_enabled != 0) + rte_latencystats_uninit(); #endif force_quit(); /* Set flag to indicate the force termination. 
*/ diff --git a/dpdk/app/test-pmd/util.c b/dpdk/app/test-pmd/util.c index 687bfa49..042dc6fb 100644 --- a/dpdk/app/test-pmd/util.c +++ b/dpdk/app/test-pmd/util.c @@ -14,7 +14,7 @@ #include "testpmd.h" static inline void -print_ether_addr(const char *what, struct ether_addr *eth_addr) +print_ether_addr(const char *what, const struct ether_addr *eth_addr) { char buf[ETHER_ADDR_FMT_SIZE]; ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr); @@ -26,7 +26,8 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, int is_rx) { struct rte_mbuf *mb; - struct ether_hdr *eth_hdr; + const struct ether_hdr *eth_hdr; + struct ether_hdr _eth_hdr; uint16_t eth_type; uint64_t ol_flags; uint16_t i, packet_type; @@ -45,7 +46,7 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], (unsigned int) nb_pkts); for (i = 0; i < nb_pkts; i++) { mb = pkts[i]; - eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *); + eth_hdr = rte_pktmbuf_read(mb, 0, sizeof(_eth_hdr), &_eth_hdr); eth_type = RTE_BE_TO_CPU_16(eth_hdr->ether_type); ol_flags = mb->ol_flags; packet_type = mb->packet_type; diff --git a/dpdk/config/arm/meson.build b/dpdk/config/arm/meson.build index 9feb54f2..0457e6fb 100644 --- a/dpdk/config/arm/meson.build +++ b/dpdk/config/arm/meson.build @@ -9,7 +9,7 @@ arm_force_native_march = false arm_force_default_march = (machine == 'default') machine_args_generic = [ - ['default', ['-march=armv8-a+crc+crypto']], + ['default', ['-march=armv8-a+crc']], ['native', ['-march=native']], ['0xd03', ['-mcpu=cortex-a53']], ['0xd04', ['-mcpu=cortex-a35']], @@ -90,7 +90,7 @@ impl_dpaa2 = ['NXP DPAA2', flags_dpaa2, machine_args_generic] dpdk_conf.set('RTE_FORCE_INTRINSICS', 1) -if cc.sizeof('void *') != 8 +if not dpdk_conf.get('RTE_ARCH_64') dpdk_conf.set('RTE_CACHE_LINE_SIZE', 64) dpdk_conf.set('RTE_ARCH_ARM', 1) dpdk_conf.set('RTE_ARCH_ARMv7', 1) @@ -100,7 +100,6 @@ if cc.sizeof('void *') != 8 else dpdk_conf.set('RTE_CACHE_LINE_SIZE', 128) 
dpdk_conf.set('RTE_ARCH_ARM64', 1) - dpdk_conf.set('RTE_ARCH_64', 1) machine = [] cmd_generic = ['generic', '', '', 'default', ''] diff --git a/dpdk/config/meson.build b/dpdk/config/meson.build index 80d25382..d4d45082 100644 --- a/dpdk/config/meson.build +++ b/dpdk/config/meson.build @@ -50,6 +50,8 @@ toolchain = cc.get_id() dpdk_conf.set_quoted('RTE_TOOLCHAIN', toolchain) dpdk_conf.set('RTE_TOOLCHAIN_' + toolchain.to_upper(), 1) +dpdk_conf.set('RTE_ARCH_64', cc.sizeof('void *') == 8) + add_project_link_arguments('-Wl,--no-as-needed', language: 'c') dpdk_extra_ldflags += '-Wl,--no-as-needed' @@ -97,7 +99,7 @@ warning_flags = [ '-Wcast-qual', '-Wno-address-of-packed-member' ] -if cc.sizeof('void *') == 4 +if not dpdk_conf.get('RTE_ARCH_64') # for 32-bit, don't warn about casting a 32-bit pointer to 64-bit int - it's fine!! warning_flags += '-Wno-pointer-to-int-cast' endif @@ -117,6 +119,7 @@ dpdk_conf.set('RTE_MAX_VFIO_GROUPS', 64) dpdk_conf.set('RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB', 64) dpdk_conf.set('RTE_LIBRTE_DPAA2_USE_PHYS_IOVA', true) + compile_time_cpuflags = [] if host_machine.cpu_family().startswith('x86') arch_subdir = 'x86' @@ -135,3 +138,11 @@ install_headers('rte_config.h', subdir: get_option('include_subdir_arch')) # enable VFIO only if it is linux OS dpdk_conf.set('RTE_EAL_VFIO', host_machine.system() == 'linux') + +# specify -D_GNU_SOURCE unconditionally +add_project_arguments('-D_GNU_SOURCE', language: 'c') + +# specify -D__BSD_VISIBLE for FreeBSD +if host_machine.system() == 'freebsd' + add_project_arguments('-D__BSD_VISIBLE', language: 'c') +endif diff --git a/dpdk/config/ppc_64/meson.build b/dpdk/config/ppc_64/meson.build index e207c438..7ceae1d3 100644 --- a/dpdk/config/ppc_64/meson.build +++ b/dpdk/config/ppc_64/meson.build @@ -1,9 +1,11 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2018 Luca Boccassi +if not dpdk_conf.get('RTE_ARCH_64') + error('Only 64-bit compiles are supported for this platform type') +endif 
dpdk_conf.set('RTE_ARCH', 'ppc_64') dpdk_conf.set('RTE_ARCH_PPC_64', 1) -dpdk_conf.set('RTE_ARCH_64', 1) # overrides specific to ppc64 dpdk_conf.set('RTE_MAX_LCORE', 256) diff --git a/dpdk/config/x86/meson.build b/dpdk/config/x86/meson.build index ae92f86a..50a9edc4 100644 --- a/dpdk/config/x86/meson.build +++ b/dpdk/config/x86/meson.build @@ -29,10 +29,9 @@ foreach f:base_flags endforeach dpdk_conf.set('RTE_ARCH_X86', 1) -if (host_machine.cpu_family() == 'x86_64') +if dpdk_conf.get('RTE_ARCH_64') dpdk_conf.set('RTE_ARCH_X86_64', 1) dpdk_conf.set('RTE_ARCH', 'x86_64') - dpdk_conf.set('RTE_ARCH_64', 1) else dpdk_conf.set('RTE_ARCH_I686', 1) dpdk_conf.set('RTE_ARCH', 'i686') diff --git a/dpdk/devtools/build-tags.sh b/dpdk/devtools/build-tags.sh index 78001f04..5bfbf7ed 100755 --- a/dpdk/devtools/build-tags.sh +++ b/dpdk/devtools/build-tags.sh @@ -68,11 +68,13 @@ common_sources() linux_sources() { find_sources "lib/librte_eal/linuxapp" '*.[chS]' + find_sources "kernel/linux" '*.[chS]' } bsd_sources() { find_sources "lib/librte_eal/bsdapp" '*.[chS]' + find_sources "kernel/freebsd" '*.[chS]' } arm_common() diff --git a/dpdk/doc/api/meson.build b/dpdk/doc/api/meson.build index 30bdc573..1c48b767 100644 --- a/dpdk/doc/api/meson.build +++ b/dpdk/doc/api/meson.build @@ -26,7 +26,7 @@ if doxygen.found() command: [generate_examples, '@INPUT@', '@OUTPUT@'], install: get_option('enable_docs'), install_dir: htmldir, - build_by_default: false) + build_by_default: get_option('enable_docs')) cdata = configuration_data() cdata.set('VERSION', meson.project_version()) @@ -48,7 +48,7 @@ if doxygen.found() command: [generate_doxygen, '@INPUT@', '@OUTPUT@', generate_css], install: get_option('enable_docs'), install_dir: htmldir, - build_by_default: false) + build_by_default: get_option('enable_docs')) doc_targets += doxy_build doc_target_names += 'Doxygen_API' diff --git a/dpdk/doc/guides/conf.py b/dpdk/doc/guides/conf.py index a85f6c9d..94b97dc3 100644 --- a/dpdk/doc/guides/conf.py +++ 
b/dpdk/doc/guides/conf.py @@ -64,9 +64,13 @@ latex_documents = [ custom_latex_preamble = r""" \usepackage[utf8]{inputenc} \usepackage[T1]{fontenc} +\usepackage{textalpha} \usepackage{helvet} \renewcommand{\familydefault}{\sfdefault} \RecustomVerbatimEnvironment{Verbatim}{Verbatim}{xleftmargin=5mm} +\usepackage{etoolbox} +\robustify\( +\robustify\) """ # Configuration for the latex/pdf docs. diff --git a/dpdk/doc/guides/contributing/documentation.rst b/dpdk/doc/guides/contributing/documentation.rst index 408859e2..a45b62ba 100644 --- a/dpdk/doc/guides/contributing/documentation.rst +++ b/dpdk/doc/guides/contributing/documentation.rst @@ -200,10 +200,10 @@ The main required packages can be installed as follows: .. code-block:: console # Ubuntu/Debian. - sudo apt-get -y install texlive-latex-extra + sudo apt-get -y install texlive-latex-extra texlive-lang-greek # Red Hat/Fedora, selective install. - sudo dnf -y install texlive-collection-latexextra + sudo dnf -y install texlive-collection-latexextra texlive-greek-fontenc `Latexmk `_ is a perl script for running LaTeX for resolving cross references, diff --git a/dpdk/doc/guides/contributing/patches.rst b/dpdk/doc/guides/contributing/patches.rst index 1bd91b7e..02cd0555 100644 --- a/dpdk/doc/guides/contributing/patches.rst +++ b/dpdk/doc/guides/contributing/patches.rst @@ -637,12 +637,3 @@ patch accepted. The general cycle for patch review and acceptance is: than rework of the original. * Trivial patches may be merged sooner than described above at the tree committer's discretion. - -DPDK Maintainers ----------------- - -The following are the DPDK maintainers as listed in the ``MAINTAINERS`` file -in the DPDK root directory. - -.. 
literalinclude:: ../../../MAINTAINERS - :lines: 3- diff --git a/dpdk/doc/guides/cryptodevs/armv8.rst b/dpdk/doc/guides/cryptodevs/armv8.rst index 725398da..81d7f8ee 100644 --- a/dpdk/doc/guides/cryptodevs/armv8.rst +++ b/dpdk/doc/guides/cryptodevs/armv8.rst @@ -59,7 +59,6 @@ User can use app/test application to check how to use this PMD and to verify crypto processing. Test name is cryptodev_sw_armv8_autotest. -For performance test cryptodev_sw_armv8_perftest can be used. Limitations ----------- diff --git a/dpdk/doc/guides/cryptodevs/openssl.rst b/dpdk/doc/guides/cryptodevs/openssl.rst index 2ac8090c..f9a54fe7 100644 --- a/dpdk/doc/guides/cryptodevs/openssl.rst +++ b/dpdk/doc/guides/cryptodevs/openssl.rst @@ -87,7 +87,6 @@ User can use app/test application to check how to use this pmd and to verify crypto processing. Test name is cryptodev_openssl_autotest. -For performance test cryptodev_openssl_perftest can be used. For asymmetric crypto operations testing, run cryptodev_openssl_asym_autotest. 
To verify real traffic l2fwd-crypto example can be used with this command: diff --git a/dpdk/doc/guides/howto/rte_flow.rst b/dpdk/doc/guides/howto/rte_flow.rst index e080570e..81fa805e 100644 --- a/dpdk/doc/guides/howto/rte_flow.rst +++ b/dpdk/doc/guides/howto/rte_flow.rst @@ -45,7 +45,7 @@ Code pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH; pattern[0].spec = ð - /* set the vlan to pas all packets */ + /* set the vlan to pass all packets */ pattern[1] = RTE_FLOW_ITEM_TYPE_VLAN; pattern[1].spec = &vlan; @@ -141,7 +141,7 @@ Code pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH; pattern[0].spec = ð - /* set the vlan to pas all packets */ + /* set the vlan to pass all packets */ pattern[1] = RTE_FLOW_ITEM_TYPE_VLAN; pattern[1].spec = &vlan; diff --git a/dpdk/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst b/dpdk/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst index fd7a46c8..73a0b87c 100644 --- a/dpdk/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst +++ b/dpdk/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst @@ -71,6 +71,7 @@ Copy the NUMA header files and lib to the cross compiler's directories: cp /include/numa*.h /gcc-arm-8.2-2019.01-x86_64-aarch64-linux-gnu/bin/../aarch64-linux-gnu/libc/usr/include/ cp /lib/libnuma.a /gcc-arm-8.2-2019.01-x86_64-aarch64-linux-gnu/lib/gcc/aarch64-linux-gnu/8.2/ + cp /lib/libnuma.so /gcc-arm-8.2-2019.01-x86_64-aarch64-linux-gnu/lib/gcc/aarch64-linux-gnu/8.2/ .. _configure_and_cross_compile_dpdk_build: diff --git a/dpdk/doc/guides/linux_gsg/linux_drivers.rst b/dpdk/doc/guides/linux_gsg/linux_drivers.rst index 8da6a31b..ef8f5040 100644 --- a/dpdk/doc/guides/linux_gsg/linux_drivers.rst +++ b/dpdk/doc/guides/linux_gsg/linux_drivers.rst @@ -103,7 +103,7 @@ Such model has the following benefits: More about the bifurcated driver can be found in `Mellanox Bifurcated DPDK PMD -`__. +`__. .. 
_linux_gsg_binding_kernel: diff --git a/dpdk/doc/guides/meson.build b/dpdk/doc/guides/meson.build index 06f14882..7931ef3b 100644 --- a/dpdk/doc/guides/meson.build +++ b/dpdk/doc/guides/meson.build @@ -11,7 +11,7 @@ if sphinx.found() command: [sphinx, '-b', 'html', '-d', meson.current_build_dir() + '/.doctrees', '@INPUT@', meson.current_build_dir() + '/guides'], - build_by_default: false, + build_by_default: get_option('enable_docs'), install: get_option('enable_docs'), install_dir: htmldir) diff --git a/dpdk/doc/guides/nics/bnx2x.rst b/dpdk/doc/guides/nics/bnx2x.rst index cecbfc2e..8577c1a4 100644 --- a/dpdk/doc/guides/nics/bnx2x.rst +++ b/dpdk/doc/guides/nics/bnx2x.rst @@ -34,7 +34,7 @@ BNX2X Poll Mode Driver The BNX2X poll mode driver library (**librte_pmd_bnx2x**) implements support for **QLogic 578xx** 10/20 Gbps family of adapters as well as their virtual functions (VF) in SR-IOV context. It is supported on several standard Linux -distros like Red Hat 7.x and SLES12 OS. It is compile-tested under FreeBSD OS. +distros like RHEL and SLES. It is compile-tested under FreeBSD OS. More information can be found at `QLogic Corporation's Official Website `_. @@ -65,14 +65,26 @@ The features not yet supported include: Co-existence considerations --------------------------- -- BCM578xx being a CNA can have both NIC and Storage personalities. - However, coexistence with storage protocol drivers (cnic, bnx2fc and - bnx2fi) is not supported on the same adapter. So storage personality - has to be disabled on that adapter when used in DPDK applications. +- QLogic 578xx CNAs support Ethernet, iSCSI and FCoE functionalities. + These functionalities are supported using QLogic Linux kernel + drivers bnx2x, cnic, bnx2i and bnx2fc. DPDK is supported on these + adapters using bnx2x PMD. -- For SR-IOV case, bnx2x PMD will be used to bind to SR-IOV VF device and - Linux native kernel driver (bnx2x) will be attached to SR-IOV PF. 
+- When SR-IOV is not enabled on the adapter, + QLogic Linux kernel drivers (bnx2x, cnic, bnx2i and bnx2fc) and bnx2x + PMD can’t be attached to different PFs on a given QLogic 578xx + adapter. + A given adapter needs to be completely used by DPDK or Linux drivers. + Before binding DPDK driver to one or more PFs on the adapter, + please make sure to unbind Linux drivers from all PFs of the adapter. + If there are multiple adapters on the system, one or more adapters + can be used by DPDK driver completely and other adapters can be used + by Linux drivers completely. +- When SR-IOV is enabled on the adapter, + Linux kernel drivers (bnx2x, cnic, bnx2i and bnx2fc) can be bound + to the PFs of a given adapter and either bnx2x PMD or Linux drivers + bnx2x can be bound to the VFs of the adapter. Supported QLogic NICs --------------------- diff --git a/dpdk/doc/guides/nics/enic.rst b/dpdk/doc/guides/nics/enic.rst index c1b83b9b..773f13b0 100644 --- a/dpdk/doc/guides/nics/enic.rst +++ b/dpdk/doc/guides/nics/enic.rst @@ -260,12 +260,6 @@ Generic Flow API is supported. The baseline support is: - Selectors: 'is', 'spec' and 'mask'. 'last' is not supported - In total, up to 64 bytes of mask is allowed across all headers -- **1400 and later series VICS with advanced filters enabled** - - All the above plus: - - - Action: count - The VIC performs packet matching after applying VLAN strip. If VLAN stripping is enabled, EtherType in the ETH item corresponds to the stripped VLAN header's EtherType. 
Stripping does not affect the VLAN diff --git a/dpdk/doc/guides/nics/features/mlx4.ini b/dpdk/doc/guides/nics/features/mlx4.ini index 98a3f611..659c6b17 100644 --- a/dpdk/doc/guides/nics/features/mlx4.ini +++ b/dpdk/doc/guides/nics/features/mlx4.ini @@ -21,6 +21,9 @@ Multicast MAC filter = Y RSS hash = Y SR-IOV = Y VLAN filter = Y +Flow control = Y +Flow API = Y +CRC offload = Y L3 checksum offload = Y L4 checksum offload = Y Inner L3 checksum = Y diff --git a/dpdk/doc/guides/nics/features/mlx5.ini b/dpdk/doc/guides/nics/features/mlx5.ini index b28b43e5..d92d5895 100644 --- a/dpdk/doc/guides/nics/features/mlx5.ini +++ b/dpdk/doc/guides/nics/features/mlx5.ini @@ -25,6 +25,7 @@ Inner RSS = Y SR-IOV = Y VLAN filter = Y Flow director = Y +Flow control = Y Flow API = Y CRC offload = Y VLAN offload = Y diff --git a/dpdk/doc/guides/nics/ixgbe.rst b/dpdk/doc/guides/nics/ixgbe.rst index 975143f8..5c3a7e4f 100644 --- a/dpdk/doc/guides/nics/ixgbe.rst +++ b/dpdk/doc/guides/nics/ixgbe.rst @@ -82,6 +82,31 @@ To guarantee the constraint, capabilities in dev_conf.rxmode.offloads will be ch fdir_conf->mode will also be checked. +VF Runtime Options +^^^^^^^^^^^^^^^^^^ + +The following ``devargs`` options can be enabled at runtime. They must +be passed as part of EAL arguments. For example, + +.. code-block:: console + + testpmd -w af:10.0,pflink_fullchk=1 -- -i + +- ``pflink_fullchk`` (default **0**) + + When calling ``rte_eth_link_get_nowait()`` to get VF link status, + this option is used to control how VF synchronizes its status with + PF's. If set, VF will not only check the PF's physical link status + by reading related register, but also check the mailbox status. We + call this behavior as fully checking. And checking mailbox will + trigger PF's mailbox interrupt generation. If unset, the application + can get the VF's link status quickly by just reading the PF's link + status register, this will avoid the whole system's mailbox interrupt + generation. 
+ + ``rte_eth_link_get()`` will still use the mailbox method regardless + of the pflink_fullchk setting. + RX Burst Size ^^^^^^^^^^^^^ diff --git a/dpdk/doc/guides/nics/mlx5.rst b/dpdk/doc/guides/nics/mlx5.rst index 31238ae3..6acbbca6 100644 --- a/dpdk/doc/guides/nics/mlx5.rst +++ b/dpdk/doc/guides/nics/mlx5.rst @@ -56,6 +56,7 @@ Features - Several RSS hash keys, one for each flow type. - Default RSS operation with no hash key specification. - Configurable RETA table. +- Link flow control (pause frame). - Support for multiple MAC addresses. - VLAN filtering. - RX VLAN stripping. @@ -177,7 +178,7 @@ Statistics MLX5 supports various of methods to report statistics: -Port statistics can be queried using ``rte_eth_stats_get()``. The port statistics are through SW only and counts the number of packets received or sent successfully by the PMD. +Port statistics can be queried using ``rte_eth_stats_get()``. The received and sent statistics are through SW only and counts the number of packets received or sent successfully by the PMD. The imissed counter is the amount of packets that could not be delivered to SW because a queue was full. Packets not received due to congestion in the bus or on the NIC can be queried via the rx_discards_phy xstats counter. Extended statistics can be queried using ``rte_eth_xstats_get()``. The extended statistics expose a wider set of counters counted by the device. The extended port statistics counts the number of packets received or sent successfully by the port. As Mellanox NICs are using the :ref:`Bifurcated Linux Driver ` those counters counts also packet received or sent by the Linux kernel. The counters with ``_phy`` suffix counts the total events on the physical port, therefore not valid for VF. 
diff --git a/dpdk/doc/guides/nics/qede.rst b/dpdk/doc/guides/nics/qede.rst index c0a38338..05a6aef5 100644 --- a/dpdk/doc/guides/nics/qede.rst +++ b/dpdk/doc/guides/nics/qede.rst @@ -7,7 +7,7 @@ QEDE Poll Mode Driver The QEDE poll mode driver library (**librte_pmd_qede**) implements support for **QLogic FastLinQ QL4xxxx 10G/25G/40G/50G/100G Intelligent Ethernet Adapters (IEA) and Converged Network Adapters (CNA)** family of adapters as well as SR-IOV virtual functions (VF). It is supported on -several standard Linux distros like RHEL7.x, SLES12.x and Ubuntu. +several standard Linux distros like RHEL, SLES, Ubuntu etc. It is compile-tested under FreeBSD OS. More information can be found at `QLogic Corporation's Website @@ -47,8 +47,27 @@ Non-supported Features Co-existence considerations --------------------------- -- QLogic FastLinQ QL4xxxx CNAs can have both NIC and Storage personalities. However, coexistence with storage protocol drivers (qedi and qedf) is not supported on the same adapter. So storage personality has to be disabled on that adapter when used in DPDK applications. -- For SR-IOV case, qede PMD will be used to bind to SR-IOV VF device and Linux native kernel driver (qede) will be attached to SR-IOV PF. + +- QLogic FastLinQ QL4xxxx CNAs support Ethernet, RDMA, iSCSI and FCoE + functionalities. These functionalities are supported using + QLogic Linux kernel drivers qed, qede, qedr, qedi and qedf. DPDK is + supported on these adapters using qede PMD. + +- When SR-IOV is not enabled on the adapter, + QLogic Linux kernel drivers (qed, qede, qedr, qedi and qedf) and qede + PMD can’t be attached to different PFs on a given QLogic FastLinQ + QL4xxx adapter. + A given adapter needs to be completely used by DPDK or Linux drivers. + Before binding DPDK driver to one or more PFs on the adapter, + please make sure to unbind Linux drivers from all PFs of the adapter. 
+ If there are multiple adapters on the system, one or more adapters + can be used by DPDK driver completely and other adapters can be used + by Linux drivers completely. + +- When SR-IOV is enabled on the adapter, + Linux kernel drivers (qed, qede, qedr, qedi and qedf) can be bound + to the PFs of a given adapter and either qede PMD or Linux drivers + (qed and qede) can be bound to the VFs of the adapter. Supported QLogic Adapters ------------------------- diff --git a/dpdk/doc/guides/prog_guide/bbdev.rst b/dpdk/doc/guides/prog_guide/bbdev.rst index 658ffd40..12e948a2 100644 --- a/dpdk/doc/guides/prog_guide/bbdev.rst +++ b/dpdk/doc/guides/prog_guide/bbdev.rst @@ -44,7 +44,7 @@ From the command line using the --vdev EAL option --vdev 'baseband_turbo_sw,max_nb_queues=8,socket_id=0' -Our using the rte_vdev_init API within the application code. +Or using the rte_vdev_init API within the application code. .. code-block:: c @@ -284,7 +284,7 @@ baseband operations is usually completed during the enqueue call to the bbdev device. The dequeue burst API will retrieve any processed operations available from the queue on the bbdev device, from physical devices this is usually directly from the device's processed queue, and for virtual device's from a -``rte_ring`` where processed operations are place after being processed on the +``rte_ring`` where processed operations are placed after being processed on the enqueue call. diff --git a/dpdk/doc/guides/prog_guide/compressdev.rst b/dpdk/doc/guides/prog_guide/compressdev.rst index 3ba4238c..0e8d8c3d 100644 --- a/dpdk/doc/guides/prog_guide/compressdev.rst +++ b/dpdk/doc/guides/prog_guide/compressdev.rst @@ -201,7 +201,7 @@ for stateful processing of ops. Operation Status ~~~~~~~~~~~~~~~~ Each operation carries a status information updated by PMD after it is processed. 
-following are currently supported status: +Following are currently supported: - RTE_COMP_OP_STATUS_SUCCESS, Operation is successfully completed @@ -227,14 +227,24 @@ following are currently supported status: is not an error case. Output data up to op.produced can be used and next op in the stream should continue on from op.consumed+1. +Operation status after enqueue / dequeue +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Some of the above values may arise in the op after an +``rte_compressdev_enqueue_burst()``. If number ops enqueued < number ops requested then +the app should check the op.status of nb_enqd+1. If status is RTE_COMP_OP_STATUS_NOT_PROCESSED, +it likely indicates a full-queue case for a hardware device and a retry after dequeuing some ops is likely +to be successful. If the op holds any other status, e.g. RTE_COMP_OP_STATUS_INVALID_ARGS, a retry with +the same op is unlikely to be successful. + + Produced, Consumed And Operation Status ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - If status is RTE_COMP_OP_STATUS_SUCCESS, consumed = amount of data read from input buffer, and produced = amount of data written in destination buffer -- If status is RTE_COMP_OP_STATUS_FAILURE, - consumed = produced = 0 or undefined +- If status is RTE_COMP_OP_STATUS_ERROR, + consumed = produced = undefined - If status is RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED, consumed = 0 and produced = usually 0, but in decompression cases a PMD may return > 0 @@ -568,7 +578,7 @@ operations is usually completed during the enqueue call to the compression device. The dequeue burst API will retrieve any processed operations available from the queue pair on the compression device, from physical devices this is usually directly from the devices processed queue, and for virtual device's from a -``rte_ring`` where processed operations are place after being processed on the +``rte_ring`` where processed operations are placed after being processed on the enqueue call. 
A burst in DPDK compression can be a combination of stateless and stateful operations with a condition diff --git a/dpdk/doc/guides/prog_guide/cryptodev_lib.rst b/dpdk/doc/guides/prog_guide/cryptodev_lib.rst index 23770ffd..7a95053a 100644 --- a/dpdk/doc/guides/prog_guide/cryptodev_lib.rst +++ b/dpdk/doc/guides/prog_guide/cryptodev_lib.rst @@ -52,7 +52,7 @@ From the command line using the --vdev EAL option Example: ``--vdev 'crypto_aesni_mb0' --vdev 'crypto_aesni_mb1'`` -Our using the rte_vdev_init API within the application code. +Or using the rte_vdev_init API within the application code. .. code-block:: c @@ -294,7 +294,7 @@ Crypto operations is usually completed during the enqueue call to the Crypto device. The dequeue burst API will retrieve any processed operations available from the queue pair on the Crypto device, from physical devices this is usually directly from the devices processed queue, and for virtual device's from a -``rte_ring`` where processed operations are place after being processed on the +``rte_ring`` where processed operations are placed after being processed on the enqueue call. 
diff --git a/dpdk/doc/guides/prog_guide/img/linuxapp_launch.svg b/dpdk/doc/guides/prog_guide/img/linuxapp_launch.svg index af685897..c2bd34e0 100644 --- a/dpdk/doc/guides/prog_guide/img/linuxapp_launch.svg +++ b/dpdk/doc/guides/prog_guide/img/linuxapp_launch.svg @@ -659,7 +659,7 @@ sodipodi:role="line" id="tspan11522" x="69.303398" - y="858.42419">rte_eal_remote_lauch(app) + y="858.42419">rte_eal_remote_launch(app) rte_eal_remote_lauch(rte_eal_remote_launch( RTE_PKTMBUF_HEADROOM -#error "Annotation requirement is more than RTE_PKTMBUF_HEADROOM" -#endif - /* we will re-use the HEADROOM for annotation in RX */ #define DPAA2_HW_BUF_RESERVE 0 #define DPAA2_PACKET_LAYOUT_ALIGN 64 /*changing from 256 */ @@ -206,6 +202,7 @@ enum qbman_fd_format { ((fd)->simple.frc = (0x80000000 | (len))) #define DPAA2_GET_FD_FRC_PARSE_SUM(fd) \ ((uint16_t)(((fd)->simple.frc & 0xffff0000) >> 16)) +#define DPAA2_RESET_FD_FRC(fd) ((fd)->simple.frc = 0) #define DPAA2_SET_FD_FRC(fd, _frc) ((fd)->simple.frc = _frc) #define DPAA2_RESET_FD_CTRL(fd) ((fd)->simple.ctrl = 0) diff --git a/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h b/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h index 10c72e04..00b353a2 100644 --- a/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h +++ b/dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h @@ -15,7 +15,7 @@ * - Enqueue, including setting the enqueue descriptor, and issuing enqueue * command etc. * - Dequeue, including setting the dequeue descriptor, issuing dequeue command, - * parsing the dequeue response in DQRR and memeory, parsing the state change + * parsing the dequeue response in DQRR and memory, parsing the state change * notifications etc. * - Release, including setting the release descriptor, and issuing the buffer * release command. 
diff --git a/dpdk/drivers/bus/pci/linux/pci.c b/dpdk/drivers/bus/pci/linux/pci.c index c99d523f..74794a3b 100644 --- a/dpdk/drivers/bus/pci/linux/pci.c +++ b/dpdk/drivers/bus/pci/linux/pci.c @@ -583,7 +583,6 @@ pci_one_device_iommu_support_va(struct rte_pci_device *dev) { #define VTD_CAP_MGAW_SHIFT 16 #define VTD_CAP_MGAW_MASK (0x3fULL << VTD_CAP_MGAW_SHIFT) -#define X86_VA_WIDTH 47 /* From Documentation/x86/x86_64/mm.txt */ struct rte_pci_addr *addr = &dev->addr; char filename[PATH_MAX]; FILE *fp; diff --git a/dpdk/drivers/bus/pci/linux/pci_uio.c b/dpdk/drivers/bus/pci/linux/pci_uio.c index 09ecbb7a..0d1b9aa3 100644 --- a/dpdk/drivers/bus/pci/linux/pci_uio.c +++ b/dpdk/drivers/bus/pci/linux/pci_uio.c @@ -314,12 +314,11 @@ pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx, loc->domain, loc->bus, loc->devid, loc->function, res_idx); - if (access(devname, R_OK|W_OK) != -1) { - fd = open(devname, O_RDWR); - if (fd < 0) - RTE_LOG(INFO, EAL, "%s cannot be mapped. " - "Fall-back to non prefetchable mode.\n", - devname); + fd = open(devname, O_RDWR); + if (fd < 0 && errno != ENOENT) { + RTE_LOG(INFO, EAL, "%s cannot be mapped. " + "Fall-back to non prefetchable mode.\n", + devname); } } diff --git a/dpdk/drivers/bus/vmbus/linux/vmbus_bus.c b/dpdk/drivers/bus/vmbus/linux/vmbus_bus.c index a4755a38..3c924eee 100644 --- a/dpdk/drivers/bus/vmbus/linux/vmbus_bus.c +++ b/dpdk/drivers/bus/vmbus/linux/vmbus_bus.c @@ -25,6 +25,18 @@ /** Pathname of VMBUS devices directory. 
*/ #define SYSFS_VMBUS_DEVICES "/sys/bus/vmbus/devices" +/* + * GUID associated with network devices + * {f8615163-df3e-46c5-913f-f2d2f965ed0e} + */ +static const rte_uuid_t vmbus_nic_uuid = { + 0xf8, 0x61, 0x51, 0x63, + 0xdf, 0x3e, + 0x46, 0xc5, + 0x91, 0x3f, + 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0xe +}; + extern struct rte_vmbus_bus rte_vmbus_bus; /* Read sysfs file to get UUID */ @@ -242,16 +254,22 @@ vmbus_scan_one(const char *name) snprintf(dirname, sizeof(dirname), "%s/%s", SYSFS_VMBUS_DEVICES, name); - /* get device id */ - snprintf(filename, sizeof(filename), "%s/device_id", dirname); - if (parse_sysfs_uuid(filename, dev->device_id) < 0) - goto error; - /* get device class */ snprintf(filename, sizeof(filename), "%s/class_id", dirname); if (parse_sysfs_uuid(filename, dev->class_id) < 0) goto error; + /* skip non-network devices */ + if (rte_uuid_compare(dev->class_id, vmbus_nic_uuid) != 0) { + free(dev); + return 0; + } + + /* get device id */ + snprintf(filename, sizeof(filename), "%s/device_id", dirname); + if (parse_sysfs_uuid(filename, dev->device_id) < 0) + goto error; + /* get relid */ snprintf(filename, sizeof(filename), "%s/id", dirname); if (eal_parse_sysfs_value(filename, &tmp) < 0) diff --git a/dpdk/drivers/common/dpaax/dpaax_iova_table.c b/dpdk/drivers/common/dpaax/dpaax_iova_table.c index 2dd38a92..ae0af091 100644 --- a/dpdk/drivers/common/dpaax/dpaax_iova_table.c +++ b/dpdk/drivers/common/dpaax/dpaax_iova_table.c @@ -99,7 +99,7 @@ read_memory_node(unsigned int *count) goto cleanup; } - DPAAX_DEBUG("Size of device-tree mem node: %lu", statbuf.st_size); + DPAAX_DEBUG("Size of device-tree mem node: %" PRIu64, statbuf.st_size); if (statbuf.st_size > MEM_NODE_FILE_LEN) { DPAAX_DEBUG("More memory nodes available than assumed."); DPAAX_DEBUG("System may not work properly!"); @@ -118,7 +118,7 @@ read_memory_node(unsigned int *count) */ *count = (statbuf.st_size / 16); if ((*count) <= 0 || (statbuf.st_size % 16 != 0)) { - DPAAX_DEBUG("Invalid memory node 
values or count. (size=%lu)", + DPAAX_DEBUG("Invalid memory node values or count. (size=%" PRIu64 ")", statbuf.st_size); goto cleanup; } diff --git a/dpdk/drivers/compress/isal/isal_compress_pmd_ops.c b/dpdk/drivers/compress/isal/isal_compress_pmd_ops.c index 472e54e8..a395a660 100644 --- a/dpdk/drivers/compress/isal/isal_compress_pmd_ops.c +++ b/dpdk/drivers/compress/isal/isal_compress_pmd_ops.c @@ -169,18 +169,12 @@ isal_comp_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id) if (qp == NULL) return -EINVAL; - if (qp->stream != NULL) - rte_free(qp->stream); - - if (qp->stream->level_buf != NULL) + if (qp->stream) rte_free(qp->stream->level_buf); - if (qp->state != NULL) - rte_free(qp->state); - - if (qp->processed_pkts != NULL) - rte_ring_free(qp->processed_pkts); - + rte_free(qp->state); + rte_ring_free(qp->processed_pkts); + rte_free(qp->stream); rte_free(qp); dev->data->queue_pairs[qp_id] = NULL; diff --git a/dpdk/drivers/compress/zlib/zlib_pmd.c b/dpdk/drivers/compress/zlib/zlib_pmd.c index 5a4d47d4..19f9200c 100644 --- a/dpdk/drivers/compress/zlib/zlib_pmd.c +++ b/dpdk/drivers/compress/zlib/zlib_pmd.c @@ -30,6 +30,7 @@ process_zlib_deflate(struct rte_comp_op *op, z_stream *strm) default: op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; ZLIB_PMD_ERR("Invalid flush value\n"); + return; } if (unlikely(!strm)) { diff --git a/dpdk/drivers/crypto/caam_jr/caam_jr.c b/dpdk/drivers/crypto/caam_jr/caam_jr.c index efc69b6e..a4e70bab 100644 --- a/dpdk/drivers/crypto/caam_jr/caam_jr.c +++ b/dpdk/drivers/crypto/caam_jr/caam_jr.c @@ -70,7 +70,7 @@ static inline void caam_jr_op_ending(struct caam_jr_op_ctx *ctx) { PMD_INIT_FUNC_TRACE(); - /* report op status to sym->op and then free the ctx memeory */ + /* report op status to sym->op and then free the ctx memory */ rte_mempool_put(ctx->ctx_pool, (void *)ctx); } diff --git a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c index a7973cc0..ae06438d 100644 --- 
a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c +++ b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c @@ -2193,6 +2193,7 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform, void *sess) { dpaa2_sec_session *session = sess; + int ret; PMD_INIT_FUNC_TRACE(); @@ -2208,37 +2209,37 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev, /* Cipher Only */ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) { session->ctxt_type = DPAA2_SEC_CIPHER; - dpaa2_sec_cipher_init(dev, xform, session); + ret = dpaa2_sec_cipher_init(dev, xform, session); /* Authentication Only */ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL) { session->ctxt_type = DPAA2_SEC_AUTH; - dpaa2_sec_auth_init(dev, xform, session); + ret = dpaa2_sec_auth_init(dev, xform, session); /* Cipher then Authenticate */ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) { session->ext_params.aead_ctxt.auth_cipher_text = true; - dpaa2_sec_aead_chain_init(dev, xform, session); + ret = dpaa2_sec_aead_chain_init(dev, xform, session); /* Authenticate then Cipher */ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { session->ext_params.aead_ctxt.auth_cipher_text = false; - dpaa2_sec_aead_chain_init(dev, xform, session); + ret = dpaa2_sec_aead_chain_init(dev, xform, session); /* AEAD operation for AES-GCM kind of Algorithms */ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && xform->next == NULL) { - dpaa2_sec_aead_init(dev, xform, session); + ret = dpaa2_sec_aead_init(dev, xform, session); } else { DPAA2_SEC_ERR("Invalid crypto type"); return -EINVAL; } - return 0; + return ret; } static int diff --git a/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c b/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c index 10201c58..742e24c5 100644 --- a/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c +++ b/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c 
@@ -60,7 +60,7 @@ dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx) ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR; } - /* report op status to sym->op and then free the ctx memeory */ + /* report op status to sym->op and then free the ctx memory */ rte_mempool_put(ctx->ctx_pool, (void *)ctx); } diff --git a/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c b/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c index 9956f051..0e7ade60 100644 --- a/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c +++ b/dpdk/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c @@ -726,7 +726,7 @@ mrvl_crypto_pmd_sym_session_get_size(__rte_unused struct rte_cryptodev *dev) /** Configure the session from a crypto xform chain (PMD ops callback). * * @param dev Pointer to the device structure. - * @param xform Pointer to the crytpo configuration structure. + * @param xform Pointer to the crypto configuration structure. * @param sess Pointer to the empty session structure. * @returns 0 upon success, negative value otherwise. */ diff --git a/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c b/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c index 5b27bb91..29f4ed6a 100644 --- a/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c +++ b/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c @@ -1528,7 +1528,7 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op, } if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) { - if (memcmp(dst, op->sym->auth.digest.data, + if (CRYPTO_memcmp(dst, op->sym->auth.digest.data, sess->auth.digest_length) != 0) { op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; } @@ -1605,12 +1605,9 @@ process_openssl_dsa_verify_op(struct rte_crypto_op *cop, op->y.length, pub_key); if (!r || !s || !pub_key) { - if (r) - BN_free(r); - if (s) - BN_free(s); - if (pub_key) - BN_free(pub_key); + BN_free(r); + BN_free(s); + BN_free(pub_key); cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; return -1; @@ -1781,10 +1778,8 @@ process_openssl_modinv_op(struct rte_crypto_op *cop, BIGNUM *res = 
BN_CTX_get(sess->u.m.ctx); if (unlikely(base == NULL || res == NULL)) { - if (base) - BN_free(base); - if (res) - BN_free(res); + BN_free(base); + BN_free(res); cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; return -1; } @@ -1815,10 +1810,8 @@ process_openssl_modexp_op(struct rte_crypto_op *cop, BIGNUM *res = BN_CTX_get(sess->u.e.ctx); if (unlikely(base == NULL || res == NULL)) { - if (base) - BN_free(base); - if (res) - BN_free(res); + BN_free(base); + BN_free(res); cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; return -1; } @@ -1920,7 +1913,7 @@ process_openssl_rsa_op(struct rte_crypto_op *cop, "Length of public_decrypt %d " "length of message %zd\n", ret, op->rsa.message.length); - if ((ret <= 0) || (memcmp(tmp, op->rsa.message.data, + if ((ret <= 0) || (CRYPTO_memcmp(tmp, op->rsa.message.data, op->rsa.message.length))) { OPENSSL_LOG(ERR, "RSA sign Verification failed"); cop->status = RTE_CRYPTO_OP_STATUS_ERROR; diff --git a/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c index a65f9e58..234089c8 100644 --- a/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c +++ b/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c @@ -911,22 +911,14 @@ static int openssl_set_asym_session_parameters( asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_RSA; break; err_rsa: - if (n) - BN_free(n); - if (e) - BN_free(e); - if (d) - BN_free(d); - if (p) - BN_free(p); - if (q) - BN_free(q); - if (dmp1) - BN_free(dmp1); - if (dmq1) - BN_free(dmq1); - if (iqmp) - BN_free(iqmp); + BN_clear_free(n); + BN_clear_free(e); + BN_clear_free(d); + BN_clear_free(p); + BN_clear_free(q); + BN_clear_free(dmp1); + BN_clear_free(dmq1); + BN_clear_free(iqmp); return -1; } @@ -1048,10 +1040,8 @@ err_rsa: err_dh: OPENSSL_LOG(ERR, " failed to set dh params\n"); - if (p) - BN_free(p); - if (g) - BN_free(g); + BN_free(p); + BN_free(g); return -1; } case RTE_CRYPTO_ASYM_XFORM_DSA: @@ -1117,16 +1107,11 @@ err_dh: break; err_dsa: - if (p) - BN_free(p); - if 
(q) - BN_free(q); - if (g) - BN_free(g); - if (priv_key) - BN_free(priv_key); - if (pub_key) - BN_free(pub_key); + BN_free(p); + BN_free(q); + BN_free(g); + BN_free(priv_key); + BN_free(pub_key); return -1; } default: diff --git a/dpdk/drivers/crypto/qat/qat_sym.c b/dpdk/drivers/crypto/qat/qat_sym.c index 8801ca56..7515a55d 100644 --- a/dpdk/drivers/crypto/qat/qat_sym.c +++ b/dpdk/drivers/crypto/qat/qat_sym.c @@ -530,6 +530,8 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg, qat_req->comn_mid.dest_data_addr = cookie->qat_sgl_dst_phys_addr; } + qat_req->comn_mid.src_length = 0; + qat_req->comn_mid.dst_length = 0; } else { qat_req->comn_mid.src_data_addr = src_buf_start; qat_req->comn_mid.dest_data_addr = dst_buf_start; diff --git a/dpdk/drivers/crypto/virtio/virtio_pci.c b/dpdk/drivers/crypto/virtio/virtio_pci.c index 832c465b..e24ccb60 100644 --- a/dpdk/drivers/crypto/virtio/virtio_pci.c +++ b/dpdk/drivers/crypto/virtio/virtio_pci.c @@ -397,9 +397,13 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_crypto_hw *hw) hw->common_cfg = get_cfg_addr(dev, &cap); break; case VIRTIO_PCI_CAP_NOTIFY_CFG: - rte_pci_read_config(dev, &hw->notify_off_multiplier, + ret = rte_pci_read_config(dev, &hw->notify_off_multiplier, 4, pos + sizeof(cap)); - hw->notify_base = get_cfg_addr(dev, &cap); + if (ret != 4) + VIRTIO_CRYPTO_INIT_LOG_ERR( + "failed to read notify_off_multiplier: ret %d", ret); + else + hw->notify_base = get_cfg_addr(dev, &cap); break; case VIRTIO_PCI_CAP_DEVICE_CFG: hw->dev_cfg = get_cfg_addr(dev, &cap); diff --git a/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c b/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c index 8d168b02..926b7edd 100644 --- a/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c +++ b/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c @@ -640,7 +640,7 @@ dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns, EVENTDEV_INIT_FUNC_TRACE(); RTE_SET_USED(dev); - *timeout_ticks = ns * scale; + *timeout_ticks = ns / scale; return 0; } diff --git 
a/dpdk/drivers/event/octeontx/meson.build b/dpdk/drivers/event/octeontx/meson.build index 04185533..0b8ba548 100644 --- a/dpdk/drivers/event/octeontx/meson.build +++ b/dpdk/drivers/event/octeontx/meson.build @@ -12,3 +12,8 @@ sources = files('ssovf_worker.c', allow_experimental_apis = true deps += ['common_octeontx', 'mempool_octeontx', 'bus_vdev', 'pmd_octeontx'] + +# for clang 32-bit compiles we need libatomic for 64-bit atomic ops +if cc.get_id() == 'clang' and dpdk_conf.get('RTE_ARCH_64') == false + ext_deps += cc.find_library('atomic') +endif diff --git a/dpdk/drivers/event/opdl/meson.build b/dpdk/drivers/event/opdl/meson.build index cc6029c6..1fe034ea 100644 --- a/dpdk/drivers/event/opdl/meson.build +++ b/dpdk/drivers/event/opdl/meson.build @@ -9,3 +9,8 @@ sources = files( 'opdl_test.c', ) deps += ['bus_vdev'] + +# for clang 32-bit compiles we need libatomic for 64-bit atomic ops +if cc.get_id() == 'clang' and dpdk_conf.get('RTE_ARCH_64') == false + ext_deps += cc.find_library('atomic') +endif diff --git a/dpdk/drivers/event/opdl/opdl_evdev.c b/dpdk/drivers/event/opdl/opdl_evdev.c index d2d2be44..3beca895 100644 --- a/dpdk/drivers/event/opdl/opdl_evdev.c +++ b/dpdk/drivers/event/opdl/opdl_evdev.c @@ -102,7 +102,7 @@ opdl_port_link(struct rte_eventdev *dev, dev->data->dev_id, queues[0], p->id); - rte_errno = -EINVAL; + rte_errno = EINVAL; return 0; } @@ -113,7 +113,7 @@ opdl_port_link(struct rte_eventdev *dev, dev->data->dev_id, num, p->id); - rte_errno = -EDQUOT; + rte_errno = EDQUOT; return 0; } @@ -123,7 +123,7 @@ opdl_port_link(struct rte_eventdev *dev, dev->data->dev_id, p->id, queues[0]); - rte_errno = -EINVAL; + rte_errno = EINVAL; return 0; } @@ -134,7 +134,7 @@ opdl_port_link(struct rte_eventdev *dev, p->id, p->external_qid, queues[0]); - rte_errno = -EINVAL; + rte_errno = EINVAL; return 0; } @@ -160,7 +160,7 @@ opdl_port_unlink(struct rte_eventdev *dev, dev->data->dev_id, queues[0], p->id); - rte_errno = -EINVAL; + rte_errno = EINVAL; return 0; } 
RTE_SET_USED(nb_unlinks); diff --git a/dpdk/drivers/event/opdl/opdl_evdev_init.c b/dpdk/drivers/event/opdl/opdl_evdev_init.c index 582ad698..15aae475 100644 --- a/dpdk/drivers/event/opdl/opdl_evdev_init.c +++ b/dpdk/drivers/event/opdl/opdl_evdev_init.c @@ -35,7 +35,7 @@ enqueue_check(struct opdl_port *p, p->id, ev[i].queue_id, p->next_external_qid); - rte_errno = -EINVAL; + rte_errno = EINVAL; return 0; } } @@ -63,7 +63,7 @@ enqueue_check(struct opdl_port *p, } else { if (num > 0 && ev[0].queue_id != p->next_external_qid) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return 0; } } @@ -116,7 +116,7 @@ opdl_rx_error_enqueue(struct opdl_port *p, RTE_SET_USED(ev); RTE_SET_USED(num); - rte_errno = -ENOSPC; + rte_errno = ENOSPC; return 0; } @@ -145,7 +145,7 @@ opdl_rx_enqueue(struct opdl_port *p, if (enqueued < num) - rte_errno = -ENOSPC; + rte_errno = ENOSPC; return enqueued; } @@ -164,7 +164,7 @@ opdl_tx_error_dequeue(struct opdl_port *p, RTE_SET_USED(ev); RTE_SET_USED(num); - rte_errno = -ENOSPC; + rte_errno = ENOSPC; return 0; } @@ -240,7 +240,7 @@ opdl_claim(struct opdl_port *p, struct rte_event ev[], uint16_t num) "Attempt to dequeue num of events larger than port (%d) max", opdl_pmd_dev_id(p->opdl), p->id); - rte_errno = -EINVAL; + rte_errno = EINVAL; return 0; } diff --git a/dpdk/drivers/event/opdl/opdl_ring.c b/dpdk/drivers/event/opdl/opdl_ring.c index 8aca481c..c0bc4b00 100644 --- a/dpdk/drivers/event/opdl/opdl_ring.c +++ b/dpdk/drivers/event/opdl/opdl_ring.c @@ -755,7 +755,7 @@ int opdl_stage_disclaim(struct opdl_stage *s, uint32_t num_entries, bool block) { if (num_entries != s->num_event) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return 0; } if (s->threadsafe == false) { diff --git a/dpdk/drivers/event/sw/sw_evdev.c b/dpdk/drivers/event/sw/sw_evdev.c index 1175d6cd..fb8e8beb 100644 --- a/dpdk/drivers/event/sw/sw_evdev.c +++ b/dpdk/drivers/event/sw/sw_evdev.c @@ -38,12 +38,12 @@ sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[], 
/* check for qid map overflow */ if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map)) { - rte_errno = -EDQUOT; + rte_errno = EDQUOT; break; } if (p->is_directed && p->num_qids_mapped > 0) { - rte_errno = -EDQUOT; + rte_errno = EDQUOT; break; } @@ -59,12 +59,12 @@ sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[], if (q->type == SW_SCHED_TYPE_DIRECT) { /* check directed qids only map to one port */ if (p->num_qids_mapped > 0) { - rte_errno = -EDQUOT; + rte_errno = EDQUOT; break; } /* check port only takes a directed flow */ if (num > 1) { - rte_errno = -EDQUOT; + rte_errno = EDQUOT; break; } diff --git a/dpdk/drivers/meson.build b/dpdk/drivers/meson.build index c3c66bbc..f965c33e 100644 --- a/dpdk/drivers/meson.build +++ b/dpdk/drivers/meson.build @@ -17,9 +17,6 @@ if cc.has_argument('-Wno-format-truncation') default_cflags += '-Wno-format-truncation' endif -# specify -D_GNU_SOURCE unconditionally -default_cflags += '-D_GNU_SOURCE' - foreach class:driver_classes drivers = [] std_deps = [] diff --git a/dpdk/drivers/net/af_packet/rte_eth_af_packet.c b/dpdk/drivers/net/af_packet/rte_eth_af_packet.c index 264cfc08..cca22395 100644 --- a/dpdk/drivers/net/af_packet/rte_eth_af_packet.c +++ b/dpdk/drivers/net/af_packet/rte_eth_af_packet.c @@ -527,8 +527,6 @@ open_packet_iface(const char *key __rte_unused, return 0; } -static struct rte_vdev_driver pmd_af_packet_drv; - static int rte_pmd_init_internals(struct rte_vdev_device *dev, const int sockfd, diff --git a/dpdk/drivers/net/ark/ark_ethdev.c b/dpdk/drivers/net/ark/ark_ethdev.c index 4f52e2bd..18eb15e5 100644 --- a/dpdk/drivers/net/ark/ark_ethdev.c +++ b/dpdk/drivers/net/ark/ark_ethdev.c @@ -241,8 +241,7 @@ check_for_ext(struct ark_adapter *ark) static int eth_ark_dev_init(struct rte_eth_dev *dev) { - struct ark_adapter *ark = - (struct ark_adapter *)dev->data->dev_private; + struct ark_adapter *ark = dev->data->dev_private; struct rte_pci_device *pci_dev; int ret; int port_count = 1; @@ -403,9 +402,9 @@ 
eth_ark_dev_init(struct rte_eth_dev *dev) return ret; - error: - if (dev->data->mac_addrs) - rte_free(dev->data->mac_addrs); +error: + rte_free(dev->data->mac_addrs); + dev->data->mac_addrs = NULL; return -1; } @@ -417,8 +416,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev) static int ark_config_device(struct rte_eth_dev *dev) { - struct ark_adapter *ark = - (struct ark_adapter *)dev->data->dev_private; + struct ark_adapter *ark = dev->data->dev_private; uint16_t num_q, i; struct ark_mpu_t *mpu; @@ -493,8 +491,7 @@ ark_config_device(struct rte_eth_dev *dev) static int eth_ark_dev_uninit(struct rte_eth_dev *dev) { - struct ark_adapter *ark = - (struct ark_adapter *)dev->data->dev_private; + struct ark_adapter *ark = dev->data->dev_private; if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; @@ -516,8 +513,7 @@ static int eth_ark_dev_configure(struct rte_eth_dev *dev) { PMD_FUNC_LOG(DEBUG, "\n"); - struct ark_adapter *ark = - (struct ark_adapter *)dev->data->dev_private; + struct ark_adapter *ark = dev->data->dev_private; eth_ark_dev_set_link_up(dev); if (ark->user_ext.dev_configure) @@ -543,8 +539,7 @@ delay_pg_start(void *arg) static int eth_ark_dev_start(struct rte_eth_dev *dev) { - struct ark_adapter *ark = - (struct ark_adapter *)dev->data->dev_private; + struct ark_adapter *ark = dev->data->dev_private; int i; PMD_FUNC_LOG(DEBUG, "\n"); @@ -596,8 +591,7 @@ eth_ark_dev_stop(struct rte_eth_dev *dev) { uint16_t i; int status; - struct ark_adapter *ark = - (struct ark_adapter *)dev->data->dev_private; + struct ark_adapter *ark = dev->data->dev_private; struct ark_mpu_t *mpu; PMD_FUNC_LOG(DEBUG, "\n"); @@ -687,8 +681,7 @@ eth_ark_dev_stop(struct rte_eth_dev *dev) static void eth_ark_dev_close(struct rte_eth_dev *dev) { - struct ark_adapter *ark = - (struct ark_adapter *)dev->data->dev_private; + struct ark_adapter *ark = dev->data->dev_private; uint16_t i; if (ark->user_ext.dev_close) @@ -718,8 +711,7 @@ static void eth_ark_dev_info_get(struct rte_eth_dev *dev, 
struct rte_eth_dev_info *dev_info) { - struct ark_adapter *ark = - (struct ark_adapter *)dev->data->dev_private; + struct ark_adapter *ark = dev->data->dev_private; struct ark_mpu_t *tx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_TX_BASE); struct ark_mpu_t *rx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_RX_BASE); uint16_t ports = ark->num_ports; @@ -754,8 +746,7 @@ eth_ark_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) { PMD_DEBUG_LOG(DEBUG, "link status = %d\n", dev->data->dev_link.link_status); - struct ark_adapter *ark = - (struct ark_adapter *)dev->data->dev_private; + struct ark_adapter *ark = dev->data->dev_private; if (ark->user_ext.link_update) { return ark->user_ext.link_update @@ -769,8 +760,7 @@ static int eth_ark_dev_set_link_up(struct rte_eth_dev *dev) { dev->data->dev_link.link_status = 1; - struct ark_adapter *ark = - (struct ark_adapter *)dev->data->dev_private; + struct ark_adapter *ark = dev->data->dev_private; if (ark->user_ext.dev_set_link_up) return ark->user_ext.dev_set_link_up(dev, @@ -782,8 +772,7 @@ static int eth_ark_dev_set_link_down(struct rte_eth_dev *dev) { dev->data->dev_link.link_status = 0; - struct ark_adapter *ark = - (struct ark_adapter *)dev->data->dev_private; + struct ark_adapter *ark = dev->data->dev_private; if (ark->user_ext.dev_set_link_down) return ark->user_ext.dev_set_link_down(dev, @@ -795,8 +784,7 @@ static int eth_ark_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { uint16_t i; - struct ark_adapter *ark = - (struct ark_adapter *)dev->data->dev_private; + struct ark_adapter *ark = dev->data->dev_private; stats->ipackets = 0; stats->ibytes = 0; @@ -819,8 +807,7 @@ static void eth_ark_dev_stats_reset(struct rte_eth_dev *dev) { uint16_t i; - struct ark_adapter *ark = - (struct ark_adapter *)dev->data->dev_private; + struct ark_adapter *ark = dev->data->dev_private; for (i = 0; i < dev->data->nb_tx_queues; i++) eth_tx_queue_stats_reset(dev->data->tx_queues[i]); @@ -837,8 +824,7 @@ 
eth_ark_macaddr_add(struct rte_eth_dev *dev, uint32_t index, uint32_t pool) { - struct ark_adapter *ark = - (struct ark_adapter *)dev->data->dev_private; + struct ark_adapter *ark = dev->data->dev_private; if (ark->user_ext.mac_addr_add) { ark->user_ext.mac_addr_add(dev, @@ -854,8 +840,7 @@ eth_ark_macaddr_add(struct rte_eth_dev *dev, static void eth_ark_macaddr_remove(struct rte_eth_dev *dev, uint32_t index) { - struct ark_adapter *ark = - (struct ark_adapter *)dev->data->dev_private; + struct ark_adapter *ark = dev->data->dev_private; if (ark->user_ext.mac_addr_remove) ark->user_ext.mac_addr_remove(dev, index, @@ -866,8 +851,7 @@ static int eth_ark_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { - struct ark_adapter *ark = - (struct ark_adapter *)dev->data->dev_private; + struct ark_adapter *ark = dev->data->dev_private; if (ark->user_ext.mac_addr_set) { ark->user_ext.mac_addr_set(dev, mac_addr, @@ -880,8 +864,7 @@ eth_ark_set_default_mac_addr(struct rte_eth_dev *dev, static int eth_ark_set_mtu(struct rte_eth_dev *dev, uint16_t size) { - struct ark_adapter *ark = - (struct ark_adapter *)dev->data->dev_private; + struct ark_adapter *ark = dev->data->dev_private; if (ark->user_ext.set_mtu) return ark->user_ext.set_mtu(dev, size, diff --git a/dpdk/drivers/net/ark/ark_ethdev_rx.c b/dpdk/drivers/net/ark/ark_ethdev_rx.c index 300029d6..6156730b 100644 --- a/dpdk/drivers/net/ark/ark_ethdev_rx.c +++ b/dpdk/drivers/net/ark/ark_ethdev_rx.c @@ -121,15 +121,13 @@ eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev, struct rte_mempool *mb_pool) { static int warning1; /* = 0 */ - struct ark_adapter *ark = (struct ark_adapter *)dev->data->dev_private; + struct ark_adapter *ark = dev->data->dev_private; struct ark_rx_queue *queue; uint32_t i; int status; - /* Future works: divide the Q's evenly with multi-ports */ - int port = dev->data->port_id; - int qidx = port + queue_idx; + int qidx = queue_idx; /* We may already be setup, free memory prior to 
re-allocation */ if (dev->data->rx_queues[queue_idx] != NULL) { @@ -611,7 +609,7 @@ eth_rx_queue_stats_reset(void *vqueue) void eth_ark_udm_force_close(struct rte_eth_dev *dev) { - struct ark_adapter *ark = (struct ark_adapter *)dev->data->dev_private; + struct ark_adapter *ark = dev->data->dev_private; struct ark_rx_queue *queue; uint32_t index; uint16_t i; diff --git a/dpdk/drivers/net/ark/ark_ethdev_tx.c b/dpdk/drivers/net/ark/ark_ethdev_tx.c index 94da5f95..08bcf431 100644 --- a/dpdk/drivers/net/ark/ark_ethdev_tx.c +++ b/dpdk/drivers/net/ark/ark_ethdev_tx.c @@ -207,13 +207,11 @@ eth_ark_tx_queue_setup(struct rte_eth_dev *dev, unsigned int socket_id, const struct rte_eth_txconf *tx_conf __rte_unused) { - struct ark_adapter *ark = (struct ark_adapter *)dev->data->dev_private; + struct ark_adapter *ark = dev->data->dev_private; struct ark_tx_queue *queue; int status; - /* Future: divide the Q's evenly with multi-ports */ - int port = dev->data->port_id; - int qidx = port + queue_idx; + int qidx = queue_idx; if (!rte_is_power_of_2(nb_desc)) { PMD_DRV_LOG(ERR, diff --git a/dpdk/drivers/net/atlantic/atl_ethdev.c b/dpdk/drivers/net/atlantic/atl_ethdev.c index 2d05bb4c..06d4f2ea 100644 --- a/dpdk/drivers/net/atlantic/atl_ethdev.c +++ b/dpdk/drivers/net/atlantic/atl_ethdev.c @@ -322,8 +322,7 @@ atl_disable_intr(struct aq_hw_s *hw) static int eth_atl_dev_init(struct rte_eth_dev *eth_dev) { - struct atl_adapter *adapter = - (struct atl_adapter *)eth_dev->data->dev_private; + struct atl_adapter *adapter = eth_dev->data->dev_private; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); diff --git a/dpdk/drivers/net/atlantic/atl_rxtx.c b/dpdk/drivers/net/atlantic/atl_rxtx.c index fe007704..449ffd45 100644 --- a/dpdk/drivers/net/atlantic/atl_rxtx.c +++ b/dpdk/drivers/net/atlantic/atl_rxtx.c @@ -824,13 +824,13 @@ 
atl_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, #ifdef RTE_LIBRTE_ETHDEV_DEBUG ret = rte_validate_tx_offload(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } #endif ret = rte_net_intel_cksum_prepare(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } } diff --git a/dpdk/drivers/net/avf/avf_ethdev.c b/dpdk/drivers/net/avf/avf_ethdev.c index 4dc61d9f..9739dc9b 100644 --- a/dpdk/drivers/net/avf/avf_ethdev.c +++ b/dpdk/drivers/net/avf/avf_ethdev.c @@ -993,6 +993,7 @@ avf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->imissed = pstats->rx_discards; stats->oerrors = pstats->tx_errors + pstats->tx_discards; stats->ibytes = pstats->rx_bytes; + stats->ibytes -= stats->ipackets * ETHER_CRC_LEN; stats->obytes = pstats->tx_bytes; } else { PMD_DRV_LOG(ERR, "Get statistics failed"); diff --git a/dpdk/drivers/net/avf/avf_rxtx.c b/dpdk/drivers/net/avf/avf_rxtx.c index 8c7a9672..8c8771e6 100644 --- a/dpdk/drivers/net/avf/avf_rxtx.c +++ b/dpdk/drivers/net/avf/avf_rxtx.c @@ -144,7 +144,8 @@ check_rx_bulk_allow(struct avf_rx_queue *rxq) static inline void reset_rx_queue(struct avf_rx_queue *rxq) { - uint16_t len, i; + uint16_t len; + uint32_t i; if (!rxq) return; @@ -174,7 +175,8 @@ static inline void reset_tx_queue(struct avf_tx_queue *txq) { struct avf_tx_entry *txe; - uint16_t i, prev, size; + uint32_t i, size; + uint16_t prev; if (!txq) { PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL"); @@ -1583,6 +1585,9 @@ avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) /* Setup TX context descriptor if required */ uint64_t cd_type_cmd_tso_mss = AVF_TX_DESC_DTYPE_CONTEXT; + volatile struct avf_tx_context_desc *ctx_txd = + (volatile struct avf_tx_context_desc *) + &txr[tx_id]; txn = &sw_ring[txe->next_id]; RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); @@ -1596,6 +1601,9 @@ avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) cd_type_cmd_tso_mss |= 
avf_set_tso_ctx(tx_pkt, tx_offload); + ctx_txd->type_cmd_tso_mss = + rte_cpu_to_le_64(cd_type_cmd_tso_mss); + AVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id); txe->last_id = tx_last; tx_id = txe->next_id; @@ -1698,31 +1706,31 @@ avf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, /* Check condition for nb_segs > AVF_TX_MAX_MTU_SEG. */ if (!(ol_flags & PKT_TX_TCP_SEG)) { if (m->nb_segs > AVF_TX_MAX_MTU_SEG) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return i; } } else if ((m->tso_segsz < AVF_MIN_TSO_MSS) || (m->tso_segsz > AVF_MAX_TSO_MSS)) { /* MSS outside the range are considered malicious */ - rte_errno = -EINVAL; + rte_errno = EINVAL; return i; } if (ol_flags & AVF_TX_OFFLOAD_NOTSUP_MASK) { - rte_errno = -ENOTSUP; + rte_errno = ENOTSUP; return i; } #ifdef RTE_LIBRTE_ETHDEV_DEBUG ret = rte_validate_tx_offload(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } #endif ret = rte_net_intel_cksum_prepare(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } } diff --git a/dpdk/drivers/net/avf/avf_rxtx_vec_sse.c b/dpdk/drivers/net/avf/avf_rxtx_vec_sse.c index 343a6aac..13e94ceb 100644 --- a/dpdk/drivers/net/avf/avf_rxtx_vec_sse.c +++ b/dpdk/drivers/net/avf/avf_rxtx_vec_sse.c @@ -521,6 +521,7 @@ avf_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, i++; if (i == nb_bufs) return nb_bufs; + rxq->pkt_first_seg = rx_pkts[i]; } return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, &split_flags[i]); diff --git a/dpdk/drivers/net/axgbe/axgbe_ethdev.c b/dpdk/drivers/net/axgbe/axgbe_ethdev.c index e89c0ec2..8e534a76 100644 --- a/dpdk/drivers/net/axgbe/axgbe_ethdev.c +++ b/dpdk/drivers/net/axgbe/axgbe_ethdev.c @@ -157,7 +157,7 @@ axgbe_dev_configure(struct rte_eth_dev *dev) static int axgbe_dev_rx_mq_config(struct rte_eth_dev *dev) { - struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private; + struct axgbe_port *pdata = dev->data->dev_private; if (dev->data->dev_conf.rxmode.mq_mode == 
ETH_MQ_RX_RSS) pdata->rss_enable = 1; @@ -171,10 +171,11 @@ axgbe_dev_rx_mq_config(struct rte_eth_dev *dev) static int axgbe_dev_start(struct rte_eth_dev *dev) { - PMD_INIT_FUNC_TRACE(); - struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private; + struct axgbe_port *pdata = dev->data->dev_private; int ret; + PMD_INIT_FUNC_TRACE(); + /* Multiqueue RSS */ ret = axgbe_dev_rx_mq_config(dev); if (ret) { @@ -209,9 +210,10 @@ axgbe_dev_start(struct rte_eth_dev *dev) static void axgbe_dev_stop(struct rte_eth_dev *dev) { - PMD_INIT_FUNC_TRACE(); struct axgbe_port *pdata = dev->data->dev_private; + PMD_INIT_FUNC_TRACE(); + rte_intr_disable(&pdata->pci_dev->intr_handle); if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state)) @@ -237,27 +239,30 @@ axgbe_dev_close(struct rte_eth_dev *dev) static void axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) { - PMD_INIT_FUNC_TRACE(); struct axgbe_port *pdata = dev->data->dev_private; + PMD_INIT_FUNC_TRACE(); + AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1); } static void axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) { - PMD_INIT_FUNC_TRACE(); struct axgbe_port *pdata = dev->data->dev_private; + PMD_INIT_FUNC_TRACE(); + AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0); } static void axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev) { - PMD_INIT_FUNC_TRACE(); struct axgbe_port *pdata = dev->data->dev_private; + PMD_INIT_FUNC_TRACE(); + if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM)) return; AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1); @@ -266,9 +271,10 @@ axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev) static void axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) { - PMD_INIT_FUNC_TRACE(); struct axgbe_port *pdata = dev->data->dev_private; + PMD_INIT_FUNC_TRACE(); + if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM)) return; AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0); @@ -578,7 +584,7 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev) if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; - pdata = (struct 
axgbe_port *)eth_dev->data->dev_private; + pdata = eth_dev->data->dev_private; /* initial state */ axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state); axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state); @@ -694,6 +700,7 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev) ret = pdata->phy_if.phy_init(pdata); if (ret) { rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; return ret; } diff --git a/dpdk/drivers/net/axgbe/axgbe_rxtx.c b/dpdk/drivers/net/axgbe/axgbe_rxtx.c index b5a29a95..d80f378c 100644 --- a/dpdk/drivers/net/axgbe/axgbe_rxtx.c +++ b/dpdk/drivers/net/axgbe/axgbe_rxtx.c @@ -342,7 +342,7 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, const struct rte_memzone *tz; tx_desc = nb_desc; - pdata = (struct axgbe_port *)dev->data->dev_private; + pdata = dev->data->dev_private; /* * validate tx descriptors count diff --git a/dpdk/drivers/net/bnx2x/bnx2x.c b/dpdk/drivers/net/bnx2x/bnx2x.c index 3e705c7a..3b034b1e 100644 --- a/dpdk/drivers/net/bnx2x/bnx2x.c +++ b/dpdk/drivers/net/bnx2x/bnx2x.c @@ -2015,6 +2015,8 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link uint8_t global = FALSE; uint32_t val; + PMD_INIT_FUNC_TRACE(sc); + PMD_DRV_LOG(DEBUG, sc, "Starting NIC unload..."); /* mark driver as unloaded in shmem2 */ @@ -2118,6 +2120,9 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link bnx2x_free_mem(sc); } + /* free the host hardware/software hsi structures */ + bnx2x_free_hsi_mem(sc); + bnx2x_free_fw_stats_mem(sc); sc->state = BNX2X_STATE_CLOSED; @@ -4572,6 +4577,8 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp) } } + /* Assuming we have completed slow path completion, clear the flag */ + rte_atomic32_set(&sc->scan_fp, 0); bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID, le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); } @@ -7228,6 +7235,14 @@ int bnx2x_nic_load(struct bnx2x_softc *sc) } } + /* allocate the host hardware/software hsi structures */ + if 
(bnx2x_alloc_hsi_mem(sc) != 0) { + PMD_DRV_LOG(ERR, sc, "bnx2x_alloc_hsi_mem was failed"); + sc->state = BNX2X_STATE_CLOSED; + rc = -ENOMEM; + goto bnx2x_nic_load_error0; + } + if (bnx2x_alloc_fw_stats_mem(sc) != 0) { sc->state = BNX2X_STATE_CLOSED; rc = -ENOMEM; @@ -7443,6 +7458,7 @@ bnx2x_nic_load_error1: bnx2x_nic_load_error0: bnx2x_free_fw_stats_mem(sc); + bnx2x_free_hsi_mem(sc); bnx2x_free_mem(sc); return rc; @@ -8888,9 +8904,9 @@ int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc) uint32_t i; if (IS_PF(sc)) { -/************************/ -/* DEFAULT STATUS BLOCK */ -/************************/ + /************************/ + /* DEFAULT STATUS BLOCK */ + /************************/ if (bnx2x_dma_alloc(sc, sizeof(struct host_sp_status_block), &sc->def_sb_dma, "def_sb", @@ -8900,9 +8916,9 @@ int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc) sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr; -/***************/ -/* EVENT QUEUE */ -/***************/ + /***************/ + /* EVENT QUEUE */ + /***************/ if (bnx2x_dma_alloc(sc, BNX2X_PAGE_SIZE, &sc->eq_dma, "ev_queue", @@ -8913,9 +8929,9 @@ int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc) sc->eq = (union event_ring_elem *)sc->eq_dma.vaddr; -/*************/ -/* SLOW PATH */ -/*************/ + /*************/ + /* SLOW PATH */ + /*************/ if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_slowpath), &sc->sp_dma, "sp", @@ -8927,9 +8943,9 @@ int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc) sc->sp = (struct bnx2x_slowpath *)sc->sp_dma.vaddr; -/*******************/ -/* SLOW PATH QUEUE */ -/*******************/ + /*******************/ + /* SLOW PATH QUEUE */ + /*******************/ if (bnx2x_dma_alloc(sc, BNX2X_PAGE_SIZE, &sc->spq_dma, "sp_queue", @@ -8942,9 +8958,9 @@ int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc) sc->spq = (struct eth_spe *)sc->spq_dma.vaddr; -/***************************/ -/* FW DECOMPRESSION BUFFER */ -/***************************/ + /***************************/ + /* FW DECOMPRESSION BUFFER */ 
+ /***************************/ if (bnx2x_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma, "fw_buf", RTE_CACHE_LINE_SIZE) != 0) { @@ -8968,9 +8984,9 @@ int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc) fp->sc = sc; fp->index = i; -/*******************/ -/* FP STATUS BLOCK */ -/*******************/ + /*******************/ + /* FP STATUS BLOCK */ + /*******************/ snprintf(buf, sizeof(buf), "fp_%d_sb", i); if (bnx2x_dma_alloc(sc, sizeof(union bnx2x_host_hc_status_block), @@ -9001,43 +9017,50 @@ void bnx2x_free_hsi_mem(struct bnx2x_softc *sc) for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; -/*******************/ -/* FP STATUS BLOCK */ -/*******************/ + /*******************/ + /* FP STATUS BLOCK */ + /*******************/ memset(&fp->status_block, 0, sizeof(fp->status_block)); + bnx2x_dma_free(&fp->sb_dma); } - /***************************/ - /* FW DECOMPRESSION BUFFER */ - /***************************/ + if (IS_PF(sc)) { + /***************************/ + /* FW DECOMPRESSION BUFFER */ + /***************************/ - sc->gz_buf = NULL; + bnx2x_dma_free(&sc->gz_buf_dma); + sc->gz_buf = NULL; - /*******************/ - /* SLOW PATH QUEUE */ - /*******************/ + /*******************/ + /* SLOW PATH QUEUE */ + /*******************/ - sc->spq = NULL; + bnx2x_dma_free(&sc->spq_dma); + sc->spq = NULL; - /*************/ - /* SLOW PATH */ - /*************/ + /*************/ + /* SLOW PATH */ + /*************/ - sc->sp = NULL; + bnx2x_dma_free(&sc->sp_dma); + sc->sp = NULL; - /***************/ - /* EVENT QUEUE */ - /***************/ + /***************/ + /* EVENT QUEUE */ + /***************/ - sc->eq = NULL; + bnx2x_dma_free(&sc->eq_dma); + sc->eq = NULL; - /************************/ - /* DEFAULT STATUS BLOCK */ - /************************/ - - sc->def_sb = NULL; + /************************/ + /* DEFAULT STATUS BLOCK */ + /************************/ + bnx2x_dma_free(&sc->def_sb_dma); + sc->def_sb = NULL; + } } /* diff --git a/dpdk/drivers/net/bnx2x/bnx2x.h 
b/dpdk/drivers/net/bnx2x/bnx2x.h index ef1688ff..ceaecb03 100644 --- a/dpdk/drivers/net/bnx2x/bnx2x.h +++ b/dpdk/drivers/net/bnx2x/bnx2x.h @@ -155,13 +155,14 @@ struct bnx2x_device_type { * Transmit Buffer Descriptor (tx_bd) definitions* */ /* NUM_TX_PAGES must be a power of 2. */ +#define NUM_TX_PAGES 16 #define TOTAL_TX_BD_PER_PAGE (BNX2X_PAGE_SIZE / sizeof(union eth_tx_bd_types)) /* 256 */ #define USABLE_TX_BD_PER_PAGE (TOTAL_TX_BD_PER_PAGE - 1) /* 255 */ #define TOTAL_TX_BD(q) (TOTAL_TX_BD_PER_PAGE * q->nb_tx_pages) /* 512 */ #define USABLE_TX_BD(q) (USABLE_TX_BD_PER_PAGE * q->nb_tx_pages) /* 510 */ #define MAX_TX_BD(q) (TOTAL_TX_BD(q) - 1) /* 511 */ - +#define MAX_TX_AVAIL (USABLE_TX_BD_PER_PAGE * NUM_TX_PAGES - 2) #define NEXT_TX_BD(x) \ ((((x) & USABLE_TX_BD_PER_PAGE) == \ (USABLE_TX_BD_PER_PAGE - 1)) ? (x) + 2 : (x) + 1) @@ -182,13 +183,14 @@ struct bnx2x_device_type { /* * Receive Buffer Descriptor (rx_bd) definitions* */ -//#define NUM_RX_PAGES 1 +#define MAX_RX_PAGES 8 #define TOTAL_RX_BD_PER_PAGE (BNX2X_PAGE_SIZE / sizeof(struct eth_rx_bd)) /* 512 */ #define USABLE_RX_BD_PER_PAGE (TOTAL_RX_BD_PER_PAGE - 2) /* 510 */ #define RX_BD_PER_PAGE_MASK (TOTAL_RX_BD_PER_PAGE - 1) /* 511 */ #define TOTAL_RX_BD(q) (TOTAL_RX_BD_PER_PAGE * q->nb_rx_pages) /* 512 */ #define USABLE_RX_BD(q) (USABLE_RX_BD_PER_PAGE * q->nb_rx_pages) /* 510 */ #define MAX_RX_BD(q) (TOTAL_RX_BD(q) - 1) /* 511 */ +#define MAX_RX_AVAIL (USABLE_RX_BD_PER_PAGE * MAX_RX_PAGES - 2) #define RX_BD_NEXT_PAGE_DESC_CNT 2 #define NEXT_RX_BD(x) \ @@ -244,6 +246,10 @@ struct bnx2x_device_type { #define MIN_RX_AVAIL(sc) \ ((sc)->dropless_fc ? 
BD_TH_HI(sc) + 128 : 128) +#define MIN_RX_SIZE_NONTPA_HW ETH_MIN_RX_CQES_WITHOUT_TPA +#define MIN_RX_SIZE_NONTPA (RTE_MAX((uint32_t)MIN_RX_SIZE_NONTPA_HW,\ + (uint32_t)MIN_RX_AVAIL(sc))) + /* * dropless fc calculations for RCQs * Number of RCQs should be as number of buffers in BRB: diff --git a/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c b/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c index c628cdc0..5cff7733 100644 --- a/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c +++ b/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c @@ -87,7 +87,6 @@ bnx2x_link_update(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(sc); - bnx2x_link_status_update(sc); memset(&link, 0, sizeof(link)); mb(); link.link_speed = sc->link_vars.line_speed; @@ -150,7 +149,6 @@ static void bnx2x_periodic_start(void *param) if (ret) { PMD_DRV_LOG(ERR, sc, "Unable to start periodic" " timer rc %d", ret); - assert(false && "Unable to start periodic timer"); } } } @@ -206,13 +204,6 @@ bnx2x_dev_configure(struct rte_eth_dev *dev) return -ENXIO; } - /* allocate the host hardware/software hsi structures */ - if (bnx2x_alloc_hsi_mem(sc) != 0) { - PMD_DRV_LOG(ERR, sc, "bnx2x_alloc_hsi_mem was failed"); - bnx2x_free_ilt_mem(sc); - return -ENXIO; - } - bnx2x_dev_rxtx_init_dummy(dev); return 0; } @@ -226,9 +217,12 @@ bnx2x_dev_start(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(sc); /* start the periodic callout */ - if (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP) { - bnx2x_periodic_start(dev); - PMD_DRV_LOG(DEBUG, sc, "Periodic poll re-started"); + if (IS_PF(sc)) { + if (atomic_load_acq_long(&sc->periodic_flags) == + PERIODIC_STOP) { + bnx2x_periodic_start(dev); + PMD_DRV_LOG(DEBUG, sc, "Periodic poll re-started"); + } } ret = bnx2x_init(sc); @@ -266,10 +260,10 @@ bnx2x_dev_stop(struct rte_eth_dev *dev) rte_intr_disable(&sc->pci_dev->intr_handle); rte_intr_callback_unregister(&sc->pci_dev->intr_handle, bnx2x_interrupt_handler, (void *)dev); - } - /* stop the periodic callout */ - bnx2x_periodic_stop(dev); + /* stop the periodic 
callout */ + bnx2x_periodic_stop(dev); + } ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE); if (ret) { @@ -293,9 +287,6 @@ bnx2x_dev_close(struct rte_eth_dev *dev) bnx2x_dev_clear_queues(dev); memset(&(dev->data->dev_link), 0 , sizeof(struct rte_eth_link)); - /* free the host hardware/software hsi structures */ - bnx2x_free_hsi_mem(sc); - /* free ilt */ bnx2x_free_ilt_mem(sc); } @@ -491,6 +482,7 @@ static void bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct bnx2x_softc *sc = dev->data->dev_private; + dev_info->max_rx_queues = sc->max_rx_queues; dev_info->max_tx_queues = sc->max_tx_queues; dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE; @@ -498,6 +490,10 @@ bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_mac_addrs = BNX2X_MAX_MAC_ADDRS; dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G; dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME; + + dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL; + dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA; + dev_info->tx_desc_lim.nb_max = MAX_TX_AVAIL; } static int @@ -686,7 +682,9 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf) return 0; out: - bnx2x_periodic_stop(eth_dev); + if (IS_PF(sc)) + bnx2x_periodic_stop(eth_dev); + return ret; } @@ -706,6 +704,13 @@ eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev) return bnx2x_common_dev_init(eth_dev, 1); } +static int eth_bnx2x_dev_uninit(struct rte_eth_dev *eth_dev) +{ + /* mac_addrs must not be freed alone because part of dev_private */ + eth_dev->data->mac_addrs = NULL; + return 0; +} + static struct rte_pci_driver rte_bnx2x_pmd; static struct rte_pci_driver rte_bnx2xvf_pmd; @@ -724,7 +729,7 @@ static int eth_bnx2x_pci_probe(struct rte_pci_driver *pci_drv, static int eth_bnx2x_pci_remove(struct rte_pci_device *pci_dev) { - return rte_eth_dev_pci_generic_remove(pci_dev, NULL); + return rte_eth_dev_pci_generic_remove(pci_dev, eth_bnx2x_dev_uninit); } static struct 
rte_pci_driver rte_bnx2x_pmd = { diff --git a/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c b/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c index 048bf126..6d966ff0 100644 --- a/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c +++ b/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c @@ -162,20 +162,26 @@ static inline uint16_t bnx2x_check_me_flags(uint32_t val) #define BNX2X_ME_ANSWER_DELAY 100 #define BNX2X_ME_ANSWER_TRIES 10 -static inline int bnx2x_read_vf_id(struct bnx2x_softc *sc) +static inline int bnx2x_read_vf_id(struct bnx2x_softc *sc, uint32_t *vf_id) { uint32_t val; uint8_t i = 0; while (i <= BNX2X_ME_ANSWER_TRIES) { val = BNX2X_DB_READ(DOORBELL_ADDR(sc, 0)); - if (bnx2x_check_me_flags(val)) - return VF_ID(val); + if (bnx2x_check_me_flags(val)) { + PMD_DRV_LOG(DEBUG, sc, + "valid register value: 0x%08x", val); + *vf_id = VF_ID(val); + return 0; + } DELAY_MS(BNX2X_ME_ANSWER_DELAY); i++; } + PMD_DRV_LOG(ERR, sc, "Invalid register value: 0x%08x", val); + return -EINVAL; } @@ -240,14 +246,13 @@ int bnx2x_loop_obtain_resources(struct bnx2x_softc *sc) int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count, uint8_t rx_count) { struct vf_acquire_tlv *acq = &sc->vf2pf_mbox->query[0].acquire; - int vf_id; + uint32_t vf_id; int rc; bnx2x_vf_close(sc); bnx2x_vf_prep(sc, &acq->first_tlv, BNX2X_VF_TLV_ACQUIRE, sizeof(*acq)); - vf_id = bnx2x_read_vf_id(sc); - if (vf_id < 0) { + if (bnx2x_read_vf_id(sc, &vf_id)) { rc = -EAGAIN; goto out; } @@ -318,25 +323,30 @@ bnx2x_vf_close(struct bnx2x_softc *sc) { struct vf_release_tlv *query; struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply; - int vf_id = bnx2x_read_vf_id(sc); + uint32_t vf_id; int rc; - if (vf_id >= 0) { - query = &sc->vf2pf_mbox->query[0].release; - bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_RELEASE, - sizeof(*query)); + query = &sc->vf2pf_mbox->query[0].release; + bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_RELEASE, + sizeof(*query)); - query->vf_id = vf_id; - bnx2x_add_tlv(sc, query, query->first_tlv.tl.length, 
- BNX2X_VF_TLV_LIST_END, - sizeof(struct channel_list_end_tlv)); - - rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); - if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS) - PMD_DRV_LOG(ERR, sc, "Failed to release VF"); - - bnx2x_vf_finalize(sc, &query->first_tlv); + if (bnx2x_read_vf_id(sc, &vf_id)) { + rc = -EAGAIN; + goto out; } + + query->vf_id = vf_id; + + bnx2x_add_tlv(sc, query, query->first_tlv.tl.length, + BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); + if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS) + PMD_DRV_LOG(ERR, sc, "Failed to release VF"); + +out: + bnx2x_vf_finalize(sc, &query->first_tlv); } /* Let PF know the VF status blocks phys_addrs */ @@ -347,6 +357,8 @@ bnx2x_vf_init(struct bnx2x_softc *sc) struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply; int i, rc; + PMD_INIT_FUNC_TRACE(sc); + query = &sc->vf2pf_mbox->query[0].init; bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_INIT, sizeof(*query)); @@ -383,51 +395,38 @@ bnx2x_vf_unload(struct bnx2x_softc *sc) { struct vf_close_tlv *query; struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply; - struct vf_q_op_tlv *query_op; - int i, vf_id, rc; + uint32_t vf_id; + int i, rc; - vf_id = bnx2x_read_vf_id(sc); - if (vf_id > 0) { - FOR_EACH_QUEUE(sc, i) { - query_op = &sc->vf2pf_mbox->query[0].q_op; - bnx2x_vf_prep(sc, &query_op->first_tlv, - BNX2X_VF_TLV_TEARDOWN_Q, - sizeof(*query_op)); + PMD_INIT_FUNC_TRACE(sc); - query_op->vf_qid = i; + FOR_EACH_QUEUE(sc, i) + bnx2x_vf_teardown_queue(sc, i); - bnx2x_add_tlv(sc, query_op, - query_op->first_tlv.tl.length, - BNX2X_VF_TLV_LIST_END, - sizeof(struct channel_list_end_tlv)); + bnx2x_vf_set_mac(sc, false); - rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); - if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS) - PMD_DRV_LOG(ERR, sc, - "Bad reply for vf_q %d teardown", i); + query = &sc->vf2pf_mbox->query[0].close; + 
bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_CLOSE, + sizeof(*query)); - bnx2x_vf_finalize(sc, &query_op->first_tlv); - } - - bnx2x_vf_set_mac(sc, false); - - query = &sc->vf2pf_mbox->query[0].close; - bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_CLOSE, - sizeof(*query)); - - query->vf_id = vf_id; - - bnx2x_add_tlv(sc, query, query->first_tlv.tl.length, - BNX2X_VF_TLV_LIST_END, - sizeof(struct channel_list_end_tlv)); - - rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); - if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS) - PMD_DRV_LOG(ERR, sc, - "Bad reply from PF for close message"); - - bnx2x_vf_finalize(sc, &query->first_tlv); + if (bnx2x_read_vf_id(sc, &vf_id)) { + rc = -EAGAIN; + goto out; } + + query->vf_id = vf_id; + + bnx2x_add_tlv(sc, query, query->first_tlv.tl.length, + BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); + if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS) + PMD_DRV_LOG(ERR, sc, + "Bad reply from PF for close message"); + +out: + bnx2x_vf_finalize(sc, &query->first_tlv); } static inline uint16_t @@ -521,6 +520,35 @@ out: return rc; } +int +bnx2x_vf_teardown_queue(struct bnx2x_softc *sc, int qid) +{ + struct vf_q_op_tlv *query_op; + struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply; + int rc; + + query_op = &sc->vf2pf_mbox->query[0].q_op; + bnx2x_vf_prep(sc, &query_op->first_tlv, + BNX2X_VF_TLV_TEARDOWN_Q, + sizeof(*query_op)); + + query_op->vf_qid = qid; + + bnx2x_add_tlv(sc, query_op, + query_op->first_tlv.tl.length, + BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); + if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS) + PMD_DRV_LOG(ERR, sc, + "Bad reply for vf_q %d teardown", qid); + + bnx2x_vf_finalize(sc, &query_op->first_tlv); + + return rc; +} + int bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set) { diff --git a/dpdk/drivers/net/bnx2x/bnx2x_vfpf.h 
b/dpdk/drivers/net/bnx2x/bnx2x_vfpf.h index cc6fef95..0030e194 100644 --- a/dpdk/drivers/net/bnx2x/bnx2x_vfpf.h +++ b/dpdk/drivers/net/bnx2x/bnx2x_vfpf.h @@ -328,6 +328,7 @@ struct bnx2x_vf_mbx_msg { union resp_tlvs resp; }; +int bnx2x_vf_teardown_queue(struct bnx2x_softc *sc, int qid); int bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set); int bnx2x_vf_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *params); diff --git a/dpdk/drivers/net/bnx2x/ecore_sp.c b/dpdk/drivers/net/bnx2x/ecore_sp.c index 43194095..5ac22e72 100644 --- a/dpdk/drivers/net/bnx2x/ecore_sp.c +++ b/dpdk/drivers/net/bnx2x/ecore_sp.c @@ -291,10 +291,6 @@ static int ecore_state_wait(struct bnx2x_softc *sc, int state, cnt *= 20; ECORE_MSG(sc, "waiting for state to become %d", state); - /* being over protective to remind bnx2x_intr_legacy() to - * process RAMROD - */ - rte_atomic32_set(&sc->scan_fp, 1); ECORE_MIGHT_SLEEP(); while (cnt--) { diff --git a/dpdk/drivers/net/bnxt/bnxt.h b/dpdk/drivers/net/bnxt/bnxt.h index 5535c376..0ef5afcb 100644 --- a/dpdk/drivers/net/bnxt/bnxt.h +++ b/dpdk/drivers/net/bnxt/bnxt.h @@ -332,6 +332,7 @@ struct bnxt { uint16_t max_tx_rings; uint16_t max_rx_rings; uint16_t max_l2_ctx; + uint16_t max_rx_em_flows; uint16_t max_vnics; uint16_t max_stat_ctx; uint16_t vlan; diff --git a/dpdk/drivers/net/bnxt/bnxt_cpr.h b/dpdk/drivers/net/bnxt/bnxt_cpr.h index c7af5698..ee5ca820 100644 --- a/dpdk/drivers/net/bnxt/bnxt_cpr.h +++ b/dpdk/drivers/net/bnxt/bnxt_cpr.h @@ -10,11 +10,12 @@ #include #define CMP_VALID(cmp, raw_cons, ring) \ - (!!(((struct cmpl_base *)(cmp))->info3_v & CMPL_BASE_V) == \ - !((raw_cons) & ((ring)->ring_size))) + (!!(rte_le_to_cpu_32(((struct cmpl_base *)(cmp))->info3_v) & \ + CMPL_BASE_V) == !((raw_cons) & ((ring)->ring_size))) #define CMPL_VALID(cmp, v) \ - (!!(((struct cmpl_base *)(cmp))->info3_v & CMPL_BASE_V) == !(v)) + (!!(rte_le_to_cpu_32(((struct cmpl_base *)(cmp))->info3_v) & \ + CMPL_BASE_V) == !(v)) #define CMP_TYPE(cmp) \ (((struct 
cmpl_base *)cmp)->type & CMPL_BASE_TYPE_MASK) @@ -31,7 +32,7 @@ #define NEXT_CMPL(cpr, idx, v, inc) do { \ (idx) += (inc); \ - if (unlikely((idx) == (cpr)->cp_ring_struct->ring_size)) { \ + if (unlikely((idx) >= (cpr)->cp_ring_struct->ring_size)) { \ (v) = !(v); \ (idx) = 0; \ } \ diff --git a/dpdk/drivers/net/bnxt/bnxt_ethdev.c b/dpdk/drivers/net/bnxt/bnxt_ethdev.c index e26b9e3c..2f3e067e 100644 --- a/dpdk/drivers/net/bnxt/bnxt_ethdev.c +++ b/dpdk/drivers/net/bnxt/bnxt_ethdev.c @@ -211,9 +211,6 @@ static int bnxt_init_chip(struct bnxt *bp) unsigned int i, j; int rc; - /* disable uio/vfio intr/eventfd mapping */ - rte_intr_disable(intr_handle); - if (bp->eth_dev->data->mtu > ETHER_MTU) { bp->eth_dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; @@ -354,8 +351,9 @@ static int bnxt_init_chip(struct bnxt *bp) bp->rx_cp_nr_rings); return -ENOTSUP; } - if (rte_intr_efd_enable(intr_handle, intr_vector)) - return -1; + rc = rte_intr_efd_enable(intr_handle, intr_vector); + if (rc) + return rc; } if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { @@ -366,28 +364,31 @@ static int bnxt_init_chip(struct bnxt *bp) if (intr_handle->intr_vec == NULL) { PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues" " intr_vec", bp->eth_dev->data->nb_rx_queues); - return -ENOMEM; + rc = -ENOMEM; + goto err_disable; } PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p " "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n", intr_handle->intr_vec, intr_handle->nb_efd, intr_handle->max_intr); - } - - for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues; - queue_id++) { - intr_handle->intr_vec[queue_id] = vec; - if (vec < base + intr_handle->nb_efd - 1) - vec++; + for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues; + queue_id++) { + intr_handle->intr_vec[queue_id] = + vec + BNXT_RX_VEC_START; + if (vec < base + intr_handle->nb_efd - 1) + vec++; + } } /* enable uio/vfio intr/eventfd mapping */ - rte_intr_enable(intr_handle); + rc = 
rte_intr_enable(intr_handle); + if (rc) + goto err_free; rc = bnxt_get_hwrm_link_config(bp, &new); if (rc) { PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc); - goto err_out; + goto err_free; } if (!bp->link_info.link_up) { @@ -395,16 +396,18 @@ static int bnxt_init_chip(struct bnxt *bp) if (rc) { PMD_DRV_LOG(ERR, "HWRM link config failure rc: %x\n", rc); - goto err_out; + goto err_free; } } bnxt_print_link_info(bp->eth_dev); return 0; +err_free: + rte_free(intr_handle->intr_vec); +err_disable: + rte_intr_efd_disable(intr_handle); err_out: - bnxt_free_all_hwrm_resources(bp); - /* Some of the error status returned by FW may not be from errno.h */ if (rc > 0) rc = -EIO; @@ -441,7 +444,7 @@ static int bnxt_init_nic(struct bnxt *bp) static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *dev_info) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; uint16_t max_vnics, i, j, vpool, vrxq; unsigned int max_rx_rings; @@ -452,7 +455,7 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, /* PF/VF specifics */ if (BNXT_PF(bp)) dev_info->max_vfs = bp->pdev->max_vfs; - max_rx_rings = RTE_MIN(bp->max_vnics, bp->max_stat_ctx); + max_rx_rings = RTE_MIN(bp->max_rx_rings, bp->max_stat_ctx); /* For the sake of symmetry, max_rx_queues = max_tx_queues */ dev_info->max_rx_queues = max_rx_rings; dev_info->max_tx_queues = max_rx_rings; @@ -537,7 +540,7 @@ found: /* Configure the device based on the configuration provided */ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; int rc; @@ -618,15 +621,9 @@ static void bnxt_print_link_info(struct rte_eth_dev *eth_dev) eth_dev->data->port_id); } -static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev) -{ - bnxt_print_link_info(eth_dev); 
- return 0; -} - static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; int vlan_mask = 0; int rc; @@ -636,7 +633,6 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) "RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n", bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS); } - bp->dev_stopped = 0; rc = bnxt_init_chip(bp); if (rc) @@ -652,7 +648,9 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) if (rc) goto error; + bnxt_enable_int(bp); bp->flags |= BNXT_FLAG_INIT_DONE; + bp->dev_stopped = 0; return 0; error: @@ -664,7 +662,7 @@ error: static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; int rc = 0; if (!bp->link_info.link_up) @@ -678,7 +676,7 @@ static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev) static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; eth_dev->data->dev_link.link_status = 0; bnxt_set_hwrm_link_config(bp, false); @@ -690,7 +688,14 @@ static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev) /* Unload the driver, release resources */ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + + bnxt_disable_int(bp); + + /* disable uio/vfio intr/eventfd mapping */ + rte_intr_disable(intr_handle); bp->flags &= ~BNXT_FLAG_INIT_DONE; if (bp->eth_dev->data->dev_started) { @@ -698,6 +703,14 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) 
eth_dev->data->dev_link.link_status = 0; } bnxt_set_hwrm_link_config(bp, false); + + /* Clean queue intr-vector mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec != NULL) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } + bnxt_hwrm_port_clr_stats(bp); bnxt_free_tx_mbufs(bp); bnxt_free_rx_mbufs(bp); @@ -707,7 +720,7 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; if (bp->dev_stopped == 0) bnxt_dev_stop_op(eth_dev); @@ -727,7 +740,7 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev) static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, uint32_t index) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; uint64_t pool_mask = eth_dev->data->mac_pool_sel[index]; struct bnxt_vnic_info *vnic; struct bnxt_filter_info *filter, *temp_filter; @@ -763,9 +776,10 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr, uint32_t index, uint32_t pool) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; struct bnxt_vnic_info *vnic = &bp->vnic_info[pool]; struct bnxt_filter_info *filter; + int rc = 0; if (BNXT_VF(bp) & !BNXT_VF_IS_TRUSTED(bp)) { PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n"); @@ -789,16 +803,26 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev, PMD_DRV_LOG(ERR, "L2 filter alloc failed\n"); return -ENODEV; } - STAILQ_INSERT_TAIL(&vnic->filter, filter, next); + filter->mac_index = index; memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN); - return bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); + + rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); + if (!rc) { + STAILQ_INSERT_TAIL(&vnic->filter, filter, next); + } 
else { + filter->mac_index = INVALID_MAC_INDEX; + memset(&filter->l2_addr, 0, ETHER_ADDR_LEN); + bnxt_free_filter(bp, filter); + } + + return rc; } int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) { int rc = 0; - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; struct rte_eth_link new; unsigned int cnt = BNXT_LINK_WAIT_CNT; @@ -813,11 +837,12 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) "Failed to retrieve link rc = 0x%x!\n", rc); goto out; } - rte_delay_ms(BNXT_LINK_WAIT_INTERVAL); - if (!wait_to_complete) + if (!wait_to_complete || new.link_status) break; - } while (!new.link_status && cnt--); + + rte_delay_ms(BNXT_LINK_WAIT_INTERVAL); + } while (cnt--); out: /* Timed out or success */ @@ -838,7 +863,7 @@ out: static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; struct bnxt_vnic_info *vnic; if (bp->vnic_info == NULL) @@ -852,7 +877,7 @@ static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev) static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; struct bnxt_vnic_info *vnic; if (bp->vnic_info == NULL) @@ -866,7 +891,7 @@ static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; struct bnxt_vnic_info *vnic; if (bp->vnic_info == NULL) @@ -880,7 +905,7 @@ static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = 
eth_dev->data->dev_private; struct bnxt_vnic_info *vnic; if (bp->vnic_info == NULL) @@ -892,30 +917,72 @@ static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL); } +/* Return bnxt_rx_queue pointer corresponding to a given rxq. */ +static struct bnxt_rx_queue *bnxt_qid_to_rxq(struct bnxt *bp, uint16_t qid) +{ + if (qid >= bp->rx_nr_rings) + return NULL; + + return bp->eth_dev->data->rx_queues[qid]; +} + +/* Return rxq corresponding to a given rss table ring/group ID. */ +static uint16_t bnxt_rss_to_qid(struct bnxt *bp, uint16_t fwr) +{ + unsigned int i; + + for (i = 0; i < bp->rx_nr_rings; i++) { + if (bp->grp_info[i].fw_grp_id == fwr) + return i; + } + + return INVALID_HW_RING_ID; +} + static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; - struct bnxt_vnic_info *vnic; + struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + uint16_t tbl_size = HW_HASH_INDEX_SIZE; + uint16_t idx, sft; int i; + if (!vnic->rss_table) + return -EINVAL; + if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) return -EINVAL; - if (reta_size != HW_HASH_INDEX_SIZE) { + if (reta_size != tbl_size) { PMD_DRV_LOG(ERR, "The configured hash table lookup size " "(%d) must equal the size supported by the hardware " - "(%d)\n", reta_size, HW_HASH_INDEX_SIZE); + "(%d)\n", reta_size, tbl_size); return -EINVAL; } - /* Update the RSS VNIC(s) */ - for (i = 0; i < bp->max_vnics; i++) { - vnic = &bp->vnic_info[i]; - memcpy(vnic->rss_table, reta_conf, reta_size); - bnxt_hwrm_vnic_rss_cfg(bp, vnic); + + for (i = 0; i < reta_size; i++) { + struct bnxt_rx_queue *rxq; + + idx = i / RTE_RETA_GROUP_SIZE; + sft = i % RTE_RETA_GROUP_SIZE; + + if (!(reta_conf[idx].mask & (1ULL << sft))) + continue; + + rxq = 
bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]); + if (!rxq) { + PMD_DRV_LOG(ERR, "Invalid ring in reta_conf.\n"); + return -EINVAL; + } + + vnic->rss_table[i] = + vnic->fw_grp_ids[reta_conf[idx].reta[sft]]; } + + bnxt_hwrm_vnic_rss_cfg(bp, vnic); return 0; } @@ -923,10 +990,10 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; - struct rte_intr_handle *intr_handle - = &bp->pdev->intr_handle; + uint16_t tbl_size = HW_HASH_INDEX_SIZE; + uint16_t idx, sft, i; /* Retrieve from the default VNIC */ if (!vnic) @@ -934,18 +1001,28 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, if (!vnic->rss_table) return -EINVAL; - if (reta_size != HW_HASH_INDEX_SIZE) { + if (reta_size != tbl_size) { PMD_DRV_LOG(ERR, "The configured hash table lookup size " "(%d) must equal the size supported by the hardware " - "(%d)\n", reta_size, HW_HASH_INDEX_SIZE); + "(%d)\n", reta_size, tbl_size); return -EINVAL; } - /* EW - need to revisit here copying from uint64_t to uint16_t */ - memcpy(reta_conf, vnic->rss_table, reta_size); - if (rte_intr_allow_others(intr_handle)) { - if (eth_dev->data->dev_conf.intr_conf.lsc != 0) - bnxt_dev_lsc_intr_setup(eth_dev); + for (idx = 0, i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + sft = i % RTE_RETA_GROUP_SIZE; + + if (reta_conf[idx].mask & (1ULL << sft)) { + uint16_t qid; + + qid = bnxt_rss_to_qid(bp, vnic->rss_table[i]); + + if (qid == INVALID_HW_RING_ID) { + PMD_DRV_LOG(ERR, "Inv. 
entry in rss table.\n"); + return -EINVAL; + } + reta_conf[idx].reta[sft] = qid; + } } return 0; @@ -954,7 +1031,7 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, struct rte_eth_rss_conf *rss_conf) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; struct bnxt_vnic_info *vnic; uint16_t hash_type = 0; @@ -1010,7 +1087,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev, struct rte_eth_rss_conf *rss_conf) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; int len; uint32_t hash_types; @@ -1068,7 +1145,7 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev, static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; struct rte_eth_link link_info; int rc; @@ -1100,7 +1177,7 @@ static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) { PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n"); @@ -1156,7 +1233,7 @@ static int bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev, struct rte_eth_udp_tunnel *udp_tunnel) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; uint16_t tunnel_type = 0; int rc = 0; @@ -1204,7 +1281,7 @@ static int bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev, struct rte_eth_udp_tunnel *udp_tunnel) { 
- struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; uint16_t tunnel_type = 0; uint16_t port = 0; int rc = 0; @@ -1404,7 +1481,7 @@ exit: static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; /* These operations apply to ALL existing MAC/VLAN filters */ if (on) @@ -1416,7 +1493,7 @@ static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev, static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; unsigned int i; @@ -1453,7 +1530,7 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask) static int bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; /* Default Filter is tied to VNIC 0 */ struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; struct bnxt_filter_info *filter; @@ -1462,26 +1539,28 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr) if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) return -EPERM; - memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr)); + if (is_zero_ether_addr(addr)) + return -EINVAL; STAILQ_FOREACH(filter, &vnic->filter, next) { /* Default Filter is at Index 0 */ if (filter->mac_index != 0) continue; - rc = bnxt_hwrm_clear_l2_filter(bp, filter); - if (rc) - return rc; + memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN); memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN); filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; filter->enables |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK; + rc = bnxt_hwrm_set_l2_filter(bp, 
vnic->fw_vnic_id, filter); if (rc) return rc; - filter->mac_index = 0; + + memcpy(bp->mac_addr, addr, ETHER_ADDR_LEN); PMD_DRV_LOG(DEBUG, "Set MAC addr\n"); + return 0; } return 0; @@ -1492,7 +1571,7 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addr_set, uint32_t nb_mc_addr) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; char *mc_addr_list = (char *)mc_addr_set; struct bnxt_vnic_info *vnic; uint32_t off = 0, i = 0; @@ -1520,7 +1599,7 @@ allmulti: static int bnxt_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; uint8_t fw_major = (bp->fw_ver >> 24) & 0xff; uint8_t fw_minor = (bp->fw_ver >> 16) & 0xff; uint8_t fw_updt = (bp->fw_ver >> 8) & 0xff; @@ -1629,7 +1708,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) static int bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; uint16_t vlan = bp->vlan; int rc; @@ -1649,7 +1728,7 @@ bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on) static int bnxt_dev_led_on_op(struct rte_eth_dev *dev) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; return bnxt_hwrm_port_led_cfg(bp, true); } @@ -1657,7 +1736,7 @@ bnxt_dev_led_on_op(struct rte_eth_dev *dev) static int bnxt_dev_led_off_op(struct rte_eth_dev *dev) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; return bnxt_hwrm_port_led_cfg(bp, false); } @@ -1849,7 +1928,7 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; struct 
rte_eth_ethertype_filter *efilter = (struct rte_eth_ethertype_filter *)arg; struct bnxt_filter_info *bfilter, *filter1; @@ -1892,7 +1971,7 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev, filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0); if (filter1 == NULL) { - ret = -1; + ret = -EINVAL; goto cleanup; } bfilter->enables |= @@ -2086,7 +2165,7 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp, vnic0 = &bp->vnic_info[0]; filter1 = STAILQ_FIRST(&vnic0->filter); if (filter1 == NULL) { - ret = -1; + ret = -EINVAL; goto free_filter; } @@ -2153,7 +2232,7 @@ bnxt_ntuple_filter(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; int ret; if (filter_op == RTE_ETH_FILTER_NOP) @@ -2469,7 +2548,7 @@ bnxt_fdir_filter(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg; struct bnxt_filter_info *filter, *match; struct bnxt_vnic_info *vnic, *mvnic; @@ -2755,7 +2834,7 @@ static int bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) { uint64_t ns; - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; if (!ptp) @@ -2772,7 +2851,7 @@ static int bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) { uint64_t ns, systime_cycles; - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; if (!ptp) @@ -2787,7 +2866,7 @@ bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) static int bnxt_timesync_enable(struct rte_eth_dev *dev) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; struct bnxt_ptp_cfg *ptp = 
bp->ptp_cfg; uint32_t shift = 0; @@ -2823,7 +2902,7 @@ bnxt_timesync_enable(struct rte_eth_dev *dev) static int bnxt_timesync_disable(struct rte_eth_dev *dev) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; if (!ptp) @@ -2845,7 +2924,7 @@ bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev, struct timespec *timestamp, uint32_t flags __rte_unused) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; uint64_t rx_tstamp_cycles = 0; uint64_t ns; @@ -2863,7 +2942,7 @@ static int bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev, struct timespec *timestamp) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; uint64_t tx_tstamp_cycles = 0; uint64_t ns; @@ -2881,7 +2960,7 @@ bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev, static int bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; if (!ptp) @@ -2895,7 +2974,7 @@ bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) static int bnxt_get_eeprom_length_op(struct rte_eth_dev *dev) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; int rc; uint32_t dir_entries; uint32_t entry_length; @@ -2915,7 +2994,7 @@ static int bnxt_get_eeprom_op(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *in_eeprom) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; uint32_t index; uint32_t offset; @@ -2986,7 +3065,7 @@ static int bnxt_set_eeprom_op(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *in_eeprom) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + 
struct bnxt *bp = dev->data->dev_private; uint8_t index, dir_op; uint16_t type, ext, ordinal, attr; @@ -3026,7 +3105,6 @@ bnxt_set_eeprom_op(struct rte_eth_dev *dev, return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr, in_eeprom->data, in_eeprom->length); - return 0; } /* @@ -3128,54 +3206,27 @@ bool bnxt_stratus_device(struct bnxt *bp) static int bnxt_init_board(struct rte_eth_dev *eth_dev) { - struct bnxt *bp = eth_dev->data->dev_private; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); - int rc; + struct bnxt *bp = eth_dev->data->dev_private; /* enable device (incl. PCI PM wakeup), and bus-mastering */ - if (!pci_dev->mem_resource[0].addr) { - PMD_DRV_LOG(ERR, - "Cannot find PCI device base address, aborting\n"); - rc = -ENODEV; - goto init_err_disable; + bp->bar0 = (void *)pci_dev->mem_resource[0].addr; + bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr; + if (!bp->bar0 || !bp->doorbell_base) { + PMD_DRV_LOG(ERR, "Unable to access Hardware\n"); + return -ENODEV; } bp->eth_dev = eth_dev; bp->pdev = pci_dev; - bp->bar0 = (void *)pci_dev->mem_resource[0].addr; - if (!bp->bar0) { - PMD_DRV_LOG(ERR, "Cannot map device registers, aborting\n"); - rc = -ENOMEM; - goto init_err_release; - } - - if (!pci_dev->mem_resource[2].addr) { - PMD_DRV_LOG(ERR, - "Cannot find PCI device BAR 2 address, aborting\n"); - rc = -ENODEV; - goto init_err_release; - } else { - bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr; - } - return 0; - -init_err_release: - if (bp->bar0) - bp->bar0 = NULL; - if (bp->doorbell_base) - bp->doorbell_base = NULL; - -init_err_disable: - - return rc; } #define ALLOW_FUNC(x) \ { \ - typeof(x) arg = (x); \ + uint32_t arg = (x); \ bp->pf.vf_req_fwd[((arg) >> 5)] &= \ ~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \ } @@ -3200,8 +3251,16 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev) bp->dev_stopped = 1; + eth_dev->dev_ops = &bnxt_dev_ops; + eth_dev->rx_pkt_burst = &bnxt_recv_pkts; + eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; + + /* 
+ * For secondary processes, we don't initialise any further + * as primary has already done this work. + */ if (rte_eal_process_type() != RTE_PROC_PRIMARY) - goto skip_init; + return 0; if (bnxt_vf_pciid(pci_dev->id.device_id)) bp->flags |= BNXT_FLAG_VF; @@ -3212,12 +3271,6 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev) "Board initialization failed rc: %x\n", rc); goto error; } -skip_init: - eth_dev->dev_ops = &bnxt_dev_ops; - eth_dev->rx_pkt_burst = &bnxt_recv_pkts; - eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; - if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return 0; if (pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) { snprintf(mz_name, RTE_MEMZONE_NAMESIZE, @@ -3303,7 +3356,8 @@ skip_init: goto skip_ext_stats; bp->hw_rx_port_stats_ext = (void *) - (bp->hw_rx_port_stats + sizeof(struct rx_port_stats)); + ((uint8_t *)bp->hw_rx_port_stats + + sizeof(struct rx_port_stats)); bp->hw_rx_port_stats_ext_map = bp->hw_rx_port_stats_map + sizeof(struct rx_port_stats); bp->flags |= BNXT_FLAG_EXT_RX_PORT_STATS; @@ -3311,7 +3365,8 @@ skip_init: if (bp->hwrm_spec_code < HWRM_SPEC_CODE_1_9_2) { bp->hw_tx_port_stats_ext = (void *) - (bp->hw_tx_port_stats + sizeof(struct tx_port_stats)); + ((uint8_t *)bp->hw_tx_port_stats + + sizeof(struct tx_port_stats)); bp->hw_tx_port_stats_ext_map = bp->hw_tx_port_stats_map + sizeof(struct tx_port_stats); @@ -3464,22 +3519,16 @@ skip_ext_stats: rc = bnxt_alloc_mem(bp); if (rc) - goto error_free_int; + goto error_free; + + bnxt_init_nic(bp); rc = bnxt_request_int(bp); if (rc) - goto error_free_int; - - bnxt_enable_int(bp); - bnxt_init_nic(bp); + goto error_free; return 0; -error_free_int: - bnxt_disable_int(bp); - bnxt_hwrm_func_buf_unrgtr(bp); - bnxt_free_int(bp); - bnxt_free_mem(bp); error_free: bnxt_dev_uninit(eth_dev); error: @@ -3499,6 +3548,9 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) bnxt_disable_int(bp); bnxt_free_int(bp); bnxt_free_mem(bp); + + bnxt_hwrm_func_buf_unrgtr(bp); + if (bp->grp_info != NULL) { rte_free(bp->grp_info); 
bp->grp_info = NULL; diff --git a/dpdk/drivers/net/bnxt/bnxt_flow.c b/dpdk/drivers/net/bnxt/bnxt_flow.c index 1afe6740..6a599538 100644 --- a/dpdk/drivers/net/bnxt/bnxt_flow.c +++ b/dpdk/drivers/net/bnxt/bnxt_flow.c @@ -715,7 +715,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev, { const struct rte_flow_action *act = bnxt_flow_non_void_action(actions); - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; const struct rte_flow_action_queue *act_q; const struct rte_flow_action_vf *act_vf; struct bnxt_vnic_info *vnic, *vnic0; @@ -900,7 +900,7 @@ bnxt_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; struct bnxt_filter_info *filter; int ret = 0; @@ -998,7 +998,7 @@ bnxt_flow_create(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; struct bnxt_filter_info *filter; struct bnxt_vnic_info *vnic = NULL; bool update_flow = false; @@ -1099,7 +1099,7 @@ bnxt_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; struct bnxt_filter_info *filter = flow->filter; struct bnxt_vnic_info *vnic = flow->vnic; int ret = 0; @@ -1128,7 +1128,7 @@ bnxt_flow_destroy(struct rte_eth_dev *dev, static int bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; struct bnxt_vnic_info *vnic; struct rte_flow *flow; unsigned int i; diff --git a/dpdk/drivers/net/bnxt/bnxt_hwrm.c b/dpdk/drivers/net/bnxt/bnxt_hwrm.c index 99997605..69b45283 100644 --- 
a/dpdk/drivers/net/bnxt/bnxt_hwrm.c +++ b/dpdk/drivers/net/bnxt/bnxt_hwrm.c @@ -139,14 +139,11 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, } if (i >= HWRM_CMD_TIMEOUT) { - PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n", - req->req_type); - goto err_ret; + PMD_DRV_LOG(ERR, "Error(timeout) sending msg 0x%04x\n", + req->req_type); + return -ETIMEDOUT; } return 0; - -err_ret: - return -1; } /* @@ -576,7 +573,9 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings); bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings); bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings); - bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs); + bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows); + bp->max_l2_ctx = + rte_le_to_cpu_16(resp->max_l2_ctxs) + bp->max_rx_em_flows; /* TODO: For now, do not support VMDq/RFS on VFs. */ if (BNXT_PF(bp)) { if (bp->pf.max_vfs) @@ -770,7 +769,12 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp) bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings); bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings); bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps); - bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs); + /* func_resource_qcaps does not return max_rx_em_flows. + * So use the value provided by func_qcaps. 
+ */ + bp->max_l2_ctx = + rte_le_to_cpu_16(resp->max_l2_ctxs) + + bp->max_rx_em_flows; bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics); bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx); } @@ -850,7 +854,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp) rte_mem_lock_page(bp->hwrm_cmd_resp_addr); bp->hwrm_cmd_resp_dma_addr = rte_mem_virt2iova(bp->hwrm_cmd_resp_addr); - if (bp->hwrm_cmd_resp_dma_addr == 0) { + if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) { PMD_DRV_LOG(ERR, "Unable to map response buffer to physical memory.\n"); rc = -ENOMEM; @@ -876,7 +880,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp) rte_mem_lock_page(bp->hwrm_short_cmd_req_addr); bp->hwrm_short_cmd_req_dma_addr = rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr); - if (bp->hwrm_short_cmd_req_dma_addr == 0) { + if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) { rte_free(bp->hwrm_short_cmd_req_addr); PMD_DRV_LOG(ERR, "Unable to map buffer to physical memory.\n"); @@ -1109,7 +1113,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp, case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX: req.ring_type = ring_type; req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id); - req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id); + req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id); if (stats_ctx_id != INVALID_STATS_CTX_ID) enables |= HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID; @@ -1126,7 +1130,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp, PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n", ring_type); HWRM_UNLOCK(); - return -1; + return -EINVAL; } req.enables = rte_cpu_to_le_32(enables); @@ -1136,17 +1140,17 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp, if (rc == 0 && resp->error_code) rc = rte_le_to_cpu_16(resp->error_code); switch (ring_type) { - case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL: + case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL: PMD_DRV_LOG(ERR, "hwrm_ring_alloc cp failed. 
rc:%d\n", rc); HWRM_UNLOCK(); return rc; - case HWRM_RING_FREE_INPUT_RING_TYPE_RX: + case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX: PMD_DRV_LOG(ERR, "hwrm_ring_alloc rx failed. rc:%d\n", rc); HWRM_UNLOCK(); return rc; - case HWRM_RING_FREE_INPUT_RING_TYPE_TX: + case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX: PMD_DRV_LOG(ERR, "hwrm_ring_alloc tx failed. rc:%d\n", rc); HWRM_UNLOCK(); @@ -1259,7 +1263,7 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB); - req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id); + req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -1287,7 +1291,7 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, HWRM_CHECK_RESULT(); - cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id); + cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id); HWRM_UNLOCK(); @@ -1303,7 +1307,7 @@ int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB); - req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id); + req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -1382,6 +1386,11 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp, struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 }; struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr; + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) { + PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id); + return rc; + } + HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB); req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); @@ -1408,8 +1417,8 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) int rc = 0; struct hwrm_vnic_cfg_input req = {.req_type = 0 }; struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_plcmodes_cfg pmodes = { 0 
}; uint32_t ctx_enable_flag = 0; - struct bnxt_plcmodes_cfg pmodes; if (vnic->fw_vnic_id == INVALID_HW_RING_ID) { PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id); @@ -1607,6 +1616,7 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp, req.hash_key_tbl_addr = rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr); req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule); + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -1896,6 +1906,7 @@ static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size * sizeof(*cpr->cp_desc_ring)); cpr->cp_raw_cons = 0; + cpr->valid = 0; } void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index) @@ -2008,7 +2019,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp) return -ENOMEM; bp->hwrm_cmd_resp_dma_addr = rte_mem_virt2iova(bp->hwrm_cmd_resp_addr); - if (bp->hwrm_cmd_resp_dma_addr == 0) { + if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) { PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n"); return -ENOMEM; @@ -2046,7 +2057,7 @@ bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic) STAILQ_FOREACH(flow, &vnic->flow_list, next) { filter = flow->filter; - PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type); + PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type); if (filter->filter_type == HWRM_CFA_EM_FILTER) rc = bnxt_hwrm_clear_em_filter(bp, filter); else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) @@ -2109,6 +2120,11 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp) for (i = bp->nr_vnics - 1; i >= 0; i--) { struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) { + PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n"); + return; + } + bnxt_clear_hwrm_vnic_flows(bp, vnic); bnxt_clear_hwrm_vnic_filters(bp, vnic); @@ -2641,14 +2657,7 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf) 
HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); - if (rc) { - PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc); - return -1; - } else if (resp->error_code) { - rc = rte_le_to_cpu_16(resp->error_code); - PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc); - return -1; - } + HWRM_CHECK_RESULT(); rc = rte_le_to_cpu_16(resp->vlan); HWRM_UNLOCK(); @@ -2683,7 +2692,7 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp) if (!BNXT_PF(bp)) { PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n"); - return -1; + return -EINVAL; } rc = bnxt_hwrm_func_qcaps(bp); @@ -2710,7 +2719,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs) if (!BNXT_PF(bp)) { PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n"); - return -1; + return -EINVAL; } rc = bnxt_hwrm_func_qcaps(bp); @@ -2929,7 +2938,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN); req.req_buf_page_addr0 = rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf)); - if (req.req_buf_page_addr0 == 0) { + if (req.req_buf_page_addr0 == RTE_BAD_IOVA) { PMD_DRV_LOG(ERR, "unable to map buffer address to physical memory\n"); return -ENOMEM; @@ -2949,6 +2958,9 @@ int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp) struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 }; struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr; + if (!(BNXT_PF(bp) && bp->pdev->max_vfs)) + return 0; + HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -3327,12 +3339,11 @@ int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries, rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); - HWRM_UNLOCK(); - if (!rc) { - *entries = rte_le_to_cpu_32(resp->entries); - *length = rte_le_to_cpu_32(resp->entry_length); - } + *entries = 
rte_le_to_cpu_32(resp->entries); + *length = rte_le_to_cpu_32(resp->entry_length); + + HWRM_UNLOCK(); return rc; } @@ -3362,7 +3373,7 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data) if (buf == NULL) return -ENOMEM; dma_handle = rte_mem_virt2iova(buf); - if (dma_handle == 0) { + if (dma_handle == RTE_BAD_IOVA) { PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n"); return -ENOMEM; @@ -3397,7 +3408,7 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index, return -ENOMEM; dma_handle = rte_mem_virt2iova(buf); - if (dma_handle == 0) { + if (dma_handle == RTE_BAD_IOVA) { PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n"); return -ENOMEM; @@ -3451,7 +3462,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type, return -ENOMEM; dma_handle = rte_mem_virt2iova(buf); - if (dma_handle == 0) { + if (dma_handle == RTE_BAD_IOVA) { PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n"); return -ENOMEM; @@ -3515,7 +3526,7 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf, req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics); req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids)); - if (req.vnic_id_tbl_addr == 0) { + if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) { HWRM_UNLOCK(); PMD_DRV_LOG(ERR, "unable to map VNIC ID table address to physical memory\n"); @@ -3559,10 +3570,9 @@ int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf, vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids); vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz, RTE_CACHE_LINE_SIZE); - if (vnic_ids == NULL) { - rc = -ENOMEM; - return rc; - } + if (vnic_ids == NULL) + return -ENOMEM; + for (sz = 0; sz < vnic_id_sz; sz += getpagesize()) rte_mem_lock_page(((char *)vnic_ids) + sz); @@ -3629,10 +3639,8 @@ int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf) vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids); vnic_ids = 
rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz, RTE_CACHE_LINE_SIZE); - if (vnic_ids == NULL) { - rc = -ENOMEM; - return rc; - } + if (vnic_ids == NULL) + return -ENOMEM; for (sz = 0; sz < vnic_id_sz; sz += getpagesize()) rte_mem_lock_page(((char *)vnic_ids) + sz); @@ -3663,7 +3671,7 @@ int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf) PMD_DRV_LOG(ERR, "No default VNIC\n"); exit: rte_free(vnic_ids); - return -1; + return rc; } int bnxt_hwrm_set_em_filter(struct bnxt *bp, diff --git a/dpdk/drivers/net/bnxt/bnxt_irq.c b/dpdk/drivers/net/bnxt/bnxt_irq.c index 7ef7023e..9c913d2d 100644 --- a/dpdk/drivers/net/bnxt/bnxt_irq.c +++ b/dpdk/drivers/net/bnxt/bnxt_irq.c @@ -5,6 +5,7 @@ #include +#include #include #include "bnxt.h" @@ -20,7 +21,7 @@ static void bnxt_int_handler(void *param) { struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; struct bnxt_cp_ring_info *cpr = bp->def_cp_ring; struct cmpl_base *cmp; uint32_t raw_cons; @@ -31,7 +32,7 @@ static void bnxt_int_handler(void *param) raw_cons = cpr->cp_raw_cons; while (1) { - if (!cpr || !cpr->cp_ring_struct) + if (!cpr || !cpr->cp_ring_struct || !cpr->cp_doorbell) return; cons = RING_CMP(cpr->cp_ring_struct, raw_cons); @@ -48,22 +49,45 @@ static void bnxt_int_handler(void *param) B_CP_DB_REARM(cpr, cpr->cp_raw_cons); } -void bnxt_free_int(struct bnxt *bp) +int bnxt_free_int(struct bnxt *bp) { - struct bnxt_irq *irq; + struct rte_intr_handle *intr_handle = &bp->pdev->intr_handle; + struct bnxt_irq *irq = bp->irq_tbl; + int rc = 0; - irq = bp->irq_tbl; - if (irq) { - if (irq->requested) { - rte_intr_disable(&bp->pdev->intr_handle); - rte_intr_callback_unregister(&bp->pdev->intr_handle, - irq->handler, - (void *)bp->eth_dev); - irq->requested = 0; + if (!irq) + return 0; + + if (irq->requested) { + int count = 0; + + /* + * Callback deregistration will fail with rc -EAGAIN if the + * 
callback is currently active. Retry every 50 ms until + * successful or 500 ms has elapsed. + */ + do { + rc = rte_intr_callback_unregister(intr_handle, + irq->handler, + bp->eth_dev); + if (rc >= 0) { + irq->requested = 0; + break; + } + rte_delay_ms(50); + } while (count++ < 10); + + if (rc < 0) { + PMD_DRV_LOG(ERR, "irq cb unregister failed rc: %d\n", + rc); + return rc; } - rte_free((void *)bp->irq_tbl); - bp->irq_tbl = NULL; } + + rte_free(bp->irq_tbl); + bp->irq_tbl = NULL; + + return 0; } void bnxt_disable_int(struct bnxt *bp) @@ -114,14 +138,20 @@ setup_exit: int bnxt_request_int(struct bnxt *bp) { + struct rte_intr_handle *intr_handle = &bp->pdev->intr_handle; + struct bnxt_irq *irq = bp->irq_tbl; int rc = 0; - struct bnxt_irq *irq = bp->irq_tbl; + if (!irq) + return 0; - rte_intr_callback_register(&bp->pdev->intr_handle, irq->handler, - (void *)bp->eth_dev); - rte_intr_enable(&bp->pdev->intr_handle); + if (!irq->requested) { + rc = rte_intr_callback_register(intr_handle, + irq->handler, + bp->eth_dev); + if (!rc) + irq->requested = 1; + } - irq->requested = 1; return rc; } diff --git a/dpdk/drivers/net/bnxt/bnxt_irq.h b/dpdk/drivers/net/bnxt/bnxt_irq.h index 75ba2135..460a97a0 100644 --- a/dpdk/drivers/net/bnxt/bnxt_irq.h +++ b/dpdk/drivers/net/bnxt/bnxt_irq.h @@ -17,7 +17,7 @@ struct bnxt_irq { }; struct bnxt; -void bnxt_free_int(struct bnxt *bp); +int bnxt_free_int(struct bnxt *bp); void bnxt_disable_int(struct bnxt *bp); void bnxt_enable_int(struct bnxt *bp); int bnxt_setup_int(struct bnxt *bp); diff --git a/dpdk/drivers/net/bnxt/bnxt_ring.c b/dpdk/drivers/net/bnxt/bnxt_ring.c index fcbd6bc6..0628a0b4 100644 --- a/dpdk/drivers/net/bnxt/bnxt_ring.c +++ b/dpdk/drivers/net/bnxt/bnxt_ring.c @@ -163,7 +163,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, for (sz = 0; sz < total_alloc_len; sz += getpagesize()) rte_mem_lock_page(((char *)mz->addr) + sz); mz_phys_addr = rte_mem_virt2iova(mz->addr); - if (mz_phys_addr == 0) { + if (mz_phys_addr == 
RTE_BAD_IOVA) { PMD_DRV_LOG(ERR, "unable to map ring address to physical memory\n"); return -ENOMEM; diff --git a/dpdk/drivers/net/bnxt/bnxt_rxq.c b/dpdk/drivers/net/bnxt/bnxt_rxq.c index 17e2909a..1fbc6627 100644 --- a/dpdk/drivers/net/bnxt/bnxt_rxq.c +++ b/dpdk/drivers/net/bnxt/bnxt_rxq.c @@ -288,7 +288,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; struct bnxt_rx_queue *rxq; int rc = 0; @@ -373,7 +373,7 @@ bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id) return rc; } cpr = rxq->cp_ring; - B_CP_DB_ARM(cpr); + B_CP_DB_REARM(cpr, cpr->cp_raw_cons); } return rc; } @@ -399,7 +399,7 @@ bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id) int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id]; struct bnxt_vnic_info *vnic = NULL; @@ -439,7 +439,7 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; struct bnxt_vnic_info *vnic = NULL; struct bnxt_rx_queue *rxq = NULL; diff --git a/dpdk/drivers/net/bnxt/bnxt_rxr.c b/dpdk/drivers/net/bnxt/bnxt_rxr.c index dc695e17..88ea9421 100644 --- a/dpdk/drivers/net/bnxt/bnxt_rxr.c +++ b/dpdk/drivers/net/bnxt/bnxt_rxr.c @@ -362,6 +362,7 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt, int rc = 0; uint8_t agg_buf = 0; uint16_t cmp_type; + uint32_t 
flags2_f = 0; rxcmp = (struct rx_pkt_cmpl *) &cpr->cp_desc_ring[cp_cons]; @@ -440,19 +441,41 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt, mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; } - if (likely(RX_CMP_IP_CS_OK(rxcmp1))) - mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD; - else if (likely(RX_CMP_IP_CS_UNKNOWN(rxcmp1))) - mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN; - else + flags2_f = flags2_0xf(rxcmp1); + /* IP Checksum */ + if (unlikely(((IS_IP_NONTUNNEL_PKT(flags2_f)) && + (RX_CMP_IP_CS_ERROR(rxcmp1))) || + (IS_IP_TUNNEL_PKT(flags2_f) && + (RX_CMP_IP_OUTER_CS_ERROR(rxcmp1))))) { mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; + } else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1))) { + mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN; + } else { + mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + } - if (likely(RX_CMP_L4_CS_OK(rxcmp1))) - mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD; - else if (likely(RX_CMP_L4_CS_UNKNOWN(rxcmp1))) + /* L4 Checksum */ + if (likely(IS_L4_NONTUNNEL_PKT(flags2_f))) { + if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1))) + mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; + else + mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + } else if (IS_L4_TUNNEL_PKT(flags2_f)) { + if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1))) + mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; + else + mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + if (unlikely(RX_CMP_L4_OUTER_CS_ERR2(rxcmp1))) { + mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD; + } else if (unlikely(IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS + (flags2_f))) { + mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN; + } else { + mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD; + } + } else if (unlikely(RX_CMP_L4_CS_UNKNOWN(rxcmp1))) { mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN; - else - mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; + } mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1); diff --git a/dpdk/drivers/net/bnxt/bnxt_rxr.h b/dpdk/drivers/net/bnxt/bnxt_rxr.h index 3815a219..15986ef8 100644 --- a/dpdk/drivers/net/bnxt/bnxt_rxr.h +++ b/dpdk/drivers/net/bnxt/bnxt_rxr.h @@ 
-24,36 +24,116 @@ #define BNXT_TPA_OUTER_L3_OFF(hdr_info) \ ((hdr_info) & 0x1ff) -#define RX_CMP_L4_CS_BITS \ - rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \ - RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC) +#define flags2_0xf(rxcmp1) \ + (((rxcmp1)->flags2) & 0xf) -#define RX_CMP_L4_CS_ERR_BITS \ - rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR | \ - RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR) +/* IP non tunnel can be with or without L4- + * Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP Or + * Ether / (vlan) / outer IP|IP6 / ICMP + * we use '==' instead of '&' because tunnel pkts have all 4 fields set. + */ +#define IS_IP_NONTUNNEL_PKT(flags2_f) \ + ( \ + ((flags2_f) == \ + (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC))) || \ + ((flags2_f) == \ + (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \ + RX_PKT_CMPL_FLAGS2_L4_CS_CALC))) \ + ) -#define RX_CMP_L4_CS_OK(rxcmp1) \ - (((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS) && \ - !((rxcmp1)->errors_v2 & RX_CMP_L4_CS_ERR_BITS)) +/* IP Tunnel pkt must have atleast tunnel-IP-calc set. + * again tunnel ie outer L4 is optional bcoz of + * Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP + * Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 / + * UDP|TCP|SCTP + * Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / Ether / IP|IP6 / + * UDP|TCP|SCTP + * Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / IP|IP6 / + * UDP|TCP|SCTP + * Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP + * Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP + * also inner L3 chksum error is not taken into consideration by DPDK. + */ +#define IS_IP_TUNNEL_PKT(flags2_f) \ + ((flags2_f) & rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)) -#define RX_CMP_L4_CS_UNKNOWN(rxcmp1) \ - !((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS) +/* RX_PKT_CMPL_ERRORS_IP_CS_ERROR only for Non-tunnel pkts. + * For tunnel pkts RX_PKT_CMPL_ERRORS_IP_CS_ERROR is not accounted and treated + * as good csum pkt. 
+ */ +#define RX_CMP_IP_CS_ERROR(rxcmp1) \ + ((rxcmp1)->errors_v2 & \ + rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR)) -#define RX_CMP_IP_CS_ERR_BITS \ - rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR | \ - RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR) +#define RX_CMP_IP_OUTER_CS_ERROR(rxcmp1) \ + ((rxcmp1)->errors_v2 & \ + rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR)) #define RX_CMP_IP_CS_BITS \ rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \ RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC) -#define RX_CMP_IP_CS_OK(rxcmp1) \ - (((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS) && \ - !((rxcmp1)->errors_v2 & RX_CMP_IP_CS_ERR_BITS)) - -#define RX_CMP_IP_CS_UNKNOWN(rxcmp1) \ +#define RX_CMP_IP_CS_UNKNOWN(rxcmp1) \ !((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS) +/* L4 non tunnel pkt- + * Ether / (vlan) / IP6 / UDP|TCP|SCTP + */ +#define IS_L4_NONTUNNEL_PKT(flags2_f) \ + ( \ + ((flags2_f) == \ + (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \ + RX_PKT_CMPL_FLAGS2_L4_CS_CALC)))) + +/* L4 tunnel pkt- + * Outer L4 is not mandatory. 
Eg: GRE- + * Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP + * Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 / + * UDP|TCP|SCTP + */ +#define IS_L4_TUNNEL_PKT_INNER_OUTER_L4_CS(flags2_f) \ + ((flags2_f) == \ + (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \ + RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \ + RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC | \ + RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC))) + +#define IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS(flags2_f) \ + ((flags2_f) == \ + (rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \ + RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \ + RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC))) + +#define IS_L4_TUNNEL_PKT(flags2_f) \ + ( \ + IS_L4_TUNNEL_PKT_INNER_OUTER_L4_CS(flags2_f) || \ + IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS(flags2_f) \ + ) + +#define RX_CMP_L4_CS_BITS \ + rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC) + +#define RX_CMP_L4_CS_UNKNOWN(rxcmp1) \ + !((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS) + +#define RX_CMP_T_L4_CS_BITS \ + rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC) + +#define RX_CMP_T_L4_CS_UNKNOWN(rxcmp1) \ + !((rxcmp1)->flags2 & RX_CMP_T_L4_CS_BITS) + +/* Outer L4 chksum error + */ +#define RX_CMP_L4_OUTER_CS_ERR2(rxcmp1) \ + ((rxcmp1)->errors_v2 & \ + rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR)) + +/* Inner L4 chksum error + */ +#define RX_CMP_L4_INNER_CS_ERR2(rxcmp1) \ + ((rxcmp1)->errors_v2 & \ + rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR)) + #define BNXT_RX_POST_THRESH 32 enum pkt_hash_types { diff --git a/dpdk/drivers/net/bnxt/bnxt_stats.c b/dpdk/drivers/net/bnxt/bnxt_stats.c index c16bf99d..e0f70781 100644 --- a/dpdk/drivers/net/bnxt/bnxt_stats.c +++ b/dpdk/drivers/net/bnxt/bnxt_stats.c @@ -350,6 +350,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev, int rc = 0; unsigned int i; struct bnxt *bp = eth_dev->data->dev_private; + unsigned int num_q_stats; memset(bnxt_stats, 0, sizeof(*bnxt_stats)); if (!(bp->flags & BNXT_FLAG_INIT_DONE)) { @@ -357,7 +358,10 @@ int bnxt_stats_get_op(struct rte_eth_dev 
*eth_dev, return -1; } - for (i = 0; i < bp->rx_cp_nr_rings; i++) { + num_q_stats = RTE_MIN(bp->rx_cp_nr_rings, + (unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS); + + for (i = 0; i < num_q_stats; i++) { struct bnxt_rx_queue *rxq = bp->rx_queues[i]; struct bnxt_cp_ring_info *cpr = rxq->cp_ring; @@ -369,7 +373,10 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev, rte_atomic64_read(&rxq->rx_mbuf_alloc_fail); } - for (i = 0; i < bp->tx_cp_nr_rings; i++) { + num_q_stats = RTE_MIN(bp->tx_cp_nr_rings, + (unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS); + + for (i = 0; i < num_q_stats; i++) { struct bnxt_tx_queue *txq = bp->tx_queues[i]; struct bnxt_cp_ring_info *cpr = txq->cp_ring; @@ -386,7 +393,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev, void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; unsigned int i; if (!(bp->flags & BNXT_FLAG_INIT_DONE)) { @@ -405,7 +412,7 @@ void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev) int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats, unsigned int n) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; unsigned int count, i; uint64_t tx_drop_pkts; @@ -414,11 +421,17 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev, unsigned int stat_size = sizeof(uint64_t); unsigned int stat_count; + memset(xstats, 0, sizeof(*xstats)); + bnxt_hwrm_port_qstats(bp); bnxt_hwrm_func_qstats_tx_drop(bp, 0xffff, &tx_drop_pkts); bnxt_hwrm_ext_port_qstats(bp); - rx_port_stats_ext_cnt = bp->fw_rx_port_stats_ext_size / stat_size; - tx_port_stats_ext_cnt = bp->fw_tx_port_stats_ext_size / stat_size; + rx_port_stats_ext_cnt = RTE_MIN(RTE_DIM(bnxt_rx_ext_stats_strings), + (bp->fw_rx_port_stats_ext_size / + stat_size)); + tx_port_stats_ext_cnt = RTE_MIN(RTE_DIM(bnxt_tx_ext_stats_strings), + (bp->fw_tx_port_stats_ext_size / + stat_size)); count = 
RTE_DIM(bnxt_rx_stats_strings) + RTE_DIM(bnxt_tx_stats_strings) + 1/* For tx_drop_pkts */ + @@ -453,16 +466,6 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev, xstats[count].value = rte_le_to_cpu_64(tx_drop_pkts); count++; - for (i = 0; i < tx_port_stats_ext_cnt; i++) { - uint64_t *tx_stats_ext = (uint64_t *)bp->hw_tx_port_stats_ext; - - xstats[count].value = rte_le_to_cpu_64 - (*(uint64_t *)((char *)tx_stats_ext + - bnxt_tx_ext_stats_strings[i].offset)); - - count++; - } - for (i = 0; i < rx_port_stats_ext_cnt; i++) { uint64_t *rx_stats_ext = (uint64_t *)bp->hw_rx_port_stats_ext; @@ -473,6 +476,16 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev, count++; } + for (i = 0; i < tx_port_stats_ext_cnt; i++) { + uint64_t *tx_stats_ext = (uint64_t *)bp->hw_tx_port_stats_ext; + + xstats[count].value = rte_le_to_cpu_64 + (*(uint64_t *)((char *)tx_stats_ext + + bnxt_tx_ext_stats_strings[i].offset)); + + count++; + } + return stat_count; } @@ -536,7 +549,7 @@ int bnxt_dev_xstats_get_names_op(__rte_unused struct rte_eth_dev *eth_dev, void bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; if (bp->flags & BNXT_FLAG_PORT_STATS && BNXT_SINGLE_PF(bp)) bnxt_hwrm_port_clr_stats(bp); diff --git a/dpdk/drivers/net/bnxt/bnxt_txq.c b/dpdk/drivers/net/bnxt/bnxt_txq.c index b9b975e4..d6b93b44 100644 --- a/dpdk/drivers/net/bnxt/bnxt_txq.c +++ b/dpdk/drivers/net/bnxt/bnxt_txq.c @@ -79,7 +79,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev, unsigned int socket_id, const struct rte_eth_txconf *tx_conf) { - struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt *bp = eth_dev->data->dev_private; struct bnxt_tx_queue *txq; int rc = 0; diff --git a/dpdk/drivers/net/bnxt/bnxt_txq.h b/dpdk/drivers/net/bnxt/bnxt_txq.h index f2c712a7..720ca90c 100644 --- a/dpdk/drivers/net/bnxt/bnxt_txq.h +++ b/dpdk/drivers/net/bnxt/bnxt_txq.h @@ -24,7 
+24,6 @@ struct bnxt_tx_queue { uint8_t wthresh; /* Write-back threshold reg */ uint32_t ctx_curr; /* Hardware context states */ uint8_t tx_deferred_start; /* not in global dev start */ - uint8_t cmpl_next; /* Next BD to trigger a compl */ struct bnxt *bp; int index; diff --git a/dpdk/drivers/net/bnxt/bnxt_txr.c b/dpdk/drivers/net/bnxt/bnxt_txr.c index 39be7bdf..95272c99 100644 --- a/dpdk/drivers/net/bnxt/bnxt_txr.c +++ b/dpdk/drivers/net/bnxt/bnxt_txr.c @@ -103,27 +103,32 @@ int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id) return 0; } -static inline uint32_t bnxt_tx_avail(struct bnxt_tx_ring_info *txr) +static inline uint32_t bnxt_tx_bds_in_hw(struct bnxt_tx_queue *txq) +{ + return ((txq->tx_ring->tx_prod - txq->tx_ring->tx_cons) & + txq->tx_ring->tx_ring_struct->ring_mask); +} + +static inline uint32_t bnxt_tx_avail(struct bnxt_tx_queue *txq) { /* Tell compiler to fetch tx indices from memory. */ rte_compiler_barrier(); - return txr->tx_ring_struct->ring_size - - ((txr->tx_prod - txr->tx_cons) & - txr->tx_ring_struct->ring_mask) - 1; + return ((txq->tx_ring->tx_ring_struct->ring_size - + bnxt_tx_bds_in_hw(txq)) - 1); } static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, struct bnxt_tx_queue *txq, uint16_t *coal_pkts, - uint16_t *cmpl_next) + struct tx_bd_long **last_txbd) { struct bnxt_tx_ring_info *txr = txq->tx_ring; struct tx_bd_long *txbd; struct tx_bd_long_hi *txbd1 = NULL; uint32_t vlan_tag_flags, cfa_action; bool long_bd = false; - uint16_t last_prod = 0; + unsigned short nr_bds = 0; struct rte_mbuf *m_seg; struct bnxt_sw_tx_bd *tx_buf; static const uint32_t lhint_arr[4] = { @@ -140,31 +145,52 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, PKT_TX_TUNNEL_GENEVE)) long_bd = true; + nr_bds = long_bd + tx_pkt->nb_segs; + if (unlikely(bnxt_tx_avail(txq) < nr_bds)) + return -ENOMEM; + + /* Check if number of Tx descriptors is above HW limit */ + if (unlikely(nr_bds > BNXT_MAX_TSO_SEGS)) { + PMD_DRV_LOG(ERR, + "Num 
descriptors %d exceeds HW limit\n", nr_bds); + return -ENOSPC; + } + + /* If packet length is less than minimum packet size, pad it */ + if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) < BNXT_MIN_PKT_SIZE)) { + uint8_t pad = BNXT_MIN_PKT_SIZE - rte_pktmbuf_pkt_len(tx_pkt); + char *seg = rte_pktmbuf_append(tx_pkt, pad); + + if (!seg) { + PMD_DRV_LOG(ERR, + "Failed to pad mbuf by %d bytes\n", + pad); + return -ENOMEM; + } + + /* Note: data_len, pkt len are updated in rte_pktmbuf_append */ + memset(seg, 0, pad); + } + + /* Check non zero data_len */ + RTE_VERIFY(tx_pkt->data_len); + tx_buf = &txr->tx_buf_ring[txr->tx_prod]; tx_buf->mbuf = tx_pkt; - tx_buf->nr_bds = long_bd + tx_pkt->nb_segs; - last_prod = (txr->tx_prod + tx_buf->nr_bds - 1) & - txr->tx_ring_struct->ring_mask; - - if (unlikely(bnxt_tx_avail(txr) < tx_buf->nr_bds)) - return -ENOMEM; + tx_buf->nr_bds = nr_bds; txbd = &txr->tx_desc_ring[txr->tx_prod]; txbd->opaque = *coal_pkts; - txbd->flags_type = tx_buf->nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT; + txbd->flags_type = nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT; txbd->flags_type |= TX_BD_SHORT_FLAGS_COAL_NOW; - if (!*cmpl_next) { - txbd->flags_type |= TX_BD_LONG_FLAGS_NO_CMPL; - } else { - *coal_pkts = 0; - *cmpl_next = false; - } + txbd->flags_type |= TX_BD_LONG_FLAGS_NO_CMPL; txbd->len = tx_pkt->data_len; if (tx_pkt->pkt_len >= 2014) txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K; else txbd->flags_type |= lhint_arr[tx_pkt->pkt_len >> 9]; txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_buf->mbuf)); + *last_txbd = txbd; if (long_bd) { txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG; @@ -193,12 +219,19 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, txbd1->cfa_action = cfa_action; if (tx_pkt->ol_flags & PKT_TX_TCP_SEG) { + uint16_t hdr_size; + /* TSO */ txbd1->lflags |= TX_BD_LONG_LFLAGS_LSO; - txbd1->hdr_size = tx_pkt->l2_len + tx_pkt->l3_len + + hdr_size = tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len + tx_pkt->outer_l2_len + tx_pkt->outer_l3_len; 
+ /* The hdr_size is multiple of 16bit units not 8bit. + * Hence divide by 2. + */ + txbd1->hdr_size = hdr_size >> 1; txbd1->mss = tx_pkt->tso_segsz; + RTE_VERIFY(txbd1->mss); } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) == PKT_TX_OIP_IIP_TCP_UDP_CKSUM) { @@ -281,22 +314,21 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, } m_seg = tx_pkt->next; - /* i is set at the end of the if(long_bd) block */ - while (txr->tx_prod != last_prod) { + while (m_seg) { + /* Check non zero data_len */ + RTE_VERIFY(m_seg->data_len); txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod); tx_buf = &txr->tx_buf_ring[txr->tx_prod]; txbd = &txr->tx_desc_ring[txr->tx_prod]; txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(m_seg)); - txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT; + txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT; txbd->len = m_seg->data_len; m_seg = m_seg->next; } txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END; - if (txbd1) - txbd1->lflags = rte_cpu_to_le_32(txbd1->lflags); txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod); @@ -340,8 +372,7 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq) uint32_t ring_mask = cp_ring_struct->ring_mask; uint32_t opaque = 0; - if (((txq->tx_ring->tx_prod - txq->tx_ring->tx_cons) & - txq->tx_ring->tx_ring_struct->ring_mask) < txq->tx_free_thresh) + if (bnxt_tx_bds_in_hw(txq) < txq->tx_free_thresh) return 0; do { @@ -377,10 +408,11 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq) uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { - struct bnxt_tx_queue *txq = tx_queue; + int rc; uint16_t nb_tx_pkts = 0; uint16_t coal_pkts = 0; - uint16_t cmpl_next = txq->cmpl_next; + struct bnxt_tx_queue *txq = tx_queue; + struct tx_bd_long *last_txbd = NULL; /* Handle TX completions */ bnxt_handle_tx_cp(txq); @@ -391,33 +423,28 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, return 0; } - txq->cmpl_next = 0; /* Handle TX burst request 
*/ for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) { - int rc; - - /* Request a completion on first and last packet */ - cmpl_next |= (nb_pkts == nb_tx_pkts + 1); coal_pkts++; rc = bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq, - &coal_pkts, &cmpl_next); + &coal_pkts, &last_txbd); - if (unlikely(rc)) { - /* Request a completion in next cycle */ - txq->cmpl_next = 1; + if (unlikely(rc)) break; - } } - if (nb_tx_pkts) + if (likely(nb_tx_pkts)) { + /* Request a completion on the last packet */ + last_txbd->flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL; B_TX_DB(txq->tx_ring->tx_doorbell, txq->tx_ring->tx_prod); + } return nb_tx_pkts; } int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id]; dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; @@ -429,7 +456,7 @@ int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) { - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt *bp = dev->data->dev_private; struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id]; /* Handle TX completions */ diff --git a/dpdk/drivers/net/bnxt/bnxt_txr.h b/dpdk/drivers/net/bnxt/bnxt_txr.h index 7f3c7cdb..f802d508 100644 --- a/dpdk/drivers/net/bnxt/bnxt_txr.h +++ b/dpdk/drivers/net/bnxt/bnxt_txr.h @@ -10,6 +10,8 @@ #define MAX_TX_RINGS 16 #define BNXT_TX_PUSH_THRESH 92 +#define BNXT_MAX_TSO_SEGS 32 +#define BNXT_MIN_PKT_SIZE 52 #define B_TX_DB(db, prod) rte_write32((DB_KEY_TX | (prod)), db) diff --git a/dpdk/drivers/net/bnxt/bnxt_vnic.c b/dpdk/drivers/net/bnxt/bnxt_vnic.c index aebfb1f1..cc0b0ae0 100644 --- a/dpdk/drivers/net/bnxt/bnxt_vnic.c +++ b/dpdk/drivers/net/bnxt/bnxt_vnic.c @@ -89,6 +89,9 @@ void bnxt_free_vnic_attributes(struct bnxt *bp) struct bnxt_vnic_info *vnic; unsigned int i; + if 
(bp->vnic_info == NULL) + return; + for (i = 0; i < bp->max_vnics; i++) { vnic = &bp->vnic_info[i]; if (vnic->rss_table) { @@ -143,9 +146,9 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp) PMD_DRV_LOG(WARNING, "Using rte_mem_virt2iova()\n"); mz_phys_addr = rte_mem_virt2iova(mz->addr); - if (mz_phys_addr == 0) { + if (mz_phys_addr == RTE_BAD_IOVA) { PMD_DRV_LOG(ERR, - "unable to map vnic address to physical memory\n"); + "unable to map to physical memory\n"); return -ENOMEM; } } diff --git a/dpdk/drivers/net/bnxt/rte_pmd_bnxt.c b/dpdk/drivers/net/bnxt/rte_pmd_bnxt.c index c298de83..56a58a2f 100644 --- a/dpdk/drivers/net/bnxt/rte_pmd_bnxt.c +++ b/dpdk/drivers/net/bnxt/rte_pmd_bnxt.c @@ -54,7 +54,7 @@ int rte_pmd_bnxt_set_tx_loopback(uint16_t port, uint8_t on) if (!is_bnxt_supported(eth_dev)) return -ENOTSUP; - bp = (struct bnxt *)eth_dev->data->dev_private; + bp = eth_dev->data->dev_private; if (!BNXT_PF(bp)) { PMD_DRV_LOG(ERR, @@ -96,7 +96,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on) if (!is_bnxt_supported(eth_dev)) return -ENOTSUP; - bp = (struct bnxt *)eth_dev->data->dev_private; + bp = eth_dev->data->dev_private; if (!BNXT_PF(bp)) { PMD_DRV_LOG(ERR, @@ -146,7 +146,7 @@ int rte_pmd_bnxt_set_vf_mac_addr(uint16_t port, uint16_t vf, return -ENOTSUP; rte_eth_dev_info_get(port, &dev_info); - bp = (struct bnxt *)dev->data->dev_private; + bp = dev->data->dev_private; if (vf >= dev_info.max_vfs || mac_addr == NULL) return -EINVAL; @@ -180,7 +180,7 @@ int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf, return -ENOTSUP; rte_eth_dev_info_get(port, &dev_info); - bp = (struct bnxt *)eth_dev->data->dev_private; + bp = eth_dev->data->dev_private; if (!bp->pf.active_vfs) return -EINVAL; @@ -231,7 +231,7 @@ int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on) return -ENOTSUP; rte_eth_dev_info_get(port, &dev_info); - bp = (struct bnxt *)dev->data->dev_private; + bp = dev->data->dev_private; if (!BNXT_PF(bp)) { 
PMD_DRV_LOG(ERR, @@ -283,7 +283,7 @@ int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on) return -ENOTSUP; rte_eth_dev_info_get(port, &dev_info); - bp = (struct bnxt *)dev->data->dev_private; + bp = dev->data->dev_private; if (!BNXT_PF(bp)) { PMD_DRV_LOG(ERR, @@ -333,7 +333,7 @@ rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on) return -ENOTSUP; rte_eth_dev_info_get(port, &dev_info); - bp = (struct bnxt *)dev->data->dev_private; + bp = dev->data->dev_private; if (vf >= dev_info.max_vfs) return -EINVAL; @@ -370,7 +370,7 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf, return -ENOTSUP; rte_eth_dev_info_get(port, &dev_info); - bp = (struct bnxt *)dev->data->dev_private; + bp = dev->data->dev_private; if (!bp->pf.vf_info) return -EINVAL; @@ -462,7 +462,7 @@ int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan, if (!is_bnxt_supported(dev)) return -ENOTSUP; - bp = (struct bnxt *)dev->data->dev_private; + bp = dev->data->dev_private; if (!bp->pf.vf_info) return -EINVAL; @@ -551,7 +551,7 @@ int rte_pmd_bnxt_get_vf_stats(uint16_t port, return -ENOTSUP; rte_eth_dev_info_get(port, &dev_info); - bp = (struct bnxt *)dev->data->dev_private; + bp = dev->data->dev_private; if (vf_id >= dev_info.max_vfs) return -EINVAL; @@ -578,7 +578,7 @@ int rte_pmd_bnxt_reset_vf_stats(uint16_t port, return -ENOTSUP; rte_eth_dev_info_get(port, &dev_info); - bp = (struct bnxt *)dev->data->dev_private; + bp = dev->data->dev_private; if (vf_id >= dev_info.max_vfs) return -EINVAL; @@ -604,7 +604,7 @@ int rte_pmd_bnxt_get_vf_rx_status(uint16_t port, uint16_t vf_id) return -ENOTSUP; rte_eth_dev_info_get(port, &dev_info); - bp = (struct bnxt *)dev->data->dev_private; + bp = dev->data->dev_private; if (vf_id >= dev_info.max_vfs) return -EINVAL; @@ -631,7 +631,7 @@ int rte_pmd_bnxt_get_vf_tx_drop_count(uint16_t port, uint16_t vf_id, return -ENOTSUP; rte_eth_dev_info_get(port, &dev_info); - bp = (struct bnxt *)dev->data->dev_private; 
+ bp = dev->data->dev_private; if (vf_id >= dev_info.max_vfs) return -EINVAL; @@ -663,7 +663,7 @@ int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct ether_addr *addr, return -ENOTSUP; rte_eth_dev_info_get(port, &dev_info); - bp = (struct bnxt *)dev->data->dev_private; + bp = dev->data->dev_private; if (vf_id >= dev_info.max_vfs) return -EINVAL; @@ -739,7 +739,7 @@ rte_pmd_bnxt_set_vf_vlan_insert(uint16_t port, uint16_t vf, return -ENOTSUP; rte_eth_dev_info_get(port, &dev_info); - bp = (struct bnxt *)dev->data->dev_private; + bp = dev->data->dev_private; if (vf >= dev_info.max_vfs) return -EINVAL; @@ -776,7 +776,7 @@ int rte_pmd_bnxt_set_vf_persist_stats(uint16_t port, uint16_t vf, uint8_t on) dev = &rte_eth_devices[port]; rte_eth_dev_info_get(port, &dev_info); - bp = (struct bnxt *)dev->data->dev_private; + bp = dev->data->dev_private; if (!BNXT_PF(bp)) { PMD_DRV_LOG(ERR, diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c b/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c index 154257ff..64518200 100644 --- a/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c +++ b/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c @@ -158,8 +158,7 @@ bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev, uint16_t slave_port) { struct rte_eth_dev_info slave_info; struct rte_flow_error error; - struct bond_dev_private *internals = (struct bond_dev_private *) - (bond_dev->data->dev_private); + struct bond_dev_private *internals = bond_dev->data->dev_private; const struct rte_flow_action_queue lacp_queue_conf = { .index = 0, @@ -199,8 +198,7 @@ bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev, int bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) { struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id]; - struct bond_dev_private *internals = (struct bond_dev_private *) - (bond_dev->data->dev_private); + struct bond_dev_private *internals = bond_dev->data->dev_private; struct rte_eth_dev_info bond_info; uint16_t idx; @@ -225,9 +223,7 @@ int 
bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) { struct rte_flow_error error; - struct bond_dev_private *internals = (struct bond_dev_private *) - (bond_dev->data->dev_private); - + struct bond_dev_private *internals = bond_dev->data->dev_private; struct rte_flow_action_queue lacp_queue_conf = { .index = internals->mode4.dedicated_queues.rx_qid, }; @@ -1717,8 +1713,7 @@ slave_configure_slow_queue(struct rte_eth_dev *bonded_eth_dev, struct rte_eth_dev *slave_eth_dev) { int errval = 0; - struct bond_dev_private *internals = (struct bond_dev_private *) - bonded_eth_dev->data->dev_private; + struct bond_dev_private *internals = bonded_eth_dev->data->dev_private; struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id]; if (port->slow_pool == NULL) { @@ -1784,8 +1779,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, uint16_t q_id; struct rte_flow_error flow_error; - struct bond_dev_private *internals = (struct bond_dev_private *) - bonded_eth_dev->data->dev_private; + struct bond_dev_private *internals = bonded_eth_dev->data->dev_private; /* Stop slave */ rte_eth_dev_stop(slave_eth_dev->data->port_id); @@ -2403,8 +2397,8 @@ bond_ethdev_slave_link_status_change_monitor(void *cb_arg) if (cb_arg == NULL) return; - bonded_ethdev = (struct rte_eth_dev *)cb_arg; - internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private; + bonded_ethdev = cb_arg; + internals = bonded_ethdev->data->dev_private; if (!bonded_ethdev->data->dev_started || !internals->link_status_polling_enabled) diff --git a/dpdk/drivers/net/cxgbe/base/adapter.h b/dpdk/drivers/net/cxgbe/base/adapter.h index 47cfc5f5..063e5bd7 100644 --- a/dpdk/drivers/net/cxgbe/base/adapter.h +++ b/dpdk/drivers/net/cxgbe/base/adapter.h @@ -368,7 +368,7 @@ static inline void t4_os_write_unlock(rte_rwlock_t *lock) */ static inline struct port_info *ethdev2pinfo(const struct rte_eth_dev *dev) { - return (struct port_info *)dev->data->dev_private; + return 
dev->data->dev_private; } /** diff --git a/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c b/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c index 7babdfb4..8926f8a2 100644 --- a/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c +++ b/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c @@ -112,7 +112,7 @@ uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *device_info) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; int max_queues = adapter->sge.max_ethqsets / adapter->params.nports; @@ -148,7 +148,7 @@ void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev, void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1, @@ -157,7 +157,7 @@ void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1, @@ -166,7 +166,7 @@ void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; /* TODO: address filters ?? 
*/ @@ -177,7 +177,7 @@ void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; /* TODO: address filters ?? */ @@ -189,7 +189,7 @@ void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; struct sge *s = &adapter->sge; struct rte_eth_link new_link = { 0 }; @@ -224,7 +224,7 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev, */ int cxgbe_dev_set_link_up(struct rte_eth_dev *dev) { - struct port_info *pi = (struct port_info *)(dev->data->dev_private); + struct port_info *pi = dev->data->dev_private; struct adapter *adapter = pi->adapter; unsigned int work_done, budget = 32; struct sge *s = &adapter->sge; @@ -250,7 +250,7 @@ int cxgbe_dev_set_link_up(struct rte_eth_dev *dev) */ int cxgbe_dev_set_link_down(struct rte_eth_dev *dev) { - struct port_info *pi = (struct port_info *)(dev->data->dev_private); + struct port_info *pi = dev->data->dev_private; struct adapter *adapter = pi->adapter; unsigned int work_done, budget = 32; struct sge *s = &adapter->sge; @@ -273,7 +273,7 @@ int cxgbe_dev_set_link_down(struct rte_eth_dev *dev) int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; struct rte_eth_dev_info dev_info; int err; @@ -306,7 +306,7 @@ int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) */ void cxgbe_dev_close(struct rte_eth_dev *eth_dev) { - struct port_info *pi = (struct port_info 
*)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; CXGBE_FUNC_TRACE(); @@ -328,7 +328,7 @@ void cxgbe_dev_close(struct rte_eth_dev *eth_dev) */ int cxgbe_dev_start(struct rte_eth_dev *eth_dev) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct rte_eth_rxmode *rx_conf = ð_dev->data->dev_conf.rxmode; struct adapter *adapter = pi->adapter; int err = 0, i; @@ -386,7 +386,7 @@ out: */ void cxgbe_dev_stop(struct rte_eth_dev *eth_dev) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; CXGBE_FUNC_TRACE(); @@ -406,7 +406,7 @@ void cxgbe_dev_stop(struct rte_eth_dev *eth_dev) int cxgbe_dev_configure(struct rte_eth_dev *eth_dev) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; int err; @@ -466,7 +466,7 @@ int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, unsigned int socket_id, const struct rte_eth_txconf *tx_conf __rte_unused) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; struct sge *s = &adapter->sge; struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx]; @@ -531,7 +531,7 @@ void cxgbe_dev_tx_queue_release(void *q) int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) { int ret; - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adap = pi->adapter; struct sge_rspq *q; @@ -550,7 +550,7 @@ int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, 
uint16_t rx_queue_id) { int ret; - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adap = pi->adapter; struct sge_rspq *q; @@ -571,7 +571,7 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, const struct rte_eth_rxconf *rx_conf __rte_unused, struct rte_mempool *mp) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; struct sge *s = &adapter->sge; struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx]; @@ -668,7 +668,7 @@ void cxgbe_dev_rx_queue_release(void *q) static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; struct sge *s = &adapter->sge; struct port_stats ps; @@ -716,7 +716,7 @@ static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev, */ static void cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; struct sge *s = &adapter->sge; unsigned int i; @@ -742,7 +742,7 @@ static void cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev) static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev, struct rte_eth_fc_conf *fc_conf) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct link_config *lc = &pi->link_cfg; int rx_pause, tx_pause; @@ -764,7 +764,7 @@ static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev, static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev, struct rte_eth_fc_conf *fc_conf) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + 
struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; struct link_config *lc = &pi->link_cfg; @@ -810,7 +810,7 @@ cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev) static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf) { - struct port_info *pi = (struct port_info *)(dev->data->dev_private); + struct port_info *pi = dev->data->dev_private; struct adapter *adapter = pi->adapter; int err; @@ -840,7 +840,7 @@ static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev, static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf) { - struct port_info *pi = (struct port_info *)(dev->data->dev_private); + struct port_info *pi = dev->data->dev_private; struct adapter *adapter = pi->adapter; u64 rss_hf = 0; u64 flags = 0; @@ -949,7 +949,7 @@ static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v) static int cxgbe_get_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *e) { - struct port_info *pi = (struct port_info *)(dev->data->dev_private); + struct port_info *pi = dev->data->dev_private; struct adapter *adapter = pi->adapter; u32 i, err = 0; u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0); @@ -970,7 +970,7 @@ static int cxgbe_get_eeprom(struct rte_eth_dev *dev, static int cxgbe_set_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom) { - struct port_info *pi = (struct port_info *)(dev->data->dev_private); + struct port_info *pi = dev->data->dev_private; struct adapter *adapter = pi->adapter; u8 *buf; int err = 0; @@ -1028,7 +1028,7 @@ out: static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; return t4_get_regs_len(adapter) / sizeof(uint32_t); @@ -1037,7 +1037,7 @@ static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev) static int 
cxgbe_get_regs(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) | @@ -1058,7 +1058,7 @@ static int cxgbe_get_regs(struct rte_eth_dev *eth_dev, int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr) { - struct port_info *pi = (struct port_info *)(dev->data->dev_private); + struct port_info *pi = dev->data->dev_private; int ret; ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt, (u8 *)addr); @@ -1115,7 +1115,7 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = { static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev) { struct rte_pci_device *pci_dev; - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = NULL; char name[RTE_ETH_NAME_MAX_LEN]; int err = 0; @@ -1186,7 +1186,7 @@ out_free_adapter: static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adap = pi->adapter; /* Free up other ports and all resources */ diff --git a/dpdk/drivers/net/cxgbe/cxgbe_filter.c b/dpdk/drivers/net/cxgbe/cxgbe_filter.c index 3a7912e4..93212701 100644 --- a/dpdk/drivers/net/cxgbe/cxgbe_filter.c +++ b/dpdk/drivers/net/cxgbe/cxgbe_filter.c @@ -853,7 +853,7 @@ int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id, struct ch_filter_specification *fs, struct filter_ctx *ctx) { - struct port_info *pi = (struct port_info *)(dev->data->dev_private); + struct port_info *pi = dev->data->dev_private; struct adapter *adapter = pi->adapter; struct filter_entry *f; unsigned int chip_ver; diff --git a/dpdk/drivers/net/cxgbe/cxgbe_main.c b/dpdk/drivers/net/cxgbe/cxgbe_main.c 
index 6a3cbc1e..38938ecc 100644 --- a/dpdk/drivers/net/cxgbe/cxgbe_main.c +++ b/dpdk/drivers/net/cxgbe/cxgbe_main.c @@ -477,7 +477,7 @@ static inline void init_rspq(struct adapter *adap, struct sge_rspq *q, int cxgbe_cfg_queue_count(struct rte_eth_dev *eth_dev) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adap = pi->adapter; struct sge *s = &adap->sge; unsigned int max_queues = s->max_ethqsets / adap->params.nports; @@ -504,8 +504,7 @@ int cxgbe_cfg_queue_count(struct rte_eth_dev *eth_dev) void cxgbe_cfg_queues(struct rte_eth_dev *eth_dev) { - struct rte_config *config = rte_eal_get_configuration(); - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adap = pi->adapter; struct sge *s = &adap->sge; unsigned int i, nb_ports = 0, qidx = 0; @@ -527,8 +526,8 @@ void cxgbe_cfg_queues(struct rte_eth_dev *eth_dev) (adap->params.nports - nb_ports)) / nb_ports; - if (q_per_port > config->lcore_count) - q_per_port = config->lcore_count; + if (q_per_port > rte_lcore_count()) + q_per_port = rte_lcore_count(); for_each_port(adap, i) { struct port_info *pi = adap2pinfo(adap, i); @@ -1825,7 +1824,7 @@ int cxgbe_probe(struct adapter *adapter) goto out_free; allocate_mac: - pi = (struct port_info *)eth_dev->data->dev_private; + pi = eth_dev->data->dev_private; adapter->port[i] = pi; pi->eth_dev = eth_dev; pi->adapter = adapter; diff --git a/dpdk/drivers/net/cxgbe/cxgbevf_ethdev.c b/dpdk/drivers/net/cxgbe/cxgbevf_ethdev.c index a6458d53..b9606be8 100644 --- a/dpdk/drivers/net/cxgbe/cxgbevf_ethdev.c +++ b/dpdk/drivers/net/cxgbe/cxgbevf_ethdev.c @@ -36,7 +36,7 @@ static int cxgbevf_dev_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct 
adapter *adapter = pi->adapter; struct sge *s = &adapter->sge; struct port_stats ps; @@ -107,7 +107,7 @@ static const struct eth_dev_ops cxgbevf_eth_dev_ops = { */ static int eth_cxgbevf_dev_init(struct rte_eth_dev *eth_dev) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct rte_pci_device *pci_dev; char name[RTE_ETH_NAME_MAX_LEN]; struct adapter *adapter = NULL; @@ -179,7 +179,7 @@ out_free_adapter: static int eth_cxgbevf_dev_uninit(struct rte_eth_dev *eth_dev) { - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; struct adapter *adap = pi->adapter; /* Free up other ports and all resources */ diff --git a/dpdk/drivers/net/cxgbe/cxgbevf_main.c b/dpdk/drivers/net/cxgbe/cxgbevf_main.c index c46bc98a..d05db06b 100644 --- a/dpdk/drivers/net/cxgbe/cxgbevf_main.c +++ b/dpdk/drivers/net/cxgbe/cxgbevf_main.c @@ -230,7 +230,7 @@ int cxgbevf_probe(struct adapter *adapter) goto out_free; allocate_mac: - pi = (struct port_info *)eth_dev->data->dev_private; + pi = eth_dev->data->dev_private; adapter->port[i] = pi; pi->eth_dev = eth_dev; pi->adapter = adapter; diff --git a/dpdk/drivers/net/cxgbe/sge.c b/dpdk/drivers/net/cxgbe/sge.c index 663c0a79..ea28ea06 100644 --- a/dpdk/drivers/net/cxgbe/sge.c +++ b/dpdk/drivers/net/cxgbe/sge.c @@ -1137,7 +1137,7 @@ out_free: (unlikely(m->pkt_len > max_pkt_len))) goto out_free; - pi = (struct port_info *)txq->data->dev_private; + pi = txq->data->dev_private; adap = pi->adapter; cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS; @@ -1882,7 +1882,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, int ret, flsz = 0; struct fw_iq_cmd c; struct sge *s = &adap->sge; - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; char z_name[RTE_MEMZONE_NAMESIZE]; char 
z_name_sw[RTE_MEMZONE_NAMESIZE]; unsigned int nb_refill; @@ -2152,7 +2152,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, int ret, nentries; struct fw_eq_eth_cmd c; struct sge *s = &adap->sge; - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; char z_name[RTE_MEMZONE_NAMESIZE]; char z_name_sw[RTE_MEMZONE_NAMESIZE]; u8 pciechan; @@ -2231,7 +2231,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, int ret, nentries; struct fw_eq_ctrl_cmd c; struct sge *s = &adap->sge; - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; char z_name[RTE_MEMZONE_NAMESIZE]; char z_name_sw[RTE_MEMZONE_NAMESIZE]; diff --git a/dpdk/drivers/net/dpaa/dpaa_ethdev.c b/dpdk/drivers/net/dpaa/dpaa_ethdev.c index 5448a2ca..5fbf152f 100644 --- a/dpdk/drivers/net/dpaa/dpaa_ethdev.c +++ b/dpdk/drivers/net/dpaa/dpaa_ethdev.c @@ -1460,6 +1460,16 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused, PMD_INIT_FUNC_TRACE(); + if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) > + RTE_PKTMBUF_HEADROOM) { + DPAA_PMD_ERR( + "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA Annotation req(%d)", + RTE_PKTMBUF_HEADROOM, + DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE); + + return -1; + } + /* In case of secondary process, the device is already configured * and no further action is required, except portal initialization * and verifying secondary attachment to port name. 
diff --git a/dpdk/drivers/net/dpaa/dpaa_ethdev.h b/dpdk/drivers/net/dpaa/dpaa_ethdev.h index 2fc72317..acbcaf6e 100644 --- a/dpdk/drivers/net/dpaa/dpaa_ethdev.h +++ b/dpdk/drivers/net/dpaa/dpaa_ethdev.h @@ -21,10 +21,6 @@ #define DPAA_MBUF_HW_ANNOTATION 64 #define DPAA_FD_PTA_SIZE 64 -#if (DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) > RTE_PKTMBUF_HEADROOM -#error "Annotation requirement is more than RTE_PKTMBUF_HEADROOM" -#endif - /* mbuf->seqn will be used to store event entry index for * driver specific usage. For parallel mode queues, invalid * index will be set and for atomic mode queues, valid value diff --git a/dpdk/drivers/net/dpaa/dpaa_rxtx.c b/dpdk/drivers/net/dpaa/dpaa_rxtx.c index c4471c22..ba01bd17 100644 --- a/dpdk/drivers/net/dpaa/dpaa_rxtx.c +++ b/dpdk/drivers/net/dpaa/dpaa_rxtx.c @@ -952,6 +952,16 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) goto send_pkts; } } else { + /* TODO not supporting sg for external bufs*/ + if (unlikely(mbuf->nb_segs > 1)) { + /* Set frames_to_send & nb_bufs so + * that packets are transmitted till + * previous frame. 
+ */ + frames_to_send = loop; + nb_bufs = loop; + goto send_pkts; + } state = tx_on_external_pool(q, mbuf, &fd_arr[loop]); if (unlikely(state)) { diff --git a/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c b/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c index 39f85ae7..e3266085 100644 --- a/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c +++ b/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c @@ -2098,6 +2098,16 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv, struct rte_eth_dev *eth_dev; int diag; + if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) > + RTE_PKTMBUF_HEADROOM) { + DPAA2_PMD_ERR( + "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA2 Annotation req(%d)", + RTE_PKTMBUF_HEADROOM, + DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE); + + return -1; + } + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name); if (!eth_dev) diff --git a/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c b/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c index eab943dc..03320ca1 100644 --- a/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c +++ b/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c @@ -352,8 +352,9 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, DPAA2_SET_FD_LEN(fd, mbuf->pkt_len); DPAA2_SET_ONLY_FD_BPID(fd, bpid); DPAA2_SET_FD_OFFSET(fd, temp->data_off); - DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL); DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg); + DPAA2_RESET_FD_FRC(fd); + DPAA2_RESET_FD_CTRL(fd); /*Set Scatter gather table and Scatter gather entries*/ sgt = (struct qbman_sge *)( (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) diff --git a/dpdk/drivers/net/e1000/base/e1000_ich8lan.h b/dpdk/drivers/net/e1000/base/e1000_ich8lan.h index bc4ed1dd..9ee94f6b 100644 --- a/dpdk/drivers/net/e1000/base/e1000_ich8lan.h +++ b/dpdk/drivers/net/e1000/base/e1000_ich8lan.h @@ -133,6 +133,7 @@ POSSIBILITY OF SUCH DAMAGE. 
#define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs*/ #define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */ #define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29) +#define E1000_TARC0_CB_MULTIQ_2_REQ (1 << 29) #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL #define E1000_ICH_RAR_ENTRIES 7 diff --git a/dpdk/drivers/net/e1000/em_rxtx.c b/dpdk/drivers/net/e1000/em_rxtx.c index 005e1ea9..67c7ec70 100644 --- a/dpdk/drivers/net/e1000/em_rxtx.c +++ b/dpdk/drivers/net/e1000/em_rxtx.c @@ -616,20 +616,20 @@ eth_em_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, m = tx_pkts[i]; if (m->ol_flags & E1000_TX_OFFLOAD_NOTSUP_MASK) { - rte_errno = -ENOTSUP; + rte_errno = ENOTSUP; return i; } #ifdef RTE_LIBRTE_ETHDEV_DEBUG ret = rte_validate_tx_offload(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } #endif ret = rte_net_intel_cksum_prepare(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } } @@ -1964,6 +1964,22 @@ eth_em_tx_init(struct rte_eth_dev *dev) tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); + /* SPT and CNP Si errata workaround to avoid data corruption */ + if (hw->mac.type == e1000_pch_spt) { + uint32_t reg_val; + reg_val = E1000_READ_REG(hw, E1000_IOSFPC); + reg_val |= E1000_RCTL_RDMTS_HEX; + E1000_WRITE_REG(hw, E1000_IOSFPC, reg_val); + + /* Dropping the number of outstanding requests from + * 3 to 2 in order to avoid a buffer overrun. + */ + reg_val = E1000_READ_REG(hw, E1000_TARC(0)); + reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ; + reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ; + E1000_WRITE_REG(hw, E1000_TARC(0), reg_val); + } + /* This write will effectively turn on the transmit unit. 
*/ E1000_WRITE_REG(hw, E1000_TCTL, tctl); } diff --git a/dpdk/drivers/net/e1000/igb_ethdev.c b/dpdk/drivers/net/e1000/igb_ethdev.c index 87c9aedf..96ccf976 100644 --- a/dpdk/drivers/net/e1000/igb_ethdev.c +++ b/dpdk/drivers/net/e1000/igb_ethdev.c @@ -5021,8 +5021,7 @@ static void igb_start_timecounters(struct rte_eth_dev *dev) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct e1000_adapter *adapter = - (struct e1000_adapter *)dev->data->dev_private; + struct e1000_adapter *adapter = dev->data->dev_private; uint32_t incval = 1; uint32_t shift = 0; uint64_t mask = E1000_CYCLECOUNTER_MASK; @@ -5073,8 +5072,7 @@ igb_start_timecounters(struct rte_eth_dev *dev) static int igb_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) { - struct e1000_adapter *adapter = - (struct e1000_adapter *)dev->data->dev_private; + struct e1000_adapter *adapter = dev->data->dev_private; adapter->systime_tc.nsec += delta; adapter->rx_tstamp_tc.nsec += delta; @@ -5087,8 +5085,7 @@ static int igb_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) { uint64_t ns; - struct e1000_adapter *adapter = - (struct e1000_adapter *)dev->data->dev_private; + struct e1000_adapter *adapter = dev->data->dev_private; ns = rte_timespec_to_ns(ts); @@ -5104,8 +5101,7 @@ static int igb_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) { uint64_t ns, systime_cycles; - struct e1000_adapter *adapter = - (struct e1000_adapter *)dev->data->dev_private; + struct e1000_adapter *adapter = dev->data->dev_private; systime_cycles = igb_read_systime_cyclecounter(dev); ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); @@ -5198,8 +5194,7 @@ igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev, uint32_t flags __rte_unused) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct e1000_adapter *adapter = - (struct e1000_adapter *)dev->data->dev_private; + struct e1000_adapter *adapter = dev->data->dev_private; 
uint32_t tsync_rxctl; uint64_t rx_tstamp_cycles; uint64_t ns; @@ -5220,8 +5215,7 @@ igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev, struct timespec *timestamp) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct e1000_adapter *adapter = - (struct e1000_adapter *)dev->data->dev_private; + struct e1000_adapter *adapter = dev->data->dev_private; uint32_t tsync_txctl; uint64_t tx_tstamp_cycles; uint64_t ns; diff --git a/dpdk/drivers/net/e1000/igb_rxtx.c b/dpdk/drivers/net/e1000/igb_rxtx.c index ab0a80e1..28fff6ab 100644 --- a/dpdk/drivers/net/e1000/igb_rxtx.c +++ b/dpdk/drivers/net/e1000/igb_rxtx.c @@ -629,25 +629,25 @@ eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, if ((m->tso_segsz > IGB_TSO_MAX_MSS) || (m->l2_len + m->l3_len + m->l4_len > IGB_TSO_MAX_HDRLEN)) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return i; } if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) { - rte_errno = -ENOTSUP; + rte_errno = ENOTSUP; return i; } #ifdef RTE_LIBRTE_ETHDEV_DEBUG ret = rte_validate_tx_offload(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } #endif ret = rte_net_intel_cksum_prepare(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } } diff --git a/dpdk/drivers/net/ena/base/ena_com.c b/dpdk/drivers/net/ena/base/ena_com.c index 4abf1a28..f22d67cd 100644 --- a/dpdk/drivers/net/ena/base/ena_com.c +++ b/dpdk/drivers/net/ena/base/ena_com.c @@ -526,7 +526,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c struct ena_com_admin_queue *admin_queue) { unsigned long flags = 0; - unsigned long timeout; + uint64_t timeout; int ret; timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout); diff --git a/dpdk/drivers/net/ena/ena_ethdev.c b/dpdk/drivers/net/ena/ena_ethdev.c index 6f424337..0779bb77 100644 --- a/dpdk/drivers/net/ena/ena_ethdev.c +++ b/dpdk/drivers/net/ena/ena_ethdev.c @@ -39,7 +39,6 @@ #include #include #include -#include #include 
#include "ena_ethdev.h" @@ -281,22 +280,6 @@ static const struct eth_dev_ops ena_dev_ops = { .reta_query = ena_rss_reta_query, }; -#define NUMA_NO_NODE SOCKET_ID_ANY - -static inline int ena_cpu_to_node(int cpu) -{ - struct rte_config *config = rte_eal_get_configuration(); - struct rte_fbarray *arr = &config->mem_config->memzones; - const struct rte_memzone *mz; - - if (unlikely(cpu >= RTE_MAX_MEMZONE)) - return NUMA_NO_NODE; - - mz = rte_fbarray_get(arr, cpu); - - return mz->socket_id; -} - static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf, struct ena_com_rx_ctx *ena_rx_ctx) { @@ -356,12 +339,13 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, } /* check if L4 checksum is needed */ - if ((mbuf->ol_flags & PKT_TX_TCP_CKSUM) && + if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) && (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) { ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; ena_tx_ctx->l4_csum_enable = true; - } else if ((mbuf->ol_flags & PKT_TX_UDP_CKSUM) && - (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) { + } else if (((mbuf->ol_flags & PKT_TX_L4_MASK) == + PKT_TX_UDP_CKSUM) && + (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) { ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; ena_tx_ctx->l4_csum_enable = true; } else { @@ -517,8 +501,7 @@ static void ena_close(struct rte_eth_dev *dev) { struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; - struct ena_adapter *adapter = - (struct ena_adapter *)(dev->data->dev_private); + struct ena_adapter *adapter = dev->data->dev_private; if (adapter->state == ENA_ADAPTER_STATE_RUNNING) ena_stop(dev); @@ -559,8 +542,7 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) { - struct ena_adapter *adapter = - (struct ena_adapter *)(dev->data->dev_private); + struct ena_adapter *adapter = dev->data->dev_private; struct ena_com_dev *ena_dev = &adapter->ena_dev; int 
rc, i; u16 entry_value; @@ -615,8 +597,7 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) { - struct ena_adapter *adapter = - (struct ena_adapter *)(dev->data->dev_private); + struct ena_adapter *adapter = dev->data->dev_private; struct ena_com_dev *ena_dev = &adapter->ena_dev; int rc; int i; @@ -788,9 +769,7 @@ static int ena_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) { struct rte_eth_link *link = &dev->data->dev_link; - struct ena_adapter *adapter; - - adapter = (struct ena_adapter *)(dev->data->dev_private); + struct ena_adapter *adapter = dev->data->dev_private; link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN; link->link_speed = ETH_SPEED_NUM_NONE; @@ -802,8 +781,7 @@ static int ena_link_update(struct rte_eth_dev *dev, static int ena_queue_start_all(struct rte_eth_dev *dev, enum ena_ring_type ring_type) { - struct ena_adapter *adapter = - (struct ena_adapter *)(dev->data->dev_private); + struct ena_adapter *adapter = dev->data->dev_private; struct ena_ring *queues = NULL; int nb_queues; int i = 0; @@ -908,8 +886,7 @@ ena_calc_queue_size(struct ena_com_dev *ena_dev, static void ena_stats_restart(struct rte_eth_dev *dev) { - struct ena_adapter *adapter = - (struct ena_adapter *)(dev->data->dev_private); + struct ena_adapter *adapter = dev->data->dev_private; rte_atomic64_init(&adapter->drv_stats->ierrors); rte_atomic64_init(&adapter->drv_stats->oerrors); @@ -920,8 +897,7 @@ static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { struct ena_admin_basic_stats ena_stats; - struct ena_adapter *adapter = - (struct ena_adapter *)(dev->data->dev_private); + struct ena_adapter *adapter = dev->data->dev_private; struct ena_com_dev *ena_dev = &adapter->ena_dev; int rc; @@ -962,7 +938,7 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) ena_assert_msg(dev->data != NULL, "Uninitialized device"); 
ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device"); - adapter = (struct ena_adapter *)(dev->data->dev_private); + adapter = dev->data->dev_private; ena_dev = &adapter->ena_dev; ena_assert_msg(ena_dev != NULL, "Uninitialized device"); @@ -986,8 +962,7 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) static int ena_start(struct rte_eth_dev *dev) { - struct ena_adapter *adapter = - (struct ena_adapter *)(dev->data->dev_private); + struct ena_adapter *adapter = dev->data->dev_private; uint64_t ticks; int rc = 0; @@ -1032,8 +1007,7 @@ err_start_tx: static void ena_stop(struct rte_eth_dev *dev) { - struct ena_adapter *adapter = - (struct ena_adapter *)(dev->data->dev_private); + struct ena_adapter *adapter = dev->data->dev_private; struct ena_com_dev *ena_dev = &adapter->ena_dev; int rc; @@ -1081,7 +1055,7 @@ static int ena_create_io_queue(struct ena_ring *ring) } ctx.qid = ena_qid; ctx.msix_vector = -1; /* interrupts not used */ - ctx.numa_node = ena_cpu_to_node(ring->id); + ctx.numa_node = ring->numa_socket_id; rc = ena_com_create_io_queue(ena_dev, &ctx); if (rc) { @@ -1124,8 +1098,7 @@ static void ena_queue_stop(struct ena_ring *ring) static void ena_queue_stop_all(struct rte_eth_dev *dev, enum ena_ring_type ring_type) { - struct ena_adapter *adapter = - (struct ena_adapter *)(dev->data->dev_private); + struct ena_adapter *adapter = dev->data->dev_private; struct ena_ring *queues = NULL; uint16_t nb_queues, i; @@ -1176,12 +1149,11 @@ static int ena_queue_start(struct ena_ring *ring) static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, - __rte_unused unsigned int socket_id, + unsigned int socket_id, const struct rte_eth_txconf *tx_conf) { struct ena_ring *txq = NULL; - struct ena_adapter *adapter = - (struct ena_adapter *)(dev->data->dev_private); + struct ena_adapter *adapter = dev->data->dev_private; unsigned int i; txq = &adapter->tx_ring[queue_idx]; @@ -1211,6 +1183,7 @@ static int 
ena_tx_queue_setup(struct rte_eth_dev *dev, txq->next_to_clean = 0; txq->next_to_use = 0; txq->ring_size = nb_desc; + txq->numa_socket_id = socket_id; txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info", sizeof(struct ena_tx_buffer) * @@ -1248,12 +1221,11 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev, static int ena_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, - __rte_unused unsigned int socket_id, + unsigned int socket_id, __rte_unused const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) { - struct ena_adapter *adapter = - (struct ena_adapter *)(dev->data->dev_private); + struct ena_adapter *adapter = dev->data->dev_private; struct ena_ring *rxq = NULL; int i; @@ -1283,6 +1255,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev, rxq->next_to_clean = 0; rxq->next_to_use = 0; rxq->ring_size = nb_desc; + rxq->numa_socket_id = socket_id; rxq->mb_pool = mp; rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info", @@ -1492,7 +1465,7 @@ err_mmio_read_less: static void ena_interrupt_handler_rte(void *cb_arg) { - struct ena_adapter *adapter = (struct ena_adapter *)cb_arg; + struct ena_adapter *adapter = cb_arg; struct ena_com_dev *ena_dev = &adapter->ena_dev; ena_com_admin_q_comp_intr_handler(ena_dev); @@ -1529,7 +1502,7 @@ static void check_for_admin_com_state(struct ena_adapter *adapter) static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer, void *arg) { - struct ena_adapter *adapter = (struct ena_adapter *)arg; + struct ena_adapter *adapter = arg; struct rte_eth_dev *dev = adapter->rte_dev; check_for_missing_keep_alive(adapter); @@ -1564,8 +1537,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) { struct rte_pci_device *pci_dev; struct rte_intr_handle *intr_handle; - struct ena_adapter *adapter = - (struct ena_adapter *)(eth_dev->data->dev_private); + struct ena_adapter *adapter = eth_dev->data->dev_private; struct ena_com_dev *ena_dev = &adapter->ena_dev; struct 
ena_com_dev_get_features_ctx get_feat_ctx; int queue_size, rc; @@ -1700,8 +1672,7 @@ err: static void ena_destroy_device(struct rte_eth_dev *eth_dev) { - struct ena_adapter *adapter = - (struct ena_adapter *)(eth_dev->data->dev_private); + struct ena_adapter *adapter = eth_dev->data->dev_private; struct ena_com_dev *ena_dev = &adapter->ena_dev; if (adapter->state == ENA_ADAPTER_STATE_FREE) @@ -1740,8 +1711,7 @@ static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev) static int ena_dev_configure(struct rte_eth_dev *dev) { - struct ena_adapter *adapter = - (struct ena_adapter *)(dev->data->dev_private); + struct ena_adapter *adapter = dev->data->dev_private; adapter->state = ENA_ADAPTER_STATE_CONFIG; @@ -1787,7 +1757,7 @@ static void ena_infos_get(struct rte_eth_dev *dev, ena_assert_msg(dev->data != NULL, "Uninitialized device"); ena_assert_msg(dev->data->dev_private != NULL, "Uninitialized device"); - adapter = (struct ena_adapter *)(dev->data->dev_private); + adapter = dev->data->dev_private; ena_dev = &adapter->ena_dev; ena_assert_msg(ena_dev != NULL, "Uninitialized device"); @@ -1947,6 +1917,12 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, /* fill mbuf attributes if any */ ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx); + + if (unlikely(mbuf_head->ol_flags & + (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) + rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors); + + mbuf_head->hash.rss = ena_rx_ctx.hash; /* pass to DPDK application head mbuf */ @@ -2138,10 +2114,6 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, /* Set TX offloads flags, if applicable */ ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads); - if (unlikely(mbuf->ol_flags & - (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD))) - rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors); - rte_prefetch0(tx_pkts[(sent_idx + 4) & ring_mask]); /* Process first segment taking into @@ -2267,7 +2239,7 @@ static void 
ena_update_on_link_change(void *adapter_data, struct ena_admin_aenq_link_change_desc *aenq_link_desc; uint32_t status; - adapter = (struct ena_adapter *)adapter_data; + adapter = adapter_data; aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; eth_dev = adapter->rte_dev; @@ -2281,7 +2253,7 @@ static void ena_update_on_link_change(void *adapter_data, static void ena_notification(void *data, struct ena_admin_aenq_entry *aenq_e) { - struct ena_adapter *adapter = (struct ena_adapter *)data; + struct ena_adapter *adapter = data; struct ena_admin_ena_hw_hints *hints; if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION) @@ -2304,7 +2276,7 @@ static void ena_notification(void *data, static void ena_keep_alive(void *adapter_data, __rte_unused struct ena_admin_aenq_entry *aenq_e) { - struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; + struct ena_adapter *adapter = adapter_data; adapter->timestamp_wd = rte_get_timer_cycles(); } diff --git a/dpdk/drivers/net/ena/ena_ethdev.h b/dpdk/drivers/net/ena/ena_ethdev.h index 322e90ac..a4aba7fd 100644 --- a/dpdk/drivers/net/ena/ena_ethdev.h +++ b/dpdk/drivers/net/ena/ena_ethdev.h @@ -105,6 +105,7 @@ struct ena_ring { struct ena_adapter *adapter; uint64_t offloads; u16 sgl_size; + unsigned int numa_socket_id; } __rte_cache_aligned; enum ena_adapter_state { diff --git a/dpdk/drivers/net/enic/base/vnic_dev.c b/dpdk/drivers/net/enic/base/vnic_dev.c index fd303fec..16e8814a 100644 --- a/dpdk/drivers/net/enic/base/vnic_dev.c +++ b/dpdk/drivers/net/enic/base/vnic_dev.c @@ -57,9 +57,6 @@ struct vnic_dev { void (*free_consistent)(void *priv, size_t size, void *vaddr, dma_addr_t dma_handle); - struct vnic_counter_counts *flow_counters; - dma_addr_t flow_counters_pa; - u8 flow_counters_dma_active; }; #define VNIC_MAX_RES_HDR_SIZE \ @@ -67,8 +64,6 @@ struct vnic_dev { sizeof(struct vnic_resource) * RES_TYPE_MAX) #define VNIC_RES_STRIDE 128 -#define VNIC_MAX_FLOW_COUNTERS 2048 - void *vnic_dev_priv(struct 
vnic_dev *vdev) { return vdev->priv; @@ -616,35 +611,6 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats) return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait); } -/* - * Configure counter DMA - */ -int vnic_dev_counter_dma_cfg(struct vnic_dev *vdev, u32 period, - u32 num_counters) -{ - u64 args[3]; - int wait = 1000; - int err; - - if (num_counters > VNIC_MAX_FLOW_COUNTERS) - return -ENOMEM; - if (period > 0 && (period < VNIC_COUNTER_DMA_MIN_PERIOD || - num_counters == 0)) - return -EINVAL; - - args[0] = num_counters; - args[1] = vdev->flow_counters_pa; - args[2] = period; - err = vnic_dev_cmd_args(vdev, CMD_COUNTER_DMA_CONFIG, args, 3, wait); - - /* record if DMAs need to be stopped on close */ - if (!err) - vdev->flow_counters_dma_active = (num_counters != 0 && - period != 0); - - return err; -} - int vnic_dev_close(struct vnic_dev *vdev) { u64 a0 = 0, a1 = 0; @@ -973,24 +939,6 @@ int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev) return vdev->stats == NULL ? -ENOMEM : 0; } -/* - * Initialize for up to VNIC_MAX_FLOW_COUNTERS - */ -int vnic_dev_alloc_counter_mem(struct vnic_dev *vdev) -{ - char name[NAME_MAX]; - static u32 instance; - - snprintf((char *)name, sizeof(name), "vnic_flow_ctrs-%u", instance++); - vdev->flow_counters = vdev->alloc_consistent(vdev->priv, - sizeof(struct vnic_counter_counts) - * VNIC_MAX_FLOW_COUNTERS, - &vdev->flow_counters_pa, - (u8 *)name); - vdev->flow_counters_dma_active = 0; - return vdev->flow_counters == NULL ? 
-ENOMEM : 0; -} - void vnic_dev_unregister(struct vnic_dev *vdev) { if (vdev) { @@ -1003,16 +951,6 @@ void vnic_dev_unregister(struct vnic_dev *vdev) vdev->free_consistent(vdev->priv, sizeof(struct vnic_stats), vdev->stats, vdev->stats_pa); - if (vdev->flow_counters) { - /* turn off counter DMAs before freeing memory */ - if (vdev->flow_counters_dma_active) - vnic_dev_counter_dma_cfg(vdev, 0, 0); - - vdev->free_consistent(vdev->priv, - sizeof(struct vnic_counter_counts) - * VNIC_MAX_FLOW_COUNTERS, - vdev->flow_counters, vdev->flow_counters_pa); - } if (vdev->fw_info) vdev->free_consistent(vdev->priv, sizeof(struct vnic_devcmd_fw_info), @@ -1156,46 +1094,3 @@ int vnic_dev_capable_vxlan(struct vnic_dev *vdev) (a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) == (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ); } - -bool vnic_dev_counter_alloc(struct vnic_dev *vdev, uint32_t *idx) -{ - u64 a0 = 0; - u64 a1 = 0; - int wait = 1000; - - if (vnic_dev_cmd(vdev, CMD_COUNTER_ALLOC, &a0, &a1, wait)) - return false; - *idx = (uint32_t)a0; - return true; -} - -bool vnic_dev_counter_free(struct vnic_dev *vdev, uint32_t idx) -{ - u64 a0 = idx; - u64 a1 = 0; - int wait = 1000; - - return vnic_dev_cmd(vdev, CMD_COUNTER_FREE, &a0, &a1, - wait) == 0; -} - -bool vnic_dev_counter_query(struct vnic_dev *vdev, uint32_t idx, - bool reset, uint64_t *packets, uint64_t *bytes) -{ - u64 a0 = idx; - u64 a1 = reset ? 
1 : 0; - int wait = 1000; - - if (reset) { - /* query/reset returns updated counters */ - if (vnic_dev_cmd(vdev, CMD_COUNTER_QUERY, &a0, &a1, wait)) - return false; - *packets = a0; - *bytes = a1; - } else { - /* Get values DMA'd from the adapter */ - *packets = vdev->flow_counters[idx].vcc_packets; - *bytes = vdev->flow_counters[idx].vcc_bytes; - } - return true; -} diff --git a/dpdk/drivers/net/enic/base/vnic_dev.h b/dpdk/drivers/net/enic/base/vnic_dev.h index de2645c4..270a47bd 100644 --- a/dpdk/drivers/net/enic/base/vnic_dev.h +++ b/dpdk/drivers/net/enic/base/vnic_dev.h @@ -118,8 +118,6 @@ int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size, void *value); int vnic_dev_stats_clear(struct vnic_dev *vdev); int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats); -int vnic_dev_counter_dma_cfg(struct vnic_dev *vdev, u32 period, - u32 num_counters); int vnic_dev_hang_notify(struct vnic_dev *vdev); int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, int broadcast, int promisc, int allmulti); @@ -172,7 +170,6 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, unsigned int num_bars); struct rte_pci_device *vnic_dev_get_pdev(struct vnic_dev *vdev); int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev); -int vnic_dev_alloc_counter_mem(struct vnic_dev *vdev); int vnic_dev_cmd_init(struct vnic_dev *vdev, int fallback); int vnic_dev_get_size(void); int vnic_dev_int13(struct vnic_dev *vdev, u64 arg, u32 op); @@ -190,9 +187,4 @@ int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay, u16 vxlan_udp_port_number); int vnic_dev_capable_vxlan(struct vnic_dev *vdev); -bool vnic_dev_counter_alloc(struct vnic_dev *vdev, uint32_t *idx); -bool vnic_dev_counter_free(struct vnic_dev *vdev, uint32_t idx); -bool vnic_dev_counter_query(struct vnic_dev *vdev, uint32_t idx, - bool reset, uint64_t *packets, uint64_t *bytes); - #endif /* _VNIC_DEV_H_ */ 
diff --git a/dpdk/drivers/net/enic/base/vnic_devcmd.h b/dpdk/drivers/net/enic/base/vnic_devcmd.h index 3aad2dbd..fffe307e 100644 --- a/dpdk/drivers/net/enic/base/vnic_devcmd.h +++ b/dpdk/drivers/net/enic/base/vnic_devcmd.h @@ -598,48 +598,6 @@ enum vnic_devcmd_cmd { * a3 = bitmask of supported actions */ CMD_ADD_ADV_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 77), - - /* - * Allocate a counter for use with CMD_ADD_FILTER - * out:(u32) a0 = counter index - */ - CMD_COUNTER_ALLOC = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ENET, 85), - - /* - * Free a counter - * in: (u32) a0 = counter_id - */ - CMD_COUNTER_FREE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 86), - - /* - * Read a counter - * in: (u32) a0 = counter_id - * (u32) a1 = clear counter if non-zero - * out:(u64) a0 = packet count - * (u64) a1 = byte count - */ - CMD_COUNTER_QUERY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 87), - - /* - * Configure periodic counter DMA. This will trigger an immediate - * DMA of the counters (unless period == 0), and then schedule a DMA - * of the counters every seconds until disdabled. - * Each new COUNTER_DMA_CONFIG will override all previous commands on - * this vnic. - * Setting a2 (period) = 0 will disable periodic DMAs - * If a0 (num_counters) != 0, an immediate DMA will always be done, - * irrespective of the value in a2. - * in: (u32) a0 = number of counters to DMA - * (u64) a1 = host target DMA address - * (u32) a2 = DMA period in milliseconds (0 to disable) - */ - CMD_COUNTER_DMA_CONFIG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 88), -#define VNIC_COUNTER_DMA_MIN_PERIOD 500 - - /* - * Clear all counters on a vnic - */ - CMD_COUNTER_CLEAR_ALL = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ENET, 89), }; /* Modes for exchanging advanced filter capabilities. 
The modes supported by @@ -905,11 +863,9 @@ struct filter_action { #define FILTER_ACTION_RQ_STEERING_FLAG (1 << 0) #define FILTER_ACTION_FILTER_ID_FLAG (1 << 1) #define FILTER_ACTION_DROP_FLAG (1 << 2) -#define FILTER_ACTION_COUNTER_FLAG (1 << 3) #define FILTER_ACTION_V2_ALL (FILTER_ACTION_RQ_STEERING_FLAG \ - | FILTER_ACTION_FILTER_ID_FLAG \ | FILTER_ACTION_DROP_FLAG \ - | FILTER_ACTION_COUNTER_FLAG) + | FILTER_ACTION_FILTER_ID_FLAG) /* Version 2 of filter action must be a strict extension of struct filter_action * where the first fields exactly match in size and meaning. @@ -919,8 +875,7 @@ struct filter_action_v2 { u32 rq_idx; u32 flags; /* use FILTER_ACTION_XXX_FLAG defines */ u16 filter_id; - u32 counter_index; - uint8_t reserved[28]; /* for future expansion */ + uint8_t reserved[32]; /* for future expansion */ } __attribute__((packed)); /* Specifies the filter type. */ @@ -1167,13 +1122,4 @@ typedef enum { GRPINTR_UPD_VECT, } grpintr_subcmd_t; -/* - * Structure for counter DMA - * (DMAed by CMD_COUNTER_DMA_CONFIG) - */ -struct vnic_counter_counts { - u64 vcc_packets; - u64 vcc_bytes; -}; - #endif /* _VNIC_DEVCMD_H_ */ diff --git a/dpdk/drivers/net/enic/enic.h b/dpdk/drivers/net/enic/enic.h index 377f607f..49831b00 100644 --- a/dpdk/drivers/net/enic/enic.h +++ b/dpdk/drivers/net/enic/enic.h @@ -39,9 +39,6 @@ #define PAGE_ROUND_UP(x) \ ((((unsigned long)(x)) + ENIC_PAGE_SIZE-1) & (~(ENIC_PAGE_SIZE-1))) -/* must be >= VNIC_COUNTER_DMA_MIN_PERIOD */ -#define VNIC_FLOW_COUNTER_UPDATE_MSECS 500 - #define ENICPMD_VFIO_PATH "/dev/vfio/vfio" /*#define ENIC_DESC_COUNT_MAKE_ODD (x) do{if ((~(x)) & 1) { (x)--; } }while(0)*/ @@ -97,7 +94,6 @@ struct rte_flow { LIST_ENTRY(rte_flow) next; u16 enic_filter_id; struct filter_v2 enic_filter; - int counter_idx; /* NIC allocated counter index (-1 = invalid) */ }; /* Per-instance private data structure */ @@ -175,8 +171,6 @@ struct enic { rte_spinlock_t mtu_lock; LIST_HEAD(enic_flows, rte_flow) flows; - int max_flow_counter; - 
rte_spinlock_t flows_lock; /* RSS */ uint16_t reta_size; @@ -250,7 +244,7 @@ static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq) static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev) { - return (struct enic *)eth_dev->data->dev_private; + return eth_dev->data->dev_private; } static inline uint32_t diff --git a/dpdk/drivers/net/enic/enic_flow.c b/dpdk/drivers/net/enic/enic_flow.c index dbc8de83..ada57084 100644 --- a/dpdk/drivers/net/enic/enic_flow.c +++ b/dpdk/drivers/net/enic/enic_flow.c @@ -337,15 +337,6 @@ static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = { RTE_FLOW_ACTION_TYPE_END, }; -static const enum rte_flow_action_type enic_supported_actions_v2_count[] = { - RTE_FLOW_ACTION_TYPE_QUEUE, - RTE_FLOW_ACTION_TYPE_MARK, - RTE_FLOW_ACTION_TYPE_FLAG, - RTE_FLOW_ACTION_TYPE_DROP, - RTE_FLOW_ACTION_TYPE_COUNT, - RTE_FLOW_ACTION_TYPE_END, -}; - /** Action capabilities indexed by NIC version information */ static const struct enic_action_cap enic_action_cap[] = { [FILTER_ACTION_RQ_STEERING_FLAG] = { @@ -360,10 +351,6 @@ static const struct enic_action_cap enic_action_cap[] = { .actions = enic_supported_actions_v2_drop, .copy_fn = enic_copy_action_v2, }, - [FILTER_ACTION_COUNTER_FLAG] = { - .actions = enic_supported_actions_v2_count, - .copy_fn = enic_copy_action_v2, - }, }; static int @@ -1188,10 +1175,6 @@ enic_copy_action_v2(const struct rte_flow_action actions[], enic_action->flags |= FILTER_ACTION_DROP_FLAG; break; } - case RTE_FLOW_ACTION_TYPE_COUNT: { - enic_action->flags |= FILTER_ACTION_COUNTER_FLAG; - break; - } case RTE_FLOW_ACTION_TYPE_VOID: continue; default: @@ -1236,9 +1219,7 @@ enic_get_action_cap(struct enic *enic) uint8_t actions; actions = enic->filter_actions; - if (actions & FILTER_ACTION_COUNTER_FLAG) - ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG]; - else if (actions & FILTER_ACTION_DROP_FLAG) + if (actions & FILTER_ACTION_DROP_FLAG) ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG]; else 
if (actions & FILTER_ACTION_FILTER_ID_FLAG) ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG]; @@ -1522,9 +1503,7 @@ enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter, { struct rte_flow *flow; int err; - uint16_t entry; - int ctr_idx; - int last_max_flow_ctr; + u16 entry; FLOW_TRACE(); @@ -1535,34 +1514,6 @@ enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter, return NULL; } - flow->counter_idx = -1; - last_max_flow_ctr = -1; - if (enic_action->flags & FILTER_ACTION_COUNTER_FLAG) { - if (!vnic_dev_counter_alloc(enic->vdev, (uint32_t *)&ctr_idx)) { - rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - NULL, "cannot allocate counter"); - goto unwind_flow_alloc; - } - flow->counter_idx = ctr_idx; - enic_action->counter_index = ctr_idx; - - /* If index is the largest, increase the counter DMA size */ - if (ctr_idx > enic->max_flow_counter) { - err = vnic_dev_counter_dma_cfg(enic->vdev, - VNIC_FLOW_COUNTER_UPDATE_MSECS, - ctr_idx + 1); - if (err) { - rte_flow_error_set(error, -err, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - NULL, "counter DMA config failed"); - goto unwind_ctr_alloc; - } - last_max_flow_ctr = enic->max_flow_counter; - enic->max_flow_counter = ctr_idx; - } - } - /* entry[in] is the queue id, entry[out] is the filter Id for delete */ entry = enic_action->rq_idx; err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter, @@ -1570,29 +1521,13 @@ enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter, if (err) { rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "vnic_dev_classifier error"); - goto unwind_ctr_dma_cfg; + rte_free(flow); + return NULL; } flow->enic_filter_id = entry; flow->enic_filter = *enic_filter; - return flow; - -/* unwind if there are errors */ -unwind_ctr_dma_cfg: - if (last_max_flow_ctr != -1) { - /* reduce counter DMA size */ - vnic_dev_counter_dma_cfg(enic->vdev, - VNIC_FLOW_COUNTER_UPDATE_MSECS, - last_max_flow_ctr + 1); - 
enic->max_flow_counter = last_max_flow_ctr; - } -unwind_ctr_alloc: - if (flow->counter_idx != -1) - vnic_dev_counter_free(enic->vdev, ctr_idx); -unwind_flow_alloc: - rte_free(flow); - return NULL; } /** @@ -1622,13 +1557,6 @@ enic_flow_del_filter(struct enic *enic, struct rte_flow *flow, NULL, "vnic_dev_classifier failed"); return -err; } - - if (flow->counter_idx != -1) { - if (!vnic_dev_counter_free(enic->vdev, flow->counter_idx)) - dev_err(enic, "counter free failed, idx: %d\n", - flow->counter_idx); - flow->counter_idx = -1; - } return 0; } @@ -1687,12 +1615,10 @@ enic_flow_create(struct rte_eth_dev *dev, if (ret < 0) return NULL; - rte_spinlock_lock(&enic->flows_lock); flow = enic_flow_add_filter(enic, &enic_filter, &enic_action, error); if (flow) LIST_INSERT_HEAD(&enic->flows, flow, next); - rte_spinlock_unlock(&enic->flows_lock); return flow; } @@ -1711,10 +1637,8 @@ enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, FLOW_TRACE(); - rte_spinlock_lock(&enic->flows_lock); enic_flow_del_filter(enic, flow, error); LIST_REMOVE(flow, next); - rte_spinlock_unlock(&enic->flows_lock); rte_free(flow); return 0; } @@ -1733,7 +1657,6 @@ enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) FLOW_TRACE(); - rte_spinlock_lock(&enic->flows_lock); while (!LIST_EMPTY(&enic->flows)) { flow = LIST_FIRST(&enic->flows); @@ -1741,70 +1664,6 @@ enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) LIST_REMOVE(flow, next); rte_free(flow); } - rte_spinlock_unlock(&enic->flows_lock); - return 0; -} - -static int -enic_flow_query_count(struct rte_eth_dev *dev, - struct rte_flow *flow, void *data, - struct rte_flow_error *error) -{ - struct enic *enic = pmd_priv(dev); - struct rte_flow_query_count *query; - uint64_t packets, bytes; - - FLOW_TRACE(); - - if (flow->counter_idx == -1) { - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "flow does not have counter"); - } - query = (struct 
rte_flow_query_count *)data; - if (!vnic_dev_counter_query(enic->vdev, flow->counter_idx, - !!query->reset, &packets, &bytes)) { - return rte_flow_error_set - (error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "cannot read counter"); - } - query->hits_set = 1; - query->bytes_set = 1; - query->hits = packets; - query->bytes = bytes; - return 0; -} - -static int -enic_flow_query(struct rte_eth_dev *dev, - struct rte_flow *flow, - const struct rte_flow_action *actions, - void *data, - struct rte_flow_error *error) -{ - int ret = 0; - - FLOW_TRACE(); - - for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { - switch (actions->type) { - case RTE_FLOW_ACTION_TYPE_VOID: - break; - case RTE_FLOW_ACTION_TYPE_COUNT: - ret = enic_flow_query_count(dev, flow, data, error); - break; - default: - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "action not supported"); - } - if (ret < 0) - return ret; - } return 0; } @@ -1818,5 +1677,4 @@ const struct rte_flow_ops enic_flow_ops = { .create = enic_flow_create, .destroy = enic_flow_destroy, .flush = enic_flow_flush, - .query = enic_flow_query, }; diff --git a/dpdk/drivers/net/enic/enic_main.c b/dpdk/drivers/net/enic/enic_main.c index 098a18d6..5fb5122d 100644 --- a/dpdk/drivers/net/enic/enic_main.c +++ b/dpdk/drivers/net/enic/enic_main.c @@ -1680,8 +1680,6 @@ static int enic_dev_init(struct enic *enic) vnic_dev_set_reset_flag(enic->vdev, 0); LIST_INIT(&enic->flows); - rte_spinlock_init(&enic->flows_lock); - enic->max_flow_counter = -1; /* set up link status checking */ vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */ @@ -1769,20 +1767,14 @@ int enic_probe(struct enic *enic) enic_free_consistent); /* - * Allocate the consistent memory for stats and counters upfront so - * both primary and secondary processes can access them. + * Allocate the consistent memory for stats upfront so both primary and + * secondary processes can dump stats. 
*/ err = vnic_dev_alloc_stats_mem(enic->vdev); if (err) { dev_err(enic, "Failed to allocate cmd memory, aborting\n"); goto err_out_unregister; } - err = vnic_dev_alloc_counter_mem(enic->vdev); - if (err) { - dev_err(enic, "Failed to allocate counter memory, aborting\n"); - goto err_out_unregister; - } - /* Issue device open to get device in known state */ err = enic_dev_open(enic); if (err) { diff --git a/dpdk/drivers/net/enic/enic_res.c b/dpdk/drivers/net/enic/enic_res.c index 78bb6b8f..d53202f9 100644 --- a/dpdk/drivers/net/enic/enic_res.c +++ b/dpdk/drivers/net/enic/enic_res.c @@ -84,7 +84,7 @@ int enic_get_vnic_config(struct enic *enic) vnic_dev_capable_udp_rss_weak(enic->vdev, &enic->nic_cfg_chk, &enic->udp_rss_weak); - dev_info(enic, "Flow api filter mode: %s Actions: %s%s%s%s\n", + dev_info(enic, "Flow api filter mode: %s Actions: %s%s%s\n", ((enic->flow_filter_mode == FILTER_DPDK_1) ? "DPDK" : ((enic->flow_filter_mode == FILTER_USNIC_IP) ? "USNIC" : ((enic->flow_filter_mode == FILTER_IPV4_5TUPLE) ? "5TUPLE" : @@ -94,9 +94,7 @@ int enic_get_vnic_config(struct enic *enic) ((enic->filter_actions & FILTER_ACTION_FILTER_ID_FLAG) ? "tag " : ""), ((enic->filter_actions & FILTER_ACTION_DROP_FLAG) ? - "drop " : ""), - ((enic->filter_actions & FILTER_ACTION_COUNTER_FLAG) ? 
- "count " : "")); + "drop " : "")); c->wq_desc_count = min_t(u32, ENIC_MAX_WQ_DESCS, diff --git a/dpdk/drivers/net/enic/enic_rxtx.c b/dpdk/drivers/net/enic/enic_rxtx.c index 0aadd342..8fa86473 100644 --- a/dpdk/drivers/net/enic/enic_rxtx.c +++ b/dpdk/drivers/net/enic/enic_rxtx.c @@ -416,13 +416,13 @@ uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, #ifdef RTE_LIBRTE_ETHDEV_DEBUG ret = rte_validate_tx_offload(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } #endif ret = rte_net_intel_cksum_prepare(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } } diff --git a/dpdk/drivers/net/enic/meson.build b/dpdk/drivers/net/enic/meson.build index 06448711..9e9a567a 100644 --- a/dpdk/drivers/net/enic/meson.build +++ b/dpdk/drivers/net/enic/meson.build @@ -19,13 +19,13 @@ deps += ['hash'] includes += include_directories('base') # The current implementation assumes 64-bit pointers -if dpdk_conf.has('RTE_MACHINE_CPUFLAG_AVX2') and cc.sizeof('void *') == 8 +if dpdk_conf.has('RTE_MACHINE_CPUFLAG_AVX2') and dpdk_conf.get('RTE_ARCH_64') sources += files('enic_rxtx_vec_avx2.c') # Build the avx2 handler if the compiler supports it, even though 'machine' # does not. This is to support users who build for the min supported machine # and need to run the binary on newer CPUs too. 
# This part is from i40e meson.build -elif cc.has_argument('-mavx2') and cc.sizeof('void *') == 8 +elif cc.has_argument('-mavx2') and dpdk_conf.get('RTE_ARCH_64') enic_avx2_lib = static_library('enic_avx2_lib', 'enic_rxtx_vec_avx2.c', dependencies: [static_rte_ethdev, static_rte_bus_pci], diff --git a/dpdk/drivers/net/failsafe/failsafe_eal.c b/dpdk/drivers/net/failsafe/failsafe_eal.c index 8a888b1f..88df6ea0 100644 --- a/dpdk/drivers/net/failsafe/failsafe_eal.c +++ b/dpdk/drivers/net/failsafe/failsafe_eal.c @@ -47,7 +47,7 @@ fs_bus_init(struct rte_eth_dev *dev) ret = rte_eal_hotplug_add(da->bus->name, da->name, da->args); - if (ret) { + if (ret < 0) { ERROR("sub_device %d probe failed %s%s%s", i, rte_errno ? "(" : "", rte_errno ? strerror(rte_errno) : "", @@ -145,7 +145,7 @@ fs_bus_uninit(struct rte_eth_dev *dev) FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { sdev_ret = rte_dev_remove(sdev->dev); - if (sdev_ret) { + if (sdev_ret < 0) { ERROR("Failed to remove requested device %s (err: %d)", sdev->dev->name, sdev_ret); continue; diff --git a/dpdk/drivers/net/failsafe/failsafe_ether.c b/dpdk/drivers/net/failsafe/failsafe_ether.c index 17831652..2ff234ea 100644 --- a/dpdk/drivers/net/failsafe/failsafe_ether.c +++ b/dpdk/drivers/net/failsafe/failsafe_ether.c @@ -283,7 +283,7 @@ fs_dev_remove(struct sub_device *sdev) /* fallthrough */ case DEV_PROBED: ret = rte_dev_remove(sdev->dev); - if (ret) { + if (ret < 0) { ERROR("Bus detach failed for sub_device %u", SUB_ID(sdev)); } else { diff --git a/dpdk/drivers/net/failsafe/failsafe_ops.c b/dpdk/drivers/net/failsafe/failsafe_ops.c index 7f8bcd4c..a63e4edc 100644 --- a/dpdk/drivers/net/failsafe/failsafe_ops.c +++ b/dpdk/drivers/net/failsafe/failsafe_ops.c @@ -17,80 +17,6 @@ #include "failsafe_private.h" -static struct rte_eth_dev_info default_infos = { - /* Max possible number of elements */ - .max_rx_pktlen = UINT32_MAX, - .max_rx_queues = RTE_MAX_QUEUES_PER_PORT, - .max_tx_queues = RTE_MAX_QUEUES_PER_PORT, - 
.max_mac_addrs = FAILSAFE_MAX_ETHADDR, - .max_hash_mac_addrs = UINT32_MAX, - .max_vfs = UINT16_MAX, - .max_vmdq_pools = UINT16_MAX, - .rx_desc_lim = { - .nb_max = UINT16_MAX, - .nb_min = 0, - .nb_align = 1, - .nb_seg_max = UINT16_MAX, - .nb_mtu_seg_max = UINT16_MAX, - }, - .tx_desc_lim = { - .nb_max = UINT16_MAX, - .nb_min = 0, - .nb_align = 1, - .nb_seg_max = UINT16_MAX, - .nb_mtu_seg_max = UINT16_MAX, - }, - /* - * Set of capabilities that can be verified upon - * configuring a sub-device. - */ - .rx_offload_capa = - DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM | - DEV_RX_OFFLOAD_TCP_LRO | - DEV_RX_OFFLOAD_QINQ_STRIP | - DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | - DEV_RX_OFFLOAD_MACSEC_STRIP | - DEV_RX_OFFLOAD_HEADER_SPLIT | - DEV_RX_OFFLOAD_VLAN_FILTER | - DEV_RX_OFFLOAD_VLAN_EXTEND | - DEV_RX_OFFLOAD_JUMBO_FRAME | - DEV_RX_OFFLOAD_SCATTER | - DEV_RX_OFFLOAD_TIMESTAMP | - DEV_RX_OFFLOAD_SECURITY, - .rx_queue_offload_capa = - DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM | - DEV_RX_OFFLOAD_TCP_LRO | - DEV_RX_OFFLOAD_QINQ_STRIP | - DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | - DEV_RX_OFFLOAD_MACSEC_STRIP | - DEV_RX_OFFLOAD_HEADER_SPLIT | - DEV_RX_OFFLOAD_VLAN_FILTER | - DEV_RX_OFFLOAD_VLAN_EXTEND | - DEV_RX_OFFLOAD_JUMBO_FRAME | - DEV_RX_OFFLOAD_SCATTER | - DEV_RX_OFFLOAD_TIMESTAMP | - DEV_RX_OFFLOAD_SECURITY, - .tx_offload_capa = - DEV_TX_OFFLOAD_MULTI_SEGS | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO, - .flow_type_rss_offloads = - ETH_RSS_IP | - ETH_RSS_UDP | - ETH_RSS_TCP, - .dev_capa = - RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | - RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, -}; - static int fs_dev_configure(struct rte_eth_dev *dev) { @@ -863,6 +789,46 @@ fs_stats_reset(struct rte_eth_dev *dev) fs_unlock(dev, 0); } +static void +fs_dev_merge_desc_lim(struct rte_eth_desc_lim *to, 
+ const struct rte_eth_desc_lim *from) +{ + to->nb_max = RTE_MIN(to->nb_max, from->nb_max); + to->nb_min = RTE_MAX(to->nb_min, from->nb_min); + to->nb_align = RTE_MAX(to->nb_align, from->nb_align); + + to->nb_seg_max = RTE_MIN(to->nb_seg_max, from->nb_seg_max); + to->nb_mtu_seg_max = RTE_MIN(to->nb_mtu_seg_max, from->nb_mtu_seg_max); +} + +/* + * Merge the information from sub-devices. + * + * The reported values must be the common subset of all sub devices + */ +static void +fs_dev_merge_info(struct rte_eth_dev_info *info, + const struct rte_eth_dev_info *sinfo) +{ + info->max_rx_pktlen = RTE_MIN(info->max_rx_pktlen, sinfo->max_rx_pktlen); + info->max_rx_queues = RTE_MIN(info->max_rx_queues, sinfo->max_rx_queues); + info->max_tx_queues = RTE_MIN(info->max_tx_queues, sinfo->max_tx_queues); + info->max_mac_addrs = RTE_MIN(info->max_mac_addrs, sinfo->max_mac_addrs); + info->max_hash_mac_addrs = RTE_MIN(info->max_hash_mac_addrs, + sinfo->max_hash_mac_addrs); + info->max_vmdq_pools = RTE_MIN(info->max_vmdq_pools, sinfo->max_vmdq_pools); + info->max_vfs = RTE_MIN(info->max_vfs, sinfo->max_vfs); + + fs_dev_merge_desc_lim(&info->rx_desc_lim, &sinfo->rx_desc_lim); + fs_dev_merge_desc_lim(&info->tx_desc_lim, &sinfo->tx_desc_lim); + + info->rx_offload_capa &= sinfo->rx_offload_capa; + info->tx_offload_capa &= sinfo->tx_offload_capa; + info->rx_queue_offload_capa &= sinfo->rx_queue_offload_capa; + info->tx_queue_offload_capa &= sinfo->tx_queue_offload_capa; + info->flow_type_rss_offloads &= sinfo->flow_type_rss_offloads; +} + /** * Fail-safe dev_infos_get rules: * @@ -901,43 +867,76 @@ fs_dev_infos_get(struct rte_eth_dev *dev, struct sub_device *sdev; uint8_t i; - sdev = TX_SUBDEV(dev); - if (sdev == NULL) { - DEBUG("No probed device, using default infos"); - rte_memcpy(&PRIV(dev)->infos, &default_infos, - sizeof(default_infos)); - } else { - uint64_t rx_offload_capa; - uint64_t rxq_offload_capa; - uint64_t rss_hf_offload_capa; - uint64_t dev_capa; + /* Use maximum upper 
bounds by default */ + infos->max_rx_pktlen = UINT32_MAX; + infos->max_rx_queues = RTE_MAX_QUEUES_PER_PORT; + infos->max_tx_queues = RTE_MAX_QUEUES_PER_PORT; + infos->max_mac_addrs = FAILSAFE_MAX_ETHADDR; + infos->max_hash_mac_addrs = UINT32_MAX; + infos->max_vfs = UINT16_MAX; + infos->max_vmdq_pools = UINT16_MAX; - rx_offload_capa = default_infos.rx_offload_capa; - rxq_offload_capa = default_infos.rx_queue_offload_capa; - rss_hf_offload_capa = default_infos.flow_type_rss_offloads; - dev_capa = default_infos.dev_capa; - FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { - rte_eth_dev_info_get(PORT_ID(sdev), - &PRIV(dev)->infos); - rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa; - rxq_offload_capa &= - PRIV(dev)->infos.rx_queue_offload_capa; - rss_hf_offload_capa &= - PRIV(dev)->infos.flow_type_rss_offloads; - dev_capa &= PRIV(dev)->infos.dev_capa; - } - sdev = TX_SUBDEV(dev); - rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos); - PRIV(dev)->infos.rx_offload_capa = rx_offload_capa; - PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa; - PRIV(dev)->infos.flow_type_rss_offloads = rss_hf_offload_capa; - PRIV(dev)->infos.dev_capa = dev_capa; - PRIV(dev)->infos.tx_offload_capa &= - default_infos.tx_offload_capa; - PRIV(dev)->infos.tx_queue_offload_capa &= - default_infos.tx_queue_offload_capa; + /* + * Set of capabilities that can be verified upon + * configuring a sub-device. 
+ */ + infos->rx_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_TCP_LRO | + DEV_RX_OFFLOAD_QINQ_STRIP | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_MACSEC_STRIP | + DEV_RX_OFFLOAD_HEADER_SPLIT | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_VLAN_EXTEND | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_TIMESTAMP | + DEV_RX_OFFLOAD_SECURITY; + + infos->rx_queue_offload_capa = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_TCP_LRO | + DEV_RX_OFFLOAD_QINQ_STRIP | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_MACSEC_STRIP | + DEV_RX_OFFLOAD_HEADER_SPLIT | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_VLAN_EXTEND | + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_TIMESTAMP | + DEV_RX_OFFLOAD_SECURITY; + + infos->tx_offload_capa = + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_MBUF_FAST_FREE | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO; + + infos->flow_type_rss_offloads = + ETH_RSS_IP | + ETH_RSS_UDP | + ETH_RSS_TCP; + infos->dev_capa = + RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | + RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; + + FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { + struct rte_eth_dev_info sub_info; + + rte_eth_dev_info_get(PORT_ID(sdev), &sub_info); + + fs_dev_merge_info(infos, &sub_info); } - rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos)); } static const uint32_t * diff --git a/dpdk/drivers/net/failsafe/failsafe_private.h b/dpdk/drivers/net/failsafe/failsafe_private.h index 7e318968..3264bff2 100644 --- a/dpdk/drivers/net/failsafe/failsafe_private.h +++ b/dpdk/drivers/net/failsafe/failsafe_private.h @@ -149,7 +149,6 @@ struct fs_priv { uint32_t nb_mcast_addr; struct ether_addr *mcast_addrs; /* current capabilities */ - struct 
rte_eth_dev_info infos; struct rte_eth_dev_owner my_owner; /* Unique owner. */ struct rte_intr_handle intr_handle; /* Port interrupt handle. */ /* diff --git a/dpdk/drivers/net/fm10k/base/fm10k_api.c b/dpdk/drivers/net/fm10k/base/fm10k_api.c index c49d20df..e7b2fe71 100644 --- a/dpdk/drivers/net/fm10k/base/fm10k_api.c +++ b/dpdk/drivers/net/fm10k/base/fm10k_api.c @@ -234,8 +234,14 @@ s32 fm10k_read_mac_addr(struct fm10k_hw *hw) * */ void fm10k_update_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats) { - if (hw->mac.ops.update_hw_stats) - hw->mac.ops.update_hw_stats(hw, stats); + switch (hw->mac.type) { + case fm10k_mac_pf: + return fm10k_update_hw_stats_pf(hw, stats); + case fm10k_mac_vf: + return fm10k_update_hw_stats_vf(hw, stats); + default: + break; + } } /** @@ -246,8 +252,14 @@ void fm10k_update_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats) * */ void fm10k_rebind_hw_stats(struct fm10k_hw *hw, struct fm10k_hw_stats *stats) { - if (hw->mac.ops.rebind_hw_stats) - hw->mac.ops.rebind_hw_stats(hw, stats); + switch (hw->mac.type) { + case fm10k_mac_pf: + return fm10k_rebind_hw_stats_pf(hw, stats); + case fm10k_mac_vf: + return fm10k_rebind_hw_stats_vf(hw, stats); + default: + break; + } } /** diff --git a/dpdk/drivers/net/fm10k/base/fm10k_pf.c b/dpdk/drivers/net/fm10k/base/fm10k_pf.c index db5f4912..f5b6a9e2 100644 --- a/dpdk/drivers/net/fm10k/base/fm10k_pf.c +++ b/dpdk/drivers/net/fm10k/base/fm10k_pf.c @@ -1511,7 +1511,7 @@ const struct fm10k_msg_data fm10k_iov_msg_data_pf[] = { * This function collects and aggregates global and per queue hardware * statistics. **/ -STATIC void fm10k_update_hw_stats_pf(struct fm10k_hw *hw, +void fm10k_update_hw_stats_pf(struct fm10k_hw *hw, struct fm10k_hw_stats *stats) { u32 timeout, ur, ca, um, xec, vlan_drop, loopback_drop, nodesc_drop; @@ -1584,7 +1584,7 @@ STATIC void fm10k_update_hw_stats_pf(struct fm10k_hw *hw, * This function resets the base for global and per queue hardware * statistics. 
**/ -STATIC void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw, +void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw, struct fm10k_hw_stats *stats) { DEBUGFUNC("fm10k_rebind_hw_stats_pf"); diff --git a/dpdk/drivers/net/fm10k/base/fm10k_pf.h b/dpdk/drivers/net/fm10k/base/fm10k_pf.h index ca125c27..2c22bdd0 100644 --- a/dpdk/drivers/net/fm10k/base/fm10k_pf.h +++ b/dpdk/drivers/net/fm10k/base/fm10k_pf.h @@ -184,4 +184,10 @@ extern const struct fm10k_msg_data fm10k_iov_msg_data_pf[]; #endif s32 fm10k_init_ops_pf(struct fm10k_hw *hw); + +void fm10k_update_hw_stats_pf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats); + +void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats); #endif /* _FM10K_PF_H */ diff --git a/dpdk/drivers/net/fm10k/base/fm10k_vf.c b/dpdk/drivers/net/fm10k/base/fm10k_vf.c index bd449773..2f4b5f5d 100644 --- a/dpdk/drivers/net/fm10k/base/fm10k_vf.c +++ b/dpdk/drivers/net/fm10k/base/fm10k_vf.c @@ -526,7 +526,7 @@ const struct fm10k_tlv_attr fm10k_1588_msg_attr[] = { * * This function collects and aggregates per queue hardware statistics. **/ -STATIC void fm10k_update_hw_stats_vf(struct fm10k_hw *hw, +void fm10k_update_hw_stats_vf(struct fm10k_hw *hw, struct fm10k_hw_stats *stats) { DEBUGFUNC("fm10k_update_hw_stats_vf"); @@ -541,7 +541,7 @@ STATIC void fm10k_update_hw_stats_vf(struct fm10k_hw *hw, * * This function resets the base for queue hardware statistics. 
**/ -STATIC void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw, +void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw, struct fm10k_hw_stats *stats) { DEBUGFUNC("fm10k_rebind_hw_stats_vf"); diff --git a/dpdk/drivers/net/fm10k/base/fm10k_vf.h b/dpdk/drivers/net/fm10k/base/fm10k_vf.h index 116c56fc..d4edd330 100644 --- a/dpdk/drivers/net/fm10k/base/fm10k_vf.h +++ b/dpdk/drivers/net/fm10k/base/fm10k_vf.h @@ -89,4 +89,9 @@ extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[]; FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_1588, fm10k_1588_msg_attr, func) s32 fm10k_init_ops_vf(struct fm10k_hw *hw); + +void fm10k_update_hw_stats_vf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats); +void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw, + struct fm10k_hw_stats *stats); #endif /* _FM10K_VF_H */ diff --git a/dpdk/drivers/net/fm10k/fm10k_ethdev.c b/dpdk/drivers/net/fm10k/fm10k_ethdev.c index caf4d1bc..f64d07bb 100644 --- a/dpdk/drivers/net/fm10k/fm10k_ethdev.c +++ b/dpdk/drivers/net/fm10k/fm10k_ethdev.c @@ -1389,6 +1389,15 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev, dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t); dev_info->reta_size = FM10K_MAX_RSS_INDICES; + dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 | + ETH_RSS_IPV6 | + ETH_RSS_IPV6_EX | + ETH_RSS_NONFRAG_IPV4_TCP | + ETH_RSS_NONFRAG_IPV6_TCP | + ETH_RSS_IPV6_TCP_EX | + ETH_RSS_NONFRAG_IPV4_UDP | + ETH_RSS_NONFRAG_IPV6_UDP | + ETH_RSS_IPV6_UDP_EX; dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { diff --git a/dpdk/drivers/net/fm10k/fm10k_rxtx.c b/dpdk/drivers/net/fm10k/fm10k_rxtx.c index fb02e115..cd5231c9 100644 --- a/dpdk/drivers/net/fm10k/fm10k_rxtx.c +++ b/dpdk/drivers/net/fm10k/fm10k_rxtx.c @@ -671,25 +671,25 @@ fm10k_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, if ((m->ol_flags & PKT_TX_TCP_SEG) && (m->tso_segsz < FM10K_TSO_MINMSS)) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return i; } if (m->ol_flags & FM10K_TX_OFFLOAD_NOTSUP_MASK) { - rte_errno = -ENOTSUP; + 
rte_errno = ENOTSUP; return i; } #ifdef RTE_LIBRTE_ETHDEV_DEBUG ret = rte_validate_tx_offload(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } #endif ret = rte_net_intel_cksum_prepare(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } } diff --git a/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c b/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c index 96b46a2b..45542bef 100644 --- a/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c +++ b/dpdk/drivers/net/fm10k/fm10k_rxtx_vec.c @@ -678,6 +678,7 @@ fm10k_recv_scattered_pkts_vec(void *rx_queue, i++; if (i == nb_bufs) return nb_bufs; + rxq->pkt_first_seg = rx_pkts[i]; } return i + fm10k_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, &split_flags[i]); @@ -711,7 +712,7 @@ vtx1(volatile struct fm10k_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags) { __m128i descriptor = _mm_set_epi64x(flags << 56 | - pkt->vlan_tci << 16 | pkt->data_len, + (uint64_t)pkt->vlan_tci << 16 | (uint64_t)pkt->data_len, MBUF_DMA_ADDR(pkt)); _mm_store_si128((__m128i *)txdp, descriptor); } diff --git a/dpdk/drivers/net/i40e/i40e_ethdev.c b/dpdk/drivers/net/i40e/i40e_ethdev.c index af5e844b..b1b145f3 100644 --- a/dpdk/drivers/net/i40e/i40e_ethdev.c +++ b/dpdk/drivers/net/i40e/i40e_ethdev.c @@ -238,10 +238,6 @@ static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names, unsigned limit); static void i40e_dev_stats_reset(struct rte_eth_dev *dev); -static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev, - uint16_t queue_id, - uint8_t stat_idx, - uint8_t is_rx); static int i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size); static void i40e_dev_info_get(struct rte_eth_dev *dev, @@ -455,7 +451,6 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .xstats_get_names = i40e_dev_xstats_get_names, .stats_reset = i40e_dev_stats_reset, .xstats_reset = i40e_dev_stats_reset, - .queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set, .fw_version_get = 
i40e_fw_version_get, .dev_infos_get = i40e_dev_info_get, .dev_supported_ptypes_get = i40e_dev_supported_ptypes_get, @@ -524,13 +519,13 @@ static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = { {"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)}, {"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)}, {"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)}, - {"rx_dropped", offsetof(struct i40e_eth_stats, rx_discards)}, + {"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)}, {"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats, rx_unknown_protocol)}, {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)}, {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)}, {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)}, - {"tx_dropped", offsetof(struct i40e_eth_stats, tx_discards)}, + {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)}, }; #define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \ @@ -1209,11 +1204,9 @@ i40e_parse_latest_vec_handler(__rte_unused const char *key, const char *value, void *opaque) { - struct i40e_adapter *ad; + struct i40e_adapter *ad = opaque; int use_latest_vec; - ad = (struct i40e_adapter *)opaque; - use_latest_vec = atoi(value); if (use_latest_vec != 0 && use_latest_vec != 1) @@ -1363,6 +1356,10 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret); return -EIO; } + /* Firmware of SFP x722 does not support adminq option */ + if (hw->device_id == I40E_DEV_ID_SFP_X722) + hw->flags &= ~I40E_HW_FLAG_802_1AD_CAPABLE; + PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x", hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.api_maj_ver, hw->aq.api_min_ver, @@ -1600,6 +1597,7 @@ err_init_tunnel_filter_list: rte_free(pf->ethertype.hash_map); err_init_ethtype_filter_list: 
rte_free(dev->data->mac_addrs); + dev->data->mac_addrs = NULL; err_mac_alloc: i40e_vsi_release(pf->main_vsi); err_setup_pf_switch: @@ -3430,17 +3428,6 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, return count; } -static int -i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev, - __rte_unused uint16_t queue_id, - __rte_unused uint8_t stat_idx, - __rte_unused uint8_t is_rx) -{ - PMD_INIT_FUNC_TRACE(); - - return -ENOSYS; -} - static int i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) { @@ -10827,8 +10814,7 @@ static void i40e_start_timecounters(struct rte_eth_dev *dev) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct i40e_adapter *adapter = - (struct i40e_adapter *)dev->data->dev_private; + struct i40e_adapter *adapter = dev->data->dev_private; struct rte_eth_link link; uint32_t tsync_inc_l; uint32_t tsync_inc_h; @@ -10880,8 +10866,7 @@ i40e_start_timecounters(struct rte_eth_dev *dev) static int i40e_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) { - struct i40e_adapter *adapter = - (struct i40e_adapter *)dev->data->dev_private; + struct i40e_adapter *adapter = dev->data->dev_private; adapter->systime_tc.nsec += delta; adapter->rx_tstamp_tc.nsec += delta; @@ -10894,8 +10879,7 @@ static int i40e_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) { uint64_t ns; - struct i40e_adapter *adapter = - (struct i40e_adapter *)dev->data->dev_private; + struct i40e_adapter *adapter = dev->data->dev_private; ns = rte_timespec_to_ns(ts); @@ -10911,8 +10895,7 @@ static int i40e_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) { uint64_t ns, systime_cycles; - struct i40e_adapter *adapter = - (struct i40e_adapter *)dev->data->dev_private; + struct i40e_adapter *adapter = dev->data->dev_private; systime_cycles = i40e_read_systime_cyclecounter(dev); ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); @@ 
-10988,9 +10971,7 @@ i40e_timesync_read_rx_timestamp(struct rte_eth_dev *dev, struct timespec *timestamp, uint32_t flags) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct i40e_adapter *adapter = - (struct i40e_adapter *)dev->data->dev_private; - + struct i40e_adapter *adapter = dev->data->dev_private; uint32_t sync_status; uint32_t index = flags & 0x03; uint64_t rx_tstamp_cycles; @@ -11012,9 +10993,7 @@ i40e_timesync_read_tx_timestamp(struct rte_eth_dev *dev, struct timespec *timestamp) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct i40e_adapter *adapter = - (struct i40e_adapter *)dev->data->dev_private; - + struct i40e_adapter *adapter = dev->data->dev_private; uint32_t sync_status; uint64_t tx_tstamp_cycles; uint64_t ns; diff --git a/dpdk/drivers/net/i40e/i40e_ethdev_vf.c b/dpdk/drivers/net/i40e/i40e_ethdev_vf.c index 551f6fa6..49bdeb32 100644 --- a/dpdk/drivers/net/i40e/i40e_ethdev_vf.c +++ b/dpdk/drivers/net/i40e/i40e_ethdev_vf.c @@ -550,7 +550,7 @@ i40evf_fill_virtchnl_vsi_txq_info(struct virtchnl_txq_info *txq_info, { txq_info->vsi_id = vsi_id; txq_info->queue_id = queue_id; - if (queue_id < nb_txq) { + if (queue_id < nb_txq && txq) { txq_info->ring_len = txq->nb_tx_desc; txq_info->dma_ring_addr = txq->tx_ring_phys_addr; } @@ -567,7 +567,7 @@ i40evf_fill_virtchnl_vsi_rxq_info(struct virtchnl_rxq_info *rxq_info, rxq_info->vsi_id = vsi_id; rxq_info->queue_id = queue_id; rxq_info->max_pkt_size = max_pkt_size; - if (queue_id < nb_rxq) { + if (queue_id < nb_rxq && rxq) { rxq_info->ring_len = rxq->nb_rx_desc; rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr; rxq_info->databuffer_size = @@ -600,10 +600,11 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev) for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) { i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq, - vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]); + vc_vqci->vsi_id, i, dev->data->nb_tx_queues, + txq ? 
txq[i] : NULL); i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq, vc_vqci->vsi_id, i, dev->data->nb_rx_queues, - vf->max_pkt_len, rxq[i]); + vf->max_pkt_len, rxq ? rxq[i] : NULL); } memset(&args, 0, sizeof(args)); args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES; @@ -1433,7 +1434,6 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev) return 0; } i40e_set_default_ptype_table(eth_dev); - i40e_set_default_pctype_table(eth_dev); rte_eth_copy_pci_info(eth_dev, pci_dev); hw->vendor_id = pci_dev->id.vendor_id; @@ -1451,6 +1451,7 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev) return -1; } + i40e_set_default_pctype_table(eth_dev); rte_eal_alarm_set(I40EVF_ALARM_INTERVAL, i40evf_dev_alarm_handler, eth_dev); diff --git a/dpdk/drivers/net/i40e/i40e_flow.c b/dpdk/drivers/net/i40e/i40e_flow.c index a614ec1d..4208d830 100644 --- a/dpdk/drivers/net/i40e/i40e_flow.c +++ b/dpdk/drivers/net/i40e/i40e_flow.c @@ -2443,6 +2443,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, uint64_t input_set = I40E_INSET_NONE; uint16_t frag_off; enum rte_flow_item_type item_type; + enum rte_flow_item_type next_type; enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END; enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END; uint32_t i, j; @@ -2483,6 +2484,16 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, case RTE_FLOW_ITEM_TYPE_ETH: eth_spec = item->spec; eth_mask = item->mask; + next_type = (item + 1)->type; + + if (next_type == RTE_FLOW_ITEM_TYPE_END && + (!eth_spec || !eth_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "NULL eth spec/mask."); + return -rte_errno; + } if (eth_spec && eth_mask) { if (!is_zero_ether_addr(&eth_mask->src) || @@ -2495,8 +2506,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, } } if (eth_spec && eth_mask && eth_mask->type) { - enum rte_flow_item_type next = (item + 1)->type; - if (eth_mask->type != RTE_BE16(0xffff)) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -2507,7 +2516,7 @@
i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, ether_type = rte_be_to_cpu_16(eth_spec->type); - if (next == RTE_FLOW_ITEM_TYPE_VLAN || + if (next_type == RTE_FLOW_ITEM_TYPE_VLAN || ether_type == ETHER_TYPE_IPv4 || ether_type == ETHER_TYPE_IPv6 || ether_type == ETHER_TYPE_ARP || @@ -3147,8 +3156,8 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev, cons_filter_type = RTE_ETH_FILTER_FDIR; - if (dev->data->dev_conf.fdir_conf.mode != - RTE_FDIR_MODE_PERFECT) { + if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT || + pf->fdir.fdir_vsi == NULL) { /* Enable fdir when fdir flow is added at first time. */ ret = i40e_fdir_setup(pf); if (ret != I40E_SUCCESS) { @@ -4734,7 +4743,7 @@ i40e_flow_destroy(struct rte_eth_dev *dev, &((struct i40e_fdir_filter *)flow->rule)->fdir, 0); /* If the last flow is destroyed, disable fdir. */ - if (!ret && !TAILQ_EMPTY(&pf->fdir.fdir_list)) { + if (!ret && TAILQ_EMPTY(&pf->fdir.fdir_list)) { i40e_fdir_teardown(pf); dev->data->dev_conf.fdir_conf.mode = RTE_FDIR_MODE_NONE; diff --git a/dpdk/drivers/net/i40e/i40e_rxtx.c b/dpdk/drivers/net/i40e/i40e_rxtx.c index 1489552d..4285f842 100644 --- a/dpdk/drivers/net/i40e/i40e_rxtx.c +++ b/dpdk/drivers/net/i40e/i40e_rxtx.c @@ -1446,7 +1446,7 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, if (!(ol_flags & PKT_TX_TCP_SEG)) { if (m->nb_segs > I40E_TX_MAX_MTU_SEG || m->pkt_len > I40E_FRAME_SIZE_MAX) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return i; } } else if (m->nb_segs > I40E_TX_MAX_SEG || @@ -1456,31 +1456,31 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, /* MSS outside the range (256B - 9674B) are considered * malicious */ - rte_errno = -EINVAL; + rte_errno = EINVAL; return i; } if (ol_flags & I40E_TX_OFFLOAD_NOTSUP_MASK) { - rte_errno = -ENOTSUP; + rte_errno = ENOTSUP; return i; } /* check the size of packet */ if (m->pkt_len < I40E_TX_MIN_PKT_LEN) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return i; } #ifdef 
RTE_LIBRTE_ETHDEV_DEBUG ret = rte_validate_tx_offload(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } #endif ret = rte_net_intel_cksum_prepare(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } } @@ -2169,15 +2169,30 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, * - tx_rs_thresh must be a divisor of the ring size. * - tx_free_thresh must be greater than 0. * - tx_free_thresh must be less than the size of the ring minus 3. + * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc. * * One descriptor in the TX ring is used as a sentinel to avoid a H/W * race condition, hence the maximum threshold constraints. When set * to zero use default values. */ - tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ? - tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH); tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH); + /* force tx_rs_thresh to adapt an aggresive tx_free_thresh */ + tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ? + nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH; + if (tx_conf->tx_rs_thresh > 0) + tx_rs_thresh = tx_conf->tx_rs_thresh; + if (tx_rs_thresh + tx_free_thresh > nb_desc) { + PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not " + "exceed nb_desc. (tx_rs_thresh=%u " + "tx_free_thresh=%u nb_desc=%u port=%d queue=%d)", + (unsigned int)tx_rs_thresh, + (unsigned int)tx_free_thresh, + (unsigned int)nb_desc, + (int)dev->data->port_id, + (int)queue_idx); + return I40E_ERR_PARAM; + } if (tx_rs_thresh >= (nb_desc - 2)) { PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the " "number of TX descriptors minus 2. 
" @@ -3159,7 +3174,8 @@ i40e_set_default_pctype_table(struct rte_eth_dev *dev) ad->pctypes_tbl[RTE_ETH_FLOW_L2_PAYLOAD] = (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD); - if (hw->mac.type == I40E_MAC_X722) { + if (hw->mac.type == I40E_MAC_X722 || + hw->mac.type == I40E_MAC_X722_VF) { ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |= (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP); ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |= diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c index 23179b3b..472f2e2d 100644 --- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c +++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c @@ -619,6 +619,7 @@ i40e_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, i++; if (i == nb_bufs) return nb_bufs; + rxq->pkt_first_seg = rx_pkts[i]; } return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, &split_flags[i]); diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c index 3b22588c..1fc66b78 100644 --- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c +++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_sse.c @@ -506,6 +506,7 @@ i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, i++; if (i == nb_bufs) return nb_bufs; + rxq->pkt_first_seg = rx_pkts[i]; } return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, &split_flags[i]); diff --git a/dpdk/drivers/net/i40e/rte_pmd_i40e.c b/dpdk/drivers/net/i40e/rte_pmd_i40e.c index c49c872b..f21c7608 100644 --- a/dpdk/drivers/net/i40e/rte_pmd_i40e.c +++ b/dpdk/drivers/net/i40e/rte_pmd_i40e.c @@ -580,6 +580,7 @@ rte_pmd_i40e_remove_vf_mac_addr(uint16_t port, uint16_t vf_id, struct i40e_pf_vf *vf; struct i40e_vsi *vsi; struct i40e_pf *pf; + int ret; if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS) return -EINVAL; @@ -608,8 +609,9 @@ rte_pmd_i40e_remove_vf_mac_addr(uint16_t port, uint16_t vf_id, ether_addr_copy(&null_mac_addr, &vf->mac_addr); /* Remove the mac */ - 
i40e_vsi_delete_mac(vsi, mac_addr); - + ret = i40e_vsi_delete_mac(vsi, mac_addr); + if (ret != I40E_SUCCESS) + return ret; return 0; } diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_common.c b/dpdk/drivers/net/ixgbe/base/ixgbe_common.c index fb50719f..62ff7672 100644 --- a/dpdk/drivers/net/ixgbe/base/ixgbe_common.c +++ b/dpdk/drivers/net/ixgbe/base/ixgbe_common.c @@ -5040,7 +5040,7 @@ void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset); /* Return is offset to OEM Product Version block is invalid */ - if (offset == 0x0 && offset == NVM_INVALID_PTR) + if (offset == 0x0 || offset == NVM_INVALID_PTR) return; /* Read product version block */ diff --git a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c index e9533e5a..00a06ef6 100644 --- a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -126,6 +127,13 @@ #define IXGBE_EXVET_VET_EXT_SHIFT 16 #define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000 +#define IXGBEVF_DEVARG_PFLINK_FULLCHK "pflink_fullchk" + +static const char * const ixgbevf_valid_arguments[] = { + IXGBEVF_DEVARG_PFLINK_FULLCHK, + NULL +}; + static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params); static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev); static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev); @@ -1339,6 +1347,9 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) /* cancel the delay handler before remove dev */ rte_eal_alarm_cancel(ixgbe_dev_interrupt_delayed_handler, eth_dev); + /* cancel the link handler before remove dev */ + rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, eth_dev); + /* uninitialize PF if max_vfs not zero */ ixgbe_pf_host_uninit(eth_dev); @@ -1539,6 +1550,45 @@ generate_random_mac_addr(struct ether_addr *mac_addr) memcpy(&mac_addr->addr_bytes[3], &random, 3); } +static int 
+devarg_handle_int(__rte_unused const char *key, const char *value, + void *extra_args) +{ + uint16_t *n = extra_args; + + if (value == NULL || extra_args == NULL) + return -EINVAL; + + *n = (uint16_t)strtoul(value, NULL, 0); + if (*n == USHRT_MAX && errno == ERANGE) + return -1; + + return 0; +} + +static void +ixgbevf_parse_devargs(struct ixgbe_adapter *adapter, + struct rte_devargs *devargs) +{ + struct rte_kvargs *kvlist; + uint16_t pflink_fullchk; + + if (devargs == NULL) + return; + + kvlist = rte_kvargs_parse(devargs->args, ixgbevf_valid_arguments); + if (kvlist == NULL) + return; + + if (rte_kvargs_count(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK) == 1 && + rte_kvargs_process(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK, + devarg_handle_int, &pflink_fullchk) == 0 && + pflink_fullchk == 1) + adapter->pflink_fullchk = 1; + + rte_kvargs_free(kvlist); +} + /* * Virtual Function device init */ @@ -1586,6 +1636,9 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) return 0; } + ixgbevf_parse_devargs(eth_dev->data->dev_private, + pci_dev->device.devargs); + rte_eth_copy_pci_info(eth_dev, pci_dev); hw->device_id = pci_dev->id.device_id; @@ -2396,8 +2449,7 @@ ixgbe_dev_configure(struct rte_eth_dev *dev) { struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; int ret; PMD_INIT_FUNC_TRACE(); @@ -2793,8 +2845,7 @@ static void ixgbe_dev_stop(struct rte_eth_dev *dev) { struct rte_eth_link link; - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_vf_info *vfinfo = @@ -3856,6 +3907,8 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev, dev_info->rx_queue_offload_capa); dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); 
dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); + dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); + dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { @@ -3887,6 +3940,8 @@ static int ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, int *link_up, int wait_to_complete) { + struct ixgbe_adapter *adapter = container_of(hw, + struct ixgbe_adapter, hw); struct ixgbe_mbx_info *mbx = &hw->mbx; struct ixgbe_mac_info *mac = &hw->mac; uint32_t links_reg, in_msg; @@ -3947,6 +4002,15 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, *speed = IXGBE_LINK_SPEED_UNKNOWN; } + if (wait_to_complete == 0 && adapter->pflink_fullchk == 0) { + if (*speed == IXGBE_LINK_SPEED_UNKNOWN) + mac->get_link_status = true; + else + mac->get_link_status = false; + + goto out; + } + /* if the read failed it could just be a mailbox collision, best wait * until we are called again and don't report an error */ @@ -4786,8 +4850,7 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, uint8_t j, mask; uint32_t reta, r; uint16_t idx, shift; - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t reta_reg; @@ -5020,8 +5083,7 @@ static int ixgbevf_dev_configure(struct rte_eth_dev *dev) { struct rte_eth_conf *conf = &dev->data->dev_conf; - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", dev->data->port_id); @@ -5153,8 +5215,7 @@ static void ixgbevf_dev_stop(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct 
ixgbe_adapter *adapter = dev->data->dev_private; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; @@ -6909,8 +6970,7 @@ static void ixgbe_start_timecounters(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; struct rte_eth_link link; uint32_t incval = 0; uint32_t shift = 0; @@ -6978,8 +7038,7 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev) static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) { - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; adapter->systime_tc.nsec += delta; adapter->rx_tstamp_tc.nsec += delta; @@ -6992,8 +7051,7 @@ static int ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) { uint64_t ns; - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; ns = rte_timespec_to_ns(ts); /* Set the timecounters to a new value. 
*/ @@ -7008,8 +7066,7 @@ static int ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) { uint64_t ns, systime_cycles; - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; systime_cycles = ixgbe_read_systime_cyclecounter(dev); ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); @@ -7090,8 +7147,7 @@ ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, uint32_t flags __rte_unused) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; uint32_t tsync_rxctl; uint64_t rx_tstamp_cycles; uint64_t ns; @@ -7112,8 +7168,7 @@ ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, struct timespec *timestamp) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; uint32_t tsync_txctl; uint64_t tx_tstamp_cycles; uint64_t ns; @@ -7352,6 +7407,9 @@ ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { case ixgbe_mac_X550EM_x_vf: case ixgbe_mac_X550EM_a_vf: return ETH_RSS_RETA_SIZE_64; + case ixgbe_mac_X540_vf: + case ixgbe_mac_82599_vf: + return 0; default: return ETH_RSS_RETA_SIZE_128; } @@ -8619,6 +8677,8 @@ RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci"); RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map); RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci"); +RTE_PMD_REGISTER_PARAM_STRING(net_ixgbe_vf, + IXGBEVF_DEVARG_PFLINK_FULLCHK "=<0|1>"); RTE_INIT(ixgbe_init_log) { diff --git a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h index 565c69c9..5023fa13 100644 --- 
a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h +++ b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.h @@ -493,6 +493,11 @@ struct ixgbe_adapter { /* For RSS reta table update */ uint8_t rss_reta_updated; + + /* Used for VF link sync with PF's physical and logical (by checking + * mailbox status) link status. + */ + uint8_t pflink_fullchk; }; struct ixgbe_vf_representor { diff --git a/dpdk/drivers/net/ixgbe/ixgbe_ipsec.c b/dpdk/drivers/net/ixgbe/ixgbe_ipsec.c index 5a416885..1eea7071 100644 --- a/dpdk/drivers/net/ixgbe/ixgbe_ipsec.c +++ b/dpdk/drivers/net/ixgbe/ixgbe_ipsec.c @@ -154,8 +154,12 @@ ixgbe_crypto_add_sa(struct ixgbe_crypto_session *ic_session) if (ic_session->op == IXGBE_OP_AUTHENTICATED_DECRYPTION) priv->rx_sa_tbl[sa_index].mode |= (IPSRXMOD_PROTO | IPSRXMOD_DECRYPT); - if (ic_session->dst_ip.type == IPv6) + if (ic_session->dst_ip.type == IPv6) { priv->rx_sa_tbl[sa_index].mode |= IPSRXMOD_IPV6; + priv->rx_ip_tbl[ip_index].ip.type = IPv6; + } else if (ic_session->dst_ip.type == IPv4) + priv->rx_ip_tbl[ip_index].ip.type = IPv4; + priv->rx_sa_tbl[sa_index].used = 1; /* write IP table entry*/ diff --git a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c index 46c93f59..94bec218 100644 --- a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c +++ b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c @@ -981,25 +981,25 @@ ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) */ if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return i; } if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) { - rte_errno = -ENOTSUP; + rte_errno = ENOTSUP; return i; } #ifdef RTE_LIBRTE_ETHDEV_DEBUG ret = rte_validate_tx_offload(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } #endif ret = rte_net_intel_cksum_prepare(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } } @@ -2496,14 +2496,29 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, * tx_rs_thresh must be a divisor of the ring size. 
* tx_free_thresh must be greater than 0. * tx_free_thresh must be less than the size of the ring minus 3. + * tx_free_thresh + tx_rs_thresh must not exceed nb_desc. * One descriptor in the TX ring is used as a sentinel to avoid a * H/W race condition, hence the maximum threshold constraints. * When set to zero use default values. */ - tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ? - tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH); tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH); + /* force tx_rs_thresh to adapt an aggressive tx_free_thresh */ + tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ? + nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH; + if (tx_conf->tx_rs_thresh > 0) + tx_rs_thresh = tx_conf->tx_rs_thresh; + if (tx_rs_thresh + tx_free_thresh > nb_desc) { + PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not " + "exceed nb_desc. (tx_rs_thresh=%u " + "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)", + (unsigned int)tx_rs_thresh, + (unsigned int)tx_free_thresh, + (unsigned int)nb_desc, + (int)dev->data->port_id, + (int)queue_idx); + return -(EINVAL); + } if (tx_rs_thresh >= (nb_desc - 2)) { PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number " "of TX descriptors minus 2.
(tx_rs_thresh=%u " @@ -2901,8 +2916,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, struct ixgbe_rx_queue *rxq; struct ixgbe_hw *hw; uint16_t len; - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; uint64_t offloads; PMD_INIT_FUNC_TRACE(); @@ -3172,8 +3186,7 @@ void __attribute__((cold)) ixgbe_dev_clear_queues(struct rte_eth_dev *dev) { unsigned i; - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; PMD_INIT_FUNC_TRACE(); @@ -3427,7 +3440,7 @@ ixgbe_rss_configure(struct rte_eth_dev *dev) uint32_t reta_reg; PMD_INIT_FUNC_TRACE(); - adapter = (struct ixgbe_adapter *)dev->data->dev_private; + adapter = dev->data->dev_private; hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); sp_reta_size = ixgbe_reta_size_get(hw->mac.type); @@ -4571,8 +4584,7 @@ void __attribute__((cold)) ixgbe_set_rx_function(struct rte_eth_dev *dev) { uint16_t i, rx_using_sse; - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; /* * In order to allow Vector Rx there are a few configuration @@ -5220,8 +5232,7 @@ int __attribute__((cold)) ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) { struct ixgbe_hw *hw; - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; struct ixgbe_rx_queue *rxq; uint32_t rxdctl; int poll_ms; diff --git a/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c index c9ba4824..599ba30e 100644 --- a/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c +++ b/dpdk/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c @@ -609,6 +609,7 @@ ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, i++; if (i == nb_bufs) return nb_bufs; + rxq->pkt_first_seg = rx_pkts[i]; } 
return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, &split_flags[i]); diff --git a/dpdk/drivers/net/mlx4/mlx4.c b/dpdk/drivers/net/mlx4/mlx4.c index 4bc966d5..4428edf1 100644 --- a/dpdk/drivers/net/mlx4/mlx4.c +++ b/dpdk/drivers/net/mlx4/mlx4.c @@ -520,6 +520,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) }; unsigned int vf; int i; + char ifname[IF_NAMESIZE]; (void)pci_drv; assert(pci_drv == &mlx4_driver); @@ -703,17 +704,15 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) mac.addr_bytes[4], mac.addr_bytes[5]); /* Register MAC address. */ priv->mac[0] = mac; -#ifndef NDEBUG - { - char ifname[IF_NAMESIZE]; - if (mlx4_get_ifname(priv, &ifname) == 0) - DEBUG("port %u ifname is \"%s\"", - priv->port, ifname); - else - DEBUG("port %u ifname is unknown", priv->port); + if (mlx4_get_ifname(priv, &ifname) == 0) { + DEBUG("port %u ifname is \"%s\"", + priv->port, ifname); + priv->if_index = if_nametoindex(ifname); + } else { + DEBUG("port %u ifname is unknown", priv->port); } -#endif + /* Get actual MTU if possible. */ mlx4_mtu_get(priv, &priv->mtu); DEBUG("port %u MTU is %u", priv->port, priv->mtu); diff --git a/dpdk/drivers/net/mlx4/mlx4.h b/dpdk/drivers/net/mlx4/mlx4.h index fc568eb3..758b7aa4 100644 --- a/dpdk/drivers/net/mlx4/mlx4.h +++ b/dpdk/drivers/net/mlx4/mlx4.h @@ -84,6 +84,7 @@ struct mlx4_priv { struct ibv_device_attr device_attr; /**< Device properties. */ struct ibv_pd *pd; /**< Protection Domain. */ /* Device properties. */ + unsigned int if_index; /**< Associated network device index */ uint16_t mtu; /**< Configured MTU. */ uint8_t port; /**< Physical port number. */ uint32_t started:1; /**< Device started, flows enabled. 
*/ diff --git a/dpdk/drivers/net/mlx4/mlx4_ethdev.c b/dpdk/drivers/net/mlx4/mlx4_ethdev.c index 084b24e4..bd8b6982 100644 --- a/dpdk/drivers/net/mlx4/mlx4_ethdev.c +++ b/dpdk/drivers/net/mlx4/mlx4_ethdev.c @@ -559,7 +559,6 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) { struct mlx4_priv *priv = dev->data->dev_private; unsigned int max; - char ifname[IF_NAMESIZE]; /* FIXME: we should ask the device for these values. */ info->min_rx_bufsize = 32; @@ -580,8 +579,7 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) info->rx_queue_offload_capa = mlx4_get_rx_queue_offloads(priv); info->rx_offload_capa = (mlx4_get_rx_port_offloads(priv) | info->rx_queue_offload_capa); - if (mlx4_get_ifname(priv, &ifname) == 0) - info->if_index = if_nametoindex(ifname); + info->if_index = priv->if_index; info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE; info->speed_capa = ETH_LINK_SPEED_1G | diff --git a/dpdk/drivers/net/mlx5/mlx5.c b/dpdk/drivers/net/mlx5/mlx5.c index d91d55b5..27db1562 100644 --- a/dpdk/drivers/net/mlx5/mlx5.c +++ b/dpdk/drivers/net/mlx5/mlx5.c @@ -574,8 +574,10 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) return 0; /* Following UGLY cast is done to pass checkpatch. */ kvlist = rte_kvargs_parse(devargs->args, params); - if (kvlist == NULL) - return 0; + if (kvlist == NULL) { + rte_errno = EINVAL; + return -rte_errno; + } /* Process parameters. */ for (i = 0; (params[i] != NULL); ++i) { if (rte_kvargs_count(kvlist, params[i])) { diff --git a/dpdk/drivers/net/mlx5/mlx5.h b/dpdk/drivers/net/mlx5/mlx5.h index 91efd21b..b409f20e 100644 --- a/dpdk/drivers/net/mlx5/mlx5.h +++ b/dpdk/drivers/net/mlx5/mlx5.h @@ -91,6 +91,11 @@ struct mlx5_xstats_ctrl { struct mlx5_counter_ctrl info[MLX5_MAX_XSTATS]; }; +struct mlx5_stats_ctrl { + /* Base for imissed counter. */ + uint64_t imissed_base; +}; + /* Flow list . 
*/ TAILQ_HEAD(mlx5_flows, rte_flow); @@ -145,7 +150,7 @@ struct mlx5_dev_config { }; /** - * Type of objet being allocated. + * Type of object being allocated. */ enum mlx5_verbs_alloc_type { MLX5_VERBS_ALLOC_TYPE_NONE, @@ -225,6 +230,7 @@ struct mlx5_priv { LIST_HEAD(encap_decap, mlx5_flow_dv_encap_decap_resource) encaps_decaps; uint32_t link_speed_capa; /* Link speed capabilities. */ struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */ + struct mlx5_stats_ctrl stats_ctrl; /* Stats control. */ int primary_socket; /* Unix socket for primary process. */ void *uar_base; /* Reserved address space for UAR mapping */ struct rte_intr_handle intr_handle_socket; /* Interrupt handler. */ @@ -317,7 +323,7 @@ void mlx5_allmulticast_disable(struct rte_eth_dev *dev); /* mlx5_stats.c */ -void mlx5_xstats_init(struct rte_eth_dev *dev); +void mlx5_stats_init(struct rte_eth_dev *dev); int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); void mlx5_stats_reset(struct rte_eth_dev *dev); int mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, diff --git a/dpdk/drivers/net/mlx5/mlx5_ethdev.c b/dpdk/drivers/net/mlx5/mlx5_ethdev.c index fb8e313a..9c7fc6b4 100644 --- a/dpdk/drivers/net/mlx5/mlx5_ethdev.c +++ b/dpdk/drivers/net/mlx5/mlx5_ethdev.c @@ -730,7 +730,8 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, dev->data->port_id, strerror(rte_errno)); return ret; } - dev_link.link_speed = ecmd->speed; + dev_link.link_speed = (ecmd->speed == UINT32_MAX) ? 
ETH_SPEED_NUM_NONE : + ecmd->speed; sc = ecmd->link_mode_masks[0] | ((uint64_t)ecmd->link_mode_masks[1] << 32); priv->link_speed_capa = 0; @@ -802,7 +803,7 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete) do { ret = mlx5_link_update_unlocked_gs(dev, &dev_link); - if (ret) + if (ret == -ENOTSUP) ret = mlx5_link_update_unlocked_gset(dev, &dev_link); if (ret == 0) break; diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.c b/dpdk/drivers/net/mlx5/mlx5_flow.c index 222cd81d..f19b27ed 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow.c +++ b/dpdk/drivers/net/mlx5/mlx5_flow.c @@ -473,7 +473,7 @@ mlx5_flow_item_acceptable(const struct rte_flow_item *item, * Item hash fields. * * @return - * The hash fileds that should be used. + * The hash fields that should be used. */ uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, @@ -1098,8 +1098,8 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, const struct rte_flow_item_vlan *spec = item->spec; const struct rte_flow_item_vlan *mask = item->mask; const struct rte_flow_item_vlan nic_mask = { - .tci = RTE_BE16(0x0fff), - .inner_type = RTE_BE16(0xffff), + .tci = RTE_BE16(UINT16_MAX), + .inner_type = RTE_BE16(UINT16_MAX), }; uint16_t vlan_tag = 0; const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); @@ -1670,19 +1670,20 @@ flow_null_validate(struct rte_eth_dev *dev __rte_unused, const struct rte_flow_attr *attr __rte_unused, const struct rte_flow_item items[] __rte_unused, const struct rte_flow_action actions[] __rte_unused, - struct rte_flow_error *error __rte_unused) + struct rte_flow_error *error) { - rte_errno = ENOTSUP; - return -rte_errno; + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); } static struct mlx5_flow * flow_null_prepare(const struct rte_flow_attr *attr __rte_unused, const struct rte_flow_item items[] __rte_unused, const struct rte_flow_action actions[] __rte_unused, - struct rte_flow_error *error __rte_unused) + struct 
rte_flow_error *error) { - rte_errno = ENOTSUP; + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); return NULL; } @@ -1692,19 +1693,19 @@ flow_null_translate(struct rte_eth_dev *dev __rte_unused, const struct rte_flow_attr *attr __rte_unused, const struct rte_flow_item items[] __rte_unused, const struct rte_flow_action actions[] __rte_unused, - struct rte_flow_error *error __rte_unused) + struct rte_flow_error *error) { - rte_errno = ENOTSUP; - return -rte_errno; + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); } static int flow_null_apply(struct rte_eth_dev *dev __rte_unused, struct rte_flow *flow __rte_unused, - struct rte_flow_error *error __rte_unused) + struct rte_flow_error *error) { - rte_errno = ENOTSUP; - return -rte_errno; + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); } static void @@ -1724,10 +1725,10 @@ flow_null_query(struct rte_eth_dev *dev __rte_unused, struct rte_flow *flow __rte_unused, const struct rte_flow_action *actions __rte_unused, void *data __rte_unused, - struct rte_flow_error *error __rte_unused) + struct rte_flow_error *error) { - rte_errno = ENOTSUP; - return -rte_errno; + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL); } /* Void driver to protect from null pointer reference. 
*/ @@ -2068,6 +2069,10 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list, else flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *)); flow = rte_calloc(__func__, 1, flow_size, 0); + if (!flow) { + rte_errno = ENOMEM; + return NULL; + } flow->drv_type = flow_get_drv_type(dev, attr); assert(flow->drv_type > MLX5_FLOW_TYPE_MIN && flow->drv_type < MLX5_FLOW_TYPE_MAX); @@ -2130,7 +2135,7 @@ mlx5_flow_create(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { - struct mlx5_priv *priv = (struct mlx5_priv *)dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; return flow_list_create(dev, &priv->flows, attr, items, actions, error); diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c index 207edcbc..3c61083d 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c +++ b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c @@ -196,6 +196,8 @@ flow_dv_validate_action_raw_encap(uint64_t action_flags, const struct rte_flow_attr *attr, struct rte_flow_error *error) { + const struct rte_flow_action_raw_encap *raw_encap = + (const struct rte_flow_action_raw_encap *)action->conf; if (!(action->conf)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, action, @@ -216,6 +218,10 @@ flow_dv_validate_action_raw_encap(uint64_t action_flags, NULL, "encap action not supported for " "ingress"); + if (!raw_encap->size || !raw_encap->data) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, + "raw encap data cannot be empty"); return 0; } @@ -1176,10 +1182,6 @@ flow_dv_translate_item_vlan(void *matcher, void *key, { const struct rte_flow_item_vlan *vlan_m = item->mask; const struct rte_flow_item_vlan *vlan_v = item->spec; - const struct rte_flow_item_vlan nic_mask = { - .tci = RTE_BE16(0x0fff), - .inner_type = RTE_BE16(0xffff), - }; void *headers_m; void *headers_v; uint16_t tci_m; @@ -1188,7 +1190,7 @@ flow_dv_translate_item_vlan(void 
*matcher, void *key, if (!vlan_v) return; if (!vlan_m) - vlan_m = &nic_mask; + vlan_m = &rte_flow_item_vlan_mask; if (inner) { headers_m = MLX5_ADDR_OF(fte_match_param, matcher, inner_headers); @@ -1208,6 +1210,10 @@ flow_dv_translate_item_vlan(void *matcher, void *key, MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12); MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13); MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13); + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, + rte_be_to_cpu_16(vlan_m->inner_type)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, + rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type)); } /** diff --git a/dpdk/drivers/net/mlx5/mlx5_rxq.c b/dpdk/drivers/net/mlx5/mlx5_rxq.c index f1ce3170..f6edb10f 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rxq.c +++ b/dpdk/drivers/net/mlx5/mlx5_rxq.c @@ -611,11 +611,12 @@ mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev) continue; /** * Need to access directly the queue to release the reference - * kept in priv_rx_intr_vec_enable(). + * kept in mlx5_rx_intr_vec_enable(). */ rxq_data = (*priv->rxqs)[i]; rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); - mlx5_rxq_ibv_release(rxq_ctrl->ibv); + if (rxq_ctrl->ibv) + mlx5_rxq_ibv_release(rxq_ctrl->ibv); } free: rte_intr_free_epoll_fd(intr_handle); @@ -772,11 +773,10 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) struct mlx5dv_wq_init_attr mlx5; #endif } wq; - struct ibv_cq_ex cq_attr; } attr; unsigned int cqe_n; unsigned int wqe_n = 1 << rxq_data->elts_n; - struct mlx5_rxq_ibv *tmpl; + struct mlx5_rxq_ibv *tmpl = NULL; struct mlx5dv_cq cq_info; struct mlx5dv_rwq rwq; unsigned int i; @@ -1017,15 +1017,19 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; return tmpl; error: - ret = rte_errno; /* Save rte_errno before cleanup. 
*/ - if (tmpl->wq) - claim_zero(mlx5_glue->destroy_wq(tmpl->wq)); - if (tmpl->cq) - claim_zero(mlx5_glue->destroy_cq(tmpl->cq)); - if (tmpl->channel) - claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel)); + if (tmpl) { + ret = rte_errno; /* Save rte_errno before cleanup. */ + if (tmpl->wq) + claim_zero(mlx5_glue->destroy_wq(tmpl->wq)); + if (tmpl->cq) + claim_zero(mlx5_glue->destroy_cq(tmpl->cq)); + if (tmpl->channel) + claim_zero(mlx5_glue->destroy_comp_channel + (tmpl->channel)); + rte_free(tmpl); + rte_errno = ret; /* Restore rte_errno. */ + } priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; - rte_errno = ret; /* Restore rte_errno. */ return NULL; } @@ -1160,7 +1164,7 @@ mlx5_mprq_free_mp(struct rte_eth_dev *dev) dev->data->port_id, mp->name); /* * If a buffer in the pool has been externally attached to a mbuf and it - * is still in use by application, destroying the Rx qeueue can spoil + * is still in use by application, destroying the Rx queue can spoil * the packet. It is unlikely to happen but if application dynamically * creates and destroys with holding Rx packets, this can happen. * @@ -1558,8 +1562,9 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) * RX queue index. * * @return - * 1 if the queue can be released, negative errno otherwise and rte_errno is - * set. + * 1 if the queue can be released + * 0 if the queue can not be released, there are references to it. + * Negative errno and rte_errno is set if queue doesn't exist. */ int mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx) diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h index 38e915c5..b1e0e8f3 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h +++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h @@ -171,7 +171,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, /* Fill ESEG in the header. 
*/ vst1q_u32((void *)(t_wqe + 1), ((uint32x4_t){ 0, - cs_flags << 16 | rte_cpu_to_be_16(len), + rte_cpu_to_be_16(len) << 16 | cs_flags, metadata, 0 })); txq->wqe_ci = wqe_ci; } diff --git a/dpdk/drivers/net/mlx5/mlx5_stats.c b/dpdk/drivers/net/mlx5/mlx5_stats.c index 132bf5b4..6906dc81 100644 --- a/dpdk/drivers/net/mlx5/mlx5_stats.c +++ b/dpdk/drivers/net/mlx5/mlx5_stats.c @@ -99,6 +99,14 @@ static const struct mlx5_counter_ctrl mlx5_counters_init[] = { .dpdk_name = "rx_packets_phy", .ctr_name = "rx_packets_phy", }, + { + .dpdk_name = "tx_discards_phy", + .ctr_name = "tx_discards_phy", + }, + { + .dpdk_name = "rx_discards_phy", + .ctr_name = "rx_discards_phy", + }, { .dpdk_name = "tx_bytes_phy", .ctr_name = "tx_bytes_phy", @@ -128,6 +136,24 @@ static const struct mlx5_counter_ctrl mlx5_counters_init[] = { static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); +static inline void +mlx5_read_ib_stat(struct mlx5_priv *priv, const char *ctr_name, uint64_t *stat) +{ + FILE *file; + MKSTR(path, "%s/ports/1/hw_counters/%s", + priv->ibdev_path, + ctr_name); + + file = fopen(path, "rb"); + if (file) { + int n = fscanf(file, "%" SCNu64, stat); + + fclose(file); + if (n != 1) + *stat = 0; + } +} + /** * Read device counters table. * @@ -164,19 +190,8 @@ mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) } for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i) { if (xstats_ctrl->info[i].ib) { - FILE *file; - MKSTR(path, "%s/ports/1/hw_counters/%s", - priv->ibdev_path, - xstats_ctrl->info[i].ctr_name); - - file = fopen(path, "rb"); - if (file) { - int n = fscanf(file, "%" SCNu64, &stats[i]); - - fclose(file); - if (n != 1) - stats[i] = 0; - } + mlx5_read_ib_stat(priv, xstats_ctrl->info[i].ctr_name, + &stats[i]); } else { stats[i] = (uint64_t) et_stats->data[xstats_ctrl->dev_table_idx[i]]; @@ -219,10 +234,11 @@ mlx5_ethtool_get_stats_n(struct rte_eth_dev *dev) { * Pointer to Ethernet device.
*/ void -mlx5_xstats_init(struct rte_eth_dev *dev) +mlx5_stats_init(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + struct mlx5_stats_ctrl *stats_ctrl = &priv->stats_ctrl; unsigned int i; unsigned int j; struct ifreq ifr; @@ -290,6 +306,7 @@ mlx5_xstats_init(struct rte_eth_dev *dev) if (ret) DRV_LOG(ERR, "port %u cannot read device counters: %s", dev->data->port_id, strerror(rte_errno)); + mlx5_read_ib_stat(priv, "out_of_buffer", &stats_ctrl->imissed_base); free: rte_free(strings); } @@ -326,7 +343,7 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, if (stats_n < 0) return stats_n; if (xstats_ctrl->stats_n != stats_n) - mlx5_xstats_init(dev); + mlx5_stats_init(dev); ret = mlx5_read_dev_counters(dev, counters); if (ret) return ret; @@ -400,6 +417,8 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) #endif tmp.oerrors += txq->stats.oerrors; } + mlx5_read_ib_stat(priv, "out_of_buffer", &tmp.imissed); + tmp.imissed -= priv->stats_ctrl.imissed_base; #ifndef MLX5_PMD_SOFT_COUNTERS /* FIXME: retrieve and add hardware counters. */ #endif @@ -417,6 +436,7 @@ void mlx5_stats_reset(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_stats_ctrl *stats_ctrl = &priv->stats_ctrl; unsigned int i; unsigned int idx; @@ -434,6 +454,7 @@ mlx5_stats_reset(struct rte_eth_dev *dev) (*priv->txqs)[i]->stats = (struct mlx5_txq_stats){ .idx = idx }; } + mlx5_read_ib_stat(priv, "out_of_buffer", &stats_ctrl->imissed_base); #ifndef MLX5_PMD_SOFT_COUNTERS /* FIXME: reset hardware counters. 
*/ #endif @@ -463,7 +484,7 @@ mlx5_xstats_reset(struct rte_eth_dev *dev) return; } if (xstats_ctrl->stats_n != stats_n) - mlx5_xstats_init(dev); + mlx5_stats_init(dev); ret = mlx5_read_dev_counters(dev, counters); if (ret) { DRV_LOG(ERR, "port %u cannot read device counters: %s", diff --git a/dpdk/drivers/net/mlx5/mlx5_trigger.c b/dpdk/drivers/net/mlx5/mlx5_trigger.c index f874657c..2137bdc4 100644 --- a/dpdk/drivers/net/mlx5/mlx5_trigger.c +++ b/dpdk/drivers/net/mlx5/mlx5_trigger.c @@ -181,7 +181,7 @@ mlx5_dev_start(struct rte_eth_dev *dev) dev->data->port_id); goto error; } - mlx5_xstats_init(dev); + mlx5_stats_init(dev); ret = mlx5_traffic_enable(dev); if (ret) { DRV_LOG(DEBUG, "port %u failed to set defaults flows", diff --git a/dpdk/drivers/net/mlx5/mlx5_txq.c b/dpdk/drivers/net/mlx5/mlx5_txq.c index c5a3d1b4..e6020fbc 100644 --- a/dpdk/drivers/net/mlx5/mlx5_txq.c +++ b/dpdk/drivers/net/mlx5/mlx5_txq.c @@ -10,6 +10,7 @@ #include #include #include +#include /* Verbs header. */ /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. 
*/ @@ -359,12 +360,11 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) struct mlx5_txq_ctrl *txq_ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq); struct mlx5_txq_ibv tmpl; - struct mlx5_txq_ibv *txq_ibv; + struct mlx5_txq_ibv *txq_ibv = NULL; union { struct ibv_qp_init_attr_ex init; struct ibv_cq_init_attr_ex cq; struct ibv_qp_attr mod; - struct ibv_cq_ex cq_attr; } attr; unsigned int cqe_n; struct mlx5dv_qp qp = { .comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET }; @@ -523,7 +523,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) rte_atomic32_inc(&txq_ibv->refcnt); if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) { txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset; - DRV_LOG(DEBUG, "port %u: uar_mmap_offset 0x%lx", + DRV_LOG(DEBUG, "port %u: uar_mmap_offset 0x%"PRIx64, dev->data->port_id, txq_ctrl->uar_mmap_offset); } else { DRV_LOG(ERR, @@ -543,6 +543,8 @@ error: claim_zero(mlx5_glue->destroy_cq(tmpl.cq)); if (tmpl.qp) claim_zero(mlx5_glue->destroy_qp(tmpl.qp)); + if (txq_ibv) + rte_free(txq_ibv); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; rte_errno = ret; /* Restore rte_errno. */ return NULL; @@ -636,7 +638,7 @@ mlx5_txq_ibv_verify(struct rte_eth_dev *dev) } /** - * Calcuate the total number of WQEBB for Tx queue. + * Calculate the total number of WQEBB for Tx queue. * * Simplified version of calc_sq_size() in rdma-core. 
* diff --git a/dpdk/drivers/net/mvneta/mvneta_ethdev.c b/dpdk/drivers/net/mvneta/mvneta_ethdev.c index 2d766645..91985688 100644 --- a/dpdk/drivers/net/mvneta/mvneta_ethdev.c +++ b/dpdk/drivers/net/mvneta/mvneta_ethdev.c @@ -706,10 +706,7 @@ mvneta_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->imissed += ppio_stats.rx_discard + ppio_stats.rx_overrun - priv->prev_stats.imissed; - - stats->ierrors = ppio_stats.rx_packets_err + - ppio_stats.rx_errors + - ppio_stats.rx_crc_error - + stats->ierrors = ppio_stats.rx_packets_err - priv->prev_stats.ierrors; stats->oerrors = ppio_stats.tx_errors - priv->prev_stats.oerrors; diff --git a/dpdk/drivers/net/netvsc/hn_ethdev.c b/dpdk/drivers/net/netvsc/hn_ethdev.c index fad209f2..c2cf0afc 100644 --- a/dpdk/drivers/net/netvsc/hn_ethdev.c +++ b/dpdk/drivers/net/netvsc/hn_ethdev.c @@ -234,8 +234,8 @@ static void hn_dev_info_get(struct rte_eth_dev *dev, dev_info->max_mac_addrs = 1; dev_info->hash_key_size = NDIS_HASH_KEYSIZE_TOEPLITZ; - dev_info->flow_type_rss_offloads = - ETH_RSS_IPV4 | ETH_RSS_IPV6 | ETH_RSS_TCP | ETH_RSS_UDP; + dev_info->flow_type_rss_offloads = hv->rss_offloads; + dev_info->reta_size = ETH_RSS_RETA_SIZE_128; dev_info->max_rx_queues = hv->max_queues; dev_info->max_tx_queues = hv->max_queues; @@ -572,9 +572,11 @@ hn_dev_xstats_get(struct rte_eth_dev *dev, continue; stats = (const char *)&txq->stats; - for (t = 0; t < RTE_DIM(hn_stat_strings); t++) - xstats[count++].value = *(const uint64_t *) + for (t = 0; t < RTE_DIM(hn_stat_strings); t++, count++) { + xstats[count].id = count; + xstats[count].value = *(const uint64_t *) (stats + hn_stat_strings[t].offset); + } } for (i = 0; i < dev->data->nb_rx_queues; i++) { @@ -584,12 +586,14 @@ hn_dev_xstats_get(struct rte_eth_dev *dev, continue; stats = (const char *)&rxq->stats; - for (t = 0; t < RTE_DIM(hn_stat_strings); t++) - xstats[count++].value = *(const uint64_t *) + for (t = 0; t < RTE_DIM(hn_stat_strings); t++, count++) { + xstats[count].id = 
count; + xstats[count].value = *(const uint64_t *) (stats + hn_stat_strings[t].offset); + } } - ret = hn_vf_xstats_get(dev, xstats + count, n - count); + ret = hn_vf_xstats_get(dev, xstats, count, n); if (ret < 0) return ret; @@ -733,6 +737,7 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev) hv->port_id = eth_dev->data->port_id; hv->latency = HN_CHAN_LATENCY_NS; hv->max_queues = 1; + rte_spinlock_init(&hv->vf_lock); hv->vf_port = HN_INVALID_PORT; err = hn_parse_args(eth_dev); diff --git a/dpdk/drivers/net/netvsc/hn_var.h b/dpdk/drivers/net/netvsc/hn_var.h index b3156343..d10e164e 100644 --- a/dpdk/drivers/net/netvsc/hn_var.h +++ b/dpdk/drivers/net/netvsc/hn_var.h @@ -235,5 +235,5 @@ int hn_vf_xstats_get_names(struct rte_eth_dev *dev, unsigned int size); int hn_vf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, - unsigned int n); + unsigned int offset, unsigned int n); void hn_vf_xstats_reset(struct rte_eth_dev *dev); diff --git a/dpdk/drivers/net/netvsc/hn_vf.c b/dpdk/drivers/net/netvsc/hn_vf.c index 4127e411..50f92a00 100644 --- a/dpdk/drivers/net/netvsc/hn_vf.c +++ b/dpdk/drivers/net/netvsc/hn_vf.c @@ -500,17 +500,19 @@ int hn_vf_xstats_get_names(struct rte_eth_dev *dev, struct hn_data *hv = dev->data->dev_private; struct rte_eth_dev *vf_dev; int i, count = 0; - char tmp[RTE_ETH_XSTATS_NAME_SIZE]; rte_spinlock_lock(&hv->vf_lock); vf_dev = hn_get_vf_dev(hv); - if (vf_dev && vf_dev->dev_ops->xstats_get_names) - count = vf_dev->dev_ops->xstats_get_names(vf_dev, names, n); + if (vf_dev) + count = rte_eth_xstats_get_names(vf_dev->data->port_id, + names, n); rte_spinlock_unlock(&hv->vf_lock); /* add vf_ prefix to xstat names */ if (names) { for (i = 0; i < count; i++) { + char tmp[RTE_ETH_XSTATS_NAME_SIZE]; + snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name); strlcpy(names[i].name, tmp, sizeof(names[i].name)); } @@ -521,18 +523,26 @@ int hn_vf_xstats_get_names(struct rte_eth_dev *dev, int hn_vf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat 
*xstats, + unsigned int offset, unsigned int n) { struct hn_data *hv = dev->data->dev_private; struct rte_eth_dev *vf_dev; - int count = 0; + int i, count = 0; rte_spinlock_lock(&hv->vf_lock); vf_dev = hn_get_vf_dev(hv); - if (vf_dev && vf_dev->dev_ops->xstats_get) - count = vf_dev->dev_ops->xstats_get(vf_dev, xstats, n); + if (vf_dev) + count = rte_eth_xstats_get(vf_dev->data->port_id, + xstats + offset, n - offset); rte_spinlock_unlock(&hv->vf_lock); + /* Offset id's for VF stats */ + if (count > 0) { + for (i = 0; i < count; i++) + xstats[i + offset].id += offset; + } + return count; } @@ -543,7 +553,7 @@ void hn_vf_xstats_reset(struct rte_eth_dev *dev) rte_spinlock_lock(&hv->vf_lock); vf_dev = hn_get_vf_dev(hv); - if (vf_dev && vf_dev->dev_ops->xstats_reset) - vf_dev->dev_ops->xstats_reset(vf_dev); + if (vf_dev) + rte_eth_xstats_reset(vf_dev->data->port_id); rte_spinlock_unlock(&hv->vf_lock); } diff --git a/dpdk/drivers/net/netvsc/ndis.h b/dpdk/drivers/net/netvsc/ndis.h index 2e7ca99b..d97a397a 100644 --- a/dpdk/drivers/net/netvsc/ndis.h +++ b/dpdk/drivers/net/netvsc/ndis.h @@ -262,17 +262,17 @@ struct ndis_lsov2_offload { struct ndis_ipsecv2_offload { uint32_t ndis_encap; /*NDIS_OFFLOAD_ENCAP_*/ - uint16_t ndis_ip6; - uint16_t ndis_ip4opt; - uint16_t ndis_ip6ext; - uint16_t ndis_ah; - uint16_t ndis_esp; - uint16_t ndis_ah_esp; - uint16_t ndis_xport; - uint16_t ndis_tun; - uint16_t ndis_xport_tun; - uint16_t ndis_lso; - uint16_t ndis_extseq; + uint8_t ndis_ip6; + uint8_t ndis_ip4opt; + uint8_t ndis_ip6ext; + uint8_t ndis_ah; + uint8_t ndis_esp; + uint8_t ndis_ah_esp; + uint8_t ndis_xport; + uint8_t ndis_tun; + uint8_t ndis_xport_tun; + uint8_t ndis_lso; + uint8_t ndis_extseq; uint32_t ndis_udp_esp; uint32_t ndis_auth; uint32_t ndis_crypto; @@ -280,8 +280,8 @@ struct ndis_ipsecv2_offload { }; struct ndis_rsc_offload { - uint16_t ndis_ip4; - uint16_t ndis_ip6; + uint8_t ndis_ip4; + uint8_t ndis_ip6; }; struct ndis_encap_offload { diff --git 
a/dpdk/drivers/net/nfp/meson.build b/dpdk/drivers/net/nfp/meson.build index ba6a22e8..b3c00723 100644 --- a/dpdk/drivers/net/nfp/meson.build +++ b/dpdk/drivers/net/nfp/meson.build @@ -1,7 +1,7 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2018 Intel Corporation -if host_machine.system() != 'linux' +if host_machine.system() != 'linux' or not dpdk_conf.get('RTE_ARCH_64') build = false endif sources = files('nfpcore/nfp_cpp_pcie_ops.c', diff --git a/dpdk/drivers/net/null/rte_eth_null.c b/dpdk/drivers/net/null/rte_eth_null.c index 159c1c1f..da081362 100644 --- a/dpdk/drivers/net/null/rte_eth_null.c +++ b/dpdk/drivers/net/null/rte_eth_null.c @@ -492,8 +492,6 @@ static const struct eth_dev_ops ops = { .rss_hash_conf_get = eth_rss_hash_conf_get }; -static struct rte_vdev_driver pmd_null_drv; - static int eth_dev_null_create(struct rte_vdev_device *dev, unsigned packet_size, diff --git a/dpdk/drivers/net/pcap/rte_eth_pcap.c b/dpdk/drivers/net/pcap/rte_eth_pcap.c index 65bbd7e2..4f3ad2c4 100644 --- a/dpdk/drivers/net/pcap/rte_eth_pcap.c +++ b/dpdk/drivers/net/pcap/rte_eth_pcap.c @@ -45,7 +45,6 @@ #define RTE_PMD_PCAP_MAX_QUEUES 16 static char errbuf[PCAP_ERRBUF_SIZE]; -static unsigned char tx_pcap_data[RTE_ETH_PCAP_SNAPLEN]; static struct timeval start_time; static uint64_t start_cycles; static uint64_t hz; @@ -163,21 +162,6 @@ eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf, return mbuf->nb_segs; } -/* Copy data from mbuf chain to a buffer suitable for writing to a PCAP file. 
*/ -static void -eth_pcap_gather_data(unsigned char *data, struct rte_mbuf *mbuf) -{ - uint16_t data_len = 0; - - while (mbuf) { - rte_memcpy(data + data_len, rte_pktmbuf_mtod(mbuf, void *), - mbuf->data_len); - - data_len += mbuf->data_len; - mbuf = mbuf->next; - } -} - static uint16_t eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) { @@ -188,7 +172,6 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) struct rte_mbuf *mbuf; struct pcap_rx_queue *pcap_q = queue; uint16_t num_rx = 0; - uint16_t buf_size; uint32_t rx_bytes = 0; pcap_t *pcap; @@ -211,11 +194,7 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) if (unlikely(mbuf == NULL)) break; - /* Now get the space available for data in the mbuf */ - buf_size = rte_pktmbuf_data_room_size(pcap_q->mb_pool) - - RTE_PKTMBUF_HEADROOM; - - if (header.caplen <= buf_size) { + if (header.caplen <= rte_pktmbuf_tailroom(mbuf)) { /* pcap packet will fit in the mbuf, can copy it */ rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet, header.caplen); @@ -268,6 +247,8 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) uint32_t tx_bytes = 0; struct pcap_pkthdr header; pcap_dumper_t *dumper; + unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN]; + size_t len; pp = rte_eth_devices[dumper_q->port_id].process_private; dumper = pp->tx_dumper[dumper_q->queue_id]; @@ -279,31 +260,28 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) * dumper */ for (i = 0; i < nb_pkts; i++) { mbuf = bufs[i]; - calculate_timestamp(&header.ts); - header.len = mbuf->pkt_len; - header.caplen = header.len; - - if (likely(mbuf->nb_segs == 1)) { - pcap_dump((u_char *)dumper, &header, - rte_pktmbuf_mtod(mbuf, void*)); - } else { - if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) { - eth_pcap_gather_data(tx_pcap_data, mbuf); - pcap_dump((u_char *)dumper, &header, - tx_pcap_data); - } else { - PMD_LOG(ERR, - "Dropping PCAP packet. 
Size (%d) > max jumbo size (%d).", - mbuf->pkt_len, - ETHER_MAX_JUMBO_FRAME_LEN); - - rte_pktmbuf_free(mbuf); - break; - } + len = rte_pktmbuf_pkt_len(mbuf); + if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) && + len > sizeof(temp_data))) { + PMD_LOG(ERR, + "Dropping multi segment PCAP packet. Size (%zd) > max size (%zd).", + len, sizeof(temp_data)); + rte_pktmbuf_free(mbuf); + continue; } + calculate_timestamp(&header.ts); + header.len = len; + header.caplen = header.len; + /* rte_pktmbuf_read() returns a pointer to the data directly + * in the mbuf (when the mbuf is contiguous) or, otherwise, + * a pointer to temp_data after copying into it. + */ + pcap_dump((u_char *)dumper, &header, + rte_pktmbuf_read(mbuf, 0, len, temp_data)); + num_tx++; - tx_bytes += mbuf->pkt_len; + tx_bytes += len; rte_pktmbuf_free(mbuf); } @@ -317,7 +295,7 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) dumper_q->tx_stat.bytes += tx_bytes; dumper_q->tx_stat.err_pkts += nb_pkts - num_tx; - return num_tx; + return nb_pkts; } /* @@ -334,6 +312,8 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) uint16_t num_tx = 0; uint32_t tx_bytes = 0; pcap_t *pcap; + unsigned char temp_data[RTE_ETH_PCAP_SNAPLEN]; + size_t len; pp = rte_eth_devices[tx_queue->port_id].process_private; pcap = pp->tx_pcap[tx_queue->queue_id]; @@ -343,39 +323,34 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) for (i = 0; i < nb_pkts; i++) { mbuf = bufs[i]; - - if (likely(mbuf->nb_segs == 1)) { - ret = pcap_sendpacket(pcap, - rte_pktmbuf_mtod(mbuf, u_char *), - mbuf->pkt_len); - } else { - if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) { - eth_pcap_gather_data(tx_pcap_data, mbuf); - ret = pcap_sendpacket(pcap, - tx_pcap_data, mbuf->pkt_len); - } else { - PMD_LOG(ERR, - "Dropping PCAP packet. 
Size (%d) > max jumbo size (%d).", - mbuf->pkt_len, - ETHER_MAX_JUMBO_FRAME_LEN); - - rte_pktmbuf_free(mbuf); - break; - } + len = rte_pktmbuf_pkt_len(mbuf); + if (unlikely(!rte_pktmbuf_is_contiguous(mbuf) && + len > sizeof(temp_data))) { + PMD_LOG(ERR, + "Dropping multi segment PCAP packet. Size (%zd) > max size (%zd).", + len, sizeof(temp_data)); + rte_pktmbuf_free(mbuf); + continue; } + /* rte_pktmbuf_read() returns a pointer to the data directly + * in the mbuf (when the mbuf is contiguous) or, otherwise, + * a pointer to temp_data after copying into it. + */ + ret = pcap_sendpacket(pcap, + rte_pktmbuf_read(mbuf, 0, len, temp_data), len); if (unlikely(ret != 0)) break; num_tx++; - tx_bytes += mbuf->pkt_len; + tx_bytes += len; rte_pktmbuf_free(mbuf); } tx_queue->tx_stat.pkts += num_tx; tx_queue->tx_stat.bytes += tx_bytes; - tx_queue->tx_stat.err_pkts += nb_pkts - num_tx; + tx_queue->tx_stat.err_pkts += i - num_tx; - return num_tx; + return i; } /* @@ -908,8 +883,6 @@ select_phy_mac(const char *key __rte_unused, const char *value, return 0; } -static struct rte_vdev_driver pmd_pcap_drv; - static int pmd_init_internals(struct rte_vdev_device *vdev, const unsigned int nb_rx_queues, diff --git a/dpdk/drivers/net/qede/base/bcm_osal.c b/dpdk/drivers/net/qede/base/bcm_osal.c index 693328f1..9915df44 100644 --- a/dpdk/drivers/net/qede/base/bcm_osal.c +++ b/dpdk/drivers/net/qede/base/bcm_osal.c @@ -128,7 +128,7 @@ void *osal_dma_alloc_coherent(struct ecore_dev *p_dev, } OSAL_MEM_ZERO(mz_name, sizeof(*mz_name)); - snprintf(mz_name, sizeof(mz_name) - 1, "%lx", + snprintf(mz_name, sizeof(mz_name), "%lx", (unsigned long)rte_get_timer_cycles()); if (core_id == (unsigned int)LCORE_ID_ANY) core_id = rte_get_master_lcore(); @@ -167,7 +167,7 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev, } OSAL_MEM_ZERO(mz_name, sizeof(*mz_name)); - snprintf(mz_name, sizeof(mz_name) - 1, "%lx", + snprintf(mz_name, sizeof(mz_name), "%lx", (unsigned long)rte_get_timer_cycles()); 
if (core_id == (unsigned int)LCORE_ID_ANY) core_id = rte_get_master_lcore(); diff --git a/dpdk/drivers/net/qede/qede_ethdev.c b/dpdk/drivers/net/qede/qede_ethdev.c index 0b2f305e..c4f5ad1e 100644 --- a/dpdk/drivers/net/qede/qede_ethdev.c +++ b/dpdk/drivers/net/qede/qede_ethdev.c @@ -1423,7 +1423,6 @@ static void qede_poll_sp_sb_cb(void *param) if (rc != 0) { DP_ERR(edev, "Unable to start periodic" " timer rc %d\n", rc); - assert(false && "Unable to start periodic timer"); } } diff --git a/dpdk/drivers/net/qede/qede_filter.c b/dpdk/drivers/net/qede/qede_filter.c index 5e6571ca..0beade6d 100644 --- a/dpdk/drivers/net/qede/qede_filter.c +++ b/dpdk/drivers/net/qede/qede_filter.c @@ -290,7 +290,7 @@ qede_config_arfs_filter(struct rte_eth_dev *eth_dev, /* soft_id could have been used as memzone string, but soft_id is * not currently used so it has no significance. */ - snprintf(mz_name, sizeof(mz_name) - 1, "%lx", + snprintf(mz_name, sizeof(mz_name), "%lx", (unsigned long)rte_get_timer_cycles()); mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN, SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE); diff --git a/dpdk/drivers/net/qede/qede_rxtx.c b/dpdk/drivers/net/qede/qede_rxtx.c index 27bac099..03f7785b 100644 --- a/dpdk/drivers/net/qede/qede_rxtx.c +++ b/dpdk/drivers/net/qede/qede_rxtx.c @@ -1796,17 +1796,17 @@ qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts, ol_flags = m->ol_flags; if (ol_flags & PKT_TX_TCP_SEG) { if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) { - rte_errno = -EINVAL; + rte_errno = EINVAL; break; } /* TBD: confirm its ~9700B for both ? 
*/ if (m->tso_segsz > ETH_TX_MAX_NON_LSO_PKT_LEN) { - rte_errno = -EINVAL; + rte_errno = EINVAL; break; } } else { if (m->nb_segs >= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) { - rte_errno = -EINVAL; + rte_errno = EINVAL; break; } } @@ -1823,14 +1823,14 @@ qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts, continue; } - rte_errno = -ENOTSUP; + rte_errno = ENOTSUP; break; } #ifdef RTE_LIBRTE_ETHDEV_DEBUG ret = rte_validate_tx_offload(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; break; } #endif diff --git a/dpdk/drivers/net/ring/rte_eth_ring.c b/dpdk/drivers/net/ring/rte_eth_ring.c index c438da51..452114d0 100644 --- a/dpdk/drivers/net/ring/rte_eth_ring.c +++ b/dpdk/drivers/net/ring/rte_eth_ring.c @@ -249,8 +249,6 @@ static const struct eth_dev_ops ops = { .mac_addr_add = eth_mac_addr_add, }; -static struct rte_vdev_driver pmd_ring_drv; - static int do_eth_dev_ring_create(const char *name, struct rte_ring * const rx_queues[], diff --git a/dpdk/drivers/net/sfc/base/ef10_impl.h b/dpdk/drivers/net/sfc/base/ef10_impl.h index f971063a..6f5d0f9a 100644 --- a/dpdk/drivers/net/sfc/base/ef10_impl.h +++ b/dpdk/drivers/net/sfc/base/ef10_impl.h @@ -1243,10 +1243,11 @@ efx_mcdi_set_nic_global( #define EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE 8 /* Minimum space for packet in packed stream mode */ -#define EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE \ - P2ROUNDUP(EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE + \ - EFX_MAC_PDU_MIN + \ - EFX_RX_PACKED_STREAM_ALIGNMENT, \ +#define EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE \ + EFX_P2ROUNDUP(size_t, \ + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE + \ + EFX_MAC_PDU_MIN + \ + EFX_RX_PACKED_STREAM_ALIGNMENT, \ EFX_RX_PACKED_STREAM_ALIGNMENT) /* Maximum number of credits */ diff --git a/dpdk/drivers/net/sfc/base/ef10_nic.c b/dpdk/drivers/net/sfc/base/ef10_nic.c index 50e23b7d..60b0ba14 100644 --- a/dpdk/drivers/net/sfc/base/ef10_nic.c +++ b/dpdk/drivers/net/sfc/base/ef10_nic.c @@ -1748,6 +1748,56 @@ fail1: return (rc); } +static __checkReturn 
efx_rc_t +ef10_set_workaround_bug26807( + __in efx_nic_t *enp) +{ + efx_nic_cfg_t *encp = &(enp->en_nic_cfg); + uint32_t flags; + efx_rc_t rc; + + /* + * If the bug26807 workaround is enabled, then firmware has enabled + * support for chained multicast filters. Firmware will reset (FLR) + * functions which have filters in the hardware filter table when the + * workaround is enabled/disabled. + * + * We must recheck if the workaround is enabled after inserting the + * first hardware filter, in case it has been changed since this check. + */ + rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG26807, + B_TRUE, &flags); + if (rc == 0) { + encp->enc_bug26807_workaround = B_TRUE; + if (flags & (1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN)) { + /* + * Other functions had installed filters before the + * workaround was enabled, and they have been reset + * by firmware. + */ + EFSYS_PROBE(bug26807_workaround_flr_done); + /* FIXME: bump MC warm boot count ? */ + } + } else if (rc == EACCES) { + /* + * Unprivileged functions cannot enable the workaround in older + * firmware. 
+ */ + encp->enc_bug26807_workaround = B_FALSE; + } else if ((rc == ENOTSUP) || (rc == ENOENT)) { + encp->enc_bug26807_workaround = B_FALSE; + } else { + goto fail1; + } + + return (0); + +fail1: + EFSYS_PROBE1(fail1, efx_rc_t, rc); + + return (rc); +} + static __checkReturn efx_rc_t ef10_nic_board_cfg( __in efx_nic_t *enp) @@ -1882,7 +1932,7 @@ ef10_nic_board_cfg( encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET; encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET; - encp->enc_buftbl_limit = 0xFFFFFFFF; + encp->enc_buftbl_limit = UINT32_MAX; /* Get interrupt vector limits */ if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) { @@ -1906,13 +1956,18 @@ ef10_nic_board_cfg( goto fail10; encp->enc_privilege_mask = mask; + if ((rc = ef10_set_workaround_bug26807(enp)) != 0) + goto fail11; + /* Get remaining controller-specific board config */ if ((rc = enop->eno_board_cfg(enp)) != 0) if (rc != EACCES) - goto fail11; + goto fail12; return (0); +fail12: + EFSYS_PROBE(fail12); fail11: EFSYS_PROBE(fail11); fail10: diff --git a/dpdk/drivers/net/sfc/base/ef10_nvram.c b/dpdk/drivers/net/sfc/base/ef10_nvram.c index 8d1b64f2..2f334fe0 100644 --- a/dpdk/drivers/net/sfc/base/ef10_nvram.c +++ b/dpdk/drivers/net/sfc/base/ef10_nvram.c @@ -367,7 +367,8 @@ tlv_write( if (len > 0) { ptr[(len - 1) / sizeof (uint32_t)] = 0; memcpy(ptr, data, len); - ptr += P2ROUNDUP(len, sizeof (uint32_t)) / sizeof (*ptr); + ptr += EFX_P2ROUNDUP(uint32_t, len, + sizeof (uint32_t)) / sizeof (*ptr); } return (ptr); diff --git a/dpdk/drivers/net/sfc/base/ef10_rx.c b/dpdk/drivers/net/sfc/base/ef10_rx.c index 3c8f4f3b..d182ec80 100644 --- a/dpdk/drivers/net/sfc/base/ef10_rx.c +++ b/dpdk/drivers/net/sfc/base/ef10_rx.c @@ -842,7 +842,7 @@ ef10_rx_qpush( efx_dword_t dword; /* Hardware has alignment restriction for WPTR */ - wptr = P2ALIGN(added, EF10_RX_WPTR_ALIGN); + wptr = EFX_P2ALIGN(unsigned int, added, EF10_RX_WPTR_ALIGN); if (pushed == wptr) return; @@ -930,8 +930,9 @@ ef10_rx_qps_packet_info( *lengthp = 
EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_ORIG_LEN); buf_len = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_CAP_LEN); - buf_len = P2ROUNDUP(buf_len + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE, - EFX_RX_PACKED_STREAM_ALIGNMENT); + buf_len = EFX_P2ROUNDUP(uint16_t, + buf_len + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE, + EFX_RX_PACKED_STREAM_ALIGNMENT); *next_offsetp = current_offset + buf_len + EFX_RX_PACKED_STREAM_ALIGNMENT; @@ -1095,12 +1096,12 @@ ef10_rx_qcreate( rc = ENOTSUP; goto fail7; } - if (!IS_P2ALIGNED(es_max_dma_len, + if (!EFX_IS_P2ALIGNED(uint32_t, es_max_dma_len, EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT)) { rc = EINVAL; goto fail8; } - if (!IS_P2ALIGNED(es_buf_stride, + if (!EFX_IS_P2ALIGNED(uint32_t, es_buf_stride, EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT)) { rc = EINVAL; goto fail9; diff --git a/dpdk/drivers/net/sfc/base/efx.h b/dpdk/drivers/net/sfc/base/efx.h index 2e847b6c..444f6d1d 100644 --- a/dpdk/drivers/net/sfc/base/efx.h +++ b/dpdk/drivers/net/sfc/base/efx.h @@ -28,6 +28,18 @@ extern "C" { /* The macro expands divider twice */ #define EFX_DIV_ROUND_UP(_n, _d) (((_n) + (_d) - 1) / (_d)) +/* Round value up to the nearest power of two. */ +#define EFX_P2ROUNDUP(_type, _value, _align) \ + (-(-(_type)(_value) & -(_type)(_align))) + +/* Align value down to the nearest power of two. */ +#define EFX_P2ALIGN(_type, _value, _align) \ + ((_type)(_value) & -(_type)(_align)) + +/* Test if value is power of 2 aligned. 
*/ +#define EFX_IS_P2ALIGNED(_type, _value, _align) \ + ((((_type)(_value)) & ((_type)(_align) - 1)) == 0) + /* Return codes */ typedef __success(return == 0) int efx_rc_t; @@ -494,10 +506,10 @@ typedef enum efx_link_mode_e { + /* bug16011 */ 16) \ #define EFX_MAC_PDU(_sdu) \ - P2ROUNDUP((_sdu) + EFX_MAC_PDU_ADJUSTMENT, 8) + EFX_P2ROUNDUP(size_t, (_sdu) + EFX_MAC_PDU_ADJUSTMENT, 8) /* - * Due to the P2ROUNDUP in EFX_MAC_PDU(), EFX_MAC_SDU_FROM_PDU() may give + * Due to the EFX_P2ROUNDUP in EFX_MAC_PDU(), EFX_MAC_SDU_FROM_PDU() may give * the SDU rounded up slightly. */ #define EFX_MAC_SDU_FROM_PDU(_pdu) ((_pdu) - EFX_MAC_PDU_ADJUSTMENT) @@ -583,8 +595,9 @@ efx_mac_stat_name( #define EFX_MAC_STATS_MASK_BITS_PER_PAGE (8 * sizeof (uint32_t)) -#define EFX_MAC_STATS_MASK_NPAGES \ - (P2ROUNDUP(EFX_MAC_NSTATS, EFX_MAC_STATS_MASK_BITS_PER_PAGE) / \ +#define EFX_MAC_STATS_MASK_NPAGES \ + (EFX_P2ROUNDUP(uint32_t, EFX_MAC_NSTATS, \ + EFX_MAC_STATS_MASK_BITS_PER_PAGE) / \ EFX_MAC_STATS_MASK_BITS_PER_PAGE) /* diff --git a/dpdk/drivers/net/sfc/base/efx_mcdi.h b/dpdk/drivers/net/sfc/base/efx_mcdi.h index ddf91c11..a4839485 100644 --- a/dpdk/drivers/net/sfc/base/efx_mcdi.h +++ b/dpdk/drivers/net/sfc/base/efx_mcdi.h @@ -384,6 +384,11 @@ efx_mcdi_phy_module_get_info( (((mask) & (MC_CMD_PRIVILEGE_MASK_IN_GRP_ ## priv)) == \ (MC_CMD_PRIVILEGE_MASK_IN_GRP_ ## priv)) +#define EFX_MCDI_BUF_SIZE(_in_len, _out_len) \ + EFX_P2ROUNDUP(size_t, \ + MAX(MAX(_in_len, _out_len), (2 * sizeof (efx_dword_t))),\ + sizeof (efx_dword_t)) + /* * The buffer size must be a multiple of dword to ensure that MCDI works * properly with Siena based boards (which use on-chip buffer). Also, it @@ -391,9 +396,7 @@ efx_mcdi_phy_module_get_info( * error responses if the request/response buffer sizes are smaller. 
*/ #define EFX_MCDI_DECLARE_BUF(_name, _in_len, _out_len) \ - uint8_t _name[P2ROUNDUP(MAX(MAX(_in_len, _out_len), \ - (2 * sizeof (efx_dword_t))), \ - sizeof (efx_dword_t))] = {0} + uint8_t _name[EFX_MCDI_BUF_SIZE(_in_len, _out_len)] = {0} typedef enum efx_mcdi_feature_id_e { EFX_MCDI_FEATURE_FW_UPDATE = 0, diff --git a/dpdk/drivers/net/sfc/base/efx_tx.c b/dpdk/drivers/net/sfc/base/efx_tx.c index 9fa9e2ed..2a3f1ce1 100644 --- a/dpdk/drivers/net/sfc/base/efx_tx.c +++ b/dpdk/drivers/net/sfc/base/efx_tx.c @@ -768,7 +768,7 @@ siena_tx_qpost( * Fragments must not span 4k boundaries. * Here it is a stricter requirement than the maximum length. */ - EFSYS_ASSERT(P2ROUNDUP(start + 1, + EFSYS_ASSERT(EFX_P2ROUNDUP(efsys_dma_addr_t, start + 1, etp->et_enp->en_nic_cfg.enc_tx_dma_desc_boundary) >= end); EFX_TX_DESC(etp, start, size, ebp->eb_eop, added); @@ -1038,7 +1038,7 @@ siena_tx_qdesc_dma_create( * Fragments must not span 4k boundaries. * Here it is a stricter requirement than the maximum length. */ - EFSYS_ASSERT(P2ROUNDUP(addr + 1, + EFSYS_ASSERT(EFX_P2ROUNDUP(efsys_dma_addr_t, addr + 1, etp->et_enp->en_nic_cfg.enc_tx_dma_desc_boundary) >= addr + size); EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index, diff --git a/dpdk/drivers/net/sfc/base/hunt_nic.c b/dpdk/drivers/net/sfc/base/hunt_nic.c index ca30e90f..abff18eb 100644 --- a/dpdk/drivers/net/sfc/base/hunt_nic.c +++ b/dpdk/drivers/net/sfc/base/hunt_nic.c @@ -72,7 +72,6 @@ hunt_board_cfg( { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_port_t *epp = &(enp->en_port); - uint32_t flags; uint32_t sysclk, dpcpu_clk; uint32_t bandwidth; efx_rc_t rc; @@ -130,43 +129,9 @@ hunt_board_cfg( encp->enc_bug41750_workaround = B_TRUE; } - /* - * If the bug26807 workaround is enabled, then firmware has enabled - * support for chained multicast filters. Firmware will reset (FLR) - * functions which have filters in the hardware filter table when the - * workaround is enabled/disabled. 
- * - * We must recheck if the workaround is enabled after inserting the - * first hardware filter, in case it has been changed since this check. - */ - rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG26807, - B_TRUE, &flags); - if (rc == 0) { - encp->enc_bug26807_workaround = B_TRUE; - if (flags & (1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN)) { - /* - * Other functions had installed filters before the - * workaround was enabled, and they have been reset - * by firmware. - */ - EFSYS_PROBE(bug26807_workaround_flr_done); - /* FIXME: bump MC warm boot count ? */ - } - } else if (rc == EACCES) { - /* - * Unprivileged functions cannot enable the workaround in older - * firmware. - */ - encp->enc_bug26807_workaround = B_FALSE; - } else if ((rc == ENOTSUP) || (rc == ENOENT)) { - encp->enc_bug26807_workaround = B_FALSE; - } else { - goto fail3; - } - /* Get clock frequencies (in MHz). */ if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0) - goto fail4; + goto fail3; /* * The Huntington timer quantum is 1536 sysclk cycles, documented for @@ -202,7 +167,7 @@ hunt_board_cfg( encp->enc_piobuf_min_alloc_size = HUNT_MIN_PIO_ALLOC_SIZE; if ((rc = hunt_nic_get_required_pcie_bandwidth(enp, &bandwidth)) != 0) - goto fail5; + goto fail4; encp->enc_required_pcie_bandwidth_mbps = bandwidth; /* All Huntington devices have a PCIe Gen3, 8 lane connector */ @@ -210,8 +175,6 @@ hunt_board_cfg( return (0); -fail5: - EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); fail3: diff --git a/dpdk/drivers/net/sfc/base/mcdi_mon.c b/dpdk/drivers/net/sfc/base/mcdi_mon.c index b53de0d6..d0247dc4 100644 --- a/dpdk/drivers/net/sfc/base/mcdi_mon.c +++ b/dpdk/drivers/net/sfc/base/mcdi_mon.c @@ -73,7 +73,8 @@ mcdi_mon_decode_stats( /* This sensor is one of the page boundary bits. 
*/ } - if (~(sensor_mask[page]) & (1U << sensor)) + if (~(sensor_mask[page]) & + (1U << (sensor % (sizeof (sensor_mask[page]) * 8)))) continue; /* This sensor not in DMA buffer */ diff --git a/dpdk/drivers/net/sfc/base/medford2_nic.c b/dpdk/drivers/net/sfc/base/medford2_nic.c index 6bc1e87c..0012350b 100644 --- a/dpdk/drivers/net/sfc/base/medford2_nic.c +++ b/dpdk/drivers/net/sfc/base/medford2_nic.c @@ -69,9 +69,6 @@ medford2_board_cfg( encp->enc_bug41750_workaround = B_TRUE; } - /* Chained multicast is always enabled on Medford2 */ - encp->enc_bug26807_workaround = B_TRUE; - /* * If the bug61265 workaround is enabled, then interrupt holdoff timers * cannot be controlled by timer table writes, so MCDI must be used diff --git a/dpdk/drivers/net/sfc/base/medford_nic.c b/dpdk/drivers/net/sfc/base/medford_nic.c index bfe01ca9..be081446 100644 --- a/dpdk/drivers/net/sfc/base/medford_nic.c +++ b/dpdk/drivers/net/sfc/base/medford_nic.c @@ -67,9 +67,6 @@ medford_board_cfg( encp->enc_bug41750_workaround = B_TRUE; } - /* Chained multicast is always enabled on Medford */ - encp->enc_bug26807_workaround = B_TRUE; - /* * If the bug61265 workaround is enabled, then interrupt holdoff timers * cannot be controlled by timer table writes, so MCDI must be used diff --git a/dpdk/drivers/net/sfc/efsys.h b/dpdk/drivers/net/sfc/efsys.h index f7bcc74d..2bfa29e7 100644 --- a/dpdk/drivers/net/sfc/efsys.h +++ b/dpdk/drivers/net/sfc/efsys.h @@ -69,21 +69,6 @@ typedef bool boolean_t; #define MIN(v1, v2) ((v1) < (v2) ? 
(v1) : (v2)) #endif -/* There are macros for alignment in DPDK, but we need to make a proper - * correspondence here, if we want to re-use them at all - */ -#ifndef IS_P2ALIGNED -#define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0) -#endif - -#ifndef P2ROUNDUP -#define P2ROUNDUP(x, align) (-(-(x) & -(align))) -#endif - -#ifndef P2ALIGN -#define P2ALIGN(_x, _a) ((_x) & -(_a)) -#endif - #ifndef ISP2 #define ISP2(x) rte_is_power_of_2(x) #endif @@ -235,7 +220,8 @@ typedef struct efsys_mem_s { volatile uint32_t *_addr; \ \ _NOTE(CONSTANTCONDITION); \ - SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_dword_t))); \ \ _addr = (volatile uint32_t *)(_base + (_offset)); \ (_edp)->ed_u32[0] = _addr[0]; \ @@ -252,7 +238,8 @@ typedef struct efsys_mem_s { volatile uint64_t *_addr; \ \ _NOTE(CONSTANTCONDITION); \ - SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_qword_t))); \ \ _addr = (volatile uint64_t *)(_base + (_offset)); \ (_eqp)->eq_u64[0] = _addr[0]; \ @@ -270,7 +257,8 @@ typedef struct efsys_mem_s { volatile __m128i *_addr; \ \ _NOTE(CONSTANTCONDITION); \ - SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_oword_t))); \ \ _addr = (volatile __m128i *)(_base + (_offset)); \ (_eop)->eo_u128[0] = _addr[0]; \ @@ -291,7 +279,8 @@ typedef struct efsys_mem_s { volatile uint32_t *_addr; \ \ _NOTE(CONSTANTCONDITION); \ - SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_dword_t))); \ \ EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \ uint32_t, (_edp)->ed_u32[0]); \ @@ -308,7 +297,8 @@ typedef struct efsys_mem_s { volatile uint64_t *_addr; \ \ _NOTE(CONSTANTCONDITION); \ - SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ 
+ sizeof(efx_qword_t))); \ \ EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \ uint32_t, (_eqp)->eq_u32[1], \ @@ -326,7 +316,8 @@ typedef struct efsys_mem_s { volatile __m128i *_addr; \ \ _NOTE(CONSTANTCONDITION); \ - SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_oword_t))); \ \ \ EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \ @@ -391,7 +382,8 @@ typedef struct efsys_bar_s { volatile uint32_t *_addr; \ \ _NOTE(CONSTANTCONDITION); \ - SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_dword_t))); \ _NOTE(CONSTANTCONDITION); \ if (_lock) \ SFC_BAR_LOCK(_esbp); \ @@ -415,7 +407,8 @@ typedef struct efsys_bar_s { volatile uint64_t *_addr; \ \ _NOTE(CONSTANTCONDITION); \ - SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_qword_t))); \ \ SFC_BAR_LOCK(_esbp); \ \ @@ -437,7 +430,8 @@ typedef struct efsys_bar_s { volatile __m128i *_addr; \ \ _NOTE(CONSTANTCONDITION); \ - SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_oword_t))); \ \ _NOTE(CONSTANTCONDITION); \ if (_lock) \ @@ -467,7 +461,8 @@ typedef struct efsys_bar_s { volatile uint32_t *_addr; \ \ _NOTE(CONSTANTCONDITION); \ - SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_dword_t))); \ \ _NOTE(CONSTANTCONDITION); \ if (_lock) \ @@ -492,7 +487,8 @@ typedef struct efsys_bar_s { volatile uint64_t *_addr; \ \ _NOTE(CONSTANTCONDITION); \ - SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \ + SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_qword_t))); \ \ SFC_BAR_LOCK(_esbp); \ \ @@ -526,7 +522,8 @@ typedef struct efsys_bar_s { volatile __m128i *_addr; \ \ _NOTE(CONSTANTCONDITION); \ - SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \ + 
SFC_ASSERT(EFX_IS_P2ALIGNED(size_t, _offset, \ + sizeof(efx_oword_t))); \ \ _NOTE(CONSTANTCONDITION); \ if (_lock) \ diff --git a/dpdk/drivers/net/sfc/meson.build b/dpdk/drivers/net/sfc/meson.build index 2d34e869..e6756099 100644 --- a/dpdk/drivers/net/sfc/meson.build +++ b/dpdk/drivers/net/sfc/meson.build @@ -6,7 +6,7 @@ # This software was jointly developed between OKTET Labs (under contract # for Solarflare) and Solarflare Communications, Inc. -if arch_subdir != 'x86' or cc.sizeof('void *') == 4 +if arch_subdir != 'x86' or not dpdk_conf.get('RTE_ARCH_64') build = false endif diff --git a/dpdk/drivers/net/sfc/sfc_ethdev.c b/dpdk/drivers/net/sfc/sfc_ethdev.c index 052d38cd..8c7d2243 100644 --- a/dpdk/drivers/net/sfc/sfc_ethdev.c +++ b/dpdk/drivers/net/sfc/sfc_ethdev.c @@ -906,7 +906,7 @@ sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) if (pdu > EFX_MAC_PDU_MAX) { sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)", (unsigned int)mtu, (unsigned int)pdu, - EFX_MAC_PDU_MAX); + (unsigned int)EFX_MAC_PDU_MAX); goto fail_inval; } @@ -2091,6 +2091,8 @@ sfc_eth_dev_uninit(struct rte_eth_dev *dev) return 0; } + sfc_dev_close(dev); + sa = dev->data->dev_private; sfc_log_init(sa, "entry"); diff --git a/dpdk/drivers/net/sfc/sfc_rx.c b/dpdk/drivers/net/sfc/sfc_rx.c index a78d35a2..630f6b45 100644 --- a/dpdk/drivers/net/sfc/sfc_rx.c +++ b/dpdk/drivers/net/sfc/sfc_rx.c @@ -952,7 +952,7 @@ sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool) * Start is aligned the same or better than end, * just align length. 
*/ - buf_size = P2ALIGN(buf_size, nic_align_end); + buf_size = EFX_P2ALIGN(uint32_t, buf_size, nic_align_end); } return buf_size; diff --git a/dpdk/drivers/net/tap/rte_eth_tap.c b/dpdk/drivers/net/tap/rte_eth_tap.c index 86787368..37bd2501 100644 --- a/dpdk/drivers/net/tap/rte_eth_tap.c +++ b/dpdk/drivers/net/tap/rte_eth_tap.c @@ -70,8 +70,6 @@ #define TAP_IOV_DEFAULT_MAX 1024 static int tap_devices_count; -static struct rte_vdev_driver pmd_tap_drv; -static struct rte_vdev_driver pmd_tun_drv; static const char *valid_arguments[] = { ETH_TAP_IFACE_ARG, diff --git a/dpdk/drivers/net/thunderx/nicvf_ethdev.c b/dpdk/drivers/net/thunderx/nicvf_ethdev.c index 879d8899..ae5a33e3 100644 --- a/dpdk/drivers/net/thunderx/nicvf_ethdev.c +++ b/dpdk/drivers/net/thunderx/nicvf_ethdev.c @@ -2083,6 +2083,16 @@ kvlist_free: return ret; } static int +nicvf_eth_dev_uninit(struct rte_eth_dev *dev) +{ + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + nicvf_dev_close(dev); + + return 0; +} +static int nicvf_eth_dev_init(struct rte_eth_dev *eth_dev) { int ret; @@ -2205,6 +2215,7 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev) malloc_fail: rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; alarm_fail: nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev); fail: @@ -2254,7 +2265,7 @@ static int nicvf_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev) { - return rte_eth_dev_pci_generic_remove(pci_dev, NULL); + return rte_eth_dev_pci_generic_remove(pci_dev, nicvf_eth_dev_uninit); } static struct rte_pci_driver rte_nicvf_pmd = { diff --git a/dpdk/drivers/net/vdev_netvsc/vdev_netvsc.c b/dpdk/drivers/net/vdev_netvsc/vdev_netvsc.c index a5fd64e0..a865a828 100644 --- a/dpdk/drivers/net/vdev_netvsc/vdev_netvsc.c +++ b/dpdk/drivers/net/vdev_netvsc/vdev_netvsc.c @@ -333,7 +333,7 @@ vdev_netvsc_sysfs_readlink(char *buf, size_t size, const char *if_name, char 
in[RTE_MAX(sizeof(ctx->yield), 256u)]; int ret; - ret = snprintf(in, sizeof(in) - 1, "/sys/class/net/%s/%s", + ret = snprintf(in, sizeof(in), "/sys/class/net/%s/%s", if_name, relpath); if (ret == -1 || (size_t)ret >= sizeof(in)) return -ENOBUFS; @@ -636,7 +636,7 @@ vdev_netvsc_netvsc_probe(const struct if_nameindex *iface, ctx->devname, ctx->devargs); vdev_netvsc_foreach_iface(vdev_netvsc_device_probe, 0, ctx); ret = rte_eal_hotplug_add("vdev", ctx->devname, ctx->devargs); - if (ret) + if (ret < 0) goto error; LIST_INSERT_HEAD(&vdev_netvsc_ctx_list, ctx, entry); ++vdev_netvsc_ctx_count; diff --git a/dpdk/drivers/net/vhost/rte_eth_vhost.c b/dpdk/drivers/net/vhost/rte_eth_vhost.c index b2cda048..ad18b1bd 100644 --- a/dpdk/drivers/net/vhost/rte_eth_vhost.c +++ b/dpdk/drivers/net/vhost/rte_eth_vhost.c @@ -1196,8 +1196,6 @@ static const struct eth_dev_ops ops = { .rx_queue_intr_disable = eth_rxq_intr_disable, }; -static struct rte_vdev_driver pmd_vhost_drv; - static int eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name, int16_t queues, const unsigned int numa_node, uint64_t flags) diff --git a/dpdk/drivers/net/virtio/virtio_ethdev.c b/dpdk/drivers/net/virtio/virtio_ethdev.c index f938b7ce..8c54edc2 100644 --- a/dpdk/drivers/net/virtio/virtio_ethdev.c +++ b/dpdk/drivers/net/virtio/virtio_ethdev.c @@ -1341,6 +1341,7 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev) { struct virtio_hw *hw = eth_dev->data->dev_private; + eth_dev->tx_pkt_prepare = virtio_xmit_pkts_prepare; if (hw->use_simple_rx) { PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u", eth_dev->data->port_id); @@ -1593,6 +1594,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) if (eth_dev->data->dev_conf.intr_conf.rxq) { if (virtio_configure_intr(eth_dev) < 0) { PMD_INIT_LOG(ERR, "failed to configure interrupt"); + virtio_free_queues(hw); return -1; } } @@ -1663,7 +1665,14 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev) struct virtio_hw *hw = 
eth_dev->data->dev_private; int ret; - RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr_mrg_rxbuf)); + if (sizeof(struct virtio_net_hdr_mrg_rxbuf) > RTE_PKTMBUF_HEADROOM) { + PMD_INIT_LOG(ERR, + "Not sufficient headroom required = %d, avail = %d", + (int)sizeof(struct virtio_net_hdr_mrg_rxbuf), + RTE_PKTMBUF_HEADROOM); + + return -1; + } eth_dev->dev_ops = &virtio_eth_dev_ops; @@ -1696,17 +1705,23 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev) if (!hw->virtio_user_dev) { ret = vtpci_init(RTE_ETH_DEV_TO_PCI(eth_dev), hw); if (ret) - goto out; + goto err_vtpci_init; } /* reset device and negotiate default features */ ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES); if (ret < 0) - goto out; + goto err_virtio_init; return 0; -out: +err_virtio_init: + if (!hw->virtio_user_dev) { + rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev)); + if (!hw->modern) + rte_pci_ioport_unmap(VTPCI_IO(hw)); + } +err_vtpci_init: rte_free(eth_dev->data->mac_addrs); eth_dev->data->mac_addrs = NULL; return ret; @@ -1715,6 +1730,8 @@ out: static int eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev) { + struct virtio_hw *hw = eth_dev->data->dev_private; + PMD_INIT_FUNC_TRACE(); if (rte_eal_process_type() == RTE_PROC_SECONDARY) @@ -1727,8 +1744,11 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev) eth_dev->tx_pkt_burst = NULL; eth_dev->rx_pkt_burst = NULL; - if (eth_dev->device) + if (eth_dev->device) { rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev)); + if (!hw->modern) + rte_pci_ioport_unmap(VTPCI_IO(hw)); + } PMD_INIT_LOG(DEBUG, "dev_uninit completed"); diff --git a/dpdk/drivers/net/virtio/virtio_ethdev.h b/dpdk/drivers/net/virtio/virtio_ethdev.h index 39a9f7b7..8eb86607 100644 --- a/dpdk/drivers/net/virtio/virtio_ethdev.h +++ b/dpdk/drivers/net/virtio/virtio_ethdev.h @@ -82,6 +82,9 @@ uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t virtio_recv_mergeable_pkts_inorder(void *rx_queue, struct rte_mbuf 
**rx_pkts, uint16_t nb_pkts); +uint16_t virtio_xmit_pkts_prepare(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); diff --git a/dpdk/drivers/net/virtio/virtio_pci.c b/dpdk/drivers/net/virtio/virtio_pci.c index c8883c32..6e984ea9 100644 --- a/dpdk/drivers/net/virtio/virtio_pci.c +++ b/dpdk/drivers/net/virtio/virtio_pci.c @@ -678,6 +678,7 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw) PMD_INIT_LOG(INFO, "trying with legacy virtio pci."); if (rte_pci_ioport_map(dev, 0, VTPCI_IO(hw)) < 0) { + rte_pci_unmap_device(dev); if (dev->kdrv == RTE_KDRV_UNKNOWN && (!dev->device.devargs || dev->device.devargs->bus != diff --git a/dpdk/drivers/net/virtio/virtio_rxtx.c b/dpdk/drivers/net/virtio/virtio_rxtx.c index 7f7562dd..a02e1207 100644 --- a/dpdk/drivers/net/virtio/virtio_rxtx.c +++ b/dpdk/drivers/net/virtio/virtio_rxtx.c @@ -375,7 +375,6 @@ virtqueue_xmit_offload(struct virtio_net_hdr *hdr, /* TCP Segmentation Offload */ if (cookie->ol_flags & PKT_TX_TCP_SEG) { - virtio_tso_fix_cksum(cookie); hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ? 
VIRTIO_NET_HDR_GSO_TCPV6 : VIRTIO_NET_HDR_GSO_TCPV4; @@ -573,11 +572,6 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, rxvq = &vq->rxq; rxvq->queue_id = queue_idx; rxvq->mpool = mp; - if (rxvq->mpool == NULL) { - rte_exit(EXIT_FAILURE, - "Cannot allocate mbufs for rx virtqueue"); - } - dev->data->rx_queues[queue_idx] = rxvq; return 0; @@ -989,7 +983,7 @@ virtio_recv_mergeable_pkts_inorder(void *rx_queue, struct virtqueue *vq = rxvq->vq; struct virtio_hw *hw = vq->hw; struct rte_mbuf *rxm; - struct rte_mbuf *prev; + struct rte_mbuf *prev = NULL; uint16_t nb_used, num, nb_rx; uint32_t len[VIRTIO_MBUF_BURST_SZ]; struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ]; @@ -1077,7 +1071,6 @@ virtio_recv_mergeable_pkts_inorder(void *rx_queue, rxm->data_len = (uint16_t)(len[i]); rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]); - rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]); if (prev) prev->next = rxm; @@ -1097,7 +1090,6 @@ virtio_recv_mergeable_pkts_inorder(void *rx_queue, uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res, VIRTIO_MBUF_BURST_SZ); - prev = rcv_pkts[nb_rx]; if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) { virtio_rmb(); num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, @@ -1114,7 +1106,6 @@ virtio_recv_mergeable_pkts_inorder(void *rx_queue, prev->next = rxm; prev = rxm; rx_pkts[nb_rx]->pkt_len += len[extra_idx]; - rx_pkts[nb_rx]->data_len += len[extra_idx]; extra_idx += 1; }; seg_res -= rcv_cnt; @@ -1126,7 +1117,7 @@ virtio_recv_mergeable_pkts_inorder(void *rx_queue, } else { PMD_RX_LOG(ERR, "No enough segments for packet."); - virtio_discard_rxbuf_inorder(vq, prev); + rte_pktmbuf_free(rx_pkts[nb_rx]); rxvq->stats.errors++; break; } @@ -1340,6 +1331,51 @@ virtio_recv_mergeable_pkts(void *rx_queue, return nb_rx; } +uint16_t +virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx; + int error; + + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + struct rte_mbuf *m = tx_pkts[nb_tx]; + +#ifdef 
RTE_LIBRTE_ETHDEV_DEBUG + error = rte_validate_tx_offload(m); + if (unlikely(error)) { + rte_errno = -error; + break; + } +#endif + + /* Do VLAN tag insertion */ + if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) { + error = rte_vlan_insert(&m); + /* rte_vlan_insert() may change pointer + * even in the case of failure + */ + tx_pkts[nb_tx] = m; + + if (unlikely(error)) { + rte_errno = -error; + break; + } + } + + error = rte_net_intel_cksum_prepare(m); + if (unlikely(error)) { + rte_errno = -error; + break; + } + + if (m->ol_flags & PKT_TX_TCP_SEG) + virtio_tso_fix_cksum(m); + } + + return nb_tx; +} + uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { @@ -1348,7 +1384,6 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) struct virtio_hw *hw = vq->hw; uint16_t hdr_size = hw->vtnet_hdr_size; uint16_t nb_used, nb_tx = 0; - int error; if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts)) return nb_tx; @@ -1367,17 +1402,6 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) struct rte_mbuf *txm = tx_pkts[nb_tx]; int can_push = 0, use_indirect = 0, slots, need; - /* Do VLAN tag insertion */ - if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) { - error = rte_vlan_insert(&txm); - if (unlikely(error)) { - rte_pktmbuf_free(txm); - continue; - } - /* vlan_insert may add a header mbuf */ - tx_pkts[nb_tx] = txm; - } - /* optimize ring usage */ if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) || vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) && @@ -1448,7 +1472,6 @@ virtio_xmit_pkts_inorder(void *tx_queue, uint16_t hdr_size = hw->vtnet_hdr_size; uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0; struct rte_mbuf *inorder_pkts[nb_pkts]; - int error; if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts)) return nb_tx; @@ -1473,17 +1496,6 @@ virtio_xmit_pkts_inorder(void *tx_queue, struct rte_mbuf *txm = tx_pkts[nb_tx]; int slots, need; - /* Do VLAN tag insertion */ - if 
(unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) { - error = rte_vlan_insert(&txm); - if (unlikely(error)) { - rte_pktmbuf_free(txm); - continue; - } - /* vlan_insert may add a header mbuf */ - tx_pkts[nb_tx] = txm; - } - /* optimize ring usage */ if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) || vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) && diff --git a/dpdk/drivers/net/virtio/virtio_user/vhost.h b/dpdk/drivers/net/virtio/virtio_user/vhost.h index 55f47036..1e784e58 100644 --- a/dpdk/drivers/net/virtio/virtio_user/vhost.h +++ b/dpdk/drivers/net/virtio/virtio_user/vhost.h @@ -67,7 +67,7 @@ enum vhost_user_request { VHOST_USER_MAX }; -const char * const vhost_msg_strings[VHOST_USER_MAX]; +extern const char * const vhost_msg_strings[VHOST_USER_MAX]; struct vhost_memory_region { uint64_t guest_phys_addr; diff --git a/dpdk/drivers/net/virtio/virtio_user_ethdev.c b/dpdk/drivers/net/virtio/virtio_user_ethdev.c index 5781c094..63b647dc 100644 --- a/dpdk/drivers/net/virtio/virtio_user_ethdev.c +++ b/dpdk/drivers/net/virtio/virtio_user_ethdev.c @@ -396,8 +396,6 @@ get_integer_arg(const char *key __rte_unused, return 0; } -static struct rte_vdev_driver virtio_user_driver; - static struct rte_eth_dev * virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev) { diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c b/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c index 812e1857..c856b77f 100644 --- a/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c +++ b/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c @@ -1119,8 +1119,8 @@ vmxnet3_dev_stats_reset(struct rte_eth_dev *dev) { unsigned int i; struct vmxnet3_hw *hw = dev->data->dev_private; - struct UPT1_TxStats txStats; - struct UPT1_RxStats rxStats; + struct UPT1_TxStats txStats = {0}; + struct UPT1_RxStats rxStats = {0}; VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c b/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c index d30914a8..6efa3ac2 100644 --- 
a/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c +++ b/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c @@ -361,7 +361,7 @@ vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, */ if ((ol_flags & PKT_TX_TCP_SEG) == 0 && m->nb_segs > VMXNET3_MAX_TXD_PER_PKT) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return i; } @@ -369,20 +369,20 @@ vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, if ((ol_flags & VMXNET3_TX_OFFLOAD_NOTSUP_MASK) != 0 || (ol_flags & PKT_TX_L4_MASK) == PKT_TX_SCTP_CKSUM) { - rte_errno = -ENOTSUP; + rte_errno = ENOTSUP; return i; } #ifdef RTE_LIBRTE_ETHDEV_DEBUG ret = rte_validate_tx_offload(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } #endif ret = rte_net_intel_cksum_prepare(m); if (ret != 0) { - rte_errno = ret; + rte_errno = -ret; return i; } } diff --git a/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c b/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c index 11951980..55ded464 100644 --- a/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c +++ b/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c @@ -26,9 +26,6 @@ int dpaa2_cmdif_logtype; /* CMDIF driver name */ #define DPAA2_CMDIF_PMD_NAME dpaa2_dpci -/* CMDIF driver object */ -static struct rte_vdev_driver dpaa2_cmdif_drv; - /* * This API provides the DPCI device ID in 'attr_value'. * The device ID shall be passed by GPP to the AIOP using CMDIF commands. 
diff --git a/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_api.c b/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_api.c index 540e171a..55bcb683 100644 --- a/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_api.c +++ b/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_api.c @@ -76,6 +76,7 @@ static int ifpga_acc_get_region_info(struct opae_accelerator *acc, info->flags = ACC_REGION_READ | ACC_REGION_WRITE | ACC_REGION_MMIO; info->len = afu_info->region[info->index].len; info->addr = afu_info->region[info->index].addr; + info->phys_addr = afu_info->region[info->index].phys_addr; return 0; } @@ -183,7 +184,7 @@ struct opae_bridge_ops ifpga_br_ops = { }; /* Manager APIs */ -static int ifpga_mgr_flash(struct opae_manager *mgr, int id, void *buf, +static int ifpga_mgr_flash(struct opae_manager *mgr, int id, const char *buf, u32 size, u64 *status) { struct ifpga_fme_hw *fme = mgr->data; @@ -230,7 +231,7 @@ struct opae_adapter_ops ifpga_adapter_ops = { * - 0: Success, partial reconfiguration finished. * - <0: Error code returned in partial reconfiguration. 
**/ -int ifpga_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size, +int ifpga_pr(struct ifpga_hw *hw, u32 port_id, const char *buffer, u32 size, u64 *status) { if (!is_valid_port_id(hw, port_id)) diff --git a/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_api.h b/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_api.h index dae7ca14..f203f3de 100644 --- a/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_api.h +++ b/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_api.h @@ -22,7 +22,7 @@ int ifpga_set_irq(struct ifpga_hw *hw, u32 fiu_id, u32 port_id, u32 feature_id, void *irq_set); /* FME APIs */ -int ifpga_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size, +int ifpga_pr(struct ifpga_hw *hw, u32 port_id, const char *buffer, u32 size, u64 *status); #endif /* _IFPGA_API_H_ */ diff --git a/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h b/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h index 4391f2fd..a58dbdc7 100644 --- a/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h +++ b/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h @@ -121,7 +121,7 @@ static inline int fpga_port_reset(struct ifpga_port_hw *port) return ret; } -int do_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size, +int do_pr(struct ifpga_hw *hw, u32 port_id, const char *buffer, u32 size, u64 *status); int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop); diff --git a/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c b/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c index ec0beeb1..cc91dff5 100644 --- a/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c +++ b/dpdk/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c @@ -223,8 +223,8 @@ static int fpga_pr_buf_load(struct ifpga_fme_hw *fme_dev, return 0; } -static int fme_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size, - u64 *status) +static int fme_pr(struct ifpga_hw *hw, u32 port_id, const char *buffer, + u32 size, u64 *status) { struct feature_fme_header *fme_hdr; struct feature_fme_capability fme_capability; 
@@ -269,7 +269,7 @@ static int fme_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size, /* Disable Port before PR */ fpga_port_disable(port); - ret = fpga_pr_buf_load(fme, &info, (void *)buffer, size); + ret = fpga_pr_buf_load(fme, &info, buffer, size); *status = info.pr_err; @@ -280,27 +280,32 @@ static int fme_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size, return ret; } -int do_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size, u64 *status) +int do_pr(struct ifpga_hw *hw, u32 port_id, const char *buffer, + u32 size, u64 *status) { - struct bts_header *bts_hdr; - void *buf; + const struct bts_header *bts_hdr; + const char *buf; struct ifpga_port_hw *port; int ret; + u32 header_size; if (!buffer || size == 0) { dev_err(hw, "invalid parameter\n"); return -EINVAL; } - bts_hdr = (struct bts_header *)buffer; + bts_hdr = (const struct bts_header *)buffer; if (is_valid_bts(bts_hdr)) { dev_info(hw, "this is a valid bitsteam..\n"); - size -= (sizeof(struct bts_header) + - bts_hdr->metadata_len); - buf = (u8 *)buffer + sizeof(struct bts_header) + - bts_hdr->metadata_len; + header_size = sizeof(struct bts_header) + + bts_hdr->metadata_len; + if (size < header_size) + return -EINVAL; + size -= header_size; + buf = buffer + header_size; } else { + dev_err(hw, "this is an invalid bitstream..\n"); return -EINVAL; } diff --git a/dpdk/drivers/raw/ifpga_rawdev/base/opae_hw_api.c b/dpdk/drivers/raw/ifpga_rawdev/base/opae_hw_api.c index 1541b679..ff0ed8c3 100644 --- a/dpdk/drivers/raw/ifpga_rawdev/base/opae_hw_api.c +++ b/dpdk/drivers/raw/ifpga_rawdev/base/opae_hw_api.c @@ -241,8 +241,8 @@ opae_manager_alloc(const char *name, struct opae_manager_ops *ops, void *data) * * Return: 0 on success, otherwise error code. 
*/ -int opae_manager_flash(struct opae_manager *mgr, int id, void *buf, u32 size, - u64 *status) +int opae_manager_flash(struct opae_manager *mgr, int id, const char *buf, + u32 size, u64 *status) { if (!mgr) return -EINVAL; diff --git a/dpdk/drivers/raw/ifpga_rawdev/base/opae_hw_api.h b/dpdk/drivers/raw/ifpga_rawdev/base/opae_hw_api.h index 332e0f3f..2a2121c0 100644 --- a/dpdk/drivers/raw/ifpga_rawdev/base/opae_hw_api.h +++ b/dpdk/drivers/raw/ifpga_rawdev/base/opae_hw_api.h @@ -40,7 +40,7 @@ struct opae_manager { /* FIXME: add more management ops, e.g power/thermal and etc */ struct opae_manager_ops { - int (*flash)(struct opae_manager *mgr, int id, void *buffer, + int (*flash)(struct opae_manager *mgr, int id, const char *buffer, u32 size, u64 *status); }; @@ -48,7 +48,7 @@ struct opae_manager_ops { struct opae_manager * opae_manager_alloc(const char *name, struct opae_manager_ops *ops, void *data); #define opae_manager_free(mgr) opae_free(mgr) -int opae_manager_flash(struct opae_manager *mgr, int acc_id, void *buf, +int opae_manager_flash(struct opae_manager *mgr, int acc_id, const char *buf, u32 size, u64 *status); /* OPAE Bridge Data Structure */ diff --git a/dpdk/drivers/raw/ifpga_rawdev/ifpga_rawdev.c b/dpdk/drivers/raw/ifpga_rawdev/ifpga_rawdev.c index eff001b5..94161646 100644 --- a/dpdk/drivers/raw/ifpga_rawdev/ifpga_rawdev.c +++ b/dpdk/drivers/raw/ifpga_rawdev/ifpga_rawdev.c @@ -177,7 +177,7 @@ ifpga_rawdev_reset(struct rte_rawdev *dev) } static int -fpga_pr(struct rte_rawdev *raw_dev, u32 port_id, u64 *buffer, u32 size, +fpga_pr(struct rte_rawdev *raw_dev, u32 port_id, const char *buffer, u32 size, u64 *status) { @@ -248,6 +248,11 @@ rte_fpga_do_pr(struct rte_rawdev *rawdev, int port_id, goto close_fd; } buffer_size = file_stat.st_size; + if (buffer_size <= 0) { + ret = -EINVAL; + goto close_fd; + } + IFPGA_RAWDEV_PMD_INFO("bitstream file size: %zu\n", buffer_size); buffer = rte_malloc(NULL, buffer_size, 0); if (!buffer) { diff --git 
a/dpdk/drivers/raw/skeleton_rawdev/skeleton_rawdev.c b/dpdk/drivers/raw/skeleton_rawdev/skeleton_rawdev.c index 63f2b9a0..6eabd97e 100644 --- a/dpdk/drivers/raw/skeleton_rawdev/skeleton_rawdev.c +++ b/dpdk/drivers/raw/skeleton_rawdev/skeleton_rawdev.c @@ -37,9 +37,6 @@ static uint16_t skeldev_init_once; /**< Rawdev Skeleton dummy driver name */ #define SKELETON_PMD_RAWDEV_NAME rawdev_skeleton -/**< Skeleton rawdev driver object */ -static struct rte_vdev_driver skeleton_pmd_drv; - struct queue_buffers { void *bufs[SKELETON_QUEUE_MAX_DEPTH]; }; diff --git a/dpdk/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c b/dpdk/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c index 359c9e29..3250c229 100644 --- a/dpdk/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c +++ b/dpdk/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c @@ -274,17 +274,14 @@ static int test_rawdev_attr_set_get(void) { int ret; - int *dummy_value; + int *dummy_value, set_value; uint64_t ret_value; /* Set an attribute and fetch it */ ret = rte_rawdev_set_attr(TEST_DEV_ID, "Test1", 100); RTE_TEST_ASSERT(!ret, "Unable to set an attribute (Test1)"); - dummy_value = malloc(sizeof(int)); - if (!dummy_value) - RTE_TEST_ASSERT(1, "Unable to allocate memory (dummy_value)"); - + dummy_value = &set_value; *dummy_value = 200; ret = rte_rawdev_set_attr(TEST_DEV_ID, "Test2", (uintptr_t)dummy_value); @@ -294,11 +291,9 @@ test_rawdev_attr_set_get(void) "Attribute (Test1) not set correctly (%" PRIu64 ")", ret_value); - free(dummy_value); - ret_value = 0; ret = rte_rawdev_get_attr(TEST_DEV_ID, "Test2", &ret_value); - RTE_TEST_ASSERT_EQUAL(*((int *)(uintptr_t)ret_value), 200, + RTE_TEST_ASSERT_EQUAL(*((int *)(uintptr_t)ret_value), set_value, "Attribute (Test2) not set correctly (%" PRIu64 ")", ret_value); diff --git a/dpdk/examples/bbdev_app/Makefile b/dpdk/examples/bbdev_app/Makefile index 18dd99db..378b4cb5 100644 --- a/dpdk/examples/bbdev_app/Makefile +++ b/dpdk/examples/bbdev_app/Makefile @@ -8,8 +8,7 @@ APP = 
bbdev SRCS-y := main.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -37,7 +36,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else diff --git a/dpdk/examples/bond/Makefile b/dpdk/examples/bond/Makefile index d6e500aa..665fcf6a 100644 --- a/dpdk/examples/bond/Makefile +++ b/dpdk/examples/bond/Makefile @@ -8,8 +8,7 @@ APP = bond_app SRCS-y := main.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -39,7 +38,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else diff --git a/dpdk/examples/cmdline/Makefile b/dpdk/examples/cmdline/Makefile index a617cce1..fbe521c3 100644 --- a/dpdk/examples/cmdline/Makefile +++ b/dpdk/examples/cmdline/Makefile @@ -8,8 +8,7 @@ APP = cmdline SRCS-y := main.c commands.c parse_obj_list.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -35,7 +34,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else diff --git a/dpdk/examples/distributor/Makefile b/dpdk/examples/distributor/Makefile index 05ea0bfe..372446f9 100644 --- a/dpdk/examples/distributor/Makefile +++ b/dpdk/examples/distributor/Makefile @@ -8,8 +8,7 @@ APP = distributor_app SRCS-y := main.c # Build using pkg-config variables if possible -$(shell 
pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -35,7 +34,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else diff --git a/dpdk/examples/eventdev_pipeline/Makefile b/dpdk/examples/eventdev_pipeline/Makefile index 1a789ccc..cab4d405 100644 --- a/dpdk/examples/eventdev_pipeline/Makefile +++ b/dpdk/examples/eventdev_pipeline/Makefile @@ -10,8 +10,7 @@ SRCS-y += pipeline_worker_generic.c SRCS-y += pipeline_worker_tx.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -39,7 +38,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else diff --git a/dpdk/examples/exception_path/Makefile b/dpdk/examples/exception_path/Makefile index ae74781e..013ae1cb 100644 --- a/dpdk/examples/exception_path/Makefile +++ b/dpdk/examples/exception_path/Makefile @@ -8,8 +8,7 @@ APP = exception_path SRCS-y := main.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -35,7 +34,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else diff --git a/dpdk/examples/fips_validation/Makefile b/dpdk/examples/fips_validation/Makefile index 7b1fe34a..5fb64e4d 100644 --- a/dpdk/examples/fips_validation/Makefile +++ b/dpdk/examples/fips_validation/Makefile @@ -15,8 +15,7 @@ SRCS-y += fips_validation_ccm.c SRCS-y += main.c # Build using 
pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -42,7 +41,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else diff --git a/dpdk/examples/flow_classify/Makefile b/dpdk/examples/flow_classify/Makefile index f1fa4df6..5c0f7fc5 100644 --- a/dpdk/examples/flow_classify/Makefile +++ b/dpdk/examples/flow_classify/Makefile @@ -8,8 +8,7 @@ APP = flow_classify SRCS-y := flow_classify.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -37,7 +36,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else diff --git a/dpdk/examples/flow_filtering/Makefile b/dpdk/examples/flow_filtering/Makefile index 8f86b7b2..5140f507 100644 --- a/dpdk/examples/flow_filtering/Makefile +++ b/dpdk/examples/flow_filtering/Makefile @@ -6,8 +6,7 @@ APP = flow SRCS-y := main.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -33,7 +32,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else diff --git a/dpdk/examples/helloworld/Makefile b/dpdk/examples/helloworld/Makefile index d66b526b..970c9ea8 100644 --- a/dpdk/examples/helloworld/Makefile +++ b/dpdk/examples/helloworld/Makefile @@ -8,8 +8,7 @@ APP = helloworld SRCS-y := main.c # Build using pkg-config variables if possible -$(shell 
pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -35,7 +34,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else diff --git a/dpdk/examples/ip_fragmentation/Makefile b/dpdk/examples/ip_fragmentation/Makefile index 9e89e744..84e66f23 100644 --- a/dpdk/examples/ip_fragmentation/Makefile +++ b/dpdk/examples/ip_fragmentation/Makefile @@ -9,8 +9,7 @@ APP = ip_fragmentation SRCS-y := main.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -36,7 +35,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else diff --git a/dpdk/examples/ip_fragmentation/main.c b/dpdk/examples/ip_fragmentation/main.c index 17a877da..68d40c19 100644 --- a/dpdk/examples/ip_fragmentation/main.c +++ b/dpdk/examples/ip_fragmentation/main.c @@ -233,16 +233,19 @@ l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf, { struct rx_queue *rxq; uint32_t i, len, next_hop; - uint8_t ipv6; - uint16_t port_out; + uint16_t port_out, ether_type; int32_t len2; + const struct ether_hdr *eth; - ipv6 = 0; rxq = &qconf->rx_queue_list[queueid]; /* by default, send everything back to the source port */ port_out = port_in; + /* save ether type of the incoming packet */ + eth = rte_pktmbuf_mtod(m, const struct ether_hdr *); + ether_type = eth->ether_type; + /* Remove the Ethernet header and trailer from the input packet */ rte_pktmbuf_adj(m, (uint16_t)sizeof(struct ether_hdr)); @@ -288,8 +291,6 @@ l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf, /* if this is an IPv6 packet */ struct 
ipv6_hdr *ip_hdr; - ipv6 = 1; - /* Read the lookup key (i.e. ip_dst) from the input packet */ ip_hdr = rte_pktmbuf_mtod(m, struct ipv6_hdr *); @@ -346,10 +347,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf, /* src addr */ ether_addr_copy(&ports_eth_addr[port_out], ð_hdr->s_addr); - if (ipv6) - eth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv6); - else - eth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv4); + eth_hdr->ether_type = ether_type; } len += len2; @@ -923,9 +921,6 @@ main(int argc, char **argv) n_tx_queue = nb_lcores; if (n_tx_queue > MAX_TX_QUEUE_PER_PORT) n_tx_queue = MAX_TX_QUEUE_PER_PORT; - if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE) - local_port_conf.txmode.offloads |= - DEV_TX_OFFLOAD_MBUF_FAST_FREE; ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue, &local_port_conf); if (ret < 0) { @@ -961,11 +956,15 @@ main(int argc, char **argv) printf("\n"); /* init one TX queue per couple (lcore,port) */ + rte_eth_dev_info_get(portid, &dev_info); queueid = 0; for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { if (rte_lcore_is_enabled(lcore_id) == 0) continue; + if (queueid >= dev_info.nb_tx_queues) + break; + socket = (int) rte_lcore_to_socket_id(lcore_id); printf("txq=%u,%d ", lcore_id, queueid); fflush(stdout); diff --git a/dpdk/examples/ip_pipeline/Makefile b/dpdk/examples/ip_pipeline/Makefile index 41ba7df2..d667af81 100644 --- a/dpdk/examples/ip_pipeline/Makefile +++ b/dpdk/examples/ip_pipeline/Makefile @@ -21,8 +21,7 @@ SRCS-y += tmgr.c SRCS-y += cryptodev.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -55,7 +54,7 @@ build: .PHONY: clean clean: rm -f build/$(APP)* build/*.o - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else diff --git a/dpdk/examples/ip_reassembly/Makefile 
b/dpdk/examples/ip_reassembly/Makefile index 1e81315f..ad7e5fea 100644 --- a/dpdk/examples/ip_reassembly/Makefile +++ b/dpdk/examples/ip_reassembly/Makefile @@ -9,8 +9,7 @@ APP = ip_reassembly SRCS-y := main.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -36,7 +35,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else diff --git a/dpdk/examples/ipsec-secgw/Makefile b/dpdk/examples/ipsec-secgw/Makefile index a6933801..3fd2079d 100644 --- a/dpdk/examples/ipsec-secgw/Makefile +++ b/dpdk/examples/ipsec-secgw/Makefile @@ -18,8 +18,7 @@ SRCS-y += ipsec-secgw.c CFLAGS += -gdwarf-2 # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -47,7 +46,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else diff --git a/dpdk/examples/ipsec-secgw/esp.c b/dpdk/examples/ipsec-secgw/esp.c index faa84ddd..ea2ba239 100644 --- a/dpdk/examples/ipsec-secgw/esp.c +++ b/dpdk/examples/ipsec-secgw/esp.c @@ -189,7 +189,7 @@ esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa, } } - if (unlikely(sa->flags == TRANSPORT)) { + if (unlikely(IS_TRANSPORT(sa->flags))) { ip = rte_pktmbuf_mtod(m, struct ip *); ip4 = (struct ip *)rte_pktmbuf_adj(m, sizeof(struct esp_hdr) + sa->iv_len); @@ -230,13 +230,13 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa, ip4 = rte_pktmbuf_mtod(m, struct ip *); if (likely(ip4->ip_v == IPVERSION)) { - if (unlikely(sa->flags == TRANSPORT)) { + if (unlikely(IS_TRANSPORT(sa->flags))) { ip_hdr_len = ip4->ip_hl * 4; 
nlp = ip4->ip_p; } else nlp = IPPROTO_IPIP; } else if (ip4->ip_v == IP6_VERSION) { - if (unlikely(sa->flags == TRANSPORT)) { + if (unlikely(IS_TRANSPORT(sa->flags))) { /* XXX No option headers supported */ ip_hdr_len = sizeof(struct ip6_hdr); ip6 = (struct ip6_hdr *)ip4; @@ -254,14 +254,13 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa, ip_hdr_len + 2, sa->block_size); pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m); - RTE_ASSERT(sa->flags == IP4_TUNNEL || sa->flags == IP6_TUNNEL || - sa->flags == TRANSPORT); + RTE_ASSERT(IS_TUNNEL(sa->flags) || IS_TRANSPORT(sa->flags)); - if (likely(sa->flags == IP4_TUNNEL)) + if (likely(IS_IP4_TUNNEL(sa->flags))) ip_hdr_len = sizeof(struct ip); - else if (sa->flags == IP6_TUNNEL) + else if (IS_IP6_TUNNEL(sa->flags)) ip_hdr_len = sizeof(struct ip6_hdr); - else if (sa->flags != TRANSPORT) { + else if (!IS_TRANSPORT(sa->flags)) { RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n", sa->flags); return -EINVAL; @@ -288,7 +287,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa, rte_prefetch0(padding); } - switch (sa->flags) { + switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) { case IP4_TUNNEL: ip4 = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len, &sa->src, &sa->dst); diff --git a/dpdk/examples/ipsec-secgw/ipsec.c b/dpdk/examples/ipsec-secgw/ipsec.c index 72a29bcb..872a8b81 100644 --- a/dpdk/examples/ipsec-secgw/ipsec.c +++ b/dpdk/examples/ipsec-secgw/ipsec.c @@ -23,7 +23,7 @@ set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec) if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) { struct rte_security_ipsec_tunnel_param *tunnel = &ipsec->tunnel; - if (sa->flags == IP4_TUNNEL) { + if (IS_IP4_TUNNEL(sa->flags)) { tunnel->type = RTE_SECURITY_IPSEC_TUNNEL_IPV4; tunnel->ipv4.ttl = IPDEFTTL; @@ -83,8 +83,7 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa) .options = { 0 }, .direction = sa->direction, .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP, - .mode = 
(sa->flags == IP4_TUNNEL || - sa->flags == IP6_TUNNEL) ? + .mode = (IS_TUNNEL(sa->flags)) ? RTE_SECURITY_IPSEC_SA_MODE_TUNNEL : RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT, } }, @@ -134,7 +133,7 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa) sec_cap->protocol == RTE_SECURITY_PROTOCOL_IPSEC && sec_cap->ipsec.mode == - RTE_SECURITY_IPSEC_SA_MODE_TUNNEL && + sess_conf.ipsec.mode && sec_cap->ipsec.direction == sa->direction) break; sec_cap++; @@ -150,16 +149,20 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa) sa->security_ctx = ctx; sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH; - sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4; - sa->pattern[1].mask = &rte_flow_item_ipv4_mask; - if (sa->flags & IP6_TUNNEL) { + if (IS_IP6(sa->flags)) { + sa->pattern[1].mask = &rte_flow_item_ipv6_mask; + sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6; sa->pattern[1].spec = &sa->ipv6_spec; + memcpy(sa->ipv6_spec.hdr.dst_addr, sa->dst.ip.ip6.ip6_b, 16); memcpy(sa->ipv6_spec.hdr.src_addr, sa->src.ip.ip6.ip6_b, 16); - } else { + } else if (IS_IP4(sa->flags)) { + sa->pattern[1].mask = &rte_flow_item_ipv4_mask; + sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4; sa->pattern[1].spec = &sa->ipv4_spec; + sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4; sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4; } @@ -186,23 +189,22 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa) .rss_key = rss_key, .rss_key_len = 40, }; - struct rte_eth_dev *eth_dev; + struct rte_eth_dev_info dev_info; uint16_t queue[RTE_MAX_QUEUES_PER_PORT]; struct rte_flow_action_rss action_rss; unsigned int i; unsigned int j; + rte_eth_dev_info_get(sa->portid, &dev_info); sa->action[2].type = RTE_FLOW_ACTION_TYPE_END; /* Try RSS. 
*/ sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS; sa->action[1].conf = &action_rss; - eth_dev = ctx->device; rte_eth_dev_rss_hash_conf_get(sa->portid, &rss_conf); for (i = 0, j = 0; - i < eth_dev->data->nb_rx_queues; ++i) - if (eth_dev->data->rx_queues[i]) - queue[j++] = i; + i < dev_info.nb_rx_queues; ++i) + queue[j++] = i; action_rss = (struct rte_flow_action_rss){ .types = rss_conf.rss_hf, .key_len = rss_conf.rss_key_len, @@ -303,7 +305,7 @@ flow_create_failure: sec_cap->protocol == RTE_SECURITY_PROTOCOL_IPSEC && sec_cap->ipsec.mode == - RTE_SECURITY_IPSEC_SA_MODE_TUNNEL && + sess_conf.ipsec.mode && sec_cap->ipsec.direction == sa->direction) break; sec_cap++; diff --git a/dpdk/examples/ipsec-secgw/ipsec.h b/dpdk/examples/ipsec-secgw/ipsec.h index 86d8f7df..979a6e23 100644 --- a/dpdk/examples/ipsec-secgw/ipsec.h +++ b/dpdk/examples/ipsec-secgw/ipsec.h @@ -87,6 +87,8 @@ struct ipsec_sa { #define IP4_TUNNEL (1 << 0) #define IP6_TUNNEL (1 << 1) #define TRANSPORT (1 << 2) +#define IP4_TRANSPORT (1 << 3) +#define IP6_TRANSPORT (1 << 4) struct ip_addr src; struct ip_addr dst; uint8_t cipher_key[MAX_KEY_SIZE]; @@ -125,6 +127,27 @@ struct ipsec_mbuf_metadata { uint8_t buf[32]; } __rte_cache_aligned; +#define IS_TRANSPORT(flags) ((flags) & TRANSPORT) + +#define IS_TUNNEL(flags) ((flags) & (IP4_TUNNEL | IP6_TUNNEL)) + +#define IS_IP4(flags) ((flags) & (IP4_TUNNEL | IP4_TRANSPORT)) + +#define IS_IP6(flags) ((flags) & (IP6_TUNNEL | IP6_TRANSPORT)) + +#define IS_IP4_TUNNEL(flags) ((flags) & IP4_TUNNEL) + +#define IS_IP6_TUNNEL(flags) ((flags) & IP6_TUNNEL) + +/* + * Macro for getting ipsec_sa flags statuses without version of protocol + * used for transport (IP4_TRANSPORT and IP6_TRANSPORT flags). 
+ */ +#define WITHOUT_TRANSPORT_VERSION(flags) \ + ((flags) & (IP4_TUNNEL | \ + IP6_TUNNEL | \ + TRANSPORT)) + struct cdev_qp { uint16_t id; uint16_t qp; @@ -239,6 +262,18 @@ sp4_init(struct socket_ctx *ctx, int32_t socket_id); void sp6_init(struct socket_ctx *ctx, int32_t socket_id); +/* + * Search through SP rules for given SPI. + * Returns first rule index if found(greater or equal then zero), + * or -ENOENT otherwise. + */ +int +sp4_spi_present(uint32_t spi, int inbound, struct ip_addr ip_addr[2], + uint32_t mask[2]); +int +sp6_spi_present(uint32_t spi, int inbound, struct ip_addr ip_addr[2], + uint32_t mask[2]); + /* * Search through SA entries for given SPI. * Returns first entry index if found(greater or equal then zero), diff --git a/dpdk/examples/ipsec-secgw/sa.c b/dpdk/examples/ipsec-secgw/sa.c index f7b6eb0b..45b81036 100644 --- a/dpdk/examples/ipsec-secgw/sa.c +++ b/dpdk/examples/ipsec-secgw/sa.c @@ -26,6 +26,10 @@ #define IPDEFTTL 64 +#define IP4_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip4) * CHAR_BIT) + +#define IP6_FULL_MASK (sizeof(((struct ip_addr *)NULL)->ip.ip6.ip6) * CHAR_BIT) + struct supported_cipher_algo { const char *keyword; enum rte_crypto_cipher_algorithm algo; @@ -467,7 +471,7 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens, if (status->status < 0) return; - if (rule->flags == IP4_TUNNEL) { + if (IS_IP4_TUNNEL(rule->flags)) { struct in_addr ip; APP_CHECK(parse_ipv4_addr(tokens[ti], @@ -479,7 +483,7 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens, return; rule->src.ip.ip4 = rte_bswap32( (uint32_t)ip.s_addr); - } else if (rule->flags == IP6_TUNNEL) { + } else if (IS_IP6_TUNNEL(rule->flags)) { struct in6_addr ip; APP_CHECK(parse_ipv6_addr(tokens[ti], &ip, @@ -491,7 +495,7 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens, return; memcpy(rule->src.ip.ip6.ip6_b, ip.s6_addr, 16); - } else if (rule->flags == TRANSPORT) { + } else if (IS_TRANSPORT(rule->flags)) { APP_CHECK(0, status, "unrecognized input " "\"%s\"", tokens[ti]); 
return; @@ -510,7 +514,7 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens, if (status->status < 0) return; - if (rule->flags == IP4_TUNNEL) { + if (IS_IP4_TUNNEL(rule->flags)) { struct in_addr ip; APP_CHECK(parse_ipv4_addr(tokens[ti], @@ -522,7 +526,7 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens, return; rule->dst.ip.ip4 = rte_bswap32( (uint32_t)ip.s_addr); - } else if (rule->flags == IP6_TUNNEL) { + } else if (IS_IP6_TUNNEL(rule->flags)) { struct in6_addr ip; APP_CHECK(parse_ipv6_addr(tokens[ti], &ip, @@ -533,7 +537,7 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens, if (status->status < 0) return; memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16); - } else if (rule->flags == TRANSPORT) { + } else if (IS_TRANSPORT(rule->flags)) { APP_CHECK(0, status, "unrecognized " "input \"%s\"", tokens[ti]); return; @@ -662,7 +666,7 @@ print_one_sa_rule(const struct ipsec_sa *sa, int inbound) printf("mode:"); - switch (sa->flags) { + switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) { case IP4_TUNNEL: printf("IP4Tunnel "); uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d); @@ -737,7 +741,7 @@ sa_create(const char *name, int32_t socket_id) RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY); if (mz == NULL) { printf("Failed to allocate SA DB memory\n"); - rte_errno = -ENOMEM; + rte_errno = ENOMEM; return NULL; } @@ -772,6 +776,93 @@ check_eth_dev_caps(uint16_t portid, uint32_t inbound) return 0; } +/* + * Helper function, tries to determine next_proto for SPI + * by searching though SP rules. 
+ */ +static int +get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir, + struct ip_addr ip_addr[2], uint32_t mask[2]) +{ + int32_t rc4, rc6; + + rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS, + ip_addr, mask); + rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS, + ip_addr, mask); + + if (rc4 >= 0) { + if (rc6 >= 0) { + RTE_LOG(ERR, IPSEC, + "%s: SPI %u used simultaeously by " + "IPv4(%d) and IPv6 (%d) SP rules\n", + __func__, spi, rc4, rc6); + return -EINVAL; + } else + return IPPROTO_IPIP; + } else if (rc6 < 0) { + RTE_LOG(ERR, IPSEC, + "%s: SPI %u is not used by any SP rule\n", + __func__, spi); + return -EINVAL; + } else + return IPPROTO_IPV6; +} + +/* + * Helper function for getting source and destination IP addresses + * from SP. Needed for inline crypto transport mode, as addresses are not + * provided in config file for that mode. It checks if SP for current SA exists, + * and based on what type of protocol is returned, it stores appropriate + * addresses got from SP into SA. 
+ */ +static int +sa_add_address_inline_crypto(struct ipsec_sa *sa) +{ + int protocol; + struct ip_addr ip_addr[2]; + uint32_t mask[2]; + + protocol = get_spi_proto(sa->spi, sa->direction, ip_addr, mask); + if (protocol < 0) + return protocol; + else if (protocol == IPPROTO_IPIP) { + sa->flags |= IP4_TRANSPORT; + if (mask[0] == IP4_FULL_MASK && + mask[1] == IP4_FULL_MASK && + ip_addr[0].ip.ip4 != 0 && + ip_addr[1].ip.ip4 != 0) { + + sa->src.ip.ip4 = ip_addr[0].ip.ip4; + sa->dst.ip.ip4 = ip_addr[1].ip.ip4; + } else { + RTE_LOG(ERR, IPSEC, + "%s: No valid address or mask entry in" + " IPv4 SP rule for SPI %u\n", + __func__, sa->spi); + return -EINVAL; + } + } else if (protocol == IPPROTO_IPV6) { + sa->flags |= IP6_TRANSPORT; + if (mask[0] == IP6_FULL_MASK && + mask[1] == IP6_FULL_MASK && + (ip_addr[0].ip.ip6.ip6[0] != 0 || + ip_addr[0].ip.ip6.ip6[1] != 0) && + (ip_addr[1].ip.ip6.ip6[0] != 0 || + ip_addr[1].ip.ip6.ip6[1] != 0)) { + + sa->src.ip.ip6 = ip_addr[0].ip.ip6; + sa->dst.ip.ip6 = ip_addr[1].ip.ip6; + } else { + RTE_LOG(ERR, IPSEC, + "%s: No valid address or mask entry in" + " IPv6 SP rule for SPI %u\n", + __func__, sa->spi); + return -EINVAL; + } + } + return 0; +} static int sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], @@ -780,6 +871,7 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], struct ipsec_sa *sa; uint32_t i, idx; uint16_t iv_length; + int inline_status; for (i = 0; i < nb_entries; i++) { idx = SPI2IDX(entries[i].spi); @@ -802,10 +894,20 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], RTE_SECURITY_IPSEC_SA_DIR_INGRESS : RTE_SECURITY_IPSEC_SA_DIR_EGRESS; - switch (sa->flags) { + switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) { case IP4_TUNNEL: sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4); sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4); + break; + case TRANSPORT: + if (sa->type == + RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) { + inline_status = + sa_add_address_inline_crypto(sa); 
+ if (inline_status < 0) + return inline_status; + } + break; } if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) { @@ -1023,7 +1125,7 @@ single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt, if (rte_be_to_cpu_32(esp->spi) != sa->spi) return; - switch (sa->flags) { + switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) { case IP4_TUNNEL: src4_addr = RTE_PTR_ADD(ip, offsetof(struct ip, ip_src)); if ((ip->ip_v == IPVERSION) && diff --git a/dpdk/examples/ipsec-secgw/sp4.c b/dpdk/examples/ipsec-secgw/sp4.c index 99362a68..3871c6cc 100644 --- a/dpdk/examples/ipsec-secgw/sp4.c +++ b/dpdk/examples/ipsec-secgw/sp4.c @@ -17,6 +17,18 @@ #define MAX_ACL_RULE_NUM 1024 +#define IPV4_DST_FROM_SP(acr) \ + (rte_cpu_to_be_32((acr).field[DST_FIELD_IPV4].value.u32)) + +#define IPV4_SRC_FROM_SP(acr) \ + (rte_cpu_to_be_32((acr).field[SRC_FIELD_IPV4].value.u32)) + +#define IPV4_DST_MASK_FROM_SP(acr) \ + ((acr).field[DST_FIELD_IPV4].mask_range.u32) + +#define IPV4_SRC_MASK_FROM_SP(acr) \ + ((acr).field[SRC_FIELD_IPV4].mask_range.u32) + /* * Rule and trace formats definitions. */ @@ -547,3 +559,36 @@ sp4_init(struct socket_ctx *ctx, int32_t socket_id) RTE_LOG(WARNING, IPSEC, "No IPv4 SP Outbound rule " "specified\n"); } + +/* + * Search though SP rules for given SPI. 
+ */ +int +sp4_spi_present(uint32_t spi, int inbound, struct ip_addr ip_addr[2], + uint32_t mask[2]) +{ + uint32_t i, num; + const struct acl4_rules *acr; + + if (inbound != 0) { + acr = acl4_rules_in; + num = nb_acl4_rules_in; + } else { + acr = acl4_rules_out; + num = nb_acl4_rules_out; + } + + for (i = 0; i != num; i++) { + if (acr[i].data.userdata == spi) { + if (NULL != ip_addr && NULL != mask) { + ip_addr[0].ip.ip4 = IPV4_SRC_FROM_SP(acr[i]); + ip_addr[1].ip.ip4 = IPV4_DST_FROM_SP(acr[i]); + mask[0] = IPV4_SRC_MASK_FROM_SP(acr[i]); + mask[1] = IPV4_DST_MASK_FROM_SP(acr[i]); + } + return i; + } + } + + return -ENOENT; +} diff --git a/dpdk/examples/ipsec-secgw/sp6.c b/dpdk/examples/ipsec-secgw/sp6.c index bfcabf39..d8be6b1b 100644 --- a/dpdk/examples/ipsec-secgw/sp6.c +++ b/dpdk/examples/ipsec-secgw/sp6.c @@ -17,6 +17,36 @@ #define MAX_ACL_RULE_NUM 1024 +#define IPV6_FROM_SP(acr, fidx_low, fidx_high) \ + (((uint64_t)(acr).field[(fidx_high)].value.u32 << 32) | \ + (acr).field[(fidx_low)].value.u32) + +#define IPV6_DST_FROM_SP(addr, acr) do {\ + (addr).ip.ip6.ip6[0] = rte_cpu_to_be_64(IPV6_FROM_SP((acr), \ + IP6_DST1, IP6_DST0));\ + (addr).ip.ip6.ip6[1] = rte_cpu_to_be_64(IPV6_FROM_SP((acr), \ + IP6_DST3, IP6_DST2));\ + } while (0) + +#define IPV6_SRC_FROM_SP(addr, acr) do {\ + (addr).ip.ip6.ip6[0] = rte_cpu_to_be_64(IPV6_FROM_SP((acr), \ + IP6_SRC1, IP6_SRC0));\ + (addr).ip.ip6.ip6[1] = rte_cpu_to_be_64(IPV6_FROM_SP((acr), \ + IP6_SRC3, IP6_SRC2));\ + } while (0) + +#define IPV6_DST_MASK_FROM_SP(mask, acr) \ + ((mask) = (acr).field[IP6_DST0].mask_range.u32 + \ + (acr).field[IP6_DST1].mask_range.u32 + \ + (acr).field[IP6_DST2].mask_range.u32 + \ + (acr).field[IP6_DST3].mask_range.u32) + +#define IPV6_SRC_MASK_FROM_SP(mask, acr) \ + ((mask) = (acr).field[IP6_SRC0].mask_range.u32 + \ + (acr).field[IP6_SRC1].mask_range.u32 + \ + (acr).field[IP6_SRC2].mask_range.u32 + \ + (acr).field[IP6_SRC3].mask_range.u32) + enum { IP6_PROTO, IP6_SRC0, @@ -661,3 +691,36 @@ 
sp6_init(struct socket_ctx *ctx, int32_t socket_id) RTE_LOG(WARNING, IPSEC, "No IPv6 SP Outbound rule " "specified\n"); } + +/* + * Search though SP rules for given SPI. + */ +int +sp6_spi_present(uint32_t spi, int inbound, struct ip_addr ip_addr[2], + uint32_t mask[2]) +{ + uint32_t i, num; + const struct acl6_rules *acr; + + if (inbound != 0) { + acr = acl6_rules_in; + num = nb_acl6_rules_in; + } else { + acr = acl6_rules_out; + num = nb_acl6_rules_out; + } + + for (i = 0; i != num; i++) { + if (acr[i].data.userdata == spi) { + if (NULL != ip_addr && NULL != mask) { + IPV6_SRC_FROM_SP(ip_addr[0], acr[i]); + IPV6_DST_FROM_SP(ip_addr[1], acr[i]); + IPV6_SRC_MASK_FROM_SP(mask[0], acr[i]); + IPV6_DST_MASK_FROM_SP(mask[1], acr[i]); + } + return i; + } + } + + return -ENOENT; +} diff --git a/dpdk/examples/ipv4_multicast/Makefile b/dpdk/examples/ipv4_multicast/Makefile index a16c6233..83a21feb 100644 --- a/dpdk/examples/ipv4_multicast/Makefile +++ b/dpdk/examples/ipv4_multicast/Makefile @@ -9,8 +9,7 @@ APP = ipv4_multicast SRCS-y := main.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -36,7 +35,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/examples/kni/Makefile b/dpdk/examples/kni/Makefile index dd90d7d7..6c3e3039 100644 --- a/dpdk/examples/kni/Makefile +++ b/dpdk/examples/kni/Makefile @@ -8,8 +8,7 @@ APP = kni SRCS-y := main.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -36,7 +35,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - 
rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/examples/l2fwd-cat/Makefile b/dpdk/examples/l2fwd-cat/Makefile index b6eeabde..3c39ed6d 100644 --- a/dpdk/examples/l2fwd-cat/Makefile +++ b/dpdk/examples/l2fwd-cat/Makefile @@ -8,8 +8,7 @@ APP = l2fwd-cat SRCS-y := l2fwd-cat.c cat.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -37,7 +36,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/examples/l2fwd-crypto/Makefile b/dpdk/examples/l2fwd-crypto/Makefile index 6658fd0d..21fd8eea 100644 --- a/dpdk/examples/l2fwd-crypto/Makefile +++ b/dpdk/examples/l2fwd-crypto/Makefile @@ -8,8 +8,7 @@ APP = l2fwd-crypto SRCS-y := main.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -35,7 +34,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/examples/l2fwd-jobstats/Makefile b/dpdk/examples/l2fwd-jobstats/Makefile index 696a8b21..aec35390 100644 --- a/dpdk/examples/l2fwd-jobstats/Makefile +++ b/dpdk/examples/l2fwd-jobstats/Makefile @@ -8,8 +8,7 @@ APP = l2fwd-jobstats SRCS-y := main.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -35,7 +34,7 @@ build: .PHONY: clean clean: rm 
-f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/examples/l2fwd-keepalive/Makefile b/dpdk/examples/l2fwd-keepalive/Makefile index 4ab67db4..5c7eb85c 100644 --- a/dpdk/examples/l2fwd-keepalive/Makefile +++ b/dpdk/examples/l2fwd-keepalive/Makefile @@ -8,8 +8,7 @@ APP = l2fwd-keepalive SRCS-y := main.c shm.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -37,7 +36,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/examples/l2fwd/Makefile b/dpdk/examples/l2fwd/Makefile index a8a47ad4..42b2234a 100644 --- a/dpdk/examples/l2fwd/Makefile +++ b/dpdk/examples/l2fwd/Makefile @@ -8,8 +8,7 @@ APP = l2fwd SRCS-y := main.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -35,7 +34,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/examples/l3fwd-acl/Makefile b/dpdk/examples/l3fwd-acl/Makefile index 285683f8..8f01f755 100644 --- a/dpdk/examples/l3fwd-acl/Makefile +++ b/dpdk/examples/l3fwd-acl/Makefile @@ -8,8 +8,7 @@ APP = l3fwd-acl SRCS-y := main.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -35,7 +34,7 @@ build: 
.PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/examples/l3fwd-acl/main.c b/dpdk/examples/l3fwd-acl/main.c index a322ce4f..8ed0c07e 100644 --- a/dpdk/examples/l3fwd-acl/main.c +++ b/dpdk/examples/l3fwd-acl/main.c @@ -2018,14 +2018,10 @@ main(int argc, char **argv) fflush(stdout); /* init RX queues */ for (queue = 0; queue < qconf->n_rx_queue; ++queue) { - struct rte_eth_dev *dev; - struct rte_eth_conf *conf; struct rte_eth_rxconf rxq_conf; portid = qconf->rx_queue_list[queue].port_id; queueid = qconf->rx_queue_list[queue].queue_id; - dev = &rte_eth_devices[portid]; - conf = &dev->data->dev_conf; if (numa_on) socketid = (uint8_t) @@ -2038,7 +2034,7 @@ main(int argc, char **argv) rte_eth_dev_info_get(portid, &dev_info); rxq_conf = dev_info.default_rxconf; - rxq_conf.offloads = conf->rxmode.offloads; + rxq_conf.offloads = port_conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, socketid, &rxq_conf, pktmbuf_pool[socketid]); diff --git a/dpdk/examples/l3fwd-power/Makefile b/dpdk/examples/l3fwd-power/Makefile index 772ec7ba..53aaaca4 100644 --- a/dpdk/examples/l3fwd-power/Makefile +++ b/dpdk/examples/l3fwd-power/Makefile @@ -8,8 +8,7 @@ APP = l3fwd-power SRCS-y := main.c perf_core.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -37,7 +36,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/examples/l3fwd-power/main.c b/dpdk/examples/l3fwd-power/main.c index 9c7b3156..3421de8e 100644 --- a/dpdk/examples/l3fwd-power/main.c +++ 
b/dpdk/examples/l3fwd-power/main.c @@ -2118,13 +2118,9 @@ main(int argc, char **argv) /* init RX queues */ for(queue = 0; queue < qconf->n_rx_queue; ++queue) { struct rte_eth_rxconf rxq_conf; - struct rte_eth_dev *dev; - struct rte_eth_conf *conf; portid = qconf->rx_queue_list[queue].port_id; queueid = qconf->rx_queue_list[queue].queue_id; - dev = &rte_eth_devices[portid]; - conf = &dev->data->dev_conf; if (numa_on) socketid = \ @@ -2137,7 +2133,7 @@ main(int argc, char **argv) rte_eth_dev_info_get(portid, &dev_info); rxq_conf = dev_info.default_rxconf; - rxq_conf.offloads = conf->rxmode.offloads; + rxq_conf.offloads = port_conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, socketid, &rxq_conf, pktmbuf_pool[socketid]); diff --git a/dpdk/examples/l3fwd-power/meson.build b/dpdk/examples/l3fwd-power/meson.build index a3c5c2f1..257f290e 100644 --- a/dpdk/examples/l3fwd-power/meson.build +++ b/dpdk/examples/l3fwd-power/meson.build @@ -6,9 +6,11 @@ # To build this example as a standalone application with an already-installed # DPDK instance, use 'make' -if host_machine.system() != 'linux' - build = false +if not dpdk_conf.has('RTE_LIBRTE_POWER') + build = false + subdir_done() endif + allow_experimental_apis = true deps += ['power', 'timer', 'lpm', 'hash'] sources = files( diff --git a/dpdk/examples/l3fwd-vf/Makefile b/dpdk/examples/l3fwd-vf/Makefile index dfb1d52d..c5111776 100644 --- a/dpdk/examples/l3fwd-vf/Makefile +++ b/dpdk/examples/l3fwd-vf/Makefile @@ -8,8 +8,7 @@ APP = l3fwd-vf SRCS-y := main.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -35,7 +34,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git 
a/dpdk/examples/l3fwd-vf/main.c b/dpdk/examples/l3fwd-vf/main.c index 41137f97..ce052544 100644 --- a/dpdk/examples/l3fwd-vf/main.c +++ b/dpdk/examples/l3fwd-vf/main.c @@ -74,25 +74,6 @@ nb_lcores*MEMPOOL_CACHE_SIZE), \ (unsigned)8192) -/* - * RX and TX Prefetch, Host, and Write-back threshold values should be - * carefully set for optimal performance. Consult the network - * controller's datasheet and supporting DPDK documentation for guidance - * on how these parameters should be set. - */ -#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */ -#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */ -#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */ - -/* - * These default values are optimized for use with the Intel(R) 82599 10 GbE - * Controller and the DPDK ixgbe PMD. Consider using other values for other - * network controllers and/or network drivers. - */ -#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */ -#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */ -#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. 
*/ - #define MAX_PKT_BURST 32 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */ @@ -1041,13 +1022,8 @@ main(int argc, char **argv) fflush(stdout); /* init RX queues */ for(queue = 0; queue < qconf->n_rx_queue; ++queue) { - struct rte_eth_dev *dev; - struct rte_eth_conf *conf; - portid = qconf->rx_queue_list[queue].port_id; queueid = qconf->rx_queue_list[queue].queue_id; - dev = &rte_eth_devices[portid]; - conf = &dev->data->dev_conf; if (numa_on) socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id); @@ -1059,7 +1035,7 @@ main(int argc, char **argv) rte_eth_dev_info_get(portid, &dev_info); rxq_conf = dev_info.default_rxconf; - rxq_conf.offloads = conf->rxmode.offloads; + rxq_conf.offloads = port_conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, socketid, &rxq_conf, pktmbuf_pool[socketid]); diff --git a/dpdk/examples/l3fwd/Makefile b/dpdk/examples/l3fwd/Makefile index cccdd9df..4357ddb7 100644 --- a/dpdk/examples/l3fwd/Makefile +++ b/dpdk/examples/l3fwd/Makefile @@ -8,8 +8,7 @@ APP = l3fwd SRCS-y := main.c l3fwd_lpm.c l3fwd_em.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -35,7 +34,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/examples/l3fwd/l3fwd_em.c b/dpdk/examples/l3fwd/l3fwd_em.c index fa8f82be..76c5f714 100644 --- a/dpdk/examples/l3fwd/l3fwd_em.c +++ b/dpdk/examples/l3fwd/l3fwd_em.c @@ -285,7 +285,11 @@ em_get_ipv6_dst_port(void *ipv6_hdr, uint16_t portid, void *lookup_struct) * Get part of 5 tuple: dst IP address lower 96 bits * and src IP address higher 32 bits. 
*/ +#if defined RTE_ARCH_X86 + key.xmm[1] = _mm_loadu_si128(data1); +#else key.xmm[1] = *(xmm_t *)data1; +#endif /* * Get part of 5 tuple: dst port and src port diff --git a/dpdk/examples/l3fwd/main.c b/dpdk/examples/l3fwd/main.c index e4b99efe..71a67f42 100644 --- a/dpdk/examples/l3fwd/main.c +++ b/dpdk/examples/l3fwd/main.c @@ -945,14 +945,10 @@ main(int argc, char **argv) fflush(stdout); /* init RX queues */ for(queue = 0; queue < qconf->n_rx_queue; ++queue) { - struct rte_eth_dev *dev; - struct rte_eth_conf *conf; struct rte_eth_rxconf rxq_conf; portid = qconf->rx_queue_list[queue].port_id; queueid = qconf->rx_queue_list[queue].queue_id; - dev = &rte_eth_devices[portid]; - conf = &dev->data->dev_conf; if (numa_on) socketid = @@ -965,7 +961,7 @@ main(int argc, char **argv) rte_eth_dev_info_get(portid, &dev_info); rxq_conf = dev_info.default_rxconf; - rxq_conf.offloads = conf->rxmode.offloads; + rxq_conf.offloads = port_conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, socketid, &rxq_conf, diff --git a/dpdk/examples/link_status_interrupt/Makefile b/dpdk/examples/link_status_interrupt/Makefile index 16068212..de11a17f 100644 --- a/dpdk/examples/link_status_interrupt/Makefile +++ b/dpdk/examples/link_status_interrupt/Makefile @@ -8,8 +8,7 @@ APP = link_status_interrupt SRCS-y := main.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -35,7 +34,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/examples/load_balancer/Makefile b/dpdk/examples/load_balancer/Makefile index 197b019d..9261ce4e 100644 --- a/dpdk/examples/load_balancer/Makefile +++ b/dpdk/examples/load_balancer/Makefile @@ -8,8 +8,7 @@ APP = 
load_balancer SRCS-y := main.c config.c init.c runtime.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -35,7 +34,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/examples/meson.build b/dpdk/examples/meson.build index af81c762..2809e226 100644 --- a/dpdk/examples/meson.build +++ b/dpdk/examples/meson.build @@ -23,9 +23,6 @@ if cc.has_argument('-Wno-format-truncation') default_cflags += '-Wno-format-truncation' endif -# specify -D_GNU_SOURCE unconditionally -default_cflags += '-D_GNU_SOURCE' - foreach example: examples name = example build = true diff --git a/dpdk/examples/multi_process/client_server_mp/mp_server/main.c b/dpdk/examples/multi_process/client_server_mp/mp_server/main.c index 0ddc63e9..98d67591 100644 --- a/dpdk/examples/multi_process/client_server_mp/mp_server/main.c +++ b/dpdk/examples/multi_process/client_server_mp/mp_server/main.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include diff --git a/dpdk/examples/multi_process/client_server_mp/shared/common.h b/dpdk/examples/multi_process/client_server_mp/shared/common.h index ac917552..6dd43fca 100644 --- a/dpdk/examples/multi_process/client_server_mp/shared/common.h +++ b/dpdk/examples/multi_process/client_server_mp/shared/common.h @@ -49,7 +49,7 @@ get_rx_queue_name(unsigned id) * by maximum 3 digits (plus an extra byte for safety) */ static char buffer[sizeof(MP_CLIENT_RXQ_NAME) + 2]; - snprintf(buffer, sizeof(buffer) - 1, MP_CLIENT_RXQ_NAME, id); + snprintf(buffer, sizeof(buffer), MP_CLIENT_RXQ_NAME, id); return buffer; } diff --git a/dpdk/examples/multi_process/symmetric_mp/main.c b/dpdk/examples/multi_process/symmetric_mp/main.c index 
c310e942..62771e03 100644 --- a/dpdk/examples/multi_process/symmetric_mp/main.c +++ b/dpdk/examples/multi_process/symmetric_mp/main.c @@ -271,7 +271,7 @@ static void assign_ports_to_cores(void) { - const unsigned lcores = rte_eal_get_configuration()->lcore_count; + const unsigned int lcores = rte_lcore_count(); const unsigned port_pairs = num_ports / 2; const unsigned pairs_per_lcore = port_pairs / lcores; unsigned extra_pairs = port_pairs % lcores; diff --git a/dpdk/examples/packet_ordering/Makefile b/dpdk/examples/packet_ordering/Makefile index 3cf1ee1d..27b82b6e 100644 --- a/dpdk/examples/packet_ordering/Makefile +++ b/dpdk/examples/packet_ordering/Makefile @@ -8,8 +8,7 @@ APP = packet_ordering SRCS-y := main.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -35,7 +34,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/examples/performance-thread/l3fwd-thread/main.c b/dpdk/examples/performance-thread/l3fwd-thread/main.c index 4f8747bc..79523d23 100644 --- a/dpdk/examples/performance-thread/l3fwd-thread/main.c +++ b/dpdk/examples/performance-thread/l3fwd-thread/main.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -3486,6 +3487,8 @@ main(int argc, char **argv) argc -= ret; argv += ret; + rte_timer_subsystem_init(); + /* pre-init dst MACs for all ports to 02:00:00:00:00:xx */ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { dest_eth_addr[portid] = ETHER_LOCAL_ADMIN_ADDR + @@ -3634,14 +3637,10 @@ main(int argc, char **argv) /* init RX queues */ for (queue = 0; queue < rx_thread[i].n_rx_queue; ++queue) { - struct rte_eth_dev *dev; - struct rte_eth_conf *conf; struct rte_eth_rxconf rxq_conf; portid = 
rx_thread[i].rx_queue_list[queue].port_id; queueid = rx_thread[i].rx_queue_list[queue].queue_id; - dev = &rte_eth_devices[portid]; - conf = &dev->data->dev_conf; if (numa_on) socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id); @@ -3653,7 +3652,7 @@ main(int argc, char **argv) rte_eth_dev_info_get(portid, &dev_info); rxq_conf = dev_info.default_rxconf; - rxq_conf.offloads = conf->rxmode.offloads; + rxq_conf.offloads = port_conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, socketid, &rxq_conf, diff --git a/dpdk/examples/ptpclient/Makefile b/dpdk/examples/ptpclient/Makefile index 989e2dd4..3eec3dc1 100644 --- a/dpdk/examples/ptpclient/Makefile +++ b/dpdk/examples/ptpclient/Makefile @@ -8,8 +8,7 @@ APP = ptpclient SRCS-y := ptpclient.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -35,7 +34,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/examples/ptpclient/ptpclient.c b/dpdk/examples/ptpclient/ptpclient.c index 82ae71c1..89f19cad 100644 --- a/dpdk/examples/ptpclient/ptpclient.c +++ b/dpdk/examples/ptpclient/ptpclient.c @@ -233,7 +233,11 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool) return retval; /* Enable timesync timestamping for the Ethernet device */ - rte_eth_timesync_enable(port); + retval = rte_eth_timesync_enable(port); + if (retval < 0) { + printf("Timesync enable failed: %d\n", retval); + return retval; + } /* Enable RX in promiscuous mode for the Ethernet device. 
*/ rte_eth_promiscuous_enable(port); @@ -413,6 +417,9 @@ parse_fup(struct ptpv2_data_slave_ordinary *ptp_data) ptp_msg->delay_req.hdr.ver = 2; ptp_msg->delay_req.hdr.control = 1; ptp_msg->delay_req.hdr.log_message_interval = 127; + ptp_msg->delay_req.hdr.message_length = + htons(sizeof(struct delay_req_msg)); + ptp_msg->delay_req.hdr.domain_number = ptp_hdr->domain_number; /* Set up clock id. */ client_clkid = diff --git a/dpdk/examples/qos_meter/Makefile b/dpdk/examples/qos_meter/Makefile index 46341b1a..95bd1b14 100644 --- a/dpdk/examples/qos_meter/Makefile +++ b/dpdk/examples/qos_meter/Makefile @@ -8,8 +8,7 @@ APP = qos_meter SRCS-y := main.c rte_policer.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -37,7 +36,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/examples/qos_sched/Makefile b/dpdk/examples/qos_sched/Makefile index 45b0a9eb..2cdd5fa3 100644 --- a/dpdk/examples/qos_sched/Makefile +++ b/dpdk/examples/qos_sched/Makefile @@ -8,8 +8,7 @@ APP = qos_sched SRCS-y := main.c args.c init.c app_thread.c cfg_file.c cmdline.c stats.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -35,7 +34,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/examples/qos_sched/args.c b/dpdk/examples/qos_sched/args.c index 83eee95c..7431b298 100644 --- a/dpdk/examples/qos_sched/args.c +++ 
b/dpdk/examples/qos_sched/args.c @@ -90,16 +90,15 @@ static inline int str_is(const char *str, const char *is) static uint64_t app_eal_core_mask(void) { - uint32_t i; uint64_t cm = 0; - struct rte_config *cfg = rte_eal_get_configuration(); + uint32_t i; for (i = 0; i < APP_MAX_LCORE; i++) { - if (cfg->lcore_role[i] == ROLE_RTE) + if (rte_lcore_has_role(i, ROLE_RTE)) cm |= (1ULL << i); } - cm |= (1ULL << cfg->master_lcore); + cm |= (1ULL << rte_get_master_lcore()); return cm; } diff --git a/dpdk/examples/rxtx_callbacks/Makefile b/dpdk/examples/rxtx_callbacks/Makefile index e9d30d56..2873f799 100644 --- a/dpdk/examples/rxtx_callbacks/Makefile +++ b/dpdk/examples/rxtx_callbacks/Makefile @@ -8,8 +8,7 @@ APP = rxtx_callbacks SRCS-y := main.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -35,7 +34,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/examples/server_node_efd/shared/common.h b/dpdk/examples/server_node_efd/shared/common.h index b8b533d8..130fd4f4 100644 --- a/dpdk/examples/server_node_efd/shared/common.h +++ b/dpdk/examples/server_node_efd/shared/common.h @@ -61,7 +61,7 @@ get_rx_queue_name(unsigned int id) */ static char buffer[sizeof(MP_NODE_RXQ_NAME) + 2]; - snprintf(buffer, sizeof(buffer) - 1, MP_NODE_RXQ_NAME, id); + snprintf(buffer, sizeof(buffer), MP_NODE_RXQ_NAME, id); return buffer; } diff --git a/dpdk/examples/service_cores/Makefile b/dpdk/examples/service_cores/Makefile index a4d6b7b4..49e2a587 100644 --- a/dpdk/examples/service_cores/Makefile +++ b/dpdk/examples/service_cores/Makefile @@ -8,8 +8,7 @@ APP = service_cores SRCS-y := main.c # Build using pkg-config variables if possible -$(shell pkg-config --exists 
libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -35,7 +34,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else diff --git a/dpdk/examples/skeleton/Makefile b/dpdk/examples/skeleton/Makefile index bd980ec9..f58c8903 100644 --- a/dpdk/examples/skeleton/Makefile +++ b/dpdk/examples/skeleton/Makefile @@ -8,8 +8,7 @@ APP = basicfwd SRCS-y := basicfwd.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -35,7 +34,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/examples/tep_termination/Makefile b/dpdk/examples/tep_termination/Makefile index 4c156432..5f76a97e 100644 --- a/dpdk/examples/tep_termination/Makefile +++ b/dpdk/examples/tep_termination/Makefile @@ -8,8 +8,7 @@ APP = tep_termination SRCS-y := main.c vxlan_setup.c vxlan.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -39,7 +38,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/examples/tep_termination/main.c b/dpdk/examples/tep_termination/main.c index d6379e31..6db604ed 100644 --- a/dpdk/examples/tep_termination/main.c +++ b/dpdk/examples/tep_termination/main.c @@ -52,11 +52,6 @@ #define JUMBO_FRAME_MAX_SIZE 0x2600 -/* State of virtio device. 
*/ -#define DEVICE_MAC_LEARNING 0 -#define DEVICE_RX 1 -#define DEVICE_SAFE_REMOVE 2 - /* Config_core_flag status definitions. */ #define REQUEST_DEV_REMOVAL 1 #define ACK_DEV_REMOVAL 0 diff --git a/dpdk/examples/timer/Makefile b/dpdk/examples/timer/Makefile index 42b23f28..3e8a1426 100644 --- a/dpdk/examples/timer/Makefile +++ b/dpdk/examples/timer/Makefile @@ -8,8 +8,7 @@ APP = timer SRCS-y := main.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -35,7 +34,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/examples/vdpa/Makefile b/dpdk/examples/vdpa/Makefile index 42672a2b..d3321ef9 100644 --- a/dpdk/examples/vdpa/Makefile +++ b/dpdk/examples/vdpa/Makefile @@ -10,7 +10,7 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc include $(RTE_SDK)/mk/rte.vars.mk -ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp") +ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y) $(info This application can only operate in a linuxapp environment, \ please change the definition of the RTE_TARGET environment variable) all: diff --git a/dpdk/examples/vhost/Makefile b/dpdk/examples/vhost/Makefile index c6964381..a8a8aba9 100644 --- a/dpdk/examples/vhost/Makefile +++ b/dpdk/examples/vhost/Makefile @@ -8,8 +8,7 @@ APP = vhost-switch SRCS-y := main.c virtio_net.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -39,7 +38,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system 
diff --git a/dpdk/examples/vhost_crypto/main.c b/dpdk/examples/vhost_crypto/main.c index 3deb5263..c6d55c84 100644 --- a/dpdk/examples/vhost_crypto/main.c +++ b/dpdk/examples/vhost_crypto/main.c @@ -352,12 +352,6 @@ static const struct vhost_device_ops virtio_crypto_device_ops = { .destroy_device = destroy_device, }; -__attribute__((unused)) -static void clrscr(void) -{ - system("@cls||clear"); -} - static int vhost_crypto_worker(void *arg) { diff --git a/dpdk/examples/vhost_crypto/meson.build b/dpdk/examples/vhost_crypto/meson.build index 8e9860f0..2485f3bd 100644 --- a/dpdk/examples/vhost_crypto/meson.build +++ b/dpdk/examples/vhost_crypto/meson.build @@ -9,7 +9,6 @@ build = dpdk_conf.has('RTE_LIBRTE_VHOST') allow_experimental_apis = true deps += ['vhost', 'cryptodev'] -cflags += ['-D_FILE_OFFSET_BITS=64'] sources = files( 'main.c' ) diff --git a/dpdk/examples/vhost_scsi/Makefile b/dpdk/examples/vhost_scsi/Makefile index 523aee0b..69e102de 100644 --- a/dpdk/examples/vhost_scsi/Makefile +++ b/dpdk/examples/vhost_scsi/Makefile @@ -8,8 +8,7 @@ APP = vhost-scsi SRCS-y := scsi.c vhost_scsi.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -38,7 +37,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/examples/vhost_scsi/meson.build b/dpdk/examples/vhost_scsi/meson.build index 2303bcae..3b41b3b9 100644 --- a/dpdk/examples/vhost_scsi/meson.build +++ b/dpdk/examples/vhost_scsi/meson.build @@ -10,7 +10,6 @@ if host_machine.system() != 'linux' build = false endif deps += 'vhost' -cflags += ['-D_FILE_OFFSET_BITS=64'] sources = files( 'scsi.c', 'vhost_scsi.c' ) diff --git a/dpdk/examples/vm_power_manager/guest_cli/main.c 
b/dpdk/examples/vm_power_manager/guest_cli/main.c index 36365b12..2094145e 100644 --- a/dpdk/examples/vm_power_manager/guest_cli/main.c +++ b/dpdk/examples/vm_power_manager/guest_cli/main.c @@ -65,7 +65,7 @@ parse_args(int argc, char **argv) switch (opt) { /* portmask */ case 'n': - strcpy(policy->vm_name, optarg); + strlcpy(policy->vm_name, optarg, VM_MAX_NAME_SZ); printf("Setting VM Name to [%s]\n", policy->vm_name); break; case 'b': diff --git a/dpdk/examples/vm_power_manager/guest_cli/meson.build b/dpdk/examples/vm_power_manager/guest_cli/meson.build index 9e821ceb..38bd8d83 100644 --- a/dpdk/examples/vm_power_manager/guest_cli/meson.build +++ b/dpdk/examples/vm_power_manager/guest_cli/meson.build @@ -10,6 +10,11 @@ # vm_power_manager app because of the way the directories are parsed. name = 'guest_cli' +if not dpdk_conf.has('RTE_LIBRTE_POWER') + build = false + subdir_done() +endif + deps += ['power'] sources = files( diff --git a/dpdk/examples/vm_power_manager/meson.build b/dpdk/examples/vm_power_manager/meson.build index f98445bc..20a4a05b 100644 --- a/dpdk/examples/vm_power_manager/meson.build +++ b/dpdk/examples/vm_power_manager/meson.build @@ -6,6 +6,13 @@ # To build this example as a standalone application with an already-installed # DPDK instance, use 'make' +if not dpdk_conf.has('RTE_LIBRTE_POWER') + build = false + subdir_done() +endif + +deps += ['power'] + if dpdk_conf.has('RTE_LIBRTE_BNXT_PMD') deps += ['pmd_bnxt'] endif @@ -18,9 +25,6 @@ if dpdk_conf.has('RTE_LIBRTE_IXGBE_PMD') deps += ['pmd_ixgbe'] endif -deps += ['power'] - - sources = files( 'channel_manager.c', 'channel_monitor.c', 'main.c', 'parse.c', 'power_manager.c', 'vm_power_cli.c' ) diff --git a/dpdk/examples/vmdq/Makefile b/dpdk/examples/vmdq/Makefile index 87abeab9..a9983a18 100644 --- a/dpdk/examples/vmdq/Makefile +++ b/dpdk/examples/vmdq/Makefile @@ -8,8 +8,7 @@ APP = vmdq_app SRCS-y := main.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq 
($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -35,7 +34,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/examples/vmdq_dcb/Makefile b/dpdk/examples/vmdq_dcb/Makefile index bf161cb2..5a8934c8 100644 --- a/dpdk/examples/vmdq_dcb/Makefile +++ b/dpdk/examples/vmdq_dcb/Makefile @@ -8,8 +8,7 @@ APP = vmdq_dcb_app SRCS-y := main.c # Build using pkg-config variables if possible -$(shell pkg-config --exists libdpdk) -ifeq ($(.SHELLSTATUS),0) +ifeq ($(shell pkg-config --exists libdpdk && echo 0),0) all: shared .PHONY: shared static @@ -35,7 +34,7 @@ build: .PHONY: clean clean: rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared - rmdir --ignore-fail-on-non-empty build + test -d build && rmdir -p build || true else # Build using legacy build system diff --git a/dpdk/kernel/freebsd/contigmem/contigmem.c b/dpdk/kernel/freebsd/contigmem/contigmem.c index 1715b5dc..64e0a7fe 100644 --- a/dpdk/kernel/freebsd/contigmem/contigmem.c +++ b/dpdk/kernel/freebsd/contigmem/contigmem.c @@ -13,10 +13,13 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include +#include #include #include #include +#include #include diff --git a/dpdk/kernel/freebsd/nic_uio/nic_uio.c b/dpdk/kernel/freebsd/nic_uio/nic_uio.c index 401b487e..7a81694c 100644 --- a/dpdk/kernel/freebsd/nic_uio/nic_uio.c +++ b/dpdk/kernel/freebsd/nic_uio/nic_uio.c @@ -11,6 +11,7 @@ __FBSDID("$FreeBSD$"); #include /* structs, prototypes for pci bus stuff and DEVMETHOD */ #include #include +#include #include #include diff --git a/dpdk/kernel/linux/igb_uio/igb_uio.c b/dpdk/kernel/linux/igb_uio/igb_uio.c index 3cf394bd..039f5a5f 100644 --- a/dpdk/kernel/linux/igb_uio/igb_uio.c +++ b/dpdk/kernel/linux/igb_uio/igb_uio.c @@ -236,7 +236,7 @@ 
igbuio_pci_enable_interrupts(struct rte_uio_pci_dev *udev) } #endif - /* fall back to MSI */ + /* falls through - to MSI */ case RTE_INTR_MODE_MSI: #ifndef HAVE_ALLOC_IRQ_VECTORS if (pci_enable_msi(udev->pdev) == 0) { @@ -255,7 +255,7 @@ igbuio_pci_enable_interrupts(struct rte_uio_pci_dev *udev) break; } #endif - /* fall back to INTX */ + /* falls through - to INTX */ case RTE_INTR_MODE_LEGACY: if (pci_intx_mask_supported(udev->pdev)) { dev_dbg(&udev->pdev->dev, "using INTX"); @@ -265,7 +265,7 @@ igbuio_pci_enable_interrupts(struct rte_uio_pci_dev *udev) break; } dev_notice(&udev->pdev->dev, "PCI INTX mask not supported\n"); - /* fall back to no IRQ */ + /* falls through - to no IRQ */ case RTE_INTR_MODE_NONE: udev->mode = RTE_INTR_MODE_NONE; udev->info.irq = UIO_IRQ_NONE; diff --git a/dpdk/kernel/linux/igb_uio/meson.build b/dpdk/kernel/linux/igb_uio/meson.build index f5a9d5cc..fac404f0 100644 --- a/dpdk/kernel/linux/igb_uio/meson.build +++ b/dpdk/kernel/linux/igb_uio/meson.build @@ -8,7 +8,7 @@ mkfile = custom_target('igb_uio_makefile', custom_target('igb_uio', input: ['igb_uio.c', 'Kbuild'], output: 'igb_uio.ko', - command: ['make', '-C', kernel_dir, + command: ['make', '-C', kernel_dir + '/build', 'M=' + meson.current_build_dir(), 'src=' + meson.current_source_dir(), 'EXTRA_CFLAGS=-I' + meson.current_source_dir() + @@ -16,5 +16,5 @@ custom_target('igb_uio', 'modules'], depends: mkfile, install: true, - install_dir: kernel_dir + '/../extra/dpdk', + install_dir: kernel_dir + '/extra/dpdk', build_by_default: get_option('enable_kmods')) diff --git a/dpdk/kernel/linux/kni/ethtool/igb/igb_main.c b/dpdk/kernel/linux/kni/ethtool/igb/igb_main.c index cda2b063..69d3ea5f 100644 --- a/dpdk/kernel/linux/kni/ethtool/igb/igb_main.c +++ b/dpdk/kernel/linux/kni/ethtool/igb/igb_main.c @@ -5331,7 +5331,7 @@ static void igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb = first->skb; struct igb_tx_buffer *tx_buffer; union e1000_adv_tx_desc *tx_desc; - struct skb_frag_struct 
*frag; + skb_frag_t *frag; dma_addr_t dma; unsigned int data_len, size; u32 tx_flags = first->tx_flags; @@ -8231,7 +8231,7 @@ static void igb_pull_tail(struct igb_ring *rx_ring, union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; unsigned char *va; unsigned int pull_len; @@ -8249,7 +8249,11 @@ static void igb_pull_tail(struct igb_ring *rx_ring, /* update pointers to remove timestamp header */ skb_frag_size_sub(frag, IGB_TS_HDR_LEN); +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0) frag->page_offset += IGB_TS_HDR_LEN; +#else + frag->bv_offset += pull_len; +#endif skb->data_len -= IGB_TS_HDR_LEN; skb->len -= IGB_TS_HDR_LEN; @@ -8269,7 +8273,11 @@ static void igb_pull_tail(struct igb_ring *rx_ring, /* update all of the pointers */ skb_frag_size_sub(frag, pull_len); +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0) frag->page_offset += pull_len; +#else + frag->bv_offset += pull_len; +#endif skb->data_len -= pull_len; skb->tail += pull_len; } diff --git a/dpdk/kernel/linux/kni/ethtool/igb/kcompat.h b/dpdk/kernel/linux/kni/ethtool/igb/kcompat.h index 649a69c8..96431750 100644 --- a/dpdk/kernel/linux/kni/ethtool/igb/kcompat.h +++ b/dpdk/kernel/linux/kni/ethtool/igb/kcompat.h @@ -218,9 +218,11 @@ struct msix_entry { #define node_online(node) ((node) == 0) #endif +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0) ) #ifndef num_online_cpus #define num_online_cpus() smp_num_cpus #endif +#endif #ifndef cpu_online #define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map) @@ -2413,13 +2415,17 @@ static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb) extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state); #define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s) -#else /* < 2.6.26 */ +#else /* < 2.6.26 or > 5.4 */ +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(5,4,0) ) +#include +#else #include +#endif #define HAVE_NETDEV_VLAN_FEATURES 
#ifndef PCI_EXP_LNKCAP_ASPMS #define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ #endif /* PCI_EXP_LNKCAP_ASPMS */ -#endif /* < 2.6.26 */ +#endif /* < 2.6.26 or > 5.4 */ /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, @@ -3922,7 +3928,8 @@ skb_set_hash(struct sk_buff *skb, __u32 hash, __always_unused int type) (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0)) || \ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4))) #define HAVE_VF_VLAN_PROTO -#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) +#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 0))) /* In RHEL/Centos 7.4, the "new" version of ndo_set_vf_vlan * is in the struct net_device_ops_extended */ #define ndo_set_vf_vlan extended.ndo_set_vf_vlan diff --git a/dpdk/kernel/linux/kni/ethtool/ixgbe/kcompat.h b/dpdk/kernel/linux/kni/ethtool/ixgbe/kcompat.h index 419fd1f1..e1671e91 100644 --- a/dpdk/kernel/linux/kni/ethtool/ixgbe/kcompat.h +++ b/dpdk/kernel/linux/kni/ethtool/ixgbe/kcompat.h @@ -235,9 +235,11 @@ struct msix_entry { #define node_online(node) ((node) == 0) #endif +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0) ) #ifndef num_online_cpus #define num_online_cpus() smp_num_cpus #endif +#endif #ifndef cpu_online #define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map) @@ -2221,10 +2223,14 @@ static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb) extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state); #define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s) -#else /* < 2.6.26 */ +#else /* < 2.6.26 or > 5.4 */ +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(5,4,0) ) +#include +#else #include +#endif #define HAVE_NETDEV_VLAN_FEATURES -#endif /* < 2.6.26 */ +#endif /* < 2.6.26 or > 5.4 */ 
/*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, diff --git a/dpdk/kernel/linux/kni/kni_misc.c b/dpdk/kernel/linux/kni/kni_misc.c index b74e8a3a..8bf0e210 100644 --- a/dpdk/kernel/linux/kni/kni_misc.c +++ b/dpdk/kernel/linux/kni/kni_misc.c @@ -318,11 +318,8 @@ kni_ioctl_create(struct net *net, uint32_t ioctl_num, return -EINVAL; /* Copy kni info from user space */ - ret = copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info)); - if (ret) { - pr_err("copy_from_user in kni_ioctl_create"); - return -EIO; - } + if (copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info))) + return -EFAULT; /* Check if name is zero-ended */ if (strnlen(dev_info.name, sizeof(dev_info.name)) == sizeof(dev_info.name)) { @@ -495,15 +492,12 @@ kni_ioctl_release(struct net *net, uint32_t ioctl_num, if (_IOC_SIZE(ioctl_num) > sizeof(dev_info)) return -EINVAL; - ret = copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info)); - if (ret) { - pr_err("copy_from_user in kni_ioctl_release"); - return -EIO; - } + if (copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info))) + return -EFAULT; /* Release the network device according to its name */ if (strlen(dev_info.name) == 0) - return ret; + return -EINVAL; down_write(&knet->kni_list_lock); list_for_each_entry_safe(dev, n, &knet->kni_list_head, list) { diff --git a/dpdk/kernel/linux/kni/kni_net.c b/dpdk/kernel/linux/kni/kni_net.c index 7371b6d5..432d56a1 100644 --- a/dpdk/kernel/linux/kni/kni_net.c +++ b/dpdk/kernel/linux/kni/kni_net.c @@ -61,18 +61,6 @@ kva2data_kva(struct rte_kni_mbuf *m) return phys_to_virt(m->buf_physaddr + m->data_off); } -/* virtual address to physical address */ -static void * -va2pa(void *va, struct rte_kni_mbuf *m) -{ - void *pa; - - pa = (void *)((unsigned long)va - - ((unsigned long)m->buf_addr - - (unsigned long)m->buf_physaddr)); - return 
pa; -} - /* * It can be called to process the request. */ @@ -173,7 +161,10 @@ kni_fifo_trans_pa2va(struct kni_dev *kni, struct rte_kni_fifo *src_pa, struct rte_kni_fifo *dst_va) { uint32_t ret, i, num_dst, num_rx; - void *kva; + struct rte_kni_mbuf *kva, *prev_kva; + int nb_segs; + int kva_nb_segs; + do { num_dst = kni_fifo_free_count(dst_va); if (num_dst == 0) @@ -188,6 +179,17 @@ kni_fifo_trans_pa2va(struct kni_dev *kni, for (i = 0; i < num_rx; i++) { kva = pa2kva(kni->pa[i]); kni->va[i] = pa2va(kni->pa[i], kva); + + kva_nb_segs = kva->nb_segs; + for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) { + if (!kva->next) + break; + + prev_kva = kva; + kva = pa2kva(kva->next); + /* Convert physical address to virtual address */ + prev_kva->next = pa2va(prev_kva->next, kva); + } } ret = kni_fifo_put(dst_va, kni->va, num_rx); @@ -313,7 +315,7 @@ kni_net_rx_normal(struct kni_dev *kni) uint32_t ret; uint32_t len; uint32_t i, num_rx, num_fq; - struct rte_kni_mbuf *kva; + struct rte_kni_mbuf *kva, *prev_kva; void *data_kva; struct sk_buff *skb; struct net_device *dev = kni->net_dev; @@ -363,8 +365,11 @@ kni_net_rx_normal(struct kni_dev *kni) if (!kva->next) break; - kva = pa2kva(va2pa(kva->next, kva)); + prev_kva = kva; + kva = pa2kva(kva->next); data_kva = kva2data_kva(kva); + /* Convert physical address to virtual address */ + prev_kva->next = pa2va(prev_kva->next, kva); } } @@ -396,7 +401,7 @@ kni_net_rx_lo_fifo(struct kni_dev *kni) uint32_t ret; uint32_t len; uint32_t i, num, num_rq, num_tq, num_aq, num_fq; - struct rte_kni_mbuf *kva; + struct rte_kni_mbuf *kva, *next_kva; void *data_kva; struct rte_kni_mbuf *alloc_kva; void *alloc_data_kva; @@ -404,7 +409,7 @@ kni_net_rx_lo_fifo(struct kni_dev *kni) /* Get the number of entries in rx_q */ num_rq = kni_fifo_count(kni->rx_q); - /* Get the number of free entrie in tx_q */ + /* Get the number of free entries in tx_q */ num_tq = kni_fifo_free_count(kni->tx_q); /* Get the number of entries in alloc_q */ @@ -435,10 +440,17 
@@ kni_net_rx_lo_fifo(struct kni_dev *kni) /* Copy mbufs */ for (i = 0; i < num; i++) { kva = pa2kva(kni->pa[i]); - len = kva->pkt_len; + len = kva->data_len; data_kva = kva2data_kva(kva); kni->va[i] = pa2va(kni->pa[i], kva); + while (kva->next) { + next_kva = pa2kva(kva->next); + /* Convert physical address to virtual address */ + kva->next = pa2va(kva->next, next_kva); + kva = next_kva; + } + alloc_kva = pa2kva(kni->alloc_pa[i]); alloc_data_kva = kva2data_kva(alloc_kva); kni->alloc_va[i] = pa2va(kni->alloc_pa[i], alloc_kva); @@ -481,7 +493,7 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni) uint32_t ret; uint32_t len; uint32_t i, num_rq, num_fq, num; - struct rte_kni_mbuf *kva; + struct rte_kni_mbuf *kva, *prev_kva; void *data_kva; struct sk_buff *skb; struct net_device *dev = kni->net_dev; @@ -545,8 +557,11 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni) if (!kva->next) break; - kva = pa2kva(va2pa(kva->next, kva)); + prev_kva = kva; + kva = pa2kva(kva->next); data_kva = kva2data_kva(kva); + /* Convert physical address to virtual address */ + prev_kva->next = pa2va(prev_kva->next, kva); } } @@ -797,6 +812,7 @@ kni_net_config_lo_mode(char *lo_str) } else if (!strcmp(lo_str, "lo_mode_fifo_skb")) { pr_debug("loopback mode=lo_mode_fifo_skb enabled"); kni_net_rx_func = kni_net_rx_lo_fifo_skb; - } else - pr_debug("Incognizant parameter, loopback disabled"); + } else { + pr_debug("Unknown loopback parameter, disabled"); + } } diff --git a/dpdk/kernel/linux/kni/meson.build b/dpdk/kernel/linux/kni/meson.build index a09af5aa..e6822f49 100644 --- a/dpdk/kernel/linux/kni/meson.build +++ b/dpdk/kernel/linux/kni/meson.build @@ -16,7 +16,7 @@ kni_sources = files( custom_target('rte_kni', input: kni_sources + kni_igb_sources + kni_ixgbe_sources, output: 'rte_kni.ko', - command: ['make', '-j4', '-C', kernel_dir, + command: ['make', '-j4', '-C', kernel_dir + '/build', 'M=' + meson.current_build_dir(), 'src=' + meson.current_source_dir(), 'MODULE_CFLAGS=-include ' + meson.source_root() 
+ '/config/rte_config.h' + @@ -30,5 +30,5 @@ custom_target('rte_kni', depends: kni_mkfile, console: true, install: true, - install_dir: kernel_dir + '/../extra/dpdk', + install_dir: kernel_dir + '/extra/dpdk', build_by_default: get_option('enable_kmods')) diff --git a/dpdk/kernel/linux/meson.build b/dpdk/kernel/linux/meson.build index 5b7ec06e..c5bcc1f3 100644 --- a/dpdk/kernel/linux/meson.build +++ b/dpdk/kernel/linux/meson.build @@ -20,11 +20,11 @@ else if kernel_dir == '' # use default path for native builds kernel_version = run_command('uname', '-r').stdout().strip() - kernel_dir = '/lib/modules/' + kernel_version + '/build' + kernel_dir = '/lib/modules/' + kernel_version endif # test running make in kernel directory, using "make kernelversion" - make_returncode = run_command('make', '-sC', kernel_dir, + make_returncode = run_command('make', '-sC', kernel_dir + '/build', 'kernelversion').returncode() if make_returncode != 0 if meson.version().version_compare('>=0.44') diff --git a/dpdk/lib/librte_acl/acl_bld.c b/dpdk/lib/librte_acl/acl_bld.c index b82191f4..b06bbe92 100644 --- a/dpdk/lib/librte_acl/acl_bld.c +++ b/dpdk/lib/librte_acl/acl_bld.c @@ -320,7 +320,7 @@ acl_add_ptr_range(struct acl_build_context *context, for (n = 0; n < UINT8_MAX + 1; n++) if (n >= low && n <= high) bitset.bits[n / (sizeof(bits_t) * 8)] |= - 1 << (n % (sizeof(bits_t) * 8)); + 1U << (n % (sizeof(bits_t) * CHAR_BIT)); return acl_add_ptr(context, root, node, &bitset); } @@ -343,7 +343,7 @@ acl_gen_mask(struct rte_acl_bitset *bitset, uint32_t value, uint32_t mask) if ((n & mask) == value) { range++; bitset->bits[n / (sizeof(bits_t) * 8)] |= - 1 << (n % (sizeof(bits_t) * 8)); + 1U << (n % (sizeof(bits_t) * CHAR_BIT)); } } return range; @@ -972,7 +972,7 @@ build_trie(struct acl_build_context *context, struct rte_acl_build_rule *head, sizeof(*end->mrt)); for (m = context->cfg.num_categories; 0 != m--; ) { - if (rule->f->data.category_mask & (1 << m)) { + if (rule->f->data.category_mask & 
(1U << m)) { end->mrt->results[m] = rule->f->data.userdata; end->mrt->priority[m] = rule->f->data.priority; } else { diff --git a/dpdk/lib/librte_acl/acl_gen.c b/dpdk/lib/librte_acl/acl_gen.c index 35a0140b..f1b9d12f 100644 --- a/dpdk/lib/librte_acl/acl_gen.c +++ b/dpdk/lib/librte_acl/acl_gen.c @@ -133,7 +133,7 @@ acl_node_fill_dfa(const struct rte_acl_node *node, for (n = 0; n < RTE_ACL_DFA_SIZE; n++) { if (bits->bits[n / (sizeof(bits_t) * CHAR_BIT)] & - (1 << (n % (sizeof(bits_t) * CHAR_BIT)))) { + (1U << (n % (sizeof(bits_t) * CHAR_BIT)))) { dfa[n] = resolved ? child->node_index : x; ranges += (last_bit == 0); @@ -175,7 +175,7 @@ acl_count_sequential_groups(struct rte_acl_bitset *bits, int zero_one) } for (n = 0; n < QRANGE_MIN; n++) { if (bits->bits[n / (sizeof(bits_t) * 8)] & - (1 << (n % (sizeof(bits_t) * 8)))) { + (1U << (n % (sizeof(bits_t) * CHAR_BIT)))) { if (zero_one == 1 && last_bit != 1) ranges++; last_bit = 1; diff --git a/dpdk/lib/librte_acl/acl_run_neon.h b/dpdk/lib/librte_acl/acl_run_neon.h index 01b9766d..b3196cd1 100644 --- a/dpdk/lib/librte_acl/acl_run_neon.h +++ b/dpdk/lib/librte_acl/acl_run_neon.h @@ -181,8 +181,8 @@ search_neon_8(const struct rte_acl_ctx *ctx, const uint8_t **data, while (flows.started > 0) { /* Gather 4 bytes of input data for each stream. */ - input0 = vsetq_lane_s32(GET_NEXT_4BYTES(parms, 0), input0, 0); - input1 = vsetq_lane_s32(GET_NEXT_4BYTES(parms, 4), input1, 0); + input0 = vdupq_n_s32(GET_NEXT_4BYTES(parms, 0)); + input1 = vdupq_n_s32(GET_NEXT_4BYTES(parms, 4)); input0 = vsetq_lane_s32(GET_NEXT_4BYTES(parms, 1), input0, 1); input1 = vsetq_lane_s32(GET_NEXT_4BYTES(parms, 5), input1, 1); @@ -242,7 +242,7 @@ search_neon_4(const struct rte_acl_ctx *ctx, const uint8_t **data, while (flows.started > 0) { /* Gather 4 bytes of input data for each stream. 
*/ - input = vsetq_lane_s32(GET_NEXT_4BYTES(parms, 0), input, 0); + input = vdupq_n_s32(GET_NEXT_4BYTES(parms, 0)); input = vsetq_lane_s32(GET_NEXT_4BYTES(parms, 1), input, 1); input = vsetq_lane_s32(GET_NEXT_4BYTES(parms, 2), input, 2); input = vsetq_lane_s32(GET_NEXT_4BYTES(parms, 3), input, 3); diff --git a/dpdk/lib/librte_bpf/bpf_def.h b/dpdk/lib/librte_bpf/bpf_def.h index c10f3aec..d3999299 100644 --- a/dpdk/lib/librte_bpf/bpf_def.h +++ b/dpdk/lib/librte_bpf/bpf_def.h @@ -120,6 +120,14 @@ enum { EBPF_REG_NUM, }; +/* + * When EBPF_CALL instruction has src_reg == EBPF_PSEUDO_CALL, + * it should be treated as pseudo-call instruction, where + * imm value contains pc-relative offset to another EBPF function. + * Right now DPDK EBPF library doesn't support it. + */ +#define EBPF_PSEUDO_CALL EBPF_REG_1 + /* * eBPF instruction format */ diff --git a/dpdk/lib/librte_bpf/bpf_load_elf.c b/dpdk/lib/librte_bpf/bpf_load_elf.c index 96d3630f..926317b6 100644 --- a/dpdk/lib/librte_bpf/bpf_load_elf.c +++ b/dpdk/lib/librte_bpf/bpf_load_elf.c @@ -77,10 +77,21 @@ resolve_xsym(const char *sn, size_t ofs, struct ebpf_insn *ins, size_t ins_sz, return -ENOENT; /* for function we just need an index in our xsym table */ - if (type == RTE_BPF_XTYPE_FUNC) + if (type == RTE_BPF_XTYPE_FUNC) { + + /* we don't support multiple functions per BPF module, + * so treat EBPF_PSEUDO_CALL to extrernal function + * as an ordinary EBPF_CALL. 
+ */ + if (ins[idx].src_reg == EBPF_PSEUDO_CALL) { + RTE_BPF_LOG(INFO, "%s(%u): " + "EBPF_PSEUDO_CALL to external function: %s\n", + __func__, idx, sn); + ins[idx].src_reg = EBPF_REG_0; + } ins[idx].imm = fidx; /* for variable we need to store its absolute address */ - else { + } else { ins[idx].imm = (uintptr_t)prm->xsym[fidx].var.val; ins[idx + 1].imm = (uint64_t)(uintptr_t)prm->xsym[fidx].var.val >> 32; diff --git a/dpdk/lib/librte_bpf/bpf_validate.c b/dpdk/lib/librte_bpf/bpf_validate.c index 83983efc..0cf41fa2 100644 --- a/dpdk/lib/librte_bpf/bpf_validate.c +++ b/dpdk/lib/librte_bpf/bpf_validate.c @@ -925,7 +925,6 @@ eval_func_arg(struct bpf_verifier *bvf, const struct rte_bpf_arg *arg, static const char * eval_call(struct bpf_verifier *bvf, const struct ebpf_insn *ins) { - uint64_t msk; uint32_t i, idx; struct bpf_reg_val *rv; const struct rte_bpf_xsym *xsym; @@ -958,10 +957,11 @@ eval_call(struct bpf_verifier *bvf, const struct ebpf_insn *ins) rv = bvf->evst->rv + EBPF_REG_0; rv->v = xsym->func.ret; - msk = (rv->v.type == RTE_BPF_ARG_RAW) ? - RTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t) : UINTPTR_MAX; - eval_max_bound(rv, msk); - rv->mask = msk; + if (rv->v.type == RTE_BPF_ARG_RAW) + eval_fill_max_bound(rv, + RTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t)); + else if (RTE_BPF_ARG_PTR_TYPE(rv->v.type) != 0) + eval_fill_imm64(rv, UINTPTR_MAX, 0); return err; } @@ -1084,7 +1084,7 @@ eval_jcc(struct bpf_verifier *bvf, const struct ebpf_insn *ins) /* * validate parameters for each instruction type. 
*/ -static const struct bpf_ins_check ins_chk[UINT8_MAX] = { +static const struct bpf_ins_check ins_chk[UINT8_MAX + 1] = { /* ALU IMM 32-bit instructions */ [(BPF_ALU | BPF_ADD | BPF_K)] = { .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG}, diff --git a/dpdk/lib/librte_bpf/meson.build b/dpdk/lib/librte_bpf/meson.build index bc0cd78f..4fbb29d7 100644 --- a/dpdk/lib/librte_bpf/meson.build +++ b/dpdk/lib/librte_bpf/meson.build @@ -8,7 +8,7 @@ sources = files('bpf.c', 'bpf_pkt.c', 'bpf_validate.c') -if arch_subdir == 'x86' and cc.sizeof('void *') == 8 +if arch_subdir == 'x86' and dpdk_conf.get('RTE_ARCH_64') sources += files('bpf_jit_x86.c') endif diff --git a/dpdk/lib/librte_bpf/rte_bpf.h b/dpdk/lib/librte_bpf/rte_bpf.h index ab92af8f..c8b96017 100644 --- a/dpdk/lib/librte_bpf/rte_bpf.h +++ b/dpdk/lib/librte_bpf/rte_bpf.h @@ -134,6 +134,9 @@ rte_bpf_load(const struct rte_bpf_prm *prm); /** * Create a new eBPF execution context and load BPF code from given ELF * file into it. + * Note that if the function will encounter EBPF_PSEUDO_CALL instruction + * that references external symbol, it will treat is as standard BPF_CALL + * to the external helper function. * * @param prm * Parameters used to create and initialise the BPF execution context. 
diff --git a/dpdk/lib/librte_cryptodev/rte_crypto_asym.h b/dpdk/lib/librte_cryptodev/rte_crypto_asym.h index b1c1a6c1..5351bb04 100644 --- a/dpdk/lib/librte_cryptodev/rte_crypto_asym.h +++ b/dpdk/lib/librte_cryptodev/rte_crypto_asym.h @@ -366,7 +366,7 @@ struct rte_cryptodev_asym_session; */ struct rte_crypto_rsa_op_param { enum rte_crypto_asym_op_type op_type; - /**< Type of RSA operation for transform */; + /**< Type of RSA operation for transform */ rte_crypto_param message; /**< diff --git a/dpdk/lib/librte_distributor/rte_distributor.c b/dpdk/lib/librte_distributor/rte_distributor.c index d5059837..b60acdee 100644 --- a/dpdk/lib/librte_distributor/rte_distributor.c +++ b/dpdk/lib/librte_distributor/rte_distributor.c @@ -541,6 +541,9 @@ rte_distributor_flush_v1705(struct rte_distributor *d) while (total_outstanding(d) > 0) rte_distributor_process(d, NULL, 0); + /* wait 10ms to allow all worker drain the pkts */ + rte_delay_us(10000); + /* * Send empty burst to all workers to allow them to exit * gracefully, should they need to. 
@@ -595,6 +598,12 @@ rte_distributor_create_v1705(const char *name, RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0); RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0); + if (name == NULL || num_workers >= + (unsigned int)RTE_MIN(RTE_DISTRIB_MAX_WORKERS, RTE_MAX_LCORE)) { + rte_errno = EINVAL; + return NULL; + } + if (alg_type == RTE_DIST_ALG_SINGLE) { d = malloc(sizeof(struct rte_distributor)); if (d == NULL) { @@ -612,11 +621,6 @@ rte_distributor_create_v1705(const char *name, return d; } - if (name == NULL || num_workers >= RTE_DISTRIB_MAX_WORKERS) { - rte_errno = EINVAL; - return NULL; - } - snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name); mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS); if (mz == NULL) { diff --git a/dpdk/lib/librte_eal/bsdapp/eal/eal.c b/dpdk/lib/librte_eal/bsdapp/eal/eal.c index bfac7fdc..e85c26ae 100644 --- a/dpdk/lib/librte_eal/bsdapp/eal/eal.c +++ b/dpdk/lib/librte_eal/bsdapp/eal/eal.c @@ -253,6 +253,11 @@ rte_eal_config_create(void) } memcpy(rte_mem_cfg_addr, &early_mem_config, sizeof(early_mem_config)); rte_config.mem_config = rte_mem_cfg_addr; + + /* store address of the config in the config itself so that secondary + * processes could later map the config into this exact location + */ + rte_config.mem_config->mem_cfg_addr = (uintptr_t) rte_mem_cfg_addr; } /* attach to an existing shared memory config */ @@ -566,6 +571,8 @@ rte_eal_mcfg_complete(void) /* ALL shared mem_config related INIT DONE */ if (rte_config.process_type == RTE_PROC_PRIMARY) rte_config.mem_config->magic = RTE_MAGIC; + + internal_config.init_complete = 1; } /* return non-zero if hugepages are enabled. */ @@ -663,7 +670,7 @@ rte_eal_init(int argc, char **argv) } if (rte_eal_alarm_init() < 0) { - rte_eal_init_alert("Cannot init interrupt-handling thread"); + rte_eal_init_alert("Cannot init alarm"); /* rte_eal_alarm_init sets rte_errno on failure. 
*/ return -1; } diff --git a/dpdk/lib/librte_eal/common/eal_common_dev.c b/dpdk/lib/librte_eal/common/eal_common_dev.c index fd7f5ca7..dc2bc0c9 100644 --- a/dpdk/lib/librte_eal/common/eal_common_dev.c +++ b/dpdk/lib/librte_eal/common/eal_common_dev.c @@ -172,6 +172,9 @@ local_dev_probe(const char *devargs, struct rte_device **new_dev) */ ret = dev->bus->plug(dev); + if (ret > 0) + ret = -ENOTSUP; + if (ret && !rte_dev_is_probed(dev)) { /* if hasn't ever succeeded */ RTE_LOG(ERR, EAL, "Driver cannot attach the device (%s)\n", dev->name); @@ -319,7 +322,7 @@ local_dev_remove(struct rte_device *dev) if (ret) { RTE_LOG(ERR, EAL, "Driver cannot detach the device (%s)\n", dev->name); - return ret; + return (ret < 0) ? ret : -ENOENT; } return 0; diff --git a/dpdk/lib/librte_eal/common/eal_common_memory.c b/dpdk/lib/librte_eal/common/eal_common_memory.c index e3ef3714..9a14698a 100644 --- a/dpdk/lib/librte_eal/common/eal_common_memory.c +++ b/dpdk/lib/librte_eal/common/eal_common_memory.c @@ -447,7 +447,7 @@ check_iova(const struct rte_memseg_list *msl __rte_unused, #define MAX_DMA_MASK_BITS 63 /* check memseg iovas are within the required range based on dma mask */ -static int __rte_experimental +static int check_dma_mask(uint8_t maskbits, bool thread_unsafe) { struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config; diff --git a/dpdk/lib/librte_eal/common/eal_common_options.c b/dpdk/lib/librte_eal/common/eal_common_options.c index d4ab5e23..f742d4d3 100644 --- a/dpdk/lib/librte_eal/common/eal_common_options.c +++ b/dpdk/lib/librte_eal/common/eal_common_options.c @@ -258,8 +258,7 @@ eal_plugindir_init(const char *path) while ((dent = readdir(d)) != NULL) { struct stat sb; - snprintf(sopath, PATH_MAX-1, "%s/%s", path, dent->d_name); - sopath[PATH_MAX-1] = 0; + snprintf(sopath, sizeof(sopath), "%s/%s", path, dent->d_name); if (!(stat(sopath, &sb) == 0 && S_ISREG(sb.st_mode))) continue; @@ -1451,11 +1450,11 @@ compute_ctrl_threads_cpuset(struct internal_config 
*internal_cfg) unsigned int lcore_id; for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { - if (eal_cpu_detected(lcore_id) && - rte_lcore_has_role(lcore_id, ROLE_OFF)) { - CPU_SET(lcore_id, cpuset); - } + if (rte_lcore_has_role(lcore_id, ROLE_OFF)) + continue; + RTE_CPU_OR(cpuset, cpuset, &lcore_config[lcore_id].cpuset); } + RTE_CPU_NOT(cpuset, cpuset); if (pthread_getaffinity_np(pthread_self(), sizeof(rte_cpuset_t), &default_set)) @@ -1463,9 +1462,11 @@ compute_ctrl_threads_cpuset(struct internal_config *internal_cfg) RTE_CPU_AND(cpuset, cpuset, &default_set); - /* if no detected CPU is off, use master core */ - if (!CPU_COUNT(cpuset)) - CPU_SET(rte_get_master_lcore(), cpuset); + /* if no remaining cpu, use master lcore cpu affinity */ + if (!CPU_COUNT(cpuset)) { + memcpy(cpuset, &lcore_config[rte_get_master_lcore()].cpuset, + sizeof(*cpuset)); + } } int diff --git a/dpdk/lib/librte_eal/common/eal_common_thread.c b/dpdk/lib/librte_eal/common/eal_common_thread.c index 14f206c0..8388efb3 100644 --- a/dpdk/lib/librte_eal/common/eal_common_thread.c +++ b/dpdk/lib/librte_eal/common/eal_common_thread.c @@ -38,7 +38,8 @@ rte_lcore_has_role(unsigned int lcore_id, enum rte_lcore_role_t role) return cfg->lcore_role[lcore_id] == role; } -int eal_cpuset_socket_id(rte_cpuset_t *cpusetp) +static int +eal_cpuset_socket_id(rte_cpuset_t *cpusetp) { unsigned cpu = 0; int socket_id = SOCKET_ID_ANY; diff --git a/dpdk/lib/librte_eal/common/eal_filesystem.h b/dpdk/lib/librte_eal/common/eal_filesystem.h index 89a3adde..aaba88e0 100644 --- a/dpdk/lib/librte_eal/common/eal_filesystem.h +++ b/dpdk/lib/librte_eal/common/eal_filesystem.h @@ -38,7 +38,7 @@ eal_runtime_config_path(void) { static char buffer[PATH_MAX]; /* static so auto-zeroed */ - snprintf(buffer, sizeof(buffer) - 1, "%s/%s", rte_eal_get_runtime_dir(), + snprintf(buffer, sizeof(buffer), "%s/%s", rte_eal_get_runtime_dir(), RUNTIME_CONFIG_FNAME); return buffer; } @@ -50,7 +50,7 @@ eal_mp_socket_path(void) { static char 
buffer[PATH_MAX]; /* static so auto-zeroed */ - snprintf(buffer, sizeof(buffer) - 1, "%s/%s", rte_eal_get_runtime_dir(), + snprintf(buffer, sizeof(buffer), "%s/%s", rte_eal_get_runtime_dir(), MP_SOCKET_FNAME); return buffer; } @@ -70,7 +70,7 @@ eal_hugepage_info_path(void) { static char buffer[PATH_MAX]; /* static so auto-zeroed */ - snprintf(buffer, sizeof(buffer) - 1, "%s/%s", rte_eal_get_runtime_dir(), + snprintf(buffer, sizeof(buffer), "%s/%s", rte_eal_get_runtime_dir(), HUGEPAGE_INFO_FNAME); return buffer; } @@ -82,7 +82,7 @@ eal_hugepage_data_path(void) { static char buffer[PATH_MAX]; /* static so auto-zeroed */ - snprintf(buffer, sizeof(buffer) - 1, "%s/%s", rte_eal_get_runtime_dir(), + snprintf(buffer, sizeof(buffer), "%s/%s", rte_eal_get_runtime_dir(), HUGEPAGE_DATA_FNAME); return buffer; } @@ -94,7 +94,6 @@ eal_get_hugefile_path(char *buffer, size_t buflen, const char *hugedir, int f_id { snprintf(buffer, buflen, HUGEFILE_FMT, hugedir, eal_get_hugefile_prefix(), f_id); - buffer[buflen - 1] = '\0'; return buffer; } diff --git a/dpdk/lib/librte_eal/common/eal_internal_cfg.h b/dpdk/lib/librte_eal/common/eal_internal_cfg.h index 189d4f5b..ae92ec29 100644 --- a/dpdk/lib/librte_eal/common/eal_internal_cfg.h +++ b/dpdk/lib/librte_eal/common/eal_internal_cfg.h @@ -15,7 +15,11 @@ #include "eal_thread.h" +#if defined(RTE_ARCH_ARM) || defined(RTE_ARCH_ARM64) +#define MAX_HUGEPAGE_SIZES 4 /**< support up to 4 page sizes */ +#else #define MAX_HUGEPAGE_SIZES 3 /**< support up to 3 page sizes */ +#endif /* * internal configuration structure for the number, size and diff --git a/dpdk/lib/librte_eal/common/eal_thread.h b/dpdk/lib/librte_eal/common/eal_thread.h index 2d30b19b..0d0bad06 100644 --- a/dpdk/lib/librte_eal/common/eal_thread.h +++ b/dpdk/lib/librte_eal/common/eal_thread.h @@ -34,17 +34,6 @@ void eal_thread_init_master(unsigned lcore_id); */ unsigned eal_cpu_socket_id(unsigned cpu_id); -/** - * Get the NUMA socket id from cpuset. 
- * This function is private to EAL. - * - * @param cpusetp - * The point to a valid cpu set. - * @return - * socket_id or SOCKET_ID_ANY - */ -int eal_cpuset_socket_id(rte_cpuset_t *cpusetp); - /** * Default buffer size to use with eal_thread_dump_affinity() */ diff --git a/dpdk/lib/librte_eal/common/hotplug_mp.c b/dpdk/lib/librte_eal/common/hotplug_mp.c index 7c3f38db..287f3df0 100644 --- a/dpdk/lib/librte_eal/common/hotplug_mp.c +++ b/dpdk/lib/librte_eal/common/hotplug_mp.c @@ -418,7 +418,7 @@ int eal_dev_hotplug_request_to_secondary(struct eal_dev_mp_req *req) return 0; } -int rte_mp_dev_hotplug_init(void) +int eal_mp_dev_hotplug_init(void) { int ret; diff --git a/dpdk/lib/librte_eal/common/hotplug_mp.h b/dpdk/lib/librte_eal/common/hotplug_mp.h index 597fde3d..8fcf9b52 100644 --- a/dpdk/lib/librte_eal/common/hotplug_mp.h +++ b/dpdk/lib/librte_eal/common/hotplug_mp.h @@ -28,6 +28,15 @@ struct eal_dev_mp_req { int result; }; +/** + * Register all mp action callbacks for hotplug. + * + * @return + * 0 on success, negative on error. + */ +int +eal_mp_dev_hotplug_init(void); + /** * This is a synchronous wrapper for secondary process send * request to primary process, this is invoked when an attach diff --git a/dpdk/lib/librte_eal/common/include/rte_dev.h b/dpdk/lib/librte_eal/common/include/rte_dev.h index a9724dc9..0bfb2487 100644 --- a/dpdk/lib/librte_eal/common/include/rte_dev.h +++ b/dpdk/lib/librte_eal/common/include/rte_dev.h @@ -404,7 +404,7 @@ rte_dev_iterator_next(struct rte_dev_iterator *it); * @b EXPERIMENTAL: this API may change without prior notice * * It registers the callback for the specific device. - * Multiple callbacks cal be registered at the same time. + * Multiple callbacks can be registered at the same time. 
* * @param device_name * The device name, that is the param name of the struct rte_device, diff --git a/dpdk/lib/librte_eal/common/include/rte_eal.h b/dpdk/lib/librte_eal/common/include/rte_eal.h index 9951228e..595efdca 100644 --- a/dpdk/lib/librte_eal/common/include/rte_eal.h +++ b/dpdk/lib/librte_eal/common/include/rte_eal.h @@ -399,15 +399,6 @@ rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts, int __rte_experimental rte_mp_reply(struct rte_mp_msg *msg, const char *peer); -/** - * Register all mp action callbacks for hotplug. - * - * @return - * 0 on success, negative on error. - */ -int __rte_experimental -rte_mp_dev_hotplug_init(void); - /** * Usage function typedef used by the application usage function. * diff --git a/dpdk/lib/librte_eal/common/include/rte_interrupts.h b/dpdk/lib/librte_eal/common/include/rte_interrupts.h index d751a637..bcd93580 100644 --- a/dpdk/lib/librte_eal/common/include/rte_interrupts.h +++ b/dpdk/lib/librte_eal/common/include/rte_interrupts.h @@ -28,7 +28,7 @@ typedef void (*rte_intr_callback_fn)(void *cb_arg); /** * It registers the callback for the specific interrupt. Multiple - * callbacks cal be registered at the same time. + * callbacks can be registered at the same time. * @param intr_handle * Pointer to the interrupt handle. 
* @param cb diff --git a/dpdk/lib/librte_eal/common/include/rte_lcore.h b/dpdk/lib/librte_eal/common/include/rte_lcore.h index dea17f50..31af0e50 100644 --- a/dpdk/lib/librte_eal/common/include/rte_lcore.h +++ b/dpdk/lib/librte_eal/common/include/rte_lcore.h @@ -25,6 +25,20 @@ extern "C" { #if defined(__linux__) typedef cpu_set_t rte_cpuset_t; #define RTE_CPU_AND(dst, src1, src2) CPU_AND(dst, src1, src2) +#define RTE_CPU_OR(dst, src1, src2) CPU_OR(dst, src1, src2) +#define RTE_CPU_FILL(set) do \ +{ \ + unsigned int i; \ + CPU_ZERO(set); \ + for (i = 0; i < CPU_SETSIZE; i++) \ + CPU_SET(i, set); \ +} while (0) +#define RTE_CPU_NOT(dst, src) do \ +{ \ + cpu_set_t tmp; \ + RTE_CPU_FILL(&tmp); \ + CPU_XOR(dst, &tmp, src); \ +} while (0) #elif defined(__FreeBSD__) #include typedef cpuset_t rte_cpuset_t; @@ -35,6 +49,21 @@ typedef cpuset_t rte_cpuset_t; CPU_AND(&tmp, src2); \ CPU_COPY(&tmp, dst); \ } while (0) +#define RTE_CPU_OR(dst, src1, src2) do \ +{ \ + cpuset_t tmp; \ + CPU_COPY(src1, &tmp); \ + CPU_OR(&tmp, src2); \ + CPU_COPY(&tmp, dst); \ +} while (0) +#define RTE_CPU_FILL(set) CPU_FILL(set) +#define RTE_CPU_NOT(dst, src) do \ +{ \ + cpuset_t tmp; \ + CPU_FILL(&tmp); \ + CPU_NAND(&tmp, src); \ + CPU_COPY(&tmp, dst); \ +} while (0) #endif /** diff --git a/dpdk/lib/librte_eal/common/include/rte_memory.h b/dpdk/lib/librte_eal/common/include/rte_memory.h index d970825d..fc4e82b1 100644 --- a/dpdk/lib/librte_eal/common/include/rte_memory.h +++ b/dpdk/lib/librte_eal/common/include/rte_memory.h @@ -188,7 +188,7 @@ typedef int (*rte_memseg_walk_t)(const struct rte_memseg_list *msl, /** * Memseg contig walk function prototype. This will trigger a callback on every - * VA-contiguous are starting at memseg ``ms``, so total valid VA space at each + * VA-contiguous area starting at memseg ``ms``, so total valid VA space at each * callback call will be [``ms->addr``, ``ms->addr + len``). 
* * Returning 0 will continue walk diff --git a/dpdk/lib/librte_eal/common/include/rte_option.h b/dpdk/lib/librte_eal/common/include/rte_option.h index 8957b970..b8ad28b5 100644 --- a/dpdk/lib/librte_eal/common/include/rte_option.h +++ b/dpdk/lib/librte_eal/common/include/rte_option.h @@ -34,7 +34,7 @@ typedef int (*rte_option_cb)(void); */ struct rte_option { TAILQ_ENTRY(rte_option) next; /**< Next entry in the list. */ - char *opt_str; /**< The option name. */ + const char *opt_str; /**< The option name. */ rte_option_cb cb; /**< Function called when option is used. */ int enabled; /**< Set when the option is used. */ }; diff --git a/dpdk/lib/librte_eal/common/include/rte_version.h b/dpdk/lib/librte_eal/common/include/rte_version.h index 7c0b13b5..419f9c07 100644 --- a/dpdk/lib/librte_eal/common/include/rte_version.h +++ b/dpdk/lib/librte_eal/common/include/rte_version.h @@ -37,7 +37,7 @@ extern "C" { /** * Patch level number i.e. the z in yy.mm.z */ -#define RTE_VER_MINOR 2 +#define RTE_VER_MINOR 5 /** * Extra string to be appended to version number diff --git a/dpdk/lib/librte_eal/common/malloc_heap.c b/dpdk/lib/librte_eal/common/malloc_heap.c index c6a6d4f6..b8f26f2b 100644 --- a/dpdk/lib/librte_eal/common/malloc_heap.c +++ b/dpdk/lib/librte_eal/common/malloc_heap.c @@ -1120,7 +1120,7 @@ malloc_heap_add_external_memory(struct malloc_heap *heap, void *va_addr, return -1; } - snprintf(fbarray_name, sizeof(fbarray_name) - 1, "%s_%p", + snprintf(fbarray_name, sizeof(fbarray_name), "%s_%p", heap->name, va_addr); /* create the backing fbarray */ @@ -1269,7 +1269,7 @@ rte_eal_malloc_heap_init(void) char heap_name[RTE_HEAP_NAME_MAX_LEN]; int socket_id = rte_socket_id_by_idx(i); - snprintf(heap_name, sizeof(heap_name) - 1, + snprintf(heap_name, sizeof(heap_name), "socket_%i", socket_id); strlcpy(heap->name, heap_name, RTE_HEAP_NAME_MAX_LEN); heap->socket_id = socket_id; diff --git a/dpdk/lib/librte_eal/linuxapp/eal/eal.c b/dpdk/lib/librte_eal/linuxapp/eal/eal.c index 
7a08cf1e..f453337f 100644 --- a/dpdk/lib/librte_eal/linuxapp/eal/eal.c +++ b/dpdk/lib/librte_eal/linuxapp/eal/eal.c @@ -59,6 +59,7 @@ #include "eal_hugepages.h" #include "eal_options.h" #include "eal_vfio.h" +#include "hotplug_mp.h" #define MEMSIZE_IF_NO_HUGE_PAGE (64ULL * 1024ULL * 1024ULL) @@ -1001,7 +1002,7 @@ rte_eal_init(int argc, char **argv) } if (rte_eal_alarm_init() < 0) { - rte_eal_init_alert("Cannot init interrupt-handling thread"); + rte_eal_init_alert("Cannot init alarm"); /* rte_eal_alarm_init sets rte_errno on failure. */ return -1; } @@ -1018,7 +1019,7 @@ rte_eal_init(int argc, char **argv) } /* register multi-process action callbacks for hotplug */ - if (rte_mp_dev_hotplug_init() < 0) { + if (eal_mp_dev_hotplug_init() < 0) { rte_eal_init_alert("failed to register mp callback for hotplug"); return -1; } diff --git a/dpdk/lib/librte_eal/linuxapp/eal/eal_alarm.c b/dpdk/lib/librte_eal/linuxapp/eal/eal_alarm.c index 840ede78..0924c920 100644 --- a/dpdk/lib/librte_eal/linuxapp/eal/eal_alarm.c +++ b/dpdk/lib/librte_eal/linuxapp/eal/eal_alarm.c @@ -137,9 +137,10 @@ rte_eal_alarm_set(uint64_t us, rte_eal_alarm_callback cb_fn, void *cb_arg) rte_spinlock_lock(&alarm_list_lk); if (!handler_registered) { - ret |= rte_intr_callback_register(&intr_handle, - eal_alarm_callback, NULL); - handler_registered = (ret == 0) ? 
1 : 0; + /* registration can fail, callback can be registered later */ + if (rte_intr_callback_register(&intr_handle, + eal_alarm_callback, NULL) == 0) + handler_registered = 1; } if (LIST_EMPTY(&alarm_list)) diff --git a/dpdk/lib/librte_eal/linuxapp/eal/eal_memalloc.c b/dpdk/lib/librte_eal/linuxapp/eal/eal_memalloc.c index 81b441a9..bff7dcd5 100644 --- a/dpdk/lib/librte_eal/linuxapp/eal/eal_memalloc.c +++ b/dpdk/lib/librte_eal/linuxapp/eal/eal_memalloc.c @@ -2,7 +2,6 @@ * Copyright(c) 2017-2018 Intel Corporation */ -#define _FILE_OFFSET_BITS 64 #include #include #include @@ -732,9 +731,13 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id, } #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES - move_pages(getpid(), 1, &addr, NULL, &cur_socket_id, 0); - - if (cur_socket_id != socket_id) { + ret = get_mempolicy(&cur_socket_id, NULL, 0, addr, + MPOL_F_NODE | MPOL_F_ADDR); + if (ret < 0) { + RTE_LOG(DEBUG, EAL, "%s(): get_mempolicy: %s\n", + __func__, strerror(errno)); + goto mapped; + } else if (cur_socket_id != socket_id) { RTE_LOG(DEBUG, EAL, "%s(): allocation happened on wrong socket (wanted %d, got %d)\n", __func__, socket_id, cur_socket_id); diff --git a/dpdk/lib/librte_eal/linuxapp/eal/eal_memory.c b/dpdk/lib/librte_eal/linuxapp/eal/eal_memory.c index 898bdb77..60cf41b6 100644 --- a/dpdk/lib/librte_eal/linuxapp/eal/eal_memory.c +++ b/dpdk/lib/librte_eal/linuxapp/eal/eal_memory.c @@ -3,7 +3,6 @@ * Copyright(c) 2013 6WIND S.A. 
*/ -#define _FILE_OFFSET_BITS 64 #include #include #include @@ -1085,6 +1084,7 @@ remap_needed_hugepages(struct hugepage_file *hugepages, int n_pages) return 0; } +__rte_unused /* function is unused on 32-bit builds */ static inline uint64_t get_socket_mem_size(int socket) { diff --git a/dpdk/lib/librte_eal/linuxapp/eal/eal_vfio.c b/dpdk/lib/librte_eal/linuxapp/eal/eal_vfio.c index c821e838..830b320d 100644 --- a/dpdk/lib/librte_eal/linuxapp/eal/eal_vfio.c +++ b/dpdk/lib/librte_eal/linuxapp/eal/eal_vfio.c @@ -1232,6 +1232,19 @@ rte_vfio_get_group_num(const char *sysfs_base, return 1; } +static int +type1_map_contig(const struct rte_memseg_list *msl, const struct rte_memseg *ms, + size_t len, void *arg) +{ + int *vfio_container_fd = arg; + + if (msl->external) + return 0; + + return vfio_type1_dma_mem_map(*vfio_container_fd, ms->addr_64, ms->iova, + len, 1); +} + static int type1_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms, void *arg) @@ -1289,6 +1302,13 @@ vfio_type1_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova, static int vfio_type1_dma_map(int vfio_container_fd) { + if (rte_eal_iova_mode() == RTE_IOVA_VA) { + /* with IOVA as VA mode, we can get away with mapping contiguous + * chunks rather than going page-by-page. 
+ */ + return rte_memseg_contig_walk(type1_map_contig, + &vfio_container_fd); + } return rte_memseg_walk(type1_map, &vfio_container_fd); } @@ -1808,7 +1828,7 @@ rte_vfio_container_create(void) return vfio_cfgs[i].vfio_container_fd; } -int __rte_experimental +int rte_vfio_container_destroy(int container_fd) { struct vfio_config *vfio_cfg; diff --git a/dpdk/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h b/dpdk/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h index 5db5a133..eb7adf34 100644 --- a/dpdk/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h +++ b/dpdk/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h @@ -86,7 +86,7 @@ struct rte_kni_mbuf { /* fields on second cache line */ char pad3[8] __attribute__((__aligned__(RTE_CACHE_LINE_MIN_SIZE))); void *pool; - void *next; + void *next; /**< Physical address of next mbuf in kernel. */ }; /* diff --git a/dpdk/lib/librte_ethdev/rte_ethdev.c b/dpdk/lib/librte_ethdev/rte_ethdev.c index 191658da..3c75d3c7 100644 --- a/dpdk/lib/librte_ethdev/rte_ethdev.c +++ b/dpdk/lib/librte_ethdev/rte_ethdev.c @@ -2517,10 +2517,15 @@ rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) .nb_align = 1, }; + /* + * Init dev_info before port_id check since caller does not have + * return status and does not know if get is successful or not. + */ + memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); + RTE_ETH_VALID_PORTID_OR_RET(port_id); dev = &rte_eth_devices[port_id]; - memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); dev_info->rx_desc_lim = lim; dev_info->tx_desc_lim = lim; dev_info->device = dev->device; diff --git a/dpdk/lib/librte_ethdev/rte_ethdev.h b/dpdk/lib/librte_ethdev/rte_ethdev.h index 0e353619..ec4b7752 100644 --- a/dpdk/lib/librte_ethdev/rte_ethdev.h +++ b/dpdk/lib/librte_ethdev/rte_ethdev.h @@ -4206,8 +4206,8 @@ rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, * The number of packets correct and ready to be sent. 
The return value can be * less than the value of the *tx_pkts* parameter when some packet doesn't * meet devices requirements with rte_errno set appropriately: - * - -EINVAL: offload flags are not correctly set - * - -ENOTSUP: the offload feature is not supported by the hardware + * - EINVAL: offload flags are not correctly set + * - ENOTSUP: the offload feature is not supported by the hardware * */ diff --git a/dpdk/lib/librte_ethdev/rte_ethdev_pci.h b/dpdk/lib/librte_ethdev/rte_ethdev_pci.h index 23257e98..ccdbb46e 100644 --- a/dpdk/lib/librte_ethdev/rte_ethdev_pci.h +++ b/dpdk/lib/librte_ethdev/rte_ethdev_pci.h @@ -184,7 +184,7 @@ rte_eth_dev_pci_generic_remove(struct rte_pci_device *pci_dev, eth_dev = rte_eth_dev_allocated(pci_dev->device.name); if (!eth_dev) - return -ENODEV; + return 0; if (dev_uninit) { ret = dev_uninit(eth_dev); diff --git a/dpdk/lib/librte_ethdev/rte_flow.h b/dpdk/lib/librte_ethdev/rte_flow.h index c0fe8792..4a969abf 100644 --- a/dpdk/lib/librte_ethdev/rte_flow.h +++ b/dpdk/lib/librte_ethdev/rte_flow.h @@ -923,7 +923,7 @@ struct rte_flow_item_esp { #ifndef __cplusplus static const struct rte_flow_item_esp rte_flow_item_esp_mask = { .hdr = { - .spi = 0xffffffff, + .spi = RTE_BE32(0xffffffff), }, }; #endif diff --git a/dpdk/lib/librte_eventdev/rte_event_eth_tx_adapter.h b/dpdk/lib/librte_eventdev/rte_event_eth_tx_adapter.h index 7a4a01fa..4d8a018c 100644 --- a/dpdk/lib/librte_eventdev/rte_event_eth_tx_adapter.h +++ b/dpdk/lib/librte_eventdev/rte_event_eth_tx_adapter.h @@ -375,10 +375,10 @@ rte_event_eth_tx_adapter_event_port_get(uint8_t id, uint8_t *event_port_id); * *rte_event*. If the return value is less than *nb_events*, the remaining * events at the end of ev[] are not consumed and the caller has to take care * of them, and rte_errno is set accordingly. 
Possible errno values include: - * - -EINVAL The port ID is invalid, device ID is invalid, an event's queue + * - EINVAL The port ID is invalid, device ID is invalid, an event's queue * ID is invalid, or an event's sched type doesn't match the * capabilities of the destination queue. - * - -ENOSPC The event port was backpressured and unable to enqueue + * - ENOSPC The event port was backpressured and unable to enqueue * one or more events. This error code is only applicable to * closed systems. */ @@ -393,12 +393,12 @@ rte_event_eth_tx_adapter_enqueue(uint8_t dev_id, #ifdef RTE_LIBRTE_EVENTDEV_DEBUG if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return 0; } if (port_id >= dev->data->nb_ports) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return 0; } #endif diff --git a/dpdk/lib/librte_eventdev/rte_event_timer_adapter.c b/dpdk/lib/librte_eventdev/rte_event_timer_adapter.c index 79070d48..026d639b 100644 --- a/dpdk/lib/librte_eventdev/rte_event_timer_adapter.c +++ b/dpdk/lib/librte_eventdev/rte_event_timer_adapter.c @@ -192,17 +192,17 @@ rte_event_timer_adapter_create_ext( &adapter->data->caps, &adapter->ops); if (ret < 0) { - rte_errno = ret; + rte_errno = -ret; goto free_memzone; } if (!(adapter->data->caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) { - FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, -EINVAL); + FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, EINVAL); ret = conf_cb(adapter->data->id, adapter->data->event_dev_id, &adapter->data->event_port_id, conf_arg); if (ret < 0) { - rte_errno = ret; + rte_errno = -ret; goto free_memzone; } } @@ -214,10 +214,10 @@ rte_event_timer_adapter_create_ext( adapter->ops = &sw_event_adapter_timer_ops; /* Allow driver to do some setup */ - FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, -ENOTSUP); + FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, ENOTSUP); ret = adapter->ops->init(adapter); if (ret < 0) { - rte_errno = ret; + rte_errno = -ret; goto 
free_memzone; } @@ -493,7 +493,7 @@ event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id, *nb_events_inv = 0; *nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id, &events[tail_idx], n); - if (*nb_events_flushed != n && rte_errno == -EINVAL) { + if (*nb_events_flushed != n && rte_errno == EINVAL) { EVTIM_LOG_ERR("failed to enqueue invalid event - dropping it"); (*nb_events_inv)++; } diff --git a/dpdk/lib/librte_eventdev/rte_eventdev.c b/dpdk/lib/librte_eventdev/rte_eventdev.c index ebaf3087..677850cd 100644 --- a/dpdk/lib/librte_eventdev/rte_eventdev.c +++ b/dpdk/lib/librte_eventdev/rte_eventdev.c @@ -888,18 +888,18 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id, uint16_t *links_map; int i, diag; - RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0); + RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0); dev = &rte_eventdevs[dev_id]; if (*dev->dev_ops->port_link == NULL) { RTE_PMD_DEBUG_TRACE("Function not supported\n"); - rte_errno = -ENOTSUP; + rte_errno = ENOTSUP; return 0; } if (!is_valid_port(dev, port_id)) { RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id); - rte_errno = -EINVAL; + rte_errno = EINVAL; return 0; } @@ -920,7 +920,7 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id, for (i = 0; i < nb_links; i++) if (queues[i] >= dev->data->nb_queues) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return 0; } @@ -947,18 +947,18 @@ rte_event_port_unlink(uint8_t dev_id, uint8_t port_id, int i, diag, j; uint16_t *links_map; - RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0); + RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0); dev = &rte_eventdevs[dev_id]; if (*dev->dev_ops->port_unlink == NULL) { RTE_PMD_DEBUG_TRACE("Function not supported\n"); - rte_errno = -ENOTSUP; + rte_errno = ENOTSUP; return 0; } if (!is_valid_port(dev, port_id)) { RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id); - rte_errno = -EINVAL; + rte_errno = EINVAL; return 0; } @@ -987,7 +987,7 @@ 
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id, nb_unlinks = j; for (i = 0; i < nb_unlinks; i++) if (queues[i] >= dev->data->nb_queues) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return 0; } diff --git a/dpdk/lib/librte_eventdev/rte_eventdev.h b/dpdk/lib/librte_eventdev/rte_eventdev.h index 38608114..d755f194 100644 --- a/dpdk/lib/librte_eventdev/rte_eventdev.h +++ b/dpdk/lib/librte_eventdev/rte_eventdev.h @@ -181,9 +181,8 @@ * The *dequeue* operation gets one or more events from the event ports. * The application process the events and send to downstream event queue through * rte_event_enqueue_burst() if it is an intermediate stage of event processing, - * on the final stage, the application may send to different subsystem like - * ethdev to send the packet/event on the wire using ethdev - * rte_eth_tx_burst() API. + * on the final stage, the application may use Tx adapter API for maintaining + * the ingress order and then send the packet/event on the wire. * * The point at which events are scheduled to ports depends on the device. * For hardware devices, scheduling occurs asynchronously without any software @@ -1322,12 +1321,12 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id, #ifdef RTE_LIBRTE_EVENTDEV_DEBUG if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return 0; } if (port_id >= dev->data->nb_ports) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return 0; } #endif @@ -1376,10 +1375,10 @@ __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id, * *rte_event*. If the return value is less than *nb_events*, the remaining * events at the end of ev[] are not consumed and the caller has to take care * of them, and rte_errno is set accordingly. 
Possible errno values include: - * - -EINVAL The port ID is invalid, device ID is invalid, an event's queue + * - EINVAL The port ID is invalid, device ID is invalid, an event's queue * ID is invalid, or an event's sched type doesn't match the * capabilities of the destination queue. - * - -ENOSPC The event port was backpressured and unable to enqueue + * - ENOSPC The event port was backpressured and unable to enqueue * one or more events. This error code is only applicable to * closed systems. * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH @@ -1426,10 +1425,10 @@ rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id, * *rte_event*. If the return value is less than *nb_events*, the remaining * events at the end of ev[] are not consumed and the caller has to take care * of them, and rte_errno is set accordingly. Possible errno values include: - * - -EINVAL The port ID is invalid, device ID is invalid, an event's queue + * - EINVAL The port ID is invalid, device ID is invalid, an event's queue * ID is invalid, or an event's sched type doesn't match the * capabilities of the destination queue. - * - -ENOSPC The event port was backpressured and unable to enqueue + * - ENOSPC The event port was backpressured and unable to enqueue * one or more events. This error code is only applicable to * closed systems. * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH @@ -1477,10 +1476,10 @@ rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id, * *rte_event*. If the return value is less than *nb_events*, the remaining * events at the end of ev[] are not consumed and the caller has to take care * of them, and rte_errno is set accordingly. Possible errno values include: - * - -EINVAL The port ID is invalid, device ID is invalid, an event's queue + * - EINVAL The port ID is invalid, device ID is invalid, an event's queue * ID is invalid, or an event's sched type doesn't match the * capabilities of the destination queue. 
- * - -ENOSPC The event port was backpressured and unable to enqueue + * - ENOSPC The event port was backpressured and unable to enqueue * one or more events. This error code is only applicable to * closed systems. * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH @@ -1599,12 +1598,12 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[], #ifdef RTE_LIBRTE_EVENTDEV_DEBUG if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return 0; } if (port_id >= dev->data->nb_ports) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return 0; } #endif @@ -1677,9 +1676,9 @@ rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[], * of link[] are not established, and the caller has to take care of them. * If return value is less than *nb_links* then implementation shall update the * rte_errno accordingly, Possible rte_errno values are - * (-EDQUOT) Quota exceeded(Application tried to link the queue configured with + * (EDQUOT) Quota exceeded(Application tried to link the queue configured with * RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event ports) - * (-EINVAL) Invalid parameter + * (EINVAL) Invalid parameter * */ int @@ -1724,7 +1723,7 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id, * end of queues[] are not unlinked, and the caller has to take care of them. 
* If return value is less than *nb_unlinks* then implementation shall update * the rte_errno accordingly, Possible rte_errno values are - * (-EINVAL) Invalid parameter + * (EINVAL) Invalid parameter */ int rte_event_port_unlink(uint8_t dev_id, uint8_t port_id, diff --git a/dpdk/lib/librte_flow_classify/rte_flow_classify_parse.c b/dpdk/lib/librte_flow_classify/rte_flow_classify_parse.c index f65ceaf7..46533029 100644 --- a/dpdk/lib/librte_flow_classify/rte_flow_classify_parse.c +++ b/dpdk/lib/librte_flow_classify/rte_flow_classify_parse.c @@ -103,8 +103,6 @@ classify_pattern_skip_void_item(struct rte_flow_item *items, pb = pe; break; } - - pb = pe + 1; } /* Copy the END item. */ rte_memcpy(items, pe, sizeof(struct rte_flow_item)); diff --git a/dpdk/lib/librte_hash/rte_cuckoo_hash.c b/dpdk/lib/librte_hash/rte_cuckoo_hash.c index d7a5f4c2..e7627716 100644 --- a/dpdk/lib/librte_hash/rte_cuckoo_hash.c +++ b/dpdk/lib/librte_hash/rte_cuckoo_hash.c @@ -1168,22 +1168,31 @@ search_one_bucket_lf(const struct rte_hash *h, const void *key, uint16_t sig, struct rte_hash_key *k, *keys = h->key_store; for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) { - key_idx = __atomic_load_n(&bkt->key_idx[i], + /* Signature comparison is done before the acquire-load + * of the key index to achieve better performance. + * This can result in the reader loading old signature + * (which matches), while the key_idx is updated to a + * value that belongs to a new key. However, the full + * key comparison will ensure that the lookup fails. 
+ */ + if (bkt->sig_current[i] == sig) { + key_idx = __atomic_load_n(&bkt->key_idx[i], __ATOMIC_ACQUIRE); - if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) { - k = (struct rte_hash_key *) ((char *)keys + - key_idx * h->key_entry_size); - pdata = __atomic_load_n(&k->pdata, - __ATOMIC_ACQUIRE); + if (key_idx != EMPTY_SLOT) { + k = (struct rte_hash_key *) ((char *)keys + + key_idx * h->key_entry_size); + pdata = __atomic_load_n(&k->pdata, + __ATOMIC_ACQUIRE); - if (rte_hash_cmp_eq(key, k->key, h) == 0) { - if (data != NULL) - *data = pdata; - /* - * Return index where key is stored, - * subtracting the first dummy index - */ - return key_idx - 1; + if (rte_hash_cmp_eq(key, k->key, h) == 0) { + if (data != NULL) + *data = pdata; + /* + * Return index where key is stored, + * subtracting the first dummy index + */ + return key_idx - 1; + } } } } diff --git a/dpdk/lib/librte_ip_frag/rte_ipv6_fragmentation.c b/dpdk/lib/librte_ip_frag/rte_ipv6_fragmentation.c index b9437eb1..e5759a78 100644 --- a/dpdk/lib/librte_ip_frag/rte_ipv6_fragmentation.c +++ b/dpdk/lib/librte_ip_frag/rte_ipv6_fragmentation.c @@ -83,8 +83,10 @@ rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in, * Ensure the IP payload length of all fragments (except the * the last fragment) are a multiple of 8 bytes per RFC2460. 
*/ - frag_size = RTE_ALIGN_FLOOR(mtu_size - sizeof(struct ipv6_hdr), - RTE_IPV6_EHDR_FO_ALIGN); + + frag_size = mtu_size - sizeof(struct ipv6_hdr) - + sizeof(struct ipv6_extension_fragment); + frag_size = RTE_ALIGN_FLOOR(frag_size, RTE_IPV6_EHDR_FO_ALIGN); /* Check that pkts_out is big enough to hold all fragments */ if (unlikely (frag_size * nb_pkts_out < diff --git a/dpdk/lib/librte_kni/meson.build b/dpdk/lib/librte_kni/meson.build index a738a033..055ae122 100644 --- a/dpdk/lib/librte_kni/meson.build +++ b/dpdk/lib/librte_kni/meson.build @@ -1,7 +1,7 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel Corporation -if host_machine.system() != 'linux' or cc.sizeof('void *') == 4 +if host_machine.system() != 'linux' or not dpdk_conf.get('RTE_ARCH_64') build = false endif version = 2 diff --git a/dpdk/lib/librte_kni/rte_kni.c b/dpdk/lib/librte_kni/rte_kni.c index 73aecccc..941bec96 100644 --- a/dpdk/lib/librte_kni/rte_kni.c +++ b/dpdk/lib/librte_kni/rte_kni.c @@ -59,7 +59,7 @@ struct rte_kni { uint16_t group_id; /**< Group ID of KNI devices */ uint32_t slot_id; /**< KNI pool slot ID */ struct rte_mempool *pktmbuf_pool; /**< pkt mbuf mempool */ - unsigned mbuf_size; /**< mbuf size */ + unsigned int mbuf_size; /**< mbuf size */ const struct rte_memzone *m_tx_q; /**< TX queue memzone */ const struct rte_memzone *m_rx_q; /**< RX queue memzone */ @@ -78,7 +78,7 @@ struct rte_kni { /* For request & response */ struct rte_kni_fifo *req_q; /**< Request queue */ struct rte_kni_fifo *resp_q; /**< Response queue */ - void * sync_addr; /**< Req/Resp Mem address */ + void *sync_addr; /**< Req/Resp Mem address */ struct rte_kni_ops ops; /**< operations for request */ }; @@ -97,6 +97,11 @@ static volatile int kni_fd = -1; int rte_kni_init(unsigned int max_kni_ifaces __rte_unused) { + if (rte_eal_iova_mode() != RTE_IOVA_PA) { + RTE_LOG(ERR, KNI, "KNI requires IOVA as PA\n"); + return -1; + } + /* Check FD and open */ if (kni_fd < 0) { kni_fd = open("/dev/" 
KNI_DEVICE, O_RDWR); @@ -353,6 +358,19 @@ va2pa(struct rte_mbuf *m) (unsigned long)m->buf_iova)); } +static void * +va2pa_all(struct rte_mbuf *mbuf) +{ + void *phy_mbuf = va2pa(mbuf); + struct rte_mbuf *next = mbuf->next; + while (next) { + mbuf->next = va2pa(next); + mbuf = next; + next = mbuf->next; + } + return phy_mbuf; +} + static void obj_free(struct rte_mempool *mp __rte_unused, void *opaque, void *obj, unsigned obj_idx __rte_unused) @@ -482,7 +500,7 @@ kni_config_promiscusity(uint16_t port_id, uint8_t to_on) int rte_kni_handle_request(struct rte_kni *kni) { - unsigned ret; + unsigned int ret; struct rte_kni_request *req = NULL; if (kni == NULL) @@ -507,8 +525,8 @@ rte_kni_handle_request(struct rte_kni *kni) break; case RTE_KNI_REQ_CFG_NETWORK_IF: /* Set network interface up/down */ if (kni->ops.config_network_if) - req->result = kni->ops.config_network_if(\ - kni->ops.port_id, req->if_up); + req->result = kni->ops.config_network_if(kni->ops.port_id, + req->if_up); break; case RTE_KNI_REQ_CHANGE_MAC_ADDR: /* Change MAC Address */ if (kni->ops.config_mac_address) @@ -543,14 +561,15 @@ rte_kni_handle_request(struct rte_kni *kni) } unsigned -rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num) +rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num) { + num = RTE_MIN(kni_fifo_free_count(kni->rx_q), num); void *phy_mbufs[num]; unsigned int ret; unsigned int i; for (i = 0; i < num; i++) - phy_mbufs[i] = va2pa(mbufs[i]); + phy_mbufs[i] = va2pa_all(mbufs[i]); ret = kni_fifo_put(kni->rx_q, phy_mbufs, num); @@ -561,9 +580,9 @@ rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num) } unsigned -rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num) +rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num) { - unsigned ret = kni_fifo_get(kni->tx_q, (void **)mbufs, num); + unsigned int ret = kni_fifo_get(kni->tx_q, (void **)mbufs, num); /* If buffers 
removed, allocate mbufs and then put them into alloc_q */ if (ret) @@ -614,7 +633,7 @@ kni_allocate_mbufs(struct rte_kni *kni) return; } - allocq_free = (kni->alloc_q->read - kni->alloc_q->write - 1) \ + allocq_free = (kni->alloc_q->read - kni->alloc_q->write - 1) & (MAX_MBUF_BURST_NUM - 1); for (i = 0; i < allocq_free; i++) { pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool); @@ -668,35 +687,35 @@ static enum kni_ops_status kni_check_request_register(struct rte_kni_ops *ops) { /* check if KNI request ops has been registered*/ - if( NULL == ops ) + if (ops == NULL) return KNI_REQ_NO_REGISTER; - if ((ops->change_mtu == NULL) - && (ops->config_network_if == NULL) - && (ops->config_mac_address == NULL) - && (ops->config_promiscusity == NULL)) + if (ops->change_mtu == NULL + && ops->config_network_if == NULL + && ops->config_mac_address == NULL + && ops->config_promiscusity == NULL) return KNI_REQ_NO_REGISTER; return KNI_REQ_REGISTERED; } int -rte_kni_register_handlers(struct rte_kni *kni,struct rte_kni_ops *ops) +rte_kni_register_handlers(struct rte_kni *kni, struct rte_kni_ops *ops) { enum kni_ops_status req_status; - if (NULL == ops) { + if (ops == NULL) { RTE_LOG(ERR, KNI, "Invalid KNI request operation.\n"); return -1; } - if (NULL == kni) { + if (kni == NULL) { RTE_LOG(ERR, KNI, "Invalid kni info.\n"); return -1; } req_status = kni_check_request_register(&kni->ops); - if ( KNI_REQ_REGISTERED == req_status) { + if (req_status == KNI_REQ_REGISTERED) { RTE_LOG(ERR, KNI, "The KNI request operation has already registered.\n"); return -1; } @@ -708,7 +727,7 @@ rte_kni_register_handlers(struct rte_kni *kni,struct rte_kni_ops *ops) int rte_kni_unregister_handlers(struct rte_kni *kni) { - if (NULL == kni) { + if (kni == NULL) { RTE_LOG(ERR, KNI, "Invalid kni info.\n"); return -1; } diff --git a/dpdk/lib/librte_kni/rte_kni_fifo.h b/dpdk/lib/librte_kni/rte_kni_fifo.h index 287d7deb..d2ec82fe 100644 --- a/dpdk/lib/librte_kni/rte_kni_fifo.h +++ 
b/dpdk/lib/librte_kni/rte_kni_fifo.h @@ -104,3 +104,14 @@ kni_fifo_count(struct rte_kni_fifo *fifo) unsigned fifo_read = __KNI_LOAD_ACQUIRE(&fifo->read); return (fifo->len + fifo_write - fifo_read) & (fifo->len - 1); } + +/** + * Get the num of available elements in the fifo + */ +static inline uint32_t +kni_fifo_free_count(struct rte_kni_fifo *fifo) +{ + uint32_t fifo_write = __KNI_LOAD_ACQUIRE(&fifo->write); + uint32_t fifo_read = __KNI_LOAD_ACQUIRE(&fifo->read); + return (fifo_read - fifo_write - 1) & (fifo->len - 1); +} diff --git a/dpdk/lib/librte_net/rte_ether.h b/dpdk/lib/librte_net/rte_ether.h index e0d83111..71299d9e 100644 --- a/dpdk/lib/librte_net/rte_ether.h +++ b/dpdk/lib/librte_net/rte_ether.h @@ -410,6 +410,11 @@ static inline int rte_vlan_insert(struct rte_mbuf **m) (*m)->ol_flags &= ~(PKT_RX_VLAN_STRIPPED | PKT_TX_VLAN); + if ((*m)->ol_flags & PKT_TX_TUNNEL_MASK) + (*m)->outer_l2_len += sizeof(struct vlan_hdr); + else + (*m)->l2_len += sizeof(struct vlan_hdr); + return 0; } diff --git a/dpdk/lib/librte_net/rte_ip.h b/dpdk/lib/librte_net/rte_ip.h index f9b90909..7639164c 100644 --- a/dpdk/lib/librte_net/rte_ip.h +++ b/dpdk/lib/librte_net/rte_ip.h @@ -16,7 +16,9 @@ */ #include +#include #include +#include #include #include @@ -89,6 +91,10 @@ struct ipv4_hdr { #define IS_IPV4_MCAST(x) \ ((x) >= IPV4_MIN_MCAST && (x) <= IPV4_MAX_MCAST) /**< check if IPv4 address is multicast */ +/* IPv4 default fields values */ +#define IPV4_MIN_IHL (0x5) +#define IPV4_VHL_DEF (IPVERSION | IPV4_MIN_IHL) + /** * @internal Calculate a sum of all words in the buffer. * Helper routine for the rte_raw_cksum(). @@ -352,7 +358,7 @@ struct ipv6_hdr { #define IPV6_HDR_FL_SHIFT 0 #define IPV6_HDR_TC_SHIFT 20 #define IPV6_HDR_FL_MASK ((1u << IPV6_HDR_TC_SHIFT) - 1) -#define IPV6_HDR_TC_MASK (0xf << IPV6_HDR_TC_SHIFT) +#define IPV6_HDR_TC_MASK (0xff << IPV6_HDR_TC_SHIFT) /** * Process the pseudo-header checksum of an IPv6 header. 
diff --git a/dpdk/lib/librte_net/rte_net.h b/dpdk/lib/librte_net/rte_net.h index e59760a0..025acba7 100644 --- a/dpdk/lib/librte_net/rte_net.h +++ b/dpdk/lib/librte_net/rte_net.h @@ -112,14 +112,14 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m, static inline int rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags) { - struct ipv4_hdr *ipv4_hdr; + /* Initialise ipv4_hdr to avoid false positive compiler warnings. */ + struct ipv4_hdr *ipv4_hdr = NULL; struct ipv6_hdr *ipv6_hdr; struct tcp_hdr *tcp_hdr; struct udp_hdr *udp_hdr; uint64_t inner_l3_offset = m->l2_len; - if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) || - (ol_flags & PKT_TX_OUTER_IPV6)) + if (ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)) inner_l3_offset += m->outer_l2_len + m->outer_l3_len; if (ol_flags & PKT_TX_IPV4) { @@ -130,7 +130,7 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags) ipv4_hdr->hdr_checksum = 0; } - if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) { + if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) { if (ol_flags & PKT_TX_IPV4) { udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr + m->l3_len); @@ -145,7 +145,7 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags) udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags); } - } else if ((ol_flags & PKT_TX_TCP_CKSUM) || + } else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM || (ol_flags & PKT_TX_TCP_SEG)) { if (ol_flags & PKT_TX_IPV4) { /* non-TSO tcp or TSO */ diff --git a/dpdk/lib/librte_security/rte_security_version.map b/dpdk/lib/librte_security/rte_security_version.map index 36e2ac48..53267bf3 100644 --- a/dpdk/lib/librte_security/rte_security_version.map +++ b/dpdk/lib/librte_security/rte_security_version.map @@ -4,12 +4,9 @@ DPDK_18.11 { rte_security_attach_session; rte_security_capabilities_get; rte_security_capability_get; - rte_security_get_userdata; rte_security_session_create; rte_security_session_destroy; rte_security_session_get_size; 
- rte_security_session_stats_get; - rte_security_session_update; rte_security_set_pkt_metadata; local: *; diff --git a/dpdk/lib/librte_table/rte_table_lpm_ipv6.c b/dpdk/lib/librte_table/rte_table_lpm_ipv6.c index a55f808a..4e068d79 100644 --- a/dpdk/lib/librte_table/rte_table_lpm_ipv6.c +++ b/dpdk/lib/librte_table/rte_table_lpm_ipv6.c @@ -182,7 +182,7 @@ rte_table_lpm_ipv6_entry_add( struct rte_table_lpm_ipv6 *lpm = table; struct rte_table_lpm_ipv6_key *ip_prefix = key; - uint32_t nht_pos, nht_pos0, nht_pos0_valid; + uint32_t nht_pos = 0, nht_pos0 = 0, nht_pos0_valid = 0; int status; /* Check input parameters */ diff --git a/dpdk/lib/librte_telemetry/Makefile b/dpdk/lib/librte_telemetry/Makefile index ef73a4e7..1b3fe054 100644 --- a/dpdk/lib/librte_telemetry/Makefile +++ b/dpdk/lib/librte_telemetry/Makefile @@ -7,7 +7,7 @@ include $(RTE_SDK)/mk/rte.vars.mk LIB = librte_telemetry.a CFLAGS += -O3 -CFLAGS += -I$(SRCDIR) +CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) CFLAGS += -DALLOW_EXPERIMENTAL_API LDLIBS += -lrte_eal -lrte_ethdev @@ -19,11 +19,6 @@ EXPORT_MAP := rte_telemetry_version.map LIBABIVER := 1 -ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) -CFLAGS_rte_telemetry.o += -Wno-address-of-packed-member -CFLAGS_rte_telemetry_parser.o += -Wno-address-of-packed-member -endif - # library source files SRCS-$(CONFIG_RTE_LIBRTE_TELEMETRY) := rte_telemetry.c SRCS-$(CONFIG_RTE_LIBRTE_TELEMETRY) += rte_telemetry_parser.c diff --git a/dpdk/lib/librte_telemetry/meson.build b/dpdk/lib/librte_telemetry/meson.build index 9492f544..83c48dce 100644 --- a/dpdk/lib/librte_telemetry/meson.build +++ b/dpdk/lib/librte_telemetry/meson.build @@ -2,7 +2,7 @@ # Copyright(c) 2018 Intel Corporation sources = files('rte_telemetry.c', 'rte_telemetry_parser.c', 'rte_telemetry_parser_test.c') -headers = files('rte_telemetry.h', 'rte_telemetry_internal.h', 'rte_telemetry_parser.h', 'rte_telemetry_parser_test.h') +headers = files('rte_telemetry.h', 'rte_telemetry_internal.h', 'rte_telemetry_parser.h') deps += 
['metrics', 'ethdev'] cflags += '-DALLOW_EXPERIMENTAL_API' diff --git a/dpdk/lib/librte_telemetry/rte_telemetry.c b/dpdk/lib/librte_telemetry/rte_telemetry.c index 7fb247ea..c38f1fab 100644 --- a/dpdk/lib/librte_telemetry/rte_telemetry.c +++ b/dpdk/lib/librte_telemetry/rte_telemetry.c @@ -18,7 +18,6 @@ #include "rte_telemetry.h" #include "rte_telemetry_internal.h" #include "rte_telemetry_parser.h" -#include "rte_telemetry_parser_test.h" #include "rte_telemetry_socket_tests.h" #define BUF_SIZE 1024 @@ -32,13 +31,13 @@ static telemetry_impl *static_telemetry; struct telemetry_message_test { - char *test_name; + const char *test_name; int (*test_func_ptr)(struct telemetry_impl *telemetry, int fd); }; struct json_data { char *status_code; - char *data; + const char *data; int port; char *stat_name; int stat_value; @@ -137,7 +136,7 @@ rte_telemetry_update_metrics_ethdev(struct telemetry_impl *telemetry, return 0; } -int32_t +static int32_t rte_telemetry_write_to_socket(struct telemetry_impl *telemetry, const char *json_string) { @@ -318,13 +317,13 @@ eperm_fail: static int32_t rte_telemetry_json_format_port(struct telemetry_impl *telemetry, uint32_t port_id, json_t *ports, uint32_t *metric_ids, - uint32_t num_metric_ids) + int num_metric_ids) { struct rte_metric_value *metrics = 0; struct rte_metric_name *names = 0; int num_metrics, ret, err_ret; json_t *port, *stats; - uint32_t i; + int i; num_metrics = rte_metrics_get_names(NULL, 0); if (num_metrics < 0) { @@ -449,12 +448,12 @@ einval_fail: static int32_t rte_telemetry_encode_json_format(struct telemetry_impl *telemetry, - uint32_t *port_ids, uint32_t num_port_ids, uint32_t *metric_ids, - uint32_t num_metric_ids, char **json_buffer) + uint32_t *port_ids, int num_port_ids, uint32_t *metric_ids, + int num_metric_ids, char **json_buffer) { int ret; json_t *root, *ports; - uint32_t i; + int i; if (num_port_ids <= 0 || num_metric_ids <= 0) { TELEMETRY_LOG_ERR("Please provide port and metric ids to query"); @@ -661,7 +660,7 
@@ rte_telemetry_initial_accept(struct telemetry_impl *telemetry) struct driver_index { const void *dev_ops; int reg_index; - } drv_idx[RTE_MAX_ETHPORTS]; + } drv_idx[RTE_MAX_ETHPORTS] = { {0} }; int nb_drv_idx = 0; uint16_t pid; int ret; @@ -911,7 +910,7 @@ close_socket: } int32_t __rte_experimental -rte_telemetry_init() +rte_telemetry_init(void) { int ret; pthread_attr_t attr; @@ -1196,7 +1195,7 @@ fail: return -1; } -int32_t +static int32_t rte_telemetry_dummy_client_socket(const char *valid_client_path) { int sockfd = socket(AF_UNIX, SOCK_SEQPACKET, 0); @@ -1671,8 +1670,8 @@ rte_telemetry_json_contents_test(struct telemetry_impl *telemetry, int fd) int ret; char buf[BUF_SIZE]; int fail_count = 0; - char *status = "Status Error: Invalid Argument 404"; - char *data = "null"; + const char *status = "Status Error: Invalid Argument 404"; + const char *data = "null"; struct json_data *data_struct; const char *invalid_contents = "{\"action\":0,\"command\":" "\"ports_stats_values_by_name\",\"data\":{\"ports\"" @@ -1728,7 +1727,7 @@ rte_telemetry_json_empty_test(struct telemetry_impl *telemetry, int fd) char buf[BUF_SIZE]; int fail_count = 0; const char *status = "Status Error: Invalid Argument 404"; - char *data = "null"; + const char *data = "null"; struct json_data *data_struct; const char *empty_json = "{}"; int buffer_read = 0; diff --git a/dpdk/lib/librte_telemetry/rte_telemetry_internal.h b/dpdk/lib/librte_telemetry/rte_telemetry_internal.h index c298c391..39b2928f 100644 --- a/dpdk/lib/librte_telemetry/rte_telemetry_internal.h +++ b/dpdk/lib/librte_telemetry/rte_telemetry_internal.h @@ -78,4 +78,7 @@ rte_telemetry_send_ports_stats_values(uint32_t *metric_ids, int num_metric_ids, int32_t rte_telemetry_socket_messaging_testing(int index, int socket); +int32_t +rte_telemetry_parser_test(struct telemetry_impl *telemetry); + #endif diff --git a/dpdk/lib/librte_telemetry/rte_telemetry_parser.c b/dpdk/lib/librte_telemetry/rte_telemetry_parser.c index 9bc16eef..07fe0284 
100644 --- a/dpdk/lib/librte_telemetry/rte_telemetry_parser.c +++ b/dpdk/lib/librte_telemetry/rte_telemetry_parser.c @@ -13,11 +13,12 @@ #include #include "rte_telemetry_internal.h" +#include "rte_telemetry_parser.h" typedef int (*command_func)(struct telemetry_impl *, int, json_t *); struct rte_telemetry_command { - char *text; + const char *text; command_func fn; } command; @@ -251,7 +252,7 @@ eperm_fail: return -1; } -int32_t +static int32_t rte_telemetry_command_ports_all_stat_values(struct telemetry_impl *telemetry, int action, json_t *data) { @@ -342,6 +343,7 @@ rte_telemetry_command_ports_all_stat_values(struct telemetry_impl *telemetry, goto fail; } + free(values); return 0; fail: @@ -349,7 +351,7 @@ fail: return -1; } -int32_t +static int32_t rte_telemetry_command_ports_stats_values_by_name(struct telemetry_impl *telemetry, int action, json_t *data) { diff --git a/dpdk/lib/librte_telemetry/rte_telemetry_parser_test.c b/dpdk/lib/librte_telemetry/rte_telemetry_parser_test.c index 5fe93fa6..23ec7a77 100644 --- a/dpdk/lib/librte_telemetry/rte_telemetry_parser_test.c +++ b/dpdk/lib/librte_telemetry/rte_telemetry_parser_test.c @@ -16,6 +16,7 @@ #include #include "rte_telemetry_parser.h" +#include "rte_telemetry_internal.h" enum choices { INV_ACTION_VAL, @@ -31,7 +32,7 @@ enum choices { #define TEST_CLIENT "/var/run/dpdk/test_client" -int32_t +static int32_t rte_telemetry_create_test_socket(struct telemetry_impl *telemetry, const char *test_client_path) { @@ -82,7 +83,7 @@ rte_telemetry_create_test_socket(struct telemetry_impl *telemetry, return 0; } -int32_t +static int32_t rte_telemetry_format_port_stat_ids(int *port_ids, int num_port_ids, const char * const *stat_names, int num_stat_names, json_t **data) { @@ -165,8 +166,8 @@ fail: return -1; } -int32_t -rte_telemetry_create_json_request(int action, char *command, +static int32_t +rte_telemetry_create_json_request(int action, const char *command, const char *client_path, int *port_ids, int num_port_ids, const 
char * const *stat_names, int num_stat_names, char **request, int inv_choice) @@ -262,13 +263,13 @@ fail: return -1; } -int32_t +static int32_t rte_telemetry_send_get_ports_and_stats_request(struct telemetry_impl *telemetry, - int action_choice, char *command_choice, int inv_choice) + int action_choice, const char *command_choice, int inv_choice) { int ret; char *request; - char *client_path_data = NULL; + const char *client_path_data = NULL; if (telemetry == NULL) { TELEMETRY_LOG_ERR("Telemetry argument has not been initialised"); @@ -302,7 +303,7 @@ rte_telemetry_send_get_ports_and_stats_request(struct telemetry_impl *telemetry, return 0; } -int32_t +static int32_t rte_telemetry_send_get_ports_details_request(struct telemetry_impl *telemetry, int action_choice, int *port_ids, int num_port_ids, int inv_choice) { @@ -313,7 +314,7 @@ rte_telemetry_send_get_ports_details_request(struct telemetry_impl *telemetry, return -EINVAL; } - char *command = "ports_details"; + const char *command = "ports_details"; if (inv_choice == INV_ACTION_VAL) action_choice = -1; @@ -342,7 +343,7 @@ rte_telemetry_send_get_ports_details_request(struct telemetry_impl *telemetry, return 0; } -int32_t +static int32_t rte_telemetry_send_stats_values_by_name_request(struct telemetry_impl *telemetry, int action_choice, int *port_ids, int num_port_ids, const char * const *stat_names, int num_stat_names, @@ -350,7 +351,7 @@ rte_telemetry_send_stats_values_by_name_request(struct telemetry_impl { int ret; char *request; - char *command = "ports_stats_values_by_name"; + const char *command = "ports_stats_values_by_name"; if (telemetry == NULL) { TELEMETRY_LOG_ERR("Telemetry argument has not been initialised"); @@ -386,7 +387,7 @@ rte_telemetry_send_stats_values_by_name_request(struct telemetry_impl return 0; } -int32_t +static int32_t rte_telemetry_send_unreg_request(struct telemetry_impl *telemetry, int action_choice, const char *client_path, int inv_choice) { @@ -398,7 +399,7 @@ 
rte_telemetry_send_unreg_request(struct telemetry_impl *telemetry, return -EINVAL; } - char *command = "clients"; + const char *command = "clients"; if (inv_choice == INV_ACTION_VAL) action_choice = -1; diff --git a/dpdk/lib/librte_telemetry/rte_telemetry_parser_test.h b/dpdk/lib/librte_telemetry/rte_telemetry_parser_test.h deleted file mode 100644 index 6ada8527..00000000 --- a/dpdk/lib/librte_telemetry/rte_telemetry_parser_test.h +++ /dev/null @@ -1,39 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2018 Intel Corporation - */ - -#ifndef _RTE_TELEMETRY_PARSER_TEST_H_ -#define _RTE_TELEMETRY_PARSER_TEST_H_ - -int32_t -rte_telemetry_parser_test(struct telemetry_impl *telemetry); - -int32_t -rte_telemetry_format_port_stat_ids(int *port_ids, int num_port_ids, - const char * const stat_names, int num_stat_names, json_t **data); - -int32_t -rte_telemetry_create_json_request(int action, char *command, - const char *client_path, int *port_ids, int num_port_ids, - const char * const stat_names, int num_stat_names, char **request, - int inv_choice); - -int32_t -rte_telemetry_send_get_ports_and_stats_request(struct telemetry_impl *telemetry, - int action_choice, char *command_choice, int inv_choice); - -int32_t -rte_telemetry_send_get_ports_details_request(struct telemetry_impl *telemetry, - int action_choice, int *port_ids, int num_port_ids, int inv_choice); - -int32_t -rte_telemetry_send_stats_values_by_name_request(struct telemetry_impl - *telemetry, int action_choice, int *port_ids, int num_port_ids, - const char * const stat_names, int num_stat_names, - int inv_choice); - -int32_t -rte_telemetry_send_unreg_request(int action_choice, const char *client_path, - int inv_choice); - -#endif diff --git a/dpdk/lib/librte_vhost/Makefile b/dpdk/lib/librte_vhost/Makefile index 5dd31898..8623e91c 100644 --- a/dpdk/lib/librte_vhost/Makefile +++ b/dpdk/lib/librte_vhost/Makefile @@ -11,7 +11,7 @@ EXPORT_MAP := rte_vhost_version.map LIBABIVER := 4 CFLAGS += 
-DALLOW_EXPERIMENTAL_API -CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 -D_FILE_OFFSET_BITS=64 +CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 CFLAGS += -I vhost_user CFLAGS += -fno-strict-aliasing LDLIBS += -lpthread diff --git a/dpdk/lib/librte_vhost/rte_vhost.h b/dpdk/lib/librte_vhost/rte_vhost.h index 5905e240..ae96ed71 100644 --- a/dpdk/lib/librte_vhost/rte_vhost.h +++ b/dpdk/lib/librte_vhost/rte_vhost.h @@ -23,6 +23,7 @@ extern "C" { /* These are not C++-aware. */ #include #include +#include #define RTE_VHOST_USER_CLIENT (1ULL << 0) #define RTE_VHOST_USER_NO_RECONNECT (1ULL << 1) diff --git a/dpdk/lib/librte_vhost/vhost_crypto.c b/dpdk/lib/librte_vhost/vhost_crypto.c index fc362ba9..d8a0f954 100644 --- a/dpdk/lib/librte_vhost/vhost_crypto.c +++ b/dpdk/lib/librte_vhost/vhost_crypto.c @@ -46,116 +46,107 @@ ((t)(uintptr_t)vhost_iova_to_vva(r->dev, r->vq, a, l, p)) static int -cipher_algo_transform(uint32_t virtio_cipher_algo) +cipher_algo_transform(uint32_t virtio_cipher_algo, + enum rte_crypto_cipher_algorithm *algo) { - int ret; - switch (virtio_cipher_algo) { case VIRTIO_CRYPTO_CIPHER_AES_CBC: - ret = RTE_CRYPTO_CIPHER_AES_CBC; + *algo = RTE_CRYPTO_CIPHER_AES_CBC; break; case VIRTIO_CRYPTO_CIPHER_AES_CTR: - ret = RTE_CRYPTO_CIPHER_AES_CTR; + *algo = RTE_CRYPTO_CIPHER_AES_CTR; break; case VIRTIO_CRYPTO_CIPHER_DES_ECB: - ret = -VIRTIO_CRYPTO_NOTSUPP; + *algo = -VIRTIO_CRYPTO_NOTSUPP; break; case VIRTIO_CRYPTO_CIPHER_DES_CBC: - ret = RTE_CRYPTO_CIPHER_DES_CBC; + *algo = RTE_CRYPTO_CIPHER_DES_CBC; break; case VIRTIO_CRYPTO_CIPHER_3DES_ECB: - ret = RTE_CRYPTO_CIPHER_3DES_ECB; + *algo = RTE_CRYPTO_CIPHER_3DES_ECB; break; case VIRTIO_CRYPTO_CIPHER_3DES_CBC: - ret = RTE_CRYPTO_CIPHER_3DES_CBC; + *algo = RTE_CRYPTO_CIPHER_3DES_CBC; break; case VIRTIO_CRYPTO_CIPHER_3DES_CTR: - ret = RTE_CRYPTO_CIPHER_3DES_CTR; + *algo = RTE_CRYPTO_CIPHER_3DES_CTR; break; case VIRTIO_CRYPTO_CIPHER_KASUMI_F8: - ret = RTE_CRYPTO_CIPHER_KASUMI_F8; + *algo = RTE_CRYPTO_CIPHER_KASUMI_F8; break; case 
VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2: - ret = RTE_CRYPTO_CIPHER_SNOW3G_UEA2; + *algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2; break; case VIRTIO_CRYPTO_CIPHER_AES_F8: - ret = RTE_CRYPTO_CIPHER_AES_F8; + *algo = RTE_CRYPTO_CIPHER_AES_F8; break; case VIRTIO_CRYPTO_CIPHER_AES_XTS: - ret = RTE_CRYPTO_CIPHER_AES_XTS; + *algo = RTE_CRYPTO_CIPHER_AES_XTS; break; case VIRTIO_CRYPTO_CIPHER_ZUC_EEA3: - ret = RTE_CRYPTO_CIPHER_ZUC_EEA3; + *algo = RTE_CRYPTO_CIPHER_ZUC_EEA3; break; default: - ret = -VIRTIO_CRYPTO_BADMSG; + return -VIRTIO_CRYPTO_BADMSG; break; } - return ret; + return 0; } static int -auth_algo_transform(uint32_t virtio_auth_algo) +auth_algo_transform(uint32_t virtio_auth_algo, + enum rte_crypto_auth_algorithm *algo) { - int ret; - switch (virtio_auth_algo) { - case VIRTIO_CRYPTO_NO_MAC: - ret = RTE_CRYPTO_AUTH_NULL; + *algo = RTE_CRYPTO_AUTH_NULL; break; case VIRTIO_CRYPTO_MAC_HMAC_MD5: - ret = RTE_CRYPTO_AUTH_MD5_HMAC; + *algo = RTE_CRYPTO_AUTH_MD5_HMAC; break; case VIRTIO_CRYPTO_MAC_HMAC_SHA1: - ret = RTE_CRYPTO_AUTH_SHA1_HMAC; + *algo = RTE_CRYPTO_AUTH_SHA1_HMAC; break; case VIRTIO_CRYPTO_MAC_HMAC_SHA_224: - ret = RTE_CRYPTO_AUTH_SHA224_HMAC; + *algo = RTE_CRYPTO_AUTH_SHA224_HMAC; break; case VIRTIO_CRYPTO_MAC_HMAC_SHA_256: - ret = RTE_CRYPTO_AUTH_SHA256_HMAC; + *algo = RTE_CRYPTO_AUTH_SHA256_HMAC; break; case VIRTIO_CRYPTO_MAC_HMAC_SHA_384: - ret = RTE_CRYPTO_AUTH_SHA384_HMAC; + *algo = RTE_CRYPTO_AUTH_SHA384_HMAC; break; case VIRTIO_CRYPTO_MAC_HMAC_SHA_512: - ret = RTE_CRYPTO_AUTH_SHA512_HMAC; - break; - case VIRTIO_CRYPTO_MAC_CMAC_3DES: - ret = -VIRTIO_CRYPTO_NOTSUPP; + *algo = RTE_CRYPTO_AUTH_SHA512_HMAC; break; case VIRTIO_CRYPTO_MAC_CMAC_AES: - ret = RTE_CRYPTO_AUTH_AES_CMAC; + *algo = RTE_CRYPTO_AUTH_AES_CMAC; break; case VIRTIO_CRYPTO_MAC_KASUMI_F9: - ret = RTE_CRYPTO_AUTH_KASUMI_F9; + *algo = RTE_CRYPTO_AUTH_KASUMI_F9; break; case VIRTIO_CRYPTO_MAC_SNOW3G_UIA2: - ret = RTE_CRYPTO_AUTH_SNOW3G_UIA2; + *algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2; break; case 
VIRTIO_CRYPTO_MAC_GMAC_AES: - ret = RTE_CRYPTO_AUTH_AES_GMAC; - break; - case VIRTIO_CRYPTO_MAC_GMAC_TWOFISH: - ret = -VIRTIO_CRYPTO_NOTSUPP; + *algo = RTE_CRYPTO_AUTH_AES_GMAC; break; case VIRTIO_CRYPTO_MAC_CBCMAC_AES: - ret = RTE_CRYPTO_AUTH_AES_CBC_MAC; - break; - case VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9: - ret = -VIRTIO_CRYPTO_NOTSUPP; + *algo = RTE_CRYPTO_AUTH_AES_CBC_MAC; break; case VIRTIO_CRYPTO_MAC_XCBC_AES: - ret = RTE_CRYPTO_AUTH_AES_XCBC_MAC; + *algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC; break; + case VIRTIO_CRYPTO_MAC_CMAC_3DES: + case VIRTIO_CRYPTO_MAC_GMAC_TWOFISH: + case VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9: + return -VIRTIO_CRYPTO_NOTSUPP; default: - ret = -VIRTIO_CRYPTO_BADMSG; - break; + return -VIRTIO_CRYPTO_BADMSG; } - return ret; + return 0; } static int get_iv_len(enum rte_crypto_cipher_algorithm algo) @@ -241,12 +232,11 @@ transform_cipher_param(struct rte_crypto_sym_xform *xform, { int ret; - ret = cipher_algo_transform(param->cipher_algo); + ret = cipher_algo_transform(param->cipher_algo, &xform->cipher.algo); if (unlikely(ret < 0)) return ret; xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER; - xform->cipher.algo = (enum rte_crypto_cipher_algorithm)ret; xform->cipher.key.length = param->cipher_key_len; if (xform->cipher.key.length > 0) xform->cipher.key.data = param->cipher_key_buf; @@ -292,11 +282,11 @@ transform_chain_param(struct rte_crypto_sym_xform *xforms, } /* cipher */ - ret = cipher_algo_transform(param->cipher_algo); + ret = cipher_algo_transform(param->cipher_algo, + &xform_cipher->cipher.algo); if (unlikely(ret < 0)) return ret; xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER; - xform_cipher->cipher.algo = (enum rte_crypto_cipher_algorithm)ret; xform_cipher->cipher.key.length = param->cipher_key_len; xform_cipher->cipher.key.data = param->cipher_key_buf; ret = get_iv_len(xform_cipher->cipher.algo); @@ -307,10 +297,9 @@ transform_chain_param(struct rte_crypto_sym_xform *xforms, /* auth */ xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH; - ret = 
auth_algo_transform(param->hash_algo); + ret = auth_algo_transform(param->hash_algo, &xform_auth->auth.algo); if (unlikely(ret < 0)) return ret; - xform_auth->auth.algo = (enum rte_crypto_auth_algorithm)ret; xform_auth->auth.digest_length = param->digest_len; xform_auth->auth.key.length = param->auth_key_len; xform_auth->auth.key.data = param->auth_key_buf; @@ -1017,7 +1006,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, } if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *), vc_req, &desc, chain->para.src_data_len, - nb_descs, vq_size)) < 0) { + nb_descs, vq_size) < 0)) { ret = VIRTIO_CRYPTO_BADMSG; goto error_exit; } @@ -1588,7 +1577,7 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid, op->sym->m_dst->data_off = 0; if (unlikely(vhost_crypto_process_one_req(vcrypto, vq, - op, head, desc_idx)) < 0) + op, head, desc_idx) < 0)) break; } diff --git a/dpdk/lib/librte_vhost/vhost_user.c b/dpdk/lib/librte_vhost/vhost_user.c index 5552f8bb..530823ec 100644 --- a/dpdk/lib/librte_vhost/vhost_user.c +++ b/dpdk/lib/librte_vhost/vhost_user.c @@ -83,6 +83,36 @@ static const char *vhost_message_str[VHOST_USER_MAX] = { static int send_vhost_reply(int sockfd, struct VhostUserMsg *msg); static int read_vhost_message(int sockfd, struct VhostUserMsg *msg); +static void +close_msg_fds(struct VhostUserMsg *msg) +{ + int i; + + for (i = 0; i < msg->fd_num; i++) + close(msg->fds[i]); +} + +/* + * Ensure the expected number of FDs is received, + * close all FDs and return an error if this is not the case. 
+ */ +static int +validate_msg_fds(struct VhostUserMsg *msg, int expected_fds) +{ + if (msg->fd_num == expected_fds) + return 0; + + RTE_LOG(ERR, VHOST_CONFIG, + " Expect %d FDs for request %s, received %d\n", + expected_fds, + vhost_message_str[msg->request.master], + msg->fd_num); + + close_msg_fds(msg); + + return -1; +} + static uint64_t get_blk_size(int fd) { @@ -179,18 +209,25 @@ vhost_backend_cleanup(struct virtio_net *dev) */ static int vhost_user_set_owner(struct virtio_net **pdev __rte_unused, - struct VhostUserMsg *msg __rte_unused, + struct VhostUserMsg *msg, int main_fd __rte_unused) { + if (validate_msg_fds(msg, 0) != 0) + return VH_RESULT_ERR; + return VH_RESULT_OK; } static int vhost_user_reset_owner(struct virtio_net **pdev, - struct VhostUserMsg *msg __rte_unused, + struct VhostUserMsg *msg, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; + + if (validate_msg_fds(msg, 0) != 0) + return VH_RESULT_ERR; + vhost_destroy_device_notify(dev); cleanup_device(dev, 0); @@ -208,6 +245,9 @@ vhost_user_get_features(struct virtio_net **pdev, struct VhostUserMsg *msg, struct virtio_net *dev = *pdev; uint64_t features = 0; + if (validate_msg_fds(msg, 0) != 0) + return VH_RESULT_ERR; + rte_vhost_driver_get_features(dev->ifname, &features); msg->payload.u64 = features; @@ -227,6 +267,9 @@ vhost_user_get_queue_num(struct virtio_net **pdev, struct VhostUserMsg *msg, struct virtio_net *dev = *pdev; uint32_t queue_num = 0; + if (validate_msg_fds(msg, 0) != 0) + return VH_RESULT_ERR; + rte_vhost_driver_get_queue_num(dev->ifname, &queue_num); msg->payload.u64 = (uint64_t)queue_num; @@ -249,6 +292,9 @@ vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg, struct rte_vdpa_device *vdpa_dev; int did = -1; + if (validate_msg_fds(msg, 0) != 0) + return VH_RESULT_ERR; + rte_vhost_driver_get_features(dev->ifname, &vhost_features); if (features & ~vhost_features) { RTE_LOG(ERR, VHOST_CONFIG, @@ -329,6 +375,9 @@ vhost_user_set_vring_num(struct 
virtio_net **pdev, struct virtio_net *dev = *pdev; struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index]; + if (validate_msg_fds(msg, 0) != 0) + return VH_RESULT_ERR; + vq->size = msg->payload.state.num; /* VIRTIO 1.0, 2.4 Virtqueues says: @@ -346,6 +395,8 @@ vhost_user_set_vring_num(struct virtio_net **pdev, vq->nr_zmbuf = 0; vq->last_zmbuf_idx = 0; vq->zmbuf_size = vq->size; + if (vq->zmbufs) + rte_free(vq->zmbufs); vq->zmbufs = rte_zmalloc(NULL, vq->zmbuf_size * sizeof(struct zcopy_mbuf), 0); if (vq->zmbufs == NULL) { @@ -358,6 +409,8 @@ vhost_user_set_vring_num(struct virtio_net **pdev, } if (vq_is_packed(dev)) { + if (vq->shadow_used_packed) + rte_free(vq->shadow_used_packed); vq->shadow_used_packed = rte_malloc(NULL, vq->size * sizeof(struct vring_used_elem_packed), @@ -369,6 +422,8 @@ vhost_user_set_vring_num(struct virtio_net **pdev, } } else { + if (vq->shadow_used_split) + rte_free(vq->shadow_used_split); vq->shadow_used_split = rte_malloc(NULL, vq->size * sizeof(struct vring_used_elem), RTE_CACHE_LINE_SIZE); @@ -379,6 +434,8 @@ vhost_user_set_vring_num(struct virtio_net **pdev, } } + if (vq->batch_copy_elems) + rte_free(vq->batch_copy_elems); vq->batch_copy_elems = rte_malloc(NULL, vq->size * sizeof(struct batch_copy_elem), RTE_CACHE_LINE_SIZE); @@ -700,6 +757,9 @@ vhost_user_set_vring_addr(struct virtio_net **pdev, struct VhostUserMsg *msg, struct vhost_virtqueue *vq; struct vhost_vring_addr *addr = &msg->payload.addr; + if (validate_msg_fds(msg, 0) != 0) + return VH_RESULT_ERR; + if (dev->mem == NULL) return VH_RESULT_ERR; @@ -738,6 +798,9 @@ vhost_user_set_vring_base(struct virtio_net **pdev, struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index]; uint64_t val = msg->payload.state.num; + if (validate_msg_fds(msg, 0) != 0) + return VH_RESULT_ERR; + if (vq_is_packed(dev)) { /* * Bit[0:14]: avail index @@ -899,6 +962,9 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, int populate; int fd; + if 
(validate_msg_fds(msg, memory->nregions) != 0) + return VH_RESULT_ERR; + if (memory->nregions > VHOST_MEMORY_MAX_NREGIONS) { RTE_LOG(ERR, VHOST_CONFIG, "too many memory regions (%u)\n", memory->nregions); @@ -909,8 +975,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, RTE_LOG(INFO, VHOST_CONFIG, "(%d) memory regions not changed\n", dev->vid); - for (i = 0; i < memory->nregions; i++) - close(msg->fds[i]); + close_msg_fds(msg); return VH_RESULT_OK; } @@ -1053,6 +1118,10 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, "Failed to read qemu ack on postcopy set-mem-table\n"); goto err_mmap; } + + if (validate_msg_fds(&ack_msg, 0) != 0) + goto err_mmap; + if (ack_msg.request.master != VHOST_USER_SET_MEM_TABLE) { RTE_LOG(ERR, VHOST_CONFIG, "Bad qemu ack on postcopy set-mem-table (%d)\n", @@ -1172,6 +1241,11 @@ vhost_user_set_vring_call(struct virtio_net **pdev, struct VhostUserMsg *msg, struct virtio_net *dev = *pdev; struct vhost_vring_file file; struct vhost_virtqueue *vq; + int expected_fds; + + expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1; + if (validate_msg_fds(msg, expected_fds) != 0) + return VH_RESULT_ERR; file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK; if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) @@ -1194,6 +1268,12 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused, struct VhostUserMsg *msg, int main_fd __rte_unused) { + int expected_fds; + + expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 
0 : 1; + if (validate_msg_fds(msg, expected_fds) != 0) + return VH_RESULT_ERR; + if (!(msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)) close(msg->fds[0]); RTE_LOG(INFO, VHOST_CONFIG, "not implemented\n"); @@ -1208,6 +1288,11 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg, struct virtio_net *dev = *pdev; struct vhost_vring_file file; struct vhost_virtqueue *vq; + int expected_fds; + + expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1; + if (validate_msg_fds(msg, expected_fds) != 0) + return VH_RESULT_ERR; file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK; if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) @@ -1265,6 +1350,9 @@ vhost_user_get_vring_base(struct virtio_net **pdev, struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index]; uint64_t val; + if (validate_msg_fds(msg, 0) != 0) + return VH_RESULT_ERR; + /* We have to stop the queue (virtio) if it is running. */ vhost_destroy_device_notify(dev); @@ -1338,6 +1426,9 @@ vhost_user_set_vring_enable(struct virtio_net **pdev, struct rte_vdpa_device *vdpa_dev; int did = -1; + if (validate_msg_fds(msg, 0) != 0) + return VH_RESULT_ERR; + RTE_LOG(INFO, VHOST_CONFIG, "set queue enable: %d to qp idx: %d\n", enable, index); @@ -1368,6 +1459,9 @@ vhost_user_get_protocol_features(struct virtio_net **pdev, struct virtio_net *dev = *pdev; uint64_t features, protocol_features; + if (validate_msg_fds(msg, 0) != 0) + return VH_RESULT_ERR; + rte_vhost_driver_get_features(dev->ifname, &features); rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features); @@ -1396,6 +1490,9 @@ vhost_user_set_protocol_features(struct virtio_net **pdev, uint64_t protocol_features = msg->payload.u64; uint64_t slave_protocol_features = 0; + if (validate_msg_fds(msg, 0) != 0) + return VH_RESULT_ERR; + rte_vhost_driver_get_protocol_features(dev->ifname, &slave_protocol_features); if (protocol_features & ~slave_protocol_features) { @@ -1419,6 +1516,9 @@ 
vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg, uint64_t size, off; void *addr; + if (validate_msg_fds(msg, 1) != 0) + return VH_RESULT_ERR; + if (fd < 0) { RTE_LOG(ERR, VHOST_CONFIG, "invalid log fd: %d\n", fd); return VH_RESULT_ERR; @@ -1482,6 +1582,9 @@ static int vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused, struct VhostUserMsg *msg, int main_fd __rte_unused) { + if (validate_msg_fds(msg, 1) != 0) + return VH_RESULT_ERR; + close(msg->fds[0]); RTE_LOG(INFO, VHOST_CONFIG, "not implemented.\n"); @@ -1505,6 +1608,9 @@ vhost_user_send_rarp(struct virtio_net **pdev, struct VhostUserMsg *msg, struct rte_vdpa_device *vdpa_dev; int did = -1; + if (validate_msg_fds(msg, 0) != 0) + return VH_RESULT_ERR; + RTE_LOG(DEBUG, VHOST_CONFIG, ":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); @@ -1532,6 +1638,10 @@ vhost_user_net_set_mtu(struct virtio_net **pdev, struct VhostUserMsg *msg, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; + + if (validate_msg_fds(msg, 0) != 0) + return VH_RESULT_ERR; + if (msg->payload.u64 < VIRTIO_MIN_MTU || msg->payload.u64 > VIRTIO_MAX_MTU) { RTE_LOG(ERR, VHOST_CONFIG, "Invalid MTU size (%"PRIu64")\n", @@ -1552,6 +1662,9 @@ vhost_user_set_req_fd(struct virtio_net **pdev, struct VhostUserMsg *msg, struct virtio_net *dev = *pdev; int fd = msg->fds[0]; + if (validate_msg_fds(msg, 1) != 0) + return VH_RESULT_ERR; + if (fd < 0) { RTE_LOG(ERR, VHOST_CONFIG, "Invalid file descriptor for slave channel (%d)\n", @@ -1622,6 +1735,9 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg, uint16_t i; uint64_t vva, len; + if (validate_msg_fds(msg, 0) != 0) + return VH_RESULT_ERR; + switch (imsg->type) { case VHOST_IOTLB_UPDATE: len = imsg->size; @@ -1668,6 +1784,9 @@ vhost_user_set_postcopy_advise(struct virtio_net **pdev, #ifdef RTE_LIBRTE_VHOST_POSTCOPY struct uffdio_api api_struct; + if (validate_msg_fds(msg, 0) != 0) + return VH_RESULT_ERR; + 
dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); if (dev->postcopy_ufd == -1) { @@ -1703,6 +1822,9 @@ vhost_user_set_postcopy_listen(struct virtio_net **pdev, { struct virtio_net *dev = *pdev; + if (validate_msg_fds(msg, 0) != 0) + return VH_RESULT_ERR; + if (dev->mem && dev->mem->nregions) { RTE_LOG(ERR, VHOST_CONFIG, "Regions already registered at postcopy-listen\n"); @@ -1719,6 +1841,9 @@ vhost_user_postcopy_end(struct virtio_net **pdev, struct VhostUserMsg *msg, { struct virtio_net *dev = *pdev; + if (validate_msg_fds(msg, 0) != 0) + return VH_RESULT_ERR; + dev->postcopy_listening = 0; if (dev->postcopy_ufd >= 0) { close(dev->postcopy_ufd); diff --git a/dpdk/lib/meson.build b/dpdk/lib/meson.build index df4226c5..62b5cdfe 100644 --- a/dpdk/lib/meson.build +++ b/dpdk/lib/meson.build @@ -34,9 +34,6 @@ endif enabled_libs = [] # used to print summary at the end -# -D_GNU_SOURCE unconditionally -default_cflags += '-D_GNU_SOURCE' - foreach l:libraries build = true name = l diff --git a/dpdk/meson.build b/dpdk/meson.build index 63b32c6e..3b7de017 100644 --- a/dpdk/meson.build +++ b/dpdk/meson.build @@ -2,7 +2,7 @@ # Copyright(c) 2017 Intel Corporation project('DPDK', 'C', - version: '18.11.2', + version: '18.11.5', license: 'BSD', default_options: ['buildtype=release', 'default_library=static'], meson_version: '>= 0.41' @@ -70,6 +70,10 @@ configure_file(output: build_cfg, dpdk_drivers = ['-Wl,--whole-archive'] + dpdk_drivers + ['-Wl,--no-whole-archive'] pkg = import('pkgconfig') +pkg_extra_cflags = ['-include', 'rte_config.h'] + machine_args +if host_machine.system() == 'freebsd' + pkg_extra_cflags += ['-D__BSD_VISIBLE'] +endif pkg.generate(name: meson.project_name(), filebase: 'lib' + meson.project_name().to_lower(), version: meson.project_version(), @@ -80,7 +84,7 @@ pkg.generate(name: meson.project_name(), Note that CFLAGS might contain an -march flag higher than typical baseline. 
This is required for a number of static inline functions in the public headers.''', subdirs: [get_option('include_subdir_arch'), '.'], - extra_cflags: ['-include', 'rte_config.h'] + machine_args + extra_cflags: pkg_extra_cflags ) # final output, list all the libs and drivers to be built diff --git a/dpdk/meson_options.txt b/dpdk/meson_options.txt index 10b54161..91b0449a 100644 --- a/dpdk/meson_options.txt +++ b/dpdk/meson_options.txt @@ -13,7 +13,7 @@ option('examples', type: 'string', value: '', option('include_subdir_arch', type: 'string', value: '', description: 'subdirectory where to install arch-dependent headers') option('kernel_dir', type: 'string', value: '', - description: 'path to the kernel for building kernel modules, they will be installed in $DEST_DIR/$kernel_dir/../extra/dpdk') + description: 'Path to the kernel for building kernel modules. Headers must be in $kernel_dir/build. Modules will be installed in $DEST_DIR/$kernel_dir/extra/dpdk.') option('lib_musdk_dir', type: 'string', value: '', description: 'path to the MUSDK library installation directory') option('machine', type: 'string', value: 'native', diff --git a/dpdk/mk/arch/arm/rte.vars.mk b/dpdk/mk/arch/arm/rte.vars.mk index 27b11476..dc8c10a2 100644 --- a/dpdk/mk/arch/arm/rte.vars.mk +++ b/dpdk/mk/arch/arm/rte.vars.mk @@ -4,7 +4,7 @@ ARCH ?= arm CROSS ?= -CPU_CFLAGS ?= -marm -munaligned-access +CPU_CFLAGS ?= -marm -munaligned-access -D_FILE_OFFSET_BITS=64 CPU_LDFLAGS ?= CPU_ASFLAGS ?= -felf diff --git a/dpdk/mk/arch/i686/rte.vars.mk b/dpdk/mk/arch/i686/rte.vars.mk index 3f48f674..c867883f 100644 --- a/dpdk/mk/arch/i686/rte.vars.mk +++ b/dpdk/mk/arch/i686/rte.vars.mk @@ -24,7 +24,7 @@ ARCH ?= i386 ARCH_DIR := x86 CROSS ?= -CPU_CFLAGS ?= -m32 +CPU_CFLAGS ?= -m32 -D_FILE_OFFSET_BITS=64 CPU_LDFLAGS ?= -melf_i386 CPU_ASFLAGS ?= -felf diff --git a/dpdk/mk/machine/armv8a/rte.vars.mk b/dpdk/mk/machine/armv8a/rte.vars.mk index 8252efbb..5e3ffc3a 100644 --- a/dpdk/mk/machine/armv8a/rte.vars.mk +++ 
b/dpdk/mk/machine/armv8a/rte.vars.mk @@ -28,4 +28,4 @@ # CPU_LDFLAGS = # CPU_ASFLAGS = -MACHINE_CFLAGS += -march=armv8-a+crc+crypto +MACHINE_CFLAGS += -march=armv8-a+crc diff --git a/dpdk/mk/rte.sdkinstall.mk b/dpdk/mk/rte.sdkinstall.mk index 2d34b4e5..e8625361 100644 --- a/dpdk/mk/rte.sdkinstall.mk +++ b/dpdk/mk/rte.sdkinstall.mk @@ -24,7 +24,7 @@ export prefix ?= kerneldir ?= $(prefix)/kmod else ifeq ($(RTE_EXEC_ENV),linuxapp) -kerneldir ?= /lib/modules/$(shell uname -r)/extra/dpdk +kerneldir ?= $(RTE_KERNELDIR:/build=/extra/dpdk) else kerneldir ?= /boot/modules endif diff --git a/dpdk/mk/target/generic/rte.vars.mk b/dpdk/mk/target/generic/rte.vars.mk index dd149acc..3f403960 100644 --- a/dpdk/mk/target/generic/rte.vars.mk +++ b/dpdk/mk/target/generic/rte.vars.mk @@ -111,6 +111,11 @@ endif # always define _GNU_SOURCE CFLAGS += -D_GNU_SOURCE +# define __BSD_VISIBLE when building for FreeBSD +ifeq ($(CONFIG_RTE_EXEC_ENV_BSDAPP),y) +CFLAGS += -D__BSD_VISIBLE +endif + export CFLAGS export LDFLAGS diff --git a/dpdk/pkg/dpdk.spec b/dpdk/pkg/dpdk.spec index 8d3da901..fcce4b2b 100644 --- a/dpdk/pkg/dpdk.spec +++ b/dpdk/pkg/dpdk.spec @@ -2,7 +2,7 @@ # Copyright 2014 6WIND S.A. Name: dpdk -Version: 18.11.2 +Version: 18.11.5 Release: 1 Packager: packaging@6wind.com URL: http://dpdk.org diff --git a/dpdk/test/bpf/mbuf.h b/dpdk/test/bpf/mbuf.h index f24f908d..3059e796 100644 --- a/dpdk/test/bpf/mbuf.h +++ b/dpdk/test/bpf/mbuf.h @@ -13,7 +13,6 @@ #include #include -#include #ifdef __cplusplus extern "C" { @@ -364,6 +363,23 @@ typedef struct { volatile int16_t cnt; /**< An internal counter value. */ } rte_atomic16_t; +#define RTE_CACHE_LINE_MIN_SIZE 64 /**< Minimum Cache line size. */ + +/** + * Force minimum cache line alignment. + */ +#define __rte_cache_min_aligned __rte_aligned(RTE_CACHE_LINE_MIN_SIZE) + +/** + * IO virtual address type. 
+ * When the physical addressing mode (IOVA as PA) is in use, + * the translation from an IO virtual address (IOVA) to a physical address + * is a direct mapping, i.e. the same value. + * Otherwise, in virtual mode (IOVA as VA), an IOMMU may do the translation. + */ +typedef uint64_t rte_iova_t; +#define RTE_BAD_IOVA ((rte_iova_t)-1) + /** * The generic rte_mbuf, containing a packet mbuf. */ @@ -377,7 +393,11 @@ struct rte_mbuf { * same mbuf cacheline0 layout for 32-bit and 64-bit. This makes * working on vector drivers easier. */ - phys_addr_t buf_physaddr __rte_aligned(sizeof(phys_addr_t)); + RTE_STD_C11 + union { + rte_iova_t buf_iova; + rte_iova_t buf_physaddr; /**< deprecated */ + } __rte_aligned(sizeof(rte_iova_t)); /* next 8 bytes are initialised on RX descriptor rearm */ MARKER64 rearm_data; diff --git a/dpdk/test/test/autotest_runner.py b/dpdk/test/test/autotest_runner.py index 36941a40..8e7a4567 100644 --- a/dpdk/test/test/autotest_runner.py +++ b/dpdk/test/test/autotest_runner.py @@ -43,9 +43,16 @@ def get_numa_nodes(): # find first (or any, really) CPU on a particular node, will be used to spread # processes around NUMA nodes to avoid exhausting memory on particular node def first_cpu_on_node(node_nr): - cpu_path = glob.glob("/sys/devices/system/node/node%d/cpu*" % node_nr)[0] - cpu_name = os.path.basename(cpu_path) - m = re.match(r"cpu(\d+)", cpu_name) + cpu_path = glob.glob("/sys/devices/system/node/node%d/cpu*" % node_nr) + r = re.compile(r"cpu(\d+)") + cpu_name = filter(None, + map(r.match, + map(os.path.basename, cpu_path) + ) + ) + # for compatibility between python 3 and 2 we need to make interable out + # of filter return as it returns list in python 2 and a generator in 3 + m = next(iter(cpu_name)) return int(m.group(1)) diff --git a/dpdk/test/test/meson.build b/dpdk/test/test/meson.build index 8f03ddda..c097840f 100644 --- a/dpdk/test/test/meson.build +++ b/dpdk/test/test/meson.build @@ -79,6 +79,7 @@ test_sources = files('commands.c', 
'test_power.c', 'test_power_acpi_cpufreq.c', 'test_power_kvm_vm.c', + 'test_rawdev.c', 'test_prefetch.c', 'test_reciprocal_division.c', 'test_reciprocal_division_perf.c', @@ -122,6 +123,7 @@ test_deps = ['acl', 'metrics', 'pipeline', 'port', + 'rawdev', 'reorder', 'ring', 'timer' @@ -209,6 +211,7 @@ test_names = [ 'power_autotest', 'power_kvm_vm_autotest', 'prefetch_autotest', + 'rawdev_autotest', 'reciprocal_division', 'reciprocal_division_perf', 'red_all', @@ -237,12 +240,6 @@ test_names = [ if dpdk_conf.has('RTE_LIBRTE_PDUMP') test_deps += 'pdump' endif -if dpdk_conf.has('RTE_LIBRTE_I40E_PMD') - test_deps += 'pmd_i40e' -endif -if dpdk_conf.has('RTE_LIBRTE_IXGBE_PMD') - test_deps += 'pmd_ixgbe' -endif if dpdk_conf.has('RTE_LIBRTE_BOND_PMD') test_deps += 'pmd_bond' endif @@ -291,7 +288,7 @@ if get_option('tests') dependencies: test_dep_objs, c_args: [cflags, '-DALLOW_EXPERIMENTAL_API'], install_rpath: driver_install_path, - install: false) + install: true) # some perf tests (eg: memcpy perf autotest)take very long # to complete, so timeout to 10 minutes diff --git a/dpdk/test/test/test_distributor.c b/dpdk/test/test/test_distributor.c index da3348fd..d4996798 100644 --- a/dpdk/test/test/test_distributor.c +++ b/dpdk/test/test/test_distributor.c @@ -374,7 +374,8 @@ handle_work_for_shutdown_test(void *arg) id, buf, buf, num); while (!quit) { - worker_stats[id].handled_packets++, count++; + worker_stats[id].handled_packets += num; + count += num; rte_pktmbuf_free(pkt); num = rte_distributor_get_pkt(d, id, buf, buf, num); } diff --git a/dpdk/test/test/test_eal_flags.c b/dpdk/test/test/test_eal_flags.c index 775ccd3d..f70fb07e 100644 --- a/dpdk/test/test/test_eal_flags.c +++ b/dpdk/test/test/test_eal_flags.c @@ -1133,6 +1133,40 @@ test_file_prefix(void) return 0; } +/* This function writes in passed buf pointer a valid --socket-mem= option + * for num_sockets then concatenates the provided suffix string. 
+ * + * Example for num_sockets 4, mem "2", suffix "plop" + * --socket-mem=2,2,2,2plop + */ +static void +populate_socket_mem_param(int num_sockets, const char *mem, + const char *suffix, char *buf, size_t buf_size) +{ + unsigned int offset = 0; + int written; + int i; + + written = snprintf(&buf[offset], buf_size - offset, "--socket-mem="); + if (written < 0 || written + offset >= buf_size) + return; + offset += written; + + for (i = 0; i < num_sockets - 1; i++) { + written = snprintf(&buf[offset], buf_size - offset, + "%s,", mem); + if (written < 0 || written + offset >= buf_size) + return; + offset += written; + } + + written = snprintf(&buf[offset], buf_size - offset, "%s%s", mem, + suffix); + if (written < 0 || written + offset >= buf_size) + return; + offset += written; +} + /* * Tests for correct handling of -m and --socket-mem flags */ @@ -1160,42 +1194,44 @@ test_memory_flags(void) "--file-prefix=" memtest, "-m", DEFAULT_MEM_SIZE}; /* valid (zero) --socket-mem flag */ + char arg2_socket_mem[SOCKET_MEM_STRLEN]; const char *argv2[] = {prgname, "-c", "10", "-n", "2", - "--file-prefix=" memtest, "--socket-mem=0,0,0,0"}; + "--file-prefix=" memtest, arg2_socket_mem}; /* invalid (incomplete) --socket-mem flag */ + char arg3_socket_mem[SOCKET_MEM_STRLEN]; const char *argv3[] = {prgname, "-c", "10", "-n", "2", - "--file-prefix=" memtest, "--socket-mem=2,2,"}; + "--file-prefix=" memtest, arg3_socket_mem}; /* invalid (mixed with invalid data) --socket-mem flag */ + char arg4_socket_mem[SOCKET_MEM_STRLEN]; const char *argv4[] = {prgname, "-c", "10", "-n", "2", - "--file-prefix=" memtest, "--socket-mem=2,2,Fred"}; + "--file-prefix=" memtest, arg4_socket_mem}; /* invalid (with numeric value as last character) --socket-mem flag */ + char arg5_socket_mem[SOCKET_MEM_STRLEN]; const char *argv5[] = {prgname, "-c", "10", "-n", "2", - "--file-prefix=" memtest, "--socket-mem=2,2,Fred0"}; + "--file-prefix=" memtest, arg5_socket_mem}; /* invalid (with empty socket) --socket-mem 
flag */ + char arg6_socket_mem[SOCKET_MEM_STRLEN]; const char *argv6[] = {prgname, "-c", "10", "-n", "2", - "--file-prefix=" memtest, "--socket-mem=2,,2"}; + "--file-prefix=" memtest, arg6_socket_mem}; /* invalid (null) --socket-mem flag */ const char *argv7[] = {prgname, "-c", "10", "-n", "2", "--file-prefix=" memtest, "--socket-mem="}; /* valid --socket-mem specified together with -m flag */ + char arg8_socket_mem[SOCKET_MEM_STRLEN]; const char *argv8[] = {prgname, "-c", "10", "-n", "2", - "--file-prefix=" memtest, "-m", DEFAULT_MEM_SIZE, "--socket-mem=2,2"}; - - /* construct an invalid socket mask with 2 megs on each socket plus - * extra 2 megs on socket that doesn't exist on current system */ - char invalid_socket_mem[SOCKET_MEM_STRLEN]; - char buf[SOCKET_MEM_STRLEN]; /* to avoid copying string onto itself */ + "--file-prefix=" memtest, "-m", DEFAULT_MEM_SIZE, + arg8_socket_mem}; #ifdef RTE_EXEC_ENV_BSDAPP - int i, num_sockets = 1; + int num_sockets = 1; #else - int i, num_sockets = RTE_MIN(get_number_of_sockets(), + int num_sockets = RTE_MIN(get_number_of_sockets(), RTE_MAX_NUMA_NODES); #endif @@ -1204,42 +1240,13 @@ test_memory_flags(void) return -1; } - snprintf(invalid_socket_mem, sizeof(invalid_socket_mem), "--socket-mem="); - - /* add one extra socket */ - for (i = 0; i < num_sockets + 1; i++) { - snprintf(buf, sizeof(buf), "%s%s", invalid_socket_mem, DEFAULT_MEM_SIZE); - strlcpy(invalid_socket_mem, buf, sizeof(invalid_socket_mem)); - - if (num_sockets + 1 - i > 1) { - snprintf(buf, sizeof(buf), "%s,", invalid_socket_mem); - strlcpy(invalid_socket_mem, buf, - sizeof(invalid_socket_mem)); - } - } - - /* construct a valid socket mask with 2 megs on each existing socket */ - char valid_socket_mem[SOCKET_MEM_STRLEN]; - - snprintf(valid_socket_mem, sizeof(valid_socket_mem), "--socket-mem="); - - /* add one extra socket */ - for (i = 0; i < num_sockets; i++) { - snprintf(buf, sizeof(buf), "%s%s", valid_socket_mem, DEFAULT_MEM_SIZE); - strlcpy(valid_socket_mem, 
buf, sizeof(valid_socket_mem)); - - if (num_sockets - i > 1) { - snprintf(buf, sizeof(buf), "%s,", valid_socket_mem); - strlcpy(valid_socket_mem, buf, - sizeof(valid_socket_mem)); - } - } - /* invalid --socket-mem flag (with extra socket) */ + char invalid_socket_mem[SOCKET_MEM_STRLEN]; const char *argv9[] = {prgname, "-c", "10", "-n", "2", "--file-prefix=" memtest, invalid_socket_mem}; /* valid --socket-mem flag */ + char valid_socket_mem[SOCKET_MEM_STRLEN]; const char *argv10[] = {prgname, "-c", "10", "-n", "2", "--file-prefix=" memtest, valid_socket_mem}; @@ -1257,34 +1264,49 @@ test_memory_flags(void) printf("Error - process failed with valid -m flag!\n"); return -1; } + + populate_socket_mem_param(num_sockets, "0", "", + arg2_socket_mem, sizeof(arg2_socket_mem)); if (launch_proc(argv2) != 0) { printf("Error - process failed with valid (zero) --socket-mem!\n"); return -1; } - if (launch_proc(argv3) == 0) { - printf("Error - process run ok with invalid " + if (num_sockets > 1) { + populate_socket_mem_param(num_sockets - 1, "2", ",", + arg3_socket_mem, sizeof(arg3_socket_mem)); + if (launch_proc(argv3) == 0) { + printf("Error - process run ok with invalid " "(incomplete) --socket-mem!\n"); - return -1; - } + return -1; + } - if (launch_proc(argv4) == 0) { - printf("Error - process run ok with invalid " + populate_socket_mem_param(num_sockets - 1, "2", ",Fred", + arg4_socket_mem, sizeof(arg4_socket_mem)); + if (launch_proc(argv4) == 0) { + printf("Error - process run ok with invalid " "(mixed with invalid input) --socket-mem!\n"); - return -1; - } + return -1; + } - if (launch_proc(argv5) == 0) { - printf("Error - process run ok with invalid " + populate_socket_mem_param(num_sockets - 1, "2", ",Fred0", + arg5_socket_mem, sizeof(arg5_socket_mem)); + if (launch_proc(argv5) == 0) { + printf("Error - process run ok with invalid " "(mixed with invalid input with a numeric value as " "last character) --socket-mem!\n"); - return -1; + return -1; + } } - if 
(launch_proc(argv6) == 0) { - printf("Error - process run ok with invalid " + if (num_sockets > 2) { + populate_socket_mem_param(num_sockets - 2, "2", ",,2", + arg6_socket_mem, sizeof(arg6_socket_mem)); + if (launch_proc(argv6) == 0) { + printf("Error - process run ok with invalid " "(with empty socket) --socket-mem!\n"); - return -1; + return -1; + } } if (launch_proc(argv7) == 0) { @@ -1292,16 +1314,22 @@ test_memory_flags(void) return -1; } + populate_socket_mem_param(num_sockets, "2", "", + arg8_socket_mem, sizeof(arg8_socket_mem)); if (launch_proc(argv8) == 0) { printf("Error - process run ok with --socket-mem and -m specified!\n"); return -1; } + populate_socket_mem_param(num_sockets + 1, "2", "", + invalid_socket_mem, sizeof(invalid_socket_mem)); if (launch_proc(argv9) == 0) { printf("Error - process run ok with extra socket in --socket-mem!\n"); return -1; } + populate_socket_mem_param(num_sockets, "2", "", + valid_socket_mem, sizeof(valid_socket_mem)); if (launch_proc(argv10) != 0) { printf("Error - process failed with valid --socket-mem!\n"); return -1; diff --git a/dpdk/test/test/test_flow_classify.c b/dpdk/test/test/test_flow_classify.c index 5f5beeee..90066713 100644 --- a/dpdk/test/test/test_flow_classify.c +++ b/dpdk/test/test/test_flow_classify.c @@ -124,7 +124,6 @@ static struct rte_flow_item udp_item_bad = { RTE_FLOW_ITEM_TYPE_UDP, static struct rte_flow_item end_item = { RTE_FLOW_ITEM_TYPE_END, 0, 0, 0 }; -static struct rte_flow_item end_item_bad = { -1, 0, 0, 0 }; /* test TCP pattern: * "eth / ipv4 src spec 1.2.3.4 src mask 255.255.255.00 dst spec 5.6.7.8 @@ -179,7 +178,6 @@ static struct rte_flow_action count_action = { RTE_FLOW_ACTION_TYPE_COUNT, static struct rte_flow_action count_action_bad = { -1, 0}; static struct rte_flow_action end_action = { RTE_FLOW_ACTION_TYPE_END, 0}; -static struct rte_flow_action end_action_bad = { -1, 0}; static struct rte_flow_action actions[2]; @@ -382,7 +380,7 @@ test_invalid_patterns(void) pattern[1] = 
ipv4_udp_item_1; pattern[2] = udp_item_bad; - pattern[3] = end_item_bad; + pattern[3] = end_item; ret = rte_flow_classify_validate(cls->cls, &attr, pattern, actions, &error); @@ -456,32 +454,6 @@ test_invalid_actions(void) return -1; } - actions[0] = count_action; - actions[1] = end_action_bad; - - ret = rte_flow_classify_validate(cls->cls, &attr, pattern, - actions, &error); - if (!ret) { - printf("Line %i: rte_flow_classify_validate", __LINE__); - printf(" should have failed!\n"); - return -1; - } - - rule = rte_flow_classify_table_entry_add(cls->cls, &attr, pattern, - actions, &key_found, &error); - if (rule) { - printf("Line %i: flow_classify_table_entry_add", __LINE__); - printf(" should have failed!\n"); - return -1; - } - - ret = rte_flow_classify_table_entry_delete(cls->cls, rule); - if (!ret) { - printf("Line %i: rte_flow_classify_table_entry_delete", - __LINE__); - printf("should have failed!\n"); - return -1; - } return 0; } diff --git a/dpdk/test/test/test_hash_readwrite_lf.c b/dpdk/test/test/test_hash_readwrite_lf.c index cbfd9322..432e86e0 100644 --- a/dpdk/test/test/test_hash_readwrite_lf.c +++ b/dpdk/test/test/test_hash_readwrite_lf.c @@ -75,7 +75,6 @@ static rte_atomic64_t gread_cycles; static rte_atomic64_t greads; static volatile uint8_t writer_done; -static volatile uint8_t multi_writer_done[4]; uint16_t enabled_core_ids[RTE_MAX_LCORE]; @@ -87,11 +86,9 @@ get_enabled_cores_list(void) uint32_t i = 0; uint16_t core_id; uint32_t max_cores = rte_lcore_count(); - for (core_id = 0; core_id < RTE_MAX_LCORE && i < max_cores; core_id++) { - if (rte_lcore_is_enabled(core_id)) { - enabled_core_ids[i] = core_id; - i++; - } + RTE_LCORE_FOREACH(core_id) { + enabled_core_ids[i] = core_id; + i++; } if (i != max_cores) { @@ -571,7 +568,6 @@ test_rwc_multi_writer(__attribute__((unused)) void *arg) for (i = offset; i < offset + tbl_rwc_test_param.single_insert; i++) rte_hash_add_key(tbl_rwc_test_param.h, tbl_rwc_test_param.keys_ks + i); - 
multi_writer_done[pos_core] = 1; return 0; } @@ -619,10 +615,9 @@ test_hash_add_no_ks_lookup_hit(struct rwc_perf *rwc_perf_results, int rwc_lf, rte_eal_remote_launch(test_rwc_reader, (void *)(uintptr_t)read_type, enabled_core_ids[i]); - rte_eal_mp_wait_lcore(); for (i = 1; i <= rwc_core_cnt[n]; i++) - if (lcore_config[i].ret < 0) + if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0) goto err; unsigned long long cycles_per_lookup = @@ -639,6 +634,7 @@ finish: return 0; err: + rte_eal_mp_wait_lcore(); rte_hash_free(tbl_rwc_test_param.h); return -1; } @@ -689,12 +685,11 @@ test_hash_add_no_ks_lookup_miss(struct rwc_perf *rwc_perf_results, int rwc_lf, enabled_core_ids[i]); ret = write_keys(key_shift); writer_done = 1; - rte_eal_mp_wait_lcore(); if (ret < 0) goto err; for (i = 1; i <= rwc_core_cnt[n]; i++) - if (lcore_config[i].ret < 0) + if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0) goto err; unsigned long long cycles_per_lookup = @@ -711,6 +706,7 @@ finish: return 0; err: + rte_eal_mp_wait_lcore(); rte_hash_free(tbl_rwc_test_param.h); return -1; } @@ -765,12 +761,11 @@ test_hash_add_ks_lookup_hit_non_sp(struct rwc_perf *rwc_perf_results, key_shift = 1; ret = write_keys(key_shift); writer_done = 1; - rte_eal_mp_wait_lcore(); if (ret < 0) goto err; for (i = 1; i <= rwc_core_cnt[n]; i++) - if (lcore_config[i].ret < 0) + if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0) goto err; unsigned long long cycles_per_lookup = @@ -787,6 +782,7 @@ finish: return 0; err: + rte_eal_mp_wait_lcore(); rte_hash_free(tbl_rwc_test_param.h); return -1; } @@ -822,7 +818,7 @@ test_hash_add_ks_lookup_hit_sp(struct rwc_perf *rwc_perf_results, int rwc_lf, } for (n = 0; n < NUM_TEST; n++) { unsigned int tot_lcore = rte_lcore_count(); - if (tot_lcore < rwc_core_cnt[n]) + if (tot_lcore < rwc_core_cnt[n] + 1) goto finish; printf("\nNumber of readers: %u\n", rwc_core_cnt[n]); @@ -841,12 +837,11 @@ test_hash_add_ks_lookup_hit_sp(struct rwc_perf *rwc_perf_results, int rwc_lf, key_shift = 1; ret = 
write_keys(key_shift); writer_done = 1; - rte_eal_mp_wait_lcore(); if (ret < 0) goto err; for (i = 1; i <= rwc_core_cnt[n]; i++) - if (lcore_config[i].ret < 0) + if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0) goto err; unsigned long long cycles_per_lookup = @@ -863,6 +858,7 @@ finish: return 0; err: + rte_eal_mp_wait_lcore(); rte_hash_free(tbl_rwc_test_param.h); return -1; } @@ -916,12 +912,11 @@ test_hash_add_ks_lookup_miss(struct rwc_perf *rwc_perf_results, int rwc_lf, int key_shift = 1; ret = write_keys(key_shift); writer_done = 1; - rte_eal_mp_wait_lcore(); if (ret < 0) goto err; for (i = 1; i <= rwc_core_cnt[n]; i++) - if (lcore_config[i].ret < 0) + if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0) goto err; unsigned long long cycles_per_lookup = @@ -937,6 +932,7 @@ finish: return 0; err: + rte_eal_mp_wait_lcore(); rte_hash_free(tbl_rwc_test_param.h); return -1; } @@ -989,8 +985,6 @@ test_hash_multi_add_lookup(struct rwc_perf *rwc_perf_results, int rwc_lf, rte_hash_reset(tbl_rwc_test_param.h); writer_done = 0; - for (i = 0; i < 4; i++) - multi_writer_done[i] = 0; key_shift = 0; if (write_keys(key_shift) < 0) goto err; @@ -1014,15 +1008,15 @@ test_hash_multi_add_lookup(struct rwc_perf *rwc_perf_results, int rwc_lf, } /* Wait for writers to complete */ - for (i = 0; i < rwc_core_cnt[m]; i++) - while - (multi_writer_done[i] == 0); + for (i = rwc_core_cnt[n] + 1; + i <= rwc_core_cnt[m] + rwc_core_cnt[n]; + i++) + rte_eal_wait_lcore(enabled_core_ids[i]); + writer_done = 1; - rte_eal_mp_wait_lcore(); - for (i = 1; i <= rwc_core_cnt[n]; i++) - if (lcore_config[i].ret < 0) + if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0) goto err; unsigned long long cycles_per_lookup = @@ -1041,6 +1035,7 @@ finish: return 0; err: + rte_eal_mp_wait_lcore(); rte_hash_free(tbl_rwc_test_param.h); return -1; } @@ -1065,6 +1060,9 @@ test_hash_readwrite_lf_main(void) setlocale(LC_NUMERIC, ""); + /* Reset tbl_rwc_test_param to discard values from previous run */ + 
memset(&tbl_rwc_test_param, 0, sizeof(tbl_rwc_test_param)); + if (rte_tm_supported()) htm = 1; else diff --git a/dpdk/test/test/test_rwlock.c b/dpdk/test/test/test_rwlock.c index 29171c42..7c9b919f 100644 --- a/dpdk/test/test/test_rwlock.c +++ b/dpdk/test/test/test_rwlock.c @@ -4,8 +4,10 @@ #include #include +#include #include #include +#include #include #include @@ -44,6 +46,7 @@ static rte_rwlock_t sl; static rte_rwlock_t sl_tab[RTE_MAX_LCORE]; +static rte_atomic32_t synchro; static int test_rwlock_per_core(__attribute__((unused)) void *arg) @@ -65,6 +68,79 @@ test_rwlock_per_core(__attribute__((unused)) void *arg) return 0; } +static rte_rwlock_t lk = RTE_RWLOCK_INITIALIZER; +static volatile uint64_t rwlock_data; +static uint64_t time_count[RTE_MAX_LCORE] = {0}; + +#define MAX_LOOP 10000 +#define TEST_RWLOCK_DEBUG 0 + +static int +load_loop_fn(__attribute__((unused)) void *arg) +{ + uint64_t time_diff = 0, begin; + uint64_t hz = rte_get_timer_hz(); + uint64_t lcount = 0; + const unsigned int lcore = rte_lcore_id(); + + /* wait synchro for slaves */ + if (lcore != rte_get_master_lcore()) + while (rte_atomic32_read(&synchro) == 0) + ; + + begin = rte_rdtsc_precise(); + while (lcount < MAX_LOOP) { + rte_rwlock_write_lock(&lk); + ++rwlock_data; + rte_rwlock_write_unlock(&lk); + + rte_rwlock_read_lock(&lk); + if (TEST_RWLOCK_DEBUG && !(lcount % 100)) + printf("Core [%u] rwlock_data = %"PRIu64"\n", + lcore, rwlock_data); + rte_rwlock_read_unlock(&lk); + + lcount++; + /* delay to make lock duty cycle slightly realistic */ + rte_pause(); + } + + time_diff = rte_rdtsc_precise() - begin; + time_count[lcore] = time_diff * 1000000 / hz; + return 0; +} + +static int +test_rwlock_perf(void) +{ + unsigned int i; + uint64_t total = 0; + + printf("\nRwlock Perf Test on %u cores...\n", rte_lcore_count()); + + /* clear synchro and start slaves */ + rte_atomic32_set(&synchro, 0); + if (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MASTER) < 0) + return -1; + + /* start synchro 
and launch test on master */ + rte_atomic32_set(&synchro, 1); + load_loop_fn(NULL); + + rte_eal_mp_wait_lcore(); + + RTE_LCORE_FOREACH(i) { + printf("Core [%u] cost time = %"PRIu64" us\n", + i, time_count[i]); + total += time_count[i]; + } + + printf("Total cost time = %"PRIu64" us\n", total); + memset(time_count, 0, sizeof(time_count)); + + return 0; +} + static int test_rwlock(void) { @@ -95,6 +171,9 @@ test_rwlock(void) rte_eal_mp_wait_lcore(); + if (test_rwlock_perf() < 0) + return -1; + return 0; } diff --git a/dpdk/usertools/dpdk-devbind.py b/dpdk/usertools/dpdk-devbind.py index a9cd66a7..1c9c0187 100755 --- a/dpdk/usertools/dpdk-devbind.py +++ b/dpdk/usertools/dpdk-devbind.py @@ -203,6 +203,7 @@ def get_pci_device_details(dev_id, probe_lspci): def clear_data(): '''This function clears any old data''' + global devices devices = {} def get_device_details(devices_type): diff --git a/dpdk/usertools/dpdk-telemetry-client.py b/dpdk/usertools/dpdk-telemetry-client.py index 6dcf62ba..e0587022 100644 --- a/dpdk/usertools/dpdk-telemetry-client.py +++ b/dpdk/usertools/dpdk-telemetry-client.py @@ -13,6 +13,11 @@ API_REG = "{\"action\":1,\"command\":\"clients\",\"data\":{\"client_path\":\"" API_UNREG = "{\"action\":2,\"command\":\"clients\",\"data\":{\"client_path\":\"" DEFAULT_FP = "/var/run/dpdk/default_client" +try: + raw_input # Python 2 +except NameError: + raw_input = input # Python 3 + class Socket: def __init__(self): @@ -71,7 +76,7 @@ class Client: def repeatedlyRequestMetrics(self, sleep_time): # Recursively requests metrics for given client print("\nPlease enter the number of times you'd like to continuously request Metrics:") - n_requests = int(input("\n:")) + n_requests = int(raw_input("\n:")) print("\033[F") #Removes the user input from screen, cleans it up print("\033[K") for i in range(n_requests): @@ -86,7 +91,7 @@ class Client: print("[3] Unregister client") try: - self.choice = int(input("\n:")) + self.choice = int(raw_input("\n:")) print("\033[F") 
#Removes the user input for screen, cleans it up print("\033[K") if self.choice == 1: