Bonding: supported, but it does not work in multi-process mode yet; to be fixed.

dev
fengbojiang(姜凤波) 2019-09-04 21:17:10 +08:00
parent aa174b6e9a
commit c0f666848d
4 changed files with 401 additions and 189 deletions


@@ -41,11 +41,20 @@ pkt_tx_delay=100
# examples
# 0-3 ports 0,1,2,3 are enabled
# 1-3,4,7 ports 1,2,3,4,7 are enabled
#
# If bonding is used, configure the bonding port id in port_list
# and do not configure the slave port ids in port_list.
# For example, if port 0 and port 1 are bonded into bonding port 2,
# set `port_list=2` and configure a `[port2]` section.
port_list=0
# Number of vdev.
nb_vdev=0
# Number of bond.
nb_bond=0
# Port config section
# Correspond to dpdk.port_list's index: port0, port1...
[port0]
@@ -56,7 +65,12 @@ gateway=192.168.1.1
# lcore list used to handle this port
# the format is same as port_list
# lcore_list= 0
#lcore_list=0
# bonding slave port list used to handle this port;
# must be configured when this port is a bonding port.
# the format is same as port_list
#slave_port_list=0,1
# Packet capture path, this will hurt performance
#pcap=./a.pcap
@@ -77,6 +91,20 @@ gateway=192.168.1.1
#mac=00:00:00:00:00:01
#cq=0
# bond config section
# See http://doc.dpdk.org/guides/prog_guide/link_bonding_poll_mode_drv_lib.html
[bond0]
#mode=4
#slave=0000:0a:00.0,slave=0000:0a:00.1
#primary=0000:0a:00.0
#mac=f0:98:38:xx:xx:xx
## optional arguments
#socket_id=0
#xmit_policy=l23
#lsc_poll_period_ms=100
#up_delay=10
#down_delay=50
# Kni config: if enabled and method=reject,
# all packets that do not belong to the following tcp_port and udp_port
# will be forwarded to the kernel; if method=accept, all packets that belong to


@@ -337,6 +337,30 @@ parse_port_list(struct ff_config *cfg, const char *v_str)
return res;
}
static int
parse_port_slave_list(struct ff_port_cfg *cfg, const char *v_str)
{
    int res;
    uint16_t ports[RTE_MAX_ETHPORTS];
    int sz = RTE_MAX_ETHPORTS;

    res = __parse_config_list(ports, &sz, v_str);
    if (!res) {
        return res;
    }

    uint16_t *portid_list = malloc(sizeof(uint16_t) * sz);
    if (portid_list == NULL) {
        fprintf(stderr, "parse_port_slave_list malloc failed\n");
        return 0;
    }
    memcpy(portid_list, ports, sz * sizeof(uint16_t));

    cfg->slave_portid_list = portid_list;
    cfg->nb_slaves = sz;

    return res;
}
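For reference, slave_port_list uses the same list syntax as port_list and lcore_list: comma-separated ids with optional ranges, so "0-3,7" selects 0,1,2,3,7. Below is a minimal, self-contained sketch of those semantics; parse_list() is a hypothetical stand-in for __parse_config_list(), which this diff does not show.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Hypothetical stand-in for __parse_config_list(): fills out[] from a
 * "0-3,7"-style list, returns 1 on success and 0 on a parse error. */
static int
parse_list(uint16_t *out, int *sz, const char *s)
{
    int n = 0, max = *sz;
    char *end;

    while (*s != '\0') {
        long lo = strtol(s, &end, 10);
        long hi = lo;
        if (end == s)
            return 0;                  /* not a number */
        if (*end == '-') {             /* range "lo-hi" */
            s = end + 1;
            hi = strtol(s, &end, 10);
            if (end == s || hi < lo)
                return 0;
        }
        for (long v = lo; v <= hi; v++) {
            if (n >= max)
                return 0;              /* caller's buffer is full */
            out[n++] = (uint16_t)v;
        }
        if (*end == ',')
            end++;
        else if (*end != '\0')
            return 0;                  /* unexpected character */
        s = end;
    }
    *sz = n;
    return 1;
}

int main(void)
{
    uint16_t ports[32];
    int sz = 32;

    if (parse_list(ports, &sz, "0-3,7")) {
        for (int i = 0; i < sz; i++)
            printf("%u ", ports[i]);   /* prints: 0 1 2 3 7 */
        printf("\n");
    }
    return 0;
}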
static int
port_cfg_handler(struct ff_config *cfg, const char *section,
    const char *name, const char *value) {

@@ -397,6 +421,8 @@ port_cfg_handler(struct ff_config *cfg, const char *section,
        cur->pcap = strdup(value);
    } else if (strcmp(name, "lcore_list") == 0) {
        return parse_port_lcore_list(cur, value);
    } else if (strcmp(name, "slave_port_list") == 0) {
        return parse_port_slave_list(cur, value);
    }

    return 1;
@@ -456,6 +482,65 @@ vdev_cfg_handler(struct ff_config *cfg, const char *section,
    return 1;
}

static int
bond_cfg_handler(struct ff_config *cfg, const char *section,
    const char *name, const char *value) {
    if (cfg->dpdk.nb_bond == 0) {
        fprintf(stderr, "bond_cfg_handler: must config dpdk.nb_bond first\n");
        return 0;
    }

    if (cfg->dpdk.bond_cfgs == NULL) {
        struct ff_bond_cfg *vc = calloc(RTE_MAX_ETHPORTS, sizeof(struct ff_bond_cfg));
        if (vc == NULL) {
            fprintf(stderr, "ff_bond_cfg calloc failed\n");
            return 0;
        }
        cfg->dpdk.bond_cfgs = vc;
    }

    int bondid;
    int ret = sscanf(section, "bond%d", &bondid);
    if (ret != 1) {
        fprintf(stderr, "bond_cfg_handler section[%s] error\n", section);
        return 0;
    }

    /* just return true if bondid >= nb_bond because it has no effect */
    if (bondid >= cfg->dpdk.nb_bond) {
        fprintf(stderr, "bond_cfg_handler section[%s] bigger than max bond id\n", section);
        return 1;
    }

    struct ff_bond_cfg *cur = &cfg->dpdk.bond_cfgs[bondid];
    if (cur->name == NULL) {
        cur->name = strdup(section);
        cur->bond_id = bondid;
    }

    if (strcmp(name, "mode") == 0) {
        cur->mode = atoi(value);
    } else if (strcmp(name, "slave") == 0) {
        cur->slave = strdup(value);
    } else if (strcmp(name, "primary") == 0) {
        cur->primary = strdup(value);
    } else if (strcmp(name, "socket_id") == 0) {
        cur->socket_id = atoi(value);
    } else if (strcmp(name, "mac") == 0) {
        cur->bond_mac = strdup(value);
    } else if (strcmp(name, "xmit_policy") == 0) {
        cur->xmit_policy = strdup(value);
    } else if (strcmp(name, "lsc_poll_period_ms") == 0) {
        cur->lsc_poll_period_ms = atoi(value);
    } else if (strcmp(name, "up_delay") == 0) {
        cur->up_delay = atoi(value);
    } else if (strcmp(name, "down_delay") == 0) {
        cur->down_delay = atoi(value);
    }

    return 1;
}
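To make the ini-to-struct mapping concrete, here is a sketch of what repeated bond_cfg_handler() calls would accumulate for the sample [bond0] section in config.ini. The struct is a local mirror of ff_bond_cfg from ff_config.h, and the values are the commented-out examples above, not defaults; the real handler strdup()s the strings.

#include <stdint.h>

/* Local mirror of struct ff_bond_cfg (see the ff_config.h hunk below). */
struct ff_bond_cfg {
    char *name;
    char *slave;
    char *primary;
    char *bond_mac;
    char *xmit_policy;
    uint8_t bond_id;
    uint8_t mode;
    uint8_t socket_id;
    uint8_t lsc_poll_period_ms;
    uint16_t up_delay;
    uint16_t down_delay;
};

/* What the handler accumulates for the sample [bond0] section; note the
 * ini parser hands over everything after the first "slave=" as one value. */
static struct ff_bond_cfg example_bond = {
    .name               = "bond0",
    .bond_id            = 0,
    .mode               = 4,    /* 802.3ad link aggregation */
    .slave              = "0000:0a:00.0,slave=0000:0a:00.1",
    .primary            = "0000:0a:00.0",
    .bond_mac           = "f0:98:38:xx:xx:xx",
    .xmit_policy        = "l23",
    .socket_id          = 0,
    .lsc_poll_period_ms = 100,
    .up_delay           = 10,
    .down_delay         = 50,
};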
static int
ini_parse_handler(void* user, const char* section, const char* name,

@@ -481,6 +566,8 @@ ini_parse_handler(void* user, const char* section, const char* name,
        return parse_port_list(pconfig, value);
    } else if (MATCH("dpdk", "nb_vdev")) {
        pconfig->dpdk.nb_vdev = atoi(value);
    } else if (MATCH("dpdk", "nb_bond")) {
        pconfig->dpdk.nb_bond = atoi(value);
    } else if (MATCH("dpdk", "promiscuous")) {
        pconfig->dpdk.promiscuous = atoi(value);
    } else if (MATCH("dpdk", "numa_on")) {

@@ -519,6 +606,8 @@ ini_parse_handler(void* user, const char* section, const char* name,
        return port_cfg_handler(pconfig, section, name, value);
    } else if (strncmp(section, "vdev", 4) == 0) {
        return vdev_cfg_handler(pconfig, section, name, value);
    } else if (strncmp(section, "bond", 4) == 0) {
        return bond_cfg_handler(pconfig, section, name, value);
    }

    return 1;
@@ -584,10 +673,57 @@ dpdk_args_setup(struct ff_config *cfg)
        dpdk_argv[n++] = strdup(temp);
    }

    if (cfg->dpdk.nb_bond) {
        for (i = 0; i < cfg->dpdk.nb_bond; i++) {
            struct ff_bond_cfg *bcfg = &cfg->dpdk.bond_cfgs[i];

            dpdk_argv[n++] = strdup("--vdev");

            /* Append devargs at an explicit offset; sprintf-ing a buffer
               into itself ("%s,...", temp) is undefined behavior. */
            int len = sprintf(temp, "net_bonding%d,mode=%d,slave=%s",
                bcfg->bond_id, bcfg->mode, bcfg->slave);
            if (bcfg->primary) {
                len += sprintf(temp + len, ",primary=%s", bcfg->primary);
            }
            if (bcfg->socket_id) {
                len += sprintf(temp + len, ",socket_id=%d", bcfg->socket_id);
            }
            if (bcfg->bond_mac) {
                len += sprintf(temp + len, ",mac=%s", bcfg->bond_mac);
            }
            if (bcfg->xmit_policy) {
                len += sprintf(temp + len, ",xmit_policy=%s", bcfg->xmit_policy);
            }
            if (bcfg->lsc_poll_period_ms) {
                len += sprintf(temp + len, ",lsc_poll_period_ms=%d",
                    bcfg->lsc_poll_period_ms);
            }
            if (bcfg->up_delay) {
                len += sprintf(temp + len, ",up_delay=%d", bcfg->up_delay);
            }
            if (bcfg->down_delay) {
                len += sprintf(temp + len, ",down_delay=%d", bcfg->down_delay);
            }
            dpdk_argv[n++] = strdup(temp);
        }
    }

    dpdk_argc = n;

    for (i = 0; i < n; i++)
        printf("%s ", dpdk_argv[i]);

    return n;
}
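With the sample [bond0] section enabled, the loop above adds one extra "--vdev" argument pair for EAL. Below is a standalone sketch that prints the resulting devargs string; the option names (mode, slave, primary, xmit_policy) are DPDK net_bonding vdev options, per the link_bonding guide referenced in config.ini.

#include <stdio.h>

int main(void)
{
    char temp[512];

    /* Mirrors the argument built above for the sample [bond0] section. */
    int len = snprintf(temp, sizeof(temp), "net_bonding%d,mode=%d,slave=%s",
        0, 4, "0000:0a:00.0,slave=0000:0a:00.1");
    len += snprintf(temp + len, sizeof(temp) - len, ",primary=%s",
        "0000:0a:00.0");
    len += snprintf(temp + len, sizeof(temp) - len, ",xmit_policy=%s", "l23");

    /* Prints:
     * --vdev net_bonding0,mode=4,slave=0000:0a:00.0,slave=0000:0a:00.1,primary=0000:0a:00.0,xmit_policy=l23
     */
    printf("--vdev %s\n", temp);
    return 0;
}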


@@ -62,7 +62,9 @@ struct ff_port_cfg {
    char *pcap;

    int nb_lcores;
    int nb_slaves;
    uint16_t lcore_list[DPDK_MAX_LCORE];
    uint16_t *slave_portid_list;
};
struct ff_vdev_cfg {

@@ -76,7 +78,19 @@ struct ff_vdev_cfg {
    uint16_t queue_size;
};

struct ff_bond_cfg {
    char *name;
    char *slave;
    char *primary;
    char *bond_mac;
    char *xmit_policy;
    uint8_t bond_id;
    uint8_t mode;
    uint8_t socket_id;
    uint8_t lsc_poll_period_ms;
    uint16_t up_delay;
    uint16_t down_delay;
};
struct ff_freebsd_cfg {
    char *name;

@@ -105,6 +119,7 @@ struct ff_config {
        int proc_id;
        int promiscuous;
        int nb_vdev;
        int nb_bond;
        int numa_on;
        int tso;
        int vlan_strip;

@@ -124,6 +139,7 @@ struct ff_config {
        // MAP(portid => struct ff_port_cfg*)
        struct ff_port_cfg *port_cfgs;
        struct ff_vdev_cfg *vdev_cfgs;
        struct ff_bond_cfg *bond_cfgs;
    } dpdk;

    struct {


@@ -52,6 +52,7 @@
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_eth_bond.h>
#include "ff_dpdk_if.h"
#include "ff_dpdk_pcap.h"
@@ -107,6 +108,8 @@ static dispatch_func_t packet_dispatcher;
static uint16_t rss_reta_size[RTE_MAX_ETHPORTS];
#define BOND_DRIVER_NAME "net_bonding"
static inline int send_single_packet(struct rte_mbuf *m, uint8_t port);
struct ff_msg_ring {
@@ -289,17 +292,18 @@ init_mem_pool(void)
    uint32_t nb_lcores = ff_global_cfg.dpdk.nb_procs;
    uint32_t nb_tx_queue = nb_lcores;
    uint32_t nb_rx_queue = lcore_conf.nb_rx_queue * nb_lcores;
    uint16_t max_portid = ff_global_cfg.dpdk.max_portid;

    unsigned nb_mbuf = RTE_ALIGN_CEIL (
        (nb_rx_queue * (max_portid + 1) * 2 * RX_QUEUE_SIZE +
        nb_ports * (max_portid + 1) * 2 * nb_lcores * MAX_PKT_BURST +
        nb_ports * (max_portid + 1) * 2 * nb_tx_queue * TX_QUEUE_SIZE +
        nb_lcores * MEMPOOL_CACHE_SIZE +
#ifdef FF_KNI
        nb_ports * KNI_MBUF_MAX +
        nb_ports * KNI_QUEUE_SIZE +
#endif
        nb_lcores * nb_ports * DISPATCH_RING_SIZE),
        (unsigned)8192);

    unsigned socketid = 0;
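The pool sizing above now scales with the highest configured port id instead of the number of entries in port_list, so bonding slave ports, which are deliberately kept out of port_list, still get mbuf headroom; the factor of 2 reads as a safety margin. A toy computation with assumed stand-in constants (the real values are defined in ff_dpdk_if.c):

#include <stdio.h>

/* Assumed stand-ins for f-stack's real constants. */
#define RX_QUEUE_SIZE      512
#define TX_QUEUE_SIZE      512
#define MAX_PKT_BURST      32
#define MEMPOOL_CACHE_SIZE 256
#define DISPATCH_RING_SIZE 2048
#define ALIGN_CEIL(v, a)   (((v) + (a) - 1) / (a) * (a))

int main(void)
{
    /* One process, ports 0 and 1 bonded into port 2 => max_portid = 2. */
    unsigned nb_ports = 1, nb_lcores = 1, max_portid = 2;
    unsigned nb_tx_queue = nb_lcores;
    unsigned nb_rx_queue = 1 * nb_lcores;

    unsigned nb_mbuf = ALIGN_CEIL(
        nb_rx_queue * (max_portid + 1) * 2 * RX_QUEUE_SIZE +
        nb_ports * (max_portid + 1) * 2 * nb_lcores * MAX_PKT_BURST +
        nb_ports * (max_portid + 1) * 2 * nb_tx_queue * TX_QUEUE_SIZE +
        nb_lcores * MEMPOOL_CACHE_SIZE +
        nb_lcores * nb_ports * DISPATCH_RING_SIZE,   /* KNI terms omitted */
        8192u);

    printf("nb_mbuf = %u\n", nb_mbuf);   /* 16384 with these numbers */
    return 0;
}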
@@ -528,207 +532,235 @@ init_port_start(void)
    int nb_ports = ff_global_cfg.dpdk.nb_ports;
    unsigned socketid = 0;
    struct rte_mempool *mbuf_pool;
    uint16_t i, j;

    for (i = 0; i < nb_ports; i++) {
        uint16_t port_id, u_port_id = ff_global_cfg.dpdk.portid_list[i];
        struct ff_port_cfg *pconf = &ff_global_cfg.dpdk.port_cfgs[u_port_id];
        uint16_t nb_queues = pconf->nb_lcores;

        /* init the bonding port's slave ports first, then the port itself */
        for (j = 0; j <= pconf->nb_slaves; j++) {
            if (j < pconf->nb_slaves) {
                port_id = pconf->slave_portid_list[j];
                printf("To init %s's slave %d, port[%d]\n",
                    ff_global_cfg.dpdk.bond_cfgs->name,
                    j, port_id);
            } else {
                port_id = u_port_id;
            }

            struct rte_eth_dev_info dev_info;
            struct rte_eth_conf port_conf = {0};
            struct rte_eth_rxconf rxq_conf;
            struct rte_eth_txconf txq_conf;

            rte_eth_dev_info_get(port_id, &dev_info);

            if (nb_queues > dev_info.max_rx_queues) {
                rte_exit(EXIT_FAILURE, "num_procs[%d] bigger than max_rx_queues[%d]\n",
                    nb_queues,
                    dev_info.max_rx_queues);
            }

            if (nb_queues > dev_info.max_tx_queues) {
                rte_exit(EXIT_FAILURE, "num_procs[%d] bigger than max_tx_queues[%d]\n",
                    nb_queues,
                    dev_info.max_tx_queues);
            }

            /* Set RSS mode */
            uint64_t default_rss_hf = ETH_RSS_PROTO_MASK;
            port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
            port_conf.rx_adv_conf.rss_conf.rss_hf = default_rss_hf;
            if (dev_info.hash_key_size == 52) {
                port_conf.rx_adv_conf.rss_conf.rss_key = default_rsskey_52bytes;
                port_conf.rx_adv_conf.rss_conf.rss_key_len = 52;
                use_rsskey_52bytes = 1;
            } else {
                port_conf.rx_adv_conf.rss_conf.rss_key = default_rsskey_40bytes;
                port_conf.rx_adv_conf.rss_conf.rss_key_len = 40;
            }
            port_conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
            if (port_conf.rx_adv_conf.rss_conf.rss_hf != ETH_RSS_PROTO_MASK) {
                printf("Port %u modified RSS hash function based on hardware support,"
                    "requested:%#"PRIx64" configured:%#"PRIx64"\n",
                    port_id, default_rss_hf,
                    port_conf.rx_adv_conf.rss_conf.rss_hf);
            }

            if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
                port_conf.txmode.offloads |=
                    DEV_TX_OFFLOAD_MBUF_FAST_FREE;
            }

            /* Set Rx VLAN stripping */
            if (ff_global_cfg.dpdk.vlan_strip) {
                if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
                    port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
                }
            }

            /* Enable HW CRC stripping */
            port_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;

            /* FIXME: Enable TCP LRO ?*/
#if 0
            if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
                printf("LRO is supported\n");
                port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TCP_LRO;
                pconf->hw_features.rx_lro = 1;
            }
#endif

            /* Set Rx checksum checking */
            if ((dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) &&
                (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) &&
                (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM)) {
                printf("RX checksum offload supported\n");
                port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
                pconf->hw_features.rx_csum = 1;
            }

            if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)) {
                printf("TX ip checksum offload supported\n");
                port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
                pconf->hw_features.tx_csum_ip = 1;
            }

            if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) &&
                (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM)) {
                printf("TX TCP&UDP checksum offload supported\n");
                port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM;
                pconf->hw_features.tx_csum_l4 = 1;
            }

            if (ff_global_cfg.dpdk.tso) {
                if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
                    printf("TSO is supported\n");
                    port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_TSO;
                    pconf->hw_features.tx_tso = 1;
                }
            } else {
                printf("TSO is disabled\n");
            }

            if (dev_info.reta_size) {
                /* reta size must be power of 2 */
                assert((dev_info.reta_size & (dev_info.reta_size - 1)) == 0);

                rss_reta_size[port_id] = dev_info.reta_size;
                printf("port[%d]: rss table size: %d\n", port_id,
                    dev_info.reta_size);
            }

            if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                continue;
            }

            int ret = rte_eth_dev_configure(port_id, nb_queues, nb_queues, &port_conf);
            if (ret != 0) {
                return ret;
            }

            static uint16_t nb_rxd = RX_QUEUE_SIZE;
            static uint16_t nb_txd = TX_QUEUE_SIZE;

            ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
            if (ret < 0)
                printf("Could not adjust number of descriptors "
                    "for port%u (%d)\n", (unsigned)port_id, ret);

            uint16_t q;
            for (q = 0; q < nb_queues; q++) {
                if (numa_on) {
                    uint16_t lcore_id = lcore_conf.port_cfgs[port_id].lcore_list[q];
                    socketid = rte_lcore_to_socket_id(lcore_id);
                }
                mbuf_pool = pktmbuf_pool[socketid];

                txq_conf = dev_info.default_txconf;
                txq_conf.offloads = port_conf.txmode.offloads;
                ret = rte_eth_tx_queue_setup(port_id, q, nb_txd,
                    socketid, &txq_conf);
                if (ret < 0) {
                    return ret;
                }

                rxq_conf = dev_info.default_rxconf;
                rxq_conf.offloads = port_conf.rxmode.offloads;
                ret = rte_eth_rx_queue_setup(port_id, q, nb_rxd,
                    socketid, &rxq_conf, mbuf_pool);
                if (ret < 0) {
                    return ret;
                }
            }

            struct ether_addr addr;
            rte_eth_macaddr_get(port_id, &addr);
            printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
                " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
                (unsigned)port_id,
                addr.addr_bytes[0], addr.addr_bytes[1],
                addr.addr_bytes[2], addr.addr_bytes[3],
                addr.addr_bytes[4], addr.addr_bytes[5]);
            rte_memcpy(pconf->mac,
                addr.addr_bytes, ETHER_ADDR_LEN);

            if (strncmp(dev_info.driver_name, BOND_DRIVER_NAME,
                    strlen(dev_info.driver_name)) == 0) {
                int mode, count, x;
                uint16_t slaves[RTE_MAX_ETHPORTS], len = RTE_MAX_ETHPORTS;

                mode = rte_eth_bond_mode_get(port_id);
                printf("Port %u, bond mode:%d\n", port_id, mode);

                count = rte_eth_bond_slaves_get(port_id, slaves, len);
                printf("Port %u, %s's slave ports count:%d\n", port_id,
                    ff_global_cfg.dpdk.bond_cfgs->name, count);
                for (x = 0; x < count; x++) {
                    printf("Port %u, %s's slave port[%u]\n", port_id,
                        ff_global_cfg.dpdk.bond_cfgs->name, slaves[x]);
                }
            }

            ret = rte_eth_dev_start(port_id);
            if (ret < 0) {
                return ret;
            }

            if (nb_queues > 1) {
                /* set HW rss hash function to Toeplitz. */
                if (!rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_HASH)) {
                    struct rte_eth_hash_filter_info info = {0};
                    info.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG;
                    info.info.global_conf.hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;

                    if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_HASH,
                            RTE_ETH_FILTER_SET, &info) < 0) {
                        rte_exit(EXIT_FAILURE, "port[%d] set hash func failed\n",
                            port_id);
                    }
                }

                set_rss_table(port_id, dev_info.reta_size, nb_queues);
            }

            /* Enable RX in promiscuous mode for the Ethernet device. */
            if (ff_global_cfg.dpdk.promiscuous) {
                rte_eth_promiscuous_enable(port_id);
                ret = rte_eth_promiscuous_get(port_id);
                if (ret == 1) {
                    printf("set port %u to promiscuous mode ok\n", port_id);
                } else {
                    printf("set port %u to promiscuous mode error\n", port_id);
                }
            }

            /* Enable pcap dump */
            if (pconf->pcap) {
                ff_enable_pcap(pconf->pcap);
            }
        }
    }