use 4 spaces instead of tabs

dev
10077240 2019-04-01 15:42:01 +08:00
parent 6149c27496
commit 04cef02d60
5 changed files with 224 additions and 224 deletions

View File

@@ -351,12 +351,12 @@ init_mem_pool(void)
     }
 #ifdef FF_USE_PAGE_ARRAY
-	nb_mbuf = RTE_MAX (
-		nb_ports*nb_lcores*MAX_PKT_BURST +
-		nb_ports*nb_tx_queue*TX_QUEUE_SIZE +
-		nb_lcores*MEMPOOL_CACHE_SIZE,
-		(unsigned)4096);
-	ff_init_ref_pool(nb_mbuf, socketid);
+    nb_mbuf = RTE_MAX (
+        nb_ports*nb_lcores*MAX_PKT_BURST +
+        nb_ports*nb_tx_queue*TX_QUEUE_SIZE +
+        nb_lcores*MEMPOOL_CACHE_SIZE,
+        (unsigned)4096);
+    ff_init_ref_pool(nb_mbuf, socketid);
 #endif
 }
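
As a sanity check on the sizing above: the RTE_MAX() clamp keeps ff_ref_pool from going below 4096 mbufs even for small configurations. A stand-alone calculation with made-up values (2 ports, 4 lcores, 1 TX queue per port; the three macros below are assumptions for the example, not values taken from this commit):

    #include <stdio.h>

    /* Hypothetical values, for illustration only; the real ones come from the
     * DPDK/f-stack configuration at run time. */
    #define MAX_PKT_BURST      32
    #define TX_QUEUE_SIZE      512
    #define MEMPOOL_CACHE_SIZE 256

    int main(void)
    {
        unsigned nb_ports = 2, nb_lcores = 4, nb_tx_queue = 1;
        unsigned nb_mbuf = nb_ports * nb_lcores * MAX_PKT_BURST +      /* 256  */
                           nb_ports * nb_tx_queue * TX_QUEUE_SIZE +    /* 1024 */
                           nb_lcores * MEMPOOL_CACHE_SIZE;             /* 1024 */
        if (nb_mbuf < 4096)        /* same clamp as RTE_MAX(..., (unsigned)4096) */
            nb_mbuf = 4096;
        printf("ff_ref_pool size: %u mbufs\n", nb_mbuf);   /* 2304 -> clamped to 4096 */
        return 0;
    }
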
@@ -794,7 +794,7 @@ ff_dpdk_init(int argc, char **argv)
 #endif
 #ifdef FF_USE_PAGE_ARRAY
-	ff_mmap_init();
+    ff_mmap_init();
 #endif
@@ -1238,8 +1238,8 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
     for (i = 0; i < ret; i++) {
         ff_traffic.tx_bytes += rte_pktmbuf_pkt_len(m_table[i]);
 #ifdef FF_USE_PAGE_ARRAY
-		if (qconf->tx_mbufs[port].bsd_m_table[i])
-			ff_enq_tx_bsdmbuf(port, qconf->tx_mbufs[port].bsd_m_table[i], m_table[i]->nb_segs);
+        if (qconf->tx_mbufs[port].bsd_m_table[i])
+            ff_enq_tx_bsdmbuf(port, qconf->tx_mbufs[port].bsd_m_table[i], m_table[i]->nb_segs);
 #endif
     }
     if (unlikely(ret < n)) {

View File

@@ -57,12 +57,12 @@ ff_mmap(void *addr, uint64_t len, int prot, int flags, int fd, uint64_t offset)
     int host_flags;
 #ifdef FF_USE_PAGE_ARRAY
-	if( len == 4096 ){
-		return ff_mem_get_page();
-	}
-	else
+    if( len == 4096 ){
+        return ff_mem_get_page();
+    }
+    else
 #endif
-	{
+    {
     assert(ff_PROT_NONE == PROT_NONE);
     host_prot = 0;
@@ -88,9 +88,9 @@ int
 ff_munmap(void *addr, uint64_t len)
 {
 #ifdef FF_USE_PAGE_ARRAY
-	if ( len == 4096 ){
-		return ff_mem_free_addr(addr);
-	}
+    if ( len == 4096 ){
+        return ff_mem_free_addr(addr);
+    }
 #endif
     //rte_free(addr);
     //return 0;
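
Both hunks above special-case a single-page (4096-byte) request: the FreeBSD stack gets its page from the pre-mapped page array instead of the host kernel, and returns it there on unmap. A minimal sketch of that dispatch shape, with the page-array calls stubbed out (illustrative only, not f-stack's actual wrapper):

    #include <stdint.h>
    #include <sys/mman.h>

    /* Stubs standing in for f-stack's page-array allocator. */
    static void *ff_mem_get_page(void)        { return NULL; /* would pop a 4 KB page */ }
    static int   ff_mem_free_addr(void *addr) { (void)addr; return 0; /* would push it back */ }

    void *wrapped_mmap(void *addr, uint64_t len, int prot, int flags, int fd, uint64_t off)
    {
        if (len == 4096)                       /* one page: serve it from the page array */
            return ff_mem_get_page();
        return mmap(addr, len, prot, flags, fd, (off_t)off);   /* everything else: host mmap */
    }

    int wrapped_munmap(void *addr, uint64_t len)
    {
        if (len == 4096)                       /* one page: hand it back to the array */
            return ff_mem_free_addr(addr);
        return munmap(addr, len);
    }
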

View File

@@ -27,7 +27,7 @@
 #include <unistd.h>
 #include <sys/mman.h>
 #include <errno.h>
 #include <rte_common.h>
 #include <rte_byteorder.h>
 #include <rte_log.h>
@@ -61,11 +61,11 @@
 #include "ff_api.h"
 #include "ff_memory.h"
-#define PAGE_SIZE 4096
-#define PAGE_SHIFT 12
-#define PAGE_MASK (PAGE_SIZE - 1)
-#define trunc_page(x) ((x) & ~PAGE_MASK)
-#define round_page(x) (((x) + PAGE_MASK) & ~PAGE_MASK)
+#define PAGE_SIZE 4096
+#define PAGE_SHIFT 12
+#define PAGE_MASK (PAGE_SIZE - 1)
+#define trunc_page(x) ((x) & ~PAGE_MASK)
+#define round_page(x) (((x) + PAGE_MASK) & ~PAGE_MASK)
 extern struct rte_mempool *pktmbuf_pool[NB_SOCKETS];
 extern struct lcore_conf lcore_conf;
@@ -75,15 +75,15 @@ extern struct lcore_conf lcore_conf;
 // ff_ref_pool allocate rte_mbuf without data space, which data point to bsd mbuf's data address.
 static struct rte_mempool *ff_ref_pool[NB_SOCKETS];
-#define Head_INC(h) {\
-	if ( ++h >= TX_QUEUE_SIZE ) \
-		h = 0;\
-};
+#define Head_INC(h) {\
+    if ( ++h >= TX_QUEUE_SIZE ) \
+        h = 0;\
+};
-#define Head_DEC(h) do{\
-	if ( --h < 0 ) \
-		h = TX_QUEUE_SIZE-1;\
-}while(0);
+#define Head_DEC(h) do{\
+    if ( --h < 0 ) \
+        h = TX_QUEUE_SIZE-1;\
+}while(0);
 // bsd mbuf was moved into nic_tx_ring from tmp_tables, after rte_eth_tx_burst() succeed.
 static struct mbuf_txring nic_tx_ring[RTE_MAX_ETHPORTS];
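
Head_INC()/Head_DEC() just advance or rewind a ring index modulo TX_QUEUE_SIZE. A tiny self-contained check of the wrap-around behaviour (TX_QUEUE_SIZE here is an assumed value for the example):

    #include <assert.h>

    #define TX_QUEUE_SIZE 512      /* assumed ring depth for this example */

    /* Same wrap-around logic as the macros above, written as functions. */
    static int head_inc(int h) { return (h + 1 >= TX_QUEUE_SIZE) ? 0 : h + 1; }
    static int head_dec(int h) { return (h - 1 < 0) ? TX_QUEUE_SIZE - 1 : h - 1; }

    int main(void)
    {
        assert(head_inc(TX_QUEUE_SIZE - 1) == 0);     /* wraps forward  */
        assert(head_dec(0) == TX_QUEUE_SIZE - 1);     /* wraps backward */
        assert(head_dec(head_inc(7)) == 7);           /* inverse when not wrapping */
        return 0;
    }

Note that Head_INC, as defined above, is a bare brace block followed by a semicolon rather than a do { ... } while(0) wrapper, so it is only safe to expand as a stand-alone statement.
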
@@ -92,75 +92,75 @@ static inline void ff_txring_init(struct mbuf_txring* r, uint32_t len);
 typedef struct _list_manager_s
 {
-	uint64_t *ele;
-	int size;
-	//int FreeNum;
-	int top;
+    uint64_t *ele;
+    int size;
+    //int FreeNum;
+    int top;
 }StackList_t;
-static StackList_t ff_mpage_ctl = {0};
-static uint64_t ff_page_start = NULL, ff_page_end = NULL;
-static phys_addr_t *ff_mpage_phy = NULL;
+static StackList_t ff_mpage_ctl = {0};
+static uint64_t ff_page_start = NULL, ff_page_end = NULL;
+static phys_addr_t *ff_mpage_phy = NULL;
-static inline void *stklist_pop(StackList_t *p);
-static inline int stklist_push(StackList_t * p, uint64_t val);
+static inline void *stklist_pop(StackList_t *p);
+static inline int stklist_push(StackList_t * p, uint64_t val);
-static int stklist_init(StackList_t*p, int size)
+static int stklist_init(StackList_t*p, int size)
 {
-	int i = 0;
-	if (p==NULL || size<=0){
-		return -1;
-	}
-	p->size = size;
-	p->top = 0;
-	if ( posix_memalign((void**)&p->ele, sizeof(uint64_t), sizeof(uint64_t)*size) != 0)
-		return -2;
-	return 0;
+    int i = 0;
+    if (p==NULL || size<=0){
+        return -1;
+    }
+    p->size = size;
+    p->top = 0;
+    if ( posix_memalign((void**)&p->ele, sizeof(uint64_t), sizeof(uint64_t)*size) != 0)
+        return -2;
+    return 0;
 }
 static inline void *stklist_pop(StackList_t *p)
 {
-	int head = 0;
-	if (p==NULL)
-		return NULL;
+    int head = 0;
+    if (p==NULL)
+        return NULL;
-	if (p->top > 0 ){
-		return (void*)p->ele[--p->top];
-	}
-	else
-		return NULL;
+    if (p->top > 0 ){
+        return (void*)p->ele[--p->top];
+    }
+    else
+        return NULL;
 }
 //id: the id of element to be freed.
 //return code: -1: faile; >=0:OK.
 static inline int stklist_push(StackList_t *p, const uint64_t val){
-	int tail = 0;
-	if (p==NULL)
-		return -1;
-	if (p->top < p->size){
-		p->ele[p->top++] = val;
-		return 0;
-	}
-	else
-		return -1;
+    int tail = 0;
+    if (p==NULL)
+        return -1;
+    if (p->top < p->size){
+        p->ele[p->top++] = val;
+        return 0;
+    }
+    else
+        return -1;
 }
 static inline int stklist_size(StackList_t * p)
 {
-	return p->size;
+    return p->size;
 }
 // set (void*) to rte_mbuf's priv_data.
 static inline int ff_mbuf_set_uint64(struct rte_mbuf* p, uint64_t data)
 {
-	if (rte_pktmbuf_priv_size(p->pool) >= sizeof(uint64_t))
-		*((uint64_t*)(p+1)) = data;
-	return 0;
+    if (rte_pktmbuf_priv_size(p->pool) >= sizeof(uint64_t))
+        *((uint64_t*)(p+1)) = data;
+    return 0;
 }
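
StackList_t above is a plain LIFO of page addresses: stklist_init() reserves size slots, stklist_push() parks an address at the top, stklist_pop() hands back the most recently parked one. A minimal usage sketch of the same idea, compiled stand-alone with local buffers standing in for the mmap'd pages (the pagestack_t/stk_* names are local to this example, not f-stack API):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal re-statement of the stack shown above, for illustration only. */
    typedef struct { uint64_t *ele; int size; int top; } pagestack_t;

    static int stk_init(pagestack_t *p, int size)
    {
        p->ele = malloc(sizeof(uint64_t) * (size_t)size);
        if (p->ele == NULL)
            return -1;
        p->size = size;
        p->top  = 0;
        return 0;
    }

    static int stk_push(pagestack_t *p, uint64_t v)
    {
        if (p->top >= p->size)
            return -1;                              /* stack full */
        p->ele[p->top++] = v;
        return 0;
    }

    static void *stk_pop(pagestack_t *p)
    {
        return (p->top > 0) ? (void *)(uintptr_t)p->ele[--p->top] : NULL;
    }

    int main(void)
    {
        pagestack_t s;
        static char page_a[4096], page_b[4096];     /* stand-ins for the mmap'd pages */

        stk_init(&s, 8);
        stk_push(&s, (uint64_t)(uintptr_t)page_a);  /* ff_mmap_init() pushes every page once */
        stk_push(&s, (uint64_t)(uintptr_t)page_b);

        void *p = stk_pop(&s);                      /* like ff_mem_get_page(): last page pushed */
        printf("got page %p\n", p);
        stk_push(&s, (uint64_t)(uintptr_t)p);       /* like ff_mem_free_addr(): return it */

        free(s.ele);
        return 0;
    }
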
 /*************************
@@ -172,111 +172,111 @@ static inline int ff_mbuf_set_uint64(struct rte_mbuf* p, uint64_t data)
 *************/
 static inline int ff_txring_enqueue(struct mbuf_txring* q, void *p, int seg_num)
 {
-	int i = 0;
-	for ( i=0; i<seg_num-1; i++){
-		if ( q->m_table[q->head] ){
-			ff_mbuf_free(q->m_table[q->head]);
-			q->m_table[q->head] = NULL;
-		}
-		Head_INC(q->head);
-	}
-	if ( q->m_table[q->head] )
-		ff_mbuf_free(q->m_table[q->head]);
-	q->m_table[q->head] = p;
-	Head_INC(q->head);
-	return 0;
+    int i = 0;
+    for ( i=0; i<seg_num-1; i++){
+        if ( q->m_table[q->head] ){
+            ff_mbuf_free(q->m_table[q->head]);
+            q->m_table[q->head] = NULL;
+        }
+        Head_INC(q->head);
+    }
+    if ( q->m_table[q->head] )
+        ff_mbuf_free(q->m_table[q->head]);
+    q->m_table[q->head] = p;
+    Head_INC(q->head);
+    return 0;
 }
 // pop out from head-1 .
 static inline int ff_txring_pop(struct mbuf_txring* q, int num)
 {
-	int i = 0;
+    int i = 0;
-	for (i=0; i<num; i++){
-		Head_DEC(q->head);
-		if ( (i==0 && q->m_table[q->head]==NULL) || (i>0 && q->m_table[q->head]!=NULL) ){
-			rte_panic("ff_txring_pop fatal error!");
-		}
-		if ( q->m_table[q->head] != NULL ){
-			ff_mbuf_free(q->m_table[q->head]);
-			q->m_table[q->head] = NULL;
-		}
-	}
+    for (i=0; i<num; i++){
+        Head_DEC(q->head);
+        if ( (i==0 && q->m_table[q->head]==NULL) || (i>0 && q->m_table[q->head]!=NULL) ){
+            rte_panic("ff_txring_pop fatal error!");
+        }
+        if ( q->m_table[q->head] != NULL ){
+            ff_mbuf_free(q->m_table[q->head]);
+            q->m_table[q->head] = NULL;
+        }
+    }
 }
 static inline void ff_txring_init(struct mbuf_txring* q, uint32_t num)
 {
-	memset(q, 0, sizeof(struct mbuf_txring)*num);
+    memset(q, 0, sizeof(struct mbuf_txring)*num);
 }
 void ff_init_ref_pool(int nb_mbuf, int socketid)
 {
-	char s[64] = {0};
-	if (ff_ref_pool[socketid] != NULL) {
+    char s[64] = {0};
+    if (ff_ref_pool[socketid] != NULL) {
         return;
     }
     snprintf(s, sizeof(s), "ff_ref_pool_%d", socketid);
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		ff_ref_pool[socketid] = rte_pktmbuf_pool_create(s, nb_mbuf, MEMPOOL_CACHE_SIZE, 0, 0, socketid);
-	} else {
-		ff_ref_pool[socketid] = rte_mempool_lookup(s);
-	}
+    if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+        ff_ref_pool[socketid] = rte_pktmbuf_pool_create(s, nb_mbuf, MEMPOOL_CACHE_SIZE, 0, 0, socketid);
+    } else {
+        ff_ref_pool[socketid] = rte_mempool_lookup(s);
+    }
 }
 int ff_mmap_init()
 {
-	int err = 0;
-	int i = 0;
-	uint64_t virt_addr = NULL;
-	phys_addr_t phys_addr = 0;
-	uint64_t bsd_memsz = (ff_global_cfg.freebsd.mem_size << 20);
-	unsigned int bsd_pagesz = 0;
-	ff_page_start = (uint64_t)mmap( NULL, bsd_memsz, PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_POPULATE, -1, 0);
-	if (ff_page_start == (uint64_t)-1){
-		rte_panic("ff_mmap_init get ff_page_start failed, err=%d.\n", errno);
-		return -1;
-	}
-	if ( mlock((void*)ff_page_start, bsd_memsz)<0 ) {
-		rte_panic("mlock failed, err=%d.\n", errno);
-		return -1;
-	}
-	ff_page_end = ff_page_start + bsd_memsz;
-	bsd_pagesz = (bsd_memsz>>12);
-	rte_log(RTE_LOG_INFO, RTE_LOGTYPE_USER1, "ff_mmap_init mmap %d pages, %d MB.\n", bsd_pagesz, ff_global_cfg.freebsd.mem_size);
-	printf("ff_mmap_init mem[0x%lx:0x%lx]\n", ff_page_start, ff_page_end);
+    int err = 0;
+    int i = 0;
+    uint64_t virt_addr = NULL;
+    phys_addr_t phys_addr = 0;
+    uint64_t bsd_memsz = (ff_global_cfg.freebsd.mem_size << 20);
+    unsigned int bsd_pagesz = 0;
+    ff_page_start = (uint64_t)mmap( NULL, bsd_memsz, PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_POPULATE, -1, 0);
+    if (ff_page_start == (uint64_t)-1){
+        rte_panic("ff_mmap_init get ff_page_start failed, err=%d.\n", errno);
+        return -1;
+    }
+    if ( mlock((void*)ff_page_start, bsd_memsz)<0 ) {
+        rte_panic("mlock failed, err=%d.\n", errno);
+        return -1;
+    }
+    ff_page_end = ff_page_start + bsd_memsz;
+    bsd_pagesz = (bsd_memsz>>12);
+    rte_log(RTE_LOG_INFO, RTE_LOGTYPE_USER1, "ff_mmap_init mmap %d pages, %d MB.\n", bsd_pagesz, ff_global_cfg.freebsd.mem_size);
+    printf("ff_mmap_init mem[0x%lx:0x%lx]\n", ff_page_start, ff_page_end);
-	if (posix_memalign((void**)&ff_mpage_phy, sizeof(phys_addr_t), bsd_pagesz*sizeof(phys_addr_t))!=0){
-		rte_panic("posix_memalign get ff_mpage_phy failed, err=%d.\n", errno);
-		return -1;
-	}
-	stklist_init(&ff_mpage_ctl, bsd_pagesz);
-	for (i=0; i<bsd_pagesz; i++ ){
-		virt_addr = ff_page_start + PAGE_SIZE*i;
-		memset((void*)virt_addr, 0, PAGE_SIZE);
-		stklist_push( &ff_mpage_ctl, virt_addr);
-		ff_mpage_phy[i] = rte_mem_virt2phy((const void*)virt_addr);
-		if ( ff_mpage_phy[i] == RTE_BAD_IOVA ){
-			rte_panic("rte_mem_virt2phy return invalid address.");
-			return -1;
-		}
-	}
+    if (posix_memalign((void**)&ff_mpage_phy, sizeof(phys_addr_t), bsd_pagesz*sizeof(phys_addr_t))!=0){
+        rte_panic("posix_memalign get ff_mpage_phy failed, err=%d.\n", errno);
+        return -1;
+    }
+    stklist_init(&ff_mpage_ctl, bsd_pagesz);
+    for (i=0; i<bsd_pagesz; i++ ){
+        virt_addr = ff_page_start + PAGE_SIZE*i;
+        memset((void*)virt_addr, 0, PAGE_SIZE);
+        stklist_push( &ff_mpage_ctl, virt_addr);
+        ff_mpage_phy[i] = rte_mem_virt2phy((const void*)virt_addr);
+        if ( ff_mpage_phy[i] == RTE_BAD_IOVA ){
+            rte_panic("rte_mem_virt2phy return invalid address.");
+            return -1;
+        }
+    }
     ff_txring_init(&nic_tx_ring[0], RTE_MAX_ETHPORTS);
-	return 0;
+    return 0;
 }
 // 1: vma in fstack page table; 0: vma not in fstack pages, in DPDK pool.
 static inline int ff_chk_vma(const uint64_t virtaddr)
 {
-	return !!( virtaddr > ff_page_start && virtaddr < ff_page_end );
+    return !!( virtaddr > ff_page_start && virtaddr < ff_page_end );
 }
 /*
@@ -284,34 +284,34 @@ static inline int ff_chk_vma(const uint64_t virtaddr)
 */
 static inline uint64_t ff_mem_virt2phy(const void* virtaddr)
 {
-	uint64_t addr = 0;
-	uint32_t pages = 0;
+    uint64_t addr = 0;
+    uint32_t pages = 0;
-	pages = (((uint64_t)virtaddr - (uint64_t)ff_page_start)>>PAGE_SHIFT);
-	if (pages >= stklist_size(&ff_mpage_ctl)){
-		rte_panic("ff_mbuf_virt2phy get invalid pages %d.", pages);
-		return -1;
-	}
-	addr = ff_mpage_phy[pages] + ((const uint64_t)virtaddr & PAGE_MASK);
-	return addr;
+    pages = (((uint64_t)virtaddr - (uint64_t)ff_page_start)>>PAGE_SHIFT);
+    if (pages >= stklist_size(&ff_mpage_ctl)){
+        rte_panic("ff_mbuf_virt2phy get invalid pages %d.", pages);
+        return -1;
+    }
+    addr = ff_mpage_phy[pages] + ((const uint64_t)virtaddr & PAGE_MASK);
+    return addr;
 }
 void *ff_mem_get_page()
 {
-	return (void*)stklist_pop(&ff_mpage_ctl);
+    return (void*)stklist_pop(&ff_mpage_ctl);
 }
-int ff_mem_free_addr(void *p)
+int ff_mem_free_addr(void *p)
 {
-	stklist_push(&ff_mpage_ctl, (const uint64_t)p);
-	return 0;
+    stklist_push(&ff_mpage_ctl, (const uint64_t)p);
+    return 0;
 }
 static inline void ff_offload_set(struct ff_dpdk_if_context *ctx, void *m, struct rte_mbuf *head)
 {
-	void *data = NULL;
-	struct ff_tx_offload offload = {0};
+    void *data = NULL;
+    struct ff_tx_offload offload = {0};
     ff_mbuf_tx_offload(m, &offload);
     data = rte_pktmbuf_mtod(head, void*);
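
ff_mem_virt2phy() above is a two-step lookup: the page index is the offset from ff_page_start shifted down by PAGE_SHIFT, and the low 12 bits of the virtual address are re-applied to that page's recorded physical base. A worked example with made-up addresses (nothing here is a real mapping):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_MASK  (4096 - 1)

    int main(void)
    {
        /* Hypothetical values, chosen only to make the arithmetic visible. */
        uint64_t ff_page_start  = 0x7f0000000000ULL;   /* base of the mmap'd region       */
        uint64_t phys_of_page_3 = 0x12345000ULL;       /* what ff_mpage_phy[3] would hold */
        uint64_t virtaddr = ff_page_start + 3 * 4096 + 0x2a0;

        uint64_t page = (virtaddr - ff_page_start) >> PAGE_SHIFT;  /* -> 3        */
        uint64_t phys = phys_of_page_3 + (virtaddr & PAGE_MASK);   /* -> 0x...2a0 */

        assert(page == 3);
        assert(phys == 0x123452a0ULL);
        return 0;
    }
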
@@ -376,39 +376,39 @@ static inline void ff_offload_set(struct ff_dpdk_if_context *ctx, void *m, struct rte_mbuf *head)
 }
 // create rte_buf refer to data which is transmit from bsd stack by EXT_CLUSTER.
-static inline struct rte_mbuf* ff_extcl_to_rte(void *m )
+static inline struct rte_mbuf* ff_extcl_to_rte(void *m )
 {
-	struct rte_mempool *mbuf_pool = pktmbuf_pool[lcore_conf.socket_id];
-	struct rte_mbuf *src_mbuf = NULL;
-	struct rte_mbuf *p_head = NULL;
+    struct rte_mempool *mbuf_pool = pktmbuf_pool[lcore_conf.socket_id];
+    struct rte_mbuf *src_mbuf = NULL;
+    struct rte_mbuf *p_head = NULL;
-	src_mbuf = (struct rte_mbuf*)ff_rte_frm_extcl(m);
-	if ( NULL==src_mbuf ){
-		return NULL;
-	}
-	p_head = rte_pktmbuf_clone(src_mbuf, mbuf_pool);
-	if (p_head == NULL){
-		return NULL;
-	}
-	return p_head;
+    src_mbuf = (struct rte_mbuf*)ff_rte_frm_extcl(m);
+    if ( NULL==src_mbuf ){
+        return NULL;
+    }
+    p_head = rte_pktmbuf_clone(src_mbuf, mbuf_pool);
+    if (p_head == NULL){
+        return NULL;
+    }
+    return p_head;
 }
 // create rte_mbuf refer to data in bsd mbuf.
-static inline struct rte_mbuf* ff_bsd_to_rte(void *m, int total)
+static inline struct rte_mbuf* ff_bsd_to_rte(void *m, int total)
 {
-	struct rte_mempool *mbuf_pool = ff_ref_pool[lcore_conf.socket_id];
-	struct rte_mbuf *p_head = NULL;
-	struct rte_mbuf *cur = NULL, *prev = NULL, *tmp=NULL;
-	void *data = NULL;
-	void *p_bsdbuf = NULL;
+    struct rte_mempool *mbuf_pool = ff_ref_pool[lcore_conf.socket_id];
+    struct rte_mbuf *p_head = NULL;
+    struct rte_mbuf *cur = NULL, *prev = NULL, *tmp=NULL;
+    void *data = NULL;
+    void *p_bsdbuf = NULL;
     unsigned len = 0;
-	p_head = rte_pktmbuf_alloc(mbuf_pool);
-	if (p_head == NULL){
-		return NULL;
-	}
-	p_head->pkt_len = total;
+    p_head = rte_pktmbuf_alloc(mbuf_pool);
+    if (p_head == NULL){
+        return NULL;
+    }
+    p_head->pkt_len = total;
     p_head->nb_segs = 0;
     cur = p_head;
    p_bsdbuf = m;
@@ -420,7 +420,7 @@ static inline struct rte_mbuf* ff_bsd_to_rte(void *m, int total)
                 return NULL;
             }
         }
-		ff_next_mbuf(&p_bsdbuf, &data, &len); // p_bsdbuf move to next mbuf.
+        ff_next_mbuf(&p_bsdbuf, &data, &len); // p_bsdbuf move to next mbuf.
         cur->buf_addr = data;
         cur->buf_physaddr = ff_mem_virt2phy((const void*)(cur->buf_addr));
         cur->data_off = 0;
@@ -433,36 +433,36 @@ static inline struct rte_mbuf* ff_bsd_to_rte(void *m, int total)
         prev = cur;
         cur = NULL;
     }
-	return p_head;
+    return p_head;
 }
 int ff_if_send_onepkt(struct ff_dpdk_if_context *ctx, void *m, int total)
 {
     struct rte_mbuf *head = NULL;
-	void *src_buf = NULL;
-	void *p_data = NULL;
+    void *src_buf = NULL;
+    void *p_data = NULL;
     struct lcore_conf *qconf = NULL;
-	unsigned len = 0;
+    unsigned len = 0;
     if ( !m ){
         rte_log(RTE_LOG_CRIT, RTE_LOGTYPE_USER1, "ff_dpdk_if_send_ex input invalid NULL address.");
-		return 0;
+        return 0;
     }
     p_data = ff_mbuf_mtod(m);
     if ( ff_chk_vma((uint64_t)p_data)){
-		head = ff_bsd_to_rte(m, total);
-	}
-	else if ( (head = ff_extcl_to_rte(m)) == NULL ){
-		rte_panic("data address 0x%lx is out of page bound or not malloced by DPDK recver.", (uint64_t)p_data);
-		return 0;
-	}
+        head = ff_bsd_to_rte(m, total);
+    }
+    else if ( (head = ff_extcl_to_rte(m)) == NULL ){
+        rte_panic("data address 0x%lx is out of page bound or not malloced by DPDK recver.", (uint64_t)p_data);
+        return 0;
+    }
     if (head == NULL){
-		rte_log(RTE_LOG_CRIT, RTE_LOGTYPE_USER1, "ff_if_send_onepkt call ff_bsd_to_rte failed.");
-		ff_mbuf_free(m);
-		return 0;
-	}
+        rte_log(RTE_LOG_CRIT, RTE_LOGTYPE_USER1, "ff_if_send_onepkt call ff_bsd_to_rte failed.");
+        ff_mbuf_free(m);
+        return 0;
+    }
     ff_offload_set(ctx, m, head);
     qconf = &lcore_conf;

View File

@@ -71,7 +71,7 @@ struct mbuf_table {
     uint16_t len;
     struct rte_mbuf *m_table[MAX_PKT_BURST];
 #ifdef FF_USE_PAGE_ARRAY
-	void* bsd_m_table[MAX_PKT_BURST]; // save bsd mbuf address which will be enquene into txring after NIC transmitted pkt.
+    void* bsd_m_table[MAX_PKT_BURST]; // save bsd mbuf address which will be enquene into txring after NIC transmitted pkt.
 #endif
 };
@@ -100,8 +100,8 @@ struct lcore_conf {
 // Then when txring.m_table[x] is reused, the packet in txring.m_table[x] had been transmited by NIC.
 // that means the mbuf can be freed safely.
 struct mbuf_txring{
-	void* m_table[TX_QUEUE_SIZE];
-	uint16_t head; // next available element.
+    void* m_table[TX_QUEUE_SIZE];
+    uint16_t head; // next available element.
 };
 void ff_init_ref_pool(int nb_mbuf, int socketid);
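
The two comments above state the key invariant: the ring is exactly as deep as the NIC TX queue, so by the time an index comes around again the NIC has necessarily finished with the descriptor that slot corresponds to, and the parked bsd mbuf can be released. A condensed sketch of that deferred-free pattern (TX_QUEUE_SIZE and the free callback are placeholders; the real logic is ff_txring_enqueue() shown earlier in this diff):

    #include <stddef.h>
    #include <stdint.h>

    #define TX_QUEUE_SIZE 512            /* assumed: must match the NIC TX queue depth */

    struct txring {
        void    *m_table[TX_QUEUE_SIZE];
        uint16_t head;                   /* next slot to overwrite */
    };

    static void txring_park(struct txring *q, void *bsd_mbuf, void (*free_fn)(void *))
    {
        /* Whatever was parked here one full lap (TX_QUEUE_SIZE transmissions) ago
         * has, by now, been consumed by the NIC, so it is safe to release it. */
        if (q->m_table[q->head] != NULL)
            free_fn(q->m_table[q->head]);
        q->m_table[q->head] = bsd_mbuf;
        q->head = (uint16_t)((q->head + 1) % TX_QUEUE_SIZE);
    }
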

View File

@@ -432,29 +432,29 @@ int ff_next_mbuf(void **mbuf_bsd, void **data, unsigned *len)
     *data = mb->m_data;
     if (mb->m_next)
-		*mbuf_bsd = mb->m_next;
+        *mbuf_bsd = mb->m_next;
     else
-		*mbuf_bsd = NULL;
+        *mbuf_bsd = NULL;
     return 0;
 }
 void * ff_mbuf_mtod(void* bsd_mbuf)
 {
-	if ( !bsd_mbuf )
-		return NULL;
-	return (void*)((struct mbuf *)bsd_mbuf)->m_data;
+    if ( !bsd_mbuf )
+        return NULL;
+    return (void*)((struct mbuf *)bsd_mbuf)->m_data;
 }
 // get source rte_mbuf from ext cluster, which carry rte_mbuf while recving pkt, such as arp.
 void* ff_rte_frm_extcl(void* mbuf)
 {
-	struct mbuf *bsd_mbuf = mbuf;
-	if ( bsd_mbuf->m_ext.ext_type==EXT_DISPOSABLE && bsd_mbuf->m_ext.ext_free==ff_mbuf_ext_free ){
-		return bsd_mbuf->m_ext.ext_arg1;
-	}
-	else
-		return NULL;
+    struct mbuf *bsd_mbuf = mbuf;
+    if ( bsd_mbuf->m_ext.ext_type==EXT_DISPOSABLE && bsd_mbuf->m_ext.ext_free==ff_mbuf_ext_free ){
+        return bsd_mbuf->m_ext.ext_arg1;
+    }
+    else
+        return NULL;
 }