DPDK bond
bond_ethdev_mode_set

When a bonded device is created, or its mode is changed at runtime, bond_ethdev_mode_set() installs the RX/TX burst functions that implement the chosen bonding mode:

int
bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
{
    struct bond_dev_private *internals;

    internals = eth_dev->data->dev_private;

    switch (mode) {
    case BONDING_MODE_ROUND_ROBIN:
        eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_round_robin;
        eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
        break;
    case BONDING_MODE_ACTIVE_BACKUP:
        eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_active_backup;
        eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
        break;
    case BONDING_MODE_BALANCE:
        eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_balance;
        eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
        break;
    case BONDING_MODE_BROADCAST:
        eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_broadcast;
        eth_dev->rx_pkt_burst = bond_ethdev_rx_burst;
        break;
    case BONDING_MODE_8023AD:
        if (bond_mode_8023ad_enable(eth_dev) != 0)
            return -1;

        if (internals->mode4.dedicated_queues.enabled == 0) {
            /* LACP control frames share the data path, so the
             * application must keep polling regularly. */
            eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
            eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
            RTE_BOND_LOG(WARNING,
                "Using mode 4, it is necessary to do TX burst "
                "and RX burst at least every 100ms.");
        } else {
            /* Use flow director's optimization */
            eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad_fast_queue;
            eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad_fast_queue;
        }
        break;
    case BONDING_MODE_TLB:
        eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_tlb;
        eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_active_backup;
        break;
    case BONDING_MODE_ALB:
        if (bond_mode_alb_enable(eth_dev) != 0)
            return -1;

        eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_alb;
        eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_alb;
        break;
    default:
        return -1;
    }

    internals->mode = mode;

    return 0;
}
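For context, here is a minimal sketch of how an application reaches this code through the public bonding API. The device name "net_bonding0", socket id 0, and the later mode switch are illustrative assumptions, not part of the original post:

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_eth_bond.h>

/* Sketch: create a bonded port in 802.3ad mode, then change its mode.
 * Both calls end up in bond_ethdev_mode_set() above. */
static int
setup_bonding(void)
{
    int bond_port = rte_eth_bond_create("net_bonding0",
                                        BONDING_MODE_8023AD, 0);
    if (bond_port < 0) {
        printf("rte_eth_bond_create failed\n");
        return -1;
    }

    /* Changing the mode at runtime goes through the same path. */
    if (rte_eth_bond_mode_set(bond_port, BONDING_MODE_ACTIVE_BACKUP) != 0) {
        printf("rte_eth_bond_mode_set failed\n");
        return -1;
    }

    return bond_port;
}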
bond_ethdev_rx_burst
This is the default (round-robin) RX path: it polls each active slave's queue in turn, appending packets into bufs, and rotates the starting slave across calls so slaves are serviced fairly:

static uint16_t
bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
    struct bond_dev_private *internals;
    uint16_t num_rx_total = 0;
    uint16_t slave_count;
    uint16_t active_slave;
    int i;

    /* Cast to structure, containing bonded device's port id and queue id */
    struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;

    internals = bd_rx_q->dev_private;
    slave_count = internals->active_slave_count;
    active_slave = internals->active_slave;

    for (i = 0; i < slave_count && nb_pkts; i++) {
        uint16_t num_rx_slave;

        /* Offset of pointer to *bufs increases as packets are received
         * from other slaves */
        num_rx_slave = rte_eth_rx_burst(
                internals->active_slaves[active_slave],
                bd_rx_q->queue_id,
                bufs + num_rx_total, nb_pkts);

        num_rx_total += num_rx_slave;
        nb_pkts -= num_rx_slave;

        /* Wrap back to the first slave after the last one */
        if (++active_slave == slave_count)
            active_slave = 0;
    }

    /* Advance the starting slave for the next call */
    if (++internals->active_slave >= slave_count)
        internals->active_slave = 0;

    return num_rx_total;
}
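A minimal polling sketch against the bonded port; bond_port, queue 0, and BURST_SIZE are assumptions for illustration. Calling rte_eth_rx_burst() on the bonded port dispatches to bond_ethdev_rx_burst() above, which fans the request out to the slaves:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32 /* illustrative burst size */

/* Sketch: poll the bonded port on queue 0 and drop everything.
 * In mode 4 this loop must run at least every 100ms (see the
 * warning in bond_ethdev_mode_set()). */
static void
rx_loop(uint16_t bond_port)
{
    struct rte_mbuf *bufs[BURST_SIZE];
    uint16_t nb_rx, i;

    for (;;) {
        nb_rx = rte_eth_rx_burst(bond_port, 0, bufs, BURST_SIZE);
        for (i = 0; i < nb_rx; i++)
            rte_pktmbuf_free(bufs[i]); /* real packet processing goes here */
    }
}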
rte_eth_bond_slave_add
The public slave-add API validates the bonded port, takes the device's spinlock, and delegates the real work to a lock-free helper:

int
rte_eth_bond_slave_add(uint16_t bonded_port_id, uint16_t slave_port_id)
{
    struct rte_eth_dev *bonded_eth_dev;
    struct bond_dev_private *internals;
    int retval;

    /* Verify that the bonded port id is valid; the slave port id
     * is checked inside the lock-free helper. */
    if (valid_bonded_port_id(bonded_port_id) != 0)
        return -1;

    bonded_eth_dev = &rte_eth_devices[bonded_port_id];
    internals = bonded_eth_dev->data->dev_private;

    rte_spinlock_lock(&internals->lock);

    retval = __eth_bond_slave_add_lock_free(bonded_port_id, slave_port_id);

    rte_spinlock_unlock(&internals->lock);

    return retval;
}
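Putting it together, a sketch that attaches physical ports to the bonded device; the slave port ids 0 and 1 are placeholders for illustration:

#include <rte_common.h>
#include <rte_eth_bond.h>

/* Sketch: enslave two physical ports to the bonded port created
 * earlier. Port ids 0 and 1 are assumptions, not real devices. */
static int
attach_slaves(uint16_t bond_port)
{
    uint16_t slaves[] = { 0, 1 };
    unsigned int i;

    for (i = 0; i < RTE_DIM(slaves); i++) {
        if (rte_eth_bond_slave_add(bond_port, slaves[i]) != 0)
            return -1;
    }

    return 0;
}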
Tags: slave, burst, rx, dev, eth, bond, dpdk
Source: https://www.cnblogs.com/dream397/p/14870529.html