Searched refs:mbuf (Results 1 - 25 of 67) sorted by relevance

/deb_dpdk/lib/librte_port/
rte_port.h
54 * Macros to allow accessing metadata stored in the mbuf headroom
55 * just beyond the end of the mbuf data structure returned by a port
57 #define RTE_MBUF_METADATA_UINT8_PTR(mbuf, offset) \
58 (&((uint8_t *)(mbuf))[offset])
59 #define RTE_MBUF_METADATA_UINT16_PTR(mbuf, offset) \
60 ((uint16_t *) RTE_MBUF_METADATA_UINT8_PTR(mbuf, offset))
61 #define RTE_MBUF_METADATA_UINT32_PTR(mbuf, offset) \
62 ((uint32_t *) RTE_MBUF_METADATA_UINT8_PTR(mbuf, offset))
63 #define RTE_MBUF_METADATA_UINT64_PTR(mbuf, offset) \
64 ((uint64_t *) RTE_MBUF_METADATA_UINT8_PTR(mbuf, offse
[all...]
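A hedged sketch of how the headroom-metadata accessors above are typically used: read and write a 32-bit per-packet value at a fixed byte offset from the start of the mbuf structure. APP_META_OFFSET, set_flow_id and get_flow_id are hypothetical names; a real application takes the offset from its pipeline/table configuration.

#include <rte_mbuf.h>
#include <rte_port.h>

/* Placeholder offset, measured from the start of struct rte_mbuf;
 * real applications use the offset their pipeline was configured with. */
#define APP_META_OFFSET 128

static inline void
set_flow_id(struct rte_mbuf *m, uint32_t flow_id)
{
        /* Store a 32-bit flow id in the headroom metadata area. */
        *RTE_MBUF_METADATA_UINT32_PTR(m, APP_META_OFFSET) = flow_id;
}

static inline uint32_t
get_flow_id(struct rte_mbuf *m)
{
        /* Read back the value written by set_flow_id(). */
        return *RTE_MBUF_METADATA_UINT32_PTR(m, APP_META_OFFSET);
}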
rte_port_source_sink.c
401 pcap_sink_write_pkt(struct rte_port_sink *port, struct rte_mbuf *mbuf) argument
412 pkt = rte_pktmbuf_mtod(mbuf, uint8_t *);
414 pcap_hdr.len = mbuf->pkt_len;
418 if (mbuf->nb_segs > 1) {
425 if (mbuf->pkt_len > ETHER_MAX_JUMBO_FRAME_LEN)
428 for (jumbo_mbuf = mbuf; jumbo_mbuf != NULL;
456 #define PCAP_SINK_WRITE_PKT(port, mbuf) \
457 pcap_sink_write_pkt(port, mbuf)
486 #define PCAP_SINK_WRITE_PKT(port, mbuf) {}
/deb_dpdk/drivers/net/bnxt/
bnxt_rxr.c
73 rx_buf->mbuf = data;
82 struct rte_mbuf *mbuf)
90 prod_rx_buf->mbuf = mbuf;
109 struct rte_mbuf *mbuf; local
127 mbuf = rx_buf->mbuf;
128 rte_prefetch0(mbuf);
130 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
131 mbuf
81 bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons, struct rte_mbuf *mbuf) argument
[all...]
bnxt_txq.c
64 if (sw_ring[i].mbuf) {
65 rte_pktmbuf_free(sw_ring[i].mbuf);
66 sw_ring[i].mbuf = NULL;
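The bnxt_txq.c hit above is one instance of a cleanup pattern that recurs throughout these results (ixgbe, i40e, nfp): walk the software ring, free any mbuf still attached to an entry, and clear the pointer. A generic sketch, with sw_entry and release_sw_ring as hypothetical names:

#include <rte_mbuf.h>

struct sw_entry {
        struct rte_mbuf *mbuf;  /* mbuf still owned by the ring, or NULL */
};

static void
release_sw_ring(struct sw_entry *ring, unsigned int nb_desc)
{
        unsigned int i;

        for (i = 0; i < nb_desc; i++) {
                if (ring[i].mbuf != NULL) {
                        rte_pktmbuf_free(ring[i].mbuf); /* frees the whole chain */
                        ring[i].mbuf = NULL;
                }
        }
}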
bnxt_txr.c
170 tx_buf->mbuf = tx_pkt;
186 txbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(tx_buf->mbuf));
192 if (tx_buf->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
197 tx_buf->mbuf->vlan_tci;
332 struct rte_mbuf *mbuf; local
336 mbuf = tx_buf->mbuf;
337 tx_buf->mbuf = NULL;
343 rte_pktmbuf_free(mbuf);
/deb_dpdk/app/test/
test_table_ports.c
74 void *mbuf[RTE_PORT_IN_BURST_SIZE_MAX]; local
80 mbuf[0] = (void *)rte_pktmbuf_alloc(pool);
83 mbuf, 1);
93 mbuf[i] = rte_pktmbuf_alloc(pool);
96 (void * const *) mbuf, RTE_PORT_IN_BURST_SIZE_MAX);
152 struct rte_mbuf *mbuf[RTE_PORT_IN_BURST_SIZE_MAX]; local
160 mbuf[0] = rte_pktmbuf_alloc(pool);
162 rte_port_ring_writer_ops.f_tx(port, mbuf[0]);
175 mbuf[i] = rte_pktmbuf_alloc(pool);
176 rte_port_ring_writer_ops.f_tx(port, mbuf[
[all...]
test_sched.c
108 prepare_pkt(struct rte_mbuf *mbuf) argument
115 eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
127 rte_sched_port_pkt_write(mbuf, SUBPORT, PIPE, TC, QUEUE, e_RTE_METER_YELLOW);
130 mbuf->pkt_len = 60;
131 mbuf->data_len = 60;
test_table.h
113 void *mbuf = NULL; \
116 if (!rte_ring_dequeue((ring), &mbuf)) { \
117 if (mbuf == NULL) \
120 rte_pktmbuf_free((struct rte_mbuf *)mbuf); \
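The macro excerpted above dequeues mbufs from an rte_ring and frees them; a function-style sketch of the same idea (flush_mbuf_ring is a hypothetical name), assuming the ring holds struct rte_mbuf pointers:

#include <rte_mbuf.h>
#include <rte_ring.h>

static unsigned int
flush_mbuf_ring(struct rte_ring *r)
{
        void *obj;
        unsigned int n = 0;

        /* rte_ring_dequeue() returns 0 when an object was dequeued. */
        while (rte_ring_dequeue(r, &obj) == 0) {
                rte_pktmbuf_free((struct rte_mbuf *)obj);
                n++;
        }
        return n;
}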
/deb_dpdk/examples/ipsec-secgw/
esp.h
36 struct mbuf;
/deb_dpdk/lib/librte_reorder/
rte_reorder.h
43 * sequence number present in mbuf.
133 * Insert given mbuf in reorder buffer in its correct position
135 * The given mbuf is to be reordered relative to other mbufs in the system.
136 * The mbuf must contain a sequence number which is then used to place
142 * Reorder buffer where the mbuf has to be inserted.
143 * @param mbuf
144 * mbuf of packet that needs to be inserted in reorder buffer.
150 early mbuf, but it can be accommodated by performing drain and then insert.
151 * - ERANGE - Too early or late mbuf which is vastly out of range of expected
155 rte_reorder_insert(struct rte_reorder_buffer *b, struct rte_mbuf *mbuf);
[all...]
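A minimal sketch of the insert/drain flow described above, assuming the seqn field in struct rte_mbuf is the sequence number this library version reads and that the reorder buffer was created with rte_reorder_create(); reorder_example and the burst size of 32 are arbitrary:

#include <rte_mbuf.h>
#include <rte_reorder.h>

static void
reorder_example(struct rte_reorder_buffer *b, struct rte_mbuf *m, uint32_t seq)
{
        struct rte_mbuf *out[32];
        unsigned int n, i;

        m->seqn = seq;                     /* sequence number used for ordering */
        if (rte_reorder_insert(b, m) != 0)
                rte_pktmbuf_free(m);       /* too early/late: dropped in this sketch */

        n = rte_reorder_drain(b, out, 32); /* fetch packets that are now in order */
        for (i = 0; i < n; i++)
                rte_pktmbuf_free(out[i]);  /* placeholder for real processing */
}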
/deb_dpdk/drivers/net/pcap/
rte_eth_pcap.c
131 eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf, argument
135 uint16_t len = rte_pktmbuf_tailroom(mbuf);
136 struct rte_mbuf *m = mbuf;
138 rte_memcpy(rte_pktmbuf_append(mbuf, len), data, len);
143 /* Allocate next mbuf and point to that. */
160 mbuf->nb_segs++;
165 return mbuf->nb_segs;
168 /* Copy data from mbuf chain to a buffer suitable for writing to a PCAP file. */
170 eth_pcap_gather_data(unsigned char *data, struct rte_mbuf *mbuf) argument
174 while (mbuf) {
189 struct rte_mbuf *mbuf; local
261 struct rte_mbuf *mbuf; local
323 struct rte_mbuf *mbuf; local
[all...]
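eth_pcap_gather_data() above flattens an mbuf chain before writing it to a PCAP file; a hedged sketch of that walk (gather_mbuf_chain is a hypothetical name, and the destination buffer is assumed large enough for the packet's pkt_len bytes):

#include <rte_mbuf.h>
#include <rte_memcpy.h>

static void
gather_mbuf_chain(unsigned char *dst, const struct rte_mbuf *m)
{
        /* Copy each segment's data area into one contiguous buffer. */
        while (m != NULL) {
                rte_memcpy(dst, rte_pktmbuf_mtod(m, const void *),
                           rte_pktmbuf_data_len(m));
                dst += rte_pktmbuf_data_len(m);
                m = m->next;               /* next segment in the chain */
        }
}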
/deb_dpdk/drivers/net/fm10k/
fm10k_rxtx.c
119 struct rte_mbuf *mbuf; local
133 mbuf = q->sw_ring[next_dd];
138 rte_pktmbuf_pkt_len(mbuf) = desc.w.length;
139 rte_pktmbuf_data_len(mbuf) = desc.w.length;
141 mbuf->ol_flags = 0;
143 rx_desc_to_ol_flags(mbuf, &desc);
146 mbuf->hash.rss = desc.d.rss;
152 * is valid for each RX packet's mbuf.
154 mbuf->ol_flags |= PKT_RX_VLAN_PKT;
155 mbuf
229 struct rte_mbuf *mbuf; local
[all...]
/deb_dpdk/drivers/net/thunderx/
nicvf_ethdev.h
85 nicvf_mbuff_meta_length(struct rte_mbuf *mbuf) argument
87 return (uint16_t)((uintptr_t)mbuf->buf_addr - (uintptr_t)mbuf);
/deb_dpdk/lib/librte_distributor/
rte_distributor.h
88 * same flow id, or tag, in the mbuf will be processed at the same time.
90 * The user is advised to set a tag for each mbuf before calling this function.
192 * @param mbuf
197 struct rte_mbuf *mbuf);
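A hedged sketch of the tagging step the comments above describe, assuming the tag is carried in mbuf->hash.usr and that packets are handed to the single-packet distributor with rte_distributor_process(); distribute_burst is a hypothetical wrapper, and reusing the RSS hash as the flow tag is just one possible choice:

#include <rte_distributor.h>
#include <rte_mbuf.h>

static void
distribute_burst(struct rte_distributor *d, struct rte_mbuf **pkts, unsigned int n)
{
        unsigned int i;

        /* Tag every packet before handing it to the distributor; packets
         * carrying the same tag are processed by one worker at a time. */
        for (i = 0; i < n; i++)
                pkts[i]->hash.usr = pkts[i]->hash.rss;

        rte_distributor_process(d, pkts, n);
}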
/deb_dpdk/drivers/net/ixgbe/
ixgbe_rxtx_vec_common.h
70 /* free up last mbuf */
126 m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
131 m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
146 m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
168 txep[i].mbuf = tx_pkts[i];
186 rte_pktmbuf_free_seg(txe->mbuf);
193 txe->mbuf = NULL;
209 if (rxq->sw_ring[i].mbuf != NULL)
210 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
216 if (rxq->sw_ring[i].mbuf !
[all...]
ixgbe_rxtx.h
92 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */ member in struct:ixgbe_rx_entry
103 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */ member in struct:ixgbe_tx_entry
112 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */ member in struct:ixgbe_tx_entry_v
119 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
149 /** flags to set in mbuf when a vlan is detected. */
151 /** need to alloc dummy mbuf, for wraparound when scanning hw ring */
/deb_dpdk/drivers/net/szedata2/
rte_eth_szedata2.c
106 struct rte_mbuf *mbuf; local
128 * by queue and copies the packet data into a newly allocated mbuf
132 mbuf = rte_pktmbuf_alloc(sze_q->mb_pool);
134 if (unlikely(mbuf == NULL))
153 rte_pktmbuf_free(mbuf);
161 rte_pktmbuf_free(mbuf);
172 rte_pktmbuf_free(mbuf);
289 rte_pktmbuf_free(mbuf);
293 /* get the space available for data in the mbuf */
299 /* sze packet will fit in one mbuf, g
339 struct rte_mbuf *mbuf; local
670 struct rte_mbuf *mbuf; local
[all...]
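The szedata2 RX hits above follow a common copy-based receive pattern: allocate an mbuf from the queue's pool, check that the frame fits, copy the bytes in, and let rte_pktmbuf_append() maintain the length fields. A minimal sketch (copy_to_new_mbuf is a hypothetical name):

#include <rte_mbuf.h>
#include <rte_memcpy.h>

static struct rte_mbuf *
copy_to_new_mbuf(struct rte_mempool *mp, const void *data, uint16_t len)
{
        struct rte_mbuf *m = rte_pktmbuf_alloc(mp);

        if (m == NULL)
                return NULL;
        if (rte_pktmbuf_tailroom(m) < len) {
                /* Frame does not fit a single mbuf; a real driver would
                 * chain segments instead of dropping. */
                rte_pktmbuf_free(m);
                return NULL;
        }
        rte_memcpy(rte_pktmbuf_append(m, len), data, len);
        return m;
}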
/deb_dpdk/drivers/net/ena/
ena_ethdev.c
74 #define GET_L4_HDR_LEN(mbuf) \
75 ((rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, \
76 mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)
247 static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf, argument
268 mbuf->ol_flags = ol_flags;
269 mbuf->packet_type = packet_type;
272 static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, argument
277 if (mbuf->ol_flags &
280 if (mbuf
1154 struct rte_mbuf *mbuf = mbufs[next_to_use_masked]; local
1494 struct rte_mbuf *mbuf = NULL; local
1579 struct rte_mbuf *mbuf; local
[all...]
/deb_dpdk/drivers/net/mpipe/
mpipe_tilegx.c
473 mpipe_mbuf_stack_index(struct mpipe_dev_priv *priv, struct rte_mbuf *mbuf) argument
475 return (mbuf->port < RTE_MAX_ETHPORTS) ?
476 mpipe_priv(&rte_eth_devices[mbuf->port])->stack :
486 struct rte_mbuf *mbuf = RTE_PTR_SUB(va, priv->rx_offset); local
488 rte_pktmbuf_reset(mbuf);
489 mbuf->data_off = (uintptr_t)va - (uintptr_t)mbuf->buf_addr;
490 mbuf->port = in_port;
491 mbuf->data_len = size;
492 mbuf
502 mpipe_recv_push(struct mpipe_dev_priv *priv, struct rte_mbuf *mbuf) argument
515 struct rte_mbuf *mbuf; local
533 struct rte_mbuf *mbuf; local
1235 struct rte_mbuf *mbuf; local
1275 struct rte_mbuf *mbuf = NULL, *pkt = NULL; local
1411 struct rte_mbuf *mbuf; local
[all...]
/deb_dpdk/drivers/net/i40e/
i40e_rxtx_vec_common.h
71 /* free up last mbuf */
127 m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
132 m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
148 m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
170 txep[i].mbuf = tx_pkts[i];
185 if (rxq->sw_ring[i].mbuf != NULL)
186 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
192 if (rxq->sw_ring[i].mbuf != NULL)
193 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
207 struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
[all...]
i40e_rxtx.c
487 /* Translate descriptor info to mbuf parameters */
489 mb = rxep[j].mbuf;
518 rxq->rx_stage[i + j] = rxep[j].mbuf;
526 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
574 /* Prefetch next mbuf */
575 rte_prefetch0(rxep[i + 1].mbuf);
577 mb = rxep[i].mbuf;
630 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
730 /* Prefetch next mbuf */
731 rte_prefetch0(sw_ring[rx_id].mbuf);
994 i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload) argument
2115 struct rte_mbuf *mbuf; local
2298 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp); local
[all...]
/deb_dpdk/examples/quota_watermark/qw/
main.c
79 struct rte_mbuf *mbuf; local
87 /* Get a mbuf from the pool */
88 mbuf = rte_pktmbuf_alloc(mbuf_pool);
89 if (unlikely(mbuf == NULL))
93 hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
107 mbuf->pkt_len = 60;
108 mbuf->data_len = 60;
110 rte_eth_tx_burst(port_id, 0, &mbuf, 1);
243 /* Dequeue up to quota mbuf from rx */
337 /* Create a pool of mbuf t
[all...]
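The quota_watermark hits above outline the minimal TX path: allocate an mbuf, write an Ethernet header, set the 60-byte frame length, and pass the mbuf to rte_eth_tx_burst(). A hedged sketch using the old ether_hdr/ETHER_TYPE_IPv4 names from this tree; send_probe_frame is hypothetical, and the zeroed MAC addresses and uninitialized payload are placeholders:

#include <string.h>
#include <rte_byteorder.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_mbuf.h>

static void
send_probe_frame(uint8_t port_id, struct rte_mempool *mbuf_pool)
{
        struct rte_mbuf *m = rte_pktmbuf_alloc(mbuf_pool);
        struct ether_hdr *hdr;

        if (m == NULL)
                return;
        hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
        memset(hdr, 0, sizeof(*hdr));            /* placeholder MAC addresses */
        hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
        m->pkt_len = 60;                         /* minimum-size Ethernet frame */
        m->data_len = 60;
        if (rte_eth_tx_burst(port_id, 0, &m, 1) == 0)
                rte_pktmbuf_free(m);             /* not queued: avoid leaking it */
}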
/deb_dpdk/drivers/net/cxgbe/
H A Dsge.c88 * per mbuf buffer). We currently only support two sizes for 1500- and
148 static int map_mbuf(struct rte_mbuf *mbuf, dma_addr_t *addr) argument
150 struct rte_mbuf *m = mbuf;
178 if (d->mbuf) { /* an SGL is present */
179 rte_pktmbuf_free(d->mbuf);
180 d->mbuf = NULL;
186 rte_pktmbuf_free(d->coalesce.mbuf[i]);
187 d->coalesce.mbuf[i] = NULL;
196 RTE_MBUF_PREFETCH_TO_FREE(&q->sdesc->mbuf->pool);
207 if (d->mbuf) { /* a
407 struct rte_mbuf *mbuf = buf_bulk[i]; local
624 write_sgl(struct rte_mbuf *mbuf, struct sge_txq *q, struct ulptx_sgl *sgl, u64 *end, unsigned int start, const dma_addr_t *addr) argument
885 should_tx_packet_coalesce(struct sge_eth_txq *txq, struct rte_mbuf *mbuf, unsigned int *nflits, struct adapter *adap) argument
972 tx_do_packet_coalesce(struct sge_eth_txq *txq, struct rte_mbuf *mbuf, int flits, struct adapter *adap, const struct port_info *pi, dma_addr_t *addr) argument
1067 t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf) argument
1346 struct rte_mbuf *mbuf; local
[all...]
/deb_dpdk/drivers/net/e1000/
em_rxtx.c
84 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */ member in struct:em_rx_entry
91 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */ member in struct:em_tx_entry
100 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
414 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
535 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
537 if (txe->mbuf != NULL) {
538 rte_pktmbuf_free_seg(txe->mbuf);
539 txe->mbuf
1555 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool); local
[all...]
/deb_dpdk/drivers/net/nfp/
nfp_net.c
288 if (rxq->rxbufs[i].mbuf) {
289 rte_pktmbuf_free_seg(rxq->rxbufs[i].mbuf);
290 rxq->rxbufs[i].mbuf = NULL;
325 if (txq->txbufs[i].mbuf) {
326 rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
327 txq->txbufs[i].mbuf = NULL;
927 /* RX ring mbuf allocation failures */
995 /* RX ring mbuf allocation failures */
1311 * Tracking mbuf size for detecting a potential mbuf overflo
1384 struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool); local
1597 nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd, struct rte_mbuf *mbuf) argument
1651 nfp_net_check_port(struct nfp_net_rx_desc *rxd, struct rte_mbuf *mbuf) argument
[all...]
