// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>
#include <net/netdev_queues.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"

static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
				      void *data, size_t len);

static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
				    const skb_frag_t *frag,
				    size_t offset, size_t len);

static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
				     struct ionic_desc_info *desc_info);

static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg);
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}
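/* If the Tx queue still has posted-but-unrung descriptors and the doorbell
 * deadline has expired, ring the doorbell again under the netdev Tx lock.
 * Returns true while the queue has outstanding work.
 */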
bool ionic_txq_poke_doorbell(struct ionic_queue *q)
{
	struct netdev_queue *netdev_txq;
	unsigned long now, then, dif;
	struct net_device *netdev;

	netdev = q->lif->netdev;
	netdev_txq = netdev_get_tx_queue(netdev, q->index);

	HARD_TX_LOCK(netdev, netdev_txq, smp_processor_id());

	if (q->tail_idx == q->head_idx) {
		HARD_TX_UNLOCK(netdev, netdev_txq);
		return false;
	}

	now = READ_ONCE(jiffies);
	then = q->dbell_jiffies;
	dif = now - then;

	if (dif > q->dbell_deadline) {
		ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
				 q->dbval | q->head_idx);

		q->dbell_jiffies = now;
	}

	HARD_TX_UNLOCK(netdev, netdev_txq);

	return true;
}
bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
{
	unsigned long now, then, dif;

	/* no lock, called from rx napi or txrx napi, nothing else can fill */

	if (q->tail_idx == q->head_idx)
		return false;

	now = READ_ONCE(jiffies);
	then = q->dbell_jiffies;
	dif = now - then;

	if (dif > q->dbell_deadline) {
		ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
				 q->dbval | q->head_idx);

		q->dbell_jiffies = now;

		dif = 2 * q->dbell_deadline;
		if (dif > IONIC_RX_MAX_DOORBELL_DEADLINE)
			dif = IONIC_RX_MAX_DOORBELL_DEADLINE;

		q->dbell_deadline = dif;
	}

	return true;
}
static inline struct netdev_queue *q_to_ndq(struct net_device *netdev,
					    struct ionic_queue *q)
{
	return netdev_get_tx_queue(netdev, q->index);
}

static void *ionic_rx_buf_va(struct ionic_buf_info *buf_info)
{
	return page_address(buf_info->page) + buf_info->page_offset;
}

static dma_addr_t ionic_rx_buf_pa(struct ionic_buf_info *buf_info)
{
	return buf_info->dma_addr + buf_info->page_offset;
}

static unsigned int ionic_rx_buf_size(struct ionic_buf_info *buf_info)
{
	return min_t(u32, IONIC_MAX_BUF_LEN, IONIC_PAGE_SIZE - buf_info->page_offset);
}
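/* Allocate a full page for an Rx buffer and map it for device DMA.
 * The page is carved into receive buffers via page_offset and may be
 * recycled by ionic_rx_buf_recycle() instead of being freed.
 */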
static int ionic_rx_page_alloc(struct ionic_queue *q,
			       struct ionic_buf_info *buf_info)
{
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct page *page;

	stats = q_to_rx_stats(q);

	if (unlikely(!buf_info)) {
		net_err_ratelimited("%s: %s invalid buf_info in alloc\n",
				    dev_name(dev), q->name);
		return -EINVAL;
	}

	page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
	if (unlikely(!page)) {
		net_err_ratelimited("%s: %s page alloc failed\n",
				    dev_name(dev), q->name);
		stats->alloc_err++;
		return -ENOMEM;
	}

	buf_info->dma_addr = dma_map_page(dev, page, 0,
					  IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
		__free_pages(page, 0);
		net_err_ratelimited("%s: %s dma map failed\n",
				    dev_name(dev), q->name);
		stats->dma_map_err++;
		return -EIO;
	}

	buf_info->page = page;
	buf_info->page_offset = 0;

	return 0;
}
static void ionic_rx_page_free(struct ionic_queue *q,
			       struct ionic_buf_info *buf_info)
{
	struct device *dev = q->dev;

	if (unlikely(!buf_info)) {
		net_err_ratelimited("%s: %s invalid buf_info in free\n",
				    dev_name(dev), q->name);
		return;
	}

	if (!buf_info->page)
		return;

	dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
	__free_pages(buf_info->page, 0);
	buf_info->page = NULL;
}
static bool ionic_rx_buf_recycle(struct ionic_queue *q,
				 struct ionic_buf_info *buf_info, u32 used)
{
	u32 size;

	/* don't re-use pages allocated in low-mem condition */
	if (page_is_pfmemalloc(buf_info->page))
		return false;

	/* don't re-use buffers from non-local numa nodes */
	if (page_to_nid(buf_info->page) != numa_mem_id())
		return false;

	size = ALIGN(used, q->xdp_rxq_info ? IONIC_PAGE_SIZE : IONIC_PAGE_SPLIT_SZ);
	buf_info->page_offset += size;
	if (buf_info->page_offset >= IONIC_PAGE_SIZE)
		return false;

	get_page(buf_info->page);

	return true;
}
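/* Build a frag-based skb from the received buffers: attach each used page
 * fragment to an skb obtained from napi_get_frags(), recycling buffers
 * where possible and unmapping the ones handed off to the skb.
 */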
static struct sk_buff *ionic_rx_frags(struct net_device *netdev,
				      struct ionic_queue *q,
				      struct ionic_desc_info *desc_info,
				      unsigned int headroom,
				      unsigned int len,
				      unsigned int num_sg_elems,
				      bool synced)
{
	struct ionic_buf_info *buf_info;
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct sk_buff *skb;
	unsigned int i;
	u16 frag_len;

	stats = q_to_rx_stats(q);

	buf_info = &desc_info->bufs[0];

	prefetchw(buf_info->page);

	skb = napi_get_frags(&q_to_qcq(q)->napi);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     dev_name(dev), q->name);
		stats->alloc_err++;
		return NULL;
	}

	i = num_sg_elems + 1;
	do {
		if (unlikely(!buf_info->page)) {
			dev_kfree_skb(skb);
			return NULL;
		}

		if (headroom)
			frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
		else
			frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
		len -= frag_len;

		if (!synced)
			dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info),
						      headroom, frag_len, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				buf_info->page, buf_info->page_offset + headroom,
				frag_len, IONIC_PAGE_SIZE);

		if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
			dma_unmap_page(dev, buf_info->dma_addr,
				       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
			buf_info->page = NULL;
		}

		/* only needed on the first buffer */
		headroom = 0;

		buf_info++;

		i--;
	} while (i > 0);

	return skb;
}
static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev,
					  struct ionic_queue *q,
					  struct ionic_desc_info *desc_info,
					  unsigned int headroom,
					  unsigned int len,
					  bool synced)
{
	struct ionic_buf_info *buf_info;
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct sk_buff *skb;

	stats = q_to_rx_stats(q);

	buf_info = &desc_info->bufs[0];

	skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     dev_name(dev), q->name);
		stats->alloc_err++;
		return NULL;
	}

	if (unlikely(!buf_info->page)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	if (!synced)
		dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info),
					      headroom, len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info) + headroom, len);
	dma_sync_single_range_for_device(dev, ionic_rx_buf_pa(buf_info),
					 headroom, len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, netdev);

	return skb;
}
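/* Release the buffers attached to an XDP Tx descriptor: unmap the DMA
 * mappings, free pages that were consumed by XDP_TX, and return redirected
 * frames to their owner.
 */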
static void ionic_xdp_tx_desc_clean(struct ionic_queue *q,
				    struct ionic_desc_info *desc_info)
{
	unsigned int nbufs = desc_info->nbufs;
	struct ionic_buf_info *buf_info;
	struct device *dev = q->dev;
	int i;

	if (!nbufs)
		return;

	buf_info = desc_info->bufs;
	dma_unmap_single(dev, buf_info->dma_addr,
			 buf_info->len, DMA_TO_DEVICE);
	if (desc_info->act == XDP_TX)
		__free_pages(buf_info->page, 0);
	buf_info->page = NULL;

	buf_info++;
	for (i = 1; i < nbufs + 1 && buf_info->page; i++, buf_info++) {
		dma_unmap_page(dev, buf_info->dma_addr,
			       buf_info->len, DMA_TO_DEVICE);
		if (desc_info->act == XDP_TX)
			__free_pages(buf_info->page, 0);
		buf_info->page = NULL;
	}

	if (desc_info->act == XDP_REDIRECT)
		xdp_return_frame(desc_info->xdpf);

	desc_info->nbufs = 0;
	desc_info->xdpf = NULL;
	desc_info->act = 0;
}
static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
				enum xdp_action act, struct page *page, int off,
				bool ring_doorbell)
{
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	struct ionic_tx_stats *stats;
	struct ionic_txq_desc *desc;
	size_t len = frame->len;
	dma_addr_t dma_addr;
	u64 cmd;

	desc_info = &q->info[q->head_idx];
	desc = desc_info->txq_desc;
	buf_info = desc_info->bufs;
	stats = q_to_tx_stats(q);

	dma_addr = ionic_tx_map_single(q, frame->data, len);
	if (dma_mapping_error(q->dev, dma_addr)) {
		stats->dma_map_err++;
		return -EIO;
	}
	buf_info->dma_addr = dma_addr;
	buf_info->len = len;
	buf_info->page = page;
	buf_info->page_offset = off;

	desc_info->nbufs = 1;
	desc_info->xdpf = frame;
	desc_info->act = act;

	if (xdp_frame_has_frags(frame)) {
		struct ionic_txq_sg_elem *elem;
		struct skb_shared_info *sinfo;
		struct ionic_buf_info *bi;
		skb_frag_t *frag;
		int i;

		bi = &buf_info[1];
		sinfo = xdp_get_shared_info_from_frame(frame);
		frag = sinfo->frags;
		elem = desc_info->txq_sg_desc->elems;
		for (i = 0; i < sinfo->nr_frags; i++, frag++, bi++) {
			dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
			if (dma_mapping_error(q->dev, dma_addr)) {
				stats->dma_map_err++;
				ionic_tx_desc_unmap_bufs(q, desc_info);
				return -EIO;
			}
			bi->dma_addr = dma_addr;
			bi->len = skb_frag_size(frag);
			bi->page = skb_frag_page(frag);

			elem->addr = cpu_to_le64(bi->dma_addr);
			elem->len = cpu_to_le16(bi->len);
			elem++;

			desc_info->nbufs++;
		}
	}

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  0, (desc_info->nbufs - 1), buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->csum_start = 0;
	desc->csum_offset = 0;

	stats->xdp_frames++;
	stats->pkts++;
	stats->bytes += len;

	ionic_txq_post(q, ring_doorbell, ionic_tx_clean, NULL);

	return 0;
}
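/* .ndo_xdp_xmit handler: pick a Tx queue based on the current CPU, post as
 * many of the given frames as the ring has room for, and ring the doorbell
 * if XDP_XMIT_FLUSH was requested.
 */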
int ionic_xdp_xmit(struct net_device *netdev, int n,
		   struct xdp_frame **xdp_frames, u32 flags)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *txq;
	struct netdev_queue *nq;
	int nxmit;
	int space;
	int cpu;
	int qi;

	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	/* AdminQ is assumed on cpu 0, while we attempt to affinitize the
	 * TxRx queue pairs 0..n-1 on cpus 1..n. We try to keep with that
	 * affinitization here, but of course irqbalance and friends might
	 * have juggled things anyway, so we have to check for the 0 case.
	 */
	cpu = smp_processor_id();
	qi = cpu ? (cpu - 1) % lif->nxqs : cpu;

	txq = &lif->txqcqs[qi]->q;
	nq = netdev_get_tx_queue(netdev, txq->index);
	__netif_tx_lock(nq, cpu);
	txq_trans_cond_update(nq);

	if (netif_tx_queue_stopped(nq) ||
	    !netif_txq_maybe_stop(q_to_ndq(netdev, txq),
				  ionic_q_space_avail(txq),
				  1, 1)) {
		__netif_tx_unlock(nq);
		return -EIO;
	}

	space = min_t(int, n, ionic_q_space_avail(txq));
	for (nxmit = 0; nxmit < space ; nxmit++) {
		if (ionic_xdp_post_frame(txq, xdp_frames[nxmit],
					 XDP_REDIRECT,
					 virt_to_page(xdp_frames[nxmit]->data),
					 0, false)) {
			nxmit--;
			break;
		}
	}

	if (flags & XDP_XMIT_FLUSH)
		ionic_dbell_ring(lif->kern_dbpage, txq->hw_type,
				 txq->dbval | txq->head_idx);

	netif_txq_maybe_stop(q_to_ndq(netdev, txq),
			     ionic_q_space_avail(txq),
			     4, 4);
	__netif_tx_unlock(nq);

	return nxmit;
}
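/* Run the attached XDP program on a received buffer.  Returns false when
 * the packet was not consumed (XDP_PASS) so the caller continues building
 * an skb; otherwise the buffer is transmitted, redirected, or dropped here.
 */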
static bool ionic_run_xdp(struct ionic_rx_stats *stats,
			  struct net_device *netdev,
			  struct bpf_prog *xdp_prog,
			  struct ionic_queue *rxq,
			  struct ionic_buf_info *buf_info,
			  int len)
{
	u32 xdp_action = XDP_ABORTED;
	struct xdp_buff xdp_buf;
	struct ionic_queue *txq;
	struct netdev_queue *nq;
	struct xdp_frame *xdpf;
	int remain_len;
	int frag_len;
	int err = 0;

	xdp_init_buff(&xdp_buf, IONIC_PAGE_SIZE, rxq->xdp_rxq_info);
	frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
	xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info),
			 XDP_PACKET_HEADROOM, frag_len, false);

	dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info),
				      XDP_PACKET_HEADROOM, len,
				      DMA_FROM_DEVICE);

	prefetchw(&xdp_buf.data_hard_start);

	/* We limit MTU size to one buffer if !xdp_has_frags, so
	 * if the recv len is bigger than one buffer
	 * then we know we have frag info to gather
	 */
	remain_len = len - frag_len;
	if (remain_len) {
		struct skb_shared_info *sinfo;
		struct ionic_buf_info *bi;
		skb_frag_t *frag;

		bi = buf_info;
		sinfo = xdp_get_shared_info_from_buff(&xdp_buf);
		sinfo->nr_frags = 0;
		sinfo->xdp_frags_size = 0;
		xdp_buff_set_frags_flag(&xdp_buf);

		do {
			if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) {
				err = -ENOSPC;
				goto out_xdp_abort;
			}

			frag = &sinfo->frags[sinfo->nr_frags];
			sinfo->nr_frags++;
			bi++;
			frag_len = min_t(u16, remain_len, ionic_rx_buf_size(bi));
			dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(bi),
						      0, frag_len, DMA_FROM_DEVICE);
			skb_frag_fill_page_desc(frag, bi->page, 0, frag_len);
			sinfo->xdp_frags_size += frag_len;
			remain_len -= frag_len;

			if (page_is_pfmemalloc(bi->page))
				xdp_buff_set_frag_pfmemalloc(&xdp_buf);
		} while (remain_len > 0);
	}

	xdp_action = bpf_prog_run_xdp(xdp_prog, &xdp_buf);

	switch (xdp_action) {
	case XDP_PASS:
		stats->xdp_pass++;
		return false;  /* false = we didn't consume the packet */

	case XDP_DROP:
		ionic_rx_page_free(rxq, buf_info);
		stats->xdp_drop++;
		break;

	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(&xdp_buf);
		if (!xdpf)
			goto out_xdp_abort;

		txq = rxq->partner;
		nq = netdev_get_tx_queue(netdev, txq->index);
		__netif_tx_lock(nq, smp_processor_id());
		txq_trans_cond_update(nq);

		if (netif_tx_queue_stopped(nq) ||
		    !netif_txq_maybe_stop(q_to_ndq(netdev, txq),
					  ionic_q_space_avail(txq),
					  1, 1)) {
			__netif_tx_unlock(nq);
			goto out_xdp_abort;
		}

		dma_unmap_page(rxq->dev, buf_info->dma_addr,
			       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);

		err = ionic_xdp_post_frame(txq, xdpf, XDP_TX,
					   buf_info->page,
					   buf_info->page_offset,
					   true);
		__netif_tx_unlock(nq);
		if (err) {
			netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
			goto out_xdp_abort;
		}
		stats->xdp_tx++;

		/* the Tx completion will free the buffers */
		break;

	case XDP_REDIRECT:
		/* unmap the pages before handing them to a different device */
		dma_unmap_page(rxq->dev, buf_info->dma_addr,
			       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);

		err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog);
		if (err) {
			netdev_dbg(netdev, "xdp_do_redirect err %d\n", err);
			goto out_xdp_abort;
		}
		buf_info->page = NULL;
		rxq->xdp_flush = true;
		stats->xdp_redirect++;
		break;

	case XDP_ABORTED:
	default:
		goto out_xdp_abort;
	}

	return true;

out_xdp_abort:
	trace_xdp_exception(netdev, xdp_prog, xdp_action);
	ionic_rx_page_free(rxq, buf_info);
	stats->xdp_aborted++;

	return true;
}
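/* Per-descriptor Rx completion handling: run XDP if a program is attached,
 * then build an skb (copybreak or frag-based), fill in offload metadata
 * (RSS hash, checksum, VLAN, hardware timestamp), and hand it to GRO.
 */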
static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct ionic_rx_stats *stats;
	struct ionic_rxq_comp *comp;
	struct bpf_prog *xdp_prog;
	unsigned int headroom;
	struct sk_buff *skb;
	u16 len;

	comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp);

	stats = q_to_rx_stats(q);

	if (comp->status) {
		stats->dropped++;
		return;
	}

	len = le16_to_cpu(comp->len);
	stats->pkts++;
	stats->bytes += len;

	xdp_prog = READ_ONCE(q->lif->xdp_prog);
	if (xdp_prog &&
	    ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len))
		return;

	headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
	if (len <= q->lif->rx_copybreak)
		skb = ionic_rx_copybreak(netdev, q, desc_info,
					 headroom, len, !!xdp_prog);
	else
		skb = ionic_rx_frags(netdev, q, desc_info, headroom, len,
				     comp->num_sg_elems, !!xdp_prog);

	if (unlikely(!skb)) {
		stats->dropped++;
		return;
	}

	skb_record_rx_queue(skb, q->index);

	if (likely(netdev->features & NETIF_F_RXHASH)) {
		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
		case IONIC_PKT_TYPE_IPV4:
		case IONIC_PKT_TYPE_IPV6:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L3);
			break;
		case IONIC_PKT_TYPE_IPV4_TCP:
		case IONIC_PKT_TYPE_IPV6_TCP:
		case IONIC_PKT_TYPE_IPV4_UDP:
		case IONIC_PKT_TYPE_IPV6_UDP:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L4);
			break;
		}
	}

	if (likely(netdev->features & NETIF_F_RXCSUM) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
		stats->csum_complete++;
	} else {
		stats->csum_none++;
	}

	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
		stats->csum_error++;

	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(comp->vlan_tci));
		stats->vlan_stripped++;
	}

	if (unlikely(q->features & IONIC_RXQ_F_HWSTAMP)) {
		__le64 *cq_desc_hwstamp;
		u64 hwstamp;

		cq_desc_hwstamp =
			cq_info->cq_desc +
			qcq->cq.desc_size -
			sizeof(struct ionic_rxq_comp) -
			IONIC_HWSTAMP_CQ_NEGOFFSET;

		hwstamp = le64_to_cpu(*cq_desc_hwstamp);

		if (hwstamp != IONIC_HWSTAMP_INVALID) {
			skb_hwtstamps(skb)->hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
			stats->hwstamp_valid++;
		} else {
			stats->hwstamp_invalid++;
		}
	}

	if (len <= q->lif->rx_copybreak)
		napi_gro_receive(&qcq->napi, skb);
	else
		napi_gro_frags(&qcq->napi);
}
bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_comp *comp;

	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);

	if (!color_match(comp->pkt_type_color, cq->done_color))
		return false;

	/* check for empty queue */
	if (q->tail_idx == q->head_idx)
		return false;

	if (q->tail_idx != le16_to_cpu(comp->comp_index))
		return false;

	desc_info = &q->info[q->tail_idx];
	q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

	/* clean the related q entry, only one per qc completion */
	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);

	desc_info->cb = NULL;
	desc_info->cb_arg = NULL;

	return true;
}
static inline void ionic_write_cmb_desc(struct ionic_queue *q,
					void __iomem *cmb_desc,
					void *desc)
{
	if (q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS)
		memcpy_toio(cmb_desc, desc, q->desc_size);
}
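/* Refill the Rx ring: for each open slot, populate the main descriptor and
 * any SG elements, allocating new pages as needed, then ring the doorbell
 * and re-arm the doorbell deadline and napi deadline timer.
 */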
void ionic_rx_fill(struct ionic_queue *q)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *sg_elem;
	struct ionic_buf_info *buf_info;
	unsigned int fill_threshold;
	struct ionic_rxq_desc *desc;
	unsigned int remain_len;
	unsigned int frag_len;
	unsigned int nfrags;
	unsigned int n_fill;
	unsigned int len;
	unsigned int i;
	unsigned int j;

	n_fill = ionic_q_space_avail(q);

	fill_threshold = min_t(unsigned int, IONIC_RX_FILL_THRESHOLD,
			       q->num_descs / IONIC_RX_FILL_DIV);
	if (n_fill < fill_threshold)
		return;

	len = netdev->mtu + VLAN_ETH_HLEN;

	for (i = n_fill; i; i--) {
		unsigned int headroom;
		unsigned int buf_len;

		nfrags = 0;
		remain_len = len;
		desc_info = &q->info[q->head_idx];
		desc = desc_info->desc;
		buf_info = &desc_info->bufs[0];

		if (!buf_info->page) { /* alloc a new buffer? */
			if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
				desc->addr = 0;
				desc->len = 0;
				return;
			}
		}

		/* fill main descriptor - buf[0]
		 * XDP uses space in the first buffer, so account for
		 * head room, tail room, and ip header in the first frag size.
		 */
		headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
		if (q->xdp_rxq_info)
			buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN;
		else
			buf_len = ionic_rx_buf_size(buf_info);
		frag_len = min_t(u16, len, buf_len);

		desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info) + headroom);
		desc->len = cpu_to_le16(frag_len);
		remain_len -= frag_len;
		buf_info++;
		nfrags++;

		/* fill sg descriptors - buf[1..n] */
		sg_desc = desc_info->sg_desc;
		for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) {
			sg_elem = &sg_desc->elems[j];
			if (!buf_info->page) { /* alloc a new sg buffer? */
				if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
					sg_elem->addr = 0;
					sg_elem->len = 0;
					return;
				}
			}

			sg_elem->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info));
			frag_len = min_t(u16, remain_len, ionic_rx_buf_size(buf_info));
			sg_elem->len = cpu_to_le16(frag_len);
			remain_len -= frag_len;
			buf_info++;
			nfrags++;
		}

		/* clear end sg element as a sentinel */
		if (j < q->max_sg_elems) {
			sg_elem = &sg_desc->elems[j];
			memset(sg_elem, 0, sizeof(*sg_elem));
		}

		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
		desc_info->nbufs = nfrags;

		ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);

		ionic_rxq_post(q, false, ionic_rx_clean, NULL);
	}

	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
			 q->dbval | q->head_idx);

	q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
	q->dbell_jiffies = jiffies;

	mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
		  jiffies + IONIC_NAPI_DEADLINE);
}
void ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	unsigned int i, j;

	for (i = 0; i < q->num_descs; i++) {
		desc_info = &q->info[i];
		for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
			buf_info = &desc_info->bufs[j];
			if (buf_info->page)
				ionic_rx_page_free(q, buf_info);
		}

		desc_info->nbufs = 0;
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}

	q->head_idx = 0;
	q->tail_idx = 0;
}
static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
{
	struct dim_sample dim_sample;
	struct ionic_lif *lif;
	unsigned int qi;
	u64 pkts, bytes;

	if (!qcq->intr.dim_coal_hw)
		return;

	lif = qcq->q.lif;
	qi = qcq->cq.bound_q->index;

	switch (napi_mode) {
	case IONIC_LIF_F_TX_DIM_INTR:
		pkts = lif->txqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes;
		break;
	case IONIC_LIF_F_RX_DIM_INTR:
		pkts = lif->rxqstats[qi].pkts;
		bytes = lif->rxqstats[qi].bytes;
		break;
	default:
		pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes;
		break;
	}

	dim_update_sample(qcq->cq.bound_intr->rearm_count,
			  pkts, bytes, &dim_sample);

	net_dim(&qcq->dim, dim_sample);
}
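/* NAPI poll handler for a Tx-only interrupt: service Tx completions, update
 * DIM and interrupt credits when the poll completes, and if nothing was
 * processed poke the doorbell and re-arm the napi deadline timer while work
 * remains outstanding.
 */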
int ionic_tx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_tx_cq_service(cq, budget);

	if (unlikely(!budget))
		return budget;

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	if (!work_done && ionic_txq_poke_doorbell(&qcq->q))
		mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);

	return work_done;
}
static void ionic_xdp_do_flush(struct ionic_cq *cq)
{
	if (cq->bound_q->xdp_flush) {
		xdp_do_flush();
		cq->bound_q->xdp_flush = false;
	}
}
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	if (unlikely(!budget))
		return budget;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_rx_service, NULL, NULL);

	ionic_rx_fill(cq->bound_q);

	ionic_xdp_do_flush(cq);
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	if (!work_done && ionic_rxq_poke_doorbell(&qcq->q))
		mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);

	return work_done;
}
int ionic_txrx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *rxqcq = napi_to_qcq(napi);
	struct ionic_cq *rxcq = napi_to_cq(napi);
	unsigned int qi = rxcq->bound_q->index;
	struct ionic_qcq *txqcq;
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	struct ionic_cq *txcq;
	bool resched = false;
	u32 rx_work_done = 0;
	u32 tx_work_done = 0;
	u32 flags = 0;

	lif = rxcq->bound_q->lif;
	idev = &lif->ionic->idev;
	txqcq = lif->txqcqs[qi];
	txcq = &lif->txqcqs[qi]->cq;

	tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT);

	if (unlikely(!budget))
		return budget;

	rx_work_done = ionic_cq_service(rxcq, budget,
					ionic_rx_service, NULL, NULL);

	ionic_rx_fill(rxcq->bound_q);

	ionic_xdp_do_flush(rxcq);
	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
		ionic_dim_update(rxqcq, 0);
		flags |= IONIC_INTR_CRED_UNMASK;
		rxcq->bound_intr->rearm_count++;
	}

	if (rx_work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
				   tx_work_done + rx_work_done, flags);
	}

	if (!rx_work_done && ionic_rxq_poke_doorbell(&rxqcq->q))
		resched = true;
	if (!tx_work_done && ionic_txq_poke_doorbell(&txqcq->q))
		resched = true;
	if (resched)
		mod_timer(&rxqcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);

	return rx_work_done;
}
static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
				      void *data, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     dev_name(dev), q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}
static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
				    const skb_frag_t *frag,
				    size_t offset, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;

	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
				     dev_name(dev), q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}
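/* Map the skb head and all of its fragments for DMA, recording each mapping
 * in the descriptor's buf_info array.  On a mapping failure, any mappings
 * made so far are unwound before returning an error.
 */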
static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
			    struct ionic_desc_info *desc_info)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;
	unsigned int nfrags;
	skb_frag_t *frag;
	int frag_idx;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr)) {
		stats->dma_map_err++;
		return -EIO;
	}
	buf_info->dma_addr = dma_addr;
	buf_info->len = skb_headlen(skb);
	buf_info++;

	frag = skb_shinfo(skb)->frags;
	nfrags = skb_shinfo(skb)->nr_frags;
	for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
		dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
		if (dma_mapping_error(dev, dma_addr)) {
			stats->dma_map_err++;
			goto dma_fail;
		}
		buf_info->dma_addr = dma_addr;
		buf_info->len = skb_frag_size(frag);
		buf_info++;
	}

	desc_info->nbufs = 1 + nfrags;

	return 0;

dma_fail:
	/* unwind the frag mappings and the head mapping */
	while (frag_idx > 0) {
		frag_idx--;
		buf_info--;
		dma_unmap_page(dev, buf_info->dma_addr,
			       buf_info->len, DMA_TO_DEVICE);
	}
	dma_unmap_single(dev, buf_info->dma_addr, buf_info->len, DMA_TO_DEVICE);
	return -EIO;
}
static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
				     struct ionic_desc_info *desc_info)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct device *dev = q->dev;
	unsigned int i;

	if (!desc_info->nbufs)
		return;

	dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
			 buf_info->len, DMA_TO_DEVICE);
	buf_info++;
	for (i = 1; i < desc_info->nbufs; i++, buf_info++)
		dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
			       buf_info->len, DMA_TO_DEVICE);

	desc_info->nbufs = 0;
}
static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct sk_buff *skb = cb_arg;

	if (desc_info->xdpf) {
		ionic_xdp_tx_desc_clean(q->partner, desc_info);
		stats->clean++;

		if (unlikely(__netif_subqueue_stopped(q->lif->netdev, q->index)))
			netif_wake_subqueue(q->lif->netdev, q->index);

		return;
	}

	ionic_tx_desc_unmap_bufs(q, desc_info);

	if (!skb)
		return;

	if (unlikely(ionic_txq_hwstamp_enabled(q))) {
		if (cq_info) {
			struct skb_shared_hwtstamps hwts = {};
			__le64 *cq_desc_hwstamp;
			u64 hwstamp;

			cq_desc_hwstamp =
				cq_info->cq_desc +
				qcq->cq.desc_size -
				sizeof(struct ionic_txq_comp) -
				IONIC_HWSTAMP_CQ_NEGOFFSET;

			hwstamp = le64_to_cpu(*cq_desc_hwstamp);

			if (hwstamp != IONIC_HWSTAMP_INVALID) {
				hwts.hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);

				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
				skb_tstamp_tx(skb, &hwts);

				stats->hwstamp_valid++;
			} else {
				stats->hwstamp_invalid++;
			}
		}
	}

	desc_info->bytes = skb->len;
	stats->clean++;

	napi_consume_skb(skb, 1);
}
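/* Process one Tx completion: clean every descriptor it covers (there may be
 * several queue entries per completion) and accumulate the packet and byte
 * counts for the caller's BQL accounting.
 */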
static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info,
			     unsigned int *total_pkts, unsigned int *total_bytes)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	struct ionic_txq_comp *comp;
	unsigned int bytes = 0;
	unsigned int pkts = 0;
	u16 index;

	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);

	if (!color_match(comp->color, cq->done_color))
		return false;

	/* clean the related q entries, there could be
	 * several q entries completed for each cq completion
	 */
	do {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		index = q->tail_idx;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	} while (index != le16_to_cpu(comp->comp_index));

	(*total_pkts) += pkts;
	(*total_bytes) += bytes;

	return true;
}
unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
{
	struct ionic_cq_info *cq_info;
	unsigned int work_done = 0;
	unsigned int bytes = 0;
	unsigned int pkts = 0;

	if (work_to_do == 0)
		return 0;

	cq_info = &cq->info[cq->tail_idx];
	while (ionic_tx_service(cq, cq_info, &pkts, &bytes)) {
		if (cq->tail_idx == cq->num_descs - 1)
			cq->done_color = !cq->done_color;
		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
		cq_info = &cq->info[cq->tail_idx];

		if (++work_done >= work_to_do)
			break;
	}

	if (work_done) {
		struct ionic_queue *q = cq->bound_q;

		if (likely(!ionic_txq_hwstamp_enabled(q)))
			netif_txq_completed_wake(q_to_ndq(q->lif->netdev, q),
						 pkts, bytes,
						 ionic_q_space_avail(q),
						 IONIC_TSO_DESCS_NEEDED);
	}

	return work_done;
}
void ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_tx_cq_service(cq, cq->num_descs);
	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}
void ionic_tx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	int bytes = 0;
	int pkts = 0;

	/* walk the not completed tx entries, if any */
	while (q->head_idx != q->tail_idx) {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}

	if (likely(!ionic_txq_hwstamp_enabled(q))) {
		struct netdev_queue *ndq = q_to_ndq(q->lif->netdev, q);

		netdev_tx_completed_queue(ndq, pkts, bytes);
		netdev_tx_reset_queue(ndq);
	}
}
static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}
static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_v6_gso_csum_prep(skb);
	}

	return 0;
}
static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q,
			      struct ionic_desc_info *desc_info,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	struct ionic_txq_desc *desc = desc_info->desc;
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);

	if (start) {
		skb_tx_timestamp(skb);
		if (likely(!ionic_txq_hwstamp_enabled(q)))
			netdev_tx_sent_queue(q_to_ndq(netdev, q), skb->len);
		ionic_txq_post(q, false, ionic_tx_clean, skb);
	} else {
		ionic_txq_post(q, done, NULL, NULL);
	}
}
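/* Build the descriptor chain for a TSO skb: walk the mapped buffers one mss
 * sized segment at a time, posting a TSO descriptor (plus SG elements) per
 * segment; the device uses hdr_len and mss to generate the packets.
 */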
static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
			struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int chunk_len;
	unsigned int frag_rem;
	unsigned int tso_rem;
	unsigned int seg_rem;
	dma_addr_t desc_addr;
	dma_addr_t frag_addr;
	unsigned int hdrlen;
	unsigned int len;
	unsigned int mss;
	bool start, done;
	bool outer_csum;
	bool has_vlan;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool encap;
	int err;

	desc_info = &q->info[q->head_idx];
	buf_info = desc_info->bufs;

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	len = skb->len;
	mss = skb_shinfo(skb)->gso_size;
	outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
						   SKB_GSO_GRE_CSUM |
						   SKB_GSO_IPXIP4 |
						   SKB_GSO_IPXIP6 |
						   SKB_GSO_UDP_TUNNEL |
						   SKB_GSO_UDP_TUNNEL_CSUM));
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */
	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err) {
		/* clean up mapping from ionic_tx_map_skb */
		ionic_tx_desc_unmap_bufs(q, desc_info);
		return err;
	}

	if (encap)
		hdrlen = skb_inner_tcp_all_headers(skb);
	else
		hdrlen = skb_tcp_all_headers(skb);

	tso_rem = len;
	seg_rem = min(tso_rem, hdrlen + mss);

	frag_addr = 0;
	frag_rem = 0;

	start = true;

	while (tso_rem > 0) {
		desc = NULL;
		elem = NULL;
		desc_addr = 0;
		desc_len = 0;
		desc_nsge = 0;
		/* use fragments until we have enough to post a single descriptor */
		while (seg_rem > 0) {
			/* if the fragment is exhausted then move to the next one */
			if (frag_rem == 0) {
				/* grab the next fragment */
				frag_addr = buf_info->dma_addr;
				frag_rem = buf_info->len;
				buf_info++;
			}
			chunk_len = min(frag_rem, seg_rem);
			if (!desc) {
				/* fill main descriptor */
				desc = desc_info->txq_desc;
				elem = desc_info->txq_sg_desc->elems;
				desc_addr = frag_addr;
				desc_len = chunk_len;
			} else {
				/* fill sg descriptor */
				elem->addr = cpu_to_le64(frag_addr);
				elem->len = cpu_to_le16(chunk_len);
				elem++;
				desc_nsge++;
			}
			frag_addr += chunk_len;
			frag_rem -= chunk_len;
			tso_rem -= chunk_len;
			seg_rem -= chunk_len;
		}
		seg_rem = min(tso_rem, mss);
		done = (tso_rem == 0);
		/* post descriptor */
		ionic_tx_tso_post(netdev, q, desc_info, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss, outer_csum, vlan_tci, has_vlan,
				  start, done);
		start = false;
		/* Buffer information is stored with the first tso descriptor */
		desc_info = &q->info[q->head_idx];
		desc_info->nbufs = 0;
	}

	stats->pkts += DIV_ROUND_UP(len - hdrlen, mss);
	stats->bytes += len;
	stats->tso++;
	stats->tso_bytes = len;

	return 0;
}
static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
			       struct ionic_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = desc_info->txq_desc;
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
	desc->csum_offset = cpu_to_le16(skb->csum_offset);

	ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);

	if (skb_csum_is_sctp(skb))
		stats->crc32_csum++;
	else
		stats->csum++;
}
static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
				  struct ionic_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = desc_info->txq_desc;
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = 0;
	desc->csum_offset = 0;

	ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);

	stats->csum_none++;
}
static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
			       struct ionic_desc_info *desc_info)
{
	struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
	struct ionic_buf_info *buf_info = &desc_info->bufs[1];
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	unsigned int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) {
		elem->addr = cpu_to_le64(buf_info->dma_addr);
		elem->len = cpu_to_le16(buf_info->len);
	}

	stats->frags += skb_shinfo(skb)->nr_frags;
}
static int ionic_tx(struct net_device *netdev, struct ionic_queue *q,
		    struct sk_buff *skb)
{
	struct ionic_desc_info *desc_info = &q->info[q->head_idx];
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool ring_dbell = true;

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	/* set up the initial descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		ionic_tx_calc_csum(q, skb, desc_info);
	else
		ionic_tx_calc_no_csum(q, skb, desc_info);

	/* add frags */
	ionic_tx_skb_frags(q, skb, desc_info);

	skb_tx_timestamp(skb);
	stats->pkts++;
	stats->bytes += skb->len;

	if (likely(!ionic_txq_hwstamp_enabled(q))) {
		struct netdev_queue *ndq = q_to_ndq(netdev, q);

		if (unlikely(!ionic_q_has_space(q, MAX_SKB_FRAGS + 1)))
			netif_tx_stop_queue(ndq);
		ring_dbell = __netdev_tx_sent_queue(ndq, skb->len,
						    netdev_xmit_more());
	}
	ionic_txq_post(q, ring_dbell, ionic_tx_clean, skb);

	return 0;
}
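/* Estimate how many Tx descriptors an skb needs, and linearize the skb when
 * it (or any one TSO segment of it) would need more SG entries than the
 * queue supports.  Returns the descriptor count or a negative errno.
 */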
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;
	bool too_many_frags = false;
	skb_frag_t *frag;
	int desc_bufs;
	int chunk_len;
	int frag_rem;
	int tso_rem;
	int seg_rem;
	bool encap;
	int hdrlen;
	int ndescs;
	int err;

	/* Each desc is mss long max, so a descriptor for each gso_seg */
	if (skb_is_gso(skb)) {
		ndescs = skb_shinfo(skb)->gso_segs;
		if (!nr_frags)
			return ndescs;
	} else {
		ndescs = 1;
		if (!nr_frags)
			return ndescs;

		if (unlikely(nr_frags > q->max_sg_elems)) {
			too_many_frags = true;
			goto linearize;
		}

		return ndescs;
	}

	/* We need to scan the skb to be sure that none of the MTU sized
	 * packets in the TSO will require more sgs per descriptor than we
	 * can support. We loop through the frags, add up the lengths for
	 * a packet, and count the number of sgs used per packet.
	 */
	tso_rem = skb->len;
	frag = skb_shinfo(skb)->frags;
	encap = skb->encapsulation;

	/* start with just hdr in first part of first descriptor */
	if (encap)
		hdrlen = skb_inner_tcp_all_headers(skb);
	else
		hdrlen = skb_tcp_all_headers(skb);
	seg_rem = min_t(int, tso_rem, hdrlen + skb_shinfo(skb)->gso_size);
	frag_rem = hdrlen;

	while (tso_rem > 0) {
		desc_bufs = 0;
		while (seg_rem > 0) {
			desc_bufs++;

			/* We add the +1 because we can take buffers for one
			 * more than we have SGs: one for the initial desc data
			 * in addition to the SG segments that might follow.
			 */
			if (desc_bufs > q->max_sg_elems + 1) {
				too_many_frags = true;
				goto linearize;
			}

			if (frag_rem == 0) {
				frag_rem = skb_frag_size(frag);
				frag++;
			}
			chunk_len = min(frag_rem, seg_rem);
			frag_rem -= chunk_len;
			tso_rem -= chunk_len;
			seg_rem -= chunk_len;
		}

		seg_rem = min_t(int, tso_rem, skb_shinfo(skb)->gso_size);
	}

linearize:
	if (too_many_frags) {
		struct ionic_tx_stats *stats = q_to_tx_stats(q);

		err = skb_linearize(skb);
		if (err)
			return err;
		stats->linearize++;
	}

	return ndescs;
}
static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int err, ndescs;

	/* Does not stop/start txq, because we post to a separate tx queue
	 * for timestamping, and if a packet can't be posted immediately to
	 * the timestamping queue, it is dropped.
	 */

	q = &lif->hwstamp_txq->q;
	ndescs = ionic_tx_descs_needed(q, skb);
	if (unlikely(ndescs < 0))
		goto err_out_drop;

	if (unlikely(!ionic_q_has_space(q, ndescs)))
		goto err_out_drop;

	skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP;
	if (skb_is_gso(skb))
		err = ionic_tx_tso(netdev, q, skb);
	else
		err = ionic_tx(netdev, q, skb);

	if (err)
		goto err_out_drop;

	return NETDEV_TX_OK;

err_out_drop:
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int ndescs;
	int err;

	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		if (lif->hwstamp_txq && lif->phc->ts_config_tx_mode)
			return ionic_start_hwstamp_xmit(skb, netdev);

	if (unlikely(queue_index >= lif->nxqs))
		queue_index = 0;
	q = &lif->txqcqs[queue_index]->q;

	ndescs = ionic_tx_descs_needed(q, skb);
	if (ndescs < 0)
		goto err_out_drop;

	if (!netif_txq_maybe_stop(q_to_ndq(netdev, q),
				  ionic_q_space_avail(q),
				  ndescs, ndescs))
		return NETDEV_TX_BUSY;

	if (skb_is_gso(skb))
		err = ionic_tx_tso(netdev, q, skb);
	else
		err = ionic_tx(netdev, q, skb);

	if (err)
		goto err_out_drop;

	return NETDEV_TX_OK;

err_out_drop:
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}