1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
4 #include <linux/ethtool.h>
5 #include <linux/printk.h>
6 #include <linux/dynamic_debug.h>
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/if_vlan.h>
10 #include <linux/rtnetlink.h>
11 #include <linux/interrupt.h>
12 #include <linux/pci.h>
13 #include <linux/cpumask.h>
14 #include <linux/crash_dump.h>
15 #include <linux/vmalloc.h>
18 #include "ionic_bus.h"
19 #include "ionic_dev.h"
20 #include "ionic_lif.h"
21 #include "ionic_txrx.h"
22 #include "ionic_ethtool.h"
23 #include "ionic_debugfs.h"
/* Per-queue-type descriptor-format version the driver supports; the
 * device and driver negotiate down to the highest common version.
 * NOTE(review): the comment terminators and the closing "};" of this
 * table appear to have been elided from this view -- confirm against
 * the upstream file before building.
 */
25 /* queuetype support level */
26 static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
27 [IONIC_QTYPE_ADMINQ] = 0, /* 0 = Base version with CQ support */
28 [IONIC_QTYPE_NOTIFYQ] = 0, /* 0 = Base version */
29 [IONIC_QTYPE_RXQ] = 2, /* 0 = Base version with CQ+SG support
30 * 2 = ... with CMB rings
32 [IONIC_QTYPE_TXQ] = 3, /* 0 = Base version with CQ+SG support
33 * 1 = ... with Tx SG version 1
34 * 3 = ... with CMB rings
/* Forward declarations for static helpers defined later in this file. */
38 static void ionic_link_status_check(struct ionic_lif *lif);
39 static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
40 static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
41 static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
43 static void ionic_txrx_deinit(struct ionic_lif *lif);
44 static int ionic_txrx_init(struct ionic_lif *lif);
45 static int ionic_start_queues(struct ionic_lif *lif);
46 static void ionic_stop_queues(struct ionic_lif *lif);
47 static void ionic_lif_queue_identify(struct ionic_lif *lif);
49 static int ionic_xdp_queues_config(struct ionic_lif *lif);
50 static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q);
52 static void ionic_dim_work(struct work_struct *work)
54 struct dim *dim = container_of(work, struct dim, work);
55 struct ionic_intr_info *intr;
56 struct dim_cq_moder cur_moder;
57 struct ionic_qcq *qcq;
58 struct ionic_lif *lif;
61 cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
62 qcq = container_of(dim, struct ionic_qcq, dim);
64 new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec);
65 new_coal = new_coal ? new_coal : 1;
68 if (intr->dim_coal_hw != new_coal) {
69 intr->dim_coal_hw = new_coal;
71 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
72 intr->index, intr->dim_coal_hw);
75 dim->state = DIM_START_MEASURE;
/* Deferred-work handler: pop one entry from the lif's deferred list and
 * dispatch it by type (rx mode refresh, link status check, fw reset
 * up/down handling).  On a fw-down event, re-arm the watchdog right away
 * so we notice the FW coming back without waiting a full cycle.
 * NOTE(review): the switch statement, break statements, loop/requeue
 * logic, and kfree of the work item appear elided from this view --
 * confirm against the upstream file.
 */
78 static void ionic_lif_deferred_work(struct work_struct *work)
80 struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
81 struct ionic_deferred *def = &lif->deferred;
82 struct ionic_deferred_work *w = NULL;
85 spin_lock_bh(&def->lock);
86 if (!list_empty(&def->list)) {
87 w = list_first_entry(&def->list,
88 struct ionic_deferred_work, list);
91 spin_unlock_bh(&def->lock);
97 case IONIC_DW_TYPE_RX_MODE:
98 ionic_lif_rx_mode(lif);
100 case IONIC_DW_TYPE_LINK_STATUS:
101 ionic_link_status_check(lif);
103 case IONIC_DW_TYPE_LIF_RESET:
105 ionic_lif_handle_fw_up(lif);
107 ionic_lif_handle_fw_down(lif);
109 /* Fire off another watchdog to see
110 * if the FW is already back rather than
111 * waiting another whole cycle
113 mod_timer(&lif->ionic->watchdog_timer, jiffies + 1);
124 void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
125 struct ionic_deferred_work *work)
127 spin_lock_bh(&def->lock);
128 list_add_tail(&work->list, &def->list);
129 spin_unlock_bh(&def->lock);
130 schedule_work(&def->work);
/* Sync the netdev carrier state with the FW-reported link status.
 * Only runs when a check has been requested (LINK_CHECK_REQUESTED bit);
 * refuses to raise carrier while the lif is marked BROKEN.  When link
 * comes up and the interface is running, (re)start the queues; when it
 * goes down, stop them.  The REQUESTED bit is cleared on completion.
 * NOTE(review): early returns, local declarations (link_status, link_up,
 * err), and if/else structure appear elided from this view -- confirm
 * against the upstream file.
 */
133 static void ionic_link_status_check(struct ionic_lif *lif)
135 struct net_device *netdev = lif->netdev;
139 if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
142 /* Don't put carrier back up if we're in a broken state */
143 if (test_bit(IONIC_LIF_F_BROKEN, lif->state)) {
144 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
148 link_status = le16_to_cpu(lif->info->status.link_status);
149 link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
154 if (netdev->flags & IFF_UP && netif_running(netdev)) {
155 mutex_lock(&lif->queue_lock);
156 err = ionic_start_queues(lif);
157 if (err && err != -EBUSY) {
159 "Failed to start queues: %d\n", err);
160 set_bit(IONIC_LIF_F_BROKEN, lif->state);
161 netif_carrier_off(lif->netdev);
163 mutex_unlock(&lif->queue_lock);
166 if (!err && !netif_carrier_ok(netdev)) {
167 ionic_port_identify(lif->ionic);
168 netdev_info(netdev, "Link up - %d Gbps\n",
169 le32_to_cpu(lif->info->status.link_speed) / 1000);
170 netif_carrier_on(netdev);
173 if (netif_carrier_ok(netdev)) {
174 lif->link_down_count++;
175 netdev_info(netdev, "Link down\n");
176 netif_carrier_off(netdev);
179 if (netdev->flags & IFF_UP && netif_running(netdev)) {
180 mutex_lock(&lif->queue_lock);
181 ionic_stop_queues(lif);
182 mutex_unlock(&lif->queue_lock);
186 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
/* Request a link status check.  The LINK_CHECK_REQUESTED bit ensures at
 * most one request is outstanding.  In the deferred (cannot-sleep) path
 * a work item is allocated with GFP_ATOMIC and queued; otherwise the
 * check runs inline.  On allocation failure the bit is cleared so a
 * later request can retry.
 * NOTE(review): the can_sleep branch structure and early return appear
 * elided from this view -- confirm against the upstream file.
 */
189 void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
191 struct ionic_deferred_work *work;
193 /* we only need one request outstanding at a time */
194 if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
198 work = kzalloc(sizeof(*work), GFP_ATOMIC);
200 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
204 work->type = IONIC_DW_TYPE_LINK_STATUS;
205 ionic_lif_deferred_enqueue(&lif->deferred, work);
207 ionic_link_status_check(lif);
211 static void ionic_napi_deadline(struct timer_list *timer)
213 struct ionic_qcq *qcq = container_of(timer, struct ionic_qcq, napi_deadline);
215 napi_schedule(&qcq->napi);
218 static irqreturn_t ionic_isr(int irq, void *data)
220 struct napi_struct *napi = data;
222 napi_schedule_irqoff(napi);
227 static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
229 struct ionic_intr_info *intr = &qcq->intr;
230 struct device *dev = lif->ionic->dev;
231 struct ionic_queue *q = &qcq->q;
235 name = lif->netdev->name;
237 name = dev_name(dev);
239 snprintf(intr->name, sizeof(intr->name),
240 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
242 return devm_request_irq(dev, intr->vector, ionic_isr,
243 0, intr->name, &qcq->napi);
246 static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
248 struct ionic *ionic = lif->ionic;
251 index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
252 if (index == ionic->nintrs) {
253 netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
254 __func__, index, ionic->nintrs);
258 set_bit(index, ionic->intrs);
259 ionic_intr_init(&ionic->idev, intr, index);
264 static void ionic_intr_free(struct ionic *ionic, int index)
266 if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
267 clear_bit(index, ionic->intrs);
/* Enable a queue: send IONIC_CMD_Q_CONTROL/IONIC_Q_ENABLE to the FW via
 * the adminq, then enable NAPI and, for interrupt-driven queues, clean
 * any stale interrupt credits, set the irq affinity hint, and unmask the
 * interrupt.
 * NOTE(review): local declarations (dev, ret), error-return handling
 * after adminq post, and closing braces appear elided from this view --
 * confirm against the upstream file.
 */
270 static int ionic_qcq_enable(struct ionic_qcq *qcq)
272 struct ionic_queue *q = &qcq->q;
273 struct ionic_lif *lif = q->lif;
274 struct ionic_dev *idev;
277 struct ionic_admin_ctx ctx = {
278 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
280 .opcode = IONIC_CMD_Q_CONTROL,
281 .lif_index = cpu_to_le16(lif->index),
283 .index = cpu_to_le32(q->index),
284 .oper = IONIC_Q_ENABLE,
289 idev = &lif->ionic->idev;
290 dev = lif->ionic->dev;
292 dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
293 ctx.cmd.q_control.index, ctx.cmd.q_control.type);
295 if (qcq->flags & IONIC_QCQ_F_INTR)
296 ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
298 ret = ionic_adminq_post_wait(lif, &ctx);
303 napi_enable(&qcq->napi);
305 if (qcq->flags & IONIC_QCQ_F_INTR) {
306 irq_set_affinity_hint(qcq->intr.vector,
307 &qcq->intr.affinity_mask);
308 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
309 IONIC_INTR_MASK_CLEAR);
/* Disable a queue: quiesce NAPI/interrupt activity first (cancel DIM
 * work, mask and synchronize the irq, clear the affinity hint, disable
 * NAPI, kill the deadline timer), then send IONIC_Q_DISABLE to the FW --
 * unless a prior FW error makes that pointless.
 * NOTE(review): the null-qcq guard structure, early returns, and the
 * q assignment appear elided from this view -- confirm against the
 * upstream file.
 */
315 static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err)
317 struct ionic_queue *q;
319 struct ionic_admin_ctx ctx = {
320 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
322 .opcode = IONIC_CMD_Q_CONTROL,
323 .oper = IONIC_Q_DISABLE,
328 netdev_err(lif->netdev, "%s: bad qcq\n", __func__);
334 if (qcq->flags & IONIC_QCQ_F_INTR) {
335 struct ionic_dev *idev = &lif->ionic->idev;
337 cancel_work_sync(&qcq->dim.work);
338 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
339 IONIC_INTR_MASK_SET);
340 synchronize_irq(qcq->intr.vector);
341 irq_set_affinity_hint(qcq->intr.vector, NULL);
342 napi_disable(&qcq->napi);
343 del_timer_sync(&qcq->napi_deadline);
346 /* If there was a previous fw communication error, don't bother with
347 * sending the adminq command and just return the same error value.
349 if (fw_err == -ETIMEDOUT || fw_err == -ENXIO)
352 ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
353 ctx.cmd.q_control.type = q->type;
354 ctx.cmd.q_control.index = cpu_to_le32(q->index);
355 dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
356 ctx.cmd.q_control.index, ctx.cmd.q_control.type);
358 return ionic_adminq_post_wait(lif, &ctx);
361 static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
363 struct ionic_dev *idev = &lif->ionic->idev;
368 if (!(qcq->flags & IONIC_QCQ_F_INITED))
371 if (qcq->flags & IONIC_QCQ_F_INTR) {
372 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
373 IONIC_INTR_MASK_SET);
374 netif_napi_del(&qcq->napi);
377 qcq->flags &= ~IONIC_QCQ_F_INITED;
380 static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
382 if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
385 irq_set_affinity_hint(qcq->intr.vector, NULL);
386 devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
387 qcq->intr.vector = 0;
388 ionic_intr_free(lif->ionic, qcq->intr.index);
389 qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
/* Free everything a qcq owns: debugfs entry, the q/cq/sg DMA rings, any
 * on-chip CMB ring mapping, the XDP rxq_info registration, and the
 * interrupt.  Pointers are cleared so a double-free is harmless.
 * NOTE(review): null guards, pointer-clearing assignments, and the
 * q.info/cq.info vfree calls appear elided from this view -- confirm
 * against the upstream file.
 */
392 static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
394 struct device *dev = lif->ionic->dev;
399 ionic_debugfs_del_qcq(qcq);
402 dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
407 if (qcq->cmb_q_base) {
408 iounmap(qcq->cmb_q_base);
409 ionic_put_cmb(lif, qcq->cmb_pgid, qcq->cmb_order);
412 qcq->cmb_q_base = NULL;
413 qcq->cmb_q_base_pa = 0;
417 dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
423 dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
428 ionic_xdp_unregister_rxq_info(&qcq->q);
429 ionic_qcq_intr_free(lif, qcq);
/* Free all of the lif's queue/completion-queue pairs and their tracking
 * arrays: notifyq, adminq (detached under the adminq lock first so the
 * NAPI path can't race with the free), and the tx/rx qcq and stats
 * arrays.
 * NOTE(review): null guards and closing braces appear elided from this
 * view -- confirm against the upstream file.
 */
437 void ionic_qcqs_free(struct ionic_lif *lif)
439 struct device *dev = lif->ionic->dev;
440 struct ionic_qcq *adminqcq;
441 unsigned long irqflags;
443 if (lif->notifyqcq) {
444 ionic_qcq_free(lif, lif->notifyqcq);
445 devm_kfree(dev, lif->notifyqcq);
446 lif->notifyqcq = NULL;
450 spin_lock_irqsave(&lif->adminq_lock, irqflags);
451 adminqcq = READ_ONCE(lif->adminqcq);
452 lif->adminqcq = NULL;
453 spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
455 ionic_qcq_free(lif, adminqcq);
456 devm_kfree(dev, adminqcq);
461 devm_kfree(dev, lif->rxqstats);
462 lif->rxqstats = NULL;
463 devm_kfree(dev, lif->rxqcqs);
468 devm_kfree(dev, lif->txqstats);
469 lif->txqstats = NULL;
470 devm_kfree(dev, lif->txqcqs);
475 static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
476 struct ionic_qcq *n_qcq)
478 n_qcq->intr.vector = src_qcq->intr.vector;
479 n_qcq->intr.index = src_qcq->intr.index;
480 n_qcq->napi_qcq = src_qcq->napi_qcq;
/* Allocate and wire up a qcq's interrupt: grab an interrupt slot, map it
 * to an irq vector via the bus, mask it, request the irq, and set an
 * affinity preference for the local NUMA node.  Qcqs without the INTR
 * flag just get the "not assigned" sentinel.
 * NOTE(review): early returns, goto targets/labels, and intermediate
 * error paths appear partially elided from this view -- confirm against
 * the upstream file.
 */
483 static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
487 if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
488 qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
492 err = ionic_intr_alloc(lif, &qcq->intr);
494 netdev_warn(lif->netdev, "no intr for %s: %d\n",
499 err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
501 netdev_warn(lif->netdev, "no vector for %s: %d\n",
503 goto err_out_free_intr;
505 qcq->intr.vector = err;
506 ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
507 IONIC_INTR_MASK_SET);
509 err = ionic_request_irq(lif, qcq);
511 netdev_warn(lif->netdev, "irq request failed %d\n", err);
512 goto err_out_free_intr;
515 /* try to get the irq on the local numa node first */
516 qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
517 dev_to_node(lif->ionic->dev));
518 if (qcq->intr.cpu != -1)
519 cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);
521 netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
525 ionic_intr_free(lif->ionic, qcq->intr.index);
/* Allocate a complete qcq: the qcq struct, q/cq info arrays, the
 * interrupt, and the descriptor rings.  NotifyQ gets q and cq in one
 * contiguous DMA allocation; other queues get separate q, cq, and
 * optional SG allocations, with an optional on-chip CMB ring for the
 * descriptors when IONIC_QCQ_F_CMB_RINGS is set.  On success *qcq is
 * set; on failure everything allocated so far is unwound via the
 * err_out_* labels.
 * NOTE(review): many lines (opening brace, some error labels/gotos,
 * null checks after allocations, the *qcq assignment and return) appear
 * elided from this view -- confirm against the upstream file before
 * relying on the unwind ordering shown here.
 */
530 static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
532 const char *name, unsigned int flags,
533 unsigned int num_descs, unsigned int desc_size,
534 unsigned int cq_desc_size,
535 unsigned int sg_desc_size,
536 unsigned int pid, struct ionic_qcq **qcq)
538 struct ionic_dev *idev = &lif->ionic->idev;
539 struct device *dev = lif->ionic->dev;
540 void *q_base, *cq_base, *sg_base;
541 dma_addr_t cq_base_pa = 0;
542 dma_addr_t sg_base_pa = 0;
543 dma_addr_t q_base_pa = 0;
544 struct ionic_qcq *new;
549 new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
551 netdev_err(lif->netdev, "Cannot allocate queue structure\n");
559 new->q.info = vcalloc(num_descs, sizeof(*new->q.info));
561 netdev_err(lif->netdev, "Cannot allocate queue info\n");
563 goto err_out_free_qcq;
567 new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;
569 err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
570 desc_size, sg_desc_size, pid);
572 netdev_err(lif->netdev, "Cannot initialize queue\n");
573 goto err_out_free_q_info;
576 err = ionic_alloc_qcq_interrupt(lif, new);
580 new->cq.info = vcalloc(num_descs, sizeof(*new->cq.info));
582 netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
584 goto err_out_free_irq;
587 err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
589 netdev_err(lif->netdev, "Cannot initialize completion queue\n");
590 goto err_out_free_cq_info;
593 if (flags & IONIC_QCQ_F_NOTIFYQ) {
596 /* q & cq need to be contiguous in NotifyQ, so alloc it all in q
597 * and don't alloc qc. We leave new->qc_size and new->qc_base
598 * as 0 to be sure we don't try to free it later.
600 q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
601 new->q_size = PAGE_SIZE + q_size +
602 ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
603 new->q_base = dma_alloc_coherent(dev, new->q_size,
604 &new->q_base_pa, GFP_KERNEL);
606 netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
608 goto err_out_free_cq_info;
610 q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
611 q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
612 ionic_q_map(&new->q, q_base, q_base_pa);
614 cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE);
615 cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
616 ionic_cq_map(&new->cq, cq_base, cq_base_pa);
617 ionic_cq_bind(&new->cq, &new->q);
619 /* regular DMA q descriptors */
620 new->q_size = PAGE_SIZE + (num_descs * desc_size);
621 new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
624 netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
626 goto err_out_free_cq_info;
628 q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
629 q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
630 ionic_q_map(&new->q, q_base, q_base_pa);
632 if (flags & IONIC_QCQ_F_CMB_RINGS) {
633 /* on-chip CMB q descriptors */
634 new->cmb_q_size = num_descs * desc_size;
635 new->cmb_order = order_base_2(new->cmb_q_size / PAGE_SIZE);
637 err = ionic_get_cmb(lif, &new->cmb_pgid, &new->cmb_q_base_pa,
640 netdev_err(lif->netdev,
641 "Cannot allocate queue order %d from cmb: err %d\n",
642 new->cmb_order, err);
646 new->cmb_q_base = ioremap_wc(new->cmb_q_base_pa, new->cmb_q_size);
647 if (!new->cmb_q_base) {
648 netdev_err(lif->netdev, "Cannot map queue from cmb\n");
649 ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
654 new->cmb_q_base_pa -= idev->phy_cmb_pages;
655 ionic_q_cmb_map(&new->q, new->cmb_q_base, new->cmb_q_base_pa);
658 /* cq DMA descriptors */
659 new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
660 new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
663 netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
667 cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
668 cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
669 ionic_cq_map(&new->cq, cq_base, cq_base_pa);
670 ionic_cq_bind(&new->cq, &new->q);
673 if (flags & IONIC_QCQ_F_SG) {
674 new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
675 new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
678 netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
680 goto err_out_free_cq;
682 sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
683 sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
684 ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
687 INIT_WORK(&new->dim.work, ionic_dim_work);
688 new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
695 dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
697 if (new->cmb_q_base) {
698 iounmap(new->cmb_q_base);
699 ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
701 dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
702 err_out_free_cq_info:
705 if (flags & IONIC_QCQ_F_INTR) {
706 devm_free_irq(dev, new->intr.vector, &new->napi);
707 ionic_intr_free(lif->ionic, new->intr.index);
712 devm_kfree(dev, new);
714 dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
/* Allocate the lif's fixed queues and tracking arrays: the adminq
 * (interrupt-driven), the notifyq (sharing the adminq interrupt, when
 * the device provides one), and the tx/rx qcq pointer and stats arrays
 * (stats arrays get one extra slot for the hwstamp queues).  On any
 * failure everything is unwound with ionic_qcqs_free().
 * NOTE(review): error checks/gotos after each allocation and the
 * success return appear elided from this view -- confirm against the
 * upstream file.
 */
718 static int ionic_qcqs_alloc(struct ionic_lif *lif)
720 struct device *dev = lif->ionic->dev;
724 flags = IONIC_QCQ_F_INTR;
725 err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
727 sizeof(struct ionic_admin_cmd),
728 sizeof(struct ionic_admin_comp),
729 0, lif->kern_pid, &lif->adminqcq);
732 ionic_debugfs_add_qcq(lif, lif->adminqcq);
734 if (lif->ionic->nnqs_per_lif) {
735 flags = IONIC_QCQ_F_NOTIFYQ;
736 err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
737 flags, IONIC_NOTIFYQ_LENGTH,
738 sizeof(struct ionic_notifyq_cmd),
739 sizeof(union ionic_notifyq_comp),
740 0, lif->kern_pid, &lif->notifyqcq);
743 ionic_debugfs_add_qcq(lif, lif->notifyqcq);
745 /* Let the notifyq ride on the adminq interrupt */
746 ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
750 lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
751 sizeof(*lif->txqcqs), GFP_KERNEL);
754 lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
755 sizeof(*lif->rxqcqs), GFP_KERNEL);
759 lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif + 1,
760 sizeof(*lif->txqstats), GFP_KERNEL);
763 lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif + 1,
764 sizeof(*lif->rxqstats), GFP_KERNEL);
771 ionic_qcqs_free(lif);
775 static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
779 qcq->cq.tail_idx = 0;
780 qcq->cq.done_color = 1;
781 memset(qcq->q_base, 0, qcq->q_size);
783 memset_io(qcq->cmb_q_base, 0, qcq->cmb_q_size);
784 memset(qcq->cq_base, 0, qcq->cq_size);
785 memset(qcq->sg_base, 0, qcq->sg_size);
/* Initialize a Tx queue with the FW via IONIC_CMD_Q_INIT: build the init
 * command from the queue's ring addresses (switching ring_base to the
 * CMB address for CMB rings), sanitize the rings, post the command, and
 * record the FW-assigned hw_type/hw_index/doorbell value.  With split
 * Tx/Rx interrupts the queue gets its own NAPI context and deadline
 * timer.
 * NOTE(review): parts of the flags initializer, error-return after the
 * adminq post, and the success return appear elided from this view --
 * confirm against the upstream file.
 */
788 static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
790 struct device *dev = lif->ionic->dev;
791 struct ionic_queue *q = &qcq->q;
792 struct ionic_cq *cq = &qcq->cq;
793 struct ionic_admin_ctx ctx = {
794 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
796 .opcode = IONIC_CMD_Q_INIT,
797 .lif_index = cpu_to_le16(lif->index),
799 .ver = lif->qtype_info[q->type].version,
800 .index = cpu_to_le32(q->index),
801 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
803 .intr_index = cpu_to_le16(qcq->intr.index),
804 .pid = cpu_to_le16(q->pid),
805 .ring_size = ilog2(q->num_descs),
806 .ring_base = cpu_to_le64(q->base_pa),
807 .cq_ring_base = cpu_to_le64(cq->base_pa),
808 .sg_ring_base = cpu_to_le64(q->sg_base_pa),
809 .features = cpu_to_le64(q->features),
814 if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
815 ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
816 ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
819 dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
820 dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
821 dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
822 dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
823 dev_dbg(dev, "txq_init.cq_ring_base 0x%llx\n", ctx.cmd.q_init.cq_ring_base);
824 dev_dbg(dev, "txq_init.sg_ring_base 0x%llx\n", ctx.cmd.q_init.sg_ring_base);
825 dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
826 dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
827 dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
829 ionic_qcq_sanitize(qcq);
831 err = ionic_adminq_post_wait(lif, &ctx);
835 q->hw_type = ctx.comp.q_init.hw_type;
836 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
837 q->dbval = IONIC_DBELL_QID(q->hw_index);
839 dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
840 dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
842 q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE;
843 q->dbell_jiffies = jiffies;
845 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
846 netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);
848 timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
851 qcq->flags |= IONIC_QCQ_F_INITED;
/* Initialize an Rx queue with the FW via IONIC_CMD_Q_INIT, mirroring the
 * Tx path: add the SG flag when appropriate, switch ring_base to the CMB
 * address for CMB rings, sanitize, post, and record the FW-assigned
 * hw_type/hw_index/doorbell.  The NAPI handler is ionic_rx_napi for
 * split interrupts, ionic_txrx_napi for the shared case.
 * NOTE(review): the SG-flag condition, error-return after the adminq
 * post, and the success return appear elided from this view -- confirm
 * against the upstream file.
 */
856 static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
858 struct device *dev = lif->ionic->dev;
859 struct ionic_queue *q = &qcq->q;
860 struct ionic_cq *cq = &qcq->cq;
861 struct ionic_admin_ctx ctx = {
862 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
864 .opcode = IONIC_CMD_Q_INIT,
865 .lif_index = cpu_to_le16(lif->index),
867 .ver = lif->qtype_info[q->type].version,
868 .index = cpu_to_le32(q->index),
869 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ),
870 .intr_index = cpu_to_le16(cq->bound_intr->index),
871 .pid = cpu_to_le16(q->pid),
872 .ring_size = ilog2(q->num_descs),
873 .ring_base = cpu_to_le64(q->base_pa),
874 .cq_ring_base = cpu_to_le64(cq->base_pa),
875 .sg_ring_base = cpu_to_le64(q->sg_base_pa),
876 .features = cpu_to_le64(q->features),
882 ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_SG);
884 if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
885 ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
886 ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
889 dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
890 dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
891 dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
892 dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
893 dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
894 dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
895 dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
897 ionic_qcq_sanitize(qcq);
899 err = ionic_adminq_post_wait(lif, &ctx);
903 q->hw_type = ctx.comp.q_init.hw_type;
904 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
905 q->dbval = IONIC_DBELL_QID(q->hw_index);
907 dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
908 dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
910 q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
911 q->dbell_jiffies = jiffies;
913 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
914 netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
916 netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);
919 timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
921 qcq->flags |= IONIC_QCQ_F_INITED;
/* Create the dedicated hardware-timestamp Tx queue (index one past the
 * normal tx range, riding on the adminq interrupt).  If the netdev is
 * running the queue is also initialized, and enabled when the lif is UP.
 * Idempotent: returns early if the queue already exists.  Failures after
 * allocation unwind through the err_* labels.
 * NOTE(review): early return, error checks/gotos, and the err_* labels
 * appear partially elided from this view -- confirm against the
 * upstream file.
 */
926 int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
928 unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
929 unsigned int txq_i, flags;
930 struct ionic_qcq *txq;
934 if (lif->hwstamp_txq)
937 features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP;
939 num_desc = IONIC_MIN_TXRX_DESC;
940 desc_sz = sizeof(struct ionic_txq_desc);
941 comp_sz = 2 * sizeof(struct ionic_txq_comp);
943 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
944 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz == sizeof(struct ionic_txq_sg_desc_v1))
945 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
947 sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
949 txq_i = lif->ionic->ntxqs_per_lif;
950 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
952 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
953 num_desc, desc_sz, comp_sz, sg_desc_sz,
954 lif->kern_pid, &txq);
958 txq->q.features = features;
960 ionic_link_qcq_interrupts(lif->adminqcq, txq);
961 ionic_debugfs_add_qcq(lif, txq);
963 lif->hwstamp_txq = txq;
965 if (netif_running(lif->netdev)) {
966 err = ionic_lif_txq_init(lif, txq);
970 if (test_bit(IONIC_LIF_F_UP, lif->state)) {
971 err = ionic_qcq_enable(txq);
980 ionic_lif_qcq_deinit(lif, txq);
982 lif->hwstamp_txq = NULL;
983 ionic_debugfs_del_qcq(txq);
984 ionic_qcq_free(lif, txq);
985 devm_kfree(lif->ionic->dev, txq);
/* Create the dedicated hardware-timestamp Rx queue, mirroring the Tx
 * variant: index one past the normal rx range, adminq interrupt, init
 * when running, fill and enable when the lif is UP.  Idempotent, with
 * unwind labels on failure.
 * NOTE(review): early return, error checks/gotos, and the err_* labels
 * appear partially elided from this view -- confirm against the
 * upstream file.
 */
990 int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
992 unsigned int num_desc, desc_sz, comp_sz, sg_desc_sz;
993 unsigned int rxq_i, flags;
994 struct ionic_qcq *rxq;
998 if (lif->hwstamp_rxq)
1001 features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;
1003 num_desc = IONIC_MIN_TXRX_DESC;
1004 desc_sz = sizeof(struct ionic_rxq_desc);
1005 comp_sz = 2 * sizeof(struct ionic_rxq_comp);
1006 sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);
1008 rxq_i = lif->ionic->nrxqs_per_lif;
1009 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;
1011 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
1012 num_desc, desc_sz, comp_sz, sg_desc_sz,
1013 lif->kern_pid, &rxq);
1017 rxq->q.features = features;
1019 ionic_link_qcq_interrupts(lif->adminqcq, rxq);
1020 ionic_debugfs_add_qcq(lif, rxq);
1022 lif->hwstamp_rxq = rxq;
1024 if (netif_running(lif->netdev)) {
1025 err = ionic_lif_rxq_init(lif, rxq);
1029 if (test_bit(IONIC_LIF_F_UP, lif->state)) {
1030 ionic_rx_fill(&rxq->q);
1031 err = ionic_qcq_enable(rxq);
1033 goto err_qcq_enable;
1040 ionic_lif_qcq_deinit(lif, rxq);
1042 lif->hwstamp_rxq = NULL;
1043 ionic_debugfs_del_qcq(rxq);
1044 ionic_qcq_free(lif, rxq);
1045 devm_kfree(lif->ionic->dev, rxq);
1050 int ionic_lif_config_hwstamp_rxq_all(struct ionic_lif *lif, bool rx_all)
1052 struct ionic_queue_params qparam;
1054 ionic_init_queue_params(lif, &qparam);
1057 qparam.rxq_features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP;
1059 qparam.rxq_features = 0;
1061 /* if we're not running, just set the values and return */
1062 if (!netif_running(lif->netdev)) {
1063 lif->rxq_features = qparam.rxq_features;
1067 return ionic_reconfigure_queues(lif, &qparam);
1070 int ionic_lif_set_hwstamp_txmode(struct ionic_lif *lif, u16 txstamp_mode)
1072 struct ionic_admin_ctx ctx = {
1073 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1074 .cmd.lif_setattr = {
1075 .opcode = IONIC_CMD_LIF_SETATTR,
1076 .index = cpu_to_le16(lif->index),
1077 .attr = IONIC_LIF_ATTR_TXSTAMP,
1078 .txstamp_mode = cpu_to_le16(txstamp_mode),
1082 return ionic_adminq_post_wait(lif, &ctx);
/* Remove the packet-class steering filter that directs timestamped
 * packets to the hwstamp Rx queue: look up the RXSTEER filter under the
 * filter lock, free the local entry, then ask the FW to delete it.
 * NOTE(review): the early return when no filter exists, local
 * declarations (filter_id, err), and closing braces appear elided from
 * this view -- confirm against the upstream file.
 */
1085 static void ionic_lif_del_hwstamp_rxfilt(struct ionic_lif *lif)
1087 struct ionic_admin_ctx ctx = {
1088 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1089 .cmd.rx_filter_del = {
1090 .opcode = IONIC_CMD_RX_FILTER_DEL,
1091 .lif_index = cpu_to_le16(lif->index),
1094 struct ionic_rx_filter *f;
1098 spin_lock_bh(&lif->rx_filters.lock);
1100 f = ionic_rx_filter_rxsteer(lif);
1102 spin_unlock_bh(&lif->rx_filters.lock);
1106 filter_id = f->filter_id;
1107 ionic_rx_filter_free(lif, f);
1109 spin_unlock_bh(&lif->rx_filters.lock);
1111 netdev_dbg(lif->netdev, "rx_filter del RXSTEER (id %d)\n", filter_id);
1113 ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(filter_id);
1115 err = ionic_adminq_post_wait(lif, &ctx);
1116 if (err && err != -EEXIST)
1117 netdev_dbg(lif->netdev, "failed to delete rx_filter RXSTEER (id %d)\n", filter_id);
/* Install a packet-class steering filter that directs matching packets
 * to the hwstamp Rx queue, then save the filter in the local table as
 * SYNCED.  Requires lif->hwstamp_rxq to exist.
 * NOTE(review): local declarations (qtype, qid, err), the early-return
 * when no hwstamp_rxq, error handling after the adminq post, and the
 * final return appear elided from this view -- confirm against the
 * upstream file.
 */
1120 static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
1122 struct ionic_admin_ctx ctx = {
1123 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1124 .cmd.rx_filter_add = {
1125 .opcode = IONIC_CMD_RX_FILTER_ADD,
1126 .lif_index = cpu_to_le16(lif->index),
1127 .match = cpu_to_le16(IONIC_RX_FILTER_STEER_PKTCLASS),
1128 .pkt_class = cpu_to_le64(pkt_class),
1135 if (!lif->hwstamp_rxq)
1138 qtype = lif->hwstamp_rxq->q.type;
1139 ctx.cmd.rx_filter_add.qtype = qtype;
1141 qid = lif->hwstamp_rxq->q.index;
1142 ctx.cmd.rx_filter_add.qid = cpu_to_le32(qid);
1144 netdev_dbg(lif->netdev, "rx_filter add RXSTEER\n");
1145 err = ionic_adminq_post_wait(lif, &ctx);
1146 if (err && err != -EEXIST)
1149 spin_lock_bh(&lif->rx_filters.lock);
1150 err = ionic_rx_filter_save(lif, 0, qid, 0, &ctx, IONIC_FILTER_STATE_SYNCED);
1151 spin_unlock_bh(&lif->rx_filters.lock);
1156 int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
1158 ionic_lif_del_hwstamp_rxfilt(lif);
1163 return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class);
/* Service one notifyq completion: skip stale events (eid not newer than
 * the last one seen), then dispatch by event code -- link change
 * triggers a deferred link check; a FW RESET event (when the FW is
 * ready and not already resetting/stopping) queues a deferred lif
 * reset; unknown events are logged.
 * NOTE(review): the q/lif derivation from cq->bound_q, switch braces,
 * break statements, and return value appear partially elided from this
 * view -- confirm against the upstream file.
 */
1166 static bool ionic_notifyq_service(struct ionic_cq *cq,
1167 struct ionic_cq_info *cq_info)
1169 union ionic_notifyq_comp *comp = cq_info->cq_desc;
1170 struct ionic_deferred_work *work;
1171 struct net_device *netdev;
1172 struct ionic_queue *q;
1173 struct ionic_lif *lif;
1177 lif = q->info[0].cb_arg;
1178 netdev = lif->netdev;
1179 eid = le64_to_cpu(comp->event.eid);
1181 /* Have we run out of new completions to process? */
1182 if ((s64)(eid - lif->last_eid) <= 0)
1185 lif->last_eid = eid;
1187 dev_dbg(lif->ionic->dev, "notifyq event:\n");
1188 dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
1189 comp, sizeof(*comp), true);
1191 switch (le16_to_cpu(comp->event.ecode)) {
1192 case IONIC_EVENT_LINK_CHANGE:
1193 ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
1195 case IONIC_EVENT_RESET:
1196 if (lif->ionic->idev.fw_status_ready &&
1197 !test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
1198 !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
1199 work = kzalloc(sizeof(*work), GFP_ATOMIC);
1201 netdev_err(lif->netdev, "Reset event dropped\n");
1202 clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
1204 work->type = IONIC_DW_TYPE_LIF_RESET;
1205 ionic_lif_deferred_enqueue(&lif->deferred, work);
1210 netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
1211 comp->event.ecode, eid);
1218 static bool ionic_adminq_service(struct ionic_cq *cq,
1219 struct ionic_cq_info *cq_info)
1221 struct ionic_admin_comp *comp = cq_info->cq_desc;
1223 if (!color_match(comp->color, cq->done_color))
1226 ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
/* NAPI poll for the adminq interrupt, which also services the notifyq
 * and (when present) the hwstamp tx/rx queues.  Work done is the max of
 * the four so budget accounting is honest; on completion the interrupt
 * is unmasked and credits returned.  Queues that did no work get their
 * doorbells poked, and the deadline timer is re-armed if any poke was
 * needed.
 * NOTE(review): local declarations (n_work, a_work, rx_work, tx_work,
 * work_done, credits), resched assignments, and the return appear
 * elided from this view -- confirm against the upstream file.
 */
1231 static int ionic_adminq_napi(struct napi_struct *napi, int budget)
1233 struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
1234 struct ionic_lif *lif = napi_to_cq(napi)->lif;
1235 struct ionic_dev *idev = &lif->ionic->idev;
1236 unsigned long irqflags;
1237 unsigned int flags = 0;
1238 bool resched = false;
1246 if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
1247 n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
1248 ionic_notifyq_service, NULL, NULL);
1250 spin_lock_irqsave(&lif->adminq_lock, irqflags);
1251 if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
1252 a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
1253 ionic_adminq_service, NULL, NULL);
1254 spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
1256 if (lif->hwstamp_rxq)
1257 rx_work = ionic_cq_service(&lif->hwstamp_rxq->cq, budget,
1258 ionic_rx_service, NULL, NULL);
1260 if (lif->hwstamp_txq)
1261 tx_work = ionic_cq_service(&lif->hwstamp_txq->cq, budget,
1262 ionic_tx_service, NULL, NULL);
1264 work_done = max(max(n_work, a_work), max(rx_work, tx_work));
1265 if (work_done < budget && napi_complete_done(napi, work_done)) {
1266 flags |= IONIC_INTR_CRED_UNMASK;
1267 intr->rearm_count++;
1270 if (work_done || flags) {
1271 flags |= IONIC_INTR_CRED_RESET_COALESCE;
1272 credits = n_work + a_work + rx_work + tx_work;
1273 ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
1276 if (!a_work && ionic_adminq_poke_doorbell(&lif->adminqcq->q))
1278 if (lif->hwstamp_rxq && !rx_work && ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q))
1280 if (lif->hwstamp_txq && !tx_work && ionic_txq_poke_doorbell(&lif->hwstamp_txq->q))
1283 mod_timer(&lif->adminqcq->napi_deadline,
1284 jiffies + IONIC_NAPI_DEADLINE);
/* Fill @ns with cumulative link stats by summing the per-class
 * (ucast/mcast/bcast) counters kept by firmware in lif->info->stats.
 * Counters arrive little-endian from the device, hence the le64_to_cpu.
 */
void ionic_get_stats64(struct net_device *netdev,
		       struct rtnl_link_stats64 *ns)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_lif_stats *ls;

	memset(ns, 0, sizeof(*ns));
	ls = &lif->info->stats;

	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
			 le64_to_cpu(ls->rx_mcast_packets) +
			 le64_to_cpu(ls->rx_bcast_packets);

	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
			 le64_to_cpu(ls->tx_mcast_packets) +
			 le64_to_cpu(ls->tx_bcast_packets);

	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
		       le64_to_cpu(ls->rx_mcast_bytes) +
		       le64_to_cpu(ls->rx_bcast_bytes);

	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
		       le64_to_cpu(ls->tx_mcast_bytes) +
		       le64_to_cpu(ls->tx_bcast_bytes);

	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
			 le64_to_cpu(ls->rx_mcast_drop_packets) +
			 le64_to_cpu(ls->rx_bcast_drop_packets);

	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
			 le64_to_cpu(ls->tx_mcast_drop_packets) +
			 le64_to_cpu(ls->tx_bcast_drop_packets);

	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);

	/* "ring empty" maps best onto the stack's rx_over notion */
	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);

	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
			       le64_to_cpu(ls->rx_queue_disabled) +
			       le64_to_cpu(ls->rx_desc_fetch_error) +
			       le64_to_cpu(ls->rx_desc_data_error);

	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
				le64_to_cpu(ls->tx_queue_disabled) +
				le64_to_cpu(ls->tx_desc_fetch_error) +
				le64_to_cpu(ls->tx_desc_data_error);

	ns->rx_errors = ns->rx_over_errors +
			ns->rx_missed_errors;

	ns->tx_errors = ns->tx_aborted_errors;
}
/* __dev_uc_sync/__dev_mc_sync callback: add @addr to the lif filter list */
static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR);
}
/* __dev_uc_sync/__dev_mc_sync callback: remove @addr from the lif filter list */
static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
	/* Don't delete our own address from the uc list */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR);
}
/* Compute the desired NIC rx_mode from the netdev flags and current filter
 * occupancy, then push it to firmware via the AdminQ if it changed.
 * Falls back to PROMISC/ALLMULTI when the HW filter tables overflow.
 * Runs under lif->config_lock; may sleep (adminq wait).
 */
void ionic_lif_rx_mode(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	unsigned int nfilters;
	unsigned int nd_flags;
	char buf[128];		/* NOTE(review): size presumed from debug usage — confirm */
	u16 rx_mode;
	int err;
	int i;
#define REMAIN(__x) (sizeof(buf) - (__x))

	mutex_lock(&lif->config_lock);

	/* grab the flags once for local use */
	nd_flags = netdev->flags;

	rx_mode = IONIC_RX_MODE_F_UNICAST;
	rx_mode |= (nd_flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
	rx_mode |= (nd_flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
	rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
	rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;

	/* sync the filters */
	ionic_rx_filter_sync(lif);

	/* check for overflow state
	 *    if so, we track that we overflowed and enable NIC PROMISC
	 *    else if the overflow is set and not needed
	 *       we remove our overflow flag and check the netdev flags
	 *       to see if we can disable NIC PROMISC
	 */
	nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);

	if (((lif->nucast + lif->nmcast) >= nfilters) ||
	    (lif->max_vlans && lif->nvlans >= lif->max_vlans)) {
		rx_mode |= IONIC_RX_MODE_F_PROMISC;
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
	} else {
		if (!(nd_flags & IFF_PROMISC))
			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
		if (!(nd_flags & IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	/* build a human-readable summary of old -> new mode for debug */
	i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
		      lif->rx_mode, rx_mode);
	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
	if (rx_mode & IONIC_RX_MODE_F_RDMA_SNIFFER)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_RDMA_SNIFFER");
	netdev_dbg(netdev, "lif%d %s\n", lif->index, buf);

	if (lif->rx_mode != rx_mode) {
		struct ionic_admin_ctx ctx = {
			.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
			.cmd.rx_mode_set = {
				.opcode = IONIC_CMD_RX_MODE_SET,
				.lif_index = cpu_to_le16(lif->index),
			},
		};

		ctx.cmd.rx_mode_set.rx_mode = cpu_to_le16(rx_mode);
		err = ionic_adminq_post_wait(lif, &ctx);
		if (err)
			netdev_warn(netdev, "set rx_mode 0x%04x failed: %d\n",
				    rx_mode, err);
		else
			/* only cache the new mode on firmware success */
			lif->rx_mode = rx_mode;
	}

	mutex_unlock(&lif->config_lock);
}
/* ndo_set_rx_mode: called in atomic context, so sync the kernel address
 * lists here and defer the firmware work (which sleeps) to the work task.
 */
static void ionic_ndo_set_rx_mode(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_deferred_work *work;

	/* Sync the kernel filter list with the driver filter list */
	__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
	__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);

	/* Shove off the rest of the rxmode work to the work task
	 * which will include syncing the filters to the firmware.
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		/* best-effort: the mode change is simply dropped on OOM */
		netdev_err(lif->netdev, "rxmode change dropped\n");
		return;
	}
	work->type = IONIC_DW_TYPE_RX_MODE;
	netdev_dbg(lif->netdev, "deferred: rx_mode\n");
	ionic_lif_deferred_enqueue(&lif->deferred, work);
}
1459 static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
1463 if (features & NETIF_F_HW_VLAN_CTAG_TX)
1464 wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
1465 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1466 wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
1467 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1468 wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
1469 if (features & NETIF_F_RXHASH)
1470 wanted |= IONIC_ETH_HW_RX_HASH;
1471 if (features & NETIF_F_RXCSUM)
1472 wanted |= IONIC_ETH_HW_RX_CSUM;
1473 if (features & NETIF_F_SG)
1474 wanted |= IONIC_ETH_HW_TX_SG;
1475 if (features & NETIF_F_HW_CSUM)
1476 wanted |= IONIC_ETH_HW_TX_CSUM;
1477 if (features & NETIF_F_TSO)
1478 wanted |= IONIC_ETH_HW_TSO;
1479 if (features & NETIF_F_TSO6)
1480 wanted |= IONIC_ETH_HW_TSO_IPV6;
1481 if (features & NETIF_F_TSO_ECN)
1482 wanted |= IONIC_ETH_HW_TSO_ECN;
1483 if (features & NETIF_F_GSO_GRE)
1484 wanted |= IONIC_ETH_HW_TSO_GRE;
1485 if (features & NETIF_F_GSO_GRE_CSUM)
1486 wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
1487 if (features & NETIF_F_GSO_IPXIP4)
1488 wanted |= IONIC_ETH_HW_TSO_IPXIP4;
1489 if (features & NETIF_F_GSO_IPXIP6)
1490 wanted |= IONIC_ETH_HW_TSO_IPXIP6;
1491 if (features & NETIF_F_GSO_UDP_TUNNEL)
1492 wanted |= IONIC_ETH_HW_TSO_UDP;
1493 if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
1494 wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;
1496 return cpu_to_le64(wanted);
/* Ask firmware to enable the requested feature set via LIF_SETATTR and
 * record what was actually granted in lif->hw_features (the AND of what
 * we asked for and what the completion reports back).
 * Returns 0 on success or a negative adminq error.
 */
static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	u64 old_hw_features;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);

	/* NOTE(review): upstream gates this on lif->phc being set — confirm */
	if (lif->phc)
		ctx.cmd.lif_setattr.features |= cpu_to_le64(IONIC_ETH_HW_TIMESTAMP);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	old_hw_features = lif->hw_features;
	/* granted = requested & completion-reported; both are __le64 so the
	 * AND may be done before the byte-swap
	 */
	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	/* reprogram RSS if the hash capability just toggled */
	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);

	if ((vlan_flags & le64_to_cpu(ctx.cmd.lif_setattr.features)) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TIMESTAMP)
		dev_dbg(dev, "feature ETH_HW_TIMESTAMP\n");

	return 0;
}
/* Negotiate the default feature set with firmware, then populate
 * netdev->hw_features / hw_enc_features / features / vlan_features
 * with only what the device actually granted (lif->hw_features).
 */
static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN |
		   NETIF_F_GSO_GRE |
		   NETIF_F_GSO_GRE_CSUM |
		   NETIF_F_GSO_IPXIP4 |
		   NETIF_F_GSO_IPXIP6 |
		   NETIF_F_GSO_UDP_TUNNEL |
		   NETIF_F_GSO_UDP_TUNNEL_CSUM;

	/* NOTE(review): upstream enables RXHASH only when lif->nxqs > 1 — confirm */
	if (lif->nxqs > 1)
		features |= NETIF_F_RXHASH;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	/* encapsulation-capable features start in hw_enc_features and are
	 * folded into hw_features below
	 */
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;

	netdev->priv_flags |= IFF_UNICAST_FLT |
			      IFF_LIVE_ADDR_CHANGE;

	netdev->xdp_features = NETDEV_XDP_ACT_BASIC;

	return 0;
}
/* ndo_set_features: push the new feature set to firmware */
static int ionic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
		   __func__, (u64)lif->netdev->features, (u64)features);

	err = ionic_set_nic_features(lif, features);

	return err;
}
/* Tell firmware to set the lif's station MAC address */
static int ionic_set_attr_mac(struct ionic_lif *lif, u8 *mac)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};

	ether_addr_copy(ctx.cmd.lif_setattr.mac, mac);
	return ionic_adminq_post_wait(lif, &ctx);
}
/* Read the lif's station MAC address back from firmware into @mac_addr */
static int ionic_get_attr_mac(struct ionic_lif *lif, u8 *mac_addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_getattr = {
			.opcode = IONIC_CMD_LIF_GETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	ether_addr_copy(mac_addr, ctx.comp.lif_getattr.mac);
	return 0;
}
/* Set the MAC in firmware and read it back to verify it stuck.
 * Returns 0 on verified success, a negative error on adminq failure,
 * or a positive value when old firmware silently ignored the set.
 */
static int ionic_program_mac(struct ionic_lif *lif, u8 *mac)
{
	u8 get_mac[ETH_ALEN];
	int err;

	err = ionic_set_attr_mac(lif, mac);
	if (err)
		return err;

	err = ionic_get_attr_mac(lif, get_mac);
	if (err)
		return err;

	/* To deal with older firmware that silently ignores the set attr mac:
	 * doesn't actually change the mac and doesn't return an error, so we
	 * do the get attr to verify whether or not the set actually happened
	 */
	if (!ether_addr_equal(get_mac, mac))
		return 1;

	return 0;
}
/* ndo_set_mac_address: program the new MAC into firmware, then swap the
 * old unicast filter for the new one and commit the change to the netdev.
 */
static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct sockaddr *addr = sa;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;
	if (ether_addr_equal(netdev->dev_addr, mac))
		return 0;	/* no change, nothing to do */

	err = ionic_program_mac(lif, mac);
	if (err < 0)
		return err;

	/* positive return == old FW ignored the set; carry on anyway */
	if (err > 0)
		netdev_dbg(netdev, "%s: SET and GET ATTR Mac are not equal-due to old FW running\n",
			   __func__);

	err = eth_prepare_mac_addr_change(netdev, addr);
	if (err)
		return err;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_info(netdev, "deleting mac addr %pM\n",
			    netdev->dev_addr);
		ionic_lif_addr_del(netdev_priv(netdev), netdev->dev_addr);
	}

	eth_commit_mac_addr_change(netdev, addr);
	netdev_info(netdev, "updating mac addr %pM\n", mac);

	return ionic_lif_addr_add(netdev_priv(netdev), mac);
}
/* Detach the netdev and tear down the data queues ahead of a queue
 * reconfiguration; pair with ionic_start_queues_reconfig().
 */
void ionic_stop_queues_reconfig(struct ionic_lif *lif)
{
	/* Stop and clean the queues before reconfiguration */
	netif_device_detach(lif->netdev);
	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
}
/* Bring the data queues back up after a reconfiguration and re-attach
 * the netdev; returns the txrx_init result but attaches regardless.
 */
static int ionic_start_queues_reconfig(struct ionic_lif *lif)
{
	int err;

	/* Re-init the queues after reconfiguration */

	/* The only way txrx_init can fail here is if communication
	 * with FW is suddenly broken.  There's not much we can do
	 * at this point - error messages have already been printed,
	 * so we can continue on and the user can eventually do a
	 * DOWN and UP to try to reset and clear the issue.
	 */
	err = ionic_txrx_init(lif);
	ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
	netif_device_attach(lif->netdev);

	return err;
}
/* An MTU is XDP-compatible when no program is loaded, or when the frame
 * still fits in a single linear rx buffer (IONIC_XDP_MAX_LINEAR_MTU).
 */
static bool ionic_xdp_is_valid_mtu(struct ionic_lif *lif, u32 mtu,
				   struct bpf_prog *xdp_prog)
{
	/* NOTE(review): the no-program early-accept is reconstructed from
	 * context (callers pass READ_ONCE(lif->xdp_prog)) — confirm
	 */
	if (!xdp_prog)
		return true;

	if (mtu <= IONIC_XDP_MAX_LINEAR_MTU)
		return true;

	return false;
}
/* ndo_change_mtu: validate against any loaded XDP program, tell firmware
 * the new MTU, and if the interface is running restart the queues so the
 * rx buffers are resized.
 */
static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MTU,
			.mtu = cpu_to_le32(new_mtu),
		},
	};
	struct bpf_prog *xdp_prog;
	int err;

	xdp_prog = READ_ONCE(lif->xdp_prog);
	if (!ionic_xdp_is_valid_mtu(lif, new_mtu, xdp_prog))
		return -EINVAL;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* if we're not running, nothing more to do */
	if (!netif_running(netdev)) {
		netdev->mtu = new_mtu;
		return 0;
	}

	mutex_lock(&lif->queue_lock);
	ionic_stop_queues_reconfig(lif);
	netdev->mtu = new_mtu;
	err = ionic_start_queues_reconfig(lif);
	mutex_unlock(&lif->queue_lock);

	return err;
}
/* Deferred tx-timeout handler: restart the data queues unless firmware
 * is resetting or the interface was already brought down.
 */
static void ionic_tx_timeout_work(struct work_struct *ws)
{
	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
	int err;

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	/* if we were stopped before this scheduled job was launched,
	 * don't bother the queues as they are already stopped.
	 */
	if (!netif_running(lif->netdev))
		return;

	mutex_lock(&lif->queue_lock);
	ionic_stop_queues_reconfig(lif);
	err = ionic_start_queues_reconfig(lif);
	mutex_unlock(&lif->queue_lock);

	if (err)
		dev_err(lif->ionic->dev, "%s: Restarting queues failed\n", __func__);
}
/* ndo_tx_timeout: runs in softirq context, so only log and schedule the
 * sleeping recovery work.
 */
static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	netdev_info(lif->netdev, "Tx Timeout triggered - txq %d\n", txqueue);
	schedule_work(&lif->tx_timeout_work);
}
/* ndo_vlan_rx_add_vid: add a VLAN filter, then refresh rx_mode in case
 * the VLAN table just overflowed into promisc.
 */
static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	err = ionic_lif_vlan_add(lif, vid);
	if (err)
		return err;

	ionic_lif_rx_mode(lif);

	return 0;
}
/* ndo_vlan_rx_kill_vid: drop a VLAN filter, then refresh rx_mode in case
 * removing it cleared an overflow-promisc condition.
 */
static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				  u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	err = ionic_lif_vlan_del(lif, vid);
	if (err)
		return err;

	ionic_lif_rx_mode(lif);

	return 0;
}
/* Push RSS configuration to firmware: hash @types, optional new @key,
 * and optional new indirection table @indir (copied into the DMA table
 * whose bus address firmware already holds in rss.addr).
 * NULL key/indir means "keep the current values".
 */
int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
			 const u8 *key, const u32 *indir)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_RSS,
			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
		},
	};
	unsigned int i, tbl_sz;

	/* only advertise hash types if the device granted RX_HASH */
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
		lif->rss_types = types;
		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
	}

	if (key)
		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);

	if (indir) {
		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = indir[i];
	}

	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
	       IONIC_RSS_HASH_KEY_SIZE);

	return ionic_adminq_post_wait(lif, &ctx);
}
/* Set the default RSS hash types and a round-robin indirection table
 * over the current queue count, then push them to firmware.
 */
static int ionic_lif_rss_init(struct ionic_lif *lif)
{
	unsigned int tbl_sz;
	unsigned int i;

	lif->rss_types = IONIC_RSS_TYPE_IPV4 |
			 IONIC_RSS_TYPE_IPV4_TCP |
			 IONIC_RSS_TYPE_IPV4_UDP |
			 IONIC_RSS_TYPE_IPV6 |
			 IONIC_RSS_TYPE_IPV6_TCP |
			 IONIC_RSS_TYPE_IPV6_UDP;

	/* Fill indirection table with 'default' values */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	for (i = 0; i < tbl_sz; i++)
		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);

	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
}
/* Clear the local RSS state (indirection table and key) and tell
 * firmware to disable all hash types.
 */
static void ionic_lif_rss_deinit(struct ionic_lif *lif)
{
	int tbl_sz;

	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	memset(lif->rss_ind_tbl, 0, tbl_sz);
	memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);

	ionic_lif_rss_config(lif, 0x0, NULL, NULL);
}
/* Ask firmware to quiesce the lif; failure is logged but not fatal
 * since we're on the teardown path anyway.
 */
static void ionic_lif_quiesce(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_STATE,
			.state = IONIC_LIF_QUIESCE,
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		netdev_dbg(lif->netdev, "lif quiesce failed %d\n", err);
}
/* Disable all tx then rx qcqs (including the hwstamp queues) and quiesce
 * the lif.  ionic_qcq_disable() threads the first error through so later
 * calls can short-circuit firmware traffic after a dead-device error.
 */
static void ionic_txrx_disable(struct ionic_lif *lif)
{
	unsigned int i;
	int err = 0;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs; i++)
			err = ionic_qcq_disable(lif, lif->txqcqs[i], err);
	}

	if (lif->hwstamp_txq)
		err = ionic_qcq_disable(lif, lif->hwstamp_txq, err);

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs; i++)
			err = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
	}

	if (lif->hwstamp_rxq)
		err = ionic_qcq_disable(lif, lif->hwstamp_rxq, err);

	ionic_lif_quiesce(lif);
}
/* Deinit every initialized qcq and drop any in-flight tx/rx buffers;
 * the qcq memory itself is released later by ionic_txrx_free().
 */
static void ionic_txrx_deinit(struct ionic_lif *lif)
{
	unsigned int i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) {
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
			ionic_tx_flush(&lif->txqcqs[i]->cq);
			ionic_tx_empty(&lif->txqcqs[i]->q);
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
			ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
			ionic_rx_empty(&lif->rxqcqs[i]->q);
		}
	}

	/* force a fresh RX_MODE_SET on the next ionic_lif_rx_mode()
	 * NOTE(review): reconstructed from upstream — confirm placement
	 */
	lif->rx_mode = 0;

	if (lif->hwstamp_txq) {
		ionic_lif_qcq_deinit(lif, lif->hwstamp_txq);
		ionic_tx_flush(&lif->hwstamp_txq->cq);
		ionic_tx_empty(&lif->hwstamp_txq->q);
	}

	if (lif->hwstamp_rxq) {
		ionic_lif_qcq_deinit(lif, lif->hwstamp_rxq);
		ionic_rx_empty(&lif->hwstamp_rxq->q);
	}
}
/* Free all allocated qcqs (regular and hwstamp) and NULL the slots so
 * teardown paths are idempotent.  Iterates the full per-lif capacity,
 * not just nxqs, to catch queues left over from a larger config.
 */
void ionic_txrx_free(struct ionic_lif *lif)
{
	unsigned int i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) {
			ionic_qcq_free(lif, lif->txqcqs[i]);
			devm_kfree(lif->ionic->dev, lif->txqcqs[i]);
			lif->txqcqs[i] = NULL;
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
			ionic_qcq_free(lif, lif->rxqcqs[i]);
			devm_kfree(lif->ionic->dev, lif->rxqcqs[i]);
			lif->rxqcqs[i] = NULL;
		}
	}

	if (lif->hwstamp_txq) {
		ionic_qcq_free(lif, lif->hwstamp_txq);
		devm_kfree(lif->ionic->dev, lif->hwstamp_txq);
		lif->hwstamp_txq = NULL;
	}

	if (lif->hwstamp_rxq) {
		ionic_qcq_free(lif, lif->hwstamp_rxq);
		devm_kfree(lif->ionic->dev, lif->hwstamp_rxq);
		lif->hwstamp_rxq = NULL;
	}
}
/* Allocate all tx and rx qcqs for the lif.  Tx queues get their own
 * interrupt only in split-interrupt mode; otherwise each tx queue is
 * linked to its partner rx queue's interrupt.  On any failure all
 * queues allocated so far are freed.
 */
static int ionic_txrx_alloc(struct ionic_lif *lif)
{
	unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
	unsigned int flags, i;
	int err = 0;

	num_desc = lif->ntxq_descs;
	desc_sz = sizeof(struct ionic_txq_desc);
	comp_sz = sizeof(struct ionic_txq_comp);

	/* use the v1 SG descriptor layout when the device supports it */
	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
					  sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;

	if (test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state))
		flags |= IONIC_QCQ_F_CMB_RINGS;

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		flags |= IONIC_QCQ_F_INTR;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
				      num_desc, desc_sz, comp_sz, sg_desc_sz,
				      lif->kern_pid, &lif->txqcqs[i]);
		if (err)
			goto err_out;

		if (flags & IONIC_QCQ_F_INTR) {
			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
					     lif->txqcqs[i]->intr.index,
					     lif->tx_coalesce_hw);
			if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
				lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
		}

		ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
	}

	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;

	if (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state))
		flags |= IONIC_QCQ_F_CMB_RINGS;

	num_desc = lif->nrxq_descs;
	desc_sz = sizeof(struct ionic_rxq_desc);
	comp_sz = sizeof(struct ionic_rxq_comp);
	sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);

	/* some rx features (e.g. timestamping) need double-wide completions */
	if (lif->rxq_features & IONIC_Q_F_2X_CQ_DESC)
		comp_sz *= 2;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
				      num_desc, desc_sz, comp_sz, sg_desc_sz,
				      lif->kern_pid, &lif->rxqcqs[i]);
		if (err)
			goto err_out;

		lif->rxqcqs[i]->q.features = lif->rxq_features;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     lif->rxqcqs[i]->intr.index,
				     lif->rx_coalesce_hw);
		if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state))
			lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw;

		/* shared-interrupt mode: tx rides the rx interrupt */
		if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
			ionic_link_qcq_interrupts(lif->rxqcqs[i],
						  lif->txqcqs[i]);

		ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
	}

	return 0;

err_out:
	ionic_txrx_free(lif);

	return err;
}
/* Init each tx/rx qcq pair with firmware, then set up RSS and rx_mode.
 * On failure the already-initialized pairs are unwound in reverse.
 */
static int ionic_txrx_init(struct ionic_lif *lif)
{
	unsigned int i;
	int err;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_lif_txq_init(lif, lif->txqcqs[i]);
		if (err)
			goto err_out;

		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]);
		if (err) {
			/* undo this pair's tx init before unwinding */
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
			goto err_out;
		}
	}

	if (lif->netdev->features & NETIF_F_RXHASH)
		ionic_lif_rss_init(lif);

	ionic_lif_rx_mode(lif);

	return 0;

err_out:
	while (i--) {
		ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
	}

	return err;
}
/* Enable the data path: configure XDP rxq_info, enable each rx/tx qcq
 * pair (rx first so tx completions have somewhere to land), then the
 * optional hwstamp queues.  The error labels unwind in strict reverse
 * order of the enables above.
 */
static int ionic_txrx_enable(struct ionic_lif *lif)
{
	int derr = 0;
	int i, err;

	err = ionic_xdp_queues_config(lif);
	if (err)
		return err;

	for (i = 0; i < lif->nxqs; i++) {
		if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
			dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
			err = -ENXIO;
			goto err_out;
		}

		ionic_rx_fill(&lif->rxqcqs[i]->q);
		err = ionic_qcq_enable(lif->rxqcqs[i]);
		if (err)
			goto err_out;

		err = ionic_qcq_enable(lif->txqcqs[i]);
		if (err) {
			/* this pair's rx is up but tx failed: disable rx now,
			 * the earlier pairs are handled by the unwind loop
			 */
			derr = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
			goto err_out;
		}
	}

	if (lif->hwstamp_rxq) {
		ionic_rx_fill(&lif->hwstamp_rxq->q);
		err = ionic_qcq_enable(lif->hwstamp_rxq);
		if (err)
			goto err_out_hwstamp_rx;
	}

	if (lif->hwstamp_txq) {
		err = ionic_qcq_enable(lif->hwstamp_txq);
		if (err)
			goto err_out_hwstamp_tx;
	}

	return 0;

err_out_hwstamp_tx:
	if (lif->hwstamp_rxq)
		derr = ionic_qcq_disable(lif, lif->hwstamp_rxq, derr);
err_out_hwstamp_rx:
	i = lif->nxqs;	/* all regular pairs were enabled; unwind them all */
err_out:
	while (i--) {
		derr = ionic_qcq_disable(lif, lif->txqcqs[i], derr);
		derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr);
	}

	ionic_xdp_queues_config(lif);

	return err;
}
/* Enable the data queues and wake the tx stack.  The F_UP bit makes
 * this idempotent; BROKEN and FW_RESET states refuse the start.
 */
static int ionic_start_queues(struct ionic_lif *lif)
{
	int err;

	if (test_bit(IONIC_LIF_F_BROKEN, lif->state))
		return -EIO;

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return -EBUSY;

	/* already up? nothing to do */
	if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
		return 0;

	err = ionic_txrx_enable(lif);
	if (err) {
		clear_bit(IONIC_LIF_F_UP, lif->state);
		return err;
	}
	netif_tx_wake_all_queues(lif->netdev);

	return 0;
}
/* ndo_open: allocate and init the data queues under queue_lock; queues
 * are only started here if link is already up, otherwise the link check
 * handler starts them when carrier arrives.
 */
static int ionic_open(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	/* If recovering from a broken state, clear the bit and we'll try again */
	if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
		netdev_info(netdev, "clearing broken state\n");

	mutex_lock(&lif->queue_lock);

	err = ionic_txrx_alloc(lif);
	if (err)
		goto err_unlock;

	err = ionic_txrx_init(lif);
	if (err)
		goto err_txrx_free;

	err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	/* don't start the queues until we have link */
	if (netif_carrier_ok(netdev)) {
		err = ionic_start_queues(lif);
		if (err)
			goto err_txrx_deinit;
	}

	/* If hardware timestamping is enabled, but the queues were freed by
	 * ionic_stop, those need to be reallocated and initialized, too.
	 */
	ionic_lif_hwstamp_recreate_queues(lif);

	mutex_unlock(&lif->queue_lock);

	return 0;

err_txrx_deinit:
	ionic_txrx_deinit(lif);
err_txrx_free:
	ionic_txrx_free(lif);
err_unlock:
	mutex_unlock(&lif->queue_lock);
	return err;
}
/* Quiesce the data path: no-op if already down (F_UP not set) */
static void ionic_stop_queues(struct ionic_lif *lif)
{
	if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
		return;

	netif_tx_disable(lif->netdev);
	ionic_txrx_disable(lif);
}
/* ndo_stop: tear down the data queues; skipped during FW reset since
 * the reset handler owns the queue lifecycle then.
 */
static int ionic_stop(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return 0;

	mutex_lock(&lif->queue_lock);
	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
	ionic_txrx_free(lif);
	mutex_unlock(&lif->queue_lock);

	return 0;
}
/* ndo_eth_ioctl: only the hardware timestamping get/set ioctls are handled */
static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return ionic_lif_hwstamp_set(lif, ifr);
	case SIOCGHWTSTAMP:
		return ionic_lif_hwstamp_get(lif, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
/* ndo_get_vf_config: report the cached VF attributes under the read lock */
static int ionic_get_vf_config(struct net_device *netdev,
			       int vf, struct ifla_vf_info *ivf)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret = 0;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_read(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		struct ionic_vf *vfdata = &ionic->vfs[vf];

		ivf->vf = vf;
		ivf->qos = 0;	/* qos is not supported */
		ivf->vlan = le16_to_cpu(vfdata->vlanid);
		ivf->spoofchk = vfdata->spoofchk;
		ivf->linkstate = vfdata->linkstate;
		ivf->max_tx_rate = le32_to_cpu(vfdata->maxrate);
		ivf->trusted = vfdata->trusted;
		ether_addr_copy(ivf->mac, vfdata->macaddr);
	}

	up_read(&ionic->vf_op_lock);
	return ret;
}
/* ndo_get_vf_stats: translate the firmware-maintained per-VF stats block
 * into the kernel's ifla_vf_stats layout.
 */
static int ionic_get_vf_stats(struct net_device *netdev, int vf,
			      struct ifla_vf_stats *vf_stats)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	struct ionic_lif_stats *vs;
	int ret = 0;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_read(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		memset(vf_stats, 0, sizeof(*vf_stats));
		vs = &ionic->vfs[vf].stats;

		vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
		vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
		vf_stats->rx_bytes   = le64_to_cpu(vs->rx_ucast_bytes);
		vf_stats->tx_bytes   = le64_to_cpu(vs->tx_ucast_bytes);
		vf_stats->broadcast  = le64_to_cpu(vs->rx_bcast_packets);
		vf_stats->multicast  = le64_to_cpu(vs->rx_mcast_packets);
		vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
				       le64_to_cpu(vs->rx_mcast_drop_packets) +
				       le64_to_cpu(vs->rx_bcast_drop_packets);
		vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
				       le64_to_cpu(vs->tx_mcast_drop_packets) +
				       le64_to_cpu(vs->tx_bcast_drop_packets);
	}

	up_read(&ionic->vf_op_lock);
	return ret;
}
/* ndo_set_vf_mac: push a new (or zero, meaning "clear") MAC to the VF via
 * firmware and cache it on success.  Taken under the write lock since it
 * mutates the VF table.
 */
static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_MAC };
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
		return -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ether_addr_copy(vfc.macaddr, mac);
		dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n",
			__func__, vf, vfc.macaddr);

		ret = ionic_set_vf_config(ionic, vf, &vfc);
		if (!ret)
			ether_addr_copy(ionic->vfs[vf].macaddr, mac);
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}
/* ndo_set_vf_vlan: set the VF's port VLAN; qos and non-802.1Q protocols
 * are not supported.
 */
static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
			     u8 qos, __be16 proto)
{
	struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_VLAN };
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	/* until someday when we support qos */
	if (qos)
		return -EINVAL;

	if (vlan > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		vfc.vlanid = cpu_to_le16(vlan);
		dev_dbg(ionic->dev, "%s: vf %d vlan %d\n",
			__func__, vf, le16_to_cpu(vfc.vlanid));

		ret = ionic_set_vf_config(ionic, vf, &vfc);
		if (!ret)
			ionic->vfs[vf].vlanid = cpu_to_le16(vlan);
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}
/* ndo_set_vf_rate: set the VF's max tx rate; a min rate is not supported */
static int ionic_set_vf_rate(struct net_device *netdev, int vf,
			     int tx_min, int tx_max)
{
	struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_RATE };
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	/* setting the min just seems silly */
	if (tx_min)
		return -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		vfc.maxrate = cpu_to_le32(tx_max);
		dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n",
			__func__, vf, le32_to_cpu(vfc.maxrate));

		ret = ionic_set_vf_config(ionic, vf, &vfc);
		if (!ret)
			ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}
/* ndo_set_vf_spoofchk: enable/disable MAC anti-spoof checking on a VF */
static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
{
	struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_SPOOFCHK };
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		vfc.spoofchk = set;
		dev_dbg(ionic->dev, "%s: vf %d spoof %d\n",
			__func__, vf, vfc.spoofchk);

		ret = ionic_set_vf_config(ionic, vf, &vfc);
		if (!ret)
			ionic->vfs[vf].spoofchk = set;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}
/* ndo_set_vf_trust: mark a VF as trusted/untrusted in firmware */
static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
{
	struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_TRUST };
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		vfc.trust = set;
		dev_dbg(ionic->dev, "%s: vf %d trust %d\n",
			__func__, vf, vfc.trust);

		ret = ionic_set_vf_config(ionic, vf, &vfc);
		if (!ret)
			ionic->vfs[vf].trusted = set;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}
/* ndo_set_vf_link_state handler: translate the IFLA_VF_LINK_STATE_* value
 * into the device's IONIC_VF_LINK_STATUS_* encoding and program it via
 * IONIC_VF_ATTR_LINKSTATE.  The rtnetlink-level value ('set') is what gets
 * cached for later replay, not the translated vfls.
 */
2589 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
2591 struct ionic_vf_setattr_cmd vfc = { .attr = IONIC_VF_ATTR_LINKSTATE };
2592 struct ionic_lif *lif = netdev_priv(netdev);
2593 struct ionic *ionic = lif->ionic;
/* map rtnetlink link-state request to device encoding */
2598 case IFLA_VF_LINK_STATE_ENABLE:
2599 vfls = IONIC_VF_LINK_STATUS_UP;
2601 case IFLA_VF_LINK_STATE_DISABLE:
2602 vfls = IONIC_VF_LINK_STATUS_DOWN;
2604 case IFLA_VF_LINK_STATE_AUTO:
2605 vfls = IONIC_VF_LINK_STATUS_AUTO;
2611 if (!netif_device_present(netdev))
2614 down_write(&ionic->vf_op_lock);
2616 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2619 vfc.linkstate = vfls;
2620 dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n",
2621 __func__, vf, vfc.linkstate);
2623 ret = ionic_set_vf_config(ionic, vf, &vfc);
2625 ionic->vfs[vf].linkstate = set;
2628 up_write(&ionic->vf_op_lock);
/* Re-program all cached VF attributes into the device, one setattr command
 * per attribute per VF.  Used after a firmware restart (called from
 * ionic_restart_lif) to restore VF state the FW has lost.  Attributes that
 * are still at their zero/default cached value are skipped (checks elided
 * in this view).  Finishes by restarting the VFs via ionic_vf_start().
 */
2632 static void ionic_vf_attr_replay(struct ionic_lif *lif)
2634 struct ionic_vf_setattr_cmd vfc = { };
2635 struct ionic *ionic = lif->ionic;
/* read lock is enough: we only read the cached per-VF values */
2642 down_read(&ionic->vf_op_lock);
2644 for (i = 0; i < ionic->num_vfs; i++) {
2648 vfc.attr = IONIC_VF_ATTR_STATSADDR;
2649 vfc.stats_pa = cpu_to_le64(v->stats_pa);
2650 ionic_set_vf_config(ionic, i, &vfc);
2654 if (!is_zero_ether_addr(v->macaddr)) {
2655 vfc.attr = IONIC_VF_ATTR_MAC;
2656 ether_addr_copy(vfc.macaddr, v->macaddr);
2657 ionic_set_vf_config(ionic, i, &vfc);
/* clear the shared cmd buffer field before the next attribute */
2658 eth_zero_addr(vfc.macaddr);
2662 vfc.attr = IONIC_VF_ATTR_VLAN;
2663 vfc.vlanid = v->vlanid;
2664 ionic_set_vf_config(ionic, i, &vfc);
2669 vfc.attr = IONIC_VF_ATTR_RATE;
2670 vfc.maxrate = v->maxrate;
2671 ionic_set_vf_config(ionic, i, &vfc);
2676 vfc.attr = IONIC_VF_ATTR_SPOOFCHK;
2677 vfc.spoofchk = v->spoofchk;
2678 ionic_set_vf_config(ionic, i, &vfc);
2683 vfc.attr = IONIC_VF_ATTR_TRUST;
2684 vfc.trust = v->trusted;
2685 ionic_set_vf_config(ionic, i, &vfc);
2690 vfc.attr = IONIC_VF_ATTR_LINKSTATE;
2691 vfc.linkstate = v->linkstate;
2692 ionic_set_vf_config(ionic, i, &vfc);
2697 up_read(&ionic->vf_op_lock);
2699 ionic_vf_start(ionic);
/* Unregister and release a queue's XDP rxq_info, if one was registered.
 * The pointer is cleared before unregistering so no one can see a
 * half-torn-down rxq_info through q->xdp_rxq_info.
 */
2702 static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q)
2704 struct xdp_rxq_info *xi;
2706 if (!q->xdp_rxq_info)
2709 xi = q->xdp_rxq_info;
2710 q->xdp_rxq_info = NULL;
2712 xdp_rxq_info_unreg(xi);
/* Allocate and register an xdp_rxq_info for one Rx queue, using the
 * order-0 page memory model.  On success the registered structure is
 * stored in q->xdp_rxq_info; on failure it is unregistered/freed and an
 * error is returned (error paths partly elided in this view).
 */
2716 static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_id)
2718 struct xdp_rxq_info *rxq_info;
2721 rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
2725 err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id);
2727 dev_err(q->dev, "Queue %d xdp_rxq_info_reg failed, err %d\n",
2732 err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_ORDER0, NULL);
2734 dev_err(q->dev, "Queue %d xdp_rxq_info_reg_mem_model failed, err %d\n",
2736 xdp_rxq_info_unreg(rxq_info);
2740 q->xdp_rxq_info = rxq_info;
/* Bring every Rx queue's xdp_rxq_info registration in line with the
 * current lif->xdp_prog: register when a program was installed, unregister
 * when it was removed.  If the program presence already matches queue 0's
 * registration state, nothing needs doing.  On a registration failure all
 * queues are unwound (unwind label elided in this view).
 */
2749 static int ionic_xdp_queues_config(struct ionic_lif *lif)
2757 /* There's no need to rework memory if not going to/from NULL program.
2758 * If there is no lif->xdp_prog, there should also be no q.xdp_rxq_info
2759 * This way we don't need to keep an *xdp_prog in every queue struct.
2761 if (!lif->xdp_prog == !lif->rxqcqs[0]->q.xdp_rxq_info)
2764 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
2765 struct ionic_queue *q = &lif->rxqcqs[i]->q;
2767 if (q->xdp_rxq_info) {
2768 ionic_xdp_unregister_rxq_info(q);
2772 err = ionic_xdp_register_rxq_info(q, lif->rxqcqs[i]->napi.napi_id);
2774 dev_err(lif->ionic->dev, "failed to register RX queue %d info for XDP, err %d\n",
/* error unwind: drop any registrations made so far */
2783 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++)
2784 ionic_xdp_unregister_rxq_info(&lif->rxqcqs[i]->q);
/* XDP_SETUP_PROG handler: validate constraints (no split Tx/Rx interrupts,
 * MTU small enough for single-buffer XDP), adjust netdev->max_mtu for the
 * new mode, then swap in the new bpf program.  If the interface is running
 * the queues are stopped and restarted around the swap under queue_lock;
 * otherwise the program is swapped directly.  The old program's reference
 * is dropped at the end.
 */
2789 static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf)
2791 struct ionic_lif *lif = netdev_priv(netdev);
2792 struct bpf_prog *old_prog;
2795 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
2796 #define XDP_ERR_SPLIT "XDP not available with split Tx/Rx interrupts"
2797 NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_SPLIT);
2798 netdev_info(lif->netdev, XDP_ERR_SPLIT);
2802 if (!ionic_xdp_is_valid_mtu(lif, netdev->mtu, bpf->prog)) {
2803 #define XDP_ERR_MTU "MTU is too large for XDP without frags support"
2804 NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_MTU);
2805 netdev_info(lif->netdev, XDP_ERR_MTU);
/* recompute max_mtu: device max frame minus VLAN+ethernet header,
 * further capped to the XDP linear limit when a program is loaded
 */
2809 maxfs = __le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN;
2811 maxfs = min_t(u32, maxfs, IONIC_XDP_MAX_LINEAR_MTU);
2812 netdev->max_mtu = maxfs;
2814 if (!netif_running(netdev)) {
2815 old_prog = xchg(&lif->xdp_prog, bpf->prog);
2817 mutex_lock(&lif->queue_lock);
2818 ionic_stop_queues_reconfig(lif);
2819 old_prog = xchg(&lif->xdp_prog, bpf->prog);
2820 ionic_start_queues_reconfig(lif);
2821 mutex_unlock(&lif->queue_lock);
2825 bpf_prog_put(old_prog);
/* ndo_bpf entry point: dispatch XDP commands; only XDP_SETUP_PROG is
 * handled, everything else falls through (default case elided in view).
 */
2830 static int ionic_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
2832 switch (bpf->command) {
2833 case XDP_SETUP_PROG:
2834 return ionic_xdp_config(netdev, bpf);
/* netdev callback table for ionic LIF netdevs: basic open/stop/xmit,
 * stats, rx-mode/features/MAC/MTU, VLAN filtering, XDP, and the full set
 * of SR-IOV VF configuration hooks.
 */
2840 static const struct net_device_ops ionic_netdev_ops = {
2841 .ndo_open = ionic_open,
2842 .ndo_stop = ionic_stop,
2843 .ndo_eth_ioctl = ionic_eth_ioctl,
2844 .ndo_start_xmit = ionic_start_xmit,
2845 .ndo_bpf = ionic_xdp,
2846 .ndo_get_stats64 = ionic_get_stats64,
2847 .ndo_set_rx_mode = ionic_ndo_set_rx_mode,
2848 .ndo_set_features = ionic_set_features,
2849 .ndo_set_mac_address = ionic_set_mac_address,
2850 .ndo_validate_addr = eth_validate_addr,
2851 .ndo_tx_timeout = ionic_tx_timeout,
2852 .ndo_change_mtu = ionic_change_mtu,
2853 .ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid,
2854 .ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid,
2855 .ndo_set_vf_vlan = ionic_set_vf_vlan,
2856 .ndo_set_vf_trust = ionic_set_vf_trust,
2857 .ndo_set_vf_mac = ionic_set_vf_mac,
2858 .ndo_set_vf_rate = ionic_set_vf_rate,
2859 .ndo_set_vf_spoofchk = ionic_set_vf_spoofchk,
2860 .ndo_get_vf_config = ionic_get_vf_config,
2861 .ndo_set_vf_link_state = ionic_set_vf_link_state,
2862 .ndo_get_vf_stats = ionic_get_vf_stats,
/* Reconfigure queues that live in (limited) controller memory: unlike the
 * normal reconfig path there isn't enough CMB memory to build the new set
 * alongside the old, so free everything first, apply the new params, then
 * re-allocate -- unwinding back to the saved params if allocation fails.
 */
2865 static int ionic_cmb_reconfig(struct ionic_lif *lif,
2866 struct ionic_queue_params *qparam)
2868 struct ionic_queue_params start_qparams;
2871 /* When changing CMB queue parameters, we're using limited
2872 * on-device memory and don't have extra memory to use for
2873 * duplicate allocations, so we free it all first then
2874 * re-allocate with the new parameters.
2877 /* Checkpoint for possible unwind */
2878 ionic_init_queue_params(lif, &start_qparams);
2880 /* Stop and free the queues */
2881 ionic_stop_queues_reconfig(lif);
2882 ionic_txrx_free(lif);
2884 /* Set up new qparams */
2885 ionic_set_queue_params(lif, qparam);
2887 if (netif_running(lif->netdev)) {
2888 /* Alloc and start the new configuration */
2889 err = ionic_txrx_alloc(lif);
2891 dev_warn(lif->ionic->dev,
2892 "CMB reconfig failed, restoring values: %d\n", err);
2894 /* Back out the changes */
2895 ionic_set_queue_params(lif, &start_qparams);
2896 err = ionic_txrx_alloc(lif);
2898 dev_err(lif->ionic->dev,
2899 "CMB restore failed: %d\n", err);
2904 err = ionic_start_queues_reconfig(lif);
2906 dev_err(lif->ionic->dev,
2907 "CMB reconfig failed: %d\n", err);
2913 /* This was detached in ionic_stop_queues_reconfig() */
2914 netif_device_attach(lif->netdev);
/* Swap the descriptor rings, buffers, and sizing fields of two qcqs while
 * deliberately leaving napi, flags, and interrupt config in place -- used
 * by ionic_reconfigure_queues() to slide newly-allocated rings under the
 * existing queue/interrupt plumbing.  Debugfs entries for 'a' are
 * recreated since its backing memory changed.
 */
2919 static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
2921 /* only swapping the queues, not the napi, flags, or other stuff */
2922 swap(a->q.features, b->q.features);
2923 swap(a->q.num_descs, b->q.num_descs);
2924 swap(a->q.desc_size, b->q.desc_size);
2925 swap(a->q.base, b->q.base);
2926 swap(a->q.base_pa, b->q.base_pa);
2927 swap(a->q.info, b->q.info);
2928 swap(a->q.xdp_rxq_info, b->q.xdp_rxq_info);
2929 swap(a->q_base, b->q_base);
2930 swap(a->q_base_pa, b->q_base_pa);
2931 swap(a->q_size, b->q_size);
/* scatter-gather descriptor state */
2933 swap(a->q.sg_desc_size, b->q.sg_desc_size);
2934 swap(a->q.sg_base, b->q.sg_base);
2935 swap(a->q.sg_base_pa, b->q.sg_base_pa);
2936 swap(a->sg_base, b->sg_base);
2937 swap(a->sg_base_pa, b->sg_base_pa);
2938 swap(a->sg_size, b->sg_size);
/* completion queue state */
2940 swap(a->cq.num_descs, b->cq.num_descs);
2941 swap(a->cq.desc_size, b->cq.desc_size);
2942 swap(a->cq.base, b->cq.base);
2943 swap(a->cq.base_pa, b->cq.base_pa);
2944 swap(a->cq.info, b->cq.info);
2945 swap(a->cq_base, b->cq_base);
2946 swap(a->cq_base_pa, b->cq_base_pa);
2947 swap(a->cq_size, b->cq_size);
2949 ionic_debugfs_del_qcq(a);
2950 ionic_debugfs_add_qcq(a->q.lif, a);
/* Apply a new queue configuration (count, descriptor counts, rx features,
 * interrupt split) with minimal downtime:
 *   1. divert to ionic_cmb_reconfig() if CMB rings are involved;
 *   2. allocate replacement qcqs off to the side while traffic still runs;
 *   3. stop the queues, swap the new rings under the existing qcqs
 *      (ionic_swap_queues), rework interrupt assignments if the layout
 *      changed, refresh debugfs, and restart;
 *   4. free the old rings and trim allocations beyond the new queue count.
 * Error paths restart the queues without losing the first error code.
 * (Several conditionals/labels are elided in this view.)
 */
2953 int ionic_reconfigure_queues(struct ionic_lif *lif,
2954 struct ionic_queue_params *qparam)
2956 unsigned int comp_sz, desc_sz, num_desc, sg_desc_sz;
2957 struct ionic_qcq **tx_qcqs = NULL;
2958 struct ionic_qcq **rx_qcqs = NULL;
2959 unsigned int flags, i;
2962 /* Are we changing q params while CMB is on */
2963 if ((test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) && qparam->cmb_tx) ||
2964 (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state) && qparam->cmb_rx))
2965 return ionic_cmb_reconfig(lif, qparam);
2967 /* allocate temporary qcq arrays to hold new queue structs */
2968 if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
2969 tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
2970 sizeof(struct ionic_qcq *), GFP_KERNEL);
2976 if (qparam->nxqs != lif->nxqs ||
2977 qparam->nrxq_descs != lif->nrxq_descs ||
2978 qparam->rxq_features != lif->rxq_features) {
2979 rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
2980 sizeof(struct ionic_qcq *), GFP_KERNEL);
2987 /* allocate new desc_info and rings, but leave the interrupt setup
2988 * until later so as to not mess with the still-running queues
2991 num_desc = qparam->ntxq_descs;
2992 desc_sz = sizeof(struct ionic_txq_desc);
2993 comp_sz = sizeof(struct ionic_txq_comp);
/* pick the v1 SG descriptor layout when the FW advertises it */
2995 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
2996 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
2997 sizeof(struct ionic_txq_sg_desc_v1))
2998 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
3000 sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
3002 for (i = 0; i < qparam->nxqs; i++) {
3003 /* If missing, short placeholder qcq needed for swap */
3004 if (!lif->txqcqs[i]) {
3005 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
3006 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
3007 4, desc_sz, comp_sz, sg_desc_sz,
3008 lif->kern_pid, &lif->txqcqs[i]);
3013 flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
3014 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
3015 num_desc, desc_sz, comp_sz, sg_desc_sz,
3016 lif->kern_pid, &tx_qcqs[i]);
3023 num_desc = qparam->nrxq_descs;
3024 desc_sz = sizeof(struct ionic_rxq_desc);
3025 comp_sz = sizeof(struct ionic_rxq_comp);
3026 sg_desc_sz = sizeof(struct ionic_rxq_sg_desc);
3028 if (qparam->rxq_features & IONIC_Q_F_2X_CQ_DESC)
3031 for (i = 0; i < qparam->nxqs; i++) {
3032 /* If missing, short placeholder qcq needed for swap */
3033 if (!lif->rxqcqs[i]) {
3034 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;
3035 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
3036 4, desc_sz, comp_sz, sg_desc_sz,
3037 lif->kern_pid, &lif->rxqcqs[i]);
3042 flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
3043 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
3044 num_desc, desc_sz, comp_sz, sg_desc_sz,
3045 lif->kern_pid, &rx_qcqs[i]);
3049 rx_qcqs[i]->q.features = qparam->rxq_features;
3053 /* stop and clean the queues */
3054 ionic_stop_queues_reconfig(lif);
3056 if (qparam->nxqs != lif->nxqs) {
3057 err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs);
3059 goto err_out_reinit_unlock;
3060 err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs);
3062 netif_set_real_num_tx_queues(lif->netdev, lif->nxqs);
3063 goto err_out_reinit_unlock;
3067 /* swap new desc_info and rings, keeping existing interrupt config */
3069 lif->ntxq_descs = qparam->ntxq_descs;
3070 for (i = 0; i < qparam->nxqs; i++)
3071 ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]);
3075 lif->nrxq_descs = qparam->nrxq_descs;
3076 for (i = 0; i < qparam->nxqs; i++)
3077 ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]);
3080 /* if we need to change the interrupt layout, this is the time */
3081 if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) ||
3082 qparam->nxqs != lif->nxqs) {
3083 if (qparam->intr_split) {
3084 set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
3086 clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
/* shared-interrupt mode: Tx uses Rx's coalesce settings */
3087 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
3088 lif->tx_coalesce_hw = lif->rx_coalesce_hw;
3091 /* Clear existing interrupt assignments. We check for NULL here
3092 * because we're checking the whole array for potential qcqs, not
3093 * just those qcqs that have just been set up.
3095 for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
3097 ionic_qcq_intr_free(lif, lif->txqcqs[i]);
3099 ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
3102 /* re-assign the interrupts */
3103 for (i = 0; i < qparam->nxqs; i++) {
3104 lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR;
3105 err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]);
3106 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
3107 lif->rxqcqs[i]->intr.index,
3108 lif->rx_coalesce_hw);
3110 if (qparam->intr_split) {
3111 lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR;
3112 err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]);
3113 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
3114 lif->txqcqs[i]->intr.index,
3115 lif->tx_coalesce_hw);
3116 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
3117 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
3119 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
3120 ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]);
3125 /* now we can rework the debugfs mappings */
3127 for (i = 0; i < qparam->nxqs; i++) {
3128 ionic_debugfs_del_qcq(lif->txqcqs[i]);
3129 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
3134 for (i = 0; i < qparam->nxqs; i++) {
3135 ionic_debugfs_del_qcq(lif->rxqcqs[i]);
3136 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
3140 swap(lif->nxqs, qparam->nxqs);
3141 swap(lif->rxq_features, qparam->rxq_features);
3143 err_out_reinit_unlock:
3144 /* re-init the queues, but don't lose an error code */
3146 ionic_start_queues_reconfig(lif);
3148 err = ionic_start_queues_reconfig(lif);
3151 /* free old allocs without cleaning intr */
3152 for (i = 0; i < qparam->nxqs; i++) {
3153 if (tx_qcqs && tx_qcqs[i]) {
3154 tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
3155 ionic_qcq_free(lif, tx_qcqs[i]);
3156 devm_kfree(lif->ionic->dev, tx_qcqs[i]);
3159 if (rx_qcqs && rx_qcqs[i]) {
3160 rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
3161 ionic_qcq_free(lif, rx_qcqs[i]);
3162 devm_kfree(lif->ionic->dev, rx_qcqs[i]);
3169 devm_kfree(lif->ionic->dev, rx_qcqs);
3173 devm_kfree(lif->ionic->dev, tx_qcqs);
3177 /* clean the unused dma and info allocations when new set is smaller
3178 * than the full array, but leave the qcq shells in place
3180 for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
3181 if (lif->txqcqs && lif->txqcqs[i]) {
3182 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
3183 ionic_qcq_free(lif, lif->txqcqs[i]);
3186 if (lif->rxqcqs && lif->rxqcqs[i]) {
3187 lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
3188 ionic_qcq_free(lif, lif->rxqcqs[i]);
3193 netdev_info(lif->netdev, "%s: failed %d\n", __func__, err);
/* Allocate and set up the LIF and its netdev at probe time: identify the
 * LIF, set MTU bounds from the device identity, size the queues, set
 * default interrupt coalescing, init locks and deferred work, then
 * allocate DMA-coherent lif info, the control/txrx qcq arrays, and the RSS
 * indirection table.  Unwinds all allocations on failure via goto labels
 * (some labels elided in this view).
 */
3198 int ionic_lif_alloc(struct ionic *ionic)
3200 struct device *dev = ionic->dev;
3201 union ionic_lif_identity *lid;
3202 struct net_device *netdev;
3203 struct ionic_lif *lif;
3207 lid = kzalloc(sizeof(*lid), GFP_KERNEL);
/* the lif struct is the netdev's private data */
3211 netdev = alloc_etherdev_mqs(sizeof(*lif),
3212 ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
3214 dev_err(dev, "Cannot allocate netdev, aborting\n");
3216 goto err_out_free_lid;
3219 SET_NETDEV_DEV(netdev, dev);
3221 lif = netdev_priv(netdev);
3222 lif->netdev = netdev;
3225 netdev->netdev_ops = &ionic_netdev_ops;
3226 ionic_ethtool_set_ops(netdev);
3228 netdev->watchdog_timeo = 2 * HZ;
3229 netif_carrier_off(netdev);
3231 lif->identity = lid;
3232 lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
3233 err = ionic_lif_identify(ionic, lif->lif_type, lif->identity);
3235 dev_err(ionic->dev, "Cannot identify type %d: %d\n",
3236 lif->lif_type, err);
3237 goto err_out_free_netdev;
/* MTU limits come from the device's identity data */
3239 lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
3240 le32_to_cpu(lif->identity->eth.min_frame_size));
3241 lif->netdev->max_mtu =
3242 le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;
3244 lif->neqs = ionic->neqs_per_lif;
3245 lif->nxqs = ionic->ntxqs_per_lif;
/* use minimal rings when running as a kdump capture kernel */
3249 if (is_kdump_kernel()) {
3250 lif->ntxq_descs = IONIC_MIN_TXRX_DESC;
3251 lif->nrxq_descs = IONIC_MIN_TXRX_DESC;
3253 lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
3254 lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
3257 /* Convert the default coalesce value to actual hw resolution */
3258 lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
3259 lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
3260 lif->rx_coalesce_usecs);
3261 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
3262 lif->tx_coalesce_hw = lif->rx_coalesce_hw;
3263 set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
3264 set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
3266 snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);
3268 mutex_init(&lif->queue_lock);
3269 mutex_init(&lif->config_lock);
3271 spin_lock_init(&lif->adminq_lock);
3273 spin_lock_init(&lif->deferred.lock);
3274 INIT_LIST_HEAD(&lif->deferred.list);
3275 INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);
3277 /* allocate lif info */
3278 lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
3279 lif->info = dma_alloc_coherent(dev, lif->info_sz,
3280 &lif->info_pa, GFP_KERNEL);
3282 dev_err(dev, "Failed to allocate lif info, aborting\n");
3284 goto err_out_free_mutex;
3287 ionic_debugfs_add_lif(lif);
3289 /* allocate control queues and txrx queue arrays */
3290 ionic_lif_queue_identify(lif);
3291 err = ionic_qcqs_alloc(lif);
3293 goto err_out_free_lif_info;
3295 /* allocate rss indirection table */
3296 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
3297 lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
3298 lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
3299 &lif->rss_ind_tbl_pa,
3302 if (!lif->rss_ind_tbl) {
3304 dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
3305 goto err_out_free_qcqs;
3307 netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
3309 ionic_lif_alloc_phc(lif);
/* error unwind */
3314 ionic_qcqs_free(lif);
3315 err_out_free_lif_info:
3316 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
3320 mutex_destroy(&lif->config_lock);
3321 mutex_destroy(&lif->queue_lock);
3322 err_out_free_netdev:
3323 free_netdev(lif->netdev);
/* Issue a LIF reset dev-command for this lif's index, serialized under
 * dev_cmd_lock.  Skipped entirely when firmware is not running.
 */
3331 static void ionic_lif_reset(struct ionic_lif *lif)
3333 struct ionic_dev *idev = &lif->ionic->idev;
3335 if (!ionic_is_fw_running(idev))
3338 mutex_lock(&lif->ionic->dev_cmd_lock);
3339 ionic_dev_cmd_lif_reset(idev, lif->index);
3340 ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
3341 mutex_unlock(&lif->ionic->dev_cmd_lock);
/* Handle a firmware-down event: set IONIC_LIF_F_FW_RESET exactly once
 * (test_and_set_bit makes repeat calls no-ops), detach the netdev, then
 * under queue_lock stop traffic and tear down queues, lif state, and qcqs
 * so that ionic_restart_lif() can rebuild them when FW returns.
 */
3344 static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
3346 struct ionic *ionic = lif->ionic;
3348 if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
3351 dev_info(ionic->dev, "FW Down: Stopping LIFs\n");
3353 netif_device_detach(lif->netdev);
3355 mutex_lock(&lif->queue_lock);
3356 if (test_bit(IONIC_LIF_F_UP, lif->state)) {
3357 dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
3358 ionic_stop_queues(lif);
3361 if (netif_running(lif->netdev)) {
3362 ionic_txrx_deinit(lif);
3363 ionic_txrx_free(lif);
3365 ionic_lif_deinit(lif);
3367 ionic_qcqs_free(lif);
3369 mutex_unlock(&lif->queue_lock);
3371 clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
3372 dev_info(ionic->dev, "FW Down: LIFs stopped\n");
/* Rebuild a LIF after firmware comes back: re-allocate qcqs, re-init the
 * lif, replay cached VF attributes and rx filters, and if the interface
 * was running re-create and re-init the txrx queues -- all under
 * queue_lock.  On success, clears FW_RESET, requests a link check, and
 * re-attaches the netdev; on failure, unwinds in reverse order.
 */
3375 int ionic_restart_lif(struct ionic_lif *lif)
3377 struct ionic *ionic = lif->ionic;
3380 mutex_lock(&lif->queue_lock);
3382 if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state))
3383 dev_info(ionic->dev, "FW Up: clearing broken state\n");
3385 err = ionic_qcqs_alloc(lif);
3389 err = ionic_lif_init(lif);
/* restore state the FW lost across the reset */
3393 ionic_vf_attr_replay(lif);
3395 if (lif->registered)
3396 ionic_lif_set_netdev_info(lif);
3398 ionic_rx_filter_replay(lif);
3400 if (netif_running(lif->netdev)) {
3401 err = ionic_txrx_alloc(lif);
3403 goto err_lifs_deinit;
3405 err = ionic_txrx_init(lif);
3410 mutex_unlock(&lif->queue_lock);
3412 clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
3413 ionic_link_status_check_request(lif, CAN_SLEEP);
3414 netif_device_attach(lif->netdev);
/* error unwind */
3419 ionic_txrx_free(lif);
3421 ionic_lif_deinit(lif);
3423 ionic_qcqs_free(lif);
3425 mutex_unlock(&lif->queue_lock);
/* Handle a firmware-up event: only acts if a FW reset was previously seen
 * (IONIC_LIF_F_FW_RESET set by ionic_lif_handle_fw_down).  Re-identifies
 * and re-initializes the device and port, restarts the LIF, and replays
 * the hardware timestamping queues.
 */
3430 static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
3432 struct ionic *ionic = lif->ionic;
3435 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
3438 dev_info(ionic->dev, "FW Up: restarting LIFs\n");
3440 /* This is a little different from what happens at
3441 * probe time because the LIF already exists so we
3442 * just need to reanimate it.
3444 ionic_init_devinfo(ionic);
3445 err = ionic_identify(ionic);
3448 err = ionic_port_identify(ionic);
3451 err = ionic_port_init(ionic);
3455 err = ionic_restart_lif(lif);
3459 dev_info(ionic->dev, "FW Up: LIFs restarted\n");
3461 /* restore the hardware timestamping queues */
3462 ionic_lif_hwstamp_replay(lif);
/* error path */
3467 dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
/* Final teardown of the LIF, mirroring ionic_lif_alloc(): free PHC, RSS
 * table, qcqs (with a device-side LIF reset unless we're mid FW-reset),
 * identity and info DMA buffers, the kernel doorbell mapping, the locks,
 * debugfs entries, and finally the netdev itself.
 */
3470 void ionic_lif_free(struct ionic_lif *lif)
3472 struct device *dev = lif->ionic->dev;
3474 ionic_lif_free_phc(lif);
3476 /* free rss indirection table */
3477 dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
3478 lif->rss_ind_tbl_pa);
3479 lif->rss_ind_tbl = NULL;
3480 lif->rss_ind_tbl_pa = 0;
3483 ionic_qcqs_free(lif);
/* don't poke dead firmware during a reset */
3484 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
3485 ionic_lif_reset(lif);
3488 kfree(lif->identity);
3489 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
3493 /* unmap doorbell page */
3494 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
3495 lif->kern_dbpage = NULL;
3497 mutex_destroy(&lif->config_lock);
3498 mutex_destroy(&lif->queue_lock);
3500 /* free netdev & lif */
3501 ionic_debugfs_del_lif(lif);
3502 free_netdev(lif->netdev);
/* Undo ionic_lif_init(): clears INITED exactly once, cancels deferred and
 * tx-timeout work plus rx filter / RSS state (skipped during FW reset,
 * where the FW already lost that state), then shuts down the admin and
 * notify control queues and resets the LIF on the device.
 */
3505 void ionic_lif_deinit(struct ionic_lif *lif)
3507 if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
3510 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
3511 cancel_work_sync(&lif->deferred.work);
3512 cancel_work_sync(&lif->tx_timeout_work);
3513 ionic_rx_filters_deinit(lif);
3514 if (lif->netdev->features & NETIF_F_RXHASH)
3515 ionic_lif_rss_deinit(lif);
3518 napi_disable(&lif->adminqcq->napi);
3519 ionic_lif_qcq_deinit(lif, lif->notifyqcq);
3520 ionic_lif_qcq_deinit(lif, lif->adminqcq);
3522 ionic_lif_reset(lif);
/* Initialize the admin queue on the device via a dev-command (the adminq
 * itself isn't usable yet), record the returned hw type/index and doorbell
 * value, attach and enable NAPI with a doorbell-check deadline, and if the
 * qcq owns an interrupt, set its affinity and unmask it.
 */
3525 static int ionic_lif_adminq_init(struct ionic_lif *lif)
3527 struct device *dev = lif->ionic->dev;
3528 struct ionic_q_init_comp comp;
3529 struct ionic_dev *idev;
3530 struct ionic_qcq *qcq;
3531 struct ionic_queue *q;
3534 idev = &lif->ionic->idev;
3535 qcq = lif->adminqcq;
3538 mutex_lock(&lif->ionic->dev_cmd_lock);
3539 ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
3540 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
3541 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
3542 mutex_unlock(&lif->ionic->dev_cmd_lock);
3544 netdev_err(lif->netdev, "adminq init failed %d\n", err);
/* record hw identifiers returned by the device */
3548 q->hw_type = comp.hw_type;
3549 q->hw_index = le32_to_cpu(comp.hw_index);
3550 q->dbval = IONIC_DBELL_QID(q->hw_index);
3552 dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
3553 dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
3555 q->dbell_deadline = IONIC_ADMIN_DOORBELL_DEADLINE;
3556 q->dbell_jiffies = jiffies;
3558 netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi);
3560 qcq->napi_qcq = qcq;
3561 timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
3563 napi_enable(&qcq->napi);
3565 if (qcq->flags & IONIC_QCQ_F_INTR) {
3566 irq_set_affinity_hint(qcq->intr.vector,
3567 &qcq->intr.affinity_mask);
3568 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
3569 IONIC_INTR_MASK_CLEAR);
3572 qcq->flags |= IONIC_QCQ_F_INITED;
/* Initialize the notify queue via an adminq Q_INIT command (the adminq is
 * up by now).  The notifyq shares the adminq's interrupt (intr_index),
 * records the returned hw type/index, and stashes the lif pointer in the
 * first desc_info slot for use by the event callback.
 */
3577 static int ionic_lif_notifyq_init(struct ionic_lif *lif)
3579 struct ionic_qcq *qcq = lif->notifyqcq;
3580 struct device *dev = lif->ionic->dev;
3581 struct ionic_queue *q = &qcq->q;
3584 struct ionic_admin_ctx ctx = {
3585 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
3587 .opcode = IONIC_CMD_Q_INIT,
3588 .lif_index = cpu_to_le16(lif->index),
3590 .ver = lif->qtype_info[q->type].version,
3591 .index = cpu_to_le32(q->index),
3592 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
3594 .intr_index = cpu_to_le16(lif->adminqcq->intr.index),
3595 .pid = cpu_to_le16(q->pid),
3596 .ring_size = ilog2(q->num_descs),
3597 .ring_base = cpu_to_le64(q->base_pa),
3601 dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
3602 dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
3603 dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
3604 dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
3606 err = ionic_adminq_post_wait(lif, &ctx);
3611 q->hw_type = ctx.comp.q_init.hw_type;
3612 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
3613 q->dbval = IONIC_DBELL_QID(q->hw_index);
3615 dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
3616 dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
3618 /* preset the callback info */
3619 q->info[0].cb_arg = lif;
3621 qcq->flags |= IONIC_QCQ_F_INITED;
/* Establish the netdev's station MAC address: query the device's MAC via
 * LIF_GETATTR; if the device has none, generate a random one and program
 * it back with ionic_program_mac().  If the netdev already had a non-zero
 * MAC (e.g. set before a fw-upgrade reset) make sure it is in the filter
 * list; otherwise commit the device MAC onto the netdev.  Finishes by
 * adding the station address as an rx filter.
 */
3626 static int ionic_station_set(struct ionic_lif *lif)
3628 struct net_device *netdev = lif->netdev;
3629 struct ionic_admin_ctx ctx = {
3630 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
3631 .cmd.lif_getattr = {
3632 .opcode = IONIC_CMD_LIF_GETATTR,
3633 .index = cpu_to_le16(lif->index),
3634 .attr = IONIC_LIF_ATTR_MAC,
3637 u8 mac_address[ETH_ALEN];
3638 struct sockaddr addr;
3641 err = ionic_adminq_post_wait(lif, &ctx);
3644 netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
3645 ctx.comp.lif_getattr.mac);
3646 ether_addr_copy(mac_address, ctx.comp.lif_getattr.mac);
3648 if (is_zero_ether_addr(mac_address)) {
3649 eth_hw_addr_random(netdev);
3650 netdev_dbg(netdev, "Random Mac generated: %pM\n", netdev->dev_addr);
3651 ether_addr_copy(mac_address, netdev->dev_addr);
3653 err = ionic_program_mac(lif, mac_address);
/* older FW may not reflect the programmed MAC back; tolerated */
3658 netdev_dbg(netdev, "%s:SET/GET ATTR Mac are not same-due to old FW running\n",
3664 if (!is_zero_ether_addr(netdev->dev_addr)) {
3665 /* If the netdev mac is non-zero and doesn't match the default
3666 * device address, it was set by something earlier and we're
3667 * likely here again after a fw-upgrade reset. We need to be
3668 * sure the netdev mac is in our filter list.
3670 if (!ether_addr_equal(mac_address, netdev->dev_addr))
3671 ionic_lif_addr_add(lif, netdev->dev_addr);
3673 /* Update the netdev mac with the device's mac */
3674 ether_addr_copy(addr.sa_data, mac_address);
3675 addr.sa_family = AF_INET;
3676 err = eth_prepare_mac_addr_change(netdev, &addr);
3678 netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
3683 eth_commit_mac_addr_change(netdev, &addr);
3686 netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
3688 ionic_lif_addr_add(lif, netdev->dev_addr);
/* Initialize the LIF on the device: issue the LIF_INIT dev-command, map
 * the kernel doorbell page from the returned hw_index, bring up the admin
 * and (optional) notify control queues, initialize NIC features, rx
 * filters (skipped during FW reset), and the station MAC, then mark the
 * lif INITED.  Error paths tear down whatever was brought up, in reverse.
 */
3693 int ionic_lif_init(struct ionic_lif *lif)
3695 struct ionic_dev *idev = &lif->ionic->idev;
3696 struct device *dev = lif->ionic->dev;
3697 struct ionic_lif_init_comp comp;
3701 mutex_lock(&lif->ionic->dev_cmd_lock);
3702 ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
3703 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
3704 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
3705 mutex_unlock(&lif->ionic->dev_cmd_lock);
3709 lif->hw_index = le16_to_cpu(comp.hw_index);
3711 /* now that we have the hw_index we can figure out our doorbell page */
3712 lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
3713 if (!lif->dbid_count) {
3714 dev_err(dev, "No doorbell pages, aborting\n");
3719 dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
3720 lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
3721 if (!lif->kern_dbpage) {
3722 dev_err(dev, "Cannot map dbpage, aborting\n");
3726 err = ionic_lif_adminq_init(lif);
3728 goto err_out_adminq_deinit;
3730 if (lif->ionic->nnqs_per_lif) {
3731 err = ionic_lif_notifyq_init(lif);
3733 goto err_out_notifyq_deinit;
3736 err = ionic_init_nic_features(lif);
3738 goto err_out_notifyq_deinit;
3740 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
3741 err = ionic_rx_filters_init(lif);
3743 goto err_out_notifyq_deinit;
3746 err = ionic_station_set(lif);
3748 goto err_out_notifyq_deinit;
3750 lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
3752 set_bit(IONIC_LIF_F_INITED, lif->state);
3754 INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
/* error unwind: mirror of the setup order above */
3758 err_out_notifyq_deinit:
3759 napi_disable(&lif->adminqcq->napi);
3760 ionic_lif_qcq_deinit(lif, lif->notifyqcq);
3761 err_out_adminq_deinit:
3762 ionic_lif_qcq_deinit(lif, lif->adminqcq);
3763 ionic_lif_reset(lif);
3764 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
3765 lif->kern_dbpage = NULL;
/* Work handler registered in ionic_lif_register(); body elided in this
 * view.  NOTE(review): confirm contents against the full file.
 */
3770 static void ionic_lif_notify_work(struct work_struct *ws)
/* Push the current netdev name to the device via a LIF_SETATTR/NAME
 * adminq command, so FW-side tooling sees the interface name.  Called at
 * register time and on NETDEV_CHANGENAME.
 */
3774 static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
3776 struct ionic_admin_ctx ctx = {
3777 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
3778 .cmd.lif_setattr = {
3779 .opcode = IONIC_CMD_LIF_SETATTR,
3780 .index = cpu_to_le16(lif->index),
3781 .attr = IONIC_LIF_ATTR_NAME,
3785 strscpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
3786 sizeof(ctx.cmd.lif_setattr.name));
3788 ionic_adminq_post_wait(lif, &ctx);
/* Return the ionic_lif behind a netdev, or NULL if the netdev does not
 * belong to this driver (identified by its ndo_start_xmit callback).
 */
3791 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
3793 if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
3796 return netdev_priv(netdev);
/* Netdevice notifier callback: on NETDEV_CHANGENAME for one of our own
 * netdevs (and only for the ionic instance owning this notifier block),
 * push the new name to the device.
 */
3799 static int ionic_lif_notify(struct notifier_block *nb,
3800 unsigned long event, void *info)
3802 struct net_device *ndev = netdev_notifier_info_to_dev(info);
3803 struct ionic *ionic = container_of(nb, struct ionic, nb);
3804 struct ionic_lif *lif = ionic_netdev_lif(ndev);
3806 if (!lif || lif->ionic != ionic)
3810 case NETDEV_CHANGENAME:
3811 ionic_lif_set_netdev_info(lif);
/* Register the LIF with the kernel: PHC registration, notify work and the
 * netdevice notifier, then register_netdev() for LIF0.  On netdev
 * registration failure the PHC is unregistered.  On success, request a
 * link status check and push the netdev name to the device.
 */
3818 int ionic_lif_register(struct ionic_lif *lif)
3822 ionic_lif_register_phc(lif);
3824 INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);
3826 lif->ionic->nb.notifier_call = ionic_lif_notify;
3828 err = register_netdevice_notifier(&lif->ionic->nb);
/* NULL notifier_call marks "not registered" for ionic_lif_unregister() */
3830 lif->ionic->nb.notifier_call = NULL;
3832 /* only register LIF0 for now */
3833 err = register_netdev(lif->netdev);
3835 dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
3836 ionic_lif_unregister_phc(lif);
3840 ionic_link_status_check_request(lif, CAN_SLEEP);
3841 lif->registered = true;
3842 ionic_lif_set_netdev_info(lif);
/* Undo ionic_lif_register(): remove the netdevice notifier and cancel its
 * work, unregister the netdev if it actually registered, and unregister
 * the PHC.
 */
3849 void ionic_lif_unregister(struct ionic_lif *lif)
3851 if (lif->ionic->nb.notifier_call) {
3850 unregister_netdevice_notifier(&lif->ionic->nb);
3851 cancel_work_sync(&lif->ionic->nb_work);
3852 lif->ionic->nb.notifier_call = NULL;
3855 if (lif->netdev->reg_state == NETREG_REGISTERED)
3856 unregister_netdev(lif->netdev);
3858 ionic_lif_unregister_phc(lif);
3860 lif->registered = false;
/* Query the device for per-queue-type capabilities: for each known qtype,
 * issue a QUEUE_IDENTIFY dev-command at the driver's supported version
 * (ionic_qtype_versions) and read the results directly from the dev-cmd
 * data registers into lif->qtype_info[].  Unsupported qtypes (-EINVAL) and
 * older FW without q_ident (-EIO) are tolerated; results are then clamped
 * to the driver's IONIC_MAX_FRAGS and the kernel's MAX_SKB_FRAGS limits.
 */
3863 static void ionic_lif_queue_identify(struct ionic_lif *lif)
3865 union ionic_q_identity __iomem *q_ident;
3866 struct ionic *ionic = lif->ionic;
3867 struct ionic_dev *idev;
3871 idev = &lif->ionic->idev;
/* identify results are read straight out of the dev_cmd data regs */
3872 q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data;
3874 for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
3875 struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
3877 /* filter out the ones we know about */
3879 case IONIC_QTYPE_ADMINQ:
3880 case IONIC_QTYPE_NOTIFYQ:
3881 case IONIC_QTYPE_RXQ:
3882 case IONIC_QTYPE_TXQ:
3888 memset(qti, 0, sizeof(*qti));
3890 mutex_lock(&ionic->dev_cmd_lock);
3891 ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
3892 ionic_qtype_versions[qtype]);
3893 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
3895 qti->version = readb(&q_ident->version);
3896 qti->supported = readb(&q_ident->supported);
3897 qti->features = readq(&q_ident->features);
3898 qti->desc_sz = readw(&q_ident->desc_sz);
3899 qti->comp_sz = readw(&q_ident->comp_sz);
3900 qti->sg_desc_sz = readw(&q_ident->sg_desc_sz);
3901 qti->max_sg_elems = readw(&q_ident->max_sg_elems);
3902 qti->sg_desc_stride = readw(&q_ident->sg_desc_stride);
3904 mutex_unlock(&ionic->dev_cmd_lock);
3906 if (err == -EINVAL) {
3907 dev_err(ionic->dev, "qtype %d not supported\n", qtype);
3909 } else if (err == -EIO) {
3910 dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
3913 dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
3918 dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
3919 qtype, qti->version);
3920 dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
3921 qtype, qti->supported);
3922 dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
3923 qtype, qti->features);
3924 dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
3925 qtype, qti->desc_sz);
3926 dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
3927 qtype, qti->comp_sz);
3928 dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
3929 qtype, qti->sg_desc_sz);
3930 dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
3931 qtype, qti->max_sg_elems);
3932 dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
3933 qtype, qti->sg_desc_stride);
/* clamp SG element count to driver and kernel limits */
3935 if (qti->max_sg_elems >= IONIC_MAX_FRAGS) {
3936 qti->max_sg_elems = IONIC_MAX_FRAGS - 1;
3937 dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to IONIC_MAX_FRAGS-1 %d\n",
3938 qtype, qti->max_sg_elems);
3941 if (qti->max_sg_elems > MAX_SKB_FRAGS) {
3942 qti->max_sg_elems = MAX_SKB_FRAGS;
3943 dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to MAX_SKB_FRAGS %d\n",
3944 qtype, qti->max_sg_elems);
/* Ask the firmware to describe a LIF of the given type and copy the
 * identity structure out of the dev_cmd data window into @lid.
 * The copy is bounded by the smaller of the destination struct and the
 * MMIO data area.  Returns the dev_cmd status (negative errno on
 * failure; the error-return line is elided from this view).
 */
3949 int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
3950 union ionic_lif_identity *lid)
3952 struct ionic_dev *idev = &ionic->idev;
/* Never copy more than either the dest struct or the MMIO window holds */
3956 sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));
/* Serialize use of the shared dev_cmd register window */
3958 mutex_lock(&ionic->dev_cmd_lock);
3959 ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
3960 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
/* MMIO source, so memcpy_fromio rather than plain memcpy */
3961 memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
3962 mutex_unlock(&ionic->dev_cmd_lock);
/* Debug dump of the interesting identity fields (all LE on the wire) */
3966 dev_dbg(ionic->dev, "capabilities 0x%llx\n",
3967 le64_to_cpu(lid->capabilities));
3969 dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
3970 le32_to_cpu(lid->eth.max_ucast_filters));
3971 dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
3972 le32_to_cpu(lid->eth.max_mcast_filters));
3973 dev_dbg(ionic->dev, "eth.features 0x%llx\n",
3974 le64_to_cpu(lid->eth.config.features));
3975 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
3976 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
3977 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
3978 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
3979 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
3980 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
3981 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
3982 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
3983 dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
3984 dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
3985 dev_dbg(ionic->dev, "eth.config.mtu %d\n",
3986 le32_to_cpu(lid->eth.config.mtu));
3991 int ionic_lif_size(struct ionic *ionic)
3993 struct ionic_identity *ident = &ionic->ident;
3994 unsigned int nintrs, dev_nintrs;
3995 union ionic_lif_config *lc;
3996 unsigned int ntxqs_per_lif;
3997 unsigned int nrxqs_per_lif;
3998 unsigned int neqs_per_lif;
3999 unsigned int nnqs_per_lif;
4000 unsigned int nxqs, neqs;
4001 unsigned int min_intrs;
4004 /* retrieve basic values from FW */
4005 lc = &ident->lif.eth.config;
4006 dev_nintrs = le32_to_cpu(ident->dev.nintrs);
4007 neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
4008 nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
4009 ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
4010 nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);
4012 /* limit values to play nice with kdump */
4013 if (is_kdump_kernel()) {
4021 /* reserve last queue id for hardware timestamping */
4022 if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) {
4023 if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) {
4024 lc->features &= cpu_to_le64(~IONIC_ETH_HW_TIMESTAMP);
4031 nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
4032 nxqs = min(nxqs, num_online_cpus());
4033 neqs = min(neqs_per_lif, num_online_cpus());
4037 * 1 for master lif adminq/notifyq
4038 * 1 for each CPU for master lif TxRx queue pairs
4039 * whatever's left is for RDMA queues
4041 nintrs = 1 + nxqs + neqs;
4042 min_intrs = 2; /* adminq + 1 TxRx queue pair */
4044 if (nintrs > dev_nintrs)
4047 err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
4048 if (err < 0 && err != -ENOSPC) {
4049 dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
4055 if (err != nintrs) {
4056 ionic_bus_free_irq_vectors(ionic);
4060 ionic->nnqs_per_lif = nnqs_per_lif;
4061 ionic->neqs_per_lif = neqs;
4062 ionic->ntxqs_per_lif = nxqs;
4063 ionic->nrxqs_per_lif = nxqs;
4064 ionic->nintrs = nintrs;
4066 ionic_debugfs_add_sizes(ionic);
4071 if (nnqs_per_lif > 1) {
4083 dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);