/* cnic.c: QLogic CNIC core network driver.
 *
 * Copyright (c) 2006-2014 Broadcom Corporation
 * Copyright (c) 2014-2015 QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Previously modified and maintained by: Michael Chan <mchan@broadcom.com>
 * Maintained By: Dept-HSGLinuxNICDev@qlogic.com
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#define BCM_CNIC	1
#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
#include "cnic.h"
#include "cnic_defs.h"
#define CNIC_MODULE_NAME	"cnic"

static char version[] =
	"QLogic " CNIC_MODULE_NAME " Driver v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("QLogic cnic Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);
/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}
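/*
 * Readers that do not hold cnic_lock use the normal RCU pattern instead,
 * as elsewhere in this file; an illustrative sketch:
 *
 *	rcu_read_lock();
 *	ulp_ops = rcu_dereference(cnic_ulp_tbl[ulp_type]);
 *	if (ulp_ops)
 *		ulp_ops->cnic_init(dev);
 *	rcu_read_unlock();
 */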
static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);
static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;
	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;

	return 0;
}
static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}
static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}
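/* cnic_from_netdev() returns the device with a reference already taken
 * (cnic_hold() under cnic_dev_lock), so a successful lookup must be
 * balanced by cnic_put() once the caller is done with the device. */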
static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);

	return io->data;
}

static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg, int state)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct fcoe_capabilities *fcoe_cap =
		&info.data.register_data.fcoe_features;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	if (reg) {
		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
			memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
	} else {
		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
	}

	info.data.ulp_type = ulp_type;
	info.drv_state = state;
	ethdev->drv_ctl(dev->netdev, &info);
}
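/* All of the helpers above share one pattern: cnic never pokes chip
 * context memory, indirect registers or the L2 rings itself.  It fills
 * in a drv_ctl_info command block and hands it to ethdev->drv_ctl(),
 * the control entry point exported by the underlying bnx2/bnx2x
 * ethernet driver. */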
static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	memset(&info, 0, sizeof(struct drv_ctl_info));
	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	if (!cp->ctx_tbl)
		return -EINVAL;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return rc;
}

static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			csk->vlan_id = path_resp->vlan_id;

			memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rc = 0;
	}
	}

	return rc;
}
static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_atomic();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_atomic();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}
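/* SK_F_OFFLD_SCHED doubles as a per-socket busy bit: the path that wins
 * test_and_set_bit() owns the offload state machine until it clears the
 * bit again.  cnic_close_prep() and cnic_abort_prep() therefore spin
 * (sleeping between attempts) so that an in-flight offload is finished
 * before a close or abort ramrod is issued. */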
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}
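/*
 * Illustrative only: a ULP such as bnx2i or bnx2fc is expected to call
 * cnic_register_driver() once at module init and undo it on exit.  The
 * callback names below are made up for the example, not taken from this
 * file:
 *
 *	static struct cnic_ulp_ops my_ulp_ops = {
 *		.cnic_init	= my_ulp_init,
 *		.cnic_exit	= my_ulp_exit,
 *	};
 *
 *	rc = cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
 *	...
 *	cnic_unregister_driver(CNIC_ULP_ISCSI);
 */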
int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		pr_warn("%s: Failed waiting for ref count to go to zero\n",
			__func__);
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);
static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	cnic_ulp_ctl(dev, ulp_type, true, DRV_ACTIVE);

	return 0;

}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	mutex_lock(&cnic_lock);
	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_FCOE)
		dev->fcoe_cap = NULL;

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	if (test_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
		cnic_ulp_ctl(dev, ulp_type, false, DRV_UNLOADED);
	else
		cnic_ulp_ctl(dev, ulp_type, false, DRV_INACTIVE);

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
			    u32 next)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = next;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = bitmap_zalloc(size, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	bitmap_free(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}
/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}
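/* Note the round-robin wrap "(id + 1) & (id_tbl->max - 1)" is only exact
 * when id_tbl->max is a power of two; for other sizes it merely picks a
 * restart point, and the two find_*_zero_bit() passes above still keep
 * the result within [0, max). */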
static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}

static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}
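/* Two variants exist because the two chip families consume the page
 * table differently: cnic_setup_page_tbl() stores the high 32 bits of
 * each DMA address first ("big endian" entries for bnx2), while
 * cnic_setup_page_tbl_le() stores the low word first for bnx2x.
 * cp->setup_pgtbl is pointed at the appropriate one during device
 * setup (outside this excerpt). */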
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    CNIC_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
			  ~(CNIC_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}
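/* cnic_alloc_dma() backs a queue with 'pages' individually allocated
 * coherent pages rather than one large region; with use_pg_tbl set it
 * also builds the page table the firmware uses to chain them.  The
 * error path may run on a partially constructed cnic_dma, which is why
 * cnic_free_dma() checks every pointer before freeing. */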
static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
{
	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}
}

static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	__cnic_free_uio_rings(udev);

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}
static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
		if (udev->uio_dev == -1)
			__cnic_free_uio_rings(udev);
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}
static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = CNIC_PAGE_SIZE;
		cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   CNIC_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}
static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}
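/*
 * On bnx2x the last slot of each KCQ page is not a real KCQE; it holds
 * the bnx2x_bd_chain_next link to the following page (filled in by
 * cnic_alloc_kcq() below).  The bnx2x index helpers therefore skip any
 * index whose low bits equal MAX_KCQE_CNT.  Assuming MAX_KCQE_CNT is of
 * the form 2^n - 1, the producer walks ..., MAX_KCQE_CNT - 1,
 * MAX_KCQE_CNT + 1, ... with the reserved slot stepped over.
 */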
static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
			  bool use_pg_tbl)
{
	int err, i, use_page_tbl = 0;
	struct kcqe **kcq;

	if (use_pg_tbl)
		use_page_tbl = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	info->next_idx = cnic_bnx2_next_idx;
	info->hw_idx = cnic_bnx2_hw_idx;
	if (use_pg_tbl)
		return 0;

	info->next_idx = cnic_bnx2x_next_idx;
	info->hw_idx = cnic_bnx2x_hw_idx;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}
static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
{
	struct cnic_local *cp = udev->dev->cnic_priv;

	if (udev->l2_ring)
		return 0;

	udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map, GFP_KERNEL);
	if (!udev->l2_ring)
		return -ENOMEM;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map, GFP_KERNEL);
	if (!udev->l2_buf) {
		__cnic_free_uio_rings(udev);
		return -ENOMEM;
	}

	return 0;
}

static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			if (__cnic_alloc_uio_rings(udev, pages)) {
				udev->dev = NULL;
				return -ENOMEM;
			}
			cp->udev = udev;
			return 0;
		}
	}

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;

	if (__cnic_alloc_uio_rings(udev, pages))
		goto err_udev;

	list_add(&udev->list, &cnic_udev_list);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;

err_udev:
	kfree(udev);
	return -ENOMEM;
}
static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
						     TX_MAX_TSS_RINGS + 1);
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					CNIC_PAGE_MASK;
		uinfo->mem[1].dma_addr = cp->status_blk_map;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = PAGE_ALIGN(BNX2_SBLK_MSIX_ALIGN_SIZE * 9);
		else
			uinfo->mem[1].size = PAGE_ALIGN(BNX2_SBLK_MSIX_ALIGN_SIZE);

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);

		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			CNIC_PAGE_MASK;
		uinfo->mem[1].dma_addr = cp->status_blk_map;
		uinfo->mem[1].size = PAGE_ALIGN(sizeof(*cp->bnx2x_def_status_blk));

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].dma_device = &dev->pcidev->dev;
	uinfo->mem[1].memtype = UIO_MEM_DMA_COHERENT;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].dma_addr = udev->l2_ring_map;
	uinfo->mem[2].size = PAGE_ALIGN(udev->l2_ring_size);
	uinfo->mem[2].dma_device = &dev->pcidev->dev;
	uinfo->mem[2].memtype = UIO_MEM_DMA_COHERENT;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].dma_addr = udev->l2_buf_map;
	uinfo->mem[3].size = PAGE_ALIGN(udev->l2_buf_size);
	uinfo->mem[3].dma_device = &dev->pcidev->dev;
	uinfo->mem[3].memtype = UIO_MEM_DMA_COHERENT;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}
static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!CHIP_IS_E1(bp))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}
static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		cp->max_cid_space += dev->max_fcoe_conn;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	cp->iscsi_tbl = kcalloc(MAX_ISCSI_TBL_SZ, sizeof(struct cnic_iscsi),
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kcalloc(cp->max_cid_space, sizeof(struct cnic_context),
			      GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		CNIC_PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
	if (ret)
		goto error;

	if (CNIC_SUPPORTS_FCOE(bp)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
		if (ret)
			goto error;
	}

	pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
		return 0;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
	cp->status_blk_map = cp->ethdev->irq_arr[1].status_blk_map;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}
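/*
 * kwq accounting uses free-running 16-bit indices with max_kwq_idx as a
 * 2^n - 1 mask, so occupancy is (prod - con) & mask even across wrap.
 * Worked example with a 256-entry ring (mask 0xff): prod = 0x0102 and
 * con = 0x00fe give (0x0102 - 0x00fe) & 0xff = 4 entries in flight,
 * leaving 0xff - 4 = 0xfb slots available.
 */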
static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}
static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}
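/* Every context owns a small CNIC_KWQ16_DATA_SIZE buffer carved out of
 * kwq_16_data_info (see cnic_alloc_bnx2x_resc()).  This helper hands back
 * its kernel virtual address and stashes the matching DMA address, split
 * into lo/hi halves, in the l5cm_specific_data that travels with the
 * ramrod. */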
static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	u16 type_16;
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(bp, cid)));

	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		   SPE_HDR_FUNCTION_ID;

	kwqe.hdr.type = cpu_to_le16(type_16);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return ret;
}
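/* The 16-bit SPE header type packs both the connection type and the PF
 * id, as built above.  drv_submit_kwqes_16() appears to return the
 * number of WQEs accepted, which is why a return of 1 from submitting a
 * single WQE is normalized to 0 (success). */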
static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}

static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
				       int en_tcp_dack)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (time_stamps) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}
	if (en_tcp_dack)
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
}
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = bp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
	hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	cnic_bnx2x_set_tcp_options(dev,
			req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
			req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);

	return 0;
}
static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = bp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}
static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
		cnic_free_id(&cp->cid_tbl, ctx->cid);
	} else {
		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
	}

	ctx->cid = 0;
}

static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
		if (cid == -1) {
			ret = -ENOMEM;
			goto error;
		}
		ctx->cid = cid;
		return 0;
	}

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}
static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(bp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;
	u8 port = BP_PORT(bp);

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
		ETH_P_8021Q;
	if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
	    bp->common.chip_port_mode == CHIP_2_PORT_MODE) {
		port = 0;
	}
	ictx->xstorm_st_context.common.flags =
		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
	ictx->xstorm_st_context.common.flags =
		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == ISCSI_KWQE_CONN_OFFLOAD3_MAX_CQS) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;
}
static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return 0;
}
static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(bp, ctx->cid);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);

	if (ret == 0) {
		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
			return -EBUSY;
	}

	return ret;
}
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	if (!ret) {
		atomic_dec(&cp->iscsi_conn);
		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
	}

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->max_rt_time = 0xffffffff;
}
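/* The pseudo-header checksum is precomputed so the hardware only folds
 * in the TCP segment.  csum_ipv6_magic() with zero length works for the
 * IPv4 case too: the v4 addresses were copied into in6_addr structs whose
 * remaining words are zero, and zero words do not change the ones'
 * complement sum. */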
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = bp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[0]);
}
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;

		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
			kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;
}
static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->pg_host_opaque;
	kcqe.pg_cid = req->pg_cid;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}
static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_stat *req;
	struct fcoe_stat_ramrod_params *fcoe_stat;
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int ret;
	u32 cid;

	req = (struct fcoe_kwqe_stat *) kwqe;
	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);

	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_stat)
		return -ENOMEM;

	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));

	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 cid;
	struct fcoe_init_ramrod_params *fcoe_init;
	struct fcoe_kwqe_init1 *req1;
	struct fcoe_kwqe_init2 *req2;
	struct fcoe_kwqe_init3 *req3;
	union l5cm_specific_data l5_data;

	if (num < 3) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
		*work = 1;
		return -EINVAL;
	}
	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
		*work = 2;
		return -EINVAL;
	}

	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_init size too big\n");
		return -ENOMEM;
	}
	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_init)
		return -ENOMEM;

	memset(fcoe_init, 0, sizeof(*fcoe_init));
	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;

	fcoe_init->sb_num = cp->status_blk_num;
	fcoe_init->eq_prod = MAX_KCQ_IDX;
	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
	cp->kcq2.sw_prod_idx = 0;

	cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	*work = 3;
	return ret;
}
2342 static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2346 u32 cid = -1, l5_cid;
2347 struct cnic_local *cp = dev->cnic_priv;
2348 struct bnx2x *bp = netdev_priv(dev->netdev);
2349 struct fcoe_kwqe_conn_offload1 *req1;
2350 struct fcoe_kwqe_conn_offload2 *req2;
2351 struct fcoe_kwqe_conn_offload3 *req3;
2352 struct fcoe_kwqe_conn_offload4 *req4;
2353 struct fcoe_conn_offload_ramrod_params *fcoe_offload;
2354 struct cnic_context *ctx;
2355 struct fcoe_context *fctx;
2356 struct regpair ctx_addr;
2357 union l5cm_specific_data l5_data;
2358 struct fcoe_kcqe kcqe;
2359 struct kcqe *cqes[1];
2365 req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
2366 req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
2367 req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
2368 req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
2372 l5_cid = req1->fcoe_conn_id;
2373 if (l5_cid >= dev->max_fcoe_conn)
2376 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2378 ctx = &cp->ctx_tbl[l5_cid];
2379 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2382 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
2389 fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
2391 u32 hw_cid = BNX2X_HW_CID(bp, cid);
2394 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
2395 FCOE_CONNECTION_TYPE);
2396 fctx->xstorm_ag_context.cdu_reserved = val;
2397 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
2398 FCOE_CONNECTION_TYPE);
2399 fctx->ustorm_ag_context.cdu_usage = val;
2401 if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
2402 netdev_err(dev->netdev, "fcoe_offload size too big\n");
2405 fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2409 memset(fcoe_offload, 0, sizeof(*fcoe_offload));
2410 memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
2411 memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
2412 memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
2413 memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
2415 cid = BNX2X_HW_CID(bp, cid);
2416 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
2417 FCOE_CONNECTION_TYPE, &l5_data);
2419 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2425 cnic_free_bnx2x_conn_resc(dev, l5_cid);
2427 memset(&kcqe, 0, sizeof(kcqe));
2428 kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
2429 kcqe.fcoe_conn_id = req1->fcoe_conn_id;
2430 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
2432 cqes[0] = (struct kcqe *) &kcqe;
2433 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2437 static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2439 struct fcoe_kwqe_conn_enable_disable *req;
2440 struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
2441 union l5cm_specific_data l5_data;
2444 struct cnic_local *cp = dev->cnic_priv;
2446 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2447 cid = req->context_id;
2448 l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
2450 if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
2451 netdev_err(dev->netdev, "fcoe_enable size too big\n");
2454 fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2458 memset(fcoe_enable, 0, sizeof(*fcoe_enable));
2459 memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
2460 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2461 FCOE_CONNECTION_TYPE, &l5_data);
2465 static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2467 struct fcoe_kwqe_conn_enable_disable *req;
2468 struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
2469 union l5cm_specific_data l5_data;
2472 struct cnic_local *cp = dev->cnic_priv;
2474 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2475 cid = req->context_id;
2476 l5_cid = req->conn_id;
2477 if (l5_cid >= dev->max_fcoe_conn)
2480 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2482 if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
2483 netdev_err(dev->netdev, "fcoe_disable size too big\n");
2486 fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2490 memset(fcoe_disable, 0, sizeof(*fcoe_disable));
2491 memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
2492 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2493 FCOE_CONNECTION_TYPE, &l5_data);
2497 static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2499 struct fcoe_kwqe_conn_destroy *req;
2500 union l5cm_specific_data l5_data;
2503 struct cnic_local *cp = dev->cnic_priv;
2504 struct cnic_context *ctx;
2505 struct fcoe_kcqe kcqe;
2506 struct kcqe *cqes[1];
2508 req = (struct fcoe_kwqe_conn_destroy *) kwqe;
2509 cid = req->context_id;
2510 l5_cid = req->conn_id;
2511 if (l5_cid >= dev->max_fcoe_conn)
2514 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2516 ctx = &cp->ctx_tbl[l5_cid];
2518 init_waitqueue_head(&ctx->waitq);
2521 memset(&kcqe, 0, sizeof(kcqe));
2522 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
2523 memset(&l5_data, 0, sizeof(l5_data));
2524 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2525 FCOE_CONNECTION_TYPE, &l5_data);
2527 wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
2529 kcqe.completion_status = 0;
2532 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2533 queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));
2535 kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
2536 kcqe.fcoe_conn_id = req->conn_id;
2537 kcqe.fcoe_conn_context_id = cid;
2539 cqes[0] = (struct kcqe *) &kcqe;
2540 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2544 static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
2546 struct cnic_local *cp = dev->cnic_priv;
2549 for (i = start_cid; i < cp->max_cid_space; i++) {
2550 struct cnic_context *ctx = &cp->ctx_tbl[i];
2553 while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
2556 for (j = 0; j < 5; j++) {
2557 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2562 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2563 netdev_warn(dev->netdev, "CID %x not deleted\n",
2568 static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2570 union l5cm_specific_data l5_data;
2571 struct cnic_local *cp = dev->cnic_priv;
2572 struct bnx2x *bp = netdev_priv(dev->netdev);
2576 cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
2578 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
2580 memset(&l5_data, 0, sizeof(l5_data));
2581 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
2582 FCOE_CONNECTION_TYPE, &l5_data);
2586 static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
2588 struct cnic_local *cp = dev->cnic_priv;
2590 struct kcqe *cqes[1];
2592 u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2593 u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
2597 cid = kwqe->kwqe_info0;
2598 memset(&kcqe, 0, sizeof(kcqe));
2600 if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
2603 ulp_type = CNIC_ULP_FCOE;
2604 if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
2605 struct fcoe_kwqe_conn_enable_disable *req;
2607 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2608 kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
2609 cid = req->context_id;
2610 l5_cid = req->conn_id;
2611 } else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
2612 kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
2616 kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
2617 kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
2618 kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
2619 kcqe.kcqe_info2 = cid;
2620 kcqe.kcqe_info0 = l5_cid;
2622 } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
2623 ulp_type = CNIC_ULP_ISCSI;
2624 if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
2625 cid = kwqe->kwqe_info1;
2627 kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
2628 kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
2629 kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
2630 kcqe.kcqe_info2 = cid;
2631 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);
2633 } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
2634 struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
2636 ulp_type = CNIC_ULP_L4;
2637 if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
2638 kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
2639 else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
2640 kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
2641 else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
2642 kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
2646 kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
2647 KCQE_FLAGS_LAYER_MASK_L4;
2648 l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
2650 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
2656 cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
2659 static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2660 struct kwqe *wqes[], u32 num_wqes)
2666 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2667 return -EAGAIN; /* bnx2 is down */
2669 for (i = 0; i < num_wqes; ) {
2671 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2675 case ISCSI_KWQE_OPCODE_INIT1:
2676 ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2678 case ISCSI_KWQE_OPCODE_INIT2:
2679 ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2681 case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
2682 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2683 num_wqes - i, &work);
2685 case ISCSI_KWQE_OPCODE_UPDATE_CONN:
2686 ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2688 case ISCSI_KWQE_OPCODE_DESTROY_CONN:
2689 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2691 case L4_KWQE_OPCODE_VALUE_CONNECT1:
2692 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2695 case L4_KWQE_OPCODE_VALUE_CLOSE:
2696 ret = cnic_bnx2x_close(dev, kwqe);
2698 case L4_KWQE_OPCODE_VALUE_RESET:
2699 ret = cnic_bnx2x_reset(dev, kwqe);
2701 case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
2702 ret = cnic_bnx2x_offload_pg(dev, kwqe);
2704 case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
2705 ret = cnic_bnx2x_update_pg(dev, kwqe);
2707 case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
2712 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2717 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2720 /* Possibly bnx2x parity error, send completion
2721 * to ulp drivers with error code to speed up
2722 * cleanup and reset recovery.
2724 if (ret == -EIO || ret == -EAGAIN)
2725 cnic_bnx2x_kwqe_err(dev, kwqe);
2732 static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2733 struct kwqe *wqes[], u32 num_wqes)
2735 struct bnx2x *bp = netdev_priv(dev->netdev);
2740 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2741 return -EAGAIN; /* bnx2 is down */
2743 if (!BNX2X_CHIP_IS_E2_PLUS(bp))
2746 for (i = 0; i < num_wqes; ) {
2748 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2752 case FCOE_KWQE_OPCODE_INIT1:
2753 ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2754 num_wqes - i, &work);
2756 case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
2757 ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2758 num_wqes - i, &work);
2760 case FCOE_KWQE_OPCODE_ENABLE_CONN:
2761 ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2763 case FCOE_KWQE_OPCODE_DISABLE_CONN:
2764 ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2766 case FCOE_KWQE_OPCODE_DESTROY_CONN:
2767 ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2769 case FCOE_KWQE_OPCODE_DESTROY:
2770 ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2772 case FCOE_KWQE_OPCODE_STAT:
2773 ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2777 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2782 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2785 /* Possibly bnx2x parity error, send completion
2786 * to ulp drivers with error code to speed up
2787 * cleanup and reset recovery.
2789 if (ret == -EIO || ret == -EAGAIN)
2790 cnic_bnx2x_kwqe_err(dev, kwqe);
2797 static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2803 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2804 return -EAGAIN; /* bnx2x is down */
2809 layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
2810 switch (layer_code) {
2811 case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
2812 case KWQE_FLAGS_LAYER_MASK_L4:
2813 case KWQE_FLAGS_LAYER_MASK_L2:
2814 ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2817 case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
2818 ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
2824 static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
2826 if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
2827 return KCQE_FLAGS_LAYER_MASK_L4;
2829 return opflag & KCQE_FLAGS_LAYER_MASK;
2832 static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2834 struct cnic_local *cp = dev->cnic_priv;
2840 struct cnic_ulp_ops *ulp_ops;
2842 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
2843 u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
2845 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
2848 while (j < num_cqes) {
2849 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
2851 if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
2854 if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
2859 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
2860 ulp_type = CNIC_ULP_RDMA;
2861 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
2862 ulp_type = CNIC_ULP_ISCSI;
2863 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
2864 ulp_type = CNIC_ULP_FCOE;
2865 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
2866 ulp_type = CNIC_ULP_L4;
2867 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
2870 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2876 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
2877 if (likely(ulp_ops)) {
2878 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
2879 cp->completed_kcq + i, j);
2888 cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
2891 static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
2893 struct cnic_local *cp = dev->cnic_priv;
2894 u16 i, ri, hw_prod, last;
2896 int kcqe_cnt = 0, last_cnt = 0;
2898 i = ri = last = info->sw_prod_idx;
2900 hw_prod = *info->hw_prod_idx_ptr;
2901 hw_prod = info->hw_idx(hw_prod);
2903 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
2904 kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
2905 cp->completed_kcq[kcqe_cnt++] = kcqe;
2906 i = info->next_idx(i);
2907 ri = i & MAX_KCQ_IDX;
2908 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
2909 last_cnt = kcqe_cnt;
2914 info->sw_prod_idx = last;
2918 static int cnic_l2_completion(struct cnic_local *cp)
2920 u16 hw_cons, sw_cons;
2921 struct cnic_uio_dev *udev = cp->udev;
2922 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2923 (udev->l2_ring + (2 * CNIC_PAGE_SIZE));
2927 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2930 hw_cons = *cp->rx_cons_ptr;
2931 if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2934 sw_cons = cp->rx_cons;
2935 while (sw_cons != hw_cons) {
2938 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2939 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2940 if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2941 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2942 cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2943 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2944 cmd == RAMROD_CMD_ID_ETH_HALT)
2947 sw_cons = BNX2X_NEXT_RCQE(sw_cons);
2952 static void cnic_chk_pkt_rings(struct cnic_local *cp)
2954 u16 rx_cons, tx_cons;
2957 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
2960 rx_cons = *cp->rx_cons_ptr;
2961 tx_cons = *cp->tx_cons_ptr;
2962 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2963 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2964 comp = cnic_l2_completion(cp);
2966 cp->tx_cons = tx_cons;
2967 cp->rx_cons = rx_cons;
2970 uio_event_notify(&cp->udev->cnic_uinfo);
2973 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
2976 static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2978 struct cnic_local *cp = dev->cnic_priv;
2979 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2982 /* status block index must be read before reading other fields */
2984 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2986 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
2988 service_kcqes(dev, kcqe_cnt);
2990 /* Tell compiler that status_blk fields can change. */
2992 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2993 /* status block index must be read first */
2995 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2998 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
3000 cnic_chk_pkt_rings(cp);
3005 static int cnic_service_bnx2(void *data, void *status_blk)
3007 struct cnic_dev *dev = data;
3009 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
3010 struct status_block *sblk = status_blk;
3012 return sblk->status_idx;
3015 return cnic_service_bnx2_queues(dev);
3018 static void cnic_service_bnx2_msix(struct tasklet_struct *t)
3020 struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
3021 struct cnic_dev *dev = cp->dev;
3023 cp->last_status_idx = cnic_service_bnx2_queues(dev);
3025 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
3026 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
3029 static void cnic_doirq(struct cnic_dev *dev)
3031 struct cnic_local *cp = dev->cnic_priv;
3033 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
3034 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
3036 prefetch(cp->status_blk.gen);
3037 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
3039 tasklet_schedule(&cp->cnic_irq_task);
3043 static irqreturn_t cnic_irq(int irq, void *dev_instance)
3045 struct cnic_dev *dev = dev_instance;
3046 struct cnic_local *cp = dev->cnic_priv;
3056 static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
3057 u16 index, u8 op, u8 update)
3059 struct bnx2x *bp = netdev_priv(dev->netdev);
3060 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 +
3061 COMMAND_REG_INT_ACK);
3062 struct igu_ack_register igu_ack;
3064 igu_ack.status_block_index = index;
3065 igu_ack.sb_id_and_flags =
3066 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
3067 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
3068 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
3069 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
3071 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
3074 static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
3075 u16 index, u8 op, u8 update)
3077 struct igu_regular cmd_data;
3078 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
3080 cmd_data.sb_id_and_flags =
3081 (index << IGU_REGULAR_SB_INDEX_SHIFT) |
3082 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
3083 (update << IGU_REGULAR_BUPDATE_SHIFT) |
3084 (op << IGU_REGULAR_ENABLE_INT_SHIFT);
3087 CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
3090 static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
3092 struct cnic_local *cp = dev->cnic_priv;
3094 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
3095 IGU_INT_DISABLE, 0);
3098 static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
3100 struct cnic_local *cp = dev->cnic_priv;
3102 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
3103 IGU_INT_DISABLE, 0);
3106 static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
3108 struct cnic_local *cp = dev->cnic_priv;
3110 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
3114 static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
3116 struct cnic_local *cp = dev->cnic_priv;
3118 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
3122 static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
3124 u32 last_status = *info->status_idx_ptr;
3127 /* status block index must be read before reading the KCQ */
3129 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
3131 service_kcqes(dev, kcqe_cnt);
3133 /* Tell compiler that sblk fields can change. */
3136 last_status = *info->status_idx_ptr;
3137 /* status block index must be read before reading the KCQ */
3143 static void cnic_service_bnx2x_bh(struct tasklet_struct *t)
3145 struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
3146 struct cnic_dev *dev = cp->dev;
3147 struct bnx2x *bp = netdev_priv(dev->netdev);
3148 u32 status_idx, new_status_idx;
3150 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
3154 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
3156 CNIC_WR16(dev, cp->kcq1.io_addr,
3157 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
3159 if (!CNIC_SUPPORTS_FCOE(bp)) {
3160 cp->arm_int(dev, status_idx);
3164 new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
3166 if (new_status_idx != status_idx)
3169 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
3172 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
3173 status_idx, IGU_INT_ENABLE, 1);
3179 static int cnic_service_bnx2x(void *data, void *status_blk)
3181 struct cnic_dev *dev = data;
3182 struct cnic_local *cp = dev->cnic_priv;
3184 if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
3187 cnic_chk_pkt_rings(cp);
3192 static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
3194 struct cnic_ulp_ops *ulp_ops;
3196 if (if_type == CNIC_ULP_ISCSI)
3197 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
3199 mutex_lock(&cnic_lock);
3200 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3201 lockdep_is_held(&cnic_lock));
3203 mutex_unlock(&cnic_lock);
3206 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3207 mutex_unlock(&cnic_lock);
3209 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3210 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
3212 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3215 static void cnic_ulp_stop(struct cnic_dev *dev)
3217 struct cnic_local *cp = dev->cnic_priv;
3220 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
3221 cnic_ulp_stop_one(cp, if_type);
3224 static void cnic_ulp_start(struct cnic_dev *dev)
3226 struct cnic_local *cp = dev->cnic_priv;
3229 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
3230 struct cnic_ulp_ops *ulp_ops;
3232 mutex_lock(&cnic_lock);
3233 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3234 lockdep_is_held(&cnic_lock));
3235 if (!ulp_ops || !ulp_ops->cnic_start) {
3236 mutex_unlock(&cnic_lock);
3239 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3240 mutex_unlock(&cnic_lock);
3242 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3243 ulp_ops->cnic_start(cp->ulp_handle[if_type]);
3245 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3249 static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
3251 struct cnic_local *cp = dev->cnic_priv;
3252 struct cnic_ulp_ops *ulp_ops;
3255 mutex_lock(&cnic_lock);
3256 ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
3257 lockdep_is_held(&cnic_lock));
3258 if (ulp_ops && ulp_ops->cnic_get_stats)
3259 rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
3262 mutex_unlock(&cnic_lock);
3266 static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3268 struct cnic_dev *dev = data;
3269 int ulp_type = CNIC_ULP_ISCSI;
3271 switch (info->cmd) {
3272 case CNIC_CTL_STOP_CMD:
3280 case CNIC_CTL_START_CMD:
3283 if (!cnic_start_hw(dev))
3284 cnic_ulp_start(dev);
3288 case CNIC_CTL_STOP_ISCSI_CMD: {
3289 struct cnic_local *cp = dev->cnic_priv;
3290 set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
3291 queue_delayed_work(cnic_wq, &cp->delete_task, 0);
3294 case CNIC_CTL_COMPLETION_CMD: {
3295 struct cnic_ctl_completion *comp = &info->data.comp;
3296 u32 cid = BNX2X_SW_CID(comp->cid);
3298 struct cnic_local *cp = dev->cnic_priv;
3300 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
3303 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
3304 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3306 if (unlikely(comp->error)) {
3307 set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
3308 netdev_err(dev->netdev,
3309 "CID %x CFC delete comp error %x\n",
3314 wake_up(&ctx->waitq);
3318 case CNIC_CTL_FCOE_STATS_GET_CMD:
3319 ulp_type = CNIC_ULP_FCOE;
3321 case CNIC_CTL_ISCSI_STATS_GET_CMD:
3323 cnic_copy_ulp_stats(dev, ulp_type);
3333 static void cnic_ulp_init(struct cnic_dev *dev)
3336 struct cnic_local *cp = dev->cnic_priv;
3338 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3339 struct cnic_ulp_ops *ulp_ops;
3341 mutex_lock(&cnic_lock);
3342 ulp_ops = cnic_ulp_tbl_prot(i);
3343 if (!ulp_ops || !ulp_ops->cnic_init) {
3344 mutex_unlock(&cnic_lock);
3348 mutex_unlock(&cnic_lock);
3350 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3351 ulp_ops->cnic_init(dev);
3357 static void cnic_ulp_exit(struct cnic_dev *dev)
3360 struct cnic_local *cp = dev->cnic_priv;
3362 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3363 struct cnic_ulp_ops *ulp_ops;
3365 mutex_lock(&cnic_lock);
3366 ulp_ops = cnic_ulp_tbl_prot(i);
3367 if (!ulp_ops || !ulp_ops->cnic_exit) {
3368 mutex_unlock(&cnic_lock);
3372 mutex_unlock(&cnic_lock);
3374 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3375 ulp_ops->cnic_exit(dev);
3381 static int cnic_cm_offload_pg(struct cnic_sock *csk)
3383 struct cnic_dev *dev = csk->dev;
3384 struct l4_kwq_offload_pg *l4kwqe;
3385 struct kwqe *wqes[1];
3387 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
3388 memset(l4kwqe, 0, sizeof(*l4kwqe));
3389 wqes[0] = (struct kwqe *) l4kwqe;
3391 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
3393 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
3394 l4kwqe->l2hdr_nbytes = ETH_HLEN;
3396 l4kwqe->da0 = csk->ha[0];
3397 l4kwqe->da1 = csk->ha[1];
3398 l4kwqe->da2 = csk->ha[2];
3399 l4kwqe->da3 = csk->ha[3];
3400 l4kwqe->da4 = csk->ha[4];
3401 l4kwqe->da5 = csk->ha[5];
3403 l4kwqe->sa0 = dev->mac_addr[0];
3404 l4kwqe->sa1 = dev->mac_addr[1];
3405 l4kwqe->sa2 = dev->mac_addr[2];
3406 l4kwqe->sa3 = dev->mac_addr[3];
3407 l4kwqe->sa4 = dev->mac_addr[4];
3408 l4kwqe->sa5 = dev->mac_addr[5];
3410 l4kwqe->etype = ETH_P_IP;
3411 l4kwqe->ipid_start = DEF_IPID_START;
3412 l4kwqe->host_opaque = csk->l5_cid;
3415 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
3416 l4kwqe->vlan_tag = csk->vlan_id;
3417 l4kwqe->l2hdr_nbytes += 4;
3420 return dev->submit_kwqes(dev, wqes, 1);
3423 static int cnic_cm_update_pg(struct cnic_sock *csk)
3425 struct cnic_dev *dev = csk->dev;
3426 struct l4_kwq_update_pg *l4kwqe;
3427 struct kwqe *wqes[1];
3429 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
3430 memset(l4kwqe, 0, sizeof(*l4kwqe));
3431 wqes[0] = (struct kwqe *) l4kwqe;
3433 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
3435 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
3436 l4kwqe->pg_cid = csk->pg_cid;
3438 l4kwqe->da0 = csk->ha[0];
3439 l4kwqe->da1 = csk->ha[1];
3440 l4kwqe->da2 = csk->ha[2];
3441 l4kwqe->da3 = csk->ha[3];
3442 l4kwqe->da4 = csk->ha[4];
3443 l4kwqe->da5 = csk->ha[5];
3445 l4kwqe->pg_host_opaque = csk->l5_cid;
3446 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
3448 return dev->submit_kwqes(dev, wqes, 1);
3451 static int cnic_cm_upload_pg(struct cnic_sock *csk)
3453 struct cnic_dev *dev = csk->dev;
3454 struct l4_kwq_upload *l4kwqe;
3455 struct kwqe *wqes[1];
3457 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
3458 memset(l4kwqe, 0, sizeof(*l4kwqe));
3459 wqes[0] = (struct kwqe *) l4kwqe;
3461 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
3463 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
3464 l4kwqe->cid = csk->pg_cid;
3466 return dev->submit_kwqes(dev, wqes, 1);
3469 static int cnic_cm_conn_req(struct cnic_sock *csk)
3471 struct cnic_dev *dev = csk->dev;
3472 struct l4_kwq_connect_req1 *l4kwqe1;
3473 struct l4_kwq_connect_req2 *l4kwqe2;
3474 struct l4_kwq_connect_req3 *l4kwqe3;
3475 struct kwqe *wqes[3];
3479 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
3480 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
3481 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
3482 memset(l4kwqe1, 0, sizeof(*l4kwqe1));
3483 memset(l4kwqe2, 0, sizeof(*l4kwqe2));
3484 memset(l4kwqe3, 0, sizeof(*l4kwqe3));
3486 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
3488 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
3489 l4kwqe3->ka_timeout = csk->ka_timeout;
3490 l4kwqe3->ka_interval = csk->ka_interval;
3491 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
3492 l4kwqe3->tos = csk->tos;
3493 l4kwqe3->ttl = csk->ttl;
3494 l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
3495 l4kwqe3->pmtu = csk->mtu;
3496 l4kwqe3->rcv_buf = csk->rcv_buf;
3497 l4kwqe3->snd_buf = csk->snd_buf;
3498 l4kwqe3->seed = csk->seed;
3500 wqes[0] = (struct kwqe *) l4kwqe1;
3501 if (test_bit(SK_F_IPV6, &csk->flags)) {
3502 wqes[1] = (struct kwqe *) l4kwqe2;
3503 wqes[2] = (struct kwqe *) l4kwqe3;
3506 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
3507 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
3509 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
3510 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
3511 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
3512 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
3513 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
3514 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
3515 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
3516 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
3517 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
3518 sizeof(struct tcphdr);
3520 wqes[1] = (struct kwqe *) l4kwqe3;
3521 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
3522 sizeof(struct tcphdr);
3525 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
3527 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
3528 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
3529 l4kwqe1->cid = csk->cid;
3530 l4kwqe1->pg_cid = csk->pg_cid;
3531 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
3532 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
3533 l4kwqe1->src_port = be16_to_cpu(csk->src_port);
3534 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
3535 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
3536 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
3537 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
3538 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
3539 if (csk->tcp_flags & SK_TCP_NAGLE)
3540 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
3541 if (csk->tcp_flags & SK_TCP_TIMESTAMP)
3542 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
3543 if (csk->tcp_flags & SK_TCP_SACK)
3544 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
3545 if (csk->tcp_flags & SK_TCP_SEG_SCALING)
3546 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
3548 l4kwqe1->tcp_flags = tcp_flags;
3550 return dev->submit_kwqes(dev, wqes, num_wqes);
3553 static int cnic_cm_close_req(struct cnic_sock *csk)
3555 struct cnic_dev *dev = csk->dev;
3556 struct l4_kwq_close_req *l4kwqe;
3557 struct kwqe *wqes[1];
3559 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
3560 memset(l4kwqe, 0, sizeof(*l4kwqe));
3561 wqes[0] = (struct kwqe *) l4kwqe;
3563 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
3564 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
3565 l4kwqe->cid = csk->cid;
3567 return dev->submit_kwqes(dev, wqes, 1);
3570 static int cnic_cm_abort_req(struct cnic_sock *csk)
3572 struct cnic_dev *dev = csk->dev;
3573 struct l4_kwq_reset_req *l4kwqe;
3574 struct kwqe *wqes[1];
3576 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
3577 memset(l4kwqe, 0, sizeof(*l4kwqe));
3578 wqes[0] = (struct kwqe *) l4kwqe;
3580 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
3581 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
3582 l4kwqe->cid = csk->cid;
3584 return dev->submit_kwqes(dev, wqes, 1);
3587 static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3588 u32 l5_cid, struct cnic_sock **csk, void *context)
3590 struct cnic_local *cp = dev->cnic_priv;
3591 struct cnic_sock *csk1;
3593 if (l5_cid >= MAX_CM_SK_TBL_SZ)
3597 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3599 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3603 csk1 = &cp->csk_tbl[l5_cid];
3604 if (atomic_read(&csk1->ref_count))
3607 if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
3612 csk1->l5_cid = l5_cid;
3613 csk1->ulp_type = ulp_type;
3614 csk1->context = context;
3616 csk1->ka_timeout = DEF_KA_TIMEOUT;
3617 csk1->ka_interval = DEF_KA_INTERVAL;
3618 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
3619 csk1->tos = DEF_TOS;
3620 csk1->ttl = DEF_TTL;
3621 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
3622 csk1->rcv_buf = DEF_RCV_BUF;
3623 csk1->snd_buf = DEF_SND_BUF;
3624 csk1->seed = DEF_SEED;
3625 csk1->tcp_flags = 0;
3631 static void cnic_cm_cleanup(struct cnic_sock *csk)
3633 if (csk->src_port) {
3634 struct cnic_dev *dev = csk->dev;
3635 struct cnic_local *cp = dev->cnic_priv;
3637 cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
3642 static void cnic_close_conn(struct cnic_sock *csk)
3644 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
3645 cnic_cm_upload_pg(csk);
3646 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3648 cnic_cm_cleanup(csk);
3651 static int cnic_cm_destroy(struct cnic_sock *csk)
3653 if (!cnic_in_use(csk))
3657 clear_bit(SK_F_INUSE, &csk->flags);
3658 smp_mb__after_atomic();
3659 while (atomic_read(&csk->ref_count) != 1)
3661 cnic_cm_cleanup(csk);
3668 static inline u16 cnic_get_vlan(struct net_device *dev,
3669 struct net_device **vlan_dev)
3671 if (is_vlan_dev(dev)) {
3672 *vlan_dev = vlan_dev_real_dev(dev);
3673 return vlan_dev_vlan_id(dev);
3679 static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3680 struct dst_entry **dst)
3682 #if defined(CONFIG_INET)
3685 rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
3692 return -ENETUNREACH;
3696 static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
3697 struct dst_entry **dst)
3699 #if IS_ENABLED(CONFIG_IPV6)
3702 memset(&fl6, 0, sizeof(fl6));
3703 fl6.daddr = dst_addr->sin6_addr;
3704 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
3705 fl6.flowi6_oif = dst_addr->sin6_scope_id;
3707 *dst = ip6_route_output(&init_net, NULL, &fl6);
3708 if ((*dst)->error) {
3711 return -ENETUNREACH;
3716 return -ENETUNREACH;
3719 static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
3722 struct cnic_dev *dev = NULL;
3723 struct dst_entry *dst;
3724 struct net_device *netdev = NULL;
3725 int err = -ENETUNREACH;
3727 if (dst_addr->sin_family == AF_INET)
3728 err = cnic_get_v4_route(dst_addr, &dst);
3729 else if (dst_addr->sin_family == AF_INET6) {
3730 struct sockaddr_in6 *dst_addr6 =
3731 (struct sockaddr_in6 *) dst_addr;
3733 err = cnic_get_v6_route(dst_addr6, &dst);
3743 cnic_get_vlan(dst->dev, &netdev);
3745 dev = cnic_from_netdev(netdev);
3754 static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3756 struct cnic_dev *dev = csk->dev;
3757 struct cnic_local *cp = dev->cnic_priv;
3759 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
3762 static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3764 struct cnic_dev *dev = csk->dev;
3765 struct cnic_local *cp = dev->cnic_priv;
3767 struct dst_entry *dst = NULL;
3768 struct net_device *realdev;
3772 if (saddr->local.v6.sin6_family == AF_INET6 &&
3773 saddr->remote.v6.sin6_family == AF_INET6)
3775 else if (saddr->local.v4.sin_family == AF_INET &&
3776 saddr->remote.v4.sin_family == AF_INET)
3781 clear_bit(SK_F_IPV6, &csk->flags);
3784 set_bit(SK_F_IPV6, &csk->flags);
3785 cnic_get_v6_route(&saddr->remote.v6, &dst);
3787 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
3788 sizeof(struct in6_addr));
3789 csk->dst_port = saddr->remote.v6.sin6_port;
3790 local_port = saddr->local.v6.sin6_port;
3793 cnic_get_v4_route(&saddr->remote.v4, &dst);
3795 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
3796 csk->dst_port = saddr->remote.v4.sin_port;
3797 local_port = saddr->local.v4.sin_port;
3801 csk->mtu = dev->netdev->mtu;
3802 if (dst && dst->dev) {
3803 u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3804 if (realdev == dev->netdev) {
3805 csk->vlan_id = vlan;
3806 csk->mtu = dst_mtu(dst);
3810 port_id = be16_to_cpu(local_port);
3811 if (port_id >= CNIC_LOCAL_PORT_MIN &&
3812 port_id < CNIC_LOCAL_PORT_MAX) {
3813 if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
3819 port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
3820 if (port_id == -1) {
3824 local_port = cpu_to_be16(port_id);
3826 csk->src_port = local_port;
3833 static void cnic_init_csk_state(struct cnic_sock *csk)
3836 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3837 clear_bit(SK_F_CLOSING, &csk->flags);
3840 static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3842 struct cnic_local *cp = csk->dev->cnic_priv;
3845 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
3848 if (!cnic_in_use(csk))
3851 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
3854 cnic_init_csk_state(csk);
3856 err = cnic_get_route(csk, saddr);
3860 err = cnic_resolve_addr(csk, saddr);
3865 clear_bit(SK_F_CONNECT_START, &csk->flags);
3869 static int cnic_cm_abort(struct cnic_sock *csk)
3871 struct cnic_local *cp = csk->dev->cnic_priv;
3872 u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3874 if (!cnic_in_use(csk))
3877 if (cnic_abort_prep(csk))
3878 return cnic_cm_abort_req(csk);
3880 /* Getting here means that we haven't started connect, or
3881 * connect was not successful, or it has been reset by the target.
3884 cp->close_conn(csk, opcode);
3885 if (csk->state != opcode) {
3886 /* Wait for remote reset sequence to complete */
3887 while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3896 static int cnic_cm_close(struct cnic_sock *csk)
3898 if (!cnic_in_use(csk))
3901 if (cnic_close_prep(csk)) {
3902 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3903 return cnic_cm_close_req(csk);
3905 /* Wait for remote reset sequence to complete */
3906 while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3914 static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3917 struct cnic_ulp_ops *ulp_ops;
3918 int ulp_type = csk->ulp_type;
3921 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3923 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3924 ulp_ops->cm_connect_complete(csk);
3925 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3926 ulp_ops->cm_close_complete(csk);
3927 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3928 ulp_ops->cm_remote_abort(csk);
3929 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3930 ulp_ops->cm_abort_complete(csk);
3931 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3932 ulp_ops->cm_remote_close(csk);
3937 static int cnic_cm_set_pg(struct cnic_sock *csk)
3939 if (cnic_offld_prep(csk)) {
3940 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3941 cnic_cm_update_pg(csk);
3943 cnic_cm_offload_pg(csk);
3948 static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3950 struct cnic_local *cp = dev->cnic_priv;
3951 u32 l5_cid = kcqe->pg_host_opaque;
3952 u8 opcode = kcqe->op_code;
3953 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3956 if (!cnic_in_use(csk))
3959 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3960 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3963 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3964 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3965 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3966 cnic_cm_upcall(cp, csk,
3967 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3971 csk->pg_cid = kcqe->pg_cid;
3972 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3973 cnic_cm_conn_req(csk);
3979 static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3981 struct cnic_local *cp = dev->cnic_priv;
3982 struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3983 u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3984 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3986 ctx->timestamp = jiffies;
3988 wake_up(&ctx->waitq);
3991 static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3993 struct cnic_local *cp = dev->cnic_priv;
3994 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3995 u8 opcode = l4kcqe->op_code;
3997 struct cnic_sock *csk;
3999 if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
4000 cnic_process_fcoe_term_conn(dev, kcqe);
4003 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
4004 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
4005 cnic_cm_process_offld_pg(dev, l4kcqe);
4009 l5_cid = l4kcqe->conn_id;
4011 l5_cid = l4kcqe->cid;
4012 if (l5_cid >= MAX_CM_SK_TBL_SZ)
4015 csk = &cp->csk_tbl[l5_cid];
4018 if (!cnic_in_use(csk)) {
4024 case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
4025 if (l4kcqe->status != 0) {
4026 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4027 cnic_cm_upcall(cp, csk,
4028 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
4031 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
4032 if (l4kcqe->status == 0)
4033 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
4034 else if (l4kcqe->status ==
4035 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
4036 set_bit(SK_F_HW_ERR, &csk->flags);
4038 smp_mb__before_atomic();
4039 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
4040 cnic_cm_upcall(cp, csk, opcode);
4043 case L5CM_RAMROD_CMD_ID_CLOSE: {
4044 struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;
4046 if (l4kcqe->status == 0 && l5kcqe->completion_status == 0)
4049 netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
4050 l4kcqe->status, l5kcqe->completion_status);
4051 opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
4054 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4055 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4056 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4057 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4058 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4059 if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
4060 set_bit(SK_F_HW_ERR, &csk->flags);
4062 cp->close_conn(csk, opcode);
4065 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
4066 /* after we already sent CLOSE_REQ */
4067 if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
4068 !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
4069 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
4070 cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
4072 cnic_cm_upcall(cp, csk, opcode);
4078 static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
4080 struct cnic_dev *dev = data;
4083 for (i = 0; i < num; i++)
4084 cnic_cm_process_kcqe(dev, kcqe[i]);
4087 static struct cnic_ulp_ops cm_ulp_ops = {
4088 .indicate_kcqes = cnic_cm_indicate_kcqe,
4091 static void cnic_cm_free_mem(struct cnic_dev *dev)
4093 struct cnic_local *cp = dev->cnic_priv;
4095 kvfree(cp->csk_tbl);
4097 cnic_free_id_tbl(&cp->csk_port_tbl);
4100 static int cnic_cm_alloc_mem(struct cnic_dev *dev)
4102 struct cnic_local *cp = dev->cnic_priv;
4106 cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
4111 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)
4112 atomic_set(&cp->csk_tbl[i].ref_count, 0);
4114 port_id = get_random_u32_below(CNIC_LOCAL_PORT_RANGE);
4115 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
4116 CNIC_LOCAL_PORT_MIN, port_id)) {
4117 cnic_cm_free_mem(dev);
4123 static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
4125 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
4126 /* Unsolicited RESET_COMP or RESET_RECEIVED */
4127 opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
4128 csk->state = opcode;
4131 /* 1. If event opcode matches the expected event in csk->state
4132 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
4134 * 3. If the expected event is 0, meaning the connection was never
4135 * never established, we accept the opcode from cm_abort.
4137 if (opcode == csk->state || csk->state == 0 ||
4138 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
4139 csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
4140 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
4141 if (csk->state == 0)
4142 csk->state = opcode;
4149 static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
4151 struct cnic_dev *dev = csk->dev;
4152 struct cnic_local *cp = dev->cnic_priv;
4154 if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
4155 cnic_cm_upcall(cp, csk, opcode);
4159 clear_bit(SK_F_CONNECT_START, &csk->flags);
4160 cnic_close_conn(csk);
4161 csk->state = opcode;
4162 cnic_cm_upcall(cp, csk, opcode);
4165 static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
4169 static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
4173 seed = get_random_u32();
4174 cnic_ctx_wr(dev, 45, 0, seed);
4178 static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
4180 struct cnic_dev *dev = csk->dev;
4181 struct cnic_local *cp = dev->cnic_priv;
4182 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
4183 union l5cm_specific_data l5_data;
4185 int close_complete = 0;
4188 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4189 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4190 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4191 if (cnic_ready_to_close(csk, opcode)) {
4192 if (test_bit(SK_F_HW_ERR, &csk->flags))
4194 else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
4195 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
4200 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4201 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
4203 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4208 memset(&l5_data, 0, sizeof(l5_data));
4210 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
4212 } else if (close_complete) {
4213 ctx->timestamp = jiffies;
4214 cnic_close_conn(csk);
4215 cnic_cm_upcall(cp, csk, csk->state);
4219 static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
4221 struct cnic_local *cp = dev->cnic_priv;
4226 if (!netif_running(dev->netdev))
4229 cnic_bnx2x_delete_wait(dev, 0);
4231 cancel_delayed_work(&cp->delete_task);
4232 flush_workqueue(cnic_wq);
4234 if (atomic_read(&cp->iscsi_conn) != 0)
4235 netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
4236 atomic_read(&cp->iscsi_conn));
4239 static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
4241 struct bnx2x *bp = netdev_priv(dev->netdev);
4242 u32 pfid = bp->pfid;
4243 u32 port = BP_PORT(bp);
4245 cnic_init_bnx2x_mac(dev);
4246 cnic_bnx2x_set_tcp_options(dev, 0, 1);
4248 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
4249 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
4251 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4252 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
4253 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4254 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
4257 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4258 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
4259 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4260 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
4261 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4262 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
4263 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4264 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
4266 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
4271 static void cnic_delete_task(struct work_struct *work)
4273 struct cnic_local *cp;
4274 struct cnic_dev *dev;
4276 int need_resched = 0;
4278 cp = container_of(work, struct cnic_local, delete_task.work);
4281 if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
4282 struct drv_ctl_info info;
4284 cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
4286 memset(&info, 0, sizeof(struct drv_ctl_info));
4287 info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
4288 cp->ethdev->drv_ctl(dev->netdev, &info);
4291 for (i = 0; i < cp->max_cid_space; i++) {
4292 struct cnic_context *ctx = &cp->ctx_tbl[i];
4295 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
4296 !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4299 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
4304 if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4307 err = cnic_bnx2x_destroy_ramrod(dev, i);
4309 cnic_free_bnx2x_conn_resc(dev, i);
4311 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
4312 atomic_dec(&cp->iscsi_conn);
4314 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
4319 queue_delayed_work(cnic_wq, &cp->delete_task,
4320 msecs_to_jiffies(10));
4324 static int cnic_cm_open(struct cnic_dev *dev)
4326 struct cnic_local *cp = dev->cnic_priv;
4329 err = cnic_cm_alloc_mem(dev);
4333 err = cp->start_cm(dev);
4338 INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
4340 dev->cm_create = cnic_cm_create;
4341 dev->cm_destroy = cnic_cm_destroy;
4342 dev->cm_connect = cnic_cm_connect;
4343 dev->cm_abort = cnic_cm_abort;
4344 dev->cm_close = cnic_cm_close;
4345 dev->cm_select_dev = cnic_cm_select_dev;
4347 cp->ulp_handle[CNIC_ULP_L4] = dev;
4348 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
4352 cnic_cm_free_mem(dev);
4356 static int cnic_cm_shutdown(struct cnic_dev *dev)
4358 struct cnic_local *cp = dev->cnic_priv;
4364 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
4365 struct cnic_sock *csk = &cp->csk_tbl[i];
4367 clear_bit(SK_F_INUSE, &csk->flags);
4368 cnic_cm_cleanup(csk);
4370 cnic_cm_free_mem(dev);
4375 static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4380 cid_addr = GET_CID_ADDR(cid);
4382 for (i = 0; i < CTX_SIZE; i += 4)
4383 cnic_ctx_wr(dev, cid_addr, i, 0);
4386 static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4388 struct cnic_local *cp = dev->cnic_priv;
4390 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
4392 if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
4395 for (i = 0; i < cp->ctx_blks; i++) {
4397 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4400 memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);
4402 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4403 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
4404 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4405 (u64) cp->ctx_arr[i].mapping >> 32);
4406 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4407 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4408 for (j = 0; j < 10; j++) {
4410 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4411 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
4415 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
4423 static void cnic_free_irq(struct cnic_dev *dev)
4425 struct cnic_local *cp = dev->cnic_priv;
4426 struct cnic_eth_dev *ethdev = cp->ethdev;
4428 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4429 cp->disable_int_sync(dev);
4430 tasklet_kill(&cp->cnic_irq_task);
4431 free_irq(ethdev->irq_arr[0].vector, dev);
4435 static int cnic_request_irq(struct cnic_dev *dev)
4437 struct cnic_local *cp = dev->cnic_priv;
4438 struct cnic_eth_dev *ethdev = cp->ethdev;
4441 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4443 tasklet_disable(&cp->cnic_irq_task);
4448 static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4450 struct cnic_local *cp = dev->cnic_priv;
4451 struct cnic_eth_dev *ethdev = cp->ethdev;
4453 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4455 int sblk_num = cp->status_blk_num;
4456 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4457 BNX2_HC_SB_CONFIG_1;
4459 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4461 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4462 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4463 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4465 cp->last_status_idx = cp->status_blk.bnx2->status_idx;
4466 tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2_msix);
4467 err = cnic_request_irq(dev);
4471 while (cp->status_blk.bnx2->status_completion_producer_index &&
4473 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4474 1 << (11 + sblk_num));
4479 if (cp->status_blk.bnx2->status_completion_producer_index) {
4485 struct status_block *sblk = cp->status_blk.gen;
4486 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4489 while (sblk->status_completion_producer_index && i < 10) {
4490 CNIC_WR(dev, BNX2_HC_COMMAND,
4491 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4496 if (sblk->status_completion_producer_index)
4503 netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
4507 static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4509 struct cnic_local *cp = dev->cnic_priv;
4510 struct cnic_eth_dev *ethdev = cp->ethdev;
4512 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4515 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4516 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4519 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4521 struct cnic_local *cp = dev->cnic_priv;
4522 struct cnic_eth_dev *ethdev = cp->ethdev;
4524 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4527 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4528 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4529 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4530 synchronize_irq(ethdev->irq_arr[0].vector);
4533 static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4535 struct cnic_local *cp = dev->cnic_priv;
4536 struct cnic_eth_dev *ethdev = cp->ethdev;
4537 struct cnic_uio_dev *udev = cp->udev;
4538 u32 cid_addr, tx_cid, sb_id;
4539 u32 val, offset0, offset1, offset2, offset3;
4541 struct bnx2_tx_bd *txbd;
4542 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4543 struct status_block *s_blk = cp->status_blk.gen;
4545 sb_id = cp->status_blk_num;
4547 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
4548 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4549 struct status_block_msix *sblk = cp->status_blk.bnx2;
4551 tx_cid = TX_TSS_CID + sb_id - 1;
4552 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4554 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
4556 cp->tx_cons = *cp->tx_cons_ptr;
4558 cid_addr = GET_CID_ADDR(tx_cid);
4559 if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
4560 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
4562 for (i = 0; i < PHY_CTX_SIZE; i += 4)
4563 cnic_ctx_wr(dev, cid_addr2, i, 0);
4565 offset0 = BNX2_L2CTX_TYPE_XI;
4566 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4567 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4568 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4570 cnic_init_context(dev, tx_cid);
4571 cnic_init_context(dev, tx_cid + 1);
4573 offset0 = BNX2_L2CTX_TYPE;
4574 offset1 = BNX2_L2CTX_CMD_TYPE;
4575 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4576 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4578 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4579 cnic_ctx_wr(dev, cid_addr, offset0, val);
4581 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4582 cnic_ctx_wr(dev, cid_addr, offset1, val);
4584 txbd = udev->l2_ring;
4586 buf_map = udev->l2_buf_map;
4587 for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i++, txbd++) {
4588 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
4589 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4591 val = (u64) ring_map >> 32;
4592 cnic_ctx_wr(dev, cid_addr, offset2, val);
4593 txbd->tx_bd_haddr_hi = val;
4595 val = (u64) ring_map & 0xffffffff;
4596 cnic_ctx_wr(dev, cid_addr, offset3, val);
4597 txbd->tx_bd_haddr_lo = val;
static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct cnic_uio_dev *udev = cp->udev;
	u32 cid_addr, sb_id, val, coal_reg, coal_val;
	int i;
	struct bnx2_rx_bd *rxbd;
	struct status_block *s_blk = cp->status_blk.gen;
	dma_addr_t ring_map = udev->l2_ring_map;

	sb_id = cp->status_blk_num;
	cnic_init_context(dev, 2);
	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
	coal_reg = BNX2_HC_COMMAND;
	coal_val = CNIC_RD(dev, coal_reg);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk.bnx2;

		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
		coal_reg = BNX2_HC_COALESCE_NOW;
		coal_val = 1 << (11 + sb_id);
	}
	i = 0;
	while (*cp->rx_cons_ptr == 0 && i < 10) {
		CNIC_WR(dev, coal_reg, coal_val);
		udelay(10);
		i++;
		barrier();
	}
	cp->rx_cons = *cp->rx_cons_ptr;

	cid_addr = GET_CID_ADDR(2);
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	if (sb_id == 0)
		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
	else
		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);

	rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
	for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->rx_bd_len = cp->l2_single_buf_size;
		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
	rxbd->rx_bd_haddr_hi = val;

	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
	rxbd->rx_bd_haddr_lo = val;

	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
}

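/* Flush the bnx2 L2 rx ring by submitting an L2 FLUSH kwqe. */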
static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct kwqe *wqes[1], l2kwqe;

	memset(&l2kwqe, 0, sizeof(l2kwqe));
	wqes[0] = &l2kwqe;
	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
			       KWQE_OPCODE_SHIFT) | 2;
	dev->submit_kwqes(dev, wqes, 1);
}

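/* Read the iSCSI MAC address from shared memory, program it into the
 * EMAC perfect-match registers, and configure the RPM sort rules to
 * accept frames for that MAC.
 */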
static void cnic_set_bnx2_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 val;

	val = cp->func << 2;

	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);

	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
	if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;

	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
}

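/* Bring up the bnx2 kernel queues: program the MQ block size and host
 * coalescing parameters, build the KWQ and KCQ contexts, ring the CP
 * and COM doorbells, initialize the L2 rings, and hook up the IRQ.
 */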
static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct status_block *sblk = cp->status_blk.gen;
	u32 val, kcq_cid_addr, kwq_cid_addr;
	int err;

	cnic_set_bnx2_mac(dev);

	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	if (CNIC_PAGE_BITS > 12)
		val |= (12 - 8) << 4;
	else
		val |= (CNIC_PAGE_BITS - 8) << 4;

	CNIC_WR(dev, BNX2_MQ_CONFIG, val);

	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

	err = cnic_setup_5709_context(dev, 1);
	if (err)
		return err;

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->max_kwq_idx = MAX_KWQ_IDX;
	cp->kwq_prod_idx = 0;
	cp->kwq_con_idx = 0;
	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	if (BNX2_CHIP(cp) == BNX2_CHIP_5706 || BNX2_CHIP(cp) == BNX2_CHIP_5708)
		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
	else
		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

	/* Initialize the kernel work queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kwq_info.pgtbl_map;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->kcq1.sw_prod_idx = 0;
	cp->kcq1.hw_prod_idx_ptr =
		&sblk->status_completion_producer_index;

	cp->kcq1.status_idx_ptr = &sblk->status_idx;

	/* Initialize the kernel complete queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kcq1.dma.pgtbl_map;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->int_num = 0;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *msblk = cp->status_blk.bnx2;
		u32 sb_id = cp->status_blk_num;
		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);

		cp->kcq1.hw_prod_idx_ptr =
			&msblk->status_completion_producer_index;
		cp->kcq1.status_idx_ptr = &msblk->status_idx;
		cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	}

	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

	/* Enable Command Scheduler notification when we write to either
	 * the Send Queue or Receive Queue producer indexes of the kernel
	 * bypass contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

	/* Notify COM when the driver posts an application buffer. */
	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

	/* Set the CP and COM doorbells.  These two processors poll the
	 * doorbell for a non-zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
	if (err) {
		netdev_err(dev->netdev, "cnic_init_irq failed\n");
		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
		return err;
	}

	ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;

	return 0;
}

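/* Write the DMA address of each context block, aligned as required by
 * the chip, into the bnx2x context table.
 */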
static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_offset = ethdev->ctx_tbl_offset;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		struct cnic_ctx *ctx = &cp->ctx_arr[i];
		dma_addr_t map = ctx->mapping;

		if (cp->ctx_align) {
			unsigned long mask = cp->ctx_align - 1;

			map = (map + mask) & ~mask;
		}

		cnic_ctx_tbl_wr(dev, start_offset + i, map);
	}
}

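/* The bnx2x service routine runs in a tasklet; the IRQ itself is only
 * requested when MSI-X is in use.
 */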
static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2x_bh);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
		err = cnic_request_irq(dev);

	return err;
}

static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
						u16 sb_id, u8 sb_index,
						u8 disable)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);

	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = CNIC_RD16(dev, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
		HC_INDEX_DATA_HC_ENABLED);
	CNIC_WR16(dev, addr, flags);
}

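/* Set the host coalescing timeout for the iSCSI EQ index and
 * re-enable the index.
 */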
static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u8 sb_id = cp->status_blk_num;

	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
			offsetof(struct hc_index_data, timeout), 64 / 4);
	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
}

static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}

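/* Build the bnx2x L2 tx ring in the uio buffer.  Each packet slot is
 * three BDs (start, parse, and regular BD) pointing into a small
 * fixed buffer; the ramrod data is filled with the ring page base and
 * the status block indices.
 */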
static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_uio_dev *udev = cp->udev;
	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	u32 val;

	memset(txbd, 0, CNIC_PAGE_SIZE);

	buf_map = udev->l2_buf_map;
	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
		struct eth_tx_parse_bd_e1x *pbd_e1x =
			&((txbd + 1)->parse_bd_e1x);
		struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
		reg_bd->addr_hi = start_bd->addr_hi;
		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
		start_bd->nbytes = cpu_to_le16(0x10);
		start_bd->nbd = cpu_to_le16(3);
		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
		start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

		if (BNX2X_CHIP_IS_E2_PLUS(bp))
			pbd_e2->parsing_data = (UNICAST_ADDRESS <<
				ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
		else
			pbd_e1x->global_data = (UNICAST_ADDRESS <<
				ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
	}

	val = (u64) ring_map >> 32;
	txbd->next_bd.addr_hi = cpu_to_le32(val);

	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);

	val = (u64) ring_map & 0xffffffff;
	txbd->next_bd.addr_lo = cpu_to_le32(val);

	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;

	/* reset xstorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		data->general.statistics_zero_flg = 1;
		data->general.statistics_en_flg = 1;
		data->general.statistics_counter_id = cli;
	}

	cp->tx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
}

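/* Build the bnx2x L2 rx BD and RCQ rings in the uio buffer and fill
 * in the rx portion of the client init ramrod data.
 */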
static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_uio_dev *udev = cp->udev;
	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
				CNIC_PAGE_SIZE);
	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
				(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
	u32 val;
	dma_addr_t ring_map = udev->l2_ring_map;

	/* General data */
	data->general.client_id = cli;
	data->general.activate_flg = 1;
	data->general.sp_client_id = cli;
	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
	data->general.func_id = bp->pfid;

	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
	}

	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
	rxbd->addr_hi = cpu_to_le32(val);
	data->rx.bd_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
	rxbd->addr_lo = cpu_to_le32(val);
	data->rx.bd_page_base.lo = cpu_to_le32(val);

	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
	rxcqe->addr_hi = cpu_to_le32(val);
	data->rx.cqe_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
	rxcqe->addr_lo = cpu_to_le32(val);
	data->rx.cqe_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->rx.client_qzone_id = cl_qzone_id;
	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
	data->rx.status_block_id = BNX2X_DEF_SB_ID;

	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;

	data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
	data->rx.outer_vlan_removal_enable_flg = 1;
	data->rx.silent_vlan_removal_flg = 1;
	data->rx.silent_vlan_value = 0;
	data->rx.silent_vlan_mask = 0xffff;

	cp->rx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
	cp->rx_cons = *cp->rx_cons_ptr;
}

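/* Point kcq1 at the iSCSI event queue, and on E2 and later chips also
 * point kcq2 at the FCoE event queue.
 */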
static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = bp->pfid;

	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
	cp->kcq1.sw_prod_idx = 0;

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	} else {
		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
				   USTORM_FCOE_EQ_PROD_OFFSET(pfid);
		cp->kcq2.sw_prod_idx = 0;
		cp->kcq2.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
		cp->kcq2.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}
}

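/* Bring up the bnx2x side: allocate the iSCSI (and, on E2+, FCoE) CID
 * tables, program the iSCSI EQ pages into CSTORM, set the TCP local
 * advertised window, write the context table, and request the IRQ.
 */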
static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int func, ret;
	u32 pfid;

	dev->stats_addr = ethdev->addr_drv_info_to_mcp;
	cp->func = bp->pf_num;

	func = CNIC_FUNC(cp);
	pfid = bp->pfid;

	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
			       cp->iscsi_start_cid, 0);

	if (ret)
		return -ENOMEM;

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
					cp->fcoe_start_cid, 0);

		if (ret)
			return -ENOMEM;
	}

	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;

	cnic_init_bnx2x_kcq(dev);

	/* Only 1 EQ */
	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
		HC_INDEX_ISCSI_EQ_CONS);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);

	cnic_setup_bnx2x_context(dev);

	ret = cnic_init_bnx2x_irq(dev);
	if (ret)
		return ret;

	ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;

	return 0;
}

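/* Initialize the uio L2 rings.  On bnx2x this posts a CLIENT_SETUP
 * ramrod and waits briefly for its completion before enabling the
 * ring doorbell.
 */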
static void cnic_init_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_uio_dev *udev = cp->udev;

	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_init_bnx2_tx_ring(dev);
		cnic_init_bnx2_rx_ring(dev);
		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		u32 cl_qzone_id;
		struct client_init_ramrod_data *data;
		union l5cm_specific_data l5_data;
		struct ustorm_eth_rx_producers rx_prods = {0};
		u32 off, i, *cid_ptr;

		rx_prods.bd_prod = 0;
		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
		barrier();

		cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);

		off = BAR_USTRORM_INTMEM +
			(BNX2X_CHIP_IS_E2_PLUS(bp) ?
			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
			 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli));

		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		data = udev->l2_buf;
		cid_ptr = udev->l2_buf + 12;

		memset(data, 0, sizeof(*data));

		cnic_init_bnx2x_tx_ring(dev, data);
		cnic_init_bnx2x_rx_ring(dev, data);

		data->general.fp_hsi_ver = ETH_FP_HSI_VERSION;

		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;

		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);

		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
			cid, ETH_CONNECTION_TYPE, &l5_data);

		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				"iSCSI CLIENT_SETUP did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
		cnic_ring_ctl(dev, cid, cli, 1);
		*cid_ptr = cid >> 4;
		*(cid_ptr + 1) = cid * bp->db_size;
		*(cid_ptr + 2) = UIO_USE_TX_DOORBELL;
	}
}

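/* Tear down the uio L2 rings.  On bnx2x this halts the L2 client and
 * deletes its CFC element, waiting briefly for each ramrod to
 * complete.
 */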
static void cnic_shutdown_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	void *rx_ring;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_shutdown_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		union l5cm_specific_data l5_data;
		int i;

		cnic_ring_ctl(dev, cid, cli, 0);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		l5_data.phy_address.lo = cli;
		l5_data.phy_address.hi = 0;
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
			cid, ETH_CONNECTION_TYPE, &l5_data);
		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				"iSCSI CLIENT_HALT did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);

		memset(&l5_data, 0, sizeof(l5_data));
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
			cid, NONE_CONNECTION_TYPE, &l5_data);
		msleep(10);
	}
	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
	memset(rx_ring, 0, CNIC_PAGE_SIZE);
}

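/* Register this cnic device with the underlying bnx2/bnx2x driver. */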
static int cnic_register_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (!ethdev)
		return -ENODEV;

	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
		return 0;

	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
	if (err)
		netdev_err(dev->netdev, "register_cnic failed\n");

	/* Read iSCSI config again.  On some bnx2x devices, iSCSI config
	 * can change after firmware is downloaded.
	 */
	dev->max_iscsi_conn = ethdev->max_iscsi_conn;
	if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
		dev->max_iscsi_conn = 0;

	return err;
}

static void cnic_unregister_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!ethdev)
		return;

	ethdev->drv_unregister_cnic(dev->netdev);
}

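/* Common bring-up path: allocate resources, start the chip-specific
 * hardware, open the connection manager, and enable interrupts.
 */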
static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	dev->regview = ethdev->io_base;
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
	cp->status_blk_map = ethdev->irq_arr[0].status_blk_map;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		netdev_err(dev->netdev, "allocate resource failure\n");
		goto err1;
	}

	err = cp->start_hw(dev);
	if (err)
		goto err1;

	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);

	cp->enable_int(dev);

	return 0;

err1:
	if (ethdev->drv_state & CNIC_DRV_STATE_HANDLES_IRQ)
		cp->stop_hw(dev);
	else
		cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
	return err;
}

static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
	cnic_disable_bnx2_int_sync(dev);

	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cnic_setup_5709_context(dev, 0);
	cnic_free_irq(dev);

	cnic_free_resc(dev);
}

static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 hc_index = HC_INDEX_ISCSI_EQ_CONS;
	u32 sb_id = cp->status_blk_num;
	u32 idx_off, syn_off;

	cnic_free_irq(dev);

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		idx_off = offsetof(struct hc_status_block_e2, index_values) +
			  (hc_index * sizeof(u16));

		syn_off = CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_index, sb_id);
	} else {
		idx_off = offsetof(struct hc_status_block_e1x, index_values) +
			  (hc_index * sizeof(u16));

		syn_off = CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_index, sb_id);
	}
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) +
		  idx_off, 0);

	*cp->kcq1.hw_prod_idx_ptr = 0;
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0);
	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
	cnic_free_resc(dev);
}

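/* Common tear-down path: wait for the uio consumer to release the
 * rings, shut the rings down, unhook the L4 ULP, and stop the
 * chip-specific hardware.
 */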
static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		int i = 0;

		/* Need to wait for the ring shutdown event to complete
		 * before clearing the CNIC_UP flag.
		 */
		while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		cnic_shutdown_rings(dev);
		cp->stop_cm(dev);
		cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ;
		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}

static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");

	netdev_info(dev->netdev, "Removed CNIC device\n");
	dev_put(dev->netdev);
	kfree(dev);
}

static int cnic_get_fc_npiv_tbl(struct cnic_dev *dev,
				struct cnic_fc_npiv_tbl *npiv_tbl)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int ret;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;     /* bnx2x is down */

	if (!BNX2X_CHIP_IS_E2_PLUS(bp))
		return -EINVAL;

	ret = cp->ethdev->drv_get_fc_npiv_tbl(dev->netdev, npiv_tbl);

	return ret;
}

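/* Allocate a cnic_dev with its cnic_local appended in one allocation
 * and fill in the common defaults and entry points.
 */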
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL)
		return NULL;

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
	cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl;
	atomic_set(&cdev->ref_count, 0);

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	netdev_info(dev, "Added CNIC device\n");

	return cdev;
}

static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *ethdev = NULL;

	if (bp->cnic_probe)
		ethdev = (bp->cnic_probe)(dev);

	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	     pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
	    (pdev->revision < 0x10)) {
		pci_dev_put(pdev);
		goto cnic_err;
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cdev->max_iscsi_conn = ethdev->max_iscsi_conn;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}

static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *ethdev = NULL;

	if (bp->cnic_probe)
		ethdev = bp->cnic_probe(dev);

	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cdev->stats_addr = ethdev->addr_drv_info_to_mcp;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
	if (CNIC_SUPPORTS_FCOE(bp)) {
		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
		cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
	}

	if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
		cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;

	memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		cp->ack_int = cnic_ack_bnx2x_e2_msix;
		cp->arm_int = cnic_arm_bnx2x_e2_msix;
	} else {
		cp->ack_int = cnic_ack_bnx2x_msix;
		cp->arm_int = cnic_arm_bnx2x_msix;
	}
	cp->close_conn = cnic_close_bnx2x_conn;
	return cdev;
}

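/* A net_device belongs to cnic if its ethtool driver name is "bnx2"
 * or "bnx2x"; create the cnic_dev and add it to the global list.
 */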
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}

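/* Deliver a netdev event to every registered ULP that implements
 * indicate_netevent, marking the call pending while it runs.
 */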
static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
			      u16 vlan_id)
{
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;
		void *ctx;

		mutex_lock(&cnic_lock);
		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
						lockdep_is_held(&cnic_lock));
		if (!ulp_ops || !ulp_ops->indicate_netevent) {
			mutex_unlock(&cnic_lock);
			continue;
		}

		ctx = cp->ulp_handle[if_type];

		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		ulp_ops->indicate_netevent(ctx, event, vlan_id);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}

/* netdev event handler */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct cnic_dev *dev;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && event == NETDEV_REGISTER) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		cnic_rcv_netevent(cp, event, 0);

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	} else {
		struct net_device *realdev;
		u16 vid;

		vid = cnic_get_vlan(netdev, &realdev);
		if (realdev) {
			dev = cnic_from_netdev(realdev);
			if (dev) {
				vid |= VLAN_CFI_MASK;	/* make non-zero */
				cnic_rcv_netevent(dev->cnic_priv, event, vid);
				cnic_put(dev);
			}
		}
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};

static void cnic_release(void)
{
	struct cnic_uio_dev *udev;

	while (!list_empty(&cnic_udev_list)) {
		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
				  list);
		cnic_free_uio(udev);
	}
}

static int __init cnic_init(void)
{
	int rc = 0;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	cnic_wq = create_singlethread_workqueue("cnic_wq");
	if (!cnic_wq) {
		cnic_release();
		unregister_netdevice_notifier(&cnic_netdev_notifier);
		return -ENOMEM;
	}

	return 0;
}

static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	destroy_workqueue(cnic_wq);
}

module_init(cnic_init);
module_exit(cnic_exit);