/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2015 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
#include <net/vxlan.h>
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
static const char version[] =
"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);
#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256
#define BNXT_TX_PUSH_THRESH 92
enum board_idx {
BCM57301,
BCM57302,
BCM57304,
BCM57402,
BCM57404,
BCM57406,
BCM57304_VF,
BCM57404_VF,
};
/* indexed by enum above */
static const struct {
char *name;
} board_info[] = {
{ "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" },
{ "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
{ "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
{ "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
{ "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
{ "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" },
{ "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
{ "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
#ifdef CONFIG_BNXT_SRIOV
{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
#endif
{ 0 }
};
MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
static const u16 bnxt_vf_req_snif[] = {
HWRM_FUNC_CFG,
HWRM_PORT_PHY_QCFG,
HWRM_CFA_L2_FILTER_ALLOC,
};
static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == BCM57304_VF || idx == BCM57404_VF);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
#define BNXT_CP_DB_REARM(db, raw_cons) \
writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
#define BNXT_CP_DB(db, raw_cons) \
writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
#define BNXT_CP_DB_IRQ_DIS(db) \
writel(DB_CP_IRQ_DIS_FLAGS, db)
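/* Illustrative sketch (not from the original source): each doorbell
 * update is a single 32-bit MMIO write combining a key that selects the
 * ring type, flag bits, and the masked ring index.  Assuming RING_CMP()
 * masks the raw consumer index with the completion ring mask (as its use
 * here implies), re-arming a 1024-entry ring at raw index 0x1234 writes:
 *
 *	writel(DB_KEY_CP | DB_IDX_VALID | (0x1234 & 0x3ff), db);
 *
 * i.e. the key, the valid bit, and index 0x234.
 */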
static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
/* Tell compiler to fetch tx indices from memory. */
barrier();
return bp->tx_ring_size -
((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}
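/* Worked example (illustrative): with tx_ring_size = 511 and
 * tx_ring_mask = 511, tx_prod = 5 and tx_cons = 510 give
 * (5 - 510) & 511 = 7 descriptors in use, so 511 - 7 = 504 are still
 * available.  The mask is what keeps the subtraction correct when the
 * 16-bit indices wrap around.
 */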
static const u16 bnxt_lhint_arr[] = {
TX_BD_FLAGS_LHINT_512_AND_SMALLER,
TX_BD_FLAGS_LHINT_512_TO_1023,
TX_BD_FLAGS_LHINT_1024_TO_2047,
TX_BD_FLAGS_LHINT_1024_TO_2047,
TX_BD_FLAGS_LHINT_2048_AND_LARGER,
TX_BD_FLAGS_LHINT_2048_AND_LARGER,
TX_BD_FLAGS_LHINT_2048_AND_LARGER,
TX_BD_FLAGS_LHINT_2048_AND_LARGER,
TX_BD_FLAGS_LHINT_2048_AND_LARGER,
TX_BD_FLAGS_LHINT_2048_AND_LARGER,
TX_BD_FLAGS_LHINT_2048_AND_LARGER,
TX_BD_FLAGS_LHINT_2048_AND_LARGER,
TX_BD_FLAGS_LHINT_2048_AND_LARGER,
TX_BD_FLAGS_LHINT_2048_AND_LARGER,
TX_BD_FLAGS_LHINT_2048_AND_LARGER,
TX_BD_FLAGS_LHINT_2048_AND_LARGER,
TX_BD_FLAGS_LHINT_2048_AND_LARGER,
TX_BD_FLAGS_LHINT_2048_AND_LARGER,
TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};
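/* The array above is indexed by the packet length in 512-byte units
 * (length >>= 9 in bnxt_start_xmit() below).  For example, a 1514-byte
 * frame yields index 2, i.e. TX_BD_FLAGS_LHINT_1024_TO_2047, and every
 * entry from index 4 up resolves to TX_BD_FLAGS_LHINT_2048_AND_LARGER.
 * For GSO packets the length used is gso_size plus the header length.
 */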
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
struct tx_bd *txbd;
struct tx_bd_ext *txbd1;
struct netdev_queue *txq;
int i;
dma_addr_t mapping;
unsigned int length, pad = 0;
u32 len, free_size, vlan_tag_flags, cfa_action, flags;
u16 prod, last_frag;
struct pci_dev *pdev = bp->pdev;
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_tx_bd *tx_buf;
i = skb_get_queue_mapping(skb);
if (unlikely(i >= bp->tx_nr_rings)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
txr = &bp->tx_ring[i];
txq = netdev_get_tx_queue(dev, i);
prod = txr->tx_prod;
free_size = bnxt_tx_avail(bp, txr);
if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
netif_tx_stop_queue(txq);
return NETDEV_TX_BUSY;
}
length = skb->len;
len = skb_headlen(skb);
last_frag = skb_shinfo(skb)->nr_frags;
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
txbd->tx_bd_opaque = prod;
tx_buf = &txr->tx_buf_ring[prod];
tx_buf->skb = skb;
tx_buf->nr_frags = last_frag;
vlan_tag_flags = 0;
cfa_action = 0;
if (skb_vlan_tag_present(skb)) {
vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
skb_vlan_tag_get(skb);
		/* Currently supports 802.1Q and 802.1ad VLAN offloads.
		 * The QinQ1, QinQ2 and QinQ3 VLAN headers are deprecated.
		 */
if (skb->vlan_proto == htons(ETH_P_8021Q))
vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
}
if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
struct tx_push_bd *push = txr->tx_push;
struct tx_bd *tx_push = &push->txbd1;
struct tx_bd_ext *tx_push1 = &push->txbd2;
void *pdata = tx_push1 + 1;
int j;
/* Set COAL_NOW to be ready quickly for the next push */
tx_push->tx_bd_len_flags_type =
cpu_to_le32((length << TX_BD_LEN_SHIFT) |
TX_BD_TYPE_LONG_TX_BD |
TX_BD_FLAGS_LHINT_512_AND_SMALLER |
TX_BD_FLAGS_COAL_NOW |
TX_BD_FLAGS_PACKET_END |
(2 << TX_BD_FLAGS_BD_CNT_SHIFT));
if (skb->ip_summed == CHECKSUM_PARTIAL)
tx_push1->tx_bd_hsize_lflags =
cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
else
tx_push1->tx_bd_hsize_lflags = 0;
tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
skb_copy_from_linear_data(skb, pdata, len);
pdata += len;
for (j = 0; j < last_frag; j++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
void *fptr;
fptr = skb_frag_address_safe(frag);
if (!fptr)
goto normal_tx;
memcpy(pdata, fptr, skb_frag_size(frag));
pdata += skb_frag_size(frag);
}
memcpy(txbd, tx_push, sizeof(*txbd));
prod = NEXT_TX(prod);
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
memcpy(txbd, tx_push1, sizeof(*txbd));
prod = NEXT_TX(prod);
push->doorbell =
cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
txr->tx_prod = prod;
netdev_tx_sent_queue(txq, skb->len);
__iowrite64_copy(txr->tx_doorbell, push,
(length + sizeof(*push) + 8) / 8);
tx_buf->is_push = 1;
goto tx_done;
}
normal_tx:
if (length < BNXT_MIN_PKT_SIZE) {
pad = BNXT_MIN_PKT_SIZE - length;
if (skb_pad(skb, pad)) {
/* SKB already freed. */
tx_buf->skb = NULL;
return NETDEV_TX_OK;
}
length = BNXT_MIN_PKT_SIZE;
}
mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
dev_kfree_skb_any(skb);
tx_buf->skb = NULL;
return NETDEV_TX_OK;
}
dma_unmap_addr_set(tx_buf, mapping, mapping);
flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
prod = NEXT_TX(prod);
txbd1 = (struct tx_bd_ext *)
&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
txbd1->tx_bd_hsize_lflags = 0;
if (skb_is_gso(skb)) {
u32 hdr_len;
if (skb->encapsulation)
hdr_len = skb_inner_network_offset(skb) +
skb_inner_network_header_len(skb) +
inner_tcp_hdrlen(skb);
else
hdr_len = skb_transport_offset(skb) +
tcp_hdrlen(skb);
txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
TX_BD_FLAGS_T_IPID |
(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
length = skb_shinfo(skb)->gso_size;
txbd1->tx_bd_mss = cpu_to_le32(length);
length += hdr_len;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
txbd1->tx_bd_hsize_lflags =
cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
txbd1->tx_bd_mss = 0;
}
length >>= 9;
flags |= bnxt_lhint_arr[length];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
for (i = 0; i < last_frag; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
prod = NEXT_TX(prod);
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
len = skb_frag_size(frag);
mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
goto tx_dma_error;
tx_buf = &txr->tx_buf_ring[prod];
dma_unmap_addr_set(tx_buf, mapping, mapping);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
flags = len << TX_BD_LEN_SHIFT;
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
}
flags &= ~TX_BD_LEN;
txbd->tx_bd_len_flags_type =
cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
TX_BD_FLAGS_PACKET_END);
netdev_tx_sent_queue(txq, skb->len);
/* Sync BD data before updating doorbell */
wmb();
prod = NEXT_TX(prod);
txr->tx_prod = prod;
writel(DB_KEY_TX | prod, txr->tx_doorbell);
writel(DB_KEY_TX | prod, txr->tx_doorbell);
tx_done:
mmiowb();
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
netif_tx_stop_queue(txq);
/* netif_tx_stop_queue() must be done before checking
* tx index in bnxt_tx_avail() below, because in
* bnxt_tx_int(), we update tx index before checking for
* netif_tx_queue_stopped().
*/
smp_mb();
if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
netif_tx_wake_queue(txq);
}
return NETDEV_TX_OK;
tx_dma_error:
last_frag = i;
/* start back at beginning and unmap skb */
prod = txr->tx_prod;
tx_buf = &txr->tx_buf_ring[prod];
tx_buf->skb = NULL;
dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), PCI_DMA_TODEVICE);
prod = NEXT_TX(prod);
/* unmap remaining mapped pages */
for (i = 0; i < last_frag; i++) {
prod = NEXT_TX(prod);
tx_buf = &txr->tx_buf_ring[prod];
dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_frag_size(&skb_shinfo(skb)->frags[i]),
PCI_DMA_TODEVICE);
}
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
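/* Completion-side counterpart of bnxt_start_xmit(): reclaim up to
 * nr_pkts transmitted packets starting at txr->tx_cons, unmapping and
 * freeing each skb, then wake the queue if it was stopped and enough
 * descriptors have been freed.
 */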
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
int index = txr - &bp->tx_ring[0];
struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
u16 cons = txr->tx_cons;
struct pci_dev *pdev = bp->pdev;
int i;
unsigned int tx_bytes = 0;
for (i = 0; i < nr_pkts; i++) {
struct bnxt_sw_tx_bd *tx_buf;
struct sk_buff *skb;
int j, last;
tx_buf = &txr->tx_buf_ring[cons];
cons = NEXT_TX(cons);
skb = tx_buf->skb;
tx_buf->skb = NULL;
if (tx_buf->is_push) {
tx_buf->is_push = 0;
goto next_tx_int;
}
dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), PCI_DMA_TODEVICE);
last = tx_buf->nr_frags;
for (j = 0; j < last; j++) {
cons = NEXT_TX(cons);
tx_buf = &txr->tx_buf_ring[cons];
dma_unmap_page(
&pdev->dev,
dma_unmap_addr(tx_buf, mapping),
skb_frag_size(&skb_shinfo(skb)->frags[j]),
PCI_DMA_TODEVICE);
}
next_tx_int:
cons = NEXT_TX(cons);
tx_bytes += skb->len;
dev_kfree_skb_any(skb);
}
netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
txr->tx_cons = cons;
/* Need to make the tx_cons update visible to bnxt_start_xmit()
* before checking for netif_tx_queue_stopped(). Without the
* memory barrier, there is a small possibility that bnxt_start_xmit()
* will miss it and cause the queue to be stopped forever.
*/
smp_mb();
if (unlikely(netif_tx_queue_stopped(txq)) &&
(bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
__netif_tx_lock(txq, smp_processor_id());
if (netif_tx_queue_stopped(txq) &&
bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
txr->dev_state != BNXT_DEV_STATE_CLOSING)
netif_tx_wake_queue(txq);
__netif_tx_unlock(txq);
}
}
static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
gfp_t gfp)
{
u8 *data;
struct pci_dev *pdev = bp->pdev;
data = kmalloc(bp->rx_buf_size, gfp);
if (!data)
return NULL;
*mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&pdev->dev, *mapping)) {
kfree(data);
data = NULL;
}
return data;
}
static inline int bnxt_alloc_rx_data(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
u16 prod, gfp_t gfp)
{
struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
u8 *data;
dma_addr_t mapping;
data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
if (!data)
return -ENOMEM;
rx_buf->data = data;
dma_unmap_addr_set(rx_buf, mapping, mapping);
rxbd->rx_bd_haddr = cpu_to_le64(mapping);
return 0;
}
static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
u8 *data)
{
u16 prod = rxr->rx_prod;
struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
struct rx_bd *cons_bd, *prod_bd;
prod_rx_buf = &rxr->rx_buf_ring[prod];
cons_rx_buf = &rxr->rx_buf_ring[cons];
prod_rx_buf->data = data;
dma_unmap_addr_set(prod_rx_buf, mapping,
dma_unmap_addr(cons_rx_buf, mapping));
prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}
static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
u16 next, max = rxr->rx_agg_bmap_size;
next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
if (next >= max)
next = find_first_zero_bit(rxr->rx_agg_bmap, max);
return next;
}
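/* The aggregation bitmap is searched from idx to the end and then wraps
 * back to the start, so a free software slot is found even when entries
 * just ahead of the producer are still owned by in-flight buffers.
 */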
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
u16 prod, gfp_t gfp)
{
struct rx_bd *rxbd =
&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
struct bnxt_sw_rx_agg_bd *rx_agg_buf;
struct pci_dev *pdev = bp->pdev;
struct page *page;
dma_addr_t mapping;
u16 sw_prod = rxr->rx_sw_agg_prod;
page = alloc_page(gfp);
if (!page)
return -ENOMEM;
mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&pdev->dev, mapping)) {
__free_page(page);
return -EIO;
}
if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
__set_bit(sw_prod, rxr->rx_agg_bmap);
rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
rx_agg_buf->page = page;
rx_agg_buf->mapping = mapping;
rxbd->rx_bd_haddr = cpu_to_le64(mapping);
rxbd->rx_bd_opaque = sw_prod;
return 0;
}
static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
u32 agg_bufs)
{
struct bnxt *bp = bnapi->bp;
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u16 prod = rxr->rx_agg_prod;
u16 sw_prod = rxr->rx_sw_agg_prod;
u32 i;
for (i = 0; i < agg_bufs; i++) {
u16 cons;
struct rx_agg_cmp *agg;
struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
struct rx_bd *prod_bd;
struct page *page;
agg = (struct rx_agg_cmp *)
&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
cons = agg->rx_agg_cmp_opaque;
__clear_bit(cons, rxr->rx_agg_bmap);
if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
__set_bit(sw_prod, rxr->rx_agg_bmap);
prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
cons_rx_buf = &rxr->rx_agg_ring[cons];
/* It is possible for sw_prod to be equal to cons, so
* set cons_rx_buf->page to NULL first.
*/
page = cons_rx_buf->page;
cons_rx_buf->page = NULL;
prod_rx_buf->page = page;
prod_rx_buf->mapping = cons_rx_buf->mapping;
prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
prod_bd->rx_bd_opaque = sw_prod;
prod = NEXT_RX_AGG(prod);
sw_prod = NEXT_RX_AGG(sw_prod);
cp_cons = NEXT_CMP(cp_cons);
}
rxr->rx_agg_prod = prod;
rxr->rx_sw_agg_prod = sw_prod;
}
static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr, u16 cons,
u16 prod, u8 *data, dma_addr_t dma_addr,
unsigned int len)
{
int err;
struct sk_buff *skb;
err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
if (unlikely(err)) {
bnxt_reuse_rx_data(rxr, cons, data);
return NULL;
}
skb = build_skb(data, 0);
dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
if (!skb) {
kfree(data);
return NULL;
}
skb_reserve(skb, BNXT_RX_OFFSET);
skb_put(skb, len);
return skb;
}
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
struct sk_buff *skb, u16 cp_cons,
u32 agg_bufs)
{
struct pci_dev *pdev = bp->pdev;
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u16 prod = rxr->rx_agg_prod;
u32 i;
for (i = 0; i < agg_bufs; i++) {
u16 cons, frag_len;
struct rx_agg_cmp *agg;
struct bnxt_sw_rx_agg_bd *cons_rx_buf;
struct page *page;
dma_addr_t mapping;
agg = (struct rx_agg_cmp *)
&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
cons = agg->rx_agg_cmp_opaque;
frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
cons_rx_buf = &rxr->rx_agg_ring[cons];
skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
__clear_bit(cons, rxr->rx_agg_bmap);
/* It is possible for bnxt_alloc_rx_page() to allocate
* a sw_prod index that equals the cons index, so we
* need to clear the cons entry now.
*/
mapping = dma_unmap_addr(cons_rx_buf, mapping);
page = cons_rx_buf->page;
cons_rx_buf->page = NULL;
if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
struct skb_shared_info *shinfo;
unsigned int nr_frags;
shinfo = skb_shinfo(skb);
nr_frags = --shinfo->nr_frags;
__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
dev_kfree_skb(skb);
cons_rx_buf->page = page;
/* Update prod since possibly some pages have been
* allocated already.
*/
rxr->rx_agg_prod = prod;
bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
return NULL;
}
dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
skb->data_len += frag_len;
skb->len += frag_len;
skb->truesize += PAGE_SIZE;
prod = NEXT_RX_AGG(prod);
cp_cons = NEXT_CMP(cp_cons);
}
rxr->rx_agg_prod = prod;
return skb;
}
static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u8 agg_bufs, u32 *raw_cons)
{
u16 last;
struct rx_agg_cmp *agg;
*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
last = RING_CMP(*raw_cons);
agg = (struct rx_agg_cmp *)
&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
return RX_AGG_CMP_VALID(agg, *raw_cons);
}
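/* An RX or TPA-end completion may be followed by agg_bufs aggregation
 * completions.  Advancing the raw consumer past all of them and
 * validating only the last entry is sufficient: the hardware writes
 * completions in order, so if the last one is valid, the earlier ones
 * are too.
 */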
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
unsigned int len,
dma_addr_t mapping)
{
struct bnxt *bp = bnapi->bp;
struct pci_dev *pdev = bp->pdev;
struct sk_buff *skb;
skb = napi_alloc_skb(&bnapi->napi, len);
if (!skb)
return NULL;
dma_sync_single_for_cpu(&pdev->dev, mapping,
bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);
memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);
dma_sync_single_for_device(&pdev->dev, mapping,
bp->rx_copy_thresh,
PCI_DMA_FROMDEVICE);
skb_put(skb, len);
return skb;
}
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
struct rx_tpa_start_cmp *tpa_start,
struct rx_tpa_start_cmp_ext *tpa_start1)
{
u8 agg_id = TPA_START_AGG_ID(tpa_start);
u16 cons, prod;
struct bnxt_tpa_info *tpa_info;
struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
struct rx_bd *prod_bd;
dma_addr_t mapping;
cons = tpa_start->rx_tpa_start_cmp_opaque;
prod = rxr->rx_prod;
cons_rx_buf = &rxr->rx_buf_ring[cons];
prod_rx_buf = &rxr->rx_buf_ring[prod];
tpa_info = &rxr->rx_tpa[agg_id];
prod_rx_buf->data = tpa_info->data;
mapping = tpa_info->mapping;
dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
tpa_info->data = cons_rx_buf->data;
cons_rx_buf->data = NULL;
tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);
tpa_info->len =
le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
RX_TPA_START_CMP_LEN_SHIFT;
if (likely(TPA_START_HASH_VALID(tpa_start))) {
u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
tpa_info->hash_type = PKT_HASH_TYPE_L4;
tpa_info->gso_type = SKB_GSO_TCPV4;
/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
if (hash_type == 3)
tpa_info->gso_type = SKB_GSO_TCPV6;
tpa_info->rss_hash =
le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
} else {
tpa_info->hash_type = PKT_HASH_TYPE_NONE;
tpa_info->gso_type = 0;
if (netif_msg_rx_err(bp))
netdev_warn(bp->dev, "TPA packet without valid hash\n");
}
tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
rxr->rx_prod = NEXT_RX(prod);
cons = NEXT_RX(cons);
cons_rx_buf = &rxr->rx_buf_ring[cons];
bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
rxr->rx_prod = NEXT_RX(rxr->rx_prod);
cons_rx_buf->data = NULL;
}
static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
u16 cp_cons, u32 agg_bufs)
{
if (agg_bufs)
bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
}
#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
struct rx_tpa_end_cmp *tpa_end,
struct rx_tpa_end_cmp_ext *tpa_end1,
struct sk_buff *skb)
{
#ifdef CONFIG_INET
struct tcphdr *th;
int payload_off, tcp_opt_len = 0;
int len, nw_off;
u16 segs;
segs = TPA_END_TPA_SEGS(tpa_end);
if (segs == 1)
return skb;
NAPI_GRO_CB(skb)->count = segs;
skb_shinfo(skb)->gso_size =
le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
skb_shinfo(skb)->gso_type = tpa_info->gso_type;
payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
if (TPA_END_GRO_TS(tpa_end))
tcp_opt_len = 12;
if (tpa_info->gso_type == SKB_GSO_TCPV4) {
struct iphdr *iph;
nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
ETH_HLEN;
skb_set_network_header(skb, nw_off);
iph = ip_hdr(skb);
skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
len = skb->len - skb_transport_offset(skb);
th = tcp_hdr(skb);
th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
struct ipv6hdr *iph;
nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
ETH_HLEN;
skb_set_network_header(skb, nw_off);
iph = ipv6_hdr(skb);
skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
len = skb->len - skb_transport_offset(skb);
th = tcp_hdr(skb);
th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
} else {
dev_kfree_skb_any(skb);
return NULL;
}
tcp_gro_complete(skb);
if (nw_off) { /* tunnel */
struct udphdr *uh = NULL;
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = (struct iphdr *)skb->data;
if (iph->protocol == IPPROTO_UDP)
uh = (struct udphdr *)(iph + 1);
} else {
struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
if (iph->nexthdr == IPPROTO_UDP)
uh = (struct udphdr *)(iph + 1);
}
if (uh) {
if (uh->check)
skb_shinfo(skb)->gso_type |=
SKB_GSO_UDP_TUNNEL_CSUM;
else
skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
}
}
#endif
return skb;
}
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
struct bnxt_napi *bnapi,
u32 *raw_cons,
struct rx_tpa_end_cmp *tpa_end,
struct rx_tpa_end_cmp_ext *tpa_end1,
bool *agg_event)
{
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u8 agg_id = TPA_END_AGG_ID(tpa_end);
u8 *data, agg_bufs;
u16 cp_cons = RING_CMP(*raw_cons);
unsigned int len;
struct bnxt_tpa_info *tpa_info;
dma_addr_t mapping;
struct sk_buff *skb;
tpa_info = &rxr->rx_tpa[agg_id];
data = tpa_info->data;
prefetch(data);
len = tpa_info->len;
mapping = tpa_info->mapping;
agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
if (agg_bufs) {
if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
return ERR_PTR(-EBUSY);
*agg_event = true;
cp_cons = NEXT_CMP(cp_cons);
}
if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
agg_bufs, (int)MAX_SKB_FRAGS);
return NULL;
}
if (len <= bp->rx_copy_thresh) {
skb = bnxt_copy_skb(bnapi, data, len, mapping);
if (!skb) {
bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
return NULL;
}
} else {
u8 *new_data;
dma_addr_t new_mapping;
new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
if (!new_data) {
bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
return NULL;
}
tpa_info->data = new_data;
tpa_info->mapping = new_mapping;
skb = build_skb(data, 0);
dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
if (!skb) {
kfree(data);
bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
return NULL;
}
skb_reserve(skb, BNXT_RX_OFFSET);
skb_put(skb, len);
}
if (agg_bufs) {
skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
return NULL;
}
}
skb->protocol = eth_type_trans(skb, bp->dev);
if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
netdev_features_t features = skb->dev->features;
u16 vlan_proto = tpa_info->metadata >>
RX_CMP_FLAGS2_METADATA_TPID_SFT;
if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
vlan_proto == ETH_P_8021Q) ||
((features & NETIF_F_HW_VLAN_STAG_RX) &&
vlan_proto == ETH_P_8021AD)) {
__vlan_hwaccel_put_tag(skb, htons(vlan_proto),
tpa_info->metadata &
RX_CMP_FLAGS2_METADATA_VID_MASK);
}
}
skb_checksum_none_assert(skb);
if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level =
(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
}
if (TPA_END_GRO(tpa_end))
skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb);
return skb;
}
/* returns the following:
* 1 - 1 packet successfully received
* 0 - successful TPA_START, packet not completed yet
* -EBUSY - completion ring does not have all the agg buffers yet
* -ENOMEM - packet aborted due to out of memory
* -EIO - packet aborted due to hw error indicated in BD
*/
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
bool *agg_event)
{
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
struct net_device *dev = bp->dev;
struct rx_cmp *rxcmp;
struct rx_cmp_ext *rxcmp1;
u32 tmp_raw_cons = *raw_cons;
u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
u8 *data, agg_bufs, cmp_type;
dma_addr_t dma_addr;
struct sk_buff *skb;
int rc = 0;
rxcmp = (struct rx_cmp *)
&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
cp_cons = RING_CMP(tmp_raw_cons);
rxcmp1 = (struct rx_cmp_ext *)
&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
return -EBUSY;
cmp_type = RX_CMP_TYPE(rxcmp);
prod = rxr->rx_prod;
if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
(struct rx_tpa_start_cmp_ext *)rxcmp1);
goto next_rx_no_prod;
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
(struct rx_tpa_end_cmp *)rxcmp,
(struct rx_tpa_end_cmp_ext *)rxcmp1,
agg_event);
if (unlikely(IS_ERR(skb)))
return -EBUSY;
rc = -ENOMEM;
if (likely(skb)) {
skb_record_rx_queue(skb, bnapi->index);
skb_mark_napi_id(skb, &bnapi->napi);
if (bnxt_busy_polling(bnapi))
netif_receive_skb(skb);
else
napi_gro_receive(&bnapi->napi, skb);
rc = 1;
}
goto next_rx_no_prod;
}
cons = rxcmp->rx_cmp_opaque;
rx_buf = &rxr->rx_buf_ring[cons];
data = rx_buf->data;
prefetch(data);
agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
RX_CMP_AGG_BUFS_SHIFT;
if (agg_bufs) {
if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
return -EBUSY;
cp_cons = NEXT_CMP(cp_cons);
*agg_event = true;
}
rx_buf->data = NULL;
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
bnxt_reuse_rx_data(rxr, cons, data);
if (agg_bufs)
bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
rc = -EIO;
goto next_rx;
}
len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
dma_addr = dma_unmap_addr(rx_buf, mapping);
if (len <= bp->rx_copy_thresh) {
skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
bnxt_reuse_rx_data(rxr, cons, data);
if (!skb) {
rc = -ENOMEM;
goto next_rx;
}
} else {
skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
if (!skb) {
rc = -ENOMEM;
goto next_rx;
}
}
if (agg_bufs) {
skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
if (!skb) {
rc = -ENOMEM;
goto next_rx;
}
}
if (RX_CMP_HASH_VALID(rxcmp)) {
u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
enum pkt_hash_types type = PKT_HASH_TYPE_L4;
/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
if (hash_type != 1 && hash_type != 3)
type = PKT_HASH_TYPE_L3;
skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
}
skb->protocol = eth_type_trans(skb, dev);
if (rxcmp1->rx_cmp_flags2 &
cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
netdev_features_t features = skb->dev->features;
u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
vlan_proto == ETH_P_8021Q) ||
((features & NETIF_F_HW_VLAN_STAG_RX) &&
vlan_proto == ETH_P_8021AD))
__vlan_hwaccel_put_tag(skb, htons(vlan_proto),
meta_data &
RX_CMP_FLAGS2_METADATA_VID_MASK);
}
skb_checksum_none_assert(skb);
if (RX_CMP_L4_CS_OK(rxcmp1)) {
if (dev->features & NETIF_F_RXCSUM) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = RX_CMP_ENCAP(rxcmp1);
}
} else {
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
if (dev->features & NETIF_F_RXCSUM)
cpr->rx_l4_csum_errors++;
}
}
skb_record_rx_queue(skb, bnapi->index);
skb_mark_napi_id(skb, &bnapi->napi);
if (bnxt_busy_polling(bnapi))
netif_receive_skb(skb);
else
napi_gro_receive(&bnapi->napi, skb);
rc = 1;
next_rx:
rxr->rx_prod = NEXT_RX(prod);
next_rx_no_prod:
*raw_cons = tmp_raw_cons;
return rc;
}
static int bnxt_async_event_process(struct bnxt *bp,
struct hwrm_async_event_cmpl *cmpl)
{
u16 event_id = le16_to_cpu(cmpl->event_id);
	/* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
switch (event_id) {
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
schedule_work(&bp->sp_task);
break;
default:
netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
event_id);
break;
}
return 0;
}
static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
{
u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
(struct hwrm_fwd_req_cmpl *)txcmp;
switch (cmpl_type) {
case CMPL_BASE_TYPE_HWRM_DONE:
seq_id = le16_to_cpu(h_cmpl->sequence_id);
if (seq_id == bp->hwrm_intr_seq_id)
bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
else
netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
break;
case CMPL_BASE_TYPE_HWRM_FWD_REQ:
vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
if ((vf_id < bp->pf.first_vf_id) ||
(vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
vf_id);
return -EINVAL;
}
set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
schedule_work(&bp->sp_task);
break;
case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
bnxt_async_event_process(bp,
(struct hwrm_async_event_cmpl *)txcmp);
default:
break;
}
return 0;
}
static irqreturn_t bnxt_msix(int irq, void *dev_instance)
{
struct bnxt_napi *bnapi = dev_instance;
struct bnxt *bp = bnapi->bp;
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
u32 cons = RING_CMP(cpr->cp_raw_cons);
prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
napi_schedule(&bnapi->napi);
return IRQ_HANDLED;
}
static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
u32 raw_cons = cpr->cp_raw_cons;
u16 cons = RING_CMP(raw_cons);
struct tx_cmp *txcmp;
txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
return TX_CMP_VALID(txcmp, raw_cons);
}
static irqreturn_t bnxt_inta(int irq, void *dev_instance)
{
struct bnxt_napi *bnapi = dev_instance;
struct bnxt *bp = bnapi->bp;
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
u32 cons = RING_CMP(cpr->cp_raw_cons);
u32 int_status;
prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
if (!bnxt_has_work(bp, cpr)) {
int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
		/* Return IRQ_NONE if this interrupt is not ours
		 * (spurious or shared).
		 */
if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
return IRQ_NONE;
}
/* disable ring IRQ */
BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
/* Return here if interrupt is shared and is disabled. */
if (unlikely(atomic_read(&bp->intr_sem) != 0))
return IRQ_HANDLED;
napi_schedule(&bnapi->napi);
return IRQ_HANDLED;
}
static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
u32 raw_cons = cpr->cp_raw_cons;
u32 cons;
int tx_pkts = 0;
int rx_pkts = 0;
bool rx_event = false;
bool agg_event = false;
struct tx_cmp *txcmp;
while (1) {
int rc;
cons = RING_CMP(raw_cons);
txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
if (!TX_CMP_VALID(txcmp, raw_cons))
break;
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
tx_pkts++;
			/* Claim the full budget so bnxt_poll() returns
			 * without completing NAPI and the remaining TX
			 * work is picked up on the next poll.
			 */
if (unlikely(tx_pkts > bp->tx_wake_thresh))
rx_pkts = budget;
} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
if (likely(rc >= 0))
rx_pkts += rc;
else if (rc == -EBUSY) /* partial completion */
break;
rx_event = true;
} else if (unlikely((TX_CMP_TYPE(txcmp) ==
CMPL_BASE_TYPE_HWRM_DONE) ||
(TX_CMP_TYPE(txcmp) ==
CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
(TX_CMP_TYPE(txcmp) ==
CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
bnxt_hwrm_handler(bp, txcmp);
}
raw_cons = NEXT_RAW_CMP(raw_cons);
if (rx_pkts == budget)
break;
}
cpr->cp_raw_cons = raw_cons;
/* ACK completion ring before freeing tx ring and producing new
* buffers in rx/agg rings to prevent overflowing the completion
* ring.
*/
BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
if (tx_pkts)
bnxt_tx_int(bp, bnapi, tx_pkts);
if (rx_event) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
if (agg_event) {
writel(DB_KEY_RX | rxr->rx_agg_prod,
rxr->rx_agg_doorbell);
writel(DB_KEY_RX | rxr->rx_agg_prod,
rxr->rx_agg_doorbell);
}
}
return rx_pkts;
}
static int bnxt_poll(struct napi_struct *napi, int budget)
{
struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
struct bnxt *bp = bnapi->bp;
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int work_done = 0;
if (!bnxt_lock_napi(bnapi))
return budget;
while (1) {
work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
if (work_done >= budget)
break;
if (!bnxt_has_work(bp, cpr)) {
napi_complete(napi);
BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
break;
}
}
mmiowb();
bnxt_unlock_napi(bnapi);
return work_done;
}
#ifdef CONFIG_NET_RX_BUSY_POLL
static int bnxt_busy_poll(struct napi_struct *napi)
{
struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
struct bnxt *bp = bnapi->bp;
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int rx_work, budget = 4;
if (atomic_read(&bp->intr_sem) != 0)
return LL_FLUSH_FAILED;
if (!bnxt_lock_poll(bnapi))
return LL_FLUSH_BUSY;
rx_work = bnxt_poll_work(bp, bnapi, budget);
BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
bnxt_unlock_poll(bnapi);
return rx_work;
}
#endif
static void bnxt_free_tx_skbs(struct bnxt *bp)
{
int i, max_idx;
struct pci_dev *pdev = bp->pdev;
if (!bp->tx_ring)
return;
max_idx = bp->tx_nr_pages * TX_DESC_CNT;
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
int j;
for (j = 0; j < max_idx;) {
struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
struct sk_buff *skb = tx_buf->skb;
int k, last;
if (!skb) {
j++;
continue;
}
tx_buf->skb = NULL;
if (tx_buf->is_push) {
dev_kfree_skb(skb);
j += 2;
continue;
}
dma_unmap_single(&pdev->dev,
dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
last = tx_buf->nr_frags;
j += 2;
for (k = 0; k < last; k++, j++) {
int ring_idx = j & bp->tx_ring_mask;
skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
tx_buf = &txr->tx_buf_ring[ring_idx];
dma_unmap_page(
&pdev->dev,
dma_unmap_addr(tx_buf, mapping),
skb_frag_size(frag), PCI_DMA_TODEVICE);
}
dev_kfree_skb(skb);
}
netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
}
}
static void bnxt_free_rx_skbs(struct bnxt *bp)
{
int i, max_idx, max_agg_idx;
struct pci_dev *pdev = bp->pdev;
if (!bp->rx_ring)
return;
max_idx = bp->rx_nr_pages * RX_DESC_CNT;
max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
int j;
if (rxr->rx_tpa) {
for (j = 0; j < MAX_TPA; j++) {
struct bnxt_tpa_info *tpa_info =
&rxr->rx_tpa[j];
u8 *data = tpa_info->data;
if (!data)
continue;
dma_unmap_single(
&pdev->dev,
dma_unmap_addr(tpa_info, mapping),
bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
tpa_info->data = NULL;
kfree(data);
}
}
for (j = 0; j < max_idx; j++) {
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
u8 *data = rx_buf->data;
if (!data)
continue;
dma_unmap_single(&pdev->dev,
dma_unmap_addr(rx_buf, mapping),
bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
rx_buf->data = NULL;
kfree(data);
}
for (j = 0; j < max_agg_idx; j++) {
struct bnxt_sw_rx_agg_bd *rx_agg_buf =
&rxr->rx_agg_ring[j];
struct page *page = rx_agg_buf->page;
if (!page)
continue;
dma_unmap_page(&pdev->dev,
dma_unmap_addr(rx_agg_buf, mapping),
PAGE_SIZE, PCI_DMA_FROMDEVICE);
rx_agg_buf->page = NULL;
__clear_bit(j, rxr->rx_agg_bmap);
__free_page(page);
}
}
}
static void bnxt_free_skbs(struct bnxt *bp)
{
bnxt_free_tx_skbs(bp);
bnxt_free_rx_skbs(bp);
}
static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
{
struct pci_dev *pdev = bp->pdev;
int i;
for (i = 0; i < ring->nr_pages; i++) {
if (!ring->pg_arr[i])
continue;
dma_free_coherent(&pdev->dev, ring->page_size,
ring->pg_arr[i], ring->dma_arr[i]);
ring->pg_arr[i] = NULL;
}
if (ring->pg_tbl) {
dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
ring->pg_tbl, ring->pg_tbl_map);
ring->pg_tbl = NULL;
}
if (ring->vmem_size && *ring->vmem) {
vfree(*ring->vmem);
*ring->vmem = NULL;
}
}
static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
{
int i;
struct pci_dev *pdev = bp->pdev;
if (ring->nr_pages > 1) {
ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
ring->nr_pages * 8,
&ring->pg_tbl_map,
GFP_KERNEL);
if (!ring->pg_tbl)
return -ENOMEM;
}
for (i = 0; i < ring->nr_pages; i++) {
ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
ring->page_size,
&ring->dma_arr[i],
GFP_KERNEL);
if (!ring->pg_arr[i])
return -ENOMEM;
if (ring->nr_pages > 1)
ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
}
if (ring->vmem_size) {
*ring->vmem = vzalloc(ring->vmem_size);
if (!(*ring->vmem))
return -ENOMEM;
}
return 0;
}
static void bnxt_free_rx_rings(struct bnxt *bp)
{
int i;
if (!bp->rx_ring)
return;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring;
kfree(rxr->rx_tpa);
rxr->rx_tpa = NULL;
kfree(rxr->rx_agg_bmap);
rxr->rx_agg_bmap = NULL;
ring = &rxr->rx_ring_struct;
bnxt_free_ring(bp, ring);
ring = &rxr->rx_agg_ring_struct;
bnxt_free_ring(bp, ring);
}
}
static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
int i, rc, agg_rings = 0, tpa_rings = 0;
if (!bp->rx_ring)
return -ENOMEM;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
agg_rings = 1;
if (bp->flags & BNXT_FLAG_TPA)
tpa_rings = 1;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring;
ring = &rxr->rx_ring_struct;
rc = bnxt_alloc_ring(bp, ring);
if (rc)
return rc;
if (agg_rings) {
u16 mem_size;
ring = &rxr->rx_agg_ring_struct;
rc = bnxt_alloc_ring(bp, ring);
if (rc)
return rc;
rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
mem_size = rxr->rx_agg_bmap_size / 8;
rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
if (!rxr->rx_agg_bmap)
return -ENOMEM;
if (tpa_rings) {
rxr->rx_tpa = kcalloc(MAX_TPA,
sizeof(struct bnxt_tpa_info),
GFP_KERNEL);
if (!rxr->rx_tpa)
return -ENOMEM;
}
}
}
return 0;
}
static void bnxt_free_tx_rings(struct bnxt *bp)
{
int i;
struct pci_dev *pdev = bp->pdev;
if (!bp->tx_ring)
return;
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
struct bnxt_ring_struct *ring;
if (txr->tx_push) {
dma_free_coherent(&pdev->dev, bp->tx_push_size,
txr->tx_push, txr->tx_push_mapping);
txr->tx_push = NULL;
}
ring = &txr->tx_ring_struct;
bnxt_free_ring(bp, ring);
}
}
static int bnxt_alloc_tx_rings(struct bnxt *bp)
{
int i, j, rc;
struct pci_dev *pdev = bp->pdev;
bp->tx_push_size = 0;
if (bp->tx_push_thresh) {
int push_size;
push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
bp->tx_push_thresh);
if (push_size > 128) {
push_size = 0;
bp->tx_push_thresh = 0;
}
bp->tx_push_size = push_size;
}
for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
struct bnxt_ring_struct *ring;
ring = &txr->tx_ring_struct;
rc = bnxt_alloc_ring(bp, ring);
if (rc)
return rc;
if (bp->tx_push_size) {
struct tx_bd *txbd;
dma_addr_t mapping;
			/* One pre-allocated DMA buffer to back up the
			 * TX push operation
			 */
txr->tx_push = dma_alloc_coherent(&pdev->dev,
bp->tx_push_size,
&txr->tx_push_mapping,
GFP_KERNEL);
if (!txr->tx_push)
return -ENOMEM;
txbd = &txr->tx_push->txbd1;
mapping = txr->tx_push_mapping +
sizeof(struct tx_push_bd);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
memset(txbd + 1, 0, sizeof(struct tx_bd_ext));
}
ring->queue_id = bp->q_info[j].queue_id;
if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
j++;
}
return 0;
}
static void bnxt_free_cp_rings(struct bnxt *bp)
{
int i;
if (!bp->bnapi)
return;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr;
struct bnxt_ring_struct *ring;
if (!bnapi)
continue;
cpr = &bnapi->cp_ring;
ring = &cpr->cp_ring_struct;
bnxt_free_ring(bp, ring);
}
}
static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
int i, rc;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr;
struct bnxt_ring_struct *ring;
if (!bnapi)
continue;
cpr = &bnapi->cp_ring;
ring = &cpr->cp_ring_struct;
rc = bnxt_alloc_ring(bp, ring);
if (rc)
return rc;
}
return 0;
}
static void bnxt_init_ring_struct(struct bnxt *bp)
{
int i;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr;
struct bnxt_rx_ring_info *rxr;
struct bnxt_tx_ring_info *txr;
struct bnxt_ring_struct *ring;
if (!bnapi)
continue;
cpr = &bnapi->cp_ring;
ring = &cpr->cp_ring_struct;
ring->nr_pages = bp->cp_nr_pages;
ring->page_size = HW_CMPD_RING_SIZE;
ring->pg_arr = (void **)cpr->cp_desc_ring;
ring->dma_arr = cpr->cp_desc_mapping;
ring->vmem_size = 0;
rxr = bnapi->rx_ring;
if (!rxr)
goto skip_rx;
ring = &rxr->rx_ring_struct;
ring->nr_pages = bp->rx_nr_pages;
ring->page_size = HW_RXBD_RING_SIZE;
ring->pg_arr = (void **)rxr->rx_desc_ring;
ring->dma_arr = rxr->rx_desc_mapping;
ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
ring->vmem = (void **)&rxr->rx_buf_ring;
ring = &rxr->rx_agg_ring_struct;
ring->nr_pages = bp->rx_agg_nr_pages;
ring->page_size = HW_RXBD_RING_SIZE;
ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
ring->dma_arr = rxr->rx_agg_desc_mapping;
ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
ring->vmem = (void **)&rxr->rx_agg_ring;
skip_rx:
txr = bnapi->tx_ring;
if (!txr)
continue;
ring = &txr->tx_ring_struct;
ring->nr_pages = bp->tx_nr_pages;
ring->page_size = HW_RXBD_RING_SIZE;
ring->pg_arr = (void **)txr->tx_desc_ring;
ring->dma_arr = txr->tx_desc_mapping;
ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
ring->vmem = (void **)&txr->tx_buf_ring;
}
}
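/* Each hardware ring above is described by a generic bnxt_ring_struct:
 * pg_arr and dma_arr alias the per-ring descriptor page arrays, and
 * vmem (when vmem_size is non-zero) points at the software state ring
 * (tx_buf_ring, rx_buf_ring, rx_agg_ring) that bnxt_alloc_ring() will
 * vzalloc().  Completion rings keep no software state, hence
 * vmem_size = 0.
 */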
static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
{
int i;
u32 prod;
struct rx_bd **rx_buf_ring;
rx_buf_ring = (struct rx_bd **)ring->pg_arr;
for (i = 0, prod = 0; i < ring->nr_pages; i++) {
int j;
struct rx_bd *rxbd;
rxbd = rx_buf_ring[i];
if (!rxbd)
continue;
for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
rxbd->rx_bd_opaque = prod;
}
}
}
static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
{
struct net_device *dev = bp->dev;
struct bnxt_rx_ring_info *rxr;
struct bnxt_ring_struct *ring;
u32 prod, type;
int i;
type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
if (NET_IP_ALIGN == 2)
type |= RX_BD_FLAGS_SOP;
rxr = &bp->rx_ring[ring_nr];
ring = &rxr->rx_ring_struct;
bnxt_init_rxbd_pages(ring, type);
prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
ring_nr, i, bp->rx_ring_size);
break;
}
prod = NEXT_RX(prod);
}
rxr->rx_prod = prod;
ring->fw_ring_id = INVALID_HW_RING_ID;
ring = &rxr->rx_agg_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
return 0;
type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
bnxt_init_rxbd_pages(ring, type);
prod = rxr->rx_agg_prod;
for (i = 0; i < bp->rx_agg_ring_size; i++) {
if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
				    ring_nr, i, bp->rx_agg_ring_size);
break;
}
prod = NEXT_RX_AGG(prod);
}
rxr->rx_agg_prod = prod;
if (bp->flags & BNXT_FLAG_TPA) {
if (rxr->rx_tpa) {
u8 *data;
dma_addr_t mapping;
for (i = 0; i < MAX_TPA; i++) {
data = __bnxt_alloc_rx_data(bp, &mapping,
GFP_KERNEL);
if (!data)
return -ENOMEM;
rxr->rx_tpa[i].data = data;
rxr->rx_tpa[i].mapping = mapping;
}
} else {
netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
return -ENOMEM;
}
}
return 0;
}
static int bnxt_init_rx_rings(struct bnxt *bp)
{
int i, rc = 0;
for (i = 0; i < bp->rx_nr_rings; i++) {
rc = bnxt_init_one_rx_ring(bp, i);
if (rc)
break;
}
return rc;
}
static int bnxt_init_tx_rings(struct bnxt *bp)
{
u16 i;
bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
MAX_SKB_FRAGS + 1);
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
}
return 0;
}
static void bnxt_free_ring_grps(struct bnxt *bp)
{
kfree(bp->grp_info);
bp->grp_info = NULL;
}
static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
{
int i;
if (irq_re_init) {
bp->grp_info = kcalloc(bp->cp_nr_rings,
sizeof(struct bnxt_ring_grp_info),
GFP_KERNEL);
if (!bp->grp_info)
return -ENOMEM;
}
for (i = 0; i < bp->cp_nr_rings; i++) {
if (irq_re_init)
bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
}
return 0;
}
static void bnxt_free_vnics(struct bnxt *bp)
{
kfree(bp->vnic_info);
bp->vnic_info = NULL;
bp->nr_vnics = 0;
}
static int bnxt_alloc_vnics(struct bnxt *bp)
{
int num_vnics = 1;
#ifdef CONFIG_RFS_ACCEL
if (bp->flags & BNXT_FLAG_RFS)
num_vnics += bp->rx_nr_rings;
#endif
bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
GFP_KERNEL);
if (!bp->vnic_info)
return -ENOMEM;
bp->nr_vnics = num_vnics;
return 0;
}
static void bnxt_init_vnics(struct bnxt *bp)
{
int i;
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
vnic->fw_vnic_id = INVALID_HW_RING_ID;
vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
if (bp->vnic_info[i].rss_hash_key) {
if (i == 0)
prandom_bytes(vnic->rss_hash_key,
HW_HASH_KEY_SIZE);
else
memcpy(vnic->rss_hash_key,
bp->vnic_info[0].rss_hash_key,
HW_HASH_KEY_SIZE);
}
}
}
static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
{
int pages;
pages = ring_size / desc_per_pg;
if (!pages)
return 1;
pages++;
while (pages & (pages - 1))
pages++;
return pages;
}
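/* Worked example (illustrative, assuming 256 descriptors per 4K page):
 * ring sizes in this driver are of the form 2^n - 1 (e.g. 511 or 1023),
 * so ring_size = 1023 gives pages = 1023 / 256 = 3, the unconditional
 * increment rounds that up to 4, and the final loop leaves it at 4
 * since (4 & 3) == 0.  The loop bumps pages until it reaches a power of
 * two, which the ring mask arithmetic elsewhere depends on.
 */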
static void bnxt_set_tpa_flags(struct bnxt *bp)
{
bp->flags &= ~BNXT_FLAG_TPA;
if (bp->dev->features & NETIF_F_LRO)
bp->flags |= BNXT_FLAG_LRO;
if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0))
bp->flags |= BNXT_FLAG_GRO;
}
/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
* be set on entry.
*/
void bnxt_set_ring_params(struct bnxt *bp)
{
u32 ring_size, rx_size, rx_space;
u32 agg_factor = 0, agg_ring_size = 0;
/* 8 for CRC and VLAN */
rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
rx_space = rx_size + NET_SKB_PAD +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
ring_size = bp->rx_ring_size;
bp->rx_agg_ring_size = 0;
bp->rx_agg_nr_pages = 0;
if (bp->flags & BNXT_FLAG_TPA)
agg_factor = 4;
bp->flags &= ~BNXT_FLAG_JUMBO;
if (rx_space > PAGE_SIZE) {
u32 jumbo_factor;
bp->flags |= BNXT_FLAG_JUMBO;
jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
if (jumbo_factor > agg_factor)
agg_factor = jumbo_factor;
}
agg_ring_size = ring_size * agg_factor;
if (agg_ring_size) {
bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
RX_DESC_CNT);
if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
u32 tmp = agg_ring_size;
bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
tmp, agg_ring_size);
}
bp->rx_agg_ring_size = agg_ring_size;
bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
rx_space = rx_size + NET_SKB_PAD +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
bp->rx_buf_use_size = rx_size;
bp->rx_buf_size = rx_space;
bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
ring_size = bp->tx_ring_size;
bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
bp->cp_ring_size = ring_size;
bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
if (bp->cp_nr_pages > MAX_CP_PAGES) {
bp->cp_nr_pages = MAX_CP_PAGES;
bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
ring_size, bp->cp_ring_size);
}
bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
bp->cp_ring_mask = bp->cp_bit - 1;
}
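/* Sizing sketch (illustrative, assuming a 1500-byte MTU, 64-byte cache
 * lines and 4K pages): rx_size = SKB_DATA_ALIGN(1500 + 14 + 2 + 8) =
 * 1536, and rx_space adds NET_SKB_PAD plus the aligned size of struct
 * skb_shared_info, roughly 1536 + 64 + 320 = 1920 bytes.  That fits in
 * one page, so BNXT_FLAG_JUMBO stays clear and the aggregation ring is
 * only sized up (agg_factor = 4) when TPA is enabled.
 */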
static void bnxt_free_vnic_attributes(struct bnxt *bp)
{
int i;
struct bnxt_vnic_info *vnic;
struct pci_dev *pdev = bp->pdev;
if (!bp->vnic_info)
return;
for (i = 0; i < bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
kfree(vnic->fw_grp_ids);
vnic->fw_grp_ids = NULL;
kfree(vnic->uc_list);
vnic->uc_list = NULL;
if (vnic->mc_list) {
dma_free_coherent(&pdev->dev, vnic->mc_list_size,
vnic->mc_list, vnic->mc_list_mapping);
vnic->mc_list = NULL;
}
if (vnic->rss_table) {
dma_free_coherent(&pdev->dev, PAGE_SIZE,
vnic->rss_table,
vnic->rss_table_dma_addr);
vnic->rss_table = NULL;
}
vnic->rss_hash_key = NULL;
vnic->flags = 0;
}
}
static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
{
int i, rc = 0, size;
struct bnxt_vnic_info *vnic;
struct pci_dev *pdev = bp->pdev;
int max_rings;
for (i = 0; i < bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
if (mem_size > 0) {
vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
if (!vnic->uc_list) {
rc = -ENOMEM;
goto out;
}
}
}
if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
vnic->mc_list =
dma_alloc_coherent(&pdev->dev,
vnic->mc_list_size,
&vnic->mc_list_mapping,
GFP_KERNEL);
if (!vnic->mc_list) {
rc = -ENOMEM;
goto out;
}
}
if (vnic->flags & BNXT_VNIC_RSS_FLAG)
max_rings = bp->rx_nr_rings;
else
max_rings = 1;
vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
if (!vnic->fw_grp_ids) {
rc = -ENOMEM;
goto out;
}
/* Allocate rss table and hash key */
vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
&vnic->rss_table_dma_addr,
GFP_KERNEL);
if (!vnic->rss_table) {
rc = -ENOMEM;
goto out;
}
size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
}
return 0;
out:
return rc;
}
static void bnxt_free_hwrm_resources(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
bp->hwrm_cmd_resp_dma_addr);
bp->hwrm_cmd_resp_addr = NULL;
if (bp->hwrm_dbg_resp_addr) {
dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
bp->hwrm_dbg_resp_addr,
bp->hwrm_dbg_resp_dma_addr);
bp->hwrm_dbg_resp_addr = NULL;
}
}
static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
&bp->hwrm_cmd_resp_dma_addr,
GFP_KERNEL);
if (!bp->hwrm_cmd_resp_addr)
return -ENOMEM;
bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
HWRM_DBG_REG_BUF_SIZE,
&bp->hwrm_dbg_resp_dma_addr,
GFP_KERNEL);
if (!bp->hwrm_dbg_resp_addr)
netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
return 0;
}
static void bnxt_free_stats(struct bnxt *bp)
{
u32 size, i;
struct pci_dev *pdev = bp->pdev;
if (!bp->bnapi)
return;
size = sizeof(struct ctx_hw_stats);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
if (cpr->hw_stats) {
dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
cpr->hw_stats_map);
cpr->hw_stats = NULL;
}
}
}
static int bnxt_alloc_stats(struct bnxt *bp)
{
u32 size, i;
struct pci_dev *pdev = bp->pdev;
size = sizeof(struct ctx_hw_stats);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
&cpr->hw_stats_map,
GFP_KERNEL);
if (!cpr->hw_stats)
return -ENOMEM;
cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
}
return 0;
}
static void bnxt_clear_ring_indices(struct bnxt *bp)
{
int i;
if (!bp->bnapi)
return;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr;
struct bnxt_rx_ring_info *rxr;
struct bnxt_tx_ring_info *txr;
if (!bnapi)
continue;
cpr = &bnapi->cp_ring;
cpr->cp_raw_cons = 0;
txr = bnapi->tx_ring;
if (txr) {
txr->tx_prod = 0;
txr->tx_cons = 0;
}
rxr = bnapi->rx_ring;
if (rxr) {
rxr->rx_prod = 0;
rxr->rx_agg_prod = 0;
rxr->rx_sw_agg_prod = 0;
}
}
}
static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
{
#ifdef CONFIG_RFS_ACCEL
int i;
	/* We are under rtnl_lock and all our NAPIs have been disabled,
	 * so it is safe to delete the hash table.
	 */
for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
struct hlist_head *head;
struct hlist_node *tmp;
struct bnxt_ntuple_filter *fltr;
head = &bp->ntp_fltr_hash_tbl[i];
hlist_for_each_entry_safe(fltr, tmp, head, hash) {
hlist_del(&fltr->hash);
kfree(fltr);
}
}
if (irq_reinit) {
kfree(bp->ntp_fltr_bmap);
bp->ntp_fltr_bmap = NULL;
}
bp->ntp_fltr_count = 0;
#endif
}
static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
int i, rc = 0;
if (!(bp->flags & BNXT_FLAG_RFS))
return 0;
for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
bp->ntp_fltr_count = 0;
	bp->ntp_fltr_bmap = kzalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR) *
				    sizeof(long),
				    GFP_KERNEL);
if (!bp->ntp_fltr_bmap)
rc = -ENOMEM;
return rc;
#else
return 0;
#endif
}
static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
{
bnxt_free_vnic_attributes(bp);
bnxt_free_tx_rings(bp);
bnxt_free_rx_rings(bp);
bnxt_free_cp_rings(bp);
bnxt_free_ntp_fltrs(bp, irq_re_init);
if (irq_re_init) {
bnxt_free_stats(bp);
bnxt_free_ring_grps(bp);
bnxt_free_vnics(bp);
kfree(bp->tx_ring);
bp->tx_ring = NULL;
kfree(bp->rx_ring);
bp->rx_ring = NULL;
kfree(bp->bnapi);
bp->bnapi = NULL;
} else {
bnxt_clear_ring_indices(bp);
}
}
static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
{
int i, j, rc, size, arr_size;
void *bnapi;
if (irq_re_init) {
/* Allocate bnapi mem pointer array and mem block for
* all queues
*/
arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
bp->cp_nr_rings);
size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
if (!bnapi)
return -ENOMEM;
bp->bnapi = bnapi;
bnapi += arr_size;
for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
bp->bnapi[i] = bnapi;
bp->bnapi[i]->index = i;
bp->bnapi[i]->bp = bp;
}
bp->rx_ring = kcalloc(bp->rx_nr_rings,
sizeof(struct bnxt_rx_ring_info),
GFP_KERNEL);
if (!bp->rx_ring)
return -ENOMEM;
for (i = 0; i < bp->rx_nr_rings; i++) {
bp->rx_ring[i].bnapi = bp->bnapi[i];
bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
}
bp->tx_ring = kcalloc(bp->tx_nr_rings,
sizeof(struct bnxt_tx_ring_info),
GFP_KERNEL);
if (!bp->tx_ring)
return -ENOMEM;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
j = 0;
else
j = bp->rx_nr_rings;
for (i = 0; i < bp->tx_nr_rings; i++, j++) {
bp->tx_ring[i].bnapi = bp->bnapi[j];
bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
}
rc = bnxt_alloc_stats(bp);
if (rc)
goto alloc_mem_err;
rc = bnxt_alloc_ntp_fltrs(bp);
if (rc)
goto alloc_mem_err;
rc = bnxt_alloc_vnics(bp);
if (rc)
goto alloc_mem_err;
}
bnxt_init_ring_struct(bp);
rc = bnxt_alloc_rx_rings(bp);
if (rc)
goto alloc_mem_err;
rc = bnxt_alloc_tx_rings(bp);
if (rc)
goto alloc_mem_err;
rc = bnxt_alloc_cp_rings(bp);
if (rc)
goto alloc_mem_err;
bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
BNXT_VNIC_UCAST_FLAG;
rc = bnxt_alloc_vnic_attributes(bp);
if (rc)
goto alloc_mem_err;
return 0;
alloc_mem_err:
bnxt_free_mem(bp, true);
return rc;
}
void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
u16 cmpl_ring, u16 target_id)
{
struct hwrm_cmd_req_hdr *req = request;
req->cmpl_ring_req_type =
cpu_to_le32(req_type | (cmpl_ring << HWRM_CMPL_RING_SFT));
req->target_id_seq_id = cpu_to_le32(target_id << HWRM_TARGET_FID_SFT);
req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
}
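/* Typical calling pattern (illustrative; the request type shown is just
 * one example of the pattern used by the HWRM helpers in this driver):
 *
 *	struct hwrm_func_qcaps_input req = {0};
 *
 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
 *	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 */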
int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
{
int i, intr_process, rc;
struct hwrm_cmd_req_hdr *req = msg;
u32 *data = msg;
__le32 *resp_len, *valid;
u16 cp_ring_id, len = 0;
struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++);
memset(resp, 0, PAGE_SIZE);
cp_ring_id = (le32_to_cpu(req->cmpl_ring_req_type) &
HWRM_CMPL_RING_MASK) >>
HWRM_CMPL_RING_SFT;
intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
/* Write request msg to hwrm channel */
__iowrite32_copy(bp->bar0, data, msg_len / 4);
for (i = msg_len; i < HWRM_MAX_REQ_LEN; i += 4)
writel(0, bp->bar0 + i);
/* currently supports only one outstanding message */
if (intr_process)
bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) &
HWRM_SEQ_ID_MASK;
/* Ring channel doorbell */
writel(1, bp->bar0 + 0x100);
i = 0;
if (intr_process) {
/* Wait until hwrm response cmpl interrupt is processed */
while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
i++ < timeout) {
usleep_range(600, 800);
}
if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
req->cmpl_ring_req_type);
return -1;
}
} else {
/* Check if response len is updated */
resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
for (i = 0; i < timeout; i++) {
len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
HWRM_RESP_LEN_SFT;
if (len)
break;
usleep_range(600, 800);
}
if (i >= timeout) {
netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
timeout, req->cmpl_ring_req_type,
req->target_id_seq_id, *resp_len);
return -1;
}
/* Last word of resp contains valid bit */
valid = bp->hwrm_cmd_resp_addr + len - 4;
for (i = 0; i < timeout; i++) {
if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
break;
usleep_range(600, 800);
}
if (i >= timeout) {
netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
timeout, req->cmpl_ring_req_type,
req->target_id_seq_id, len, *valid);
return -1;
}
}
rc = le16_to_cpu(resp->error_code);
if (rc) {
netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
le16_to_cpu(resp->req_type),
le16_to_cpu(resp->seq_id), rc);
return rc;
}
return 0;
}
int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
{
int rc;
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, msg, msg_len, timeout);
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
static