/* $Id: tg3.c,v 1.43 2001-10-11 00:55:40 jgarzik Exp $
* tg3.c: Broadcom Tigon3 ethernet driver.
*
* Copyright (C) 2001 David S. Miller (davem@redhat.com)
* Copyright (C) 2001 Jeff Garzik (jgarzik@mandrakesoft.com)
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include "tg3.h"
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "0.01"
#define DRV_MODULE_RELDATE "Oct 1, 2001"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
#define TG3_DEF_TX_MODE 0
#define TG3_DEF_MSG_ENABLE \
(NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \
NETIF_MSG_LINK | \
NETIF_MSG_TIMER | \
NETIF_MSG_IFDOWN | \
NETIF_MSG_IFUP | \
NETIF_MSG_RX_ERR | \
NETIF_MSG_TX_ERR | \
NETIF_MSG_TX_QUEUED | \
NETIF_MSG_INTR | \
NETIF_MSG_TX_DONE | \
NETIF_MSG_RX_STATUS | \
NETIF_MSG_PKTDATA)
#define TG3_RING_SIZE 512
#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * TG3_RING_SIZE)
#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * TG3_RING_SIZE)
#define TX_BUFFS_AVAIL(TP) \
(((TP)->tx_cons <= (TP)->tx_prod) ? \
(TP)->tx_cons + (TG3_RING_SIZE - 1) - (TP)->tx_prod : \
(TP)->tx_cons - (TP)->tx_prod - 1)
#define NEXT_TX(N) (((N) + 1) & (TG3_RING_SIZE - 1))
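/* Example: with tx_prod == 5 and tx_cons == 2 there are three
 * descriptors in flight, so TX_BUFFS_AVAIL == 2 + 511 - 5 == 508;
 * one slot is always kept empty to distinguish full from empty.
 * NEXT_TX relies on TG3_RING_SIZE being a power of two.
 */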
#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@mandrakesoft.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
#ifdef TG3_DEBUG
static int tg3_debug = TG3_DEBUG;
#else
static int tg3_debug = 0;
#endif
MODULE_PARM(tg3_debug, "i");
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static struct pci_device_id tg3_pci_tbl[] __devinitdata = {
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static void tg3_write_indirect_reg32(struct tg3 *tp, unsigned long off, unsigned long val)
{
if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
unsigned long flags;
spin_lock_irqsave(&tp->indirect_lock, flags);
pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
spin_unlock_irqrestore(&tp->indirect_lock, flags);
} else {
writel(val, tp->regs + off);
}
}
#define tw32(reg,val) tg3_write_indirect_reg32(tp,(reg),(val))
#define tw32_mailbox(reg, val) writel(((val) & 0xffffffff), tp->regs + (reg))
#define tw16(reg,val) writew(((val) & 0xffff), tp->regs + (reg))
#define tw8(reg,val) writeb(((val) & 0xff), tp->regs + (reg))
#define tr32(reg) readl(tp->regs + (reg))
#define tr16(reg) readw(tp->regs + (reg))
#define tr8(reg) readb(tp->regs + (reg))
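/* tw32 routes through tg3_write_indirect_reg32 so that, on 5700 BX
 * chips in PCI-X mode (TG3_FLAG_PCIX_TARGET_HWBUG), register writes
 * go through PCI config space instead.  Mailbox writes always go
 * straight to the MMIO window.
 */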
#define PHY_BUSY_LOOPS 5000
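/* MDIO access to the PHY: build a management frame in MAC_MI_COM
 * (PHY address, register, command), fire it with MI_COM_START, and
 * poll MI_COM_BUSY until the chip is done.  Auto-polling is paused
 * around each access and restored afterwards.
 */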
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
u32 frame_val;
int loops, ret;
if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
tw32(MAC_MI_MODE,
(tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
udelay(40);
}
*val = 0xffffffff;
frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
MI_COM_PHY_ADDR_MASK);
frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
MI_COM_REG_ADDR_MASK);
frame_val |= (MI_COM_CMD_READ | MI_COM_START);
tw32(MAC_MI_COM, frame_val);
loops = PHY_BUSY_LOOPS;
while (loops-- > 0) {
frame_val = tr32(MAC_MI_COM);
if ((frame_val & MI_COM_BUSY) == 0)
break;
udelay(10);
}
ret = -EBUSY;
	if (loops >= 0) {	/* loops is -1 only on timeout */
*val = frame_val & MI_COM_DATA_MASK;
ret = 0;
}
if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
tw32(MAC_MI_MODE, tp->mi_mode);
udelay(40);
}
return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
u32 frame_val;
int loops, ret;
if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
tw32(MAC_MI_MODE,
(tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
udelay(40);
}
frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
MI_COM_PHY_ADDR_MASK);
frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
MI_COM_REG_ADDR_MASK);
frame_val |= (val & MI_COM_DATA_MASK);
frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
tw32(MAC_MI_COM, frame_val);
loops = PHY_BUSY_LOOPS;
while (loops-- > 0) {
frame_val = tr32(MAC_MI_COM);
if ((frame_val & MI_COM_BUSY) == 0)
break;
udelay(10);
}
ret = -EBUSY;
	if (loops >= 0)		/* loops is -1 only on timeout */
ret = 0;
if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
tw32(MAC_MI_MODE, tp->mi_mode);
udelay(40);
}
return ret;
}
/* Reset the Tigon3 PHY.  If the FORCE argument is zero and we
 * currently have a valid link, this is a no-op.
 */
static int tg3_phy_reset(struct tg3 *tp, int force)
{
u32 phy_status, phy_control;
int err, limit;
err = tg3_readphy(tp, MII_BMSR, &phy_status);
err |= tg3_readphy(tp, MII_BMSR, &phy_status);
if (err != 0)
return -EBUSY;
/* If we have link, and not forcing a reset, then nothing
* to do.
*/
if ((phy_status & BMSR_LSTATUS) != 0 && (force == 0))
return 0;
/* OK, reset it, and poll the BMCR_RESET bit until it
* clears or we time out.
*/
phy_control = BMCR_RESET;
err = tg3_writephy(tp, MII_BMCR, phy_control);
if (err != 0)
return -EBUSY;
limit = 5000;
while (limit--) {
err = tg3_readphy(tp, MII_BMCR, &phy_control);
if (err != 0)
return -EBUSY;
if ((phy_control & BMCR_RESET) == 0) {
udelay(40);
return 0;
}
udelay(10);
}
return -EBUSY;
}
static void tg3_stop_hw(struct tg3 *);
static void tg3_init_rings(struct tg3 *);
static void tg3_init_hw(struct tg3 *);
static int tg3_abnormal_irq(struct net_device *dev, struct tg3 *tp)
{
/* XXX Implement me XXX */
return 0;
}
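/* Reclaim completed Tx descriptors: walk from our software consumer
 * index to the hardware consumer index in the status block, unmapping
 * each fragment and freeing the skb once its last fragment is done.
 */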
static void tg3_tx(struct tg3 *tp)
{
int work = 100;
u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
u32 sw_idx = tp->tx_cons;
while (sw_idx != hw_idx) {
struct ring_info *ri = &tp->tx_buffers[sw_idx];
struct sk_buff *skb = ri->skb;
u32 len;
if (skb == NULL)
BUG();
if (ri->frag > 0) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[ri->frag - 1];
len = frag->size;
} else {
			/* Match the mapping made in tg3_start_xmit: the
			 * first descriptor covers only the linear area.
			 */
			len = skb->len - skb->data_len;
}
pci_unmap_single(tp->pdev, ri->mapping, len, PCI_DMA_TODEVICE);
/* if no frags, or on last frag, free the skb */
if ((skb_shinfo(skb)->nr_frags == 0) ||
(skb_shinfo(skb)->nr_frags == ri->frag))
dev_kfree_skb_irq(skb);
		memset(ri, 0, sizeof(*ri));
rmb();
hw_idx = tp->hw_status->idx[0].tx_consumer;
sw_idx = NEXT_TX(sw_idx);
work--;
if (work <= 0) {
printk(KERN_WARNING "%s: tx work limit reached\n",
tp->dev->name);
break;
}
}
tp->tx_cons = sw_idx;
if (netif_queue_stopped(tp->dev) && TX_BUFFS_AVAIL(tp))
netif_wake_queue(tp->dev);
}
static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_buffer_desc *desc,
struct ring_info *map)
{
dma_addr_t mapping;
	struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
	map->skb = skb;
	if (skb == NULL)
		return -ENOMEM;
	skb->dev = tp->dev;
	/* Reserve the alignment pad first, then map the area the
	 * chip will actually DMA into, starting at skb->data.
	 */
	skb_reserve(skb, RX_OFFSET);
	skb_put(skb, ETH_FRAME_LEN);
	mapping = pci_map_single(tp->pdev, skb->data,
				 PKT_BUF_SZ - RX_OFFSET, PCI_DMA_FROMDEVICE);
	map->mapping = mapping;
	desc->addr_hi = 0;
	desc->addr_lo = mapping & 0xffffffff;
	desc->len = PKT_BUF_SZ - RX_OFFSET;
return 0;
}
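/* Receive one batch of packets.  Frames larger than RX_COPY_THRESHOLD
 * are passed up in the ring buffer itself (the slot is refilled with a
 * fresh skb below); smaller frames are copied so the ring buffer can
 * be reused in place.
 */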
static void tg3_rx(struct tg3 *tp)
{
unsigned long rx_std_ptr = tp->rx_std_ptr;
u16 hw_idx, sw_idx;
int work = 100;
hw_idx = tp->hw_status->idx[0].rx_producer;
	sw_idx = rx_std_ptr % TG3_RING_SIZE;
while (sw_idx != hw_idx) {
unsigned int len;
struct tg3_rx_buffer_desc *desc = &tp->rx_std[sw_idx];
struct sk_buff *skb = tp->rx_std_buffers[sw_idx].skb;
dma_addr_t dma_addr = tp->rx_std_buffers[sw_idx].mapping;
if (desc->err_flag &&
(desc->err_flag != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
/* XXX finish me XXX */
tp->net_stats.rx_errors++;
goto next_pkt;
}
len = desc->len - 4; /* omit crc */
		if (len > RX_COPY_THRESHOLD) {
			pci_unmap_single(tp->pdev, dma_addr,
					 PKT_BUF_SZ - RX_OFFSET,
					 PCI_DMA_FROMDEVICE);
			/* Trim the original skb for the netif, and clear
			 * the slot so the refill loop below allocates a
			 * replacement buffer for it.
			 */
			skb_trim(skb, len);
			tp->rx_std_buffers[sw_idx].skb = NULL;
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len + RX_OFFSET);
if (copy_skb == NULL) {
tp->net_stats.rx_dropped++;
goto next_pkt;
}
copy_skb->dev = tp->dev;
skb_reserve(copy_skb, RX_OFFSET);
skb_put(copy_skb, len);
pci_dma_sync_single(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
memcpy(copy_skb->data, skb->data, len);
/* We'll reuse the original ring buffer. */
skb = copy_skb;
}
#if 0
skb->csum = desc->tcp_udp_csum;
skb->ip_summed = CHECKSUM_HW;
#endif
skb->protocol = eth_type_trans(skb, tp->dev);
netif_rx(skb);
tp->dev->last_rx = jiffies;
next_pkt:
rx_std_ptr++;
hw_idx = tp->hw_status->idx[0].rx_producer;
sw_idx = rx_std_ptr % TG3_RING_SIZE;
work--;
if (work <= 0) {
printk(KERN_WARNING "%s: rx work limit reached\n", tp->dev->name);
break;
}
}
/* refill rx ring */
sw_idx = tp->rx_std_ptr % TG3_RING_SIZE;
while (tp->rx_std_ptr != rx_std_ptr) {
if (!tp->rx_std_buffers[sw_idx].skb &&
tg3_alloc_rx_skb(tp, &tp->rx_std[sw_idx],
&tp->rx_std_buffers[sw_idx]))
break;
tp->rx_std_ptr++;
sw_idx = tp->rx_std_ptr % TG3_RING_SIZE;
work--;
if (work <= 0) {
printk(KERN_WARNING "%s: rx work limit reached during refill\n", tp->dev->name);
break;
}
}
tw32(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
}
static void tg3_interrupt_main_work(struct tg3 *tp)
{
struct tg3_hw_status *sblk = tp->hw_status;
/* XXX Do link status stuff here XXX */
	if (sblk->idx[0].rx_producer != (tp->rx_std_ptr % TG3_RING_SIZE))
tg3_rx(tp);
if (sblk->idx[0].tx_consumer != tp->tx_cons)
tg3_tx(tp);
}
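/* Non-tagged interrupt handler.  Writing 1 to the interrupt mailbox
 * masks further interrupts while we work; writing 0 re-enables them,
 * and the read-back flushes the posted write before we re-check
 * SD_STATUS_UPDATED.
 */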
static void tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
struct net_device *dev = dev_id;
struct tg3 *tp = dev->priv;
struct tg3_hw_status *sblk = tp->hw_status;
spin_lock(&tp->lock);
while (sblk->status & SD_STATUS_UPDATED) {
tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
0x00000001);
sblk->status &= ~SD_STATUS_UPDATED;
tg3_interrupt_main_work(tp);
tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
0x00000000);
tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
}
spin_unlock(&tp->lock);
}
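/* Tagged-status interrupt handler.  With TG3_FLAG_TAGGED_IRQ_STATUS
 * the status block carries a tag which we ack by writing (tag << 24)
 * to the mailbox, telling the chip how much work we have seen; loop
 * until the tag stops changing under us.
 */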
static void tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
{
struct net_device *dev = dev_id;
struct tg3 *tp = dev->priv;
struct tg3_hw_status *sblk = tp->hw_status;
spin_lock(&tp->lock);
if (sblk->status & SD_STATUS_UPDATED) {
u32 oldtag;
tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
0x00000001);
oldtag = sblk->status;
while (1) {
u32 newtag;
sblk->status &= ~SD_STATUS_UPDATED;
barrier();
tg3_interrupt_main_work(tp);
newtag = sblk->status;
if (newtag == oldtag) {
tw32_mailbox(MAILBOX_INTERRUPT_0 +
TG3_64BIT_REG_LOW,
newtag << 24);
break;
}
oldtag = newtag;
}
}
spin_unlock(&tp->lock);
}
static void tg3_tx_timeout(struct net_device *dev)
{
struct tg3 *tp = dev->priv;
printk(KERN_ERR "%s: transmit timed out, resetting\n",
dev->name);
/* XXX More diagnostics XXX */
spin_lock_irq(&tp->lock);
tg3_stop_hw(tp);
tg3_init_rings(tp);
tg3_init_hw(tp);
spin_unlock_irq(&tp->lock);
netif_wake_queue(dev);
}
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tg3 *tp = dev->priv;
	dma_addr_t mapping;
	u32 len, entry = tp->tx_prod;
	struct tg3_tx_buffer_desc *txd;
spin_lock_irq(&tp->lock);
	/* If there is no room for this skb and all of its fragments,
	 * stop the queue until the interrupt handler reaps some Tx
	 * descriptors.
	 */
if (TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
spin_unlock_irq(&tp->lock);
return 1;
}
	/* queue skb data, a.k.a. the main skb fragment; this covers
	 * only the linear area, any page fragments are queued below,
	 * and only the last descriptor of the packet carries
	 * TXD_FLAG_END.
	 */
	tp->tx_buffers[entry].skb = skb;
	txd = &tp->tx_ring[entry];
	len = skb->len - skb->data_len;
	tp->tx_buffers[entry].frag = 0;
	tp->tx_buffers[entry].mapping =
		mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	txd->addr_hi = 0; /* FIXME for PCI64 */
	txd->addr_lo = cpu_to_le32(mapping);
	txd->u1.len_flags = cpu_to_le32(len |
					(skb_shinfo(skb)->nr_frags == 0 ?
					 TXD_FLAG_END : 0));
	txd->u2.vlan_tag = 0;
	entry = NEXT_TX(entry);
/* now loop through additional data fragments, and queue them */
if (skb_shinfo(skb)->nr_frags > 0) {
unsigned i;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
tp->tx_buffers[entry].skb = skb;
tp->tx_buffers[entry].frag = i + 1;
txd = &tp->tx_ring[entry];
len = frag->size;
tp->tx_buffers[entry].mapping =
mapping = pci_map_single(tp->pdev,
(void*)page_address(frag->page) +
frag->page_offset,
len, PCI_DMA_TODEVICE);
txd->addr_hi = 0; /* FIXME for PCI64 */
txd->addr_lo = cpu_to_le32(mapping);
			txd->u1.len_flags =
				cpu_to_le32(len |
					    (i == skb_shinfo(skb)->nr_frags - 1 ?
					     TXD_FLAG_END : 0));
txd->u2.vlan_tag = 0;
entry = NEXT_TX(entry);
}
}
/* packets are ready, update Tx producer idx local and on card */
tw32(GRCMBOX_SNDHOST_PROD_IDX_0, entry);
tp->tx_prod = entry;
spin_unlock_irq(&tp->lock);
return 0;
}
#if 1
/* XXX Until jumbogram support is coded XXX */
#define MAX_MTU 1500
#else
#define MAX_MTU 9000
#endif
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
struct tg3 *tp = dev->priv;
if (new_mtu < 0 || new_mtu > MAX_MTU)
return -EINVAL;
spin_lock_irq(&tp->lock);
tg3_stop_hw(tp);
dev->mtu = new_mtu;
tg3_init_rings(tp);
tg3_init_hw(tp);
spin_unlock_irq(&tp->lock);
return 0;
}
static struct block_reset_ent {
unsigned long reg_off;
u32 bits;
} common_block_reset_list[] = {
{ SNDDATAI_MODE, SNDDATAI_MODE_RESET },
{ SNDDATAC_MODE, SNDDATAC_MODE_RESET },
{ SNDBDS_MODE, SNDBDS_MODE_RESET },
{ SNDBDI_MODE, SNDBDI_MODE_RESET },
{ SNDBDC_MODE, SNDBDC_MODE_RESET },
{ RCVLPC_MODE, RCVLPC_MODE_RESET },
{ RCVDBDI_MODE, RCVDBDI_MODE_RESET },
{ RCVDCC_MODE, RCVDCC_MODE_RESET },
{ RCVBDI_MODE, RCVBDI_MODE_RESET },
{ RCVCC_MODE, RCVCC_MODE_RESET },
{ RCVLSC_MODE, RCVLSC_MODE_RESET },
{ MBFREE_MODE, MBFREE_MODE_RESET },
{ HOSTCC_MODE, HOSTCC_MODE_RESET },
{ MEMARB_MODE, MEMARB_MODE_RESET },
{ BUFMGR_MODE, BUFMGR_MODE_RESET },
{ RDMAC_MODE, RDMAC_MODE_RESET },
{ WDMAC_MODE, WDMAC_MODE_RESET },
};
static const unsigned int max_wait_cnt = 10000;
/* To reset a block, set only the reset bit and poll till the
* reset bit clears.
*/
static void tg3_reset_block(struct tg3 *tp, struct block_reset_ent *bp)
{
unsigned int i;
tw32(bp->reg_off, bp->bits);
for (i = 0; i < max_wait_cnt; i++) {
u32 val = tr32(bp->reg_off);
if ((val & bp->bits) == 0)
break;
udelay(100);
}
if (i == max_wait_cnt)
printk(KERN_ERR PFX "tg3_reset_block timed out, "
"ofs=%lx reset_bit=%x\n",
bp->reg_off, bp->bits);
}
/* To stop a block, clear the enable bit and poll till it
* clears.
*/
static void tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
{
unsigned int i;
u32 val;
val = tr32(ofs);
val &= ~enable_bit;
tw32(ofs, val);
for (i = 0; i < max_wait_cnt; i++) {
val = tr32(ofs);
if ((val & enable_bit) == 0)
break;
udelay(100);
}
if (i == max_wait_cnt)
printk(KERN_ERR PFX "tg3_stop_block timed out, "
"ofs=%lx enable_bit=%x\n",
ofs, enable_bit);
}
static void tg3_stop_intr(struct tg3 *tp)
{
/* Mask PCI interrupt. */
tw32(TG3PCI_MISC_HOST_CTRL,
tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
/* Mask mailbox interrupts. */
tw32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 1);
tw32(MAILBOX_INTERRUPT_1 + TG3_64BIT_REG_LOW, 1);
tw32(MAILBOX_INTERRUPT_2 + TG3_64BIT_REG_LOW, 1);
tw32(MAILBOX_INTERRUPT_3 + TG3_64BIT_REG_LOW, 1);
}
static void tg3_stop_rx(struct tg3 *tp)
{
tp->rx_mode &= ~RX_MODE_ENABLE;
tw32(MAC_RX_MODE, tp->rx_mode);
tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
tp->mac_mode &= ~MAC_MODE_RDE_ENABLE;
tw32(MAC_MODE, tp->mac_mode);
}
static void tg3_stop_tx(struct tg3 *tp)
{
tp->tx_mode &= ~TX_MODE_ENABLE;
tw32(MAC_TX_MODE, tp->tx_mode);
tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
tw32(MAC_MODE, tp->mac_mode);
}
static void tg3_stop_misc(struct tg3 *tp)
{
/* XXX finish me XXX */
tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
/* reset flow-through queues */
tw32(FTQ_RESET, 0xffffffff);
tw32(FTQ_RESET, 0x00000000);
}
static void tg3_stop_paranoid(struct tg3 *tp)
{
tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
}
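/* Quiesce the whole chip: mask interrupts first, then stop the rx and
 * tx engines starting at the MAC, and finally stop the DMA engines and
 * housekeeping blocks.
 */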
static void tg3_stop_hw(struct tg3 *tp)
{
tg3_stop_intr(tp);
tg3_stop_rx(tp);
tg3_stop_tx(tp);
tg3_stop_misc(tp);
tg3_stop_paranoid(tp);
}
static void tg3_link_timer(unsigned long data)
{
struct tg3 *tp = (struct tg3 *) data;
int restart_timer = 0;
/* XXX Implement me XXX */
if (restart_timer) {
tp->link_timer.expires = jiffies + ((12 * HZ) / 10);
add_timer(&tp->link_timer);
}
}
/* XXX Finish implementing me XXX */
static void tg3_init_hw(struct tg3 *tp)
{
unsigned int i;
u32 tmp;
u16 pci_cmd;
u8 cacheline_sz;
pci_cmd = tr16(TG3PCI_COMMAND);
cacheline_sz = tr8(TG3PCI_CACHELINESZ);
/* "global reset" (core clock reset, grc register block) */
tw32(GRC_MISC_CFG, GRC_MISC_CFG_CORECLK_RESET);
	mdelay(1);
tw16(TG3PCI_COMMAND, pci_cmd);
/* If relaxed ordering enabled, disable it. */
tmp = tr32(TG3PCI_X_CAPS);
if (tmp & PCIX_CAPS_RELAXED_ORDERING) {
tmp &= ~PCIX_CAPS_RELAXED_ORDERING;
tw32(TG3PCI_X_CAPS, tmp);
}
/* probably want other stuff here */
tw32(TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
/* handle reset bits for each register block */
for (i = 0; i < ARRAY_SIZE(common_block_reset_list); i++)
tg3_reset_block(tp, &common_block_reset_list[i]);
tw32(MAC_MODE, 0);
tw8(TG3PCI_CACHELINESZ, cacheline_sz);
	/* Set up the timer prescaler register.  The core clock is
	 * always 66MHz.
	 */
	tw32(GRC_MISC_CFG,
	     (65 << GRC_MISC_CFG_PRESCALAR_SHIFT));
}
static void tg3_zero_rings(struct tg3 *tp)
{
memset(tp->rx_std_buffers, 0,
sizeof(struct ring_info) * (TG3_RING_SIZE * 2));
memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
} else {
long i;
		for (i = 0; i < TG3_TX_RING_BYTES; i += 4) {
			unsigned long addr;
			addr = (tp->regs + NIC_SRAM_WIN_BASE +
				NIC_SRAM_BUFFER_DESC);
			addr += i;	/* i is a byte offset */
			writel(0, addr);
		}
}
}
static void tg3_init_rings(struct tg3 *tp)
{
unsigned int i;
tg3_zero_rings(tp);
for (i = 0; i < TG3_RING_SIZE; i++) {
struct tg3_rx_buffer_desc *desc = &tp->rx_std[i];
desc->idx = i;
desc->flags = RXD_FLAG_END;
}
for (i = 0; i < TG3_RING_SIZE; i++)
if (tg3_alloc_rx_skb(tp, &tp->rx_std[i], &tp->rx_std_buffers[i]))
break;
if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
for (i = 0; i < TG3_RING_SIZE; i++) {
struct tg3_tx_buffer_desc *desc = &tp->tx_ring[i];
desc->u1.s1.flags = TXD_FLAG_END;
}
} else {
for (i = 0; i < TG3_RING_SIZE; i++) {
unsigned long addr = (tp->regs + NIC_SRAM_WIN_BASE +
NIC_SRAM_BUFFER_DESC);
addr += (i * TXD_SIZE);
writew(TXD_FLAG_END, addr + TXD_FLAGS);
}
}
}
static void tg3_clean_rings (struct tg3 *tp)
{
unsigned int i;
for (i = 0; i < TG3_RING_SIZE; i++) {
dma_addr_t mapping = tp->rx_std_buffers[i].mapping;
struct sk_buff *skb = tp->rx_std_buffers[i].skb;
if (!skb)
continue;
		pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ - RX_OFFSET,
				 PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
tp->rx_std_buffers[i].skb = NULL;
tp->rx_std_buffers[i].mapping = 0xdeadf00d;
}
/* XXX clean Tx XXX */
tg3_zero_rings(tp);
}
static int tg3_open(struct net_device *dev)
{
struct tg3 *tp = dev->priv;
int err;
tp->hw_status->status |= SD_STATUS_UPDATED;
if (tp->tg3_flags & TG3_FLAG_TAGGED_IRQ_STATUS)
err = request_irq(dev->irq, tg3_interrupt_tagged,
SA_SHIRQ, dev->name, dev);
else
err = request_irq(dev->irq, tg3_interrupt,
SA_SHIRQ, dev->name, dev);
if (err)
return err;
tg3_stop_hw(tp);
tg3_init_rings(tp);
tg3_init_hw(tp);
netif_start_queue(dev);
return 0;
}
static int tg3_close(struct net_device *dev)
{
struct tg3 *tp = dev->priv;
netif_stop_queue(dev);
del_timer_sync(&tp->link_timer);
tg3_stop_hw(tp);
tg3_clean_rings(tp);
free_irq(dev->irq, dev);
return 0;
}
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
struct tg3 *tp = dev->priv;
struct net_device_stats *stats = &tp->net_stats;
struct tg3_hw_stats *hw_stats = tp->hw_stats;
stats->rx_packets =
hw_stats->rx_ucast_packets +
hw_stats->rx_mcast_packets +
hw_stats->rx_bcast_packets;
stats->tx_packets =
hw_stats->COS_out_packets[0];
stats->rx_bytes = hw_stats->rx_octets;
stats->tx_bytes = hw_stats->tx_out_octets;
stats->rx_errors = hw_stats->rx_errors;
stats->tx_errors =
hw_stats->tx_errors +
hw_stats->tx_mac_errors +
hw_stats->tx_carrier_sense_errors +
hw_stats->tx_discards;
stats->multicast = hw_stats->rx_mcast_packets;
stats->collisions = hw_stats->tx_collisions;
stats->rx_length_errors =
hw_stats->rx_frame_too_long_errors +
hw_stats->rx_undersize_packets;
stats->rx_over_errors = hw_stats->rxbds_empty;
stats->rx_crc_errors = hw_stats->rx_fcs_errors;
stats->rx_frame_errors = hw_stats->rx_align_errors;
stats->tx_aborted_errors = hw_stats->tx_discards;
stats->tx_carrier_errors = hw_stats->tx_carrier_sense_errors;
return stats;
}
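/* Standard little-endian (reflected) CRC-32 as used by Ethernet,
 * polynomial 0xedb88320, computed bit by bit.  Used only to hash
 * multicast addresses into the MAC_HASH_REG_* filters below.
 */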
static inline u32 calc_crc(unsigned char *buf, int len)
{
u32 reg;
u32 tmp;
int j, k;
reg = 0xffffffff;
for (j = 0; j < len; j++) {
reg ^= buf[j];
for (k = 0; k < 8; k++) {
tmp = reg & 0x01;
reg >>= 1;
if (tmp) {
reg ^= 0xedb88320;
}
}
}
return ~reg;
}
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
/* accept or reject all multicast frames */
tw32 (MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
tw32 (MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
tw32 (MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
tw32 (MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
static void tg3_set_rx_mode(struct net_device *dev)
{
struct tg3 *tp = dev->priv;
u32 rx_mode;
spin_lock_irq(&tp->lock);
rx_mode = tp->rx_mode & ~RX_MODE_PROMISC;
if (dev->flags & IFF_PROMISC) {
/* Promiscuous mode. */
rx_mode |= RX_MODE_PROMISC;
} else if (dev->flags & IFF_ALLMULTI) {
/* Accept all multicast. */
tg3_set_multi (tp, 1);
} else if (dev->mc_count < 1) {
/* Reject all multicast. */
tg3_set_multi (tp, 0);
} else {
/* Accept one or more multicast(s). */
struct dev_mc_list *mclist;
unsigned int i;
u32 mc_filter[4] = { 0, };
u32 regidx;
u32 bit;
u32 crc;
for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
i++, mclist = mclist->next) {
crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
bit = ~crc & 0x7f;
regidx = (bit & 0x60) >> 5;
bit &= 0x1f;
mc_filter[regidx] |= (1 << bit);
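			/* Worked example: if ~crc & 0x7f == 0x47 then
			 * regidx == (0x47 & 0x60) >> 5 == 2 and
			 * bit == 0x47 & 0x1f == 7, so bit 7 of
			 * MAC_HASH_REG_2 gets set.
			 */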
}
tw32 (MAC_HASH_REG_0, mc_filter[0]);
tw32 (MAC_HASH_REG_1, mc_filter[1]);
tw32 (MAC_HASH_REG_2, mc_filter[2]);
tw32 (MAC_HASH_REG_3, mc_filter[3]);
}
if (rx_mode != tp->rx_mode) {
tp->rx_mode = rx_mode;
tw32 (MAC_RX_MODE, rx_mode);
}
spin_unlock_irq(&tp->lock);
}
static int tg3_ethtool_ioctl (struct net_device *dev, void *useraddr)
{
struct tg3 *tp = dev->priv;
struct pci_dev *pci_dev = tp->pdev;
u32 ethcmd;
if (copy_from_user (&ethcmd, useraddr, sizeof (ethcmd)))
return -EFAULT;
switch (ethcmd) {
case ETHTOOL_GDRVINFO:{
struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
strcpy (info.driver, DRV_MODULE_NAME);
strcpy (info.version, DRV_MODULE_VERSION);
strcpy (info.bus_info, pci_dev->slot_name);
if (copy_to_user (useraddr, &info, sizeof (info)))
return -EFAULT;
return 0;
}
}
return -EOPNOTSUPP;
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
switch(cmd) {
case SIOCETHTOOL:
return tg3_ethtool_ioctl(dev, (void *) ifr->ifr_data);
default:
/* do nothing */
break;
}
return -EOPNOTSUPP;
}
struct subsys_tbl_ent {
u16 subsys_vendor, subsys_devid;
u32 phy_id;
};
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
/* Broadcom boards. */
{ 0x14e4, 0x1644, PHY_ID_BCM5401 }, { 0x14e4, 0x0001, PHY_ID_BCM5701 },
{ 0x14e4, 0x0002, PHY_ID_BCM8002 }, { 0x14e4, 0x0003, PHY_ID_SERDES },
{ 0x14e4, 0x0005, PHY_ID_BCM5701 }, { 0x14e4, 0x0006, PHY_ID_BCM5701 },
{ 0x14e4, 0x0007, PHY_ID_SERDES }, { 0x14e4, 0x0008, PHY_ID_BCM5701 },
{ 0x14e4, 0x8008, PHY_ID_BCM5701 }, { 0x14e4, 0x0009, PHY_ID_BCM5701 },
{ 0x14e4, 0x8009, PHY_ID_BCM5701 },
/* 3com boards. */
{ 0x10b7, 0x1000, PHY_ID_BCM5401 }, { 0x10b7, 0x1006, PHY_ID_BCM5701 },
{ 0x10b7, 0x1004, PHY_ID_SERDES }, { 0x10b7, 0x1007, PHY_ID_BCM5701 },
{ 0x10b7, 0x1008, PHY_ID_BCM5701 },
/* DELL boards. */
{ 0x1028, 0x00d1, PHY_ID_BCM5401 }, { 0x1028, 0x0106, PHY_ID_BCM5401 },
{ 0x1028, 0x0109, PHY_ID_BCM5411 }, { 0x1028, 0x010a, PHY_ID_BCM5411 },
/* Compaq boards. */
{ 0x0e11, 0x007c, PHY_ID_BCM5701 }, { 0x0e11, 0x007d, PHY_ID_SERDES },
{ 0x0e11, 0x0085, PHY_ID_BCM5701 }
};
#define SUBSYS_TBL_NENTS ARRAY_SIZE(subsys_id_to_phy_id)
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
enum phy_led_mode eeprom_led_mode;
int i, eeprom_signature_found, err;
tp->phy_id = PHY_ID_INVALID;
for (i = 0; i < SUBSYS_TBL_NENTS; i++) {
if ((subsys_id_to_phy_id[i].subsys_vendor ==
tp->pci_subsys_ven_id) &&
(subsys_id_to_phy_id[i].subsys_devid ==
tp->pci_subsys_id)) {
tp->phy_id = subsys_id_to_phy_id[i].phy_id;
break;
}
}
eeprom_phy_id = PHY_ID_INVALID;
eeprom_led_mode = led_mode_auto;
eeprom_signature_found = 0;
if (tr32(NIC_DATA_SIG) == NIC_DATA_SIG_MAGIC) {
u32 nic_cfg = tr32(NIC_DATA_CFG);
eeprom_signature_found = 1;
if ((nic_cfg & NIC_DATA_CFG_PHY_TYPE_MASK) ==
NIC_DATA_CFG_PHY_TYPE_FIBER) {
eeprom_phy_id = PHY_ID_SERDES;
} else {
u32 nic_phy_id = tr32(NIC_DATA_PHY_ID);
if (nic_phy_id != 0) {
u32 id1 = nic_phy_id & NIC_DATA_PHY_ID1_MASK;
u32 id2 = nic_phy_id & NIC_DATA_PHY_ID2_MASK;
eeprom_phy_id = (id1 >> 16) << 10;
eeprom_phy_id |= (id2 & 0xfc00) << 16;
eeprom_phy_id |= (id2 & 0x03ff) << 0;
}
}
switch (nic_cfg & NIC_DATA_CFG_LED_MODE_MASK) {
case NIC_DATA_CFG_LED_TRIPLE_SPD:
eeprom_led_mode = led_mode_three_link;
break;
case NIC_DATA_CFG_LED_LINK_SPD:
eeprom_led_mode = led_mode_link10;
break;
default:
eeprom_led_mode = led_mode_auto;
break;
		}
}
err = tg3_phy_reset(tp, 0);
if (err)
return err;
	/* Now read the physical PHY_ID from the chip and verify
	 * that it is sane.  If it doesn't look good, fall back to
	 * the hard-coded, subsystem-table-based PHY_ID, and failing
	 * that, to the value found in the eeprom area.
	 */
err = tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
if (!err) {
u32 hw_phy_id, hw_phy_id_masked;
hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
if (!KNOWN_PHY_ID(hw_phy_id_masked)) {
if (tp->phy_id != PHY_ID_INVALID) {
/* If the subsys ven/id table lookup found
* an entry, we will just use that.
*/
hw_phy_id = tp->phy_id;
} else if (eeprom_signature_found != 0) {
hw_phy_id = eeprom_phy_id;
}
hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
if (!KNOWN_PHY_ID(hw_phy_id_masked))
return -ENODEV;
tp->phy_id = hw_phy_id;
}
}
if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
tp->tg3_flags |= TG3_FLAG_PHY_RESET_ON_INIT;
if (tp->tg3_flags & TG3_FLAG_PHY_RESET_ON_INIT) {
err = tg3_phy_reset(tp, 1);
if (err)
return err;
/* These chips, when reset, only advertise 10Mb capabilities.
* Fix that.
*/
err = tg3_writephy(tp, MII_ADVERTISE,
(ADVERTISE_CSMA |
ADVERTISE_10HALF | ADVERTISE_10FULL |
ADVERTISE_100HALF | ADVERTISE_100FULL));
err |= tg3_writephy(tp, MII_TG3_CTRL,
(MII_TG3_CTRL_ADV_1000_HALF |
MII_TG3_CTRL_ADV_1000_FULL |
MII_TG3_CTRL_AS_MASTER |
MII_TG3_CTRL_ENABLE_AS_MASTER));
err |= tg3_writephy(tp, MII_BMCR,
(BMCR_ANRESTART | BMCR_ANENABLE));
}
if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
/* Turn off tap power management. */
err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c20);
err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
udelay(40);
}
return err;
}
static int __devinit tg3_read_partno(struct tg3 *tp)
{
unsigned char vpd_data[256];
int i;
/* Enable seeprom accesses. */
tw32(GRC_LOCAL_CTRL, GRC_LCLCTRL_AUTO_SEEPROM);
udelay(100);
for (i = 0; i < 256; i += 4) {
u32 tmp;
u16 stat;
int limit = 5000;
pci_write_config_word(tp->pdev, TG3PCI_VPD_ADDR_FLAG, i);
while (--limit) {
pci_read_config_word(tp->pdev, TG3PCI_VPD_ADDR_FLAG, &stat);
if ((stat & 0x8000) != 0)
break;
udelay(100);
}
if (!limit)
return -EBUSY;
pci_read_config_dword(tp->pdev, TG3PCI_VPD_DATA, &tmp);
vpd_data[i + 0] = ((tmp >> 0) & 0xff);
vpd_data[i + 1] = ((tmp >> 8) & 0xff);
vpd_data[i + 2] = ((tmp >> 16) & 0xff);
vpd_data[i + 3] = ((tmp >> 24) & 0xff);
}
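	/* The VPD area is a sequence of resources: tag 0x82 is the
	 * identifier string, 0x91 the read/write area, and 0x90 the
	 * read-only area whose two-character keywords (such as "PN",
	 * the part number) are scanned below.
	 */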
/* Now parse and find the part number. */
for (i = 0; i < 256; ) {
unsigned char val = vpd_data[i];
int block_end;
if (val == 0x82 || val == 0x91) {
i = (i + 3 +
(vpd_data[i + 1] +
(vpd_data[i + 2] << 8)));
continue;
}
if (val != 0x90)
return -ENODEV;
block_end = (i + 3 +
(vpd_data[i + 1] +
(vpd_data[i + 2] << 8)));
i += 3;
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];
				if (partno_len > 24)
					return -EINVAL;
				memcpy(tp->board_part_number,
				       &vpd_data[i + 3],
				       partno_len);
				/* Success. */
				return 0;
			}
			/* Not "PN"; skip over this keyword's two name
			 * bytes, its length byte, and its data.
			 */
			i += 3 + vpd_data[i + 2];
		}
/* Part number not found. */
break;
}
return -ENODEV;
}
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
u32 misc_ctrl_reg;
u32 subsys_vend_id_reg, cacheline_sz_reg;
u32 pci_state_reg, grc_misc_cfg;
int err;
pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
&misc_ctrl_reg);
tp->pci_chip_rev_id = (misc_ctrl_reg >>
MISC_HOST_CTRL_CHIPREV_SHIFT);
pci_read_config_dword(tp->pdev, TG3PCI_SUBSYSVENID,
&subsys_vend_id_reg);
tp->pci_subsys_ven_id = (subsys_vend_id_reg & 0xffff);
tp->pci_subsys_id = (subsys_vend_id_reg >> 16);
pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
&cacheline_sz_reg);
tp->pci_cacheline_sz = (cacheline_sz_reg >> 24) & 0xff;
tp->pci_lat_timer = (cacheline_sz_reg >> 16) & 0xff;
tp->pci_hdr_type = (cacheline_sz_reg >> 8) & 0xff;
tp->pci_bist = (cacheline_sz_reg >> 0) & 0xff;
pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
&pci_state_reg);
if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
/* If this is a 5700 BX chipset, and we are in PCI-X
* mode, enable register write workaround.
*
* The workaround is to use indirect register accesses
* for all chip writes not to mailbox registers.
*/
if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
u32 pm_reg;
u16 pci_cmd;
tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
&pm_reg);
pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
pm_reg);
/* Also, force SERR#/PERR# in PCI command. */
pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
}
}
if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
tp->tg3_flags |= TG3_FLAG_PCI_66MHZ;
if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
/* 5700 B0 chips do not support checksumming correctly due
* to hardware bugs.
*/
if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0) {
tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
} else {
/* Checksums work, tell the chip not to use pseudo
* header csums on transmit.
*/
tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
}
/* Only 5701 and later support tagged irq status mode. */
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
tp->tg3_flags |= TG3_FLAG_TAGGED_IRQ_STATUS;
misc_ctrl_reg |= MISC_HOST_CTRL_TAGGED_STATUS;
}
/* Initialize misc host control in PCI block. */
tp->misc_host_ctrl |= (misc_ctrl_reg &
MISC_HOST_CTRL_CHIPREV);
pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
tp->misc_host_ctrl);
/* Initialize MAC MI mode, polling disabled. */
tw32(MAC_MI_MODE, tp->mi_mode);
/* Initialize data/descriptor byte/word swapping. */
tw32(GRC_MODE, tp->grc_mode);
/* Quick sanity check. Make sure we see an expected
* value here.
*/
grc_misc_cfg = tr32(GRC_MISC_CFG);
grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
if (grc_misc_cfg != GRC_MISC_CFG_BOARD_ID_5700 &&
grc_misc_cfg != GRC_MISC_CFG_BOARD_ID_5701)
return -ENODEV;
err = tg3_phy_probe(tp);
if (!err)
err = tg3_read_partno(tp);
return err;
}
static void tg3_free_consistent(struct tg3 *tp)
{
if (tp->rx_std_buffers) {
kfree(tp->rx_std_buffers);
tp->rx_std_buffers = (void *) 0xbedac0ed;
}
if (tp->rx_std) {
pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
tp->rx_std, tp->rx_std_mapping);
tp->rx_std = (void *) 0xdccab;
}
if (tp->tx_ring) {
pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
tp->tx_ring, tp->tx_desc_mapping);
tp->tx_ring = (void *) 0xbadf00d;
}
if (tp->hw_status) {
pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_status),
tp->hw_status, tp->status_mapping);
tp->hw_status = (void *) 0xfef1f0fa;
}
if (tp->hw_stats) {
pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
tp->hw_stats, tp->stats_mapping);
tp->hw_stats = (void *) 0xbeef0cab;
}
}
static int __devinit tg3_alloc_consistent(struct tg3 *tp)
{
tp->rx_std_buffers = kmalloc(sizeof(struct ring_info) * (TG3_RING_SIZE * 2), GFP_KERNEL);
if (!tp->rx_std_buffers)
return -ENOMEM;
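	/* One kmalloc covers the bookkeeping for both rings: the first
	 * TG3_RING_SIZE ring_info entries track rx buffers, the second
	 * half tracks tx buffers.
	 */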
tp->tx_buffers = &tp->rx_std_buffers[TG3_RING_SIZE];
tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
&tp->rx_std_mapping);
if (!tp->rx_std)
goto err_out;
if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
&tp->tx_desc_mapping);
if (!tp->tx_ring)
goto err_out;
} else {
tp->tx_ring = NULL;
tp->tx_desc_mapping = 0;
}
tp->hw_status = pci_alloc_consistent(tp->pdev,
sizeof(struct tg3_hw_status),
&tp->status_mapping);
if (!tp->hw_status)
goto err_out;
tp->hw_stats = pci_alloc_consistent(tp->pdev,
sizeof(struct tg3_hw_stats),
&tp->stats_mapping);
if (!tp->hw_stats)
goto err_out;
tg3_zero_rings(tp);
memset(tp->hw_status, 0, sizeof(struct tg3_hw_status));
memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
return 0;
err_out:
tg3_free_consistent(tp);
return -ENOMEM;
}
static void __devinit tg3_get_device_address(struct tg3 *tp)
{
struct net_device *dev = tp->dev;
u32 hi, lo;
hi = tr32(MAC_ADDR_0_HIGH);
lo = tr32(MAC_ADDR_0_LOW);
dev->dev_addr[5] = lo & 0xff;
dev->dev_addr[4] = (lo >> 8) & 0xff;
dev->dev_addr[3] = (lo >> 16) & 0xff;
dev->dev_addr[2] = (lo >> 24) & 0xff;
dev->dev_addr[1] = hi & 0xff;
dev->dev_addr[0] = (hi >> 8) & 0xff;
}
static int __devinit tg3_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
static int tg3_version_printed = 0;
unsigned long tg3reg_base, tg3reg_len;
struct net_device *dev;
struct tg3 *tp;
int i, err;
if (tg3_version_printed++ == 0)
printk(KERN_INFO "%s", version);
err = pci_enable_device(pdev);
if (err) {
printk(KERN_ERR PFX "Cannot enable PCI device, "
"aborting.\n");
return err;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
printk(KERN_ERR PFX "Cannot find proper PCI device "
"base address, aborting.\n");
err = -ENODEV;
goto err_out_disable_pdev;
}
err = pci_request_regions(pdev, "tg3");
if (err) {
printk(KERN_ERR PFX "Cannot obtain PCI resources, "
"aborting.\n");
goto err_out_disable_pdev;
}
pci_set_master(pdev);
tg3reg_base = pci_resource_start(pdev, 0);
tg3reg_len = pci_resource_len(pdev, 0);
dev = alloc_etherdev(sizeof(*tp));
if (!dev) {
printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
err = -ENOMEM;
goto err_out_free_res;
}
SET_MODULE_OWNER(dev);
tp = dev->priv;
tp->pdev = pdev;
tp->dev = dev;
tp->mac_mode = TG3_DEF_MAC_MODE;
tp->rx_mode = TG3_DEF_RX_MODE;
tp->tx_mode = TG3_DEF_TX_MODE;
tp->mi_mode = MAC_MI_MODE_BASE;
if (tg3_debug > 0)
tp->msg_enable = tg3_debug;
else
tp->msg_enable = TG3_DEF_MSG_ENABLE;
/* The word/byte swap controls here control register access byte
* swapping. DMA data byte swapping is controlled in the GRC_MODE
* setting below.
*/
tp->misc_host_ctrl =
MISC_HOST_CTRL_MASK_PCI_INT |
MISC_HOST_CTRL_WORD_SWAP |
MISC_HOST_CTRL_INDIR_ACCESS |
MISC_HOST_CTRL_PCISTATE_RW;
/* The NONFRM (non-frame) byte/word swap controls take effect
* on descriptor entries, anything which isn't packet data.
*
* The StrongARM chips on the board (one for tx, one for rx)
* are running in big-endian mode.
*/
tp->grc_mode = GRC_MODE_BSWAP_DATA;
#ifndef __BIG_ENDIAN
tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
spin_lock_init(&tp->lock);
spin_lock_init(&tp->indirect_lock);
/* For now use txds only in NIC sram memory.
* We may change this later. -DaveM
*/
#if 0
tp->tg3_flags |= TG3_FLAG_HOST_TXDS;
#endif
err = tg3_alloc_consistent(tp);
if (err) {
printk(KERN_ERR PFX "Descriptor alloc failed, aborting.\n");
goto err_out_free_dev;
}
init_timer(&tp->link_timer);
tp->link_timer.function = tg3_link_timer;
tp->link_timer.data = (unsigned long) tp;
dev->open = tg3_open;
dev->stop = tg3_close;
dev->hard_start_xmit = tg3_start_xmit;
dev->get_stats = tg3_get_stats;
dev->set_multicast_list = tg3_set_rx_mode;
dev->do_ioctl = tg3_ioctl;
dev->tx_timeout = tg3_tx_timeout;
dev->watchdog_timeo = 5 * HZ;
dev->change_mtu = tg3_change_mtu;
dev->irq = pdev->irq;
tp->regs = (unsigned long) ioremap(tg3reg_base, tg3reg_len);
if (tp->regs == 0UL) {
printk(KERN_ERR PFX "Cannot map device registers, "
"aborting.\n");
err = -ENOMEM;
goto err_out_free_cons;
}
err = tg3_get_invariants(tp);
if (err) {
printk(KERN_ERR PFX "Problem fetching invariants of chip, "
"aborting.\n");
goto err_out_iounmap;
}
tg3_get_device_address(tp);
#if 0
/* Tigon3 can do ipv4 only... and some chips have buggy
* checksumming.
*/
if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0)
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
#endif
err = register_netdev(dev);
if (err) {
printk(KERN_ERR PFX "Cannot register net device, "
"aborting.\n");
goto err_out_iounmap;
}
pci_set_drvdata(pdev, dev);
printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) 10/100/1000BaseT Ethernet ",
dev->name,
tp->board_part_number,
tp->pci_chip_rev_id,
((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5400) ? "5400" : "5401",
((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
((tp->tg3_flags & TG3_FLAG_PCI_66MHZ) ? "66MHz" : "33MHz"),
((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"));
for (i = 0; i < 6; i++)
printk("%2.2x%c", dev->dev_addr[i],
i == 5 ? '\n' : ':');
return 0;
err_out_iounmap:
iounmap((void *) tp->regs);
err_out_free_cons:
tg3_free_consistent(tp);
err_out_free_dev:
kfree(dev);
err_out_free_res:
pci_release_regions(pdev);
err_out_disable_pdev:
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
return err;
}
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
if (dev) {
struct tg3 *tp = dev->priv;
unregister_netdev(dev);
iounmap((void *) tp->regs);
tg3_free_consistent(tp);
kfree(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
}
static struct pci_driver tg3_driver = {
name: DRV_MODULE_NAME,
id_table: tg3_pci_tbl,
probe: tg3_init_one,
remove: tg3_remove_one,
/* XXX suspend/resume support XXX */
};
static int __init tg3_init(void)
{
return pci_module_init(&tg3_driver);
}
static void __exit tg3_cleanup(void)
{
pci_unregister_driver(&tg3_driver);
}
module_init(tg3_init);
module_exit(tg3_cleanup);