diff --git a/src/drivers/net/jme.c b/src/drivers/net/jme.c new file mode 100644 index 00000000..31208c2f --- /dev/null +++ b/src/drivers/net/jme.c @@ -0,0 +1,1303 @@ +/* + * JMicron JMC2x0 series PCIe Ethernet gPXE Device Driver + * + * Copyright 2010 Guo-Fu Tseng + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ +FILE_LICENCE ( GPL2_OR_LATER ); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "jme.h" + +static int +jme_mdio_read(struct net_device *netdev, int phy, int reg) +{ + struct jme_adapter *jme = netdev->priv; + int i, val, again = (reg == MII_BMSR) ? 
1 : 0; + +read_again: + jwrite32(jme, JME_SMI, SMI_OP_REQ | + smi_phy_addr(phy) | + smi_reg_addr(reg)); + + for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { + udelay(20); + val = jread32(jme, JME_SMI); + if ((val & SMI_OP_REQ) == 0) + break; + } + + if (i == 0) { + DBG("phy(%d) read timeout : %d\n", phy, reg); + return 0; + } + + if (again--) + goto read_again; + + return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT; +} + +static void +jme_mdio_write(struct net_device *netdev, + int phy, int reg, int val) +{ + struct jme_adapter *jme = netdev->priv; + int i; + + jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ | + ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) | + smi_phy_addr(phy) | smi_reg_addr(reg)); + + wmb(); + for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) { + udelay(20); + if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0) + break; + } + + if (i == 0) + DBG("phy(%d) write timeout : %d\n", phy, reg); + + return; +} + +static void +jme_reset_phy_processor(struct jme_adapter *jme) +{ + u32 val; + + jme_mdio_write(jme->mii_if.dev, + jme->mii_if.phy_id, + MII_ADVERTISE, ADVERTISE_ALL | + ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + + if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) + jme_mdio_write(jme->mii_if.dev, + jme->mii_if.phy_id, + MII_CTRL1000, + ADVERTISE_1000FULL | ADVERTISE_1000HALF); + + val = jme_mdio_read(jme->mii_if.dev, + jme->mii_if.phy_id, + MII_BMCR); + + jme_mdio_write(jme->mii_if.dev, + jme->mii_if.phy_id, + MII_BMCR, val | BMCR_RESET); + + return; +} + +static void +jme_phy_init(struct jme_adapter *jme) +{ + u16 reg26; + + reg26 = jme_mdio_read(jme->mii_if.dev, jme->mii_if.phy_id, 26); + jme_mdio_write(jme->mii_if.dev, jme->mii_if.phy_id, 26, reg26 | 0x1000); +} + +static void +jme_set_phyfifoa(struct jme_adapter *jme) +{ + jme_mdio_write(jme->mii_if.dev, jme->mii_if.phy_id, 27, 0x0004); +} + +static void +jme_set_phyfifob(struct jme_adapter *jme) +{ + jme_mdio_write(jme->mii_if.dev, jme->mii_if.phy_id, 27, 0x0000); +} + +static void 
+jme_phy_off(struct jme_adapter *jme) +{ + jme_mdio_write(jme->mii_if.dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN); +} + +static void +jme_restart_an(struct jme_adapter *jme) +{ + uint32_t bmcr; + + bmcr = jme_mdio_read(jme->mii_if.dev, jme->mii_if.phy_id, MII_BMCR); + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + jme_mdio_write(jme->mii_if.dev, jme->mii_if.phy_id, MII_BMCR, bmcr); +} + +static void +jme_reset_ghc_speed(struct jme_adapter *jme) +{ + jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX); + jwrite32(jme, JME_GHC, jme->reg_ghc); +} + +static void +jme_start_irq(struct jme_adapter *jme) +{ + /* + * Enable Interrupts + */ + jwrite32(jme, JME_IENS, INTR_ENABLE); +} + +static void +jme_stop_irq(struct jme_adapter *jme) +{ + /* + * Disable Interrupts + */ + jwrite32f(jme, JME_IENC, INTR_ENABLE); +} + +static void +jme_setup_wakeup_frame(struct jme_adapter *jme, + u32 *mask, u32 crc, int fnr) +{ + int i; + + /* + * Setup CRC pattern + */ + jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL)); + wmb(); + jwrite32(jme, JME_WFODP, crc); + wmb(); + + /* + * Setup Mask + */ + for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) { + jwrite32(jme, JME_WFOI, + ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) | + (fnr & WFOI_FRAME_SEL)); + wmb(); + jwrite32(jme, JME_WFODP, mask[i]); + wmb(); + } +} + +static void +jme_reset_mac_processor(struct jme_adapter *jme) +{ + u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0}; + u32 crc = 0xCDCDCDCD; + int i; + + jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST); + udelay(2); + jwrite32(jme, JME_GHC, jme->reg_ghc); + + jwrite32(jme, JME_RXDBA_LO, 0x00000000); + jwrite32(jme, JME_RXDBA_HI, 0x00000000); + jwrite32(jme, JME_RXQDC, 0x00000000); + jwrite32(jme, JME_RXNDA, 0x00000000); + jwrite32(jme, JME_TXDBA_LO, 0x00000000); + jwrite32(jme, JME_TXDBA_HI, 0x00000000); + jwrite32(jme, JME_TXQDC, 0x00000000); + jwrite32(jme, JME_TXNDA, 0x00000000); + + jwrite32(jme, JME_RXMCHT_LO, 0x00000000); + jwrite32(jme, JME_RXMCHT_HI, 0x00000000); + for (i = 
0 ; i < WAKEUP_FRAME_NR ; ++i) + jme_setup_wakeup_frame(jme, mask, crc, i); + jwrite32(jme, JME_GPREG0, GPREG0_DEFAULT); + jwrite32(jme, JME_GPREG1, GPREG1_DEFAULT); +} + +static void +jme_free_tx_buffers(struct jme_adapter *jme) +{ + struct jme_ring *txring = &jme->txring; + struct io_buffer *txbi; + unsigned int i; + + for (i = 0; i < jme->tx_ring_size; ++i) { + txbi = txring->bufinf[i]; + if (txbi) { + netdev_tx_complete_err(jme->mii_if.dev, + txbi, -ENOLINK); + txring->bufinf[i] = NULL; + } + } +} + +static void +jme_free_tx_resources(struct jme_adapter *jme) +{ + struct jme_ring *txring = &jme->txring; + + if (txring->desc) { + if (txring->bufinf) { + memset(txring->bufinf, 0, + sizeof(struct io_buffer *) * jme->tx_ring_size); + free(txring->bufinf); + } + free_dma(txring->desc, jme->tx_ring_size * TX_DESC_SIZE); + txring->desc = NULL; + txring->dma = 0; + txring->bufinf = NULL; + } + txring->next_to_use = 0; + txring->next_to_clean = 0; + txring->nr_free = 0; +} + +static int +jme_alloc_tx_resources(struct jme_adapter *jme) +{ + struct jme_ring *txring = &jme->txring; + + txring->desc = malloc_dma(jme->tx_ring_size * TX_DESC_SIZE, + RING_DESC_ALIGN); + if (!txring->desc) { + DBG("Can not allocate transmit ring descriptors.\n"); + goto err_out; + } + + /* + * 16 Bytes align + */ + txring->dma = virt_to_bus(txring->desc); + txring->bufinf = malloc(sizeof(struct io_buffer *) * + jme->tx_ring_size); + if (!(txring->bufinf)) { + DBG("Can not allocate transmit buffer info.\n"); + goto err_out; + } + + /* + * Initialize Transmit Buffer Pointers + */ + memset(txring->bufinf, 0, + sizeof(struct io_buffer *) * jme->tx_ring_size); + + return 0; + +err_out: + jme_free_tx_resources(jme); + return -ENOMEM; +} + +static void +jme_init_tx_ring(struct jme_adapter *jme) +{ + struct jme_ring *txring = &jme->txring; + + txring->next_to_clean = 0; + txring->next_to_use = 0; + txring->nr_free = jme->tx_ring_size; + + /* + * Initialize Transmit Descriptors + */ + 
memset(txring->desc, 0, jme->tx_ring_size * TX_DESC_SIZE); + jme_free_tx_buffers(jme); +} + +static void +jme_enable_tx_engine(struct jme_adapter *jme) +{ + /* + * Select Queue 0 + */ + jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0); + wmb(); + + /* + * Setup TX Queue 0 DMA Bass Address + */ + jwrite32(jme, JME_TXDBA_LO, (uint64_t)jme->txring.dma & 0xFFFFFFFFUL); + jwrite32(jme, JME_TXDBA_HI, (uint64_t)(jme->txring.dma) >> 32); + jwrite32(jme, JME_TXNDA, (uint64_t)jme->txring.dma & 0xFFFFFFFFUL); + + /* + * Setup TX Descptor Count + */ + jwrite32(jme, JME_TXQDC, jme->tx_ring_size); + + /* + * Enable TX Engine + */ + wmb(); + jwrite32(jme, JME_TXCS, jme->reg_txcs | + TXCS_SELECT_QUEUE0 | + TXCS_ENABLE); + +} + +static void +jme_disable_tx_engine(struct jme_adapter *jme) +{ + int i; + u32 val; + + /* + * Disable TX Engine + */ + jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0); + wmb(); + + val = jread32(jme, JME_TXCS); + for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) { + mdelay(1); + val = jread32(jme, JME_TXCS); + rmb(); + } + + if (!i) + DBG("Disable TX engine timeout.\n"); +} + + +static void +jme_set_clean_rxdesc(struct jme_adapter *jme, int i) +{ + struct jme_ring *rxring = &jme->rxring; + register struct rxdesc *rxdesc = rxring->desc; + struct io_buffer *rxbi = rxring->bufinf[i]; + uint64_t mapping; + + rxdesc += i; + mapping = virt_to_bus(rxbi->data); + + rxdesc->dw[0] = 0; + rxdesc->dw[1] = 0; + rxdesc->desc1.bufaddrh = cpu_to_le32(mapping >> 32); + rxdesc->desc1.bufaddrl = cpu_to_le32(mapping & 0xFFFFFFFFUL); + rxdesc->desc1.datalen = cpu_to_le16(RX_ALLOC_LEN); + wmb(); + rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT; +} + +static int +jme_make_new_rx_buf(struct io_buffer **rxbip) +{ + struct io_buffer *inbuf; + + /* + * IOB_ALIGN == 2048 + */ + inbuf = alloc_iob(RX_ALLOC_LEN); + if (!inbuf) { + DBG("Allocate receive iob error.\n"); + return -ENOMEM; + } + *rxbip = inbuf; + + return 0; +} + +static void 
+jme_free_rx_buf(struct jme_adapter *jme, int i) +{ + struct jme_ring *rxring = &jme->rxring; + struct io_buffer *rxbi = rxring->bufinf[i]; + + if (rxbi) { + free_iob(rxbi); + rxring->bufinf[i] = NULL; + } +} + +static void +jme_free_rx_resources(struct jme_adapter *jme) +{ + unsigned int i; + struct jme_ring *rxring = &jme->rxring; + + if (rxring->desc) { + if (rxring->bufinf) { + for (i = 0 ; i < jme->rx_ring_size ; ++i) + jme_free_rx_buf(jme, i); + free(rxring->bufinf); + } + + free_dma(rxring->desc, jme->rx_ring_size * RX_DESC_SIZE); + rxring->desc = NULL; + rxring->dma = 0; + rxring->bufinf = NULL; + } + rxring->next_to_fill = 0; + rxring->next_to_clean = 0; +} + +static int +jme_alloc_rx_resources(struct jme_adapter *jme) +{ + unsigned int i; + struct jme_ring *rxring = &jme->rxring; + struct io_buffer **bufinf; + + rxring->desc = malloc_dma(jme->rx_ring_size * RX_DESC_SIZE, + RING_DESC_ALIGN); + if (!rxring->desc) { + DBG("Can not allocate receive ring descriptors.\n"); + goto err_out; + } + + /* + * 16 Bytes align + */ + rxring->dma = virt_to_bus(rxring->desc); + rxring->bufinf = malloc(sizeof(struct io_buffer *) * + jme->rx_ring_size); + if (!(rxring->bufinf)) { + DBG("Can not allocate receive buffer info.\n"); + goto err_out; + } + + /* + * Initiallize Receive Buffer Pointers + */ + bufinf = rxring->bufinf; + memset(bufinf, 0, sizeof(struct io_buffer *) * jme->rx_ring_size); + for (i = 0 ; i < jme->rx_ring_size ; ++i) { + if (jme_make_new_rx_buf(bufinf)) + goto err_out; + ++bufinf; + } + + return 0; + +err_out: + jme_free_rx_resources(jme); + return -ENOMEM; +} + +static void +jme_init_rx_ring(struct jme_adapter *jme) +{ + unsigned int i; + struct jme_ring *rxring = &jme->rxring; + + for (i = 0 ; i < jme->rx_ring_size ; ++i) + jme_set_clean_rxdesc(jme, i); + + rxring->next_to_fill = 0; + rxring->next_to_clean = 0; +} + +static void +jme_set_multi(struct jme_adapter *jme) +{ + /* + * Just receive all kind of packet for new. 
+ */ + jme->reg_rxmcs |= RXMCS_ALLFRAME | RXMCS_BRDFRAME | RXMCS_UNIFRAME; + jwrite32(jme, JME_RXMCS, jme->reg_rxmcs); +} + +static void +jme_enable_rx_engine(struct jme_adapter *jme) +{ + /* + * Select Queue 0 + */ + jwrite32(jme, JME_RXCS, jme->reg_rxcs | + RXCS_QUEUESEL_Q0); + wmb(); + + /* + * Setup RX DMA Bass Address + */ + jwrite32(jme, JME_RXDBA_LO, (uint64_t)(jme->rxring.dma) & 0xFFFFFFFFUL); + jwrite32(jme, JME_RXDBA_HI, (uint64_t)(jme->rxring.dma) >> 32); + jwrite32(jme, JME_RXNDA, (uint64_t)(jme->rxring.dma) & 0xFFFFFFFFUL); + + /* + * Setup RX Descriptor Count + */ + jwrite32(jme, JME_RXQDC, jme->rx_ring_size); + + /* + * Setup Unicast Filter + */ + jme_set_multi(jme); + + /* + * Enable RX Engine + */ + wmb(); + jwrite32(jme, JME_RXCS, jme->reg_rxcs | + RXCS_QUEUESEL_Q0 | + RXCS_ENABLE | + RXCS_QST); +} + +static void +jme_restart_rx_engine(struct jme_adapter *jme) +{ + /* + * Start RX Engine + */ + jwrite32(jme, JME_RXCS, jme->reg_rxcs | + RXCS_QUEUESEL_Q0 | + RXCS_ENABLE | + RXCS_QST); +} + +static void +jme_disable_rx_engine(struct jme_adapter *jme) +{ + int i; + u32 val; + + /* + * Disable RX Engine + */ + jwrite32(jme, JME_RXCS, jme->reg_rxcs); + wmb(); + + val = jread32(jme, JME_RXCS); + for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) { + mdelay(1); + val = jread32(jme, JME_RXCS); + rmb(); + } + + if (!i) + DBG("Disable RX engine timeout.\n"); + +} + +static void +jme_refill_rx_ring(struct jme_adapter *jme) +{ + struct jme_ring *rxring = &jme->rxring; + int i = rxring->next_to_fill; + struct io_buffer **bufinf = rxring->bufinf; + int mask = jme->rx_ring_mask; + + while (!bufinf[i]) { + if (jme_make_new_rx_buf(bufinf + i)) + break; + jme_set_clean_rxdesc(jme, i); + i = (i + 1) & mask; + } + rxring->next_to_fill = i; +} + +static void +jme_alloc_and_feed_iob(struct jme_adapter *jme, int idx) +{ + struct jme_ring *rxring = &jme->rxring; + struct rxdesc *rxdesc = rxring->desc; + struct io_buffer *rxbi = rxring->bufinf[idx]; + 
struct net_device *netdev = jme->mii_if.dev; + int framesize; + + rxdesc += idx; + + framesize = le16_to_cpu(rxdesc->descwb.framesize); + iob_put(rxbi, framesize); + netdev_rx(netdev, rxbi); + + rxring->bufinf[idx] = NULL; + jme_refill_rx_ring(jme); +} + +static void +jme_process_receive(struct jme_adapter *jme) +{ + struct jme_ring *rxring = &jme->rxring; + struct rxdesc *rxdesc = rxring->desc; + struct net_device *netdev = jme->mii_if.dev; + int i, j, ccnt, desccnt, mask = jme->rx_ring_mask; + unsigned int limit = jme->rx_ring_size; + + i = rxring->next_to_clean; + rxdesc += i; + while (!(rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) && + (rxdesc->descwb.desccnt & RXWBDCNT_WBCPL) && + limit--) { + + rmb(); + desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT; + DBG2("Cleaning rx desc=%d, cnt=%d\n", i, desccnt); + + if (desccnt > 1 || rxdesc->descwb.errstat & RXWBERR_ALLERR) { + for (j = i, ccnt = desccnt ; ccnt-- ; ) { + jme_set_clean_rxdesc(jme, j); + j = (j + 1) & (mask); + } + DBG("Dropped packet due to "); + if (desccnt > 1) + DBG("long packet.(%d descriptors)\n", desccnt); + else + DBG("Packet error.\n"); + netdev_rx_err(netdev, NULL, -EINVAL); + } else { + jme_alloc_and_feed_iob(jme, i); + } + + i = (i + desccnt) & (mask); + rxdesc = rxring->desc; + rxdesc += i; + } + rxring->next_to_clean = i; + + return; +} + +static void +jme_set_custom_macaddr(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev->priv; + uint8_t *addr = netdev->ll_addr; + u32 val; + + val = (addr[3] & 0xff) << 24 | + (addr[2] & 0xff) << 16 | + (addr[1] & 0xff) << 8 | + (addr[0] & 0xff); + jwrite32(jme, JME_RXUMA_LO, val); + val = (addr[5] & 0xff) << 8 | + (addr[4] & 0xff); + jwrite32(jme, JME_RXUMA_HI, val); +} + +/** + * Open NIC + * + * @v netdev Net device + * @ret rc Return status code + */ +static int +jme_open(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev->priv; + int rc; + + /* + * Allocate receive resources + */ + rc = 
jme_alloc_rx_resources(jme); + if (rc) { + DBG("Allocate receive resources error.\n"); + goto nomem_out; + } + + /* + * Allocate transmit resources + */ + rc = jme_alloc_tx_resources(jme); + if (rc) { + DBG("Allocate transmit resources error.\n"); + goto free_rx_resources_out; + } + + jme_set_custom_macaddr(netdev); + jme_reset_phy_processor(jme); + jme_restart_an(jme); + + return 0; + +free_rx_resources_out: + jme_free_rx_resources(jme); +nomem_out: + return rc; +} + +/** + * Close NIC + * + * @v netdev Net device + */ +static void +jme_close(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev->priv; + + jme_free_tx_resources(jme); + jme_free_rx_resources(jme); + jme_reset_mac_processor(jme); + jme->phylink = 0; + jme_phy_off(jme); + netdev_link_down(netdev); +} + +static int +jme_alloc_txdesc(struct jme_adapter *jme) +{ + struct jme_ring *txring = &jme->txring; + int idx; + + idx = txring->next_to_use; + if (txring->nr_free < 1) + return -1; + --(txring->nr_free); + txring->next_to_use = (txring->next_to_use + 1) & jme->tx_ring_mask; + + return idx; +} + +static void +jme_fill_tx_desc(struct jme_adapter *jme, struct io_buffer *iob, int idx) +{ + struct jme_ring *txring = &jme->txring; + struct txdesc *txdesc = txring->desc; + uint16_t len = iob_len(iob); + unsigned long int mapping; + + txdesc += idx; + mapping = virt_to_bus(iob->data); + DBG2("TX buffer address: %p(%08lx+%x)\n", + iob->data, mapping, len); + txdesc->dw[0] = 0; + txdesc->dw[1] = 0; + txdesc->dw[2] = 0; + txdesc->dw[3] = 0; + txdesc->desc1.datalen = cpu_to_le16(len); + txdesc->desc1.pktsize = cpu_to_le16(len); + txdesc->desc1.bufaddr = cpu_to_le32(mapping); + /* + * Set OWN bit at final. + * When kernel transmit faster than NIC. + * And NIC trying to send this descriptor before we tell + * it to start sending this TX queue. + * Other fields are already filled correctly. 
+ */ + wmb(); + txdesc->desc1.flags = TXFLAG_OWN | TXFLAG_INT; + /* + * Set tx buffer info after telling NIC to send + * For better tx_clean timing + */ + wmb(); + txring->bufinf[idx] = iob; +} + +/** + * Transmit packet + * + * @v netdev Network device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int +jme_transmit(struct net_device *netdev, struct io_buffer *iobuf) +{ + struct jme_adapter *jme = netdev->priv; + int idx; + + idx = jme_alloc_txdesc(jme); + if (idx < 0) { + /* + * Pause transmit queue somehow if possible. + */ + DBG("TX ring full!\n"); + return -EOVERFLOW; + } + + jme_fill_tx_desc(jme, iobuf, idx); + + jwrite32(jme, JME_TXCS, jme->reg_txcs | + TXCS_SELECT_QUEUE0 | + TXCS_QUEUE0S | + TXCS_ENABLE); + DBG2("xmit: idx=%d\n", idx); + + return 0; +} + +static int +jme_check_link(struct net_device *netdev, int testonly) +{ + struct jme_adapter *jme = netdev->priv; + u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, gpreg1; + int rc = 0; + + phylink = jread32(jme, JME_PHY_LINK); + + if (phylink & PHY_LINK_UP) { + /* + * Keep polling for speed/duplex resolve complete + */ + while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) && + --cnt) { + + udelay(1); + phylink = jread32(jme, JME_PHY_LINK); + } + if (!cnt) + DBG("Waiting speed resolve timeout.\n"); + + if (jme->phylink == phylink) { + rc = 1; + goto out; + } + if (testonly) + goto out; + + jme->phylink = phylink; + + ghc = jme->reg_ghc & ~(GHC_SPEED | GHC_DPX | + GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE | + GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY); + switch (phylink & PHY_LINK_SPEED_MASK) { + case PHY_LINK_SPEED_10M: + ghc |= GHC_SPEED_10M | + GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE; + break; + case PHY_LINK_SPEED_100M: + ghc |= GHC_SPEED_100M | + GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE; + break; + case PHY_LINK_SPEED_1000M: + ghc |= GHC_SPEED_1000M | + GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY; + break; + default: + break; + } + + if (phylink & PHY_LINK_DUPLEX) { + jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT); + ghc 
|= GHC_DPX; + } else { + jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT | + TXMCS_BACKOFF | + TXMCS_CARRIERSENSE | + TXMCS_COLLISION); + jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN | + ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) | + TXTRHD_TXREN | + ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL)); + } + + gpreg1 = GPREG1_DEFAULT; + if (is_buggy250(jme->pdev->device, jme->chiprev)) { + if (!(phylink & PHY_LINK_DUPLEX)) + gpreg1 |= GPREG1_HALFMODEPATCH; + switch (phylink & PHY_LINK_SPEED_MASK) { + case PHY_LINK_SPEED_10M: + jme_set_phyfifoa(jme); + gpreg1 |= GPREG1_RSSPATCH; + break; + case PHY_LINK_SPEED_100M: + jme_set_phyfifob(jme); + gpreg1 |= GPREG1_RSSPATCH; + break; + case PHY_LINK_SPEED_1000M: + jme_set_phyfifoa(jme); + break; + default: + break; + } + } + + jwrite32(jme, JME_GPREG1, gpreg1); + jwrite32(jme, JME_GHC, ghc); + jme->reg_ghc = ghc; + + DBG("Link is up at %d Mbps, %s-Duplex, MDI%s.\n", + ((phylink & PHY_LINK_SPEED_MASK) + == PHY_LINK_SPEED_1000M) ? 1000 : + ((phylink & PHY_LINK_SPEED_MASK) + == PHY_LINK_SPEED_100M) ? 100 : 10, + (phylink & PHY_LINK_DUPLEX) ? "Full" : "Half", + (phylink & PHY_LINK_MDI_STAT) ? "-X" : ""); + netdev_link_up(netdev); + } else { + if (testonly) + goto out; + + DBG("Link is down.\n"); + jme->phylink = 0; + netdev_link_down(netdev); + } + +out: + return rc; +} + +static void +jme_link_change(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev->priv; + + /* + * Do nothing if the link status did not change. 
+ */ + if (jme_check_link(netdev, 1)) + return; + + if (netdev_link_ok(netdev)) { + netdev_link_down(netdev); + jme_disable_rx_engine(jme); + jme_disable_tx_engine(jme); + jme_reset_ghc_speed(jme); + jme_reset_mac_processor(jme); + } + + jme_check_link(netdev, 0); + if (netdev_link_ok(netdev)) { + jme_init_rx_ring(jme); + jme_enable_rx_engine(jme); + jme_init_tx_ring(jme); + jme_enable_tx_engine(jme); + } + + return; +} + +static void +jme_tx_clean(struct jme_adapter *jme) +{ + struct jme_ring *txring = &jme->txring; + struct txdesc *txdesc = txring->desc; + struct io_buffer *txbi; + struct net_device *netdev = jme->mii_if.dev; + int i, cnt = 0, max, err, mask; + + max = jme->tx_ring_size - txring->nr_free; + mask = jme->tx_ring_mask; + + for (i = txring->next_to_clean ; cnt < max ; ++cnt) { + + txbi = txring->bufinf[i]; + + if (txbi && !(txdesc[i].descwb.flags & TXWBFLAG_OWN)) { + DBG2("TX clean address: %08lx(%08lx+%x)\n", + (unsigned long)txbi->data, + virt_to_bus(txbi->data), + iob_len(txbi)); + err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR; + if (err) + netdev_tx_complete_err(netdev, txbi, -EIO); + else + netdev_tx_complete(netdev, txbi); + txring->bufinf[i] = NULL; + } else { + break; + } + + i = (i + 1) & mask; + } + + DBG2("txclean: next %d\n", i); + txring->next_to_clean = i; + txring->nr_free += cnt; +} +/** + * Poll for received packets + * + * @v netdev Network device + */ +static void +jme_poll(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev->priv; + u32 intrstat; + + intrstat = jread32(jme, JME_IEVE); + + /* + * Check if any actions needs to perform. 
+ */ + if ((intrstat & INTR_ENABLE) == 0) + return; + + /* + * Check if the device still exist + */ + if (intrstat == ~((typeof(intrstat))0)) + return; + + DBG2("intrstat 0x%08x\n", intrstat); + if (intrstat & (INTR_LINKCH | INTR_SWINTR)) { + DBG2("Link changed\n"); + jme_link_change(netdev); + + /* + * Clear all interrupt status + */ + jwrite32(jme, JME_IEVE, intrstat); + + /* + * Link change event is critical + * all other events are ignored + */ + return; + } + + /* + * Process transmission complete first to free more memory. + */ + if (intrstat & INTR_TX0) { + DBG2("Packet transmit complete\n"); + jme_tx_clean(jme); + jwrite32(jme, JME_IEVE, intrstat & INTR_TX0); + } + + if (intrstat & (INTR_RX0 | INTR_RX0EMP)) { + DBG2("Packet received\n"); + jme_process_receive(jme); + jwrite32(jme, JME_IEVE, + intrstat & (INTR_RX0 | INTR_RX0EMP)); + if (intrstat & INTR_RX0EMP) + jme_restart_rx_engine(jme); + } + + /* + * Clean all other interrupt status + */ + jwrite32(jme, JME_IEVE, + intrstat & ~(INTR_RX0 | INTR_RX0EMP | INTR_TX0)); +} + +/** + * Enable/disable interrupts + * + * @v netdev Network device + * @v enable Interrupts should be enabled + */ +static void +jme_irq(struct net_device *netdev, int enable) +{ + struct jme_adapter *jme = netdev->priv; + + DBG("jme interrupts %s\n", (enable ? 
"enabled" : "disabled")); + if (enable) + jme_start_irq(jme); + else + jme_stop_irq(jme); +} + +/** JME net device operations */ +static struct net_device_operations jme_operations = { + .open = jme_open, + .close = jme_close, + .transmit = jme_transmit, + .poll = jme_poll, + .irq = jme_irq, +}; + +static void +jme_check_hw_ver(struct jme_adapter *jme) +{ + u32 chipmode; + + chipmode = jread32(jme, JME_CHIPMODE); + + jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT; + jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT; +} + +static int +jme_reload_eeprom(struct jme_adapter *jme) +{ + u32 val; + int i; + + val = jread32(jme, JME_SMBCSR); + + if (val & SMBCSR_EEPROMD) { + val |= SMBCSR_CNACK; + jwrite32(jme, JME_SMBCSR, val); + val |= SMBCSR_RELOAD; + jwrite32(jme, JME_SMBCSR, val); + mdelay(12); + + for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) { + mdelay(1); + if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0) + break; + } + + if (i == 0) { + DBG("eeprom reload timeout\n"); + return -EIO; + } + } + + return 0; +} + +static void +jme_load_macaddr(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + unsigned char macaddr[6]; + u32 val; + + val = jread32(jme, JME_RXUMA_LO); + macaddr[0] = (val >> 0) & 0xFF; + macaddr[1] = (val >> 8) & 0xFF; + macaddr[2] = (val >> 16) & 0xFF; + macaddr[3] = (val >> 24) & 0xFF; + val = jread32(jme, JME_RXUMA_HI); + macaddr[4] = (val >> 0) & 0xFF; + macaddr[5] = (val >> 8) & 0xFF; + memcpy(netdev->hw_addr, macaddr, 6); +} + +/** + * Probe PCI device + * + * @v pci PCI device + * @v id PCI ID + * @ret rc Return status code + */ +static int +jme_probe(struct pci_device *pci, const struct pci_device_id *id __unused) +{ + struct net_device *netdev; + struct jme_adapter *jme; + int rc; + uint8_t mrrs; + + /* Allocate net device */ + netdev = alloc_etherdev(sizeof(*jme)); + if (!netdev) + return -ENOMEM; + netdev_init(netdev, &jme_operations); + jme = netdev->priv; + 
pci_set_drvdata(pci, netdev); + netdev->dev = &pci->dev; + jme->regs = ioremap(pci->membase, JME_REGS_SIZE); + if (!(jme->regs)) { + DBG("Mapping PCI resource region error.\n"); + rc = -ENOMEM; + goto err_out; + } + jme->reg_ghc = 0; + jme->reg_rxcs = RXCS_DEFAULT; + jme->reg_rxmcs = RXMCS_DEFAULT; + jme->phylink = 0; + jme->pdev = pci; + jme->mii_if.dev = netdev; + jme->mii_if.phy_id = 1; + jme->mii_if.mdio_read = jme_mdio_read; + jme->mii_if.mdio_write = jme_mdio_write; + jme->rx_ring_size = 1 << 4; + jme->rx_ring_mask = jme->rx_ring_size - 1; + jme->tx_ring_size = 1 << 4; + jme->tx_ring_mask = jme->tx_ring_size - 1; + + /* Fix up PCI device */ + adjust_pci_device(pci); + + /* + * Get Max Read Req Size from PCI Config Space + */ + pci_read_config_byte(pci, PCI_DCSR_MRRS, &mrrs); + mrrs &= PCI_DCSR_MRRS_MASK; + switch (mrrs) { + case MRRS_128B: + jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B; + break; + case MRRS_256B: + jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B; + break; + default: + jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B; + break; + }; + + /* + * Get basic hardware info. + */ + jme_check_hw_ver(jme); + if (pci->device == PCI_DEVICE_ID_JMICRON_JMC250) + jme->mii_if.supports_gmii = 1; + else + jme->mii_if.supports_gmii = 0; + + /* + * Initialize PHY + */ + jme_set_phyfifoa(jme); + jme_phy_init(jme); + + /* + * Bring down phy before interface is opened. 
+ */ + jme_phy_off(jme); + netdev_link_down(netdev); + + /* + * Reset MAC processor and reload EEPROM for MAC Address + */ + jme_reset_mac_processor(jme); + rc = jme_reload_eeprom(jme); + if (rc) { + DBG("Reload eeprom for reading MAC Address error.\n"); + goto err_unmap; + } + jme_load_macaddr(netdev); + + /* Register network device */ + if ((rc = register_netdev(netdev)) != 0) { + DBG("Register net_device error.\n"); + goto err_unmap; + } + + return 0; + +err_unmap: + iounmap(jme->regs); +err_out: + netdev_nullify(netdev); + netdev_put(netdev); + return rc; +} + +/** + * Remove PCI device + * + * @v pci PCI device + */ +static void +jme_remove(struct pci_device *pci) +{ + struct net_device *netdev = pci_get_drvdata(pci); + struct jme_adapter *jme = netdev->priv; + + iounmap(jme->regs); + unregister_netdev(netdev); + netdev_nullify(netdev); + netdev_put(netdev); +} + +static struct pci_device_id jm_nics[] = { +PCI_ROM(0x197b, 0x0250, "jme", "JMicron Gigabit Ethernet", 0), +PCI_ROM(0x197b, 0x0260, "jmfe", "JMicron Fast Ethernet", 0), +}; + +struct pci_driver jme_driver __pci_driver = { + .ids = jm_nics, + .id_count = ( sizeof ( jm_nics ) / sizeof ( jm_nics[0] ) ), + .probe = jme_probe, + .remove = jme_remove, +}; + diff --git a/src/drivers/net/jme.h b/src/drivers/net/jme.h new file mode 100644 index 00000000..7e225430 --- /dev/null +++ b/src/drivers/net/jme.h @@ -0,0 +1,914 @@ +/* + * JMicron JMC2x0 series PCIe Ethernet gPXE Device Driver + * + * Copyright 2010 Guo-Fu Tseng + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ +FILE_LICENCE ( GPL2_OR_LATER ); + +#ifndef __JME_H_INCLUDED__ +#define __JME_H_INCLUDED__ + +#define PCI_VENDOR_ID_JMICRON 0x197b +#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250 +#define PCI_DEVICE_ID_JMICRON_JMC260 0x0260 + +/* + * Extra PCI Configuration space interface + */ +#define PCI_DCSR_MRRS 0x59 +#define PCI_DCSR_MRRS_MASK 0x70 + +enum pci_dcsr_mrrs_vals { + MRRS_128B = 0x00, + MRRS_256B = 0x10, + MRRS_512B = 0x20, + MRRS_1024B = 0x30, + MRRS_2048B = 0x40, + MRRS_4096B = 0x50, +}; + +/* + * TX/RX Descriptors + * + * TX/RX Ring DESC Count Must be multiple of 16 and <= 1024 + */ +#define RING_DESC_ALIGN 16 /* Descriptor alignment */ +#define TX_DESC_SIZE 16 + +struct txdesc { + union { + uint8_t all[16]; + uint32_t dw[4]; + struct { + /* DW0 */ + uint16_t vlan; + uint8_t rsv1; + uint8_t flags; + + /* DW1 */ + uint16_t datalen; + uint16_t mss; + + /* DW2 */ + uint16_t pktsize; + uint16_t rsv2; + + /* DW3 */ + uint32_t bufaddr; + } desc1; + struct { + /* DW0 */ + uint16_t rsv1; + uint8_t rsv2; + uint8_t flags; + + /* DW1 */ + uint16_t datalen; + uint16_t rsv3; + + /* DW2 */ + uint32_t bufaddrh; + + /* DW3 */ + uint32_t bufaddrl; + } desc2; + struct { + /* DW0 */ + uint8_t ehdrsz; + uint8_t rsv1; + uint8_t rsv2; + uint8_t flags; + + /* DW1 */ + uint16_t trycnt; + uint16_t segcnt; + + /* DW2 */ + uint16_t pktsz; + uint16_t rsv3; + + /* DW3 */ + uint32_t bufaddrl; + } descwb; + }; +}; + +enum jme_txdesc_flags_bits { + TXFLAG_OWN = 0x80, + TXFLAG_INT = 0x40, + TXFLAG_64BIT = 0x20, + TXFLAG_TCPCS = 0x10, + TXFLAG_UDPCS = 0x08, + TXFLAG_IPCS = 0x04, + TXFLAG_LSEN = 0x02, + TXFLAG_TAGON = 0x01, +}; + +#define TXDESC_MSS_SHIFT 2 +enum jme_txwbdesc_flags_bits { + TXWBFLAG_OWN = 0x80, + TXWBFLAG_INT = 0x40, + TXWBFLAG_TMOUT = 0x20, + TXWBFLAG_TRYOUT = 0x10, + 
TXWBFLAG_COL = 0x08, + + TXWBFLAG_ALLERR = TXWBFLAG_TMOUT | + TXWBFLAG_TRYOUT | + TXWBFLAG_COL, +}; + +#define RX_DESC_SIZE 16 +#define RX_BUF_DMA_ALIGN 8 +#define RX_PREPAD_SIZE 10 +#define ETH_CRC_LEN 2 +#define RX_VLANHDR_LEN 2 +#define RX_EXTRA_LEN (ETH_HLEN + \ + ETH_CRC_LEN + \ + RX_VLANHDR_LEN + \ + RX_BUF_DMA_ALIGN) +#define FIXED_MTU 1500 +#define RX_ALLOC_LEN (FIXED_MTU + RX_EXTRA_LEN) + +struct rxdesc { + union { + uint8_t all[16]; + uint32_t dw[4]; + struct { + /* DW0 */ + uint16_t rsv2; + uint8_t rsv1; + uint8_t flags; + + /* DW1 */ + uint16_t datalen; + uint16_t wbcpl; + + /* DW2 */ + uint32_t bufaddrh; + + /* DW3 */ + uint32_t bufaddrl; + } desc1; + struct { + /* DW0 */ + uint16_t vlan; + uint16_t flags; + + /* DW1 */ + uint16_t framesize; + uint8_t errstat; + uint8_t desccnt; + + /* DW2 */ + uint32_t rsshash; + + /* DW3 */ + uint8_t hashfun; + uint8_t hashtype; + uint16_t resrv; + } descwb; + }; +}; + +enum jme_rxdesc_flags_bits { + RXFLAG_OWN = 0x80, + RXFLAG_INT = 0x40, + RXFLAG_64BIT = 0x20, +}; + +enum jme_rxwbdesc_flags_bits { + RXWBFLAG_OWN = 0x8000, + RXWBFLAG_INT = 0x4000, + RXWBFLAG_MF = 0x2000, + RXWBFLAG_64BIT = 0x2000, + RXWBFLAG_TCPON = 0x1000, + RXWBFLAG_UDPON = 0x0800, + RXWBFLAG_IPCS = 0x0400, + RXWBFLAG_TCPCS = 0x0200, + RXWBFLAG_UDPCS = 0x0100, + RXWBFLAG_TAGON = 0x0080, + RXWBFLAG_IPV4 = 0x0040, + RXWBFLAG_IPV6 = 0x0020, + RXWBFLAG_PAUSE = 0x0010, + RXWBFLAG_MAGIC = 0x0008, + RXWBFLAG_WAKEUP = 0x0004, + RXWBFLAG_DEST = 0x0003, + RXWBFLAG_DEST_UNI = 0x0001, + RXWBFLAG_DEST_MUL = 0x0002, + RXWBFLAG_DEST_BRO = 0x0003, +}; + +enum jme_rxwbdesc_desccnt_mask { + RXWBDCNT_WBCPL = 0x80, + RXWBDCNT_DCNT = 0x7F, +}; + +enum jme_rxwbdesc_errstat_bits { + RXWBERR_LIMIT = 0x80, + RXWBERR_MIIER = 0x40, + RXWBERR_NIBON = 0x20, + RXWBERR_COLON = 0x10, + RXWBERR_ABORT = 0x08, + RXWBERR_SHORT = 0x04, + RXWBERR_OVERUN = 0x02, + RXWBERR_CRCERR = 0x01, + RXWBERR_ALLERR = 0xFF, +}; + +/* + * The structure holding buffer information and ring descriptors 
all together. + */ +struct jme_ring { + void *desc; /* pointer to ring memory */ + unsigned long dma; /* phys address for ring dma */ + + /* Buffer information corresponding to each descriptor */ + struct io_buffer **bufinf; + + int next_to_clean; + int next_to_fill; + int next_to_use; + int nr_free; +}; + +/* + * Jmac Adapter Private data + */ +struct jme_adapter { + void *regs; + struct mii_if_info mii_if; + struct pci_device *pdev; + unsigned int fpgaver; + unsigned int chiprev; + uint32_t reg_ghc; + uint32_t reg_txcs; + uint32_t reg_rxcs; + uint32_t reg_rxmcs; + uint32_t phylink; + struct jme_ring rxring; + uint32_t rx_ring_size; + uint32_t rx_ring_mask; + struct jme_ring txring; + uint32_t tx_ring_size; + uint32_t tx_ring_mask; +}; + +/* + * I/O Registers + */ +enum jme_iomap_regs_value { + JME_REGS_SIZE = 0x1000, +}; + +enum jme_iomap_offsets { + JME_MAC = 0x0000, + JME_PHY = 0x0400, + JME_MISC = 0x0800, + JME_RSS = 0x0C00, +}; + +enum jme_iomap_lens { + JME_MAC_LEN = 0x80, + JME_PHY_LEN = 0x58, + JME_MISC_LEN = 0x98, + JME_RSS_LEN = 0xFF, +}; + +enum jme_iomap_regs { + JME_TXCS = JME_MAC | 0x00, /* Transmit Control and Status */ + JME_TXDBA_LO = JME_MAC | 0x04, /* Transmit Queue Desc Base Addr */ + JME_TXDBA_HI = JME_MAC | 0x08, /* Transmit Queue Desc Base Addr */ + JME_TXQDC = JME_MAC | 0x0C, /* Transmit Queue Desc Count */ + JME_TXNDA = JME_MAC | 0x10, /* Transmit Queue Next Desc Addr */ + JME_TXMCS = JME_MAC | 0x14, /* Transmit MAC Control Status */ + JME_TXPFC = JME_MAC | 0x18, /* Transmit Pause Frame Control */ + JME_TXTRHD = JME_MAC | 0x1C, /* Transmit Timer/Retry@Half-Dup */ + + JME_RXCS = JME_MAC | 0x20, /* Receive Control and Status */ + JME_RXDBA_LO = JME_MAC | 0x24, /* Receive Queue Desc Base Addr */ + JME_RXDBA_HI = JME_MAC | 0x28, /* Receive Queue Desc Base Addr */ + JME_RXQDC = JME_MAC | 0x2C, /* Receive Queue Desc Count */ + JME_RXNDA = JME_MAC | 0x30, /* Receive Queue Next Desc Addr */ + JME_RXMCS = JME_MAC | 0x34, /* Receive MAC Control Status 
*/ + JME_RXUMA_LO = JME_MAC | 0x38, /* Receive Unicast MAC Address */ + JME_RXUMA_HI = JME_MAC | 0x3C, /* Receive Unicast MAC Address */ + JME_RXMCHT_LO = JME_MAC | 0x40, /* Recv Multicast Addr HashTable */ + JME_RXMCHT_HI = JME_MAC | 0x44, /* Recv Multicast Addr HashTable */ + JME_WFODP = JME_MAC | 0x48, /* Wakeup Frame Output Data Port */ + JME_WFOI = JME_MAC | 0x4C, /* Wakeup Frame Output Interface */ + + JME_SMI = JME_MAC | 0x50, /* Station Management Interface */ + JME_GHC = JME_MAC | 0x54, /* Global Host Control */ + JME_PMCS = JME_MAC | 0x60, /* Power Management Control/Stat */ + + + JME_PHY_CS = JME_PHY | 0x28, /* PHY Ctrl and Status Register */ + JME_PHY_LINK = JME_PHY | 0x30, /* PHY Link Status Register */ + JME_SMBCSR = JME_PHY | 0x40, /* SMB Control and Status */ + JME_SMBINTF = JME_PHY | 0x44, /* SMB Interface */ + + + JME_TMCSR = JME_MISC | 0x00, /* Timer Control/Status Register */ + JME_GPREG0 = JME_MISC | 0x08, /* General purpose REG-0 */ + JME_GPREG1 = JME_MISC | 0x0C, /* General purpose REG-1 */ + JME_IEVE = JME_MISC | 0x20, /* Interrupt Event Status */ + JME_IREQ = JME_MISC | 0x24, /* Intr Req Status(For Debug) */ + JME_IENS = JME_MISC | 0x28, /* Intr Enable - Setting Port */ + JME_IENC = JME_MISC | 0x2C, /* Interrupt Enable - Clear Port */ + JME_PCCRX0 = JME_MISC | 0x30, /* PCC Control for RX Queue 0 */ + JME_PCCTX = JME_MISC | 0x40, /* PCC Control for TX Queues */ + JME_CHIPMODE = JME_MISC | 0x44, /* Identify FPGA Version */ + JME_SHBA_HI = JME_MISC | 0x48, /* Shadow Register Base HI */ + JME_SHBA_LO = JME_MISC | 0x4C, /* Shadow Register Base LO */ + JME_TIMER1 = JME_MISC | 0x70, /* Timer1 */ + JME_TIMER2 = JME_MISC | 0x74, /* Timer2 */ + JME_APMC = JME_MISC | 0x7C, /* Aggressive Power Mode Control */ + JME_PCCSRX0 = JME_MISC | 0x80, /* PCC Status of RX0 */ +}; + +/* + * TX Control/Status Bits + */ +enum jme_txcs_bits { + TXCS_QUEUE7S = 0x00008000, + TXCS_QUEUE6S = 0x00004000, + TXCS_QUEUE5S = 0x00002000, + TXCS_QUEUE4S = 0x00001000, + 
TXCS_QUEUE3S = 0x00000800, + TXCS_QUEUE2S = 0x00000400, + TXCS_QUEUE1S = 0x00000200, + TXCS_QUEUE0S = 0x00000100, + TXCS_FIFOTH = 0x000000C0, + TXCS_DMASIZE = 0x00000030, + TXCS_BURST = 0x00000004, + TXCS_ENABLE = 0x00000001, +}; + +enum jme_txcs_value { + TXCS_FIFOTH_16QW = 0x000000C0, + TXCS_FIFOTH_12QW = 0x00000080, + TXCS_FIFOTH_8QW = 0x00000040, + TXCS_FIFOTH_4QW = 0x00000000, + + TXCS_DMASIZE_64B = 0x00000000, + TXCS_DMASIZE_128B = 0x00000010, + TXCS_DMASIZE_256B = 0x00000020, + TXCS_DMASIZE_512B = 0x00000030, + + TXCS_SELECT_QUEUE0 = 0x00000000, + TXCS_SELECT_QUEUE1 = 0x00010000, + TXCS_SELECT_QUEUE2 = 0x00020000, + TXCS_SELECT_QUEUE3 = 0x00030000, + TXCS_SELECT_QUEUE4 = 0x00040000, + TXCS_SELECT_QUEUE5 = 0x00050000, + TXCS_SELECT_QUEUE6 = 0x00060000, + TXCS_SELECT_QUEUE7 = 0x00070000, + + TXCS_DEFAULT = TXCS_FIFOTH_4QW | + TXCS_BURST, +}; + +#define JME_TX_DISABLE_TIMEOUT 10 /* 10 msec */ + +/* + * TX MAC Control/Status Bits + */ +enum jme_txmcs_bit_masks { + TXMCS_IFG2 = 0xC0000000, + TXMCS_IFG1 = 0x30000000, + TXMCS_TTHOLD = 0x00000300, + TXMCS_FBURST = 0x00000080, + TXMCS_CARRIEREXT = 0x00000040, + TXMCS_DEFER = 0x00000020, + TXMCS_BACKOFF = 0x00000010, + TXMCS_CARRIERSENSE = 0x00000008, + TXMCS_COLLISION = 0x00000004, + TXMCS_CRC = 0x00000002, + TXMCS_PADDING = 0x00000001, +}; + +enum jme_txmcs_values { + TXMCS_IFG2_6_4 = 0x00000000, + TXMCS_IFG2_8_5 = 0x40000000, + TXMCS_IFG2_10_6 = 0x80000000, + TXMCS_IFG2_12_7 = 0xC0000000, + + TXMCS_IFG1_8_4 = 0x00000000, + TXMCS_IFG1_12_6 = 0x10000000, + TXMCS_IFG1_16_8 = 0x20000000, + TXMCS_IFG1_20_10 = 0x30000000, + + TXMCS_TTHOLD_1_8 = 0x00000000, + TXMCS_TTHOLD_1_4 = 0x00000100, + TXMCS_TTHOLD_1_2 = 0x00000200, + TXMCS_TTHOLD_FULL = 0x00000300, + + TXMCS_DEFAULT = TXMCS_IFG2_8_5 | + TXMCS_IFG1_16_8 | + TXMCS_TTHOLD_FULL | + TXMCS_DEFER | + TXMCS_CRC | + TXMCS_PADDING, +}; + +enum jme_txpfc_bits_masks { + TXPFC_VLAN_TAG = 0xFFFF0000, + TXPFC_VLAN_EN = 0x00008000, + TXPFC_PF_EN = 0x00000001, +}; + +enum 
jme_txtrhd_bits_masks { + TXTRHD_TXPEN = 0x80000000, + TXTRHD_TXP = 0x7FFFFF00, + TXTRHD_TXREN = 0x00000080, + TXTRHD_TXRL = 0x0000007F, +}; + +enum jme_txtrhd_shifts { + TXTRHD_TXP_SHIFT = 8, + TXTRHD_TXRL_SHIFT = 0, +}; + +/* + * RX Control/Status Bits + */ +enum jme_rxcs_bit_masks { + /* FIFO full threshold for transmitting Tx Pause Packet */ + RXCS_FIFOTHTP = 0x30000000, + /* FIFO threshold for processing next packet */ + RXCS_FIFOTHNP = 0x0C000000, + RXCS_DMAREQSZ = 0x03000000, /* DMA Request Size */ + RXCS_QUEUESEL = 0x00030000, /* Queue selection */ + RXCS_RETRYGAP = 0x0000F000, /* RX Desc full retry gap */ + RXCS_RETRYCNT = 0x00000F00, /* RX Desc full retry counter */ + RXCS_WAKEUP = 0x00000040, /* Enable receive wakeup packet */ + RXCS_MAGIC = 0x00000020, /* Enable receive magic packet */ + RXCS_SHORT = 0x00000010, /* Enable receive short packet */ + RXCS_ABORT = 0x00000008, /* Enable receive error packet */ + RXCS_QST = 0x00000004, /* Receive queue start */ + RXCS_SUSPEND = 0x00000002, + RXCS_ENABLE = 0x00000001, +}; + +enum jme_rxcs_values { + RXCS_FIFOTHTP_16T = 0x00000000, + RXCS_FIFOTHTP_32T = 0x10000000, + RXCS_FIFOTHTP_64T = 0x20000000, + RXCS_FIFOTHTP_128T = 0x30000000, + + RXCS_FIFOTHNP_16QW = 0x00000000, + RXCS_FIFOTHNP_32QW = 0x04000000, + RXCS_FIFOTHNP_64QW = 0x08000000, + RXCS_FIFOTHNP_128QW = 0x0C000000, + + RXCS_DMAREQSZ_16B = 0x00000000, + RXCS_DMAREQSZ_32B = 0x01000000, + RXCS_DMAREQSZ_64B = 0x02000000, + RXCS_DMAREQSZ_128B = 0x03000000, + + RXCS_QUEUESEL_Q0 = 0x00000000, + RXCS_QUEUESEL_Q1 = 0x00010000, + RXCS_QUEUESEL_Q2 = 0x00020000, + RXCS_QUEUESEL_Q3 = 0x00030000, + + RXCS_RETRYGAP_256ns = 0x00000000, + RXCS_RETRYGAP_512ns = 0x00001000, + RXCS_RETRYGAP_1024ns = 0x00002000, + RXCS_RETRYGAP_2048ns = 0x00003000, + RXCS_RETRYGAP_4096ns = 0x00004000, + RXCS_RETRYGAP_8192ns = 0x00005000, + RXCS_RETRYGAP_16384ns = 0x00006000, + RXCS_RETRYGAP_32768ns = 0x00007000, + + RXCS_RETRYCNT_0 = 0x00000000, + RXCS_RETRYCNT_4 = 0x00000100, + 
RXCS_RETRYCNT_8 = 0x00000200, + RXCS_RETRYCNT_12 = 0x00000300, + RXCS_RETRYCNT_16 = 0x00000400, + RXCS_RETRYCNT_20 = 0x00000500, + RXCS_RETRYCNT_24 = 0x00000600, + RXCS_RETRYCNT_28 = 0x00000700, + RXCS_RETRYCNT_32 = 0x00000800, + RXCS_RETRYCNT_36 = 0x00000900, + RXCS_RETRYCNT_40 = 0x00000A00, + RXCS_RETRYCNT_44 = 0x00000B00, + RXCS_RETRYCNT_48 = 0x00000C00, + RXCS_RETRYCNT_52 = 0x00000D00, + RXCS_RETRYCNT_56 = 0x00000E00, + RXCS_RETRYCNT_60 = 0x00000F00, + + RXCS_DEFAULT = RXCS_FIFOTHTP_128T | + RXCS_FIFOTHNP_128QW | + RXCS_DMAREQSZ_128B | + RXCS_RETRYGAP_256ns | + RXCS_RETRYCNT_32, +}; + +#define JME_RX_DISABLE_TIMEOUT 10 /* 10 msec */ + +/* + * RX MAC Control/Status Bits + */ +enum jme_rxmcs_bits { + RXMCS_ALLFRAME = 0x00000800, + RXMCS_BRDFRAME = 0x00000400, + RXMCS_MULFRAME = 0x00000200, + RXMCS_UNIFRAME = 0x00000100, + RXMCS_ALLMULFRAME = 0x00000080, + RXMCS_MULFILTERED = 0x00000040, + RXMCS_RXCOLLDEC = 0x00000020, + RXMCS_FLOWCTRL = 0x00000008, + RXMCS_VTAGRM = 0x00000004, + RXMCS_PREPAD = 0x00000002, + RXMCS_CHECKSUM = 0x00000001, + + RXMCS_DEFAULT = RXMCS_VTAGRM | + RXMCS_FLOWCTRL | + RXMCS_CHECKSUM, +}; + +/* + * Wakeup Frame setup interface registers + */ +#define WAKEUP_FRAME_NR 8 +#define WAKEUP_FRAME_MASK_DWNR 4 + +enum jme_wfoi_bit_masks { + WFOI_MASK_SEL = 0x00000070, + WFOI_CRC_SEL = 0x00000008, + WFOI_FRAME_SEL = 0x00000007, +}; + +enum jme_wfoi_shifts { + WFOI_MASK_SHIFT = 4, +}; + +/* + * SMI Related definitions + */ +enum jme_smi_bit_mask { + SMI_DATA_MASK = 0xFFFF0000, + SMI_REG_ADDR_MASK = 0x0000F800, + SMI_PHY_ADDR_MASK = 0x000007C0, + SMI_OP_WRITE = 0x00000020, + /* Set to 1, after req done it'll be cleared to 0 */ + SMI_OP_REQ = 0x00000010, + SMI_OP_MDIO = 0x00000008, /* Software access In/Out */ + SMI_OP_MDOE = 0x00000004, /* Software Output Enable */ + SMI_OP_MDC = 0x00000002, /* Software CLK Control */ + SMI_OP_MDEN = 0x00000001, /* Software access Enable */ +}; + +enum jme_smi_bit_shift { + SMI_DATA_SHIFT = 16, + SMI_REG_ADDR_SHIFT = 
11, + SMI_PHY_ADDR_SHIFT = 6, +}; + +static inline uint32_t smi_reg_addr(int x) +{ + return (x << SMI_REG_ADDR_SHIFT) & SMI_REG_ADDR_MASK; +} + +static inline uint32_t smi_phy_addr(int x) +{ + return (x << SMI_PHY_ADDR_SHIFT) & SMI_PHY_ADDR_MASK; +} + +#define JME_PHY_TIMEOUT 100 /* 100 msec */ +#define JME_PHY_REG_NR 32 + +/* + * Global Host Control + */ +enum jme_ghc_bit_mask { + GHC_SWRST = 0x40000000, + GHC_DPX = 0x00000040, + GHC_SPEED = 0x00000030, + GHC_LINK_POLL = 0x00000001, +}; + +enum jme_ghc_speed_val { + GHC_SPEED_10M = 0x00000010, + GHC_SPEED_100M = 0x00000020, + GHC_SPEED_1000M = 0x00000030, +}; + +enum jme_ghc_to_clk { + GHC_TO_CLK_OFF = 0x00000000, + GHC_TO_CLK_GPHY = 0x00400000, + GHC_TO_CLK_PCIE = 0x00800000, + GHC_TO_CLK_INVALID = 0x00C00000, +}; + +enum jme_ghc_txmac_clk { + GHC_TXMAC_CLK_OFF = 0x00000000, + GHC_TXMAC_CLK_GPHY = 0x00100000, + GHC_TXMAC_CLK_PCIE = 0x00200000, + GHC_TXMAC_CLK_INVALID = 0x00300000, +}; + +/* + * Power management control and status register + */ +enum jme_pmcs_bit_masks { + PMCS_WF7DET = 0x80000000, + PMCS_WF6DET = 0x40000000, + PMCS_WF5DET = 0x20000000, + PMCS_WF4DET = 0x10000000, + PMCS_WF3DET = 0x08000000, + PMCS_WF2DET = 0x04000000, + PMCS_WF1DET = 0x02000000, + PMCS_WF0DET = 0x01000000, + PMCS_LFDET = 0x00040000, + PMCS_LRDET = 0x00020000, + PMCS_MFDET = 0x00010000, + PMCS_WF7EN = 0x00008000, + PMCS_WF6EN = 0x00004000, + PMCS_WF5EN = 0x00002000, + PMCS_WF4EN = 0x00001000, + PMCS_WF3EN = 0x00000800, + PMCS_WF2EN = 0x00000400, + PMCS_WF1EN = 0x00000200, + PMCS_WF0EN = 0x00000100, + PMCS_LFEN = 0x00000004, + PMCS_LREN = 0x00000002, + PMCS_MFEN = 0x00000001, +}; + +/* + * Giga PHY Status Registers + */ +enum jme_phy_link_bit_mask { + PHY_LINK_SPEED_MASK = 0x0000C000, + PHY_LINK_DUPLEX = 0x00002000, + PHY_LINK_SPEEDDPU_RESOLVED = 0x00000800, + PHY_LINK_UP = 0x00000400, + PHY_LINK_AUTONEG_COMPLETE = 0x00000200, + PHY_LINK_MDI_STAT = 0x00000040, +}; + +enum jme_phy_link_speed_val { + PHY_LINK_SPEED_10M = 0x00000000, 
+ PHY_LINK_SPEED_100M = 0x00004000, + PHY_LINK_SPEED_1000M = 0x00008000, +}; + +#define JME_SPDRSV_TIMEOUT 500 /* 500 us */ + +/* + * SMB Control and Status + */ +enum jme_smbcsr_bit_mask { + SMBCSR_CNACK = 0x00020000, + SMBCSR_RELOAD = 0x00010000, + SMBCSR_EEPROMD = 0x00000020, + SMBCSR_INITDONE = 0x00000010, + SMBCSR_BUSY = 0x0000000F, +}; + +enum jme_smbintf_bit_mask { + SMBINTF_HWDATR = 0xFF000000, + SMBINTF_HWDATW = 0x00FF0000, + SMBINTF_HWADDR = 0x0000FF00, + SMBINTF_HWRWN = 0x00000020, + SMBINTF_HWCMD = 0x00000010, + SMBINTF_FASTM = 0x00000008, + SMBINTF_GPIOSCL = 0x00000004, + SMBINTF_GPIOSDA = 0x00000002, + SMBINTF_GPIOEN = 0x00000001, +}; + +enum jme_smbintf_vals { + SMBINTF_HWRWN_READ = 0x00000020, + SMBINTF_HWRWN_WRITE = 0x00000000, +}; + +enum jme_smbintf_shifts { + SMBINTF_HWDATR_SHIFT = 24, + SMBINTF_HWDATW_SHIFT = 16, + SMBINTF_HWADDR_SHIFT = 8, +}; + +#define JME_EEPROM_RELOAD_TIMEOUT 2000 /* 2000 msec */ +#define JME_SMB_BUSY_TIMEOUT 20 /* 20 msec */ +#define JME_SMB_LEN 256 +#define JME_EEPROM_MAGIC 0x250 + +/* + * Timer Control/Status Register + */ +enum jme_tmcsr_bit_masks { + TMCSR_SWIT = 0x80000000, + TMCSR_EN = 0x01000000, + TMCSR_CNT = 0x00FFFFFF, +}; + +/* + * General Purpose REG-0 + */ +enum jme_gpreg0_masks { + GPREG0_DISSH = 0xFF000000, + GPREG0_PCIRLMT = 0x00300000, + GPREG0_PCCNOMUTCLR = 0x00040000, + GPREG0_LNKINTPOLL = 0x00001000, + GPREG0_PCCTMR = 0x00000300, + GPREG0_PHYADDR = 0x0000001F, +}; + +enum jme_gpreg0_vals { + GPREG0_DISSH_DW7 = 0x80000000, + GPREG0_DISSH_DW6 = 0x40000000, + GPREG0_DISSH_DW5 = 0x20000000, + GPREG0_DISSH_DW4 = 0x10000000, + GPREG0_DISSH_DW3 = 0x08000000, + GPREG0_DISSH_DW2 = 0x04000000, + GPREG0_DISSH_DW1 = 0x02000000, + GPREG0_DISSH_DW0 = 0x01000000, + GPREG0_DISSH_ALL = 0xFF000000, + + GPREG0_PCIRLMT_8 = 0x00000000, + GPREG0_PCIRLMT_6 = 0x00100000, + GPREG0_PCIRLMT_5 = 0x00200000, + GPREG0_PCIRLMT_4 = 0x00300000, + + GPREG0_PCCTMR_16ns = 0x00000000, + GPREG0_PCCTMR_256ns = 0x00000100, + 
GPREG0_PCCTMR_1us = 0x00000200, + GPREG0_PCCTMR_1ms = 0x00000300, + + GPREG0_PHYADDR_1 = 0x00000001, + + GPREG0_DEFAULT = GPREG0_DISSH_ALL | + GPREG0_PCIRLMT_4 | + GPREG0_PCCTMR_1us | + GPREG0_PHYADDR_1, +}; + +/* + * General Purpose REG-1 + * Note: All these bits defined here are for + * Chip mode revision 0x11 only + */ +enum jme_gpreg1_masks { + GPREG1_INTRDELAYUNIT = 0x00000018, + GPREG1_INTRDELAYENABLE = 0x00000007, +}; + +enum jme_gpreg1_vals { + GPREG1_RSSPATCH = 0x00000040, + GPREG1_HALFMODEPATCH = 0x00000020, + + GPREG1_INTDLYUNIT_16NS = 0x00000000, + GPREG1_INTDLYUNIT_256NS = 0x00000008, + GPREG1_INTDLYUNIT_1US = 0x00000010, + GPREG1_INTDLYUNIT_16US = 0x00000018, + + GPREG1_INTDLYEN_1U = 0x00000001, + GPREG1_INTDLYEN_2U = 0x00000002, + GPREG1_INTDLYEN_3U = 0x00000003, + GPREG1_INTDLYEN_4U = 0x00000004, + GPREG1_INTDLYEN_5U = 0x00000005, + GPREG1_INTDLYEN_6U = 0x00000006, + GPREG1_INTDLYEN_7U = 0x00000007, + + GPREG1_DEFAULT = 0x00000000, +}; + +/* + * Interrupt Status Bits + */ +enum jme_interrupt_bits { + INTR_SWINTR = 0x80000000, + INTR_TMINTR = 0x40000000, + INTR_LINKCH = 0x20000000, + INTR_PAUSERCV = 0x10000000, + INTR_MAGICRCV = 0x08000000, + INTR_WAKERCV = 0x04000000, + INTR_PCCRX0TO = 0x02000000, + INTR_PCCRX1TO = 0x01000000, + INTR_PCCRX2TO = 0x00800000, + INTR_PCCRX3TO = 0x00400000, + INTR_PCCTXTO = 0x00200000, + INTR_PCCRX0 = 0x00100000, + INTR_PCCRX1 = 0x00080000, + INTR_PCCRX2 = 0x00040000, + INTR_PCCRX3 = 0x00020000, + INTR_PCCTX = 0x00010000, + INTR_RX3EMP = 0x00008000, + INTR_RX2EMP = 0x00004000, + INTR_RX1EMP = 0x00002000, + INTR_RX0EMP = 0x00001000, + INTR_RX3 = 0x00000800, + INTR_RX2 = 0x00000400, + INTR_RX1 = 0x00000200, + INTR_RX0 = 0x00000100, + INTR_TX7 = 0x00000080, + INTR_TX6 = 0x00000040, + INTR_TX5 = 0x00000020, + INTR_TX4 = 0x00000010, + INTR_TX3 = 0x00000008, + INTR_TX2 = 0x00000004, + INTR_TX1 = 0x00000002, + INTR_TX0 = 0x00000001, +}; + +static const uint32_t INTR_ENABLE = INTR_LINKCH | + INTR_RX0EMP | + INTR_RX0 | + 
INTR_TX0; + +/* + * PCC Control Registers + */ +enum jme_pccrx_masks { + PCCRXTO_MASK = 0xFFFF0000, + PCCRX_MASK = 0x0000FF00, +}; + +enum jme_pcctx_masks { + PCCTXTO_MASK = 0xFFFF0000, + PCCTX_MASK = 0x0000FF00, + PCCTX_QS_MASK = 0x000000FF, +}; + +enum jme_pccrx_shifts { + PCCRXTO_SHIFT = 16, + PCCRX_SHIFT = 8, +}; + +enum jme_pcctx_shifts { + PCCTXTO_SHIFT = 16, + PCCTX_SHIFT = 8, +}; + +enum jme_pcctx_bits { + PCCTXQ0_EN = 0x00000001, + PCCTXQ1_EN = 0x00000002, + PCCTXQ2_EN = 0x00000004, + PCCTXQ3_EN = 0x00000008, + PCCTXQ4_EN = 0x00000010, + PCCTXQ5_EN = 0x00000020, + PCCTXQ6_EN = 0x00000040, + PCCTXQ7_EN = 0x00000080, +}; + +/* + * Chip Mode Register + */ +enum jme_chipmode_bit_masks { + CM_FPGAVER_MASK = 0xFFFF0000, + CM_CHIPREV_MASK = 0x0000FF00, + CM_CHIPMODE_MASK = 0x0000000F, +}; + +enum jme_chipmode_shifts { + CM_FPGAVER_SHIFT = 16, + CM_CHIPREV_SHIFT = 8, +}; + +/* + * Workaround + */ +static inline int is_buggy250(unsigned short device, unsigned int chiprev) +{ + return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11; +} + +/* + * Read/Write I/O Registers + */ +static inline uint32_t jread32(struct jme_adapter *jme, uint32_t reg) +{ + return readl(jme->regs + reg); +} + +static inline void jwrite32(struct jme_adapter *jme, uint32_t reg, uint32_t val) +{ + writel(val, jme->regs + reg); +} + +static void jwrite32f(struct jme_adapter *jme, uint32_t reg, uint32_t val) +{ + /* + * Read after write should cause flush + */ + writel(val, jme->regs + reg); + readl(jme->regs + reg); +} + +#endif diff --git a/src/include/ipxe/errfile.h b/src/include/ipxe/errfile.h index b8a8a8cc..861fee24 100644 --- a/src/include/ipxe/errfile.h +++ b/src/include/ipxe/errfile.h @@ -125,6 +125,7 @@ FILE_LICENCE ( GPL2_OR_LATER ); #define ERRFILE_igb_main ( ERRFILE_DRIVER | 0x00580000 ) #define ERRFILE_snpnet ( ERRFILE_DRIVER | 0x00590000 ) #define ERRFILE_snponly ( ERRFILE_DRIVER | 0x005a0000 ) +#define ERRFILE_jme ( ERRFILE_DRIVER | 0x005b0000 ) #define ERRFILE_scsi ( 
ERRFILE_DRIVER | 0x00700000 ) #define ERRFILE_arbel ( ERRFILE_DRIVER | 0x00710000 )