From 750c19466a018f089316bccdeb50a55a7b8f0866 Mon Sep 17 00:00:00 2001 From: Itay Gazit Date: Mon, 9 Feb 2009 12:40:48 +0000 Subject: [PATCH] [mtnic] Add multiport support and some minor fixes Signed-off-by: Michael Brown --- src/drivers/net/mtnic.c | 1015 +++++++++++++++++++++------------------ src/drivers/net/mtnic.h | 277 ++++++----- 2 files changed, 697 insertions(+), 595 deletions(-) mode change 100755 => 100644 src/drivers/net/mtnic.c mode change 100755 => 100644 src/drivers/net/mtnic.h diff --git a/src/drivers/net/mtnic.c b/src/drivers/net/mtnic.c old mode 100755 new mode 100644 index d06c24ed..0d84a44c --- a/src/drivers/net/mtnic.c +++ b/src/drivers/net/mtnic.c @@ -30,7 +30,6 @@ * SOFTWARE. * */ -#include #include #include #include @@ -48,28 +47,12 @@ /* - mtnic.c - gPXE driver for Mellanox 10Gig ConnectX EN + mtnic.c - gPXE driver for Mellanox 10Gig ConnectX EN */ -/* (mcb30) - The Mellanox driver used "1" as a universal error code; - * this at least makes it a valid error number. - */ -#define MTNIC_ERROR -EIO - - -/** Set port number to use - * - * 0 - port 1 - * 1 - port 2 - */ -#define MTNIC_PORT_NUM 0 -/* Note: for verbose printing do Make ... DEBUG=mtnic */ - - - /******************************************************************** * @@ -92,7 +75,7 @@ mtnic_alloc_aligned(unsigned int size, void **va, unsigned long *pa, unsigned in { *va = alloc_memblock(size, alignment); if (!*va) { - return MTNIC_ERROR; + return -EADDRINUSE; } *pa = (u32)virt_to_bus(*va); return 0; @@ -106,21 +89,21 @@ mtnic_alloc_aligned(unsigned int size, void **va, unsigned long *pa, unsigned in * */ static int -mtnic_alloc_cmdif(struct mtnic_priv *priv) +mtnic_alloc_cmdif(struct mtnic *mtnic) { - u32 bar = mtnic_pci_dev.dev.bar[0]; + u32 bar = mtnic_pci_dev.dev.bar[0]; - priv->hcr = ioremap(bar + MTNIC_HCR_BASE, MTNIC_HCR_SIZE); - if (!priv->hcr) { - DBG("Couldn't map command register."); - return MTNIC_ERROR; + mtnic->hcr = ioremap(bar + MTNIC_HCR_BASE, MTNIC_HCR_SIZE); + if ( !mtnic->hcr ) { + DBG("Couldn't map command register\n"); + return -EADDRINUSE; } - mtnic_alloc_aligned(PAGE_SIZE, (void *)&priv->cmd.buf, &priv->cmd.mapping, PAGE_SIZE); - if (!priv->cmd.buf) { + mtnic_alloc_aligned(PAGE_SIZE, (void *)&mtnic->cmd.buf, &mtnic->cmd.mapping, PAGE_SIZE); + if ( !mtnic->cmd.buf ) { DBG("Error in allocating buffer for command interface\n"); - return MTNIC_ERROR; + return -EADDRINUSE; } - return 0; + return 0; } /** @@ -129,12 +112,13 @@ mtnic_alloc_cmdif(struct mtnic_priv *priv) static void mtnic_free_io_buffers(struct mtnic_ring *ring) { - int index; + int index; for (; ring->cons <= ring->prod; ++ring->cons) { index = ring->cons & ring->size_mask; - if (ring->iobuf[index]) + if ( ring->iobuf[index] ) { free_iob(ring->iobuf[index]); + } } } @@ -146,7 +130,7 @@ mtnic_free_io_buffers(struct mtnic_ring *ring) * */ static int -mtnic_alloc_iobuf(struct mtnic_priv *priv, struct mtnic_ring *ring, +mtnic_alloc_iobuf(struct mtnic_port *priv, struct mtnic_ring *ring, unsigned int size) { struct mtnic_rx_desc *rx_desc_ptr = ring->buf; @@ -155,25 +139,20 @@ mtnic_alloc_iobuf(struct mtnic_priv *priv, struct mtnic_ring *ring, while ((u32)(ring->prod - ring->cons) < UNITS_BUFFER_SIZE) { index = ring->prod & ring->size_mask; ring->iobuf[index] = alloc_iob(size); - if (!&ring->iobuf[index]) { + if (!ring->iobuf[index]) { if (ring->prod <= (ring->cons + 1)) { - DBG("Error allocating Rx io " - "buffer number %x", index); - /* In case of error freeing io buffer */ - mtnic_free_io_buffers(ring); - return MTNIC_ERROR; + 
DBG ( "Dropping packet, buffer is full\n" ); } - break; } /* Attach io_buffer to descriptor */ rx_desc_ptr = ring->buf + - (sizeof(struct mtnic_rx_desc) * index); + (sizeof(struct mtnic_rx_desc) * index); rx_desc_ptr->data.count = cpu_to_be32(size); - rx_desc_ptr->data.mem_type = priv->fw.mem_type_snoop_be; + rx_desc_ptr->data.mem_type = priv->mtnic->fw.mem_type_snoop_be; rx_desc_ptr->data.addr_l = cpu_to_be32( - virt_to_bus(ring->iobuf[index]->data)); + virt_to_bus(ring->iobuf[index]->data)); ++ ring->prod; } @@ -191,8 +170,8 @@ mtnic_alloc_iobuf(struct mtnic_priv *priv, struct mtnic_ring *ring, * */ static int -mtnic_alloc_ring(struct mtnic_priv *priv, struct mtnic_ring *ring, - u32 size, u16 stride, u16 cq, u8 is_rx) +mtnic_alloc_ring(struct mtnic_port *priv, struct mtnic_ring *ring, + u32 size, u16 stride, u16 cq, u8 is_rx) { unsigned int i; int err; @@ -208,15 +187,15 @@ mtnic_alloc_ring(struct mtnic_priv *priv, struct mtnic_ring *ring, /* Alloc descriptors buffer */ ring->buf_size = ring->size * ((is_rx) ? sizeof(struct mtnic_rx_desc) : - sizeof(struct mtnic_tx_desc)); + sizeof(struct mtnic_tx_desc)); err = mtnic_alloc_aligned(ring->buf_size, (void *)&ring->buf, - &ring->dma, PAGE_SIZE); - if (err) { + &ring->dma, PAGE_SIZE); + if (err) { DBG("Failed allocating descriptor ring sizeof %x\n", ring->buf_size); - return MTNIC_ERROR; + return -EADDRINUSE; } - memset(ring->buf, 0, ring->buf_size); + memset(ring->buf, 0, ring->buf_size); DBG("Allocated %s ring (addr:%p) - buf:%p size:%x" "buf_size:%x dma:%lx\n", @@ -227,11 +206,11 @@ mtnic_alloc_ring(struct mtnic_priv *priv, struct mtnic_ring *ring, if (is_rx) { /* RX ring */ /* Alloc doorbell */ err = mtnic_alloc_aligned(sizeof(struct mtnic_cq_db_record), - (void *)&ring->db, &ring->db_dma, 32); + (void *)&ring->db, &ring->db_dma, 32); if (err) { DBG("Failed allocating Rx ring doorbell record\n"); - free(ring->buf); - return MTNIC_ERROR; + free_memblock(ring->buf, ring->buf_size); + return -EADDRINUSE; } /* ==- Configure Descriptor -== */ @@ -245,14 +224,14 @@ mtnic_alloc_ring(struct mtnic_priv *priv, struct mtnic_ring *ring, /*The last ctrl descriptor is '0' and points to the first one*/ /* Alloc IO_BUFFERS */ - err = mtnic_alloc_iobuf(priv, ring, DEF_IOBUF_SIZE); + err = mtnic_alloc_iobuf ( priv, ring, DEF_IOBUF_SIZE ); if (err) { - DBG("ERROR Allocating io buffer"); - free(ring->buf); - return MTNIC_ERROR; + DBG("ERROR Allocating io buffer\n"); + free_memblock(ring->buf, ring->buf_size); + return -EADDRINUSE; } - } else { /* TX ring */ + } else { /* TX ring */ /* Set initial ownership of all Tx Desc' to SW (1) */ for (i = 0; i < ring->size; i++) { tx_desc = ring->buf + ring->stride * i; @@ -260,17 +239,17 @@ mtnic_alloc_ring(struct mtnic_priv *priv, struct mtnic_ring *ring, } /* DB */ ring->db_offset = cpu_to_be32( - ((u32) priv->fw.tx_offset[priv->port]) << 8); + ((u32) priv->mtnic->fw.tx_offset[priv->port]) << 8); /* Map Tx+CQ doorbells */ DBG("Mapping TxCQ doorbell at offset:0x%x\n", - priv->fw.txcq_db_offset); + priv->mtnic->fw.txcq_db_offset); ring->txcq_db = ioremap(mtnic_pci_dev.dev.bar[2] + - priv->fw.txcq_db_offset, PAGE_SIZE); + priv->mtnic->fw.txcq_db_offset, PAGE_SIZE); if (!ring->txcq_db) { DBG("Couldn't map txcq doorbell, aborting...\n"); - free(ring->buf); - return MTNIC_ERROR; + free_memblock(ring->buf, ring->buf_size); + return -EADDRINUSE; } } @@ -287,7 +266,7 @@ mtnic_alloc_ring(struct mtnic_priv *priv, struct mtnic_ring *ring, */ static int mtnic_alloc_cq(struct net_device *dev, int num, struct mtnic_cq *cq, - u8 
is_rx, u32 size, u32 offset_ind) + u8 is_rx, u32 size, u32 offset_ind) { int err ; unsigned int i; @@ -301,24 +280,24 @@ mtnic_alloc_cq(struct net_device *dev, int num, struct mtnic_cq *cq, /* Alloc doorbell */ err = mtnic_alloc_aligned(sizeof(struct mtnic_cq_db_record), - (void *)&cq->db, &cq->db_dma, 32); + (void *)&cq->db, &cq->db_dma, 32); if (err) { DBG("Failed allocating CQ doorbell record\n"); - return MTNIC_ERROR; + return -EADDRINUSE; } memset(cq->db, 0, sizeof(struct mtnic_cq_db_record)); /* Alloc CQEs buffer */ cq->buf_size = size * sizeof(struct mtnic_cqe); err = mtnic_alloc_aligned(cq->buf_size, - (void *)&cq->buf, &cq->dma, PAGE_SIZE); + (void *)&cq->buf, &cq->dma, PAGE_SIZE); if (err) { DBG("Failed allocating CQ buffer\n"); - free(cq->db); - return MTNIC_ERROR; + free_memblock(cq->db, sizeof(struct mtnic_cq_db_record)); + return -EADDRINUSE; } - memset(cq->buf, 0, cq->buf_size); - DBG("Allocated CQ (addr:%p) - size:%x buf:%p buf_size:%x " + memset(cq->buf, 0, cq->buf_size); + DBG("Allocated CQ (addr:%p) - size:%x buf:%p buf_size:%x " "dma:%lx db:%p db_dma:%lx\n" "cqn offset:%x \n", cq, cq->size, cq->buf, cq->buf_size, cq->dma, cq->db, @@ -344,19 +323,20 @@ mtnic_alloc_cq(struct net_device *dev, int num, struct mtnic_cq *cq, unsigned int mtnic_alloc_resources(struct net_device *dev) { - struct mtnic_priv *priv = netdev_priv(dev); - int err; + struct mtnic_port *priv = netdev_priv(dev); + int err; int cq_ind = 0; - int cq_offset = priv->fw.cq_offset; + int cq_offset = priv->mtnic->fw.cq_offset; /* Alloc 1st CQ */ - err = mtnic_alloc_cq(dev, cq_ind, &priv->cq[cq_ind], 1 /* RX */, + err = mtnic_alloc_cq(dev, cq_ind, &priv->cq[cq_ind], 1 /* RX */, UNITS_BUFFER_SIZE, cq_offset + cq_ind); if (err) { DBG("Failed allocating Rx CQ\n"); - return MTNIC_ERROR; + return -EADDRINUSE; } + /* Alloc RX */ err = mtnic_alloc_ring(priv, &priv->rx_ring, UNITS_BUFFER_SIZE, sizeof(struct mtnic_rx_desc), cq_ind, /* RX */1); @@ -365,7 +345,8 @@ mtnic_alloc_resources(struct net_device *dev) goto cq0_error; } - ++cq_ind; + + ++cq_ind; /* alloc 2nd CQ */ err = mtnic_alloc_cq(dev, cq_ind, &priv->cq[cq_ind], 0 /* TX */, @@ -386,17 +367,18 @@ mtnic_alloc_resources(struct net_device *dev) return 0; cq1_error: - free(priv->cq[1].buf); - free(priv->cq[1].db); + free_memblock(priv->cq[1].buf, priv->cq[1].buf_size); + free_memblock(priv->cq[1].db, sizeof(struct mtnic_cq_db_record)); + rx_error: - free(priv->rx_ring.buf); - free(priv->rx_ring.db); + free_memblock(priv->rx_ring.buf, priv->rx_ring.buf_size); + free_memblock(priv->rx_ring.db, sizeof(struct mtnic_cq_db_record)); mtnic_free_io_buffers(&priv->rx_ring); cq0_error: - free(priv->cq[0].buf); - free(priv->cq[0].db); + free_memblock(priv->cq[0].buf, priv->cq[0].buf_size); + free_memblock(priv->cq[0].db, sizeof(struct mtnic_cq_db_record)); - return MTNIC_ERROR; + return -EADDRINUSE; } @@ -406,35 +388,35 @@ cq0_error: * Note: EQ is not used by the driver but must be allocated */ static int -mtnic_alloc_eq(struct mtnic_priv *priv) +mtnic_alloc_eq(struct mtnic *mtnic) { int err; unsigned int i; struct mtnic_eqe *eqe_desc = NULL; /* Allocating doorbell */ - priv->eq_db = ioremap(mtnic_pci_dev.dev.bar[2] + - priv->fw.eq_db_offset, sizeof(u32)); - if (!priv->eq_db) { + mtnic->eq_db = ioremap(mtnic_pci_dev.dev.bar[2] + + mtnic->fw.eq_db_offset, sizeof(u32)); + if (!mtnic->eq_db) { DBG("Couldn't map EQ doorbell, aborting...\n"); - return MTNIC_ERROR; + return -EADDRINUSE; } /* Allocating buffer */ - priv->eq.size = NUM_EQES; - priv->eq.buf_size = priv->eq.size * 
sizeof(struct mtnic_eqe); - err = mtnic_alloc_aligned(priv->eq.buf_size, (void *)&priv->eq.buf, - &priv->eq.dma, PAGE_SIZE); + mtnic->eq.size = NUM_EQES; + mtnic->eq.buf_size = mtnic->eq.size * sizeof(struct mtnic_eqe); + err = mtnic_alloc_aligned(mtnic->eq.buf_size, (void *)&mtnic->eq.buf, + &mtnic->eq.dma, PAGE_SIZE); if (err) { DBG("Failed allocating EQ buffer\n"); - iounmap(priv->eq_db); - return MTNIC_ERROR; + iounmap(mtnic->eq_db); + return -EADDRINUSE; } - memset(priv->eq.buf, 0, priv->eq.buf_size); + memset(mtnic->eq.buf, 0, mtnic->eq.buf_size); - for (i = 0; i < priv->eq.size; i++) - eqe_desc = priv->eq.buf + (sizeof(struct mtnic_eqe) * i); - eqe_desc->own |= MTNIC_BIT_EQE_OWN; + for (i = 0; i < mtnic->eq.size; i++) + eqe_desc = mtnic->eq.buf + (sizeof(struct mtnic_eqe) * i); + eqe_desc->own |= MTNIC_BIT_EQE_OWN; mdelay(20); return 0; @@ -459,32 +441,32 @@ mtnic_alloc_eq(struct mtnic_priv *priv) * *********************************************************************/ static inline int -cmdif_go_bit(struct mtnic_priv *priv) +cmdif_go_bit(struct mtnic *mtnic) { - struct mtnic_if_cmd_reg *hcr = priv->hcr; + struct mtnic_if_cmd_reg *hcr = mtnic->hcr; u32 status; int i; for (i = 0; i < TBIT_RETRIES; i++) { status = be32_to_cpu(readl(&hcr->status_go_opcode)); if ((status & MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_T_BIT)) == - (priv->cmd.tbit << MTNIC_BC_OFF(MTNIC_MASK_CMD_REG_T_BIT))) { + (mtnic->cmd.tbit << MTNIC_BC_OFF(MTNIC_MASK_CMD_REG_T_BIT))) { /* Read expected t-bit - now return go-bit value */ return status & MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_GO_BIT); } } DBG("Invalid tbit after %d retries!\n", TBIT_RETRIES); - return 1; /* Return busy... */ + return -EBUSY; /* Return busy... */ } /* Base Command interface */ static int -mtnic_cmd(struct mtnic_priv *priv, void *in_imm, +mtnic_cmd(struct mtnic *mtnic, void *in_imm, void *out_imm, u32 in_modifier, u16 op) { - struct mtnic_if_cmd_reg *hcr = priv->hcr; + struct mtnic_if_cmd_reg *hcr = mtnic->hcr; int err = 0; u32 out_param_h = 0; u32 out_param_l = 0; @@ -498,43 +480,43 @@ mtnic_cmd(struct mtnic_priv *priv, void *in_imm, token++; - if (cmdif_go_bit(priv)) { + if ( cmdif_go_bit ( mtnic ) ) { DBG("GO BIT BUSY:%p.\n", hcr + 6); - err = MTNIC_ERROR; + err = -EBUSY; goto out; } if (in_imm) { in_param_h = *((u32*)in_imm); in_param_l = *((u32*)in_imm + 1); } else { - in_param_l = cpu_to_be32(priv->cmd.mapping); + in_param_l = cpu_to_be32(mtnic->cmd.mapping); } - out_param_l = cpu_to_be32(priv->cmd.mapping); + out_param_l = cpu_to_be32(mtnic->cmd.mapping); /* writing to MCR */ - writel(in_param_h, &hcr->in_param_h); - writel(in_param_l, &hcr->in_param_l); - writel((u32) cpu_to_be32(in_modifier), &hcr->input_modifier); - writel(out_param_h, &hcr->out_param_h); - writel(out_param_l, &hcr->out_param_l); - writel((u32)cpu_to_be32(token << 16), &hcr->token); + writel(in_param_h, &hcr->in_param_h); + writel(in_param_l, &hcr->in_param_l); + writel((u32) cpu_to_be32(in_modifier), &hcr->input_modifier); + writel(out_param_h, &hcr->out_param_h); + writel(out_param_l, &hcr->out_param_l); + writel((u32)cpu_to_be32(token << 16), &hcr->token); wmb(); /* flip toggle bit before each write to the HCR */ - priv->cmd.tbit = !priv->cmd.tbit; - writel((u32) + mtnic->cmd.tbit = !mtnic->cmd.tbit; + writel( ( u32 ) cpu_to_be32(MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_GO_BIT) | - (priv->cmd.tbit << MTNIC_BC_OFF(MTNIC_MASK_CMD_REG_T_BIT)) | op), + ( mtnic->cmd.tbit << MTNIC_BC_OFF ( MTNIC_MASK_CMD_REG_T_BIT ) ) | op ), &hcr->status_go_opcode); - while (cmdif_go_bit(priv) && (timeout 
<= GO_BIT_TIMEOUT)) { - mdelay(1); + while ( cmdif_go_bit ( mtnic ) && ( timeout <= GO_BIT_TIMEOUT ) ) { + mdelay ( 1 ); ++timeout; } - if (cmdif_go_bit(priv)) { + if ( cmdif_go_bit ( mtnic ) ) { DBG("Command opcode:0x%x token:0x%x TIMEOUT.\n", op, token); - err = MTNIC_ERROR; + err = -EBUSY; goto out; } @@ -544,10 +526,10 @@ mtnic_cmd(struct mtnic_priv *priv, void *in_imm, } status = be32_to_cpu((u32)readl(&hcr->status_go_opcode)) >> 24; - /*DBG("Command opcode:0x%x token:0x%x returned:0x%lx\n", - op, token, status);*/ if (status) { + DBG("Command opcode:0x%x token:0x%x returned:0x%x\n", + op, token, status); return status; } @@ -557,12 +539,12 @@ out: /* MAP PAGES wrapper */ static int -mtnic_map_cmd(struct mtnic_priv *priv, u16 op, struct mtnic_pages pages) +mtnic_map_cmd(struct mtnic *mtnic, u16 op, struct mtnic_pages pages) { - unsigned int j; + unsigned int j; u32 addr; unsigned int len; - u32 *page_arr = priv->cmd.buf; + u32 *page_arr = mtnic->cmd.buf; int nent = 0; int err = 0; @@ -576,23 +558,23 @@ mtnic_map_cmd(struct mtnic_priv *priv, u16 op, struct mtnic_pages pages) if (addr & (PAGE_MASK)) { DBG("Got FW area not aligned to %d (%llx/%x)\n", PAGE_SIZE, (u64) addr, len); - return MTNIC_ERROR; + return -EADDRINUSE; } /* Function maps each PAGE seperately */ for (j = 0; j < len; j+= PAGE_SIZE) { page_arr[nent * 4 + 3] = cpu_to_be32(addr + j); if (++nent == MTNIC_MAILBOX_SIZE / 16) { - err = mtnic_cmd(priv, NULL, NULL, nent, op); + err = mtnic_cmd(mtnic, NULL, NULL, nent, op); if (err) - return MTNIC_ERROR; - nent = 0; + return -EIO; + nent = 0; } } - if (nent) - err = mtnic_cmd(priv, NULL, NULL, nent, op); - + if (nent) { + err = mtnic_cmd(mtnic, NULL, NULL, nent, op); + } return err; } @@ -602,45 +584,44 @@ mtnic_map_cmd(struct mtnic_priv *priv, u16 op, struct mtnic_pages pages) * Query FW */ static int -mtnic_QUERY_FW(struct mtnic_priv *priv) +mtnic_QUERY_FW ( struct mtnic *mtnic ) { int err; - struct mtnic_if_query_fw_out_mbox *cmd = priv->cmd.buf; + struct mtnic_if_query_fw_out_mbox *cmd = mtnic->cmd.buf; - err = mtnic_cmd(priv, NULL, NULL, 0, MTNIC_IF_CMD_QUERY_FW); + err = mtnic_cmd(mtnic, NULL, NULL, 0, MTNIC_IF_CMD_QUERY_FW); if (err) - return MTNIC_ERROR; + return -EIO; /* Get FW and interface versions */ - priv->fw_ver = ((u64) be16_to_cpu(cmd->rev_maj) << 32) | - ((u64) be16_to_cpu(cmd->rev_min) << 16) | - (u64) be16_to_cpu(cmd->rev_smin); - priv->fw.ifc_rev = be16_to_cpu(cmd->ifc_rev); + mtnic->fw_ver = ((u64) be16_to_cpu(cmd->rev_maj) << 32) | + ((u64) be16_to_cpu(cmd->rev_min) << 16) | + (u64) be16_to_cpu(cmd->rev_smin); + mtnic->fw.ifc_rev = be16_to_cpu(cmd->ifc_rev); /* Get offset for internal error reports (debug) */ - priv->fw.err_buf.offset = be64_to_cpu(cmd->err_buf_start); - priv->fw.err_buf.size = be32_to_cpu(cmd->err_buf_size); + mtnic->fw.err_buf.offset = be64_to_cpu(cmd->err_buf_start); + mtnic->fw.err_buf.size = be32_to_cpu(cmd->err_buf_size); - DBG("Error buf offset is %llx\n", priv->fw.err_buf.offset); + DBG("Error buf offset is %llx\n", mtnic->fw.err_buf.offset); /* Get number of required FW (4k) pages */ - priv->fw.fw_pages.num = be16_to_cpu(cmd->fw_pages); + mtnic->fw.fw_pages.num = be16_to_cpu(cmd->fw_pages); return 0; } static int -mtnic_OPEN_NIC(struct mtnic_priv *priv) +mtnic_OPEN_NIC(struct mtnic *mtnic) { - - struct mtnic_if_open_nic_in_mbox *open_nic = priv->cmd.buf; + struct mtnic_if_open_nic_in_mbox *open_nic = mtnic->cmd.buf; u32 extra_pages[2] = {0}; int err; memset(open_nic, 0, sizeof *open_nic); - /* port 1 */ + /* port 1 */ 
open_nic->log_rx_p1 = 0; open_nic->log_cq_p1 = 1; @@ -656,46 +637,47 @@ mtnic_OPEN_NIC(struct mtnic_priv *priv) open_nic->steer_p2 = MTNIC_IF_STEER_RSS; /* MAC + VLAN - leave reserved */ - err = mtnic_cmd(priv, NULL, extra_pages, 0, MTNIC_IF_CMD_OPEN_NIC); - priv->fw.extra_pages.num = be32_to_cpu(*(extra_pages+1)); - DBG("Extra pages num is %x\n", priv->fw.extra_pages.num); + err = mtnic_cmd(mtnic, NULL, extra_pages, 0, MTNIC_IF_CMD_OPEN_NIC); + + mtnic->fw.extra_pages.num = be32_to_cpu(*(extra_pages+1)); + DBG("Extra pages num is %x\n", mtnic->fw.extra_pages.num); return err; } static int -mtnic_CONFIG_RX(struct mtnic_priv *priv) +mtnic_CONFIG_RX(struct mtnic *mtnic) { struct mtnic_if_config_rx_in_imm config_rx; memset(&config_rx, 0, sizeof config_rx); - return mtnic_cmd(priv, &config_rx, NULL, 0, MTNIC_IF_CMD_CONFIG_RX); + return mtnic_cmd(mtnic, &config_rx, NULL, 0, MTNIC_IF_CMD_CONFIG_RX); } static int -mtnic_CONFIG_TX(struct mtnic_priv *priv) +mtnic_CONFIG_TX(struct mtnic *mtnic) { struct mtnic_if_config_send_in_imm config_tx; config_tx.enph_gpf = 0; - return mtnic_cmd(priv, &config_tx, NULL, 0, MTNIC_IF_CMD_CONFIG_TX); + return mtnic_cmd(mtnic, &config_tx, NULL, 0, MTNIC_IF_CMD_CONFIG_TX); } static int -mtnic_HEART_BEAT(struct mtnic_priv *priv, u32 *link_state) +mtnic_HEART_BEAT(struct mtnic_port *priv, u32 *link_state) { struct mtnic_if_heart_beat_out_imm heart_beat; int err; u32 flags; - err = mtnic_cmd(priv, NULL, &heart_beat, 0, MTNIC_IF_CMD_HEART_BEAT); + err = mtnic_cmd(priv->mtnic, NULL, &heart_beat, 0, MTNIC_IF_CMD_HEART_BEAT); if (!err) { flags = be32_to_cpu(heart_beat.flags); if (flags & MTNIC_BC_MASK(MTNIC_MASK_HEAR_BEAT_INT_ERROR)) { DBG("Internal error detected\n"); - return MTNIC_ERROR; + return -EIO; } *link_state = flags & - ~((u32) MTNIC_BC_MASK(MTNIC_MASK_HEAR_BEAT_INT_ERROR)); + ~((u32) MTNIC_BC_MASK(MTNIC_MASK_HEAR_BEAT_INT_ERROR)); } return err; } @@ -706,31 +688,31 @@ mtnic_HEART_BEAT(struct mtnic_priv *priv, u32 *link_state) */ static int -mtnic_SET_PORT_DEFAULT_RING(struct mtnic_priv *priv, u8 port, u16 ring) +mtnic_SET_PORT_DEFAULT_RING(struct mtnic_port *priv, u8 port, u16 ring) { struct mtnic_if_set_port_default_ring_in_imm def_ring; memset(&def_ring, 0, sizeof(def_ring)); def_ring.ring = ring; - return mtnic_cmd(priv, &def_ring, NULL, port + 1, + return mtnic_cmd(priv->mtnic, &def_ring, NULL, port + 1, MTNIC_IF_CMD_SET_PORT_DEFAULT_RING); } static int -mtnic_CONFIG_PORT_RSS_STEER(struct mtnic_priv *priv, int port) +mtnic_CONFIG_PORT_RSS_STEER(struct mtnic_port *priv, int port) { - memset(priv->cmd.buf, 0, PAGE_SIZE); - return mtnic_cmd(priv, NULL, NULL, port + 1, - MTNIC_IF_CMD_CONFIG_PORT_RSS_STEER); + memset(priv->mtnic->cmd.buf, 0, PAGE_SIZE); + return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1, + MTNIC_IF_CMD_CONFIG_PORT_RSS_STEER); } static int -mtnic_SET_PORT_RSS_INDIRECTION(struct mtnic_priv *priv, int port) +mtnic_SET_PORT_RSS_INDIRECTION(struct mtnic_port *priv, int port) { - memset(priv->cmd.buf, 0, PAGE_SIZE); - return mtnic_cmd(priv, NULL, NULL, port + 1, - MTNIC_IF_CMD_SET_PORT_RSS_INDIRECTION); + memset(priv->mtnic->cmd.buf, 0, PAGE_SIZE); + return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1, + MTNIC_IF_CMD_SET_PORT_RSS_INDIRECTION); } @@ -738,89 +720,89 @@ mtnic_SET_PORT_RSS_INDIRECTION(struct mtnic_priv *priv, int port) * Config commands */ static int -mtnic_CONFIG_CQ(struct mtnic_priv *priv, int port, - u16 cq_ind, struct mtnic_cq *cq) +mtnic_CONFIG_CQ(struct mtnic_port *priv, int port, + u16 cq_ind, struct mtnic_cq *cq) { - struct 
mtnic_if_config_cq_in_mbox *config_cq = priv->cmd.buf; + struct mtnic_if_config_cq_in_mbox *config_cq = priv->mtnic->cmd.buf; memset(config_cq, 0, sizeof *config_cq); config_cq->cq = cq_ind; config_cq->size = fls(UNITS_BUFFER_SIZE - 1); config_cq->offset = ((cq->dma) & (PAGE_MASK)) >> 6; config_cq->db_record_addr_l = cpu_to_be32(cq->db_dma); - config_cq->page_address[1] = cpu_to_be32(cq->dma); + config_cq->page_address[1] = cpu_to_be32(cq->dma); DBG("config cq address: %x dma_address: %lx" - "offset: %d size %d index: %d " + "offset: %d size %d index: %d\n" , config_cq->page_address[1],cq->dma, config_cq->offset, config_cq->size, config_cq->cq ); - return mtnic_cmd(priv, NULL, NULL, port + 1, - MTNIC_IF_CMD_CONFIG_CQ); + return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1, + MTNIC_IF_CMD_CONFIG_CQ); } static int -mtnic_CONFIG_TX_RING(struct mtnic_priv *priv, u8 port, - u16 ring_ind, struct mtnic_ring *ring) +mtnic_CONFIG_TX_RING(struct mtnic_port *priv, u8 port, + u16 ring_ind, struct mtnic_ring *ring) { - struct mtnic_if_config_send_ring_in_mbox *config_tx_ring = priv->cmd.buf; + struct mtnic_if_config_send_ring_in_mbox *config_tx_ring = priv->mtnic->cmd.buf; memset(config_tx_ring, 0, sizeof *config_tx_ring); config_tx_ring->ring = cpu_to_be16(ring_ind); config_tx_ring->size = fls(UNITS_BUFFER_SIZE - 1); config_tx_ring->cq = cpu_to_be16(ring->cq); config_tx_ring->page_address[1] = cpu_to_be32(ring->dma); - return mtnic_cmd(priv, NULL, NULL, port + 1, + return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1, MTNIC_IF_CMD_CONFIG_TX_RING); } static int -mtnic_CONFIG_RX_RING(struct mtnic_priv *priv, u8 port, - u16 ring_ind, struct mtnic_ring *ring) +mtnic_CONFIG_RX_RING(struct mtnic_port *priv, u8 port, + u16 ring_ind, struct mtnic_ring *ring) { - struct mtnic_if_config_rx_ring_in_mbox *config_rx_ring = priv->cmd.buf; + struct mtnic_if_config_rx_ring_in_mbox *config_rx_ring = priv->mtnic->cmd.buf; memset(config_rx_ring, 0, sizeof *config_rx_ring); config_rx_ring->ring = ring_ind; - MTNIC_BC_PUT(config_rx_ring->stride_size, fls(UNITS_BUFFER_SIZE - 1), - MTNIC_MASK_CONFIG_RX_RING_SIZE); - MTNIC_BC_PUT(config_rx_ring->stride_size, 1, - MTNIC_MASK_CONFIG_RX_RING_STRIDE); + MTNIC_BC_PUT(config_rx_ring->stride_size, fls(UNITS_BUFFER_SIZE - 1), + MTNIC_MASK_CONFIG_RX_RING_SIZE); + MTNIC_BC_PUT(config_rx_ring->stride_size, 1, + MTNIC_MASK_CONFIG_RX_RING_STRIDE); config_rx_ring->cq = cpu_to_be16(ring->cq); config_rx_ring->db_record_addr_l = cpu_to_be32(ring->db_dma); - DBG("Config RX ring starting at address:%lx\n", ring->dma); + DBG("Config RX ring starting at address:%lx\n", ring->dma); config_rx_ring->page_address[1] = cpu_to_be32(ring->dma); - return mtnic_cmd(priv, NULL, NULL, port + 1, - MTNIC_IF_CMD_CONFIG_RX_RING); + return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1, + MTNIC_IF_CMD_CONFIG_RX_RING); } static int -mtnic_CONFIG_EQ(struct mtnic_priv *priv) +mtnic_CONFIG_EQ(struct mtnic *mtnic) { - struct mtnic_if_config_eq_in_mbox *eq = priv->cmd.buf; + struct mtnic_if_config_eq_in_mbox *eq = mtnic->cmd.buf; - if (priv->eq.dma & (PAGE_MASK)) { + if (mtnic->eq.dma & (PAGE_MASK)) { DBG("misalligned eq buffer:%lx\n", - priv->eq.dma); - return MTNIC_ERROR; - } + mtnic->eq.dma); + return -EADDRINUSE; + } - memset(eq, 0, sizeof *eq); - MTNIC_BC_PUT(eq->offset, priv->eq.dma >> 6, MTNIC_MASK_CONFIG_EQ_OFFSET); - MTNIC_BC_PUT(eq->size, fls(priv->eq.size - 1) - 1, MTNIC_MASK_CONFIG_EQ_SIZE); + memset(eq, 0, sizeof *eq); + MTNIC_BC_PUT(eq->offset, mtnic->eq.dma >> 6, MTNIC_MASK_CONFIG_EQ_OFFSET); + 
MTNIC_BC_PUT(eq->size, fls(mtnic->eq.size - 1) - 1, MTNIC_MASK_CONFIG_EQ_SIZE); MTNIC_BC_PUT(eq->int_vector, 0, MTNIC_MASK_CONFIG_EQ_INT_VEC); - eq->page_address[1] = cpu_to_be32(priv->eq.dma); + eq->page_address[1] = cpu_to_be32(mtnic->eq.dma); - return mtnic_cmd(priv, NULL, NULL, 0, MTNIC_IF_CMD_CONFIG_EQ); + return mtnic_cmd(mtnic, NULL, NULL, 0, MTNIC_IF_CMD_CONFIG_EQ); } static int -mtnic_SET_RX_RING_ADDR(struct mtnic_priv *priv, u8 port, u64* mac) +mtnic_SET_RX_RING_ADDR(struct mtnic_port *priv, u8 port, u64* mac) { struct mtnic_if_set_rx_ring_addr_in_imm ring_addr; u32 modifier = ((u32) port + 1) << 16; @@ -830,63 +812,64 @@ mtnic_SET_RX_RING_ADDR(struct mtnic_priv *priv, u8 port, u64* mac) ring_addr.mac_31_0 = cpu_to_be32(*mac & 0xffffffff); ring_addr.mac_47_32 = cpu_to_be16((*mac >> 32) & 0xffff); ring_addr.flags_vlan_id |= cpu_to_be16( - MTNIC_BC_MASK(MTNIC_MASK_SET_RX_RING_ADDR_BY_MAC)); + MTNIC_BC_MASK(MTNIC_MASK_SET_RX_RING_ADDR_BY_MAC)); - return mtnic_cmd(priv, &ring_addr, NULL, modifier, MTNIC_IF_CMD_SET_RX_RING_ADDR); + return mtnic_cmd(priv->mtnic, &ring_addr, NULL, modifier, MTNIC_IF_CMD_SET_RX_RING_ADDR); } static int -mtnic_SET_PORT_STATE(struct mtnic_priv *priv, u8 port, u8 state) +mtnic_SET_PORT_STATE(struct mtnic_port *priv, u8 port, u8 state) { struct mtnic_if_set_port_state_in_imm port_state; port_state.state = state ? cpu_to_be32( - MTNIC_BC_MASK(MTNIC_MASK_CONFIG_PORT_STATE)) : 0; + MTNIC_BC_MASK(MTNIC_MASK_CONFIG_PORT_STATE)) : 0; port_state.reserved = 0; - return mtnic_cmd(priv, &port_state, NULL, port + 1, + return mtnic_cmd(priv->mtnic, &port_state, NULL, port + 1, MTNIC_IF_CMD_SET_PORT_STATE); } static int -mtnic_SET_PORT_MTU(struct mtnic_priv *priv, u8 port, u16 mtu) +mtnic_SET_PORT_MTU(struct mtnic_port *priv, u8 port, u16 mtu) { struct mtnic_if_set_port_mtu_in_imm set_mtu; memset(&set_mtu, 0, sizeof(set_mtu)); set_mtu.mtu = cpu_to_be16(mtu); - return mtnic_cmd(priv, &set_mtu, NULL, port + 1, - MTNIC_IF_CMD_SET_PORT_MTU); + return mtnic_cmd(priv->mtnic, &set_mtu, NULL, port + 1, + MTNIC_IF_CMD_SET_PORT_MTU); } - +/* static int -mtnic_CONFIG_PORT_VLAN_FILTER(struct mtnic_priv *priv, int port) +mtnic_CONFIG_PORT_VLAN_FILTER(struct mtnic_port *priv, int port) { - struct mtnic_if_config_port_vlan_filter_in_mbox *vlan_filter = priv->cmd.buf; + struct mtnic_if_config_port_vlan_filter_in_mbox *vlan_filter = priv->mtnic->cmd.buf; - /* When no vlans are configured we disable the filter - * (i.e., pass all vlans) because we ignore them anyhow */ + // When no vlans are configured we disable the filter + // (i.e., pass all vlans) because we ignore them anyhow memset(vlan_filter, 0xff, sizeof(*vlan_filter)); - return mtnic_cmd(priv, NULL, NULL, port + 1, - MTNIC_IF_CMD_CONFIG_PORT_VLAN_FILTER); + return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1, + MTNIC_IF_CMD_CONFIG_PORT_VLAN_FILTER); } +*/ static int -mtnic_RELEASE_RESOURCE(struct mtnic_priv *priv, u8 port, u8 type, u8 index) +mtnic_RELEASE_RESOURCE(struct mtnic_port *priv, u8 port, u8 type, u8 index) { struct mtnic_if_release_resource_in_imm rel; memset(&rel, 0, sizeof rel); rel.index = index; rel.type = type; - return mtnic_cmd(priv, - &rel, NULL, (type == MTNIC_IF_RESOURCE_TYPE_EQ) ? - 0 : port + 1, MTNIC_IF_CMD_RELEASE_RESOURCE); + return mtnic_cmd ( priv->mtnic, + &rel, NULL, ( type == MTNIC_IF_RESOURCE_TYPE_EQ ) ? 
+ 0 : port + 1, MTNIC_IF_CMD_RELEASE_RESOURCE ); } static int -mtnic_QUERY_CAP(struct mtnic_priv *priv, u8 index, u8 mod, u64 *result) +mtnic_QUERY_CAP(struct mtnic *mtnic, u8 index, u8 mod, u64 *result) { struct mtnic_if_query_cap_in_imm cap; u32 out_imm[2]; @@ -895,7 +878,7 @@ mtnic_QUERY_CAP(struct mtnic_priv *priv, u8 index, u8 mod, u64 *result) memset(&cap, 0, sizeof cap); cap.cap_index = index; cap.cap_modifier = mod; - err = mtnic_cmd(priv, &cap, &out_imm, 0, MTNIC_IF_CMD_QUERY_CAP); + err = mtnic_cmd(mtnic, &cap, &out_imm, 0, MTNIC_IF_CMD_QUERY_CAP); *((u32*)result) = be32_to_cpu(*(out_imm+1)); *((u32*)result + 1) = be32_to_cpu(*out_imm); @@ -907,28 +890,38 @@ mtnic_QUERY_CAP(struct mtnic_priv *priv, u8 index, u8 mod, u64 *result) #define DO_QUERY_CAP(cap, mod, var) \ - err = mtnic_QUERY_CAP(priv, cap, mod, &result); \ + err = mtnic_QUERY_CAP(mtnic, cap, mod, &result);\ if (err) \ return err; \ (var) = result static int -mtnic_query_cap(struct mtnic_priv *priv) +mtnic_query_num_ports(struct mtnic *mtnic) +{ + int err = 0; + u64 result; + + DO_QUERY_CAP(MTNIC_IF_CAP_NUM_PORTS, 0, mtnic->fw.num_ports); + + return 0; +} + +static int +mtnic_query_mac(struct mtnic *mtnic) { int err = 0; int i; - u64 result; + u64 result; - DO_QUERY_CAP(MTNIC_IF_CAP_NUM_PORTS, 0, priv->fw.num_ports); - for (i = 0; i < priv->fw.num_ports; i++) { - DO_QUERY_CAP(MTNIC_IF_CAP_DEFAULT_MAC, i + 1, priv->fw.mac[i]); + for (i = 0; i < mtnic->fw.num_ports; i++) { + DO_QUERY_CAP(MTNIC_IF_CAP_DEFAULT_MAC, i + 1, mtnic->fw.mac[i]); } return 0; } static int -mtnic_query_offsets(struct mtnic_priv *priv) +mtnic_query_offsets(struct mtnic *mtnic) { int err; int i; @@ -936,18 +929,18 @@ mtnic_query_offsets(struct mtnic_priv *priv) DO_QUERY_CAP(MTNIC_IF_CAP_MEM_KEY, MTNIC_IF_MEM_TYPE_SNOOP, - priv->fw.mem_type_snoop_be); - priv->fw.mem_type_snoop_be = cpu_to_be32(priv->fw.mem_type_snoop_be); - DO_QUERY_CAP(MTNIC_IF_CAP_TX_CQ_DB_OFFSET, 0, priv->fw.txcq_db_offset); - DO_QUERY_CAP(MTNIC_IF_CAP_EQ_DB_OFFSET, 0, priv->fw.eq_db_offset); + mtnic->fw.mem_type_snoop_be); + mtnic->fw.mem_type_snoop_be = cpu_to_be32(mtnic->fw.mem_type_snoop_be); + DO_QUERY_CAP(MTNIC_IF_CAP_TX_CQ_DB_OFFSET, 0, mtnic->fw.txcq_db_offset); + DO_QUERY_CAP(MTNIC_IF_CAP_EQ_DB_OFFSET, 0, mtnic->fw.eq_db_offset); - for (i = 0; i < priv->fw.num_ports; i++) { - DO_QUERY_CAP(MTNIC_IF_CAP_CQ_OFFSET, i + 1, priv->fw.cq_offset); - DO_QUERY_CAP(MTNIC_IF_CAP_TX_OFFSET, i + 1, priv->fw.tx_offset[i]); - DO_QUERY_CAP(MTNIC_IF_CAP_RX_OFFSET, i + 1, priv->fw.rx_offset[i]); - DBG("--> Port %d CQ offset:0x%x\n", i, priv->fw.cq_offset); - DBG("--> Port %d Tx offset:0x%x\n", i, priv->fw.tx_offset[i]); - DBG("--> Port %d Rx offset:0x%x\n", i, priv->fw.rx_offset[i]); + for (i = 0; i < mtnic->fw.num_ports; i++) { + DO_QUERY_CAP(MTNIC_IF_CAP_CQ_OFFSET, i + 1, mtnic->fw.cq_offset); + DO_QUERY_CAP(MTNIC_IF_CAP_TX_OFFSET, i + 1, mtnic->fw.tx_offset[i]); + DO_QUERY_CAP(MTNIC_IF_CAP_RX_OFFSET, i + 1, mtnic->fw.rx_offset[i]); + DBG("--> Port %d CQ offset:0x%x\n", i, mtnic->fw.cq_offset); + DBG("--> Port %d Tx offset:0x%x\n", i, mtnic->fw.tx_offset[i]); + DBG("--> Port %d Rx offset:0x%x\n", i, mtnic->fw.rx_offset[i]); } mdelay(20); @@ -977,11 +970,12 @@ mtnic_query_offsets(struct mtnic_priv *priv) * Reset device */ void -mtnic_reset(void) +mtnic_reset ( void ) { - void *reset = ioremap(mtnic_pci_dev.dev.bar[0] + MTNIC_RESET_OFFSET, 4); - writel(cpu_to_be32(1), reset); - iounmap(reset); + void *reset = ioremap ( mtnic_pci_dev.dev.bar[0] + MTNIC_RESET_OFFSET, + 4 ); + writel ( 
cpu_to_be32 ( 1 ), reset ); + iounmap ( reset ); } @@ -1019,18 +1013,18 @@ mtnic_init_pci(struct pci_device *dev) int err; /* save bars */ - DBG("bus=%d devfn=0x%x", dev->bus, dev->devfn); + DBG("bus=%d devfn=0x%x\n", dev->bus, dev->devfn); for (i = 0; i < 6; ++i) { mtnic_pci_dev.dev.bar[i] = - pci_bar_start(dev, PCI_BASE_ADDRESS_0 + (i << 2)); + pci_bar_start(dev, PCI_BASE_ADDRESS_0 + (i << 2)); DBG("bar[%d]= 0x%08lx \n", i, mtnic_pci_dev.dev.bar[i]); } /* save config space */ for (i = 0; i < 64; ++i) { err = pci_read_config_dword(dev, i << 2, - &mtnic_pci_dev.dev. - dev_config_space[i]); + &mtnic_pci_dev.dev. + dev_config_space[i]); if (err) { DBG("Can not save configuration space"); return err; @@ -1039,144 +1033,140 @@ mtnic_init_pci(struct pci_device *dev) mtnic_pci_dev.dev.dev = dev; - return 0; + return 0; } /** * Initial hardware */ static inline -int mtnic_init_card(struct net_device *dev) +int mtnic_init_card(struct mtnic *mtnic) { - struct mtnic_priv *priv = netdev_priv(dev); int err = 0; - /* Set state */ - priv->state = CARD_DOWN; - /* Set port */ - priv->port = MTNIC_PORT_NUM; - - /* Alloc command interface */ - err = mtnic_alloc_cmdif(priv); + /* Alloc command interface */ + err = mtnic_alloc_cmdif ( mtnic ); if (err) { - DBG("Failed to init command interface, aborting.\n"); - return MTNIC_ERROR; + DBG("Failed to init command interface, aborting\n"); + return -EADDRINUSE; } - - /** - * Bring up HW - */ - err = mtnic_QUERY_FW(priv); + /** + * Bring up HW + */ + err = mtnic_QUERY_FW ( mtnic ); if (err) { - DBG("QUERY_FW command failed, aborting.\n"); + DBG("QUERY_FW command failed, aborting\n"); goto cmd_error; } - - DBG("Command interface revision:%d\n", priv->fw.ifc_rev); + DBG("Command interface revision:%d\n", mtnic->fw.ifc_rev); /* Allocate memory for FW and start it */ - err = mtnic_map_cmd(priv, MTNIC_IF_CMD_MAP_FW, priv->fw.fw_pages); + err = mtnic_map_cmd(mtnic, MTNIC_IF_CMD_MAP_FW, mtnic->fw.fw_pages); if (err) { DBG("Eror In MAP_FW\n"); - if (priv->fw.fw_pages.buf) - free(priv->fw.fw_pages.buf); + if (mtnic->fw.fw_pages.buf) + ufree((intptr_t)mtnic->fw.fw_pages.buf); goto cmd_error; } /* Run firmware */ - err = mtnic_cmd(priv, NULL, NULL, 0, MTNIC_IF_CMD_RUN_FW); + err = mtnic_cmd(mtnic, NULL, NULL, 0, MTNIC_IF_CMD_RUN_FW); if (err) { DBG("Eror In RUN FW\n"); goto map_fw_error; } - DBG("FW version:%d.%d.%d\n", - (u16) (priv->fw_ver >> 32), - (u16) ((priv->fw_ver >> 16) & 0xffff), - (u16) (priv->fw_ver & 0xffff)); + DBG("FW version:%d.%d.%d\n", + (u16) (mtnic->fw_ver >> 32), + (u16) ((mtnic->fw_ver >> 16) & 0xffff), + (u16) (mtnic->fw_ver & 0xffff)); - /* Get device information */ - err = mtnic_query_cap(priv); + /* Query num ports */ + err = mtnic_query_num_ports(mtnic); if (err) { - DBG("Insufficient resources, aborting.\n"); + DBG("Insufficient resources, aborting\n"); goto map_fw_error; } /* Open NIC */ - err = mtnic_OPEN_NIC(priv); + err = mtnic_OPEN_NIC(mtnic); if (err) { - DBG("Failed opening NIC, aborting.\n"); + DBG("Failed opening NIC, aborting\n"); goto map_fw_error; } /* Allocate and map pages worksace */ - err = mtnic_map_cmd(priv, MTNIC_IF_CMD_MAP_PAGES, priv->fw.extra_pages); + err = mtnic_map_cmd(mtnic, MTNIC_IF_CMD_MAP_PAGES, mtnic->fw.extra_pages); if (err) { - DBG("Couldn't allocate %x FW extra pages, aborting.\n", - priv->fw.extra_pages.num); - if (priv->fw.extra_pages.buf) - free(priv->fw.extra_pages.buf); + DBG("Couldn't allocate %x FW extra pages, aborting\n", + mtnic->fw.extra_pages.num); + if (mtnic->fw.extra_pages.buf) + 
ufree((intptr_t)mtnic->fw.extra_pages.buf); + goto map_fw_error; + } + + + /* Get device information */ + err = mtnic_query_mac(mtnic); + if (err) { + DBG("Insufficient resources in quesry mac, aborting\n"); goto map_fw_error; } /* Get device offsets */ - err = mtnic_query_offsets(priv); + err = mtnic_query_offsets(mtnic); if (err) { - DBG("Failed retrieving resource offests, aborting.\n"); - free(priv->fw.extra_pages.buf); + DBG("Failed retrieving resource offests, aborting\n"); + ufree((intptr_t)mtnic->fw.extra_pages.buf); goto map_extra_error; } - /* Alloc EQ */ - err = mtnic_alloc_eq(priv); + /* Alloc EQ */ + err = mtnic_alloc_eq(mtnic); if (err) { DBG("Failed init shared resources. error: %d\n", err); goto map_extra_error; - } + } /* Configure HW */ - err = mtnic_CONFIG_EQ(priv); + err = mtnic_CONFIG_EQ(mtnic); if (err) { DBG("Failed configuring EQ\n"); goto eq_error; } - err = mtnic_CONFIG_RX(priv); + err = mtnic_CONFIG_RX(mtnic); if (err) { DBG("Failed Rx configuration\n"); goto eq_error; } - err = mtnic_CONFIG_TX(priv); + err = mtnic_CONFIG_TX(mtnic); if (err) { DBG("Failed Tx configuration\n"); goto eq_error; } - DBG("Activating port:%d\n", MTNIC_PORT_NUM + 1); - - priv->state = CARD_INITIALIZED; return 0; eq_error: - iounmap(priv->eq_db); - free(priv->eq.buf); + iounmap(mtnic->eq_db); + free_memblock(mtnic->eq.buf, mtnic->eq.buf_size); map_extra_error: - free(priv->fw.extra_pages.buf); + ufree((intptr_t)mtnic->fw.extra_pages.buf); map_fw_error: - free(priv->fw.fw_pages.buf); + ufree((intptr_t)mtnic->fw.fw_pages.buf); cmd_error: - iounmap(priv->hcr); - free(priv->cmd.buf); - free(priv); + iounmap(mtnic->hcr); + free_memblock(mtnic->cmd.buf, PAGE_SIZE); - return MTNIC_ERROR; + return -EADDRINUSE; } @@ -1196,7 +1186,7 @@ cmd_error: * * ********************************************************************/ -void mtnic_process_tx_cq(struct mtnic_priv *priv, struct net_device *dev, +void mtnic_process_tx_cq(struct mtnic_port *priv, struct net_device *dev, struct mtnic_cq *cq) { struct mtnic_cqe *cqe = cq->buf; @@ -1210,10 +1200,10 @@ void mtnic_process_tx_cq(struct mtnic_priv *priv, struct net_device *dev, /* Owner bit changes every round */ while (XNOR(cqe->op_tr_own & MTNIC_BIT_CQ_OWN, cq->last & cq->size)) { netdev_tx_complete (dev, ring->iobuf[index]); - ++cq->last; - index = cq->last & (cq->size-1); + ++cq->last; + index = cq->last & (cq->size-1); cqe = &cq->buf[index]; - } + } /* Update consumer index */ cq->db->update_ci = cpu_to_be32(cq->last & 0xffffff); @@ -1222,13 +1212,16 @@ void mtnic_process_tx_cq(struct mtnic_priv *priv, struct net_device *dev, } -int mtnic_process_rx_cq(struct mtnic_priv *priv, struct net_device *dev, struct mtnic_cq *cq) +int mtnic_process_rx_cq(struct mtnic_port *priv, + struct net_device *dev, + struct mtnic_cq *cq) { struct mtnic_cqe *cqe; struct mtnic_ring *ring = &priv->rx_ring; int index; int err; struct io_buffer *rx_iob; + unsigned int length; /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx @@ -1254,16 +1247,21 @@ int mtnic_process_rx_cq(struct mtnic_priv *priv, struct net_device *dev, struct /* * Packet is OK - process it. */ - rx_iob = ring->iobuf[index]; - iob_put(rx_iob, DEF_IOBUF_SIZE); + length = be32_to_cpu(cqe->byte_cnt); + rx_iob = ring->iobuf[index]; + iob_put(rx_iob, length); + /* Add this packet to the receive queue. 
*/ netdev_rx(dev, rx_iob); - ring->iobuf[index] = NULL; + ring->iobuf[index] = NULL; next: ++cq->last; index = cq->last & (cq->size-1); cqe = &cq->buf[index]; + + + } /* Update consumer index */ @@ -1275,7 +1273,7 @@ next: err = mtnic_alloc_iobuf(priv, &priv->rx_ring, DEF_IOBUF_SIZE); if (err) { DBG("ERROR Allocating io buffer"); - return MTNIC_ERROR; + return -EADDRINUSE; } } @@ -1308,24 +1306,27 @@ next: static int mtnic_open(struct net_device *dev) { - struct mtnic_priv *priv = netdev_priv(dev); + struct mtnic_port *priv = netdev_priv(dev); + int err = 0; struct mtnic_ring *ring; struct mtnic_cq *cq; int cq_ind = 0; u32 dev_link_state; + int link_check; - DBG("starting port:%d", priv->port); + DBG("starting port:%d, MAC Address: 0x%12llx\n", + priv->port, priv->mtnic->fw.mac[priv->port]); /* Alloc and configure CQs, TX, RX */ - err = mtnic_alloc_resources(dev); + err = mtnic_alloc_resources ( dev ); if (err) { DBG("Error allocating resources\n"); - return MTNIC_ERROR; + return -EADDRINUSE; } /* Pass CQs configuration to HW */ - for (cq_ind = 0; cq_ind < NUM_CQS; ++cq_ind) { + for (cq_ind = 0; cq_ind < NUM_CQS; ++cq_ind) { cq = &priv->cq[cq_ind]; err = mtnic_CONFIG_CQ(priv, priv->port, cq_ind, cq); if (err) { @@ -1334,24 +1335,25 @@ mtnic_open(struct net_device *dev) if (cq_ind) goto cq_error; else - return MTNIC_ERROR; - } + goto allocation_error; + } /* Update consumer index */ cq->db->update_ci = cpu_to_be32(cq->last & 0xffffff); } + /* Pass Tx configuration to HW */ ring = &priv->tx_ring; - err = mtnic_CONFIG_TX_RING(priv, priv->port, 0, ring); + err = mtnic_CONFIG_TX_RING(priv, priv->port, 0, ring); if (err) { DBG("Failed configuring Tx ring:0\n"); - goto cq_error; + goto cq_error; } /* Pass RX configuration to HW */ - ring = &priv->rx_ring; - err = mtnic_CONFIG_RX_RING(priv, priv->port, 0, ring); + ring = &priv->rx_ring; + err = mtnic_CONFIG_RX_RING(priv, priv->port, 0, ring); if (err) { DBG("Failed configuring Rx ring:0\n"); goto tx_error; @@ -1366,6 +1368,7 @@ mtnic_open(struct net_device *dev) goto rx_error; } + /* Set the port default ring to ring 0 */ err = mtnic_SET_PORT_DEFAULT_RING(priv, priv->port, 0); if (err) { @@ -1374,7 +1377,7 @@ mtnic_open(struct net_device *dev) } /* Set Mac address */ - err = mtnic_SET_RX_RING_ADDR(priv, priv->port, &priv->fw.mac[priv->port]); + err = mtnic_SET_RX_RING_ADDR(priv, priv->port, &priv->mtnic->fw.mac[priv->port]); if (err) { DBG("Failed setting default MAC address\n"); goto rx_error; @@ -1388,11 +1391,14 @@ mtnic_open(struct net_device *dev) } /* Configure VLAN filter */ + /* By adding this function, The second port won't accept packets err = mtnic_CONFIG_PORT_VLAN_FILTER(priv, priv->port); - if (err) { + if (err) { DBG("Failed configuring VLAN filter\n"); goto rx_error; } + */ + /* Bring up physical link */ err = mtnic_SET_PORT_STATE(priv, priv->port, 1); @@ -1400,29 +1406,52 @@ mtnic_open(struct net_device *dev) DBG("Failed bringing up port\n"); goto rx_error; } - mdelay(300); /* Let link state stabilize if cable was connected */ + /* PORT IS UP */ priv->state = CARD_UP; - err = mtnic_HEART_BEAT(priv, &dev_link_state); - if (err) { - DBG("Failed getting device link state\n"); - return MTNIC_ERROR; + + /* Checking Link is up */ + DBG ( "Checking if link is up\n" ); + + + for ( link_check = 0; link_check < CHECK_LINK_TIMES; link_check ++ ) { + /* Let link state stabilize if cable was connected */ + mdelay ( DELAY_LINK_CHECK ); + + err = mtnic_HEART_BEAT(priv, &dev_link_state); + if (err) { + DBG("Failed getting device link state\n"); + 
return -ENETDOWN; + } + + if ( dev_link_state & priv->port ) { + /* Link is up */ + break; + } } - if (!(dev_link_state & 0x3)) { + + + if ( ! ( dev_link_state & 0x3 ) ) { DBG("Link down, check cables and restart\n"); - return MTNIC_ERROR; + netdev_link_down ( dev ); + return -ENETDOWN; } + DBG ( "Link is up!\n" ); + + /* Mark as link up */ + netdev_link_up ( dev ); + return 0; - rx_error: err = mtnic_RELEASE_RESOURCE(priv, priv->port, - MTNIC_IF_RESOURCE_TYPE_RX_RING, 0); + MTNIC_IF_RESOURCE_TYPE_RX_RING, 0); tx_error: err |= mtnic_RELEASE_RESOURCE(priv, priv->port, MTNIC_IF_RESOURCE_TYPE_TX_RING, 0); + cq_error: while (cq_ind) { err |= mtnic_RELEASE_RESOURCE(priv, priv->port, @@ -1431,66 +1460,81 @@ cq_error: if (err) DBG("Eror Releasing resources\n"); - return MTNIC_ERROR; +allocation_error: + + free_memblock(priv->tx_ring.buf, priv->tx_ring.buf_size); + iounmap(priv->tx_ring.txcq_db); + free_memblock(priv->cq[1].buf, priv->cq[1].buf_size); + free_memblock(priv->cq[1].db, sizeof(struct mtnic_cq_db_record)); + free_memblock(priv->rx_ring.buf, priv->rx_ring.buf_size); + free_memblock(priv->rx_ring.db, sizeof(struct mtnic_cq_db_record)); + free_memblock(priv->cq[0].buf, priv->cq[0].buf_size); + free_memblock(priv->cq[0].db, sizeof(struct mtnic_cq_db_record)); + + mtnic_free_io_buffers(&priv->rx_ring); + + return -ENETDOWN; } + /** Check if we got completion for receive and transmit and * check the line with heart_bit command */ static void -mtnic_poll(struct net_device *dev) +mtnic_poll ( struct net_device *dev ) { - struct mtnic_priv *priv = netdev_priv(dev); + struct mtnic_port *priv = netdev_priv(dev); struct mtnic_cq *cq; u32 dev_link_state; int err; unsigned int i; - /* In case of an old error then return */ + /* In case of an old error then return */ if (priv->state != CARD_UP) return; /* We do not check the device every call _poll call, - since it will slow it down */ + since it will slow it down */ if ((priv->poll_counter % ROUND_TO_CHECK) == 0) { /* Check device */ err = mtnic_HEART_BEAT(priv, &dev_link_state); if (err) { DBG("Device has internal error\n"); - priv->state = CARD_DOWN; + priv->state = CARD_LINK_DOWN; return; } if (!(dev_link_state & 0x3)) { DBG("Link down, check cables and restart\n"); - priv->state = CARD_DOWN; + priv->state = CARD_LINK_DOWN; return; } } - /* Polling CQ */ for (i = 0; i < NUM_CQS; i++) { cq = &priv->cq[i]; //Passing on the 2 cqs. if (cq->is_rx) { - err = mtnic_process_rx_cq(priv, cq->dev, cq); + err = mtnic_process_rx_cq(priv, cq->dev, cq); if (err) { - priv->state = CARD_DOWN; + priv->state = CARD_LINK_DOWN; DBG(" Error allocating RX buffers\n"); return; } - } else { - mtnic_process_tx_cq(priv, cq->dev, cq); + } else { + mtnic_process_tx_cq(priv, cq->dev, cq); } } ++ priv->poll_counter; } + + static int mtnic_transmit( struct net_device *dev, struct io_buffer *iobuf ) { - struct mtnic_priv *priv = netdev_priv(dev); + struct mtnic_port *priv = netdev_priv(dev); struct mtnic_ring *ring; struct mtnic_tx_desc *tx_desc; struct mtnic_data_seg *data; @@ -1498,34 +1542,34 @@ mtnic_transmit( struct net_device *dev, struct io_buffer *iobuf ) /* In case of an error then return */ if (priv->state != CARD_UP) - return MTNIC_ERROR; + return -ENETDOWN; ring = &priv->tx_ring; - index = ring->prod & ring->size_mask; + index = ring->prod & ring->size_mask; if ((ring->prod - ring->cons) >= ring->size) { DBG("No space left for descriptors!!! 
cons: %x prod: %x\n", ring->cons, ring->prod); mdelay(5); - return MTNIC_ERROR;/* no space left */ + return -EAGAIN;/* no space left */ } - /* get current descriptor */ + /* get current descriptor */ tx_desc = ring->buf + (index * sizeof(struct mtnic_tx_desc)); - /* Prepare ctrl segement */ - tx_desc->ctrl.size_vlan = cpu_to_be32(2); - tx_desc->ctrl.flags = cpu_to_be32(MTNIC_BIT_TX_COMP | - MTNIC_BIT_NO_ICRC); - tx_desc->ctrl.op_own = cpu_to_be32(MTNIC_OPCODE_SEND) | - ((ring->prod & ring->size) ? - cpu_to_be32(MTNIC_BIT_DESC_OWN) : 0); - /* Prepare Data Seg */ data = &tx_desc->data; data->addr_l = cpu_to_be32((u32)virt_to_bus(iobuf->data)); data->count = cpu_to_be32(iob_len(iobuf)); - data->mem_type = priv->fw.mem_type_snoop_be; + data->mem_type = priv->mtnic->fw.mem_type_snoop_be; + + /* Prepare ctrl segement */ + tx_desc->ctrl.size_vlan = cpu_to_be32(2); + tx_desc->ctrl.flags = cpu_to_be32(MTNIC_BIT_TX_COMP | + MTNIC_BIT_NO_ICRC); + tx_desc->ctrl.op_own = cpu_to_be32(MTNIC_OPCODE_SEND) | + ((ring->prod & ring->size) ? + cpu_to_be32(MTNIC_BIT_DESC_OWN) : 0); /* Attach io_buffer */ ring->iobuf[index] = iobuf; @@ -1544,11 +1588,13 @@ mtnic_transmit( struct net_device *dev, struct io_buffer *iobuf ) static void mtnic_close(struct net_device *dev) { - struct mtnic_priv *priv = netdev_priv(dev); + struct mtnic_port *priv = netdev_priv(dev); int err = 0; DBG("Close called for port:%d\n", priv->port); - if (priv->state == CARD_UP) { + if ( ( priv->state == CARD_UP ) || + ( priv->state == CARD_LINK_DOWN ) ) { + /* Disable port */ err |= mtnic_SET_PORT_STATE(priv, priv->port, 0); /* @@ -1566,31 +1612,33 @@ mtnic_close(struct net_device *dev) /* Stop CQs */ err |= mtnic_RELEASE_RESOURCE(priv, priv->port, - MTNIC_IF_RESOURCE_TYPE_CQ, 0); + MTNIC_IF_RESOURCE_TYPE_CQ, 0); err |= mtnic_RELEASE_RESOURCE(priv, priv->port, - MTNIC_IF_RESOURCE_TYPE_CQ, 1); + MTNIC_IF_RESOURCE_TYPE_CQ, 1); if (err) { - DBG("Close reported error %d", err); + DBG("Close reported error %d\n", err); } - /* Free memory */ - free(priv->tx_ring.buf); + mdelay ( 10 ); + + /* free memory */ + free_memblock(priv->tx_ring.buf, priv->tx_ring.buf_size); iounmap(priv->tx_ring.txcq_db); - free(priv->cq[1].buf); - free(priv->cq[1].db); + free_memblock(priv->cq[1].buf, priv->cq[1].buf_size); + free_memblock(priv->cq[1].db, sizeof(struct mtnic_cq_db_record)); + free_memblock(priv->rx_ring.buf, priv->rx_ring.buf_size); + free_memblock(priv->rx_ring.db, sizeof(struct mtnic_cq_db_record)); + free_memblock(priv->cq[0].buf, priv->cq[0].buf_size); + free_memblock(priv->cq[0].db, sizeof(struct mtnic_cq_db_record)); /* Free RX buffers */ mtnic_free_io_buffers(&priv->rx_ring); - free(priv->rx_ring.buf); - free(priv->rx_ring.db); - free(priv->cq[0].buf); - free(priv->cq[0].db); - priv->state = CARD_INITIALIZED; } + priv->state = CARD_INITIALIZED; } @@ -1600,35 +1648,61 @@ mtnic_disable(struct pci_device *pci) { int err; - struct net_device *dev = pci_get_drvdata(pci); - struct mtnic_priv *priv = netdev_priv(dev); + int i; + struct mtnic *mtnic = pci_get_drvdata(pci); - /* Should NOT happen! 
but just in case */ - if (priv->state == CARD_UP) - mtnic_close(dev); - if (priv->state == CARD_INITIALIZED) { - err = mtnic_RELEASE_RESOURCE(priv, 0, - MTNIC_IF_RESOURCE_TYPE_EQ, 0); - DBG("Calling MTNIC_CLOSE command\n"); - err |= mtnic_cmd(priv, NULL, NULL, 0, - MTNIC_IF_CMD_CLOSE_NIC); - if (err) { - DBG("Error Releasing resources %d\n", err); - } + struct net_device *dev; + struct mtnic_port *priv; - free(priv->cmd.buf); - iounmap(priv->hcr); - ufree((intptr_t)priv->fw.fw_pages.buf); - ufree((intptr_t)priv->fw.extra_pages.buf); - free(priv->eq.buf); - iounmap(priv->eq_db); - priv->state = CARD_DOWN; + for ( i = ( mtnic->fw.num_ports - 1 ); i >= 0; i-- ) { + + dev = mtnic->netdev[i]; + + priv = netdev_priv(dev); + + /* Just in case */ + if ( ( priv->state == CARD_UP ) || + ( priv->state == CARD_LINK_DOWN ) ) + mtnic_close ( dev ); } - unregister_netdev(dev); - netdev_nullify(dev); - netdev_put(dev); + /* Releasing EQ */ + priv = netdev_priv ( mtnic->netdev[0] ); + err = mtnic_RELEASE_RESOURCE(priv, 1, + MTNIC_IF_RESOURCE_TYPE_EQ, 0); + + DBG("Calling MTNIC_CLOSE command\n"); + err |= mtnic_cmd(mtnic, NULL, NULL, 0, + MTNIC_IF_CMD_CLOSE_NIC); + if (err) { + DBG("Error Releasing resources %d\n", err); + } + + free_memblock(mtnic->cmd.buf, PAGE_SIZE); + iounmap(mtnic->hcr); + ufree((intptr_t)mtnic->fw.fw_pages.buf); + ufree((intptr_t)mtnic->fw.extra_pages.buf); + free_memblock(mtnic->eq.buf, mtnic->eq.buf_size); + iounmap(mtnic->eq_db); + + + for ( i = ( mtnic->fw.num_ports - 1 ); i >= 0; i-- ) { + dev = mtnic->netdev[i]; + unregister_netdev ( dev ); + netdev_nullify ( dev ); + netdev_put ( dev ); + } + + free ( mtnic ); + + + mtnic_reset (); + mdelay ( 1000 ); + /* Restore config, if we would like to retry booting */ + restore_config (); + + } @@ -1643,11 +1717,11 @@ mtnic_irq(struct net_device *netdev __unused, int enable __unused) /** mtnic net device operations */ static struct net_device_operations mtnic_operations = { - .open = mtnic_open, - .close = mtnic_close, - .transmit = mtnic_transmit, - .poll = mtnic_poll, - .irq = mtnic_irq, + .open = mtnic_open, + .close = mtnic_close, + .transmit = mtnic_transmit, + .poll = mtnic_poll, + .irq = mtnic_irq, }; @@ -1658,102 +1732,119 @@ static struct net_device_operations mtnic_operations = { static int mtnic_probe(struct pci_device *pci, - const struct pci_device_id *id __unused) + const struct pci_device_id *id __unused) { - struct net_device *dev; - struct mtnic_priv *priv; + struct mtnic_port *priv; + struct mtnic *mtnic; int err; u64 mac; - u32 result = 0; - void *dev_id; - int i; + int port_index; - adjust_pci_device(pci); - err = mtnic_init_pci(pci); + adjust_pci_device(pci); + + err = mtnic_init_pci(pci); if (err) { DBG("Error in pci_init\n"); - return MTNIC_ERROR; + return -EIO; } mtnic_reset(); - mdelay(1000); + mdelay(1000); - err = restore_config(); + err = restore_config(); if (err) { - DBG("Error restoring config\n"); + DBG("Error in restoring config\n"); return err; } - /* Checking MTNIC device ID */ - dev_id = ioremap(mtnic_pci_dev.dev.bar[0] + - MTNIC_DEVICE_ID_OFFSET, 4); - result = ntohl(readl(dev_id)); - iounmap(dev_id); - if (result != MTNIC_DEVICE_ID) { - DBG("Wrong Devie ID (0x%x) !!!", result); - return MTNIC_ERROR; + mtnic = zalloc ( sizeof ( *mtnic ) ); + if ( ! 
mtnic ) { + DBG ( "Error Allocating mtnic buffer\n" ); + return -EADDRINUSE; } - /* Initializing net device */ - dev = alloc_etherdev(sizeof(struct mtnic_priv)); - if (dev == NULL) { - DBG("Net device allocation failed\n"); - return MTNIC_ERROR; - } - /* - * Initialize driver private data - */ - priv = netdev_priv(dev); - memset(priv, 0, sizeof(struct mtnic_priv)); - priv->dev = dev; - priv->pdev = pci; - priv->dev->dev = &pci->dev; - /* Attach pci device */ - pci_set_drvdata(pci, priv->dev); - netdev_init(dev, &mtnic_operations); + pci_set_drvdata(pci, mtnic); + + mtnic->pdev = pci; /* Initialize hardware */ - err = mtnic_init_card(dev); + err = mtnic_init_card ( mtnic ); if (err) { DBG("Error in init_card\n"); - return MTNIC_ERROR; + goto err_init_card; } - /* Program the MAC address */ - mac = priv->fw.mac[priv->port]; - printf("Port %d Mac address: 0x%12llx\n", MTNIC_PORT_NUM + 1, mac); - for (i = 0;i < MAC_ADDRESS_SIZE; ++i) { - dev->ll_addr[MAC_ADDRESS_SIZE - i - 1] = mac & 0xFF; - mac = mac >> 8; + for ( port_index = 0; port_index < mtnic->fw.num_ports; port_index ++ ) { + /* Initializing net device */ + mtnic->netdev[port_index] = alloc_etherdev( sizeof ( struct mtnic_port ) ); + if ( mtnic->netdev[port_index] == NULL ) { + DBG("Net device allocation failed\n"); + goto err_alloc_mtnic; + } + + /* + * Initialize driver private data + */ + + mtnic->netdev[port_index]->dev = &pci->dev; + priv = netdev_priv ( mtnic->netdev[port_index] ); + memset ( priv, 0, sizeof ( struct mtnic_port ) ); + priv->mtnic = mtnic; + priv->netdev = mtnic->netdev[port_index]; + + /* Attach pci device */ + netdev_init(mtnic->netdev[port_index], &mtnic_operations); + + /* Set port number */ + priv->port = port_index; + + /* Set state */ + priv->state = CARD_DOWN; } - /* Mark as link up; we don't yet handle link state */ - netdev_link_up ( dev ); - if (register_netdev(dev)) { - DBG("Netdev registration failed\n"); - return MTNIC_ERROR; + int mac_idx; + for ( port_index = 0; port_index < mtnic->fw.num_ports; port_index ++ ) { + priv = netdev_priv ( mtnic->netdev[port_index] ); + /* Program the MAC address */ + mac = priv->mtnic->fw.mac[port_index]; + for (mac_idx = 0; mac_idx < MAC_ADDRESS_SIZE; ++mac_idx) { + mtnic->netdev[port_index]->ll_addr[MAC_ADDRESS_SIZE - mac_idx - 1] = mac & 0xFF; + mac = mac >> 8; + } + + if ( register_netdev ( mtnic->netdev[port_index] ) ) { + DBG("Netdev registration failed\n"); + priv->state = CARD_INITIALIZED; + goto err_alloc_mtnic; + } } return 0; + +err_alloc_mtnic: + free ( mtnic ); +err_init_card: + return -EIO; } - - static struct pci_device_id mtnic_nics[] = { - PCI_ROM(0x15b3, 0x6368, "mtnic", "Mellanox MTNIC driver"), + PCI_ROM ( 0x15b3, 0x6368, "mt25448", "Mellanox ConnectX EN driver" ), + PCI_ROM ( 0x15b3, 0x6372, "mt25458", "Mellanox ConnectX ENt driver" ), + PCI_ROM ( 0x15b3, 0x6750, "mt26448", "Mellanox ConnectX EN GEN2 driver" ), + PCI_ROM ( 0x15b3, 0x675a, "mt26458", "Mellanox ConnectX ENt GEN2 driver" ), }; struct pci_driver mtnic_driver __pci_driver = { .ids = mtnic_nics, .id_count = sizeof(mtnic_nics) / sizeof(mtnic_nics[0]), - .probe = mtnic_probe, + .probe = mtnic_probe, .remove = mtnic_disable, }; diff --git a/src/drivers/net/mtnic.h b/src/drivers/net/mtnic.h old mode 100755 new mode 100644 index 0987be7a..57a7b98c --- a/src/drivers/net/mtnic.h +++ b/src/drivers/net/mtnic.h @@ -38,24 +38,28 @@ /* * Device setup */ - -/* - Note port number can be changed under mtnic.c ! 
-*/ #define MTNIC_MAX_PORTS 2 +#define MTNIC_PORT1 0 +#define MTNIC_PORT2 1 #define NUM_TX_RINGS 1 #define NUM_RX_RINGS 1 #define NUM_CQS (NUM_RX_RINGS + NUM_TX_RINGS) #define GO_BIT_TIMEOUT 6000 #define TBIT_RETRIES 100 #define UNITS_BUFFER_SIZE 8 /* can be configured to 4/8/16 */ -#define MAX_GAP_PROD_CONS (UNITS_BUFFER_SIZE/4) -#define DEF_MTU 1600 -#define DEF_IOBUF_SIZE 1600 +#define MAX_GAP_PROD_CONS ( UNITS_BUFFER_SIZE / 4 ) +#define ETH_DEF_LEN 1540 /* 40 bytes used by the card */ +#define ETH_FCS_LEN 14 +#define DEF_MTU ETH_DEF_LEN + ETH_FCS_LEN +#define DEF_IOBUF_SIZE ETH_DEF_LEN + #define MAC_ADDRESS_SIZE 6 #define NUM_EQES 16 #define ROUND_TO_CHECK 0x400 +#define DELAY_LINK_CHECK 300 +#define CHECK_LINK_TIMES 7 + #define XNOR(x,y) (!(x) == !(y)) #define dma_addr_t unsigned long @@ -108,7 +112,7 @@ typedef enum mtnic_if_cmd { MTNIC_IF_CMD_CONFIG_RX = 0x005, /* general receive configuration */ MTNIC_IF_CMD_CONFIG_TX = 0x006, /* general transmit configuration */ MTNIC_IF_CMD_CONFIG_INT_FREQ = 0x007, /* interrupt timers freq limits */ - MTNIC_IF_CMD_HEART_BEAT = 0x008, /* NOP command testing liveliness */ + MTNIC_IF_CMD_HEART_BEAT = 0x008, /* NOP command testing liveliness */ MTNIC_IF_CMD_CLOSE_NIC = 0x009, /* release memory and stop the NIC */ /* Port commands: */ @@ -119,22 +123,22 @@ typedef enum mtnic_if_cmd { MTNIC_IF_CMD_CONFIG_PORT_VLAN_FILTER = 0x14, /* configure VLAN filter */ MTNIC_IF_CMD_CONFIG_PORT_MCAST_FILTER = 0x15, /* configure mcast filter */ MTNIC_IF_CMD_ENABLE_PORT_MCAST_FILTER = 0x16, /* enable/disable */ - MTNIC_IF_CMD_SET_PORT_MTU = 0x17, /* set port MTU */ + MTNIC_IF_CMD_SET_PORT_MTU = 0x17, /* set port MTU */ MTNIC_IF_CMD_SET_PORT_PROMISCUOUS_MODE = 0x18, /* enable/disable promisc */ MTNIC_IF_CMD_SET_PORT_DEFAULT_RING = 0x19, /* set the default ring */ - MTNIC_IF_CMD_SET_PORT_STATE = 0x1a, /* set link up/down */ - MTNIC_IF_CMD_DUMP_STAT = 0x1b, /* dump statistics */ + MTNIC_IF_CMD_SET_PORT_STATE = 0x1a, /* set link up/down */ + MTNIC_IF_CMD_DUMP_STAT = 0x1b, /* dump statistics */ MTNIC_IF_CMD_ARM_PORT_STATE_EVENT = 0x1c, /* arm the port state event */ /* Ring / Completion queue commands: */ - MTNIC_IF_CMD_CONFIG_CQ = 0x20, /* set up completion queue */ - MTNIC_IF_CMD_CONFIG_RX_RING = 0x21, /* setup Rx ring */ - MTNIC_IF_CMD_SET_RX_RING_ADDR = 0x22, /* set Rx ring filter by address */ + MTNIC_IF_CMD_CONFIG_CQ = 0x20, /* set up completion queue */ + MTNIC_IF_CMD_CONFIG_RX_RING = 0x21, /* setup Rx ring */ + MTNIC_IF_CMD_SET_RX_RING_ADDR = 0x22, /* set Rx ring filter by address */ MTNIC_IF_CMD_SET_RX_RING_MCAST = 0x23, /* set Rx ring mcast filter */ - MTNIC_IF_CMD_ARM_RX_RING_WM = 0x24, /* one-time low-watermark INT */ - MTNIC_IF_CMD_CONFIG_TX_RING = 0x25, /* set up Tx ring */ + MTNIC_IF_CMD_ARM_RX_RING_WM = 0x24, /* one-time low-watermark INT */ + MTNIC_IF_CMD_CONFIG_TX_RING = 0x25, /* set up Tx ring */ MTNIC_IF_CMD_ENFORCE_TX_RING_ADDR = 0x26, /* setup anti spoofing */ - MTNIC_IF_CMD_CONFIG_EQ = 0x27, /* config EQ ring */ + MTNIC_IF_CMD_CONFIG_EQ = 0x27, /* config EQ ring */ MTNIC_IF_CMD_RELEASE_RESOURCE = 0x28, /* release internal ref to resource */ } mtnic_if_cmd_t; @@ -144,15 +148,15 @@ mtnic_if_cmd_t; typedef enum mtnic_if_caps { MTNIC_IF_CAP_MAX_TX_RING_PER_PORT = 0x0, MTNIC_IF_CAP_MAX_RX_RING_PER_PORT = 0x1, - MTNIC_IF_CAP_MAX_CQ_PER_PORT = 0x2, - MTNIC_IF_CAP_NUM_PORTS = 0x3, - MTNIC_IF_CAP_MAX_TX_DESC = 0x4, - MTNIC_IF_CAP_MAX_RX_DESC = 0x5, - MTNIC_IF_CAP_MAX_CQES = 0x6, - MTNIC_IF_CAP_MAX_TX_SG_ENTRIES = 0x7, - MTNIC_IF_CAP_MAX_RX_SG_ENTRIES = 0x8, - 
MTNIC_IF_CAP_MEM_KEY = 0x9, /* key to mem (after map_pages) */ - MTNIC_IF_CAP_RSS_HASH_TYPE = 0xa, /* one of mtnic_if_rss_types_t */ + MTNIC_IF_CAP_MAX_CQ_PER_PORT = 0x2, + MTNIC_IF_CAP_NUM_PORTS = 0x3, + MTNIC_IF_CAP_MAX_TX_DESC = 0x4, + MTNIC_IF_CAP_MAX_RX_DESC = 0x5, + MTNIC_IF_CAP_MAX_CQES = 0x6, + MTNIC_IF_CAP_MAX_TX_SG_ENTRIES = 0x7, + MTNIC_IF_CAP_MAX_RX_SG_ENTRIES = 0x8, + MTNIC_IF_CAP_MEM_KEY = 0x9, /* key to mem (after map_pages) */ + MTNIC_IF_CAP_RSS_HASH_TYPE = 0xa, /* one of mtnic_if_rss_types_t */ MTNIC_IF_CAP_MAX_PORT_UCAST_ADDR = 0xc, MTNIC_IF_CAP_MAX_RING_UCAST_ADDR = 0xd, /* only for ADDR steer */ MTNIC_IF_CAP_MAX_PORT_MCAST_ADDR = 0xe, @@ -164,20 +168,20 @@ typedef enum mtnic_if_caps { MTNIC_IF_CAP_EQ_DB_OFFSET = 0x14, /* offset in bytes for EQ doorbell record */ /* These are per port - using port number from cap modifier field */ - MTNIC_IF_CAP_SPEED = 0x20, - MTNIC_IF_CAP_DEFAULT_MAC = 0x21, - MTNIC_IF_CAP_EQ_OFFSET = 0x22, - MTNIC_IF_CAP_CQ_OFFSET = 0x23, + MTNIC_IF_CAP_SPEED = 0x20, + MTNIC_IF_CAP_DEFAULT_MAC = 0x21, + MTNIC_IF_CAP_EQ_OFFSET = 0x22, + MTNIC_IF_CAP_CQ_OFFSET = 0x23, MTNIC_IF_CAP_TX_OFFSET = 0x24, MTNIC_IF_CAP_RX_OFFSET = 0x25, } mtnic_if_caps_t; typedef enum mtnic_if_steer_types { - MTNIC_IF_STEER_NONE = 0, - MTNIC_IF_STEER_PRIORITY = 1, - MTNIC_IF_STEER_RSS = 2, - MTNIC_IF_STEER_ADDRESS = 3, + MTNIC_IF_STEER_NONE = 0, + MTNIC_IF_STEER_PRIORITY = 1, + MTNIC_IF_STEER_RSS = 2, + MTNIC_IF_STEER_ADDRESS = 3, } mtnic_if_steer_types_t; /** types of memory access modes */ @@ -188,19 +192,12 @@ typedef enum mtnic_if_memory_types { enum { - MTNIC_HCR_BASE = 0x1f000, - MTNIC_HCR_SIZE = 0x0001c, - MTNIC_CLR_INT_SIZE = 0x00008, + MTNIC_HCR_BASE = 0x1f000, + MTNIC_HCR_SIZE = 0x0001c, + MTNIC_CLR_INT_SIZE = 0x00008, }; -#define MELLANOX_VENDOR_ID 0x15b3 -#define MTNIC_DEVICE_ID 0x00a00190 #define MTNIC_RESET_OFFSET 0xF0010 -#define MTNIC_DEVICE_ID_OFFSET 0xF0014 - - - - @@ -265,7 +262,7 @@ struct mtnic_ring { /* Buffers */ u32 buf_size; /* ring buffer size in bytes */ - dma_addr_t dma; + dma_addr_t dma; void *buf; struct io_buffer *iobuf[UNITS_BUFFER_SIZE]; @@ -274,7 +271,7 @@ struct mtnic_ring { u32 db_offset; /* Rx ring only */ - dma_addr_t iobuf_dma; + dma_addr_t iobuf_dma; struct mtnic_rx_db_record *db; dma_addr_t db_dma; }; @@ -351,15 +348,16 @@ struct mtnic_eqe { struct mtnic_eq { u32 size; /* number of EQEs in ring */ - u32 buf_size; /* EQ size in bytes */ + u32 buf_size; /* EQ size in bytes */ void *buf; dma_addr_t dma; }; enum mtnic_state { CARD_DOWN, - CARD_INITIALIZED, - CARD_UP + CARD_INITIALIZED, + CARD_UP, + CARD_LINK_DOWN, }; /* FW */ @@ -375,9 +373,9 @@ struct mtnic_err_buf { struct mtnic_cmd { - void *buf; - unsigned long mapping; - u32 tbit; + void *buf; + unsigned long mapping; + u32 tbit; }; @@ -395,40 +393,52 @@ struct mtnic_txcq_db { * Device private data * */ -struct mtnic_priv { - struct net_device *dev; - struct pci_device *pdev; - u8 port; +struct mtnic { + struct net_device *netdev[MTNIC_MAX_PORTS]; + struct mtnic_if_cmd_reg *hcr; + struct mtnic_cmd cmd; + struct pci_device *pdev; - enum mtnic_state state; - /* Firmware and board info */ - u64 fw_ver; + struct mtnic_eq eq; + u32 *eq_db; + + /* Firmware and board info */ + u64 fw_ver; struct { - struct mtnic_pages fw_pages; - struct mtnic_pages extra_pages; - struct mtnic_err_buf err_buf; - u16 ifc_rev; - u8 num_ports; - u64 mac[MTNIC_MAX_PORTS]; - u16 cq_offset; - u16 tx_offset[MTNIC_MAX_PORTS]; - u16 rx_offset[MTNIC_MAX_PORTS]; - u32 mem_type_snoop_be; - u32 txcq_db_offset; - u32 
eq_db_offset; - } fw; + struct mtnic_pages fw_pages; + struct mtnic_pages extra_pages; + struct mtnic_err_buf err_buf; + u16 ifc_rev; + u8 num_ports; + u64 mac[MTNIC_MAX_PORTS]; + u16 cq_offset; + u16 tx_offset[MTNIC_MAX_PORTS]; + u16 rx_offset[MTNIC_MAX_PORTS]; + u32 mem_type_snoop_be; + u32 txcq_db_offset; + u32 eq_db_offset; + } fw; +}; - struct mtnic_if_cmd_reg *hcr; - struct mtnic_cmd cmd; + + + +struct mtnic_port { + + struct mtnic *mtnic; + u8 port; + + enum mtnic_state state; /* TX, RX, CQs, EQ */ - struct mtnic_ring tx_ring; - struct mtnic_ring rx_ring; - struct mtnic_cq cq[NUM_CQS]; - struct mtnic_eq eq; - u32 *eq_db; - u32 poll_counter; + struct mtnic_ring tx_ring; + struct mtnic_ring rx_ring; + struct mtnic_cq cq[NUM_CQS]; + u32 poll_counter; + struct net_device *netdev; + + }; @@ -492,33 +502,34 @@ struct mtnic_if_query_fw_out_mbox { /* CMD MTNIC_IF_CMD_QUERY_CAP */ struct mtnic_if_query_cap_in_imm { u16 reserved1; - u8 cap_modifier; /* a modifier for the particular capability */ - u8 cap_index; /* the index of the capability queried */ + u8 cap_modifier; /* a modifier for the particular capability */ + u8 cap_index; /* the index of the capability queried */ u32 reserved2; }; /* CMD OPEN_NIC */ struct mtnic_if_open_nic_in_mbox { - u16 reserved1; - u16 mkey; /* number of mem keys for all chip*/ - u32 mkey_entry; /* mem key entries for each key*/ - u8 log_rx_p1; /* log2 rx rings for port1 */ - u8 log_cq_p1; /* log2 cq for port1 */ - u8 log_tx_p1; /* log2 tx rings for port1 */ - u8 steer_p1; /* port 1 steering mode */ - u16 reserved2; - u8 log_vlan_p1; /* log2 vlan per rx port1 */ - u8 log_mac_p1; /* log2 mac per rx port1 */ + u16 reserved1; + u16 mkey; /* number of mem keys for all chip*/ + u32 mkey_entry; /* mem key entries for each key*/ + u8 log_rx_p1; /* log2 rx rings for port1 */ + u8 log_cq_p1; /* log2 cq for port1 */ + u8 log_tx_p1; /* log2 tx rings for port1 */ + u8 steer_p1; /* port 1 steering mode */ + u16 reserved2; + u8 log_vlan_p1; /* log2 vlan per rx port1 */ + u8 log_mac_p1; /* log2 mac per rx port1 */ - u8 log_rx_p2; /* log2 rx rings for port1 */ - u8 log_cq_p2; /* log2 cq for port1 */ - u8 log_tx_p2; /* log2 tx rings for port1 */ - u8 steer_p2; /* port 1 steering mode */ - u16 reserved3; - u8 log_vlan_p2; /* log2 vlan per rx port1 */ - u8 log_mac_p2; /* log2 mac per rx port1 */ + u8 log_rx_p2; /* log2 rx rings for port1 */ + u8 log_cq_p2; /* log2 cq for port1 */ + u8 log_tx_p2; /* log2 tx rings for port1 */ + u8 steer_p2; /* port 1 steering mode */ + u16 reserved3; + u8 log_vlan_p2; /* log2 vlan per rx port1 */ + u8 log_mac_p2; /* log2 mac per rx port1 */ }; + /* CMD CONFIG_RX */ struct mtnic_if_config_rx_in_imm { u16 spkt_size; /* size of small packets interrupts enabled on CQ */ @@ -535,9 +546,9 @@ struct mtnic_if_config_send_in_imm { /* CMD HEART_BEAT */ struct mtnic_if_heart_beat_out_imm { - u32 flags; /* several flags */ + u32 flags; /* several flags */ #define MTNIC_MASK_HEAR_BEAT_INT_ERROR MTNIC_BC(31,1) - u32 reserved; + u32 reserved; }; @@ -547,14 +558,14 @@ struct mtnic_if_heart_beat_out_imm { /* CMD CONFIG_PORT_VLAN_FILTER */ /* in mbox is a 4K bits mask - bit per VLAN */ struct mtnic_if_config_port_vlan_filter_in_mbox { - u64 filter[64]; /* vlans[63:0] sit in filter[0], vlans[127:64] sit in filter[1] .. */ + u64 filter[64]; /* vlans[63:0] sit in filter[0], vlans[127:64] sit in filter[1] .. 
*/ }; /* CMD SET_PORT_MTU */ struct mtnic_if_set_port_mtu_in_imm { u16 reserved1; - u16 mtu; /* The MTU of the port in bytes */ + u16 mtu; /* The MTU of the port in bytes */ u32 reserved2; }; @@ -574,17 +585,17 @@ struct mtnic_if_set_port_state_in_imm { /* CMD CONFIG_CQ */ struct mtnic_if_config_cq_in_mbox { - u8 reserved1; - u8 cq; - u8 size; /* Num CQs is 2^size (size <= 22) */ - u8 offset; /* start address of CQE in first page (11:6) */ - u16 tlast; /* interrupt moderation timer from last completion usec */ + u8 reserved1; + u8 cq; + u8 size; /* Num CQs is 2^size (size <= 22) */ + u8 offset; /* start address of CQE in first page (11:6) */ + u16 tlast; /* interrupt moderation timer from last completion usec */ u8 flags; /* flags */ - u8 int_vector; /* MSI index if MSI is enabled, otherwise reserved */ + u8 int_vector; /* MSI index if MSI is enabled, otherwise reserved */ u16 reserved2; u16 max_cnt; /* interrupt moderation counter */ - u8 page_size; /* each mapped page is 2^(12+page_size) bytes */ - u8 reserved4[3]; + u8 page_size; /* each mapped page is 2^(12+page_size) bytes */ + u8 reserved4[3]; u32 db_record_addr_h; /*physical address of CQ doorbell record */ u32 db_record_addr_l; /*physical address of CQ doorbell record */ u32 page_address[0]; /* 64 bit page addresses of CQ buffer */ @@ -592,21 +603,21 @@ struct mtnic_if_config_cq_in_mbox { /* CMD CONFIG_RX_RING */ struct mtnic_if_config_rx_ring_in_mbox { - u8 reserved1; - u8 ring; /* The ring index (with offset) */ - u8 stride_size; /* stride and size */ + u8 reserved1; + u8 ring; /* The ring index (with offset) */ + u8 stride_size; /* stride and size */ /* Entry size = 16* (2^stride) bytes */ #define MTNIC_MASK_CONFIG_RX_RING_STRIDE MTNIC_BC(4,3) /* Rx ring size is 2^size entries */ #define MTNIC_MASK_CONFIG_RX_RING_SIZE MTNIC_BC(0,4) - u8 flags; /* Bit0 - header separation */ - u8 page_size; /* Each mapped page is 2^(12+page_size) bytes */ - u8 reserved2[2]; - u8 cq; /* CQ associated with this ring */ - u32 db_record_addr_h; - u32 db_record_addr_l; - u32 page_address[0];/* Array of 2^size 64b page descriptor addresses */ - /* Must hold all Rx descriptors + doorbell record. */ + u8 flags; /* Bit0 - header separation */ + u8 page_size; /* Each mapped page is 2^(12+page_size) bytes */ + u8 reserved2[2]; + u8 cq; /* CQ associated with this ring */ + u32 db_record_addr_h; + u32 db_record_addr_l; + u32 page_address[0];/* Array of 2^size 64b page descriptor addresses */ + /* Must hold all Rx descriptors + doorbell record. 
*/ }; /* The modifier for SET_RX_RING_ADDR */ @@ -619,27 +630,27 @@ struct mtnic_if_set_rx_ring_modifier { /* CMD SET_RX_RING_ADDR */ struct mtnic_if_set_rx_ring_addr_in_imm { - u16 mac_47_32; /* UCAST MAC Address bits 47:32 */ + u16 mac_47_32; /* UCAST MAC Address bits 47:32 */ u16 flags_vlan_id; /* MAC/VLAN flags and vlan id */ #define MTNIC_MASK_SET_RX_RING_ADDR_VLAN_ID MTNIC_BC(0,12) #define MTNIC_MASK_SET_RX_RING_ADDR_BY_MAC MTNIC_BC(12,1) #define MTNIC_MASK_SET_RX_RING_ADDR_BY_VLAN MTNIC_BC(13,1) - u32 mac_31_0; /* UCAST MAC Address bits 31:0 */ + u32 mac_31_0; /* UCAST MAC Address bits 31:0 */ }; /* CMD CONFIG_TX_RING */ struct mtnic_if_config_send_ring_in_mbox { - u16 ring; /* The ring index (with offset) */ + u16 ring; /* The ring index (with offset) */ #define MTNIC_MASK_CONFIG_TX_RING_INDEX MTNIC_BC(0,8) - u8 size; /* Tx ring size is 32*2^size bytes */ + u8 size; /* Tx ring size is 32*2^size bytes */ #define MTNIC_MASK_CONFIG_TX_RING_SIZE MTNIC_BC(0,4) - u8 reserved; - u8 page_size; /* Each mapped page is 2^(12+page_size) bytes */ - u8 qos_class; /* The COS used for this Tx */ - u16 cq; /* CQ associated with this ring */ + u8 reserved; + u8 page_size; /* Each mapped page is 2^(12+page_size) bytes */ + u8 qos_class; /* The COS used for this Tx */ + u16 cq; /* CQ associated with this ring */ #define MTNIC_MASK_CONFIG_TX_CQ_INDEX MTNIC_BC(0,8) u32 page_address[0]; /* 64 bit page addresses of descriptor buffer. */ - /* The buffer must accommodate all Tx descriptors */ + /* The buffer must accommodate all Tx descriptors */ }; /* CMD CONFIG_EQ */ @@ -647,9 +658,9 @@ struct mtnic_if_config_eq_in_mbox { u8 reserved1; u8 int_vector; /* MSI index if MSI enabled; otherwise reserved */ #define MTNIC_MASK_CONFIG_EQ_INT_VEC MTNIC_BC(0,6) - u8 size; /* Num CQs is 2^size entries (size <= 22) */ + u8 size; /* Num CQs is 2^size entries (size <= 22) */ #define MTNIC_MASK_CONFIG_EQ_SIZE MTNIC_BC(0,5) - u8 offset; /* Start address of CQE in first page (11:6) */ + u8 offset; /* Start address of CQE in first page (11:6) */ #define MTNIC_MASK_CONFIG_EQ_OFFSET MTNIC_BC(0,6) u8 page_size; /* Each mapped page is 2^(12+page_size) bytes*/ u8 reserved[3];
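
For reference, the receive buffer sizing introduced at the top of this header works out to a small, fixed amount of memory per port. A worked example (illustrative arithmetic only, using the defines above):

/* Worked numbers for the sizing defines (illustrative only):
 *   DEF_MTU           = ETH_DEF_LEN + ETH_FCS_LEN = 1540 + 14 = 1554 bytes
 *   DEF_IOBUF_SIZE    = ETH_DEF_LEN               = 1540 bytes
 *   MAX_GAP_PROD_CONS = UNITS_BUFFER_SIZE / 4     = 8 / 4    = 2
 *
 * Each Rx ring therefore keeps up to UNITS_BUFFER_SIZE * DEF_IOBUF_SIZE
 * ( 8 * 1540, roughly 12 KB ) of io_buffer memory posted per port.
 */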
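The MTNIC_IF_CAP_* indices above are consumed through MTNIC_IF_CMD_QUERY_CAP, with the per-port capabilities (0x20 and up) selected via the cap_modifier field of struct mtnic_if_query_cap_in_imm. A minimal sketch of that usage follows; the helper name and signature are illustrative assumptions, not necessarily the driver's exact interface:

/* Sketch only: query the default MAC address of one port.
 * mtnic_query_cap() is an assumed wrapper around MTNIC_IF_CMD_QUERY_CAP
 * that fills 'mac' from the command's output data. */
static int example_query_port_mac ( struct mtnic *mtnic, u8 port, u64 *mac ) {
	/* cap_index selects the capability, cap_modifier selects the port */
	return mtnic_query_cap ( mtnic, MTNIC_IF_CAP_DEFAULT_MAC, port, mac );
}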
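The device-private data is now split in two: struct mtnic holds the per-device state (command interface, EQ, firmware and board info), while struct mtnic_port holds the per-port rings, CQs and link state, with a back-pointer to the shared struct mtnic. A minimal sketch of how the two are meant to be wired together (not the driver's actual probe code; netdev_priv() is gPXE's accessor for a net_device's private area):

/* Sketch only: associate one struct mtnic_port with each port's
 * net_device and point it back at the shared struct mtnic. */
static void example_init_ports ( struct mtnic *mtnic ) {
	struct mtnic_port *port;
	unsigned int i;

	for ( i = 0; i < mtnic->fw.num_ports; i++ ) {
		port = netdev_priv ( mtnic->netdev[i] );
		port->mtnic = mtnic;            /* shared device state */
		port->netdev = mtnic->netdev[i];
		port->port = i;                 /* MTNIC_PORT1 == 0, MTNIC_PORT2 == 1 */
		port->state = CARD_DOWN;
	}
}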
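Several of the mailbox layouts above encode sizes as log2 values rather than raw counts: the CQ and EQ size fields give 2^size entries, the Rx ring size field gives 2^size entries with an entry stride of 16*(2^stride) bytes, and every page_size field maps pages of 2^(12+page_size) bytes. A small illustration of the encoding (log2_of() is an illustrative helper, not part of the driver):

/* Illustrative log2 encoding used by the CONFIG_CQ / CONFIG_RX_RING /
 * CONFIG_EQ mailboxes above. */
static u8 log2_of ( u32 n ) {
	u8 log = 0;
	while ( n >>= 1 )
		log++;
	return log;
}

/* Examples:
 *   16-entry CQ      -> 'size' field      = log2_of ( 16 ) = 4
 *   4 KB pages       -> 'page_size' field = 0   ( 2^(12+0) == 4096 )
 *   16-byte Rx descriptor stride -> 'stride' bits = 0
 */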