[vxge] Add support for X3100 series 10GbE Server/Storage Adapter

Signed-off-by: Sivakumar Subramani <sivakumar.subramani@neterion.com>
Signed-off-by: Masroor Vettuparambil <masroor.vettuparambil@neterion.com>
Signed-off-by: Stefan Hajnoczi <stefanha@gmail.com>
Signed-off-by: Marty Connor <mdc@etherboot.org>
Masroor Vettuparambil 2010-02-02 09:42:36 +00:00, committed by Marty Connor
parent b7af0aa34e
commit f5f8ee00fc
10 changed files with 9386 additions and 0 deletions

@@ -62,6 +62,7 @@ SRCDIRS += drivers/net/e1000
SRCDIRS += drivers/net/phantom
SRCDIRS += drivers/net/rtl818x
SRCDIRS += drivers/net/ath5k
SRCDIRS += drivers/net/vxge
SRCDIRS += drivers/block
SRCDIRS += drivers/nvs
SRCDIRS += drivers/bitbash

File diff suppressed because it is too large

@@ -0,0 +1,787 @@
/*
* vxge-config.h: gPXE driver for Neterion Inc's X3100 Series 10GbE
* PCIe I/O Virtualized Server Adapter.
*
* Copyright(c) 2002-2010 Neterion Inc.
*
* This software may be used and distributed according to the terms of
* the GNU General Public License (GPL), incorporated herein by
* reference. Drivers based on or derived from this code fall under
* the GPL and must retain the authorship, copyright and license
* notice.
*
*/
FILE_LICENCE(GPL2_ONLY);
#ifndef VXGE_CONFIG_H
#define VXGE_CONFIG_H
#include <stdint.h>
#include <gpxe/list.h>
#include <gpxe/pci.h>
#ifndef VXGE_CACHE_LINE_SIZE
#define VXGE_CACHE_LINE_SIZE 4096
#endif
#define WAIT_FACTOR 1
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#endif
#define VXGE_HW_MAC_MAX_WIRE_PORTS 2
#define VXGE_HW_MAC_MAX_AGGR_PORTS 2
#define VXGE_HW_MAC_MAX_PORTS 3
#define VXGE_HW_MIN_MTU 68
#define VXGE_HW_MAX_MTU 9600
#define VXGE_HW_DEFAULT_MTU 1500
#ifndef __iomem
#define __iomem
#endif
#ifndef ____cacheline_aligned
#define ____cacheline_aligned
#endif
/**
* debug filtering masks
*/
#define VXGE_NONE 0x00
#define VXGE_INFO 0x01
#define VXGE_INTR 0x02
#define VXGE_XMIT 0x04
#define VXGE_POLL 0x08
#define VXGE_ERR 0x10
#define VXGE_TRACE 0x20
#define VXGE_ALL (VXGE_INFO|VXGE_INTR|VXGE_XMIT\
|VXGE_POLL|VXGE_ERR|VXGE_TRACE)
#define NULL_VPID 0xFFFFFFFF
#define VXGE_HW_EVENT_BASE 0
#define VXGE_LL_EVENT_BASE 100
#define VXGE_HW_BASE_INF 100
#define VXGE_HW_BASE_ERR 200
#define VXGE_HW_BASE_BADCFG 300
#define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000
#define VXGE_HW_MAX_PAYLOAD_SIZE_512 2
enum vxge_hw_status {
VXGE_HW_OK = 0,
VXGE_HW_FAIL = 1,
VXGE_HW_PENDING = 2,
VXGE_HW_COMPLETIONS_REMAIN = 3,
VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1,
VXGE_HW_INF_OUT_OF_DESCRIPTORS = VXGE_HW_BASE_INF + 2,
VXGE_HW_INF_SW_LRO_BEGIN = VXGE_HW_BASE_INF + 3,
VXGE_HW_INF_SW_LRO_CONT = VXGE_HW_BASE_INF + 4,
VXGE_HW_INF_SW_LRO_UNCAPABLE = VXGE_HW_BASE_INF + 5,
VXGE_HW_INF_SW_LRO_FLUSH_SESSION = VXGE_HW_BASE_INF + 6,
VXGE_HW_INF_SW_LRO_FLUSH_BOTH = VXGE_HW_BASE_INF + 7,
VXGE_HW_ERR_INVALID_HANDLE = VXGE_HW_BASE_ERR + 1,
VXGE_HW_ERR_OUT_OF_MEMORY = VXGE_HW_BASE_ERR + 2,
VXGE_HW_ERR_VPATH_NOT_AVAILABLE = VXGE_HW_BASE_ERR + 3,
VXGE_HW_ERR_VPATH_NOT_OPEN = VXGE_HW_BASE_ERR + 4,
VXGE_HW_ERR_WRONG_IRQ = VXGE_HW_BASE_ERR + 5,
VXGE_HW_ERR_SWAPPER_CTRL = VXGE_HW_BASE_ERR + 6,
VXGE_HW_ERR_INVALID_MTU_SIZE = VXGE_HW_BASE_ERR + 7,
VXGE_HW_ERR_INVALID_INDEX = VXGE_HW_BASE_ERR + 8,
VXGE_HW_ERR_INVALID_TYPE = VXGE_HW_BASE_ERR + 9,
VXGE_HW_ERR_INVALID_OFFSET = VXGE_HW_BASE_ERR + 10,
VXGE_HW_ERR_INVALID_DEVICE = VXGE_HW_BASE_ERR + 11,
VXGE_HW_ERR_VERSION_CONFLICT = VXGE_HW_BASE_ERR + 12,
VXGE_HW_ERR_INVALID_PCI_INFO = VXGE_HW_BASE_ERR + 13,
VXGE_HW_ERR_INVALID_TCODE = VXGE_HW_BASE_ERR + 14,
VXGE_HW_ERR_INVALID_BLOCK_SIZE = VXGE_HW_BASE_ERR + 15,
VXGE_HW_ERR_INVALID_STATE = VXGE_HW_BASE_ERR + 16,
VXGE_HW_ERR_PRIVILAGED_OPEARATION = VXGE_HW_BASE_ERR + 17,
VXGE_HW_ERR_INVALID_PORT = VXGE_HW_BASE_ERR + 18,
VXGE_HW_ERR_FIFO = VXGE_HW_BASE_ERR + 19,
VXGE_HW_ERR_VPATH = VXGE_HW_BASE_ERR + 20,
VXGE_HW_ERR_CRITICAL = VXGE_HW_BASE_ERR + 21,
VXGE_HW_ERR_SLOT_FREEZE = VXGE_HW_BASE_ERR + 22,
VXGE_HW_ERR_INVALID_MIN_BANDWIDTH = VXGE_HW_BASE_ERR + 25,
VXGE_HW_ERR_INVALID_MAX_BANDWIDTH = VXGE_HW_BASE_ERR + 26,
VXGE_HW_ERR_INVALID_TOTAL_BANDWIDTH = VXGE_HW_BASE_ERR + 27,
VXGE_HW_ERR_INVALID_BANDWIDTH_LIMIT = VXGE_HW_BASE_ERR + 28,
VXGE_HW_ERR_RESET_IN_PROGRESS = VXGE_HW_BASE_ERR + 29,
VXGE_HW_ERR_OUT_OF_SPACE = VXGE_HW_BASE_ERR + 30,
VXGE_HW_ERR_INVALID_FUNC_MODE = VXGE_HW_BASE_ERR + 31,
VXGE_HW_ERR_INVALID_DP_MODE = VXGE_HW_BASE_ERR + 32,
VXGE_HW_ERR_INVALID_FAILURE_BEHAVIOUR = VXGE_HW_BASE_ERR + 33,
VXGE_HW_ERR_INVALID_L2_SWITCH_STATE = VXGE_HW_BASE_ERR + 34,
VXGE_HW_ERR_INVALID_CATCH_BASIN_MODE = VXGE_HW_BASE_ERR + 35,
VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS = VXGE_HW_BASE_BADCFG + 1,
VXGE_HW_BADCFG_FIFO_BLOCKS = VXGE_HW_BASE_BADCFG + 2,
VXGE_HW_BADCFG_VPATH_MTU = VXGE_HW_BASE_BADCFG + 3,
VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG = VXGE_HW_BASE_BADCFG + 4,
VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH = VXGE_HW_BASE_BADCFG + 5,
VXGE_HW_BADCFG_VPATH_BANDWIDTH_LIMIT = VXGE_HW_BASE_BADCFG + 6,
VXGE_HW_BADCFG_INTR_MODE = VXGE_HW_BASE_BADCFG + 7,
VXGE_HW_BADCFG_RTS_MAC_EN = VXGE_HW_BASE_BADCFG + 8,
VXGE_HW_BADCFG_VPATH_AGGR_ACK = VXGE_HW_BASE_BADCFG + 9,
VXGE_HW_BADCFG_VPATH_PRIORITY = VXGE_HW_BASE_BADCFG + 10,
VXGE_HW_EOF_TRACE_BUF = -1
};
/**
* enum vxge_hw_device_link_state - Link state enumeration.
* @VXGE_HW_LINK_NONE: Invalid link state.
* @VXGE_HW_LINK_DOWN: Link is down.
* @VXGE_HW_LINK_UP: Link is up.
*
*/
enum vxge_hw_device_link_state {
VXGE_HW_LINK_NONE,
VXGE_HW_LINK_DOWN,
VXGE_HW_LINK_UP
};
/* forward declarations */
struct vxge_vpath;
struct __vxge_hw_virtualpath;
/**
* struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring
*
* One buffer mode RxD for ring structure
*/
struct vxge_hw_ring_rxd_1 {
u64 host_control;
u64 control_0;
#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0) vxge_bVALn(ctrl0, 0, 7)
#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER vxge_mBIT(7)
#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0) vxge_bVALn(ctrl0, 8, 1)
#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 9, 1)
#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0) vxge_bVALn(ctrl0, 10, 1)
#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_RING_RXD_T_CODE(val) vxge_vBIT(val, 12, 4)
#define VXGE_HW_RING_RXD_T_CODE_UNUSED VXGE_HW_RING_T_CODE_UNUSED
#define VXGE_HW_RING_RXD_SYN_GET(ctrl0) vxge_bVALn(ctrl0, 16, 1)
#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0) vxge_bVALn(ctrl0, 17, 1)
#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 18, 1)
#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0) vxge_bVALn(ctrl0, 19, 1)
#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0) vxge_bVALn(ctrl0, 20, 4)
#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0) vxge_bVALn(ctrl0, 24, 1)
#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0) vxge_bVALn(ctrl0, 25, 2)
#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0) vxge_bVALn(ctrl0, 27, 5)
#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 32, 16)
#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0) vxge_bVALn(ctrl0, 48, 16)
u64 control_1;
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1) vxge_bVALn(ctrl1, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val) vxge_vBIT(val, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK vxge_vBIT(0x3FFF, 2, 14)
#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1) vxge_bVALn(ctrl1, 16, 32)
#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1) vxge_bVALn(ctrl1, 48, 16)
u64 buffer0_ptr;
};
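As a hedged illustration of how these accessor macros fit together when draining the ring (a sketch only: vxge_bVALn()/vxge_mBIT() come from the register header, VXGE_HW_RING_T_CODE_OK is assumed to be the zero "transfer OK" code from vxge_traffic.h, and the driver's real poll loop lives in vxge_traffic.c):

static int vxge_rxd_completed_len(struct vxge_hw_ring_rxd_1 *rxdp)
{
	/* Descriptor still owned by the adapter: not yet completed */
	if (rxdp->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)
		return -1;
	/* A non-zero transfer code signals a receive error */
	if (VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0) !=
			VXGE_HW_RING_T_CODE_OK)
		return -1;
	/* In 1-buffer mode the buffer0 size field reports the frame length */
	return VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
}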
/**
* struct vxge_hw_fifo_txd - Transmit Descriptor
*
* Transmit descriptor (TxD). A fifo descriptor contains a configured
* number (list) of TxDs. For more details please refer to the Titan User
* Guide, Section 5.4.2 "Transmit Descriptor (TxD) Format".
*/
struct vxge_hw_fifo_txd {
u64 control_0;
#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER vxge_mBIT(7)
#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0) vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE(val) vxge_vBIT(val, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED VXGE_HW_FIFO_T_CODE_UNUSED
#define VXGE_HW_FIFO_TXD_GATHER_CODE(val) vxge_vBIT(val, 22, 2)
#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST VXGE_HW_FIFO_GATHER_CODE_FIRST
#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST VXGE_HW_FIFO_GATHER_CODE_LAST
#define VXGE_HW_FIFO_TXD_LSO_EN vxge_mBIT(30)
#define VXGE_HW_FIFO_TXD_LSO_MSS(val) vxge_vBIT(val, 34, 14)
#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val) vxge_vBIT(val, 48, 16)
u64 control_1;
#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN vxge_mBIT(5)
#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN vxge_mBIT(6)
#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN vxge_mBIT(7)
#define VXGE_HW_FIFO_TXD_VLAN_ENABLE vxge_mBIT(15)
#define VXGE_HW_FIFO_TXD_VLAN_TAG(val) vxge_vBIT(val, 16, 16)
#define VXGE_HW_FIFO_TXD_NO_BW_LIMIT vxge_mBIT(43)
#define VXGE_HW_FIFO_TXD_INT_NUMBER(val) vxge_vBIT(val, 34, 6)
#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST vxge_mBIT(46)
#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ vxge_mBIT(47)
u64 buffer_pointer;
u64 host_control;
};
/**
* struct vxge_hw_device_date - Date Format
* @day: Day
* @month: Month
* @year: Year
* @date: Date in string format
*
* Structure for returning date
*/
#define VXGE_HW_FW_STRLEN 32
struct vxge_hw_device_date {
u32 day;
u32 month;
u32 year;
char date[VXGE_HW_FW_STRLEN];
};
struct vxge_hw_device_version {
u32 major;
u32 minor;
u32 build;
char version[VXGE_HW_FW_STRLEN];
};
u64 __vxge_hw_vpath_pci_func_mode_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg);
/*
* struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper
* @control_0: Bits 0 to 7 - Doorbell type.
* Bits 8 to 31 - Reserved.
* Bits 32 to 39 - The highest TxD in this TxDL.
* Bits 40 to 47 - Reserved.
* Bits 48 to 55 - Reserved.
* Bits 56 to 63 - No snoop flags.
* @txdl_ptr: The starting location of the TxDL in host memory.
*
* Created by the host and written to the adapter via PIO to a Kernel Doorbell
* FIFO. All non-offload doorbell wrapper fields must be written by the host as
* part of a doorbell write. Consumed by the adapter but is not written by the
* adapter.
*/
struct __vxge_hw_non_offload_db_wrapper {
u64 control_0;
#define VXGE_HW_NODBW_GET_TYPE(ctrl0) vxge_bVALn(ctrl0, 0, 8)
#define VXGE_HW_NODBW_TYPE(val) vxge_vBIT(val, 0, 8)
#define VXGE_HW_NODBW_TYPE_NODBW 0
#define VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0) vxge_bVALn(ctrl0, 32, 8)
#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val) vxge_vBIT(val, 32, 8)
#define VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0) vxge_bVALn(ctrl0, 56, 8)
#define VXGE_HW_NODBW_LIST_NO_SNOOP(val) vxge_vBIT(val, 56, 8)
#define VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE 0x2
#define VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ 0x1
u64 txdl_ptr;
};
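For illustration, composing the wrapper described above would look roughly like this (a minimal sketch; the driver's actual doorbell write is __vxge_hw_non_offload_db_post() in vxge_traffic.c below, and txdl_dma is a hypothetical DMA address):

static void vxge_fill_nodbw(struct __vxge_hw_non_offload_db_wrapper *db,
			    u64 txdl_dma)
{
	/* Doorbell type in bits 0..7; one TxD means highest TxD index 0 */
	db->control_0 = VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
			VXGE_HW_NODBW_LAST_TXD_NUMBER(0);
	/* Starting location of the TxDL in host memory */
	db->txdl_ptr = txdl_dma;
}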
/*
* struct __vxge_hw_fifo - Fifo.
* @vp_id: Virtual path id
* @tx_intr_num: Interrupt Number associated with the TX
* @txdl: Start pointer of the txdl list of this fifo.
* gPXE does not support tx fragmentation, so we need
* only one txd in a list
* @depth: total number of lists in this fifo
* @hw_offset: txd index from where adapter owns the txd list
* @sw_offset: txd index from where driver owns the txd list
*
* @stats: Statistics of this fifo
*
*/
struct __vxge_hw_fifo {
struct vxge_hw_vpath_reg *vp_reg;
struct __vxge_hw_non_offload_db_wrapper *nofl_db;
u32 vp_id;
u32 tx_intr_num;
struct vxge_hw_fifo_txd *txdl;
#define VXGE_HW_FIFO_TXD_DEPTH 128
u16 depth;
u16 hw_offset;
u16 sw_offset;
struct __vxge_hw_virtualpath *vpathh;
};
/* Structure that represents the Rx descriptor block, which holds
* 127 Rx descriptors plus trailing link qwords that chain to the next block.
*/
struct __vxge_hw_ring_block {
#define VXGE_HW_MAX_RXDS_PER_BLOCK_1 127
struct vxge_hw_ring_rxd_1 rxd[VXGE_HW_MAX_RXDS_PER_BLOCK_1];
u64 reserved_0;
#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
/* 0xFEFFFFFFFFFFFFFF to mark last Rxd in this blk */
u64 reserved_1;
/* Logical pointer to the next block */
u64 reserved_2_pNext_RxD_block;
/* Buff0_ptr. On a 32-bit arch the upper 32 bits should be 0 */
u64 pNext_RxD_Blk_physical;
};
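Per the comments, the last two qwords chain each 4k block to the next one; seeding that chain would look roughly like this (hedged sketch: the real initialization, including where the END_OF_BLOCK marker is written, lives in the suppressed configuration diff):

static void vxge_link_rxd_blocks(struct __vxge_hw_ring_block *blk,
				 struct __vxge_hw_ring_block *next_virt,
				 u64 next_dma)
{
	/* logical (virtual) pointer to the next block, used by the driver */
	blk->reserved_2_pNext_RxD_block = (u64)(intptr_t)next_virt;
	/* physical (DMA) pointer to the next block, followed by the adapter */
	blk->pNext_RxD_Blk_physical = next_dma;
}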
/*
* struct __vxge_hw_ring - Ring channel.
*
* Note: The structure is cache line aligned for better CPU cache
* utilization.
*/
struct __vxge_hw_ring {
struct vxge_hw_vpath_reg *vp_reg;
struct vxge_hw_common_reg *common_reg;
u32 vp_id;
#define VXGE_HW_RING_RXD_QWORDS_MODE_1 4
u32 doorbell_cnt;
u32 total_db_cnt;
#define VXGE_HW_RING_RXD_QWORD_LIMIT 16
u64 rxd_qword_limit;
struct __vxge_hw_ring_block *rxdl;
#define VXGE_HW_RING_BUF_PER_BLOCK 9
u16 buf_per_block;
u16 rxd_offset;
#define VXGE_HW_RING_RX_POLL_WEIGHT 8
u16 rx_poll_weight;
struct io_buffer *iobuf[VXGE_HW_RING_BUF_PER_BLOCK + 1];
struct __vxge_hw_virtualpath *vpathh;
};
/*
* struct __vxge_hw_virtualpath - Virtual Path
*
* Virtual path structure to encapsulate the data related to a virtual path.
* Virtual paths are allocated by the HW upon getting configuration from the
* driver and inserted into the list of virtual paths.
*/
struct __vxge_hw_virtualpath {
u32 vp_id;
u32 vp_open;
#define VXGE_HW_VP_NOT_OPEN 0
#define VXGE_HW_VP_OPEN 1
struct __vxge_hw_device *hldev;
struct vxge_hw_vpath_reg *vp_reg;
struct vxge_hw_vpmgmt_reg *vpmgmt_reg;
struct __vxge_hw_non_offload_db_wrapper *nofl_db;
u32 max_mtu;
u32 vsport_number;
u32 max_kdfc_db;
u32 max_nofl_db;
struct __vxge_hw_ring ringh;
struct __vxge_hw_fifo fifoh;
};
#define VXGE_HW_INFO_LEN 64
#define VXGE_HW_PMD_INFO_LEN 16
#define VXGE_MAX_PRINT_BUF_SIZE 128
/**
* struct vxge_hw_device_hw_info - Device information
* @host_type: Host Type
* @func_id: Function Id
* @vpath_mask: vpath bit mask
* @fw_version: Firmware version
* @fw_date: Firmware Date
* @flash_version: Firmware version
* @flash_date: Firmware Date
* @mac_addrs: Mac addresses for each vpath
* @mac_addr_masks: Mac address masks for each vpath
*
* Carries the vpath mask, with a bit set for each vpath allocated to the
* driver, and the first MAC address of each vpath
*/
struct vxge_hw_device_hw_info {
u32 host_type;
#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION 0
#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION 1
#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0 2
#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION 3
#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG 4
#define VXGE_HW_SR_VH_FUNCTION0 5
#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6
#define VXGE_HW_VH_NORMAL_FUNCTION 7
u64 function_mode;
#define VXGE_HW_FUNCTION_MODE_MIN 0
#define VXGE_HW_FUNCTION_MODE_MAX 10
#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 0
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 1
#define VXGE_HW_FUNCTION_MODE_SRIOV 2
#define VXGE_HW_FUNCTION_MODE_MRIOV 3
#define VXGE_HW_FUNCTION_MODE_MRIOV_8 4
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17 5
#define VXGE_HW_FUNCTION_MODE_SRIOV_8 6
#define VXGE_HW_FUNCTION_MODE_SRIOV_4 7
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2 8
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4 9
#define VXGE_HW_FUNCTION_MODE_MRIOV_4 10
u32 func_id;
u64 vpath_mask;
struct vxge_hw_device_version fw_version;
struct vxge_hw_device_date fw_date;
struct vxge_hw_device_version flash_version;
struct vxge_hw_device_date flash_date;
u8 serial_number[VXGE_HW_INFO_LEN];
u8 part_number[VXGE_HW_INFO_LEN];
u8 product_desc[VXGE_HW_INFO_LEN];
u8 (mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
u8 (mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
};
/**
* struct __vxge_hw_device - HAL device object
* @magic: Magic Number
* @bar0: BAR0 virtual address.
* @pdev: Physical device handle
* @config: Configuration passed by the LL driver at initialization
* @link_state: Link state
*
* HW device object. Represents a Titan adapter.
*/
struct __vxge_hw_device {
u32 magic;
#define VXGE_HW_DEVICE_MAGIC 0x12345678
#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD
void __iomem *bar0;
struct pci_device *pdev;
struct net_device *ndev;
struct vxgedev *vdev;
enum vxge_hw_device_link_state link_state;
u32 host_type;
u32 func_id;
u8 titan1;
u32 access_rights;
#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH 0x1
#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM 0x2
#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM 0x4
struct vxge_hw_legacy_reg *legacy_reg;
struct vxge_hw_toc_reg *toc_reg;
struct vxge_hw_common_reg *common_reg;
struct vxge_hw_mrpcim_reg *mrpcim_reg;
struct vxge_hw_srpcim_reg *srpcim_reg \
[VXGE_HW_TITAN_SRPCIM_REG_SPACES];
struct vxge_hw_vpmgmt_reg *vpmgmt_reg \
[VXGE_HW_TITAN_VPMGMT_REG_SPACES];
struct vxge_hw_vpath_reg *vpath_reg \
[VXGE_HW_TITAN_VPATH_REG_SPACES];
u8 *kdfc;
u8 *usdc;
struct __vxge_hw_virtualpath virtual_path;
u64 vpath_assignments;
u64 vpaths_deployed;
u32 first_vp_id;
u64 tim_int_mask0[4];
u32 tim_int_mask1[4];
struct vxge_hw_device_hw_info hw_info;
};
#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls)
#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) { \
if (i < 16) { \
m0[0] |= vxge_vBIT(0x8, (i*4), 4); \
m0[1] |= vxge_vBIT(0x4, (i*4), 4); \
} \
else { \
m1[0] = 0x80000000; \
m1[1] = 0x40000000; \
} \
}
#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) { \
if (i < 16) { \
m0[0] &= ~vxge_vBIT(0x8, (i*4), 4); \
m0[1] &= ~vxge_vBIT(0x4, (i*4), 4); \
} \
else { \
m1[0] = 0; \
m1[1] = 0; \
} \
}
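A hedged usage sketch for the two macros above, mirroring how the per-device TIM masks would be seeded for a vpath and cleared again on teardown (the actual call sites are elsewhere in the driver):

static void vxge_tim_masks_example(struct __vxge_hw_device *hldev, u32 vp_id)
{
	/* mark this vpath's TX/RX TIM interrupts in the device masks */
	VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
					hldev->tim_int_mask1, vp_id);
	/* ... and clear them again when the vpath goes away */
	VXGE_HW_DEVICE_TIM_INT_MASK_RESET(hldev->tim_int_mask0,
					  hldev->tim_int_mask1, vp_id);
}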
/**
* enum vxge_hw_txdl_state - Descriptor (TXDL) state.
* @VXGE_HW_TXDL_STATE_NONE: Invalid state.
* @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation.
* @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the
* device.
* @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for
* filling-in and posting later.
*
* Titan/HW descriptor states.
*
*/
enum vxge_hw_txdl_state {
VXGE_HW_TXDL_STATE_NONE = 0,
VXGE_HW_TXDL_STATE_AVAIL = 1,
VXGE_HW_TXDL_STATE_POSTED = 2,
VXGE_HW_TXDL_STATE_FREED = 3
};
/* fifo and ring circular buffer offset tracking apis */
static inline void __vxge_hw_desc_offset_up(u16 upper_limit,
u16 *offset)
{
if (++(*offset) >= upper_limit)
*offset = 0;
}
/* rxd offset handling apis */
static inline void vxge_hw_ring_rxd_offset_up(u16 *offset)
{
__vxge_hw_desc_offset_up(VXGE_HW_MAX_RXDS_PER_BLOCK_1,
offset);
}
/* txd offset handling apis */
static inline void vxge_hw_fifo_txd_offset_up(u16 *offset)
{
__vxge_hw_desc_offset_up(VXGE_HW_FIFO_TXD_DEPTH, offset);
}
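The helpers above are simple modular increments over the fifo/ring depth; a minimal illustration of the wraparound:

static void vxge_offset_wrap_example(void)
{
	u16 off = 126;

	vxge_hw_fifo_txd_offset_up(&off);	/* off is now 127 */
	vxge_hw_fifo_txd_offset_up(&off);	/* 128 hits the depth, wraps to 0 */
}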
/**
* vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
* @rxdp: Descriptor pointer.
* @iob: I/O buffer providing the single receive buffer this descriptor
* should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called,
* the receive buffer should already be mapped to the device.
* @size: Size of the receive buffer.
*
* Prepare 1-buffer-mode Rx descriptor for posting
* (via vxge_hw_ring_rxd_post()).
*
* This inline helper function returns nothing and always succeeds.
*
*/
static inline
void vxge_hw_ring_rxd_1b_set(struct vxge_hw_ring_rxd_1 *rxdp,
struct io_buffer *iob, u32 size)
{
rxdp->host_control = (intptr_t)(iob);
rxdp->buffer0_ptr = virt_to_bus(iob->data);
rxdp->control_1 &= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
rxdp->control_1 |= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
}
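Putting the pieces together, refilling one ring slot would look roughly like this (a hedged sketch: alloc_iob() is gPXE's I/O buffer allocator, and vxge_hw_ring_rxd_post() is defined in vxge_traffic.c below):

static enum vxge_hw_status vxge_refill_rxd(struct __vxge_hw_ring *ring,
					   struct vxge_hw_ring_rxd_1 *rxdp,
					   u32 size)
{
	struct io_buffer *iob = alloc_iob(size);

	if (!iob)
		return VXGE_HW_ERR_OUT_OF_MEMORY;
	/* attach the buffer, then hand ownership to the adapter */
	vxge_hw_ring_rxd_1b_set(rxdp, iob, size);
	vxge_hw_ring_rxd_post(ring, rxdp);
	return VXGE_HW_OK;
}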
enum vxge_hw_status vxge_hw_device_hw_info_get(
void __iomem *bar0,
struct vxge_hw_device_hw_info *hw_info);
enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(
struct vxge_hw_vpath_reg __iomem *vpath_reg,
struct vxge_hw_device_hw_info *hw_info);
enum vxge_hw_status
__vxge_hw_vpath_card_info_get(
struct vxge_hw_vpath_reg __iomem *vpath_reg,
struct vxge_hw_device_hw_info *hw_info);
/**
* vxge_hw_device_link_state_get - Get link state.
* @devh: HW device handle.
*
* Get link state.
* Returns: link state.
*/
static inline
enum vxge_hw_device_link_state vxge_hw_device_link_state_get(
struct __vxge_hw_device *devh)
{
return devh->link_state;
}
void vxge_hw_device_terminate(struct __vxge_hw_device *devh);
enum vxge_hw_status vxge_hw_device_initialize(
struct __vxge_hw_device **devh,
void *bar0,
struct pci_device *pdev,
u8 titan1);
enum vxge_hw_status
vxge_hw_vpath_open(struct __vxge_hw_device *hldev, struct vxge_vpath *vpath);
enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog);
enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_virtualpath *vpath);
enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_virtualpath *vpath);
enum vxge_hw_status
vxge_hw_vpath_recover_from_reset(struct __vxge_hw_virtualpath *vpath);
void
vxge_hw_vpath_enable(struct __vxge_hw_virtualpath *vpath);
enum vxge_hw_status
vxge_hw_vpath_mtu_set(struct __vxge_hw_virtualpath *vpath, u32 new_mtu);
void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_virtualpath *vpath);
void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg);
enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
struct vxge_hw_vpath_reg __iomem *vpath_reg);
enum vxge_hw_status
__vxge_hw_device_register_poll(
void __iomem *reg,
u64 mask, u32 max_millis);
#ifndef readq
static inline u64 readq(void __iomem *addr)
{
u64 ret = 0;
ret = readl(addr + 4);
ret <<= 32;
ret |= readl(addr);
return ret;
}
#endif
#ifndef writeq
static inline void writeq(u64 val, void __iomem *addr)
{
writel((u32) (val), addr);
writel((u32) (val >> 32), (addr + 4));
}
#endif
static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
{
writel(val, addr + 4);
}
static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
{
writel(val, addr);
}
static inline enum vxge_hw_status
__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
u64 mask, u32 max_millis)
{
enum vxge_hw_status status = VXGE_HW_OK;
__vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
wmb();
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
wmb();
status = __vxge_hw_device_register_poll(addr, mask, max_millis);
return status;
}
struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0);
enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
void
__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
enum vxge_hw_status
__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
enum vxge_hw_status
__vxge_hw_vpath_pci_read(
struct __vxge_hw_virtualpath *vpath,
u32 phy_func_0,
u32 offset,
u32 *val);
enum vxge_hw_status
__vxge_hw_vpath_addr_get(
struct vxge_hw_vpath_reg __iomem *vpath_reg,
u8 (macaddr)[ETH_ALEN],
u8 (macaddr_mask)[ETH_ALEN]);
u32
__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
/**
* vxge_debug
* @mask: mask for the debug
* @fmt: printf like format string
*/
static const u16 debug_filter = VXGE_ERR;
#define vxge_debug(mask, fmt...) do { \
if (debug_filter & mask) \
DBG(fmt); \
} while (0)
#define vxge_trace() vxge_debug(VXGE_TRACE, "%s:%d\n", __func__, __LINE__)
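With debug_filter fixed at VXGE_ERR, only error-class messages pass the runtime check (and DBG() itself is compiled out unless the gPXE build enables debugging for this object); for example:

static void vxge_debug_example(void)
{
	/* printed: VXGE_ERR is set in debug_filter */
	vxge_debug(VXGE_ERR, "vpath open failed\n");
	/* no-op unless debug_filter is widened to include VXGE_TRACE */
	vxge_trace();
}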
enum vxge_hw_status
vxge_hw_get_func_mode(struct __vxge_hw_device *hldev, u32 *func_mode);
enum vxge_hw_status
vxge_hw_set_fw_api(struct __vxge_hw_device *hldev,
u64 vp_id, u32 action,
u32 offset, u64 data0, u64 data1);
void
vxge_hw_vpath_set_zero_rx_frm_len(struct __vxge_hw_device *hldev);
#endif

@@ -0,0 +1,724 @@
/*
* vxge-main.c: gPXE driver for Neterion Inc's X3100 Series 10GbE
* PCIe I/O Virtualized Server Adapter.
*
* Copyright(c) 2002-2010 Neterion Inc.
*
* This software may be used and distributed according to the terms of
* the GNU General Public License (GPL), incorporated herein by
* reference. Drivers based on or derived from this code fall under
* the GPL and must retain the authorship, copyright and license
* notice.
*
*/
FILE_LICENCE(GPL2_ONLY);
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <gpxe/io.h>
#include <errno.h>
#include <byteswap.h>
#include <gpxe/pci.h>
#include <gpxe/malloc.h>
#include <gpxe/if_ether.h>
#include <gpxe/ethernet.h>
#include <gpxe/iobuf.h>
#include <gpxe/netdevice.h>
#include <gpxe/timer.h>
#include <nic.h>
#include "vxge_main.h"
#include "vxge_reg.h"
/* function mode strings */
static char *vxge_func_mode_names[] = {
"Single Function - 1 func, 17 vpath",
"Multi Function 8 - 8 func, 2 vpath per func",
"SRIOV 17 - 17 VF, 1 vpath per VF",
"WLPEX/SharedIO 17 - 17 VH, 1 vpath/func/hierarchy",
"WLPEX/SharedIO 8 - 8 VH, 2 vpath/func/hierarchy",
"Multi Function 17 - 17 func, 1 vpath per func",
"SRIOV 8 - 1 PF, 7 VF, 2 vpath per VF",
"SRIOV 4 - 1 PF, 3 VF, 4 vpath per VF",
"Multi Function 2 - 2 func, 8 vpath per func",
"Multi Function 4 - 4 func, 4 vpath per func",
"WLPEX/SharedIO 4 - 17 func, 1 vpath per func (PCIe ARI)",
};
static inline int is_vxge_card_up(struct vxgedev *vdev)
{
return test_bit(__VXGE_STATE_CARD_UP, vdev->state);
}
/*
* vxge_xmit_compl
*
* If an interrupt was raised to indicate DMA complete of the Tx packet,
* this function is called. It identifies the last TxD whose buffer was
* freed and frees all iobufs whose data have already been DMA'ed into
* the NIC's internal memory.
*/
enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw,
struct vxge_hw_fifo_txd *txdp, enum vxge_hw_fifo_tcode tcode)
{
struct net_device *netdev;
struct io_buffer *tx_iob = NULL;
vxge_trace();
netdev = fifo_hw->vpathh->hldev->ndev;
tx_iob = (struct io_buffer *)(intptr_t)txdp->host_control;
if (tcode == VXGE_HW_FIFO_T_CODE_OK) {
netdev_tx_complete(netdev, tx_iob);
} else {
netdev_tx_complete_err(netdev, tx_iob, -EINVAL);
vxge_debug(VXGE_ERR, "%s: transmit failed, tcode %d\n",
netdev->name, tcode);
}
memset(txdp, 0, sizeof(struct vxge_hw_fifo_txd));
return VXGE_HW_OK;
}
/* reset vpaths */
enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_virtualpath *vpath;
vxge_trace();
vpath = vdev->vpath.vpathh;
if (vpath) {
if ((status = vxge_hw_vpath_reset(vpath)) == VXGE_HW_OK) {
if (is_vxge_card_up(vdev) &&
(status = vxge_hw_vpath_recover_from_reset(
vpath)) != VXGE_HW_OK) {
vxge_debug(VXGE_ERR, "vxge_hw_vpath_recover_"
"from_reset failed\n");
return status;
} else {
status = __vxge_hw_vpath_reset_check(vpath);
if (status != VXGE_HW_OK) {
vxge_debug(VXGE_ERR,
"__vxge_hw_vpath_reset_check error\n");
return status;
}
}
} else {
vxge_debug(VXGE_ERR, "vxge_hw_vpath_reset failed\n");
return status;
}
}
return status;
}
/* close vpaths */
void vxge_close_vpaths(struct vxgedev *vdev)
{
if (vdev->vpath.vpathh && vdev->vpath.is_open)
vxge_hw_vpath_close(vdev->vpath.vpathh);
vdev->vpath.is_open = 0;
vdev->vpath.vpathh = NULL;
}
/* open vpaths */
int vxge_open_vpaths(struct vxgedev *vdev)
{
enum vxge_hw_status status;
struct __vxge_hw_device *hldev;
hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
vdev->vpath.vpathh = &hldev->virtual_path;
vdev->vpath.fifo.ndev = vdev->ndev;
vdev->vpath.fifo.pdev = vdev->pdev;
vdev->vpath.fifo.fifoh = &hldev->virtual_path.fifoh;
vdev->vpath.ring.ndev = vdev->ndev;
vdev->vpath.ring.pdev = vdev->pdev;
vdev->vpath.ring.ringh = &hldev->virtual_path.ringh;
status = vxge_hw_vpath_open(vdev->devh, &vdev->vpath);
if (status == VXGE_HW_OK) {
vdev->vpath.is_open = 1;
} else {
vxge_debug(VXGE_ERR,
"%s: vpath: %d failed to open "
"with status: %d\n",
vdev->ndev->name, vdev->vpath.device_id,
status);
vxge_close_vpaths(vdev);
return status;
}
hldev->vpaths_deployed |= vxge_mBIT(vdev->vpath.vpathh->vp_id);
return VXGE_HW_OK;
}
/** Functions that implement the gPXE driver API **/
/**
* vxge_xmit
* @dev: device pointer.
* @iobuf: the I/O buffer containing the Tx data.
*
* This function is the Tx entry point of the driver. The Neterion NIC
* supports certain protocol-assist features on the Tx side, namely CSO,
* S/G and LSO.
*/
static int
vxge_xmit(struct net_device *dev, struct io_buffer *iobuf)
{
struct vxge_fifo *fifo = NULL;
struct vxgedev *vdev = NULL;
struct __vxge_hw_fifo *fifoh;
struct __vxge_hw_device *hldev;
struct vxge_hw_fifo_txd *txdp;
vxge_trace();
vdev = (struct vxgedev *)netdev_priv(dev);
hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
if (!is_vxge_card_up(vdev)) {
vxge_debug(VXGE_ERR,
"%s: vdev not initialized\n", dev->name);
return -EIO;
}
if (!netdev_link_ok(dev)) {
vxge_debug(VXGE_ERR,
"%s: Link down, transmit failed\n", dev->name);
return -ENETDOWN;
}
fifo = &vdev->vpath.fifo;
fifoh = fifo->fifoh;
txdp = vxge_hw_fifo_free_txdl_get(fifoh);
if (!txdp) {
vxge_debug(VXGE_ERR,
"%s: Out of tx descriptors\n", dev->name);
return -ENOBUFS;
}
vxge_debug(VXGE_XMIT, "%s: %s:%d fifoh offset= %d\n",
dev->name, __func__, __LINE__, fifoh->sw_offset);
vxge_hw_fifo_txdl_buffer_set(fifoh, txdp, iobuf);
vxge_hw_fifo_txdl_post(fifoh, txdp);
return 0;
}
/*
* vxge_poll
* @ndev: net device pointer
*
* This function acks the interrupt. It polls for Rx packets
* and sends them to the upper layer. It also checks for Tx completion
* and frees iobufs.
*/
static void vxge_poll(struct net_device *ndev)
{
struct __vxge_hw_device *hldev;
struct vxgedev *vdev;
vxge_debug(VXGE_POLL, "%s:%d \n", __func__, __LINE__);
vdev = (struct vxgedev *)netdev_priv(ndev);
hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
if (!is_vxge_card_up(vdev))
return;
/* process alarm and acknowledge the interrupts */
vxge_hw_device_begin_irq(hldev);
vxge_hw_vpath_poll_tx(&hldev->virtual_path.fifoh);
vxge_hw_vpath_poll_rx(&hldev->virtual_path.ringh);
}
/*
* vxge_irq - Enable or disable interrupts
*
* @netdev: netdevice structure reference
* @action: requested interrupt action
*/
static void vxge_irq(struct net_device *netdev __unused, int action)
{
struct __vxge_hw_device *hldev;
struct vxgedev *vdev;
vxge_debug(VXGE_INFO,
"%s:%d action(%d)\n", __func__, __LINE__, action);
vdev = (struct vxgedev *)netdev_priv(netdev);
hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
switch (action) {
case DISABLE:
vxge_hw_device_mask_all(hldev);
break;
default:
vxge_hw_device_unmask_all(hldev);
break;
}
}
/**
* vxge_open
* @dev: pointer to the device structure.
*
* This function is the open entry point of the driver. It mainly calls a
* function to allocate Rx buffers and inserts them into the buffer
* descriptors and then enables the Rx part of the NIC.
* Return value: '0' on success and an appropriate negative integer as
* defined in errno.h on failure.
*/
int
vxge_open(struct net_device *dev)
{
enum vxge_hw_status status;
struct vxgedev *vdev;
struct __vxge_hw_device *hldev;
int ret = 0;
vxge_debug(VXGE_INFO, "%s: %s:%d\n",
VXGE_DRIVER_NAME, __func__, __LINE__);
vdev = (struct vxgedev *)netdev_priv(dev);
hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
/* make sure the link is down by default every time the NIC is
* initialized */
netdev_link_down(dev);
/* Open VPATHs */
status = vxge_open_vpaths(vdev);
if (status != VXGE_HW_OK) {
vxge_debug(VXGE_ERR, "%s: fatal: Vpath open failed\n",
VXGE_DRIVER_NAME);
ret = -EPERM;
goto out0;
}
vdev->mtu = VXGE_HW_DEFAULT_MTU;
/* set initial mtu before enabling the device */
status = vxge_hw_vpath_mtu_set(vdev->vpath.vpathh, vdev->mtu);
if (status != VXGE_HW_OK) {
vxge_debug(VXGE_ERR,
"%s: fatal: can not set new MTU\n", dev->name);
ret = -EPERM;
goto out2;
}
vxge_debug(VXGE_INFO,
"%s: MTU is %d\n", vdev->ndev->name, vdev->mtu);
set_bit(__VXGE_STATE_CARD_UP, vdev->state);
wmb();
if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
netdev_link_up(vdev->ndev);
vxge_debug(VXGE_INFO, "%s: Link Up\n", vdev->ndev->name);
}
vxge_hw_device_intr_enable(hldev);
vxge_hw_vpath_enable(vdev->vpath.vpathh);
wmb();
vxge_hw_vpath_rx_doorbell_init(vdev->vpath.vpathh);
goto out0;
out2:
vxge_close_vpaths(vdev);
out0:
vxge_debug(VXGE_INFO, "%s: %s:%d Exiting...\n",
dev->name, __func__, __LINE__);
return ret;
}
/**
* vxge_close
* @dev: device pointer.
*
* This is the stop entry point of the driver. It needs to undo exactly
* whatever was done by the open entry point, thus it's usually referred to
* as the close function. Among other things, this function mainly stops the
* Rx side of the NIC and frees all the Rx buffers in the Rx rings.
*/
static void vxge_close(struct net_device *dev)
{
struct vxgedev *vdev;
struct __vxge_hw_device *hldev;
vxge_debug(VXGE_INFO, "%s: %s:%d\n",
dev->name, __func__, __LINE__);
vdev = (struct vxgedev *)netdev_priv(dev);
hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
if (!is_vxge_card_up(vdev))
return;
clear_bit(__VXGE_STATE_CARD_UP, vdev->state);
vxge_hw_vpath_set_zero_rx_frm_len(hldev);
netdev_link_down(vdev->ndev);
vxge_debug(VXGE_INFO, "%s: Link Down\n", vdev->ndev->name);
/* Note that at this point xmit() is stopped by upper layer */
vxge_hw_device_intr_disable(hldev);
/* Multi function shares INTA, hence we should
* leave it in enabled state
*/
if (is_mf(hldev->hw_info.function_mode))
vxge_hw_device_unmask_all(hldev);
vxge_reset_all_vpaths(vdev);
vxge_close_vpaths(vdev);
vxge_debug(VXGE_INFO,
"%s: %s:%d Exiting...\n", dev->name, __func__, __LINE__);
}
static struct net_device_operations vxge_operations;
int vxge_device_register(struct __vxge_hw_device *hldev,
struct vxgedev **vdev_out)
{
struct net_device *ndev;
struct vxgedev *vdev;
int ret = 0;
*vdev_out = NULL;
ndev = alloc_etherdev(sizeof(struct vxgedev));
if (ndev == NULL) {
vxge_debug(VXGE_ERR, "%s : device allocation failed\n",
__func__);
ret = -ENODEV;
goto _out0;
}
vxge_debug(VXGE_INFO, "%s:%d netdev registering\n",
__func__, __LINE__);
vdev = netdev_priv(ndev);
memset(vdev, 0, sizeof(struct vxgedev));
vdev->ndev = ndev;
vdev->devh = hldev;
vdev->pdev = hldev->pdev;
ndev->dev = &vdev->pdev->dev;
/* Associate vxge-specific network operations with the
* generic network device layer */
netdev_init(ndev, &vxge_operations);
memcpy(ndev->hw_addr,
(u8 *)hldev->hw_info.mac_addrs[hldev->first_vp_id], ETH_ALEN);
if (register_netdev(ndev)) {
vxge_debug(VXGE_ERR, "%s : device registration failed!\n",
__func__);
ret = -ENODEV;
goto _out2;
}
/* Leave the link state down at this point; when the link-change
* interrupt arrives, the state will automatically be updated to
* the right state.
*/
netdev_link_down(ndev);
vxge_debug(VXGE_INFO, "%s: Ethernet device registered\n",
VXGE_DRIVER_NAME);
*vdev_out = vdev;
return ret;
_out2:
netdev_put(ndev);
_out0:
return ret;
}
/*
* vxge_device_unregister
*
* This function will unregister and free the network device
*/
void
vxge_device_unregister(struct __vxge_hw_device *hldev)
{
struct vxgedev *vdev;
struct net_device *ndev;
ndev = hldev->ndev;
vdev = netdev_priv(ndev);
unregister_netdev(ndev);
netdev_nullify(ndev);
netdev_put(ndev);
vxge_debug(VXGE_INFO, "%s: ethernet device unregistered\n",
VXGE_DRIVER_NAME);
}
/**
* vxge_probe
* @pdev : structure containing the PCI related information of the device.
* @id: List of PCI devices supported by the driver listed in vxge_id_table.
* Description:
* This function is called when a new PCI device gets detected and initializes
* it.
* Return value:
* returns 0 on success and negative on failure.
*
*/
static int
vxge_probe(struct pci_device *pdev, const struct pci_device_id *id __unused)
{
struct __vxge_hw_device *hldev;
enum vxge_hw_status status;
int ret = 0;
u64 vpath_mask = 0;
struct vxgedev *vdev;
int i;
u8 revision, titan1;
u32 host_type;
u32 function_mode;
unsigned long mmio_start, mmio_len;
void *bar0;
struct vxge_hw_device_hw_info hw_info;
struct vxge_hw_device_version *fw_version;
vxge_debug(VXGE_INFO, "vxge_probe for device %02X:%02X.%X\n",
pdev->bus, PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn));
pci_read_config_byte(pdev, PCI_REVISION_ID, &revision);
titan1 = is_titan1(pdev->device, revision);
mmio_start = pci_bar_start(pdev, PCI_BASE_ADDRESS_0);
mmio_len = pci_bar_size(pdev, PCI_BASE_ADDRESS_0);
vxge_debug(VXGE_INFO, "mmio_start: %#08lx, mmio_len: %#08lx\n",
mmio_start, mmio_len);
/* sets the bus master */
adjust_pci_device(pdev);
bar0 = ioremap(mmio_start, mmio_len);
if (!bar0) {
vxge_debug(VXGE_ERR,
"%s : cannot remap io memory bar0\n", __func__);
ret = -ENODEV;
goto _exit0;
}
status = vxge_hw_device_hw_info_get(bar0, &hw_info);
if (status != VXGE_HW_OK) {
vxge_debug(VXGE_ERR,
"%s: Reading of hardware info failed.\n",
VXGE_DRIVER_NAME);
ret = -EINVAL;
goto _exit1;
}
if (hw_info.func_id != 0) {
/* Non-zero function, so do not load the driver */
iounmap(bar0);
pci_set_drvdata(pdev, NULL);
return -EINVAL;
}
vpath_mask = hw_info.vpath_mask;
if (vpath_mask == 0) {
vxge_debug(VXGE_ERR,
"%s: No vpaths available in device\n",
VXGE_DRIVER_NAME);
ret = -EINVAL;
goto _exit1;
}
vxge_debug(VXGE_INFO,
"%s:%d Vpath mask = %llx\n", __func__, __LINE__,
(unsigned long long)vpath_mask);
host_type = hw_info.host_type;
fw_version = &hw_info.fw_version;
/* fail the driver loading if firmware is incompatible */
if ((fw_version->major != VXGE_CERT_FW_VER_MAJOR) ||
(fw_version->minor < VXGE_CERT_FW_VER_MINOR)) {
printf("%s: Adapter's current firmware version: %d.%d.%d\n",
VXGE_DRIVER_NAME, fw_version->major,
fw_version->minor, fw_version->build);
printf("%s: Upgrade firmware to version %d.%d.%d\n",
VXGE_DRIVER_NAME, VXGE_CERT_FW_VER_MAJOR,
VXGE_CERT_FW_VER_MINOR, VXGE_CERT_FW_VER_BUILD);
ret = -EACCES;
goto _exit1;
}
status = vxge_hw_device_initialize(&hldev, bar0, pdev, titan1);
if (status != VXGE_HW_OK) {
vxge_debug(VXGE_ERR,
"Failed to initialize device (%d)\n", status);
ret = -EINVAL;
goto _exit1;
}
memcpy(&hldev->hw_info, &hw_info,
sizeof(struct vxge_hw_device_hw_info));
/* find the first available vpath id */
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
if (vpath_mask & vxge_mBIT(i)) {
hldev->first_vp_id = i;
break;
}
/* if FCS stripping is not disabled in the MAC, fail the driver load */
if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) {
vxge_debug(VXGE_ERR,
"%s: FCS stripping is not disabled in MAC"
" failing driver load\n", VXGE_DRIVER_NAME);
ret = -EINVAL;
goto _exit2;
}
/* Read function mode */
status = vxge_hw_get_func_mode(hldev, &function_mode);
if (status != VXGE_HW_OK)
goto _exit2;
hldev->hw_info.function_mode = function_mode;
/* set private device info */
pci_set_drvdata(pdev, hldev);
if (vxge_device_register(hldev, &vdev)) {
ret = -EINVAL;
goto _exit2;
}
/* set private HW device info */
hldev->ndev = vdev->ndev;
hldev->vdev = vdev;
hldev->pdev = pdev;
vdev->mtu = VXGE_HW_DEFAULT_MTU;
vdev->bar0 = bar0;
vdev->titan1 = titan1;
/* Virtual Path count */
vdev->vpath.device_id = hldev->first_vp_id;
vdev->vpath.vdev = vdev;
memcpy((u8 *)vdev->vpath.macaddr,
(u8 *)hldev->hw_info.mac_addrs[hldev->first_vp_id],
ETH_ALEN);
hldev->hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
hldev->hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
hldev->hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
vxge_debug(VXGE_INFO, "%s: Neterion %s Server Adapter\n",
VXGE_DRIVER_NAME, hldev->hw_info.product_desc);
vxge_debug(VXGE_INFO, "%s: SERIAL NUMBER: %s\n",
VXGE_DRIVER_NAME, hldev->hw_info.serial_number);
vxge_debug(VXGE_INFO, "%s: PART NUMBER: %s\n",
VXGE_DRIVER_NAME, hldev->hw_info.part_number);
vxge_debug(VXGE_INFO, "%s: MAC ADDR: %s\n",
VXGE_DRIVER_NAME, eth_ntoa(vdev->vpath.macaddr));
vxge_debug(VXGE_INFO,
"%s: Firmware version : %s Date : %s\n", VXGE_DRIVER_NAME,
hldev->hw_info.fw_version.version,
hldev->hw_info.fw_date.date);
vxge_debug(VXGE_INFO, "%s: %s Enabled\n",
VXGE_DRIVER_NAME, vxge_func_mode_names[function_mode]);
vxge_debug(VXGE_INFO, "%s: %s:%d Probe Exiting...\n",
VXGE_DRIVER_NAME, __func__, __LINE__);
return 0;
_exit2:
vxge_hw_device_terminate(hldev);
_exit1:
iounmap(bar0);
_exit0:
pci_set_drvdata(pdev, NULL);
printf("%s: WARNING!! Driver loading failed!!\n",
VXGE_DRIVER_NAME);
return ret;
}
/**
* vxge_remove - Free the PCI device
* @pdev: structure containing the PCI related information of the device.
* Description: This function is called by the PCI subsystem to release a
* PCI device and free all resources held by the device.
*/
static void
vxge_remove(struct pci_device *pdev)
{
struct __vxge_hw_device *hldev;
struct vxgedev *vdev = NULL;
struct net_device *ndev;
vxge_debug(VXGE_INFO,
"%s:%d\n", __func__, __LINE__);
hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev);
if (hldev == NULL)
return;
ndev = hldev->ndev;
vdev = netdev_priv(ndev);
iounmap(vdev->bar0);
vxge_device_unregister(hldev);
vxge_debug(VXGE_INFO,
"%s:%d Device unregistered\n", __func__, __LINE__);
vxge_hw_device_terminate(hldev);
pci_set_drvdata(pdev, NULL);
}
/* vxge net device operations */
static struct net_device_operations vxge_operations = {
.open = vxge_open,
.close = vxge_close,
.transmit = vxge_xmit,
.poll = vxge_poll,
.irq = vxge_irq,
};
static struct pci_device_id vxge_nics[] = {
PCI_ROM(0x17d5, 0x5833, "vxge-x3100", "Neterion X3100 Series", 0),
};
struct pci_driver vxge_driver __pci_driver = {
.ids = vxge_nics,
.id_count = (sizeof(vxge_nics) / sizeof(vxge_nics[0])),
.probe = vxge_probe,
.remove = vxge_remove,
};

@@ -0,0 +1,246 @@
/*
* vxge-main.h: gPXE driver for Neterion Inc's X3100 Series 10GbE
* PCIe I/O Virtualized Server Adapter.
*
* Copyright(c) 2002-2010 Neterion Inc.
*
* This software may be used and distributed according to the terms of
* the GNU General Public License (GPL), incorporated herein by
* reference. Drivers based on or derived from this code fall under
* the GPL and must retain the authorship, copyright and license
* notice.
*
*/
FILE_LICENCE(GPL2_ONLY);
#ifndef VXGE_MAIN_H
#define VXGE_MAIN_H
#include <unistd.h>
#include "vxge_traffic.h"
#include "vxge_config.h"
#define VXGE_DRIVER_NAME "vxge"
#define VXGE_DRIVER_VENDOR "Neterion, Inc"
#ifndef PCI_VENDOR_ID_S2IO
#define PCI_VENDOR_ID_S2IO 0x17D5
#endif
#ifndef PCI_DEVICE_ID_TITAN_WIN
#define PCI_DEVICE_ID_TITAN_WIN 0x5733
#endif
#ifndef PCI_DEVICE_ID_TITAN_UNI
#define PCI_DEVICE_ID_TITAN_UNI 0x5833
#endif
#define VXGE_HW_TITAN1_PCI_REVISION 1
#define VXGE_HW_TITAN1A_PCI_REVISION 2
#define VXGE_HP_ISS_SUBSYS_VENDORID 0x103C
#define VXGE_HP_ISS_SUBSYS_DEVICEID_1 0x323B
#define VXGE_HP_ISS_SUBSYS_DEVICEID_2 0x323C
#define VXGE_USE_DEFAULT 0xffffffff
#define VXGE_HW_VPATH_MSIX_ACTIVE 4
#define VXGE_ALARM_MSIX_ID 2
#define VXGE_HW_RXSYNC_FREQ_CNT 4
#define VXGE_LL_RX_COPY_THRESHOLD 256
#define VXGE_DEF_FIFO_LENGTH 84
#define NO_STEERING 0
#define PORT_STEERING 0x1
#define RTH_TCP_UDP_STEERING 0x2
#define RTH_IPV4_STEERING 0x3
#define RTH_IPV6_EX_STEERING 0x4
#define RTH_BUCKET_SIZE 8
#define TX_PRIORITY_STEERING 1
#define TX_VLAN_STEERING 2
#define TX_PORT_STEERING 3
#define TX_MULTIQ_STEERING 4
#define VXGE_HW_PROM_MODE_ENABLE 1
#define VXGE_HW_PROM_MODE_DISABLE 0
#define VXGE_HW_FW_UPGRADE_DISABLE 0
#define VXGE_HW_FW_UPGRADE_ALL 1
#define VXGE_HW_FW_UPGRADE_FORCE 2
#define VXGE_HW_FUNC_MODE_DISABLE 0
#define VXGE_TTI_BTIMER_VAL 250000
#define VXGE_T1A_TTI_LTIMER_VAL 80
#define VXGE_T1A_TTI_RTIMER_VAL 400
#define VXGE_TTI_LTIMER_VAL 1000
#define VXGE_TTI_RTIMER_VAL 0
#define VXGE_RTI_BTIMER_VAL 250
#define VXGE_RTI_LTIMER_VAL 100
#define VXGE_RTI_RTIMER_VAL 0
#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
#define VXGE_ISR_POLLING_CNT 8
#define VXGE_MAX_CONFIG_DEV 0xFF
#define VXGE_EXEC_MODE_DISABLE 0
#define VXGE_EXEC_MODE_ENABLE 1
#define VXGE_MAX_CONFIG_PORT 1
#define VXGE_ALL_VID_DISABLE 0
#define VXGE_ALL_VID_ENABLE 1
#define VXGE_PAUSE_CTRL_DISABLE 0
#define VXGE_PAUSE_CTRL_ENABLE 1
#define TTI_TX_URANGE_A 5
#define TTI_TX_URANGE_B 15
#define TTI_TX_URANGE_C 40
#define TTI_TX_UFC_A 5
#define TTI_TX_UFC_B 40
#define TTI_TX_UFC_C 60
#define TTI_TX_UFC_D 100
#define TTI_T1A_TX_UFC_A 30
#define TTI_T1A_TX_UFC_B 80
/* Slope - (max_mtu - min_mtu)/(max_mtu_ufc - min_mtu_ufc) */
/* Slope - 93 */
/* 60 - 9k Mtu, 140 - 1.5k mtu */
#define TTI_T1A_TX_UFC_C(mtu) (60 + ((VXGE_HW_MAX_MTU - mtu)/93))
/* Slope - 37 */
/* 100 - 9k Mtu, 300 - 1.5k mtu */
#define TTI_T1A_TX_UFC_D(mtu) (100 + ((VXGE_HW_MAX_MTU - mtu)/37))
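Plugging representative MTUs into the two macros gives a quick sanity check of the slope comments (integer division; the 140 and 300 figures above are approximations):

/* TTI_T1A_TX_UFC_C(9600) = 60 + (9600 - 9600)/93 = 60
 * TTI_T1A_TX_UFC_C(1500) = 60 + (9600 - 1500)/93 = 60 + 87 = 147 (~140)
 * TTI_T1A_TX_UFC_D(9600) = 100 + (9600 - 9600)/37 = 100
 * TTI_T1A_TX_UFC_D(1500) = 100 + (9600 - 1500)/37 = 100 + 218 = 318 (~300)
 */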
#define RTI_RX_URANGE_A 5
#define RTI_RX_URANGE_B 15
#define RTI_RX_URANGE_C 40
#define RTI_T1A_RX_URANGE_A 1
#define RTI_T1A_RX_URANGE_B 20
#define RTI_T1A_RX_URANGE_C 50
#define RTI_RX_UFC_A 1
#define RTI_RX_UFC_B 5
#define RTI_RX_UFC_C 10
#define RTI_RX_UFC_D 15
#define RTI_T1A_RX_UFC_B 20
#define RTI_T1A_RX_UFC_C 50
#define RTI_T1A_RX_UFC_D 60
/*
* The interrupt rate is maintained at 3k per second with the moderation
* parameters for most traffic but not all. This is the maximum interrupt
* count allowed per 10 millisecond period, per function with INTA or per
* vector in the case of MSI-X. Enabled only for Titan 1A.
*/
#define VXGE_T1A_MAX_INTERRUPT_COUNT 100
#define VXGE_ENABLE_NAPI 1
#define VXGE_DISABLE_NAPI 0
#define VXGE_LRO_MAX_BYTES 0x4000
#define VXGE_T1A_LRO_MAX_BYTES 0xC000
#define VXGE_HW_MIN_VPATH_TX_BW_SUPPORT 0
#define VXGE_HW_MAX_VPATH_TX_BW_SUPPORT 7
/* Timer period in milliseconds */
#define VXGE_TIMER_DELAY 10000
#define VXGE_TIMER_COUNT (2 * 60)
#define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE)
#define VXGE_REG_DUMP_BUFSIZE 65000
#define is_mf(function_mode) \
((function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) || \
(function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17) || \
(function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2) || \
(function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4))
#define is_titan1(dev_id, rev) (((dev_id == PCI_DEVICE_ID_TITAN_UNI) || \
(dev_id == PCI_DEVICE_ID_TITAN_WIN)) && \
(rev == VXGE_HW_TITAN1_PCI_REVISION))
/* These flags represent the device's temporary state */
#define __VXGE_STATE_RESET_CARD 0x01
#define __VXGE_STATE_CARD_UP 0x02
#define test_bit(bit, loc) ((bit) & (loc))
#define set_bit(bit, loc) do { (loc) |= (bit); } while (0)
#define clear_bit(bit, loc) do { (loc) &= ~(bit); } while (0)
#define msleep(n) mdelay(n)
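Note that, unlike the Linux helpers of the same name, these take a bit mask rather than a bit index: __VXGE_STATE_CARD_UP is already the value 0x02. A minimal illustration (struct vxgedev is defined just below):

static inline void vxge_state_example(struct vxgedev *vdev)
{
	set_bit(__VXGE_STATE_CARD_UP, vdev->state);	/* state |= 0x02 */
	if (test_bit(__VXGE_STATE_CARD_UP, vdev->state))	/* non-zero */
		clear_bit(__VXGE_STATE_CARD_UP, vdev->state);	/* state &= ~0x02 */
}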
struct vxge_fifo {
struct net_device *ndev;
struct pci_device *pdev;
struct __vxge_hw_fifo *fifoh;
};
struct vxge_ring {
struct net_device *ndev;
struct pci_device *pdev;
struct __vxge_hw_ring *ringh;
};
struct vxge_vpath {
struct vxge_fifo fifo;
struct vxge_ring ring;
/* Actual vpath id for this vpath in the device - 0 to 16 */
int device_id;
int is_open;
int vp_open;
u8 (macaddr)[ETH_ALEN];
u8 (macmask)[ETH_ALEN];
struct vxgedev *vdev;
struct __vxge_hw_virtualpath *vpathh;
};
struct vxgedev {
struct net_device *ndev;
struct pci_device *pdev;
struct __vxge_hw_device *devh;
u8 titan1;
unsigned long state;
struct vxge_vpath vpath;
void __iomem *bar0;
int mtu;
char fw_version[VXGE_HW_FW_STRLEN];
};
static inline int is_zero_ether_addr(const u8 *addr)
{
return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
}
static inline int is_multicast_ether_addr(const u8 *addr)
{
return (0x01 & addr[0]);
}
/* checks that the Ethernet address @addr is a valid unicast address */
static inline int is_valid_ether_addr(const u8 *addr)
{
return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr);
}
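As a worked check of the helpers above: the broadcast address trips the multicast test (0x01 & 0xff is non-zero), so it is rejected as a unicast address (illustration only):

static inline int vxge_bcast_is_invalid(void)
{
	static const u8 bcast[ETH_ALEN] =
		{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	/* returns 1: broadcast fails is_valid_ether_addr() */
	return !is_valid_ether_addr(bcast);
}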
void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id);
void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id);
int vxge_reset(struct vxgedev *vdev);
enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw,
struct vxge_hw_fifo_txd *txdp, enum vxge_hw_fifo_tcode tcode);
void vxge_close_vpaths(struct vxgedev *vdev);
int vxge_open_vpaths(struct vxgedev *vdev);
enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
#endif

File diff suppressed because it is too large

@@ -0,0 +1,742 @@
/*
* vxge-traffic.c: gPXE driver for Neterion Inc's X3100 Series 10GbE
* PCIe I/O Virtualized Server Adapter.
*
* Copyright(c) 2002-2010 Neterion Inc.
*
* This software may be used and distributed according to the terms of
* the GNU General Public License (GPL), incorporated herein by
* reference. Drivers based on or derived from this code fall under
* the GPL and must retain the authorship, copyright and license
* notice.
*
*/
FILE_LICENCE(GPL2_ONLY);
#include <gpxe/netdevice.h>
#include <errno.h>
#include "vxge_traffic.h"
#include "vxge_config.h"
#include "vxge_main.h"
/*
* vxge_hw_vpath_intr_enable - Enable vpath interrupts.
* @vpath: Virtual Path handle.
*
* Enable vpath interrupts. The function is to be executed last in the
* vpath initialization sequence.
*
* See also: vxge_hw_vpath_intr_disable()
*/
enum vxge_hw_status
vxge_hw_vpath_intr_enable(struct __vxge_hw_virtualpath *vpath)
{
u64 val64;
struct vxge_hw_vpath_reg *vp_reg;
enum vxge_hw_status status = VXGE_HW_OK;
if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
status = VXGE_HW_ERR_VPATH_NOT_OPEN;
goto exit;
}
vp_reg = vpath->vp_reg;
writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->general_errors_reg);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->pci_config_errors_reg);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->mrpcim_to_vpath_alarm_reg);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->srpcim_to_vpath_alarm_reg);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->vpath_ppif_int_status);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->srpcim_msg_to_vpath_reg);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->vpath_pcipif_int_status);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->prc_alarm_reg);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->wrdma_alarm_status);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->asic_ntwk_vp_err_reg);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->xgmac_vp_int_status);
val64 = readq(&vp_reg->vpath_general_int_status);
/* Mask unwanted interrupts */
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->vpath_pcipif_int_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->srpcim_msg_to_vpath_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->srpcim_to_vpath_alarm_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->mrpcim_to_vpath_alarm_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->pci_config_errors_mask);
/* Unmask the individual interrupts */
writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
&vp_reg->general_errors_mask);
__vxge_hw_pio_mem_write32_upper(
(u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
&vp_reg->kdfcctl_errors_mask);
__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
__vxge_hw_pio_mem_write32_upper(
(u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
&vp_reg->prc_alarm_mask);
__vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
__vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
if (vpath->hldev->first_vp_id != vpath->vp_id)
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->asic_ntwk_vp_err_mask);
else
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT|
VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK),
0, 32), &vp_reg->asic_ntwk_vp_err_mask);
__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_general_int_mask);
exit:
return status;
}
/*
* vxge_hw_vpath_intr_disable - Disable vpath interrupts.
* @vpath: Virtual Path handle.
*
* Disable vpath interrupts. The function is to be executed last in the
* vpath teardown sequence.
*
* See also: vxge_hw_vpath_intr_enable()
*/
enum vxge_hw_status
vxge_hw_vpath_intr_disable(struct __vxge_hw_virtualpath *vpath)
{
u64 val64;
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_hw_vpath_reg __iomem *vp_reg;
if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
status = VXGE_HW_ERR_VPATH_NOT_OPEN;
goto exit;
}
vp_reg = vpath->vp_reg;
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->vpath_general_int_mask);
val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->general_errors_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->pci_config_errors_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->mrpcim_to_vpath_alarm_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->srpcim_to_vpath_alarm_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->vpath_ppif_int_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->srpcim_msg_to_vpath_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->vpath_pcipif_int_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->wrdma_alarm_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->prc_alarm_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->xgmac_vp_int_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->asic_ntwk_vp_err_mask);
exit:
return status;
}
/**
* vxge_hw_device_mask_all - Mask all device interrupts.
* @hldev: HW device handle.
*
* Mask all device interrupts.
*
* See also: vxge_hw_device_unmask_all()
*/
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
{
u64 val64;
val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
&hldev->common_reg->titan_mask_all_int);
return;
}
/**
* vxge_hw_device_unmask_all - Unmask all device interrupts.
* @hldev: HW device handle.
*
* Unmask all device interrupts.
*
* See also: vxge_hw_device_mask_all()
*/
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
{
u64 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
&hldev->common_reg->titan_mask_all_int);
return;
}
/**
* vxge_hw_device_intr_enable - Enable interrupts.
* @hldev: HW device handle.
*
* Enable Titan interrupts. The function is to be executed last in the
* Titan initialization sequence.
*
* See also: vxge_hw_device_intr_disable()
*/
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
u64 val64;
u32 val32;
vxge_hw_device_mask_all(hldev);
vxge_hw_vpath_intr_enable(&hldev->virtual_path);
val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
if (val64 != 0) {
writeq(val64, &hldev->common_reg->tim_int_status0);
writeq(~val64, &hldev->common_reg->tim_int_mask0);
}
val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
if (val32 != 0) {
__vxge_hw_pio_mem_write32_upper(val32,
&hldev->common_reg->tim_int_status1);
__vxge_hw_pio_mem_write32_upper(~val32,
&hldev->common_reg->tim_int_mask1);
}
val64 = readq(&hldev->common_reg->titan_general_int_status);
/* We have not enabled the top level interrupt yet.
* This will be controlled from the vxge_irq() entry API.
*/
return;
}
/**
* vxge_hw_device_intr_disable - Disable Titan interrupts.
* @hldev: HW device handle.
*
* Disable Titan interrupts.
*
* See also: vxge_hw_device_intr_enable()
*/
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
vxge_hw_device_mask_all(hldev);
/* mask all the tim interrupts */
writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
__vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
&hldev->common_reg->tim_int_mask1);
vxge_hw_vpath_intr_disable(&hldev->virtual_path);
return;
}
/**
* vxge_hw_ring_rxd_post - Post descriptor on the ring.
* @ring: Handle to the ring object used for receive
* @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
*
* Post descriptor on the ring.
* Prior to posting, the descriptor should be filled in accordance with the
* Host/Titan interface specification for a given service (LL, etc.).
*/
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring __unused,
struct vxge_hw_ring_rxd_1 *rxdp)
{
rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
}
/**
* __vxge_hw_non_offload_db_post - Post non offload doorbell
*
* @fifo: fifo handle
* @txdl_ptr: The starting location of the TxDL in host memory
* @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
*
* This function posts a non-offload doorbell to the doorbell FIFO
*
*/
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
u64 txdl_ptr, u32 num_txds)
{
writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds),
&fifo->nofl_db->control_0);
wmb();
writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
wmb();
}
/**
* vxge_hw_fifo_free_txdl_get: fetch next available txd in the fifo
*
* @fifo: tx channel handle
*/
struct vxge_hw_fifo_txd *
vxge_hw_fifo_free_txdl_get(struct __vxge_hw_fifo *fifo)
{
struct vxge_hw_fifo_txd *txdp;
txdp = fifo->txdl + fifo->sw_offset;
if (txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER) {
vxge_debug(VXGE_ERR, "%s:%d, error: txd(%d) owned by hw\n",
__func__, __LINE__, fifo->sw_offset);
return NULL;
}
return txdp;
}
/**
* vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
* descriptor.
* @fifo: Handle to the fifo object used for non offload send
* @txdlh: Descriptor handle.
* @iob: data buffer.
*/
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
struct vxge_hw_fifo_txd *txdp,
struct io_buffer *iob)
{
txdp->control_0 = VXGE_HW_FIFO_TXD_GATHER_CODE(
VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST);
txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(iob_len(iob));
txdp->control_1 = VXGE_HW_FIFO_TXD_INT_NUMBER(fifo->tx_intr_num);
txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
txdp->host_control = (intptr_t)iob;
txdp->buffer_pointer = virt_to_bus(iob->data);
}
/**
* vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
* @fifo: Handle to the fifo object used for non offload send
* @txdp: Tx Descriptor
*
* Post descriptor on the 'fifo' type channel for transmission.
* Prior to posting, the descriptor should be filled in accordance with the
* Host/Titan interface specification for a given service (LL, etc.).
*
*/
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo,
struct vxge_hw_fifo_txd *txdp)
{
txdp->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
__vxge_hw_non_offload_db_post(fifo, (u64) virt_to_bus(txdp), 0);
vxge_hw_fifo_txd_offset_up(&fifo->sw_offset);
}
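/*
* The three fifo calls above form the transmit fast path. A minimal
* sketch with error handling trimmed (iob is a filled io_buffer from
* the network stack; illustrative only):
*/
#if 0
static int example_transmit(struct __vxge_hw_fifo *fifo,
	struct io_buffer *iob)
{
	struct vxge_hw_fifo_txd *txdp;

	txdp = vxge_hw_fifo_free_txdl_get(fifo);
	if (!txdp)
		return -ENOBUFS;	/* next TxD still owned by the hw */
	vxge_hw_fifo_txdl_buffer_set(fifo, txdp, iob);
	vxge_hw_fifo_txdl_post(fifo, txdp);	/* rings the doorbell */
	return 0;
}
#endif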
/*
* __vxge_hw_vpath_alarm_process - Process Alarms.
* @vpath: Virtual Path.
*
* Process vpath alarms.
*
*/
static enum vxge_hw_status __vxge_hw_vpath_alarm_process(
struct __vxge_hw_virtualpath *vpath)
{
u64 val64;
u64 alarm_status;
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_device *hldev = NULL;
struct vxge_hw_vpath_reg *vp_reg;
hldev = vpath->hldev;
vp_reg = vpath->vp_reg;
alarm_status = readq(&vp_reg->vpath_general_int_status);
if (alarm_status == VXGE_HW_ALL_FOXES) {
vxge_debug(VXGE_ERR, "%s: %s:%d, slot freeze error\n",
hldev->ndev->name, __func__, __LINE__);
status = VXGE_HW_ERR_SLOT_FREEZE;
goto out;
}
if (alarm_status & ~(
VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
vxge_debug(VXGE_ERR, "%s: %s:%d, Unknown vpath alarm\n",
hldev->ndev->name, __func__, __LINE__);
status = VXGE_HW_FAIL;
goto out;
}
if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
val64 = readq(&vp_reg->xgmac_vp_int_status);
if (val64 &
VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
if (((val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
(!(val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
((val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
&& (!(val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
))) {
writeq(VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
&vp_reg->asic_ntwk_vp_err_mask);
netdev_link_down(hldev->ndev);
vxge_debug(VXGE_INTR, "%s: %s:%d link down\n",
hldev->ndev->name, __func__, __LINE__);
}
if (((val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
(!(val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
((val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
&& (!(val64 &
VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
))) {
writeq(VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
&vp_reg->asic_ntwk_vp_err_mask);
netdev_link_up(hldev->ndev);
vxge_debug(VXGE_INTR, "%s: %s:%d link up\n",
hldev->ndev->name, __func__, __LINE__);
}
writeq(VXGE_HW_INTR_MASK_ALL,
&vp_reg->asic_ntwk_vp_err_reg);
}
} else {
vxge_debug(VXGE_INFO, "%s: %s:%d unhandled alarm %llx\n",
hldev->ndev->name, __func__, __LINE__,
alarm_status);
}
out:
return status;
}
/**
* vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
* condition that has caused the Tx and Rx interrupt.
* @hldev: HW device.
*
* Acknowledge (that is, clear) the condition that has caused
* the Tx and Rx interrupt.
* See also: vxge_hw_device_begin_irq(),
* vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
*/
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
{
if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
(hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
&hldev->common_reg->tim_int_status0);
}
if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
__vxge_hw_pio_mem_write32_upper(
(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
&hldev->common_reg->tim_int_status1);
}
return;
}
/**
* vxge_hw_device_begin_irq - Begin IRQ processing.
* @hldev: HW device handle.
*
* The function performs two actions. It first checks whether the
* interrupt was raised by the device (relevant when the IRQ line is
* shared). It then acknowledges pending traffic interrupts and
* processes any vpath alarms.
*
* Note:
* vxge_hw_device_begin_irq() does not flush MMIO writes through the
* bridge. Therefore, two back-to-back interrupts are potentially
* possible.
*
* Returns: VXGE_HW_ERR_WRONG_IRQ if the interrupt is not "ours" (note
* that in this case the device interrupts remain enabled),
* VXGE_HW_ERR_SLOT_FREEZE on a critical error, and otherwise the
* result of alarm processing (VXGE_HW_OK if no alarm was raised).
*/
enum vxge_hw_status
vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev)
{
u64 val64;
u64 adapter_status;
u64 vpath_mask;
enum vxge_hw_status ret = VXGE_HW_OK;
val64 = readq(&hldev->common_reg->titan_general_int_status);
if (!val64) {
ret = VXGE_HW_ERR_WRONG_IRQ;
goto exit;
}
if (val64 == VXGE_HW_ALL_FOXES) {
adapter_status = readq(&hldev->common_reg->adapter_status);
if (adapter_status == VXGE_HW_ALL_FOXES) {
vxge_debug(VXGE_ERR, "%s: %s:%d critical error "
"occurred\n", hldev->ndev->name,
__func__, __LINE__);
ret = VXGE_HW_ERR_SLOT_FREEZE;
goto exit;
}
}
vpath_mask = hldev->vpaths_deployed >>
(64 - VXGE_HW_MAX_VIRTUAL_PATHS);
if (val64 & VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
vpath_mask))
vxge_hw_device_clear_tx_rx(hldev);
if (val64 & VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)
ret = __vxge_hw_vpath_alarm_process(&hldev->virtual_path);
exit:
return ret;
}
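/*
* A hedged sketch of a poll/irq entry built on the call above: check
* that the interrupt is ours (begin_irq also acknowledges traffic
* interrupts and processes alarms), then service both directions
* (illustrative only):
*/
#if 0
static void example_poll(struct __vxge_hw_device *hldev,
	struct __vxge_hw_ring *ring, struct __vxge_hw_fifo *fifo)
{
	if (vxge_hw_device_begin_irq(hldev) != VXGE_HW_OK)
		return;	/* not our interrupt, or a critical error */
	vxge_hw_vpath_poll_rx(ring);
	vxge_hw_vpath_poll_tx(fifo);
}
#endif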
/**
* vxge_hw_vpath_doorbell_rx - Indicates to hw the qwords of receive
* descriptors posted.
* @ring: Handle to the ring object used for receive
*
* The function writes the number of qwords of RxDs posted during replenishment.
* Since the function is called frequently, a flush is not required to post the
* write transaction. At the very least, the previous write will be flushed
* once the subsequent write is made.
*
* Returns: None.
*/
void vxge_hw_vpath_doorbell_rx(struct __vxge_hw_ring *ring)
{
u32 rxds_qw_per_block = VXGE_HW_MAX_RXDS_PER_BLOCK_1 *
VXGE_HW_RING_RXD_QWORDS_MODE_1;
ring->doorbell_cnt += VXGE_HW_RING_RXD_QWORDS_MODE_1;
ring->total_db_cnt += VXGE_HW_RING_RXD_QWORDS_MODE_1;
if (ring->total_db_cnt >= rxds_qw_per_block) {
/* For each block add 4 more qwords */
ring->doorbell_cnt += VXGE_HW_RING_RXD_QWORDS_MODE_1;
/* Reset total count */
ring->total_db_cnt -= rxds_qw_per_block;
}
if (ring->doorbell_cnt >= ring->rxd_qword_limit) {
wmb();
writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(
ring->doorbell_cnt),
&ring->vp_reg->prc_rxd_doorbell);
ring->doorbell_cnt = 0;
}
}
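/*
* Worked example of the accounting above, assuming
* VXGE_HW_RING_RXD_QWORDS_MODE_1 == 4 and
* VXGE_HW_MAX_RXDS_PER_BLOCK_1 == 127 (both defined elsewhere, so
* treat the numbers as an assumption): every posted RxD adds 4
* qwords; once a full block's worth (127 * 4 = 508 qwords) has been
* posted, 4 extra qwords are credited for the block's trailing
* qwords and total_db_cnt is rewound by 508 so the next block is
* counted from zero.
*/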
#define ETH_FCS_LEN 4
/**
* vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
* descriptors and process the same.
* @ring: Handle to the ring object used for receive
*
* The function polls the Rx for the completed descriptors.
*/
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
struct __vxge_hw_device *hldev;
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_hw_ring_rxd_1 *rxd;
unsigned int len;
enum vxge_hw_ring_tcode tcode;
struct io_buffer *rx_iob, *iobuf = NULL;
u16 poll_count = 0;
hldev = ring->vpathh->hldev;
do {
rxd = &ring->rxdl->rxd[ring->rxd_offset];
tcode = VXGE_HW_RING_RXD_T_CODE_GET(rxd->control_0);
/* if tcode is VXGE_HW_RING_T_CODE_FRM_DROP, it is
* possible that the ownership bit is still set to the adapter
*/
if ((rxd->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)
&& (tcode == VXGE_HW_RING_T_CODE_OK)) {
status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
goto err0;
}
vxge_debug(VXGE_INFO, "%s: rx frame received at offset %d\n",
hldev->ndev->name, ring->rxd_offset);
if (tcode != VXGE_HW_RING_T_CODE_OK) {
netdev_rx_err(hldev->ndev, NULL, -EINVAL);
vxge_debug(VXGE_ERR, "%s:%d, rx error tcode %d\n",
__func__, __LINE__, tcode);
status = VXGE_HW_FAIL;
goto err1;
}
iobuf = (struct io_buffer *)(intptr_t)rxd->host_control;
len = VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxd->control_1);
len -= ETH_FCS_LEN;
rx_iob = alloc_iob(len);
if (!rx_iob) {
netdev_rx_err(hldev->ndev, NULL, -ENOMEM);
vxge_debug(VXGE_ERR, "%s:%d, alloc_iob error\n",
__func__, __LINE__);
status = VXGE_HW_ERR_OUT_OF_MEMORY;
goto err1;
}
memcpy(iob_put(rx_iob, len), iobuf->data, len);
/* Add this packet to the receive queue. */
netdev_rx(hldev->ndev, rx_iob);
err1:
/* repost the rxd */
rxd->control_0 = rxd->control_1 = 0;
vxge_hw_ring_rxd_1b_set(rxd, iobuf,
VXGE_LL_MAX_FRAME_SIZE(hldev->vdev));
vxge_hw_ring_rxd_post(ring, rxd);
/* repost the qword count for doorbell */
vxge_hw_vpath_doorbell_rx(ring);
/* increment the descriptor offset */
vxge_hw_ring_rxd_offset_up(&ring->rxd_offset);
} while (++poll_count < ring->rx_poll_weight);
err0:
return status;
}
/**
* vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
* the same.
* @fifo: Handle to the fifo object used for non offload send
*
* The function polls the Tx for the completed descriptors and calls
* the driver via supplied completion callback.
*/
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_hw_fifo_txd *txdp;
txdp = fifo->txdl + fifo->hw_offset;
if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)
&& (txdp->host_control)) {
vxge_xmit_compl(fifo, txdp,
VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0));
vxge_hw_fifo_txd_offset_up(&fifo->hw_offset);
}
return status;
}

View File

@ -0,0 +1,309 @@
/*
* vxge-traffic.h: gPXE driver for Neterion Inc's X3100 Series 10GbE
* PCIe I/O Virtualized Server Adapter.
*
* Copyright(c) 2002-2010 Neterion Inc.
*
* This software may be used and distributed according to the terms of
* the GNU General Public License (GPL), incorporated herein by
* reference. Drivers based on or derived from this code fall under
* the GPL and must retain the authorship, copyright and license
* notice.
*
*/
FILE_LICENCE(GPL2_ONLY);
#ifndef VXGE_TRAFFIC_H
#define VXGE_TRAFFIC_H
#include <stdint.h>
#include <gpxe/if_ether.h>
#include <gpxe/iobuf.h>
#include "vxge_reg.h"
#include "vxge_version.h"
#define VXGE_HW_DTR_MAX_T_CODE 16
#define VXGE_HW_ALL_FOXES 0xFFFFFFFFFFFFFFFFULL
#define VXGE_HW_INTR_MASK_ALL 0xFFFFFFFFFFFFFFFFULL
#define VXGE_HW_MAX_VIRTUAL_PATHS 17
#define VXGE_HW_MAX_VIRTUAL_FUNCTIONS 8
#define VXGE_HW_MAC_MAX_MAC_PORT_ID 3
#define VXGE_HW_DEFAULT_32 0xffffffff
/* frames sizes */
#define VXGE_HW_HEADER_802_2_SIZE 3
#define VXGE_HW_HEADER_SNAP_SIZE 5
#define VXGE_HW_HEADER_VLAN_SIZE 4
#define VXGE_HW_MAC_HEADER_MAX_SIZE \
(ETH_HLEN + \
VXGE_HW_HEADER_802_2_SIZE + \
VXGE_HW_HEADER_VLAN_SIZE + \
VXGE_HW_HEADER_SNAP_SIZE)
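/* Worked example: with ETH_HLEN == 14, VXGE_HW_MAC_HEADER_MAX_SIZE
* evaluates to 14 + 3 + 4 + 5 = 26 bytes of worst-case L2 framing. */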
/* 32bit alignments */
/* A receive data corruption can occur resulting in either a single-bit or
double-bit ECC error being flagged in the ASIC if the starting offset of a
buffer in single buffer mode is 0x2 to 0xa. The single bit ECC error will not
lock up the card but can hide the data corruption while the double-bit ECC
error will lock up the card. Limiting the starting offset of the buffers to
0x0, 0x1 or to a value greater than 0xF will work around this issue.
VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN of 2 causes the starting offset of
the buffer to be 0x2, 0x12 and so on, so that the start of the IP header is
dword aligned. A buffer start of 0x2 will trigger this problem. To
avoid the problem in all cases, add 0x10 to 0x2, ensuring that the start of
the buffer falls outside of the problem-causing offsets.
*/
#define VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN 0x12
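/* The value decomposes as 0x12 = 0x2 (keeps the IP header dword
* aligned) + 0x10 (moves the buffer start past the problematic
* 0x2-0xa offset window described above). */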
#define VXGE_HW_HEADER_802_2_SNAP_ALIGN 2
#define VXGE_HW_HEADER_802_2_ALIGN 3
#define VXGE_HW_HEADER_SNAP_ALIGN 1
#define VXGE_HW_L3_CKSUM_OK 0xFFFF
#define VXGE_HW_L4_CKSUM_OK 0xFFFF
/* Forward declarations */
struct __vxge_hw_device;
struct __vxge_hw_virtualpath;
struct __vxge_hw_fifo;
struct __vxge_hw_ring;
struct vxge_hw_ring_rxd_1;
struct vxge_hw_fifo_txd;
#ifndef TRUE
#define TRUE 1
#endif
#ifndef FALSE
#define FALSE 0
#endif
/*VXGE_HW_STATUS_H*/
#define VXGE_HW_EVENT_BASE 0
#define VXGE_LL_EVENT_BASE 100
/**
* enum vxge_hw_event- Enumerates slow-path HW events.
* @VXGE_HW_EVENT_UNKNOWN: Unknown (and invalid) event.
* @VXGE_HW_EVENT_SERR: Serious vpath hardware error event.
* @VXGE_HW_EVENT_ECCERR: vpath ECC error event.
* @VXGE_HW_EVENT_VPATH_ERR: Error local to the respective vpath
* @VXGE_HW_EVENT_FIFO_ERR: FIFO Doorbell fifo error.
* @VXGE_HW_EVENT_SRPCIM_SERR: srpcim hardware error event.
* @VXGE_HW_EVENT_MRPCIM_SERR: mrpcim hardware error event.
* @VXGE_HW_EVENT_MRPCIM_ECCERR: mrpcim ecc error event.
* @VXGE_HW_EVENT_RESET_START: Privileged entity is starting device reset
* @VXGE_HW_EVENT_RESET_COMPLETE: Device reset has been completed
* @VXGE_HW_EVENT_SLOT_FREEZE: Slot-freeze event. The driver tries to
* distinguish slot-freeze from other critical events (e.g. ECC) when it
* is impossible to PIO read "through" the bus, i.e. when getting all-foxes.
*
* enum vxge_hw_event enumerates slow-path HW events.
*
* See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
* vxge_uld_link_down_f{}.
*/
enum vxge_hw_event {
VXGE_HW_EVENT_UNKNOWN = 0,
/* HW events */
VXGE_HW_EVENT_RESET_START = VXGE_HW_EVENT_BASE + 1,
VXGE_HW_EVENT_RESET_COMPLETE = VXGE_HW_EVENT_BASE + 2,
VXGE_HW_EVENT_LINK_DOWN = VXGE_HW_EVENT_BASE + 3,
VXGE_HW_EVENT_LINK_UP = VXGE_HW_EVENT_BASE + 4,
VXGE_HW_EVENT_ALARM_CLEARED = VXGE_HW_EVENT_BASE + 5,
VXGE_HW_EVENT_ECCERR = VXGE_HW_EVENT_BASE + 6,
VXGE_HW_EVENT_MRPCIM_ECCERR = VXGE_HW_EVENT_BASE + 7,
VXGE_HW_EVENT_FIFO_ERR = VXGE_HW_EVENT_BASE + 8,
VXGE_HW_EVENT_VPATH_ERR = VXGE_HW_EVENT_BASE + 9,
VXGE_HW_EVENT_CRITICAL_ERR = VXGE_HW_EVENT_BASE + 10,
VXGE_HW_EVENT_SERR = VXGE_HW_EVENT_BASE + 11,
VXGE_HW_EVENT_SRPCIM_SERR = VXGE_HW_EVENT_BASE + 12,
VXGE_HW_EVENT_MRPCIM_SERR = VXGE_HW_EVENT_BASE + 13,
VXGE_HW_EVENT_SLOT_FREEZE = VXGE_HW_EVENT_BASE + 14,
};
#define VXGE_HW_MAX_INTR_PER_VP 4
#define VXGE_HW_VPATH_INTR_TX 0
#define VXGE_HW_VPATH_INTR_RX 1
#define VXGE_HW_VPATH_INTR_EINTA 2
#define VXGE_HW_VPATH_INTR_BMAP 3
#define VXGE_HW_BLOCK_SIZE 4096
#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL 17
#define VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL 18
#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_RX_AVE_NET_UTIL 19
#define VXGE_HW_TIM_UTIL_SEL_PER_VPATH 63
/**
* enum vxge_hw_ring_tcode - Transfer codes returned by adapter
* @VXGE_HW_RING_T_CODE_OK: Transfer ok.
* @VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH: Layer 3 checksum presentation
* configuration mismatch.
* @VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH: Layer 4 checksum presentation
* configuration mismatch.
* @VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH: Layer 3 and Layer 4 checksum
* presentation configuration mismatch.
* @VXGE_HW_RING_T_CODE_L3_PKT_ERR: Layer 3 error (unparseable packet,
* such as unknown IPv6 header).
* @VXGE_HW_RING_T_CODE_L2_FRM_ERR: Layer 2 error (frame integrity
* error, such as FCS or ECC).
* @VXGE_HW_RING_T_CODE_BUF_SIZE_ERR: Buffer size error: the RxD
* buffer(s) were not appropriately sized and data loss occurred.
* @VXGE_HW_RING_T_CODE_INT_ECC_ERR: Internal ECC error: RxD corrupted.
* @VXGE_HW_RING_T_CODE_BENIGN_OVFLOW: Benign overflow: the contents of
* Segment1 exceeded the capacity of Buffer1 and the remainder
* was placed in Buffer2. Segment2 now starts in Buffer3.
* No data loss or errors occurred.
* @VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF: Buffer size 0: one of the RxD's
* assigned buffers has a size of 0 bytes.
* @VXGE_HW_RING_T_CODE_FRM_DROP: Frame dropped, either due to a
* VPath Reset or because of a VPIN mismatch.
* @VXGE_HW_RING_T_CODE_UNUSED: Unused.
* @VXGE_HW_RING_T_CODE_MULTI_ERR: Multiple errors: more than one
* transfer code condition occurred.
*
* Transfer codes returned by adapter.
*/
enum vxge_hw_ring_tcode {
VXGE_HW_RING_T_CODE_OK = 0x0,
VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH = 0x1,
VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH = 0x2,
VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH = 0x3,
VXGE_HW_RING_T_CODE_L3_PKT_ERR = 0x5,
VXGE_HW_RING_T_CODE_L2_FRM_ERR = 0x6,
VXGE_HW_RING_T_CODE_BUF_SIZE_ERR = 0x7,
VXGE_HW_RING_T_CODE_INT_ECC_ERR = 0x8,
VXGE_HW_RING_T_CODE_BENIGN_OVFLOW = 0x9,
VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF = 0xA,
VXGE_HW_RING_T_CODE_FRM_DROP = 0xC,
VXGE_HW_RING_T_CODE_UNUSED = 0xE,
VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
};
/**
* enum vxge_hw_fifo_gather_code - Gather codes used in fifo TxD
* @VXGE_HW_FIFO_GATHER_CODE_FIRST: First TxDL
* @VXGE_HW_FIFO_GATHER_CODE_MIDDLE: Middle TxDL
* @VXGE_HW_FIFO_GATHER_CODE_LAST: Last TxDL
* @VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST: First and Last TxDL.
*
* These gather codes are used to indicate the position of a TxD in a TxD list
*/
enum vxge_hw_fifo_gather_code {
VXGE_HW_FIFO_GATHER_CODE_FIRST = 0x2,
VXGE_HW_FIFO_GATHER_CODE_MIDDLE = 0x0,
VXGE_HW_FIFO_GATHER_CODE_LAST = 0x1,
VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST = 0x3
};
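/* Note: this driver sends each frame in a single TxD, so only
* VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST is used in practice (see
* vxge_hw_fifo_txdl_buffer_set()). */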
/**
* enum vxge_hw_fifo_tcode - tcodes used in fifo
* @VXGE_HW_FIFO_T_CODE_OK: Transfer OK
* @VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT: PCI read transaction (either TxD or
* frame data) returned with corrupt data.
* @VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL: PCI read transaction was returned
* with no data.
* @VXGE_HW_FIFO_T_CODE_INVALID_MSS: The host attempted to send either a
* frame or LSO MSS that was too long (>9800B).
* @VXGE_HW_FIFO_T_CODE_LSO_ERROR: Error detected during TCP/UDP Large Send
* Offload operation, due to improper header template,
* unsupported protocol, etc.
* @VXGE_HW_FIFO_T_CODE_UNUSED: Unused
* @VXGE_HW_FIFO_T_CODE_MULTI_ERROR: Set to 1 by the adapter if multiple
* data buffer transfer errors are encountered. Otherwise it is
* set to 0.
*
* These tcodes are returned by various APIs for TxD status
*/
enum vxge_hw_fifo_tcode {
VXGE_HW_FIFO_T_CODE_OK = 0x0,
VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT = 0x1,
VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL = 0x2,
VXGE_HW_FIFO_T_CODE_INVALID_MSS = 0x3,
VXGE_HW_FIFO_T_CODE_LSO_ERROR = 0x4,
VXGE_HW_FIFO_T_CODE_UNUSED = 0x7,
VXGE_HW_FIFO_T_CODE_MULTI_ERROR = 0x8
};
enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring);
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring_handle,
struct vxge_hw_ring_rxd_1 *rxdp);
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
struct vxge_hw_fifo_txd *txdp,
struct io_buffer *iob);
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo,
struct vxge_hw_fifo_txd *txdp);
enum vxge_hw_status __vxge_hw_ring_create(
struct __vxge_hw_virtualpath *vpath,
struct __vxge_hw_ring *ring);
enum vxge_hw_status __vxge_hw_ring_delete(
struct __vxge_hw_ring *ringh);
enum vxge_hw_status __vxge_hw_fifo_create(
struct __vxge_hw_virtualpath *vpath,
struct __vxge_hw_fifo *fifo);
enum vxge_hw_status
__vxge_hw_fifo_delete(struct __vxge_hw_fifo *fifo);
enum vxge_hw_status __vxge_hw_vpath_reset(
struct __vxge_hw_device *devh, u32 vp_id);
enum vxge_hw_status
__vxge_hw_vpath_enable(struct __vxge_hw_device *devh, u32 vp_id);
void
__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev);
enum vxge_hw_status
__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *devh, u32 vp_id);
enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh);
enum vxge_hw_status
__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *devh, u32 vp_id);
enum vxge_hw_status
__vxge_hw_vpath_initialize(struct __vxge_hw_device *devh, u32 vp_id);
enum vxge_hw_status __vxge_hw_vp_initialize(
struct __vxge_hw_device *hldev, u32 vp_id,
struct __vxge_hw_virtualpath *vpath);
void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev,
struct __vxge_hw_virtualpath *vpath);
enum vxge_hw_status
vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev);
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev);
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev);
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev);
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev);
void vxge_hw_vpath_doorbell_rx(struct __vxge_hw_ring *ringh);
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ringh);
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo);
struct vxge_hw_fifo_txd *
vxge_hw_fifo_free_txdl_get(struct __vxge_hw_fifo *fifo);
#endif

View File

@ -0,0 +1,40 @@
/*
* vxge-version.h: gPXE driver for Neterion Inc's X3100 Series 10GbE
* PCIe I/O Virtualized Server Adapter.
*
* Copyright(c) 2002-2010 Neterion Inc.
*
* This software may be used and distributed according to the terms of
* the GNU General Public License (GPL), incorporated herein by
* reference. Drivers based on or derived from this code fall under
* the GPL and must retain the authorship, copyright and license
* notice.
*
*/
FILE_LICENCE(GPL2_ONLY);
#ifndef VXGE_VERSION_H
#define VXGE_VERSION_H
/* gPXE vxge driver version fields.
* Note: each field must fit in a nibble.
*/
#define VXGE_VERSION_MAJOR 3
#define VXGE_VERSION_MINOR 1
#define VXGE_VERSION_FIX 0
#define VXGE_VERSION_BUILD 0
#define VXGE_FW_VER(major, minor, build) \
(((major) << 16) + ((minor) << 8) + (build))
/* Certified FW version. */
#define VXGE_CERT_FW_VER_MAJOR 1
#define VXGE_CERT_FW_VER_MINOR 6
#define VXGE_CERT_FW_VER_BUILD 0
#define VXGE_CERT_FW_VER VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, \
VXGE_CERT_FW_VER_MINOR, VXGE_CERT_FW_VER_BUILD)
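/* Worked example: VXGE_CERT_FW_VER expands to
* (1 << 16) + (6 << 8) + 0 == 0x010600, i.e. certified firmware 1.6.0. */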
#endif

View File

@ -117,6 +117,9 @@ FILE_LICENCE ( GPL2_OR_LATER );
#define ERRFILE_sis190 ( ERRFILE_DRIVER | 0x00520000 )
#define ERRFILE_myri10ge ( ERRFILE_DRIVER | 0x00530000 )
#define ERRFILE_skge ( ERRFILE_DRIVER | 0x00540000 )
#define ERRFILE_vxge_main ( ERRFILE_DRIVER | 0x00550000 )
#define ERRFILE_vxge_config ( ERRFILE_DRIVER | 0x00560000 )
#define ERRFILE_vxge_traffic ( ERRFILE_DRIVER | 0x00570000 )
#define ERRFILE_scsi ( ERRFILE_DRIVER | 0x00700000 )
#define ERRFILE_arbel ( ERRFILE_DRIVER | 0x00710000 )