david/ipxe
david
/
ipxe
Archived
1
0
Fork 0

[hermon] Add support for dual-protocol devices

Originally-implemented-by: Itay Gazit <itaygazit@gmail.com>
Signed-off-by: Michael Brown <mcb30@ipxe.org>
This commit is contained in:
Michael Brown 2010-09-19 17:35:25 +01:00
parent ed0ea7cfc2
commit 5a981cff8e
4 changed files with 713 additions and 2657 deletions

View File

@ -36,6 +36,8 @@ FILE_LICENCE ( GPL2_OR_LATER );
#include <ipxe/netdevice.h>
#include <ipxe/infiniband.h>
#include <ipxe/ib_smc.h>
#include <ipxe/if_ether.h>
#include <ipxe/ethernet.h>
#include "hermon.h"
/**
@ -304,12 +306,13 @@ hermon_cmd_close_port ( struct hermon *hermon, unsigned int port ) {
}
static inline int
hermon_cmd_set_port ( struct hermon *hermon, unsigned int port,
union hermonprm_set_port *set_port ) {
hermon_cmd_set_port ( struct hermon *hermon, int is_ethernet,
unsigned int port_selector,
const union hermonprm_set_port *set_port ) {
return hermon_cmd ( hermon,
HERMON_HCR_IN_CMD ( HERMON_HCR_SET_PORT,
1, sizeof ( *set_port ) ),
0, set_port, port, NULL );
is_ethernet, set_port, port_selector, NULL );
}
static inline int
@ -491,6 +494,24 @@ hermon_cmd_mgid_hash ( struct hermon *hermon, const union ib_gid *gid,
0, gid, 0, hash );
}
/**
 * Query port capabilities
 *
 * @v hermon		Hermon device
 * @v port		Port number
 * @v query_port	Port capability block to fill in
 * @ret rc		Return status code
 */
static inline int
hermon_cmd_query_port ( struct hermon *hermon, unsigned int port,
			struct hermonprm_query_port_cap *query_port ) {
	/* QUERY_PORT is an output-mailbox command: the firmware fills
	 * in the capability block (supported link types, MAC address,
	 * etc.) for the selected port.
	 */
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_PORT,
						 1, sizeof ( *query_port ) ),
			    0, NULL, port, query_port );
}
/**
 * Sense port type
 *
 * @v hermon		Hermon device
 * @v port		Port number
 * @v port_type	Port type result block to fill in
 * @ret rc		Return status code
 */
static inline int
hermon_cmd_sense_port ( struct hermon *hermon, unsigned int port,
			struct hermonprm_sense_port *port_type ) {
	/* SENSE_PORT asks the firmware which network type (if any)
	 * it has detected on the physical port.
	 */
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_SENSE_PORT,
						 1, sizeof ( *port_type ) ),
			    0, NULL, port, port_type );
}
static inline int
hermon_cmd_run_fw ( struct hermon *hermon ) {
return hermon_cmd ( hermon,
@ -559,16 +580,6 @@ hermon_cmd_map_fa ( struct hermon *hermon,
0, map, 1, NULL );
}
static inline int
hermon_cmd_sense_port ( struct hermon *hermon, unsigned int port,
struct hermonprm_sense_port *port_type ) {
return hermon_cmd ( hermon,
HERMON_HCR_OUT_CMD ( HERMON_HCR_SENSE_PORT,
1, sizeof ( *port_type ) ),
0, NULL, port, port_type );
}
/***************************************************************************
*
* Memory translation table operations
@ -916,6 +927,7 @@ static int hermon_alloc_qpn ( struct ib_device *ibdev,
return 0;
case IB_QPT_UD:
case IB_QPT_RC:
case IB_QPT_ETH:
/* Find a free queue pair number */
qpn_offset = hermon_bitmask_alloc ( hermon->qp_inuse,
HERMON_MAX_QPS, 1 );
@ -982,6 +994,7 @@ static uint8_t hermon_qp_st[] = {
[IB_QPT_GSI] = HERMON_ST_MLX,
[IB_QPT_UD] = HERMON_ST_UD,
[IB_QPT_RC] = HERMON_ST_RC,
[IB_QPT_ETH] = HERMON_ST_MLX,
};
/**
@ -1163,7 +1176,9 @@ static int hermon_modify_qp ( struct ib_device *ibdev,
if ( hermon_qp->state < HERMON_QP_ST_RTR ) {
memset ( &qpctx, 0, sizeof ( qpctx ) );
MLX_FILL_2 ( &qpctx, 4,
qpc_eec_data.mtu, HERMON_MTU_2048,
qpc_eec_data.mtu,
( ( qp->type == IB_QPT_ETH ) ?
HERMON_MTU_ETH : HERMON_MTU_2048 ),
qpc_eec_data.msg_max, 31 );
MLX_FILL_1 ( &qpctx, 7,
qpc_eec_data.remote_qpn_een, qp->av.qpn );
@ -1402,6 +1417,38 @@ hermon_fill_rc_send_wqe ( struct ib_device *ibdev,
return HERMON_OPCODE_SEND;
}
/**
* Construct Ethernet send work queue entry
*
* @v ibdev Infiniband device
* @v qp Queue pair
* @v av Address vector
* @v iobuf I/O buffer
* @v wqe Send work queue entry
* @ret opcode Control opcode
*/
static unsigned int
hermon_fill_eth_send_wqe ( struct ib_device *ibdev,
			   struct ib_queue_pair *qp __unused,
			   struct ib_address_vector *av __unused,
			   struct io_buffer *iobuf,
			   union hermon_send_wqe *wqe ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );

	/* Fill work queue entry.
	 *
	 * NOTE(review): the descriptor size (ds) is derived from the
	 * MLX WQE layout ( typeof ( wqe->mlx ) ) rather than the eth
	 * layout; this is correct only if ctrl and data[0] sit at the
	 * same offsets in both layouts -- confirm against hermon.h.
	 */
	MLX_FILL_1 ( &wqe->eth.ctrl, 1, ds,
		     ( ( offsetof ( typeof ( wqe->mlx ), data[1] ) / 16 ) ) );
	/* Request a completion entry; inhibit Invariant CRC insertion */
	MLX_FILL_2 ( &wqe->eth.ctrl, 2,
		     c, 0x03 /* generate completion */,
		     s, 1 /* inhibit ICRC */ );
	/* Single gather entry covering the whole I/O buffer */
	MLX_FILL_1 ( &wqe->eth.data[0], 0,
		     byte_count, iob_len ( iobuf ) );
	MLX_FILL_1 ( &wqe->eth.data[0], 1, l_key, hermon->lkey );
	MLX_FILL_1 ( &wqe->eth.data[0], 3,
		     local_address_l, virt_to_bus ( iobuf->data ) );

	return HERMON_OPCODE_SEND;
}
/** Work queue entry constructors */
static unsigned int
( * hermon_fill_send_wqe[] ) ( struct ib_device *ibdev,
@ -1413,6 +1460,7 @@ static unsigned int
[IB_QPT_GSI] = hermon_fill_mlx_send_wqe,
[IB_QPT_UD] = hermon_fill_ud_send_wqe,
[IB_QPT_RC] = hermon_fill_rc_send_wqe,
[IB_QPT_ETH] = hermon_fill_eth_send_wqe,
};
/**
@ -1618,6 +1666,9 @@ static int hermon_complete ( struct ib_device *ibdev,
case IB_QPT_RC:
av = &qp->av;
break;
case IB_QPT_ETH:
av = NULL;
break;
default:
assert ( 0 );
return -EINVAL;
@ -1826,8 +1877,9 @@ static void hermon_event_port_state_change ( struct hermon *hermon,
return;
}
/* Update MAD parameters */
ib_smc_update ( hermon->ibdev[port], hermon_mad );
/* Notify device of port state change */
hermon->port[port].type->state_change ( hermon, &hermon->port[port],
link_up );
}
/**
@ -1898,39 +1950,6 @@ static void hermon_poll_eq ( struct ib_device *ibdev ) {
***************************************************************************
*/
/**
* Sense port type
*
* @v ibdev Infiniband device
* @ret port_type Port type, or negative error
*/
static int hermon_sense_port_type ( struct ib_device *ibdev ) {
struct hermon *hermon = ib_get_drvdata ( ibdev );
struct hermonprm_sense_port sense_port;
int port_type;
int rc;
/* If DPDP is not supported, always assume Infiniband */
if ( ! hermon->cap.dpdp ) {
DBGC ( hermon, "Hermon %p does not support DPDP; assuming "
"Infiniband\n", hermon );
return HERMON_PORT_TYPE_IB;
}
/* Sense the port type */
if ( ( rc = hermon_cmd_sense_port ( hermon, ibdev->port,
&sense_port ) ) != 0 ) {
DBGC ( hermon, "Hermon %p port %d sense failed: %s\n",
hermon, ibdev->port, strerror ( rc ) );
return rc;
}
port_type = MLX_GET ( &sense_port, port_type );
DBGC ( hermon, "Hermon %p port %d type %d\n",
hermon, ibdev->port, port_type );
return port_type;
}
/**
* Initialise Infiniband link
*
@ -1940,18 +1959,8 @@ static int hermon_sense_port_type ( struct ib_device *ibdev ) {
static int hermon_open ( struct ib_device *ibdev ) {
struct hermon *hermon = ib_get_drvdata ( ibdev );
union hermonprm_set_port set_port;
int port_type;
int rc;
/* Check we are connected to an Infiniband network */
if ( ( rc = port_type = hermon_sense_port_type ( ibdev ) ) < 0 )
return rc;
if ( port_type != HERMON_PORT_TYPE_IB ) {
DBGC ( hermon, "Hermon %p port %d not connected to an "
"Infiniband network", hermon, ibdev->port );
return -ENOTCONN;
}
/* Set port parameters */
memset ( &set_port, 0, sizeof ( set_port ) );
MLX_FILL_7 ( &set_port.ib, 0,
@ -1965,7 +1974,7 @@ static int hermon_open ( struct ib_device *ibdev ) {
MLX_FILL_2 ( &set_port.ib, 10,
max_pkey, 1,
max_gid, 1 );
if ( ( rc = hermon_cmd_set_port ( hermon, ibdev->port,
if ( ( rc = hermon_cmd_set_port ( hermon, 0, ibdev->port,
&set_port ) ) != 0 ) {
DBGC ( hermon, "Hermon %p port %d could not set port: %s\n",
hermon, ibdev->port, strerror ( rc ) );
@ -2136,6 +2145,530 @@ static struct ib_device_operations hermon_ib_operations = {
.set_pkey_table = hermon_inform_sma,
};
/**
* Register Hermon Infiniband device
*
* @v hermon Hermon device
* @v port Hermon port
* @ret rc Return status code
*/
static int hermon_register_ibdev ( struct hermon *hermon,
				   struct hermon_port *port ) {
	struct ib_device *ibdev = port->ibdev;
	int rc;

	/* Initialise parameters using SMC (subnet management command)
	 * MADs before exposing the device to the IB stack.
	 */
	ib_smc_init ( ibdev, hermon_mad );

	/* Register Infiniband device */
	if ( ( rc = register_ibdev ( ibdev ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p port %d could not register IB "
		       "device: %s\n", hermon, ibdev->port, strerror ( rc ) );
		return rc;
	}

	return 0;
}
/**
* Handle Hermon Infiniband device port state change
*
* @v hermon Hermon device
* @v port Hermon port
* @v link_up Link is up
*/
static void hermon_state_change_ibdev ( struct hermon *hermon __unused,
					struct hermon_port *port,
					int link_up __unused ) {

	/* Refresh cached MAD parameters now that the port state has
	 * changed; the new link state itself is irrelevant here.
	 */
	ib_smc_update ( port->ibdev, hermon_mad );
}
/**
* Unregister Hermon Infiniband device
*
* @v hermon Hermon device
* @v port Hermon port
*/
static void hermon_unregister_ibdev ( struct hermon *hermon __unused,
				      struct hermon_port *port ) {

	/* Remove the Infiniband device from the system */
	unregister_ibdev ( port->ibdev );
}
/** Hermon Infiniband port type
 *
 * Dispatch table used when a port is attached to an Infiniband
 * network; see hermon_set_port_type() for how it is selected.
 */
static struct hermon_port_type hermon_port_type_ib = {
	.register_dev = hermon_register_ibdev,		/* Register IB device */
	.state_change = hermon_state_change_ibdev,	/* Refresh MAD state */
	.unregister_dev = hermon_unregister_ibdev,	/* Unregister IB device */
};
/***************************************************************************
*
* Ethernet operation
*
***************************************************************************
*/
/** Number of Hermon Ethernet send work queue entries */
#define HERMON_ETH_NUM_SEND_WQES 2
/** Number of Hermon Ethernet receive work queue entries */
#define HERMON_ETH_NUM_RECV_WQES 4
/** Number of Hermon Ethernet completion entries */
#define HERMON_ETH_NUM_CQES 8
/**
* Transmit packet via Hermon Ethernet device
*
* @v netdev Network device
* @v iobuf I/O buffer
* @ret rc Return status code
*/
static int hermon_eth_transmit ( struct net_device *netdev,
				 struct io_buffer *iobuf ) {
	struct hermon_port *port = netdev->priv;
	struct ib_device *ibdev = port->ibdev;
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	int rc;

	/* Post the buffer on the Ethernet queue pair.  No address
	 * vector is needed (NULL): the Ethernet WQE constructor
	 * ignores it.  Buffer release is reported asynchronously via
	 * hermon_eth_complete_send().
	 */
	if ( ( rc = ib_post_send ( ibdev, port->eth_qp, NULL,
				   iobuf ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p port %d could not transmit: %s\n",
		       hermon, ibdev->port, strerror ( rc ) );
		return rc;
	}

	return 0;
}
/**
* Handle Hermon Ethernet device send completion
*
* @v ibdev Infiniband device
* @v qp Queue pair
* @v iobuf I/O buffer
* @v rc Completion status code
*/
static void hermon_eth_complete_send ( struct ib_device *ibdev __unused,
				       struct ib_queue_pair *qp,
				       struct io_buffer *iobuf, int rc ) {
	/* Owning net_device was recorded on the QP in hermon_eth_open()
	 * via ib_qp_set_ownerdata().
	 */
	struct net_device *netdev = ib_qp_get_ownerdata ( qp );

	/* Report TX completion (success or error) to the network layer */
	netdev_tx_complete_err ( netdev, iobuf, rc );
}
/**
* Handle Hermon Ethernet device receive completion
*
* @v ibdev Infiniband device
* @v qp Queue pair
* @v av Address vector, or NULL
* @v iobuf I/O buffer
* @v rc Completion status code
*/
static void hermon_eth_complete_recv ( struct ib_device *ibdev __unused,
				       struct ib_queue_pair *qp,
				       struct ib_address_vector *av __unused,
				       struct io_buffer *iobuf, int rc ) {
	struct net_device *netdev = ib_qp_get_ownerdata ( qp );

	/* Report errors to the network layer; hand good packets up */
	if ( rc != 0 ) {
		netdev_rx_err ( netdev, iobuf, rc );
		return;
	}
	netdev_rx ( netdev, iobuf );
}
/** Hermon Ethernet device completion operations
 *
 * Installed on the Ethernet completion queue created in
 * hermon_eth_open().
 */
static struct ib_completion_queue_operations hermon_eth_cq_op = {
	.complete_send = hermon_eth_complete_send,	/* TX completions */
	.complete_recv = hermon_eth_complete_recv,	/* RX completions */
};
/**
* Poll Hermon Ethernet device
*
* @v netdev Network device
*/
static void hermon_eth_poll ( struct net_device *netdev ) {
	struct hermon_port *port = netdev->priv;

	/* Polling the event queue drives completion processing for
	 * this port's send and receive rings.
	 */
	ib_poll_eq ( port->ibdev );
}
/**
* Enable/disable interrupts on Hermon Ethernet device
*
* @v netdev Network device
* @v enable Interrupts should be enabled
*/
static void hermon_eth_irq ( struct net_device *netdev __unused,
			     int enable __unused ) {
	/* No implementation: this driver is purely polled (see
	 * hermon_eth_poll()), so interrupt enable/disable is a no-op.
	 */
}
/**
* Open Hermon Ethernet device
*
* @v netdev Network device
* @ret rc Return status code
*/
static int hermon_eth_open ( struct net_device *netdev ) {
	struct hermon_port *port = netdev->priv;
	struct ib_device *ibdev = port->ibdev;
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	union hermonprm_set_port set_port;
	int rc;

	/* Allocate completion queue (shared by send and receive) */
	port->eth_cq = ib_create_cq ( ibdev, HERMON_ETH_NUM_CQES,
				      &hermon_eth_cq_op );
	if ( ! port->eth_cq ) {
		DBGC ( hermon, "Hermon %p port %d could not create completion "
		       "queue\n", hermon, ibdev->port );
		rc = -ENOMEM;
		goto err_create_cq;
	}

	/* Allocate Ethernet queue pair, with both work queues bound
	 * to the single completion queue above.
	 */
	port->eth_qp = ib_create_qp ( ibdev, IB_QPT_ETH,
				      HERMON_ETH_NUM_SEND_WQES, port->eth_cq,
				      HERMON_ETH_NUM_RECV_WQES, port->eth_cq );
	if ( ! port->eth_qp ) {
		DBGC ( hermon, "Hermon %p port %d could not create queue "
		       "pair\n", hermon, ibdev->port );
		rc = -ENOMEM;
		goto err_create_qp;
	}
	/* Record owning netdev so completion handlers can find it */
	ib_qp_set_ownerdata ( port->eth_qp, netdev );

	/* Activate queue pair (transition to a usable state) */
	if ( ( rc = ib_modify_qp ( ibdev, port->eth_qp ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p port %d could not modify queue "
		       "pair: %s\n", hermon, ibdev->port, strerror ( rc ) );
		goto err_modify_qp;
	}

	/* Fill receive rings */
	ib_refill_recv ( ibdev, port->eth_qp );

	/* Set port general parameters.  The v_* bits presumably mark
	 * the corresponding fields as valid -- confirm against the PRM.
	 * Opcode modifier selects the general-parameter block and is
	 * OR'd with the port number.
	 */
	memset ( &set_port, 0, sizeof ( set_port ) );
	MLX_FILL_3 ( &set_port.general, 0,
		     v_mtu, 1,
		     v_pprx, 1,
		     v_pptx, 1 );
	MLX_FILL_1 ( &set_port.general, 1,
		     mtu, ( ETH_FRAME_LEN + 40 /* Used by card */ ) );
	MLX_FILL_1 ( &set_port.general, 2, pptx, 1 );
	MLX_FILL_1 ( &set_port.general, 3, pprx, 1 );
	if ( ( rc = hermon_cmd_set_port ( hermon, 1,
					  ( HERMON_SET_PORT_GENERAL_PARAM |
					    ibdev->port ),
					  &set_port ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p port %d could not set port general "
		       "parameters: %s\n",
		       hermon, ibdev->port, strerror ( rc ) );
		goto err_set_port_general_params;
	}

	/* Set port receive QP: steer all traffic (unicast, VLAN,
	 * VLAN-free and multicast misses) to our single QP, which acts
	 * as the promiscuous QP.
	 */
	memset ( &set_port, 0, sizeof ( set_port ) );
	MLX_FILL_1 ( &set_port.rqp_calc, 0, base_qpn, port->eth_qp->qpn );
	MLX_FILL_1 ( &set_port.rqp_calc, 2,
		     mac_miss_index, 128 /* MAC misses go to promisc QP */ );
	MLX_FILL_2 ( &set_port.rqp_calc, 3,
		     vlan_miss_index, 127 /* VLAN misses go to promisc QP */,
		     no_vlan_index, 126 /* VLAN-free go to promisc QP */ );
	MLX_FILL_2 ( &set_port.rqp_calc, 5,
		     promisc_qpn, port->eth_qp->qpn,
		     en_uc_promisc, 1 );
	MLX_FILL_2 ( &set_port.rqp_calc, 6,
		     def_mcast_qpn, port->eth_qp->qpn,
		     mc_promisc_mode, 2 /* Receive all multicasts */ );
	if ( ( rc = hermon_cmd_set_port ( hermon, 1,
					  ( HERMON_SET_PORT_RECEIVE_QP |
					    ibdev->port ),
					  &set_port ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p port %d could not set port receive "
		       "QP: %s\n", hermon, ibdev->port, strerror ( rc ) );
		goto err_set_port_receive_qp;
	}

	/* Initialise port (bring the physical port up) */
	if ( ( rc = hermon_cmd_init_port ( hermon, ibdev->port ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p port %d could not initialise port: "
		       "%s\n", hermon, ibdev->port, strerror ( rc ) );
		goto err_init_port;
	}

	return 0;

	/* Unwind in reverse order of construction; the three SET_PORT
	 * failure labels share the QP/CQ teardown path.
	 */
 err_init_port:
 err_set_port_receive_qp:
 err_set_port_general_params:
 err_modify_qp:
	ib_destroy_qp ( ibdev, port->eth_qp );
 err_create_qp:
	ib_destroy_cq ( ibdev, port->eth_cq );
 err_create_cq:
	return rc;
}
/**
* Close Hermon Ethernet device
*
* @v netdev Network device
*/
static void hermon_eth_close ( struct net_device *netdev ) {
	struct hermon_port *port = netdev->priv;
	struct ib_device *ibdev = port->ibdev;
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	int rc;

	/* Close port; log but otherwise ignore failure since close
	 * has no error path to report through.
	 */
	if ( ( rc = hermon_cmd_close_port ( hermon, ibdev->port ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p port %d could not close port: %s\n",
		       hermon, ibdev->port, strerror ( rc ) );
		/* Nothing we can do about this */
	}

	/* Tear down the queues created in hermon_eth_open() */
	ib_destroy_qp ( ibdev, port->eth_qp );
	ib_destroy_cq ( ibdev, port->eth_cq );
}
/** Hermon Ethernet network device operations */
static struct net_device_operations hermon_eth_operations = {
	.open		= hermon_eth_open,	/* Create QP/CQ, set up port */
	.close		= hermon_eth_close,	/* Close port, tear down queues */
	.transmit	= hermon_eth_transmit,	/* Post send work queue entry */
	.poll		= hermon_eth_poll,	/* Poll event queue */
	.irq		= hermon_eth_irq,	/* No-op (polled driver) */
};
/**
* Register Hermon Ethernet device
*
* @v hermon Hermon device
* @v port Hermon port
* @ret rc Return status code
*/
static int hermon_register_netdev ( struct hermon *hermon,
				    struct hermon_port *port ) {
	struct net_device *netdev = port->netdev;
	struct ib_device *ibdev = port->ibdev;
	struct hermonprm_query_port_cap query_port;
	/* Scratch buffer for assembling the 48-bit MAC address in
	 * network byte order; the union lets us write it as two
	 * 32-bit words and read it back as bytes.
	 */
	union {
		uint8_t bytes[8];
		uint32_t dwords[2];
	} mac;
	int rc;

	/* Retrieve MAC address from the port capability block */
	if ( ( rc = hermon_cmd_query_port ( hermon, ibdev->port,
					    &query_port ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p port %d could not query port: %s\n",
		       hermon, ibdev->port, strerror ( rc ) );
		return rc;
	}
	/* Build the MAC big-endian in an 8-byte buffer; the first two
	 * bytes are padding, so the address proper occupies the last
	 * ETH_ALEN bytes.
	 */
	mac.dwords[0] = htonl ( MLX_GET ( &query_port, mac_47_32 ) );
	mac.dwords[1] = htonl ( MLX_GET ( &query_port, mac_31_0 ) );
	memcpy ( netdev->hw_addr,
		 &mac.bytes[ sizeof ( mac.bytes ) - ETH_ALEN ], ETH_ALEN );

	/* Register network device */
	if ( ( rc = register_netdev ( netdev ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p port %d could not register network "
		       "device: %s\n", hermon, ibdev->port, strerror ( rc ) );
		return rc;
	}

	return 0;
}
/**
* Handle Hermon Ethernet device port state change
*
* @v hermon Hermon device
* @v port Hermon port
* @v link_up Link is up
*/
static void hermon_state_change_netdev ( struct hermon *hermon __unused,
					 struct hermon_port *port,
					 int link_up ) {

	/* Propagate the physical link state to the network device */
	if ( ! link_up ) {
		netdev_link_down ( port->netdev );
	} else {
		netdev_link_up ( port->netdev );
	}
}
/**
* Unregister Hermon Ethernet device
*
* @v hermon Hermon device
* @v port Hermon port
*/
static void hermon_unregister_netdev ( struct hermon *hermon __unused,
				       struct hermon_port *port ) {

	/* Remove the network device from the system */
	unregister_netdev ( port->netdev );
}
/** Hermon Ethernet port type */
static struct hermon_port_type hermon_port_type_eth = {
.register_dev = hermon_register_netdev,
.state_change = hermon_state_change_netdev,
.unregister_dev = hermon_unregister_netdev,
};
/***************************************************************************
*
* Port type detection
*
***************************************************************************
*/
/** Timeout for port sensing */
#define HERMON_SENSE_PORT_TIMEOUT ( TICKS_PER_SEC / 2 )
/**
* Name port type
*
* @v port_type Port type
* @ret name		Port type name
*/
static inline const char * hermon_name_port_type ( unsigned int port_type ) {

	/* Translate a sensed port type code into a human-readable
	 * name for debug messages.
	 */
	if ( port_type == HERMON_PORT_TYPE_IB )
		return "Infiniband";
	if ( port_type == HERMON_PORT_TYPE_ETH )
		return "Ethernet";
	if ( port_type == HERMON_PORT_TYPE_UNKNOWN )
		return "unknown";
	return "INVALID";
}
/**
* Sense port type
*
* @v hermon Hermon device
* @v port Hermon port
* @ret port_type Port type, or negative error
*/
static int hermon_sense_port_type ( struct hermon *hermon,
				    struct hermon_port *port ) {
	struct ib_device *ibdev = port->ibdev;
	struct hermonprm_sense_port sense_port;
	int port_type;
	int rc;

	/* If DPDP (dual-port dual-protocol) is not supported, there is
	 * nothing to sense: always assume Infiniband.
	 */
	if ( ! hermon->cap.dpdp ) {
		port_type = HERMON_PORT_TYPE_IB;
		DBGC ( hermon, "Hermon %p port %d does not support DPDP; "
		       "assuming an %s network\n", hermon, ibdev->port,
		       hermon_name_port_type ( port_type ) );
		return port_type;
	}

	/* Sense the port type via the SENSE_PORT firmware command */
	if ( ( rc = hermon_cmd_sense_port ( hermon, ibdev->port,
					    &sense_port ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p port %d sense failed: %s\n",
		       hermon, ibdev->port, strerror ( rc ) );
		return rc;
	}
	/* Result may still be HERMON_PORT_TYPE_UNKNOWN if the hardware
	 * has not yet identified the attached network; the caller
	 * (hermon_set_port_type) retries in that case.
	 */
	port_type = MLX_GET ( &sense_port, port_type );

	DBGC ( hermon, "Hermon %p port %d sensed an %s network\n",
	       hermon, ibdev->port, hermon_name_port_type ( port_type ) );
	return port_type;
}
/**
* Set port type
*
* @v hermon Hermon device
* @v port Hermon port
* @ret rc Return status code
*/
static int hermon_set_port_type ( struct hermon *hermon,
				  struct hermon_port *port ) {
	struct ib_device *ibdev = port->ibdev;
	struct hermonprm_query_port_cap query_port;
	int ib_supported;
	int eth_supported;
	int port_type;
	unsigned long start;
	unsigned long elapsed;
	int rc;

	/* Check to see which link types this port supports */
	if ( ( rc = hermon_cmd_query_port ( hermon, ibdev->port,
					    &query_port ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p port %d could not query port: %s\n",
		       hermon, ibdev->port, strerror ( rc ) );
		return rc;
	}
	ib_supported = MLX_GET ( &query_port, ib );
	eth_supported = MLX_GET ( &query_port, eth );
	DBGC ( hermon, "Hermon %p port %d supports%s%s%s\n",
	       hermon, ibdev->port, ( ib_supported ? " Infiniband" : "" ),
	       ( ( ib_supported && eth_supported ) ? " and" : "" ),
	       ( eth_supported ? " Ethernet" : "" ) );

	/* Sense network, if applicable */
	if ( ib_supported && eth_supported ) {

		/* Both types are supported; try sensing network.
		 * Busy-poll until the hardware reports a definite type
		 * or HERMON_SENSE_PORT_TIMEOUT ticks have elapsed.
		 */
		start = currticks();
		do {
			/* Try sensing port */
			port_type = hermon_sense_port_type ( hermon, port );
			if ( port_type < 0 ) {
				rc = port_type;
				return rc;
			}
		} while ( ( port_type == HERMON_PORT_TYPE_UNKNOWN ) &&
			  ( ( elapsed = ( currticks() - start ) ) <
			    HERMON_SENSE_PORT_TIMEOUT ) );

		/* Set port type based on sensed network, defaulting
		 * to Infiniband if nothing was sensed.
		 */
		switch ( port_type ) {
		case HERMON_PORT_TYPE_ETH:
			port->type = &hermon_port_type_eth;
			break;
		case HERMON_PORT_TYPE_IB:
		case HERMON_PORT_TYPE_UNKNOWN:
			port->type = &hermon_port_type_ib;
			break;
		default:
			/* Firmware returned an out-of-range type code */
			return -EINVAL;
		}

	} else if ( eth_supported ) {
		port->type = &hermon_port_type_eth;
	} else {
		/* Infiniband-only (or neither bit set; default to IB) */
		port->type = &hermon_port_type_ib;
	}

	assert ( port->type != NULL );
	return 0;
}
/***************************************************************************
*
* Firmware control
@ -2789,6 +3322,8 @@ static int hermon_probe ( struct pci_device *pci,
const struct pci_device_id *id __unused ) {
struct hermon *hermon;
struct ib_device *ibdev;
struct net_device *netdev;
struct hermon_port *port;
struct hermonprm_init_hca init_hca;
unsigned int i;
int rc;
@ -2842,13 +3377,26 @@ static int hermon_probe ( struct pci_device *pci,
rc = -ENOMEM;
goto err_alloc_ibdev;
}
hermon->ibdev[i] = ibdev;
hermon->port[i].ibdev = ibdev;
ibdev->op = &hermon_ib_operations;
ibdev->dev = &pci->dev;
ibdev->port = ( HERMON_PORT_BASE + i );
ib_set_drvdata ( ibdev, hermon );
}
/* Allocate network devices */
for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
netdev = alloc_etherdev ( 0 );
if ( ! netdev ) {
rc = -ENOMEM;
goto err_alloc_netdev;
}
hermon->port[i].netdev = netdev;
netdev_init ( netdev, &hermon_eth_operations );
netdev->dev = &pci->dev;
netdev->priv = &hermon->port[i];
}
/* Allocate ICM */
memset ( &init_hca, 0, sizeof ( init_hca ) );
if ( ( rc = hermon_alloc_icm ( hermon, &init_hca ) ) != 0 )
@ -2868,7 +3416,7 @@ static int hermon_probe ( struct pci_device *pci,
if ( ( rc = hermon_setup_mpt ( hermon ) ) != 0 )
goto err_setup_mpt;
for ( i = 0 ; i < hermon->cap.num_ports ; i++ )
hermon->ibdev[i]->rdma_key = hermon->lkey;
hermon->port[i].ibdev->rdma_key = hermon->lkey;
/* Set up event queue */
if ( ( rc = hermon_create_eq ( hermon ) ) != 0 )
@ -2878,26 +3426,29 @@ static int hermon_probe ( struct pci_device *pci,
if ( ( rc = hermon_configure_special_qps ( hermon ) ) != 0 )
goto err_conf_special_qps;
/* Initialise parameters using SMC */
for ( i = 0 ; i < hermon->cap.num_ports ; i++ )
ib_smc_init ( hermon->ibdev[i], hermon_mad );
/* Register Infiniband devices */
/* Determine port types */
for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
if ( ( rc = register_ibdev ( hermon->ibdev[i] ) ) != 0 ) {
DBGC ( hermon, "Hermon %p port %d could not register "
"IB device: %s\n", hermon,
hermon->ibdev[i]->port, strerror ( rc ) );
goto err_register_ibdev;
}
port = &hermon->port[i];
if ( ( rc = hermon_set_port_type ( hermon, port ) ) != 0 )
goto err_set_port_type;
}
/* Register devices */
for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
port = &hermon->port[i];
if ( ( rc = port->type->register_dev ( hermon, port ) ) != 0 )
goto err_register;
}
return 0;
i = hermon->cap.num_ports;
err_register_ibdev:
for ( i-- ; ( signed int ) i >= 0 ; i-- )
unregister_ibdev ( hermon->ibdev[i] );
err_register:
for ( i-- ; ( signed int ) i >= 0 ; i-- ) {
port = &hermon->port[i];
port->type->unregister_dev ( hermon, port );
}
err_set_port_type:
err_conf_special_qps:
hermon_destroy_eq ( hermon );
err_create_eq:
@ -2907,9 +3458,15 @@ static int hermon_probe ( struct pci_device *pci,
hermon_free_icm ( hermon );
err_alloc_icm:
i = hermon->cap.num_ports;
err_alloc_netdev:
for ( i-- ; ( signed int ) i >= 0 ; i-- ) {
netdev_nullify ( hermon->port[i].netdev );
netdev_put ( hermon->port[i].netdev );
}
i = hermon->cap.num_ports;
err_alloc_ibdev:
for ( i-- ; ( signed int ) i >= 0 ; i-- )
ibdev_put ( hermon->ibdev[i] );
ibdev_put ( hermon->port[i].ibdev );
err_get_cap:
hermon_stop_firmware ( hermon );
err_start_firmware:
@ -2929,10 +3486,13 @@ static int hermon_probe ( struct pci_device *pci,
*/
static void hermon_remove ( struct pci_device *pci ) {
struct hermon *hermon = pci_get_drvdata ( pci );
struct hermon_port *port;
int i;
for ( i = ( hermon->cap.num_ports - 1 ) ; i >= 0 ; i-- )
unregister_ibdev ( hermon->ibdev[i] );
for ( i = ( hermon->cap.num_ports - 1 ) ; i >= 0 ; i-- ) {
port = &hermon->port[i];
port->type->unregister_dev ( hermon, port );
}
hermon_destroy_eq ( hermon );
hermon_cmd_close_hca ( hermon );
hermon_free_icm ( hermon );
@ -2940,8 +3500,12 @@ static void hermon_remove ( struct pci_device *pci ) {
hermon_stop_firmware ( hermon );
free_dma ( hermon->mailbox_out, HERMON_MBOX_SIZE );
free_dma ( hermon->mailbox_in, HERMON_MBOX_SIZE );
for ( i = ( hermon->cap.num_ports - 1 ) ; i >= 0 ; i-- ) {
netdev_nullify ( hermon->port[i].netdev );
netdev_put ( hermon->port[i].netdev );
}
for ( i = ( hermon->cap.num_ports - 1 ) ; i >= 0 ; i-- )
ibdev_put ( hermon->ibdev[i] );
ibdev_put ( hermon->port[i].ibdev );
free ( hermon );
}
@ -2950,6 +3514,14 @@ static struct pci_device_id hermon_nics[] = {
PCI_ROM ( 0x15b3, 0x634a, "mt25418", "MT25418 HCA driver", 0 ),
PCI_ROM ( 0x15b3, 0x6732, "mt26418", "MT26418 HCA driver", 0 ),
PCI_ROM ( 0x15b3, 0x673c, "mt26428", "MT26428 HCA driver", 0 ),
PCI_ROM ( 0x15b3, 0x6746, "mt26438", "MT26438 HCA driver", 0 ),
PCI_ROM ( 0x15b3, 0x6778, "mt26488", "MT26488 HCA driver", 0 ),
PCI_ROM ( 0x15b3, 0x6368, "mt25448", "MT25448 HCA driver", 0 ),
PCI_ROM ( 0x15b3, 0x6750, "mt26448", "MT26448 HCA driver", 0 ),
PCI_ROM ( 0x15b3, 0x6372, "mt25458", "MT25458 HCA driver", 0 ),
PCI_ROM ( 0x15b3, 0x675a, "mt26458", "MT26458 HCA driver", 0 ),
PCI_ROM ( 0x15b3, 0x6764, "mt26468", "MT26468 HCA driver", 0 ),
PCI_ROM ( 0x15b3, 0x676e, "mt26478", "MT26478 HCA driver", 0 ),
};
struct pci_driver hermon_driver __pci_driver = {

View File

@ -94,6 +94,7 @@ FILE_LICENCE ( GPL2_OR_LATER );
/* MTUs */
#define HERMON_MTU_2048 0x04
#define HERMON_MTU_ETH 0x07
#define HERMON_INVALID_LKEY 0x00000100UL
@ -110,6 +111,13 @@ FILE_LICENCE ( GPL2_OR_LATER );
#define HERMON_MAP_EQ ( 0UL << 31 )
#define HERMON_UNMAP_EQ ( 1UL << 31 )
/* SET_PORT opcode modifier selectors: choose which parameter block a
 * SET_PORT command updates.  OR'd with the port number when issuing
 * the command (see hermon_eth_open()).
 */
#define HERMON_SET_PORT_GENERAL_PARAM 0x0000
#define HERMON_SET_PORT_RECEIVE_QP 0x0100
#define HERMON_SET_PORT_MAC_TABLE 0x0200
#define HERMON_SET_PORT_VLAN_TABLE 0x0300
#define HERMON_SET_PORT_PRIORITY_TABLE 0x0400
#define HERMON_SET_PORT_GID_TABLE 0x0500
#define HERMON_EV_PORT_STATE_CHANGE 0x09
#define HERMON_SCHED_QP0 0x3f
@ -449,6 +457,11 @@ struct hermonprm_rc_send_wqe {
struct hermonprm_wqe_segment_data_ptr data[HERMON_MAX_GATHER];
} __attribute__ (( packed ));
/** An Ethernet send work queue entry */
struct hermonprm_eth_send_wqe {
	struct hermonprm_wqe_segment_ctrl_send ctrl;	/**< Control segment */
	/** Gather list (up to HERMON_MAX_GATHER data segments) */
	struct hermonprm_wqe_segment_data_ptr data[HERMON_MAX_GATHER];
} __attribute__ (( packed ));
#define HERMON_MAX_SCATTER 1
struct hermonprm_recv_wqe {
@ -584,6 +597,7 @@ union hermon_send_wqe {
struct hermonprm_ud_send_wqe ud;
struct hermonprm_mlx_send_wqe mlx;
struct hermonprm_rc_send_wqe rc;
struct hermonprm_eth_send_wqe eth;
uint8_t force_align[HERMON_SEND_WQE_ALIGN];
} __attribute__ (( packed ));
@ -720,6 +734,51 @@ typedef uint32_t hermon_bitmask_t;
( ( (max_entries) + ( 8 * sizeof ( hermon_bitmask_t ) ) - 1 ) / \
( 8 * sizeof ( hermon_bitmask_t ) ) )
struct hermon;
struct hermon_port;
/** A Hermon port type */
struct hermon_port_type {
/** Register port
*
* @v hermon Hermon device
* @v port Hermon port
* @ret rc Return status code
*/
int ( * register_dev ) ( struct hermon *hermon,
struct hermon_port *port );
/** Port state changed
*
* @v hermon Hermon device
* @v port Hermon port
* @v link_up Link is up
*/
void ( * state_change ) ( struct hermon *hermon,
struct hermon_port *port,
int link_up );
/** Unregister port
*
* @v hermon Hermon device
* @v port Hermon port
*/
void ( * unregister_dev ) ( struct hermon *hermon,
struct hermon_port *port );
};
/** A Hermon port */
struct hermon_port {
/** Infiniband device */
struct ib_device *ibdev;
/** Network device */
struct net_device *netdev;
/** Ethernet completion queue */
struct ib_completion_queue *eth_cq;
/** Ethernet queue pair */
struct ib_queue_pair *eth_qp;
/** Port type */
struct hermon_port_type *type;
};
/** A Hermon device */
struct hermon {
/** PCI configuration registers */
@ -763,8 +822,8 @@ struct hermon {
/** QPN base */
unsigned long qpn_base;
/** Infiniband devices */
struct ib_device *ibdev[HERMON_MAX_PORTS];
/** Ports */
struct hermon_port port[HERMON_MAX_PORTS];
};
/** Global protection domain */

File diff suppressed because it is too large Load Diff

View File

@ -1,722 +0,0 @@
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
FILE_LICENCE ( GPL2_ONLY );
#ifndef H_MTNIC_IF_DEFS_H
#define H_MTNIC_IF_DEFS_H
/*
* Device setup
*/
#define MTNIC_MAX_PORTS 2
#define MTNIC_PORT1 0
#define MTNIC_PORT2 1
#define NUM_TX_RINGS 1
#define NUM_RX_RINGS 1
#define NUM_CQS (NUM_RX_RINGS + NUM_TX_RINGS)
#define GO_BIT_TIMEOUT 6000
#define TBIT_RETRIES 100
#define UNITS_BUFFER_SIZE 8 /* can be configured to 4/8/16 */
#define MAX_GAP_PROD_CONS ( UNITS_BUFFER_SIZE / 4 )
#define ETH_DEF_LEN	1540 /* 40 bytes used by the card */
/* NOTE(review): an Ethernet FCS is normally 4 bytes; 14 here matches
 * the Ethernet header length -- confirm intended value.
 */
#define ETH_FCS_LEN	14
/* Parenthesised so DEF_MTU behaves as a single value in expressions;
 * the unbracketed expansion "1540 + 14" bound incorrectly to
 * neighbouring operators (e.g. DEF_MTU * 2 evaluated as 1540 + 28).
 */
#define DEF_MTU		( ETH_DEF_LEN + ETH_FCS_LEN )
#define DEF_IOBUF_SIZE	ETH_DEF_LEN
#define MAC_ADDRESS_SIZE 6
#define NUM_EQES 16
#define ROUND_TO_CHECK 0x400
#define DELAY_LINK_CHECK 300
#define CHECK_LINK_TIMES 7
#define XNOR(x,y) (!(x) == !(y))
#define dma_addr_t unsigned long
#define PAGE_SIZE 4096
#define PAGE_MASK (PAGE_SIZE - 1)
#define MTNIC_MAILBOX_SIZE PAGE_SIZE
/* BITOPS: a "bit code" (bc) packs a field's bit offset in its upper
 * byte and its width in its lower byte (see MTNIC_BC()).
 */
#define MTNIC_BC_OFF(bc) ((bc) >> 8)
#define MTNIC_BC_SZ(bc) ((bc) & 0xff)
/* Mask of 'size' low-order one bits.  The argument is parenthesised
 * so that compound expressions expand correctly (the unbracketed form
 * mis-evaluated e.g. MTNIC_BC_ONES(a + b) as (31 - a + b)).
 * NOTE(review): relies on arithmetic (sign-extending) right shift of
 * a negative int, which is implementation-defined in C.
 */
#define MTNIC_BC_ONES(size) (~((int)0x80000000 >> (31 - (size))))
#define MTNIC_BC_MASK(bc) \
	(MTNIC_BC_ONES(MTNIC_BC_SZ(bc)) << MTNIC_BC_OFF(bc))
#define MTNIC_BC_VAL(val, bc) \
	(((val) & MTNIC_BC_ONES(MTNIC_BC_SZ(bc))) << MTNIC_BC_OFF(bc))
/*
* Sub word fields - bit code base extraction/setting etc
*/
/* Encode two values */
#define MTNIC_BC(off, size) ((off << 8) | (size & 0xff))
/* Get value of field 'bc' from 'x' */
#define MTNIC_BC_GET(x, bc) \
(((x) >> MTNIC_BC_OFF(bc)) & MTNIC_BC_ONES(MTNIC_BC_SZ(bc)))
/* Set value of field 'bc' of 'x' to 'val' */
#define MTNIC_BC_SET(x, val, bc) \
((x) = ((x) & ~MTNIC_BC_MASK(bc)) | MTNIC_BC_VAL(val, bc))
/* Like MTNIC_BC_SET, except the previous value is assumed to be 0 */
#define MTNIC_BC_PUT(x, val, bc) ((x) |= MTNIC_BC_VAL(val, bc))
/*
 * Device constants
 */
/* Firmware command opcodes, written to the opcode field of the
 * command register (struct mtnic_if_cmd_reg).  Values are fixed by
 * the device's command interface and must not be altered.  Note that
 * MAP_FW/RUN_FW intentionally use high opcodes (0xfff/0xff6), outside
 * the sequential NIC-command range. */
typedef enum mtnic_if_cmd {
/* NIC commands: */
MTNIC_IF_CMD_QUERY_FW = 0x004, /* query FW (size, version, etc) */
MTNIC_IF_CMD_MAP_FW = 0xfff, /* map pages for FW image */
MTNIC_IF_CMD_RUN_FW = 0xff6, /* run the FW */
MTNIC_IF_CMD_QUERY_CAP = 0x001, /* query MTNIC capabilities */
MTNIC_IF_CMD_MAP_PAGES = 0x002, /* map physical pages to HW */
MTNIC_IF_CMD_OPEN_NIC = 0x003, /* run the firmware */
MTNIC_IF_CMD_CONFIG_RX = 0x005, /* general receive configuration */
MTNIC_IF_CMD_CONFIG_TX = 0x006, /* general transmit configuration */
MTNIC_IF_CMD_CONFIG_INT_FREQ = 0x007, /* interrupt timers freq limits */
MTNIC_IF_CMD_HEART_BEAT = 0x008, /* NOP command testing liveliness */
MTNIC_IF_CMD_CLOSE_NIC = 0x009, /* release memory and stop the NIC */
/* Port commands: */
MTNIC_IF_CMD_CONFIG_PORT_RSS_STEER = 0x10, /* set RSS mode */
MTNIC_IF_CMD_SET_PORT_RSS_INDIRECTION = 0x11, /* set RSS indirection tbl */
MTNIC_IF_CMD_CONFIG_PORT_PRIO_STEERING = 0x12, /* set PRIORITY mode */
MTNIC_IF_CMD_CONFIG_PORT_ADDR_STEER = 0x13, /* set Address steer mode */
MTNIC_IF_CMD_CONFIG_PORT_VLAN_FILTER = 0x14, /* configure VLAN filter */
MTNIC_IF_CMD_CONFIG_PORT_MCAST_FILTER = 0x15, /* configure mcast filter */
MTNIC_IF_CMD_ENABLE_PORT_MCAST_FILTER = 0x16, /* enable/disable */
MTNIC_IF_CMD_SET_PORT_MTU = 0x17, /* set port MTU */
MTNIC_IF_CMD_SET_PORT_PROMISCUOUS_MODE = 0x18, /* enable/disable promisc */
MTNIC_IF_CMD_SET_PORT_DEFAULT_RING = 0x19, /* set the default ring */
MTNIC_IF_CMD_SET_PORT_STATE = 0x1a, /* set link up/down */
MTNIC_IF_CMD_DUMP_STAT = 0x1b, /* dump statistics */
MTNIC_IF_CMD_ARM_PORT_STATE_EVENT = 0x1c, /* arm the port state event */
/* Ring / Completion queue commands: */
MTNIC_IF_CMD_CONFIG_CQ = 0x20, /* set up completion queue */
MTNIC_IF_CMD_CONFIG_RX_RING = 0x21, /* setup Rx ring */
MTNIC_IF_CMD_SET_RX_RING_ADDR = 0x22, /* set Rx ring filter by address */
MTNIC_IF_CMD_SET_RX_RING_MCAST = 0x23, /* set Rx ring mcast filter */
MTNIC_IF_CMD_ARM_RX_RING_WM = 0x24, /* one-time low-watermark INT */
MTNIC_IF_CMD_CONFIG_TX_RING = 0x25, /* set up Tx ring */
MTNIC_IF_CMD_ENFORCE_TX_RING_ADDR = 0x26, /* setup anti spoofing */
MTNIC_IF_CMD_CONFIG_EQ = 0x27, /* config EQ ring */
MTNIC_IF_CMD_RELEASE_RESOURCE = 0x28, /* release internal ref to resource */
}
mtnic_if_cmd_t;
/** selectors for MTNIC_IF_CMD_QUERY_CAP */
/* Values are written to the cap_index field of struct
 * mtnic_if_query_cap_in_imm.  Index 0xb is absent from this list
 * (presumably reserved by the interface — TODO confirm). */
typedef enum mtnic_if_caps {
MTNIC_IF_CAP_MAX_TX_RING_PER_PORT = 0x0,
MTNIC_IF_CAP_MAX_RX_RING_PER_PORT = 0x1,
MTNIC_IF_CAP_MAX_CQ_PER_PORT = 0x2,
MTNIC_IF_CAP_NUM_PORTS = 0x3,
MTNIC_IF_CAP_MAX_TX_DESC = 0x4,
MTNIC_IF_CAP_MAX_RX_DESC = 0x5,
MTNIC_IF_CAP_MAX_CQES = 0x6,
MTNIC_IF_CAP_MAX_TX_SG_ENTRIES = 0x7,
MTNIC_IF_CAP_MAX_RX_SG_ENTRIES = 0x8,
MTNIC_IF_CAP_MEM_KEY = 0x9, /* key to mem (after map_pages) */
MTNIC_IF_CAP_RSS_HASH_TYPE = 0xa, /* one of mtnic_if_rss_types_t */
MTNIC_IF_CAP_MAX_PORT_UCAST_ADDR = 0xc,
MTNIC_IF_CAP_MAX_RING_UCAST_ADDR = 0xd, /* only for ADDR steer */
MTNIC_IF_CAP_MAX_PORT_MCAST_ADDR = 0xe,
MTNIC_IF_CAP_MAX_RING_MCAST_ADDR = 0xf, /* only for ADDR steer */
MTNIC_IF_CAP_INTA = 0x10,
MTNIC_IF_CAP_BOARD_ID_LOW = 0x11,
MTNIC_IF_CAP_BOARD_ID_HIGH = 0x12,
MTNIC_IF_CAP_TX_CQ_DB_OFFSET = 0x13, /* offset in bytes for TX, CQ doorbell record */
MTNIC_IF_CAP_EQ_DB_OFFSET = 0x14, /* offset in bytes for EQ doorbell record */
/* These are per port - using port number from cap modifier field */
MTNIC_IF_CAP_SPEED = 0x20,
MTNIC_IF_CAP_DEFAULT_MAC = 0x21,
MTNIC_IF_CAP_EQ_OFFSET = 0x22,
MTNIC_IF_CAP_CQ_OFFSET = 0x23,
MTNIC_IF_CAP_TX_OFFSET = 0x24,
MTNIC_IF_CAP_RX_OFFSET = 0x25,
} mtnic_if_caps_t;
/* Rx steering modes, used by the CONFIG_PORT_*_STEER commands */
typedef enum mtnic_if_steer_types {
MTNIC_IF_STEER_NONE = 0,
MTNIC_IF_STEER_PRIORITY = 1,
MTNIC_IF_STEER_RSS = 2,
MTNIC_IF_STEER_ADDRESS = 3,
} mtnic_if_steer_types_t;
/** types of memory access modes */
typedef enum mtnic_if_memory_types {
MTNIC_IF_MEM_TYPE_SNOOP = 1,
MTNIC_IF_MEM_TYPE_NO_SNOOP = 2
} mtnic_if_memory_types_t;
/* Location/size (in bytes, relative to BAR) of the command interface
 * (HCR) register block and the interrupt-clear register. */
enum {
MTNIC_HCR_BASE = 0x1f000,
MTNIC_HCR_SIZE = 0x0001c,
MTNIC_CLR_INT_SIZE = 0x00008,
};
/* Offset of the software reset register */
#define MTNIC_RESET_OFFSET 0xF0010
/********************************************************************
 * Device private data structures
 *
 * This section contains structures of all device private data:
 * descriptors, rings, CQs, EQ ....
 *
 *
 *********************************************************************/
/*
 * Descriptor format
 *
 * These structs mirror the hardware descriptor layout exactly; do not
 * reorder or resize fields.
 */
/* Tx descriptor control segment (ownership, opcode, size, flags) */
struct mtnic_ctrl_seg {
u32 op_own;
#define MTNIC_BIT_DESC_OWN 0x80000000
#define MTNIC_OPCODE_SEND 0xa
u32 size_vlan;
u32 flags;
#define MTNIC_BIT_NO_ICRC 0x2
#define MTNIC_BIT_TX_COMP 0xc
u32 reserved;
};
/* Scatter/gather data segment: byte count plus 64-bit buffer address
 * split into high/low 32-bit halves */
struct mtnic_data_seg {
u32 count;
#define MTNIC_INLINE 0x80000000
u32 mem_type;
#define MTNIC_MEMTYPE_PAD 0x100
u32 addr_h;
u32 addr_l;
};
/* Tx descriptor: control segment followed by data segment(s) */
struct mtnic_tx_desc {
struct mtnic_ctrl_seg ctrl;
struct mtnic_data_seg data; /* at least one data segment */
};
/* Rx descriptor: link to next descriptor plus data segment(s) */
struct mtnic_rx_desc {
u16 reserved1;
u16 next;
u32 reserved2[3];
struct mtnic_data_seg data; /* actual number of entries depends on
* rx ring stride */
};
/*
 * Rings
 */
/* Rx ring doorbell record: count of descriptors posted to hardware */
struct mtnic_rx_db_record {
u32 count;
};
/* Software state for one Tx or Rx ring; the Tx-only and Rx-only
 * members are only valid for rings of the matching direction. */
struct mtnic_ring {
u32 size; /* number of Rx descs or TXBBs */
u32 size_mask;
u16 stride;
u16 cq; /* index of port CQ associated with this ring */
u32 prod;
u32 cons; /* holds the last consumed index */
/* Buffers */
u32 buf_size; /* ring buffer size in bytes */
dma_addr_t dma;
void *buf;
struct io_buffer *iobuf[UNITS_BUFFER_SIZE];
/* Tx only */
struct mtnic_txcq_db *txcq_db;
u32 db_offset;
/* Rx ring only */
dma_addr_t iobuf_dma;
struct mtnic_rx_db_record *db;
dma_addr_t db_dma;
};
/*
 * CQ
 */
/* Completion queue entry — hardware layout; do not reorder fields.
 * Ownership alternates via the MTNIC_BIT_CQ_OWN bit in op_tr_own. */
struct mtnic_cqe {
u8 vp; /* VLAN present */
u8 reserved1[3];
u32 rss_hash;
u32 reserved2;
u16 vlan_prio;
u16 reserved3;
u8 flags_h;
u8 flags_l_rht;
u8 ipv6_mask;
u8 enc_bf;
#define MTNIC_BIT_BAD_FCS 0x10
#define MTNIC_OPCODE_ERROR 0x1e
u32 byte_cnt;
u16 index;
u16 chksum;
u8 reserved4[3];
u8 op_tr_own;
#define MTNIC_BIT_CQ_OWN 0x80
};
/* CQ doorbell record: consumer index updates for the hardware */
struct mtnic_cq_db_record {
u32 update_ci;
u32 cmd_ci;
};
/* Software state for one completion queue */
struct mtnic_cq {
int num; /* CQ number (on attached port) */
u32 size; /* number of CQEs in CQ */
u32 last; /* number of CQEs consumed */
struct mtnic_cq_db_record *db;
struct net_device *dev;
dma_addr_t db_dma;
u8 is_rx;
u16 ring; /* ring associated with this CQ */
u32 offset_ind;
/* CQE ring */
u32 buf_size; /* ring size in bytes */
struct mtnic_cqe *buf;
dma_addr_t dma;
};
/*
 * EQ
 */
/* Event queue entry — hardware layout; do not reorder fields.
 * Ownership alternates via the MTNIC_BIT_EQE_OWN bit in own. */
struct mtnic_eqe {
u8 reserved1;
u8 type;
u8 reserved2;
u8 subtype;
u8 reserved3[3];
u8 ring_cq;
u32 reserved4;
u8 port;
#define MTNIC_MASK_EQE_PORT MTNIC_BC(4,2)
u8 reserved5[2];
u8 syndrome;
u8 reserved6[15];
u8 own;
#define MTNIC_BIT_EQE_OWN 0x80
};
/* Software state for the event queue */
struct mtnic_eq {
u32 size; /* number of EQEs in ring */
u32 buf_size; /* EQ size in bytes */
void *buf;
dma_addr_t dma;
};
/* Driver-visible lifecycle states of a port/card */
enum mtnic_state {
CARD_DOWN,
CARD_INITIALIZED,
CARD_UP,
CARD_LINK_DOWN,
};
/* FW */
/* Pages mapped to the firmware via MAP_FW/MAP_PAGES */
struct mtnic_pages {
u32 num;
u32 *buf;
};
/* Firmware error buffer location, as reported by QUERY_FW */
struct mtnic_err_buf {
u64 offset;
u32 size;
};
/* Command mailbox state: buffer, its DMA mapping, and the toggle bit
 * used to match command completions */
struct mtnic_cmd {
void *buf;
unsigned long mapping;
u32 tbit;
};
/* Tx CQ doorbell page layout (hardware-defined offsets) */
struct mtnic_txcq_db {
u32 reserved1[5];
u32 send_db;
u32 reserved2[2];
u32 cq_arm;
u32 cq_ci;
};
/*
 * Device private data
 *
 */
/* Per-device state: one instance per physical adapter, shared by all
 * of its ports */
struct mtnic {
struct net_device *netdev[MTNIC_MAX_PORTS];
struct mtnic_if_cmd_reg *hcr;
struct mtnic_cmd cmd;
struct pci_device *pdev;
struct mtnic_eq eq;
u32 *eq_db;
/* Firmware and board info */
u64 fw_ver;
struct {
struct mtnic_pages fw_pages;
struct mtnic_pages extra_pages;
struct mtnic_err_buf err_buf;
u16 ifc_rev;
u8 num_ports;
u64 mac[MTNIC_MAX_PORTS];
u16 cq_offset;
u16 tx_offset[MTNIC_MAX_PORTS];
u16 rx_offset[MTNIC_MAX_PORTS];
u32 mem_type_snoop_be;
u32 txcq_db_offset;
u32 eq_db_offset;
} fw;
};
/* Per-port state: rings, CQs and link status for one network port */
struct mtnic_port {
struct mtnic *mtnic;
u8 port;
enum mtnic_state state;
/* TX, RX, CQs, EQ */
struct mtnic_ring tx_ring;
struct mtnic_ring rx_ring;
struct mtnic_cq cq[NUM_CQS];
u32 poll_counter;
struct net_device *netdev;
};
/***************************************************************************
 * NIC COMMANDS
 *
 * The section below provides struct definition for commands parameters,
 * and arguments values enumeration.
 *
 * The format used for the struct names is:
 * mtnic_if_<cmd name>_<in|out>_<imm|mbox>
 *
 ***************************************************************************/
/**
 * Command Register (Command interface)
 */
/* Memory-mapped HCR layout — field order is hardware-defined.
 * NOTE(review): in_param_h is 'unsigned long' while the other halves
 * are u32; presumably intentional for this platform — verify on
 * 64-bit builds. */
struct mtnic_if_cmd_reg {
unsigned long in_param_h;
u32 in_param_l;
u32 input_modifier;
u32 out_param_h;
u32 out_param_l;
u32 token;
#define MTNIC_MASK_CMD_REG_TOKEN MTNIC_BC(16,32)
u32 status_go_opcode;
#define MTNIC_MASK_CMD_REG_OPCODE MTNIC_BC(0,16)
#define MTNIC_MASK_CMD_REG_T_BIT MTNIC_BC(21,1)
#define MTNIC_MASK_CMD_REG_GO_BIT MTNIC_BC(23,1)
#define MTNIC_MASK_CMD_REG_STATUS MTNIC_BC(24,8)
};
/* CMD QUERY_FW */
/* Output mailbox of QUERY_FW: firmware size/revision and the location
 * of the clear-interrupt register and error buffer */
struct mtnic_if_query_fw_out_mbox {
u16 fw_pages; /* Total number of memory pages the device requires */
u16 rev_maj;
u16 rev_smin;
u16 rev_min;
u16 reserved1;
u16 ifc_rev; /* major revision of the command interface */
u8 ft;
u8 reserved2[3];
u32 reserved3[4];
u64 clr_int_base;
u32 reserved4[2];
u64 err_buf_start;
u32 err_buf_size;
};
/* CMD MTNIC_IF_CMD_QUERY_CAP */
/* Immediate input of QUERY_CAP: which capability to query (see enum
 * mtnic_if_caps) and, for per-port caps, the port number modifier */
struct mtnic_if_query_cap_in_imm {
u16 reserved1;
u8 cap_modifier; /* a modifier for the particular capability */
u8 cap_index; /* the index of the capability queried */
u32 reserved2;
};
/* CMD OPEN_NIC */
/* Input mailbox of OPEN_NIC: resource sizing (all log2) and steering
 * mode for each of the two ports */
struct mtnic_if_open_nic_in_mbox {
u16 reserved1;
u16 mkey; /* number of mem keys for all chip*/
u32 mkey_entry; /* mem key entries for each key*/
u8 log_rx_p1; /* log2 rx rings for port1 */
u8 log_cq_p1; /* log2 cq for port1 */
u8 log_tx_p1; /* log2 tx rings for port1 */
u8 steer_p1; /* port 1 steering mode */
u16 reserved2;
u8 log_vlan_p1; /* log2 vlan per rx port1 */
u8 log_mac_p1; /* log2 mac per rx port1 */
u8 log_rx_p2; /* log2 rx rings for port2 */
u8 log_cq_p2; /* log2 cq for port2 */
u8 log_tx_p2; /* log2 tx rings for port2 */
u8 steer_p2; /* port 2 steering mode */
u16 reserved3;
u8 log_vlan_p2; /* log2 vlan per rx port2 */
u8 log_mac_p2; /* log2 mac per rx port2 */
};
/* CMD CONFIG_RX */
struct mtnic_if_config_rx_in_imm {
u16 spkt_size; /* size of small packets interrupts enabled on CQ */
u16 resp_rcv_pause_frm_mcast_vlan_comp; /* Two flags see MASK below */
/* Enable response to receive pause frames */
/* Use VLAN in exact-match multicast checks (see SET_RX_RING_MCAST) */
};
/* CMD CONFIG_TX */
struct mtnic_if_config_send_in_imm {
u32 enph_gpf; /* Enable PseudoHeader and GeneratePauseFrames flags */
u32 reserved;
};
/* CMD HEART_BEAT */
struct mtnic_if_heart_beat_out_imm {
u32 flags; /* several flags */
#define MTNIC_MASK_HEAR_BEAT_INT_ERROR MTNIC_BC(31,1)
u32 reserved;
};
/*
 * PORT COMMANDS
 */
/* CMD CONFIG_PORT_VLAN_FILTER */
/* in mbox is a 4K bits mask - bit per VLAN */
struct mtnic_if_config_port_vlan_filter_in_mbox {
u64 filter[64]; /* vlans[63:0] sit in filter[0], vlans[127:64] sit in filter[1] .. */
};
/* CMD SET_PORT_MTU */
struct mtnic_if_set_port_mtu_in_imm {
u16 reserved1;
u16 mtu; /* The MTU of the port in bytes */
u32 reserved2;
};
/* CMD SET_PORT_DEFAULT_RING */
struct mtnic_if_set_port_default_ring_in_imm {
u8 reserved1[3];
u8 ring; /* Index of ring that collects promiscuous traffic */
u32 reserved2;
};
/* CMD SET_PORT_STATE */
struct mtnic_if_set_port_state_in_imm {
u32 state; /* if 1 the port state should be up */
#define MTNIC_MASK_CONFIG_PORT_STATE MTNIC_BC(0,1)
u32 reserved;
};
/* CMD CONFIG_CQ */
/* Input mailbox of CONFIG_CQ; the trailing flexible page_address[]
 * array carries the 64-bit physical addresses of the CQ buffer */
struct mtnic_if_config_cq_in_mbox {
u8 reserved1;
u8 cq;
u8 size; /* Num CQs is 2^size (size <= 22) */
u8 offset; /* start address of CQE in first page (11:6) */
u16 tlast; /* interrupt moderation timer from last completion usec */
u8 flags; /* flags */
u8 int_vector; /* MSI index if MSI is enabled, otherwise reserved */
u16 reserved2;
u16 max_cnt; /* interrupt moderation counter */
u8 page_size; /* each mapped page is 2^(12+page_size) bytes */
u8 reserved4[3];
u32 db_record_addr_h; /*physical address of CQ doorbell record */
u32 db_record_addr_l; /*physical address of CQ doorbell record */
u32 page_address[0]; /* 64 bit page addresses of CQ buffer */
};
/* CMD CONFIG_RX_RING */
/* Input mailbox of CONFIG_RX_RING; page_address[] must cover all Rx
 * descriptors plus the doorbell record */
struct mtnic_if_config_rx_ring_in_mbox {
u8 reserved1;
u8 ring; /* The ring index (with offset) */
u8 stride_size; /* stride and size */
/* Entry size = 16* (2^stride) bytes */
#define MTNIC_MASK_CONFIG_RX_RING_STRIDE MTNIC_BC(4,3)
/* Rx ring size is 2^size entries */
#define MTNIC_MASK_CONFIG_RX_RING_SIZE MTNIC_BC(0,4)
u8 flags; /* Bit0 - header separation */
u8 page_size; /* Each mapped page is 2^(12+page_size) bytes */
u8 reserved2[2];
u8 cq; /* CQ associated with this ring */
u32 db_record_addr_h;
u32 db_record_addr_l;
u32 page_address[0];/* Array of 2^size 64b page descriptor addresses */
/* Must hold all Rx descriptors + doorbell record. */
};
/* The modifier for SET_RX_RING_ADDR */
/* Packed into the input-modifier field of the command register */
struct mtnic_if_set_rx_ring_modifier {
u8 reserved;
u8 port_num;
u8 index;
u8 ring;
};
/* CMD SET_RX_RING_ADDR */
/* Immediate input: unicast MAC address (split 47:32 / 31:0) and
 * MAC/VLAN match flags for address-steered Rx rings */
struct mtnic_if_set_rx_ring_addr_in_imm {
u16 mac_47_32; /* UCAST MAC Address bits 47:32 */
u16 flags_vlan_id; /* MAC/VLAN flags and vlan id */
#define MTNIC_MASK_SET_RX_RING_ADDR_VLAN_ID MTNIC_BC(0,12)
#define MTNIC_MASK_SET_RX_RING_ADDR_BY_MAC MTNIC_BC(12,1)
#define MTNIC_MASK_SET_RX_RING_ADDR_BY_VLAN MTNIC_BC(13,1)
u32 mac_31_0; /* UCAST MAC Address bits 31:0 */
};
/* CMD CONFIG_TX_RING */
/* Input mailbox of CONFIG_TX_RING; page_address[] must accommodate
 * all Tx descriptors */
struct mtnic_if_config_send_ring_in_mbox {
u16 ring; /* The ring index (with offset) */
#define MTNIC_MASK_CONFIG_TX_RING_INDEX MTNIC_BC(0,8)
u8 size; /* Tx ring size is 32*2^size bytes */
#define MTNIC_MASK_CONFIG_TX_RING_SIZE MTNIC_BC(0,4)
u8 reserved;
u8 page_size; /* Each mapped page is 2^(12+page_size) bytes */
u8 qos_class; /* The COS used for this Tx */
u16 cq; /* CQ associated with this ring */
#define MTNIC_MASK_CONFIG_TX_CQ_INDEX MTNIC_BC(0,8)
u32 page_address[0]; /* 64 bit page addresses of descriptor buffer. */
/* The buffer must accommodate all Tx descriptors */
};
/* CMD CONFIG_EQ */
/* Input mailbox of CONFIG_EQ; page_address[] carries the 64-bit
 * physical addresses of the EQ buffer */
struct mtnic_if_config_eq_in_mbox {
u8 reserved1;
u8 int_vector; /* MSI index if MSI enabled; otherwise reserved */
#define MTNIC_MASK_CONFIG_EQ_INT_VEC MTNIC_BC(0,6)
u8 size; /* EQ size is 2^size entries (size <= 22) */
#define MTNIC_MASK_CONFIG_EQ_SIZE MTNIC_BC(0,5)
u8 offset; /* Start address of first EQE in first page (11:6) */
#define MTNIC_MASK_CONFIG_EQ_OFFSET MTNIC_BC(0,6)
u8 page_size; /* Each mapped page is 2^(12+page_size) bytes*/
u8 reserved[3];
u32 page_address[0]; /* 64 bit page addresses of EQ buffer */
};
/* CMD RELEASE_RESOURCE */
/* Resource type selector for the 'type' field below */
enum mtnic_if_resource_types {
MTNIC_IF_RESOURCE_TYPE_CQ = 0,
MTNIC_IF_RESOURCE_TYPE_RX_RING,
MTNIC_IF_RESOURCE_TYPE_TX_RING,
MTNIC_IF_RESOURCE_TYPE_EQ
};
struct mtnic_if_release_resource_in_imm {
u8 reserved1;
u8 index; /* must be 0 for TYPE_EQ */
u8 reserved2;
u8 type; /* see enum mtnic_if_resource_types */
u32 reserved3;
};
/*******************************************************************
 *
 * PCI addon structures
 *
 ********************************************************************/
/* Cached PCI device info: BAR addresses and a saved copy of the
 * 256-byte config space (64 dwords) */
struct pcidev {
unsigned long bar[6];
u32 dev_config_space[64];
struct pci_device *dev;
u8 bus;
u8 devfn;
};
/* The device and its upstream bridge */
struct dev_pci_struct {
struct pcidev dev;
struct pcidev br;
};
/* The only global var */
/* NOTE(review): this is a tentative *definition* in a header, not an
 * extern declaration; it only links cleanly while a single .c file
 * includes this header.  TODO: declare 'extern' here and define it in
 * the driver .c file. */
struct dev_pci_struct mtnic_pci_dev;
#endif /* H_MTNIC_IF_DEFS_H */