david/ipxe

Dead code removal

Michael Brown 2007-09-17 05:12:47 +01:00
parent 4e78a53cf2
commit 440e7926fb
2 changed files with 5 additions and 533 deletions
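Everything deleted by this commit sits between #if 0 and #endif /* 0 */ guards, so the preprocessor had already discarded it at build time and removing it cannot change the generated binary. A minimal sketch of that guard pattern follows; the function names are hypothetical and are not taken from this commit:

#include <stdio.h>

/* Hypothetical legacy path, disabled but left in the tree.  The
 * preprocessor drops everything between #if 0 and #endif, so this
 * function is never compiled, linked, or executed.
 */
#if 0
static void legacy_poll ( void ) {
        printf ( "old polling path\n" );
}
#endif /* 0 */

/* Hypothetical current path; the only code that actually builds. */
static void current_poll ( void ) {
        printf ( "new polling path\n" );
}

int main ( void ) {
        current_poll();
        return 0;
}

In this commit the guarded blocks being dropped are the old mlx_nic wrapper driver (static IPoIB queue/CQ tables, mlx_open/mlx_close/mlx_poll and the arbel_probe/arbel_remove hack) and the unused ib_mac / ibhdr definitions in the Infiniband header.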


@@ -35,291 +35,8 @@ struct ib_address_vector hack_ipoib_bcast_av;
static const struct ib_gid arbel_no_gid = {
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2 }
};
#if 0
#define MLX_RX_MAX_FILL NUM_IPOIB_RCV_WQES
struct mlx_nic {
#if ! CREATE_OWN
/** Queue pair handle */
udqp_t ipoib_qph;
/** Send completion queue */
cq_t snd_cqh;
/** Receive completion queue */
cq_t rcv_cqh;
#endif
/** Broadcast Address Vector */
ud_av_t bcast_av;
/** RX fill level */
unsigned int rx_fill;
#if CREATE_OWN
struct ib_completion_queue *own_send_cq;
struct ib_completion_queue *own_recv_cq;
struct ib_queue_pair *own_qp;
#endif
};
static struct io_buffer *static_ipoib_tx_ring[NUM_IPOIB_SND_WQES];
static struct io_buffer *static_ipoib_rx_ring[NUM_IPOIB_RCV_WQES];
static struct arbel static_arbel;
#if ! CREATE_OWN
static struct arbel_completion_queue static_arbel_ipoib_send_cq = {
.ci_doorbell_idx = IPOIB_SND_CQ_CI_DB_IDX,
};
static struct ib_completion_queue static_ipoib_send_cq = {
.cqn = 1234, /* Only used for debug messages */
.num_cqes = NUM_IPOIB_SND_CQES,
.work_queues = LIST_HEAD_INIT ( static_ipoib_send_cq.work_queues ),
.dev_priv = &static_arbel_ipoib_send_cq,
};
static struct arbel_completion_queue static_arbel_ipoib_recv_cq = {
.ci_doorbell_idx = IPOIB_RCV_CQ_CI_DB_IDX,
};
static struct ib_completion_queue static_ipoib_recv_cq = {
.cqn = 2345, /* Only used for debug messages */
.num_cqes = NUM_IPOIB_RCV_CQES,
.work_queues = LIST_HEAD_INIT ( static_ipoib_recv_cq.work_queues ),
.dev_priv = &static_arbel_ipoib_recv_cq,
};
static struct arbel_queue_pair static_arbel_ipoib_qp = {
.send = {
.doorbell_idx = IPOIB_SND_QP_DB_IDX,
},
.recv = {
.doorbell_idx = IPOIB_RCV_QP_DB_IDX,
},
};
static struct ib_queue_pair static_ipoib_qp = {
.send = {
.qp = &static_ipoib_qp,
.is_send = 1,
.cq = &static_ipoib_send_cq,
.num_wqes = NUM_IPOIB_SND_WQES,
.iobufs = static_ipoib_tx_ring,
.list = LIST_HEAD_INIT (static_ipoib_qp.send.list),
.dev_priv = &static_arbel_ipoib_qp.send,
},
.recv = {
.qp = &static_ipoib_qp,
.is_send = 0,
.cq = &static_ipoib_recv_cq,
.num_wqes = NUM_IPOIB_RCV_WQES,
.iobufs = static_ipoib_rx_ring,
.list = LIST_HEAD_INIT (static_ipoib_qp.recv.list),
.dev_priv = &static_arbel_ipoib_qp.recv,
},
.dev_priv = &static_arbel_ipoib_qp,
};
#endif
static struct ib_device static_ibdev = {
.dev_priv = &static_arbel,
};
/**
* Open network device
*
* @v netdev Network device
* @ret rc Return status code
*/
static int mlx_open ( struct net_device *netdev ) {
( void ) netdev;
return 0;
}
/**
* Close network device
*
* @v netdev Network device
*/
static void mlx_close ( struct net_device *netdev ) {
( void ) netdev;
}
static int arbel_post_send ( struct ib_device *ibdev,
struct ib_queue_pair *qp,
struct ib_address_vector *av,
struct io_buffer *iobuf );
static int mlx_transmit_direct ( struct net_device *netdev,
struct io_buffer *iobuf ) {
struct mlx_nic *mlx = netdev->priv;
int rc;
struct ud_av_st *bcast_av = mlx->bcast_av;
struct arbelprm_ud_address_vector *bav =
( struct arbelprm_ud_address_vector * ) &bcast_av->av;
struct ib_address_vector av = {
.dest_qp = bcast_av->dest_qp,
.qkey = bcast_av->qkey,
.dlid = MLX_GET ( bav, rlid ),
.rate = ( MLX_GET ( bav, max_stat_rate ) ? 1 : 4 ),
.sl = MLX_GET ( bav, sl ),
.gid_present = 1,
};
memcpy ( &av.gid, ( ( void * ) bav ) + 16, 16 );
rc = arbel_post_send ( &static_ibdev,
#if CREATE_OWN
mlx->own_qp,
#else
&static_ipoib_qp,
#endif
&av, iobuf );
return rc;
}
static void arbel_poll_cq ( struct ib_device *ibdev,
struct ib_completion_queue *cq,
ib_completer_t complete_send,
ib_completer_t complete_recv );
static void temp_complete_send ( struct ib_device *ibdev __unused,
struct ib_queue_pair *qp,
struct ib_completion *completion,
struct io_buffer *iobuf ) {
struct net_device *netdev = qp->owner_priv;
DBG ( "Wahey! TX completion\n" );
netdev_tx_complete_err ( netdev, iobuf,
( completion->syndrome ? -EIO : 0 ) );
}
static void temp_complete_recv ( struct ib_device *ibdev __unused,
struct ib_queue_pair *qp,
struct ib_completion *completion,
struct io_buffer *iobuf ) {
struct net_device *netdev = qp->owner_priv;
struct mlx_nic *mlx = netdev->priv;
DBG ( "Yay! RX completion on %p len %zx:\n", iobuf, completion->len );
if ( completion->syndrome ) {
netdev_rx_err ( netdev, iobuf, -EIO );
} else {
iob_put ( iobuf, completion->len );
iob_pull ( iobuf, sizeof ( struct ib_global_route_header ) );
netdev_rx ( netdev, iobuf );
}
mlx->rx_fill--;
}
static int arbel_post_recv ( struct ib_device *ibdev,
struct ib_queue_pair *qp,
struct io_buffer *iobuf );
static void mlx_refill_rx ( struct net_device *netdev ) {
struct mlx_nic *mlx = netdev->priv;
struct io_buffer *iobuf;
int rc;
while ( mlx->rx_fill < MLX_RX_MAX_FILL ) {
iobuf = alloc_iob ( 2048 );
if ( ! iobuf )
break;
DBG ( "Posting RX buffer %p:\n", iobuf );
if ( ( rc = arbel_post_recv ( &static_ibdev,
#if CREATE_OWN
mlx->own_qp,
#else
&static_ipoib_qp,
#endif
iobuf ) ) != 0 ) {
free_iob ( iobuf );
break;
}
mlx->rx_fill++;
}
}
/**
* Poll for completed and received packets
*
* @v netdev Network device
*/
static void mlx_poll ( struct net_device *netdev ) {
struct mlx_nic *mlx = netdev->priv;
int rc;
if ( ( rc = poll_error_buf() ) != 0 ) {
DBG ( "poll_error_buf() failed: %s\n", strerror ( rc ) );
return;
}
/* Drain event queue. We can ignore events, since we're going
* to just poll all completion queues anyway.
*/
if ( ( rc = drain_eq() ) != 0 ) {
DBG ( "drain_eq() failed: %s\n", strerror ( rc ) );
return;
}
/* Poll completion queues */
arbel_poll_cq ( &static_ibdev,
#if CREATE_OWN
mlx->own_send_cq,
#else
&static_ipoib_send_cq,
#endif
temp_complete_send, temp_complete_recv );
#if 0
arbel_poll_cq ( &static_ibdev,
#if CREATE_OWN
mlx->own_recv_cq,
#else
&static_ipoib_recv_cq,
#endif
temp_complete_send, temp_complete_recv );
#endif
mlx_refill_rx ( netdev );
}
/**
* Enable or disable interrupts
*
* @v netdev Network device
* @v enable Interrupts should be enabled
*/
static void mlx_irq ( struct net_device *netdev, int enable ) {
( void ) netdev;
( void ) enable;
}
static struct net_device_operations mlx_operations = {
.open = mlx_open,
.close = mlx_close,
.transmit = mlx_transmit_direct,
.poll = mlx_poll,
.irq = mlx_irq,
};
#endif /* 0 */
/***************************************************************************
@@ -1030,6 +747,11 @@ static void arbel_ring_doorbell ( struct arbel *arbel,
writel ( db_reg->dword[1], ( arbel->uar + offset + 4 ) );
}
/** GID used for GID-less send work queue entries */
static const struct ib_gid arbel_no_gid = {
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2 }
};
/**
* Post send work queue entry
*
@@ -1500,193 +1222,6 @@ static int arbel_get_port_gid ( struct arbel *arbel, struct ib_gid *gid ) {
#if 0
/**
* Probe PCI device
*
* @v pci PCI device
* @v id PCI ID
* @ret rc Return status code
*/
static int arbel_probe ( struct pci_device *pci,
const struct pci_device_id *id __unused ) {
struct net_device *netdev;
struct arbelprm_query_dev_lim dev_lim;
struct arbel *arbel = &static_arbel;
struct mlx_nic *mlx;
struct ib_mac *mac;
udqp_t qph;
int rc;
/* Allocate net device */
netdev = alloc_ibdev ( sizeof ( *mlx ) );
if ( ! netdev )
return -ENOMEM;
netdev_init ( netdev, &mlx_operations );
mlx = netdev->priv;
pci_set_drvdata ( pci, netdev );
netdev->dev = &pci->dev;
memset ( mlx, 0, sizeof ( *mlx ) );
/* Fix up PCI device */
adjust_pci_device ( pci );
/* Initialise hardware */
if ( ( rc = ib_driver_init ( pci, &qph ) ) != 0 )
goto err_ipoib_init;
mlx->bcast_av = ib_data.bcast_av;
#if ! CREATE_OWN
mlx->ipoib_qph = qph;
mlx->snd_cqh = ib_data.ipoib_snd_cq;
mlx->rcv_cqh = ib_data.ipoib_rcv_cq;
mac = ( ( struct ib_mac * ) netdev->ll_addr );
mac->qpn = htonl ( ib_get_qpn ( mlx->ipoib_qph ) );
memcpy ( &mac->gid, ib_data.port_gid.raw, sizeof ( mac->gid ) );
#endif
/* Hack up IB structures */
arbel->config = memfree_pci_dev.cr_space;
arbel->mailbox_in = dev_buffers_p->inprm_buf;
arbel->mailbox_out = dev_buffers_p->outprm_buf;
arbel->uar = memfree_pci_dev.uar;
arbel->db_rec = dev_ib_data.uar_context_base;
arbel->reserved_lkey = dev_ib_data.mkey;
arbel->eqn = dev_ib_data.eq.eqn;
#if ! CREATE_OWN
static_arbel_ipoib_qp.send.wqe =
( ( struct udqp_st * ) qph )->snd_wq;
static_arbel_ipoib_qp.recv.wqe =
( ( struct udqp_st * ) qph )->rcv_wq;
static_arbel_ipoib_send_cq.cqe =
( ( struct cq_st * ) ib_data.ipoib_snd_cq )->cq_buf;
static_arbel_ipoib_recv_cq.cqe =
( ( struct cq_st * ) ib_data.ipoib_rcv_cq )->cq_buf;
static_ipoib_qp.qpn = ib_get_qpn ( qph );
static_ipoib_qp.owner_priv = netdev;
list_add ( &static_ipoib_qp.send.list,
&static_ipoib_send_cq.work_queues );
list_add ( &static_ipoib_qp.recv.list,
&static_ipoib_recv_cq.work_queues );
#endif
static_ibdev.op = &arbel_ib_operations;
/* Get device limits */
if ( ( rc = arbel_cmd_query_dev_lim ( arbel, &dev_lim ) ) != 0 ) {
DBGC ( arbel, "Arbel %p could not get device limits: %s\n",
arbel, strerror ( rc ) );
goto err_query_dev_lim;
}
arbel->limits.reserved_uars = MLX_GET ( &dev_lim, num_rsvd_uars );
arbel->limits.reserved_cqs =
( 1 << MLX_GET ( &dev_lim, log2_rsvd_cqs ) );
arbel->limits.reserved_qps =
( 1 << MLX_GET ( &dev_lim, log2_rsvd_qps ) );
#if CREATE_OWN
struct ib_device *ibdev = &static_ibdev;
mlx->own_send_cq = ib_create_cq ( ibdev, 32 );
if ( ! mlx->own_send_cq ) {
DBG ( "Could not create send CQ\n" );
return -EIO;
}
#if 0
mlx->own_recv_cq = ib_create_cq ( ibdev, 32 );
if ( ! mlx->own_recv_cq ) {
DBG ( "Could not create send CQ\n" );
return -EIO;
}
#endif
mlx->own_qp = ib_create_qp ( ibdev, NUM_IPOIB_SND_WQES,
mlx->own_send_cq, NUM_IPOIB_RCV_WQES,
//mlx->own_recv_cq, ipoib_qkey );
mlx->own_send_cq, ipoib_qkey );
if ( ! mlx->own_qp ) {
DBG ( "Could not create QP\n" );
return -EIO;
}
mlx->own_qp->owner_priv = netdev;
struct ib_gid *bcast_gid = ( struct ib_gid * ) &ib_data.bcast_gid;
if ( ( rc = ib_mcast_attach ( ibdev, mlx->own_qp,
bcast_gid ) ) != 0 ) {
DBG ( "Could not attach to broadcast GID: %s\n",
strerror ( rc ) );
return rc;
}
if ( ( rc = arbel_get_port_gid ( arbel, &ibdev->port_gid ) ) != 0 ) {
DBGC ( arbel, "Arbel %p could not determine port GID: %s\n",
arbel, strerror ( rc ) );
return rc;
}
DBG ( "Port GID:\n" );
DBG_HD ( &ibdev->port_gid, sizeof ( ibdev->port_gid ) );
mac = ( ( struct ib_mac * ) netdev->ll_addr );
mac->qpn = htonl ( mlx->own_qp->qpn );
memcpy ( &mac->gid, &ibdev->port_gid, sizeof ( mac->gid ) );
#endif
#if 0
DBG ( "MADS SND CQN = %#lx\n", dev_ib_data.mads_qp.snd_cq.cqn );
struct ib_completion_queue *test_cq;
test_cq = ib_create_cq ( &static_ibdev, 32 );
if ( test_cq ) {
DBG ( "Woot: create_cq() passed!\n" );
}
#endif
ibdev->dev = &pci->dev;
struct ud_av_st *bcast_av = mlx->bcast_av;
struct arbelprm_ud_address_vector *bav =
( struct arbelprm_ud_address_vector * ) &bcast_av->av;
struct ib_address_vector *av = &hack_ipoib_bcast_av;
av->dest_qp = bcast_av->dest_qp;
av->qkey = bcast_av->qkey;
av->dlid = MLX_GET ( bav, rlid );
av->rate = ( MLX_GET ( bav, max_stat_rate ) ? 1 : 4 );
av->sl = MLX_GET ( bav, sl );
av->gid_present = 1;
memcpy ( &av->gid, ( ( void * ) bav ) + 16, 16 );
/* Register network device */
if ( ( rc = register_netdev ( netdev ) ) != 0 )
goto err_register_netdev;
return 0;
err_query_dev_lim:
err_register_netdev:
err_ipoib_init:
ib_driver_close ( 0 );
netdev_nullify ( netdev );
netdev_put ( netdev );
return rc;
}
/**
* Remove PCI device
*
* @v pci PCI device
*/
static void arbel_remove ( struct pci_device *pci ) {
struct net_device *netdev = pci_get_drvdata ( pci );
unregister_netdev ( netdev );
ib_driver_close ( 0 );
netdev_nullify ( netdev );
netdev_put ( netdev );
}
#endif /* 0 */
/**
* Probe PCI device
*


@@ -10,42 +10,6 @@
#include <stdint.h>
#include <gpxe/device.h>
#if 0
/** Infiniband MAC address length */
#define IB_ALEN 20
/** An Infiniband MAC address */
struct ib_mac {
/** Queue pair number
*
* MSB must be zero; QPNs are only 24-bit.
*/
uint32_t qpn;
/** Port GID */
struct ib_gid gid;
} __attribute__ (( packed ));
/** Infiniband link-layer header length */
#define IB_HLEN 4
/** An Infiniband link-layer header */
struct ibhdr {
/** Network-layer protocol */
uint16_t proto;
/** Reserved, must be zero */
uint16_t reserved;
} __attribute__ (( packed ));
#endif
/** An Infiniband Global Identifier */
struct ib_gid {
uint8_t bytes[16];
@@ -511,31 +475,4 @@ union ib_mad {
struct ib_mad_port_info port_info;
} __attribute__ (( packed ));
#if 0
extern struct ll_protocol infiniband_protocol;
extern const char * ib_ntoa ( const void *ll_addr );
/**
* Allocate Infiniband device
*
* @v priv_size Size of driver private data
* @ret netdev Network device, or NULL
*/
static inline struct net_device * alloc_ibdev ( size_t priv_size ) {
struct net_device *netdev;
netdev = alloc_netdev ( priv_size );
if ( netdev ) {
netdev->ll_protocol = &infiniband_protocol;
}
return netdev;
}
#endif
#endif /* _GPXE_INFINIBAND_H */