david/ipxe
david
/
ipxe
Archived
1
0
Fork 0

[infiniband] Allow queue pairs to have a custom allocator for receive iobufs

Signed-off-by: Michael Brown <mcb30@ipxe.org>
This commit is contained in:
Michael Brown 2012-08-29 22:11:58 +01:00
parent 96be171be5
commit f747fac3e1
6 changed files with 48 additions and 10 deletions

View File

@@ -3128,6 +3128,11 @@ static int hermon_eth_transmit ( struct net_device *netdev,
return 0; return 0;
} }
/** Hermon Ethernet queue pair operations
 *
 * Receive I/O buffers are allocated with the standard alloc_iob()
 * allocator via the alloc_iob hook of struct ib_queue_pair_operations.
 */
static struct ib_queue_pair_operations hermon_eth_qp_op = {
.alloc_iob = alloc_iob,
};
/** /**
* Handle Hermon Ethernet device send completion * Handle Hermon Ethernet device send completion
* *
@@ -3225,7 +3230,8 @@ static int hermon_eth_open ( struct net_device *netdev ) {
/* Allocate queue pair */ /* Allocate queue pair */
port->eth_qp = ib_create_qp ( ibdev, IB_QPT_ETH, port->eth_qp = ib_create_qp ( ibdev, IB_QPT_ETH,
HERMON_ETH_NUM_SEND_WQES, port->eth_cq, HERMON_ETH_NUM_SEND_WQES, port->eth_cq,
HERMON_ETH_NUM_RECV_WQES, port->eth_cq ); HERMON_ETH_NUM_RECV_WQES, port->eth_cq,
&hermon_eth_qp_op );
if ( ! port->eth_qp ) { if ( ! port->eth_qp ) {
DBGC ( hermon, "Hermon %p port %d could not create queue " DBGC ( hermon, "Hermon %p port %d could not create queue "
"pair\n", hermon, ibdev->port ); "pair\n", hermon, ibdev->port );

View File

@@ -534,6 +534,11 @@ static struct ib_completion_queue_operations ipoib_cq_op = {
.complete_recv = ipoib_complete_recv, .complete_recv = ipoib_complete_recv,
}; };
/** IPoIB queue pair operations
 *
 * Receive I/O buffers are allocated with the standard alloc_iob()
 * allocator via the alloc_iob hook of struct ib_queue_pair_operations.
 */
static struct ib_queue_pair_operations ipoib_qp_op = {
.alloc_iob = alloc_iob,
};
/** /**
* Poll IPoIB network device * Poll IPoIB network device
* *
@@ -667,9 +672,9 @@ static int ipoib_open ( struct net_device *netdev ) {
} }
/* Allocate queue pair */ /* Allocate queue pair */
ipoib->qp = ib_create_qp ( ibdev, IB_QPT_UD, ipoib->qp = ib_create_qp ( ibdev, IB_QPT_UD, IPOIB_NUM_SEND_WQES,
IPOIB_NUM_SEND_WQES, ipoib->cq, ipoib->cq, IPOIB_NUM_RECV_WQES, ipoib->cq,
IPOIB_NUM_RECV_WQES, ipoib->cq ); &ipoib_qp_op );
if ( ! ipoib->qp ) { if ( ! ipoib->qp ) {
DBGC ( ipoib, "IPoIB %p could not allocate queue pair\n", DBGC ( ipoib, "IPoIB %p could not allocate queue pair\n",
ipoib ); ipoib );

View File

@@ -142,6 +142,16 @@ enum ib_queue_pair_type {
IB_QPT_ETH, IB_QPT_ETH,
}; };
/** Infiniband queue pair operations
 *
 * Allows the owner of a queue pair to supply a custom allocator for
 * receive I/O buffers (used by ib_refill_recv() when refilling the
 * receive work queue).
 */
struct ib_queue_pair_operations {
/** Allocate receive I/O buffer
*
* @v len Maximum receive length
* @ret iobuf I/O buffer (or NULL if out of memory)
*/
struct io_buffer * ( * alloc_iob ) ( size_t len );
};
/** An Infiniband Queue Pair */ /** An Infiniband Queue Pair */
struct ib_queue_pair { struct ib_queue_pair {
/** Containing Infiniband device */ /** Containing Infiniband device */
@@ -169,6 +179,8 @@ struct ib_queue_pair {
struct list_head mgids; struct list_head mgids;
/** Address vector */ /** Address vector */
struct ib_address_vector av; struct ib_address_vector av;
/** Queue pair operations */
struct ib_queue_pair_operations *op;
/** Driver private data */ /** Driver private data */
void *drv_priv; void *drv_priv;
/** Queue owner private data */ /** Queue owner private data */
@@ -478,8 +490,8 @@ extern void ib_poll_cq ( struct ib_device *ibdev,
extern struct ib_queue_pair * extern struct ib_queue_pair *
ib_create_qp ( struct ib_device *ibdev, enum ib_queue_pair_type type, ib_create_qp ( struct ib_device *ibdev, enum ib_queue_pair_type type,
unsigned int num_send_wqes, struct ib_completion_queue *send_cq, unsigned int num_send_wqes, struct ib_completion_queue *send_cq,
unsigned int num_recv_wqes, unsigned int num_recv_wqes, struct ib_completion_queue *recv_cq,
struct ib_completion_queue *recv_cq ); struct ib_queue_pair_operations *op );
extern int ib_modify_qp ( struct ib_device *ibdev, struct ib_queue_pair *qp ); extern int ib_modify_qp ( struct ib_device *ibdev, struct ib_queue_pair *qp );
extern void ib_destroy_qp ( struct ib_device *ibdev, extern void ib_destroy_qp ( struct ib_device *ibdev,
struct ib_queue_pair *qp ); struct ib_queue_pair *qp );

View File

@@ -168,6 +168,7 @@ void ib_poll_cq ( struct ib_device *ibdev,
* @v send_cq Send completion queue * @v send_cq Send completion queue
* @v num_recv_wqes Number of receive work queue entries * @v num_recv_wqes Number of receive work queue entries
* @v recv_cq Receive completion queue * @v recv_cq Receive completion queue
* @v op Queue pair operations
* @ret qp Queue pair * @ret qp Queue pair
* *
* The queue pair will be left in the INIT state; you must call * The queue pair will be left in the INIT state; you must call
@@ -178,7 +179,8 @@ struct ib_queue_pair * ib_create_qp ( struct ib_device *ibdev,
unsigned int num_send_wqes, unsigned int num_send_wqes,
struct ib_completion_queue *send_cq, struct ib_completion_queue *send_cq,
unsigned int num_recv_wqes, unsigned int num_recv_wqes,
struct ib_completion_queue *recv_cq ) { struct ib_completion_queue *recv_cq,
struct ib_queue_pair_operations *op ) {
struct ib_queue_pair *qp; struct ib_queue_pair *qp;
size_t total_size; size_t total_size;
int rc; int rc;
@@ -210,6 +212,7 @@ struct ib_queue_pair * ib_create_qp ( struct ib_device *ibdev,
qp->recv.iobufs = ( ( ( void * ) qp ) + sizeof ( *qp ) + qp->recv.iobufs = ( ( ( void * ) qp ) + sizeof ( *qp ) +
( num_send_wqes * sizeof ( qp->send.iobufs[0] ) )); ( num_send_wqes * sizeof ( qp->send.iobufs[0] ) ));
INIT_LIST_HEAD ( &qp->mgids ); INIT_LIST_HEAD ( &qp->mgids );
qp->op = op;
/* Perform device-specific initialisation and get QPN */ /* Perform device-specific initialisation and get QPN */
if ( ( rc = ibdev->op->create_qp ( ibdev, qp ) ) != 0 ) { if ( ( rc = ibdev->op->create_qp ( ibdev, qp ) ) != 0 ) {
@@ -514,7 +517,7 @@ void ib_refill_recv ( struct ib_device *ibdev, struct ib_queue_pair *qp ) {
while ( qp->recv.fill < qp->recv.num_wqes ) { while ( qp->recv.fill < qp->recv.num_wqes ) {
/* Allocate I/O buffer */ /* Allocate I/O buffer */
iobuf = alloc_iob ( IB_MAX_PAYLOAD_SIZE ); iobuf = qp->op->alloc_iob ( IB_MAX_PAYLOAD_SIZE );
if ( ! iobuf ) { if ( ! iobuf ) {
/* Non-fatal; we will refill on next attempt */ /* Non-fatal; we will refill on next attempt */
return; return;

View File

@@ -257,6 +257,11 @@ static struct ib_completion_queue_operations ib_cmrc_completion_ops = {
.complete_recv = ib_cmrc_complete_recv, .complete_recv = ib_cmrc_complete_recv,
}; };
/** Infiniband CMRC queue pair operations
 *
 * Receive I/O buffers are allocated with the standard alloc_iob()
 * allocator via the alloc_iob hook of struct ib_queue_pair_operations.
 */
static struct ib_queue_pair_operations ib_cmrc_queue_pair_ops = {
.alloc_iob = alloc_iob,
};
/** /**
* Send data via CMRC * Send data via CMRC
* *
@@ -410,7 +415,8 @@ int ib_cmrc_open ( struct interface *xfer, struct ib_device *ibdev,
/* Create queue pair */ /* Create queue pair */
cmrc->qp = ib_create_qp ( ibdev, IB_QPT_RC, IB_CMRC_NUM_SEND_WQES, cmrc->qp = ib_create_qp ( ibdev, IB_QPT_RC, IB_CMRC_NUM_SEND_WQES,
cmrc->cq, IB_CMRC_NUM_RECV_WQES, cmrc->cq ); cmrc->cq, IB_CMRC_NUM_RECV_WQES, cmrc->cq,
&ib_cmrc_queue_pair_ops );
if ( ! cmrc->qp ) { if ( ! cmrc->qp ) {
DBGC ( cmrc, "CMRC %p could not create queue pair\n", cmrc ); DBGC ( cmrc, "CMRC %p could not create queue pair\n", cmrc );
rc = -ENOMEM; rc = -ENOMEM;

View File

@@ -164,6 +164,11 @@ static struct ib_completion_queue_operations ib_mi_completion_ops = {
.complete_recv = ib_mi_complete_recv, .complete_recv = ib_mi_complete_recv,
}; };
/** Management interface queue pair operations
 *
 * Receive I/O buffers are allocated with the standard alloc_iob()
 * allocator via the alloc_iob hook of struct ib_queue_pair_operations.
 */
static struct ib_queue_pair_operations ib_mi_queue_pair_ops = {
.alloc_iob = alloc_iob,
};
/** /**
* Transmit MAD * Transmit MAD
* *
@@ -353,7 +358,8 @@ struct ib_mad_interface * ib_create_mi ( struct ib_device *ibdev,
/* Create queue pair */ /* Create queue pair */
mi->qp = ib_create_qp ( ibdev, type, IB_MI_NUM_SEND_WQES, mi->cq, mi->qp = ib_create_qp ( ibdev, type, IB_MI_NUM_SEND_WQES, mi->cq,
IB_MI_NUM_RECV_WQES, mi->cq ); IB_MI_NUM_RECV_WQES, mi->cq,
&ib_mi_queue_pair_ops );
if ( ! mi->qp ) { if ( ! mi->qp ) {
DBGC ( mi, "MI %p could not allocate queue pair\n", mi ); DBGC ( mi, "MI %p could not allocate queue pair\n", mi );
goto err_create_qp; goto err_create_qp;