david/ipxe
david
/
ipxe
Archived
1
0
Fork 0

[Infiniband] Move event-queue process from driver to Infiniband core

This commit is contained in:
Michael Brown 2008-04-21 13:23:11 +01:00
parent e55bab3ce3
commit 35a5836677
5 changed files with 353 additions and 261 deletions

View File

@ -835,6 +835,27 @@ static int arbel_create_qp ( struct ib_device *ibdev,
return rc;
}
/**
 * Modify queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v mod_list		Modification list
 * @ret rc		Return status code
 */
static int arbel_modify_qp ( struct ib_device *ibdev,
			     struct ib_queue_pair *qp,
			     unsigned long mod_list ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );

	/* TODO */
	/* Stub only: QP modification is not yet implemented for Arbel.
	 * The casts below suppress unused-parameter warnings until a
	 * real implementation lands.
	 */
	( void ) arbel;
	( void ) qp;
	( void ) mod_list;
	return -ENOTSUP;
}
/**
* Destroy queue pair
*
@ -1202,6 +1223,25 @@ static void arbel_poll_cq ( struct ib_device *ibdev,
}
}
/***************************************************************************
*
* Event queues
*
***************************************************************************
*/
/**
 * Poll event queue
 *
 * @v ibdev		Infiniband device
 */
static void arbel_poll_eq ( struct ib_device *ibdev ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );

	/* TODO */
	/* Stub only: Arbel has no event-queue support yet; this exists
	 * so the Infiniband core's poll_eq hook is always callable.
	 */
	( void ) arbel;
}
/***************************************************************************
*
* Infiniband link-layer operations
@ -1399,10 +1439,12 @@ static struct ib_device_operations arbel_ib_operations = {
.create_cq = arbel_create_cq,
.destroy_cq = arbel_destroy_cq,
.create_qp = arbel_create_qp,
.modify_qp = arbel_modify_qp,
.destroy_qp = arbel_destroy_qp,
.post_send = arbel_post_send,
.post_recv = arbel_post_recv,
.poll_cq = arbel_poll_cq,
.poll_eq = arbel_poll_eq,
.open = arbel_open,
.close = arbel_close,
.mcast_attach = arbel_mcast_attach,
@ -1938,7 +1980,7 @@ static int arbel_probe ( struct pci_device *pci,
i = ( ARBEL_NUM_PORTS - 1 );
err_alloc_ibdev:
for ( ; i >= 0 ; i-- )
free_ibdev ( arbel->ibdev[i] );
ibdev_put ( arbel->ibdev[i] );
free ( arbel );
err_alloc_arbel:
return rc;
@ -1962,7 +2004,7 @@ static void arbel_remove ( struct pci_device *pci ) {
free_dma ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
free_dma ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
for ( i = ( ARBEL_NUM_PORTS - 1 ) ; i >= 0 ; i-- )
free_ibdev ( arbel->ibdev[i] );
ibdev_put ( arbel->ibdev[i] );
free ( arbel );
}

View File

@ -30,7 +30,6 @@
#include <gpxe/umalloc.h>
#include <gpxe/iobuf.h>
#include <gpxe/netdevice.h>
#include <gpxe/process.h>
#include <gpxe/infiniband.h>
#include "hermon.h"
@ -1226,6 +1225,216 @@ static void hermon_poll_cq ( struct ib_device *ibdev,
}
}
/***************************************************************************
*
* Event queues
*
***************************************************************************
*/
/**
 * Create event queue
 *
 * Allocates the EQE ring, hands it to the hardware via SW2HW_EQ, and
 * maps the events of interest (currently only port state changes) on
 * to it.  On failure, all partially-constructed state is torn down.
 *
 * @v hermon		Hermon device
 * @ret rc		Return status code
 */
static int hermon_create_eq ( struct hermon *hermon ) {
	struct hermon_event_queue *hermon_eq = &hermon->eq;
	struct hermonprm_eqc eqctx;
	struct hermonprm_event_mask mask;
	unsigned int i;
	int rc;

	/* Select event queue number */
	/* NOTE(review): first usable EQN after the UAR-reserved range;
	 * the factor of 4 presumably reflects EQ doorbells per
	 * reserved UAR page -- confirm against the ConnectX PRM.
	 */
	hermon_eq->eqn = ( 4 * hermon->cap.reserved_uars );
	if ( hermon_eq->eqn < hermon->cap.reserved_eqs )
		hermon_eq->eqn = hermon->cap.reserved_eqs;

	/* Calculate doorbell address */
	hermon_eq->doorbell =
		( hermon->uar + HERMON_DB_EQ_OFFSET ( hermon_eq->eqn ) );

	/* Allocate event queue itself */
	hermon_eq->eqe_size =
		( HERMON_NUM_EQES * sizeof ( hermon_eq->eqe[0] ) );
	hermon_eq->eqe = malloc_dma ( hermon_eq->eqe_size,
				      sizeof ( hermon_eq->eqe[0] ) );
	if ( ! hermon_eq->eqe ) {
		rc = -ENOMEM;
		goto err_eqe;
	}
	memset ( hermon_eq->eqe, 0, hermon_eq->eqe_size );
	/* Set every entry's owner bit so the poll loop initially sees
	 * the whole ring as hardware-owned (i.e. empty).
	 */
	for ( i = 0 ; i < HERMON_NUM_EQES ; i++ ) {
		MLX_FILL_1 ( &hermon_eq->eqe[i].generic, 7, owner, 1 );
	}
	/* Make EQE initialisation visible before handing the ring to
	 * hardware.
	 */
	barrier();

	/* Allocate MTT entries */
	if ( ( rc = hermon_alloc_mtt ( hermon, hermon_eq->eqe,
				       hermon_eq->eqe_size,
				       &hermon_eq->mtt ) ) != 0 )
		goto err_alloc_mtt;

	/* Hand queue over to hardware */
	memset ( &eqctx, 0, sizeof ( eqctx ) );
	MLX_FILL_1 ( &eqctx, 0, st, 0xa /* "Fired" */ );
	MLX_FILL_1 ( &eqctx, 2,
		     page_offset, ( hermon_eq->mtt.page_offset >> 5 ) );
	/* Hardware wants log2 of the ring size */
	MLX_FILL_1 ( &eqctx, 3, log_eq_size, fls ( HERMON_NUM_EQES - 1 ) );
	MLX_FILL_1 ( &eqctx, 7, mtt_base_addr_l,
		     ( hermon_eq->mtt.mtt_base_addr >> 3 ) );
	if ( ( rc = hermon_cmd_sw2hw_eq ( hermon, hermon_eq->eqn,
					  &eqctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p SW2HW_EQ failed: %s\n",
		       hermon, strerror ( rc ) );
		goto err_sw2hw_eq;
	}

	/* Map events to this event queue */
	memset ( &mask, 0, sizeof ( mask ) );
	MLX_FILL_1 ( &mask, 1, port_state_change, 1 );
	if ( ( rc = hermon_cmd_map_eq ( hermon,
					( HERMON_MAP_EQ | hermon_eq->eqn ),
					&mask ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p MAP_EQ failed: %s\n",
		       hermon, strerror ( rc ) );
		goto err_map_eq;
	}

	DBGC ( hermon, "Hermon %p EQN %#lx ring at [%p,%p])\n",
	       hermon, hermon_eq->eqn, hermon_eq->eqe,
	       ( ( ( void * ) hermon_eq->eqe ) + hermon_eq->eqe_size ) );
	return 0;

	/* Unwind in reverse order of construction */
 err_map_eq:
	hermon_cmd_hw2sw_eq ( hermon, hermon_eq->eqn, &eqctx );
 err_sw2hw_eq:
	hermon_free_mtt ( hermon, &hermon_eq->mtt );
 err_alloc_mtt:
	free_dma ( hermon_eq->eqe, hermon_eq->eqe_size );
 err_eqe:
	memset ( hermon_eq, 0, sizeof ( *hermon_eq ) );
	return rc;
}
/**
 * Destroy event queue
 *
 * Unmaps events from the queue, reclaims ownership from hardware and
 * frees the ring.  If the hardware refuses to hand the queue back,
 * the memory is deliberately leaked rather than freed while the HCA
 * may still DMA into it.
 *
 * @v hermon		Hermon device
 */
static void hermon_destroy_eq ( struct hermon *hermon ) {
	struct hermon_event_queue *hermon_eq = &hermon->eq;
	/* eqctx is not pre-initialised here; presumably HW2SW_EQ uses
	 * it only as an output buffer for the retrieved context --
	 * TODO confirm against hermon_cmd_hw2sw_eq().
	 */
	struct hermonprm_eqc eqctx;
	struct hermonprm_event_mask mask;
	int rc;

	/* Unmap events from event queue */
	memset ( &mask, 0, sizeof ( mask ) );
	MLX_FILL_1 ( &mask, 1, port_state_change, 1 );
	if ( ( rc = hermon_cmd_map_eq ( hermon,
					( HERMON_UNMAP_EQ | hermon_eq->eqn ),
					&mask ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p FATAL MAP_EQ failed to unmap: %s\n",
		       hermon, strerror ( rc ) );
		/* Continue; HCA may die but system should survive */
	}

	/* Take ownership back from hardware */
	if ( ( rc = hermon_cmd_hw2sw_eq ( hermon, hermon_eq->eqn,
					  &eqctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p FATAL HW2SW_EQ failed: %s\n",
		       hermon, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		return;
	}

	/* Free MTT entries */
	hermon_free_mtt ( hermon, &hermon_eq->mtt );

	/* Free memory */
	free_dma ( hermon_eq->eqe, hermon_eq->eqe_size );
	memset ( hermon_eq, 0, sizeof ( *hermon_eq ) );
}
/**
 * Handle port state event
 *
 * Decodes a port state change EQE and notifies the Infiniband core of
 * the new link state.
 *
 * @v hermon		Hermon device
 * @v eqe		Port state change event queue entry
 */
static void hermon_event_port_state_change ( struct hermon *hermon,
					     union hermonprm_event_entry *eqe){
	unsigned int port;
	int link_up;

	/* Get port and link status */
	/* EQE reports ports 1-based; convert to 0-based array index.
	 * NOTE(review): bit 2 of event_sub_type is taken to mean
	 * "link active" -- confirm subtype encoding against the PRM.
	 */
	port = ( MLX_GET ( &eqe->port_state_change, data.p ) - 1 );
	link_up = ( MLX_GET ( &eqe->generic, event_sub_type ) & 0x04 );
	DBGC ( hermon, "Hermon %p port %d link %s\n", hermon, ( port + 1 ),
	       ( link_up ? "up" : "down" ) );

	/* Sanity check */
	if ( port >= HERMON_NUM_PORTS ) {
		DBGC ( hermon, "Hermon %p port %d does not exist!\n",
		       hermon, ( port + 1 ) );
		return;
	}

	/* Notify Infiniband core of link state change */
	ib_link_state_changed ( hermon->ibdev[port] );
}
/**
 * Poll event queue
 *
 * Consumes all software-owned entries from the event queue,
 * dispatches each to its handler, and acknowledges consumption to the
 * hardware via the EQ doorbell.
 *
 * @v ibdev		Infiniband device
 */
static void hermon_poll_eq ( struct ib_device *ibdev ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_event_queue *hermon_eq = &hermon->eq;
	union hermonprm_event_entry *eqe;
	union hermonprm_doorbell_register db_reg;
	unsigned int eqe_idx_mask;
	unsigned int event_type;

	while ( 1 ) {
		/* Look for event entry */
		eqe_idx_mask = ( HERMON_NUM_EQES - 1 );
		eqe = &hermon_eq->eqe[hermon_eq->next_idx & eqe_idx_mask];
		/* The expected owner bit alternates on each complete
		 * pass through the ring: (next_idx & HERMON_NUM_EQES)
		 * extracts the current pass's parity, and an entry is
		 * software-owned only when its owner bit matches it.
		 */
		if ( MLX_GET ( &eqe->generic, owner ) ^
		     ( ( hermon_eq->next_idx & HERMON_NUM_EQES ) ? 1 : 0 ) ) {
			/* Entry still owned by hardware; end of poll */
			break;
		}
		DBGCP ( hermon, "Hermon %p event:\n", hermon );
		DBGCP_HD ( hermon, eqe, sizeof ( *eqe ) );

		/* Handle event */
		event_type = MLX_GET ( &eqe->generic, event_type );
		switch ( event_type ) {
		case HERMON_EV_PORT_STATE_CHANGE:
			hermon_event_port_state_change ( hermon, eqe );
			break;
		default:
			/* Unknown events are logged and skipped rather
			 * than treated as fatal.
			 */
			DBGC ( hermon, "Hermon %p unrecognised event type "
			       "%#x:\n", hermon, event_type );
			DBGC_HD ( hermon, eqe, sizeof ( *eqe ) );
			break;
		}

		/* Update event queue's index */
		hermon_eq->next_idx++;

		/* Ring doorbell */
		/* Consumer index field is 24 bits wide, hence the mask */
		MLX_FILL_1 ( &db_reg.event, 0,
			     ci, ( hermon_eq->next_idx & 0x00ffffffUL ) );
		DBGCP ( hermon, "Ringing doorbell %08lx with %08lx\n",
			virt_to_phys ( hermon_eq->doorbell ),
			db_reg.dword[0] );
		writel ( db_reg.dword[0], hermon_eq->doorbell );
	}
}
/***************************************************************************
*
* Infiniband link-layer operations
@ -1428,6 +1637,7 @@ static struct ib_device_operations hermon_ib_operations = {
.post_send = hermon_post_send,
.post_recv = hermon_post_recv,
.poll_cq = hermon_poll_cq,
.poll_eq = hermon_poll_eq,
.open = hermon_open,
.close = hermon_close,
.mcast_attach = hermon_mcast_attach,
@ -1435,227 +1645,6 @@ static struct ib_device_operations hermon_ib_operations = {
.mad = hermon_mad,
};
/***************************************************************************
*
* Event queues
*
***************************************************************************
*/
/**
* Create event queue
*
* @v hermon Hermon device
* @ret rc Return status code
*/
static int hermon_create_eq ( struct hermon *hermon ) {
struct hermon_event_queue *hermon_eq = &hermon->eq;
struct hermonprm_eqc eqctx;
struct hermonprm_event_mask mask;
unsigned int i;
int rc;
/* Select event queue number */
hermon_eq->eqn = ( 4 * hermon->cap.reserved_uars );
if ( hermon_eq->eqn < hermon->cap.reserved_eqs )
hermon_eq->eqn = hermon->cap.reserved_eqs;
/* Calculate doorbell address */
hermon_eq->doorbell =
( hermon->uar + HERMON_DB_EQ_OFFSET ( hermon_eq->eqn ) );
/* Allocate event queue itself */
hermon_eq->eqe_size =
( HERMON_NUM_EQES * sizeof ( hermon_eq->eqe[0] ) );
hermon_eq->eqe = malloc_dma ( hermon_eq->eqe_size,
sizeof ( hermon_eq->eqe[0] ) );
if ( ! hermon_eq->eqe ) {
rc = -ENOMEM;
goto err_eqe;
}
memset ( hermon_eq->eqe, 0, hermon_eq->eqe_size );
for ( i = 0 ; i < HERMON_NUM_EQES ; i++ ) {
MLX_FILL_1 ( &hermon_eq->eqe[i].generic, 7, owner, 1 );
}
barrier();
/* Allocate MTT entries */
if ( ( rc = hermon_alloc_mtt ( hermon, hermon_eq->eqe,
hermon_eq->eqe_size,
&hermon_eq->mtt ) ) != 0 )
goto err_alloc_mtt;
/* Hand queue over to hardware */
memset ( &eqctx, 0, sizeof ( eqctx ) );
MLX_FILL_1 ( &eqctx, 0, st, 0xa /* "Fired" */ );
MLX_FILL_1 ( &eqctx, 2,
page_offset, ( hermon_eq->mtt.page_offset >> 5 ) );
MLX_FILL_1 ( &eqctx, 3, log_eq_size, fls ( HERMON_NUM_EQES - 1 ) );
MLX_FILL_1 ( &eqctx, 7, mtt_base_addr_l,
( hermon_eq->mtt.mtt_base_addr >> 3 ) );
if ( ( rc = hermon_cmd_sw2hw_eq ( hermon, hermon_eq->eqn,
&eqctx ) ) != 0 ) {
DBGC ( hermon, "Hermon %p SW2HW_EQ failed: %s\n",
hermon, strerror ( rc ) );
goto err_sw2hw_eq;
}
/* Map events to this event queue */
memset ( &mask, 0, sizeof ( mask ) );
MLX_FILL_1 ( &mask, 1, port_state_change, 1 );
if ( ( rc = hermon_cmd_map_eq ( hermon,
( HERMON_MAP_EQ | hermon_eq->eqn ),
&mask ) ) != 0 ) {
DBGC ( hermon, "Hermon %p MAP_EQ failed: %s\n",
hermon, strerror ( rc ) );
goto err_map_eq;
}
DBGC ( hermon, "Hermon %p EQN %#lx ring at [%p,%p])\n",
hermon, hermon_eq->eqn, hermon_eq->eqe,
( ( ( void * ) hermon_eq->eqe ) + hermon_eq->eqe_size ) );
return 0;
err_map_eq:
hermon_cmd_hw2sw_eq ( hermon, hermon_eq->eqn, &eqctx );
err_sw2hw_eq:
hermon_free_mtt ( hermon, &hermon_eq->mtt );
err_alloc_mtt:
free_dma ( hermon_eq->eqe, hermon_eq->eqe_size );
err_eqe:
memset ( hermon_eq, 0, sizeof ( *hermon_eq ) );
return rc;
}
/**
* Destroy event queue
*
* @v hermon Hermon device
*/
static void hermon_destroy_eq ( struct hermon *hermon ) {
struct hermon_event_queue *hermon_eq = &hermon->eq;
struct hermonprm_eqc eqctx;
struct hermonprm_event_mask mask;
int rc;
/* Unmap events from event queue */
memset ( &mask, 0, sizeof ( mask ) );
MLX_FILL_1 ( &mask, 1, port_state_change, 1 );
if ( ( rc = hermon_cmd_map_eq ( hermon,
( HERMON_UNMAP_EQ | hermon_eq->eqn ),
&mask ) ) != 0 ) {
DBGC ( hermon, "Hermon %p FATAL MAP_EQ failed to unmap: %s\n",
hermon, strerror ( rc ) );
/* Continue; HCA may die but system should survive */
}
/* Take ownership back from hardware */
if ( ( rc = hermon_cmd_hw2sw_eq ( hermon, hermon_eq->eqn,
&eqctx ) ) != 0 ) {
DBGC ( hermon, "Hermon %p FATAL HW2SW_EQ failed: %s\n",
hermon, strerror ( rc ) );
/* Leak memory and return; at least we avoid corruption */
return;
}
/* Free MTT entries */
hermon_free_mtt ( hermon, &hermon_eq->mtt );
/* Free memory */
free_dma ( hermon_eq->eqe, hermon_eq->eqe_size );
memset ( hermon_eq, 0, sizeof ( *hermon_eq ) );
}
/**
* Handle port state event
*
* @v hermon Hermon device
* @v eqe Port state change event queue entry
*/
static void hermon_event_port_state_change ( struct hermon *hermon,
union hermonprm_event_entry *eqe){
unsigned int port;
int link_up;
/* Get port and link status */
port = ( MLX_GET ( &eqe->port_state_change, data.p ) - 1 );
link_up = ( MLX_GET ( &eqe->generic, event_sub_type ) & 0x04 );
DBGC ( hermon, "Hermon %p port %d link %s\n", hermon, ( port + 1 ),
( link_up ? "up" : "down" ) );
/* Sanity check */
if ( port >= HERMON_NUM_PORTS ) {
DBGC ( hermon, "Hermon %p port %d does not exist!\n",
hermon, ( port + 1 ) );
return;
}
/* Notify Infiniband core of link state change */
ib_link_state_changed ( hermon->ibdev[port] );
}
/**
* Poll event queue
*
* @v hermon Hermon device
*/
static void hermon_poll_eq ( struct hermon *hermon ) {
struct hermon_event_queue *hermon_eq = &hermon->eq;
union hermonprm_event_entry *eqe;
union hermonprm_doorbell_register db_reg;
unsigned int eqe_idx_mask;
unsigned int event_type;
while ( 1 ) {
/* Look for event entry */
eqe_idx_mask = ( HERMON_NUM_EQES - 1 );
eqe = &hermon_eq->eqe[hermon_eq->next_idx & eqe_idx_mask];
if ( MLX_GET ( &eqe->generic, owner ) ^
( ( hermon_eq->next_idx & HERMON_NUM_EQES ) ? 1 : 0 ) ) {
/* Entry still owned by hardware; end of poll */
break;
}
DBGCP ( hermon, "Hermon %p event:\n", hermon );
DBGCP_HD ( hermon, eqe, sizeof ( *eqe ) );
/* Handle event */
event_type = MLX_GET ( &eqe->generic, event_type );
switch ( event_type ) {
case HERMON_EV_PORT_STATE_CHANGE:
hermon_event_port_state_change ( hermon, eqe );
break;
default:
DBGC ( hermon, "Hermon %p unrecognised event type "
"%#x:\n", hermon, event_type );
DBGC_HD ( hermon, eqe, sizeof ( *eqe ) );
break;
}
/* Update event queue's index */
hermon_eq->next_idx++;
/* Ring doorbell */
MLX_FILL_1 ( &db_reg.event, 0,
ci, ( hermon_eq->next_idx & 0x00ffffffUL ) );
DBGCP ( hermon, "Ringing doorbell %08lx with %08lx\n",
virt_to_phys ( hermon_eq->doorbell ),
db_reg.dword[0] );
writel ( db_reg.dword[0], hermon_eq->doorbell );
}
}
/**
 * Event queue poll processor
 *
 * Process-loop hook: recovers the owning hermon device from the
 * embedded process structure and polls its event queue.  (Removed by
 * this commit in favour of the Infiniband core's shared poller.)
 *
 * @v process		Hermon event queue process
 */
static void hermon_step ( struct process *process ) {
	struct hermon *hermon =
		container_of ( process, struct hermon, event_process );
	hermon_poll_eq ( hermon );
}
/***************************************************************************
*
* Firmware control
@ -2168,7 +2157,6 @@ static int hermon_probe ( struct pci_device *pci,
goto err_alloc_hermon;
}
pci_set_drvdata ( pci, hermon );
process_init ( &hermon->event_process, hermon_step, NULL );
/* Allocate Infiniband devices */
for ( i = 0 ; i < HERMON_NUM_PORTS ; i++ ) {
@ -2270,8 +2258,7 @@ static int hermon_probe ( struct pci_device *pci,
i = ( HERMON_NUM_PORTS - 1 );
err_alloc_ibdev:
for ( ; i >= 0 ; i-- )
free_ibdev ( hermon->ibdev[i] );
process_del ( &hermon->event_process );
ibdev_put ( hermon->ibdev[i] );
free ( hermon );
err_alloc_hermon:
return rc;
@ -2296,8 +2283,7 @@ static void hermon_remove ( struct pci_device *pci ) {
free_dma ( hermon->mailbox_out, HERMON_MBOX_SIZE );
free_dma ( hermon->mailbox_in, HERMON_MBOX_SIZE );
for ( i = ( HERMON_NUM_PORTS - 1 ) ; i >= 0 ; i-- )
free_ibdev ( hermon->ibdev[i] );
process_del ( &hermon->event_process );
ibdev_put ( hermon->ibdev[i] );
free ( hermon );
}

View File

@ -9,7 +9,6 @@
#include <stdint.h>
#include <gpxe/uaccess.h>
#include <gpxe/process.h>
#include "mlx_bitops.h"
#include "MT25408_PRM.h"
@ -465,8 +464,6 @@ struct hermon {
/** Event queue */
struct hermon_event_queue eq;
/** Event queue process */
struct process event_process;
/** Completion queue in-use bitmask */
hermon_bitmask_t cq_inuse[ HERMON_BITMASK_SIZE ( HERMON_MAX_CQS ) ];

View File

@ -8,6 +8,7 @@
*/
#include <stdint.h>
#include <gpxe/refcnt.h>
#include <gpxe/device.h>
/** Subnet administrator QPN */
@ -254,6 +255,12 @@ struct ib_device_operations {
struct ib_completion_queue *cq,
ib_completer_t complete_send,
ib_completer_t complete_recv );
/**
* Poll event queue
*
* @v ibdev Infiniband device
*/
void ( * poll_eq ) ( struct ib_device *ibdev );
/**
* Open port
*
@ -300,6 +307,10 @@ struct ib_device_operations {
/** An Infiniband device */
struct ib_device {
/** Reference counter */
struct refcnt refcnt;
/** List of Infiniband devices */
struct list_head list;
/** Underlying device */
struct device *dev;
/** Infiniband operations */
@ -337,7 +348,6 @@ extern struct ib_work_queue * ib_find_wq ( struct ib_completion_queue *cq,
extern struct ib_device * alloc_ibdev ( size_t priv_size );
extern int register_ibdev ( struct ib_device *ibdev );
extern void unregister_ibdev ( struct ib_device *ibdev );
extern void free_ibdev ( struct ib_device *ibdev );
extern void ib_link_state_changed ( struct ib_device *ibdev );
/**
@ -444,6 +454,28 @@ ib_mad ( struct ib_device *ibdev, struct ib_mad_hdr *mad, size_t len ) {
return ibdev->op->mad ( ibdev, mad, len );
}
/**
* Get reference to Infiniband device
*
* @v ibdev Infiniband device
* @ret ibdev Infiniband device
*/
static inline __attribute__ (( always_inline )) struct ib_device *
ibdev_get ( struct ib_device *ibdev ) {
ref_get ( &ibdev->refcnt );
return ibdev;
}
/**
* Drop reference to Infiniband device
*
* @v ibdev Infiniband device
*/
static inline __attribute__ (( always_inline )) void
ibdev_put ( struct ib_device *ibdev ) {
ref_put ( &ibdev->refcnt );
}
/**
* Set Infiniband work queue driver-private data
*

View File

@ -29,6 +29,7 @@
#include <gpxe/netdevice.h>
#include <gpxe/iobuf.h>
#include <gpxe/ipoib.h>
#include <gpxe/process.h>
#include <gpxe/infiniband.h>
/** @file
@ -37,6 +38,9 @@
*
*/
/** List of Infiniband devices */
struct list_head ib_devices = LIST_HEAD_INIT ( ib_devices );
/**
* Create completion queue
*
@ -349,6 +353,50 @@ static int ib_get_mad_params ( struct ib_device *ibdev ) {
return 0;
}
/***************************************************************************
*
* Event queues
*
***************************************************************************
*/
/**
 * Handle Infiniband link state change
 *
 * Refreshes the device's MAD parameters for the new link state and
 * propagates the change to the IPoIB layer.  If the MAD parameters
 * cannot be refreshed, IPoIB is deliberately not notified.
 *
 * @v ibdev		Infiniband device
 */
void ib_link_state_changed ( struct ib_device *ibdev ) {
	int rc;

	/* Update MAD parameters */
	if ( ( rc = ib_get_mad_params ( ibdev ) ) != 0 ) {
		DBGC ( ibdev, "IBDEV %p could not update MAD parameters: %s\n",
		       ibdev, strerror ( rc ) );
		return;
	}

	/* Notify IPoIB of link state change */
	ipoib_link_state_changed ( ibdev );
}
/**
 * Single-step the Infiniband event queue
 *
 * Polls the event queue of every registered Infiniband device.  This
 * replaces the per-driver event-queue processes with a single shared
 * poller in the Infiniband core.
 *
 * @v process		Infiniband event queue process
 */
static void ib_step ( struct process *process __unused ) {
	struct ib_device *ibdev;

	list_for_each_entry ( ibdev, &ib_devices, list ) {
		ibdev->op->poll_eq ( ibdev );
	}
}

/** Infiniband event queue process
 *
 * Permanent process: runs for the lifetime of the system, stepping
 * every device's event queue on each pass of the process loop.
 */
struct process ib_process __permanent_process = {
	.step = ib_step,
};
/***************************************************************************
*
* Infiniband device creation/destruction
@ -385,6 +433,10 @@ struct ib_device * alloc_ibdev ( size_t priv_size ) {
int register_ibdev ( struct ib_device *ibdev ) {
int rc;
/* Add to device list */
ibdev_get ( ibdev );
list_add_tail ( &ibdev->list, &ib_devices );
/* Open link */
if ( ( rc = ib_open ( ibdev ) ) != 0 )
goto err_open;
@ -400,12 +452,16 @@ int register_ibdev ( struct ib_device *ibdev ) {
goto err_ipoib_probe;
}
DBGC ( ibdev, "IBDEV %p registered (phys %s)\n", ibdev,
ibdev->dev->name );
return 0;
err_ipoib_probe:
err_get_mad_params:
ib_close ( ibdev );
err_open:
list_del ( &ibdev->list );
ibdev_put ( ibdev );
return rc;
}
@ -415,34 +471,13 @@ int register_ibdev ( struct ib_device *ibdev ) {
* @v ibdev Infiniband device
*/
void unregister_ibdev ( struct ib_device *ibdev ) {
/* Close device */
ipoib_remove ( ibdev );
ib_close ( ibdev );
}
/**
* Free Infiniband device
*
* @v ibdev Infiniband device
*/
void free_ibdev ( struct ib_device *ibdev ) {
free ( ibdev );
}
/**
* Handle Infiniband link state change
*
* @v ibdev Infiniband device
*/
void ib_link_state_changed ( struct ib_device *ibdev ) {
int rc;
/* Update MAD parameters */
if ( ( rc = ib_get_mad_params ( ibdev ) ) != 0 ) {
DBGC ( ibdev, "IBDEV %p could not update MAD parameters: %s\n",
ibdev, strerror ( rc ) );
return;
}
/* Notify IPoIB of link state change */
ipoib_link_state_changed ( ibdev );
/* Remove from device list */
list_del ( &ibdev->list );
ibdev_put ( ibdev );
DBGC ( ibdev, "IBDEV %p unregistered\n", ibdev );
}