Staging: sxg: Locking related changes. Fix locking levels

* Fix locking-related issues, such as taking locks at the right level.
* Convert some counters to atomic variables, so that a lock no longer
  needs to be held while incrementing or decrementing them.

Signed-off-by: LinSysSoft Sahara Team <saharaproj@linsyssoft.com>
Signed-off-by: Christopher Harrer <charrer@alacritech.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
This commit is contained in:
Mithlesh Thukral 2009-01-19 20:24:30 +05:30 committed by Greg Kroah-Hartman
parent d9d578bff7
commit 6a2946baa9
3 changed files with 85 additions and 58 deletions

View File

@ -108,10 +108,7 @@ static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus);
static bool sxg_mac_filter(struct adapter_t *adapter, static bool sxg_mac_filter(struct adapter_t *adapter,
struct ether_header *EtherHdr, ushort length); struct ether_header *EtherHdr, ushort length);
#if SLIC_GET_STATS_ENABLED
static struct net_device_stats *sxg_get_stats(struct net_device * dev); static struct net_device_stats *sxg_get_stats(struct net_device * dev);
#endif
void sxg_free_resources(struct adapter_t *adapter); void sxg_free_resources(struct adapter_t *adapter);
void sxg_free_rcvblocks(struct adapter_t *adapter); void sxg_free_rcvblocks(struct adapter_t *adapter);
void sxg_free_sgl_buffers(struct adapter_t *adapter); void sxg_free_sgl_buffers(struct adapter_t *adapter);
@ -456,6 +453,7 @@ static int sxg_allocate_resources(struct adapter_t *adapter)
spin_lock_init(&adapter->XmtZeroLock); spin_lock_init(&adapter->XmtZeroLock);
spin_lock_init(&adapter->Bit64RegLock); spin_lock_init(&adapter->Bit64RegLock);
spin_lock_init(&adapter->AdapterLock); spin_lock_init(&adapter->AdapterLock);
atomic_set(&adapter->pending_allocations, 0);
DBG_ERROR("%s Setup the lists\n", __func__); DBG_ERROR("%s Setup the lists\n", __func__);
@ -928,10 +926,8 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
netdev->do_ioctl = sxg_ioctl; netdev->do_ioctl = sxg_ioctl;
#if XXXTODO #if XXXTODO
netdev->set_mac_address = sxg_mac_set_address; netdev->set_mac_address = sxg_mac_set_address;
#if SLIC_GET_STATS_ENABLED #endif
netdev->get_stats = sxg_get_stats; netdev->get_stats = sxg_get_stats;
#endif
#endif
netdev->set_multicast_list = sxg_mcast_set_list; netdev->set_multicast_list = sxg_mcast_set_list;
SET_ETHTOOL_OPS(netdev, &sxg_nic_ethtool_ops); SET_ETHTOOL_OPS(netdev, &sxg_nic_ethtool_ops);
@ -1044,8 +1040,9 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
{ {
struct net_device *dev = (struct net_device *) dev_id; struct net_device *dev = (struct net_device *) dev_id;
struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev); struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
/* u32 CpuMask = 0, i; */
if(adapter->state != ADAPT_UP)
return IRQ_NONE;
adapter->Stats.NumInts++; adapter->Stats.NumInts++;
if (adapter->Isr[0] == 0) { if (adapter->Isr[0] == 0) {
/* /*
@ -1191,6 +1188,7 @@ static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
* complicated than strictly needed. * complicated than strictly needed.
*/ */
adapter->Stats.RcvNoBuffer++; adapter->Stats.RcvNoBuffer++;
adapter->stats.rx_missed_errors++;
if (adapter->Stats.RcvNoBuffer < 5) { if (adapter->Stats.RcvNoBuffer < 5) {
DBG_ERROR("%s: SXG_ISR_ERR RMISS!!\n", DBG_ERROR("%s: SXG_ISR_ERR RMISS!!\n",
__func__); __func__);
@ -1968,9 +1966,8 @@ static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
u32 mmio_start = 0; u32 mmio_start = 0;
unsigned int mmio_len = 0; unsigned int mmio_len = 0;
struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev); struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
/*
set_bit(ADAPT_DOWN, &adapter->state); flush_scheduled_work();
*/ flush_scheduled_work();
/* Deallocate Resources */ /* Deallocate Resources */
unregister_netdev(dev); unregister_netdev(dev);
@ -2247,7 +2244,9 @@ static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0); XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
/* Update stats */ /* Update stats */
adapter->Stats.DumbXmtPkts++; adapter->Stats.DumbXmtPkts++;
adapter->stats.tx_packets++;
adapter->Stats.DumbXmtBytes += DataLength; adapter->Stats.DumbXmtBytes += DataLength;
adapter->stats.tx_bytes += DataLength;
#if XXXTODO /* Stats stuff */ #if XXXTODO /* Stats stuff */
if (SXG_MULTICAST_PACKET(EtherHdr)) { if (SXG_MULTICAST_PACKET(EtherHdr)) {
if (SXG_BROADCAST_PACKET(EtherHdr)) { if (SXG_BROADCAST_PACKET(EtherHdr)) {
@ -2306,6 +2305,7 @@ static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
* XmtZeroLock is grabbed * XmtZeroLock is grabbed
*/ */
adapter->Stats.XmtErrors++; adapter->Stats.XmtErrors++;
adapter->stats.tx_errors++;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal", SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail); pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
/* SxgSgl->DumbPacket is the skb */ /* SxgSgl->DumbPacket is the skb */
@ -2730,10 +2730,11 @@ static void sxg_link_state(struct adapter_t *adapter,
* Hold the adapter lock during this routine. Maybe move * Hold the adapter lock during this routine. Maybe move
* the lock to the caller. * the lock to the caller.
*/ */
spin_lock(&adapter->AdapterLock); /* IMP TODO : Check if we can survive without taking this lock */
// spin_lock(&adapter->AdapterLock);
if (LinkState == adapter->LinkState) { if (LinkState == adapter->LinkState) {
/* Nothing changed.. */ /* Nothing changed.. */
spin_unlock(&adapter->AdapterLock); // spin_unlock(&adapter->AdapterLock);
DBG_ERROR("EXIT #0 %s. Link status = %d\n", DBG_ERROR("EXIT #0 %s. Link status = %d\n",
__func__, LinkState); __func__, LinkState);
return; return;
@ -2742,7 +2743,7 @@ static void sxg_link_state(struct adapter_t *adapter,
adapter->LinkState = LinkState; adapter->LinkState = LinkState;
/* Drop the lock and indicate link state */ /* Drop the lock and indicate link state */
spin_unlock(&adapter->AdapterLock); // spin_unlock(&adapter->AdapterLock);
DBG_ERROR("EXIT #1 %s\n", __func__); DBG_ERROR("EXIT #1 %s\n", __func__);
sxg_indicate_link_state(adapter, LinkState); sxg_indicate_link_state(adapter, LinkState);
@ -3204,6 +3205,7 @@ void sxg_free_resources(struct adapter_t *adapter)
{ {
u32 RssIds, IsrCount; u32 RssIds, IsrCount;
u32 i; u32 i;
struct net_device *netdev = adapter->netdev;
RssIds = SXG_RSS_CPU_COUNT(adapter); RssIds = SXG_RSS_CPU_COUNT(adapter);
IsrCount = adapter->MsiEnabled ? RssIds : 1; IsrCount = adapter->MsiEnabled ? RssIds : 1;
@ -3215,6 +3217,10 @@ void sxg_free_resources(struct adapter_t *adapter)
return; return;
} }
/* Free Irq */
free_irq(adapter->netdev->irq, netdev);
if (!(IsListEmpty(&adapter->AllRcvBlocks))) { if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
sxg_free_rcvblocks(adapter); sxg_free_rcvblocks(adapter);
} }
@ -3294,8 +3300,8 @@ static void sxg_allocate_complete(struct adapter_t *adapter,
{ {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocCmp", SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocCmp",
adapter, VirtualAddress, Length, Context); adapter, VirtualAddress, Length, Context);
ASSERT(adapter->AllocationsPending); ASSERT(atomic_read(&adapter->pending_allocations));
--adapter->AllocationsPending; atomic_dec(&adapter->pending_allocations);
switch (Context) { switch (Context) {
@ -3340,14 +3346,8 @@ static int sxg_allocate_buffer_memory(struct adapter_t *adapter,
* than INITIALIZING or RUNNING state, fail. This is to prevent * than INITIALIZING or RUNNING state, fail. This is to prevent
* allocations in an improper driver state * allocations in an improper driver state
*/ */
spin_lock(&adapter->AdapterLock);
/* atomic_inc(&adapter->pending_allocations);
* Increment the AllocationsPending count while holding
* the lock. Pause processing relies on this
*/
++adapter->AllocationsPending;
spin_unlock(&adapter->AdapterLock);
if(BufferType != SXG_BUFFER_TYPE_SGL) if(BufferType != SXG_BUFFER_TYPE_SGL)
Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer); Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
@ -3356,13 +3356,11 @@ static int sxg_allocate_buffer_memory(struct adapter_t *adapter,
pBuffer = NULL; pBuffer = NULL;
} }
if (Buffer == NULL) { if (Buffer == NULL) {
spin_lock(&adapter->AdapterLock);
/* /*
* Decrement the AllocationsPending count while holding * Decrement the AllocationsPending count while holding
* the lock. Pause processing relies on this * the lock. Pause processing relies on this
*/ */
--adapter->AllocationsPending; atomic_dec(&adapter->pending_allocations);
spin_unlock(&adapter->AdapterLock);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1", SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1",
adapter, Size, BufferType, 0); adapter, Size, BufferType, 0);
return (STATUS_RESOURCES); return (STATUS_RESOURCES);
@ -3514,9 +3512,9 @@ static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlSglCmp", SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlSglCmp",
adapter, SxgSgl, Length, 0); adapter, SxgSgl, Length, 0);
if(!in_irq()) if(!in_irq())
spin_unlock_irqrestore(&adapter->SglQLock, sgl_flags); spin_lock_irqsave(&adapter->SglQLock, sgl_flags);
else else
spin_unlock(&adapter->SglQLock); spin_lock(&adapter->SglQLock);
adapter->AllSglBufferCount++; adapter->AllSglBufferCount++;
/* PhysicalAddress; */ /* PhysicalAddress; */
SxgSgl->PhysicalAddress = PhysicalAddress; SxgSgl->PhysicalAddress = PhysicalAddress;
@ -3549,6 +3547,21 @@ static void sxg_adapter_set_hwaddr(struct adapter_t *adapter)
/* sxg_dbg_macaddrs(adapter); */ /* sxg_dbg_macaddrs(adapter); */
struct net_device * dev = adapter->netdev;
if(!dev)
{
printk("sxg: Dev is Null\n");
}
DBG_ERROR("%s ENTER (%s)\n", __FUNCTION__, adapter->netdev->name);
if (netif_running(dev)) {
return -EBUSY;
}
if (!adapter) {
return -EBUSY;
}
if (!(adapter->currmacaddr[0] || if (!(adapter->currmacaddr[0] ||
adapter->currmacaddr[1] || adapter->currmacaddr[1] ||
adapter->currmacaddr[2] || adapter->currmacaddr[2] ||
@ -3749,7 +3762,7 @@ static int sxg_fill_descriptor_block(struct adapter_t *adapter,
for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) { for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) {
SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr); SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
ASSERT(RcvDataBufferHdr); ASSERT(RcvDataBufferHdr);
ASSERT(RcvDataBufferHdr->SxgDumbRcvPacket); // ASSERT(RcvDataBufferHdr->SxgDumbRcvPacket);
if (!RcvDataBufferHdr->SxgDumbRcvPacket) { if (!RcvDataBufferHdr->SxgDumbRcvPacket) {
SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr,
adapter->ReceiveBufferSize); adapter->ReceiveBufferSize);
@ -3815,7 +3828,7 @@ static void sxg_stock_rcv_buffers(struct adapter_t *adapter)
*/ */
if ((adapter->FreeRcvBufferCount < SXG_MIN_RCV_DATA_BUFFERS) && if ((adapter->FreeRcvBufferCount < SXG_MIN_RCV_DATA_BUFFERS) &&
(adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) && (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
(adapter->AllocationsPending == 0)) { (atomic_read(&adapter->pending_allocations) == 0)) {
sxg_allocate_buffer_memory(adapter, sxg_allocate_buffer_memory(adapter,
SXG_RCV_BLOCK_SIZE SXG_RCV_BLOCK_SIZE
(SXG_RCV_DATA_HDR_SIZE), (SXG_RCV_DATA_HDR_SIZE),
@ -3921,6 +3934,17 @@ void sxg_collect_statistics(struct adapter_t *adapter)
{ {
if(adapter->ucode_stats) if(adapter->ucode_stats)
WRITE_REG64(adapter, adapter->UcodeRegs[0].GetUcodeStats, adapter->pucode_stats, 0); WRITE_REG64(adapter, adapter->UcodeRegs[0].GetUcodeStats, adapter->pucode_stats, 0);
adapter->stats.rx_fifo_errors = adapter->ucode_stats->ERDrops;
adapter->stats.rx_over_errors = adapter->ucode_stats->NBDrops;
adapter->stats.tx_fifo_errors = adapter->ucode_stats->XDrops;
}
static struct net_device_stats *sxg_get_stats(struct net_device * dev)
{
struct adapter_t *adapter = netdev_priv(dev);
sxg_collect_statistics(adapter);
return (&adapter->stats);
} }
static struct pci_driver sxg_driver = { static struct pci_driver sxg_driver = {

View File

@ -290,7 +290,7 @@ struct sxg_stats {
struct list_entry *_ple; \ struct list_entry *_ple; \
if ((_pAdapt->FreeSglBufferCount < SXG_MIN_SGL_BUFFERS) && \ if ((_pAdapt->FreeSglBufferCount < SXG_MIN_SGL_BUFFERS) && \
(_pAdapt->AllSglBufferCount < SXG_MAX_SGL_BUFFERS) && \ (_pAdapt->AllSglBufferCount < SXG_MAX_SGL_BUFFERS) && \
(_pAdapt->AllocationsPending == 0)) { \ (atomic_read(&_pAdapt->pending_allocations) == 0)) { \
sxg_allocate_buffer_memory(_pAdapt, \ sxg_allocate_buffer_memory(_pAdapt, \
(sizeof(struct sxg_scatter_gather) + SXG_SGL_BUF_SIZE),\ (sizeof(struct sxg_scatter_gather) + SXG_SGL_BUF_SIZE),\
SXG_BUFFER_TYPE_SGL); \ SXG_BUFFER_TYPE_SGL); \
@ -670,6 +670,9 @@ struct adapter_t {
ushort FreeRcvBlockCount; /* # of free rcv descriptor blocks */ ushort FreeRcvBlockCount; /* # of free rcv descriptor blocks */
ushort AllRcvBlockCount; /* Number of total receive blocks */ ushort AllRcvBlockCount; /* Number of total receive blocks */
ushort ReceiveBufferSize; /* SXG_RCV_DATA/JUMBO_BUFFER_SIZE only */ ushort ReceiveBufferSize; /* SXG_RCV_DATA/JUMBO_BUFFER_SIZE only */
/* Converted this to a atomic variable
u32 AllocationsPending; */
atomic_t pending_allocations;
u32 AllocationsPending; /* Receive allocation pending */ u32 AllocationsPending; /* Receive allocation pending */
u32 RcvBuffersOnCard; /* SXG_DATA_BUFFERS owned by card */ u32 RcvBuffersOnCard; /* SXG_DATA_BUFFERS owned by card */
/* SGL buffers */ /* SGL buffers */

View File

@ -150,8 +150,7 @@ struct sxg_trace_buffer {
unsigned int trace_len; \ unsigned int trace_len; \
struct trace_entry *trace_entry; \ struct trace_entry *trace_entry; \
struct timeval timev; \ struct timeval timev; \
\ if(spin_trylock(&(buffer)->lock)) { \
spin_lock(&(buffer)->lock); \
trace_entry = &(buffer)->entries[(buffer)->in]; \ trace_entry = &(buffer)->entries[(buffer)->in]; \
do_gettimeofday(&timev); \ do_gettimeofday(&timev); \
\ \
@ -174,6 +173,7 @@ struct sxg_trace_buffer {
\ \
spin_unlock(&(buffer)->lock); \ spin_unlock(&(buffer)->lock); \
} \ } \
} \
} }
#else #else
#define SXG_TRACE(tdriver, buffer, tlevel, tname, a1, a2, a3, a4) #define SXG_TRACE(tdriver, buffer, tlevel, tname, a1, a2, a3, a4)