Merge branch 'netvsc-bug-fixes-and-cleanups'
Stephen Hemminger says:

====================
netvsc: bug fixes and cleanups

These fix NAPI issues and bugs found during shutdown testing.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
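For background on the NAPI fixes in this series, the budget handling that the poll routine converges on can be sketched roughly as below. This is a hedged, minimal illustration rather than the driver's own code: struct my_chan and the my_*() helpers are hypothetical stand-ins for driver-private ring state and do nothing real here.

#include <linux/netdevice.h>

struct my_chan {
	struct napi_struct napi;
	/* driver ring-buffer state would live here */
};

/* illustrative stubs; a real driver reads its own ring buffer in these */
static bool my_ring_empty(struct my_chan *chan) { return true; }
static int my_rx_one(struct my_chan *chan) { return 1; }
static bool my_reenable_irq_saw_data(struct my_chan *chan) { return false; }

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_chan *chan = container_of(napi, struct my_chan, napi);
	int work_done = 0;

	/* one host descriptor may complete several packets, so this can overshoot */
	while (!my_ring_empty(chan) && work_done < budget)
		work_done += my_rx_one(chan);

	/* only when under budget: complete NAPI, re-enable the host interrupt,
	 * and reschedule if more data slipped in while re-enabling
	 */
	if (work_done < budget &&
	    napi_complete_done(napi, work_done) &&
	    my_reenable_irq_saw_data(chan))
		napi_reschedule(napi);

	/* never report more than the budget back to the core */
	return min(work_done, budget);
}

The same three points show up in the diff below: the receive loop stops at the budget instead of clamping afterwards, interrupts are re-armed through napi_complete_done() only when the ring was drained under budget, and the return value is capped because a single descriptor can complete more than one packet.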
commit 4deece6c9f
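The teardown fixes center on making the context-to-device pointer RCU protected (the __rcu annotation, the rcu_dereference()/rtnl_dereference() accessors, and the call_rcu() free visible in the diff below). A minimal sketch of that pattern, using assumed names (my_ctx, my_dev) rather than the driver's own types:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_dev {
	u32 num_chn;
	struct rcu_head rcu;
};

struct my_ctx {
	struct my_dev __rcu *dev;	/* written under RTNL, read under RCU */
};

/* data-path reader: no sleeping lock, must tolerate NULL during teardown */
static int my_get_num_chn(struct my_ctx *ctx)
{
	struct my_dev *dev;
	int ret = -ENODEV;

	rcu_read_lock();
	dev = rcu_dereference(ctx->dev);
	if (dev)
		ret = dev->num_chn;
	rcu_read_unlock();
	return ret;
}

static void my_dev_free(struct rcu_head *head)
{
	kfree(container_of(head, struct my_dev, rcu));
}

/* removal path, caller holds rtnl_lock(): unpublish first, free after a grace period */
static void my_dev_remove(struct my_ctx *ctx)
{
	struct my_dev *dev = rtnl_dereference(ctx->dev);

	RCU_INIT_POINTER(ctx->dev, NULL);
	call_rcu(&dev->rcu, my_dev_free);
}

Readers simply observe NULL once the pointer is unpublished, which is why the old start_remove flag can go away; the object itself is only freed after every such reader has finished.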
@@ -686,7 +686,7 @@ struct net_device_context {
 	/* point back to our device context */
 	struct hv_device *device_ctx;
 	/* netvsc_device */
-	struct netvsc_device *nvdev;
+	struct netvsc_device __rcu *nvdev;
 	/* reconfigure work */
 	struct delayed_work dwork;
 	/* last reconfig time */
@@ -708,9 +708,6 @@ struct net_device_context {
 	u32 speed;
 	struct netvsc_ethtool_stats eth_stats;
 
-	/* the device is going away */
-	bool start_remove;
-
 	/* State to manage the associated VF interface. */
 	struct net_device __rcu *vf_netdev;
 
@@ -723,6 +720,7 @@ struct net_device_context {
 /* Per channel data */
 struct netvsc_channel {
 	struct vmbus_channel *channel;
+	const struct vmpacket_descriptor *desc;
 	struct napi_struct napi;
 	struct multi_send_data msd;
 	struct multi_recv_comp mrc;
@@ -763,8 +761,8 @@ struct netvsc_device {
 
 	u32 max_chn;
 	u32 num_chn;
-	spinlock_t sc_lock; /* Protects num_sc_offered variable */
-	u32 num_sc_offered;
+
+	refcount_t sc_offered;
 
 	/* Holds rndis device info */
 	void *extension;
@@ -779,6 +777,8 @@ struct netvsc_device {
 	atomic_t open_cnt;
 
 	struct netvsc_channel chan_table[VRSS_CHANNEL_MAX];
+
+	struct rcu_head rcu;
 };
 
 static inline struct netvsc_device *
@@ -80,8 +80,10 @@ static struct netvsc_device *alloc_net_device(void)
 	return net_device;
 }
 
-static void free_netvsc_device(struct netvsc_device *nvdev)
+static void free_netvsc_device(struct rcu_head *head)
 {
+	struct netvsc_device *nvdev
+		= container_of(head, struct netvsc_device, rcu);
 	int i;
 
 	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
@@ -90,6 +92,10 @@ static void free_netvsc_device(struct netvsc_device *nvdev)
 	kfree(nvdev);
 }
 
+static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
+{
+	call_rcu(&nvdev->rcu, free_netvsc_device);
+}
+
 static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
 {
@@ -551,7 +557,7 @@ void netvsc_device_remove(struct hv_device *device)
 
 	netvsc_disconnect_vsp(device);
 
-	net_device_ctx->nvdev = NULL;
+	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
 
 	/*
 	 * At this point, no one should be accessing net_device
@@ -566,7 +572,7 @@ void netvsc_device_remove(struct hv_device *device)
 		napi_disable(&net_device->chan_table[i].napi);
 
 	/* Release all resources */
-	free_netvsc_device(net_device);
+	free_netvsc_device_rcu(net_device);
 }
 
 #define RING_AVAIL_PERCENT_HIWATER 20
@@ -599,7 +605,6 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
 {
 	struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
 	struct net_device *ndev = hv_get_drvdata(device);
-	struct net_device_context *net_device_ctx = netdev_priv(ndev);
 	struct vmbus_channel *channel = device->channel;
 	u16 q_idx = 0;
 	int queue_sends;
@@ -633,7 +638,6 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
 		wake_up(&net_device->wait_drain);
 
 	if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
-	    !net_device_ctx->start_remove &&
 	    (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
 	     queue_sends < 1))
 		netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
@@ -702,8 +706,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 		packet->page_buf_cnt;
 
 	/* Add padding */
-	if (skb && skb->xmit_more && remain &&
-	    !packet->cp_partial) {
+	if (skb->xmit_more && remain && !packet->cp_partial) {
 		padding = net_device->pkt_align - remain;
 		rndis_msg->msg_len += padding;
 		packet->total_data_buflen += padding;
@@ -861,9 +864,7 @@ int netvsc_send(struct hv_device *device,
 	if (msdp->pkt)
 		msd_len = msdp->pkt->total_data_buflen;
 
-	try_batch = (skb != NULL) && msd_len > 0 && msdp->count <
-		    net_device->max_pkt;
-
+	try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
 	if (try_batch && msd_len + pktlen + net_device->pkt_align <
 	    net_device->send_section_size) {
 		section_index = msdp->pkt->send_buf_index;
@@ -873,7 +874,7 @@ int netvsc_send(struct hv_device *device,
 		section_index = msdp->pkt->send_buf_index;
 		packet->cp_partial = true;
 
-	} else if ((skb != NULL) && pktlen + net_device->pkt_align <
+	} else if (pktlen + net_device->pkt_align <
 		   net_device->send_section_size) {
 		section_index = netvsc_get_next_send_section(net_device);
 		if (section_index != NETVSC_INVALID_INDEX) {
@@ -1173,7 +1174,6 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
 				  struct vmbus_channel *channel,
 				  struct netvsc_device *net_device,
 				  struct net_device *ndev,
-				  u64 request_id,
 				  const struct vmpacket_descriptor *desc)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
@@ -1195,7 +1195,7 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
 
 	default:
 		netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
-			   desc->type, request_id);
+			   desc->type, desc->trans_id);
 		break;
 	}
 
@@ -1222,28 +1222,20 @@ int netvsc_poll(struct napi_struct *napi, int budget)
 	u16 q_idx = channel->offermsg.offer.sub_channel_index;
 	struct net_device *ndev = hv_get_drvdata(device);
 	struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
-	const struct vmpacket_descriptor *desc;
 	int work_done = 0;
 
-	desc = hv_pkt_iter_first(channel);
-	while (desc) {
-		int count;
-
-		count = netvsc_process_raw_pkt(device, channel, net_device,
-					       ndev, desc->trans_id, desc);
-		work_done += count;
-		desc = __hv_pkt_iter_next(channel, desc);
-
-		/* If receive packet budget is exhausted, reschedule */
-		if (work_done >= budget) {
-			work_done = budget;
-			break;
-		}
+	/* If starting a new interval */
+	if (!nvchan->desc)
+		nvchan->desc = hv_pkt_iter_first(channel);
+
+	while (nvchan->desc && work_done < budget) {
+		work_done += netvsc_process_raw_pkt(device, channel, net_device,
+						    ndev, nvchan->desc);
+		nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
 	}
-	hv_pkt_iter_close(channel);
 
-	/* If budget was not exhausted and
-	 * not doing busy poll
+	/* If receive ring was exhausted
+	 * and not doing busy poll
 	 * then re-enable host interrupts
 	 * and reschedule if ring is not empty.
 	 */
@@ -1253,7 +1245,9 @@ int netvsc_poll(struct napi_struct *napi, int budget)
 		napi_reschedule(napi);
 
 	netvsc_chk_recv_comp(net_device, channel, q_idx);
-	return work_done;
+
+	/* Driver may overshoot since multiple packets per descriptor */
+	return min(work_done, budget);
 }
 
 /* Call back when data is available in host ring buffer.
@@ -1263,10 +1257,12 @@ void netvsc_channel_cb(void *context)
 {
 	struct netvsc_channel *nvchan = context;
 
-	/* disable interupts from host */
-	hv_begin_read(&nvchan->channel->inbound);
+	if (napi_schedule_prep(&nvchan->napi)) {
+		/* disable interupts from host */
+		hv_begin_read(&nvchan->channel->inbound);
 
-	napi_schedule(&nvchan->napi);
+		__napi_schedule(&nvchan->napi);
+	}
 }
 
 /*
@@ -1325,9 +1321,7 @@ int netvsc_device_add(struct hv_device *device,
 	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
 	 * populated.
 	 */
-	wmb();
-
-	net_device_ctx->nvdev = net_device;
+	rcu_assign_pointer(net_device_ctx->nvdev, net_device);
 
 	/* Connect with the NetVsp */
 	ret = netvsc_connect_vsp(device);
@@ -1346,7 +1340,7 @@ close:
 	vmbus_close(device->channel);
 
 cleanup:
-	free_netvsc_device(net_device);
+	free_netvsc_device(&net_device->rcu);
 
 	return ret;
 }
@@ -62,7 +62,7 @@ static void do_set_multicast(struct work_struct *w)
 		container_of(w, struct net_device_context, work);
 	struct hv_device *device_obj = ndevctx->device_ctx;
 	struct net_device *ndev = hv_get_drvdata(device_obj);
-	struct netvsc_device *nvdev = ndevctx->nvdev;
+	struct netvsc_device *nvdev = rcu_dereference(ndevctx->nvdev);
 	struct rndis_device *rdev;
 
 	if (!nvdev)
@@ -116,7 +116,7 @@ static int netvsc_open(struct net_device *net)
 static int netvsc_close(struct net_device *net)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(net);
-	struct netvsc_device *nvdev = net_device_ctx->nvdev;
+	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
 	int ret;
 	u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
 	struct vmbus_channel *chn;
@@ -637,9 +637,9 @@ int netvsc_recv_callback(struct net_device *net,
 			 const struct ndis_pkt_8021q_info *vlan)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(net);
-	struct netvsc_device *net_device = net_device_ctx->nvdev;
+	struct netvsc_device *net_device;
 	u16 q_idx = channel->offermsg.offer.sub_channel_index;
-	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
+	struct netvsc_channel *nvchan;
 	struct net_device *vf_netdev;
 	struct sk_buff *skb;
 	struct netvsc_stats *rx_stats;
@@ -655,6 +655,11 @@ int netvsc_recv_callback(struct net_device *net,
 	 * interface in the guest.
 	 */
 	rcu_read_lock();
+	net_device = rcu_dereference(net_device_ctx->nvdev);
+	if (unlikely(!net_device))
+		goto drop;
+
+	nvchan = &net_device->chan_table[q_idx];
 	vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
 	if (vf_netdev && (vf_netdev->flags & IFF_UP))
 		net = vf_netdev;
@@ -663,6 +668,7 @@ int netvsc_recv_callback(struct net_device *net,
 	skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
 				    csum_info, vlan, data, len);
 	if (unlikely(!skb)) {
+drop:
 		++net->stats.rx_dropped;
 		rcu_read_unlock();
 		return NVSP_STAT_FAIL;
@@ -704,7 +710,7 @@ static void netvsc_get_channels(struct net_device *net,
 				struct ethtool_channels *channel)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(net);
-	struct netvsc_device *nvdev = net_device_ctx->nvdev;
+	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
 
 	if (nvdev) {
 		channel->max_combined = nvdev->max_chn;
@@ -741,8 +747,9 @@ static int netvsc_set_channels(struct net_device *net,
 {
 	struct net_device_context *net_device_ctx = netdev_priv(net);
 	struct hv_device *dev = net_device_ctx->device_ctx;
-	struct netvsc_device *nvdev = net_device_ctx->nvdev;
+	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
 	unsigned int count = channels->combined_count;
+	bool was_running;
 	int ret;
 
 	/* We do not support separate count for rx, tx, or other */
@@ -753,7 +760,7 @@ static int netvsc_set_channels(struct net_device *net,
 	if (count > net->num_tx_queues || count > net->num_rx_queues)
 		return -EINVAL;
 
-	if (net_device_ctx->start_remove || !nvdev || nvdev->destroy)
+	if (!nvdev || nvdev->destroy)
 		return -ENODEV;
 
 	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
@@ -762,11 +769,13 @@ static int netvsc_set_channels(struct net_device *net,
 	if (count > nvdev->max_chn)
 		return -EINVAL;
 
-	ret = netvsc_close(net);
-	if (ret)
-		return ret;
+	was_running = netif_running(net);
+	if (was_running) {
+		ret = netvsc_close(net);
+		if (ret)
+			return ret;
+	}
 
-	net_device_ctx->start_remove = true;
 	rndis_filter_device_remove(dev, nvdev);
 
 	ret = netvsc_set_queues(net, dev, count);
@@ -775,8 +784,8 @@ static int netvsc_set_channels(struct net_device *net,
 	else
 		netvsc_set_queues(net, dev, nvdev->num_chn);
 
-	netvsc_open(net);
-	net_device_ctx->start_remove = false;
+	if (was_running)
+		ret = netvsc_open(net);
 
 	/* We may have missed link change notifications */
 	schedule_delayed_work(&net_device_ctx->dwork, 0);
@@ -842,24 +851,27 @@ static int netvsc_set_link_ksettings(struct net_device *dev,
 static int netvsc_change_mtu(struct net_device *ndev, int mtu)
 {
 	struct net_device_context *ndevctx = netdev_priv(ndev);
-	struct netvsc_device *nvdev = ndevctx->nvdev;
+	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
 	struct hv_device *hdev = ndevctx->device_ctx;
 	struct netvsc_device_info device_info;
+	bool was_running;
 	int ret;
 
-	if (ndevctx->start_remove || !nvdev || nvdev->destroy)
+	if (!nvdev || nvdev->destroy)
 		return -ENODEV;
 
-	ret = netvsc_close(ndev);
-	if (ret)
-		goto out;
+	was_running = netif_running(ndev);
+	if (was_running) {
+		ret = netvsc_close(ndev);
+		if (ret)
+			return ret;
+	}
 
 	memset(&device_info, 0, sizeof(device_info));
 	device_info.ring_size = ring_size;
 	device_info.num_chn = nvdev->num_chn;
 	device_info.max_num_vrss_chns = nvdev->num_chn;
 
-	ndevctx->start_remove = true;
 	rndis_filter_device_remove(hdev, nvdev);
 
 	/* 'nvdev' has been freed in rndis_filter_device_remove() ->
@@ -872,9 +884,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
 
 	rndis_filter_device_add(hdev, &device_info);
 
-out:
-	netvsc_open(ndev);
-	ndevctx->start_remove = false;
+	if (was_running)
+		ret = netvsc_open(ndev);
 
 	/* We may have missed link change notifications */
 	schedule_delayed_work(&ndevctx->dwork, 0);
@@ -886,7 +897,7 @@ static void netvsc_get_stats64(struct net_device *net,
 			       struct rtnl_link_stats64 *t)
 {
 	struct net_device_context *ndev_ctx = netdev_priv(net);
-	struct netvsc_device *nvdev = ndev_ctx->nvdev;
+	struct netvsc_device *nvdev = rcu_dereference(ndev_ctx->nvdev);
 	int i;
 
 	if (!nvdev)
@@ -971,7 +982,10 @@ static const struct {
 static int netvsc_get_sset_count(struct net_device *dev, int string_set)
 {
 	struct net_device_context *ndc = netdev_priv(dev);
-	struct netvsc_device *nvdev = ndc->nvdev;
+	struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
 
+	if (!nvdev)
+		return -ENODEV;
+
 	switch (string_set) {
 	case ETH_SS_STATS:
@@ -985,13 +999,16 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
 				     struct ethtool_stats *stats, u64 *data)
 {
 	struct net_device_context *ndc = netdev_priv(dev);
-	struct netvsc_device *nvdev = ndc->nvdev;
+	struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
 	const void *nds = &ndc->eth_stats;
 	const struct netvsc_stats *qstats;
 	unsigned int start;
 	u64 packets, bytes;
 	int i, j;
 
+	if (!nvdev)
+		return;
+
 	for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
 		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
 
@@ -1020,10 +1037,13 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
 static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
 	struct net_device_context *ndc = netdev_priv(dev);
-	struct netvsc_device *nvdev = ndc->nvdev;
+	struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
 	u8 *p = data;
 	int i;
 
+	if (!nvdev)
+		return;
+
 	switch (stringset) {
 	case ETH_SS_STATS:
 		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
@@ -1075,7 +1095,10 @@ netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
 		 u32 *rules)
 {
 	struct net_device_context *ndc = netdev_priv(dev);
-	struct netvsc_device *nvdev = ndc->nvdev;
+	struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
 
+	if (!nvdev)
+		return -ENODEV;
+
 	switch (info->cmd) {
 	case ETHTOOL_GRXRINGS:
@@ -1111,10 +1134,13 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
 			   u8 *hfunc)
 {
 	struct net_device_context *ndc = netdev_priv(dev);
-	struct netvsc_device *ndev = ndc->nvdev;
+	struct netvsc_device *ndev = rcu_dereference(ndc->nvdev);
 	struct rndis_device *rndis_dev = ndev->extension;
 	int i;
 
+	if (!ndev)
+		return -ENODEV;
+
 	if (hfunc)
 		*hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
 
@@ -1133,10 +1159,13 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
 			   const u8 *key, const u8 hfunc)
 {
 	struct net_device_context *ndc = netdev_priv(dev);
-	struct netvsc_device *ndev = ndc->nvdev;
+	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
 	struct rndis_device *rndis_dev = ndev->extension;
 	int i;
 
+	if (!ndev)
+		return -ENODEV;
+
 	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
 		return -EOPNOTSUPP;
 
@@ -1210,10 +1239,10 @@ static void netvsc_link_change(struct work_struct *w)
 	unsigned long flags, next_reconfig, delay;
 
 	rtnl_lock();
-	if (ndev_ctx->start_remove)
+	net_device = rtnl_dereference(ndev_ctx->nvdev);
+	if (!net_device)
 		goto out_unlock;
 
-	net_device = ndev_ctx->nvdev;
 	rdev = net_device->extension;
 
 	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
@@ -1354,7 +1383,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
 		return NOTIFY_DONE;
 
 	net_device_ctx = netdev_priv(ndev);
-	netvsc_dev = net_device_ctx->nvdev;
+	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
 	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
 		return NOTIFY_DONE;
 
@@ -1380,7 +1409,7 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
 		return NOTIFY_DONE;
 
 	net_device_ctx = netdev_priv(ndev);
-	netvsc_dev = net_device_ctx->nvdev;
+	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
 
 	netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
 
@@ -1414,7 +1443,7 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
 		return NOTIFY_DONE;
 
 	net_device_ctx = netdev_priv(ndev);
-	netvsc_dev = net_device_ctx->nvdev;
+	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
 
 	netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
 	netvsc_switch_datapath(ndev, false);
@@ -1474,8 +1503,6 @@ static int netvsc_probe(struct hv_device *dev,
 
 	hv_set_drvdata(dev, net);
 
-	net_device_ctx->start_remove = false;
-
 	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
 	INIT_WORK(&net_device_ctx->work, do_set_multicast);
 
@@ -1492,8 +1519,7 @@ static int netvsc_probe(struct hv_device *dev,
 	/* Notify the netvsc driver of the new device */
 	memset(&device_info, 0, sizeof(device_info));
 	device_info.ring_size = ring_size;
-	device_info.max_num_vrss_chns = min_t(u32, VRSS_CHANNEL_DEFAULT,
-					      num_online_cpus());
+	device_info.num_chn = VRSS_CHANNEL_DEFAULT;
 	ret = rndis_filter_device_add(dev, &device_info);
 	if (ret != 0) {
 		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
@@ -1509,6 +1535,7 @@ static int netvsc_probe(struct hv_device *dev,
 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 	net->vlan_features = net->features;
 
+	/* RCU not necessary here, device not registered */
 	nvdev = net_device_ctx->nvdev;
 	netif_set_real_num_tx_queues(net, nvdev->num_chn);
 	netif_set_real_num_rx_queues(net, nvdev->num_chn);
@@ -1544,26 +1571,20 @@ static int netvsc_remove(struct hv_device *dev)
 
 	ndev_ctx = netdev_priv(net);
 
-	/* Avoid racing with netvsc_change_mtu()/netvsc_set_channels()
-	 * removing the device.
-	 */
-	rtnl_lock();
-	ndev_ctx->start_remove = true;
-	rtnl_unlock();
+	netif_device_detach(net);
 
 	cancel_delayed_work_sync(&ndev_ctx->dwork);
 	cancel_work_sync(&ndev_ctx->work);
 
-	/* Stop outbound asap */
-	netif_tx_disable(net);
-
-	unregister_netdev(net);
-
 	/*
 	 * Call to the vsc driver to let it know that the device is being
-	 * removed
+	 * removed. Also blocks mtu and channel changes.
 	 */
+	rtnl_lock();
 	rndis_filter_device_remove(dev, ndev_ctx->nvdev);
+	rtnl_unlock();
+
+	unregister_netdev(net);
 
 	hv_set_drvdata(dev, NULL);
 
@@ -819,16 +819,14 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
 {
 	struct rndis_request *request;
 	struct rndis_set_request *set;
-	struct rndis_set_complete *set_complete;
 	int ret;
 
 	request = get_rndis_request(dev, RNDIS_MSG_SET,
 			RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
 			sizeof(u32));
-	if (!request) {
-		ret = -ENOMEM;
-		goto cleanup;
-	}
+	if (!request)
+		return -ENOMEM;
 
 	/* Setup the rndis set */
 	set = &request->request_msg.msg.set_req;
@@ -840,15 +838,11 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
 				&new_filter, sizeof(u32));
 
 	ret = rndis_filter_send_request(dev, request);
-	if (ret != 0)
-		goto cleanup;
+	if (ret == 0)
+		wait_for_completion(&request->wait_event);
 
-	wait_for_completion(&request->wait_event);
+	put_rndis_request(dev, request);
 
-	set_complete = &request->response_msg.msg.set_complete;
-
-cleanup:
-	if (request)
-		put_rndis_request(dev, request);
 	return ret;
 }
 
@@ -926,8 +920,6 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
 	struct rndis_halt_request *halt;
 	struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
 	struct netvsc_device *nvdev = net_device_ctx->nvdev;
-	struct hv_device *hdev = net_device_ctx->device_ctx;
-	ulong flags;
 
 	/* Attempt to do a rndis device halt */
 	request = get_rndis_request(dev, RNDIS_MSG_HALT,
@@ -945,9 +937,10 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
 	dev->state = RNDIS_DEV_UNINITIALIZED;
 
 cleanup:
-	spin_lock_irqsave(&hdev->channel->inbound_lock, flags);
 	nvdev->destroy = true;
-	spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags);
+
+	/* Force flag to be ordered before waiting */
+	wmb();
 
 	/* Wait for all send completions */
 	wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));
@@ -997,7 +990,6 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
 	struct netvsc_device *nvscdev = net_device_to_netvsc_device(ndev);
 	u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
 	struct netvsc_channel *nvchan;
-	unsigned long flags;
 	int ret;
 
 	if (chn_index >= nvscdev->num_chn)
@@ -1019,10 +1011,7 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
 
 	napi_enable(&nvchan->napi);
 
-	spin_lock_irqsave(&nvscdev->sc_lock, flags);
-	nvscdev->num_sc_offered--;
-	spin_unlock_irqrestore(&nvscdev->sc_lock, flags);
-	if (nvscdev->num_sc_offered == 0)
+	if (refcount_dec_and_test(&nvscdev->sc_offered))
 		complete(&nvscdev->channel_init_wait);
 }
 
@@ -1039,12 +1028,9 @@ int rndis_filter_device_add(struct hv_device *dev,
 	struct ndis_recv_scale_cap rsscap;
 	u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
 	unsigned int gso_max_size = GSO_MAX_SIZE;
-	u32 mtu, size;
-	u32 num_rss_qs;
-	u32 sc_delta;
+	u32 mtu, size, num_rss_qs;
 	const struct cpumask *node_cpu_mask;
 	u32 num_possible_rss_qs;
-	unsigned long flags;
 	int i, ret;
 
 	rndis_device = get_rndis_device();
@@ -1067,7 +1053,7 @@ int rndis_filter_device_add(struct hv_device *dev,
 	net_device->max_chn = 1;
 	net_device->num_chn = 1;
 
-	spin_lock_init(&net_device->sc_lock);
+	refcount_set(&net_device->sc_offered, 0);
 
 	net_device->extension = rndis_device;
 	rndis_device->ndev = net;
@@ -1181,34 +1167,30 @@ int rndis_filter_device_add(struct hv_device *dev,
 	if (ret || rsscap.num_recv_que < 2)
 		goto out;
 
-	net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, rsscap.num_recv_que);
-
-	num_rss_qs = min(device_info->max_num_vrss_chns, net_device->max_chn);
-
 	/*
 	 * We will limit the VRSS channels to the number CPUs in the NUMA node
 	 * the primary channel is currently bound to.
+	 *
+	 * This also guarantees that num_possible_rss_qs <= num_online_cpus
 	 */
 	node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
-	num_possible_rss_qs = cpumask_weight(node_cpu_mask);
+	num_possible_rss_qs = min_t(u32, cpumask_weight(node_cpu_mask),
+				    rsscap.num_recv_que);
+
+	net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);
 
 	/* We will use the given number of channels if available. */
-	if (device_info->num_chn && device_info->num_chn < net_device->max_chn)
-		net_device->num_chn = device_info->num_chn;
-	else
-		net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
-
-	num_rss_qs = net_device->num_chn - 1;
+	net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
 
 	for (i = 0; i < ITAB_NUM; i++)
 		rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,
						net_device->num_chn);
 
-	net_device->num_sc_offered = num_rss_qs;
-
-	if (net_device->num_chn == 1)
-		goto out;
+	num_rss_qs = net_device->num_chn - 1;
+	if (num_rss_qs == 0)
+		return 0;
 
+	refcount_set(&net_device->sc_offered, num_rss_qs);
 	vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
 
 	init_packet = &net_device->channel_init_pkt;
@@ -1224,32 +1206,23 @@ int rndis_filter_device_add(struct hv_device *dev,
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 	if (ret)
 		goto out;
-	wait_for_completion(&net_device->channel_init_wait);
 
-	if (init_packet->msg.v5_msg.subchn_comp.status !=
-	    NVSP_STAT_SUCCESS) {
+	if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
 		ret = -ENODEV;
 		goto out;
 	}
+	wait_for_completion(&net_device->channel_init_wait);
 
 	net_device->num_chn = 1 +
 		init_packet->msg.v5_msg.subchn_comp.num_subchannels;
 
-	ret = rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
-					 net_device->num_chn);
-	/*
-	 * Set the number of sub-channels to be received.
-	 */
-	spin_lock_irqsave(&net_device->sc_lock, flags);
-	sc_delta = num_rss_qs - (net_device->num_chn - 1);
-	net_device->num_sc_offered -= sc_delta;
-	spin_unlock_irqrestore(&net_device->sc_lock, flags);
-
+	/* ignore failues from setting rss parameters, still have channels */
+	rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
+				   net_device->num_chn);
 out:
 	if (ret) {
 		net_device->max_chn = 1;
 		net_device->num_chn = 1;
-		net_device->num_sc_offered = 0;
 	}
 
 	return 0; /* return 0 because primary channel can be used alone */
@@ -1264,12 +1237,6 @@ void rndis_filter_device_remove(struct hv_device *dev,
 {
 	struct rndis_device *rndis_dev = net_dev->extension;
 
-	/* If not all subchannel offers are complete, wait for them until
-	 * completion to avoid race.
-	 */
-	if (net_dev->num_sc_offered > 0)
-		wait_for_completion(&net_dev->channel_init_wait);
-
 	/* Halt and release the rndis device */
 	rndis_filter_halt_device(rndis_dev);
 