static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
-static void release_sub_crqs(struct ibmvnic_adapter *);
+static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
-static void send_login(struct ibmvnic_adapter *adapter);
+static int send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
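+ /* Size the stats arrays for the maximum supported queue count so they
+  * do not need to be reallocated when the number of queues changes.
+  */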
adapter->tx_stats_buffers =
- kcalloc(adapter->req_tx_queues,
+ kcalloc(IBMVNIC_MAX_QUEUES,
sizeof(struct ibmvnic_tx_queue_stats),
GFP_KERNEL);
if (!adapter->tx_stats_buffers)
return -ENOMEM;
adapter->rx_stats_buffers =
- kcalloc(adapter->req_rx_queues,
+ kcalloc(IBMVNIC_MAX_QUEUES,
sizeof(struct ibmvnic_rx_queue_stats),
GFP_KERNEL);
if (!adapter->rx_stats_buffers)
return -ENOMEM;
}
- adapter->num_active_rx_pools = 0;
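+ /* Record the pool count before the init loop so a partial setup can
+  * be unwound by release_rx_pools().
+  */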
+ adapter->num_active_rx_pools = rxadd_subcrqs;
for (i = 0; i < rxadd_subcrqs; i++) {
rx_pool = &adapter->rx_pool[i];
rx_pool->next_free = 0;
}
- adapter->num_active_rx_pools = rxadd_subcrqs;
-
return 0;
}
if (!adapter->tx_pool)
return -1;
- adapter->num_active_tx_pools = 0;
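+ /* Record the pool count before the loop so the failure path below can
+  * release everything allocated so far.
+  */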
+ adapter->num_active_tx_pools = tx_subcrqs;
for (i = 0; i < tx_subcrqs; i++) {
tx_pool = &adapter->tx_pool[i];
if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
adapter->req_tx_entries_per_subcrq *
- adapter->req_mtu)) {
+ (adapter->req_mtu + VLAN_HLEN))) {
release_tx_pools(adapter);
return -1;
}
tx_pool->producer_index = 0;
}
- adapter->num_active_tx_pools = tx_subcrqs;
-
return 0;
}
adapter->napi_enabled = false;
}
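+/* Allocate and register a NAPI context for each requested RX queue */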
+static int init_napi(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ adapter->napi = kcalloc(adapter->req_rx_queues,
+ sizeof(struct napi_struct), GFP_KERNEL);
+ if (!adapter->napi)
+ return -ENOMEM;
+
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
+ netif_napi_add(adapter->netdev, &adapter->napi[i],
+ ibmvnic_poll, NAPI_POLL_WEIGHT);
+ }
+
+ adapter->num_active_rx_napi = adapter->req_rx_queues;
+ return 0;
+}
+
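+/* Unregister and free all NAPI contexts tracked by num_active_rx_napi */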
+static void release_napi(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ if (!adapter->napi)
+ return;
+
+ for (i = 0; i < adapter->num_active_rx_napi; i++) {
+ netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
+ netif_napi_del(&adapter->napi[i]);
+ }
+
+ kfree(adapter->napi);
+ adapter->napi = NULL;
+ adapter->num_active_rx_napi = 0;
+}
+
static int ibmvnic_login(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
do {
if (adapter->renegotiate) {
adapter->renegotiate = false;
- release_sub_crqs(adapter);
+ release_sub_crqs(adapter, 1);
reinit_completion(&adapter->init_done);
send_cap_queries(adapter);
}
reinit_completion(&adapter->init_done);
- send_login(adapter);
- if (!wait_for_completion_timeout(&adapter->init_done,
+ rc = send_login(adapter);
+ if (rc) {
+ dev_err(dev, "Unable to attempt device login\n");
+ return rc;
+ } else if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
dev_err(dev, "Login timeout\n");
return -1;
static void release_resources(struct ibmvnic_adapter *adapter)
{
- int i;
-
release_vpd_data(adapter);
release_tx_pools(adapter);
release_rx_pools(adapter);
- release_stats_token(adapter);
- release_stats_buffers(adapter);
release_error_buffers(adapter);
-
- if (adapter->napi) {
- for (i = 0; i < adapter->req_rx_queues; i++) {
- if (&adapter->napi[i]) {
- netdev_dbg(adapter->netdev,
- "Releasing napi[%d]\n", i);
- netif_napi_del(&adapter->napi[i]);
- }
- }
- }
- kfree(adapter->napi);
- adapter->napi = NULL;
-
+ release_napi(adapter);
release_login_rsp_buffer(adapter);
}
static int init_resources(struct ibmvnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- int i, rc;
+ int rc;
rc = set_real_num_queues(netdev);
if (rc)
return rc;
- rc = init_stats_buffers(adapter);
- if (rc)
- return rc;
-
- rc = init_stats_token(adapter);
- if (rc)
- return rc;
-
adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
if (!adapter->vpd)
return -ENOMEM;
}
adapter->map_id = 1;
- adapter->napi = kcalloc(adapter->req_rx_queues,
- sizeof(struct napi_struct), GFP_KERNEL);
- if (!adapter->napi)
- return -ENOMEM;
- for (i = 0; i < adapter->req_rx_queues; i++) {
- netdev_dbg(netdev, "Adding napi[%d]\n", i);
- netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
- NAPI_POLL_WEIGHT);
- }
+ rc = init_napi(adapter);
+ if (rc)
+ return rc;
send_map_query(adapter);
static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rx_pool *rx_pool;
+ struct ibmvnic_rx_buff *rx_buff;
u64 rx_entries;
int rx_scrqs;
int i, j;
/* Free any remaining skbs in the rx buffer pools */
for (i = 0; i < rx_scrqs; i++) {
rx_pool = &adapter->rx_pool[i];
- if (!rx_pool)
+ if (!rx_pool || !rx_pool->rx_buff)
continue;
netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
for (j = 0; j < rx_entries; j++) {
- if (rx_pool->rx_buff[j].skb) {
- dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
- rx_pool->rx_buff[j].skb = NULL;
+ rx_buff = &rx_pool->rx_buff[j];
+ if (rx_buff && rx_buff->skb) {
+ dev_kfree_skb_any(rx_buff->skb);
+ rx_buff->skb = NULL;
}
}
}
static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_tx_pool *tx_pool;
+ struct ibmvnic_tx_buff *tx_buff;
u64 tx_entries;
int tx_scrqs;
int i, j;
/* Free any remaining skbs in the tx buffer pools */
for (i = 0; i < tx_scrqs; i++) {
tx_pool = &adapter->tx_pool[i];
- if (!tx_pool)
+ if (!tx_pool || !tx_pool->tx_buff)
continue;
netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
for (j = 0; j < tx_entries; j++) {
- if (tx_pool->tx_buff[j].skb) {
- dev_kfree_skb_any(tx_pool->tx_buff[j].skb);
- tx_pool->tx_buff[j].skb = NULL;
+ tx_buff = &tx_pool->tx_buff[j];
+ if (tx_buff && tx_buff->skb) {
+ dev_kfree_skb_any(tx_buff->skb);
+ tx_buff->skb = NULL;
}
}
}
}
-static int __ibmvnic_close(struct net_device *netdev)
+static void ibmvnic_cleanup(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- int rc = 0;
int i;
- adapter->state = VNIC_CLOSING;
-
/* ensure that transmissions are stopped if called by do_reset */
if (adapter->resetting)
netif_tx_disable(netdev);
if (adapter->tx_scrq) {
for (i = 0; i < adapter->req_tx_queues; i++)
if (adapter->tx_scrq[i]->irq) {
- netdev_dbg(adapter->netdev,
+ netdev_dbg(netdev,
"Disabling tx_scrq[%d] irq\n", i);
disable_irq(adapter->tx_scrq[i]->irq);
}
}
- rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
- if (rc)
- return rc;
-
if (adapter->rx_scrq) {
for (i = 0; i < adapter->req_rx_queues; i++) {
- int retries = 10;
-
- while (pending_scrq(adapter, adapter->rx_scrq[i])) {
- retries--;
- mdelay(100);
-
- if (retries == 0)
- break;
- }
-
if (adapter->rx_scrq[i]->irq) {
- netdev_dbg(adapter->netdev,
+ netdev_dbg(netdev,
"Disabling rx_scrq[%d] irq\n", i);
disable_irq(adapter->rx_scrq[i]->irq);
}
}
clean_rx_pools(adapter);
clean_tx_pools(adapter);
+}
+
+static int __ibmvnic_close(struct net_device *netdev)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int rc = 0;
+
+ adapter->state = VNIC_CLOSING;
+ rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
+ if (rc)
+ return rc;
+ ibmvnic_cleanup(netdev);
adapter->state = VNIC_CLOSED;
- return rc;
+ return 0;
}
static int ibmvnic_close(struct net_device *netdev)
int len = 0;
u8 *hdr;
- hdr_len[0] = sizeof(struct ethhdr);
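+ /* A VLAN tag carried in the frame itself enlarges the L2 header */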
+ if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
+ hdr_len[0] = sizeof(struct vlan_ethhdr);
+ else
+ hdr_len[0] = sizeof(struct ethhdr);
if (skb->protocol == htons(ETH_P_IP)) {
hdr_len[1] = ip_hdr(skb)->ihl * 4;
if (tx_pool->tso_index == IBMVNIC_TSO_BUFS)
tx_pool->tso_index = 0;
} else {
- offset = index * adapter->req_mtu;
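+ /* Each long term buffer slot reserves room for a VLAN tag on top of
+  * the MTU.
+  */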
+ offset = index * (adapter->req_mtu + VLAN_HLEN);
dst = tx_pool->long_term_buff.buff + offset;
- memset(dst, 0, adapter->req_mtu);
+ memset(dst, 0, adapter->req_mtu + VLAN_HLEN);
data_dma_addr = tx_pool->long_term_buff.addr + offset;
}
if ((*hdrs >> 7) & 1) {
build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
tx_crq.v1.n_crq_elem = num_entries;
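+ /* Remember how many descriptors this skb uses so the completion
+  * handler can credit the queue accurately.
+  */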
+ tx_buff->num_entries = num_entries;
tx_buff->indir_arr[0] = tx_crq;
tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
sizeof(tx_buff->indir_arr),
(u64)tx_buff->indir_dma,
(u64)num_entries);
} else {
+ tx_buff->num_entries = num_entries;
lpar_rc = send_subcrq(adapter, handle_array[queue_num],
&tx_crq);
}
goto out;
}
- if (atomic_inc_return(&tx_scrq->used)
+ if (atomic_add_return(num_entries, &tx_scrq->used)
>= adapter->req_tx_entries_per_subcrq) {
- netdev_info(netdev, "Stopping queue %d\n", queue_num);
+ netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
netif_stop_subqueue(netdev, queue_num);
}
rc = ibmvnic_reenable_crq_queue(adapter);
if (rc)
return 0;
+ ibmvnic_cleanup(netdev);
+ } else if (rwi->reset_reason == VNIC_RESET_FAILOVER) {
+ ibmvnic_cleanup(netdev);
+ } else {
+ rc = __ibmvnic_close(netdev);
+ if (rc)
+ return rc;
}
- rc = __ibmvnic_close(netdev);
- if (rc)
- return rc;
-
if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
adapter->wait_for_reset) {
release_resources(adapter);
- release_sub_crqs(adapter);
+ release_sub_crqs(adapter, 1);
release_crq_queue(adapter);
}
release_tx_pools(adapter);
init_rx_pools(netdev);
init_tx_pools(netdev);
+
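+ /* NAPI contexts are sized by the RX queue count, so rebuild them
+  * along with the buffer pools.
+  */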
+ release_napi(adapter);
+ init_napi(adapter);
} else {
rc = reset_tx_pools(adapter);
if (rc)
}
static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
- struct ibmvnic_sub_crq_queue *scrq)
+ struct ibmvnic_sub_crq_queue *scrq,
+ bool do_h_free)
{
struct device *dev = &adapter->vdev->dev;
long rc;
netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
- /* Close the sub-crqs */
- do {
- rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
- adapter->vdev->unit_address,
- scrq->crq_num);
- } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
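+ /* Only issue H_FREE_SUB_CRQ when the caller asks for it; some reset
+  * paths release the local queue state without the hypervisor call.
+  */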
+ if (do_h_free) {
+ /* Close the sub-crqs */
+ do {
+ rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
+ adapter->vdev->unit_address,
+ scrq->crq_num);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
- if (rc) {
- netdev_err(adapter->netdev,
- "Failed to release sub-CRQ %16lx, rc = %ld\n",
- scrq->crq_num, rc);
+ if (rc) {
+ netdev_err(adapter->netdev,
+ "Failed to release sub-CRQ %16lx, rc = %ld\n",
+ scrq->crq_num, rc);
+ }
}
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
return NULL;
}
-static void release_sub_crqs(struct ibmvnic_adapter *adapter)
+static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
{
int i;
if (adapter->tx_scrq) {
- for (i = 0; i < adapter->req_tx_queues; i++) {
+ for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
if (!adapter->tx_scrq[i])
continue;
adapter->tx_scrq[i]->irq = 0;
}
- release_sub_crq_queue(adapter, adapter->tx_scrq[i]);
+ release_sub_crq_queue(adapter, adapter->tx_scrq[i],
+ do_h_free);
}
kfree(adapter->tx_scrq);
adapter->tx_scrq = NULL;
+ adapter->num_active_tx_scrqs = 0;
}
if (adapter->rx_scrq) {
- for (i = 0; i < adapter->req_rx_queues; i++) {
+ for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
if (!adapter->rx_scrq[i])
continue;
adapter->rx_scrq[i]->irq = 0;
}
- release_sub_crq_queue(adapter, adapter->rx_scrq[i]);
+ release_sub_crq_queue(adapter, adapter->rx_scrq[i],
+ do_h_free);
}
kfree(adapter->rx_scrq);
adapter->rx_scrq = NULL;
+ adapter->num_active_rx_scrqs = 0;
}
}
restart_loop:
while (pending_scrq(adapter, scrq)) {
unsigned int pool = scrq->pool_index;
+ int num_entries = 0;
next = ibmvnic_next_scrq(adapter, scrq);
for (i = 0; i < next->tx_comp.num_comps; i++) {
txbuff->skb = NULL;
}
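+ /* Credit back every descriptor consumed by the completed buffer, not
+  * just one per completion.
+  */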
+ num_entries += txbuff->num_entries;
+
adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
producer_index] = index;
adapter->tx_pool[pool].producer_index =
/* remove tx_comp scrq*/
next->tx_comp.first = 0;
- if (atomic_sub_return(next->tx_comp.num_comps, &scrq->used) <=
+ if (atomic_sub_return(num_entries, &scrq->used) <=
(adapter->req_tx_entries_per_subcrq / 2) &&
__netif_subqueue_stopped(adapter->netdev,
scrq->pool_index)) {
netif_wake_subqueue(adapter->netdev, scrq->pool_index);
- netdev_info(adapter->netdev, "Started queue %d\n",
- scrq->pool_index);
+ netdev_dbg(adapter->netdev, "Started queue %d\n",
+ scrq->pool_index);
}
}
dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
scrq->irq, rc);
irq_dispose_mapping(scrq->irq);
- goto req_rx_irq_failed;
+ goto req_tx_irq_failed;
}
}
free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
irq_dispose_mapping(adapter->rx_scrq[j]->irq);
}
- release_sub_crqs(adapter);
+ release_sub_crqs(adapter, 1);
return rc;
}
for (i = 0; i < adapter->req_tx_queues; i++) {
adapter->tx_scrq[i] = allqueues[i];
adapter->tx_scrq[i]->pool_index = i;
+ adapter->num_active_tx_scrqs++;
}
adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
for (i = 0; i < adapter->req_rx_queues; i++) {
adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
adapter->rx_scrq[i]->scrq_num = i;
+ adapter->num_active_rx_scrqs++;
}
kfree(allqueues);
adapter->tx_scrq = NULL;
tx_failed:
for (i = 0; i < registered_queues; i++)
- release_sub_crq_queue(adapter, allqueues[i]);
+ release_sub_crq_queue(adapter, allqueues[i], 1);
kfree(allqueues);
return -1;
}
strncpy(&vlcd->name, adapter->netdev->name, len);
}
-static void send_login(struct ibmvnic_adapter *adapter)
+static int send_login(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
struct ibmvnic_login_buffer *login_buffer;
struct vnic_login_client_data *vlcd;
int i;
+ if (!adapter->tx_scrq || !adapter->rx_scrq) {
+ netdev_err(adapter->netdev,
+ "RX or TX queues are not allocated, device login failed\n");
+ return -1;
+ }
+
release_login_rsp_buffer(adapter);
client_data_len = vnic_client_data_len(adapter);
crq.login.len = cpu_to_be32(buffer_size);
ibmvnic_send_crq(adapter, &crq);
- return;
+ return 0;
buf_rsp_map_failed:
kfree(login_rsp_buffer);
buf_map_failed:
kfree(login_buffer);
buf_alloc_failed:
- return;
+ return -1;
}
static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
{
struct device *dev = &adapter->vdev->dev;
unsigned long timeout = msecs_to_jiffies(30000);
+ u64 old_num_rx_queues, old_num_tx_queues;
int rc;
if (adapter->resetting && !adapter->wait_for_reset) {
adapter->from_passive_init = false;
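+ /* Save the current queue counts so a change during re-negotiation can
+  * be detected below.
+  */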
+ old_num_rx_queues = adapter->req_rx_queues;
+ old_num_tx_queues = adapter->req_tx_queues;
+
init_completion(&adapter->init_done);
adapter->init_done_rc = 0;
ibmvnic_send_crq_init(adapter);
return -1;
}
- if (adapter->resetting && !adapter->wait_for_reset)
- rc = reset_sub_crq_queues(adapter);
- else
+ if (adapter->resetting && !adapter->wait_for_reset) {
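+ /* If the capability exchange changed the queue counts, the existing
+  * sub-CRQs cannot be reused; free and reallocate them instead of
+  * resetting.
+  */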
+ if (adapter->req_rx_queues != old_num_rx_queues ||
+ adapter->req_tx_queues != old_num_tx_queues) {
+ release_sub_crqs(adapter, 0);
+ rc = init_sub_crqs(adapter);
+ } else {
+ rc = reset_sub_crq_queues(adapter);
+ }
+ } else {
rc = init_sub_crqs(adapter);
+ }
+
if (rc) {
dev_err(dev, "Initialization of sub crqs failed\n");
release_crq_queue(adapter);
release_crq_queue(adapter);
}
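+ /* The stats buffers are sized for the maximum queue count, so they
+  * can be allocated once during adapter initialization rather than in
+  * init_resources().
+  */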
+ rc = init_stats_buffers(adapter);
+ if (rc)
+ return rc;
+
+ rc = init_stats_token(adapter);
+ if (rc)
+ return rc;
+
return rc;
}
device_remove_file(&dev->dev, &dev_attr_failover);
ibmvnic_init_fail:
- release_sub_crqs(adapter);
+ release_sub_crqs(adapter, 1);
release_crq_queue(adapter);
free_netdev(netdev);
mutex_lock(&adapter->reset_lock);
release_resources(adapter);
- release_sub_crqs(adapter);
+ release_sub_crqs(adapter, 1);
release_crq_queue(adapter);
+ release_stats_token(adapter);
+ release_stats_buffers(adapter);
+
adapter->state = VNIC_REMOVED;
mutex_unlock(&adapter->reset_lock);