sfc: Make initial fill of RX descriptors synchronous
author    Jon Cooper <jcooper@solarflare.com>
          Wed, 2 Oct 2013 10:04:14 +0000 (11:04 +0100)
committer Ben Hutchings <bhutchings@solarflare.com>
          Thu, 12 Dec 2013 22:06:50 +0000 (22:06 +0000)

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/farch.c
drivers/net/ethernet/sfc/rx.c
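
The patch threads a bool atomic argument from every refill call site down to the page allocation in efx_init_rx_buffers(). Refill paths that run from event/NAPI context continue to pass true and allocate with GFP_ATOMIC, while the initial fill in efx_start_datapath() now runs synchronously in process context and passes false, so it can allocate with GFP_KERNEL and sleep or reclaim instead of failing under memory pressure. A minimal standalone sketch of the pattern, using hypothetical names rather than the driver's own:

#include <stdbool.h>
#include <stdlib.h>

/* Stand-in for the driver's page allocation: with atomic == true the
 * allocator must not sleep and may fail (GFP_ATOMIC); with false it
 * may block and reclaim (GFP_KERNEL).  Plain malloc() models both. */
static void *alloc_rx_page(bool atomic)
{
        (void)atomic;
        return malloc(4096);
}

/* Each caller states its context once; the flag is threaded down. */
static int rxq_refill(bool atomic)
{
        void *page = alloc_rx_page(atomic);

        if (page == NULL)
                return -1;      /* -ENOMEM: caller backs off, retries later */
        free(page);
        return 0;
}

int main(void)
{
        rxq_refill(false);      /* initial fill: process context, may block */
        rxq_refill(true);       /* event/NAPI path: must not sleep */
        return 0;
}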

diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 2f77359607d2c03803d82ae9e6cb2e17d551883b..e9c546bdbdfedae8b1debc9f0e2cd41fa04bec9e 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1907,7 +1907,7 @@ static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
                 * events, so efx_process_channel() won't refill the
                 * queue. Refill it here
                 */
-               efx_fast_push_rx_descriptors(&channel->rx_queue);
+               efx_fast_push_rx_descriptors(&channel->rx_queue, true);
                break;
        default:
                netif_err(efx, hw, efx->net_dev,
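
This call site (and its twin in farch.c further down) runs from the channel's event handler inside the NAPI poll loop, where sleeping is forbidden, so it passes true and keeps the old GFP_ATOMIC behaviour. In terms of the hypothetical rxq_refill() sketch above:

static void handle_fill_event(void)
{
        /* Event-handler context is atomic: the refill must not sleep,
         * so the non-blocking allocation mode is requested explicitly. */
        rxq_refill(true);
}

The context is known statically at each call site, which is why the patch passes it as a parameter instead of detecting it at run time.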
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 8694181643646f545e181c44b251e95b0ea6b324..5e2454d071374641938541c47f84ac97acf93f04 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -253,7 +253,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
                        efx_channel_get_rx_queue(channel);
 
                efx_rx_flush_packet(channel);
-               efx_fast_push_rx_descriptors(rx_queue);
+               efx_fast_push_rx_descriptors(rx_queue, true);
        }
 
        return spent;
@@ -646,7 +646,9 @@ static void efx_start_datapath(struct efx_nic *efx)
                efx_for_each_channel_rx_queue(rx_queue, channel) {
                        efx_init_rx_queue(rx_queue);
                        atomic_inc(&efx->active_queues);
-                       efx_nic_generate_fill_event(rx_queue);
+                       efx_stop_eventq(channel);
+                       efx_fast_push_rx_descriptors(rx_queue, false);
+                       efx_start_eventq(channel);
                }
 
                WARN_ON(channel->rx_pkt_n_frags);
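
This hunk is the core of the change. Rather than generating a fill event and letting the event handler perform a non-blocking fill later, efx_start_datapath() now stops the channel's event queue, fills the RX ring synchronously with blocking allocations, and restarts the event queue. Quiescing the event queue is what satisfies the rule documented on efx_fast_push_rx_descriptors() in rx.c below: the function must run from the NAPI handler or while NAPI is disabled. A standalone sketch of the quiesce/fill/resume shape, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

static void eventq_stop(void)  { puts("event processing stopped"); }
static void eventq_start(void) { puts("event processing resumed"); }

static void refill(bool atomic)
{
        puts(atomic ? "non-blocking refill" : "blocking refill");
}

int main(void)
{
        eventq_stop();          /* nothing races the fill now */
        refill(false);          /* process context: allocator may sleep */
        eventq_start();         /* resume event-driven operation */
        return 0;
}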
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index b8235ee5d7d739ae879aaba810620740951af8ff..a653786fbbe7546b8286652bef643fa5f61573f3 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -37,7 +37,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
 void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
 void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
 void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
 void efx_rx_slow_fill(unsigned long context);
 void __efx_rx_packet(struct efx_channel *channel);
 void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index c0907d884d754e98992694bff36bae9d57b71642..984e85ee76f6ee67b48713786f6717cb3a9a1488 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -1147,7 +1147,7 @@ static void efx_farch_handle_generated_event(struct efx_channel *channel,
                /* The queue must be empty, so we won't receive any rx
                 * events, so efx_process_channel() won't refill the
                 * queue. Refill it here */
-               efx_fast_push_rx_descriptors(rx_queue);
+               efx_fast_push_rx_descriptors(rx_queue, true);
        } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
                efx_farch_handle_drain_event(channel);
        } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 42488df1f4ec2af02feb9a46c408168b6a7dabec..8671bc199a9d03d26876203cb0e9eb596790b291 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -149,7 +149,7 @@ static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
  * 0 on success. If a single page can be used for multiple buffers,
  * then the page will either be inserted fully, or not at all.
  */
-static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
+static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
 {
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;
@@ -163,7 +163,8 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
        do {
                page = efx_reuse_page(rx_queue);
                if (page == NULL) {
-                       page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
+                       page = alloc_pages(__GFP_COLD | __GFP_COMP |
+                                          (atomic ? GFP_ATOMIC : GFP_KERNEL),
                                           efx->rx_buffer_order);
                        if (unlikely(page == NULL))
                                return -ENOMEM;
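
This is where the new flag takes effect. GFP_ATOMIC allocations never sleep and can fail when memory is scarce or fragmented; GFP_KERNEL allocations may sleep and trigger reclaim, so the burst of page allocations in the initial ring fill is far more likely to succeed. __GFP_COLD and __GFP_COMP are kept on both paths. A crude standalone analogue of the two modes, with retry-on-failure standing in for GFP_KERNEL's reclaim-and-sleep behaviour:

#include <stdbool.h>
#include <stdlib.h>

static void *alloc_page_like(bool atomic)
{
        for (;;) {
                void *p = malloc(4096);

                if (p != NULL || atomic)
                        return p;       /* atomic: report failure upward */
                /* blocking: in the kernel this is where the allocator
                 * would reclaim memory and sleep before retrying */
        }
}

int main(void)
{
        free(alloc_page_like(true));
        return 0;
}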
@@ -321,7 +322,7 @@ static void efx_discard_rx_packet(struct efx_channel *channel,
  * this means this function must run from the NAPI handler, or be called
  * when NAPI is disabled.
  */
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
 {
        struct efx_nic *efx = rx_queue->efx;
        unsigned int fill_level, batch_size;
@@ -354,7 +355,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
 
 
        do {
-               rc = efx_init_rx_buffers(rx_queue);
+               rc = efx_init_rx_buffers(rx_queue, atomic);
                if (unlikely(rc)) {
                        /* Ensure that we don't leave the rx queue empty */
                        if (rx_queue->added_count == rx_queue->removed_count)
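
The failure handling after the call is unchanged: if efx_init_rx_buffers() returns -ENOMEM part-way through a refill, the loop stops early, and the check here guards against leaving the ring completely empty. The recovery action falls outside this hunk; given the efx_rx_slow_fill() declaration in efx.h above, a timer-driven slow-fill path presumably retries the allocation later. A sketch of that back-off shape, again with hypothetical names:

#include <stdbool.h>
#include <stdlib.h>

struct ring {
        unsigned int added, removed, fill_target;
};

static void schedule_slow_fill(struct ring *r)
{
        (void)r;        /* the real driver would arm a retry timer here */
}

static int push_buffers(struct ring *r, bool atomic)
{
        void *buf = malloc(4096);       /* stands in for a batch of pages */

        (void)atomic;
        if (buf == NULL)
                return -1;              /* -ENOMEM in the real code */
        free(buf);
        r->added++;
        return 0;
}

static void fast_fill(struct ring *r, bool atomic)
{
        while (r->added - r->removed < r->fill_target) {
                if (push_buffers(r, atomic) != 0) {
                        /* never leave the ring empty: with nothing
                         * outstanding, arm a deferred retry instead */
                        if (r->added == r->removed)
                                schedule_slow_fill(r);
                        break;
                }
        }
}

int main(void)
{
        struct ring r = { 0, 0, 4 };

        fast_fill(&r, true);
        return 0;
}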