diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index a641330..e308818 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2008 Solarflare Communications Inc.
+ * Copyright 2005-2009 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
 
 #include <linux/socket.h>
 #include <linux/in.h>
+#include <linux/slab.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <net/ip.h>
 #include <net/checksum.h>
 #include "net_driver.h"
-#include "rx.h"
 #include "efx.h"
-#include "falcon.h"
+#include "nic.h"
 #include "selftest.h"
 #include "workarounds.h"
 
@@ -61,7 +61,7 @@
  *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
  *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
  */
-static int rx_alloc_method = RX_ALLOC_METHOD_PAGE;
+static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
 
 #define RX_ALLOC_LEVEL_LRO 0x2000
 #define RX_ALLOC_LEVEL_MAX 0x3000
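The comment above describes the self-tuning allocation policy: GRO-merged packets nudge rx_alloc_level up by RX_ALLOC_FACTOR_LRO, packets delivered as plain skbs pull it back down, and the method flips once the clamped score crosses RX_ALLOC_LEVEL_LRO. A minimal sketch of that decision, with a hypothetical helper name (the real logic lives in efx_rx_strategy() further down):

	static int pick_rx_alloc_method(int rx_alloc_level)
	{
		/* Clamp the running score to [0, RX_ALLOC_LEVEL_MAX] ... */
		if (rx_alloc_level < 0)
			rx_alloc_level = 0;
		else if (rx_alloc_level > RX_ALLOC_LEVEL_MAX)
			rx_alloc_level = RX_ALLOC_LEVEL_MAX;

		/* ... and use page-based allocation only while GRO is winning. */
		return rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
			RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB;
	}
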
@@ -86,118 +86,18 @@ static unsigned int rx_refill_limit = 95;
  */
 #define EFX_RXD_HEAD_ROOM 2
 
-/* Macros for zero-order pages (potentially) containing multiple RX buffers */
-#define RX_DATA_OFFSET(_data)                          \
-       (((unsigned long) (_data)) & (PAGE_SIZE-1))
-#define RX_BUF_OFFSET(_rx_buf)                         \
-       RX_DATA_OFFSET((_rx_buf)->data)
-
-#define RX_PAGE_SIZE(_efx)                             \
-       (PAGE_SIZE * (1u << (_efx)->rx_buffer_order))
-
-
-/**************************************************************************
- *
- * Linux generic LRO handling
- *
- **************************************************************************
- */
-
-static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr,
-                              void **tcpudp_hdr, u64 *hdr_flags, void *priv)
+static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
 {
-       struct efx_channel *channel = (struct efx_channel *)priv;
-       struct iphdr *iph;
-       struct tcphdr *th;
-
-       iph = (struct iphdr *)skb->data;
-       if (skb->protocol != htons(ETH_P_IP) || iph->protocol != IPPROTO_TCP)
-               goto fail;
-
-       th = (struct tcphdr *)(skb->data + iph->ihl * 4);
-
-       *tcpudp_hdr = th;
-       *ip_hdr = iph;
-       *hdr_flags = LRO_IPV4 | LRO_TCP;
-
-       channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
-       return 0;
-fail:
-       channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
-       return -1;
-}
-
-static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr,
-                           void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
-                           void *priv)
-{
-       struct efx_channel *channel = (struct efx_channel *)priv;
-       struct ethhdr *eh;
-       struct iphdr *iph;
-
-       /* We support EtherII and VLAN encapsulated IPv4 */
-       eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset);
-       *mac_hdr = eh;
-
-       if (eh->h_proto == htons(ETH_P_IP)) {
-               iph = (struct iphdr *)(eh + 1);
-       } else {
-               struct vlan_ethhdr *veh = (struct vlan_ethhdr *)eh;
-               if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
-                       goto fail;
-
-               iph = (struct iphdr *)(veh + 1);
-       }
-       *ip_hdr = iph;
-
-       /* We can only do LRO over TCP */
-       if (iph->protocol != IPPROTO_TCP)
-               goto fail;
-
-       *hdr_flags = LRO_IPV4 | LRO_TCP;
-       *tcpudp_hdr = (struct tcphdr *)((u8 *) iph + iph->ihl * 4);
-
-       channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
-       return 0;
- fail:
-       channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
-       return -1;
+       /* Offset is always within one page, so we don't need to consider
+        * the page order.
+        */
+       return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
 }
-
-int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx)
+static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
 {
-       size_t s = sizeof(struct net_lro_desc) * EFX_MAX_LRO_DESCRIPTORS;
-       struct net_lro_desc *lro_arr;
-
-       /* Allocate the LRO descriptors structure */
-       lro_arr = kzalloc(s, GFP_KERNEL);
-       if (lro_arr == NULL)
-               return -ENOMEM;
-
-       lro_mgr->lro_arr = lro_arr;
-       lro_mgr->max_desc = EFX_MAX_LRO_DESCRIPTORS;
-       lro_mgr->max_aggr = EFX_MAX_LRO_AGGR;
-       lro_mgr->frag_align_pad = EFX_PAGE_SKB_ALIGN;
-
-       lro_mgr->get_skb_header = efx_lro_get_skb_hdr;
-       lro_mgr->get_frag_header = efx_get_frag_hdr;
-       lro_mgr->dev = efx->net_dev;
-
-       lro_mgr->features = LRO_F_NAPI;
-
-       /* We can pass packets up with the checksum intact */
-       lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
-
-       lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
-
-       return 0;
+       return PAGE_SIZE << efx->rx_buffer_order;
 }
 
-void efx_lro_fini(struct net_lro_mgr *lro_mgr)
-{
-       kfree(lro_mgr->lro_arr);
-       lro_mgr->lro_arr = NULL;
-}
 
 /**
  * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
@@ -209,8 +109,8 @@ void efx_lro_fini(struct net_lro_mgr *lro_mgr)
  * and populates a struct efx_rx_buffer with the relevant
  * information.  Return a negative error code or 0 on success.
  */
-static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
-                                        struct efx_rx_buffer *rx_buf)
+static int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
+                                 struct efx_rx_buffer *rx_buf)
 {
        struct efx_nic *efx = rx_queue->efx;
        struct net_device *net_dev = efx->net_dev;
@@ -230,7 +130,7 @@ static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
                                          rx_buf->data, rx_buf->len,
                                          PCI_DMA_FROMDEVICE);
 
-       if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) {
+       if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) {
                dev_kfree_skb_any(rx_buf->skb);
                rx_buf->skb = NULL;
                return -EIO;
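For context on the pci_dma_mapping_error() change above: the helper now takes the struct pci_dev as its first argument. The usual map-and-check sequence looks roughly like this (a generic sketch of the API, not driver code; buf and len are placeholders):

	dma_addr_t dma_addr;

	dma_addr = pci_map_single(pci_dev, buf, len, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pci_dev, dma_addr)) {
		/* Mapping failed; the buffer must not be handed to hardware. */
		return -EIO;
	}
	/* ... device owns the buffer until pci_unmap_single() ... */
	pci_unmap_single(pci_dev, dma_addr, len, PCI_DMA_FROMDEVICE);
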
@@ -249,8 +149,8 @@ static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
  * and populates a struct efx_rx_buffer with the relevant
  * information.  Return a negative error code or 0 on success.
  */
-static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
-                                         struct efx_rx_buffer *rx_buf)
+static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
+                                  struct efx_rx_buffer *rx_buf)
 {
        struct efx_nic *efx = rx_queue->efx;
        int bytes, space, offset;
@@ -269,10 +169,10 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
                        return -ENOMEM;
 
                dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
-                                       0, RX_PAGE_SIZE(efx),
+                                       0, efx_rx_buf_size(efx),
                                        PCI_DMA_FROMDEVICE);
 
-               if (unlikely(pci_dma_mapping_error(dma_addr))) {
+               if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
                        __free_pages(rx_buf->page, efx->rx_buffer_order);
                        rx_buf->page = NULL;
                        return -EIO;
@@ -280,14 +180,14 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
 
                rx_queue->buf_page = rx_buf->page;
                rx_queue->buf_dma_addr = dma_addr;
-               rx_queue->buf_data = ((char *) page_address(rx_buf->page) +
+               rx_queue->buf_data = (page_address(rx_buf->page) +
                                      EFX_PAGE_IP_ALIGN);
        }
 
-       offset = RX_DATA_OFFSET(rx_queue->buf_data);
        rx_buf->len = bytes;
-       rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
        rx_buf->data = rx_queue->buf_data;
+       offset = efx_rx_buf_offset(rx_buf);
+       rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
 
        /* Try to pack multiple buffers per page */
        if (efx->rx_buffer_order == 0) {
@@ -295,7 +195,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
                rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
                offset += ((bytes + 0x1ff) & ~0x1ff);
 
-               space = RX_PAGE_SIZE(efx) - offset;
+               space = efx_rx_buf_size(efx) - offset;
                if (space >= bytes) {
                        /* Refs dropped on kernel releasing each skb */
                        get_page(rx_queue->buf_page);
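The packing logic above rounds each buffer up to the next 512-byte boundary, (bytes + 0x1ff) & ~0x1ff, so several receive buffers can be carved out of one page while keeping each one 512-byte aligned. Two worked values, purely for illustration:

	/* bytes = 66:    (66   + 0x1ff) & ~0x1ff = 0x200 =  512 */
	/* bytes = 1600:  (1600 + 0x1ff) & ~0x1ff = 0x800 = 2048 */
	unsigned int aligned = (bytes + 0x1ff) & ~0x1ff;	/* round up to 512 */
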
@@ -316,8 +216,8 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
  * and populates a struct efx_rx_buffer with the relevant
  * information.
  */
-static inline int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
-                                    struct efx_rx_buffer *new_rx_buf)
+static int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
+                             struct efx_rx_buffer *new_rx_buf)
 {
        int rc = 0;
 
@@ -337,14 +237,15 @@ static inline int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
        return rc;
 }
 
-static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
-                                      struct efx_rx_buffer *rx_buf)
+static void efx_unmap_rx_buffer(struct efx_nic *efx,
+                               struct efx_rx_buffer *rx_buf)
 {
        if (rx_buf->page) {
                EFX_BUG_ON_PARANOID(rx_buf->skb);
                if (rx_buf->unmap_addr) {
                        pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
-                                      RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE);
+                                      efx_rx_buf_size(efx),
+                                      PCI_DMA_FROMDEVICE);
                        rx_buf->unmap_addr = 0;
                }
        } else if (likely(rx_buf->skb)) {
@@ -353,8 +254,8 @@ static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
        }
 }
 
-static inline void efx_free_rx_buffer(struct efx_nic *efx,
-                                     struct efx_rx_buffer *rx_buf)
+static void efx_free_rx_buffer(struct efx_nic *efx,
+                              struct efx_rx_buffer *rx_buf)
 {
        if (rx_buf->page) {
                __free_pages(rx_buf->page, efx->rx_buffer_order);
@@ -365,8 +266,8 @@ static inline void efx_free_rx_buffer(struct efx_nic *efx,
        }
 }
 
-static inline void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
-                                     struct efx_rx_buffer *rx_buf)
+static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
+                              struct efx_rx_buffer *rx_buf)
 {
        efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
        efx_free_rx_buffer(rx_queue->efx, rx_buf);
@@ -392,8 +293,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
         * fill anyway.
         */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
-       EFX_BUG_ON_PARANOID(fill_level >
-                           rx_queue->efx->type->rxd_ring_mask + 1);
+       EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
 
        /* Don't fill if we don't need to */
        if (fill_level >= rx_queue->fast_fill_trigger)
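EFX_RXQ_SIZE and EFX_RXQ_MASK replace the per-NIC-type rxd_ring_mask with compile-time constants. Because the ring size is a power of two, EFX_RXQ_MASK == EFX_RXQ_SIZE - 1, the '& EFX_RXQ_MASK' below wraps the descriptor index, and 'added_count - removed_count' gives the fill level correctly even across counter wrap-around. A standalone sketch with an illustrative size (the real constants live in the driver headers):

	#define EXAMPLE_RXQ_SIZE 4096u			/* must be a power of two */
	#define EXAMPLE_RXQ_MASK (EXAMPLE_RXQ_SIZE - 1)

	static unsigned int example_rx_index(unsigned int added_count)
	{
		return added_count & EXAMPLE_RXQ_MASK;	/* wraps at EXAMPLE_RXQ_SIZE */
	}
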
@@ -415,8 +315,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
  retry:
        /* Recalculate current fill level now that we have the lock */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
-       EFX_BUG_ON_PARANOID(fill_level >
-                           rx_queue->efx->type->rxd_ring_mask + 1);
+       EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
        space = rx_queue->fast_fill_limit - fill_level;
        if (space < EFX_RX_BATCH)
                goto out_unlock;
@@ -428,8 +327,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
 
        do {
                for (i = 0; i < EFX_RX_BATCH; ++i) {
-                       index = (rx_queue->added_count &
-                                rx_queue->efx->type->rxd_ring_mask);
+                       index = rx_queue->added_count & EFX_RXQ_MASK;
                        rx_buf = efx_rx_buffer(rx_queue, index);
                        rc = efx_init_rx_buffer(rx_queue, rx_buf);
                        if (unlikely(rc))
@@ -444,7 +342,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
 
  out:
        /* Send write pointer to card. */
-       falcon_notify_rx_desc(rx_queue);
+       efx_nic_notify_rx_desc(rx_queue);
 
        /* If the fast fill is running inside from the refill tasklet, then
         * for SMP systems it may be running on a different CPU to
@@ -502,10 +400,10 @@ void efx_rx_work(struct work_struct *data)
                efx_schedule_slow_fill(rx_queue, 1);
 }
 
-static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
-                                           struct efx_rx_buffer *rx_buf,
-                                           int len, int *discard,
-                                           int *leak_packet)
+static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
+                                    struct efx_rx_buffer *rx_buf,
+                                    int len, bool *discard,
+                                    bool *leak_packet)
 {
        struct efx_nic *efx = rx_queue->efx;
        unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
@@ -516,7 +414,7 @@ static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
        /* The packet must be discarded, but this is only a fatal error
         * if the caller indicated it was
         */
-       *discard = 1;
+       *discard = true;
 
        if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
                EFX_ERR_RL(efx, " RX queue %d seriously overlength "
@@ -542,86 +440,66 @@ static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
  * Handles driverlink veto, and passes the fragment up via
  * the appropriate LRO method
  */
-static inline void efx_rx_packet_lro(struct efx_channel *channel,
-                                    struct efx_rx_buffer *rx_buf)
+static void efx_rx_packet_lro(struct efx_channel *channel,
+                             struct efx_rx_buffer *rx_buf,
+                             bool checksummed)
 {
-       struct net_lro_mgr *lro_mgr = &channel->lro_mgr;
-       void *priv = channel;
+       struct napi_struct *napi = &channel->napi_str;
+       gro_result_t gro_result;
 
        /* Pass the skb/page into the LRO engine */
        if (rx_buf->page) {
-               struct skb_frag_struct frags;
-
-               frags.page = rx_buf->page;
-               frags.page_offset = RX_BUF_OFFSET(rx_buf);
-               frags.size = rx_buf->len;
-
-               lro_receive_frags(lro_mgr, &frags, rx_buf->len,
-                                 rx_buf->len, priv, 0);
+               struct page *page = rx_buf->page;
+               struct sk_buff *skb;
 
                EFX_BUG_ON_PARANOID(rx_buf->skb);
                rx_buf->page = NULL;
-       } else {
-               EFX_BUG_ON_PARANOID(!rx_buf->skb);
-
-               lro_receive_skb(lro_mgr, rx_buf->skb, priv);
-               rx_buf->skb = NULL;
-       }
-}
 
-/* Allocate and construct an SKB around a struct page.*/
-static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
-                                           struct efx_nic *efx,
-                                           int hdr_len)
-{
-       struct sk_buff *skb;
-
-       /* Allocate an SKB to store the headers */
-       skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
-       if (unlikely(skb == NULL)) {
-               EFX_ERR_RL(efx, "RX out of memory for skb\n");
-               return NULL;
-       }
+               skb = napi_get_frags(napi);
+               if (!skb) {
+                       put_page(page);
+                       return;
+               }
 
-       EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags);
-       EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
+               skb_shinfo(skb)->frags[0].page = page;
+               skb_shinfo(skb)->frags[0].page_offset =
+                       efx_rx_buf_offset(rx_buf);
+               skb_shinfo(skb)->frags[0].size = rx_buf->len;
+               skb_shinfo(skb)->nr_frags = 1;
 
-       skb->ip_summed = CHECKSUM_UNNECESSARY;
-       skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
+               skb->len = rx_buf->len;
+               skb->data_len = rx_buf->len;
+               skb->truesize += rx_buf->len;
+               skb->ip_summed =
+                       checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
 
-       skb->len = rx_buf->len;
-       skb->truesize = rx_buf->len + sizeof(struct sk_buff);
-       memcpy(skb->data, rx_buf->data, hdr_len);
-       skb->tail += hdr_len;
+               skb_record_rx_queue(skb, channel->channel);
 
-       /* Append the remaining page onto the frag list */
-       if (unlikely(rx_buf->len > hdr_len)) {
-               struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
-               frag->page = rx_buf->page;
-               frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len;
-               frag->size = skb->len - hdr_len;
-               skb_shinfo(skb)->nr_frags = 1;
-               skb->data_len = frag->size;
+               gro_result = napi_gro_frags(napi);
        } else {
-               __free_pages(rx_buf->page, efx->rx_buffer_order);
-               skb->data_len = 0;
-       }
+               struct sk_buff *skb = rx_buf->skb;
 
-       /* Ownership has transferred from the rx_buf to skb */
-       rx_buf->page = NULL;
+               EFX_BUG_ON_PARANOID(!skb);
+               EFX_BUG_ON_PARANOID(!checksummed);
+               rx_buf->skb = NULL;
 
-       /* Move past the ethernet header */
-       skb->protocol = eth_type_trans(skb, efx->net_dev);
+               gro_result = napi_gro_receive(napi, skb);
+       }
 
-       return skb;
+       if (gro_result == GRO_NORMAL) {
+               channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
+       } else if (gro_result != GRO_DROP) {
+               channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
+               channel->irq_mod_score += 2;
+       }
 }
 
 void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
-                  unsigned int len, int checksummed, int discard)
+                  unsigned int len, bool checksummed, bool discard)
 {
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;
-       int leak_packet = 0;
+       bool leak_packet = false;
 
        rx_buf = efx_rx_buffer(rx_queue, index);
        EFX_BUG_ON_PARANOID(!rx_buf->data);
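The LRO-to-GRO conversion above replaces the inet_lro manager with the NAPI GRO entry points: page-based buffers are attached as a single fragment to the skb returned by napi_get_frags() and handed to napi_gro_frags(), while skb-based buffers go through napi_gro_receive(). A generic sketch of the skb path using only the standard API (variable names are placeholders, not additional driver code):

	skb->protocol = eth_type_trans(skb, net_dev);
	skb->ip_summed = CHECKSUM_UNNECESSARY;		/* hardware validated the checksum */
	skb_record_rx_queue(skb, rx_queue_index);

	switch (napi_gro_receive(napi, skb)) {
	case GRO_DROP:
		/* Packet was dropped by GRO; nothing more to do. */
		break;
	case GRO_NORMAL:
		/* GRO could not merge it; it was passed up as a plain skb. */
		break;
	default:
		/* Merged or held by GRO; counts in favour of page allocation. */
		break;
	}

In either case the skb (or page reference) is consumed by the GRO layer, which is why the driver clears rx_buf->skb / rx_buf->page before making the call.
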
@@ -679,11 +557,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 
 /* Handle a received packet.  Second half: Touches packet payload. */
 void __efx_rx_packet(struct efx_channel *channel,
-                    struct efx_rx_buffer *rx_buf, int checksummed)
+                    struct efx_rx_buffer *rx_buf, bool checksummed)
 {
        struct efx_nic *efx = channel->efx;
        struct sk_buff *skb;
-       int lro = efx->net_dev->features & NETIF_F_LRO;
 
        /* If we're in loopback test, then pass the packet directly to the
         * loopback layer, and free the rx_buf here
@@ -691,7 +568,7 @@ void __efx_rx_packet(struct efx_channel *channel,
        if (unlikely(efx->loopback_selftest)) {
                efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
                efx_free_rx_buffer(efx, rx_buf);
-               goto done;
+               return;
        }
 
        if (rx_buf->skb) {
@@ -703,52 +580,28 @@ void __efx_rx_packet(struct efx_channel *channel,
                 * at the ethernet header */
                rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
                                                       efx->net_dev);
-       }
 
-       /* Both our generic-LRO and SFC-SSR support skb and page based
-        * allocation, but neither support switching from one to the
-        * other on the fly. If we spot that the allocation mode has
-        * changed, then flush the LRO state.
-        */
-       if (unlikely(channel->rx_alloc_pop_pages != (rx_buf->page != NULL))) {
-               efx_flush_lro(channel);
-               channel->rx_alloc_pop_pages = (rx_buf->page != NULL);
-       }
-       if (likely(checksummed && lro)) {
-               efx_rx_packet_lro(channel, rx_buf);
-               goto done;
+               skb_record_rx_queue(rx_buf->skb, channel->channel);
        }
 
-       /* Form an skb if required */
-       if (rx_buf->page) {
-               int hdr_len = min(rx_buf->len, EFX_SKB_HEADERS);
-               skb = efx_rx_mk_skb(rx_buf, efx, hdr_len);
-               if (unlikely(skb == NULL)) {
-                       efx_free_rx_buffer(efx, rx_buf);
-                       goto done;
-               }
-       } else {
-               /* We now own the SKB */
-               skb = rx_buf->skb;
-               rx_buf->skb = NULL;
+       if (likely(checksummed || rx_buf->page)) {
+               efx_rx_packet_lro(channel, rx_buf, checksummed);
+               return;
        }
 
-       EFX_BUG_ON_PARANOID(rx_buf->page);
-       EFX_BUG_ON_PARANOID(rx_buf->skb);
+       /* We now own the SKB */
+       skb = rx_buf->skb;
+       rx_buf->skb = NULL;
        EFX_BUG_ON_PARANOID(!skb);
 
        /* Set the SKB flags */
-       if (unlikely(!checksummed || !efx->rx_checksum_enabled))
-               skb->ip_summed = CHECKSUM_NONE;
+       skb->ip_summed = CHECKSUM_NONE;
 
        /* Pass the packet up */
        netif_receive_skb(skb);
 
        /* Update allocation strategy method */
        channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
-
-done:
-       efx->net_dev->last_rx = jiffies;
 }
 
 void efx_rx_strategy(struct efx_channel *channel)
@@ -756,7 +609,7 @@ void efx_rx_strategy(struct efx_channel *channel)
        enum efx_rx_alloc_method method = rx_alloc_method;
 
        /* Only makes sense to use page based allocation if LRO is enabled */
-       if (!(channel->efx->net_dev->features & NETIF_F_LRO)) {
+       if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
                method = RX_ALLOC_METHOD_SKB;
        } else if (method == RX_ALLOC_METHOD_AUTO) {
                /* Constrain the rx_alloc_level */
@@ -783,31 +636,21 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
        EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);
 
        /* Allocate RX buffers */
-       rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer);
+       rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
        rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
-       if (!rx_queue->buffer) {
-               rc = -ENOMEM;
-               goto fail1;
-       }
-
-       rc = falcon_probe_rx(rx_queue);
-       if (rc)
-               goto fail2;
-
-       return 0;
-
- fail2:
-       kfree(rx_queue->buffer);
-       rx_queue->buffer = NULL;
- fail1:
-       rx_queue->used = 0;
+       if (!rx_queue->buffer)
+               return -ENOMEM;
 
+       rc = efx_nic_probe_rx(rx_queue);
+       if (rc) {
+               kfree(rx_queue->buffer);
+               rx_queue->buffer = NULL;
+       }
        return rc;
 }
 
-int efx_init_rx_queue(struct efx_rx_queue *rx_queue)
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 {
-       struct efx_nic *efx = rx_queue->efx;
        unsigned int max_fill, trigger, limit;
 
        EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);
@@ -820,7 +663,7 @@ int efx_init_rx_queue(struct efx_rx_queue *rx_queue)
        rx_queue->min_overfill = -1U;
 
        /* Initialise limit fields */
-       max_fill = efx->type->rxd_ring_mask + 1 - EFX_RXD_HEAD_ROOM;
+       max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM;
        trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
        limit = max_fill * min(rx_refill_limit, 100U) / 100U;
 
@@ -829,7 +672,7 @@ int efx_init_rx_queue(struct efx_rx_queue *rx_queue)
        rx_queue->fast_fill_limit = limit;
 
        /* Set up RX descriptor ring */
-       return falcon_init_rx(rx_queue);
+       efx_nic_init_rx(rx_queue);
 }
 
 void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
@@ -839,11 +682,11 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
 
        EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
 
-       falcon_fini_rx(rx_queue);
+       efx_nic_fini_rx(rx_queue);
 
        /* Release RX buffers NB start at index 0 not current HW ptr */
        if (rx_queue->buffer) {
-               for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) {
+               for (i = 0; i <= EFX_RXQ_MASK; i++) {
                        rx_buf = efx_rx_buffer(rx_queue, i);
                        efx_fini_rx_buffer(rx_queue, rx_buf);
                }
@@ -852,7 +695,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
        /* For a page that is part-way through splitting into RX buffers */
        if (rx_queue->buf_page != NULL) {
                pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
-                              RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE);
+                              efx_rx_buf_size(rx_queue->efx),
+                              PCI_DMA_FROMDEVICE);
                __free_pages(rx_queue->buf_page,
                             rx_queue->efx->rx_buffer_order);
                rx_queue->buf_page = NULL;
@@ -863,16 +707,10 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
 {
        EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue);
 
-       falcon_remove_rx(rx_queue);
+       efx_nic_remove_rx(rx_queue);
 
        kfree(rx_queue->buffer);
        rx_queue->buffer = NULL;
-       rx_queue->used = 0;
-}
-
-void efx_flush_lro(struct efx_channel *channel)
-{
-       lro_flush_all(&channel->lro_mgr);
 }