Merge branch 'master' of /home/davem/src/GIT/linux-2.6/
author David S. Miller <davem@davemloft.net>
Mon, 31 May 2010 12:46:45 +0000 (05:46 -0700)
committer David S. Miller <davem@davemloft.net>
Mon, 31 May 2010 12:46:45 +0000 (05:46 -0700)
drivers/net/fs_enet/mac-fcc.c
net/core/skbuff.c
net/ipv4/udp.c

diff --combined drivers/net/fs_enet/mac-fcc.c
@@@ -88,19 -88,19 +88,19 @@@ static int do_pd_setup(struct fs_enet_p
        struct fs_platform_info *fpi = fep->fpi;
        int ret = -EINVAL;
  
-       fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL);
+       fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
        if (fep->interrupt == NO_IRQ)
                goto out;
  
-       fep->fcc.fccp = of_iomap(ofdev->node, 0);
+       fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0);
        if (!fep->fcc.fccp)
                goto out;
  
-       fep->fcc.ep = of_iomap(ofdev->node, 1);
+       fep->fcc.ep = of_iomap(ofdev->dev.of_node, 1);
        if (!fep->fcc.ep)
                goto out_fccp;
  
-       fep->fcc.fcccp = of_iomap(ofdev->node, 2);
+       fep->fcc.fcccp = of_iomap(ofdev->dev.of_node, 2);
        if (!fep->fcc.fcccp)
                goto out_ep;
  
@@@ -504,54 -504,17 +504,54 @@@ static int get_regs_len(struct net_devi
  }
  
  /* Some transmit errors cause the transmitter to shut
 - * down.  We now issue a restart transmit.  Since the
 - * errors close the BD and update the pointers, the restart
 - * _should_ pick up without having to reset any of our
 - * pointers either.  Also, To workaround 8260 device erratum
 - * CPM37, we must disable and then re-enable the transmitter
 - * following a Late Collision, Underrun, or Retry Limit error.
 + * down.  We now issue a restart transmit.
 + * Also, to workaround 8260 device erratum CPM37, we must
 + * disable and then re-enable the transmitter following a
 + * Late Collision, Underrun, or Retry Limit error.
 + * In addition, tbptr may point beyond BDs still marked as
 + * ready due to internal pipelining, so we need to look back
 + * through the BDs and adjust tbptr to point to the last BD
 + * marked as ready.  This may result in some buffers being
 + * retransmitted.
   */
  static void tx_restart(struct net_device *dev)
  {
        struct fs_enet_private *fep = netdev_priv(dev);
        fcc_t __iomem *fccp = fep->fcc.fccp;
 +      const struct fs_platform_info *fpi = fep->fpi;
 +      fcc_enet_t __iomem *ep = fep->fcc.ep;
 +      cbd_t __iomem *curr_tbptr;
 +      cbd_t __iomem *recheck_bd;
 +      cbd_t __iomem *prev_bd;
 +      cbd_t __iomem *last_tx_bd;
 +
 +      last_tx_bd = fep->tx_bd_base + (fpi->tx_ring - 1);
 +
 +      /* get the current bd held in TBPTR  and scan back from this point */
 +      recheck_bd = curr_tbptr = (cbd_t __iomem *)
 +              ((R32(ep, fen_genfcc.fcc_tbptr) - fep->ring_mem_addr) +
 +              fep->ring_base);
 +
 +      prev_bd = (recheck_bd == fep->tx_bd_base) ? last_tx_bd : recheck_bd - 1;
 +
 +      /* Move through the bds in reverse, look for the earliest buffer
 +       * that is not ready.  Adjust TBPTR to the following buffer */
 +      while ((CBDR_SC(prev_bd) & BD_ENET_TX_READY) != 0) {
 +              /* Go back one buffer */
 +              recheck_bd = prev_bd;
 +
 +              /* update the previous buffer */
 +              prev_bd = (prev_bd == fep->tx_bd_base) ? last_tx_bd : prev_bd - 1;
 +
 +              /* We should never see all bds marked as ready, check anyway */
 +              if (recheck_bd == curr_tbptr)
 +                      break;
 +      }
 +      /* Now update the TBPTR and dirty flag to the current buffer */
 +      W32(ep, fen_genfcc.fcc_tbptr,
 +              (uint) (((void *)recheck_bd - fep->ring_base) +
 +              fep->ring_mem_addr));
 +      fep->dirty_tx = recheck_bd;
  
        C32(fccp, fcc_gfmr, FCC_GFMR_ENT);
        udelay(10);
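
The loop above walks the transmit BD ring in reverse from the hardware's
TBPTR until it finds the earliest descriptor no longer marked ready, then
rewrites TBPTR to the descriptor that follows it.  A standalone sketch of
the same scan, assuming illustrative names (struct bd, BD_READY, nr_bds)
rather than the driver's cbd_t and BD_ENET_TX_READY:

	struct bd {
		unsigned short status;
	};
	#define BD_READY 0x8000

	/* Return the ring index TBPTR should be rewound to. */
	static unsigned int scan_back(const struct bd *ring,
				      unsigned int nr_bds, unsigned int hw_idx)
	{
		unsigned int idx = hw_idx;
		unsigned int prev = idx ? idx - 1 : nr_bds - 1;

		/* Step back while the previous descriptor is still ready;
		 * guard against wrapping a ring where every BD is ready. */
		while (ring[prev].status & BD_READY) {
			idx = prev;
			prev = prev ? prev - 1 : nr_bds - 1;
			if (idx == hw_idx)
				break;
		}
		return idx;
	}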
diff --combined net/core/skbuff.c
@@@ -482,22 -482,22 +482,22 @@@ EXPORT_SYMBOL(consume_skb)
   *    reference count dropping and cleans up the skbuff as if it
   *    just came from __alloc_skb().
   */
 -int skb_recycle_check(struct sk_buff *skb, int skb_size)
 +bool skb_recycle_check(struct sk_buff *skb, int skb_size)
  {
        struct skb_shared_info *shinfo;
  
        if (irqs_disabled())
 -              return 0;
 +              return false;
  
        if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
 -              return 0;
 +              return false;
  
        skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
        if (skb_end_pointer(skb) - skb->head < skb_size)
 -              return 0;
 +              return false;
  
        if (skb_shared(skb) || skb_cloned(skb))
 -              return 0;
 +              return false;
  
        skb_release_head_state(skb);
  
        skb->data = skb->head + NET_SKB_PAD;
        skb_reset_tail_pointer(skb);
  
 -      return 1;
 +      return true;
  }
  EXPORT_SYMBOL(skb_recycle_check);
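
With the return type now bool, call sites read as a predicate.  A
hypothetical driver tx-completion path using it; priv->rx_buf_size and
priv->recycle_queue are illustrative driver-private names, not part of
the skbuff API:

	if (skb_recycle_check(skb, priv->rx_buf_size))
		__skb_queue_head(&priv->recycle_queue, skb); /* reuse as rx buffer */
	else
		dev_kfree_skb_any(skb);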
  
@@@ -1406,12 -1406,13 +1406,13 @@@ new_page
  /*
   * Fill page/offset/length into spd, if it can hold more pages.
   */
- static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
+ static inline int spd_fill_page(struct splice_pipe_desc *spd,
+                               struct pipe_inode_info *pipe, struct page *page,
                                unsigned int *len, unsigned int offset,
                                struct sk_buff *skb, int linear,
                                struct sock *sk)
  {
-       if (unlikely(spd->nr_pages == PIPE_BUFFERS))
+       if (unlikely(spd->nr_pages == pipe->buffers))
                return 1;
  
        if (linear) {
@@@ -1447,7 -1448,8 +1448,8 @@@ static inline int __splice_segment(stru
                                   unsigned int plen, unsigned int *off,
                                   unsigned int *len, struct sk_buff *skb,
                                   struct splice_pipe_desc *spd, int linear,
-                                  struct sock *sk)
+                                  struct sock *sk,
+                                  struct pipe_inode_info *pipe)
  {
        if (!*len)
                return 1;
                /* the linear region may spread across several pages  */
                flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
  
-               if (spd_fill_page(spd, page, &flen, poff, skb, linear, sk))
+               if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
                        return 1;
  
                __segment_seek(&page, &poff, &plen, flen);
   * Map linear and fragment data from the skb to spd. It reports failure if the
   * pipe is full or if we already spliced the requested length.
   */
- static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
-                            unsigned int *len, struct splice_pipe_desc *spd,
-                            struct sock *sk)
+ static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
+                            unsigned int *offset, unsigned int *len,
+                            struct splice_pipe_desc *spd, struct sock *sk)
  {
        int seg;
  
        if (__splice_segment(virt_to_page(skb->data),
                             (unsigned long) skb->data & (PAGE_SIZE - 1),
                             skb_headlen(skb),
-                            offset, len, skb, spd, 1, sk))
+                            offset, len, skb, spd, 1, sk, pipe))
                return 1;
  
        /*
                const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
  
                if (__splice_segment(f->page, f->page_offset, f->size,
-                                    offset, len, skb, spd, 0, sk))
+                                    offset, len, skb, spd, 0, sk, pipe))
                        return 1;
        }
  
@@@ -1524,8 -1526,8 +1526,8 @@@ int skb_splice_bits(struct sk_buff *skb
                    struct pipe_inode_info *pipe, unsigned int tlen,
                    unsigned int flags)
  {
-       struct partial_page partial[PIPE_BUFFERS];
-       struct page *pages[PIPE_BUFFERS];
+       struct partial_page partial[PIPE_DEF_BUFFERS];
+       struct page *pages[PIPE_DEF_BUFFERS];
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
        };
        struct sk_buff *frag_iter;
        struct sock *sk = skb->sk;
+       int ret = 0;
+       if (splice_grow_spd(pipe, &spd))
+               return -ENOMEM;
  
        /*
         * __skb_splice_bits() only fails if the output has no room left,
         * so no point in going over the frag_list for the error case.
         */
-       if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk))
+       if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
                goto done;
        else if (!tlen)
                goto done;
        skb_walk_frags(skb, frag_iter) {
                if (!tlen)
                        break;
-               if (__skb_splice_bits(frag_iter, &offset, &tlen, &spd, sk))
+               if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
                        break;
        }
  
  done:
        if (spd.nr_pages) {
-               int ret;
                /*
                 * Drop the socket lock, otherwise we have reverse
                 * locking dependencies between sk_lock and i_mutex
                release_sock(sk);
                ret = splice_to_pipe(pipe, &spd);
                lock_sock(sk);
-               return ret;
        }
  
-       return 0;
+       splice_shrink_spd(pipe, &spd);
+       return ret;
  }
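
The splice changes above drop the compile-time PIPE_BUFFERS bound in favor
of the per-pipe pipe->buffers, so callers declare arrays for the default
pipe size and grow them on demand.  A sketch of the resulting calling
convention; fill_spd() is a hypothetical stand-in for the code that
populates the descriptor:

	static ssize_t example_splice_bits(struct pipe_inode_info *pipe)
	{
		struct partial_page partial[PIPE_DEF_BUFFERS];
		struct page *pages[PIPE_DEF_BUFFERS];
		struct splice_pipe_desc spd = {
			.pages = pages,
			.partial = partial,
		};
		ssize_t ret = 0;

		/* Allocates larger arrays if the pipe was grown with
		 * F_SETPIPE_SZ beyond the stack-sized defaults. */
		if (splice_grow_spd(pipe, &spd))
			return -ENOMEM;

		fill_spd(&spd, pipe);	/* hypothetical */

		if (spd.nr_pages)
			ret = splice_to_pipe(pipe, &spd);

		/* Frees anything splice_grow_spd() allocated. */
		splice_shrink_spd(pipe, &spd);
		return ret;
	}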
  
  /**
@@@ -2992,11 -2996,7 +2996,11 @@@ void skb_tstamp_tx(struct sk_buff *orig
        memset(serr, 0, sizeof(*serr));
        serr->ee.ee_errno = ENOMSG;
        serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
 +
 +      bh_lock_sock(sk);
        err = sock_queue_err_skb(sk, skb);
 +      bh_unlock_sock(sk);
 +
        if (err)
                kfree_skb(skb);
  }
diff --combined net/ipv4/udp.c
@@@ -634,9 -634,7 +634,9 @@@ void __udp4_lib_err(struct sk_buff *skb
                if (!harderr || sk->sk_state != TCP_ESTABLISHED)
                        goto out;
        } else {
 +              bh_lock_sock(sk);
                ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
 +              bh_unlock_sock(sk);
        }
        sk->sk_err = err;
        sk->sk_error_report(sk);
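
Both hunks above (skb_tstamp_tx and __udp4_lib_err) now take the socket
lock from softirq context before queueing onto the socket's error queue,
serializing against user-context readers.  The reader they race with is a
recvmsg() on the error queue; an illustrative userspace drain, assuming fd
is an already-open socket:

	#include <stdio.h>
	#include <sys/socket.h>

	static void drain_errqueue(int fd)
	{
		char data[256], ctrl[256];
		struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
		struct msghdr msg = {
			.msg_iov        = &iov,
			.msg_iovlen     = 1,
			.msg_control    = ctrl,
			.msg_controllen = sizeof(ctrl),
		};

		if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
			perror("recvmsg MSG_ERRQUEUE");
	}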
@@@ -1690,8 -1688,8 +1690,8 @@@ int udp_lib_setsockopt(struct sock *sk
                        return -ENOPROTOOPT;
                if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
                        val = 8;
-               else if (val > USHORT_MAX)
-                       val = USHORT_MAX;
+               else if (val > USHRT_MAX)
+                       val = USHRT_MAX;
                up->pcslen = val;
                up->pcflag |= UDPLITE_SEND_CC;
                break;
                        return -ENOPROTOOPT;
                if (val != 0 && val < 8) /* Avoid silly minimal values.       */
                        val = 8;
-               else if (val > USHORT_MAX)
-                       val = USHORT_MAX;
+               else if (val > USHRT_MAX)
+                       val = USHRT_MAX;
                up->pcrlen = val;
                up->pcflag |= UDPLITE_RECV_CC;
                break;
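
The clamping above bounds the UDP-Lite checksum coverage requested from
userspace: nonzero values below 8 are rounded up to the minimum legal
coverage, and anything above USHRT_MAX is capped.  A userspace view of the
send-side option, with fallback defines in case the libc headers lack the
UDP-Lite constants:

	#include <stdio.h>
	#include <sys/socket.h>
	#include <netinet/in.h>

	#ifndef IPPROTO_UDPLITE
	#define IPPROTO_UDPLITE 136
	#endif
	#ifndef UDPLITE_SEND_CSCOV
	#define UDPLITE_SEND_CSCOV 10
	#endif

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
		/* Checksum only the first 20 bytes of each datagram. */
		int cov = 20;

		if (fd < 0 || setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV,
					 &cov, sizeof(cov)) < 0)
			perror("udplite");
		return 0;
	}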