The hard header cache is in the main output path, so using
seqlock instead of reader/writer lock should reduce overhead.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
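
The saving is on the read side: a reader/writer lock's read path still performs an atomic read-modify-write on the lock word, so on a hot path like packet output the lock's cacheline bounces between CPUs even when no writer is present, while a seqlock reader only loads a sequence counter and retries if it changed. As a rough userspace illustration of the two read paths (hypothetical helper names, C11 atomics, not the kernel's <linux/seqlock.h> primitives; the racing memcpy() is formally a data race in strict C11, and the fuller demo after the neigh_hh_output() hunk below uses atomic bytes to stay well-defined):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <string.h>

	/* rwlock read path: two atomic RMWs on a shared lock word per call. */
	static void copy_with_rwlock(pthread_rwlock_t *lock,
				     char *dst, const char *src, size_t len)
	{
		pthread_rwlock_rdlock(lock);	/* atomic store to the lock's cacheline */
		memcpy(dst, src, len);
		pthread_rwlock_unlock(lock);	/* and another on the way out */
	}

	/* seqlock read path: loads only; an uncontended reader performs no
	 * stores visible to other CPUs, so nothing bounces. */
	static void copy_with_seqlock(atomic_uint *seq,
				      char *dst, const char *src, size_t len)
	{
		unsigned s;

		do {
			while ((s = atomic_load(seq)) & 1)
				;		/* odd: writer in flight, keep polling */
			memcpy(dst, src, len);	/* may see torn data; check below */
		} while (atomic_load(seq) != s);
	}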
*/
u16 hh_len; /* length of header */
int (*hh_output)(struct sk_buff *skb);
- rwlock_t hh_lock;
+ seqlock_t hh_lock;
/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD 16
+static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
+{
+ unsigned seq;
+ int hh_len;
+
+ do {
+ int hh_alen;
+
+ seq = read_seqbegin(&hh->hh_lock);
+ hh_len = hh->hh_len;
+ hh_alen = HH_DATA_ALIGN(hh_len);
+ memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+ } while (read_seqretry(&hh->hh_lock, seq));
+
+ skb_push(skb, hh_len);
+ return hh->hh_output(skb);
+}
+
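
Two details of the new helper are worth noting. hh_len is read inside the retry loop so that the length and the bytes used afterwards come from the same snapshot that passed read_seqretry(); the memcpy() may well copy a half-updated header while neigh_update_hhs() is running, and the sequence check is what catches that and forces another pass. (hh->hh_output is read after the loop, outside the seqlock; it is a single word-sized pointer updated under the write side, so a torn read is not a concern there.) The following self-contained toy exercises the same copy-then-validate shape; all of it, names included, is hypothetical demo code, not kernel API. A writer flips between two self-consistent (length, pattern) headers, and the reader's asserts would trip on any torn snapshot:

	#include <assert.h>
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define HDR_MAX 16
	#define ITERS   100000

	static atomic_uint seq;				/* even: stable, odd: write in flight */
	static _Atomic unsigned char hdr[HDR_MAX];	/* atomic bytes: no C11 data race */
	static atomic_int hdr_len;

	/* Writer: alternate between two self-consistent headers, bumping the
	 * sequence to odd before touching the data and back to even after. */
	static void *writer(void *arg)
	{
		(void)arg;
		for (int i = 0; i < ITERS; i++) {
			int len = (i & 1) ? 6 : 14;
			unsigned char pat = (i & 1) ? 'A' : 'B';

			atomic_fetch_add(&seq, 1);	/* -> odd */
			atomic_store(&hdr_len, len);
			for (int j = 0; j < len; j++)
				atomic_store(&hdr[j], pat);
			atomic_fetch_add(&seq, 1);	/* -> even */
		}
		return NULL;
	}

	/* Reader: same shape as neigh_hh_output() -- snapshot (len, bytes)
	 * inside the loop, keep only a snapshot whose sequence was stable. */
	static void *reader(void *arg)
	{
		(void)arg;
		for (int i = 0; i < ITERS; i++) {
			unsigned char copy[HDR_MAX];
			unsigned s;
			int len;

			do {
				while ((s = atomic_load(&seq)) & 1)
					;		/* writer active */
				len = atomic_load(&hdr_len);
				for (int j = 0; j < len; j++)
					copy[j] = atomic_load(&hdr[j]);
			} while (atomic_load(&seq) != s);

			/* A torn snapshot would mix patterns or pair 'A' with 14. */
			for (int j = 1; j < len; j++)
				assert(copy[j] == copy[0]);
			assert((copy[0] == 'A' && len == 6) ||
			       (copy[0] == 'B' && len == 14));
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t w, r;

		atomic_store(&hdr_len, 6);	/* start from a consistent state */
		for (int j = 0; j < 6; j++)
			atomic_store(&hdr[j], 'A');

		pthread_create(&w, NULL, writer, NULL);
		pthread_create(&r, NULL, reader, NULL);
		pthread_join(w, NULL);
		pthread_join(r, NULL);
		puts("no torn header observed");
		return 0;
	}

Build with something like cc -std=c11 -pthread; dropping the do/while retry makes the asserts fire almost immediately under contention. The toy uses sequentially consistent atomics throughout for simplicity, where the kernel's seqlock gets away with cheaper barriers.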
static inline struct neighbour *
__neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat)
{
while ((hh = neigh->hh) != NULL) {
neigh->hh = hh->hh_next;
hh->hh_next = NULL;
- write_lock_bh(&hh->hh_lock);
+
+ write_seqlock_bh(&hh->hh_lock);
hh->hh_output = neigh_blackhole;
- write_unlock_bh(&hh->hh_lock);
+ write_sequnlock_bh(&hh->hh_lock);
if (atomic_dec_and_test(&hh->hh_refcnt))
kfree(hh);
}
if (update) {
for (hh = neigh->hh; hh; hh = hh->hh_next) {
- write_lock_bh(&hh->hh_lock);
+ write_seqlock_bh(&hh->hh_lock);
update(hh, neigh->dev, neigh->ha);
- write_unlock_bh(&hh->hh_lock);
+ write_sequnlock_bh(&hh->hh_lock);
break;
if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
- rwlock_init(&hh->hh_lock);
+ seqlock_init(&hh->hh_lock);
hh->hh_type = protocol;
atomic_set(&hh->hh_refcnt, 0);
hh->hh_next = NULL;
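
On the write side the conversion is mechanical, but two properties deserve spelling out: seqlock writers exclude one another through the spinlock embedded in seqlock_t (which is why these call sites need no extra serialization), and the _bh suffix still disables bottom halves, so a softirq reader on the same CPU cannot interrupt a writer and then spin forever on a sequence that can never advance. A toy equivalent of the write-side pair, again with hypothetical names and a pthread mutex standing in for the embedded spinlock:

	#include <pthread.h>
	#include <stdatomic.h>

	static atomic_uint seq;
	static pthread_mutex_t write_mutex = PTHREAD_MUTEX_INITIALIZER;

	/* Two unserialized writers could interleave their increments and
	 * leave seq even while the data mixes both updates; writers must
	 * take a lock first, which is what seqlock_t's spinlock provides. */
	static void toy_write_seqlock(void)
	{
		pthread_mutex_lock(&write_mutex);
		atomic_fetch_add(&seq, 1);	/* odd: readers start retrying */
	}

	static void toy_write_sequnlock(void)
	{
		atomic_fetch_add(&seq, 1);	/* even: readers may complete */
		pthread_mutex_unlock(&write_mutex);
	}

Userspace has no _bh equivalent; in the kernel that half of the helper is what keeps a same-CPU softirq reader from livelocking against a preempted writer.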
static inline int ip_finish_output2(struct sk_buff *skb)
{
struct dst_entry *dst = skb->dst;
- struct hh_cache *hh = dst->hh;
struct net_device *dev = dst->dev;
int hh_len = LL_RESERVED_SPACE(dev);
- if (hh) {
- int hh_alen;
-
- read_lock_bh(&hh->hh_lock);
- hh_alen = HH_DATA_ALIGN(hh->hh_len);
- memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
- read_unlock_bh(&hh->hh_lock);
- skb_push(skb, hh->hh_len);
- return hh->hh_output(skb);
- } else if (dst->neighbour)
+ if (dst->hh)
+ return neigh_hh_output(dst->hh, skb);
+ else if (dst->neighbour)
return dst->neighbour->output(skb);
if (net_ratelimit())
static inline int ip6_output_finish(struct sk_buff *skb)
{
struct dst_entry *dst = skb->dst;
- struct hh_cache *hh = dst->hh;
-
- if (hh) {
- int hh_alen;
-
- read_lock_bh(&hh->hh_lock);
- hh_alen = HH_DATA_ALIGN(hh->hh_len);
- memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
- read_unlock_bh(&hh->hh_lock);
- skb_push(skb, hh->hh_len);
- return hh->hh_output(skb);
- } else if (dst->neighbour)
+
+ if (dst->hh)
+ return neigh_hh_output(dst->hh, skb);
+ else if (dst->neighbour)
return dst->neighbour->output(skb);
IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);