dev_kfree_skb_any(rx_ring[i].skb);
}
- kfree(rx_ring);
+ vfree(rx_ring);
}
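Review note: the kfree() -> vfree() swap here pairs with the vmalloc() conversions in the allocation hunks below. Memory obtained from vmalloc() must be released with vfree(); handing it to kfree() corrupts the slab allocator.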
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
sge[i].length = PAGE_SIZE;
wr->next = NULL;
- wr->sg_list = priv->cm.rx_sge;
+ wr->sg_list = sge;
wr->num_sge = priv->cm.num_frags;
}
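The point of this fix: non-SRQ QPs post their own receive buffers, so each caller passes its own scatter list; priv->cm.rx_sge describes only the SRQ buffers. For context, the receive-WR init helper ends up looking roughly as follows. This is a sketch reconstructed from the hunk; the ipoib_cm_init_rx_wr name and the priv->mr->lkey usage are assumptions based on this era of the driver, not part of the patch:

	static void ipoib_cm_init_rx_wr(struct net_device *dev,
					struct ib_recv_wr *wr,
					struct ib_sge *sge)
	{
		struct ipoib_dev_priv *priv = netdev_priv(dev);
		int i;

		for (i = 0; i < priv->cm.num_frags; ++i)
			sge[i].lkey = priv->mr->lkey;

		sge[0].length = IPOIB_CM_HEAD_SIZE;
		for (i = 1; i < priv->cm.num_frags; ++i)
			sge[i].length = PAGE_SIZE;

		wr->next    = NULL;
		wr->sg_list = sge;	/* the caller's array, not the shared priv->cm.rx_sge */
		wr->num_sge = priv->cm.num_frags;
	}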
int ret;
int i;
- rx->rx_ring = kcalloc(ipoib_recvq_size, sizeof *rx->rx_ring, GFP_KERNEL);
- if (!rx->rx_ring)
+ rx->rx_ring = vmalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
+ if (!rx->rx_ring) {
+ printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
+ priv->ca->name, ipoib_recvq_size);
return -ENOMEM;
+ }
+
+ memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);
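kcalloc() needs physically contiguous pages, so a large ipoib_recvq_size can fail as a high-order allocation under memory fragmentation; vmalloc() only needs virtually contiguous memory. Since vmalloc() does not zero, the explicit memset() follows. On newer trees (vzalloc() was added in 2.6.37) the two fold into one call:

	rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
	/* returns zeroed memory; the separate memset() becomes unnecessary */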
t = kmalloc(sizeof *t, GFP_KERNEL);
if (!t) {
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct ipoib_tx_buf *tx_req;
+ struct ipoib_cm_tx_buf *tx_req;
u64 addr;
if (unlikely(skb->len > tx->mtu)) {
return;
}
- tx_req->mapping[0] = addr;
+ tx_req->mapping = addr;
if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
addr, skb->len))) {
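The tx_req type change is what the mapping[0] -> mapping edits rely on: connected-mode sends always post a single linear buffer, so the CM path gets its own buffer type with one DMA address instead of reusing the UD ipoib_tx_buf and its per-fragment mapping array. A sketch of the declaration (assumed to live in ipoib.h alongside ipoib_tx_buf):

	struct ipoib_cm_tx_buf {
		struct sk_buff *skb;
		u64		mapping;	/* single mapping: CM skbs are linear */
	};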
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_tx *tx = wc->qp->qp_context;
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
- struct ipoib_tx_buf *tx_req;
+ struct ipoib_cm_tx_buf *tx_req;
unsigned long flags;
ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
tx_req = &tx->tx_ring[wr_id];
- ib_dma_unmap_single(priv->ca, tx_req->mapping[0], tx_req->skb->len, DMA_TO_DEVICE);
+ ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
/* FIXME: is this right? Shouldn't we only increment on success? */
++dev->stats.tx_packets;
dev_kfree_skb_any(tx_req->skb);
- spin_lock_irqsave(&priv->tx_lock, flags);
+ netif_tx_lock(dev);
+
++tx->tx_tail;
if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
netif_queue_stopped(dev) &&
"(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err);
- spin_lock(&priv->lock);
+ spin_lock_irqsave(&priv->lock, flags);
neigh = tx->neigh;
if (neigh) {
clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
- spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
- spin_unlock_irqrestore(&priv->tx_lock, flags);
+ netif_tx_unlock(dev);
}
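This is the heart of the locking conversion: the driver-private priv->tx_lock goes away in favor of the core netif_tx_lock(), and priv->lock now always nests inside it, taken with spin_lock_irqsave(). Keeping that single nesting order on every path in this patch is what makes the conversion deadlock-free; schematically:

	netif_tx_lock(dev);			/* _bh variant from process context */
	spin_lock_irqsave(&priv->lock, flags);
	/* ... */
	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock(dev);			/* _bh variant from process context */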
int ipoib_cm_dev_open(struct net_device *dev)
goto err_send_cm;
}
- ipoib_dbg(priv, "Request connection 0x%x for gid " IPOIB_GID_FMT " qpn 0x%x\n",
- p->qp->qp_num, IPOIB_GID_ARG(pathrec->dgid), qpn);
+ ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
+ p->qp->qp_num, pathrec->dgid.raw, qpn);
return 0;
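%pI6 is the generic printk extension for printing 16 bytes as an IPv6-style address, which lets the driver-private IPOIB_GID_FMT/IPOIB_GID_ARG macros go away; the raw member of union ib_gid is exactly the 16-byte array the format expects. For example:

	ipoib_dbg(priv, "gid %pI6\n", pathrec->dgid.raw);
	/* -> "gid fe80:0000:0000:0000:0002:c903:0000:1234" (sample value) */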
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
struct ipoib_dev_priv *priv = netdev_priv(p->dev);
- struct ipoib_tx_buf *tx_req;
- unsigned long flags;
+ struct ipoib_cm_tx_buf *tx_req;
unsigned long begin;
ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
while ((int) p->tx_tail - (int) p->tx_head < 0) {
tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
- ib_dma_unmap_single(priv->ca, tx_req->mapping[0], tx_req->skb->len,
+ ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
DMA_TO_DEVICE);
dev_kfree_skb_any(tx_req->skb);
++p->tx_tail;
- spin_lock_irqsave(&priv->tx_lock, flags);
+ netif_tx_lock_bh(p->dev);
if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
netif_queue_stopped(p->dev) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
netif_wake_queue(p->dev);
- spin_unlock_irqrestore(&priv->tx_lock, flags);
+ netif_tx_unlock_bh(p->dev);
}
if (p->qp)
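Note the _bh variants here, against the plain netif_tx_lock() in the completion handler above: ipoib_cm_tx_destroy() runs in process context (called from the reap worker), so it must disable bottom halves itself, while the completion handler presumably already runs in softirq context via the CQ polling path.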
struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
struct net_device *dev = priv->dev;
struct ipoib_neigh *neigh;
+ unsigned long flags;
int ret;
switch (event->event) {
case IB_CM_REJ_RECEIVED:
case IB_CM_TIMEWAIT_EXIT:
ipoib_dbg(priv, "CM error %d.\n", event->event);
- spin_lock_irq(&priv->tx_lock);
- spin_lock(&priv->lock);
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
neigh = tx->neigh;
if (neigh) {
queue_work(ipoib_workqueue, &priv->cm.reap_task);
}
- spin_unlock(&priv->lock);
- spin_unlock_irq(&priv->tx_lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
break;
default:
break;
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
list_move(&tx->list, &priv->cm.reap_list);
queue_work(ipoib_workqueue, &priv->cm.reap_task);
- ipoib_dbg(priv, "Reap connection for gid " IPOIB_GID_FMT "\n",
- IPOIB_GID_ARG(tx->neigh->dgid));
+ ipoib_dbg(priv, "Reap connection for gid %pI6\n",
+ tx->neigh->dgid.raw);
tx->neigh = NULL;
}
}
struct ib_sa_path_rec pathrec;
u32 qpn;
- spin_lock_irqsave(&priv->tx_lock, flags);
- spin_lock(&priv->lock);
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+
while (!list_empty(&priv->cm.start_list)) {
p = list_entry(priv->cm.start_list.next, typeof(*p), list);
list_del_init(&p->list);
neigh = p->neigh;
qpn = IPOIB_QPN(neigh->neighbour->ha);
memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
- spin_unlock(&priv->lock);
- spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+
ret = ipoib_cm_tx_init(p, qpn, &pathrec);
- spin_lock_irqsave(&priv->tx_lock, flags);
- spin_lock(&priv->lock);
+
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+
if (ret) {
neigh = p->neigh;
if (neigh) {
kfree(p);
}
}
- spin_unlock(&priv->lock);
- spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
}
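The unlock/relock dance around ipoib_cm_tx_init() is deliberate: setting up a connection creates a QP and a CM ID, which can sleep, so neither netif_tx_lock_bh() nor a spinlock may be held across the call. Everything read under the locks (the list entry, neigh, qpn, pathrec) is copied out first, and the state is revalidated once the locks are retaken.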
static void ipoib_cm_tx_reap(struct work_struct *work)
{
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
cm.reap_task);
+ struct net_device *dev = priv->dev;
struct ipoib_cm_tx *p;
+ unsigned long flags;
+
- spin_lock_irq(&priv->tx_lock);
- spin_lock(&priv->lock);
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
while (!list_empty(&priv->cm.reap_list)) {
p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
list_del(&p->list);
- spin_unlock(&priv->lock);
- spin_unlock_irq(&priv->tx_lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
ipoib_cm_tx_destroy(p);
- spin_lock_irq(&priv->tx_lock);
- spin_lock(&priv->lock);
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
}
- spin_unlock(&priv->lock);
- spin_unlock_irq(&priv->tx_lock);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
}
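Same pattern as in tx_start above: ipoib_cm_tx_destroy() blocks (note its drain loop with the `begin` timeout), so the reap loop pops one entry, drops both locks, destroys the connection, and relocks before touching the list again.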
static void ipoib_cm_skb_reap(struct work_struct *work)
{
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
cm.skb_task);
+ struct net_device *dev = priv->dev;
struct sk_buff *skb;
-
+ unsigned long flags;
unsigned mtu = priv->mcast_mtu;
- spin_lock_irq(&priv->tx_lock);
- spin_lock(&priv->lock);
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+
while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
- spin_unlock(&priv->lock);
- spin_unlock_irq(&priv->tx_lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
+
if (skb->protocol == htons(ETH_P_IP))
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev);
#endif
dev_kfree_skb_any(skb);
- spin_lock_irq(&priv->tx_lock);
- spin_lock(&priv->lock);
+
+ netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&priv->lock, flags);
}
- spin_unlock(&priv->lock);
- spin_unlock_irq(&priv->tx_lock);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_tx_unlock_bh(dev);
}
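And once more for the skb queue: the ICMP helpers do nontrivial work and take their own locks, so each skb is sent with the driver locks dropped. skb_dequeue() is safe here without priv->lock because sk_buff_head carries its own internal spinlock.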
void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
ipoib_warn(priv, "enabling connected mode "
"will cause multicast packet drops\n");
+ rtnl_lock();
dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
+ rtnl_unlock();
priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
ipoib_flush_paths(dev);
if (!strcmp(buf, "datagram\n")) {
clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
- dev->mtu = min(priv->mcast_mtu, dev->mtu);
- ipoib_flush_paths(dev);
+ rtnl_lock();
if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
if (priv->hca_caps & IB_DEVICE_UD_TSO)
dev->features |= NETIF_F_TSO;
}
+ dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
+ rtnl_unlock();
+ ipoib_flush_paths(dev);
return count;
}
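dev_set_mtu() fires the NETDEV_CHANGEMTU notifier chain and must be called with the RTNL held, which is why the raw dev->mtu assignment is replaced and why the feature-flag updates in both branches move under rtnl_lock()/rtnl_unlock() as well.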
return;
}
- priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring,
- GFP_KERNEL);
+ priv->cm.srq_ring = vmalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
if (!priv->cm.srq_ring) {
printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
priv->ca->name, ipoib_recvq_size);
ib_destroy_srq(priv->cm.srq);
priv->cm.srq = NULL;
+ return;
}
+
+ memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
}
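The added return is more than style: with the memset() now placed after the if block, falling through on allocation failure would write through the NULL srq_ring pointer. The vmalloc()+memset() pairing mirrors the non-SRQ ring allocation above.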
int ipoib_cm_dev_init(struct net_device *dev)