net: Move && and || to end of previous line
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 69a6289..4b0da86 100644
@@ -139,7 +139,7 @@ int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
        return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
 }
 
-int rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
+void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
 {
        struct rds_ib_connection *ic = conn->c_transport_data;
 
@@ -148,51 +148,47 @@ int rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn
        BUG_ON(list_empty(&ib_nodev_conns));
        BUG_ON(list_empty(&ic->ib_node));
        list_del(&ic->ib_node);
-       spin_unlock_irq(&ib_nodev_conns_lock);
 
        spin_lock_irq(&rds_ibdev->spinlock);
        list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
        spin_unlock_irq(&rds_ibdev->spinlock);
+       spin_unlock_irq(&ib_nodev_conns_lock);
 
        ic->rds_ibdev = rds_ibdev;
-
-       return 0;
 }
 
-void rds_ib_remove_nodev_conns(void)
+void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
 {
-       struct rds_ib_connection *ic, *_ic;
-       LIST_HEAD(tmp_list);
+       struct rds_ib_connection *ic = conn->c_transport_data;
 
-       /* avoid calling conn_destroy with irqs off */
-       spin_lock_irq(&ib_nodev_conns_lock);
-       list_splice(&ib_nodev_conns, &tmp_list);
-       INIT_LIST_HEAD(&ib_nodev_conns);
-       spin_unlock_irq(&ib_nodev_conns_lock);
+       /* place conn on nodev_conns_list */
+       spin_lock(&ib_nodev_conns_lock);
 
-       list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node) {
-               if (ic->conn->c_passive)
-                       rds_conn_destroy(ic->conn->c_passive);
-               rds_conn_destroy(ic->conn);
-       }
+       spin_lock_irq(&rds_ibdev->spinlock);
+       BUG_ON(list_empty(&ic->ib_node));
+       list_del(&ic->ib_node);
+       spin_unlock_irq(&rds_ibdev->spinlock);
+
+       list_add_tail(&ic->ib_node, &ib_nodev_conns);
+
+       spin_unlock(&ib_nodev_conns_lock);
+
+       ic->rds_ibdev = NULL;
 }
 
-void rds_ib_remove_conns(struct rds_ib_device *rds_ibdev)
+void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock)
 {
        struct rds_ib_connection *ic, *_ic;
        LIST_HEAD(tmp_list);
 
        /* avoid calling conn_destroy with irqs off */
-       spin_lock_irq(&rds_ibdev->spinlock);
-       list_splice(&rds_ibdev->conn_list, &tmp_list);
-       INIT_LIST_HEAD(&rds_ibdev->conn_list);
-       spin_unlock_irq(&rds_ibdev->spinlock);
+       spin_lock_irq(list_lock);
+       list_splice(list, &tmp_list);
+       INIT_LIST_HEAD(list);
+       spin_unlock_irq(list_lock);
 
-       list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node) {
-               if (ic->conn->c_passive)
-                       rds_conn_destroy(ic->conn->c_passive);
+       list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
                rds_conn_destroy(ic->conn);
-       }
 }
 
 struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
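Two things change in the hunk above. First, rds_ib_add_conn now keeps ib_nodev_conns_lock held while it inserts the connection into rds_ibdev->conn_list, so there is no window in which the connection sits on neither list; the new rds_ib_remove_conn takes the same two locks in the same order (nodev lock outer, device spinlock inner), keeping the lock ordering consistent between the two paths. Second, the two near-identical destroy loops collapse into __rds_ib_destroy_conns, which splices the victim list onto a private head under the lock and only then, with interrupts re-enabled, destroys each connection. A minimal user-space sketch of that splice-then-process idiom, using a pthread mutex in place of the spinlock (all names here are hypothetical, not the driver's):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
	struct conn *next;
	int id;
};

static struct conn *conn_list;		/* shared list, protected by list_lock */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void destroy_conns(void)
{
	struct conn *tmp, *c;

	/* Analogue of list_splice() + INIT_LIST_HEAD(): steal the whole
	 * list under the lock, leaving the shared head empty. */
	pthread_mutex_lock(&list_lock);
	tmp = conn_list;
	conn_list = NULL;
	pthread_mutex_unlock(&list_lock);

	/* The slow per-entry teardown runs with the lock dropped, so
	 * other threads may keep adding connections meanwhile. */
	while (tmp) {
		c = tmp;
		tmp = c->next;
		printf("destroying conn %d\n", c->id);
		free(c);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct conn *c = malloc(sizeof(*c));
		c->id = i;
		pthread_mutex_lock(&list_lock);
		c->next = conn_list;
		conn_list = c;
		pthread_mutex_unlock(&list_lock);
	}
	destroy_conns();
	return 0;
}

Stealing the whole list in O(1) keeps the lock hold time constant no matter how many connections are queued, which is the same reason the kernel code uses list_splice() rather than popping entries one at a time.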
@@ -212,7 +208,7 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
 
        pool->fmr_attr.max_pages = fmr_message_size;
        pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
-       pool->fmr_attr.page_shift = rds_ibdev->fmr_page_shift;
+       pool->fmr_attr.page_shift = PAGE_SHIFT;
        pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;
 
        /* We never allow more than max_items MRs to be allocated.
@@ -350,13 +346,13 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
                unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
 
-               if (dma_addr & ~rds_ibdev->fmr_page_mask) {
+               if (dma_addr & ~PAGE_MASK) {
                        if (i > 0)
                                return -EINVAL;
                        else
                                ++page_cnt;
                }
-               if ((dma_addr + dma_len) & ~rds_ibdev->fmr_page_mask) {
+               if ((dma_addr + dma_len) & ~PAGE_MASK) {
                        if (i < sg_dma_len - 1)
                                return -EINVAL;
                        else
@@ -366,7 +362,7 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
                len += dma_len;
        }
 
-       page_cnt += len >> rds_ibdev->fmr_page_shift;
+       page_cnt += len >> PAGE_SHIFT;
        if (page_cnt > fmr_message_size)
                return -EINVAL;
 
@@ -379,9 +375,9 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
                unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
 
-               for (j = 0; j < dma_len; j += rds_ibdev->fmr_page_size)
+               for (j = 0; j < dma_len; j += PAGE_SIZE)
                        dma_pages[page_cnt++] =
-                               (dma_addr & rds_ibdev->fmr_page_mask) + j;
+                               (dma_addr & PAGE_MASK) + j;
        }
 
        ret = ib_map_phys_fmr(ibmr->fmr,
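The three hunks above finish removing the per-device fmr_page_shift/fmr_page_mask/fmr_page_size fields: both the FMR pool setup and the mapping path now use the host's PAGE_SHIFT, PAGE_MASK and PAGE_SIZE directly. The alignment rule the loop enforces is that only the first scatterlist segment may start, and only the last may end, off a page boundary; any interior misalignment rejects the mapping with -EINVAL. A small user-space sketch of the same arithmetic, with a hardcoded 4 KiB page (hypothetical names, not the driver's code):

#include <stdio.h>
#include <stdint.h>

#define SHIFT	12			/* assume 4 KiB pages */
#define MASK	(~((1ULL << SHIFT) - 1))

struct seg { uint64_t addr, len; };	/* stand-in for one dma segment */

/* Mirrors the checks in the hunks above: count pages, or return -1
 * when an interior segment is not page-aligned. */
static long count_pages(const struct seg *s, int n)
{
	uint64_t len = 0;
	long page_cnt = 0;

	for (int i = 0; i < n; i++) {
		if (s[i].addr & ~MASK) {
			if (i > 0)
				return -1;	/* only seg 0 may start unaligned */
			page_cnt++;
		}
		if ((s[i].addr + s[i].len) & ~MASK) {
			if (i < n - 1)
				return -1;	/* only the last may end unaligned */
			page_cnt++;
		}
		len += s[i].len;
	}
	return page_cnt + (long)(len >> SHIFT);
}

int main(void)
{
	/* 2 aligned pages, then 1.5 pages ending mid-page: 4 pages total */
	struct seg segs[] = { { 0x1000, 0x2000 }, { 0x8000, 0x1800 } };
	printf("pages: %ld\n", count_pages(segs, 2));
	return 0;
}

Presumably the host page size is now safe to use everywhere because rds_ib_create_mr_pool (earlier hunk) sets fmr_attr.page_shift = PAGE_SHIFT, so the page-aligned addresses fed to ib_map_phys_fmr() match the page size the HCA was told to expect.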
@@ -574,8 +570,8 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
        spin_unlock_irqrestore(&pool->list_lock, flags);
 
        /* If we've pinned too many pages, request a flush */
-       if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned
-        || atomic_read(&pool->dirty_count) >= pool->max_items / 10)
+       if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
+           atomic_read(&pool->dirty_count) >= pool->max_items / 10)
                queue_work(rds_wq, &pool->flush_worker);
 
        if (invalidate) {
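Only this final hunk is the change the subject line advertises: kernel coding style (checkpatch.pl's "Logical continuations should be on the previous line" warning) wants a multi-line condition broken after the operator, not before it, e.g. (hypothetical condition names):

	if (first_long_condition(x) &&
	    second_long_condition(y))
		do_work();

The continuation line is then indented to line up under the opening parenthesis, exactly as the new version of the rds_ib_free_mr() test above does.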