net: Move && and || to end of previous line
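
The preferred placement is illustrated below with a minimal, hypothetical
helper (not part of this patch; the function name and thresholds are made
up for illustration). Logical operators such as && and || end the first
line of a multi-line condition, and the continuation is aligned beneath it:

	#include <stdbool.h>

	/* Hypothetical sketch of the operator-at-end-of-line style. */
	static bool needs_flush(unsigned int free_pinned,
				unsigned int max_free_pinned,
				unsigned int dirty_count,
				unsigned int max_items)
	{
		/* || trails the line; the continuation is indented below it. */
		return free_pinned >= max_free_pinned ||
		       dirty_count >= max_items / 10;
	}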
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 81033af..4b0da86 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -187,11 +187,8 @@ void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock)
        INIT_LIST_HEAD(list);
        spin_unlock_irq(list_lock);
 
-       list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node) {
-               if (ic->conn->c_passive)
-                       rds_conn_destroy(ic->conn->c_passive);
+       list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
                rds_conn_destroy(ic->conn);
-       }
 }
 
 struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
@@ -211,7 +208,7 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
 
        pool->fmr_attr.max_pages = fmr_message_size;
        pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
-       pool->fmr_attr.page_shift = rds_ibdev->fmr_page_shift;
+       pool->fmr_attr.page_shift = PAGE_SHIFT;
        pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;
 
        /* We never allow more than max_items MRs to be allocated.
@@ -349,13 +346,13 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
                unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
 
-               if (dma_addr & ~rds_ibdev->fmr_page_mask) {
+               if (dma_addr & ~PAGE_MASK) {
                        if (i > 0)
                                return -EINVAL;
                        else
                                ++page_cnt;
                }
-               if ((dma_addr + dma_len) & ~rds_ibdev->fmr_page_mask) {
+               if ((dma_addr + dma_len) & ~PAGE_MASK) {
                        if (i < sg_dma_len - 1)
                                return -EINVAL;
                        else
@@ -365,7 +362,7 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
                len += dma_len;
        }
 
-       page_cnt += len >> rds_ibdev->fmr_page_shift;
+       page_cnt += len >> PAGE_SHIFT;
        if (page_cnt > fmr_message_size)
                return -EINVAL;
 
@@ -378,9 +375,9 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
                unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
 
-               for (j = 0; j < dma_len; j += rds_ibdev->fmr_page_size)
+               for (j = 0; j < dma_len; j += PAGE_SIZE)
                        dma_pages[page_cnt++] =
-                               (dma_addr & rds_ibdev->fmr_page_mask) + j;
+                               (dma_addr & PAGE_MASK) + j;
        }
 
        ret = ib_map_phys_fmr(ibmr->fmr,
@@ -573,8 +570,8 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
        spin_unlock_irqrestore(&pool->list_lock, flags);
 
        /* If we've pinned too many pages, request a flush */
-       if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned
-        || atomic_read(&pool->dirty_count) >= pool->max_items / 10)
+       if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
+           atomic_read(&pool->dirty_count) >= pool->max_items / 10)
                queue_work(rds_wq, &pool->flush_worker);
 
        if (invalidate) {