diff --git a/block/blk-settings.c b/block/blk-settings.c
index 13536a3..dfc7701 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -14,7 +14,6 @@ unsigned long blk_max_low_pfn;
 EXPORT_SYMBOL(blk_max_low_pfn);
 
 unsigned long blk_max_pfn;
-EXPORT_SYMBOL(blk_max_pfn);
 
 /**
  * blk_queue_prep_rq - set a prepare_request function for queue
@@ -140,7 +139,7 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
        /* Assume anything <= 4GB can be handled by IOMMU.
           Actually some IOMMUs can handle everything, but I don't
           know of a way to test this here. */
-       if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+       if (b_pfn < (min_t(u64, 0x100000000UL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
        q->bounce_pfn = max_low_pfn;
 #else
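
The constant change above removes an off-by-one page in the 4 GB boundary test: shifting 0xffffffff right by PAGE_SHIFT lands one page short of the true 4 GB page-frame limit, so the last page below 4 GB was treated as needing bouncing. A minimal user-space sketch of the arithmetic, assuming 4 KB pages (PAGE_SHIFT == 12); the program is illustrative only:

#include <stdio.h>

int main(void)
{
	/* Illustrative only, assuming PAGE_SHIFT == 12 (4 KB pages). */
	unsigned long long old_limit = 0xffffffffULL  >> 12;	/* 0x0fffff */
	unsigned long long new_limit = 0x100000000ULL >> 12;	/* 0x100000 */

	/* pfn 0xfffff maps 0xfffff000..0xffffffff, entirely below 4 GB.
	 * The old test (b_pfn < old_limit) rejected it; the new test
	 * (b_pfn < new_limit) accepts it. */
	printf("old limit %#llx, new limit %#llx\n", old_limit, new_limit);
	return 0;
}
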
@@ -169,8 +168,8 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 {
        if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
                max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
-               printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-                                                       max_sectors);
+               printk(KERN_INFO "%s: set to minimum %d\n",
+                      __func__, max_sectors);
        }
 
        if (BLK_DEF_MAX_SECTORS > max_sectors)
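
For reference, the clamp at the top of this hunk keeps max_sectors at no less than one page-cache page; a sketch of the shift arithmetic, assuming the common PAGE_CACHE_SHIFT of 12:

	/* Illustrative only: with PAGE_CACHE_SHIFT == 12, the enforced minimum
	 * is 1 << (12 - 9) == 8 sectors of 512 bytes, i.e. one 4 KB page. */
	unsigned int min_sectors = 1 << (12 - 9);	/* 8 */
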
@@ -197,8 +196,8 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 {
        if (!max_segments) {
                max_segments = 1;
-               printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-                                                       max_segments);
+               printk(KERN_INFO "%s: set to minimum %d\n",
+                      __func__, max_segments);
        }
 
        q->max_phys_segments = max_segments;
@@ -221,8 +220,8 @@ void blk_queue_max_hw_segments(struct request_queue *q,
 {
        if (!max_segments) {
                max_segments = 1;
-               printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-                                                       max_segments);
+               printk(KERN_INFO "%s: set to minimum %d\n",
+                      __func__, max_segments);
        }
 
        q->max_hw_segments = max_segments;
@@ -242,8 +241,8 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
        if (max_size < PAGE_CACHE_SIZE) {
                max_size = PAGE_CACHE_SIZE;
-               printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-                                                       max_size);
+               printk(KERN_INFO "%s: set to minimum %d\n",
+                      __func__, max_size);
        }
 
        q->max_segment_size = max_size;
@@ -287,15 +286,54 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
        t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
        t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
        t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
-       if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-               clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
+       if (!t->queue_lock)
+               WARN_ON_ONCE(1);
+       else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
+               unsigned long flags;
+               spin_lock_irqsave(t->queue_lock, flags);
+               queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
+               spin_unlock_irqrestore(t->queue_lock, flags);
+       }
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
 /**
- * blk_queue_dma_drain - Set up a drain buffer for excess dma.
+ * blk_queue_dma_pad - set pad mask
+ * @q:     the request queue for the device
+ * @mask:  pad mask
+ *
+ * Set dma pad mask.
+ *
+ * Appending pad buffer to a request modifies the last entry of a
+ * scatter list such that it includes the pad buffer.
+ **/
+void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
+{
+       q->dma_pad_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_dma_pad);
+
+/**
+ * blk_queue_update_dma_pad - update pad mask
+ * @q:     the request queue for the device
+ * @mask:  pad mask
+ *
+ * Update dma pad mask.
  *
+ * Appending pad buffer to a request modifies the last entry of a
+ * scatter list such that it includes the pad buffer.
+ **/
+void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
+{
+       if (mask > q->dma_pad_mask)
+               q->dma_pad_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_update_dma_pad);
+
+/**
+ * blk_queue_dma_drain - Set up a drain buffer for excess dma.
  * @q:  the request queue for the device
+ * @dma_drain_needed: fn which returns non-zero if drain is necessary
  * @buf:       physically contiguous buffer
  * @size:      size of the buffer in bytes
  *
@@ -315,14 +353,16 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * device can support otherwise there won't be room for the drain
  * buffer.
  */
-int blk_queue_dma_drain(struct request_queue *q, void *buf,
-                               unsigned int size)
+int blk_queue_dma_drain(struct request_queue *q,
+                              dma_drain_needed_fn *dma_drain_needed,
+                              void *buf, unsigned int size)
 {
        if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
                return -EINVAL;
        /* make room for appending the drain */
        --q->max_hw_segments;
        --q->max_phys_segments;
+       q->dma_drain_needed = dma_drain_needed;
        q->dma_drain_buffer = buf;
        q->dma_drain_size = size;
 
@@ -339,8 +379,8 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
        if (mask < PAGE_CACHE_SIZE - 1) {
                mask = PAGE_CACHE_SIZE - 1;
-               printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__,
-                                                       mask);
+               printk(KERN_INFO "%s: set to minimum %lx\n",
+                      __func__, mask);
        }
 
        q->seg_boundary_mask = mask;
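
As a usage note (not part of this patch): hardware whose DMA engine cannot cross, say, a 64 KB boundary would pass a mask well above the PAGE_CACHE_SIZE - 1 minimum enforced above; a one-line sketch with an assumed controller limit:

	/* Illustrative only: keep every segment within a 64 KB window. */
	blk_queue_segment_boundary(q, 0xffffUL);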