dma-debug: add allocator code
author    Joerg Roedel <joerg.roedel@amd.com>
          Fri, 9 Jan 2009 11:42:46 +0000 (12:42 +0100)
committer Joerg Roedel <joerg.roedel@amd.com>
          Thu, 5 Mar 2009 19:35:15 +0000 (20:35 +0100)
Impact: add allocator code for struct dma_debug_entry

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 5ff7d2e..b609146 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -52,6 +52,16 @@ struct hash_bucket {
 
 /* Hash list to save the allocated dma addresses */
 static struct hash_bucket dma_entry_hash[HASH_SIZE];
+/* List of pre-allocated dma_debug_entry's */
+static LIST_HEAD(free_entries);
+/* Lock for the list above */
+static DEFINE_SPINLOCK(free_entries_lock);
+
+/* Global disable flag - will be set in case of an error */
+static bool global_disable __read_mostly;
+
+static u32 num_free_entries;
+static u32 min_free_entries;
 
 /*
  * Hash related functions
@@ -141,3 +151,50 @@ static void add_dma_entry(struct dma_debug_entry *entry)
        put_hash_bucket(bucket, &flags);
 }
 
+/* struct dma_debug_entry allocator
+ *
+ * The next two functions implement the allocator for
+ * struct dma_debug_entry.
+ */
+static struct dma_debug_entry *dma_entry_alloc(void)
+{
+       struct dma_debug_entry *entry = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&free_entries_lock, flags);
+
+       if (list_empty(&free_entries)) {
+               printk(KERN_ERR "DMA-API: debugging out of memory "
+                               "- disabling\n");
+               global_disable = true;
+               goto out;
+       }
+
+       entry = list_entry(free_entries.next, struct dma_debug_entry, list);
+       list_del(&entry->list);
+       memset(entry, 0, sizeof(*entry));
+
+       num_free_entries -= 1;
+       if (num_free_entries < min_free_entries)
+               min_free_entries = num_free_entries;
+
+out:
+       spin_unlock_irqrestore(&free_entries_lock, flags);
+
+       return entry;
+}
+
+static void dma_entry_free(struct dma_debug_entry *entry)
+{
+       unsigned long flags;
+
+       /*
+        * add to beginning of the list - this way the entries are
+        * more likely cache hot when they are reallocated.
+        */
+       spin_lock_irqsave(&free_entries_lock, flags);
+       list_add(&entry->list, &free_entries);
+       num_free_entries += 1;
+       spin_unlock_irqrestore(&free_entries_lock, flags);
+}
+
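Not part of this patch: the free list above starts empty, and min_free_entries starts at zero, so the low-watermark tracking in dma_entry_alloc only becomes meaningful once initialization fills the list and sets the watermark. A minimal sketch of such a preallocation step, assuming a kzalloc-based setup run once at boot before any concurrent access (the name prealloc_memory and the num_entries parameter are illustrative, not taken from this commit):

static int prealloc_memory(u32 num_entries)
{
	struct dma_debug_entry *entry, *next_entry;
	int i;

	/* runs single-threaded at init time, so no lock is taken here */
	for (i = 0; i < num_entries; ++i) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto out_err;

		list_add_tail(&entry->list, &free_entries);
	}

	num_free_entries = num_entries;
	min_free_entries = num_entries;

	return 0;

out_err:
	/* free whatever was allocated before the failure */
	list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	return -ENOMEM;
}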
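Also not part of this patch: a sketch of how a later checking function might pair the allocator with add_dma_entry from the previous hunk. The function name debug_dma_map and the dev/dev_addr/size fields are assumptions about struct dma_debug_entry, which is defined elsewhere:

static void debug_dma_map(struct device *dev, dma_addr_t dev_addr, u64 size)
{
	struct dma_debug_entry *entry;

	if (global_disable)
		return;

	entry = dma_entry_alloc();
	if (!entry)
		/* allocator ran dry and set global_disable itself */
		return;

	/* hypothetical fields, for illustration only */
	entry->dev      = dev;
	entry->dev_addr = dev_addr;
	entry->size     = size;

	/* hash the entry so a later unmap can look it up and check it */
	add_dma_entry(entry);
}

The matching teardown path would look the entry up in the hash, unlink it, and hand it back with dma_entry_free, which returns it to the head of the free list so the next allocation is likely to hit a cache-hot entry.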