Merge commit 'v2.6.30' into for-2.6.31
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index e73ac1a..7f9b5fa 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -2,16 +2,16 @@
  * Implementation of the kernel access vector cache (AVC).
  *
  * Authors:  Stephen Smalley, <sds@epoch.ncsc.mil>
- *           James Morris <jmorris@redhat.com>
+ *          James Morris <jmorris@redhat.com>
  *
  * Update:   KaiGai, Kohei <kaigai@ak.jp.nec.com>
- *     Replaced the avc_lock spinlock by RCU.
+ *     Replaced the avc_lock spinlock by RCU.
  *
  * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
  *
  *     This program is free software; you can redistribute it and/or modify
  *     it under the terms of the GNU General Public License version 2,
- *      as published by the Free Software Foundation.
+ *     as published by the Free Software Foundation.
  */
 #include <linux/types.h>
 #include <linux/stddef.h>
@@ -44,7 +44,7 @@ static const char *class_to_string[] = {
 #undef S_
 };
 
-#define TB_(s) static const char * s [] = {
+#define TB_(s) static const char *s[] = {
 #define TE_(s) };
 #define S_(s) s,
 #include "common_perm_to_string.h"
@@ -53,18 +53,20 @@ static const char *class_to_string[] = {
 #undef S_
 
 static const struct av_inherit av_inherit[] = {
-#define S_(c, i, b) { c, common_##i##_perm_to_string, b },
+#define S_(c, i, b) {  .tclass = c,\
+                       .common_pts = common_##i##_perm_to_string,\
+                       .common_base =  b },
 #include "av_inherit.h"
 #undef S_
 };
 
 const struct selinux_class_perm selinux_class_perm = {
-       av_perm_to_string,
-       ARRAY_SIZE(av_perm_to_string),
-       class_to_string,
-       ARRAY_SIZE(class_to_string),
-       av_inherit,
-       ARRAY_SIZE(av_inherit)
+       .av_perm_to_string = av_perm_to_string,
+       .av_pts_len = ARRAY_SIZE(av_perm_to_string),
+       .class_to_string = class_to_string,
+       .cts_len = ARRAY_SIZE(class_to_string),
+       .av_inherit = av_inherit,
+       .av_inherit_len = ARRAY_SIZE(av_inherit)
 };
 
 #define AVC_CACHE_SLOTS                        512
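
Note: the S_() macro and the selinux_class_perm definition above move from positional to C99 designated initializers, so each value is bound to a named field and reordering the struct members can no longer silently misassign them. A minimal standalone sketch of the same idiom (the struct and field names here are illustrative, not from this file):

	struct perm_table {
		u16		tclass;
		const char	**perm_names;
		u32		common_base;
	};

	static const char *file_perms[] = { "read", "write", "execute" };

	static const struct perm_table example_tables[] = {
		{ .tclass = 6, .perm_names = file_perms, .common_base = 0x00100000 },
	};
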
@@ -72,7 +74,7 @@ const struct selinux_class_perm selinux_class_perm = {
 #define AVC_CACHE_RECLAIM              16
 
 #ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
-#define avc_cache_stats_incr(field)                            \
+#define avc_cache_stats_incr(field)                            \
 do {                                                           \
        per_cpu(avc_cache_stats, get_cpu()).field++;            \
        put_cpu();                                              \
@@ -86,17 +88,16 @@ struct avc_entry {
        u32                     tsid;
        u16                     tclass;
        struct av_decision      avd;
-       atomic_t                used;   /* used recently */
 };
 
 struct avc_node {
        struct avc_entry        ae;
-       struct list_head        list;
-       struct rcu_head         rhead;
+       struct hlist_node       list; /* anchored in avc_cache->slots[i] */
+       struct rcu_head         rhead;
 };
 
 struct avc_cache {
-       struct list_head        slots[AVC_CACHE_SLOTS];
+       struct hlist_head       slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
        spinlock_t              slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
        atomic_t                lru_hint;       /* LRU hint for reclaim scan */
        atomic_t                active_nodes;
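
Note: the cache buckets switch from struct list_head (two pointers per head) to struct hlist_head (one pointer per head), halving the per-slot head size across the 512-entry table, while each entry carries a struct hlist_node. A minimal sketch of the pairing, with illustrative names (hlist_add_head_rcu() lives in <linux/rculist.h> on this tree):

	#include <linux/rculist.h>

	struct bucket_entry {
		int			key;
		struct hlist_node	link;	/* chained off one bucket head */
	};

	static struct hlist_head buckets[512];	/* one pointer each vs. two for list_head */

	static void bucket_add(struct bucket_entry *e, unsigned int hash)
	{
		hlist_add_head_rcu(&e->link, &buckets[hash & 511]);
	}
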
@@ -105,8 +106,8 @@ struct avc_cache {
 
 struct avc_callback_node {
        int (*callback) (u32 event, u32 ssid, u32 tsid,
-                        u16 tclass, u32 perms,
-                        u32 *out_retained);
+                        u16 tclass, u32 perms,
+                        u32 *out_retained);
        u32 events;
        u32 ssid;
        u32 tsid;
@@ -124,7 +125,7 @@ DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats) = { 0 };
 
 static struct avc_cache avc_cache;
 static struct avc_callback_node *avc_callbacks;
-static kmem_cache_t *avc_node_cachep;
+static struct kmem_cache *avc_node_cachep;
 
 static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
 {
@@ -136,7 +137,7 @@ static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
  * @tclass: target security class
  * @av: access vector
  */
-static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av)
+void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av)
 {
        const char **common_pts = NULL;
        u32 common_base = 0;
@@ -202,7 +203,7 @@ static void avc_dump_query(struct audit_buffer *ab, u32 ssid, u32 tsid, u16 tcla
        char *scontext;
        u32 scontext_len;
 
-       rc = security_sid_to_context(ssid, &scontext, &scontext_len);
+       rc = security_sid_to_context(ssid, &scontext, &scontext_len);
        if (rc)
                audit_log_format(ab, "ssid=%d", ssid);
        else {
@@ -217,6 +218,8 @@ static void avc_dump_query(struct audit_buffer *ab, u32 ssid, u32 tsid, u16 tcla
                audit_log_format(ab, " tcontext=%s", scontext);
                kfree(scontext);
        }
+
+       BUG_ON(tclass >= ARRAY_SIZE(class_to_string) || !class_to_string[tclass]);
        audit_log_format(ab, " tclass=%s", class_to_string[tclass]);
 }
 
@@ -230,14 +233,14 @@ void __init avc_init(void)
        int i;
 
        for (i = 0; i < AVC_CACHE_SLOTS; i++) {
-               INIT_LIST_HEAD(&avc_cache.slots[i]);
+               INIT_HLIST_HEAD(&avc_cache.slots[i]);
                spin_lock_init(&avc_cache.slots_lock[i]);
        }
        atomic_set(&avc_cache.active_nodes, 0);
        atomic_set(&avc_cache.lru_hint, 0);
 
        avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
-                                            0, SLAB_PANIC, NULL, NULL);
+                                            0, SLAB_PANIC, NULL);
 
        audit_log(current->audit_context, GFP_KERNEL, AUDIT_KERNEL, "AVC INITIALIZED\n");
 }
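
Note: kmem_cache_create() lost its destructor argument with the 2.6.23 slab rework, hence the five-argument call above. A hedged sketch of the resulting call pattern (the cache and function names are illustrative):

	#include <linux/slab.h>

	static struct kmem_cache *example_cachep;

	static int __init example_cache_init(void)
	{
		/* name, object size, alignment, flags, constructor - no dtor */
		example_cachep = kmem_cache_create("example_node",
						   sizeof(struct avc_node),
						   0, SLAB_PANIC, NULL);
		return 0;	/* SLAB_PANIC: a failed allocation never returns here */
	}
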
@@ -246,16 +249,20 @@ int avc_get_hash_stats(char *page)
 {
        int i, chain_len, max_chain_len, slots_used;
        struct avc_node *node;
+       struct hlist_head *head;
 
        rcu_read_lock();
 
        slots_used = 0;
        max_chain_len = 0;
        for (i = 0; i < AVC_CACHE_SLOTS; i++) {
-               if (!list_empty(&avc_cache.slots[i])) {
+               head = &avc_cache.slots[i];
+               if (!hlist_empty(head)) {
+                       struct hlist_node *next;
+
                        slots_used++;
                        chain_len = 0;
-                       list_for_each_entry_rcu(node, &avc_cache.slots[i], list)
+                       hlist_for_each_entry_rcu(node, next, head, list)
                                chain_len++;
                        if (chain_len > max_chain_len)
                                max_chain_len = chain_len;
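
Note: the chain walk above is a lockless RCU reader: rcu_read_lock() plus hlist_for_each_entry_rcu(), which on this tree still takes a separate struct hlist_node cursor (the four-argument form). A minimal sketch of the same pattern against the hypothetical buckets[] from the earlier note:

	static int bucket_chain_len(unsigned int hash)
	{
		struct bucket_entry *e;
		struct hlist_node *pos;
		int n = 0;

		rcu_read_lock();
		hlist_for_each_entry_rcu(e, pos, &buckets[hash & 511], link)
			n++;
		rcu_read_unlock();

		return n;	/* a snapshot; concurrent updates may race with it */
	}
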
@@ -279,7 +286,7 @@ static void avc_node_free(struct rcu_head *rhead)
 
 static void avc_node_delete(struct avc_node *node)
 {
-       list_del_rcu(&node->list);
+       hlist_del_rcu(&node->list);
        call_rcu(&node->rhead, avc_node_free);
        atomic_dec(&avc_cache.active_nodes);
 }
@@ -293,7 +300,7 @@ static void avc_node_kill(struct avc_node *node)
 
 static void avc_node_replace(struct avc_node *new, struct avc_node *old)
 {
-       list_replace_rcu(&old->list, &new->list);
+       hlist_replace_rcu(&old->list, &new->list);
        call_rcu(&old->rhead, avc_node_free);
        atomic_dec(&avc_cache.active_nodes);
 }
@@ -303,26 +310,31 @@ static inline int avc_reclaim_node(void)
        struct avc_node *node;
        int hvalue, try, ecx;
        unsigned long flags;
+       struct hlist_head *head;
+       struct hlist_node *next;
+       spinlock_t *lock;
 
-       for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++ ) {
+       for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
                hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
+               head = &avc_cache.slots[hvalue];
+               lock = &avc_cache.slots_lock[hvalue];
 
-               if (!spin_trylock_irqsave(&avc_cache.slots_lock[hvalue], flags))
+               if (!spin_trylock_irqsave(lock, flags))
                        continue;
 
-               list_for_each_entry(node, &avc_cache.slots[hvalue], list) {
-                       if (atomic_dec_and_test(&node->ae.used)) {
-                               /* Recently Unused */
-                               avc_node_delete(node);
-                               avc_cache_stats_incr(reclaims);
-                               ecx++;
-                               if (ecx >= AVC_CACHE_RECLAIM) {
-                                       spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
-                                       goto out;
-                               }
+               rcu_read_lock();
+               hlist_for_each_entry(node, next, head, list) {
+                       avc_node_delete(node);
+                       avc_cache_stats_incr(reclaims);
+                       ecx++;
+                       if (ecx >= AVC_CACHE_RECLAIM) {
+                               rcu_read_unlock();
+                               spin_unlock_irqrestore(lock, flags);
+                               goto out;
                        }
                }
-               spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
+               rcu_read_unlock();
+               spin_unlock_irqrestore(lock, flags);
        }
 out:
        return ecx;
@@ -332,14 +344,12 @@ static struct avc_node *avc_alloc_node(void)
 {
        struct avc_node *node;
 
-       node = kmem_cache_alloc(avc_node_cachep, SLAB_ATOMIC);
+       node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC);
        if (!node)
                goto out;
 
-       memset(node, 0, sizeof(*node));
        INIT_RCU_HEAD(&node->rhead);
-       INIT_LIST_HEAD(&node->list);
-       atomic_set(&node->ae.used, 1);
+       INIT_HLIST_NODE(&node->list);
        avc_cache_stats_incr(allocations);
 
        if (atomic_inc_return(&avc_cache.active_nodes) > avc_cache_threshold)
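
Note: kmem_cache_alloc(..., SLAB_ATOMIC) plus memset() collapses into kmem_cache_zalloc(..., GFP_ATOMIC), which returns zeroed memory and uses the gfp_t constants instead of the legacy SLAB_* aliases. A hedged sketch of the resulting allocation pattern (the function name is illustrative):

	static struct avc_node *example_alloc_node(void)
	{
		struct avc_node *node;

		/* zeroed allocation, safe in atomic/RCU read-side context */
		node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC);
		if (!node)
			return NULL;

		INIT_RCU_HEAD(&node->rhead);
		INIT_HLIST_NODE(&node->list);
		return node;
	}
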
@@ -349,21 +359,24 @@ out:
        return node;
 }
 
-static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct avc_entry *ae)
+static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd)
 {
        node->ae.ssid = ssid;
        node->ae.tsid = tsid;
        node->ae.tclass = tclass;
-       memcpy(&node->ae.avd, &ae->avd, sizeof(node->ae.avd));
+       memcpy(&node->ae.avd, avd, sizeof(node->ae.avd));
 }
 
 static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
 {
        struct avc_node *node, *ret = NULL;
        int hvalue;
+       struct hlist_head *head;
+       struct hlist_node *next;
 
        hvalue = avc_hash(ssid, tsid, tclass);
-       list_for_each_entry_rcu(node, &avc_cache.slots[hvalue], list) {
+       head = &avc_cache.slots[hvalue];
+       hlist_for_each_entry_rcu(node, next, head, list) {
                if (ssid == node->ae.ssid &&
                    tclass == node->ae.tclass &&
                    tsid == node->ae.tsid) {
@@ -372,15 +385,6 @@ static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
                }
        }
 
-       if (ret == NULL) {
-               /* cache miss */
-               goto out;
-       }
-
-       /* cache hit */
-       if (atomic_read(&ret->ae.used) != 1)
-               atomic_set(&ret->ae.used, 1);
-out:
        return ret;
 }
 
@@ -389,30 +393,25 @@ out:
  * @ssid: source security identifier
  * @tsid: target security identifier
  * @tclass: target security class
- * @requested: requested permissions, interpreted based on @tclass
  *
  * Look up an AVC entry that is valid for the
- * @requested permissions between the SID pair
  * (@ssid, @tsid), interpreting the permissions
  * based on @tclass.  If a valid AVC entry exists,
  * then this function return the avc_node.
  * Otherwise, this function returns NULL.
  */
-static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass, u32 requested)
+static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass)
 {
        struct avc_node *node;
 
        avc_cache_stats_incr(lookups);
        node = avc_search_node(ssid, tsid, tclass);
 
-       if (node && ((node->ae.avd.decided & requested) == requested)) {
+       if (node)
                avc_cache_stats_incr(hits);
-               goto out;
-       }
+       else
+               avc_cache_stats_incr(misses);
 
-       node = NULL;
-       avc_cache_stats_incr(misses);
-out:
        return node;
 }
 
@@ -425,7 +424,7 @@ static int avc_latest_notif_update(int seqno, int is_insert)
        spin_lock_irqsave(&notif_lock, flag);
        if (is_insert) {
                if (seqno < avc_cache.latest_notif) {
-                       printk(KERN_WARNING "avc:  seqno %d < latest_notif %d\n",
+                       printk(KERN_WARNING "SELinux: avc:  seqno %d < latest_notif %d\n",
                               seqno, avc_cache.latest_notif);
                        ret = -EAGAIN;
                }
@@ -443,44 +442,51 @@ static int avc_latest_notif_update(int seqno, int is_insert)
  * @ssid: source security identifier
  * @tsid: target security identifier
  * @tclass: target security class
- * @ae: AVC entry
+ * @avd: resulting av decision
  *
  * Insert an AVC entry for the SID pair
  * (@ssid, @tsid) and class @tclass.
  * The access vectors and the sequence number are
  * normally provided by the security server in
  * response to a security_compute_av() call.  If the
- * sequence number @ae->avd.seqno is not less than the latest
+ * sequence number @avd->seqno is not less than the latest
  * revocation notification, then the function copies
  * the access vectors into a cache entry, returns
  * avc_node inserted. Otherwise, this function returns NULL.
  */
-static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct avc_entry *ae)
+static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd)
 {
        struct avc_node *pos, *node = NULL;
        int hvalue;
        unsigned long flag;
 
-       if (avc_latest_notif_update(ae->avd.seqno, 1))
+       if (avc_latest_notif_update(avd->seqno, 1))
                goto out;
 
        node = avc_alloc_node();
        if (node) {
+               struct hlist_head *head;
+               struct hlist_node *next;
+               spinlock_t *lock;
+
                hvalue = avc_hash(ssid, tsid, tclass);
-               avc_node_populate(node, ssid, tsid, tclass, ae);
+               avc_node_populate(node, ssid, tsid, tclass, avd);
 
-               spin_lock_irqsave(&avc_cache.slots_lock[hvalue], flag);
-               list_for_each_entry(pos, &avc_cache.slots[hvalue], list) {
+               head = &avc_cache.slots[hvalue];
+               lock = &avc_cache.slots_lock[hvalue];
+
+               spin_lock_irqsave(lock, flag);
+               hlist_for_each_entry(pos, next, head, list) {
                        if (pos->ae.ssid == ssid &&
                            pos->ae.tsid == tsid &&
                            pos->ae.tclass == tclass) {
-                               avc_node_replace(node, pos);
+                               avc_node_replace(node, pos);
                                goto found;
                        }
                }
-               list_add_rcu(&node->list, &avc_cache.slots[hvalue]);
+               hlist_add_head_rcu(&node->list, head);
 found:
-               spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flag);
+               spin_unlock_irqrestore(lock, flag);
        }
 out:
        return node;
@@ -491,7 +497,7 @@ static inline void avc_print_ipv6_addr(struct audit_buffer *ab,
                                       char *name1, char *name2)
 {
        if (!ipv6_addr_any(addr))
-               audit_log_format(ab, " %s=" NIP6_FMT, name1, NIP6(*addr));
+               audit_log_format(ab, " %s=%pI6", name1, addr);
        if (port)
                audit_log_format(ab, " %s=%d", name2, ntohs(port));
 }
@@ -500,7 +506,7 @@ static inline void avc_print_ipv4_addr(struct audit_buffer *ab, __be32 addr,
                                       __be16 port, char *name1, char *name2)
 {
        if (addr)
-               audit_log_format(ab, " %s=" NIPQUAD_FMT, name1, NIPQUAD(addr));
+               audit_log_format(ab, " %s=%pI4", name1, &addr);
        if (port)
                audit_log_format(ab, " %s=%d", name2, ntohs(port));
 }
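
Note: the NIPQUAD/NIP6 helper macros are replaced by the %pI4/%pI6 printk extensions; both take a pointer to the address, which is why the __be32 case passes &addr. A minimal sketch (the function name is illustrative):

	#include <linux/in6.h>

	static void example_log_addrs(struct audit_buffer *ab,
				      __be32 v4, const struct in6_addr *v6)
	{
		/* %pI4 and %pI6 dereference the pointer they are handed */
		audit_log_format(ab, " laddr=%pI4 laddr6=%pI6", &v4, v6);
	}
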
@@ -525,8 +531,8 @@ static inline void avc_print_ipv4_addr(struct audit_buffer *ab, __be32 addr,
  * before calling the auditing code.
  */
 void avc_audit(u32 ssid, u32 tsid,
-               u16 tclass, u32 requested,
-               struct av_decision *avd, int result, struct avc_audit_data *a)
+              u16 tclass, u32 requested,
+              struct av_decision *avd, int result, struct avc_audit_data *a)
 {
        struct task_struct *tsk = current;
        struct inode *inode = NULL;
@@ -540,7 +546,7 @@ void avc_audit(u32 ssid, u32 tsid,
                        return;
        } else if (result) {
                audited = denied = requested;
-        } else {
+       } else {
                audited = requested;
                if (!(audited & avd->auditallow))
                        return;
@@ -550,7 +556,7 @@ void avc_audit(u32 ssid, u32 tsid,
        if (!ab)
                return;         /* audit_panic has been called */
        audit_log_format(ab, "avc:  %s ", denied ? "denied" : "granted");
-       avc_dump_av(ab, tclass,audited);
+       avc_dump_av(ab, tclass, audited);
        audit_log_format(ab, " for ");
        if (a && a->tsk)
                tsk = a->tsk;
@@ -567,12 +573,15 @@ void avc_audit(u32 ssid, u32 tsid,
                        audit_log_format(ab, " capability=%d", a->u.cap);
                        break;
                case AVC_AUDIT_DATA_FS:
-                       if (a->u.fs.dentry) {
-                               struct dentry *dentry = a->u.fs.dentry;
-                               if (a->u.fs.mnt)
-                                       audit_avc_path(dentry, a->u.fs.mnt);
-                               audit_log_format(ab, " name=");
-                               audit_log_untrustedstring(ab, dentry->d_name.name);
+                       if (a->u.fs.path.dentry) {
+                               struct dentry *dentry = a->u.fs.path.dentry;
+                               if (a->u.fs.path.mnt) {
+                                       audit_log_d_path(ab, "path=",
+                                                        &a->u.fs.path);
+                               } else {
+                                       audit_log_format(ab, " name=");
+                                       audit_log_untrustedstring(ab, dentry->d_name.name);
+                               }
                                inode = dentry->d_inode;
                        } else if (a->u.fs.inode) {
                                struct dentry *dentry;
@@ -585,7 +594,7 @@ void avc_audit(u32 ssid, u32 tsid,
                                }
                        }
                        if (inode)
-                               audit_log_format(ab, " dev=%s ino=%ld",
+                               audit_log_format(ab, " dev=%s ino=%lu",
                                                 inode->i_sb->s_id,
                                                 inode->i_ino);
                        break;
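
Note: audit_avc_path() is gone; the dentry/vfsmount pair is now wrapped in a struct path and handed to audit_log_d_path(), which logs the full pathname rather than just the final component. A minimal sketch of the idiom (the AF_UNIX hunk below builds the struct path on the stack the same way; the function name is illustrative):

	#include <linux/path.h>

	static void example_log_path(struct audit_buffer *ab,
				     struct dentry *dentry, struct vfsmount *mnt)
	{
		struct path path = { .dentry = dentry, .mnt = mnt };

		audit_log_d_path(ab, "path=", &path);
	}
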
@@ -623,9 +632,12 @@ void avc_audit(u32 ssid, u32 tsid,
                                case AF_UNIX:
                                        u = unix_sk(sk);
                                        if (u->dentry) {
-                                               audit_avc_path(u->dentry, u->mnt);
-                                               audit_log_format(ab, " name=");
-                                               audit_log_untrustedstring(ab, u->dentry->d_name.name);
+                                               struct path path = {
+                                                       .dentry = u->dentry,
+                                                       .mnt = u->mnt
+                                               };
+                                               audit_log_d_path(ab, "path=",
+                                                                &path);
                                                break;
                                        }
                                        if (!u->addr)
@@ -636,11 +648,11 @@ void avc_audit(u32 ssid, u32 tsid,
                                        if (*p)
                                                audit_log_untrustedstring(ab, p);
                                        else
-                                               audit_log_hex(ab, p, len);
+                                               audit_log_n_hex(ab, p, len);
                                        break;
                                }
                        }
-                       
+
                        switch (a->u.net.family) {
                        case AF_INET:
                                avc_print_ipv4_addr(ab, a->u.net.v4info.saddr,
@@ -659,9 +671,18 @@ void avc_audit(u32 ssid, u32 tsid,
                                                    "daddr", "dest");
                                break;
                        }
-                       if (a->u.net.netif)
-                               audit_log_format(ab, " netif=%s",
-                                       a->u.net.netif);
+                       if (a->u.net.netif > 0) {
+                               struct net_device *dev;
+
+                               /* NOTE: we always use init's namespace */
+                               dev = dev_get_by_index(&init_net,
+                                                      a->u.net.netif);
+                               if (dev) {
+                                       audit_log_format(ab, " netif=%s",
+                                                        dev->name);
+                                       dev_put(dev);
+                               }
+                       }
                        break;
                }
        }
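
Note: the audit data now carries an ifindex rather than a device name, so the name is resolved at audit time. dev_get_by_index() takes a reference on the device it returns, which must be dropped with dev_put(); as the comment above says, the lookup is always done against init_net. A hedged sketch of the pattern (the function name is illustrative):

	#include <linux/netdevice.h>
	#include <net/net_namespace.h>

	static void example_log_netif(struct audit_buffer *ab, int ifindex)
	{
		struct net_device *dev;

		dev = dev_get_by_index(&init_net, ifindex);	/* grabs a reference */
		if (!dev)
			return;		/* the interface may already be gone */

		audit_log_format(ab, " netif=%s", dev->name);
		dev_put(dev);		/* drop the reference */
	}
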
@@ -686,10 +707,10 @@ void avc_audit(u32 ssid, u32 tsid,
  * -%ENOMEM if insufficient memory exists to add the callback.
  */
 int avc_add_callback(int (*callback)(u32 event, u32 ssid, u32 tsid,
-                                     u16 tclass, u32 perms,
-                                     u32 *out_retained),
-                     u32 events, u32 ssid, u32 tsid,
-                     u16 tclass, u32 perms)
+                                    u16 tclass, u32 perms,
+                                    u32 *out_retained),
+                    u32 events, u32 ssid, u32 tsid,
+                    u16 tclass, u32 perms)
 {
        struct avc_callback_node *c;
        int rc = 0;
@@ -721,17 +742,22 @@ static inline int avc_sidcmp(u32 x, u32 y)
  * @event : Updating event
  * @perms : Permission mask bits
  * @ssid,@tsid,@tclass : identifier of an AVC entry
+ * @seqno : sequence number when decision was made
  *
  * if a valid AVC entry doesn't exist,this function returns -ENOENT.
  * if kmalloc() called internal returns NULL, this function returns -ENOMEM.
  * otherwise, this function update the AVC entry. The original AVC-entry object
  * will release later by RCU.
  */
-static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass)
+static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
+                          u32 seqno)
 {
        int hvalue, rc = 0;
        unsigned long flag;
        struct avc_node *pos, *node, *orig = NULL;
+       struct hlist_head *head;
+       struct hlist_node *next;
+       spinlock_t *lock;
 
        node = avc_alloc_node();
        if (!node) {
@@ -741,12 +767,17 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass)
 
        /* Lock the target slot */
        hvalue = avc_hash(ssid, tsid, tclass);
-       spin_lock_irqsave(&avc_cache.slots_lock[hvalue], flag);
 
-       list_for_each_entry(pos, &avc_cache.slots[hvalue], list){
-               if ( ssid==pos->ae.ssid &&
-                    tsid==pos->ae.tsid &&
-                    tclass==pos->ae.tclass ){
+       head = &avc_cache.slots[hvalue];
+       lock = &avc_cache.slots_lock[hvalue];
+
+       spin_lock_irqsave(lock, flag);
+
+       hlist_for_each_entry(pos, next, head, list) {
+               if (ssid == pos->ae.ssid &&
+                   tsid == pos->ae.tsid &&
+                   tclass == pos->ae.tclass &&
+                   seqno == pos->ae.avd.seqno){
                        orig = pos;
                        break;
                }
@@ -762,7 +793,7 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass)
         * Copy and replace original node.
         */
 
-       avc_node_populate(node, ssid, tsid, tclass, &orig->ae);
+       avc_node_populate(node, ssid, tsid, tclass, &orig->ae.avd);
 
        switch (event) {
        case AVC_CALLBACK_GRANT:
@@ -787,7 +818,7 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass)
        }
        avc_node_replace(node, orig);
 out_unlock:
-       spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flag);
+       spin_unlock_irqrestore(lock, flag);
 out:
        return rc;
 }
@@ -802,18 +833,30 @@ int avc_ss_reset(u32 seqno)
        int i, rc = 0, tmprc;
        unsigned long flag;
        struct avc_node *node;
+       struct hlist_head *head;
+       struct hlist_node *next;
+       spinlock_t *lock;
 
        for (i = 0; i < AVC_CACHE_SLOTS; i++) {
-               spin_lock_irqsave(&avc_cache.slots_lock[i], flag);
-               list_for_each_entry(node, &avc_cache.slots[i], list)
+               head = &avc_cache.slots[i];
+               lock = &avc_cache.slots_lock[i];
+
+               spin_lock_irqsave(lock, flag);
+               /*
+                * With preemptable RCU, the outer spinlock does not
+                * prevent RCU grace periods from ending.
+                */
+               rcu_read_lock();
+               hlist_for_each_entry(node, next, head, list)
                        avc_node_delete(node);
-               spin_unlock_irqrestore(&avc_cache.slots_lock[i], flag);
+               rcu_read_unlock();
+               spin_unlock_irqrestore(lock, flag);
        }
 
        for (c = avc_callbacks; c; c = c->next) {
                if (c->events & AVC_CALLBACK_RESET) {
                        tmprc = c->callback(AVC_CALLBACK_RESET,
-                                           0, 0, 0, 0, NULL);
+                                           0, 0, 0, 0, NULL);
                        /* save the first error encountered for the return
                           value and continue processing the callbacks */
                        if (!rc)
@@ -831,6 +874,7 @@ int avc_ss_reset(u32 seqno)
  * @tsid: target security identifier
  * @tclass: target security class
  * @requested: requested permissions, interpreted based on @tclass
+ * @flags:  AVC_STRICT or 0
  * @avd: access vector decisions
  *
  * Check the AVC to determine whether the @requested permissions are granted
@@ -845,40 +889,49 @@ int avc_ss_reset(u32 seqno)
  * should be released for the auditing.
  */
 int avc_has_perm_noaudit(u32 ssid, u32 tsid,
-                         u16 tclass, u32 requested,
-                         struct av_decision *avd)
+                        u16 tclass, u32 requested,
+                        unsigned flags,
+                        struct av_decision *in_avd)
 {
        struct avc_node *node;
-       struct avc_entry entry, *p_ae;
+       struct av_decision avd_entry, *avd;
        int rc = 0;
        u32 denied;
 
+       BUG_ON(!requested);
+
        rcu_read_lock();
 
-       node = avc_lookup(ssid, tsid, tclass, requested);
+       node = avc_lookup(ssid, tsid, tclass);
        if (!node) {
                rcu_read_unlock();
-               rc = security_compute_av(ssid,tsid,tclass,requested,&entry.avd);
+
+               if (in_avd)
+                       avd = in_avd;
+               else
+                       avd = &avd_entry;
+
+               rc = security_compute_av(ssid, tsid, tclass, requested, avd);
                if (rc)
                        goto out;
                rcu_read_lock();
-               node = avc_insert(ssid,tsid,tclass,&entry);
+               node = avc_insert(ssid, tsid, tclass, avd);
+       } else {
+               if (in_avd)
+                       memcpy(in_avd, &node->ae.avd, sizeof(*in_avd));
+               avd = &node->ae.avd;
        }
 
-       p_ae = node ? &node->ae : &entry;
-
-       if (avd)
-               memcpy(avd, &p_ae->avd, sizeof(*avd));
+       denied = requested & ~(avd->allowed);
 
-       denied = requested & ~(p_ae->avd.allowed);
-
-       if (!requested || denied) {
-               if (selinux_enforcing)
+       if (denied) {
+               if (flags & AVC_STRICT)
                        rc = -EACCES;
+               else if (!selinux_enforcing || security_permissive_sid(ssid))
+                       avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
+                                       tsid, tclass, avd->seqno);
                else
-                       if (node)
-                               avc_update_node(AVC_CALLBACK_GRANT,requested,
-                                               ssid,tsid,tclass);
+                       rc = -EACCES;
        }
 
        rcu_read_unlock();
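
Note: avc_has_perm_noaudit() now takes a flags word and an optional av_decision. With AVC_STRICT a denial returns -EACCES even when SELinux is permissive or the source SID is permissive; passing NULL for in_avd is allowed when the caller only needs the return code. A hedged caller sketch (the function name is illustrative):

	static int example_strict_check(u32 ssid, u32 tsid, u16 tclass, u32 requested)
	{
		struct av_decision avd;

		/* AVC_STRICT: a denial means -EACCES regardless of enforcing mode */
		return avc_has_perm_noaudit(ssid, tsid, tclass, requested,
					    AVC_STRICT, &avd);
	}
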
@@ -903,12 +956,17 @@ out:
  * another -errno upon other errors.
  */
 int avc_has_perm(u32 ssid, u32 tsid, u16 tclass,
-                 u32 requested, struct avc_audit_data *auditdata)
+                u32 requested, struct avc_audit_data *auditdata)
 {
        struct av_decision avd;
        int rc;
 
-       rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, &avd);
+       rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);
        avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata);
        return rc;
 }
+
+u32 avc_policy_seqno(void)
+{
+       return avc_cache.latest_notif;
+}
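
Note: avc_policy_seqno() exposes avc_cache.latest_notif so callers can stamp decisions they cache elsewhere and notice when policy has changed underneath them. A hedged sketch of that usage (the struct and function names are illustrative):

	struct example_cached_decision {
		struct av_decision	avd;
		u32			seqno;	/* avc_policy_seqno() at fill time */
	};

	static int example_decision_is_current(const struct example_cached_decision *c)
	{
		return c->seqno == avc_policy_seqno();
	}
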