#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
+#include <linux/wait.h>
#include <net/sock.h>
#include <net/af_unix.h>
static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
+static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
unsigned int unix_tot_inflight;
* Socket ?
*/
if (S_ISSOCK(inode->i_mode)) {
- struct socket * sock = SOCKET_I(inode);
- struct sock * s = sock->sk;
+ struct socket *sock = SOCKET_I(inode);
+ struct sock *s = sock->sk;
/*
* PF_UNIX ?
void unix_inflight(struct file *fp)
{
struct sock *s = unix_get_socket(fp);
- if(s) {
+ if (s) {
struct unix_sock *u = unix_sk(s);
spin_lock(&unix_gc_lock);
- if (atomic_inc_return(&u->inflight) == 1) {
+ if (atomic_long_inc_return(&u->inflight) == 1) {
BUG_ON(!list_empty(&u->link));
list_add_tail(&u->link, &gc_inflight_list);
} else {
void unix_notinflight(struct file *fp)
{
struct sock *s = unix_get_socket(fp);
- if(s) {
+ if (s) {
struct unix_sock *u = unix_sk(s);
spin_lock(&unix_gc_lock);
BUG_ON(list_empty(&u->link));
- if (atomic_dec_and_test(&u->inflight))
+ if (atomic_long_dec_and_test(&u->inflight))
list_del_init(&u->link);
unix_tot_inflight--;
spin_unlock(&unix_gc_lock);
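Context for the two hunks above: unix_inflight() and unix_notinflight() run when a file descriptor is queued on, or dequeued from, an AF_UNIX socket via an SCM_RIGHTS control message. While the message sits unread, the queued skb pins the descriptor's struct file, and if that descriptor itself refers to an AF_UNIX socket, its inflight count is bumped. A minimal user-space sketch of what creates such a reference (the helper name send_fd() is ours, not part of the patch):

	#include <string.h>
	#include <sys/socket.h>

	/* Queue fd on the AF_UNIX socket 'sock'. Until the receiver reads
	 * the message, fd's struct file is pinned by the queued skb; if fd
	 * is itself an AF_UNIX socket, unix_inflight() bumps its count.
	 */
	static int send_fd(int sock, int fd)
	{
		char dummy = '*';
		struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
		union {
			struct cmsghdr align;
			char buf[CMSG_SPACE(sizeof(int))];
		} u;
		struct msghdr msg = {
			.msg_iov = &iov,
			.msg_iovlen = 1,
			.msg_control = u.buf,
			.msg_controllen = sizeof(u.buf),
		};
		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

		cmsg->cmsg_level = SOL_SOCKET;
		cmsg->cmsg_type = SCM_RIGHTS;
		cmsg->cmsg_len = CMSG_LEN(sizeof(int));
		memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

		return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
	}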
static inline struct sk_buff *sock_queue_head(struct sock *sk)
{
- return (struct sk_buff *) &sk->sk_receive_queue;
+ return (struct sk_buff *)&sk->sk_receive_queue;
}
#define receive_queue_for_each_skb(sk, next, skb) \
for (skb = sock_queue_head(sk)->next, next = skb->next; \
skb != sock_queue_head(sk); skb = next, next = skb->next)
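receive_queue_for_each_skb() treats the queue head as its own terminator and caches next before the loop body runs, so the body may safely unlink the current skb. A sketch of the pattern scan_inflight() uses when given a hitlist (the predicate name is hypothetical; the real code inspects the fds attached to the skb):

	receive_queue_for_each_skb(sk, next, skb) {
		if (skb_refers_only_to_candidates(skb)) {	/* hypothetical */
			__skb_unlink(skb, &sk->sk_receive_queue);
			__skb_queue_tail(hitlist, skb);
		}
	}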
-static void scan_inflight(struct sock *x, void (*func)(struct sock *),
+static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist)
{
struct sk_buff *skb;
* if it indeed does so
*/
struct sock *sk = unix_get_socket(*fp++);
- if(sk) {
- hit = true;
- func(sk);
+ if (sk) {
+ struct unix_sock *u = unix_sk(sk);
+
+ /*
+ * Ignore non-candidates; they could
+ * have been added to the queues after
+ * the garbage collection started
+ */
+ if (u->gc_candidate) {
+ hit = true;
+ func(u);
+ }
}
}
if (hit && hitlist != NULL) {
spin_unlock(&x->sk_receive_queue.lock);
}
-static void scan_children(struct sock *x, void (*func)(struct sock *),
+static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist)
{
if (x->sk_state != TCP_LISTEN)
}
}
-static void dec_inflight(struct sock *sk)
+static void dec_inflight(struct unix_sock *usk)
{
- atomic_dec(&unix_sk(sk)->inflight);
+ atomic_long_dec(&usk->inflight);
}
-static void inc_inflight(struct sock *sk)
+static void inc_inflight(struct unix_sock *usk)
{
- atomic_inc(&unix_sk(sk)->inflight);
+ atomic_long_inc(&usk->inflight);
}
-static void inc_inflight_move_tail(struct sock *sk)
+static void inc_inflight_move_tail(struct unix_sock *u)
{
- struct unix_sock *u = unix_sk(sk);
-
- atomic_inc(&u->inflight);
+ atomic_long_inc(&u->inflight);
/*
- * If this is still a candidate, move it to the end of the
- * list, so that it's checked even if it was already passed
- * over
+ * If this still might be part of a cycle, move it to the end
+ * of the list, so that it's checked even if it was already
+ * passed over
*/
- if (u->gc_candidate)
+ if (u->gc_maybe_cycle)
list_move_tail(&u->link, &gc_candidates);
}
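Taken together, these helpers form the detection pass: every candidate's receive queue is first scanned with dec_inflight(), subtracting the references that candidate-internal skbs hold; any candidate whose count is still positive must be referenced from outside the candidate set, so inc_inflight_move_tail() gives its children their references back and re-queues them for rescanning, until a fixed point is reached. A condensed restatement of that loop from unix_gc() below (locking and the not_cycle_list bookkeeping elided):

	static void detect_cycles_sketch(void)
	{
		struct unix_sock *u;
		struct list_head cursor;

		/* Pretend the references held by candidates' skbs are gone */
		list_for_each_entry(u, &gc_candidates, link)
			scan_children(&u->sk, dec_inflight, NULL);

		/* Whatever is still referenced is reachable from outside:
		 * restore its children and rescan until nothing changes
		 */
		list_add(&cursor, &gc_candidates);
		while (cursor.next != &gc_candidates) {
			u = list_entry(cursor.next, struct unix_sock, link);
			list_move(&cursor, &u->link);
			if (atomic_long_read(&u->inflight) > 0)
				scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
		list_del(&cursor);
	}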
-/* The external entry point: unix_gc() */
+static bool gc_in_progress = false;
-void unix_gc(void)
+void wait_for_unix_gc(void)
{
- static bool gc_in_progress = false;
+ wait_event(unix_gc_wait, gc_in_progress == false);
+}
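wait_for_unix_gc() lets the send paths throttle against a collection already in progress instead of queuing yet more in-flight descriptors while the GC runs. A sketch of the intended call site (this series adds the call to the AF_UNIX sendmsg paths in af_unix.c; the surrounding function body is elided):

	/* Early in unix_dgram_sendmsg()/unix_stream_sendmsg(): */
	wait_for_unix_gc();	/* block while a collection is running */
	err = scm_send(sock, msg, siocb->scm);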
+/* The external entry point: unix_gc() */
+void unix_gc(void)
+{
struct unix_sock *u;
struct unix_sock *next;
struct sk_buff_head hitlist;
struct list_head cursor;
+ LIST_HEAD(not_cycle_list);
spin_lock(&unix_gc_lock);
*
* Holding unix_gc_lock will protect these candidates from
* being detached, and hence from gaining an external
- * reference. This also means, that since there are no
- * possible receivers, the receive queues of these sockets are
- * static during the GC, even though the dequeue is done
- * before the detach without atomicity guarantees.
+ * reference. Since there are no possible receivers, all
+ * buffers currently on the candidates' queues stay there
+ * during the garbage collection.
+ *
+ * We also know that no new candidate can be added onto the
+ * receive queues. Other, non-candidate sockets _can_ be
+ * added to a queue, so we must make sure only to touch
+ * candidates.
*/
list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
- int total_refs;
- int inflight_refs;
+ long total_refs;
+ long inflight_refs;
total_refs = file_count(u->sk.sk_socket->file);
- inflight_refs = atomic_read(&u->inflight);
+ inflight_refs = atomic_long_read(&u->inflight);
BUG_ON(inflight_refs < 1);
BUG_ON(total_refs < inflight_refs);
if (total_refs == inflight_refs) {
list_move_tail(&u->link, &gc_candidates);
u->gc_candidate = 1;
+ u->gc_maybe_cycle = 1;
}
}
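The total_refs == inflight_refs test is the reachability heuristic: when every reference to the socket's file is held by a queued skb, no process can reach the socket through an fd table. A concrete walk-through, assuming a helper like the send_fd() sketch earlier:

	int s[2];

	socketpair(AF_UNIX, SOCK_DGRAM, 0, s);
	send_fd(s[0], s[0]);	/* s[0]'s file: 2 refs, 1 in-flight (skb on s[1]'s queue) */
	close(s[0]);		/* 1 ref, 1 in-flight: total == inflight, so a candidate */

Note that s[0] is a candidate but not garbage: the skb pinning it sits on s[1]'s queue, and s[1] is not a candidate, so the detection pass never decrements s[0]'s count and the not_cycle_list handling below restores it to the inflight list.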
/* Move cursor to after the current position. */
list_move(&cursor, &u->link);
- if (atomic_read(&u->inflight) > 0) {
- list_move_tail(&u->link, &gc_inflight_list);
- u->gc_candidate = 0;
+ if (atomic_long_read(&u->inflight) > 0) {
+ list_move_tail(&u->link, &not_cycle_list);
+ u->gc_maybe_cycle = 0;
scan_children(&u->sk, inc_inflight_move_tail, NULL);
}
}
list_del(&cursor);
/*
+ * not_cycle_list contains those sockets which do not make up a
+ * cycle. Restore these to the inflight list.
+ */
+ while (!list_empty(&not_cycle_list)) {
+ u = list_entry(not_cycle_list.next, struct unix_sock, link);
+ u->gc_candidate = 0;
+ list_move_tail(&u->link, &gc_inflight_list);
+ }
+
+ /*
* Now gc_candidates contains only garbage. Restore original
* inflight counters for these as well, and remove the skbuffs
* which are creating the cycle(s).
*/
skb_queue_head_init(&hitlist);
list_for_each_entry(u, &gc_candidates, link)
- scan_children(&u->sk, inc_inflight, &hitlist);
+ scan_children(&u->sk, inc_inflight, &hitlist);
spin_unlock(&unix_gc_lock);
/* All candidates should have been detached by now. */
BUG_ON(!list_empty(&gc_candidates));
gc_in_progress = false;
+ wake_up(&unix_gc_wait);
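The wake_up() pairs with the wait_event() in wait_for_unix_gc(): gc_in_progress is cleared before the wake, and wait_event() re-tests the condition around every sleep, so a wakeup cannot be lost. Roughly, the wait side open-codes to (sketch):

	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&unix_gc_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (!gc_in_progress)
			break;
		schedule();
	}
	finish_wait(&unix_gc_wait, &wait);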
out:
spin_unlock(&unix_gc_lock);