kernel/res_counter.c
/*
 * resource cgroups
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 */

#include <linux/types.h>
#include <linux/parser.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/res_counter.h>
#include <linux/uaccess.h>
#include <linux/mm.h>

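/*
 * Set up a counter: initialize its lock, start with both the hard and the
 * soft limit at RESOURCE_MAX (i.e. unlimited) and remember the parent so
 * that charges and uncharges can propagate up the hierarchy.
 */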
void res_counter_init(struct res_counter *counter, struct res_counter *parent)
{
        spin_lock_init(&counter->lock);
        counter->limit = RESOURCE_MAX;
        counter->soft_limit = RESOURCE_MAX;
        counter->parent = parent;
}

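/*
 * Charge 'val' against a single counter.  Fails with -ENOMEM (and bumps
 * failcnt) if the charge would push usage above the hard limit; otherwise
 * usage and the max_usage watermark are updated.  The caller must hold
 * counter->lock.
 */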
int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
{
        if (counter->usage + val > counter->limit) {
                counter->failcnt++;
                return -ENOMEM;
        }

        counter->usage += val;
        if (counter->usage > counter->max_usage)
                counter->max_usage = counter->usage;
        return 0;
}

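/*
 * Charge 'val' against the counter and each of its ancestors, taking every
 * counter's lock in turn with interrupts disabled.  If any level is over
 * its hard limit, the charge fails, the offending counter is reported via
 * *limit_fail_at and all charges made so far are rolled back.  When the
 * caller passes soft_limit_fail_at, the highest ancestor found above its
 * soft limit is reported there as well; soft limits never make the charge
 * fail.
 *
 * Illustrative caller (hypothetical, not part of this file); on failure
 * 'fail' points at the counter that hit its hard limit:
 *
 *	struct res_counter *fail, *soft_fail;
 *
 *	if (res_counter_charge(&grp->res, PAGE_SIZE, &fail, &soft_fail))
 *		return -ENOMEM;
 */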
int res_counter_charge(struct res_counter *counter, unsigned long val,
                        struct res_counter **limit_fail_at,
                        struct res_counter **soft_limit_fail_at)
{
        int ret;
        unsigned long flags;
        struct res_counter *c, *u;

        *limit_fail_at = NULL;
        if (soft_limit_fail_at)
                *soft_limit_fail_at = NULL;
        local_irq_save(flags);
        for (c = counter; c != NULL; c = c->parent) {
                spin_lock(&c->lock);
                ret = res_counter_charge_locked(c, val);
                /*
                 * With soft limits, we return the highest ancestor
                 * that exceeds its soft limit
                 */
                if (soft_limit_fail_at &&
                        !res_counter_soft_limit_check_locked(c))
                        *soft_limit_fail_at = c;
                spin_unlock(&c->lock);
                if (ret < 0) {
                        *limit_fail_at = c;
                        goto undo;
                }
        }
        ret = 0;
        goto done;
undo:
        for (u = counter; u != c; u = u->parent) {
                spin_lock(&u->lock);
                res_counter_uncharge_locked(u, val);
                spin_unlock(&u->lock);
        }
done:
        local_irq_restore(flags);
        return ret;
}

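/*
 * Remove 'val' from a single counter's usage.  Uncharging more than is
 * currently charged triggers a warning and is clamped to the current
 * usage.  The caller must hold counter->lock.
 */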
void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
{
        if (WARN_ON(counter->usage < val))
                val = counter->usage;

        counter->usage -= val;
}

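/*
 * Uncharge 'val' from the counter and each of its ancestors with
 * interrupts disabled.  If was_soft_limit_excess is non-NULL it is
 * rewritten at every level, so it ends up reporting whether the topmost
 * ancestor was above its soft limit just before being uncharged.
 */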
void res_counter_uncharge(struct res_counter *counter, unsigned long val,
                                bool *was_soft_limit_excess)
{
        unsigned long flags;
        struct res_counter *c;

        local_irq_save(flags);
        for (c = counter; c != NULL; c = c->parent) {
                spin_lock(&c->lock);
                if (was_soft_limit_excess)
                        *was_soft_limit_excess =
                                !res_counter_soft_limit_check_locked(c);
                res_counter_uncharge_locked(c, val);
                spin_unlock(&c->lock);
        }
        local_irq_restore(flags);
}

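/*
 * Map a RES_* member id to the address of the corresponding field in the
 * counter.  An unknown member id is a programming error and hits BUG().
 */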
static inline unsigned long long *
res_counter_member(struct res_counter *counter, int member)
{
        switch (member) {
        case RES_USAGE:
                return &counter->usage;
        case RES_MAX_USAGE:
                return &counter->max_usage;
        case RES_LIMIT:
                return &counter->limit;
        case RES_FAILCNT:
                return &counter->failcnt;
        case RES_SOFT_LIMIT:
                return &counter->soft_limit;
        }

        BUG();
        return NULL;
}

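/*
 * Format a single counter member into a small on-stack buffer, either via
 * the caller-supplied read_strategy or as a plain decimal value, and copy
 * the result to userspace with simple_read_from_buffer().
 */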
ssize_t res_counter_read(struct res_counter *counter, int member,
                const char __user *userbuf, size_t nbytes, loff_t *pos,
                int (*read_strategy)(unsigned long long val, char *st_buf))
{
        unsigned long long *val;
        char buf[64], *s;

        s = buf;
        val = res_counter_member(counter, member);
        if (read_strategy)
                s += read_strategy(*val, s);
        else
                s += sprintf(s, "%llu\n", *val);
        return simple_read_from_buffer((void __user *)userbuf, nbytes,
                        pos, buf, s - buf);
}

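/*
 * Return the raw value of a single counter member, without locking or
 * formatting.
 */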
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
        return *res_counter_member(counter, member);
}

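/*
 * Parse a limit written from userspace: "-1" means unlimited
 * (RESOURCE_MAX); anything else goes through memparse(), so the usual
 * K/M/G suffixes are accepted, and the result is rounded up to a page
 * boundary.
 */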
int res_counter_memparse_write_strategy(const char *buf,
                                        unsigned long long *res)
{
        char *end;

        /* return RESOURCE_MAX (unlimited) if "-1" is specified */
        if (*buf == '-') {
                *res = simple_strtoull(buf + 1, &end, 10);
                if (*res != 1 || *end != '\0')
                        return -EINVAL;
                *res = RESOURCE_MAX;
                return 0;
        }

        /* FIXME - make memparse() take const char* args */
        *res = memparse((char *)buf, &end);
        if (*end != '\0')
                return -EINVAL;

        *res = PAGE_ALIGN(*res);
        return 0;
}

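/*
 * Parse a value written from userspace (with write_strategy if one is
 * provided, otherwise as a plain decimal number) and store it into the
 * selected counter member under the counter's lock.
 */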
int res_counter_write(struct res_counter *counter, int member,
                      const char *buf, write_strategy_fn write_strategy)
{
        char *end;
        unsigned long flags;
        unsigned long long tmp, *val;

        if (write_strategy) {
                if (write_strategy(buf, &tmp))
                        return -EINVAL;
        } else {
                tmp = simple_strtoull(buf, &end, 10);
                if (*end != '\0')
                        return -EINVAL;
        }
        spin_lock_irqsave(&counter->lock, flags);
        val = res_counter_member(counter, member);
        *val = tmp;
        spin_unlock_irqrestore(&counter->lock, flags);
        return 0;
}