/*
 *	cn_queue.c
 *
 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/suspend.h>
#include <linux/connector.h>
#include <linux/delay.h>

/*
 * This job is sent to the kevent workqueue.
 * As long as no event has been delivered to any callback, the connector
 * workqueue is not created, to avoid keeping a useless idle kernel task.
 * Once the first event is received, we create this dedicated workqueue:
 * the flow of data can be high and we don't want to encumber keventd
 * with it.
 */
static void cn_queue_create(struct work_struct *work)
{
        struct cn_queue_dev *dev;

        dev = container_of(work, struct cn_queue_dev, wq_creation);

        dev->cn_queue = create_singlethread_workqueue(dev->name);
        /* If we fail, we will use keventd for all following connector jobs */
        WARN_ON(!dev->cn_queue);
}

/*
 * Queue data sent to a callback.
 * If the connector workqueue has already been created, we queue the job
 * on it. Otherwise, we queue the job to kevent and also queue the
 * creation of the connector workqueue.
 */
int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work)
{
        struct cn_queue_dev *pdev = cbq->pdev;

        if (likely(pdev->cn_queue))
                return queue_work(pdev->cn_queue, work);

        /* Don't create the connector workqueue twice */
        if (atomic_inc_return(&pdev->wq_requested) == 1)
                schedule_work(&pdev->wq_creation);
        else
                atomic_dec(&pdev->wq_requested);

        return schedule_work(work);
}

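/*
 * Work function bound to every callback entry: it unpacks the cn_msg
 * from the queued socket buffer, invokes the registered callback and
 * then releases the per-message resources.
 */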
void cn_queue_wrapper(struct work_struct *work)
{
        struct cn_callback_entry *cbq =
                container_of(work, struct cn_callback_entry, work);
        struct cn_callback_data *d = &cbq->data;
        struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(d->skb));

        d->callback(msg);

        d->destruct_data(d->ddata);
        d->ddata = NULL;

        kfree(d->free);
}

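/*
 * Allocate and initialize a callback entry: copy the textual name and
 * the (idx, val) identifier, record the callback and prepare the work
 * item that cn_queue_wrapper() will run for every message.
 */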
static struct cn_callback_entry *
cn_queue_alloc_callback_entry(char *name, struct cb_id *id,
                              void (*callback)(struct cn_msg *))
{
        struct cn_callback_entry *cbq;

        cbq = kzalloc(sizeof(*cbq), GFP_KERNEL);
        if (!cbq) {
                printk(KERN_ERR "Failed to create new callback queue.\n");
                return NULL;
        }

        snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
        memcpy(&cbq->id.id, id, sizeof(struct cb_id));
        cbq->data.callback = callback;

        INIT_WORK(&cbq->work, &cn_queue_wrapper);
        return cbq;
}

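/*
 * Free a callback entry after making sure no work referencing it is
 * still pending, either on keventd or on the dedicated workqueue.
 */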
static void cn_queue_free_callback(struct cn_callback_entry *cbq)
{
        /* The first jobs have been sent to kevent, flush them too */
        flush_scheduled_work();
        if (cbq->pdev->cn_queue)
                flush_workqueue(cbq->pdev->cn_queue);

        kfree(cbq);
}

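/* Two connector identifiers are equal when both idx and val match */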
int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
{
        return ((i1->idx == i2->idx) && (i1->val == i2->val));
}

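/*
 * Register a new callback on @dev: allocate an entry, then insert it
 * into the device's list under the queue lock, unless an entry with
 * the same id is already present, in which case -EINVAL is returned.
 */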
int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id,
                          void (*callback)(struct cn_msg *))
{
        struct cn_callback_entry *cbq, *__cbq;
        int found = 0;

        cbq = cn_queue_alloc_callback_entry(name, id, callback);
        if (!cbq)
                return -ENOMEM;

        atomic_inc(&dev->refcnt);
        cbq->pdev = dev;

        spin_lock_bh(&dev->queue_lock);
        list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
                if (cn_cb_equal(&__cbq->id.id, id)) {
                        found = 1;
                        break;
                }
        }
        if (!found)
                list_add_tail(&cbq->callback_entry, &dev->queue_list);
        spin_unlock_bh(&dev->queue_lock);

        if (found) {
                cn_queue_free_callback(cbq);
                atomic_dec(&dev->refcnt);
                return -EINVAL;
        }

        cbq->seq = 0;
        cbq->group = cbq->id.id.idx;

        return 0;
}

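/*
 * Unregister the callback matching @id, if any: unlink it under the
 * queue lock, then flush and free it outside of the lock.
 */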
void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
{
        struct cn_callback_entry *cbq, *n;
        int found = 0;

        spin_lock_bh(&dev->queue_lock);
        list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
                if (cn_cb_equal(&cbq->id.id, id)) {
                        list_del(&cbq->callback_entry);
                        found = 1;
                        break;
                }
        }
        spin_unlock_bh(&dev->queue_lock);

        if (found) {
                cn_queue_free_callback(cbq);
                atomic_dec(&dev->refcnt);
        }
}

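/*
 * Allocate a queue device. This also prepares the deferred creation of
 * the connector workqueue, which queue_cn_work() will only schedule
 * once the first event arrives.
 */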
struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
{
        struct cn_queue_dev *dev;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return NULL;

        snprintf(dev->name, sizeof(dev->name), "%s", name);
        atomic_set(&dev->refcnt, 0);
        INIT_LIST_HEAD(&dev->queue_list);
        spin_lock_init(&dev->queue_lock);
        init_waitqueue_head(&dev->wq_created);

        dev->nls = nls;

        INIT_WORK(&dev->wq_creation, cn_queue_create);

        return dev;
}

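/*
 * Tear down a queue device: wait for a possibly in-flight workqueue
 * creation, flush and destroy the workqueue, unlink every remaining
 * callback entry and wait until all references have been dropped.
 */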
void cn_queue_free_dev(struct cn_queue_dev *dev)
{
        struct cn_callback_entry *cbq, *n;
        long timeout;
        DEFINE_WAIT(wait);

        /* Flush the first pending jobs queued on kevent */
        flush_scheduled_work();

        /* If the connector workqueue creation is still pending, wait for it */
        prepare_to_wait(&dev->wq_created, &wait, TASK_UNINTERRUPTIBLE);
        if (atomic_read(&dev->wq_requested) && !dev->cn_queue) {
                timeout = schedule_timeout(HZ * 2);
                if (!timeout && !dev->cn_queue)
                        WARN_ON(1);
        }
        finish_wait(&dev->wq_created, &wait);

        if (dev->cn_queue) {
                flush_workqueue(dev->cn_queue);
                destroy_workqueue(dev->cn_queue);
        }

        spin_lock_bh(&dev->queue_lock);
        list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
                list_del(&cbq->callback_entry);
        spin_unlock_bh(&dev->queue_lock);

        while (atomic_read(&dev->refcnt)) {
                printk(KERN_INFO "Waiting for %s to become free: refcnt=%d.\n",
                       dev->name, atomic_read(&dev->refcnt));
                msleep(1000);
        }

        kfree(dev);
        dev = NULL;
}

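/*
 * Usage sketch (illustrative only, not part of the original file):
 * modules normally reach this code through the connector core
 * (cn_add_callback() in connector.c) rather than by calling the
 * cn_queue_* helpers directly. The idx/val numbers below are made up
 * for the example.
 *
 *      static struct cb_id my_id = { .idx = 0x123, .val = 0x456 };
 *
 *      static void my_callback(struct cn_msg *msg)
 *      {
 *              printk(KERN_INFO "cn: seq %u ack %u len %u\n",
 *                     msg->seq, msg->ack, msg->len);
 *      }
 *
 *      // in module init: registers my_callback; messages are then
 *      // dispatched through cn_queue_wrapper() above
 *      err = cn_add_callback(&my_id, "my_cn_user", my_callback);
 *      ...
 *      // in module exit
 *      cn_del_callback(&my_id);
 */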