firewire: standardize a variable name
drivers/firewire/fw-cdev.c
1 /*
2  * Char device for device raw access
3  *
4  * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software Foundation,
18  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19  */
20
21 #include <linux/module.h>
22 #include <linux/kernel.h>
23 #include <linux/wait.h>
24 #include <linux/errno.h>
25 #include <linux/device.h>
26 #include <linux/vmalloc.h>
27 #include <linux/mutex.h>
28 #include <linux/poll.h>
29 #include <linux/preempt.h>
30 #include <linux/time.h>
31 #include <linux/spinlock.h>
32 #include <linux/delay.h>
33 #include <linux/mm.h>
34 #include <linux/idr.h>
35 #include <linux/compat.h>
36 #include <linux/firewire-cdev.h>
37 #include <asm/system.h>
38 #include <asm/uaccess.h>
39 #include "fw-transaction.h"
40 #include "fw-topology.h"
41 #include "fw-device.h"
42
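/*
 * Generic per-client resource bookkeeping: every object a client owns
 * (pending transaction, address handler, descriptor, parked inbound
 * request) embeds a struct client_resource, is tracked in the client's
 * IDR under ->handle, and is torn down through its ->release callback.
 */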
43 struct client;
44 struct client_resource;
45 typedef void (*client_resource_release_fn_t)(struct client *,
46                                              struct client_resource *);
47 struct client_resource {
48         client_resource_release_fn_t release;
49         int handle;
50 };
51
52 /*
53  * dequeue_event() just kfree()'s the event, so the event has to be
54  * the first field in the struct.
55  */
56
57 struct event {
58         struct { void *data; size_t size; } v[2];
59         struct list_head link;
60 };
61
62 struct bus_reset {
63         struct event event;
64         struct fw_cdev_event_bus_reset reset;
65 };
66
67 struct response {
68         struct event event;
69         struct fw_transaction transaction;
70         struct client *client;
71         struct client_resource resource;
72         struct fw_cdev_event_response response;
73 };
74
75 struct iso_interrupt {
76         struct event event;
77         struct fw_cdev_event_iso_interrupt interrupt;
78 };
79
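/*
 * Per-open-file state: one struct client is allocated in
 * fw_device_op_open() and freed in fw_device_op_release().
 * ->lock protects ->resource_idr, ->event_list and ->in_shutdown.
 */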
80 struct client {
81         u32 version;
82         struct fw_device *device;
83
84         spinlock_t lock;
85         bool in_shutdown;
86         struct idr resource_idr;
87         struct list_head event_list;
88         wait_queue_head_t wait;
89         u64 bus_reset_closure;
90
91         struct fw_iso_context *iso_context;
92         u64 iso_closure;
93         struct fw_iso_buffer buffer;
94         unsigned long vm_start;
95
96         struct list_head link;
97 };
98
99 static inline void __user *
100 u64_to_uptr(__u64 value)
101 {
102         return (void __user *)(unsigned long)value;
103 }
104
105 static inline __u64
106 uptr_to_u64(void __user *ptr)
107 {
108         return (__u64)(unsigned long)ptr;
109 }
110
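/*
 * open(): look up the fw_device behind the character device node,
 * allocate and initialize a client, and link it onto the device's
 * client list so bus reset events can be fanned out to it.
 */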
111 static int fw_device_op_open(struct inode *inode, struct file *file)
112 {
113         struct fw_device *device;
114         struct client *client;
115
116         device = fw_device_get_by_devt(inode->i_rdev);
117         if (device == NULL)
118                 return -ENODEV;
119
120         if (fw_device_is_shutdown(device)) {
121                 fw_device_put(device);
122                 return -ENODEV;
123         }
124
125         client = kzalloc(sizeof(*client), GFP_KERNEL);
126         if (client == NULL) {
127                 fw_device_put(device);
128                 return -ENOMEM;
129         }
130
131         client->device = device;
132         spin_lock_init(&client->lock);
133         idr_init(&client->resource_idr);
134         INIT_LIST_HEAD(&client->event_list);
135         init_waitqueue_head(&client->wait);
136
137         file->private_data = client;
138
139         mutex_lock(&device->client_list_mutex);
140         list_add_tail(&client->link, &device->client_list);
141         mutex_unlock(&device->client_list_mutex);
142
143         return 0;
144 }
145
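/*
 * Append an event to the client's event list, described as up to two
 * data chunks that dequeue_event() copies out back to back.  If the
 * client is already shutting down, the event is dropped instead.
 */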
146 static void queue_event(struct client *client, struct event *event,
147                         void *data0, size_t size0, void *data1, size_t size1)
148 {
149         unsigned long flags;
150
151         event->v[0].data = data0;
152         event->v[0].size = size0;
153         event->v[1].data = data1;
154         event->v[1].size = size1;
155
156         spin_lock_irqsave(&client->lock, flags);
157         if (client->in_shutdown)
158                 kfree(event);
159         else
160                 list_add_tail(&event->link, &client->event_list);
161         spin_unlock_irqrestore(&client->lock, flags);
162
163         wake_up_interruptible(&client->wait);
164 }
165
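/*
 * Block until an event is queued (or the device goes away), then copy
 * as much of it as fits into the read() buffer and free it.
 */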
166 static int
167 dequeue_event(struct client *client, char __user *buffer, size_t count)
168 {
169         unsigned long flags;
170         struct event *event;
171         size_t size, total;
172         int i, ret;
173
174         ret = wait_event_interruptible(client->wait,
175                         !list_empty(&client->event_list) ||
176                         fw_device_is_shutdown(client->device));
177         if (ret < 0)
178                 return ret;
179
180         if (list_empty(&client->event_list) &&
181                        fw_device_is_shutdown(client->device))
182                 return -ENODEV;
183
184         spin_lock_irqsave(&client->lock, flags);
185         event = container_of(client->event_list.next, struct event, link);
186         list_del(&event->link);
187         spin_unlock_irqrestore(&client->lock, flags);
188
189         total = 0;
190         for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
191                 size = min(event->v[i].size, count - total);
192                 if (copy_to_user(buffer + total, event->v[i].data, size)) {
193                         ret = -EFAULT;
194                         goto out;
195                 }
196                 total += size;
197         }
198         ret = total;
199
200  out:
201         kfree(event);
202
203         return ret;
204 }
205
206 static ssize_t
207 fw_device_op_read(struct file *file,
208                   char __user *buffer, size_t count, loff_t *offset)
209 {
210         struct client *client = file->private_data;
211
212         return dequeue_event(client, buffer, count);
213 }
214
215 static void
216 fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
217                      struct client *client)
218 {
219         struct fw_card *card = client->device->card;
220         unsigned long flags;
221
222         spin_lock_irqsave(&card->lock, flags);
223
224         event->closure       = client->bus_reset_closure;
225         event->type          = FW_CDEV_EVENT_BUS_RESET;
226         event->generation    = client->device->generation;
227         event->node_id       = client->device->node_id;
228         event->local_node_id = card->local_node->node_id;
229         event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
230         event->irm_node_id   = card->irm_node->node_id;
231         event->root_node_id  = card->root_node->node_id;
232
233         spin_unlock_irqrestore(&card->lock, flags);
234 }
235
236 static void
237 for_each_client(struct fw_device *device,
238                 void (*callback)(struct client *client))
239 {
240         struct client *c;
241
242         mutex_lock(&device->client_list_mutex);
243         list_for_each_entry(c, &device->client_list, link)
244                 callback(c);
245         mutex_unlock(&device->client_list_mutex);
246 }
247
248 static void
249 queue_bus_reset_event(struct client *client)
250 {
251         struct bus_reset *bus_reset;
252
253         bus_reset = kzalloc(sizeof(*bus_reset), GFP_KERNEL);
254         if (bus_reset == NULL) {
255                 fw_notify("Out of memory when allocating bus reset event\n");
256                 return;
257         }
258
259         fill_bus_reset_event(&bus_reset->reset, client);
260
261         queue_event(client, &bus_reset->event,
262                     &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
263 }
264
265 void fw_device_cdev_update(struct fw_device *device)
266 {
267         for_each_client(device, queue_bus_reset_event);
268 }
269
270 static void wake_up_client(struct client *client)
271 {
272         wake_up_interruptible(&client->wait);
273 }
274
275 void fw_device_cdev_remove(struct fw_device *device)
276 {
277         for_each_client(device, wake_up_client);
278 }
279
280 static int ioctl_get_info(struct client *client, void *buffer)
281 {
282         struct fw_cdev_get_info *get_info = buffer;
283         struct fw_cdev_event_bus_reset bus_reset;
284         unsigned long ret = 0;
285
286         client->version = get_info->version;
287         get_info->version = FW_CDEV_VERSION;
288         get_info->card = client->device->card->index;
289
290         down_read(&fw_device_rwsem);
291
292         if (get_info->rom != 0) {
293                 void __user *uptr = u64_to_uptr(get_info->rom);
294                 size_t want = get_info->rom_length;
295                 size_t have = client->device->config_rom_length * 4;
296
297                 ret = copy_to_user(uptr, client->device->config_rom,
298                                    min(want, have));
299         }
300         get_info->rom_length = client->device->config_rom_length * 4;
301
302         up_read(&fw_device_rwsem);
303
304         if (ret != 0)
305                 return -EFAULT;
306
307         client->bus_reset_closure = get_info->bus_reset_closure;
308         if (get_info->bus_reset != 0) {
309                 void __user *uptr = u64_to_uptr(get_info->bus_reset);
310
311                 fill_bus_reset_event(&bus_reset, client);
312                 if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
313                         return -EFAULT;
314         }
315
316         return 0;
317 }
318
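/*
 * Register a resource in the client's IDR and store the allocated
 * handle in resource->handle.  Fails with -ECANCELED once the client
 * has entered shutdown, so no new resource can race with teardown.
 */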
319 static int
320 add_client_resource(struct client *client, struct client_resource *resource,
321                     gfp_t gfp_mask)
322 {
323         unsigned long flags;
324         int ret;
325
326  retry:
327         if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
328                 return -ENOMEM;
329
330         spin_lock_irqsave(&client->lock, flags);
331         if (client->in_shutdown)
332                 ret = -ECANCELED;
333         else
334                 ret = idr_get_new(&client->resource_idr, resource,
335                                   &resource->handle);
336         spin_unlock_irqrestore(&client->lock, flags);
337
338         if (ret == -EAGAIN)
339                 goto retry;
340
341         return ret < 0 ? ret : 0;
342 }
343
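/*
 * Look up a handle, verify it refers to the expected resource type
 * (by comparing release callbacks), and remove it from the IDR.  The
 * caller either receives the resource via *resource or, if resource
 * is NULL, the resource's release callback is invoked here.
 */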
344 static int
345 release_client_resource(struct client *client, u32 handle,
346                         client_resource_release_fn_t release,
347                         struct client_resource **resource)
348 {
349         struct client_resource *r;
350         unsigned long flags;
351
352         spin_lock_irqsave(&client->lock, flags);
353         if (client->in_shutdown)
354                 r = NULL;
355         else
356                 r = idr_find(&client->resource_idr, handle);
357         if (r && r->release == release)
358                 idr_remove(&client->resource_idr, handle);
359         spin_unlock_irqrestore(&client->lock, flags);
360
361         if (!(r && r->release == release))
362                 return -EINVAL;
363
364         if (resource)
365                 *resource = r;
366         else
367                 r->release(client, r);
368
369         return 0;
370 }
371
372 static void
373 release_transaction(struct client *client, struct client_resource *resource)
374 {
375         struct response *response =
376                 container_of(resource, struct response, resource);
377
378         fw_cancel_transaction(client->device->card, &response->transaction);
379 }
380
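/*
 * Transaction completion callback: drop the resource handle (unless
 * the client is shutting down, in which case shutdown_resource() takes
 * care of it), then fill in the response event and queue it for read().
 */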
381 static void
382 complete_transaction(struct fw_card *card, int rcode,
383                      void *payload, size_t length, void *data)
384 {
385         struct response *response = data;
386         struct client *client = response->client;
387         unsigned long flags;
388         struct fw_cdev_event_response *r = &response->response;
389
390         if (length < r->length)
391                 r->length = length;
392         if (rcode == RCODE_COMPLETE)
393                 memcpy(r->data, payload, r->length);
394
395         spin_lock_irqsave(&client->lock, flags);
396         /*
397          * If called while in shutdown, the idr tree must be left untouched.
398          * The idr handle will be removed later.
399          */
400         if (!client->in_shutdown)
401                 idr_remove(&client->resource_idr, response->resource.handle);
402         spin_unlock_irqrestore(&client->lock, flags);
403
404         r->type   = FW_CDEV_EVENT_RESPONSE;
405         r->rcode  = rcode;
406
407         /*
408          * In the case that sizeof(*r) doesn't align with the position of the
409          * data, and the read is short, preserve an extra copy of the data
410          * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
411          * for short reads and some apps depended on it, this is both safe
412          * and prudent for compatibility.
413          */
414         if (r->length <= sizeof(*r) - offsetof(typeof(*r), data))
415                 queue_event(client, &response->event, r, sizeof(*r),
416                             r->data, r->length);
417         else
418                 queue_event(client, &response->event, r, sizeof(*r) + r->length,
419                             NULL, 0);
420 }
421
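/*
 * FW_CDEV_IOC_SEND_REQUEST: validate the tcode, copy in an optional
 * payload, register the pending transaction as a client resource so it
 * can be cancelled on release, and send it via fw_send_request().
 */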
422 static int ioctl_send_request(struct client *client, void *buffer)
423 {
424         struct fw_device *device = client->device;
425         struct fw_cdev_send_request *request = buffer;
426         struct response *response;
427         int ret;
428
429         /* What is the biggest size we'll accept, really? */
430         if (request->length > 4096)
431                 return -EINVAL;
432
433         response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
434         if (response == NULL)
435                 return -ENOMEM;
436
437         response->client = client;
438         response->response.length = request->length;
439         response->response.closure = request->closure;
440
441         if (request->data &&
442             copy_from_user(response->response.data,
443                            u64_to_uptr(request->data), request->length)) {
444                 ret = -EFAULT;
445                 goto failed;
446         }
447
448         switch (request->tcode) {
449         case TCODE_WRITE_QUADLET_REQUEST:
450         case TCODE_WRITE_BLOCK_REQUEST:
451         case TCODE_READ_QUADLET_REQUEST:
452         case TCODE_READ_BLOCK_REQUEST:
453         case TCODE_LOCK_MASK_SWAP:
454         case TCODE_LOCK_COMPARE_SWAP:
455         case TCODE_LOCK_FETCH_ADD:
456         case TCODE_LOCK_LITTLE_ADD:
457         case TCODE_LOCK_BOUNDED_ADD:
458         case TCODE_LOCK_WRAP_ADD:
459         case TCODE_LOCK_VENDOR_DEPENDENT:
460                 break;
461         default:
462                 ret = -EINVAL;
463                 goto failed;
464         }
465
466         response->resource.release = release_transaction;
467         ret = add_client_resource(client, &response->resource, GFP_KERNEL);
468         if (ret < 0)
469                 goto failed;
470
471         fw_send_request(device->card, &response->transaction,
472                         request->tcode & 0x1f,
473                         device->node->node_id,
474                         request->generation,
475                         device->max_speed,
476                         request->offset,
477                         response->response.data, request->length,
478                         complete_transaction, response);
479
480         if (request->data)
481                 return sizeof(*request) + request->length;
482         else
483                 return sizeof(*request);
484  failed:
485         kfree(response);
486
487         return ret;
488 }
489
490 struct address_handler {
491         struct fw_address_handler handler;
492         __u64 closure;
493         struct client *client;
494         struct client_resource resource;
495 };
496
497 struct request {
498         struct fw_request *request;
499         void *data;
500         size_t length;
501         struct client_resource resource;
502 };
503
504 struct request_event {
505         struct event event;
506         struct fw_cdev_event_request request;
507 };
508
509 static void
510 release_request(struct client *client, struct client_resource *resource)
511 {
512         struct request *request =
513                 container_of(resource, struct request, resource);
514
515         fw_send_response(client->device->card, request->request,
516                          RCODE_CONFLICT_ERROR);
517         kfree(request);
518 }
519
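/*
 * Address handler callback: may run in atomic context, hence the
 * GFP_ATOMIC allocations.  The inbound request is parked as a client
 * resource until userspace answers it with FW_CDEV_IOC_SEND_RESPONSE,
 * or until the client goes away.
 */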
520 static void
521 handle_request(struct fw_card *card, struct fw_request *r,
522                int tcode, int destination, int source,
523                int generation, int speed,
524                unsigned long long offset,
525                void *payload, size_t length, void *callback_data)
526 {
527         struct address_handler *handler = callback_data;
528         struct request *request;
529         struct request_event *e;
530         struct client *client = handler->client;
531         int ret;
532
533         request = kmalloc(sizeof(*request), GFP_ATOMIC);
534         e = kmalloc(sizeof(*e), GFP_ATOMIC);
535         if (request == NULL || e == NULL)
536                 goto failed;
537
538         request->request = r;
539         request->data    = payload;
540         request->length  = length;
541
542         request->resource.release = release_request;
543         ret = add_client_resource(client, &request->resource, GFP_ATOMIC);
544         if (ret < 0)
545                 goto failed;
546
547         e->request.type    = FW_CDEV_EVENT_REQUEST;
548         e->request.tcode   = tcode;
549         e->request.offset  = offset;
550         e->request.length  = length;
551         e->request.handle  = request->resource.handle;
552         e->request.closure = handler->closure;
553
554         queue_event(client, &e->event,
555                     &e->request, sizeof(e->request), payload, length);
556         return;
557
558  failed:
559         kfree(request);
560         kfree(e);
561         fw_send_response(card, r, RCODE_CONFLICT_ERROR);
562 }
563
564 static void
565 release_address_handler(struct client *client,
566                         struct client_resource *resource)
567 {
568         struct address_handler *handler =
569                 container_of(resource, struct address_handler, resource);
570
571         fw_core_remove_address_handler(&handler->handler);
572         kfree(handler);
573 }
574
575 static int ioctl_allocate(struct client *client, void *buffer)
576 {
577         struct fw_cdev_allocate *request = buffer;
578         struct address_handler *handler;
579         struct fw_address_region region;
580         int ret;
581
582         handler = kmalloc(sizeof(*handler), GFP_KERNEL);
583         if (handler == NULL)
584                 return -ENOMEM;
585
586         region.start = request->offset;
587         region.end = request->offset + request->length;
588         handler->handler.length = request->length;
589         handler->handler.address_callback = handle_request;
590         handler->handler.callback_data = handler;
591         handler->closure = request->closure;
592         handler->client = client;
593
594         ret = fw_core_add_address_handler(&handler->handler, &region);
595         if (ret < 0) {
596                 kfree(handler);
597                 return ret;
598         }
599
600         handler->resource.release = release_address_handler;
601         ret = add_client_resource(client, &handler->resource, GFP_KERNEL);
602         if (ret < 0) {
603                 release_address_handler(client, &handler->resource);
604                 return ret;
605         }
606         request->handle = handler->resource.handle;
607
608         return 0;
609 }
610
611 static int ioctl_deallocate(struct client *client, void *buffer)
612 {
613         struct fw_cdev_deallocate *request = buffer;
614
615         return release_client_resource(client, request->handle,
616                                        release_address_handler, NULL);
617 }
618
619 static int ioctl_send_response(struct client *client, void *buffer)
620 {
621         struct fw_cdev_send_response *request = buffer;
622         struct client_resource *resource;
623         struct request *r;
624
625         if (release_client_resource(client, request->handle,
626                                     release_request, &resource) < 0)
627                 return -EINVAL;
628
629         r = container_of(resource, struct request, resource);
630         if (request->length < r->length)
631                 r->length = request->length;
632         if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
633                 return -EFAULT;
634
635         fw_send_response(client->device->card, r->request, request->rcode);
636         kfree(r);
637
638         return 0;
639 }
640
641 static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
642 {
643         struct fw_cdev_initiate_bus_reset *request = buffer;
644         int short_reset;
645
646         short_reset = (request->type == FW_CDEV_SHORT_RESET);
647
648         return fw_core_initiate_bus_reset(client->device->card, short_reset);
649 }
650
651 struct descriptor {
652         struct fw_descriptor d;
653         struct client_resource resource;
654         u32 data[0];
655 };
656
657 static void release_descriptor(struct client *client,
658                                struct client_resource *resource)
659 {
660         struct descriptor *descriptor =
661                 container_of(resource, struct descriptor, resource);
662
663         fw_core_remove_descriptor(&descriptor->d);
664         kfree(descriptor);
665 }
666
667 static int ioctl_add_descriptor(struct client *client, void *buffer)
668 {
669         struct fw_cdev_add_descriptor *request = buffer;
670         struct descriptor *descriptor;
671         int ret;
672
673         if (request->length > 256)
674                 return -EINVAL;
675
676         descriptor =
677                 kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
678         if (descriptor == NULL)
679                 return -ENOMEM;
680
681         if (copy_from_user(descriptor->data,
682                            u64_to_uptr(request->data), request->length * 4)) {
683                 ret = -EFAULT;
684                 goto failed;
685         }
686
687         descriptor->d.length = request->length;
688         descriptor->d.immediate = request->immediate;
689         descriptor->d.key = request->key;
690         descriptor->d.data = descriptor->data;
691
692         ret = fw_core_add_descriptor(&descriptor->d);
693         if (ret < 0)
694                 goto failed;
695
696         descriptor->resource.release = release_descriptor;
697         ret = add_client_resource(client, &descriptor->resource, GFP_KERNEL);
698         if (ret < 0) {
699                 fw_core_remove_descriptor(&descriptor->d);
700                 goto failed;
701         }
702         request->handle = descriptor->resource.handle;
703
704         return 0;
705  failed:
706         kfree(descriptor);
707
708         return ret;
709 }
710
711 static int ioctl_remove_descriptor(struct client *client, void *buffer)
712 {
713         struct fw_cdev_remove_descriptor *request = buffer;
714
715         return release_client_resource(client, request->handle,
716                                        release_descriptor, NULL);
717 }
718
719 static void
720 iso_callback(struct fw_iso_context *context, u32 cycle,
721              size_t header_length, void *header, void *data)
722 {
723         struct client *client = data;
724         struct iso_interrupt *irq;
725
726         irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC);
727         if (irq == NULL)
728                 return;
729
730         irq->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
731         irq->interrupt.closure   = client->iso_closure;
732         irq->interrupt.cycle     = cycle;
733         irq->interrupt.header_length = header_length;
734         memcpy(irq->interrupt.header, header, header_length);
735         queue_event(client, &irq->event, &irq->interrupt,
736                     sizeof(irq->interrupt) + header_length, NULL, 0);
737 }
738
739 static int ioctl_create_iso_context(struct client *client, void *buffer)
740 {
741         struct fw_cdev_create_iso_context *request = buffer;
742         struct fw_iso_context *context;
743
744         /* We only support one context at this time. */
745         if (client->iso_context != NULL)
746                 return -EBUSY;
747
748         if (request->channel > 63)
749                 return -EINVAL;
750
751         switch (request->type) {
752         case FW_ISO_CONTEXT_RECEIVE:
753                 if (request->header_size < 4 || (request->header_size & 3))
754                         return -EINVAL;
755
756                 break;
757
758         case FW_ISO_CONTEXT_TRANSMIT:
759                 if (request->speed > SCODE_3200)
760                         return -EINVAL;
761
762                 break;
763
764         default:
765                 return -EINVAL;
766         }
767
768         context =  fw_iso_context_create(client->device->card,
769                                          request->type,
770                                          request->channel,
771                                          request->speed,
772                                          request->header_size,
773                                          iso_callback, client);
774         if (IS_ERR(context))
775                 return PTR_ERR(context);
776
777         client->iso_closure = request->closure;
778         client->iso_context = context;
779
780         /* We only support one context at this time. */
781         request->handle = 0;
782
783         return 0;
784 }
785
786 /* Macros for decoding the iso packet control header. */
787 #define GET_PAYLOAD_LENGTH(v)   ((v) & 0xffff)
788 #define GET_INTERRUPT(v)        (((v) >> 16) & 0x01)
789 #define GET_SKIP(v)             (((v) >> 17) & 0x01)
790 #define GET_TAG(v)              (((v) >> 18) & 0x03)
791 #define GET_SY(v)               (((v) >> 20) & 0x0f)
792 #define GET_HEADER_LENGTH(v)    (((v) >> 24) & 0xff)
793
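/*
 * FW_CDEV_IOC_QUEUE_ISO: walk the user-supplied array of packed
 * fw_cdev_iso_packet descriptors, unpack each control word with the
 * GET_* macros above, and queue the packets onto the iso context.
 * Returns the number of packets queued; request->packets, ->size and
 * ->data are updated so userspace can resubmit whatever was not queued.
 */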
794 static int ioctl_queue_iso(struct client *client, void *buffer)
795 {
796         struct fw_cdev_queue_iso *request = buffer;
797         struct fw_cdev_iso_packet __user *p, *end, *next;
798         struct fw_iso_context *ctx = client->iso_context;
799         unsigned long payload, buffer_end, header_length;
800         u32 control;
801         int count;
802         struct {
803                 struct fw_iso_packet packet;
804                 u8 header[256];
805         } u;
806
807         if (ctx == NULL || request->handle != 0)
808                 return -EINVAL;
809
810         /*
811          * If the user passes a non-NULL data pointer, has mmap()'ed
812          * the iso buffer, and the pointer points inside the buffer,
813          * we set up the payload pointers accordingly.  Otherwise we
814          * set them both to 0, which will still let packets with
815          * payload_length == 0 through.  In other words, if no packets
816          * use the indirect payload, the iso buffer need not be mapped
817          * and the request->data pointer is ignored.
818          */
819
820         payload = (unsigned long)request->data - client->vm_start;
821         buffer_end = client->buffer.page_count << PAGE_SHIFT;
822         if (request->data == 0 || client->buffer.pages == NULL ||
823             payload >= buffer_end) {
824                 payload = 0;
825                 buffer_end = 0;
826         }
827
828         p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);
829
830         if (!access_ok(VERIFY_READ, p, request->size))
831                 return -EFAULT;
832
833         end = (void __user *)p + request->size;
834         count = 0;
835         while (p < end) {
836                 if (get_user(control, &p->control))
837                         return -EFAULT;
838                 u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
839                 u.packet.interrupt = GET_INTERRUPT(control);
840                 u.packet.skip = GET_SKIP(control);
841                 u.packet.tag = GET_TAG(control);
842                 u.packet.sy = GET_SY(control);
843                 u.packet.header_length = GET_HEADER_LENGTH(control);
844
845                 if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
846                         header_length = u.packet.header_length;
847                 } else {
848                         /*
849                          * We require that header_length is a multiple of
850                          * the fixed header size, ctx->header_size.
851                          */
852                         if (ctx->header_size == 0) {
853                                 if (u.packet.header_length > 0)
854                                         return -EINVAL;
855                         } else if (u.packet.header_length % ctx->header_size != 0) {
856                                 return -EINVAL;
857                         }
858                         header_length = 0;
859                 }
860
861                 next = (struct fw_cdev_iso_packet __user *)
862                         &p->header[header_length / 4];
863                 if (next > end)
864                         return -EINVAL;
865                 if (__copy_from_user
866                     (u.packet.header, p->header, header_length))
867                         return -EFAULT;
868                 if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
869                     u.packet.header_length + u.packet.payload_length > 0)
870                         return -EINVAL;
871                 if (payload + u.packet.payload_length > buffer_end)
872                         return -EINVAL;
873
874                 if (fw_iso_context_queue(ctx, &u.packet,
875                                          &client->buffer, payload))
876                         break;
877
878                 p = next;
879                 payload += u.packet.payload_length;
880                 count++;
881         }
882
883         request->size    -= uptr_to_u64(p) - request->packets;
884         request->packets  = uptr_to_u64(p);
885         request->data     = client->vm_start + payload;
886
887         return count;
888 }
889
890 static int ioctl_start_iso(struct client *client, void *buffer)
891 {
892         struct fw_cdev_start_iso *request = buffer;
893
894         if (client->iso_context == NULL || request->handle != 0)
895                 return -EINVAL;
896
897         if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
898                 if (request->tags == 0 || request->tags > 15)
899                         return -EINVAL;
900
901                 if (request->sync > 15)
902                         return -EINVAL;
903         }
904
905         return fw_iso_context_start(client->iso_context, request->cycle,
906                                     request->sync, request->tags);
907 }
908
909 static int ioctl_stop_iso(struct client *client, void *buffer)
910 {
911         struct fw_cdev_stop_iso *request = buffer;
912
913         if (client->iso_context == NULL || request->handle != 0)
914                 return -EINVAL;
915
916         return fw_iso_context_stop(client->iso_context);
917 }
918
919 static int ioctl_get_cycle_timer(struct client *client, void *buffer)
920 {
921         struct fw_cdev_get_cycle_timer *request = buffer;
922         struct fw_card *card = client->device->card;
923         unsigned long long bus_time;
924         struct timeval tv;
925         unsigned long flags;
926
927         preempt_disable();
928         local_irq_save(flags);
929
930         bus_time = card->driver->get_bus_time(card);
931         do_gettimeofday(&tv);
932
933         local_irq_restore(flags);
934         preempt_enable();
935
936         request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
937         request->cycle_timer = bus_time & 0xffffffff;
938         return 0;
939 }
940
941 static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
942         ioctl_get_info,
943         ioctl_send_request,
944         ioctl_allocate,
945         ioctl_deallocate,
946         ioctl_send_response,
947         ioctl_initiate_bus_reset,
948         ioctl_add_descriptor,
949         ioctl_remove_descriptor,
950         ioctl_create_iso_context,
951         ioctl_queue_iso,
952         ioctl_start_iso,
953         ioctl_stop_iso,
954         ioctl_get_cycle_timer,
955 };
956
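/*
 * All ioctls share one dispatch path: the argument struct is copied
 * into a 256 byte stack buffer for _IOC_WRITE ioctls, handed to the
 * handler indexed by _IOC_NR(cmd), and copied back out for _IOC_READ
 * ioctls.
 */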
957 static int
958 dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
959 {
960         char buffer[256];
961         int ret;
962
963         if (_IOC_TYPE(cmd) != '#' ||
964             _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
965                 return -EINVAL;
966
967         if (_IOC_DIR(cmd) & _IOC_WRITE) {
968                 if (_IOC_SIZE(cmd) > sizeof(buffer) ||
969                     copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
970                         return -EFAULT;
971         }
972
973         ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
974         if (ret < 0)
975                 return ret;
976
977         if (_IOC_DIR(cmd) & _IOC_READ) {
978                 if (_IOC_SIZE(cmd) > sizeof(buffer) ||
979                     copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
980                         return -EFAULT;
981         }
982
983         return ret;
984 }
985
986 static long
987 fw_device_op_ioctl(struct file *file,
988                    unsigned int cmd, unsigned long arg)
989 {
990         struct client *client = file->private_data;
991
992         if (fw_device_is_shutdown(client->device))
993                 return -ENODEV;
994
995         return dispatch_ioctl(client, cmd, (void __user *) arg);
996 }
997
998 #ifdef CONFIG_COMPAT
999 static long
1000 fw_device_op_compat_ioctl(struct file *file,
1001                           unsigned int cmd, unsigned long arg)
1002 {
1003         struct client *client = file->private_data;
1004
1005         if (fw_device_is_shutdown(client->device))
1006                 return -ENODEV;
1007
1008         return dispatch_ioctl(client, cmd, compat_ptr(arg));
1009 }
1010 #endif
1011
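/*
 * mmap() backs the (single) iso buffer: a page-aligned, VM_SHARED
 * mapping is required, and the DMA direction is chosen from the
 * mapping's write permission.
 */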
1012 static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
1013 {
1014         struct client *client = file->private_data;
1015         enum dma_data_direction direction;
1016         unsigned long size;
1017         int page_count, ret;
1018
1019         if (fw_device_is_shutdown(client->device))
1020                 return -ENODEV;
1021
1022         /* FIXME: We could support multiple buffers, but we don't. */
1023         if (client->buffer.pages != NULL)
1024                 return -EBUSY;
1025
1026         if (!(vma->vm_flags & VM_SHARED))
1027                 return -EINVAL;
1028
1029         if (vma->vm_start & ~PAGE_MASK)
1030                 return -EINVAL;
1031
1032         client->vm_start = vma->vm_start;
1033         size = vma->vm_end - vma->vm_start;
1034         page_count = size >> PAGE_SHIFT;
1035         if (size & ~PAGE_MASK)
1036                 return -EINVAL;
1037
1038         if (vma->vm_flags & VM_WRITE)
1039                 direction = DMA_TO_DEVICE;
1040         else
1041                 direction = DMA_FROM_DEVICE;
1042
1043         ret = fw_iso_buffer_init(&client->buffer, client->device->card,
1044                                  page_count, direction);
1045         if (ret < 0)
1046                 return ret;
1047
1048         ret = fw_iso_buffer_map(&client->buffer, vma);
1049         if (ret < 0)
1050                 fw_iso_buffer_destroy(&client->buffer, client->device->card);
1051
1052         return ret;
1053 }
1054
1055 static int shutdown_resource(int id, void *p, void *data)
1056 {
1057         struct client_resource *r = p;
1058         struct client *client = data;
1059
1060         r->release(client, r);
1061
1062         return 0;
1063 }
1064
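/*
 * release(): unlink the client, free the iso buffer and context, mark
 * the client as shutting down, then release every remaining resource
 * and event before freeing the client itself.
 */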
1065 static int fw_device_op_release(struct inode *inode, struct file *file)
1066 {
1067         struct client *client = file->private_data;
1068         struct event *e, *next_e;
1069         unsigned long flags;
1070
1071         mutex_lock(&client->device->client_list_mutex);
1072         list_del(&client->link);
1073         mutex_unlock(&client->device->client_list_mutex);
1074
1075         if (client->buffer.pages)
1076                 fw_iso_buffer_destroy(&client->buffer, client->device->card);
1077
1078         if (client->iso_context)
1079                 fw_iso_context_destroy(client->iso_context);
1080
1081         /* Freeze client->resource_idr and client->event_list */
1082         spin_lock_irqsave(&client->lock, flags);
1083         client->in_shutdown = true;
1084         spin_unlock_irqrestore(&client->lock, flags);
1085
1086         idr_for_each(&client->resource_idr, shutdown_resource, client);
1087         idr_remove_all(&client->resource_idr);
1088         idr_destroy(&client->resource_idr);
1089
1090         list_for_each_entry_safe(e, next_e, &client->event_list, link)
1091                 kfree(e);
1092
1093         /*
1094          * FIXME: client should be reference-counted.  It's extremely unlikely
1095          * but there may still be transactions being completed at this point.
1096          */
1097         fw_device_put(client->device);
1098         kfree(client);
1099
1100         return 0;
1101 }
1102
1103 static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
1104 {
1105         struct client *client = file->private_data;
1106         unsigned int mask = 0;
1107
1108         poll_wait(file, &client->wait, pt);
1109
1110         if (fw_device_is_shutdown(client->device))
1111                 mask |= POLLHUP | POLLERR;
1112         if (!list_empty(&client->event_list))
1113                 mask |= POLLIN | POLLRDNORM;
1114
1115         return mask;
1116 }
1117
1118 const struct file_operations fw_device_ops = {
1119         .owner          = THIS_MODULE,
1120         .open           = fw_device_op_open,
1121         .read           = fw_device_op_read,
1122         .unlocked_ioctl = fw_device_op_ioctl,
1123         .poll           = fw_device_op_poll,
1124         .release        = fw_device_op_release,
1125         .mmap           = fw_device_op_mmap,
1126
1127 #ifdef CONFIG_COMPAT
1128         .compat_ioctl   = fw_device_op_compat_ioctl,
1129 #endif
1130 };