/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/preempt.h>
#include <linux/time.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/compat.h>
#include <linux/firewire-cdev.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"
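
/*
 * Each open() of a /dev/fw* character device file yields one struct client.
 * Events for the client (bus resets, transaction responses, inbound requests,
 * iso interrupts) are queued on client->event_list and delivered via read();
 * kernel-side objects owned by the client are tracked in client->resource_idr
 * so they can be released when the file is closed.
 */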
struct client;
struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};
/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in the struct.
 */
struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};
struct bus_reset {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};
struct response {
	struct event event;
	struct fw_transaction transaction;
	struct client *client;
	struct client_resource resource;
	struct fw_cdev_event_response response;
};
struct iso_interrupt {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};
struct client {
	u32 version;
	struct fw_device *device;

	spinlock_t lock;
	bool in_shutdown;
	struct idr resource_idr;
	struct list_head event_list;
	wait_queue_head_t wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;

	struct list_head link;
};
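
/*
 * User-space pointers cross this character device ABI as __u64 so that the
 * ioctl structure layouts are identical for 32-bit and 64-bit user lands;
 * the helpers below convert between the two representations.
 */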
static inline void __user *
u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline __u64
uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}
static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	spin_lock_init(&client->lock);
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);

	file->private_data = client;

	mutex_lock(&device->client_list_mutex);
	list_add_tail(&client->link, &device->client_list);
	mutex_unlock(&device->client_list_mutex);

	return 0;
}
static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	/* An event is read back as up to two segments, v[0] and v[1]. */
	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		kfree(event);
	else
		list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}
static ssize_t
dequeue_event(struct client *client, char __user *buffer, size_t count)
{
	unsigned long flags;
	struct event *event;
	size_t size, total;
	ssize_t ret;
	int i;

	ret = wait_event_interruptible(client->wait,
			!list_empty(&client->event_list) ||
			fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;

	if (list_empty(&client->event_list) &&
		       fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irqsave(&client->lock, flags);
	event = container_of(client->event_list.next, struct event, link);
	list_del(&event->link);
	spin_unlock_irqrestore(&client->lock, flags);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			ret = -EFAULT;
			goto out;
		}
		total += size;
	}
	ret = total;

 out:
	kfree(event);

	return ret;
}
static ssize_t
fw_device_op_read(struct file *file,
		  char __user *buffer, size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}
static void
fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
		     struct client *client)
{
	struct fw_card *card = client->device->card;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	event->closure	     = client->bus_reset_closure;
	event->type          = FW_CDEV_EVENT_BUS_RESET;
	event->generation    = client->device->generation;
	event->node_id       = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
	event->irm_node_id   = card->irm_node->node_id;
	event->root_node_id  = card->root_node->node_id;

	spin_unlock_irqrestore(&card->lock, flags);
}
static void
for_each_client(struct fw_device *device,
		void (*callback)(struct client *client))
{
	struct client *c;

	mutex_lock(&device->client_list_mutex);
	list_for_each_entry(c, &device->client_list, link)
		callback(c);
	mutex_unlock(&device->client_list_mutex);
}
static void
queue_bus_reset_event(struct client *client)
{
	struct bus_reset *bus_reset;

	bus_reset = kzalloc(sizeof(*bus_reset), GFP_KERNEL);
	if (bus_reset == NULL) {
		fw_notify("Out of memory when allocating bus reset event\n");
		return;
	}

	fill_bus_reset_event(&bus_reset->reset, client);

	queue_event(client, &bus_reset->event,
		    &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
}
void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}
static int ioctl_get_info(struct client *client, void *buffer)
{
	struct fw_cdev_get_info *get_info = buffer;
	struct fw_cdev_event_bus_reset bus_reset;
	unsigned long ret = 0;

	client->version = get_info->version;
	get_info->version = FW_CDEV_VERSION;
	get_info->card = client->device->card->index;

	down_read(&fw_device_rwsem);

	if (get_info->rom != 0) {
		void __user *uptr = u64_to_uptr(get_info->rom);
		size_t want = get_info->rom_length;
		size_t have = client->device->config_rom_length * 4;

		ret = copy_to_user(uptr, client->device->config_rom,
				   min(want, have));
	}
	get_info->rom_length = client->device->config_rom_length * 4;

	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

	client->bus_reset_closure = get_info->bus_reset_closure;
	if (get_info->bus_reset != 0) {
		void __user *uptr = u64_to_uptr(get_info->bus_reset);

		fill_bus_reset_event(&bus_reset, client);
		if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
			return -EFAULT;
	}

	return 0;
}
static int
add_client_resource(struct client *client, struct client_resource *resource,
		    gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

 retry:
	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
		return -ENOMEM;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_get_new(&client->resource_idr, resource,
				  &resource->handle);
	spin_unlock_irqrestore(&client->lock, flags);

	/* The idr_pre_get() preallocation may have been used up meanwhile. */
	if (ret == -EAGAIN)
		goto retry;

	return ret < 0 ? ret : 0;
}
static int
release_client_resource(struct client *client, u32 handle,
			client_resource_release_fn_t release,
			struct client_resource **resource)
{
	struct client_resource *r;
	unsigned long flags;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		r = NULL;
	else
		r = idr_find(&client->resource_idr, handle);
	if (r && r->release == release)
		idr_remove(&client->resource_idr, handle);
	spin_unlock_irqrestore(&client->lock, flags);

	if (!(r && r->release == release))
		return -EINVAL;

	if (resource)
		*resource = r;
	else
		r->release(client, r);

	return 0;
}
static void
release_transaction(struct client *client, struct client_resource *resource)
{
	struct response *response =
		container_of(resource, struct response, resource);

	fw_cancel_transaction(client->device->card, &response->transaction);
}
static void
complete_transaction(struct fw_card *card, int rcode,
		     void *payload, size_t length, void *data)
{
	struct response *response = data;
	struct client *client = response->client;
	unsigned long flags;
	struct fw_cdev_event_response *r = &response->response;

	if (length < r->length)
		r->length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(r->data, payload, r->length);

	spin_lock_irqsave(&client->lock, flags);
	/*
	 * If called while in shutdown, the idr tree must be left untouched.
	 * The idr handle will be removed later.
	 */
	if (!client->in_shutdown)
		idr_remove(&client->resource_idr, response->resource.handle);
	spin_unlock_irqrestore(&client->lock, flags);

	r->type  = FW_CDEV_EVENT_RESPONSE;
	r->rcode = rcode;

	/*
	 * In the case that sizeof(*r) doesn't align with the position of the
	 * data, and the read is short, preserve an extra copy of the data
	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
	 * for short reads and some apps depended on it, this is both safe
	 * and prudent for compatibility.
	 */
	if (r->length <= sizeof(*r) - offsetof(typeof(*r), data))
		queue_event(client, &response->event, r, sizeof(*r),
			    r->data, r->length);
	else
		queue_event(client, &response->event, r, sizeof(*r) + r->length,
			    NULL, 0);
}
static int ioctl_send_request(struct client *client, void *buffer)
{
	struct fw_device *device = client->device;
	struct fw_cdev_send_request *request = buffer;
	struct response *response;
	int ret;

	/* What is the biggest size we'll accept, really? */
	if (request->length > 4096)
		return -EINVAL;

	response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
	if (response == NULL)
		return -ENOMEM;

	response->client = client;
	response->response.length = request->length;
	response->response.closure = request->closure;

	if (request->data != 0 &&
	    copy_from_user(response->response.data,
			   u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	switch (request->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		ret = -EINVAL;
		goto failed;
	}

	response->resource.release = release_transaction;
	ret = add_client_resource(client, &response->resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	fw_send_request(device->card, &response->transaction,
			request->tcode & 0x1f,
			device->node->node_id,
			request->generation,
			device->max_speed,
			request->offset,
			response->response.data, request->length,
			complete_transaction, response);

	if (request->data)
		return sizeof(request) + request->length;
	else
		return sizeof(request);

 failed:
	kfree(response);

	return ret;
}
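
/*
 * A minimal user-space sketch of this ioctl, for illustration only ("fd" is
 * an already opened /dev/fw* file descriptor, "generation" was taken from a
 * previous FW_CDEV_EVENT_BUS_RESET event, and the offset is the first
 * config ROM quadlet in CSR space):
 *
 *	struct fw_cdev_send_request rq = {
 *		.tcode      = TCODE_READ_QUADLET_REQUEST,
 *		.length     = 4,
 *		.offset     = 0xfffff0000400ULL,
 *		.generation = generation,
 *	};
 *	ioctl(fd, FW_CDEV_IOC_SEND_REQUEST, &rq);
 *
 * The result arrives later as an FW_CDEV_EVENT_RESPONSE event via read().
 */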
struct address_handler {
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
	struct client_resource resource;
};

struct request {
	struct fw_request *request;
	void *data;
	size_t length;
	struct client_resource resource;
};

struct request_event {
	struct event event;
	struct fw_cdev_event_request request;
};
static void
release_request(struct client *client, struct client_resource *resource)
{
	struct request *request =
		container_of(resource, struct request, resource);

	fw_send_response(client->device->card, request->request,
			 RCODE_CONFLICT_ERROR);
	kfree(request);
}
static void
handle_request(struct fw_card *card, struct fw_request *r,
	       int tcode, int destination, int source,
	       int generation, int speed,
	       unsigned long long offset,
	       void *payload, size_t length, void *callback_data)
{
	struct address_handler *handler = callback_data;
	struct request *request;
	struct request_event *e;
	struct client *client = handler->client;
	int ret;

	request = kmalloc(sizeof(*request), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (request == NULL || e == NULL)
		goto failed;

	request->request = r;
	request->data    = payload;
	request->length  = length;

	request->resource.release = release_request;
	ret = add_client_resource(client, &request->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	e->request.type    = FW_CDEV_EVENT_REQUEST;
	e->request.tcode   = tcode;
	e->request.offset  = offset;
	e->request.length  = length;
	e->request.handle  = request->resource.handle;
	e->request.closure = handler->closure;

	queue_event(client, &e->event,
		    &e->request, sizeof(e->request), payload, length);
	return;

 failed:
	kfree(request);
	kfree(e);
	fw_send_response(card, r, RCODE_CONFLICT_ERROR);
}
static void
release_address_handler(struct client *client,
			struct client_resource *resource)
{
	struct address_handler *handler =
		container_of(resource, struct address_handler, resource);

	fw_core_remove_address_handler(&handler->handler);
	kfree(handler);
}
static int ioctl_allocate(struct client *client, void *buffer)
{
	struct fw_cdev_allocate *request = buffer;
	struct address_handler *handler;
	struct fw_address_region region;
	int ret;

	handler = kmalloc(sizeof(*handler), GFP_KERNEL);
	if (handler == NULL)
		return -ENOMEM;

	region.start = request->offset;
	region.end = request->offset + request->length;
	handler->handler.length = request->length;
	handler->handler.address_callback = handle_request;
	handler->handler.callback_data = handler;
	handler->closure = request->closure;
	handler->client = client;

	ret = fw_core_add_address_handler(&handler->handler, &region);
	if (ret < 0) {
		kfree(handler);
		return ret;
	}

	handler->resource.release = release_address_handler;
	ret = add_client_resource(client, &handler->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &handler->resource);
		return ret;
	}
	request->handle = handler->resource.handle;

	return 0;
}
static int ioctl_deallocate(struct client *client, void *buffer)
{
	struct fw_cdev_deallocate *request = buffer;

	return release_client_resource(client, request->handle,
				       release_address_handler, NULL);
}
static int ioctl_send_response(struct client *client, void *buffer)
{
	struct fw_cdev_send_response *request = buffer;
	struct client_resource *resource;
	struct request *r;

	if (release_client_resource(client, request->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = container_of(resource, struct request, resource);
	if (request->length < r->length)
		r->length = request->length;
	if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
		return -EFAULT;

	fw_send_response(client->device->card, r->request, request->rcode);
	kfree(r);

	return 0;
}
static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
{
	struct fw_cdev_initiate_bus_reset *request = buffer;
	int short_reset;

	short_reset = (request->type == FW_CDEV_SHORT_RESET);

	return fw_core_initiate_bus_reset(client->device->card, short_reset);
}
struct descriptor {
	struct fw_descriptor d;
	struct client_resource resource;
	u32 data[0];
};
static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor *descriptor =
		container_of(resource, struct descriptor, resource);

	fw_core_remove_descriptor(&descriptor->d);
	kfree(descriptor);
}
static int ioctl_add_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_add_descriptor *request = buffer;
	struct descriptor *descriptor;
	int ret;

	if (request->length > 256)
		return -EINVAL;

	descriptor =
		kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
	if (descriptor == NULL)
		return -ENOMEM;

	if (copy_from_user(descriptor->data,
			   u64_to_uptr(request->data), request->length * 4)) {
		ret = -EFAULT;
		goto failed;
	}

	descriptor->d.length = request->length;
	descriptor->d.immediate = request->immediate;
	descriptor->d.key = request->key;
	descriptor->d.data = descriptor->data;

	ret = fw_core_add_descriptor(&descriptor->d);
	if (ret < 0)
		goto failed;

	descriptor->resource.release = release_descriptor;
	ret = add_client_resource(client, &descriptor->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&descriptor->d);
		goto failed;
	}
	request->handle = descriptor->resource.handle;

	return 0;
 failed:
	kfree(descriptor);

	return ret;
}
static int ioctl_remove_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_remove_descriptor *request = buffer;

	return release_client_resource(client, request->handle,
				       release_descriptor, NULL);
}
static void
iso_callback(struct fw_iso_context *context, u32 cycle,
	     size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt *irq;

	irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC);
	if (irq == NULL)
		return;

	irq->interrupt.type          = FW_CDEV_EVENT_ISO_INTERRUPT;
	irq->interrupt.closure       = client->iso_closure;
	irq->interrupt.cycle         = cycle;
	irq->interrupt.header_length = header_length;
	memcpy(irq->interrupt.header, header, header_length);
	queue_event(client, &irq->event, &irq->interrupt,
		    sizeof(irq->interrupt) + header_length, NULL, 0);
}
static int ioctl_create_iso_context(struct client *client, void *buffer)
{
	struct fw_cdev_create_iso_context *request = buffer;
	struct fw_iso_context *context;

	/* We only support one context at this time. */
	if (client->iso_context != NULL)
		return -EBUSY;

	if (request->channel > 63)
		return -EINVAL;

	switch (request->type) {
	case FW_ISO_CONTEXT_RECEIVE:
		if (request->header_size < 4 || (request->header_size & 3))
			return -EINVAL;
		break;

	case FW_ISO_CONTEXT_TRANSMIT:
		if (request->speed > SCODE_3200)
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	context = fw_iso_context_create(client->device->card,
					request->type,
					request->channel,
					request->speed,
					request->header_size,
					iso_callback, client);
	if (IS_ERR(context))
		return PTR_ERR(context);

	client->iso_closure = request->closure;
	client->iso_context = context;

	/* We only support one context at this time. */
	request->handle = 0;

	return 0;
}
/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
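
/*
 * User space packs the control word as the inverse of the macros above,
 * e.g. (illustrative):
 *
 *	control = payload_length | interrupt << 16 | skip << 17 |
 *		  tag << 18 | sy << 20 | header_length << 24;
 */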
static int ioctl_queue_iso(struct client *client, void *buffer)
{
	struct fw_cdev_queue_iso *request = buffer;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, header_length;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || request->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we setup the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the request->data pointer is ignored.
	 */

	payload = (unsigned long)request->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (request->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);

	if (!access_ok(VERIFY_READ, p, request->size))
		return -EFAULT;

	end = (void __user *)p + request->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
			header_length = u.packet.header_length;
		} else {
			/*
			 * We require that header_length is a multiple of
			 * the fixed header size, ctx->header_size.
			 */
			if (ctx->header_size == 0) {
				if (u.packet.header_length > 0)
					return -EINVAL;
			} else if (u.packet.header_length % ctx->header_size != 0) {
				return -EINVAL;
			}
			header_length = 0;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[header_length / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
		    (u.packet.header, p->header, header_length))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

	request->size    -= uptr_to_u64(p) - request->packets;
	request->packets  = uptr_to_u64(p);
	request->data     = client->vm_start + payload;

	return count;
}
static int ioctl_start_iso(struct client *client, void *buffer)
{
	struct fw_cdev_start_iso *request = buffer;

	if (client->iso_context == NULL || request->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
		if (request->tags == 0 || request->tags > 15)
			return -EINVAL;

		if (request->sync > 15)
			return -EINVAL;
	}

	return fw_iso_context_start(client->iso_context, request->cycle,
				    request->sync, request->tags);
}
static int ioctl_stop_iso(struct client *client, void *buffer)
{
	struct fw_cdev_stop_iso *request = buffer;

	if (client->iso_context == NULL || request->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}
static int ioctl_get_cycle_timer(struct client *client, void *buffer)
{
	struct fw_cdev_get_cycle_timer *request = buffer;
	struct fw_card *card = client->device->card;
	unsigned long long bus_time;
	struct timeval tv;
	unsigned long flags;

	preempt_disable();
	local_irq_save(flags);

	bus_time = card->driver->get_bus_time(card);
	do_gettimeofday(&tv);

	local_irq_restore(flags);
	preempt_enable();

	request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
	request->cycle_timer = bus_time & 0xffffffff;

	return 0;
}
static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
	ioctl_get_info,
	ioctl_send_request,
	ioctl_allocate,
	ioctl_deallocate,
	ioctl_send_response,
	ioctl_initiate_bus_reset,
	ioctl_add_descriptor,
	ioctl_remove_descriptor,
	ioctl_create_iso_context,
	ioctl_queue_iso,
	ioctl_start_iso,
	ioctl_stop_iso,
	ioctl_get_cycle_timer,
};
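
/*
 * The array index is the _IOC_NR() of the FW_CDEV_IOC_* command number, so
 * the ordering above must match the ioctl definitions in
 * <linux/firewire-cdev.h>; dispatch_ioctl() below relies on this.
 */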
static int
dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
{
	char buffer[256];
	int ret;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
		return -EINVAL;

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	return ret;
}
static long
fw_device_op_ioctl(struct file *file,
		   unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	return dispatch_ioctl(client, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long
fw_device_op_compat_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* compat_ptr() widens a 32-bit user pointer for the 64-bit kernel. */
	return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
				 page_count, direction);
	if (ret < 0)
		return ret;

	ret = fw_iso_buffer_map(&client->buffer, vma);
	if (ret < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return ret;
}
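
/*
 * The mapping set up here backs the indirect iso packet payloads used by
 * ioctl_queue_iso(): user space mmap()s the buffer once and then passes
 * pointers into that mapping via the fw_cdev_queue_iso data field.
 */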
static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *r = p;
	struct client *client = data;

	r->release(client, r);

	return 0;
}
static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *e, *next_e;
	unsigned long flags;

	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irqsave(&client->lock, flags);
	client->in_shutdown = true;
	spin_unlock_irqrestore(&client->lock, flags);

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_remove_all(&client->resource_idr);
	idr_destroy(&client->resource_idr);

	list_for_each_entry_safe(e, next_e, &client->event_list, link)
		kfree(e);

	/*
	 * FIXME: client should be reference-counted.  It's extremely unlikely
	 * but there may still be transactions being completed at this point.
	 */
	fw_device_put(client->device);
	kfree(client);

	return 0;
}
static unsigned int fw_device_op_poll(struct file *file, poll_table *pt)
{
	struct client *client = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= POLLHUP | POLLERR;
	if (!list_empty(&client->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}
const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.poll		= fw_device_op_poll,
	.release	= fw_device_op_release,
	.mmap		= fw_device_op_mmap,

#ifdef CONFIG_COMPAT
	.compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};
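
/*
 * A typical user-space consumer, sketched for illustration only: open the
 * device, announce the ABI version via FW_CDEV_IOC_GET_INFO, then poll()
 * and read() events:
 *
 *	int fd = open("/dev/fw1", O_RDWR);
 *	struct fw_cdev_get_info info = { .version = FW_CDEV_VERSION };
 *	ioctl(fd, FW_CDEV_IOC_GET_INFO, &info);
 *	for (;;) {
 *		union fw_cdev_event event;
 *		read(fd, &event, sizeof(event));
 *		switch (event.common.type) {
 *		case FW_CDEV_EVENT_BUS_RESET:
 *			...
 *		}
 *	}
 */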