/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
21 #include <linux/module.h>
22 #include <linux/kernel.h>
23 #include <linux/wait.h>
24 #include <linux/errno.h>
25 #include <linux/device.h>
26 #include <linux/vmalloc.h>
27 #include <linux/poll.h>
28 #include <linux/preempt.h>
29 #include <linux/time.h>
30 #include <linux/spinlock.h>
31 #include <linux/delay.h>
33 #include <linux/idr.h>
34 #include <linux/compat.h>
35 #include <linux/firewire-cdev.h>
36 #include <asm/system.h>
37 #include <asm/uaccess.h>
38 #include "fw-transaction.h"
39 #include "fw-topology.h"
40 #include "fw-device.h"
/*
 * A per-client kernel resource (address handler, pending transaction,
 * descriptor, ...) that must be torn down when the fd is closed.
 * ->release is the type-specific destructor.
 */
43 struct client_resource {
44 struct list_head link;
45 void (*release)(struct client *client, struct client_resource *r);
50 * dequeue_event() just kfree()'s the event, so the event has to be
51 * the first field in the struct.
/* Up to two scatter segments copied to user space by dequeue_event(). */
55 struct { void *data; size_t size; } v[2];
56 struct list_head link;
/* Bus-reset event wrapper queued to the client. */
61 struct fw_cdev_event_bus_reset reset;
/* Outbound transaction: transaction state + resource + response event. */
66 struct fw_transaction transaction;
67 struct client *client;
68 struct client_resource resource;
69 struct fw_cdev_event_response response;
/* Isochronous interrupt event; header data is appended after the struct. */
72 struct iso_interrupt {
74 struct fw_cdev_event_iso_interrupt interrupt;
/*
 * Per-open-file state: owning device, outstanding resources, queued
 * events with a waitqueue for readers, and the (single) iso context
 * plus its mmap()ed DMA buffer.
 */
79 struct fw_device *device;
82 struct list_head resource_list;
83 struct list_head event_list;
84 wait_queue_head_t wait;
85 u64 bus_reset_closure;
87 struct fw_iso_context *iso_context;
89 struct fw_iso_buffer buffer;
90 unsigned long vm_start;
/* Link in device->client_list, protected by device->client_list_lock. */
92 struct list_head link;
/* Convert a 64-bit value from the cdev ABI into a user-space pointer. */
95 static inline void __user *
96 u64_to_uptr(__u64 value)
98 return (void __user *)(unsigned long)value;
/* Inverse of u64_to_uptr(): pack a user pointer into the 64-bit ABI field. */
102 uptr_to_u64(void __user *ptr)
104 return (__u64)(unsigned long)ptr;
/*
 * open() handler: look up the fw_device by char-device number, allocate
 * per-file client state, and register the client on the device's list.
 */
107 static int fw_device_op_open(struct inode *inode, struct file *file)
109 struct fw_device *device;
110 struct client *client;
/* Takes a reference on the device; dropped on every error path below. */
113 device = fw_device_get_by_devt(inode->i_rdev);
117 if (fw_device_is_shutdown(device)) {
118 fw_device_put(device);
122 client = kzalloc(sizeof(*client), GFP_KERNEL);
123 if (client == NULL) {
124 fw_device_put(device);
/* client->device inherits the reference taken above. */
128 client->device = device;
129 INIT_LIST_HEAD(&client->event_list);
130 INIT_LIST_HEAD(&client->resource_list);
131 spin_lock_init(&client->lock);
132 init_waitqueue_head(&client->wait);
134 file->private_data = client;
/* Publish the client so bus-reset/shutdown notifications reach it. */
136 spin_lock_irqsave(&device->client_list_lock, flags);
137 list_add_tail(&client->link, &device->client_list);
138 spin_unlock_irqrestore(&device->client_list_lock, flags);
/*
 * Append an event (with up to two data segments) to the client's event
 * queue and wake any reader blocked in dequeue_event().  Safe from
 * atomic context; callers allocate the event.
 */
143 static void queue_event(struct client *client, struct event *event,
144 void *data0, size_t size0, void *data1, size_t size1)
148 event->v[0].data = data0;
149 event->v[0].size = size0;
150 event->v[1].data = data1;
151 event->v[1].size = size1;
153 spin_lock_irqsave(&client->lock, flags);
154 list_add_tail(&event->link, &client->event_list);
155 spin_unlock_irqrestore(&client->lock, flags);
157 wake_up_interruptible(&client->wait);
/*
 * Blocking read helper: wait for an event (or device shutdown), pop the
 * oldest event off the queue and copy its segments to user space.
 */
161 dequeue_event(struct client *client, char __user *buffer, size_t count)
168 retval = wait_event_interruptible(client->wait,
169 !list_empty(&client->event_list) ||
170 fw_device_is_shutdown(client->device))
174 if (list_empty(&client->event_list) &&
175 fw_device_is_shutdown(client->device))
/* Dequeue under the lock; the copy below runs without it. */
178 spin_lock_irqsave(&client->lock, flags);
179 event = container_of(client->event_list.next, struct event, link);
180 list_del(&event->link);
181 spin_unlock_irqrestore(&client->lock, flags);
/* Copy both segments, truncating to the user's buffer size. */
184 for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
185 size = min(event->v[i].size, count - total);
186 if (copy_to_user(buffer + total, event->v[i].data, size)) {
/* read() handler: events are the only thing this fd produces. */
201 fw_device_op_read(struct file *file,
202 char __user *buffer, size_t count, loff_t *offset)
204 struct client *client = file->private_data;
206 return dequeue_event(client, buffer, count);
/*
 * Snapshot current bus topology (generation, node IDs) into a
 * bus-reset event, under card->lock so the fields are consistent.
 */
210 fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
211 struct client *client)
213 struct fw_card *card = client->device->card;
216 spin_lock_irqsave(&card->lock, flags);
218 event->closure = client->bus_reset_closure;
219 event->type = FW_CDEV_EVENT_BUS_RESET;
220 event->generation = client->device->generation;
221 event->node_id = client->device->node_id;
222 event->local_node_id = card->local_node->node_id;
223 event->bm_node_id = 0; /* FIXME: We don't track the BM. */
224 event->irm_node_id = card->irm_node->node_id;
225 event->root_node_id = card->root_node->node_id;
227 spin_unlock_irqrestore(&card->lock, flags);
/*
 * Invoke callback on every client of a device, holding
 * client_list_lock, so callbacks must be atomic-safe.
 */
231 for_each_client(struct fw_device *device,
232 void (*callback)(struct client *client))
237 spin_lock_irqsave(&device->client_list_lock, flags);
239 list_for_each_entry(c, &device->client_list, link)
242 spin_unlock_irqrestore(&device->client_list_lock, flags);
/*
 * Allocate and queue a bus-reset event for one client.  GFP_ATOMIC:
 * called via for_each_client() with a spinlock held.
 */
246 queue_bus_reset_event(struct client *client)
248 struct bus_reset *bus_reset;
250 bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC);
251 if (bus_reset == NULL) {
252 fw_notify("Out of memory when allocating bus reset event\n");
256 fill_bus_reset_event(&bus_reset->reset, client);
258 queue_event(client, &bus_reset->event,
259 &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
/* Called by the core after a bus reset: notify every open client. */
262 void fw_device_cdev_update(struct fw_device *device)
264 for_each_client(device, queue_bus_reset_event);
/* Wake a blocked reader so it can observe device shutdown. */
267 static void wake_up_client(struct client *client)
269 wake_up_interruptible(&client->wait);
/* Called by the core when the device goes away: unblock all readers. */
272 void fw_device_cdev_remove(struct fw_device *device)
274 for_each_client(device, wake_up_client);
/*
 * GET_INFO ioctl: negotiate the ABI version, optionally copy the
 * config ROM out, and arm bus-reset event delivery for this client.
 */
277 static int ioctl_get_info(struct client *client, void *buffer)
279 struct fw_cdev_get_info *get_info = buffer;
280 struct fw_cdev_event_bus_reset bus_reset;
281 unsigned long ret = 0;
283 client->version = get_info->version;
284 get_info->version = FW_CDEV_VERSION;
285 get_info->card = client->device->card->index;
/* rwsem keeps the config ROM stable while we copy it. */
287 down_read(&fw_device_rwsem);
289 if (get_info->rom != 0) {
290 void __user *uptr = u64_to_uptr(get_info->rom);
291 size_t want = get_info->rom_length;
/* config_rom_length is in quadlets; user ABI wants bytes. */
292 size_t have = client->device->config_rom_length * 4;
294 ret = copy_to_user(uptr, client->device->config_rom,
297 get_info->rom_length = client->device->config_rom_length * 4;
299 up_read(&fw_device_rwsem);
/* Non-zero bus_reset pointer: report the current bus state now. */
304 client->bus_reset_closure = get_info->bus_reset_closure;
305 if (get_info->bus_reset != 0) {
306 void __user *uptr = u64_to_uptr(get_info->bus_reset);
308 fill_bus_reset_event(&bus_reset, client);
309 if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
/*
 * Register a resource with the client and hand out a new handle.
 * Handles come from a monotonically increasing per-client counter.
 */
317 add_client_resource(struct client *client, struct client_resource *resource)
321 spin_lock_irqsave(&client->lock, flags);
322 list_add_tail(&resource->link, &client->resource_list);
323 resource->handle = client->resource_handle++;
324 spin_unlock_irqrestore(&client->lock, flags);
/*
 * Find a resource by handle, unlink it, and either return it via
 * *resource or run its ->release destructor.
 */
328 release_client_resource(struct client *client, u32 handle,
329 struct client_resource **resource)
331 struct client_resource *r;
334 spin_lock_irqsave(&client->lock, flags);
335 list_for_each_entry(r, &client->resource_list, link) {
336 if (r->handle == handle) {
341 spin_unlock_irqrestore(&client->lock, flags);
/* r points at the list head sentinel iff the loop found no match. */
343 if (&r->link == &client->resource_list)
349 r->release(client, r);
/* Resource destructor for an in-flight transaction: cancel it. */
355 release_transaction(struct client *client, struct client_resource *resource)
357 struct response *response =
358 container_of(resource, struct response, resource);
360 fw_cancel_transaction(client->device->card, &response->transaction);
/*
 * Transaction completion callback (tasklet context): capture the
 * response payload, drop the resource, and queue a response event.
 */
364 complete_transaction(struct fw_card *card, int rcode,
365 void *payload, size_t length, void *data)
367 struct response *response = data;
368 struct client *client = response->client;
370 struct fw_cdev_event_response *r = &response->response;
/* Never copy more than the buffer allocated at submit time. */
372 if (length < r->length)
374 if (rcode == RCODE_COMPLETE)
375 memcpy(r->data, payload, r->length);
377 spin_lock_irqsave(&client->lock, flags);
378 list_del(&response->resource.link);
379 spin_unlock_irqrestore(&client->lock, flags);
381 r->type = FW_CDEV_EVENT_RESPONSE;
385 * In the case that sizeof(*r) doesn't align with the position of the
386 * data, and the read is short, preserve an extra copy of the data
387 * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless
388 * for short reads and some apps depended on it, this is both safe
389 * and prudent for compatibility.
391 if (r->length <= sizeof(*r) - offsetof(typeof(*r), data))
392 queue_event(client, &response->event, r, sizeof(*r),
395 queue_event(client, &response->event, r, sizeof(*r) + r->length,
/*
 * SEND_REQUEST ioctl: copy the payload from user space, register the
 * transaction as a client resource, and submit it to the core.
 * Completion is reported asynchronously via complete_transaction().
 */
399 static int ioctl_send_request(struct client *client, void *buffer)
401 struct fw_device *device = client->device;
402 struct fw_cdev_send_request *request = buffer;
403 struct response *response;
405 /* What is the biggest size we'll accept, really? */
406 if (request->length > 4096)
/* Payload buffer is allocated inline after the response struct. */
409 response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
410 if (response == NULL)
413 response->client = client;
414 response->response.length = request->length;
415 response->response.closure = request->closure;
418 copy_from_user(response->response.data,
419 u64_to_uptr(request->data), request->length)) {
/* Make it cancellable via the handle before submission. */
424 response->resource.release = release_transaction;
425 add_client_resource(client, &response->resource);
427 fw_send_request(device->card, &response->transaction,
428 request->tcode & 0x1f,
429 device->node->node_id,
433 response->response.data, request->length,
434 complete_transaction, response);
/*
 * NOTE(review): sizeof(request) is the size of a pointer here;
 * sizeof(*request) looks intended — TODO confirm against the
 * dispatch_ioctl() return-value contract.
 */
437 return sizeof(request) + request->length;
439 return sizeof(request);
/* Client-owned address-space handler registered with the core. */
442 struct address_handler {
443 struct fw_address_handler handler;
445 struct client *client;
446 struct client_resource resource;
/* An inbound request held until user space sends a response. */
450 struct fw_request *request;
453 struct client_resource resource;
/* Event wrapper delivered to user space for an inbound request. */
456 struct request_event {
458 struct fw_cdev_event_request request;
/*
 * Resource destructor for an unanswered inbound request: reject it
 * with RCODE_CONFLICT_ERROR so the requester is not left hanging.
 */
462 release_request(struct client *client, struct client_resource *resource)
464 struct request *request =
465 container_of(resource, struct request, resource);
467 fw_send_response(client->device->card, request->request,
468 RCODE_CONFLICT_ERROR);
/*
 * Core callback for inbound requests hitting a client-registered
 * address range: stash the request as a resource and queue a
 * REQUEST event (payload delivered as the event's second segment).
 * Atomic context — hence GFP_ATOMIC and the error response on OOM.
 */
473 handle_request(struct fw_card *card, struct fw_request *r,
474 int tcode, int destination, int source,
475 int generation, int speed,
476 unsigned long long offset,
477 void *payload, size_t length, void *callback_data)
479 struct address_handler *handler = callback_data;
480 struct request *request;
481 struct request_event *e;
482 struct client *client = handler->client;
484 request = kmalloc(sizeof(*request), GFP_ATOMIC);
485 e = kmalloc(sizeof(*e), GFP_ATOMIC);
486 if (request == NULL || e == NULL) {
489 fw_send_response(card, r, RCODE_CONFLICT_ERROR);
493 request->request = r;
494 request->data = payload;
495 request->length = length;
/* User space completes it later via SEND_RESPONSE with this handle. */
497 request->resource.release = release_request;
498 add_client_resource(client, &request->resource);
500 e->request.type = FW_CDEV_EVENT_REQUEST;
501 e->request.tcode = tcode;
502 e->request.offset = offset;
503 e->request.length = length;
504 e->request.handle = request->resource.handle;
505 e->request.closure = handler->closure;
507 queue_event(client, &e->event,
508 &e->request, sizeof(e->request), payload, length);
/* Resource destructor: unregister the address handler from the core. */
512 release_address_handler(struct client *client,
513 struct client_resource *resource)
515 struct address_handler *handler =
516 container_of(resource, struct address_handler, resource);
518 fw_core_remove_address_handler(&handler->handler);
522 static int ioctl_allocate(struct client *client, void *buffer)
524 struct fw_cdev_allocate *request = buffer;
525 struct address_handler *handler;
526 struct fw_address_region region;
528 handler = kmalloc(sizeof(*handler), GFP_KERNEL);
532 region.start = request->offset;
533 region.end = request->offset + request->length;
534 handler->handler.length = request->length;
535 handler->handler.address_callback = handle_request;
536 handler->handler.callback_data = handler;
537 handler->closure = request->closure;
538 handler->client = client;
540 if (fw_core_add_address_handler(&handler->handler, ®ion) < 0) {
545 handler->resource.release = release_address_handler;
546 add_client_resource(client, &handler->resource);
547 request->handle = handler->resource.handle;
/* DEALLOCATE ioctl: tear down an address handler by its handle. */
552 static int ioctl_deallocate(struct client *client, void *buffer)
554 struct fw_cdev_deallocate *request = buffer;
556 return release_client_resource(client, request->handle, NULL);
/*
 * SEND_RESPONSE ioctl: look up the pending inbound request by handle,
 * copy the response payload from user space, and complete it with the
 * user-supplied rcode.
 */
559 static int ioctl_send_response(struct client *client, void *buffer)
561 struct fw_cdev_send_response *request = buffer;
562 struct client_resource *resource;
/* Detach (not release) — we complete the request ourselves below. */
565 if (release_client_resource(client, request->handle, &resource) < 0)
567 r = container_of(resource, struct request, resource);
/* Never copy more than the original request expected. */
568 if (request->length < r->length)
569 r->length = request->length;
570 if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
573 fw_send_response(client->device->card, r->request, request->rcode);
/* INITIATE_BUS_RESET ioctl: trigger a long or short bus reset. */
579 static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
581 struct fw_cdev_initiate_bus_reset *request = buffer;
584 short_reset = (request->type == FW_CDEV_SHORT_RESET);
586 return fw_core_initiate_bus_reset(client->device->card, short_reset);
/* Client-owned config-ROM descriptor; quadlet data follows inline. */
590 struct fw_descriptor d;
591 struct client_resource resource;
/* Resource destructor: remove the descriptor from the local ROM. */
595 static void release_descriptor(struct client *client,
596 struct client_resource *resource)
598 struct descriptor *descriptor =
599 container_of(resource, struct descriptor, resource);
601 fw_core_remove_descriptor(&descriptor->d);
/*
 * ADD_DESCRIPTOR ioctl: copy a user-supplied config-ROM descriptor
 * (length in quadlets, max 256) and add it to the local node's ROM.
 */
605 static int ioctl_add_descriptor(struct client *client, void *buffer)
607 struct fw_cdev_add_descriptor *request = buffer;
608 struct descriptor *descriptor;
/* length is in quadlets; cap keeps the kmalloc below bounded. */
611 if (request->length > 256)
615 kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
616 if (descriptor == NULL)
619 if (copy_from_user(descriptor->data,
620 u64_to_uptr(request->data), request->length * 4)) {
625 descriptor->d.length = request->length;
626 descriptor->d.immediate = request->immediate;
627 descriptor->d.key = request->key;
628 descriptor->d.data = descriptor->data;
630 retval = fw_core_add_descriptor(&descriptor->d);
636 descriptor->resource.release = release_descriptor;
637 add_client_resource(client, &descriptor->resource);
638 request->handle = descriptor->resource.handle;
/* REMOVE_DESCRIPTOR ioctl: drop a ROM descriptor by its handle. */
643 static int ioctl_remove_descriptor(struct client *client, void *buffer)
645 struct fw_cdev_remove_descriptor *request = buffer;
647 return release_client_resource(client, request->handle, NULL);
/*
 * Iso context interrupt callback: package the cycle number and raw
 * iso headers into an ISO_INTERRUPT event.  Runs in atomic context.
 */
651 iso_callback(struct fw_iso_context *context, u32 cycle,
652 size_t header_length, void *header, void *data)
654 struct client *client = data;
655 struct iso_interrupt *irq;
/* Header bytes are stored inline after the event struct. */
657 irq = kzalloc(sizeof(*irq) + header_length, GFP_ATOMIC);
661 irq->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
662 irq->interrupt.closure = client->iso_closure;
663 irq->interrupt.cycle = cycle;
664 irq->interrupt.header_length = header_length;
665 memcpy(irq->interrupt.header, header, header_length);
666 queue_event(client, &irq->event, &irq->interrupt,
667 sizeof(irq->interrupt) + header_length, NULL, 0);
/*
 * CREATE_ISO_CONTEXT ioctl: validate parameters and create the
 * client's single iso context (receive or transmit).
 */
670 static int ioctl_create_iso_context(struct client *client, void *buffer)
672 struct fw_cdev_create_iso_context *request = buffer;
673 struct fw_iso_context *context;
675 /* We only support one context at this time. */
676 if (client->iso_context != NULL)
/* 1394 isochronous channels are 0..63. */
679 if (request->channel > 63)
682 switch (request->type) {
683 case FW_ISO_CONTEXT_RECEIVE:
/* Receive header stride must be a positive multiple of 4 bytes. */
684 if (request->header_size < 4 || (request->header_size & 3))
689 case FW_ISO_CONTEXT_TRANSMIT:
690 if (request->speed > SCODE_3200)
699 context = fw_iso_context_create(client->device->card,
703 request->header_size,
704 iso_callback, client);
706 return PTR_ERR(context);
708 client->iso_closure = request->closure;
709 client->iso_context = context;
711 /* We only support one context at this time. */
717 /* Macros for decoding the iso packet control header. */
/* Each extracts one bitfield from the 32-bit fw_cdev_iso_packet control word. */
718 #define GET_PAYLOAD_LENGTH(v) ((v) & 0xffff)
719 #define GET_INTERRUPT(v) (((v) >> 16) & 0x01)
720 #define GET_SKIP(v) (((v) >> 17) & 0x01)
721 #define GET_TAG(v) (((v) >> 18) & 0x03)
722 #define GET_SY(v) (((v) >> 20) & 0x0f)
723 #define GET_HEADER_LENGTH(v) (((v) >> 24) & 0xff)
/*
 * QUEUE_ISO ioctl: walk a user-space array of variable-length
 * fw_cdev_iso_packet structures, decode each control word, validate
 * header/payload sizes, and queue the packets on the iso context.
 * On return, request->{size,packets,data} are updated to reflect how
 * far the queueing got, so user space can resubmit the remainder.
 */
725 static int ioctl_queue_iso(struct client *client, void *buffer)
727 struct fw_cdev_queue_iso *request = buffer;
728 struct fw_cdev_iso_packet __user *p, *end, *next;
729 struct fw_iso_context *ctx = client->iso_context;
730 unsigned long payload, buffer_end, header_length;
734 struct fw_iso_packet packet;
738 if (ctx == NULL || request->handle != 0)
742 * If the user passes a non-NULL data pointer, has mmap()'ed
743 * the iso buffer, and the pointer points inside the buffer,
744 * we setup the payload pointers accordingly. Otherwise we
745 * set them both to 0, which will still let packets with
746 * payload_length == 0 through. In other words, if no packets
747 * use the indirect payload, the iso buffer need not be mapped
748 * and the request->data pointer is ignored.
/* payload becomes an offset into the mmap()ed DMA buffer. */
751 payload = (unsigned long)request->data - client->vm_start;
752 buffer_end = client->buffer.page_count << PAGE_SHIFT;
753 if (request->data == 0 || client->buffer.pages == NULL ||
754 payload >= buffer_end) {
759 p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);
761 if (!access_ok(VERIFY_READ, p, request->size))
764 end = (void __user *)p + request->size;
767 if (get_user(control, &p->control))
769 u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
770 u.packet.interrupt = GET_INTERRUPT(control);
771 u.packet.skip = GET_SKIP(control);
772 u.packet.tag = GET_TAG(control);
773 u.packet.sy = GET_SY(control);
774 u.packet.header_length = GET_HEADER_LENGTH(control);
776 if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
777 header_length = u.packet.header_length;
780 * We require that header_length is a multiple of
781 * the fixed header size, ctx->header_size.
783 if (ctx->header_size == 0) {
784 if (u.packet.header_length > 0)
786 } else if (u.packet.header_length % ctx->header_size != 0) {
/* Packets are variable-length: next one starts after this header. */
792 next = (struct fw_cdev_iso_packet __user *)
793 &p->header[header_length / 4];
797 (u.packet.header, p->header, header_length))
/* A transmit skip packet must carry no header and no payload. */
799 if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
800 u.packet.header_length + u.packet.payload_length > 0)
802 if (payload + u.packet.payload_length > buffer_end)
805 if (fw_iso_context_queue(ctx, &u.packet,
806 &client->buffer, payload))
810 payload += u.packet.payload_length;
/* Report back how much of the packet array was consumed. */
814 request->size -= uptr_to_u64(p) - request->packets;
815 request->packets = uptr_to_u64(p);
816 request->data = client->vm_start + payload;
/*
 * START_ISO ioctl: validate sync/tag match parameters (receive only)
 * and start the iso context at the requested cycle.
 */
821 static int ioctl_start_iso(struct client *client, void *buffer)
823 struct fw_cdev_start_iso *request = buffer;
825 if (client->iso_context == NULL || request->handle != 0)
828 if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
/* tags is a 4-bit match mask; zero would match nothing. */
829 if (request->tags == 0 || request->tags > 15)
832 if (request->sync > 15)
836 return fw_iso_context_start(client->iso_context, request->cycle,
837 request->sync, request->tags);
/* STOP_ISO ioctl: halt the client's iso context. */
840 static int ioctl_stop_iso(struct client *client, void *buffer)
842 struct fw_cdev_stop_iso *request = buffer;
844 if (client->iso_context == NULL || request->handle != 0)
847 return fw_iso_context_stop(client->iso_context)
/*
 * GET_CYCLE_TIMER ioctl: sample the bus cycle timer and the system
 * clock as close together as possible (IRQs off) so user space can
 * correlate bus time with wall-clock time.
 */
850 static int ioctl_get_cycle_timer(struct client *client, void *buffer)
852 struct fw_cdev_get_cycle_timer *request = buffer;
853 struct fw_card *card = client->device->card;
854 unsigned long long bus_time;
859 local_irq_save(flags);
861 bus_time = card->driver->get_bus_time(card);
862 do_gettimeofday(&tv);
864 local_irq_restore(flags);
867 request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
868 request->cycle_timer = bus_time & 0xffffffff;
/*
 * Dispatch table indexed by _IOC_NR(cmd); order must match the
 * FW_CDEV_IOC_* numbering in linux/firewire-cdev.h.
 */
872 static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
878 ioctl_initiate_bus_reset,
879 ioctl_add_descriptor,
880 ioctl_remove_descriptor,
881 ioctl_create_iso_context,
885 ioctl_get_cycle_timer,
/*
 * Common ioctl dispatcher: validate the command, copy the argument
 * struct into a stack buffer per the _IOC_DIR direction bits, invoke
 * the handler, and copy results back out.
 */
889 dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
/* '#' is the firewire-cdev ioctl magic number. */
894 if (_IOC_TYPE(cmd) != '#' ||
895 _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
898 if (_IOC_DIR(cmd) & _IOC_WRITE) {
899 if (_IOC_SIZE(cmd) > sizeof(buffer) ||
900 copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
904 retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
908 if (_IOC_DIR(cmd) & _IOC_READ) {
909 if (_IOC_SIZE(cmd) > sizeof(buffer) ||
910 copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
/* unlocked_ioctl entry point: reject if the device is gone, then dispatch. */
918 fw_device_op_ioctl(struct file *file,
919 unsigned int cmd, unsigned long arg)
921 struct client *client = file->private_data;
923 if (fw_device_is_shutdown(client->device))
926 return dispatch_ioctl(client, cmd, (void __user *) arg);
/* compat_ioctl entry point: same, but translate the 32-bit pointer. */
931 fw_device_op_compat_ioctl(struct file *file,
932 unsigned int cmd, unsigned long arg)
934 struct client *client = file->private_data;
936 if (fw_device_is_shutdown(client->device))
939 return dispatch_ioctl(client, cmd, compat_ptr(arg));
/*
 * mmap() handler: allocate and map the client's single iso DMA
 * buffer.  DMA direction is derived from the mapping's writability
 * (writable mapping => data flows to the device, i.e. transmit).
 */
943 static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
945 struct client *client = file->private_data;
946 enum dma_data_direction direction;
948 int page_count, retval;
950 if (fw_device_is_shutdown(client->device))
953 /* FIXME: We could support multiple buffers, but we don't. */
954 if (client->buffer.pages != NULL)
957 if (!(vma->vm_flags & VM_SHARED))
960 if (vma->vm_start & ~PAGE_MASK)
/* Remember the mapping base so queue_iso can convert user pointers. */
963 client->vm_start = vma->vm_start;
964 size = vma->vm_end - vma->vm_start;
965 page_count = size >> PAGE_SHIFT;
966 if (size & ~PAGE_MASK)
969 if (vma->vm_flags & VM_WRITE)
970 direction = DMA_TO_DEVICE;
972 direction = DMA_FROM_DEVICE;
974 retval = fw_iso_buffer_init(&client->buffer, client->device->card,
975 page_count, direction);
979 retval = fw_iso_buffer_map(&client->buffer, vma);
/* Mapping failed: undo the buffer allocation. */
981 fw_iso_buffer_destroy(&client->buffer, client->device->card);
/*
 * release() handler: tear down everything the client owns — iso
 * buffer and context, registered resources, undelivered events —
 * then unlink from the device and drop the device reference taken
 * at open() time.
 */
986 static int fw_device_op_release(struct inode *inode, struct file *file)
988 struct client *client = file->private_data;
989 struct event *e, *next_e;
990 struct client_resource *r, *next_r;
993 if (client->buffer.pages)
994 fw_iso_buffer_destroy(&client->buffer, client->device->card);
996 if (client->iso_context)
997 fw_iso_context_destroy(client->iso_context);
/* Run each resource's type-specific destructor. */
999 list_for_each_entry_safe(r, next_r, &client->resource_list, link)
1000 r->release(client, r);
1003 * FIXME: We should wait for the async tasklets to stop
1004 * running before freeing the memory.
1007 list_for_each_entry_safe(e, next_e, &client->event_list, link)
1010 spin_lock_irqsave(&client->device->client_list_lock, flags);
1011 list_del(&client->link);
1012 spin_unlock_irqrestore(&client->device->client_list_lock, flags);
1014 fw_device_put(client->device);
/*
 * poll() handler: readable when events are queued; HUP/ERR once the
 * device has been shut down.
 */
1020 static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
1022 struct client *client = file->private_data;
1023 unsigned int mask = 0;
1025 poll_wait(file, &client->wait, pt);
1027 if (fw_device_is_shutdown(client->device))
1028 mask |= POLLHUP | POLLERR;
1029 if (!list_empty(&client->event_list))
1030 mask |= POLLIN | POLLRDNORM;
/* File operations for /dev/fw* character devices. */
1035 const struct file_operations fw_device_ops = {
1036 .owner = THIS_MODULE,
1037 .open = fw_device_op_open,
1038 .read = fw_device_op_read,
1039 .unlocked_ioctl = fw_device_op_ioctl,
1040 .poll = fw_device_op_poll,
1041 .release = fw_device_op_release,
1042 .mmap = fw_device_op_mmap,
1044 #ifdef CONFIG_COMPAT
1045 .compat_ioctl = fw_device_op_compat_ioctl,