Staging: poch: fix sched.h build breakage
drivers/staging/poch/poch.c (linux-2.6)
1 /*
2  * User-space DMA and UIO based Redrapids Pocket Change CardBus driver
3  *
4  * Copyright 2008 Vijay Kumar <vijaykumar@bravegnu.org>
5  *
6  * Licensed under GPL version 2 only.
7  */
8
9 #include <linux/device.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/uio_driver.h>
13 #include <linux/spinlock.h>
14 #include <linux/cdev.h>
15 #include <linux/delay.h>
16 #include <linux/sysfs.h>
17 #include <linux/poll.h>
18 #include <linux/idr.h>
19 #include <linux/interrupt.h>
20 #include <linux/init.h>
21 #include <linux/ioctl.h>
22 #include <linux/io.h>
23 #include <linux/sched.h>
24
25 #include "poch.h"
26
27 #include <asm/cacheflush.h>
28
29 #ifndef PCI_VENDOR_ID_RRAPIDS
30 #define PCI_VENDOR_ID_RRAPIDS 0x17D2
31 #endif
32
33 #ifndef PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE
34 #define PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE 0x0351
35 #endif
36
37 #define POCH_NCHANNELS 2
38
39 #define MAX_POCH_CARDS 8
40 #define MAX_POCH_DEVICES (MAX_POCH_CARDS * POCH_NCHANNELS)
41
42 #define DRV_NAME "poch"
43 #define PFX      DRV_NAME ": "
44
45 /*
46  * BAR0 Bridge Register Definitions
47  */
48
49 #define BRIDGE_REV_REG                  0x0
50 #define BRIDGE_INT_MASK_REG             0x4
51 #define BRIDGE_INT_STAT_REG             0x8
52
53 #define BRIDGE_INT_ACTIVE               (0x1 << 31)
54 #define BRIDGE_INT_FPGA                 (0x1 << 2)
55 #define BRIDGE_INT_TEMP_FAIL            (0x1 << 1)
56 #define BRIDGE_INT_TEMP_WARN            (0x1 << 0)
57
58 #define BRIDGE_FPGA_RESET_REG           0xC
59
60 #define BRIDGE_CARD_POWER_REG           0x10
61 #define BRIDGE_CARD_POWER_EN            (0x1 << 0)
62 #define BRIDGE_CARD_POWER_PROG_DONE     (0x1 << 31)
63
64 #define BRIDGE_JTAG_REG                 0x14
65 #define BRIDGE_DMA_GO_REG               0x18
66 #define BRIDGE_STAT_0_REG               0x1C
67 #define BRIDGE_STAT_1_REG               0x20
68 #define BRIDGE_STAT_2_REG               0x24
69 #define BRIDGE_STAT_3_REG               0x28
70 #define BRIDGE_TEMP_STAT_REG            0x2C
71 #define BRIDGE_TEMP_THRESH_REG          0x30
72 #define BRIDGE_EEPROM_REVSEL_REG        0x34
73 #define BRIDGE_CIS_STRUCT_REG           0x100
74 #define BRIDGE_BOARDREV_REG             0x124
75
76 /*
77  * BAR1 FPGA Register Definitions
78  */
79
80 #define FPGA_IFACE_REV_REG              0x0
81 #define FPGA_RX_BLOCK_SIZE_REG          0x8
82 #define FPGA_TX_BLOCK_SIZE_REG          0xC
83 #define FPGA_RX_BLOCK_COUNT_REG         0x10
84 #define FPGA_TX_BLOCK_COUNT_REG         0x14
85 #define FPGA_RX_CURR_DMA_BLOCK_REG      0x18
86 #define FPGA_TX_CURR_DMA_BLOCK_REG      0x1C
87 #define FPGA_RX_GROUP_COUNT_REG         0x20
88 #define FPGA_TX_GROUP_COUNT_REG         0x24
89 #define FPGA_RX_CURR_GROUP_REG          0x28
90 #define FPGA_TX_CURR_GROUP_REG          0x2C
91 #define FPGA_RX_CURR_PCI_REG            0x38
92 #define FPGA_TX_CURR_PCI_REG            0x3C
93 #define FPGA_RX_GROUP0_START_REG        0x40
94 #define FPGA_TX_GROUP0_START_REG        0xC0
95 #define FPGA_DMA_DESC_1_REG             0x140
96 #define FPGA_DMA_DESC_2_REG             0x144
97 #define FPGA_DMA_DESC_3_REG             0x148
98 #define FPGA_DMA_DESC_4_REG             0x14C
99
100 #define FPGA_DMA_INT_STAT_REG           0x150
101 #define FPGA_DMA_INT_MASK_REG           0x154
102 #define FPGA_DMA_INT_RX         (1 << 0)
103 #define FPGA_DMA_INT_TX         (1 << 1)
104
105 #define FPGA_RX_GROUPS_PER_INT_REG      0x158
106 #define FPGA_TX_GROUPS_PER_INT_REG      0x15C
107 #define FPGA_DMA_ADR_PAGE_REG           0x160
108 #define FPGA_FPGA_REV_REG               0x200
109
110 #define FPGA_ADC_CLOCK_CTL_REG          0x204
111 #define FPGA_ADC_CLOCK_CTL_OSC_EN       (0x1 << 3)
112 #define FPGA_ADC_CLOCK_LOCAL_CLK        (0x1 | FPGA_ADC_CLOCK_CTL_OSC_EN)
113 #define FPGA_ADC_CLOCK_EXT_SAMP_CLK     0x0
114
115 #define FPGA_ADC_DAC_EN_REG             0x208
116 #define FPGA_ADC_DAC_EN_DAC_OFF         (0x1 << 1)
117 #define FPGA_ADC_DAC_EN_ADC_OFF         (0x1 << 0)
118
119 #define FPGA_INT_STAT_REG               0x20C
120 #define FPGA_INT_MASK_REG               0x210
121 #define FPGA_INT_PLL_UNLOCKED           (0x1 << 9)
122 #define FPGA_INT_DMA_CORE               (0x1 << 8)
123 #define FPGA_INT_TX_FF_EMPTY            (0x1 << 7)
124 #define FPGA_INT_RX_FF_EMPTY            (0x1 << 6)
125 #define FPGA_INT_TX_FF_OVRFLW           (0x1 << 3)
126 #define FPGA_INT_RX_FF_OVRFLW           (0x1 << 2)
127 #define FPGA_INT_TX_ACQ_DONE            (0x1 << 1)
128 #define FPGA_INT_RX_ACQ_DONE            (0x1)
129
130 #define FPGA_RX_CTL_REG                 0x214
131 #define FPGA_RX_CTL_FIFO_FLUSH          (0x1 << 9)
132 #define FPGA_RX_CTL_SYNTH_DATA          (0x1 << 8)
133 #define FPGA_RX_CTL_CONT_CAP            (0x0 << 1)
134 #define FPGA_RX_CTL_SNAP_CAP            (0x1 << 1)
135
136 #define FPGA_RX_ARM_REG                 0x21C
137
138 #define FPGA_DOM_REG                    0x224
139 #define FPGA_DOM_DCM_RESET              (0x1 << 5)
140 #define FPGA_DOM_SOFT_RESET             (0x1 << 4)
141 #define FPGA_DOM_DUAL_M_SG_DMA          (0x0)
142 #define FPGA_DOM_TARGET_ACCESS          (0x1)
143
144 #define FPGA_TX_CTL_REG                 0x228
145 #define FPGA_TX_CTL_FIFO_FLUSH          (0x1 << 9)
146 #define FPGA_TX_CTL_OUTPUT_ZERO         (0x0 << 2)
147 #define FPGA_TX_CTL_OUTPUT_CARDBUS      (0x1 << 2)
148 #define FPGA_TX_CTL_OUTPUT_ADC          (0x2 << 2)
149 #define FPGA_TX_CTL_OUTPUT_SNAPSHOT     (0x3 << 2)
150 #define FPGA_TX_CTL_LOOPBACK            (0x1 << 0)
151
152 #define FPGA_ENDIAN_MODE_REG            0x22C
153 #define FPGA_RX_FIFO_COUNT_REG          0x28C
154 #define FPGA_TX_ENABLE_REG              0x298
155 #define FPGA_TX_TRIGGER_REG             0x29C
156 #define FPGA_TX_DATAMEM_COUNT_REG       0x2A8
157 #define FPGA_CAP_FIFO_REG               0x300
158 #define FPGA_TX_SNAPSHOT_REG            0x8000
159
160 /*
161  * Channel Index Definitions
162  */
163
164 enum {
165         CHNO_RX_CHANNEL,
166         CHNO_TX_CHANNEL,
167 };
168
169 struct poch_dev;
170
171 enum channel_dir {
172         CHANNEL_DIR_RX,
173         CHANNEL_DIR_TX,
174 };
175
176 struct poch_group_info {
177         struct page *pg;
178         dma_addr_t dma_addr;
179         unsigned long user_offset;
180 };
181
182 struct channel_info {
183         unsigned int chno;
184
185         atomic_t sys_block_size;
186         atomic_t sys_group_size;
187         atomic_t sys_group_count;
188
189         enum channel_dir dir;
190
191         unsigned long block_size;
192         unsigned long group_size;
193         unsigned long group_count;
194
195         /* Contains the DMA address and VM offset of each group. */
196         struct poch_group_info *groups;
197
198         /* Contains the header and circular buffer exported to userspace. */
199         spinlock_t group_offsets_lock;
200         struct poch_cbuf_header *header;
201         struct page *header_pg;
202         unsigned long header_size;
203
204         /* Last group indicated as 'complete' to user space. */
205         unsigned int transfer;
206
207         wait_queue_head_t wq;
208
209         union {
210                 unsigned int data_available;
211                 unsigned int space_available;
212         };
213
214         void __iomem *bridge_iomem;
215         void __iomem *fpga_iomem;
216         spinlock_t *iomem_lock;
217
218         atomic_t free;
219         atomic_t inited;
220
221         /* Error counters */
222         struct poch_counters counters;
223         spinlock_t counters_lock;
224
225         struct device *dev;
226 };
227
228 struct poch_dev {
229         struct uio_info uio;
230         struct pci_dev *pci_dev;
231         unsigned int nchannels;
232         struct channel_info channels[POCH_NCHANNELS];
233         struct cdev cdev;
234
235         /* Counts the no. of channels that have been opened. On first
236          * open, the card is powered on. On last channel close, the
237          * card is powered off.
238          */
239         atomic_t usage;
240
241         void __iomem *bridge_iomem;
242         void __iomem *fpga_iomem;
243         spinlock_t iomem_lock;
244
245         struct device *dev;
246 };
247
248 static dev_t poch_first_dev;
249 static struct class *poch_cls;
250 static DEFINE_IDR(poch_ids);
251
252 static ssize_t store_block_size(struct device *dev,
253                                 struct device_attribute *attr,
254                                 const char *buf, size_t count)
255 {
256         struct channel_info *channel = dev_get_drvdata(dev);
257         unsigned long block_size;
258
259         sscanf(buf, "%lu", &block_size);
260         atomic_set(&channel->sys_block_size, block_size);
261
262         return count;
263 }
264 static DEVICE_ATTR(block_size, S_IWUSR|S_IWGRP, NULL, store_block_size);
265
266 static ssize_t store_group_size(struct device *dev,
267                                 struct device_attribute *attr,
268                                 const char *buf, size_t count)
269 {
270         struct channel_info *channel = dev_get_drvdata(dev);
271         unsigned long group_size;
272
273         sscanf(buf, "%lu", &group_size);
274         atomic_set(&channel->sys_group_size, group_size);
275
276         return count;
277 }
278 static DEVICE_ATTR(group_size, S_IWUSR|S_IWGRP, NULL, store_group_size);
279
280 static ssize_t store_group_count(struct device *dev,
281                                 struct device_attribute *attr,
282                                  const char *buf, size_t count)
283 {
284         struct channel_info *channel = dev_get_drvdata(dev);
285         unsigned long group_count;
286
287         sscanf(buf, "%lu", &group_count);
288         atomic_set(&channel->sys_group_count, group_count);
289
290         return count;
291 }
292 static DEVICE_ATTR(group_count, S_IWUSR|S_IWGRP, NULL, store_group_count);
293
294 static ssize_t show_direction(struct device *dev,
295                               struct device_attribute *attr, char *buf)
296 {
297         struct channel_info *channel = dev_get_drvdata(dev);
298         int len;
299
300         len = sprintf(buf, "%s\n", (channel->dir ? "tx" : "rx"));
301         return len;
302 }
303 static DEVICE_ATTR(dir, S_IRUSR|S_IRGRP, show_direction, NULL);
304
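/*
 * Number of pages needed to hold 'bytes' bytes, rounded up to a whole
 * page (equivalent to DIV_ROUND_UP(bytes, PAGE_SIZE)).
 */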
305 static unsigned long npages(unsigned long bytes)
306 {
307         if (bytes % PAGE_SIZE == 0)
308                 return bytes / PAGE_SIZE;
309         else
310                 return (bytes / PAGE_SIZE) + 1;
311 }
312
313 static ssize_t show_mmap_size(struct device *dev,
314                               struct device_attribute *attr, char *buf)
315 {
316         struct channel_info *channel = dev_get_drvdata(dev);
317         int len;
318         unsigned long mmap_size;
319         unsigned long group_pages;
320         unsigned long header_pages;
321         unsigned long total_group_pages;
322
323         group_pages = npages(channel->group_size);
324         header_pages = npages(channel->header_size);
325         total_group_pages = group_pages * channel->group_count;
326
327         mmap_size = (header_pages + total_group_pages) * PAGE_SIZE;
328         len = sprintf(buf, "%lu\n", mmap_size);
329         return len;
330 }
331 static DEVICE_ATTR(mmap_size, S_IRUSR|S_IRGRP, show_mmap_size, NULL);
332
333 static struct device_attribute *poch_class_attrs[] = {
334         &dev_attr_block_size,
335         &dev_attr_group_size,
336         &dev_attr_group_count,
337         &dev_attr_dir,
338         &dev_attr_mmap_size,
339 };
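
/*
 * Illustrative (assumed, not a documented ABI) configuration sequence
 * using the attributes above before mmap()ing a channel device, where
 * $CH stands for the sysfs directory of the channel class device
 * ("ch%d", created under the "pocketchange" class later in this file)
 * and the sizes are example values only:
 *
 *   echo 256  > $CH/block_size
 *   echo 4096 > $CH/group_size
 *   echo 8    > $CH/group_count
 *   cat $CH/mmap_size        # length to pass to mmap()
 */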
340
341 static void poch_channel_free_groups(struct channel_info *channel)
342 {
343         unsigned long i;
344
345         for (i = 0; i < channel->group_count; i++) {
346                 struct poch_group_info *group;
347                 unsigned int order;
348
349                 group = &channel->groups[i];
350                 order = get_order(channel->group_size);
351                 if (group->pg)
352                         __free_pages(group->pg, order);
353         }
354 }
355
356 static int poch_channel_alloc_groups(struct channel_info *channel)
357 {
358         unsigned long i;
359         unsigned long group_pages;
360         unsigned long header_pages;
361
362         group_pages = npages(channel->group_size);
363         header_pages = npages(channel->header_size);
364
365         for (i = 0; i < channel->group_count; i++) {
366                 struct poch_group_info *group;
367                 unsigned int order;
368                 gfp_t gfp_mask;
369
370                 group = &channel->groups[i];
371                 order = get_order(channel->group_size);
372
373                 /*
374                  * __GFP_COMP is required here since we are going to
375                  * perform non-linear mapping to userspace. For more
376                  * information read the vm_insert_page() function
377                  * comments.
378                  */
379
380                 gfp_mask = GFP_KERNEL | GFP_DMA32 | __GFP_ZERO | __GFP_COMP;
381                 group->pg = alloc_pages(gfp_mask, order);
382                 if (!group->pg) {
383                         poch_channel_free_groups(channel);
384                         return -ENOMEM;
385                 }
386
387                 /* FIXME: This is the physical address not the bus
388                  * address!  This won't work in architectures that
389                  * have an IOMMU. Can we use pci_map_single() for
390                  * this?
391                  */
392                 group->dma_addr = page_to_pfn(group->pg) * PAGE_SIZE;
393                 group->user_offset =
394                         (header_pages + (i * group_pages)) * PAGE_SIZE;
395
396                 printk(KERN_INFO PFX "%ld: user_offset: 0x%lx\n", i,
397                        group->user_offset);
398         }
399
400         return 0;
401 }
402
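/*
 * Latch the sysfs-configured sizes into the channel and sanity-check
 * them: the block size must be a non-zero multiple of 8 bytes (the
 * hardware is programmed in 64-bit transfers, see channel_dma_init()),
 * the group size must be a non-zero multiple of the block size, and
 * the group count must be non-zero.
 */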
403 static int channel_latch_attr(struct channel_info *channel)
404 {
405         channel->group_count = atomic_read(&channel->sys_group_count);
406         channel->group_size = atomic_read(&channel->sys_group_size);
407         channel->block_size = atomic_read(&channel->sys_block_size);
408
409         if (channel->group_count == 0) {
410                 printk(KERN_ERR PFX "invalid group count %lu\n",
411                        channel->group_count);
412                 return -EINVAL;
413         }
414
415         if (channel->group_size == 0 ||
416             channel->group_size < channel->block_size) {
417                 printk(KERN_ERR PFX "invalid group size %lu\n",
418                        channel->group_size);
419                 return -EINVAL;
420         }
421
422         if (channel->block_size == 0 || (channel->block_size % 8) != 0) {
423                 printk(KERN_ERR PFX "invalid block size %lu\n",
424                        channel->block_size);
425                 return -EINVAL;
426         }
427
428         if (channel->group_size % channel->block_size != 0) {
429                 printk(KERN_ERR PFX
430                        "group size should be a multiple of block size\n");
431                 return -EINVAL;
432         }
433
434         return 0;
435 }
436
437 /*
438  * Configure DMA group registers
439  */
440 static void channel_dma_init(struct channel_info *channel)
441 {
442         void __iomem *fpga = channel->fpga_iomem;
443         u32 group_regs_base;
444         u32 group_reg;
445         unsigned int page;
446         unsigned int group_in_page;
447         unsigned long i;
448         u32 block_size_reg;
449         u32 block_count_reg;
450         u32 group_count_reg;
451         u32 groups_per_int_reg;
452         u32 curr_pci_reg;
453
454         if (channel->chno == CHNO_RX_CHANNEL) {
455                 group_regs_base = FPGA_RX_GROUP0_START_REG;
456                 block_size_reg = FPGA_RX_BLOCK_SIZE_REG;
457                 block_count_reg = FPGA_RX_BLOCK_COUNT_REG;
458                 group_count_reg = FPGA_RX_GROUP_COUNT_REG;
459                 groups_per_int_reg = FPGA_RX_GROUPS_PER_INT_REG;
460                 curr_pci_reg = FPGA_RX_CURR_PCI_REG;
461         } else {
462                 group_regs_base = FPGA_TX_GROUP0_START_REG;
463                 block_size_reg = FPGA_TX_BLOCK_SIZE_REG;
464                 block_count_reg = FPGA_TX_BLOCK_COUNT_REG;
465                 group_count_reg = FPGA_TX_GROUP_COUNT_REG;
466                 groups_per_int_reg = FPGA_TX_GROUPS_PER_INT_REG;
467                 curr_pci_reg = FPGA_TX_CURR_PCI_REG;
468         }
469
470         printk(KERN_WARNING "block_size, group_size, group_count\n");
471         /*
472          * Block size is represented in no. of 64 bit transfers.
473          */
474         iowrite32(channel->block_size / 8, fpga + block_size_reg);
475         iowrite32(channel->group_size / channel->block_size,
476                   fpga + block_count_reg);
477         iowrite32(channel->group_count, fpga + group_count_reg);
478         /* FIXME: Hardcoded groups per int. Get it from sysfs? */
479         iowrite32(1, fpga + groups_per_int_reg);
480
481         /* Unlock PCI address? Not defined in the data sheet, but used
482          * in the reference code by Redrapids.
483          */
484         iowrite32(0x1, fpga + curr_pci_reg);
485
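        /* The group start-address registers appear to be windowed: only
         * 32 group slots are visible at a time, selected by writing a
         * page number to FPGA_DMA_ADR_PAGE_REG. Group i therefore goes
         * to page i / 32, slot i % 32 within the current window.
         */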
486         /* The DMA address page register is shared between the RX and
487          * TX channels, so acquire lock.
488          */
489         for (i = 0; i < channel->group_count; i++) {
490                 page = i / 32;
491                 group_in_page = i % 32;
492
493                 group_reg = group_regs_base + (group_in_page * 4);
494
495                 spin_lock(channel->iomem_lock);
496                 iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
497                 iowrite32(channel->groups[i].dma_addr, fpga + group_reg);
498                 spin_unlock(channel->iomem_lock);
499         }
500
501         for (i = 0; i < channel->group_count; i++) {
502                 page = i / 32;
503                 group_in_page = i % 32;
504
505                 group_reg = group_regs_base + (group_in_page * 4);
506
507                 spin_lock(channel->iomem_lock);
508                 iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
509                 printk(KERN_INFO PFX "%ld: read dma_addr: 0x%x\n", i,
510                        ioread32(fpga + group_reg));
511                 spin_unlock(channel->iomem_lock);
512         }
513
514 }
515
516 static int poch_channel_alloc_header(struct channel_info *channel)
517 {
518         struct poch_cbuf_header *header = channel->header;
519         unsigned long group_offset_size;
520         unsigned long tot_group_offsets_size;
521
522         /* Allocate memory to hold the header exported to userspace */
523         group_offset_size = sizeof(header->group_offsets[0]);
524         tot_group_offsets_size = group_offset_size * channel->group_count;
525         channel->header_size = sizeof(*header) + tot_group_offsets_size;
526         channel->header_pg = alloc_pages(GFP_KERNEL | __GFP_ZERO,
527                                          get_order(channel->header_size));
528         if (!channel->header_pg)
529                 return -ENOMEM;
530
531         channel->header = page_address(channel->header_pg);
532
533         return 0;
534 }
535
536 static void poch_channel_free_header(struct channel_info *channel)
537 {
538         unsigned int order;
539
540         order = get_order(channel->header_size);
541         __free_pages(channel->header_pg, order);
542 }
543
544 static void poch_channel_init_header(struct channel_info *channel)
545 {
546         int i;
547         struct poch_group_info *groups;
548         s32 *group_offsets;
549
550         channel->header->group_size_bytes = channel->group_size;
551         channel->header->group_count = channel->group_count;
552
553         spin_lock_init(&channel->group_offsets_lock);
554
555         group_offsets = channel->header->group_offsets;
556         groups = channel->groups;
557
558         for (i = 0; i < channel->group_count; i++) {
559                 if (channel->dir == CHANNEL_DIR_RX)
560                         group_offsets[i] = -1;
561                 else
562                         group_offsets[i] = groups[i].user_offset;
563         }
564 }
565
566 static void __poch_channel_clear_counters(struct channel_info *channel)
567 {
568         channel->counters.pll_unlock = 0;
569         channel->counters.fifo_empty = 0;
570         channel->counters.fifo_overflow = 0;
571 }
572
573 static int poch_channel_init(struct channel_info *channel,
574                              struct poch_dev *poch_dev)
575 {
576         struct pci_dev *pdev = poch_dev->pci_dev;
577         struct device *dev = &pdev->dev;
578         unsigned long alloc_size;
579         int ret;
580
581         printk(KERN_WARNING "channel_latch_attr\n");
582
583         ret = channel_latch_attr(channel);
584         if (ret != 0)
585                 goto out;
586
587         channel->transfer = 0;
588
589         /* Allocate memory to hold group information. */
590         alloc_size = channel->group_count * sizeof(struct poch_group_info);
591         channel->groups = kzalloc(alloc_size, GFP_KERNEL);
592         if (!channel->groups) {
593                 dev_err(dev, "error allocating memory for group info\n");
594                 ret = -ENOMEM;
595                 goto out;
596         }
597
598         printk(KERN_WARNING "poch_channel_alloc_groups\n");
599
600         ret = poch_channel_alloc_groups(channel);
601         if (ret) {
602                 dev_err(dev, "error allocating groups of order %d\n",
603                         get_order(channel->group_size));
604                 goto out_free_group_info;
605         }
606
607         ret = poch_channel_alloc_header(channel);
608         if (ret) {
609                 dev_err(dev, "error allocating user space header\n");
610                 goto out_free_groups;
611         }
612
613         channel->fpga_iomem = poch_dev->fpga_iomem;
614         channel->bridge_iomem = poch_dev->bridge_iomem;
615         channel->iomem_lock = &poch_dev->iomem_lock;
616         spin_lock_init(&channel->counters_lock);
617
618         __poch_channel_clear_counters(channel);
619
620         printk(KERN_WARNING "poch_channel_init_header\n");
621
622         poch_channel_init_header(channel);
623
624         return 0;
625
626  out_free_groups:
627         poch_channel_free_groups(channel);
628  out_free_group_info:
629         kfree(channel->groups);
630  out:
631         return ret;
632 }
633
634 static int poch_wait_fpga_prog(void __iomem *bridge)
635 {
636         unsigned long total_wait;
637         const unsigned long wait_period = 100;
638         /* FIXME: Get the actual timeout */
639         const unsigned long prog_timeo = 10000; /* 10 Seconds */
640         u32 card_power;
641
642         printk(KERN_WARNING "poch_wait_fpga_prog\n");
643
644         printk(KERN_INFO PFX "programming fpga ...\n");
645         total_wait = 0;
646         while (1) {
647                 msleep(wait_period);
648                 total_wait += wait_period;
649
650                 card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
651                 if (card_power & BRIDGE_CARD_POWER_PROG_DONE) {
652                         printk(KERN_INFO PFX "programming done\n");
653                         return 0;
654                 }
655                 if (total_wait > prog_timeo) {
656                         printk(KERN_ERR PFX
657                                "timed out while programming FPGA\n");
658                         return -EIO;
659                 }
660         }
661 }
662
663 static void poch_card_power_off(struct poch_dev *poch_dev)
664 {
665         void __iomem *bridge = poch_dev->bridge_iomem;
666         u32 card_power;
667
668         iowrite32(0, bridge + BRIDGE_INT_MASK_REG);
669         iowrite32(0, bridge + BRIDGE_DMA_GO_REG);
670
671         card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
672         iowrite32(card_power & ~BRIDGE_CARD_POWER_EN,
673                   bridge + BRIDGE_CARD_POWER_REG);
674 }
675
676 enum clk_src {
677         CLK_SRC_ON_BOARD,
678         CLK_SRC_EXTERNAL
679 };
680
681 static void poch_card_clock_on(void __iomem *fpga)
682 {
683         /* FIXME: Get this data through sysfs? */
684         enum clk_src clk_src = CLK_SRC_ON_BOARD;
685
686         if (clk_src == CLK_SRC_ON_BOARD) {
687                 iowrite32(FPGA_ADC_CLOCK_LOCAL_CLK | FPGA_ADC_CLOCK_CTL_OSC_EN,
688                           fpga + FPGA_ADC_CLOCK_CTL_REG);
689         } else if (clk_src == CLK_SRC_EXTERNAL) {
690                 iowrite32(FPGA_ADC_CLOCK_EXT_SAMP_CLK,
691                           fpga + FPGA_ADC_CLOCK_CTL_REG);
692         }
693 }
694
695 static int poch_card_power_on(struct poch_dev *poch_dev)
696 {
697         void __iomem *bridge = poch_dev->bridge_iomem;
698         void __iomem *fpga = poch_dev->fpga_iomem;
699
700         iowrite32(BRIDGE_CARD_POWER_EN, bridge + BRIDGE_CARD_POWER_REG);
701
702         if (poch_wait_fpga_prog(bridge) != 0) {
703                 poch_card_power_off(poch_dev);
704                 return -EIO;
705         }
706
707         poch_card_clock_on(fpga);
708
709         /* Sync to new clock, reset state machines, set DMA mode. */
710         iowrite32(FPGA_DOM_DCM_RESET | FPGA_DOM_SOFT_RESET
711                   | FPGA_DOM_DUAL_M_SG_DMA, fpga + FPGA_DOM_REG);
712
713         /* FIXME: The time required for synchronization needs to be tuned. */
714         msleep(1000);
715
716         return 0;
717 }
718
719 static void poch_channel_analog_on(struct channel_info *channel)
720 {
721         void __iomem *fpga = channel->fpga_iomem;
722         u32 adc_dac_en;
723
724         spin_lock(channel->iomem_lock);
725         adc_dac_en = ioread32(fpga + FPGA_ADC_DAC_EN_REG);
726         switch (channel->chno) {
727         case CHNO_RX_CHANNEL:
728                 iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_ADC_OFF,
729                           fpga + FPGA_ADC_DAC_EN_REG);
730                 break;
731         case CHNO_TX_CHANNEL:
732                 iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_DAC_OFF,
733                           fpga + FPGA_ADC_DAC_EN_REG);
734                 break;
735         }
736         spin_unlock(channel->iomem_lock);
737 }
738
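/*
 * Open a channel device. The first open on the card powers it up and
 * waits for the FPGA to be programmed; every open then latches the
 * sysfs-configured sizes, allocates the DMA groups and the userspace
 * header, enables the ADC/DAC for the channel and programs the DMA
 * group registers. The first open also unmasks the bridge, FPGA and
 * DMA interrupts.
 */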
739 static int poch_open(struct inode *inode, struct file *filp)
740 {
741         struct poch_dev *poch_dev;
742         struct channel_info *channel;
743         void __iomem *bridge;
744         void __iomem *fpga;
745         int chno;
746         int usage;
747         int ret;
748
749         poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);
750         bridge = poch_dev->bridge_iomem;
751         fpga = poch_dev->fpga_iomem;
752
753         chno = iminor(inode) % poch_dev->nchannels;
754         channel = &poch_dev->channels[chno];
755
756         if (!atomic_dec_and_test(&channel->free)) {
757                 atomic_inc(&channel->free);
758                 ret = -EBUSY;
759                 goto out;
760         }
761
762         usage = atomic_inc_return(&poch_dev->usage);
763
764         printk(KERN_WARNING "poch_card_power_on\n");
765
766         if (usage == 1) {
767                 ret = poch_card_power_on(poch_dev);
768                 if (ret)
769                         goto out_dec_usage;
770         }
771
772         printk(KERN_INFO "CardBus Bridge Revision: %x\n",
773                ioread32(bridge + BRIDGE_REV_REG));
774         printk(KERN_INFO "CardBus Interface Revision: %x\n",
775                ioread32(fpga + FPGA_IFACE_REV_REG));
776
777         channel->chno = chno;
778         filp->private_data = channel;
779
780         printk(KERN_WARNING "poch_channel_init\n");
781
782         ret = poch_channel_init(channel, poch_dev);
783         if (ret)
784                 goto out_power_off;
785
786         poch_channel_analog_on(channel);
787
788         printk(KERN_WARNING "channel_dma_init\n");
789
790         channel_dma_init(channel);
791
792         printk(KERN_WARNING "poch_channel_analog_on\n");
793
794         if (usage == 1) {
795                 printk(KERN_WARNING "setting up DMA\n");
796
797                 /* Initialize DMA Controller. */
798                 iowrite32(FPGA_CAP_FIFO_REG, bridge + BRIDGE_STAT_2_REG);
799                 iowrite32(FPGA_DMA_DESC_1_REG, bridge + BRIDGE_STAT_3_REG);
800
801                 ioread32(fpga + FPGA_DMA_INT_STAT_REG);
802                 ioread32(fpga + FPGA_INT_STAT_REG);
803                 ioread32(bridge + BRIDGE_INT_STAT_REG);
804
805                 /* Initialize interrupts. FIXME: Enable temperature
806                  * handling. We are enabling both Tx and Rx channel
807                  * interrupts here. Do we need to enable interrupts
808                  * only for the current channel? Anyway, we won't get
809                  * the interrupt unless the DMA is activated.
810                  */
811                 iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);
812                 iowrite32(FPGA_INT_DMA_CORE
813                           | FPGA_INT_PLL_UNLOCKED
814                           | FPGA_INT_TX_FF_EMPTY
815                           | FPGA_INT_RX_FF_EMPTY
816                           | FPGA_INT_TX_FF_OVRFLW
817                           | FPGA_INT_RX_FF_OVRFLW,
818                           fpga + FPGA_INT_MASK_REG);
819                 iowrite32(FPGA_DMA_INT_RX | FPGA_DMA_INT_TX,
820                           fpga + FPGA_DMA_INT_MASK_REG);
821         }
822
823         if (channel->dir == CHANNEL_DIR_TX) {
824                 /* Flush TX FIFO and output data from cardbus. */
825                 iowrite32(FPGA_TX_CTL_FIFO_FLUSH
826                           | FPGA_TX_CTL_OUTPUT_CARDBUS,
827                           fpga + FPGA_TX_CTL_REG);
828         } else {
829                 /* Flush RX FIFO and output data to cardbus. */
830                 iowrite32(FPGA_RX_CTL_CONT_CAP
831                           | FPGA_RX_CTL_FIFO_FLUSH,
832                           fpga + FPGA_RX_CTL_REG);
833         }
834
835         atomic_inc(&channel->inited);
836
837         return 0;
838
839  out_power_off:
840         if (usage == 1)
841                 poch_card_power_off(poch_dev);
842  out_dec_usage:
843         atomic_dec(&poch_dev->usage);
844         atomic_inc(&channel->free);
845  out:
846         return ret;
847 }
848
849 static int poch_release(struct inode *inode, struct file *filp)
850 {
851         struct channel_info *channel = filp->private_data;
852         struct poch_dev *poch_dev;
853         int usage;
854
855         poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);
856
857         usage = atomic_dec_return(&poch_dev->usage);
858         if (usage == 0) {
859                 printk(KERN_WARNING "poch_card_power_off\n");
860                 poch_card_power_off(poch_dev);
861         }
862
863         atomic_dec(&channel->inited);
864         poch_channel_free_header(channel);
865         poch_channel_free_groups(channel);
866         kfree(channel->groups);
867         atomic_inc(&channel->free);
868
869         return 0;
870 }
871
872 /*
873  * Map the header and the group buffers, to user space.
874  */
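/*
 * Layout of the mapping, matching show_mmap_size() and the
 * user_offset values computed in poch_channel_alloc_groups():
 *
 *   [ header: npages(header_size) pages ][ group 0 ][ group 1 ] ...
 *
 * with each group occupying npages(group_size) pages.
 */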
875 static int poch_mmap(struct file *filp, struct vm_area_struct *vma)
876 {
877         struct channel_info *channel = filp->private_data;
878
879         unsigned long start;
880         unsigned long size;
881
882         unsigned long group_pages;
883         unsigned long header_pages;
884         unsigned long total_group_pages;
885
886         int pg_num;
887         struct page *pg;
888
889         int i;
890         int ret;
891
892         printk(KERN_WARNING "poch_mmap\n");
893
894         if (vma->vm_pgoff) {
895                 printk(KERN_WARNING PFX "page offset: %lu\n", vma->vm_pgoff);
896                 return -EINVAL;
897         }
898
899         group_pages = npages(channel->group_size);
900         header_pages = npages(channel->header_size);
901         total_group_pages = group_pages * channel->group_count;
902
903         size = vma->vm_end - vma->vm_start;
904         if (size != (header_pages + total_group_pages) * PAGE_SIZE) {
905                 printk(KERN_WARNING PFX "invalid mmap size %lu bytes\n", size);
906                 return -EINVAL;
907         }
908
909         start = vma->vm_start;
910
911         /* FIXME: Cleanup required on failure? */
912         pg = channel->header_pg;
913         for (pg_num = 0; pg_num < header_pages; pg_num++, pg++) {
914                 printk(KERN_DEBUG PFX "page_count: %d\n", page_count(pg));
915                 printk(KERN_DEBUG PFX "%d: header: 0x%lx\n", pg_num, start);
916                 ret = vm_insert_page(vma, start, pg);
917                 if (ret) {
918                         printk(KERN_DEBUG "vm_insert 1 failed at %lx\n", start);
919                         return ret;
920                 }
921                 start += PAGE_SIZE;
922         }
923
924         for (i = 0; i < channel->group_count; i++) {
925                 pg = channel->groups[i].pg;
926                 for (pg_num = 0; pg_num < group_pages; pg_num++, pg++) {
927                         printk(KERN_DEBUG PFX "%d: group %d: 0x%lx\n",
928                                pg_num, i, start);
929                         ret = vm_insert_page(vma, start, pg);
930                         if (ret) {
931                                 printk(KERN_DEBUG PFX
932                                        "vm_insert 2 failed at %d\n", pg_num);
933                                 return ret;
934                         }
935                         start += PAGE_SIZE;
936                 }
937         }
938
939         return 0;
940 }
941
942 /*
943  * Check whether there is some group that the user space has not
944  * consumed yet. When the user space consumes a group, it sets it to
945  * -1. Consuming could be reading data in case of RX and filling a
946  * buffer in case of TX.
947  */
948 static int poch_channel_available(struct channel_info *channel)
949 {
950         int i;
951
952         spin_lock_irq(&channel->group_offsets_lock);
953
954         for (i = 0; i < channel->group_count; i++) {
955                 if (channel->header->group_offsets[i] != -1) {
956                         spin_unlock_irq(&channel->group_offsets_lock);
957                         return 1;
958                 }
959         }
960
961         spin_unlock_irq(&channel->group_offsets_lock);
962
963         return 0;
964 }
965
966 static unsigned int poch_poll(struct file *filp, poll_table *pt)
967 {
968         struct channel_info *channel = filp->private_data;
969         unsigned int ret = 0;
970
971         poll_wait(filp, &channel->wq, pt);
972
973         if (poch_channel_available(channel)) {
974                 if (channel->dir == CHANNEL_DIR_RX)
975                         ret = POLLIN | POLLRDNORM;
976                 else
977                         ret = POLLOUT | POLLWRNORM;
978         }
979
980         return ret;
981 }
982
983 static int poch_ioctl(struct inode *inode, struct file *filp,
984                       unsigned int cmd, unsigned long arg)
985 {
986         struct channel_info *channel = filp->private_data;
987         void __iomem *fpga = channel->fpga_iomem;
988         void __iomem *bridge = channel->bridge_iomem;
989         void __user *argp = (void __user *)arg;
990         struct vm_area_struct *vms;
991         struct poch_counters counters;
992         int ret;
993
994         switch (cmd) {
995         case POCH_IOC_TRANSFER_START:
996                 switch (channel->chno) {
997                 case CHNO_TX_CHANNEL:
998                         printk(KERN_INFO PFX "ioctl: Tx start\n");
999                         iowrite32(0x1, fpga + FPGA_TX_TRIGGER_REG);
1000                         iowrite32(0x1, fpga + FPGA_TX_ENABLE_REG);
1001
1002                         /* FIXME: Does it make sense to do a DMA GO
1003                          * twice, once in Tx and once in Rx?
1004                          */
1005                         iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
1006                         break;
1007                 case CHNO_RX_CHANNEL:
1008                         printk(KERN_INFO PFX "ioctl: Rx start\n");
1009                         iowrite32(0x1, fpga + FPGA_RX_ARM_REG);
1010                         iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
1011                         break;
1012                 }
1013                 break;
1014         case POCH_IOC_TRANSFER_STOP:
1015                 switch (channel->chno) {
1016                 case CHNO_TX_CHANNEL:
1017                         printk(KERN_INFO PFX "ioctl: Tx stop\n");
1018                         iowrite32(0x0, fpga + FPGA_TX_ENABLE_REG);
1019                         iowrite32(0x0, fpga + FPGA_TX_TRIGGER_REG);
1020                         iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
1021                         break;
1022                 case CHNO_RX_CHANNEL:
1023                         printk(KERN_INFO PFX "ioctl: Rx stop\n");
1024                         iowrite32(0x0, fpga + FPGA_RX_ARM_REG);
1025                         iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
1026                         break;
1027                 }
1028                 break;
1029         case POCH_IOC_GET_COUNTERS:
1030                 if (!access_ok(VERIFY_WRITE, argp, sizeof(struct poch_counters)))
1031                         return -EFAULT;
1032
1033                 spin_lock_irq(&channel->counters_lock);
1034                 counters = channel->counters;
1035                 __poch_channel_clear_counters(channel);
1036                 spin_unlock_irq(&channel->counters_lock);
1037
1038                 ret = copy_to_user(argp, &counters,
1039                                    sizeof(struct poch_counters));
1040                 if (ret)
1041                         return -EFAULT;
1042
1043                 break;
1044         case POCH_IOC_SYNC_GROUP_FOR_USER:
1045         case POCH_IOC_SYNC_GROUP_FOR_DEVICE:
1046                 vms = find_vma(current->mm, arg);
1047                 if (!vms)
1048                         /* Address not mapped. */
1049                         return -EINVAL;
1050                 if (vms->vm_file != filp)
1051                         /* Address mapped from different device/file. */
1052                         return -EINVAL;
1053
1054                 flush_cache_range(vms, arg, arg + channel->group_size);
1055                 break;
1056         }
1057         return 0;
1058 }
1059
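/*
 * Rough sketch (an assumption based on the operations below, not a
 * documented ABI) of the expected userspace flow for an RX channel:
 *
 *   fd  = open(<channel char device>, O_RDWR);
 *   len = <value read from the channel's mmap_size attribute>;
 *   buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *   ioctl(fd, POCH_IOC_TRANSFER_START, 0);
 *   loop: poll(fd) for POLLIN, scan the group_offsets[] array in the
 *         poch_cbuf_header at the start of the mapping for entries
 *         != -1, consume those groups, then store -1 back to mark
 *         them free;
 *   ioctl(fd, POCH_IOC_TRANSFER_STOP, 0);
 */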
1060 static struct file_operations poch_fops = {
1061         .owner = THIS_MODULE,
1062         .open = poch_open,
1063         .release = poch_release,
1064         .ioctl = poch_ioctl,
1065         .poll = poch_poll,
1066         .mmap = poch_mmap
1067 };
1068
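/*
 * DMA-completion half of the interrupt handler. Reads the hardware's
 * current-group counter, works out how many groups completed since the
 * last interrupt (wrapping modulo group_count), publishes each
 * completed group's offset in the shared header so userspace can
 * consume it, and wakes up any sleepers on the channel's wait queue.
 */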
1069 static void poch_irq_dma(struct channel_info *channel)
1070 {
1071         u32 prev_transfer;
1072         u32 curr_transfer;
1073         long groups_done;
1074         unsigned long i, j;
1075         struct poch_group_info *groups;
1076         s32 *group_offsets;
1077         u32 curr_group_reg;
1078
1079         if (!atomic_read(&channel->inited))
1080                 return;
1081
1082         prev_transfer = channel->transfer;
1083
1084         if (channel->chno == CHNO_RX_CHANNEL)
1085                 curr_group_reg = FPGA_RX_CURR_GROUP_REG;
1086         else
1087                 curr_group_reg = FPGA_TX_CURR_GROUP_REG;
1088
1089         curr_transfer = ioread32(channel->fpga_iomem + curr_group_reg);
1090
1091         groups_done = curr_transfer - prev_transfer;
1092         /* Check wrap over, and handle it. */
1093         if (groups_done <= 0)
1094                 groups_done += channel->group_count;
1095
1096         group_offsets = channel->header->group_offsets;
1097         groups = channel->groups;
1098
1099         spin_lock(&channel->group_offsets_lock);
1100
1101         for (i = 0; i < groups_done; i++) {
1102                 j = (prev_transfer + i) % channel->group_count;
1103                 group_offsets[j] = groups[j].user_offset;
1104         }
1105
1106         spin_unlock(&channel->group_offsets_lock);
1107
1108         channel->transfer = curr_transfer;
1109
1110         wake_up_interruptible(&channel->wq);
1111 }
1112
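/*
 * Shared top-level interrupt handler. Reads the bridge, FPGA and DMA
 * status registers (and reads them back, presumably to acknowledge the
 * interrupt), dispatches DMA completions to poch_irq_dma(), bumps the
 * per-channel PLL-unlock and FIFO error counters, and re-enables the
 * FPGA interrupt at the bridge.
 */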
1113 static irqreturn_t poch_irq_handler(int irq, void *p)
1114 {
1115         struct poch_dev *poch_dev = p;
1116         void __iomem *bridge = poch_dev->bridge_iomem;
1117         void __iomem *fpga = poch_dev->fpga_iomem;
1118         struct channel_info *channel_rx = &poch_dev->channels[CHNO_RX_CHANNEL];
1119         struct channel_info *channel_tx = &poch_dev->channels[CHNO_TX_CHANNEL];
1120         u32 bridge_stat;
1121         u32 fpga_stat;
1122         u32 dma_stat;
1123
1124         bridge_stat = ioread32(bridge + BRIDGE_INT_STAT_REG);
1125         fpga_stat = ioread32(fpga + FPGA_INT_STAT_REG);
1126         dma_stat = ioread32(fpga + FPGA_DMA_INT_STAT_REG);
1127
1128         ioread32(fpga + FPGA_DMA_INT_STAT_REG);
1129         ioread32(fpga + FPGA_INT_STAT_REG);
1130         ioread32(bridge + BRIDGE_INT_STAT_REG);
1131
1132         if (bridge_stat & BRIDGE_INT_FPGA) {
1133                 if (fpga_stat & FPGA_INT_DMA_CORE) {
1134                         if (dma_stat & FPGA_DMA_INT_RX)
1135                                 poch_irq_dma(channel_rx);
1136                         if (dma_stat & FPGA_DMA_INT_TX)
1137                                 poch_irq_dma(channel_tx);
1138                 }
1139                 if (fpga_stat & FPGA_INT_PLL_UNLOCKED) {
1140                         channel_tx->counters.pll_unlock++;
1141                         channel_rx->counters.pll_unlock++;
1142                         if (printk_ratelimit())
1143                                 printk(KERN_WARNING PFX "PLL unlocked\n");
1144                 }
1145                 if (fpga_stat & FPGA_INT_TX_FF_EMPTY)
1146                         channel_tx->counters.fifo_empty++;
1147                 if (fpga_stat & FPGA_INT_TX_FF_OVRFLW)
1148                         channel_tx->counters.fifo_overflow++;
1149                 if (fpga_stat & FPGA_INT_RX_FF_EMPTY)
1150                         channel_rx->counters.fifo_empty++;
1151                 if (fpga_stat & FPGA_INT_RX_FF_OVRFLW)
1152                         channel_rx->counters.fifo_overflow++;
1153
1154                 /*
1155                  * FIXME: These errors should be notified through the
1156                  * poll interface as POLLERR.
1157                  */
1158
1159                 /* Re-enable interrupts. */
1160                 iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);
1161
1162                 return IRQ_HANDLED;
1163         }
1164
1165         return IRQ_NONE;
1166 }
1167
1168 static void poch_class_dev_unregister(struct poch_dev *poch_dev, int id)
1169 {
1170         int i, j;
1171         int nattrs;
1172         struct channel_info *channel;
1173         dev_t devno;
1174
1175         if (poch_dev->dev == NULL)
1176                 return;
1177
1178         for (i = 0; i < poch_dev->nchannels; i++) {
1179                 channel = &poch_dev->channels[i];
1180                 devno = poch_first_dev + (id * poch_dev->nchannels) + i;
1181
1182                 if (!channel->dev)
1183                         continue;
1184
1185                 nattrs = sizeof(poch_class_attrs)/sizeof(poch_class_attrs[0]);
1186                 for (j = 0; j < nattrs; j++)
1187                         device_remove_file(channel->dev, poch_class_attrs[j]);
1188
1189                 device_unregister(channel->dev);
1190         }
1191
1192         device_unregister(poch_dev->dev);
1193 }
1194
1195 static int __devinit poch_class_dev_register(struct poch_dev *poch_dev,
1196                                              int id)
1197 {
1198         struct device *dev = &poch_dev->pci_dev->dev;
1199         int i, j;
1200         int nattrs;
1201         int ret;
1202         struct channel_info *channel;
1203         dev_t devno;
1204
1205         poch_dev->dev = device_create(poch_cls, &poch_dev->pci_dev->dev,
1206                                       MKDEV(0, 0), NULL, "poch%d", id);
1207         if (IS_ERR(poch_dev->dev)) {
1208                 dev_err(dev, "error creating parent class device");
1209                 ret = PTR_ERR(poch_dev->dev);
1210                 poch_dev->dev = NULL;
1211                 return ret;
1212         }
1213
1214         for (i = 0; i < poch_dev->nchannels; i++) {
1215                 channel = &poch_dev->channels[i];
1216
1217                 devno = poch_first_dev + (id * poch_dev->nchannels) + i;
1218                 channel->dev = device_create(poch_cls, poch_dev->dev, devno,
1219                                              NULL, "ch%d", i);
1220                 if (IS_ERR(channel->dev)) {
1221                         dev_err(dev, "error creating channel class device");
1222                         ret = PTR_ERR(channel->dev);
1223                         channel->dev = NULL;
1224                         poch_class_dev_unregister(poch_dev, id);
1225                         return ret;
1226                 }
1227
1228                 dev_set_drvdata(channel->dev, channel);
1229                 nattrs = sizeof(poch_class_attrs)/sizeof(poch_class_attrs[0]);
1230                 for (j = 0; j < nattrs; j++) {
1231                         ret = device_create_file(channel->dev,
1232                                                  poch_class_attrs[j]);
1233                         if (ret) {
1234                                 dev_err(dev, "error creating attribute file");
1235                                 poch_class_dev_unregister(poch_dev, id);
1236                                 return ret;
1237                         }
1238                 }
1239         }
1240
1241         return 0;
1242 }
1243
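/*
 * PCI probe: enable the device, map BAR0 (CardBus bridge registers)
 * and BAR1 (FPGA registers), expose BAR1 to userspace through UIO,
 * request the shared interrupt, and register the character device plus
 * the "pocketchange" class devices and their sysfs attributes.
 */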
1244 static int __devinit poch_pci_probe(struct pci_dev *pdev,
1245                                     const struct pci_device_id *pci_id)
1246 {
1247         struct device *dev = &pdev->dev;
1248         struct poch_dev *poch_dev;
1249         struct uio_info *uio;
1250         int ret;
1251         int id;
1252         int i;
1253
1254         poch_dev = kzalloc(sizeof(struct poch_dev), GFP_KERNEL);
1255         if (!poch_dev) {
1256                 dev_err(dev, "error allocating priv. data memory\n");
1257                 return -ENOMEM;
1258         }
1259
1260         poch_dev->pci_dev = pdev;
1261         uio = &poch_dev->uio;
1262
1263         pci_set_drvdata(pdev, poch_dev);
1264
1265         spin_lock_init(&poch_dev->iomem_lock);
1266
1267         poch_dev->nchannels = POCH_NCHANNELS;
1268         poch_dev->channels[CHNO_RX_CHANNEL].dir = CHANNEL_DIR_RX;
1269         poch_dev->channels[CHNO_TX_CHANNEL].dir = CHANNEL_DIR_TX;
1270
1271         for (i = 0; i < poch_dev->nchannels; i++) {
1272                 init_waitqueue_head(&poch_dev->channels[i].wq);
1273                 atomic_set(&poch_dev->channels[i].free, 1);
1274                 atomic_set(&poch_dev->channels[i].inited, 0);
1275         }
1276
1277         ret = pci_enable_device(pdev);
1278         if (ret) {
1279                 dev_err(dev, "error enabling device\n");
1280                 goto out_free;
1281         }
1282
1283         ret = pci_request_regions(pdev, "poch");
1284         if (ret) {
1285                 dev_err(dev, "error requesting resources\n");
1286                 goto out_disable;
1287         }
1288
1289         uio->mem[0].addr = pci_resource_start(pdev, 1);
1290         if (!uio->mem[0].addr) {
1291                 dev_err(dev, "invalid BAR1\n");
1292                 ret = -ENODEV;
1293                 goto out_release;
1294         }
1295
1296         uio->mem[0].size = pci_resource_len(pdev, 1);
1297         uio->mem[0].memtype = UIO_MEM_PHYS;
1298
1299         uio->name = "poch";
1300         uio->version = "0.0.1";
1301         uio->irq = -1;
1302         ret = uio_register_device(dev, uio);
1303         if (ret) {
1304                 dev_err(dev, "error registering UIO device: %d\n", ret);
1305                 goto out_release;
1306         }
1307
1308         poch_dev->bridge_iomem = ioremap(pci_resource_start(pdev, 0),
1309                                          pci_resource_len(pdev, 0));
1310         if (poch_dev->bridge_iomem == NULL) {
1311                 dev_err(dev, "error mapping bridge (bar0) registers\n");
1312                 ret = -ENOMEM;
1313                 goto out_uio_unreg;
1314         }
1315
1316         poch_dev->fpga_iomem = ioremap(pci_resource_start(pdev, 1),
1317                                        pci_resource_len(pdev, 1));
1318         if (poch_dev->fpga_iomem == NULL) {
1319                 dev_err(dev, "error mapping fpga (bar1) registers\n");
1320                 ret = -ENOMEM;
1321                 goto out_bar0_unmap;
1322         }
1323
1324         ret = request_irq(pdev->irq, poch_irq_handler, IRQF_SHARED,
1325                           dev_name(dev), poch_dev);
1326         if (ret) {
1327                 dev_err(dev, "error requesting IRQ %u\n", pdev->irq);
1329                 goto out_bar1_unmap;
1330         }
1331
1332         if (!idr_pre_get(&poch_ids, GFP_KERNEL)) {
1333                 dev_err(dev, "error pre-allocating device id\n");
1334                 ret = -ENOMEM;
1335                 goto out_free_irq;
1336         }
1337
1338         idr_get_new(&poch_ids, poch_dev, &id);
1339         if (id >= MAX_POCH_CARDS) {
1340                 dev_err(dev, "minors exhausted\n");
1341                 ret = -EBUSY;
1342                 goto out_free_irq;
1343         }
1344
1345         cdev_init(&poch_dev->cdev, &poch_fops);
1346         poch_dev->cdev.owner = THIS_MODULE;
1347         ret = cdev_add(&poch_dev->cdev,
1348                        poch_first_dev + (id * poch_dev->nchannels),
1349                        poch_dev->nchannels);
1350         if (ret) {
1351                 dev_err(dev, "error registering character device\n");
1352                 goto out_idr_remove;
1353         }
1354
1355         ret = poch_class_dev_register(poch_dev, id);
1356         if (ret)
1357                 goto out_cdev_del;
1358
1359         return 0;
1360
1361  out_cdev_del:
1362         cdev_del(&poch_dev->cdev);
1363  out_idr_remove:
1364         idr_remove(&poch_ids, id);
1365  out_free_irq:
1366         free_irq(pdev->irq, poch_dev);
1367  out_bar1_unmap:
1368         iounmap(poch_dev->fpga_iomem);
1369  out_bar0_unmap:
1370         iounmap(poch_dev->bridge_iomem);
1371  out_uio_unreg:
1372         uio_unregister_device(uio);
1373  out_release:
1374         pci_release_regions(pdev);
1375  out_disable:
1376         pci_disable_device(pdev);
1377  out_free:
1378         kfree(poch_dev);
1379         return ret;
1380 }
1381
1382 /*
1383  * FIXME: We are yet to handle the hot unplug case.
1384  */
1385 static void poch_pci_remove(struct pci_dev *pdev)
1386 {
1387         struct poch_dev *poch_dev = pci_get_drvdata(pdev);
1388         struct uio_info *uio = &poch_dev->uio;
1389         unsigned int minor = MINOR(poch_dev->cdev.dev);
1390         unsigned int id = minor / poch_dev->nchannels;
1391
1392         poch_class_dev_unregister(poch_dev, id);
1393         cdev_del(&poch_dev->cdev);
1394         idr_remove(&poch_ids, id);
1395         free_irq(pdev->irq, poch_dev);
1396         iounmap(poch_dev->fpga_iomem);
1397         iounmap(poch_dev->bridge_iomem);
1398         uio_unregister_device(uio);
1399         pci_release_regions(pdev);
1400         pci_disable_device(pdev);
1401         pci_set_drvdata(pdev, NULL);
1402         iounmap(uio->mem[0].internal_addr);
1403
1404         kfree(poch_dev);
1405 }
1406
1407 static const struct pci_device_id poch_pci_ids[] /* __devinitconst */ = {
1408         { PCI_DEVICE(PCI_VENDOR_ID_RRAPIDS,
1409                      PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE) },
1410         { 0, }
1411 };
1412
1413 static struct pci_driver poch_pci_driver = {
1414         .name = DRV_NAME,
1415         .id_table = poch_pci_ids,
1416         .probe = poch_pci_probe,
1417         .remove = poch_pci_remove,
1418 };
1419
1420 static int __init poch_init_module(void)
1421 {
1422         int ret = 0;
1423
1424         ret = alloc_chrdev_region(&poch_first_dev, 0,
1425                                   MAX_POCH_DEVICES, DRV_NAME);
1426         if (ret) {
1427                 printk(KERN_ERR PFX "error allocating device numbers\n");
1428                 return ret;
1429         }
1430
1431         poch_cls = class_create(THIS_MODULE, "pocketchange");
1432         if (IS_ERR(poch_cls)) {
1433                 ret = PTR_ERR(poch_cls);
1434                 goto out_unreg_chrdev;
1435         }
1436
1437         ret = pci_register_driver(&poch_pci_driver);
1438         if (ret) {
1439                 printk(KERN_ERR PFX "error registering PCI driver\n");
1440                 goto out_class_destroy;
1441         }
1442
1443         return 0;
1444
1445  out_class_destroy:
1446         class_destroy(poch_cls);
1447
1448  out_unreg_chrdev:
1449         unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
1450
1451         return ret;
1452 }
1453
1454 static void __exit poch_exit_module(void)
1455 {
1456         pci_unregister_driver(&poch_pci_driver);
1457         class_destroy(poch_cls);
1458         unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
1459 }
1460
1461 module_init(poch_init_module);
1462 module_exit(poch_exit_module);
1463
1464 MODULE_LICENSE("GPL v2");