Staging: poch: Correct pages from bytes.
[safe/jmp/linux-2.6] / drivers / staging / poch / poch.c
1 /*
2  * User-space DMA and UIO based Redrapids Pocket Change CardBus driver
3  *
4  * Copyright 2008 Vijay Kumar <vijaykumar@bravegnu.org>
5  *
6  * Licensed under GPL version 2 only.
7  */
8
9 #include <linux/device.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/uio_driver.h>
13 #include <linux/spinlock.h>
14 #include <linux/cdev.h>
15 #include <linux/delay.h>
16 #include <linux/sysfs.h>
17 #include <linux/poll.h>
18 #include <linux/idr.h>
19 #include <linux/interrupt.h>
20 #include <linux/init.h>
21 #include <linux/ioctl.h>
22 #include <linux/io.h>
23
24 #include "poch.h"
25
26 #include <asm/cacheflush.h>
27
28 #ifndef PCI_VENDOR_ID_RRAPIDS
29 #define PCI_VENDOR_ID_RRAPIDS 0x17D2
30 #endif
31
32 #ifndef PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE
33 #define PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE 0x0351
34 #endif
35
36 #define POCH_NCHANNELS 2
37
38 #define MAX_POCH_CARDS 8
39 #define MAX_POCH_DEVICES (MAX_POCH_CARDS * POCH_NCHANNELS)
40
41 #define DRV_NAME "poch"
42 #define PFX      DRV_NAME ": "
43
44 /*
45  * BAR0 Bridge Register Definitions
46  */
47
48 #define BRIDGE_REV_REG                  0x0
49 #define BRIDGE_INT_MASK_REG             0x4
50 #define BRIDGE_INT_STAT_REG             0x8
51
52 #define BRIDGE_INT_ACTIVE               (0x1 << 31)
53 #define BRIDGE_INT_FPGA                 (0x1 << 2)
54 #define BRIDGE_INT_TEMP_FAIL            (0x1 << 1)
55 #define BRIDGE_INT_TEMP_WARN            (0x1 << 0)
56
57 #define BRIDGE_FPGA_RESET_REG           0xC
58
59 #define BRIDGE_CARD_POWER_REG           0x10
60 #define BRIDGE_CARD_POWER_EN            (0x1 << 0)
61 #define BRIDGE_CARD_POWER_PROG_DONE     (0x1 << 31)
62
63 #define BRIDGE_JTAG_REG                 0x14
64 #define BRIDGE_DMA_GO_REG               0x18
65 #define BRIDGE_STAT_0_REG               0x1C
66 #define BRIDGE_STAT_1_REG               0x20
67 #define BRIDGE_STAT_2_REG               0x24
68 #define BRIDGE_STAT_3_REG               0x28
69 #define BRIDGE_TEMP_STAT_REG            0x2C
70 #define BRIDGE_TEMP_THRESH_REG          0x30
71 #define BRIDGE_EEPROM_REVSEL_REG        0x34
72 #define BRIDGE_CIS_STRUCT_REG           0x100
73 #define BRIDGE_BOARDREV_REG             0x124
74
75 /*
76  * BAR1 FPGA Register Definitions
77  */
78
79 #define FPGA_IFACE_REV_REG              0x0
80 #define FPGA_RX_BLOCK_SIZE_REG          0x8
81 #define FPGA_TX_BLOCK_SIZE_REG          0xC
82 #define FPGA_RX_BLOCK_COUNT_REG         0x10
83 #define FPGA_TX_BLOCK_COUNT_REG         0x14
84 #define FPGA_RX_CURR_DMA_BLOCK_REG      0x18
85 #define FPGA_TX_CURR_DMA_BLOCK_REG      0x1C
86 #define FPGA_RX_GROUP_COUNT_REG         0x20
87 #define FPGA_TX_GROUP_COUNT_REG         0x24
88 #define FPGA_RX_CURR_GROUP_REG          0x28
89 #define FPGA_TX_CURR_GROUP_REG          0x2C
90 #define FPGA_RX_CURR_PCI_REG            0x38
91 #define FPGA_TX_CURR_PCI_REG            0x3C
92 #define FPGA_RX_GROUP0_START_REG        0x40
93 #define FPGA_TX_GROUP0_START_REG        0xC0
94 #define FPGA_DMA_DESC_1_REG             0x140
95 #define FPGA_DMA_DESC_2_REG             0x144
96 #define FPGA_DMA_DESC_3_REG             0x148
97 #define FPGA_DMA_DESC_4_REG             0x14C
98
99 #define FPGA_DMA_INT_STAT_REG           0x150
100 #define FPGA_DMA_INT_MASK_REG           0x154
101 #define FPGA_DMA_INT_RX         (1 << 0)
102 #define FPGA_DMA_INT_TX         (1 << 1)
103
104 #define FPGA_RX_GROUPS_PER_INT_REG      0x158
105 #define FPGA_TX_GROUPS_PER_INT_REG      0x15C
106 #define FPGA_DMA_ADR_PAGE_REG           0x160
107 #define FPGA_FPGA_REV_REG               0x200
108
109 #define FPGA_ADC_CLOCK_CTL_REG          0x204
110 #define FPGA_ADC_CLOCK_CTL_OSC_EN       (0x1 << 3)
111 #define FPGA_ADC_CLOCK_LOCAL_CLK        (0x1 | FPGA_ADC_CLOCK_CTL_OSC_EN)
112 #define FPGA_ADC_CLOCK_EXT_SAMP_CLK     0X0
113
114 #define FPGA_ADC_DAC_EN_REG             0x208
115 #define FPGA_ADC_DAC_EN_DAC_OFF         (0x1 << 1)
116 #define FPGA_ADC_DAC_EN_ADC_OFF         (0x1 << 0)
117
118 #define FPGA_INT_STAT_REG               0x20C
119 #define FPGA_INT_MASK_REG               0x210
120 #define FPGA_INT_PLL_UNLOCKED           (0x1 << 9)
121 #define FPGA_INT_DMA_CORE               (0x1 << 8)
122 #define FPGA_INT_TX_FF_EMPTY            (0x1 << 7)
123 #define FPGA_INT_RX_FF_EMPTY            (0x1 << 6)
124 #define FPGA_INT_TX_FF_OVRFLW           (0x1 << 3)
125 #define FPGA_INT_RX_FF_OVRFLW           (0x1 << 2)
126 #define FPGA_INT_TX_ACQ_DONE            (0x1 << 1)
127 #define FPGA_INT_RX_ACQ_DONE            (0x1)
128
129 #define FPGA_RX_ADC_CTL_REG             0x214
130 #define FPGA_RX_ADC_CTL_CONT_CAP        (0x0)
131 #define FPGA_RX_ADC_CTL_SNAP_CAP        (0x1)
132
133 #define FPGA_RX_ARM_REG                 0x21C
134
135 #define FPGA_DOM_REG                    0x224
136 #define FPGA_DOM_DCM_RESET              (0x1 << 5)
137 #define FPGA_DOM_SOFT_RESET             (0x1 << 4)
138 #define FPGA_DOM_DUAL_M_SG_DMA          (0x0)
139 #define FPGA_DOM_TARGET_ACCESS          (0x1)
140
141 #define FPGA_TX_CTL_REG                 0x228
142 #define FPGA_TX_CTL_FIFO_FLUSH          (0x1 << 9)
143 #define FPGA_TX_CTL_OUTPUT_ZERO         (0x0 << 2)
144 #define FPGA_TX_CTL_OUTPUT_CARDBUS      (0x1 << 2)
145 #define FPGA_TX_CTL_OUTPUT_ADC          (0x2 << 2)
146 #define FPGA_TX_CTL_OUTPUT_SNAPSHOT     (0x3 << 2)
147 #define FPGA_TX_CTL_LOOPBACK            (0x1 << 0)
148
149 #define FPGA_ENDIAN_MODE_REG            0x22C
150 #define FPGA_RX_FIFO_COUNT_REG          0x28C
151 #define FPGA_TX_ENABLE_REG              0x298
152 #define FPGA_TX_TRIGGER_REG             0x29C
153 #define FPGA_TX_DATAMEM_COUNT_REG       0x2A8
154 #define FPGA_CAP_FIFO_REG               0x300
155 #define FPGA_TX_SNAPSHOT_REG            0x8000
156
157 /*
158  * Channel Index Definitions
159  */
160
/* Indices into poch_dev.channels[]. */
enum {
	CHNO_RX_CHANNEL,
	CHNO_TX_CHANNEL,
};

struct poch_dev;

enum channel_dir {
	CHANNEL_DIR_RX,
	CHANNEL_DIR_TX,
};

/* Book-keeping for one DMA group buffer. */
struct poch_group_info {
	struct page *pg;		/* first page of the group buffer */
	dma_addr_t dma_addr;		/* address programmed into the FPGA */
	unsigned long user_offset;	/* byte offset within the mmap() region */
};

/* Per-channel (RX or TX) state. */
struct channel_info {
	unsigned int chno;		/* CHNO_RX_CHANNEL or CHNO_TX_CHANNEL */

	/* Geometry as written through sysfs; latched into the plain
	 * fields below when the channel is opened. */
	atomic_t sys_block_size;
	atomic_t sys_group_size;
	atomic_t sys_group_count;

	enum channel_dir dir;

	/* Latched copies of the sys_* values, stable while the channel
	 * is open. */
	unsigned long block_size;
	unsigned long group_size;
	unsigned long group_count;

	/* Contains the DMA address and VM offset of each group. */
	struct poch_group_info *groups;

	/* Contains the header and circular buffer exported to userspace. */
	spinlock_t group_offsets_lock;
	struct poch_cbuf_header *header;
	struct page *header_pg;
	unsigned long header_size;

	/* Last group indicated as 'complete' to user space. */
	unsigned int transfer;

	wait_queue_head_t wq;

	/* NOTE(review): names suggest data_available is the RX view and
	 * space_available the TX view of the same counter — verify at
	 * the use sites. */
	union {
		unsigned int data_available;
		unsigned int space_available;
	};

	void __iomem *bridge_iomem;
	void __iomem *fpga_iomem;
	spinlock_t *iomem_lock;		/* serializes shared bridge/FPGA regs */

	atomic_t free;			/* nonzero while the channel is unopened */
	atomic_t inited;

	/* Error counters */
	struct poch_counters counters;
	spinlock_t counters_lock;

	struct device *dev;
};

/* Per-card state: one CardBus card carries POCH_NCHANNELS channels. */
struct poch_dev {
	struct uio_info uio;
	struct pci_dev *pci_dev;
	unsigned int nchannels;
	struct channel_info channels[POCH_NCHANNELS];
	struct cdev cdev;

	/* Counts the no. of channels that have been opened. On first
	 * open, the card is powered on. On last channel close, the
	 * card is powered off.
	 */
	atomic_t usage;

	void __iomem *bridge_iomem;
	void __iomem *fpga_iomem;
	spinlock_t iomem_lock;

	struct device *dev;
};
244
245 static dev_t poch_first_dev;
246 static struct class *poch_cls;
247 static DEFINE_IDR(poch_ids);
248
249 static ssize_t store_block_size(struct device *dev,
250                                 struct device_attribute *attr,
251                                 const char *buf, size_t count)
252 {
253         struct channel_info *channel = dev_get_drvdata(dev);
254         unsigned long block_size;
255
256         sscanf(buf, "%lu", &block_size);
257         atomic_set(&channel->sys_block_size, block_size);
258
259         return count;
260 }
261 static DEVICE_ATTR(block_size, S_IWUSR|S_IWGRP, NULL, store_block_size);
262
263 static ssize_t store_group_size(struct device *dev,
264                                 struct device_attribute *attr,
265                                 const char *buf, size_t count)
266 {
267         struct channel_info *channel = dev_get_drvdata(dev);
268         unsigned long group_size;
269
270         sscanf(buf, "%lu", &group_size);
271         atomic_set(&channel->sys_group_size, group_size);
272
273         return count;
274 }
275 static DEVICE_ATTR(group_size, S_IWUSR|S_IWGRP, NULL, store_group_size);
276
277 static ssize_t store_group_count(struct device *dev,
278                                 struct device_attribute *attr,
279                                  const char *buf, size_t count)
280 {
281         struct channel_info *channel = dev_get_drvdata(dev);
282         unsigned long group_count;
283
284         sscanf(buf, "%lu", &group_count);
285         atomic_set(&channel->sys_group_count, group_count);
286
287         return count;
288 }
289 static DEVICE_ATTR(group_count, S_IWUSR|S_IWGRP, NULL, store_group_count);
290
291 static ssize_t show_direction(struct device *dev,
292                               struct device_attribute *attr, char *buf)
293 {
294         struct channel_info *channel = dev_get_drvdata(dev);
295         int len;
296
297         len = sprintf(buf, "%s\n", (channel->dir ? "tx" : "rx"));
298         return len;
299 }
300 static DEVICE_ATTR(dir, S_IRUSR|S_IRGRP, show_direction, NULL);
301
302 static unsigned long npages(unsigned long bytes)
303 {
304         if (bytes % PAGE_SIZE == 0)
305                 return bytes / PAGE_SIZE;
306         else
307                 return (bytes / PAGE_SIZE) + 1;
308 }
309
310 static ssize_t show_mmap_size(struct device *dev,
311                               struct device_attribute *attr, char *buf)
312 {
313         struct channel_info *channel = dev_get_drvdata(dev);
314         int len;
315         unsigned long mmap_size;
316         unsigned long group_pages;
317         unsigned long header_pages;
318         unsigned long total_group_pages;
319
320         group_pages = npages(channel->group_size);
321         header_pages = npages(channel->header_size);
322         total_group_pages = group_pages * channel->group_count;
323
324         mmap_size = (header_pages + total_group_pages) * PAGE_SIZE;
325         len = sprintf(buf, "%lu\n", mmap_size);
326         return len;
327 }
328 static DEVICE_ATTR(mmap_size, S_IRUSR|S_IRGRP, show_mmap_size, NULL);
329
/* Per-channel sysfs attributes, registered on each channel device. */
static struct device_attribute *poch_class_attrs[] = {
	&dev_attr_block_size,
	&dev_attr_group_size,
	&dev_attr_group_count,
	&dev_attr_dir,
	&dev_attr_mmap_size,
};
337
338 static void poch_channel_free_groups(struct channel_info *channel)
339 {
340         unsigned long i;
341
342         for (i = 0; i < channel->group_count; i++) {
343                 struct poch_group_info *group;
344                 unsigned int order;
345
346                 group = &channel->groups[i];
347                 order = get_order(channel->group_size);
348                 if (group->pg)
349                         __free_pages(group->pg, order);
350         }
351 }
352
353 static int poch_channel_alloc_groups(struct channel_info *channel)
354 {
355         unsigned long i;
356         unsigned long group_pages;
357         unsigned long header_pages;
358
359         group_pages = npages(channel->group_size);
360         header_pages = npages(channel->header_size);
361
362         for (i = 0; i < channel->group_count; i++) {
363                 struct poch_group_info *group;
364                 unsigned int order;
365                 gfp_t gfp_mask;
366
367                 group = &channel->groups[i];
368                 order = get_order(channel->group_size);
369
370                 /*
371                  * __GFP_COMP is required here since we are going to
372                  * perform non-linear mapping to userspace. For more
373                  * information read the vm_insert_page() function
374                  * comments.
375                  */
376
377                 gfp_mask = GFP_KERNEL | GFP_DMA32 | __GFP_ZERO;
378                 group->pg = alloc_pages(gfp_mask, order);
379                 if (!group->pg) {
380                         poch_channel_free_groups(channel);
381                         return -ENOMEM;
382                 }
383
384                 /* FIXME: This is the physical address not the bus
385                  * address!  This won't work in architectures that
386                  * have an IOMMU. Can we use pci_map_single() for
387                  * this?
388                  */
389                 group->dma_addr = page_to_pfn(group->pg) * PAGE_SIZE;
390                 group->user_offset =
391                         (header_pages + (i * group_pages)) * PAGE_SIZE;
392
393                 printk(KERN_INFO PFX "%ld: user_offset: 0x%lx dma: 0x%x\n", i,
394                        group->user_offset, group->dma_addr);
395         }
396
397         return 0;
398 }
399
400 static void channel_latch_attr(struct channel_info *channel)
401 {
402         channel->group_count = atomic_read(&channel->sys_group_count);
403         channel->group_size = atomic_read(&channel->sys_group_size);
404         channel->block_size = atomic_read(&channel->sys_block_size);
405 }
406
407 /*
408  * Configure DMA group registers
409  */
410 static void channel_dma_init(struct channel_info *channel)
411 {
412         void __iomem *fpga = channel->fpga_iomem;
413         u32 group_regs_base;
414         u32 group_reg;
415         unsigned int page;
416         unsigned int group_in_page;
417         unsigned long i;
418         u32 block_size_reg;
419         u32 block_count_reg;
420         u32 group_count_reg;
421         u32 groups_per_int_reg;
422         u32 curr_pci_reg;
423
424         if (channel->chno == CHNO_RX_CHANNEL) {
425                 group_regs_base = FPGA_RX_GROUP0_START_REG;
426                 block_size_reg = FPGA_RX_BLOCK_SIZE_REG;
427                 block_count_reg = FPGA_RX_BLOCK_COUNT_REG;
428                 group_count_reg = FPGA_RX_GROUP_COUNT_REG;
429                 groups_per_int_reg = FPGA_RX_GROUPS_PER_INT_REG;
430                 curr_pci_reg = FPGA_RX_CURR_PCI_REG;
431         } else {
432                 group_regs_base = FPGA_TX_GROUP0_START_REG;
433                 block_size_reg = FPGA_TX_BLOCK_SIZE_REG;
434                 block_count_reg = FPGA_TX_BLOCK_COUNT_REG;
435                 group_count_reg = FPGA_TX_GROUP_COUNT_REG;
436                 groups_per_int_reg = FPGA_TX_GROUPS_PER_INT_REG;
437                 curr_pci_reg = FPGA_TX_CURR_PCI_REG;
438         }
439
440         printk(KERN_WARNING "block_size, group_size, group_count\n");
441         /*
442          * Block size is represented in no. of 64 bit transfers.
443          */
444         iowrite32(channel->block_size / 8, fpga + block_size_reg);
445         iowrite32(channel->group_size / channel->block_size,
446                   fpga + block_count_reg);
447         iowrite32(channel->group_count, fpga + group_count_reg);
448         /* FIXME: Hardcoded groups per int. Get it from sysfs? */
449         iowrite32(1, fpga + groups_per_int_reg);
450
451         /* Unlock PCI address? Not defined in the data sheet, but used
452          * in the reference code by Redrapids.
453          */
454         iowrite32(0x1, fpga + curr_pci_reg);
455
456         /* The DMA address page register is shared between the RX and
457          * TX channels, so acquire lock.
458          */
459         spin_lock(channel->iomem_lock);
460         for (i = 0; i < channel->group_count; i++) {
461                 page = i / 32;
462                 group_in_page = i % 32;
463
464                 group_reg = group_regs_base + (group_in_page * 4);
465
466                 iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
467                 iowrite32(channel->groups[i].dma_addr, fpga + group_reg);
468         }
469         for (i = 0; i < channel->group_count; i++) {
470                 page = i / 32;
471                 group_in_page = i % 32;
472
473                 group_reg = group_regs_base + (group_in_page * 4);
474
475                 iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
476                 printk(KERN_INFO PFX "%ld: read dma_addr: 0x%x\n", i,
477                        ioread32(fpga + group_reg));
478         }
479         spin_unlock(channel->iomem_lock);
480
481 }
482
483 static int poch_channel_alloc_header(struct channel_info *channel)
484 {
485         struct poch_cbuf_header *header = channel->header;
486         unsigned long group_offset_size;
487         unsigned long tot_group_offsets_size;
488
489         /* Allocate memory to hold header exported userspace */
490         group_offset_size = sizeof(header->group_offsets[0]);
491         tot_group_offsets_size = group_offset_size * channel->group_count;
492         channel->header_size = sizeof(*header) + tot_group_offsets_size;
493         channel->header_pg = alloc_pages(GFP_KERNEL | __GFP_ZERO,
494                                          get_order(channel->header_size));
495         if (!channel->header_pg)
496                 return -ENOMEM;
497
498         channel->header = page_address(channel->header_pg);
499
500         return 0;
501 }
502
503 static void poch_channel_free_header(struct channel_info *channel)
504 {
505         unsigned int order;
506
507         order = get_order(channel->header_size);
508         __free_pages(channel->header_pg, order);
509 }
510
511 static void poch_channel_init_header(struct channel_info *channel)
512 {
513         int i;
514         struct poch_group_info *groups;
515         s32 *group_offsets;
516
517         channel->header->group_size_bytes = channel->group_size;
518         channel->header->group_count = channel->group_count;
519
520         spin_lock_init(&channel->group_offsets_lock);
521
522         group_offsets = channel->header->group_offsets;
523         groups = channel->groups;
524
525         for (i = 0; i < channel->group_count; i++) {
526                 if (channel->dir == CHANNEL_DIR_RX)
527                         group_offsets[i] = -1;
528                 else
529                         group_offsets[i] = groups[i].user_offset;
530         }
531 }
532
533 static void __poch_channel_clear_counters(struct channel_info *channel)
534 {
535         channel->counters.pll_unlock = 0;
536         channel->counters.fifo_empty = 0;
537         channel->counters.fifo_overflow = 0;
538 }
539
540 static int poch_channel_init(struct channel_info *channel,
541                              struct poch_dev *poch_dev)
542 {
543         struct pci_dev *pdev = poch_dev->pci_dev;
544         struct device *dev = &pdev->dev;
545         unsigned long alloc_size;
546         int ret;
547
548         printk(KERN_WARNING "channel_latch_attr\n");
549
550         channel_latch_attr(channel);
551
552         channel->transfer = 0;
553
554         /* Allocate memory to hold group information. */
555         alloc_size = channel->group_count * sizeof(struct poch_group_info);
556         channel->groups = kzalloc(alloc_size, GFP_KERNEL);
557         if (!channel->groups) {
558                 dev_err(dev, "error allocating memory for group info\n");
559                 ret = -ENOMEM;
560                 goto out;
561         }
562
563         printk(KERN_WARNING "poch_channel_alloc_groups\n");
564
565         ret = poch_channel_alloc_groups(channel);
566         if (ret) {
567                 dev_err(dev, "error allocating groups of order %d\n",
568                         get_order(channel->group_size));
569                 goto out_free_group_info;
570         }
571
572         ret = poch_channel_alloc_header(channel);
573         if (ret) {
574                 dev_err(dev, "error allocating user space header\n");
575                 goto out_free_groups;
576         }
577
578         channel->fpga_iomem = poch_dev->fpga_iomem;
579         channel->bridge_iomem = poch_dev->bridge_iomem;
580         channel->iomem_lock = &poch_dev->iomem_lock;
581         spin_lock_init(&channel->counters_lock);
582
583         __poch_channel_clear_counters(channel);
584
585         printk(KERN_WARNING "poch_channel_init_header\n");
586
587         poch_channel_init_header(channel);
588
589         return 0;
590
591  out_free_groups:
592         poch_channel_free_groups(channel);
593  out_free_group_info:
594         kfree(channel->groups);
595  out:
596         return ret;
597 }
598
599 static int poch_wait_fpga_prog(void __iomem *bridge)
600 {
601         unsigned long total_wait;
602         const unsigned long wait_period = 100;
603         /* FIXME: Get the actual timeout */
604         const unsigned long prog_timeo = 10000; /* 10 Seconds */
605         u32 card_power;
606
607         printk(KERN_WARNING "poch_wait_fpg_prog\n");
608
609         printk(KERN_INFO PFX "programming fpga ...\n");
610         total_wait = 0;
611         while (1) {
612                 msleep(wait_period);
613                 total_wait += wait_period;
614
615                 card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
616                 if (card_power & BRIDGE_CARD_POWER_PROG_DONE) {
617                         printk(KERN_INFO PFX "programming done\n");
618                         return 0;
619                 }
620                 if (total_wait > prog_timeo) {
621                         printk(KERN_ERR PFX
622                                "timed out while programming FPGA\n");
623                         return -EIO;
624                 }
625         }
626 }
627
628 static void poch_card_power_off(struct poch_dev *poch_dev)
629 {
630         void __iomem *bridge = poch_dev->bridge_iomem;
631         u32 card_power;
632
633         iowrite32(0, bridge + BRIDGE_INT_MASK_REG);
634         iowrite32(0, bridge + BRIDGE_DMA_GO_REG);
635
636         card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
637         iowrite32(card_power & ~BRIDGE_CARD_POWER_EN,
638                   bridge + BRIDGE_CARD_POWER_REG);
639 }
640
641 enum clk_src {
642         CLK_SRC_ON_BOARD,
643         CLK_SRC_EXTERNAL
644 };
645
646 static void poch_card_clock_on(void __iomem *fpga)
647 {
648         /* FIXME: Get this data through sysfs? */
649         enum clk_src clk_src = CLK_SRC_ON_BOARD;
650
651         if (clk_src == CLK_SRC_ON_BOARD) {
652                 iowrite32(FPGA_ADC_CLOCK_LOCAL_CLK | FPGA_ADC_CLOCK_CTL_OSC_EN,
653                           fpga + FPGA_ADC_CLOCK_CTL_REG);
654         } else if (clk_src == CLK_SRC_EXTERNAL) {
655                 iowrite32(FPGA_ADC_CLOCK_EXT_SAMP_CLK,
656                           fpga + FPGA_ADC_CLOCK_CTL_REG);
657         }
658 }
659
660 static int poch_card_power_on(struct poch_dev *poch_dev)
661 {
662         void __iomem *bridge = poch_dev->bridge_iomem;
663         void __iomem *fpga = poch_dev->fpga_iomem;
664
665         iowrite32(BRIDGE_CARD_POWER_EN, bridge + BRIDGE_CARD_POWER_REG);
666
667         if (poch_wait_fpga_prog(bridge) != 0) {
668                 poch_card_power_off(poch_dev);
669                 return -EIO;
670         }
671
672         poch_card_clock_on(fpga);
673
674         /* Sync to new clock, reset state machines, set DMA mode. */
675         iowrite32(FPGA_DOM_DCM_RESET | FPGA_DOM_SOFT_RESET
676                   | FPGA_DOM_DUAL_M_SG_DMA, fpga + FPGA_DOM_REG);
677
678         /* FIXME: The time required for sync. needs to be tuned. */
679         msleep(1000);
680
681         return 0;
682 }
683
684 static void poch_channel_analog_on(struct channel_info *channel)
685 {
686         void __iomem *fpga = channel->fpga_iomem;
687         u32 adc_dac_en;
688
689         spin_lock(channel->iomem_lock);
690         adc_dac_en = ioread32(fpga + FPGA_ADC_DAC_EN_REG);
691         switch (channel->chno) {
692         case CHNO_RX_CHANNEL:
693                 iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_ADC_OFF,
694                           fpga + FPGA_ADC_DAC_EN_REG);
695                 break;
696         case CHNO_TX_CHANNEL:
697                 iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_DAC_OFF,
698                           fpga + FPGA_ADC_DAC_EN_REG);
699                 break;
700         }
701         spin_unlock(channel->iomem_lock);
702 }
703
/*
 * open() handler for a channel device node.  Enforces a single opener
 * per channel, powers the card on for the first opener card-wide,
 * allocates the channel's DMA buffers, programs the DMA registers,
 * and (again only for the first opener) sets up the DMA controller
 * and interrupt masks.  Returns 0 or a negative errno.
 */
static int poch_open(struct inode *inode, struct file *filp)
{
	struct poch_dev *poch_dev;
	struct channel_info *channel;
	void __iomem *bridge;
	void __iomem *fpga;
	int chno;
	int usage;
	int ret;

	poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);
	bridge = poch_dev->bridge_iomem;
	fpga = poch_dev->fpga_iomem;

	/* Map the minor number onto one of this card's channels. */
	chno = iminor(inode) % poch_dev->nchannels;
	channel = &poch_dev->channels[chno];

	/* Allow only one opener per channel at a time. */
	if (!atomic_dec_and_test(&channel->free)) {
		atomic_inc(&channel->free);
		ret = -EBUSY;
		goto out;
	}

	usage = atomic_inc_return(&poch_dev->usage);

	printk(KERN_WARNING "poch_card_power_on\n");

	/* First opener card-wide powers the card on. */
	if (usage == 1) {
		ret = poch_card_power_on(poch_dev);
		if (ret)
			goto out_dec_usage;
	}

	printk(KERN_INFO "CardBus Bridge Revision: %x\n",
	       ioread32(bridge + BRIDGE_REV_REG));
	printk(KERN_INFO "CardBus Interface Revision: %x\n",
	       ioread32(fpga + FPGA_IFACE_REV_REG));

	channel->chno = chno;
	filp->private_data = channel;

	printk(KERN_WARNING "poch_channel_init\n");

	/* Allocates buffers; cleans up after itself on failure. */
	ret = poch_channel_init(channel, poch_dev);
	if (ret)
		goto out_power_off;

	poch_channel_analog_on(channel);

	printk(KERN_WARNING "channel_dma_init\n");

	channel_dma_init(channel);

	printk(KERN_WARNING "poch_channel_analog_on\n");

	if (usage == 1) {
		printk(KERN_WARNING "setting up DMA\n");

		/* Initialize DMA Controller. */
		iowrite32(FPGA_CAP_FIFO_REG, bridge + BRIDGE_STAT_2_REG);
		iowrite32(FPGA_DMA_DESC_1_REG, bridge + BRIDGE_STAT_3_REG);

		/* Read (and discard) the interrupt status registers —
		 * presumably read-to-clear; verify against the data
		 * sheet. */
		ioread32(fpga + FPGA_DMA_INT_STAT_REG);
		ioread32(fpga + FPGA_INT_STAT_REG);
		ioread32(bridge + BRIDGE_INT_STAT_REG);

		/* Initialize Interrupts. FIXME: Enable temperature
		 * handling We are enabling both Tx and Rx channel
		 * interrupts here. Do we need to enable interrupts
		 * only for the current channel? Anyways we won't get
		 * the interrupt unless the DMA is activated.
		 */
		iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);
		iowrite32(FPGA_INT_DMA_CORE
			  | FPGA_INT_PLL_UNLOCKED
			  | FPGA_INT_TX_FF_EMPTY
			  | FPGA_INT_RX_FF_EMPTY
			  | FPGA_INT_TX_FF_OVRFLW
			  | FPGA_INT_RX_FF_OVRFLW,
			  fpga + FPGA_INT_MASK_REG);
		iowrite32(FPGA_DMA_INT_RX | FPGA_DMA_INT_TX,
			  fpga + FPGA_DMA_INT_MASK_REG);
	}

	if (channel->dir == CHANNEL_DIR_TX) {
		/* Flush TX FIFO and output data from cardbus. */
		iowrite32(FPGA_TX_CTL_FIFO_FLUSH
			  | FPGA_TX_CTL_OUTPUT_CARDBUS,
			  fpga + FPGA_TX_CTL_REG);
	}

	atomic_inc(&channel->inited);

	return 0;

 out_power_off:
	/* Only the first opener powered the card on; mirror that here. */
	if (usage == 1)
		poch_card_power_off(poch_dev);
 out_dec_usage:
	atomic_dec(&poch_dev->usage);
	atomic_inc(&channel->free);
 out:
	return ret;
}
808
/*
 * release() handler: the last closer card-wide powers the card off;
 * then the channel's resources from poch_channel_init() are released
 * and the channel is marked free for the next opener.
 */
static int poch_release(struct inode *inode, struct file *filp)
{
	struct channel_info *channel = filp->private_data;
	struct poch_dev *poch_dev;
	int usage;

	poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);

	usage = atomic_dec_return(&poch_dev->usage);
	if (usage == 0) {
		printk(KERN_WARNING "poch_card_power_off\n");
		poch_card_power_off(poch_dev);
	}

	/* Undo poch_channel_init(): header pages, group buffers, then
	 * the group bookkeeping array. */
	atomic_dec(&channel->inited);
	poch_channel_free_header(channel);
	poch_channel_free_groups(channel);
	kfree(channel->groups);
	atomic_inc(&channel->free);

	return 0;
}
831
832 /*
833  * Map the header and the group buffers, to user space.
834  */
835 static int poch_mmap(struct file *filp, struct vm_area_struct *vma)
836 {
837         struct channel_info *channel = filp->private_data;
838
839         unsigned long start;
840         unsigned long size;
841
842         unsigned long group_pages;
843         unsigned long header_pages;
844         unsigned long total_group_pages;
845
846         int pg_num;
847         struct page *pg;
848
849         int i;
850         int ret;
851
852         printk(KERN_WARNING "poch_mmap\n");
853
854         if (vma->vm_pgoff) {
855                 printk(KERN_WARNING PFX "page offset: %lu\n", vma->vm_pgoff);
856                 return -EINVAL;
857         }
858
859         group_pages = npages(channel->group_size);
860         header_pages = npages(channel->header_size);
861         total_group_pages = group_pages * channel->group_count;
862
863         size = vma->vm_end - vma->vm_start;
864         if (size != (header_pages + total_group_pages) * PAGE_SIZE) {
865                 printk(KERN_WARNING PFX "required %lu bytes\n", size);
866                 return -EINVAL;
867         }
868
869         start = vma->vm_start;
870
871         /* FIXME: Cleanup required on failure? */
872         pg = channel->header_pg;
873         for (pg_num = 0; pg_num < header_pages; pg_num++, pg++) {
874                 printk(KERN_DEBUG PFX "page_count: %d\n", page_count(pg));
875                 printk(KERN_DEBUG PFX "%d: header: 0x%lx\n", pg_num, start);
876                 ret = vm_insert_page(vma, start, pg);
877                 if (ret) {
878                         printk(KERN_DEBUG "vm_insert 1 failed at %lx\n", start);
879                         return ret;
880                 }
881                 start += PAGE_SIZE;
882         }
883
884         for (i = 0; i < channel->group_count; i++) {
885                 pg = channel->groups[i].pg;
886                 for (pg_num = 0; pg_num < group_pages; pg_num++, pg++) {
887                         printk(KERN_DEBUG PFX "%d: group %d: 0x%lx\n",
888                                pg_num, i, start);
889                         ret = vm_insert_page(vma, start, pg);
890                         if (ret) {
891                                 printk(KERN_DEBUG PFX
892                                        "vm_insert 2 failed at %d\n", pg_num);
893                                 return ret;
894                         }
895                         start += PAGE_SIZE;
896                 }
897         }
898
899         return 0;
900 }
901
/*
 * Check whether there is some group that the user space has not
 * consumed yet. When the user space consumes a group, it sets it to
 * -1. Consuming could be reading data in case of RX and filling a
 * buffer in case of TX.
 */
908 static int poch_channel_available(struct channel_info *channel)
909 {
910         int i;
911
912         spin_lock_irq(&channel->group_offsets_lock);
913
914         for (i = 0; i < channel->group_count; i++) {
915                 if (channel->dir == CHANNEL_DIR_RX
916                     && channel->header->group_offsets[i] == -1) {
917                         spin_unlock_irq(&channel->group_offsets_lock);
918                         return 1;
919                 }
920
921                 if (channel->dir == CHANNEL_DIR_TX
922                     && channel->header->group_offsets[i] != -1) {
923                         spin_unlock_irq(&channel->group_offsets_lock);
924                         return 1;
925                 }
926         }
927
928         spin_unlock_irq(&channel->group_offsets_lock);
929
930         return 0;
931 }
932
933 static unsigned int poch_poll(struct file *filp, poll_table *pt)
934 {
935         struct channel_info *channel = filp->private_data;
936         unsigned int ret = 0;
937
938         poll_wait(filp, &channel->wq, pt);
939
940         if (poch_channel_available(channel)) {
941                 if (channel->dir == CHANNEL_DIR_RX)
942                         ret = POLLIN | POLLRDNORM;
943                 else
944                         ret = POLLOUT | POLLWRNORM;
945         }
946
947         return ret;
948 }
949
950 static int poch_ioctl(struct inode *inode, struct file *filp,
951                       unsigned int cmd, unsigned long arg)
952 {
953         struct channel_info *channel = filp->private_data;
954         void __iomem *fpga = channel->fpga_iomem;
955         void __iomem *bridge = channel->bridge_iomem;
956         void __user *argp = (void __user *)arg;
957         struct vm_area_struct *vms;
958         struct poch_counters counters;
959         int ret;
960
961         switch (cmd) {
962         case POCH_IOC_TRANSFER_START:
963                 switch (channel->chno) {
964                 case CHNO_TX_CHANNEL:
965                         printk(KERN_INFO PFX "ioctl: Tx start\n");
966                         iowrite32(0x1, fpga + FPGA_TX_TRIGGER_REG);
967                         iowrite32(0x1, fpga + FPGA_TX_ENABLE_REG);
968
969                         /* FIXME: Does it make sense to do a DMA GO
970                          * twice, once in Tx and once in Rx.
971                          */
972                         iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
973                         break;
974                 case CHNO_RX_CHANNEL:
975                         printk(KERN_INFO PFX "ioctl: Rx start\n");
976                         iowrite32(0x1, fpga + FPGA_RX_ARM_REG);
977                         iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
978                         break;
979                 }
980                 break;
981         case POCH_IOC_TRANSFER_STOP:
982                 switch (channel->chno) {
983                 case CHNO_TX_CHANNEL:
984                         printk(KERN_INFO PFX "ioctl: Tx stop\n");
985                         iowrite32(0x0, fpga + FPGA_TX_ENABLE_REG);
986                         iowrite32(0x0, fpga + FPGA_TX_TRIGGER_REG);
987                         iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
988                         break;
989                 case CHNO_RX_CHANNEL:
990                         printk(KERN_INFO PFX "ioctl: Rx stop\n");
991                         iowrite32(0x0, fpga + FPGA_RX_ARM_REG);
992                         iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
993                         break;
994                 }
995                 break;
996         case POCH_IOC_GET_COUNTERS:
997                 if (access_ok(VERIFY_WRITE, argp, sizeof(struct poch_counters)))
998                         return -EFAULT;
999
1000                 spin_lock_irq(&channel->counters_lock);
1001                 counters = channel->counters;
1002                 __poch_channel_clear_counters(channel);
1003                 spin_unlock_irq(&channel->counters_lock);
1004
1005                 ret = copy_to_user(argp, &counters,
1006                                    sizeof(struct poch_counters));
1007                 if (ret)
1008                         return ret;
1009
1010                 break;
1011         case POCH_IOC_SYNC_GROUP_FOR_USER:
1012         case POCH_IOC_SYNC_GROUP_FOR_DEVICE:
1013                 vms = find_vma(current->mm, arg);
1014                 if (!vms)
1015                         /* Address not mapped. */
1016                         return -EINVAL;
1017                 if (vms->vm_file != filp)
1018                         /* Address mapped from different device/file. */
1019                         return -EINVAL;
1020
1021                 flush_cache_range(vms, arg, arg + channel->group_size);
1022                 break;
1023         }
1024         return 0;
1025 }
1026
1027 static struct file_operations poch_fops = {
1028         .owner = THIS_MODULE,
1029         .open = poch_open,
1030         .release = poch_release,
1031         .ioctl = poch_ioctl,
1032         .poll = poch_poll,
1033         .mmap = poch_mmap
1034 };
1035
/*
 * Per-channel DMA completion handling, called from the interrupt
 * handler.  Computes how many groups the hardware has finished since
 * the last interrupt, publishes them to user space through the shared
 * group-offsets table, and wakes any waiters in poll().
 */
static void poch_irq_dma(struct channel_info *channel)
{
	u32 prev_transfer;
	u32 curr_transfer;
	long groups_done;
	unsigned long i, j;
	struct poch_group_info *groups;
	s32 *group_offsets;
	u32 curr_group_reg;

	/* Channel buffers may not be set up yet; nothing to do. */
	if (!atomic_read(&channel->inited))
		return;

	prev_transfer = channel->transfer;

	/* The current-group register differs per direction. */
	if (channel->chno == CHNO_RX_CHANNEL)
		curr_group_reg = FPGA_RX_CURR_GROUP_REG;
	else
		curr_group_reg = FPGA_TX_CURR_GROUP_REG;

	curr_transfer = ioread32(channel->fpga_iomem + curr_group_reg);

	groups_done = curr_transfer - prev_transfer;
	/* Check wrap over, and handle it. */
	if (groups_done <= 0)
		groups_done += channel->group_count;

	group_offsets = channel->header->group_offsets;
	groups = channel->groups;

	spin_lock(&channel->group_offsets_lock);

	/* Mark each completed group in the shared header.  RX: -1 means
	 * "data ready for user space" (see poch_channel_available());
	 * TX: the group's user offset marks it free to be refilled. */
	for (i = 0; i < groups_done; i++) {
		j = (prev_transfer + i) % channel->group_count;
		if (channel->dir == CHANNEL_DIR_RX)
			group_offsets[j] = -1;
		else
			group_offsets[j] = groups[j].user_offset;
	}

	spin_unlock(&channel->group_offsets_lock);

	channel->transfer = curr_transfer;

	wake_up_interruptible(&channel->wq);
}
1082
/*
 * Shared interrupt handler for the card.  Reads the bridge and FPGA
 * status registers, dispatches DMA completions to the per-channel
 * handler, and counts FIFO/PLL error conditions.
 */
static irqreturn_t poch_irq_handler(int irq, void *p)
{
	struct poch_dev *poch_dev = p;
	void __iomem *bridge = poch_dev->bridge_iomem;
	void __iomem *fpga = poch_dev->fpga_iomem;
	struct channel_info *channel_rx = &poch_dev->channels[CHNO_RX_CHANNEL];
	struct channel_info *channel_tx = &poch_dev->channels[CHNO_TX_CHANNEL];
	u32 bridge_stat;
	u32 fpga_stat;
	u32 dma_stat;

	bridge_stat = ioread32(bridge + BRIDGE_INT_STAT_REG);
	fpga_stat = ioread32(fpga + FPGA_INT_STAT_REG);
	dma_stat = ioread32(fpga + FPGA_DMA_INT_STAT_REG);

	/* NOTE(review): the status registers are read back a second
	 * time, presumably to acknowledge/clear the interrupt
	 * condition (read-to-clear hardware) -- confirm against the
	 * card documentation before reordering. */
	ioread32(fpga + FPGA_DMA_INT_STAT_REG);
	ioread32(fpga + FPGA_INT_STAT_REG);
	ioread32(bridge + BRIDGE_INT_STAT_REG);

	if (bridge_stat & BRIDGE_INT_FPGA) {
		if (fpga_stat & FPGA_INT_DMA_CORE) {
			if (dma_stat & FPGA_DMA_INT_RX)
				poch_irq_dma(channel_rx);
			if (dma_stat & FPGA_DMA_INT_TX)
				poch_irq_dma(channel_tx);
		}
		if (fpga_stat & FPGA_INT_PLL_UNLOCKED) {
			channel_tx->counters.pll_unlock++;
			channel_rx->counters.pll_unlock++;
			if (printk_ratelimit())
				printk(KERN_WARNING PFX "PLL unlocked\n");
		}
		if (fpga_stat & FPGA_INT_TX_FF_EMPTY)
			channel_tx->counters.fifo_empty++;
		if (fpga_stat & FPGA_INT_TX_FF_OVRFLW)
			channel_tx->counters.fifo_overflow++;
		if (fpga_stat & FPGA_INT_RX_FF_EMPTY)
			channel_rx->counters.fifo_empty++;
		if (fpga_stat & FPGA_INT_RX_FF_OVRFLW)
			channel_rx->counters.fifo_overflow++;

		/*
		 * FIXME: These errors should be notified through the
		 * poll interface as POLLERR.
		 */

		/* Re-enable interrupts. */
		iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
1137
1138 static void poch_class_dev_unregister(struct poch_dev *poch_dev, int id)
1139 {
1140         int i, j;
1141         int nattrs;
1142         struct channel_info *channel;
1143         dev_t devno;
1144
1145         if (poch_dev->dev == NULL)
1146                 return;
1147
1148         for (i = 0; i < poch_dev->nchannels; i++) {
1149                 channel = &poch_dev->channels[i];
1150                 devno = poch_first_dev + (id * poch_dev->nchannels) + i;
1151
1152                 if (!channel->dev)
1153                         continue;
1154
1155                 nattrs = sizeof(poch_class_attrs)/sizeof(poch_class_attrs[0]);
1156                 for (j = 0; j < nattrs; j++)
1157                         device_remove_file(channel->dev, poch_class_attrs[j]);
1158
1159                 device_unregister(channel->dev);
1160         }
1161
1162         device_unregister(poch_dev->dev);
1163 }
1164
1165 static int __devinit poch_class_dev_register(struct poch_dev *poch_dev,
1166                                              int id)
1167 {
1168         struct device *dev = &poch_dev->pci_dev->dev;
1169         int i, j;
1170         int nattrs;
1171         int ret;
1172         struct channel_info *channel;
1173         dev_t devno;
1174
1175         poch_dev->dev = device_create(poch_cls, &poch_dev->pci_dev->dev,
1176                                       MKDEV(0, 0), NULL, "poch%d", id);
1177         if (IS_ERR(poch_dev->dev)) {
1178                 dev_err(dev, "error creating parent class device");
1179                 ret = PTR_ERR(poch_dev->dev);
1180                 poch_dev->dev = NULL;
1181                 return ret;
1182         }
1183
1184         for (i = 0; i < poch_dev->nchannels; i++) {
1185                 channel = &poch_dev->channels[i];
1186
1187                 devno = poch_first_dev + (id * poch_dev->nchannels) + i;
1188                 channel->dev = device_create(poch_cls, poch_dev->dev, devno,
1189                                              NULL, "ch%d", i);
1190                 if (IS_ERR(channel->dev)) {
1191                         dev_err(dev, "error creating channel class device");
1192                         ret = PTR_ERR(channel->dev);
1193                         channel->dev = NULL;
1194                         poch_class_dev_unregister(poch_dev, id);
1195                         return ret;
1196                 }
1197
1198                 dev_set_drvdata(channel->dev, channel);
1199                 nattrs = sizeof(poch_class_attrs)/sizeof(poch_class_attrs[0]);
1200                 for (j = 0; j < nattrs; j++) {
1201                         ret = device_create_file(channel->dev,
1202                                                  poch_class_attrs[j]);
1203                         if (ret) {
1204                                 dev_err(dev, "error creating attribute file");
1205                                 poch_class_dev_unregister(poch_dev, id);
1206                                 return ret;
1207                         }
1208                 }
1209         }
1210
1211         return 0;
1212 }
1213
1214 static int __devinit poch_pci_probe(struct pci_dev *pdev,
1215                                     const struct pci_device_id *pci_id)
1216 {
1217         struct device *dev = &pdev->dev;
1218         struct poch_dev *poch_dev;
1219         struct uio_info *uio;
1220         int ret;
1221         int id;
1222         int i;
1223
1224         poch_dev = kzalloc(sizeof(struct poch_dev), GFP_KERNEL);
1225         if (!poch_dev) {
1226                 dev_err(dev, "error allocating priv. data memory\n");
1227                 return -ENOMEM;
1228         }
1229
1230         poch_dev->pci_dev = pdev;
1231         uio = &poch_dev->uio;
1232
1233         pci_set_drvdata(pdev, poch_dev);
1234
1235         spin_lock_init(&poch_dev->iomem_lock);
1236
1237         poch_dev->nchannels = POCH_NCHANNELS;
1238         poch_dev->channels[CHNO_RX_CHANNEL].dir = CHANNEL_DIR_RX;
1239         poch_dev->channels[CHNO_TX_CHANNEL].dir = CHANNEL_DIR_TX;
1240
1241         for (i = 0; i < poch_dev->nchannels; i++) {
1242                 init_waitqueue_head(&poch_dev->channels[i].wq);
1243                 atomic_set(&poch_dev->channels[i].free, 1);
1244                 atomic_set(&poch_dev->channels[i].inited, 0);
1245         }
1246
1247         ret = pci_enable_device(pdev);
1248         if (ret) {
1249                 dev_err(dev, "error enabling device\n");
1250                 goto out_free;
1251         }
1252
1253         ret = pci_request_regions(pdev, "poch");
1254         if (ret) {
1255                 dev_err(dev, "error requesting resources\n");
1256                 goto out_disable;
1257         }
1258
1259         uio->mem[0].addr = pci_resource_start(pdev, 1);
1260         if (!uio->mem[0].addr) {
1261                 dev_err(dev, "invalid BAR1\n");
1262                 ret = -ENODEV;
1263                 goto out_release;
1264         }
1265
1266         uio->mem[0].size = pci_resource_len(pdev, 1);
1267         uio->mem[0].memtype = UIO_MEM_PHYS;
1268
1269         uio->name = "poch";
1270         uio->version = "0.0.1";
1271         uio->irq = -1;
1272         ret = uio_register_device(dev, uio);
1273         if (ret) {
1274                 dev_err(dev, "error register UIO device: %d\n", ret);
1275                 goto out_release;
1276         }
1277
1278         poch_dev->bridge_iomem = ioremap(pci_resource_start(pdev, 0),
1279                                          pci_resource_len(pdev, 0));
1280         if (poch_dev->bridge_iomem == NULL) {
1281                 dev_err(dev, "error mapping bridge (bar0) registers\n");
1282                 ret = -ENOMEM;
1283                 goto out_uio_unreg;
1284         }
1285
1286         poch_dev->fpga_iomem = ioremap(pci_resource_start(pdev, 1),
1287                                        pci_resource_len(pdev, 1));
1288         if (poch_dev->fpga_iomem == NULL) {
1289                 dev_err(dev, "error mapping fpga (bar1) registers\n");
1290                 ret = -ENOMEM;
1291                 goto out_bar0_unmap;
1292         }
1293
1294         ret = request_irq(pdev->irq, poch_irq_handler, IRQF_SHARED,
1295                           dev->bus_id, poch_dev);
1296         if (ret) {
1297                 dev_err(dev, "error requesting IRQ %u\n", pdev->irq);
1298                 ret = -ENOMEM;
1299                 goto out_bar1_unmap;
1300         }
1301
1302         if (!idr_pre_get(&poch_ids, GFP_KERNEL)) {
1303                 dev_err(dev, "error allocating memory ids\n");
1304                 ret = -ENOMEM;
1305                 goto out_free_irq;
1306         }
1307
1308         idr_get_new(&poch_ids, poch_dev, &id);
1309         if (id >= MAX_POCH_CARDS) {
1310                 dev_err(dev, "minors exhausted\n");
1311                 ret = -EBUSY;
1312                 goto out_free_irq;
1313         }
1314
1315         cdev_init(&poch_dev->cdev, &poch_fops);
1316         poch_dev->cdev.owner = THIS_MODULE;
1317         ret = cdev_add(&poch_dev->cdev,
1318                        poch_first_dev + (id * poch_dev->nchannels),
1319                        poch_dev->nchannels);
1320         if (ret) {
1321                 dev_err(dev, "error register character device\n");
1322                 goto out_idr_remove;
1323         }
1324
1325         ret = poch_class_dev_register(poch_dev, id);
1326         if (ret)
1327                 goto out_cdev_del;
1328
1329         return 0;
1330
1331  out_cdev_del:
1332         cdev_del(&poch_dev->cdev);
1333  out_idr_remove:
1334         idr_remove(&poch_ids, id);
1335  out_free_irq:
1336         free_irq(pdev->irq, poch_dev);
1337  out_bar1_unmap:
1338         iounmap(poch_dev->fpga_iomem);
1339  out_bar0_unmap:
1340         iounmap(poch_dev->bridge_iomem);
1341  out_uio_unreg:
1342         uio_unregister_device(uio);
1343  out_release:
1344         pci_release_regions(pdev);
1345  out_disable:
1346         pci_disable_device(pdev);
1347  out_free:
1348         kfree(poch_dev);
1349         return ret;
1350 }
1351
1352 /*
1353  * FIXME: We are yet to handle the hot unplug case.
1354  */
1355 static void poch_pci_remove(struct pci_dev *pdev)
1356 {
1357         struct poch_dev *poch_dev = pci_get_drvdata(pdev);
1358         struct uio_info *uio = &poch_dev->uio;
1359         unsigned int minor = MINOR(poch_dev->cdev.dev);
1360         unsigned int id = minor / poch_dev->nchannels;
1361
1362         /* FIXME: unmap fpga_iomem and bridge_iomem */
1363
1364         poch_class_dev_unregister(poch_dev, id);
1365         cdev_del(&poch_dev->cdev);
1366         idr_remove(&poch_ids, id);
1367         free_irq(pdev->irq, poch_dev);
1368         uio_unregister_device(uio);
1369         pci_release_regions(pdev);
1370         pci_disable_device(pdev);
1371         pci_set_drvdata(pdev, NULL);
1372         iounmap(uio->mem[0].internal_addr);
1373
1374         kfree(poch_dev);
1375 }
1376
/* PCI IDs handled by this driver.
 * NOTE(review): there is no MODULE_DEVICE_TABLE(pci, ...), so the
 * module will not be auto-loaded on hotplug -- confirm intentional. */
static const struct pci_device_id poch_pci_ids[] /* __devinitconst */ = {
	{ PCI_DEVICE(PCI_VENDOR_ID_RRAPIDS,
		     PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE) },
	{ 0, }
};
1382
/* PCI driver glue: probe/remove for Pocket Change cards. */
static struct pci_driver poch_pci_driver = {
	.name = DRV_NAME,
	.id_table = poch_pci_ids,
	.probe = poch_pci_probe,
	.remove = poch_pci_remove,
};
1389
1390 static int __init poch_init_module(void)
1391 {
1392         int ret = 0;
1393
1394         ret = alloc_chrdev_region(&poch_first_dev, 0,
1395                                   MAX_POCH_DEVICES, DRV_NAME);
1396         if (ret) {
1397                 printk(KERN_ERR PFX "error allocating device no.");
1398                 return ret;
1399         }
1400
1401         poch_cls = class_create(THIS_MODULE, "pocketchange");
1402         if (IS_ERR(poch_cls)) {
1403                 ret = PTR_ERR(poch_cls);
1404                 goto out_unreg_chrdev;
1405         }
1406
1407         ret = pci_register_driver(&poch_pci_driver);
1408         if (ret) {
1409                 printk(KERN_ERR PFX "error register PCI device");
1410                 goto out_class_destroy;
1411         }
1412
1413         return 0;
1414
1415  out_class_destroy:
1416         class_destroy(poch_cls);
1417
1418  out_unreg_chrdev:
1419         unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
1420
1421         return ret;
1422 }
1423
/* Module exit: undo poch_init_module() in reverse order. */
static void __exit poch_exit_module(void)
{
	pci_unregister_driver(&poch_pci_driver);
	class_destroy(poch_cls);
	unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
}
1430
module_init(poch_init_module);
module_exit(poch_exit_module);

/* Matches the "GPL version 2 only" notice in the file header. */
MODULE_LICENSE("GPL v2");