[MTD] cfi_cmdset_0001: Fix the buggy status check.
[safe/jmp/linux-2.6] drivers/mtd/chips/cfi_cmdset_0001.c
/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.169 2005/03/15 19:07:18 gleixner Exp $
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL      0x0089
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F

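/*
 * Quick legend of the Intel/Sharp opcodes used throughout this file
 * (standard command set 0001 codes; the CMD() macro from cfi.h
 * replicates the value across the full bus/interleave width):
 *
 *   0x40 word program           0xc0 OTP/protection program
 *   0xe8 write to buffer        0x20 block erase setup
 *   0xd0 confirm / resume       0xb0 program/erase suspend
 *   0x70 read status register   0x50 clear status register
 *   0xff read array mode
 *
 * Status register bit 7 (0x80) is the "write state machine ready" bit
 * that the wait loops below poll for.
 */
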
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
                        size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        for (i=10; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
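        /* The low 16 bits of EraseRegionInfo hold (number of blocks - 1),
           as decoded in cfi_intelext_setup() below, so the 0x3e written
           here makes the second region report 63 erase blocks. */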
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n");
                mtd->write = cfi_intelext_write_buffers;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { 0, 0, NULL, NULL }
};

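/*
 * A rough sketch of how these tables are consumed: the cfi_fixup()
 * helper in the shared CFI support code compares each { manufacturer,
 * device, handler, param } entry against the probed cfi->mfr/cfi->id,
 * with CFI_MFR_ANY/CFI_ID_ANY acting as wildcards, and runs the handler
 * for every match.  cfi_cmdset_0001() below applies cfi_fixup_table to
 * real CFI chips, jedec_fixup_table to JEDEC-probed ones, and
 * fixup_table to both.
 */
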
static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It is likely that the device IDs are
         * as well.  This table is to pick up all cases where
         * we know that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);

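        /* A version 1.3 extension table carries a variable-length tail
           (per-field OTP info, burst read info, per-region partition
           info) whose size is only known once parsed.  So: read
           sizeof(*extp) bytes, parse, and whenever the buffer turns out
           too small, free it and re-read with the larger size, capped
           at 4KiB ("too fat"). */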
 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                extra_size += 6;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __FUNCTION__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        memset(mtd, 0, sizeof(*mtd));
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

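        /* The CFI timing fields are log2-encoded: typical word and
           buffer program times are 2^n microseconds and the typical
           block erase time is 2^n milliseconds (per the CFI spec),
           hence the 1<< shifts below. */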
        for (i=0; i< cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
                cfi->chips[i].ref_point_counter = 0;
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
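                /* EraseRegionInfo decoding (per the CFI spec): bits
                   31..16 hold the block size in 256-byte units, bits
                   15..0 hold (number of blocks - 1).  "(x >> 8) & ~0xff"
                   is just (x >> 16) * 256, and the interleave factor
                   scales one block across all interleaved chips. */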
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        return mtd;

 setup_err:
        if(mtd) {
                if(mtd->eraseregions)
                        kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += 6;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);
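                /* Note: __ffs(numparts) equals log2(numparts) only when
                   numparts is a power of two, which is the implicit
                   assumption here; each hardware partition then spans
                   1 << partshift bytes of a chip. */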

                if ((1 << partshift) < mtd->erasesize) {
                        printk(KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __FUNCTION__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

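/*
 * A minimal sketch of the calling convention shared by the access paths
 * below (compare do_read_onechip() and do_write_oneword()):
 *
 *      spin_lock(chip->mutex);
 *      ret = get_chip(map, chip, cmd_addr, FL_READY);  (or FL_WRITING etc.)
 *      if (ret) { spin_unlock(chip->mutex); return ret; }
 *      ... issue commands / access the array ...
 *      put_chip(map, chip, cmd_addr);
 *      spin_unlock(chip->mutex);
 *
 * get_chip() may drop and retake chip->mutex while waiting its turn, and
 * put_chip() resumes whatever operation get_chip() had to suspend.
 */
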
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        unsigned long timeo;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
                /*
                 * OK. We have the possibility of contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read when its lock is taken.
                 * However any writes to it can only be made when the current
                 * owner's lock is also held.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        int ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = get_chip(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        timeo = jiffies + HZ;
                        spin_lock(&shared->lock);
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                if (contender && contender != chip)
                        spin_unlock(contender->mutex);
                spin_unlock(&shared->lock);
        }

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
                                       status.x[0]);
                                return -EIO;
                        }
                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;

                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;

        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                goto resettime;
        }
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 * Note that not only IRQs are disabled but the preemption count is also
 * increased to prevent other locking primitives (namely spin_unlock) from
 * decrementing the preempt count to zero and scheduling the CPU away while
 * not in array mode.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        preempt_disable();
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        asm volatile (".rep 8; nop; .endr"); /* fill instruction prefetch */
        local_irq_enable();
        preempt_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
                                unsigned long adr, int usec)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long suspended, start = xip_currtime();
        flstate_t oldstate, newstate;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        usec -= xip_elapsed_since(start);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        asm volatile (".rep 8; nop; .endr");
                        local_irq_enable();
                        preempt_enable();
                        asm volatile (".rep 8; nop; .endr");
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        preempt_disable();
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                preempt_enable();
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                preempt_disable();
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
        } while (!map_word_andequal(map, status, OK, OK)
                 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case.
 */
#undef INVALIDATE_CACHED_RANGE
#define INVALIDATE_CACHED_RANGE(x...)
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
        do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d  areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)

#define UDELAY(map, chip, adr, usec)  cfi_udelay(usec)

#define XIP_INVAL_CACHED_RANGE(x...)

#endif

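/*
 * point/unpoint: on a directly mapped (linear) chip these hand callers a
 * pointer straight into the flash array instead of copying through a
 * buffer (see fixup_use_point() above).  chip->ref_point_counter counts
 * nested point() calls so the chip only leaves FL_POINT when the last
 * user calls unpoint().
 */
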
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);

        ret = get_chip(map, chip, cmd_addr, FL_POINT);

        if (!ret) {
                if (chip->state != FL_POINT && chip->state != FL_READY)
                        map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_POINT;
                chip->ref_point_counter++;
        }
        spin_unlock(chip->mutex);

        return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        if (!map->virt || (from + len > mtd->size))
                return -EINVAL;

        *mtdbuf = (void *)map->virt + from;
        *retlen = 0;

        /* Now lock the chip(s) to POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;

                ofs = 0;
                chipnum++;
        }
        return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;

        /* Now unlock the chip(s) POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);

        while (len) {
                unsigned long thislen;
                struct flchip *chip;

                if (chipnum >= cfi->numchips)
                        break;
                chip = &cfi->chips[chipnum];

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                spin_lock(chip->mutex);
                if (chip->state == FL_POINT) {
                        chip->ref_point_counter--;
                        if(chip->ref_point_counter == 0)
                                chip->state = FL_READY;
                } else
                        printk(KERN_ERR "Warning: unpoint called on non-pointed region\n"); /* Should this give an error? */

                put_chip(map, chip, chip->start);
                spin_unlock(chip->mutex);

                len -= thislen;
                ofs = 0;
                chipnum++;
        }
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, cmd_addr, FL_READY);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_READY;
        }

        map_copy_from(map, buf, adr, len);

        put_chip(map, chip, cmd_addr);

        spin_unlock(chip->mutex);
        return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

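        /* cfi->chipshift is log2 of a single chip's address span, so
           splitting 'from' into (chipnum, ofs) and clamping each pass
           at a chip boundary lets one request walk a multi-chip map. */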
        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);

        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
                                     unsigned long adr, map_word datum, int mode)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK, write_cmd;
        unsigned long timeo;
        int z, ret=0;

        adr += chip->start;

        /* Let's determine this according to the interleave only once */
        status_OK = CMD(0x80);
        switch (mode) {
        case FL_WRITING:   write_cmd = CMD(0x40); break;
        case FL_OTP_WRITE: write_cmd = CMD(0xc0); break;
        default: return -EINVAL;
        }

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, adr, mode);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
        ENABLE_VPP(map);
        xip_disable(map, chip, adr);
        map_write(map, write_cmd, adr);
        map_write(map, datum, adr);
        chip->state = mode;

        spin_unlock(chip->mutex);
        INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
        UDELAY(map, chip, adr, chip->word_write_time);
        spin_lock(chip->mutex);

        timeo = jiffies + (HZ/2);
        z = 0;
        for (;;) {
                if (chip->state != mode) {
                        /* Someone's suspended the write. Sleep */
                        DECLARE_WAITQUEUE(wait, current);

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        timeo = jiffies + (HZ / 2); /* FIXME */
                        spin_lock(chip->mutex);
                        continue;
                }

                status = map_read(map, adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                /* OK Still waiting */
                if (time_after(jiffies, timeo)) {
                        chip->state = FL_STATUS;
                        xip_enable(map, chip, adr);
                        printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
                        ret = -EIO;
                        goto out;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock(chip->mutex);
                z++;
                UDELAY(map, chip, adr, 1);
                spin_lock(chip->mutex);
        }
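        /* Crude adaptive tuning of the per-chip typical word write time:
           if the first wait already found the chip ready (z == 0), shave
           a microsecond off the estimate (never below 1); if more than
           one extra poll was needed, bump it back up. */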
        if (!z) {
                chip->word_write_time--;
                if (!chip->word_write_time)
                        chip->word_write_time++;
        }
        if (z > 1)
                chip->word_write_time++;

        /* Done and happy. */
        chip->state = FL_STATUS;

        /* check for lock bit */
        if (map_word_bitsset(map, status, CMD(0x02))) {
                /* clear status */
                map_write(map, CMD(0x50), adr);
                /* put back into read status register mode */
                map_write(map, CMD(0x70), adr);
                ret = -EROFS;
        }

        xip_enable(map, chip, adr);
 out:   put_chip(map, chip, adr);
        spin_unlock(chip->mutex);

        return ret;
}
1290
1291
1292 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1293 {
1294         struct map_info *map = mtd->priv;
1295         struct cfi_private *cfi = map->fldrv_priv;
1296         int ret = 0;
1297         int chipnum;
1298         unsigned long ofs;
1299
1300         *retlen = 0;
1301         if (!len)
1302                 return 0;
1303
1304         chipnum = to >> cfi->chipshift;
1305         ofs = to  - (chipnum << cfi->chipshift);
1306
1307         /* If it's not bus-aligned, do the first byte write */
1308         if (ofs & (map_bankwidth(map)-1)) {
1309                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1310                 int gap = ofs - bus_ofs;
1311                 int n;
1312                 map_word datum;
1313
1314                 n = min_t(int, len, map_bankwidth(map)-gap);
1315                 datum = map_word_ff(map);
1316                 datum = map_word_load_partial(map, datum, buf, gap, n);
1317
1318                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1319                                                bus_ofs, datum, FL_WRITING);
1320                 if (ret) 
1321                         return ret;
1322
1323                 len -= n;
1324                 ofs += n;
1325                 buf += n;
1326                 (*retlen) += n;
1327
1328                 if (ofs >> cfi->chipshift) {
1329                         chipnum ++; 
1330                         ofs = 0;
1331                         if (chipnum == cfi->numchips)
1332                                 return 0;
1333                 }
1334         }
1335         
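        /* Now program the bus-aligned middle of the request one word at a time. */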
1336         while (len >= map_bankwidth(map)) {
1337                 map_word datum = map_word_load(map, buf);
1338
1339                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1340                                        ofs, datum, FL_WRITING);
1341                 if (ret)
1342                         return ret;
1343
1344                 ofs += map_bankwidth(map);
1345                 buf += map_bankwidth(map);
1346                 (*retlen) += map_bankwidth(map);
1347                 len -= map_bankwidth(map);
1348
1349                 if (ofs >> cfi->chipshift) {
1350                         chipnum ++; 
1351                         ofs = 0;
1352                         if (chipnum == cfi->numchips)
1353                                 return 0;
1354                 }
1355         }
1356
1357         if (len & (map_bankwidth(map)-1)) {
1358                 map_word datum;
1359
1360                 datum = map_word_ff(map);
1361                 datum = map_word_load_partial(map, datum, buf, 0, len);
1362
1363                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1364                                        ofs, datum, FL_WRITING);
1365                 if (ret) 
1366                         return ret;
1367                 
1368                 (*retlen) += len;
1369         }
1370
1371         return 0;
1372 }
1373
1374
1375 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, 
1376                                     unsigned long adr, const u_char *buf, int len)
1377 {
1378         struct cfi_private *cfi = map->fldrv_priv;
1379         map_word status, status_OK;
1380         unsigned long cmd_adr, timeo;
1381         int wbufsize, z, ret=0, bytes, words;
1382
1383         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1384         adr += chip->start;
1385         cmd_adr = adr & ~(wbufsize-1);
1386         
1387         /* Let's determine this according to the interleave only once */
1388         status_OK = CMD(0x80);
1389
1390         spin_lock(chip->mutex);
1391         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1392         if (ret) {
1393                 spin_unlock(chip->mutex);
1394                 return ret;
1395         }
1396
1397         XIP_INVAL_CACHED_RANGE(map, adr, len);
1398         ENABLE_VPP(map);
1399         xip_disable(map, chip, cmd_adr);
1400
1401         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1402            [...], the device will not accept any more Write to Buffer commands". 
1403            So we must check here and reset those bits if they're set. Otherwise
1404            we're just pissing in the wind */
1405         if (chip->state != FL_STATUS)
1406                 map_write(map, CMD(0x70), cmd_adr);
1407         status = map_read(map, cmd_adr);
1408         if (map_word_bitsset(map, status, CMD(0x30))) {
1409                 xip_enable(map, chip, cmd_adr);
1410                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1411                 xip_disable(map, chip, cmd_adr);
1412                 map_write(map, CMD(0x50), cmd_adr);
1413                 map_write(map, CMD(0x70), cmd_adr);
1414         }
1415
1416         chip->state = FL_WRITING_TO_BUFFER;
1417
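        /*
         * Issue the Write-to-Buffer (0xE8) command.  While this command is
         * pending, reads return the extended status register; XSR.7 set
         * means a write buffer is available.  Give up after ~20 polls.
         */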
1418         z = 0;
1419         for (;;) {
1420                 map_write(map, CMD(0xe8), cmd_adr);
1421
1422                 status = map_read(map, cmd_adr);
1423                 if (map_word_andequal(map, status, status_OK, status_OK))
1424                         break;
1425
1426                 spin_unlock(chip->mutex);
1427                 UDELAY(map, chip, cmd_adr, 1);
1428                 spin_lock(chip->mutex);
1429
1430                 if (++z > 20) {
1431                         /* Argh. Not ready for write to buffer */
1432                         map_word Xstatus;
1433                         map_write(map, CMD(0x70), cmd_adr);
1434                         chip->state = FL_STATUS;
1435                         Xstatus = map_read(map, cmd_adr);
1436                         /* Odd. Clear status bits */
1437                         map_write(map, CMD(0x50), cmd_adr);
1438                         map_write(map, CMD(0x70), cmd_adr);
1439                         xip_enable(map, chip, cmd_adr);
1440                         printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
1441                                status.x[0], Xstatus.x[0]);
1442                         ret = -EIO;
1443                         goto out;
1444                 }
1445         }
1446
1447         /* Write length of data to come */
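        /*
         * The count written here is (number of bus words - 1): "words"
         * counts only the full bus words, and a trailing partial word
         * (bytes != 0) adds one more, hence (words - !bytes).
         */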
1448         bytes = len & (map_bankwidth(map)-1);
1449         words = len / map_bankwidth(map);
1450         map_write(map, CMD(words - !bytes), cmd_adr);
1451
1452         /* Write data */
1453         z = 0;
1454         while (z < words * map_bankwidth(map)) {
1455                 map_word datum = map_word_load(map, buf);
1456                 map_write(map, datum, adr+z);
1457
1458                 z += map_bankwidth(map);
1459                 buf += map_bankwidth(map);
1460         }
1461
1462         if (bytes) {
1463                 map_word datum;
1464
1465                 datum = map_word_ff(map);
1466                 datum = map_word_load_partial(map, datum, buf, 0, bytes);
1467                 map_write(map, datum, adr+z);
1468         }
1469
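        /* 0xD0 confirms the buffered data and starts the actual programming. */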
1470         /* GO GO GO */
1471         map_write(map, CMD(0xd0), cmd_adr);
1472         chip->state = FL_WRITING;
1473
1474         spin_unlock(chip->mutex);
1475         INVALIDATE_CACHED_RANGE(map, adr, len);
1476         UDELAY(map, chip, cmd_adr, chip->buffer_write_time);
1477         spin_lock(chip->mutex);
1478
1479         timeo = jiffies + (HZ/2);
1480         z = 0;
1481         for (;;) {
1482                 if (chip->state != FL_WRITING) {
1483                         /* Someone's suspended the write. Sleep */
1484                         DECLARE_WAITQUEUE(wait, current);
1485                         set_current_state(TASK_UNINTERRUPTIBLE);
1486                         add_wait_queue(&chip->wq, &wait);
1487                         spin_unlock(chip->mutex);
1488                         schedule();
1489                         remove_wait_queue(&chip->wq, &wait);
1490                         timeo = jiffies + (HZ / 2); /* FIXME */
1491                         spin_lock(chip->mutex);
1492                         continue;
1493                 }
1494
1495                 status = map_read(map, cmd_adr);
1496                 if (map_word_andequal(map, status, status_OK, status_OK))
1497                         break;
1498
1499                 /* OK Still waiting */
1500                 if (time_after(jiffies, timeo)) {
1501                         chip->state = FL_STATUS;
1502                         xip_enable(map, chip, cmd_adr);
1503                         printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
1504                         ret = -EIO;
1505                         goto out;
1506                 }
1507                 
1508                 /* Latency issues. Drop the lock, wait a while and retry */
1509                 spin_unlock(chip->mutex);
1510                 UDELAY(map, chip, cmd_adr, 1);
1511                 z++;
1512                 spin_lock(chip->mutex);
1513         }
1514         if (!z) {
1515                 chip->buffer_write_time--;
1516                 if (!chip->buffer_write_time)
1517                         chip->buffer_write_time++;
1518         }
1519         if (z > 1) 
1520                 chip->buffer_write_time++;
1521
1522         /* Done and happy. */
1523         chip->state = FL_STATUS;
1524
1525         /* check for lock bit */
1526         if (map_word_bitsset(map, status, CMD(0x02))) {
1527                 /* clear status */
1528                 map_write(map, CMD(0x50), cmd_adr);
1529                 /* put back into read status register mode */
1530                 map_write(map, CMD(0x70), cmd_adr);
1531                 ret = -EROFS;
1532         }
1533
1534         xip_enable(map, chip, cmd_adr);
1535  out:   put_chip(map, chip, cmd_adr);
1536         spin_unlock(chip->mutex);
1537         return ret;
1538 }
1539
1540 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to, 
1541                                        size_t len, size_t *retlen, const u_char *buf)
1542 {
1543         struct map_info *map = mtd->priv;
1544         struct cfi_private *cfi = map->fldrv_priv;
1545         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1546         int ret = 0;
1547         int chipnum;
1548         unsigned long ofs;
1549
1550         *retlen = 0;
1551         if (!len)
1552                 return 0;
1553
1554         chipnum = to >> cfi->chipshift;
1555         ofs = to  - (chipnum << cfi->chipshift);
1556
1557         /* If it's not bus-aligned, do the first word write */
1558         if (ofs & (map_bankwidth(map)-1)) {
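                /*
                 * (-ofs) & (bankwidth-1) is the number of bytes up to the
                 * next bus-aligned boundary; let the word-write path handle
                 * this unaligned head.
                 */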
1559                 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1560                 if (local_len > len)
1561                         local_len = len;
1562                 ret = cfi_intelext_write_words(mtd, to, local_len,
1563                                                retlen, buf);
1564                 if (ret)
1565                         return ret;
1566                 ofs += local_len;
1567                 buf += local_len;
1568                 len -= local_len;
1569
1570                 if (ofs >> cfi->chipshift) {
1571                         chipnum ++;
1572                         ofs = 0;
1573                         if (chipnum == cfi->numchips)
1574                                 return 0;
1575                 }
1576         }
1577
1578         while (len) {
1579                 /* We must not cross write block boundaries */
1580                 int size = wbufsize - (ofs & (wbufsize-1));
1581
1582                 if (size > len)
1583                         size = len;
1584                 ret = do_write_buffer(map, &cfi->chips[chipnum], 
1585                                       ofs, buf, size);
1586                 if (ret)
1587                         return ret;
1588
1589                 ofs += size;
1590                 buf += size;
1591                 (*retlen) += size;
1592                 len -= size;
1593
1594                 if (ofs >> cfi->chipshift) {
1595                         chipnum ++; 
1596                         ofs = 0;
1597                         if (chipnum == cfi->numchips)
1598                                 return 0;
1599                 }
1600         }
1601         return 0;
1602 }
1603
1604 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1605                                       unsigned long adr, int len, void *thunk)
1606 {
1607         struct cfi_private *cfi = map->fldrv_priv;
1608         map_word status, status_OK;
1609         unsigned long timeo;
1610         int retries = 3;
1611         DECLARE_WAITQUEUE(wait, current);
1612         int ret = 0;
1613
1614         adr += chip->start;
1615
1616         /* Let's determine this according to the interleave only once */
1617         status_OK = CMD(0x80);
1618
1619  retry:
1620         spin_lock(chip->mutex);
1621         ret = get_chip(map, chip, adr, FL_ERASING);
1622         if (ret) {
1623                 spin_unlock(chip->mutex);
1624                 return ret;
1625         }
1626
1627         XIP_INVAL_CACHED_RANGE(map, adr, len);
1628         ENABLE_VPP(map);
1629         xip_disable(map, chip, adr);
1630
1631         /* Clear the status register first */
1632         map_write(map, CMD(0x50), adr);
1633
1634         /* Now erase */
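        /* Block erase is a two-cycle command: 0x20 (setup), then 0xD0 (confirm). */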
1635         map_write(map, CMD(0x20), adr);
1636         map_write(map, CMD(0xD0), adr);
1637         chip->state = FL_ERASING;
1638         chip->erase_suspended = 0;
1639
1640         spin_unlock(chip->mutex);
1641         INVALIDATE_CACHED_RANGE(map, adr, len);
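        /*
         * chip->erase_time holds the typical block erase time in
         * milliseconds; wait roughly half of it (converted to µs)
         * before starting to poll the status register.
         */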
1642         UDELAY(map, chip, adr, chip->erase_time*1000/2);
1643         spin_lock(chip->mutex);
1644
1645         /* FIXME. Use a timer to check this, and return immediately. */
1646         /* Once the state machine's known to be working I'll do that */
1647
1648         timeo = jiffies + (HZ*20);
1649         for (;;) {
1650                 if (chip->state != FL_ERASING) {
1651                         /* Someone's suspended the erase. Sleep */
1652                         set_current_state(TASK_UNINTERRUPTIBLE);
1653                         add_wait_queue(&chip->wq, &wait);
1654                         spin_unlock(chip->mutex);
1655                         schedule();
1656                         remove_wait_queue(&chip->wq, &wait);
1657                         spin_lock(chip->mutex);
1658                         continue;
1659                 }
1660                 if (chip->erase_suspended) {
1661                         /* This erase was suspended and resumed.
1662                            Adjust the timeout */
1663                         timeo = jiffies + (HZ*20); /* FIXME */
1664                         chip->erase_suspended = 0;
1665                 }
1666
1667                 status = map_read(map, adr);
1668                 if (map_word_andequal(map, status, status_OK, status_OK))
1669                         break;
1670                 
1671                 /* OK Still waiting */
1672                 if (time_after(jiffies, timeo)) {
1673                         map_word Xstatus;
1674                         map_write(map, CMD(0x70), adr);
1675                         chip->state = FL_STATUS;
1676                         Xstatus = map_read(map, adr);
1677                         /* Clear status bits */
1678                         map_write(map, CMD(0x50), adr);
1679                         map_write(map, CMD(0x70), adr);
1680                         xip_enable(map, chip, adr);
1681                         printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n",
1682                                adr, status.x[0], Xstatus.x[0]);
1683                         ret = -EIO;
1684                         goto out;
1685                 }
1686                 
1687                 /* Latency issues. Drop the lock, wait a while and retry */
1688                 spin_unlock(chip->mutex);
1689                 UDELAY(map, chip, adr, 1000000/HZ);
1690                 spin_lock(chip->mutex);
1691         }
1692
1693         /* We've broken this before. It doesn't hurt to be safe */
1694         map_write(map, CMD(0x70), adr);
1695         chip->state = FL_STATUS;
1696         status = map_read(map, adr);
1697
1698         /* check for errors: lock bit (SR.1), VPP low (SR.3), program/erase failure (SR.4/SR.5) */
1699         if (map_word_bitsset(map, status, CMD(0x3a))) {
1700                 unsigned long chipstatus;
1701
1702                 /* Reset the error bits */
1703                 map_write(map, CMD(0x50), adr);
1704                 map_write(map, CMD(0x70), adr);
1705                 xip_enable(map, chip, adr);
1706
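                /*
                 * MERGESTATUS() reduces the interleaved status word to a
                 * single chip-status value, so the error bits below can be
                 * tested once for all chips.
                 */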
1707                 chipstatus = MERGESTATUS(status);
1708
1709                 if ((chipstatus & 0x30) == 0x30) {
1710                         printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%lx\n", chipstatus);
1711                         ret = -EIO;
1712                 } else if (chipstatus & 0x02) {
1713                         /* Protection bit set */
1714                         ret = -EROFS;
1715                 } else if (chipstatus & 0x8) {
1716                         /* Voltage */
1717                         printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%lx\n", chipstatus);
1718                         ret = -EIO;
1719                 } else if (chipstatus & 0x20) {
1720                         if (retries--) {
1721                                 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1722                                 timeo = jiffies + HZ;
1723                                 put_chip(map, chip, adr);
1724                                 spin_unlock(chip->mutex);
1725                                 goto retry;
1726                         }
1727                         printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx\n", adr, chipstatus);
1728                         ret = -EIO;
1729                 }
1730         } else {
1731                 xip_enable(map, chip, adr);
1732                 ret = 0;
1733         }
1734
1735  out:   put_chip(map, chip, adr);
1736         spin_unlock(chip->mutex);
1737         return ret;
1738 }
1739
1740 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1741 {
1742         unsigned long ofs, len;
1743         int ret;
1744
1745         ofs = instr->addr;
1746         len = instr->len;
1747
1748         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1749         if (ret)
1750                 return ret;
1751
1752         instr->state = MTD_ERASE_DONE;
1753         mtd_erase_callback(instr);
1754         
1755         return 0;
1756 }
1757
1758 static void cfi_intelext_sync (struct mtd_info *mtd)
1759 {
1760         struct map_info *map = mtd->priv;
1761         struct cfi_private *cfi = map->fldrv_priv;
1762         int i;
1763         struct flchip *chip;
1764         int ret = 0;
1765
1766         for (i=0; !ret && i<cfi->numchips; i++) {
1767                 chip = &cfi->chips[i];
1768
1769                 spin_lock(chip->mutex);
1770                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1771
1772                 if (!ret) {
1773                         chip->oldstate = chip->state;
1774                         chip->state = FL_SYNCING;
1775                         /* No need to wake_up() on this state change - 
1776                          * as the whole point is that nobody can do anything
1777                          * with the chip now anyway.
1778                          */
1779                 }
1780                 spin_unlock(chip->mutex);
1781         }
1782
1783         /* Unlock the chips again */
1784
1785         for (i--; i >=0; i--) {
1786                 chip = &cfi->chips[i];
1787
1788                 spin_lock(chip->mutex);
1789                 
1790                 if (chip->state == FL_SYNCING) {
1791                         chip->state = chip->oldstate;
1792                         wake_up(&chip->wq);
1793                 }
1794                 spin_unlock(chip->mutex);
1795         }
1796 }
1797
1798 #ifdef DEBUG_LOCK_BITS
1799 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1800                                                 struct flchip *chip,
1801                                                 unsigned long adr,
1802                                                 int len, void *thunk)
1803 {
1804         struct cfi_private *cfi = map->fldrv_priv;
1805         int status, ofs_factor = cfi->interleave * cfi->device_type;
1806
1807         xip_disable(map, chip, adr+(2*ofs_factor));
1808         cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1809         chip->state = FL_JEDEC_QUERY;
1810         status = cfi_read_query(map, adr+(2*ofs_factor));
1811         xip_enable(map, chip, 0);
1812         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1813                adr, status);
1814         return 0;
1815 }
1816 #endif
1817
1818 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1819 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1820
1821 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1822                                        unsigned long adr, int len, void *thunk)
1823 {
1824         struct cfi_private *cfi = map->fldrv_priv;
1825         map_word status, status_OK;
1826         unsigned long timeo = jiffies + HZ;
1827         int ret;
1828
1829         adr += chip->start;
1830
1831         /* Let's determine this according to the interleave only once */
1832         status_OK = CMD(0x80);
1833
1834         spin_lock(chip->mutex);
1835         ret = get_chip(map, chip, adr, FL_LOCKING);
1836         if (ret) {
1837                 spin_unlock(chip->mutex);
1838                 return ret;
1839         }
1840
1841         ENABLE_VPP(map);
1842         xip_disable(map, chip, adr);
1843         
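        /*
         * Block lock commands are two-cycle: 0x60 (setup) followed by
         * 0x01 to set the lock bit or 0xD0 to clear it.
         */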
1844         map_write(map, CMD(0x60), adr);
1845         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1846                 map_write(map, CMD(0x01), adr);
1847                 chip->state = FL_LOCKING;
1848         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1849                 map_write(map, CMD(0xD0), adr);
1850                 chip->state = FL_UNLOCKING;
1851         } else
1852                 BUG();
1853
1854         spin_unlock(chip->mutex);
1855         UDELAY(map, chip, adr, 1000000/HZ);
1856         spin_lock(chip->mutex);
1857
1858         /* FIXME. Use a timer to check this, and return immediately. */
1859         /* Once the state machine's known to be working I'll do that */
1860
1861         timeo = jiffies + (HZ*20);
1862         for (;;) {
1863
1864                 status = map_read(map, adr);
1865                 if (map_word_andequal(map, status, status_OK, status_OK))
1866                         break;
1867                 
1868                 /* OK Still waiting */
1869                 if (time_after(jiffies, timeo)) {
1870                         map_word Xstatus;
1871                         map_write(map, CMD(0x70), adr);
1872                         chip->state = FL_STATUS;
1873                         Xstatus = map_read(map, adr);
1874                         xip_enable(map, chip, adr);
1875                         printk(KERN_ERR "waiting for lock/unlock to complete timed out. status = %lx, Xstatus = %lx.\n",
1876                                status.x[0], Xstatus.x[0]);
1877                         put_chip(map, chip, adr);
1878                         spin_unlock(chip->mutex);
1879                         return -EIO;
1880                 }
1881                 
1882                 /* Latency issues. Drop the lock, wait a while and retry */
1883                 spin_unlock(chip->mutex);
1884                 UDELAY(map, chip, adr, 1);
1885                 spin_lock(chip->mutex);
1886         }
1887         
1888         /* Done and happy. */
1889         chip->state = FL_STATUS;
1890         xip_enable(map, chip, adr);
1891         put_chip(map, chip, adr);
1892         spin_unlock(chip->mutex);
1893         return 0;
1894 }
1895
1896 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1897 {
1898         int ret;
1899
1900 #ifdef DEBUG_LOCK_BITS
1901         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
1902                __FUNCTION__, ofs, len);
1903         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1904                 ofs, len, 0);
1905 #endif
1906
1907         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock, 
1908                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1909         
1910 #ifdef DEBUG_LOCK_BITS
1911         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1912                __FUNCTION__, ret);
1913         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1914                 ofs, len, 0);
1915 #endif
1916
1917         return ret;
1918 }
1919
1920 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1921 {
1922         int ret;
1923
1924 #ifdef DEBUG_LOCK_BITS
1925         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
1926                __FUNCTION__, ofs, len);
1927         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1928                 ofs, len, 0);
1929 #endif
1930
1931         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1932                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1933         
1934 #ifdef DEBUG_LOCK_BITS
1935         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1936                __FUNCTION__, ret);
1937         cfi_varsize_frob(mtd, do_printlockstatus_oneblock, 
1938                 ofs, len, 0);
1939 #endif
1940         
1941         return ret;
1942 }
1943
1944 #ifdef CONFIG_MTD_OTP
1945
1946 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip, 
1947                         u_long data_offset, u_char *buf, u_int size,
1948                         u_long prot_offset, u_int groupno, u_int groupsize);
1949
1950 static int __xipram
1951 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
1952             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1953 {
1954         struct cfi_private *cfi = map->fldrv_priv;
1955         int ret;
1956
1957         spin_lock(chip->mutex);
1958         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
1959         if (ret) {
1960                 spin_unlock(chip->mutex);
1961                 return ret;
1962         }
1963
1964         /* let's ensure we're not reading back cached data from array mode */
1965         if (map->inval_cache)
1966                 map->inval_cache(map, chip->start + offset, size);
1967
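        /*
         * The protection registers are only visible in Read Identifier
         * (0x90) mode, which this driver tracks as FL_JEDEC_QUERY.
         */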
1968         xip_disable(map, chip, chip->start);
1969         if (chip->state != FL_JEDEC_QUERY) {
1970                 map_write(map, CMD(0x90), chip->start);
1971                 chip->state = FL_JEDEC_QUERY;
1972         }
1973         map_copy_from(map, buf, chip->start + offset, size);
1974         xip_enable(map, chip, chip->start);
1975
1976         /* then ensure we don't keep OTP data in the cache */
1977         if (map->inval_cache)
1978                 map->inval_cache(map, chip->start + offset, size);
1979
1980         put_chip(map, chip, chip->start);
1981         spin_unlock(chip->mutex);
1982         return 0;
1983 }
1984
1985 static int
1986 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
1987              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1988 {
1989         int ret;
1990
1991         while (size) {
1992                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
1993                 int gap = offset - bus_ofs;
1994                 int n = min_t(int, size, map_bankwidth(map)-gap);
1995                 map_word datum = map_word_ff(map);
1996
1997                 datum = map_word_load_partial(map, datum, buf, gap, n);
1998                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
1999                 if (ret) 
2000                         return ret;
2001
2002                 offset += n;
2003                 buf += n;
2004                 size -= n;
2005         }
2006
2007         return 0;
2008 }
2009
2010 static int
2011 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2012             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2013 {
2014         struct cfi_private *cfi = map->fldrv_priv;
2015         map_word datum;
2016
2017         /* make sure area matches group boundaries */
2018         if (size != grpsz)
2019                 return -EXDEV;
2020
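        /*
         * Programming can only clear bits: clearing bit <grpno> of the
         * protection lock register permanently locks that OTP group.
         */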
2021         datum = map_word_ff(map);
2022         datum = map_word_clr(map, datum, CMD(1 << grpno));
2023         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2024 }
2025
2026 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2027                                  size_t *retlen, u_char *buf,
2028                                  otp_op_t action, int user_regs)
2029 {
2030         struct map_info *map = mtd->priv;
2031         struct cfi_private *cfi = map->fldrv_priv;
2032         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2033         struct flchip *chip;
2034         struct cfi_intelext_otpinfo *otp;
2035         u_long devsize, reg_prot_offset, data_offset;
2036         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2037         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2038         int ret;
2039
2040         *retlen = 0;
2041
2042         /* Check that we actually have some OTP registers */
2043         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2044                 return -ENODATA;
2045
2046         /* we need real chips here not virtual ones */
2047         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2048         chip_step = devsize >> cfi->chipshift;
2049
2050         for (chip_num = 0; chip_num < cfi->numchips; chip_num += chip_step) {
2051                 chip = &cfi->chips[chip_num];
2052                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2053
2054                 /* first OTP region */
2055                 field = 0;
2056                 reg_prot_offset = extp->ProtRegAddr;
2057                 reg_fact_groups = 1;
2058                 reg_fact_size = 1 << extp->FactProtRegSize;
2059                 reg_user_groups = 1;
2060                 reg_user_size = 1 << extp->UserProtRegSize;
2061
2062                 while (len > 0) {
2063                         /* flash geometry fixup */
2064                         data_offset = reg_prot_offset + 1;
2065                         data_offset *= cfi->interleave * cfi->device_type;
2066                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2067                         reg_fact_size *= cfi->interleave;
2068                         reg_user_size *= cfi->interleave;
2069
2070                         if (user_regs) {
2071                                 groups = reg_user_groups;
2072                                 groupsize = reg_user_size;
2073                                 /* skip over factory reg area */
2074                                 groupno = reg_fact_groups;
2075                                 data_offset += reg_fact_groups * reg_fact_size;
2076                         } else {
2077                                 groups = reg_fact_groups;
2078                                 groupsize = reg_fact_size;
2079                                 groupno = 0;
2080                         }
2081
2082                         while (len > 0 && groups > 0) {
2083                                 if (!action) {
2084                                         /*
2085                                          * Special case: if action is NULL
2086                                          * we fill buf with otp_info records.
2087                                          */
2088                                         struct otp_info *otpinfo;
2089                                         map_word lockword;
2090                                         if (len <= sizeof(struct otp_info))
2091                                                 return -ENOSPC;
2092                                         len -= sizeof(struct otp_info);
2093                                         ret = do_otp_read(map, chip,
2094                                                           reg_prot_offset,
2095                                                           (u_char *)&lockword,
2096                                                           map_bankwidth(map),
2097                                                           0, 0,  0);
2098                                         if (ret)
2099                                                 return ret;
2100                                         otpinfo = (struct otp_info *)buf;
2101                                         otpinfo->start = from;
2102                                         otpinfo->length = groupsize;
2103                                         otpinfo->locked =
2104                                            !map_word_bitsset(map, lockword,
2105                                                              CMD(1 << groupno));
2106                                         from += groupsize;
2107                                         buf += sizeof(*otpinfo);
2108                                         *retlen += sizeof(*otpinfo);
2109                                 } else if (from >= groupsize) {
2110                                         from -= groupsize;
2111                                         data_offset += groupsize;
2112                                 } else {
2113                                         int size = groupsize;
2114                                         data_offset += from;
2115                                         size -= from;
2116                                         from = 0;
2117                                         if (size > len)
2118                                                 size = len;
2119                                         ret = action(map, chip, data_offset,
2120                                                      buf, size, reg_prot_offset,
2121                                                      groupno, groupsize);
2122                                         if (ret < 0)
2123                                                 return ret;
2124                                         buf += size;
2125                                         len -= size;
2126                                         *retlen += size;
2127                                         data_offset += size;
2128                                 }
2129                                 groupno++;
2130                                 groups--;
2131                         }
2132
2133                         /* next OTP region */
2134                         if (++field == extp->NumProtectionFields)
2135                                 break;
2136                         reg_prot_offset = otp->ProtRegAddr;
2137                         reg_fact_groups = otp->FactGroups;
2138                         reg_fact_size = 1 << otp->FactProtRegSize;
2139                         reg_user_groups = otp->UserGroups;
2140                         reg_user_size = 1 << otp->UserProtRegSize;
2141                         otp++;
2142                 }
2143         }
2144
2145         return 0;
2146 }
2147
2148 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2149                                            size_t len, size_t *retlen,
2150                                             u_char *buf)
2151 {
2152         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2153                                      buf, do_otp_read, 0);
2154 }
2155
2156 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2157                                            size_t len, size_t *retlen,
2158                                             u_char *buf)
2159 {
2160         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2161                                      buf, do_otp_read, 1);
2162 }
2163
2164 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2165                                             size_t len, size_t *retlen,
2166                                              u_char *buf)
2167 {
2168         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2169                                      buf, do_otp_write, 1);
2170 }
2171
2172 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2173                                            loff_t from, size_t len)
2174 {
2175         size_t retlen;
2176         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2177                                      NULL, do_otp_lock, 1);
2178 }
2179
2180 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, 
2181                                            struct otp_info *buf, size_t len)
2182 {
2183         size_t retlen;
2184         int ret;
2185
2186         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2187         return ret ? : retlen;
2188 }
2189
2190 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2191                                            struct otp_info *buf, size_t len)
2192 {
2193         size_t retlen;
2194         int ret;
2195
2196         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2197         return ret ? : retlen;
2198 }
2199
2200 #endif
2201
2202 static int cfi_intelext_suspend(struct mtd_info *mtd)
2203 {
2204         struct map_info *map = mtd->priv;
2205         struct cfi_private *cfi = map->fldrv_priv;
2206         int i;
2207         struct flchip *chip;
2208         int ret = 0;
2209
2210         for (i=0; !ret && i<cfi->numchips; i++) {
2211                 chip = &cfi->chips[i];
2212
2213                 spin_lock(chip->mutex);
2214
2215                 switch (chip->state) {
2216                 case FL_READY:
2217                 case FL_STATUS:
2218                 case FL_CFI_QUERY:
2219                 case FL_JEDEC_QUERY:
2220                         if (chip->oldstate == FL_READY) {
2221                                 chip->oldstate = chip->state;
2222                                 chip->state = FL_PM_SUSPENDED;
2223                                 /* No need to wake_up() on this state change - 
2224                                  * as the whole point is that nobody can do anything
2225                                  * with the chip now anyway.
2226                                  */
2227                         } else {
2228                                 /* There seems to be an operation pending. We must wait for it. */
2229                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2230                                 ret = -EAGAIN;
2231                         }
2232                         break;
2233                 default:
2234                         /* Should we actually wait? Once upon a time these routines weren't
2235                            allowed to. Or should we return -EAGAIN, because the upper layers
2236                            ought to have already shut down anything which was using the device
2237                            anyway? The latter for now. */
2238                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2239                         ret = -EAGAIN;
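                        /* fall through */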
2240                 case FL_PM_SUSPENDED:
2241                         break;
2242                 }
2243                 spin_unlock(chip->mutex);
2244         }
2245
2246         /* Unlock the chips again */
2247
2248         if (ret) {
2249                 for (i--; i >=0; i--) {
2250                         chip = &cfi->chips[i];
2251                         
2252                         spin_lock(chip->mutex);
2253                         
2254                         if (chip->state == FL_PM_SUSPENDED) {
2255                                 /* No need to force it into a known state here,
2256                                    because we're returning failure, and it didn't
2257                                    get power cycled */
2258                                 chip->state = chip->oldstate;
2259                                 chip->oldstate = FL_READY;
2260                                 wake_up(&chip->wq);
2261                         }
2262                         spin_unlock(chip->mutex);
2263                 }
2264         } 
2265         
2266         return ret;
2267 }
2268
2269 static void cfi_intelext_resume(struct mtd_info *mtd)
2270 {
2271         struct map_info *map = mtd->priv;
2272         struct cfi_private *cfi = map->fldrv_priv;
2273         int i;
2274         struct flchip *chip;
2275
2276         for (i=0; i<cfi->numchips; i++) {
2277         
2278                 chip = &cfi->chips[i];
2279
2280                 spin_lock(chip->mutex);
2281                 
2282                 /* Go to known state. Chip may have been power cycled */
2283                 if (chip->state == FL_PM_SUSPENDED) {
2284                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2285                         chip->oldstate = chip->state = FL_READY;
2286                         wake_up(&chip->wq);
2287                 }
2288
2289                 spin_unlock(chip->mutex);
2290         }
2291 }
2292
2293 static void cfi_intelext_destroy(struct mtd_info *mtd)
2294 {
2295         struct map_info *map = mtd->priv;
2296         struct cfi_private *cfi = map->fldrv_priv;
2297         kfree(cfi->cmdset_priv);
2298         kfree(cfi->cfiq);
2299         kfree(cfi->chips[0].priv);
2300         kfree(cfi);
2301         kfree(mtd->eraseregions);
2302 }
2303
2304 static char im_name_1[]="cfi_cmdset_0001";
2305 static char im_name_3[]="cfi_cmdset_0003";
2306
2307 static int __init cfi_intelext_init(void)
2308 {
2309         inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
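        /* The Intel Standard command set (0x0003) is close enough to be driven by the same code. */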
2310         inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
2311         return 0;
2312 }
2313
2314 static void __exit cfi_intelext_exit(void)
2315 {
2316         inter_module_unregister(im_name_1);
2317         inter_module_unregister(im_name_3);
2318 }
2319
2320 module_init(cfi_intelext_init);
2321 module_exit(cfi_intelext_exit);
2322
2323 MODULE_LICENSE("GPL");
2324 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2325 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");