2 * csr1212.c -- IEEE 1212 Control and Status Register support for Linux
4 * Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
5 * Steve Kinneberg <kinnebergsteve@acmsystems.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
21 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
27 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * - Verify interface consistency: i.e., public functions that take a size
33 * parameter expect size to be in bytes.
36 #include <linux/errno.h>
37 #include <linux/string.h>
39 #include <asm/byteorder.h>
44 /* Permitted key type for each key id */
45 #define __I (1 << CSR1212_KV_TYPE_IMMEDIATE)
46 #define __C (1 << CSR1212_KV_TYPE_CSR_OFFSET)
47 #define __D (1 << CSR1212_KV_TYPE_DIRECTORY)
48 #define __L (1 << CSR1212_KV_TYPE_LEAF)
/* Indexed by key id (0x00..0x2f); each entry is a bitmask of the entry
 * types (immediate, CSR offset, directory, leaf) allowed for that key.
 * A zero entry marks a reserved key id. */
49 static const u8 csr1212_key_id_type_map[0x30] = {
50 __C, /* used by Apple iSight */
51 __D | __L, /* Descriptor */
52 __I | __D | __L, /* Bus_Dependent_Info */
53 __I | __D | __L, /* Vendor */
54 __I, /* Hardware_Version */
56 __D | __L | __I, /* Module */
57 __I, 0, 0, 0, /* used by Apple iSight, Reserved */
58 __I, /* Node_Capabilities */
60 0, 0, 0, /* Reserved */
62 __I, /* Specifier_ID */
64 __I | __C | __D | __L, /* Dependent_Info */
65 __L, /* Unit_Location */
71 __L, /* Extended_ROM */
72 __I, /* Extended_Key_Specifier_ID */
73 __I, /* Extended_Key */
74 __I | __C | __D | __L, /* Extended_Data */
75 __L, /* Modifiable_Descriptor */
76 __I, /* Directory_ID */
/* Quadlet (32-bit word) <-> byte conversions.
 * bytes_to_quads() rounds up to the next whole quadlet. */
85 #define quads_to_bytes(_q) ((_q) * sizeof(u32))
86 #define bytes_to_quads(_b) (((_b) + sizeof(u32) - 1) / sizeof(u32))
/* Free a keyval's leaf payload.  Extended ROM leaves are skipped: their
 * data pointer is not owned by the keyval (presumably it aliases a ROM
 * cache buffer -- see cache->ext_rom handling elsewhere; confirm). */
88 static void free_keyval(struct csr1212_keyval *kv)
90 if ((kv->key.type == CSR1212_KV_TYPE_LEAF) &&
91 (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM))
92 CSR1212_FREE(kv->value.leaf.data);
/* Serial CRC-16 over @length big-endian quadlets, folded in a nibble at
 * a time (shift 28 down to 0 in steps of 4).  The result is returned in
 * big-endian byte order via cpu_to_be16(), ready to be stored in a ROM
 * image.  NOTE(review): local declarations (crc/data/sum/shift) and the
 * buffer increment are not visible in this chunk. */
97 static u16 csr1212_crc16(const u32 *buffer, size_t length)
103 for (; length; length--) {
104 data = be32_to_cpu(*buffer);
106 for (shift = 28; shift >= 0; shift -= 4 ) {
107 sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
108 crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
113 return cpu_to_be16(crc);
117 /* Microsoft computes the CRC with the bytes in reverse order. Therefore we
118 * have a special version of the CRC algorithm to account for their buggy
/* Identical to csr1212_crc16() except each quadlet is read little-endian
 * (le32_to_cpu), matching the byte-reversed CRC those devices emit. */
120 static u16 csr1212_msft_crc16(const u32 *buffer, size_t length)
126 for (; length; length--) {
127 data = le32_to_cpu(*buffer);
129 for (shift = 28; shift >= 0; shift -= 4 ) {
130 sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
131 crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
136 return cpu_to_be16(crc);
/* Linear search of @dir's dentry list for the entry that references @kv.
 * NOTE(review): the comparison/return inside the loop is not visible in
 * this chunk; presumably returns the matching dentry or NULL. */
140 static struct csr1212_dentry *
141 csr1212_find_keyval(struct csr1212_keyval *dir, struct csr1212_keyval *kv)
143 struct csr1212_dentry *pos;
145 for (pos = dir->value.directory.dentries_head;
146 pos != NULL; pos = pos->next) {
/* Search the circular keyval list starting after @kv_list for a keyval
 * whose ROM offset equals @offset.  The "kv != kv_list" guard terminates
 * the walk once the list wraps around. */
153 static struct csr1212_keyval *
154 csr1212_find_keyval_offset(struct csr1212_keyval *kv_list, u32 offset)
156 struct csr1212_keyval *kv;
158 for (kv = kv_list->next; kv && (kv != kv_list); kv = kv->next) {
159 if (kv->offset == offset)
166 /* Creation Routines */
/* Allocate and initialize a csr1212_csr: the first ROM cache covering the
 * 1K Config ROM space, the root directory keyval, and the bus info area
 * (which aliases the head cache's data buffer). */
168 struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
169 size_t bus_info_size, void *private)
171 struct csr1212_csr *csr;
173 csr = CSR1212_MALLOC(sizeof(*csr));
178 csr1212_rom_cache_malloc(CSR1212_CONFIG_ROM_SPACE_OFFSET,
179 CSR1212_CONFIG_ROM_SPACE_SIZE);
180 if (!csr->cache_head) {
185 /* The keyval key id is not used for the root node, but a valid key id
186 * that can be used for a directory needs to be passed to
187 * csr1212_new_directory(). */
188 csr->root_kv = csr1212_new_directory(CSR1212_KV_ID_VENDOR);
190 CSR1212_FREE(csr->cache_head);
/* bus_info_data points into the head cache; crc_len starts equal to the
 * bus info length and may be adjusted later. */
195 csr->bus_info_data = csr->cache_head->data;
196 csr->bus_info_len = bus_info_size;
197 csr->crc_len = bus_info_size;
199 csr->private = private;
200 csr->cache_tail = csr->cache_head;
/* Initialize the local node's CSR from caller-supplied bus info.
 * @max_rom is the 2-bit max_rom field; mr_map translates it to the
 * maximum read size in bytes (4/64/1024; 3 is reserved -> 0). */
205 void csr1212_init_local_csr(struct csr1212_csr *csr,
206 const u32 *bus_info_data, int max_rom)
208 static const int mr_map[] = { 4, 64, 1024, 0 };
/* Only the low two bits of max_rom are meaningful. */
210 BUG_ON(max_rom & ~0x3);
211 csr->max_rom = mr_map[max_rom];
212 memcpy(csr->bus_info_data, bus_info_data, csr->bus_info_len);
/* Allocate a keyval of the given @type for key id @key.  Key ids below
 * 0x30 are validated against csr1212_key_id_type_map; ids >= 0x30 are
 * accepted unchecked (vendor-defined range). */
215 static struct csr1212_keyval *csr1212_new_keyval(u8 type, u8 key)
217 struct csr1212_keyval *kv;
219 if (key < 0x30 && ((csr1212_key_id_type_map[key] & (1 << type)) == 0))
222 kv = CSR1212_MALLOC(sizeof(*kv));
229 kv->associate = NULL;
/* Create an immediate-type keyval holding the 24-bit @value. */
239 struct csr1212_keyval *csr1212_new_immediate(u8 key, u32 value)
241 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key);
246 kv->value.immediate = value;
/* Create a leaf keyval and copy @data_len bytes into a freshly allocated
 * buffer (NULL data pointer when data_len is 0).  leaf.len is stored in
 * quadlets, rounded up. */
251 static struct csr1212_keyval *
252 csr1212_new_leaf(u8 key, const void *data, size_t data_len)
254 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, key);
260 kv->value.leaf.data = CSR1212_MALLOC(data_len);
261 if (!kv->value.leaf.data) {
267 memcpy(kv->value.leaf.data, data, data_len);
269 kv->value.leaf.data = NULL;
272 kv->value.leaf.len = bytes_to_quads(data_len);
/* Create a CSR-offset-type keyval referencing register @csr_offset. */
279 static struct csr1212_keyval *
280 csr1212_new_csr_offset(u8 key, u32 csr_offset)
282 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_CSR_OFFSET, key);
287 kv->value.csr_offset = csr_offset;
/* Create an empty directory keyval (no dentries, zero length). */
294 struct csr1212_keyval *csr1212_new_directory(u8 key)
296 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_DIRECTORY, key);
301 kv->value.directory.len = 0;
303 kv->value.directory.dentries_head = NULL;
304 kv->value.directory.dentries_tail = NULL;
/* Chain @associate onto @kv.  The BUG_ON enforces the legal association
 * pairs: only Descriptor / Dependent_Info / Extended_Key / Extended_Data
 * (or vendor keys >= 0x30) may be associates, Extended_Key_Specifier_ID
 * must be followed by Extended_Key, and Extended_Key by Extended_Data.
 * Any previous associate is released first. */
309 void csr1212_associate_keyval(struct csr1212_keyval *kv,
310 struct csr1212_keyval *associate)
312 BUG_ON(!kv || !associate || kv->key.id == CSR1212_KV_ID_DESCRIPTOR ||
313 (associate->key.id != CSR1212_KV_ID_DESCRIPTOR &&
314 associate->key.id != CSR1212_KV_ID_DEPENDENT_INFO &&
315 associate->key.id != CSR1212_KV_ID_EXTENDED_KEY &&
316 associate->key.id != CSR1212_KV_ID_EXTENDED_DATA &&
317 associate->key.id < 0x30) ||
318 (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID &&
319 associate->key.id != CSR1212_KV_ID_EXTENDED_KEY) ||
320 (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
321 associate->key.id != CSR1212_KV_ID_EXTENDED_DATA) ||
322 (associate->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
323 kv->key.id != CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) ||
324 (associate->key.id == CSR1212_KV_ID_EXTENDED_DATA &&
325 kv->key.id != CSR1212_KV_ID_EXTENDED_KEY));
/* Drop the reference to any keyval previously associated here. */
328 csr1212_release_keyval(kv->associate);
331 kv->associate = associate;
/* Append @kv to directory @dir by allocating a new dentry and linking it
 * at the tail of the doubly-linked dentry list.
 * Returns CSR1212_SUCCESS, or an error if allocation fails (not visible
 * in this chunk). */
334 int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
335 struct csr1212_keyval *kv)
337 struct csr1212_dentry *dentry;
339 BUG_ON(!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY);
341 dentry = CSR1212_MALLOC(sizeof(*dentry));
350 dentry->prev = dir->value.directory.dentries_tail;
352 if (!dir->value.directory.dentries_head)
353 dir->value.directory.dentries_head = dentry;
355 if (dir->value.directory.dentries_tail)
356 dir->value.directory.dentries_tail->next = dentry;
357 dir->value.directory.dentries_tail = dentry;
359 return CSR1212_SUCCESS;
/* Descriptor leaf layout: data[0] packs the descriptor type and
 * specifier_id (big-endian); the payload starts at data[1].  The two SET
 * macros each rewrite data[0], preserving the other field by re-reading
 * it through the corresponding accessor. */
362 #define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \
363 (&((kv)->value.leaf.data[1]))
365 #define CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, type) \
366 ((kv)->value.leaf.data[0] = \
367 cpu_to_be32(CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) | \
368 ((type) << CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)))
369 #define CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, spec_id) \
370 ((kv)->value.leaf.data[0] = \
371 cpu_to_be32((CSR1212_DESCRIPTOR_LEAF_TYPE(kv) << \
372 CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT) | \
373 ((spec_id) & CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)))
/* Build a descriptor leaf: allocate room for the one-quadlet header plus
 * @data_len payload bytes, set type/specifier_id, then copy the payload
 * in after the header. */
375 static struct csr1212_keyval *
376 csr1212_new_descriptor_leaf(u8 dtype, u32 specifier_id,
377 const void *data, size_t data_len)
379 struct csr1212_keyval *kv;
381 kv = csr1212_new_leaf(CSR1212_KV_ID_DESCRIPTOR, NULL,
382 data_len + CSR1212_DESCRIPTOR_LEAF_OVERHEAD)
384 
386 CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
387 CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);
390 memcpy(CSR1212_DESCRIPTOR_LEAF_DATA(kv), data, data_len);
/* Textual descriptor leaves pack width / char_set / language into
 * data[1] (big-endian).  Each SET macro masks out only its own field and
 * ORs in the new value, leaving the other two fields intact. */
396 #define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, width) \
397 ((kv)->value.leaf.data[1] = \
398 ((kv)->value.leaf.data[1] & \
399 cpu_to_be32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK << \
400 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT))) | \
401 cpu_to_be32(((width) & CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK) << \
402 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT))
404 #define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, char_set) \
405 ((kv)->value.leaf.data[1] = \
406 ((kv)->value.leaf.data[1] & \
407 cpu_to_be32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK << \
408 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT))) | \
409 cpu_to_be32(((char_set) & \
410 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK) << \
411 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT))
413 #define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language) \
414 ((kv)->value.leaf.data[1] = \
415 ((kv)->value.leaf.data[1] & \
416 cpu_to_be32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK))) | \
417 cpu_to_be32(((language) & \
418 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK)))
/* Build a textual descriptor leaf (descriptor type 0, specifier 0), set
 * width/char_set/language, then copy in the text.  The final quadlet of
 * the text area is zeroed first so any padding bytes after the copied
 * string are 0. */
420 static struct csr1212_keyval *
421 csr1212_new_textual_descriptor_leaf(u8 cwidth, u16 cset, u16 language,
422 const void *data, size_t data_len)
424 struct csr1212_keyval *kv;
427 kv = csr1212_new_descriptor_leaf(0, 0, NULL, data_len +
428 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD);
432 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, cwidth);
433 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, cset);
434 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language);
436 lstr = (char*)CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv);
438 /* make sure last quadlet is zeroed out */
439 *((u32*)&(lstr[(data_len - 1) & ~0x3])) = 0;
441 /* don't copy the NUL terminator */
442 memcpy(lstr, data, data_len);
/*
 * csr1212_check_minimal_ascii - validate a string against the IEEE 1212
 * "minimal ASCII" character set (IEEE 1212, par. 7.4).
 *
 * The lookup table maps each 7-bit code to itself when the character is
 * permitted and to 0x00 when it is forbidden, so a character is legal
 * iff its table entry equals the character.  Bytes with the high bit set
 * always fail: they index via (*s & 0x7F) but can never compare equal to
 * the small positive table value.
 *
 * @s: NUL-terminated string to check (must not be NULL).
 *
 * Returns 0 if every character of @s conforms, -1 otherwise.
 */
static int csr1212_check_minimal_ascii(const char *s)
{
	static const char minimal_ascii_table[] = {
					/* 1   2   4   8  16  32  64  128 */
		128,			/* --, --, --, --, --, --, --, 07, */
		4 + 16 + 32,		/* --, --, 0a, --, 0C, 0D, --, --, */
		0,			/* --, --, --, --, --, --, --, --, */
		0,			/* --, --, --, --, --, --, --, --, */
		255 - 8 - 16,		/* 20, 21, 22, --, --, 25, 26, 27, */
		255,			/* 28, 29, 2a, 2b, 2c, 2d, 2e, 2f, */
		255,			/* 30, 31, 32, 33, 34, 35, 36, 37, */
		255,			/* 38, 39, 3a, 3b, 3c, 3d, 3e, 3f, */
		255,			/* 40, 41, 42, 43, 44, 45, 46, 47, */
		255,			/* 48, 49, 4a, 4b, 4c, 4d, 4e, 4f, */
		255,			/* 50, 51, 52, 53, 54, 55, 56, 57, */
		1 + 2 + 4 + 128,	/* 58, 59, 5a, --, --, --, --, 5f, */
		254,			/* --, 61, 62, 63, 64, 65, 66, 67, */
		255,			/* 68, 69, 6a, 6b, 6c, 6d, 6e, 6f, */
		255,			/* 70, 71, 72, 73, 74, 75, 76, 77, */
		7,			/* 78, 79, 7a, --, --, --, --, --, */
	};
	int i, j;

	for (; *s; s++) {
		i = (unsigned char)*s >> 3;	/* byte index in bitmap  */
		j = 1 << (*s & 3);		/* (unused placeholder)  */
		(void)j;
		/* High-bit bytes (i >= 16) are always invalid. */
		if (i >= 16)
			return -1;
		if (!(minimal_ascii_table[i] & (1 << (*s & 7))))
			return -1;	/* failed */
	}
	/* String conforms to minimal-ascii, as specified by IEEE 1212,
	 * par. 7.4 */
	return 0;
}
/* Create a minimal-ASCII textual descriptor leaf for string @s.
 * Fails (presumably returns NULL -- not visible here) if @s contains
 * characters outside the minimal-ASCII set. */
476 struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s)
478 /* Check if string conform to minimal_ascii format */
479 if (csr1212_check_minimal_ascii(s))
482 /* IEEE 1212, par. 7.5.4.1 Textual descriptors (minimal ASCII) */
483 return csr1212_new_textual_descriptor_leaf(0, 0, 0, s, strlen(s));
487 /* Destruction Routines */
/* Remove @kv from directory @dir: find its dentry, unlink it from the
 * doubly-linked list (patching head/tail as needed), free the dentry,
 * and drop the directory's reference on @kv. */
489 void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
490 struct csr1212_keyval *kv)
492 struct csr1212_dentry *dentry;
494 if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
497 dentry = csr1212_find_keyval(dir, kv);
503 dentry->prev->next = dentry->next;
505 dentry->next->prev = dentry->prev;
506 if (dir->value.directory.dentries_head == dentry)
507 dir->value.directory.dentries_head = dentry->next;
508 if (dir->value.directory.dentries_tail == dentry)
509 dir->value.directory.dentries_tail = dentry->prev;
511 CSR1212_FREE(dentry);
513 csr1212_release_keyval(kv);
516 /* This function is used to free the memory taken by a keyval. If the given
517 * keyval is a directory type, then any keyvals contained in that directory
518 * will be destroyed as well if their respective refcnts are 0. By means of
519 * list manipulation, this routine will descend a directory structure in a
520 * non-recursive manner. */
521 static void csr1212_destroy_keyval(struct csr1212_keyval *kv)
523 struct csr1212_keyval *k, *a;
/* Stack-allocated sentinel dentry anchoring the destruction work list. */
524 struct csr1212_dentry dentry;
525 struct csr1212_dentry *head, *tail;
545 if (k->key.type == CSR1212_KV_TYPE_DIRECTORY) {
546 /* If the current entry is a directory, then move all
547 * the entries to the destruction list. */
548 if (k->value.directory.dentries_head) {
549 tail->next = k->value.directory.dentries_head;
550 k->value.directory.dentries_head->prev = tail;
551 tail = k->value.directory.dentries_tail;
/* Free already-processed dentries, but never the on-stack sentinel. */
560 if (head->prev && head->prev != &dentry) {
561 CSR1212_FREE(head->prev);
564 } else if (tail != &dentry)
/* Drop a reference on @kv and destroy it when no references remain
 * (the refcount decrement/test is not visible in this chunk). */
569 void csr1212_release_keyval(struct csr1212_keyval *kv)
574 csr1212_destroy_keyval(kv);
/* Tear down an entire CSR: release the root keyval tree, then (in code
 * not visible here) free the rom caches and their cache regions. */
577 void csr1212_destroy_csr(struct csr1212_csr *csr)
579 struct csr1212_csr_rom_cache *c, *oc;
580 struct csr1212_cache_region *cr, *ocr;
582 csr1212_release_keyval(csr->root_kv);
601 /* CSR Image Creation */
/* Allocate a new Extended ROM cache of at least @romsize bytes (rounded
 * up to a multiple of csr->max_rom), obtain an address range from the bus
 * ops, create its Extended_ROM leaf, attach that leaf to the root
 * directory, and append the cache to the cache list.  Every failure path
 * releases the address range again. */
603 static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
605 struct csr1212_csr_rom_cache *cache;
608 BUG_ON(!csr || !csr->ops || !csr->ops->allocate_addr_range ||
609 !csr->ops->release_addr || csr->max_rom < 1);
611 /* ROM size must be a multiple of csr->max_rom */
612 romsize = (romsize + (csr->max_rom - 1)) & ~(csr->max_rom - 1);
614 csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom, csr->private);
615 if (csr_addr == CSR1212_INVALID_ADDR_SPACE) {
618 if (csr_addr < CSR1212_REGISTER_SPACE_BASE) {
619 /* Invalid address returned from allocate_addr_range(). */
620 csr->ops->release_addr(csr_addr, csr->private);
624 cache = csr1212_rom_cache_malloc(csr_addr - CSR1212_REGISTER_SPACE_BASE, romsize);
626 csr->ops->release_addr(csr_addr, csr->private);
630 cache->ext_rom = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, CSR1212_KV_ID_EXTENDED_ROM);
631 if (!cache->ext_rom) {
632 csr->ops->release_addr(csr_addr, csr->private);
637 if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) != CSR1212_SUCCESS) {
638 csr1212_release_keyval(cache->ext_rom);
639 csr->ops->release_addr(csr_addr, csr->private);
/* leaf.len of -1 marks the length as not yet laid out; the leaf's data
 * aliases the cache buffer (not separately owned). */
643 cache->ext_rom->offset = csr_addr - CSR1212_REGISTER_SPACE_BASE;
644 cache->ext_rom->value.leaf.len = -1;
645 cache->ext_rom->value.leaf.data = cache->data;
647 /* Add cache to tail of cache list */
648 cache->prev = csr->cache_tail;
649 csr->cache_tail->next = cache;
650 csr->cache_tail = cache;
651 return CSR1212_SUCCESS;
/* Unlink @cache from the csr's cache list (patching head/tail) and, if it
 * is an Extended ROM cache, detach and release its Extended_ROM leaf. */
654 static void csr1212_remove_cache(struct csr1212_csr *csr,
655 struct csr1212_csr_rom_cache *cache)
657 if (csr->cache_head == cache)
658 csr->cache_head = cache->next;
659 if (csr->cache_tail == cache)
660 csr->cache_tail = cache->prev;
663 cache->prev->next = cache->next;
665 cache->next->prev = cache->prev;
667 if (cache->ext_rom) {
668 csr1212_detach_keyval_from_directory(csr->root_kv, cache->ext_rom);
669 csr1212_release_keyval(cache->ext_rom);
/* Walk @dir's entries (and their associate chains) and append every leaf
 * and sub-directory to the flat layout list ending at *layout_tail.
 * Duplicate Extended_Key_Specifier_ID / Extended_Key immediates are
 * tracked so repeated values need not be re-emitted.  Extended ROM
 * leaves are excluded from the layout list (laid out separately).
 * Presumably returns the directory length in quadlets -- the return is
 * not visible in this chunk. */
675 static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
676 struct csr1212_keyval **layout_tail)
678 struct csr1212_dentry *dentry;
679 struct csr1212_keyval *dkv;
680 struct csr1212_keyval *last_extkey_spec = NULL;
681 struct csr1212_keyval *last_extkey = NULL;
684 for (dentry = dir->value.directory.dentries_head; dentry;
685 dentry = dentry->next) {
686 for (dkv = dentry->kv; dkv; dkv = dkv->associate) {
687 /* Special Case: Extended Key Specifier_ID */
688 if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
689 if (last_extkey_spec == NULL) {
690 last_extkey_spec = dkv;
691 } else if (dkv->value.immediate != last_extkey_spec->value.immediate) {
692 last_extkey_spec = dkv;
696 /* Special Case: Extended Key */
697 } else if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
698 if (last_extkey == NULL) {
700 } else if (dkv->value.immediate != last_extkey->value.immediate) {
709 switch(dkv->key.type) {
711 case CSR1212_KV_TYPE_IMMEDIATE:
712 case CSR1212_KV_TYPE_CSR_OFFSET:
714 case CSR1212_KV_TYPE_LEAF:
715 case CSR1212_KV_TYPE_DIRECTORY:
716 /* Remove from list */
717 if (dkv->prev && (dkv->prev->next == dkv))
718 dkv->prev->next = dkv->next;
719 if (dkv->next && (dkv->next->prev == dkv))
720 dkv->next->prev = dkv->prev;
721 //if (dkv == *layout_tail)
722 // *layout_tail = dkv->prev;
724 /* Special case: Extended ROM leafs */
725 if (dkv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
726 dkv->value.leaf.len = -1;
727 /* Don't add Extended ROM leafs in the layout list,
728 * they are handled differently. */
732 /* Add to tail of list */
734 dkv->prev = *layout_tail;
735 (*layout_tail)->next = dkv;
/* Walk the layout list starting at @kv, laying out each sub-directory
 * via csr1212_generate_layout_subdir() and summing the aggregate size.
 * Each leaf/directory costs its length plus 1 quadlet for the crc/len
 * header.  Returns the total size in bytes. */
744 static size_t csr1212_generate_layout_order(struct csr1212_keyval *kv)
746 struct csr1212_keyval *ltail = kv;
750 switch(kv->key.type) {
751 case CSR1212_KV_TYPE_LEAF:
752 /* Add 1 quadlet for crc/len field */
753 agg_size += kv->value.leaf.len + 1;
756 case CSR1212_KV_TYPE_DIRECTORY:
757 kv->value.directory.len = csr1212_generate_layout_subdir(kv, &ltail);
758 /* Add 1 quadlet for crc/len field */
759 agg_size += kv->value.directory.len + 1;
764 return quads_to_bytes(agg_size);
/* Assign ROM offsets to keyvals from @start_kv onward, packing them into
 * @cache beginning at byte position @start_pos until the cache is full.
 * Records the cache's layout head/tail and final length.  Presumably
 * returns the first keyval that did not fit (NULL if all fit) -- the
 * return statement is not visible in this chunk. */
767 static struct csr1212_keyval *
768 csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
769 struct csr1212_keyval *start_kv, int start_pos)
771 struct csr1212_keyval *kv = start_kv;
772 struct csr1212_keyval *okv = start_kv;
774 int kv_len = 0, okv_len = 0;
776 cache->layout_head = kv;
778 while(kv && pos < cache->size) {
779 /* Special case: Extended ROM leafs */
780 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
781 kv->offset = cache->offset + pos;
784 switch(kv->key.type) {
785 case CSR1212_KV_TYPE_LEAF:
786 kv_len = kv->value.leaf.len;
789 case CSR1212_KV_TYPE_DIRECTORY:
790 kv_len = kv->value.directory.len;
794 /* Should never get here */
/* Advance past this entry's payload plus its crc/len header quadlet. */
798 pos += quads_to_bytes(kv_len + 1);
800 if (pos <= cache->size) {
807 cache->layout_tail = okv;
808 cache->len = (okv->offset - cache->offset) + quads_to_bytes(okv_len + 1);
/* Directory entry quadlet layout: key id in bits 30..24, key type in
 * bits 31..30 (after the additional type shift), value in low 24 bits. */
813 #define CSR1212_KV_KEY_SHIFT 24
814 #define CSR1212_KV_KEY_TYPE_SHIFT 6
815 #define CSR1212_KV_KEY_ID_MASK 0x3f
816 #define CSR1212_KV_KEY_TYPE_MASK 0x3 /* after shift */
/* Serialize @dir's entries into @data_buffer as big-endian quadlets.
 * Immediates/CSR offsets embed their value directly; leaf and directory
 * entries embed the quadlet offset from the entry's own position to the
 * target.  Duplicate extended-key immediates are skipped via
 * last_extkey_spec/last_extkey, mirroring the layout pass. */
819 csr1212_generate_tree_subdir(struct csr1212_keyval *dir, u32 *data_buffer)
821 struct csr1212_dentry *dentry;
822 struct csr1212_keyval *last_extkey_spec = NULL;
823 struct csr1212_keyval *last_extkey = NULL;
826 for (dentry = dir->value.directory.dentries_head; dentry; dentry = dentry->next) {
827 struct csr1212_keyval *a;
829 for (a = dentry->kv; a; a = a->associate) {
832 /* Special Case: Extended Key Specifier_ID */
833 if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
834 if (last_extkey_spec == NULL) {
835 last_extkey_spec = a;
836 } else if (a->value.immediate != last_extkey_spec->value.immediate) {
837 last_extkey_spec = a;
841 /* Special Case: Extended Key */
842 } else if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
843 if (last_extkey == NULL) {
845 } else if (a->value.immediate != last_extkey->value.immediate) {
852 switch(a->key.type) {
853 case CSR1212_KV_TYPE_IMMEDIATE:
854 value = a->value.immediate;
856 case CSR1212_KV_TYPE_CSR_OFFSET:
857 value = a->value.csr_offset;
859 case CSR1212_KV_TYPE_LEAF:
/* Relative offset: target minus position of this entry quadlet. */
861 value -= dir->offset + quads_to_bytes(1+index);
862 value = bytes_to_quads(value);
864 case CSR1212_KV_TYPE_DIRECTORY:
866 value -= dir->offset + quads_to_bytes(1+index);
867 value = bytes_to_quads(value);
870 /* Should never get here */
871 break; /* GDB breakpoint */
874 value |= (a->key.id & CSR1212_KV_KEY_ID_MASK) << CSR1212_KV_KEY_SHIFT;
875 value |= (a->key.type & CSR1212_KV_KEY_TYPE_MASK) <<
876 (CSR1212_KV_KEY_SHIFT + CSR1212_KV_KEY_TYPE_SHIFT);
877 data_buffer[index] = cpu_to_be32(value);
/* On-ROM image of a keyval: 16-bit length (quadlets), 16-bit CRC, then
 * the payload quadlets. */
883 struct csr1212_keyval_img {
888 u32 data[0]; /* older gcc can't handle [] which is standard */
/* Write each keyval on @cache's layout list into the cache buffer at its
 * assigned offset: copy/serialize the payload, then fill in the length
 * and CRC header.  Extended ROM leaf payloads are skipped (the cache
 * buffer is already their backing store).  The list links are severed
 * (kv->prev/next not visible in full) as entries are emitted. */
891 static void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
893 struct csr1212_keyval *kv, *nkv;
894 struct csr1212_keyval_img *kvi;
896 for (kv = cache->layout_head; kv != cache->layout_tail->next; kv = nkv) {
897 kvi = (struct csr1212_keyval_img *)
898 (cache->data + bytes_to_quads(kv->offset - cache->offset));
899 switch(kv->key.type) {
901 case CSR1212_KV_TYPE_IMMEDIATE:
902 case CSR1212_KV_TYPE_CSR_OFFSET:
903 /* Should never get here */
904 break; /* GDB breakpoint */
906 case CSR1212_KV_TYPE_LEAF:
907 /* Don't copy over Extended ROM areas, they are
908 * already filled out! */
909 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
910 memcpy(kvi->data, kv->value.leaf.data,
911 quads_to_bytes(kv->value.leaf.len));
913 kvi->length = cpu_to_be16(kv->value.leaf.len);
914 kvi->crc = csr1212_crc16(kvi->data, kv->value.leaf.len);
917 case CSR1212_KV_TYPE_DIRECTORY:
918 csr1212_generate_tree_subdir(kv, kvi->data);
920 kvi->length = cpu_to_be16(kv->value.directory.len);
921 kvi->crc = csr1212_crc16(kvi->data, kv->value.directory.len);
927 kv->prev->next = NULL;
929 kv->next->prev = NULL;
935 /* This size is arbitrarily chosen.
936 * The struct overhead is subtracted for more economic allocations. */
937 #define CSR1212_EXTENDED_ROM_SIZE (2048 - sizeof(struct csr1212_csr_rom_cache))
/* Build the complete Config ROM image: write the bus info block header
 * and CRC, lay out all keyvals across the head cache plus as many
 * Extended ROM caches as needed (growing and re-laying-out on demand),
 * trim unused caches, then fill each cache's bytes and finally compute
 * each Extended ROM's own length/CRC header (tail-to-head so inner CRCs
 * are final before outer ones are computed). */
939 int csr1212_generate_csr_image(struct csr1212_csr *csr)
941 struct csr1212_bus_info_block_img *bi;
942 struct csr1212_csr_rom_cache *cache;
943 struct csr1212_keyval *kv;
950 cache = csr->cache_head;
952 bi = (struct csr1212_bus_info_block_img*)cache->data;
954 bi->length = bytes_to_quads(csr->bus_info_len) - 1;
955 bi->crc_length = bi->length;
956 bi->crc = csr1212_crc16(bi->data, bi->crc_length);
958 csr->root_kv->next = NULL;
959 csr->root_kv->prev = NULL;
961 agg_size = csr1212_generate_layout_order(csr->root_kv);
963 init_offset = csr->bus_info_len;
965 for (kv = csr->root_kv, cache = csr->cache_head; kv; cache = cache->next) {
967 /* Estimate approximate number of additional cache
968 * regions needed (it assumes that the cache holding
969 * the first 1K Config ROM space always exists). */
970 int est_c = agg_size / (CSR1212_EXTENDED_ROM_SIZE -
971 (2 * sizeof(u32))) + 1;
973 /* Add additional cache regions, extras will be
975 for (; est_c; est_c--) {
976 ret = csr1212_append_new_cache(csr, CSR1212_EXTENDED_ROM_SIZE);
977 if (ret != CSR1212_SUCCESS)
980 /* Need to re-layout for additional cache regions */
981 agg_size = csr1212_generate_layout_order(csr->root_kv);
983 cache = csr->cache_head;
984 init_offset = csr->bus_info_len;
/* After the head cache, each subsequent cache reserves one quadlet
 * for the Extended ROM leaf header. */
986 kv = csr1212_generate_positions(cache, kv, init_offset);
987 agg_size -= cache->len;
988 init_offset = sizeof(u32);
991 /* Remove unused, excess cache regions */
993 struct csr1212_csr_rom_cache *oc = cache;
996 csr1212_remove_cache(csr, oc);
999 /* Go through the list backward so that when done, the correct CRC
1000 * will be calculated for the Extended ROM areas. */
1001 for(cache = csr->cache_tail; cache; cache = cache->prev) {
1002 /* Only Extended ROM caches should have this set. */
1003 if (cache->ext_rom) {
1006 /* Make sure the Extended ROM leaf is a multiple of
1007 * max_rom in size. */
1008 BUG_ON(csr->max_rom < 1);
1009 leaf_size = (cache->len + (csr->max_rom - 1)) &
1010 ~(csr->max_rom - 1);
1012 /* Zero out the unused ROM region */
1013 memset(cache->data + bytes_to_quads(cache->len), 0x00,
1014 leaf_size - cache->len);
1016 /* Subtract leaf header */
1017 leaf_size -= sizeof(u32);
1019 /* Update the Extended ROM leaf length */
1020 cache->ext_rom->value.leaf.len =
1021 bytes_to_quads(leaf_size);
1023 /* Zero out the unused ROM region */
1024 memset(cache->data + bytes_to_quads(cache->len), 0x00,
1025 cache->size - cache->len);
1028 /* Copy the data into the cache buffer */
1029 csr1212_fill_cache(cache);
1031 if (cache != csr->cache_head) {
1032 /* Set the length and CRC of the extended ROM. */
1033 struct csr1212_keyval_img *kvi =
1034 (struct csr1212_keyval_img*)cache->data;
1035 u16 len = bytes_to_quads(cache->len) - 1;
1037 kvi->length = cpu_to_be16(len);
1038 kvi->crc = csr1212_crc16(kvi->data, len);
1042 return CSR1212_SUCCESS;
/* Copy @len bytes of the generated ROM image at @offset into @buffer.
 * Only succeeds if the requested range lies entirely within a single
 * cache; otherwise falls through to an error return (not visible). */
1045 int csr1212_read(struct csr1212_csr *csr, u32 offset, void *buffer, u32 len)
1047 struct csr1212_csr_rom_cache *cache;
1049 for (cache = csr->cache_head; cache; cache = cache->next) {
1050 if (offset >= cache->offset &&
1051 (offset + len) <= (cache->offset + cache->size)) {
1053 &cache->data[bytes_to_quads(offset - cache->offset)],
1055 return CSR1212_SUCCESS;
1062 /* Parse a chunk of data as a Config ROM */
/* Read and validate the remote node's bus info block: read it one
 * quadlet at a time (many devices violate the single-transaction rule),
 * check the ROM header's info_length, read the remainder of the CRC-
 * covered area, verify the CRC (accepting Microsoft's byte-reversed
 * variant), and record the filled region of the head cache. */
1064 static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
1066 struct csr1212_bus_info_block_img *bi;
1067 struct csr1212_cache_region *cr;
1071 /* IEEE 1212 says that the entire bus info block should be readable in
1072 * a single transaction regardless of the max_rom value.
1073 * Unfortunately, many IEEE 1394 devices do not abide by that, so the
1074 * bus info block will be read 1 quadlet at a time. The rest of the
1075 * ConfigROM will be read according to the max_rom field. */
1076 for (i = 0; i < csr->bus_info_len; i += sizeof(u32)) {
1077 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
1079 &csr->cache_head->data[bytes_to_quads(i)],
1081 if (ret != CSR1212_SUCCESS)
1084 /* check ROM header's info_length */
1086 be32_to_cpu(csr->cache_head->data[0]) >> 24 !=
1087 bytes_to_quads(csr->bus_info_len) - 1
1091 bi = (struct csr1212_bus_info_block_img*)csr->cache_head->data;
1092 csr->crc_len = quads_to_bytes(bi->crc_length);
1094 /* IEEE 1212 recommends that crc_len be equal to bus_info_len, but that is not
1095 * always the case, so read the rest of the crc area 1 quadlet at a time. */
1096 for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(u32)) {
1097 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
1099 &csr->cache_head->data[bytes_to_quads(i)],
1101 if (ret != CSR1212_SUCCESS)
1106 /* Apparently there are too many different wrong implementations of the
1107 * CRC algorithm that verifying them is moot. */
1108 if ((csr1212_crc16(bi->data, bi->crc_length) != bi->crc) &&
1109 (csr1212_msft_crc16(bi->data, bi->crc_length) != bi->crc))
1113 cr = CSR1212_MALLOC(sizeof(*cr));
1119 cr->offset_start = 0;
1120 cr->offset_end = csr->crc_len + 4;
1122 csr->cache_head->filled_head = cr;
1123 csr->cache_head->filled_tail = cr;
1125 return CSR1212_SUCCESS;
/* Decode the key id, key type and 24-bit value from a big-endian
 * directory entry quadlet @q. */
1128 #define CSR1212_KV_KEY(q) (be32_to_cpu(q) >> CSR1212_KV_KEY_SHIFT)
1129 #define CSR1212_KV_KEY_TYPE(q) (CSR1212_KV_KEY(q) >> CSR1212_KV_KEY_TYPE_SHIFT)
1130 #define CSR1212_KV_KEY_ID(q) (CSR1212_KV_KEY(q) & CSR1212_KV_KEY_ID_MASK)
1131 #define CSR1212_KV_VAL_MASK 0xffffff
1132 #define CSR1212_KV_VAL(q) (be32_to_cpu(q) & CSR1212_KV_VAL_MASK)
/* Parse one directory entry quadlet @ki found at ROM position @kv_pos
 * (parameter list partly not visible) and attach the resulting keyval to
 * @dir.  Immediates and CSR offsets create fresh keyvals; leaf and
 * directory entries first search the existing list by absolute offset,
 * creating a placeholder (valid = 0) only when no keyval exists yet. */
1134 static int csr1212_parse_dir_entry(struct csr1212_keyval *dir,
1137 int ret = CSR1212_SUCCESS;
1138 struct csr1212_keyval *k = NULL;
1141 switch(CSR1212_KV_KEY_TYPE(ki)) {
1142 case CSR1212_KV_TYPE_IMMEDIATE:
1143 k = csr1212_new_immediate(CSR1212_KV_KEY_ID(ki),
1144 CSR1212_KV_VAL(ki));
1150 k->refcnt = 0; /* Don't keep local reference when parsing. */
1153 case CSR1212_KV_TYPE_CSR_OFFSET:
1154 k = csr1212_new_csr_offset(CSR1212_KV_KEY_ID(ki),
1155 CSR1212_KV_VAL(ki));
1160 k->refcnt = 0; /* Don't keep local reference when parsing. */
1164 /* Compute the offset from 0xffff f000 0000. */
1165 offset = quads_to_bytes(CSR1212_KV_VAL(ki)) + kv_pos;
1166 if (offset == kv_pos) {
1167 /* Uh-oh. Can't have a relative offset of 0 for Leaves
1168 * or Directories. The Config ROM image is most likely
1169 * messed up, so we'll just abort here. */
1174 k = csr1212_find_keyval_offset(dir, offset);
1177 break; /* Found it. */
1179 if (CSR1212_KV_KEY_TYPE(ki) == CSR1212_KV_TYPE_DIRECTORY) {
1180 k = csr1212_new_directory(CSR1212_KV_KEY_ID(ki));
1182 k = csr1212_new_leaf(CSR1212_KV_KEY_ID(ki), NULL, 0);
1188 k->refcnt = 0; /* Don't keep local reference when parsing. */
1189 k->valid = 0; /* Contents not read yet so it's not valid. */
/* Insert the new keyval into the circular offset-search list. */
1193 k->next = dir->next;
1194 dir->next->prev = k;
1197 ret = csr1212_attach_keyval_to_directory(dir, k);
1200 if (ret != CSR1212_SUCCESS && k != NULL)
/* Parse the keyval image for @kv out of @cache: verify its CRC (either
 * variant), then either parse each entry of a directory or copy a leaf's
 * payload into a fresh buffer (Extended ROM leaves excluded: their data
 * stays in the cache). */
1205 int csr1212_parse_keyval(struct csr1212_keyval *kv,
1206 struct csr1212_csr_rom_cache *cache)
1208 struct csr1212_keyval_img *kvi;
1210 int ret = CSR1212_SUCCESS;
1213 kvi = (struct csr1212_keyval_img*)&cache->data[bytes_to_quads(kv->offset -
1215 kvi_len = be16_to_cpu(kvi->length);
1218 /* Apparently there are too many different wrong implementations of the
1219 * CRC algorithm that verifying them is moot. */
1220 if ((csr1212_crc16(kvi->data, kvi_len) != kvi->crc) &&
1221 (csr1212_msft_crc16(kvi->data, kvi_len) != kvi->crc)) {
1227 switch(kv->key.type) {
1228 case CSR1212_KV_TYPE_DIRECTORY:
1229 for (i = 0; i < kvi_len; i++) {
1230 u32 ki = kvi->data[i];
1232 /* Some devices put null entries in their unit
1233 * directories. If we come across such an entry,
1237 ret = csr1212_parse_dir_entry(kv, ki,
1239 quads_to_bytes(i + 1)));
1241 kv->value.directory.len = kvi_len;
1244 case CSR1212_KV_TYPE_LEAF:
1245 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
1246 kv->value.leaf.data = CSR1212_MALLOC(quads_to_bytes(kvi_len));
1247 if (!kv->value.leaf.data) {
1252 kv->value.leaf.len = kvi_len;
1253 memcpy(kv->value.leaf.data, kvi->data, quads_to_bytes(kvi_len));
/* Ensure the ROM bytes backing @kv are present in a cache, reading them
 * from the bus as needed, then parse the keyval.  Maintains per-cache
 * "filled region" lists so previously read ranges are not re-read, and
 * creates a new cache on demand for Extended ROM leaves.  Falls back to
 * quadlet-sized reads if a max_rom-sized read fails. */
1265 csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1267 struct csr1212_cache_region *cr, *ncr, *newcr = NULL;
1268 struct csr1212_keyval_img *kvi = NULL;
1269 struct csr1212_csr_rom_cache *cache;
1275 BUG_ON(!csr || !kv || csr->max_rom < 1);
1277 /* First find which cache the data should be in (or go in if not read
1279 for (cache = csr->cache_head; cache; cache = cache->next) {
1280 if (kv->offset >= cache->offset &&
1281 kv->offset < (cache->offset + cache->size))
1288 /* Only create a new cache for Extended ROM leaves. */
1289 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
/* Read the leaf header quadlet to learn the Extended ROM's length. */
1292 if (csr->ops->bus_read(csr,
1293 CSR1212_REGISTER_SPACE_BASE + kv->offset,
1294 sizeof(u32), &q, csr->private)) {
1298 kv->value.leaf.len = be32_to_cpu(q) >> 16;
1300 cache_size = (quads_to_bytes(kv->value.leaf.len + 1) +
1301 (csr->max_rom - 1)) & ~(csr->max_rom - 1);
1303 cache = csr1212_rom_cache_malloc(kv->offset, cache_size);
/* The leaf's payload aliases the cache buffer just past the header. */
1307 kv->value.leaf.data = &cache->data[1];
1308 csr->cache_tail->next = cache;
1309 cache->prev = csr->cache_tail;
1311 csr->cache_tail = cache;
1312 cache->filled_head =
1313 CSR1212_MALLOC(sizeof(*cache->filled_head));
1314 if (!cache->filled_head) {
1318 cache->filled_head->offset_start = 0;
1319 cache->filled_head->offset_end = sizeof(u32);
1320 cache->filled_tail = cache->filled_head;
1321 cache->filled_head->next = NULL;
1322 cache->filled_head->prev = NULL;
1325 /* Don't read the entire extended ROM now. Pieces of it will
1326 * be read when entries inside it are read. */
1327 return csr1212_parse_keyval(kv, cache);
1330 cache_index = kv->offset - cache->offset;
1332 /* Now search read portions of the cache to see if it is there. */
1333 for (cr = cache->filled_head; cr; cr = cr->next) {
1334 if (cache_index < cr->offset_start) {
1335 newcr = CSR1212_MALLOC(sizeof(*newcr));
/* New region aligned down to a max_rom boundary, initially empty. */
1339 newcr->offset_start = cache_index & ~(csr->max_rom - 1);
1340 newcr->offset_end = newcr->offset_start;
1342 newcr->prev = cr->prev;
1346 } else if ((cache_index >= cr->offset_start) &&
1347 (cache_index < cr->offset_end)) {
1348 kvi = (struct csr1212_keyval_img*)
1349 (&cache->data[bytes_to_quads(cache_index)]);
1350 kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);
1352 } else if (cache_index == cr->offset_end)
1357 cr = cache->filled_tail;
1358 newcr = CSR1212_MALLOC(sizeof(*newcr));
1362 newcr->offset_start = cache_index & ~(csr->max_rom - 1);
1363 newcr->offset_end = newcr->offset_start;
1365 newcr->next = cr->next;
1368 cache->filled_tail = newcr;
/* Read until the keyval's full image (header + payload) is covered. */
1371 while(!kvi || cr->offset_end < cache_index + kv_len) {
1372 cache_ptr = &cache->data[bytes_to_quads(cr->offset_end &
1373 ~(csr->max_rom - 1))];
1375 addr = (CSR1212_CSR_ARCH_REG_SPACE_BASE + cache->offset +
1376 cr->offset_end) & ~(csr->max_rom - 1);
1378 if (csr->ops->bus_read(csr, addr, csr->max_rom, cache_ptr,
1380 if (csr->max_rom == 4)
1381 /* We've got problems! */
1384 /* Apparently the max_rom value was a lie, set it to
1385 * do quadlet reads and try again. */
1390 cr->offset_end += csr->max_rom - (cr->offset_end &
1391 (csr->max_rom - 1));
1393 if (!kvi && (cr->offset_end > cache_index)) {
1394 kvi = (struct csr1212_keyval_img*)
1395 (&cache->data[bytes_to_quads(cache_index)]);
1396 kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);
1399 if ((kv_len + (kv->offset - cache->offset)) > cache->size) {
1400 /* The Leaf or Directory claims its length extends
1401 * beyond the ConfigROM image region and thus beyond the
1402 * end of our cache region. Therefore, we abort now
1403 * rather than seg faulting later. */
1409 if (ncr && (cr->offset_end >= ncr->offset_start)) {
1410 /* consolidate region entries */
1411 ncr->offset_start = cr->offset_start;
1414 cr->prev->next = cr->next;
1415 ncr->prev = cr->prev;
1416 if (cache->filled_head == cr)
1417 cache->filled_head = ncr;
1423 return csr1212_parse_keyval(kv, cache);
/* Return @kv, reading its contents from the bus first if necessary.
 * Presumably returns NULL on read failure (not visible in this chunk). */
1426 struct csr1212_keyval *
1427 csr1212_get_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1432 if (csr1212_read_keyval(csr, kv) != CSR1212_SUCCESS)
/* Top-level remote ROM parse: read and check the bus info block,
 * determine max_rom (defaulting to 4-byte reads when the bus ops cannot
 * report it), read and parse the root directory, then pre-read every
 * Extended ROM referenced from the root so their cache regions exist. */
1437 int csr1212_parse_csr(struct csr1212_csr *csr)
1439 static const int mr_map[] = { 4, 64, 1024, 0 };
1440 struct csr1212_dentry *dentry;
1443 BUG_ON(!csr || !csr->ops || !csr->ops->bus_read);
1445 ret = csr1212_parse_bus_info_block(csr);
1446 if (ret != CSR1212_SUCCESS)
1449 if (!csr->ops->get_max_rom)
1450 csr->max_rom = mr_map[0]; /* default value */
1452 int i = csr->ops->get_max_rom(csr->bus_info_data,
1456 csr->max_rom = mr_map[i];
1459 csr->cache_head->layout_head = csr->root_kv;
1460 csr->cache_head->layout_tail = csr->root_kv;
1462 csr->root_kv->offset = (CSR1212_CONFIG_ROM_SPACE_BASE & 0xffff) +
/* Root starts as its own one-element circular list for offset lookups. */
1465 csr->root_kv->valid = 0;
1466 csr->root_kv->next = csr->root_kv;
1467 csr->root_kv->prev = csr->root_kv;
1468 ret = csr1212_read_keyval(csr, csr->root_kv);
1469 if (ret != CSR1212_SUCCESS)
1472 /* Scan through the Root directory finding all extended ROM regions
1473 * and make cache regions for them */
1474 for (dentry = csr->root_kv->value.directory.dentries_head;
1475 dentry; dentry = dentry->next) {
1476 if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM &&
1477 !dentry->kv->valid) {
1478 ret = csr1212_read_keyval(csr, dentry->kv);
1479 if (ret != CSR1212_SUCCESS)
1484 return CSR1212_SUCCESS;