2 * csr1212.c -- IEEE 1212 Control and Status Register support for Linux
4 * Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
5 * Steve Kinneberg <kinnebergsteve@acmsystems.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
21 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
27 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * - Verify interface consistency: i.e., public functions that take a size
33 * parameter expect size to be in bytes.
/* Bit masks naming which keyval types (immediate, CSR offset, directory,
 * leaf) are legal for each key id, per IEEE 1212.  Indexed by key id;
 * csr1212_new_keyval() consults this table for ids below 0x30. */
43 /* Permitted key type for each key id */
44 #define __I (1 << CSR1212_KV_TYPE_IMMEDIATE)
45 #define __C (1 << CSR1212_KV_TYPE_CSR_OFFSET)
46 #define __D (1 << CSR1212_KV_TYPE_DIRECTORY)
47 #define __L (1 << CSR1212_KV_TYPE_LEAF)
48 static const u_int8_t csr1212_key_id_type_map[0x30] = {
49 __C, /* used by Apple iSight */
50 __D | __L, /* Descriptor */
51 __I | __D | __L, /* Bus_Dependent_Info */
52 __I | __D | __L, /* Vendor */
53 __I, /* Hardware_Version */
55 __D | __L | __I, /* Module */
56 __I, 0, 0, 0, /* used by Apple iSight, Reserved */
57 __I, /* Node_Capabilities */
59 0, 0, 0, /* Reserved */
61 __I, /* Specifier_ID */
63 __I | __C | __D | __L, /* Dependent_Info */
64 __L, /* Unit_Location */
70 __L, /* Extended_ROM */
71 __I, /* Extended_Key_Specifier_ID */
72 __I, /* Extended_Key */
73 __I | __C | __D | __L, /* Extended_Data */
74 __L, /* Modifiable_Descriptor */
75 __I, /* Directory_ID */
/* Conversion helpers between quadlets (32-bit words) and bytes.
 * bytes_to_quads() rounds up to a whole quadlet. */
84 #define quads_to_bytes(_q) ((_q) * sizeof(u_int32_t))
85 #define bytes_to_quads(_b) (((_b) + sizeof(u_int32_t) - 1) / sizeof(u_int32_t))
/* Free a keyval's memory.  A leaf's data buffer is freed with it, except
 * for Extended ROM leaves, whose data aliases a cache buffer owned by
 * the csr1212_csr_rom_cache and must not be freed here. */
87 static void free_keyval(struct csr1212_keyval *kv)
89 if ((kv->key.type == CSR1212_KV_TYPE_LEAF) &&
90 (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM))
91 CSR1212_FREE(kv->value.leaf.data);
/* Compute the IEEE 1212 CRC-16 over 'length' quadlets, reading each
 * quadlet in big-endian (bus) order.  The result is returned already
 * converted to big-endian for direct placement in a ROM image header. */
96 static u_int16_t csr1212_crc16(const u_int32_t *buffer, size_t length)
100 u_int16_t sum, crc = 0;
102 for (; length; length--) {
103 data = CSR1212_BE32_TO_CPU(*buffer);
/* fold one nibble per iteration, most-significant nibble first */
105 for (shift = 28; shift >= 0; shift -= 4 ) {
106 sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
107 crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
112 return CSR1212_CPU_TO_BE16(crc);
116 /* Microsoft computes the CRC with the bytes in reverse order. Therefore we
117 * have a special version of the CRC algorithm to account for their buggy
/* Identical to csr1212_crc16() except each quadlet is read with
 * little-endian byte order (CSR1212_LE32_TO_CPU), matching the byte
 * ordering Microsoft's implementation uses.  Used only as a fallback
 * when validating CRCs of received ROM data. */
119 static u_int16_t csr1212_msft_crc16(const u_int32_t *buffer, size_t length)
123 u_int16_t sum, crc = 0;
125 for (; length; length--) {
126 data = CSR1212_LE32_TO_CPU(*buffer);
128 for (shift = 28; shift >= 0; shift -= 4 ) {
129 sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
130 crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
135 return CSR1212_CPU_TO_BE16(crc);
/* Linear search of a directory's dentry list for the entry referencing
 * 'kv'.  Returns the matching dentry, or NULL when 'kv' is not in the
 * directory (return statements fall outside this excerpt). */
139 static struct csr1212_dentry *
140 csr1212_find_keyval(struct csr1212_keyval *dir, struct csr1212_keyval *kv)
142 struct csr1212_dentry *pos;
144 for (pos = dir->value.directory.dentries_head;
145 pos != NULL; pos = pos->next) {
/* Search the keyval list for an entry at ROM 'offset'.  The loop starts
 * at kv_list->next and stops when it wraps back to kv_list, so the list
 * is treated as circular with kv_list as sentinel. */
152 static struct csr1212_keyval *
153 csr1212_find_keyval_offset(struct csr1212_keyval *kv_list, u_int32_t offset)
155 struct csr1212_keyval *kv;
157 for (kv = kv_list->next; kv && (kv != kv_list); kv = kv->next) {
158 if (kv->offset == offset)
165 /* Creation Routines */
/* Allocate and initialise a CSR instance: the head ROM cache covering
 * the Config ROM space, the root directory keyval, and the bus-info
 * bookkeeping.  'bus_info_size' is in bytes.  Returns NULL on
 * allocation failure (error-path returns fall outside this excerpt). */
167 struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
168 size_t bus_info_size, void *private)
170 struct csr1212_csr *csr;
172 csr = CSR1212_MALLOC(sizeof(*csr));
177 csr1212_rom_cache_malloc(CSR1212_CONFIG_ROM_SPACE_OFFSET,
178 CSR1212_CONFIG_ROM_SPACE_SIZE);
179 if (!csr->cache_head) {
184 /* The keyval key id is not used for the root node, but a valid key id
185 * that can be used for a directory needs to be passed to
186 * csr1212_new_directory(). */
187 csr->root_kv = csr1212_new_directory(CSR1212_KV_ID_VENDOR);
/* roll back the cache allocation when the root directory cannot be made */
189 CSR1212_FREE(csr->cache_head);
/* bus_info_data aliases the head cache buffer; crc_len initially covers
 * the whole bus info block */
194 csr->bus_info_data = csr->cache_head->data;
195 csr->bus_info_len = bus_info_size;
196 csr->crc_len = bus_info_size;
198 csr->private = private;
199 csr->cache_tail = csr->cache_head;
/* Prepare a locally-hosted CSR: decode the 2-bit max_rom field into a
 * byte count via mr_map and copy the caller's bus info block into the
 * CSR's buffer (bus_info_len bytes).
 * NOTE(review): max_rom is range-checked twice (BUG_ON and the runtime
 * test) and mr_map is indexed twice — this looks like two overlapping
 * revisions of the same validation; confirm against the full file. */
204 void csr1212_init_local_csr(struct csr1212_csr *csr,
205 const u_int32_t *bus_info_data, int max_rom)
207 static const int mr_map[] = { 4, 64, 1024, 0 };
210 BUG_ON(max_rom & ~0x3);
211 csr->max_rom = mr_map[max_rom];
213 if (max_rom & ~0x3) /* caller supplied invalid argument */
216 csr->max_rom = mr_map[max_rom];
218 memcpy(csr->bus_info_data, bus_info_data, csr->bus_info_len);
/* Allocate a keyval of the given type and key id.  Key ids below 0x30
 * are validated against csr1212_key_id_type_map; an illegal type/id
 * combination is rejected.  Ids >= 0x30 are accepted unconditionally. */
221 static struct csr1212_keyval *csr1212_new_keyval(u_int8_t type, u_int8_t key)
223 struct csr1212_keyval *kv;
225 if (key < 0x30 && ((csr1212_key_id_type_map[key] & (1 << type)) == 0))
228 kv = CSR1212_MALLOC(sizeof(*kv));
235 kv->associate = NULL;
/* Create an immediate-type keyval carrying a 24-bit 'value' for the
 * given key id.  Returns NULL if csr1212_new_keyval() rejects or fails. */
245 struct csr1212_keyval *csr1212_new_immediate(u_int8_t key, u_int32_t value)
247 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key);
252 kv->value.immediate = value;
/* Create a leaf keyval.  When 'data' is non-NULL, 'data_len' bytes are
 * copied into a freshly allocated buffer; otherwise leaf.data is left
 * NULL.  leaf.len is stored in quadlets (rounded up from data_len). */
257 static struct csr1212_keyval *
258 csr1212_new_leaf(u_int8_t key, const void *data, size_t data_len)
260 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, key);
266 kv->value.leaf.data = CSR1212_MALLOC(data_len);
267 if (!kv->value.leaf.data) {
273 memcpy(kv->value.leaf.data, data, data_len);
/* no payload supplied: leaf carries no data buffer */
275 kv->value.leaf.data = NULL;
278 kv->value.leaf.len = bytes_to_quads(data_len);
/* Create a CSR-offset keyval whose value points at register space
 * offset 'csr_offset'.  Returns NULL on rejection/allocation failure. */
285 static struct csr1212_keyval *
286 csr1212_new_csr_offset(u_int8_t key, u_int32_t csr_offset)
288 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_CSR_OFFSET, key);
293 kv->value.csr_offset = csr_offset;
/* Create an empty directory keyval: zero length and an empty dentry
 * list (head and tail both NULL). */
300 struct csr1212_keyval *csr1212_new_directory(u_int8_t key)
302 struct csr1212_keyval *kv = csr1212_new_keyval(CSR1212_KV_TYPE_DIRECTORY, key);
307 kv->value.directory.len = 0;
309 kv->value.directory.dentries_head = NULL;
310 kv->value.directory.dentries_tail = NULL;
/* Link 'associate' to 'kv' through kv->associate, enforcing the IEEE
 * 1212 pairing rules: a Descriptor cannot own an associate; only
 * Descriptor, Dependent_Info, Extended_Key, Extended_Data, or ids >=
 * 0x30 may be associates; Extended_Key must follow an
 * Extended_Key_Specifier_ID, and Extended_Data must follow an
 * Extended_Key.  Any previous associate is released.  Returns
 * CSR1212_SUCCESS or CSR1212_EINVAL. */
315 int csr1212_associate_keyval(struct csr1212_keyval *kv,
316 struct csr1212_keyval *associate)
318 if (!kv || !associate)
319 return CSR1212_EINVAL;
321 if (kv->key.id == CSR1212_KV_ID_DESCRIPTOR ||
322 (associate->key.id != CSR1212_KV_ID_DESCRIPTOR &&
323 associate->key.id != CSR1212_KV_ID_DEPENDENT_INFO &&
324 associate->key.id != CSR1212_KV_ID_EXTENDED_KEY &&
325 associate->key.id != CSR1212_KV_ID_EXTENDED_DATA &&
326 associate->key.id < 0x30))
327 return CSR1212_EINVAL;
329 if (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID &&
330 associate->key.id != CSR1212_KV_ID_EXTENDED_KEY)
331 return CSR1212_EINVAL;
333 if (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
334 associate->key.id != CSR1212_KV_ID_EXTENDED_DATA)
335 return CSR1212_EINVAL;
337 if (associate->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
338 kv->key.id != CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID)
339 return CSR1212_EINVAL;
341 if (associate->key.id == CSR1212_KV_ID_EXTENDED_DATA &&
342 kv->key.id != CSR1212_KV_ID_EXTENDED_KEY)
343 return CSR1212_EINVAL;
/* drop the reference to any associate being replaced */
346 csr1212_release_keyval(kv->associate);
349 kv->associate = associate;
351 return CSR1212_SUCCESS;
/* Append 'kv' to the tail of 'dir''s doubly-linked dentry list.
 * Returns CSR1212_EINVAL for NULL arguments or a non-directory 'dir',
 * CSR1212_ENOMEM when the dentry cannot be allocated. */
354 int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
355 struct csr1212_keyval *kv)
357 struct csr1212_dentry *dentry;
359 if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
360 return CSR1212_EINVAL;
362 dentry = CSR1212_MALLOC(sizeof(*dentry));
364 return CSR1212_ENOMEM;
/* link at tail; head is set only when the list was empty */
371 dentry->prev = dir->value.directory.dentries_tail;
373 if (!dir->value.directory.dentries_head)
374 dir->value.directory.dentries_head = dentry;
376 if (dir->value.directory.dentries_tail)
377 dir->value.directory.dentries_tail->next = dentry;
378 dir->value.directory.dentries_tail = dentry;
380 return CSR1212_SUCCESS;
/* Accessors for a Descriptor leaf.  Quadlet 0 packs the descriptor type
 * and specifier_ID; the SET macros rebuild that quadlet by combining the
 * field being written with the other field read back from the leaf,
 * storing the result big-endian.  DATA points past the header quadlet. */
383 #define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \
384 (&((kv)->value.leaf.data[1]))
386 #define CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, type) \
387 ((kv)->value.leaf.data[0] = \
388 CSR1212_CPU_TO_BE32(CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) | \
389 ((type) << CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)))
390 #define CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, spec_id) \
391 ((kv)->value.leaf.data[0] = \
392 CSR1212_CPU_TO_BE32((CSR1212_DESCRIPTOR_LEAF_TYPE(kv) << \
393 CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT) | \
394 ((spec_id) & CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)))
/* Create a Descriptor leaf: allocate the leaf with room for the
 * descriptor header (CSR1212_DESCRIPTOR_LEAF_OVERHEAD) plus 'data_len'
 * bytes of payload, set the type and specifier_ID fields, then copy the
 * payload in after the header. */
396 static struct csr1212_keyval *
397 csr1212_new_descriptor_leaf(u_int8_t dtype, u_int32_t specifier_id,
398 const void *data, size_t data_len)
400 struct csr1212_keyval *kv;
402 kv = csr1212_new_leaf(CSR1212_KV_ID_DESCRIPTOR, NULL,
403 data_len + CSR1212_DESCRIPTOR_LEAF_OVERHEAD);
407 CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
408 CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);
411 memcpy(CSR1212_DESCRIPTOR_LEAF_DATA(kv), data, data_len);
/* Field setters for quadlet 1 of a textual descriptor leaf (character
 * width, character set, language id).  Each masks its field out of the
 * stored big-endian quadlet and ORs in the new value, leaving the other
 * fields untouched. */
417 #define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, width) \
418 ((kv)->value.leaf.data[1] = \
419 ((kv)->value.leaf.data[1] & \
420 CSR1212_CPU_TO_BE32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK << \
421 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT))) | \
422 CSR1212_CPU_TO_BE32(((width) & \
423 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_MASK) << \
424 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_WIDTH_SHIFT))
426 #define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, char_set) \
427 ((kv)->value.leaf.data[1] = \
428 ((kv)->value.leaf.data[1] & \
429 CSR1212_CPU_TO_BE32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK << \
430 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT))) | \
431 CSR1212_CPU_TO_BE32(((char_set) & \
432 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_MASK) << \
433 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_CHAR_SET_SHIFT))
435 #define CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language) \
436 ((kv)->value.leaf.data[1] = \
437 ((kv)->value.leaf.data[1] & \
438 CSR1212_CPU_TO_BE32(~(CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK))) | \
439 CSR1212_CPU_TO_BE32(((language) & \
440 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_LANGUAGE_MASK)))
/* Create a textual descriptor leaf (descriptor type 0, specifier 0):
 * set width/charset/language, then copy 'data_len' bytes of text into
 * the payload.  The final quadlet is zeroed first so any padding bytes
 * after the text are NUL. */
442 static struct csr1212_keyval *
443 csr1212_new_textual_descriptor_leaf(u_int8_t cwidth, u_int16_t cset,
444 u_int16_t language, const void *data,
447 struct csr1212_keyval *kv;
450 kv = csr1212_new_descriptor_leaf(0, 0, NULL, data_len +
451 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD);
455 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_WIDTH(kv, cwidth);
456 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_CHAR_SET(kv, cset);
457 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_SET_LANGUAGE(kv, language);
459 lstr = (char*)CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv);
461 /* make sure last quadlet is zeroed out */
462 *((u_int32_t*)&(lstr[(data_len - 1) & ~0x3])) = 0;
464 /* don't copy the NUL terminator */
465 memcpy(lstr, data, data_len);
/* Validate that a string contains only characters permitted by the IEEE
 * 1212 "minimal ASCII" character set.  The lookup table maps each
 * allowed 7-bit code to itself and every disallowed code to 0, so the
 * identity test below fails on the first nonconforming character.
 * Returns -1 on failure (success return falls outside this excerpt). */
470 static int csr1212_check_minimal_ascii(const char *s)
472 static const char minimal_ascii_table[] = {
473 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
474 0x00, 0x00, 0x0a, 0x00, 0x0C, 0x0D, 0x00, 0x00,
475 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
476 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
477 0x20, 0x21, 0x22, 0x00, 0x00, 0x25, 0x26, 0x27,
478 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
479 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
480 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
481 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
482 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
483 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
484 0x58, 0x59, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x5f,
485 0x00, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
486 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
487 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
488 0x78, 0x79, 0x7a, 0x00, 0x00, 0x00, 0x00, 0x00,
491 if (minimal_ascii_table[*s & 0x7F] != *s)
492 return -1; /* failed */
494 /* String conforms to minimal-ascii, as specified by IEEE 1212,
/* Create a textual descriptor leaf from a NUL-terminated string after
 * verifying it is minimal-ASCII.  Width/charset/language are all 0, the
 * encoding IEEE 1212 defines for minimal-ASCII descriptors. */
499 struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s)
501 /* Check if string conform to minimal_ascii format */
502 if (csr1212_check_minimal_ascii(s))
505 /* IEEE 1212, par. 7.5.4.1 Textual descriptors (minimal ASCII) */
506 return csr1212_new_textual_descriptor_leaf(0, 0, 0, s, strlen(s));
510 /* Destruction Routines */
/* Remove 'kv' from 'dir': find its dentry, unlink it from the list
 * (patching head/tail as needed), free the dentry, and drop the
 * directory's reference on the keyval. */
512 void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
513 struct csr1212_keyval *kv)
515 struct csr1212_dentry *dentry;
517 if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
520 dentry = csr1212_find_keyval(dir, kv);
526 dentry->prev->next = dentry->next;
528 dentry->next->prev = dentry->prev;
529 if (dir->value.directory.dentries_head == dentry)
530 dir->value.directory.dentries_head = dentry->next;
531 if (dir->value.directory.dentries_tail == dentry)
532 dir->value.directory.dentries_tail = dentry->prev;
534 CSR1212_FREE(dentry);
536 csr1212_release_keyval(kv);
539 /* This function is used to free the memory taken by a keyval. If the given
540 * keyval is a directory type, then any keyvals contained in that directory
541 * will be destroyed as well if their respective refcnts are 0. By means of
542 * list manipulation, this routine will descend a directory structure in a
543 * non-recursive manner. */
/* Implementation notes: a stack-allocated sentinel dentry seeds a work
 * list (head/tail); subdirectory contents are spliced onto the tail so
 * traversal stays iterative.  Dentries other than the sentinel are
 * freed as the head advances. */
544 void _csr1212_destroy_keyval(struct csr1212_keyval *kv)
546 struct csr1212_keyval *k, *a;
547 struct csr1212_dentry dentry;
548 struct csr1212_dentry *head, *tail;
568 if (k->key.type == CSR1212_KV_TYPE_DIRECTORY) {
569 /* If the current entry is a directory, then move all
570 * the entries to the destruction list. */
571 if (k->value.directory.dentries_head) {
572 tail->next = k->value.directory.dentries_head;
573 k->value.directory.dentries_head->prev = tail;
574 tail = k->value.directory.dentries_tail;
/* free consumed dentries, but never the on-stack sentinel */
583 if (head->prev && head->prev != &dentry) {
584 CSR1212_FREE(head->prev);
587 } else if (tail != &dentry)
/* Tear down a CSR instance: release the root keyval tree; the remaining
 * locals (c/oc, cr/ocr) iterate the cache and cache-region lists for
 * cleanup (those loops fall outside this excerpt). */
592 void csr1212_destroy_csr(struct csr1212_csr *csr)
594 struct csr1212_csr_rom_cache *c, *oc;
595 struct csr1212_cache_region *cr, *ocr;
597 csr1212_release_keyval(csr->root_kv);
616 /* CSR Image Creation */
/* Grow the CSR by one Extended ROM cache of (at least) 'romsize' bytes:
 * allocate a bus address range via the bus ops, wrap it in a rom cache,
 * create an Extended_ROM leaf that aliases the cache buffer, attach the
 * leaf to the root directory, and link the cache at the tail of the
 * cache list.  Every failure path releases the address range (and the
 * leaf, once created).  Returns CSR1212_SUCCESS/EINVAL/ENOMEM. */
618 static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
620 struct csr1212_csr_rom_cache *cache;
623 if (!csr || !csr->ops || !csr->ops->allocate_addr_range ||
624 !csr->ops->release_addr || csr->max_rom < 1)
625 return CSR1212_EINVAL;
627 /* ROM size must be a multiple of csr->max_rom */
628 romsize = (romsize + (csr->max_rom - 1)) & ~(csr->max_rom - 1);
630 csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom, csr->private);
631 if (csr_addr == CSR1212_INVALID_ADDR_SPACE) {
632 return CSR1212_ENOMEM;
634 if (csr_addr < CSR1212_REGISTER_SPACE_BASE) {
635 /* Invalid address returned from allocate_addr_range(). */
636 csr->ops->release_addr(csr_addr, csr->private);
637 return CSR1212_ENOMEM;
/* cache offsets are relative to the register space base */
640 cache = csr1212_rom_cache_malloc(csr_addr - CSR1212_REGISTER_SPACE_BASE, romsize);
642 csr->ops->release_addr(csr_addr, csr->private);
643 return CSR1212_ENOMEM;
646 cache->ext_rom = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, CSR1212_KV_ID_EXTENDED_ROM);
647 if (!cache->ext_rom) {
648 csr->ops->release_addr(csr_addr, csr->private);
650 return CSR1212_ENOMEM;
653 if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) != CSR1212_SUCCESS) {
654 csr1212_release_keyval(cache->ext_rom);
655 csr->ops->release_addr(csr_addr, csr->private);
657 return CSR1212_ENOMEM;
/* leaf.len of -1 marks the length as not yet laid out; data aliases the
 * cache buffer (not freed by free_keyval) */
659 cache->ext_rom->offset = csr_addr - CSR1212_REGISTER_SPACE_BASE;
660 cache->ext_rom->value.leaf.len = -1;
661 cache->ext_rom->value.leaf.data = cache->data;
663 /* Add cache to tail of cache list */
664 cache->prev = csr->cache_tail;
665 csr->cache_tail->next = cache;
666 csr->cache_tail = cache;
667 return CSR1212_SUCCESS;
/* Unlink 'cache' from the CSR's cache list (fixing head/tail and
 * neighbour links) and, for Extended ROM caches, detach and release the
 * associated Extended_ROM leaf from the root directory. */
670 static void csr1212_remove_cache(struct csr1212_csr *csr,
671 struct csr1212_csr_rom_cache *cache)
673 if (csr->cache_head == cache)
674 csr->cache_head = cache->next;
675 if (csr->cache_tail == cache)
676 csr->cache_tail = cache->prev;
679 cache->prev->next = cache->next;
681 cache->next->prev = cache->prev;
683 if (cache->ext_rom) {
684 csr1212_detach_keyval_from_directory(csr->root_kv, cache->ext_rom);
685 csr1212_release_keyval(cache->ext_rom);
/* Walk one directory's entries (and each entry's associate chain) and
 * append every leaf/directory keyval to the flat layout list ending at
 * *layout_tail.  Duplicate Extended_Key_Specifier_ID / Extended_Key
 * immediates are tracked so repeated values need not be re-emitted.
 * Extended ROM leaves are skipped: they are placed per-cache, not in
 * the layout list.  (Return value accounting falls outside this
 * excerpt.) */
691 static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
692 struct csr1212_keyval **layout_tail)
694 struct csr1212_dentry *dentry;
695 struct csr1212_keyval *dkv;
696 struct csr1212_keyval *last_extkey_spec = NULL;
697 struct csr1212_keyval *last_extkey = NULL;
700 for (dentry = dir->value.directory.dentries_head; dentry;
701 dentry = dentry->next) {
702 for (dkv = dentry->kv; dkv; dkv = dkv->associate) {
703 /* Special Case: Extended Key Specifier_ID */
704 if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
705 if (last_extkey_spec == NULL) {
706 last_extkey_spec = dkv;
707 } else if (dkv->value.immediate != last_extkey_spec->value.immediate) {
708 last_extkey_spec = dkv;
712 /* Special Case: Extended Key */
713 } else if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
714 if (last_extkey == NULL) {
716 } else if (dkv->value.immediate != last_extkey->value.immediate) {
/* only leaves and directories occupy ROM space of their own */
725 switch(dkv->key.type) {
727 case CSR1212_KV_TYPE_IMMEDIATE:
728 case CSR1212_KV_TYPE_CSR_OFFSET:
730 case CSR1212_KV_TYPE_LEAF:
731 case CSR1212_KV_TYPE_DIRECTORY:
732 /* Remove from list */
733 if (dkv->prev && (dkv->prev->next == dkv))
734 dkv->prev->next = dkv->next;
735 if (dkv->next && (dkv->next->prev == dkv))
736 dkv->next->prev = dkv->prev;
737 //if (dkv == *layout_tail)
738 // *layout_tail = dkv->prev;
740 /* Special case: Extended ROM leafs */
741 if (dkv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
742 dkv->value.leaf.len = -1;
743 /* Don't add Extended ROM leafs in the layout list,
744 * they are handled differently. */
748 /* Add to tail of list */
750 dkv->prev = *layout_tail;
751 (*layout_tail)->next = dkv;
/* Build the flat layout list starting at 'kv' (the root directory) and
 * return the aggregate image size in bytes.  Each leaf or directory
 * contributes its payload length plus one quadlet for the length/CRC
 * header; directories are expanded via
 * csr1212_generate_layout_subdir(), which appends their children to the
 * list being walked. */
760 static size_t csr1212_generate_layout_order(struct csr1212_keyval *kv)
762 struct csr1212_keyval *ltail = kv;
766 switch(kv->key.type) {
767 case CSR1212_KV_TYPE_LEAF:
768 /* Add 1 quadlet for crc/len field */
769 agg_size += kv->value.leaf.len + 1;
772 case CSR1212_KV_TYPE_DIRECTORY:
773 kv->value.directory.len = csr1212_generate_layout_subdir(kv, &ltail);
774 /* Add 1 quadlet for crc/len field */
775 agg_size += kv->value.directory.len + 1;
780 return quads_to_bytes(agg_size);
/* Assign ROM offsets to keyvals from 'start_kv' onwards until 'cache'
 * is full.  Records the cache's layout_head/layout_tail and its used
 * length, and returns the first keyval that did NOT fit (NULL when all
 * were placed — exact return falls outside this excerpt). */
783 static struct csr1212_keyval *
784 csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
785 struct csr1212_keyval *start_kv, int start_pos)
787 struct csr1212_keyval *kv = start_kv;
788 struct csr1212_keyval *okv = start_kv;
790 int kv_len = 0, okv_len = 0;
792 cache->layout_head = kv;
794 while(kv && pos < cache->size) {
795 /* Special case: Extended ROM leafs */
796 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
797 kv->offset = cache->offset + pos;
800 switch(kv->key.type) {
801 case CSR1212_KV_TYPE_LEAF:
802 kv_len = kv->value.leaf.len;
805 case CSR1212_KV_TYPE_DIRECTORY:
806 kv_len = kv->value.directory.len;
810 /* Should never get here */
/* advance past this block: payload plus one header quadlet */
814 pos += quads_to_bytes(kv_len + 1);
816 if (pos <= cache->size) {
/* okv/okv_len track the last keyval that fully fit in the cache */
823 cache->layout_tail = okv;
824 cache->len = (okv->offset - cache->offset) + quads_to_bytes(okv_len + 1);
/* Bit layout of a directory entry quadlet: the 8-bit key (2-bit type in
 * the top bits, 6-bit id below it) occupies bits 31..24; the remaining
 * 24 bits hold the entry's value. */
829 #define CSR1212_KV_KEY_SHIFT 24
830 #define CSR1212_KV_KEY_TYPE_SHIFT 6
831 #define CSR1212_KV_KEY_ID_MASK 0x3f
832 #define CSR1212_KV_KEY_TYPE_MASK 0x3 /* after shift */
/* Serialize one directory's entries into 'data_buffer' (the quadlets
 * following the directory's length/CRC header).  Immediate and CSR
 * offset entries carry their value directly; leaf and directory entries
 * carry the target's offset converted to a quadlet count relative to
 * the quadlet after this entry.  Duplicate Extended Key/Specifier
 * immediates are deduplicated the same way as during layout. */
835 csr1212_generate_tree_subdir(struct csr1212_keyval *dir, u_int32_t *data_buffer)
837 struct csr1212_dentry *dentry;
838 struct csr1212_keyval *last_extkey_spec = NULL;
839 struct csr1212_keyval *last_extkey = NULL;
842 for (dentry = dir->value.directory.dentries_head; dentry; dentry = dentry->next) {
843 struct csr1212_keyval *a;
845 for (a = dentry->kv; a; a = a->associate) {
848 /* Special Case: Extended Key Specifier_ID */
849 if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
850 if (last_extkey_spec == NULL) {
851 last_extkey_spec = a;
852 } else if (a->value.immediate != last_extkey_spec->value.immediate) {
853 last_extkey_spec = a;
857 /* Special Case: Extended Key */
858 } else if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
859 if (last_extkey == NULL) {
861 } else if (a->value.immediate != last_extkey->value.immediate) {
868 switch(a->key.type) {
869 case CSR1212_KV_TYPE_IMMEDIATE:
870 value = a->value.immediate;
872 case CSR1212_KV_TYPE_CSR_OFFSET:
873 value = a->value.csr_offset;
875 case CSR1212_KV_TYPE_LEAF:
/* offset of the entry being written is dir->offset + (1+index) quadlets */
877 value -= dir->offset + quads_to_bytes(1+index);
878 value = bytes_to_quads(value);
880 case CSR1212_KV_TYPE_DIRECTORY:
882 value -= dir->offset + quads_to_bytes(1+index);
883 value = bytes_to_quads(value);
886 /* Should never get here */
887 break; /* GDB breakpoint */
/* pack key id + key type into the top byte, big-endian on the wire */
890 value |= (a->key.id & CSR1212_KV_KEY_ID_MASK) << CSR1212_KV_KEY_SHIFT;
891 value |= (a->key.type & CSR1212_KV_KEY_TYPE_MASK) <<
892 (CSR1212_KV_KEY_SHIFT + CSR1212_KV_KEY_TYPE_SHIFT);
893 data_buffer[index] = CSR1212_CPU_TO_BE32(value);
/* On-ROM image of a keyval block: a header (length/CRC fields declared
 * in the elided lines) followed by the payload quadlets. */
899 struct csr1212_keyval_img {
904 csr1212_quad_t data[0]; /* older gcc can't handle [] which is standard */
/* Copy every laid-out keyval (layout_head..layout_tail) into the
 * cache's data buffer at its assigned offset, filling in each block's
 * length and CRC header, and unlink the keyvals from the layout list as
 * they are consumed. */
907 static void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
909 struct csr1212_keyval *kv, *nkv;
910 struct csr1212_keyval_img *kvi;
912 for (kv = cache->layout_head; kv != cache->layout_tail->next; kv = nkv) {
913 kvi = (struct csr1212_keyval_img *)
914 (cache->data + bytes_to_quads(kv->offset - cache->offset));
915 switch(kv->key.type) {
917 case CSR1212_KV_TYPE_IMMEDIATE:
918 case CSR1212_KV_TYPE_CSR_OFFSET:
919 /* Should never get here */
920 break; /* GDB breakpoint */
922 case CSR1212_KV_TYPE_LEAF:
923 /* Don't copy over Extended ROM areas, they are
924 * already filled out! */
925 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
926 memcpy(kvi->data, kv->value.leaf.data,
927 quads_to_bytes(kv->value.leaf.len));
929 kvi->length = CSR1212_CPU_TO_BE16(kv->value.leaf.len);
930 kvi->crc = csr1212_crc16(kvi->data, kv->value.leaf.len);
933 case CSR1212_KV_TYPE_DIRECTORY:
934 csr1212_generate_tree_subdir(kv, kvi->data);
936 kvi->length = CSR1212_CPU_TO_BE16(kv->value.directory.len);
937 kvi->crc = csr1212_crc16(kvi->data, kv->value.directory.len);
/* detach the processed keyval from the layout list */
943 kv->prev->next = NULL;
945 kv->next->prev = NULL;
/* Default size (in bytes) of each appended Extended ROM cache. */
951 #define CSR1212_EXTENDED_ROM_SIZE (0x10000 * sizeof(u_int32_t))
/* Build the complete Config ROM image:
 *  1. write the bus info block header (length, crc_length, CRC);
 *  2. flatten the keyval tree into a layout list and place it into
 *     caches, appending Extended ROM caches as needed and re-laying-out
 *     when new caches are added;
 *  3. drop any appended caches that ended up unused;
 *  4. fill cache buffers back-to-front so Extended ROM contents exist
 *     before the CRCs covering them are computed, padding each Extended
 *     ROM leaf to a multiple of max_rom. */
953 int csr1212_generate_csr_image(struct csr1212_csr *csr)
955 struct csr1212_bus_info_block_img *bi;
956 struct csr1212_csr_rom_cache *cache;
957 struct csr1212_keyval *kv;
963 return CSR1212_EINVAL;
965 cache = csr->cache_head;
967 bi = (struct csr1212_bus_info_block_img*)cache->data;
/* length fields are in quadlets, excluding the header quadlet itself */
969 bi->length = bytes_to_quads(csr->bus_info_len) - 1;
970 bi->crc_length = bi->length;
971 bi->crc = csr1212_crc16(bi->data, bi->crc_length);
973 csr->root_kv->next = NULL;
974 csr->root_kv->prev = NULL;
976 agg_size = csr1212_generate_layout_order(csr->root_kv);
978 init_offset = csr->bus_info_len;
980 for (kv = csr->root_kv, cache = csr->cache_head; kv; cache = cache->next) {
982 /* Estimate approximate number of additional cache
983 * regions needed (it assumes that the cache holding
984 * the first 1K Config ROM space always exists). */
985 int est_c = agg_size / (CSR1212_EXTENDED_ROM_SIZE -
986 (2 * sizeof(u_int32_t))) + 1;
988 /* Add additional cache regions, extras will be
990 for (; est_c; est_c--) {
991 ret = csr1212_append_new_cache(csr, CSR1212_EXTENDED_ROM_SIZE);
992 if (ret != CSR1212_SUCCESS)
995 /* Need to re-layout for additional cache regions */
996 agg_size = csr1212_generate_layout_order(csr->root_kv);
998 cache = csr->cache_head;
999 init_offset = csr->bus_info_len;
/* place keyvals into this cache; kv becomes the first unplaced keyval.
 * Caches after the first reserve one quadlet for the leaf header. */
1001 kv = csr1212_generate_positions(cache, kv, init_offset);
1002 agg_size -= cache->len;
1003 init_offset = sizeof(u_int32_t);
1006 /* Remove unused, excess cache regions */
1008 struct csr1212_csr_rom_cache *oc = cache;
1010 cache = cache->next;
1011 csr1212_remove_cache(csr, oc);
1014 /* Go through the list backward so that when done, the correct CRC
1015 * will be calculated for the Extended ROM areas. */
1016 for(cache = csr->cache_tail; cache; cache = cache->prev) {
1017 /* Only Extended ROM caches should have this set. */
1018 if (cache->ext_rom) {
1021 /* Make sure the Extended ROM leaf is a multiple of
1022 * max_rom in size. */
1023 if (csr->max_rom < 1)
1024 return CSR1212_EINVAL;
1025 leaf_size = (cache->len + (csr->max_rom - 1)) &
1026 ~(csr->max_rom - 1);
1028 /* Zero out the unused ROM region */
1029 memset(cache->data + bytes_to_quads(cache->len), 0x00,
1030 leaf_size - cache->len);
1032 /* Subtract leaf header */
1033 leaf_size -= sizeof(u_int32_t);
1035 /* Update the Extended ROM leaf length */
1036 cache->ext_rom->value.leaf.len =
1037 bytes_to_quads(leaf_size);
1039 /* Zero out the unused ROM region */
1040 memset(cache->data + bytes_to_quads(cache->len), 0x00,
1041 cache->size - cache->len);
1044 /* Copy the data into the cache buffer */
1045 csr1212_fill_cache(cache);
1047 if (cache != csr->cache_head) {
1048 /* Set the length and CRC of the extended ROM. */
1049 struct csr1212_keyval_img *kvi =
1050 (struct csr1212_keyval_img*)cache->data;
1052 kvi->length = CSR1212_CPU_TO_BE16(bytes_to_quads(cache->len) - 1);
1053 kvi->crc = csr1212_crc16(kvi->data,
1054 bytes_to_quads(cache->len) - 1);
1059 return CSR1212_SUCCESS;
/* Copy 'len' bytes of generated ROM image starting at byte 'offset'
 * into 'buffer'.  The request must fall entirely within a single cache;
 * otherwise CSR1212_ENOENT is returned. */
1062 int csr1212_read(struct csr1212_csr *csr, u_int32_t offset, void *buffer, u_int32_t len)
1064 struct csr1212_csr_rom_cache *cache;
1066 for (cache = csr->cache_head; cache; cache = cache->next) {
1067 if (offset >= cache->offset &&
1068 (offset + len) <= (cache->offset + cache->size)) {
1070 &cache->data[bytes_to_quads(offset - cache->offset)],
1072 return CSR1212_SUCCESS;
1075 return CSR1212_ENOENT;
1079 /* Parse a chunk of data as a Config ROM */
/* Read and validate a remote node's bus info block: fetch it one
 * quadlet at a time, verify the ROM header's info_length, read the rest
 * of the CRC-covered area, accept either the IEEE 1212 CRC or the
 * byte-swapped (Microsoft-style) CRC, and record the filled region of
 * the head cache.  Returns CSR1212_SUCCESS/EINVAL/ENOMEM or a bus_read
 * error. */
1081 static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
1083 struct csr1212_bus_info_block_img *bi;
1084 struct csr1212_cache_region *cr;
1088 /* IEEE 1212 says that the entire bus info block should be readable in
1089 * a single transaction regardless of the max_rom value.
1090 * Unfortunately, many IEEE 1394 devices do not abide by that, so the
1091 * bus info block will be read 1 quadlet at a time. The rest of the
1092 * ConfigROM will be read according to the max_rom field. */
1093 for (i = 0; i < csr->bus_info_len; i += sizeof(csr1212_quad_t)) {
1094 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
1095 sizeof(csr1212_quad_t),
1096 &csr->cache_head->data[bytes_to_quads(i)],
1098 if (ret != CSR1212_SUCCESS)
1101 /* check ROM header's info_length */
1103 CSR1212_BE32_TO_CPU(csr->cache_head->data[0]) >> 24 !=
1104 bytes_to_quads(csr->bus_info_len) - 1)
1105 return CSR1212_EINVAL;
1108 bi = (struct csr1212_bus_info_block_img*)csr->cache_head->data;
1109 csr->crc_len = quads_to_bytes(bi->crc_length);
1111 /* IEEE 1212 recommends that crc_len be equal to bus_info_len, but that is not
1112 * always the case, so read the rest of the crc area 1 quadlet at a time. */
1113 for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(csr1212_quad_t)) {
1114 ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
1115 sizeof(csr1212_quad_t),
1116 &csr->cache_head->data[bytes_to_quads(i)],
1118 if (ret != CSR1212_SUCCESS)
1123 /* Apparently there are too many different wrong implementations of the
1124 * CRC algorithm that verifying them is moot. */
1125 if ((csr1212_crc16(bi->data, bi->crc_length) != bi->crc) &&
1126 (csr1212_msft_crc16(bi->data, bi->crc_length) != bi->crc))
1127 return CSR1212_EINVAL;
1130 cr = CSR1212_MALLOC(sizeof(*cr));
1132 return CSR1212_ENOMEM;
/* mark [0, crc_len + 4) of the head cache as already fetched */
1136 cr->offset_start = 0;
1137 cr->offset_end = csr->crc_len + 4;
1139 csr->cache_head->filled_head = cr;
1140 csr->cache_head->filled_tail = cr;
1142 return CSR1212_SUCCESS;
/* Decoders for a big-endian directory entry quadlet: full 8-bit key,
 * its 2-bit type and 6-bit id components, and the 24-bit value. */
1145 #define CSR1212_KV_KEY(q) (CSR1212_BE32_TO_CPU(q) >> CSR1212_KV_KEY_SHIFT)
1146 #define CSR1212_KV_KEY_TYPE(q) (CSR1212_KV_KEY(q) >> CSR1212_KV_KEY_TYPE_SHIFT)
1147 #define CSR1212_KV_KEY_ID(q) (CSR1212_KV_KEY(q) & CSR1212_KV_KEY_ID_MASK)
1148 #define CSR1212_KV_VAL_MASK 0xffffff
1149 #define CSR1212_KV_VAL(q) (CSR1212_BE32_TO_CPU(q) & CSR1212_KV_VAL_MASK)
/* Decode one directory entry quadlet 'ki' found at ROM position
 * 'kv_pos' and attach the resulting keyval to 'dir'.  Immediates and
 * CSR offsets become fresh keyvals; leaf/directory entries resolve
 * their relative offset to an absolute one and either reuse an existing
 * keyval at that offset or create an unread placeholder (valid = 0).
 * Parsed keyvals carry refcnt 0 since the parser holds no reference of
 * its own.  On failure the new keyval is released. */
1151 static int csr1212_parse_dir_entry(struct csr1212_keyval *dir,
1155 int ret = CSR1212_SUCCESS;
1156 struct csr1212_keyval *k = NULL;
1159 switch(CSR1212_KV_KEY_TYPE(ki)) {
1160 case CSR1212_KV_TYPE_IMMEDIATE:
1161 k = csr1212_new_immediate(CSR1212_KV_KEY_ID(ki),
1162 CSR1212_KV_VAL(ki));
1164 ret = CSR1212_ENOMEM;
1168 k->refcnt = 0; /* Don't keep local reference when parsing. */
1171 case CSR1212_KV_TYPE_CSR_OFFSET:
1172 k = csr1212_new_csr_offset(CSR1212_KV_KEY_ID(ki),
1173 CSR1212_KV_VAL(ki));
1175 ret = CSR1212_ENOMEM;
1178 k->refcnt = 0; /* Don't keep local reference when parsing. */
1182 /* Compute the offset from 0xffff f000 0000. */
1183 offset = quads_to_bytes(CSR1212_KV_VAL(ki)) + kv_pos;
1184 if (offset == kv_pos) {
1185 /* Uh-oh. Can't have a relative offset of 0 for Leaves
1186 * or Directories. The Config ROM image is most likely
1187 * messed up, so we'll just abort here. */
1192 k = csr1212_find_keyval_offset(dir, offset);
1195 break; /* Found it. */
1197 if (CSR1212_KV_KEY_TYPE(ki) == CSR1212_KV_TYPE_DIRECTORY) {
1198 k = csr1212_new_directory(CSR1212_KV_KEY_ID(ki));
1200 k = csr1212_new_leaf(CSR1212_KV_KEY_ID(ki), NULL, 0);
1203 ret = CSR1212_ENOMEM;
1206 k->refcnt = 0; /* Don't keep local reference when parsing. */
1207 k->valid = 0; /* Contents not read yet so it's not valid. */
/* splice the placeholder into the offset-ordered keyval list */
1211 k->next = dir->next;
1212 dir->next->prev = k;
1215 ret = csr1212_attach_keyval_to_directory(dir, k);
1218 if (ret != CSR1212_SUCCESS && k != NULL)
/* Parse the block for 'kv' from its cache: read the image header,
 * verify the CRC (accepting either the IEEE 1212 or byte-swapped
 * variant), then either decode each directory entry via
 * csr1212_parse_dir_entry() or copy leaf payload into a new buffer
 * (Extended ROM leaves keep aliasing the cache).  Returns
 * CSR1212_SUCCESS or an error code. */
1223 int csr1212_parse_keyval(struct csr1212_keyval *kv,
1224 struct csr1212_csr_rom_cache *cache)
1226 struct csr1212_keyval_img *kvi;
1228 int ret = CSR1212_SUCCESS;
1231 kvi = (struct csr1212_keyval_img*)&cache->data[bytes_to_quads(kv->offset -
1233 kvi_len = CSR1212_BE16_TO_CPU(kvi->length);
1236 /* Apparently there are too many different wrong implementations of the
1237 * CRC algorithm that verifying them is moot. */
1238 if ((csr1212_crc16(kvi->data, kvi_len) != kvi->crc) &&
1239 (csr1212_msft_crc16(kvi->data, kvi_len) != kvi->crc)) {
1240 ret = CSR1212_EINVAL;
1245 switch(kv->key.type) {
1246 case CSR1212_KV_TYPE_DIRECTORY:
1247 for (i = 0; i < kvi_len; i++) {
1248 csr1212_quad_t ki = kvi->data[i];
1250 /* Some devices put null entries in their unit
1251 * directories. If we come across such an entry,
1255 ret = csr1212_parse_dir_entry(kv, ki,
1257 quads_to_bytes(i + 1)));
1259 kv->value.directory.len = kvi_len;
1262 case CSR1212_KV_TYPE_LEAF:
1263 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
1264 kv->value.leaf.data = CSR1212_MALLOC(quads_to_bytes(kvi_len));
1265 if (!kv->value.leaf.data) {
1266 ret = CSR1212_ENOMEM;
1270 kv->value.leaf.len = kvi_len;
1271 memcpy(kv->value.leaf.data, kvi->data, quads_to_bytes(kvi_len));
/* Fetch the raw data for a keyval from the device (via csr->ops->bus_read)
 * into the appropriate ROM cache, tracking which byte ranges of the cache
 * have been filled so far via the filled_head/filled_tail region list,
 * then parse it with csr1212_parse_keyval().  Returns CSR1212_SUCCESS or
 * a CSR1212_E* error code.
 * NOTE(review): this extract is missing interior lines (gaps in the
 * original numbering); comments below describe only the visible code. */
1282 int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
1284 struct csr1212_cache_region *cr, *ncr, *newcr = NULL;
1285 struct csr1212_keyval_img *kvi = NULL;
1286 struct csr1212_csr_rom_cache *cache;
1289 u_int32_t *cache_ptr;
1290 u_int16_t kv_len = 0;
/* max_rom is also used as an alignment mask below, so it must be sane. */
1292 if (!csr || !kv || csr->max_rom < 1)
1293 return CSR1212_EINVAL;
1295 /* First find which cache the data should be in (or go in if not read
1297 for (cache = csr->cache_head; cache; cache = cache->next) {
1298 if (kv->offset >= cache->offset &&
1299 kv->offset < (cache->offset + cache->size))
/* No existing cache covers kv->offset: build a new one. */
1305 u_int32_t cache_size;
1307 /* Only create a new cache for Extended ROM leaves. */
1308 if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
1309 return CSR1212_EINVAL;
/* Read just the leaf's header quadlet to learn its length. */
1311 if (csr->ops->bus_read(csr,
1312 CSR1212_REGISTER_SPACE_BASE + kv->offset,
1313 sizeof(csr1212_quad_t), &q, csr->private)) {
/* Upper 16 bits of the header quadlet hold the length in quadlets. */
1317 kv->value.leaf.len = CSR1212_BE32_TO_CPU(q) >> 16;
/* Size the cache for header + payload, rounded up to a multiple of
 * max_rom (max_rom is a power of two, used as an alignment mask). */
1319 cache_size = (quads_to_bytes(kv->value.leaf.len + 1) +
1320 (csr->max_rom - 1)) & ~(csr->max_rom - 1);
1322 cache = csr1212_rom_cache_malloc(kv->offset, cache_size);
1324 return CSR1212_ENOMEM;
/* Leaf payload starts right after the header quadlet at data[0]. */
1326 kv->value.leaf.data = &cache->data[1];
/* Append the new cache to the csr's doubly-linked cache list. */
1327 csr->cache_tail->next = cache;
1328 cache->prev = csr->cache_tail;
1330 csr->cache_tail = cache;
/* Seed the filled-region list: only the header quadlet is read so far. */
1331 cache->filled_head =
1332 CSR1212_MALLOC(sizeof(*cache->filled_head));
1333 if (!cache->filled_head) {
1334 return CSR1212_ENOMEM;
1337 cache->filled_head->offset_start = 0;
1338 cache->filled_head->offset_end = sizeof(csr1212_quad_t);
1339 cache->filled_tail = cache->filled_head;
1340 cache->filled_head->next = NULL;
1341 cache->filled_head->prev = NULL;
1344 /* Don't read the entire extended ROM now. Pieces of it will
1345 * be read when entries inside it are read. */
1346 return csr1212_parse_keyval(kv, cache);
/* Byte offset of the keyval within the cache we found above. */
1349 cache_index = kv->offset - cache->offset;
1351 /* Now search read portions of the cache to see if it is there. */
1352 for (cr = cache->filled_head; cr; cr = cr->next) {
1353 if (cache_index < cr->offset_start) {
/* Target lies in a gap before this region: start a new region,
 * aligned down to a max_rom boundary, and link it in before cr. */
1354 newcr = CSR1212_MALLOC(sizeof(*newcr));
1356 return CSR1212_ENOMEM;
1358 newcr->offset_start = cache_index & ~(csr->max_rom - 1);
1359 newcr->offset_end = newcr->offset_start;
1361 newcr->prev = cr->prev;
1365 } else if ((cache_index >= cr->offset_start) &&
1366 (cache_index < cr->offset_end)) {
/* Target already read: the image header is available, so the
 * total byte length of the keyval can be computed now. */
1367 kvi = (struct csr1212_keyval_img*)
1368 (&cache->data[bytes_to_quads(cache_index)]);
1369 kv_len = quads_to_bytes(CSR1212_BE16_TO_CPU(kvi->length) +
1372 } else if (cache_index == cr->offset_end)
/* Target lies past every filled region: append a new region,
 * aligned down to a max_rom boundary, after the current tail. */
1377 cr = cache->filled_tail;
1378 newcr = CSR1212_MALLOC(sizeof(*newcr));
1380 return CSR1212_ENOMEM;
1382 newcr->offset_start = cache_index & ~(csr->max_rom - 1);
1383 newcr->offset_end = newcr->offset_start;
1385 newcr->next = cr->next;
1388 cache->filled_tail = newcr;
/* Keep reading max_rom-sized chunks from the bus until the keyval's
 * header (to learn kv_len) and then its full body are in the cache. */
1391 while(!kvi || cr->offset_end < cache_index + kv_len) {
1392 cache_ptr = &cache->data[bytes_to_quads(cr->offset_end &
1393 ~(csr->max_rom - 1))];
1395 addr = (CSR1212_CSR_ARCH_REG_SPACE_BASE + cache->offset +
1396 cr->offset_end) & ~(csr->max_rom - 1);
1398 if (csr->ops->bus_read(csr, addr, csr->max_rom, cache_ptr,
1400 if (csr->max_rom == 4)
1401 /* We've got problems! */
1404 /* Apparently the max_rom value was a lie, set it to
1405 * do quadlet reads and try again. */
/* Advance offset_end to the next max_rom boundary just read. */
1410 cr->offset_end += csr->max_rom - (cr->offset_end &
1411 (csr->max_rom - 1));
/* Header just became available: compute the keyval's byte length. */
1413 if (!kvi && (cr->offset_end > cache_index)) {
1414 kvi = (struct csr1212_keyval_img*)
1415 (&cache->data[bytes_to_quads(cache_index)]);
1416 kv_len = quads_to_bytes(CSR1212_BE16_TO_CPU(kvi->length) +
1420 if ((kv_len + (kv->offset - cache->offset)) > cache->size) {
1421 /* The Leaf or Directory claims its length extends
1422 * beyond the ConfigROM image region and thus beyond the
1423 * end of our cache region. Therefore, we abort now
1424 * rather than seg faulting later. */
1430 if (ncr && (cr->offset_end >= ncr->offset_start)) {
1431 /* consolidate region entries */
1432 ncr->offset_start = cr->offset_start;
1435 cr->prev->next = cr->next;
1436 ncr->prev = cr->prev;
1437 if (cache->filled_head == cr)
1438 cache->filled_head = ncr;
/* All required bytes are cached; hand off to the parser. */
1444 return csr1212_parse_keyval(kv, cache);
/* Top-level entry point: parse a device's Configuration ROM.  Reads the
 * bus info block, determines the maximum read-request size (max_rom),
 * reads and parses the root directory, then eagerly reads every Extended
 * ROM keyval found in the root directory so cache regions exist for them.
 * Returns CSR1212_SUCCESS or a CSR1212_E* error code.
 * NOTE(review): this extract is missing interior lines (gaps in the
 * original numbering); comments below describe only the visible code. */
1447 int csr1212_parse_csr(struct csr1212_csr *csr)
/* Maps the 2-bit max_rom code from the bus info block to a byte count;
 * index 3 (0) is invalid. */
1449 static const int mr_map[] = { 4, 64, 1024, 0 };
1450 struct csr1212_dentry *dentry;
1453 if (!csr || !csr->ops || !csr->ops->bus_read)
1454 return CSR1212_EINVAL;
1456 ret = csr1212_parse_bus_info_block(csr);
1457 if (ret != CSR1212_SUCCESS)
/* Without a get_max_rom hook, fall back to quadlet-sized reads. */
1460 if (!csr->ops->get_max_rom)
1461 csr->max_rom = mr_map[0]; /* default value */
1463 int i = csr->ops->get_max_rom(csr->bus_info_data,
1466 return CSR1212_EINVAL;
1467 csr->max_rom = mr_map[i];
/* The root keyval is the sole member of the head cache's layout list. */
1470 csr->cache_head->layout_head = csr->root_kv;
1471 csr->cache_head->layout_tail = csr->root_kv;
/* Root directory sits right after the bus info block in config ROM
 * space (low 16 bits of the base address are the in-ROM offset). */
1473 csr->root_kv->offset = (CSR1212_CONFIG_ROM_SPACE_BASE & 0xffff) +
1476 csr->root_kv->valid = 0;
/* Singleton circular list: root keyval links to itself. */
1477 csr->root_kv->next = csr->root_kv;
1478 csr->root_kv->prev = csr->root_kv;
1479 ret = _csr1212_read_keyval(csr, csr->root_kv);
1480 if (ret != CSR1212_SUCCESS)
1483 /* Scan through the Root directory finding all extended ROM regions
1484 * and make cache regions for them */
1485 for (dentry = csr->root_kv->value.directory.dentries_head;
1486 dentry; dentry = dentry->next) {
1487 if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM &&
1488 !dentry->kv->valid) {
1489 ret = _csr1212_read_keyval(csr, dentry->kv);
1490 if (ret != CSR1212_SUCCESS)
1495 return CSR1212_SUCCESS;