/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger	Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger	renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/tty.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>

#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/serial.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/unistd.h>
#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif
DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

unsigned long ia64_max_cacheline_size;
unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;
87 * "flush_icache_range()" needs to know what processor dependent stride size to use
88 * when it makes i-cache(s) coherent with d-caches.
90 #define I_CACHE_STRIDE_SHIFT 5 /* Safest way to go: 32 bytes by 32 bytes */
91 unsigned long ia64_i_cache_stride_shift = ~0;
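/*
 * Illustration only (the real flush_icache_range() is implemented in assembly
 * and is not shown here): the stride shift bounds the step size of the flush
 * loop, roughly
 *
 *	stride = 1UL << ia64_i_cache_stride_shift;
 *	for (addr = start & ~(stride - 1); addr < end; addr += stride)
 *		fc(addr);	/- flush-cache instruction -/
 *
 * so a shift of 5 flushes conservatively in 32-byte steps.
 */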
/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
 * mask specifies a mask of address bits that must be 0 in order for two buffers to be
 * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
 * address of the second buffer must be aligned to (merge_mask+1) in order to be
 * mergeable).  By default, we assume there is no I/O MMU which can merge physically
 * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an
 * IOMMU page size of 2^64 bytes.
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
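/*
 * Worked example (hypothetical IOMMU, values not taken from this file): with
 * a 4KB IOMMU page size the mask would be 0xfff, and two buffers ending at
 * buf1_end and starting at buf2_start are merge candidates only if
 *
 *	((buf1_end | buf2_start) & ia64_max_iommu_merge_mask) == 0
 *
 * i.e. both addresses fall on a (merge_mask + 1)-aligned boundary.  With the
 * default of ~0UL this can never hold, so no merging is attempted.
 */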
/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot.
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
int num_rsvd_regions;
/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters.  Segments contained in the map are removed from the memory ranges.  A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
int
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
	unsigned long range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif
	/*
	 * lowest possible address (walker uses virtual)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;
	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end   = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end)
			return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}
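
/*
 * Typical use, shown only as a sketch: the EFI memory-map walker feeds each
 * usable range through this filter, e.g.
 *
 *	efi_memmap_walk(filter_rsvd_memory, count_pages);
 *
 * where count_pages stands in for whatever per-node callback should see the
 * memory that survives filtering.
 */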
static void __init
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sorting */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}
/**
 * reserve_memory - set up reserved memory areas
 *
 * Set up the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see include/asm-ia64/meminit.h if you need to define more.
 */
void
reserve_memory (void)
{
	int n = 0;
	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
	n++;
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;

	sort_regions(rsvd_region, num_rsvd_regions);
}
/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start + ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}
static void __init
io_port_init (void)
{
	extern unsigned long ia64_iobase;
	unsigned long phys_iobase;

	/*
	 * Set `iobase' to the appropriate address in region 6 (uncached access range).
	 *
	 * The EFI memory map is the "preferred" location to get the I/O port space base,
	 * rather than relying on AR.KR0.  This should become more clear in future SAL
	 * specs.  We'll fall back to getting it out of AR.KR0 if no appropriate entry is
	 * found in the memory map.
	 */
	phys_iobase = efi_get_iobase();
	if (phys_iobase)
		/* set AR.KR0 since this is all we use it for anyway */
		ia64_set_kr(IA64_KR_IO_BASE, phys_iobase);
	else {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, falling back "
		       "to AR.KR0\n");
		printk(KERN_INFO "I/O port base = 0x%lx\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}
/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be set up.
 */
static inline int __init
early_console_setup (char *cmdline)
{
	int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
	{
		extern int sn_serial_console_early_setup(void);
		if (!sn_serial_console_early_setup())
			earlycons++;
	}
#endif
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		earlycons++;
#endif
#ifdef CONFIG_SERIAL_8250_CONSOLE
	if (!early_serial_console_init(cmdline))
		earlycons++;
#endif

	return (earlycons) ? 0 : -1;
}
static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	cpu_set(smp_processor_id(), cpu_online_map);
#endif
}
#ifdef CONFIG_SMP
static void __init
check_for_logical_procs (void)
{
	pal_logical_to_physical_t info;
	s64 status;

	status = ia64_pal_logical_to_phys(0, &info);
	if (status == -1) {
		printk(KERN_INFO "No logical to physical processor mapping "
		       "available\n");
		return;
	}
	if (status) {
		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
		       status);
		return;
	}
	/*
	 * Total number of siblings that the BSP has.  Though not all of them
	 * may have booted successfully, the correct number of siblings
	 * booted is in info.overview_num_log.
	 */
	smp_num_siblings = info.overview_tpc;
	smp_num_cpucores = info.overview_cpp;
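	/*
	 * Example (hypothetical PAL values): a package with two cores of two
	 * threads each would report overview_cpp == 2 and overview_tpc == 2,
	 * yielding smp_num_cpucores == 2 and smp_num_siblings == 2.
	 */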
}
#endif

void __init
setup_arch (char **cmdline_p)
{
	unw_init();

	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	efi_init();
	io_port_init();

#ifdef CONFIG_IA64_GENERIC
	{
		const char *mvec_name = strstr (*cmdline_p, "machvec=");
		char str[64];

		if (mvec_name) {
			const char *end;
			size_t len;

			mvec_name += 8;
			end = strchr (mvec_name, ' ');
			if (end)
				len = end - mvec_name;
			else
				len = strlen (mvec_name);
			len = min(len, sizeof (str) - 1);
			strncpy (str, mvec_name, len);
			str[len] = '\0';
			mvec_name = str;
		} else
			mvec_name = acpi_get_sysname();
		machvec_init(mvec_name);
	}
#endif
	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();
#ifdef CONFIG_ACPI_BOOT
	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();
# ifdef CONFIG_ACPI_NUMA
	acpi_numa_init();
# endif
#else
# ifdef CONFIG_SMP
	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
# endif
#endif /* CONFIG_ACPI_BOOT */

	find_memory();

	/* process SAL system table: */
	ia64_sal_init(efi.sal_systab);
#ifdef CONFIG_SMP
	cpu_physical_id(0) = hard_smp_processor_id();

	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);

	check_for_logical_procs();
	if (smp_num_cpucores > 1)
		printk(KERN_INFO
		       "cpu package is Multi-Core capable: number of cores=%d\n",
		       smp_num_cpucores);
	if (smp_num_siblings > 1)
		printk(KERN_INFO
		       "cpu package is Multi-Threading capable: number of siblings=%d\n",
		       smp_num_siblings);
#endif
	cpu_init();	/* initialize the bootstrap CPU */

#ifdef CONFIG_ACPI_BOOT
	acpi_boot_init();
#endif
#ifdef CONFIG_VT
	if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
		conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
		/*
		 * Non-legacy systems may route legacy VGA MMIO range to system
		 * memory.  vga_con probes the MMIO hole, so memory looks like
		 * a VGA device to it.  The EFI memory map can tell us if it's
		 * memory so we can avoid this problem.
		 */
		if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
			conswitchp = &vga_con;
# endif
	}
#endif
	/* enable IA-64 Machine Check Abort Handling unless disabled */
	if (!strstr(saved_command_line, "nomca"))
		ia64_mca_init();

	platform_setup(cmdline_p);
	paging_init();
}
/*
 * Display cpu info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
#	define lpj	c->loops_per_jiffy
#	define cpunum	c->cpu
#else
#	define lpj	loops_per_jiffy
#	define cpunum	0
#endif
	static struct {
		unsigned long mask;
		const char *feature_name;
	} feature_bits[] = {
		{ 1UL << 0, "branchlong" },
		{ 1UL << 1, "spontaneous deferral" },
		{ 1UL << 2, "16-byte atomic ops" }
	};
	char family[32], features[128], *cp, sep;
	struct cpuinfo_ia64 *c = v;
	unsigned long mask;
	int i;
	mask = c->features;

	switch (c->family) {
	      case 0x07:	memcpy(family, "Itanium", 8); break;
	      case 0x1f:	memcpy(family, "Itanium 2", 10); break;
	      default:		sprintf(family, "%u", c->family); break;
	}
	/* build the feature string: */
	memcpy(features, " standard", 10);
	cp = features;
	sep = 0;
	for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
		if (mask & feature_bits[i].mask) {
			if (sep)
				*cp++ = sep;
			sep = ',';
			*cp++ = ' ';
			strcpy(cp, feature_bits[i].feature_name);
			cp += strlen(feature_bits[i].feature_name);
			mask &= ~feature_bits[i].mask;
		}
	}
	if (mask) {
		/* print unknown features as a hex value: */
		if (sep)
			*cp++ = sep;
		sprintf(cp, " 0x%lx", mask);
	}
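	/*
	 * For illustration (assumed feature word): a CPU advertising only bit 0
	 * produces features == " branchlong", while a CPU with no recognized
	 * bits keeps the default " standard"; unknown bits are appended in hex.
	 */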
515 "features :%s\n" /* don't change this---it _is_ right! */
518 "cpu MHz : %lu.%06lu\n"
519 "itc MHz : %lu.%06lu\n"
520 "BogoMIPS : %lu.%02lu\n",
521 cpunum, c->vendor, family, c->model, c->revision, c->archrev,
522 features, c->ppn, c->number,
523 c->proc_freq / 1000000, c->proc_freq % 1000000,
524 c->itc_freq / 1000000, c->itc_freq % 1000000,
525 lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
	seq_printf(m, "siblings   : %u\n", c->num_log);
	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
		seq_printf(m,
			   "physical id: %u\n"
			   "core id    : %u\n"
			   "thread id  : %u\n",
			   c->socket_id, c->core_id, c->thread_id);
#endif
	seq_printf(m, "\n");

	return 0;
}
static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
	while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
		++*pos;
#endif
	return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
}
static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}
static void
c_stop (struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start =	c_start,
	.next =		c_next,
	.stop =		c_stop,
	.show =		show_cpuinfo
};
void
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;		/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;
	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();

	/*
	 * The default values below will be overwritten by identify_siblings()
	 * for Multi-Threading/Multi-Core capable CPUs.
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;

	identify_siblings(c);
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;
	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
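	/*
	 * Worked example with the Itanium defaults above (impl_va_msb = 50,
	 * phys_addr_size = 44):
	 *
	 *	unimpl_va_mask = ~((7L<<61) | ((1L<<51) - 1))
	 *		       = 0x1ff8000000000000	(bits 51..60)
	 *	unimpl_pa_mask = ~((1L<<63) | ((1L<<44) - 1))
	 *		       = 0x7ffff00000000000	(bits 44..62)
	 */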
}

void
setup_per_cpu_areas (void)
{
	/* start_kernel() requires this... */
}
/*
 * Calculate the max. cache line size.
 *
 * In addition, the minimum of the i-cache stride sizes is calculated for
 * "flush_icache_range()".
 */
static void
get_max_cacheline_size (void)
{
	unsigned long line_size, max = 1;
	u64 l, levels, unique_caches;
	pal_cache_config_info_t cci;
	s64 status;
	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __FUNCTION__, status);
		max = SMP_CACHE_BYTES;
		/* Safest setup for "flush_icache_range()" */
		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
		goto out;
	}
	for (l = 0; l < levels; ++l) {
		status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
						    &cci);
		if (status != 0) {
			printk(KERN_ERR
			       "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
			       __FUNCTION__, l, status);
			max = SMP_CACHE_BYTES;
			/* The safest setup for "flush_icache_range()" */
			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			cci.pcci_unified = 1;
		}
		line_size = 1 << cci.pcci_line_size;
		if (line_size > max)
			max = line_size;
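		/*
		 * pcci_line_size is a log2 encoding; e.g. a reported value of 7
		 * would describe 1 << 7 == 128-byte lines (example value only).
		 */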
		if (!cci.pcci_unified) {
			status = ia64_pal_cache_config_info(l,
						    /* cache_type (instruction)= */ 1,
						    &cci);
			if (status != 0) {
				printk(KERN_ERR
				       "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
				       __FUNCTION__, l, status);
				/* The safest setup for "flush_icache_range()" */
				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			}
		}
		if (cci.pcci_stride < ia64_i_cache_stride_shift)
			ia64_i_cache_stride_shift = cci.pcci_stride;
	}
  out:
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
}
/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void __devinit
cpu_init (void)
{
	extern void __devinit ia64_mmu_init (void *);
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();
	/*
	 * We set ar.k3 so that assembly code in the MCA handler can compute
	 * physical addresses of per-cpu variables with a simple:
	 *	phys = ar.k3 + &per_cpu_var
	 */
	ia64_set_kr(IA64_KR_PER_CPU_DATA,
		    ia64_tpa(cpu_data) - (long) __per_cpu_start);

	get_max_cacheline_size();
	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);
#ifdef CONFIG_MCKINLEY
	{
#	define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
					      (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif
	/* Clear the stack memory reserved for pt_regs: */
	memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);
	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes.  This ensures that we can handle
	 * TLB-misses to user address-space even before we have created the
	 * first user address-space.  This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));
	/*
	 * Initialize default control register to defer speculative faults except
	 * for those arising from TLB misses, which are not deferred.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR, (IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
				       | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));
#ifdef CONFIG_IA32_SUPPORT
	ia32_cpu_init();
#endif

	/* Clear ITC to eliminate sched_clock() overflows in human time.  */
	ia64_set_itc(0);
	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP
	normal_xtp();
#endif
	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0)
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
	else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}
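	/*
	 * Example: the 18-bit architected minimum assumed in the fallback above
	 * yields max_ctx = (1U << 15) - 1; three bits are dropped because each
	 * context spans the eight region IDs.
	 */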
	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
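	/*
	 * E.g. with the 96-register fallback above this stores
	 * 96*8 + 8 == 776 bytes.
	 */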
	platform_cpu_init();
}

void
check_bugs (void)
{
	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
			       (unsigned long) __end___mckinley_e9_bundles);
}