/*
 *	SGI UltraViolet TLB flush routines.
 *
 *	(c) 2008 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */
#include <linux/mc146818rtc.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>

#include <asm/mmu_context.h>
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/genapic.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_bau.h>

#include <mach_apic.h>
static struct bau_control **uv_bau_table_bases __read_mostly;
static int uv_bau_retry_limit __read_mostly;
static int uv_nshift __read_mostly; /* position of pnode (which is nasid>>1) */
static unsigned long uv_mmask __read_mostly;

/* names for the DESC_STATUS_* values read from the activation status MMR */
char *status_table[] = {
	"IDLE",
	"ACTIVE",
	"DESTINATION TIMEOUT",
	"SOURCE TIMEOUT"
};

DEFINE_PER_CPU(struct ptc_stats, ptcstats);
DEFINE_PER_CPU(struct bau_control, bau_control);
/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static void uv_reply_to_message(int resource,
				struct bau_payload_queue_entry *msg,
				struct bau_msg_status *msp)
{
	unsigned long dw;

	/* clear both the Pending bit and the corresponding Timeout bit */
	dw = (1 << (resource + UV_SW_ACK_NPENDING)) | (1 << resource);
	msg->replied_to = 1;
	msg->sw_ack_vector = 0;
	if (msp)
		msp->seen_by.bits = 0;
	uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
}
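
/*
 * Illustrative example (not part of the driver logic): for software-ack
 * resource 2 the write above uses
 *	dw = (1 << (2 + UV_SW_ACK_NPENDING)) | (1 << 2);
 * so a message that has already timed out is released by the same single
 * MMR write that clears its Pending bit.
 */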
/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */
static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
				   int msg_slot, int sw_ack_slot)
{
	int cpu;
	unsigned long this_cpu_mask;
	struct bau_msg_status *msp;

	msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
	cpu = uv_blade_processor_id();
	msg->number_of_cpus =
	    uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
	this_cpu_mask = (unsigned long)1 << cpu;
	if (msp->seen_by.bits & this_cpu_mask)
		return;
	atomic_or_long(&msp->seen_by.bits, this_cpu_mask);

	if (msg->replied_to == 1)
		return;

	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		__get_cpu_var(ptcstats).alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		__get_cpu_var(ptcstats).onetlb++;
	}
	__get_cpu_var(ptcstats).requestee++;

	atomic_inc_short(&msg->acknowledge_count);
	if (msg->number_of_cpus == msg->acknowledge_count)
		uv_reply_to_message(sw_ack_slot, msg, msp);
}
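
/*
 * Illustrative example: on a blade with 8 online cpus, each cpu that takes
 * the BAU interrupt for this message flushes its own TLB and bumps
 * acknowledge_count; only the last cpu sees
 * number_of_cpus == acknowledge_count and issues the single hardware reply
 * via uv_reply_to_message().
 */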
/*
 * Examine the payload queue on all the distribution nodes to see
 * which messages have not been seen, and which cpu(s) have not seen them.
 *
 * Returns the number of cpu's that have not responded.
 */
static int uv_examine_destinations(struct bau_target_nodemask *distribution)
{
	int sender;
	int i, j, k;
	int count = 0;
	struct bau_control *bau_tablesp;
	struct bau_payload_queue_entry *msg;
	struct bau_msg_status *msp;

	sender = smp_processor_id();
	for (i = 0; i < (sizeof(struct bau_target_nodemask) * BITSPERBYTE);
	     i++) {
		if (!bau_node_isset(i, distribution))
			continue;
		bau_tablesp = uv_bau_table_bases[i];
		for (msg = bau_tablesp->va_queue_first, j = 0;
		     j < DESTINATION_PAYLOAD_QUEUE_SIZE; msg++, j++) {
			if ((msg->sending_cpu == sender) &&
			    (!msg->replied_to)) {
				msp = bau_tablesp->msg_statuses + j;
				printk(KERN_DEBUG
				"blade %d: address:%#lx %d of %d, not cpu(s): ",
				       i, msg->address,
				       msg->acknowledge_count,
				       msg->number_of_cpus);
				for (k = 0; k < msg->number_of_cpus; k++) {
					if (!((long)1 << k &
					      msp->seen_by.bits)) {
						count++;
						printk("%d ", k);
					}
				}
				printk("\n");
			}
		}
	}
	return count;
}
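
/*
 * Example of the debug output produced above (illustrative values only):
 *	blade 3: address:0xffff880012345000 5 of 8, not cpu(s): 2 6 7
 * meaning 5 of the 8 targeted cpus on blade 3 have acknowledged and cpus
 * 2, 6 and 7 have not yet seen the message.
 */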
/*
 * wait for completion of a broadcast message
 *
 * return COMPLETE, RETRY or GIVEUP
 */
static int uv_wait_completion(struct bau_activation_descriptor *bau_desc,
			      unsigned long mmr_offset, int right_shift)
{
	int exams = 0;
	long destination_timeouts = 0;
	long source_timeouts = 0;
	unsigned long descriptor_status;

	while ((descriptor_status = (((unsigned long)
		uv_read_local_mmr(mmr_offset) >>
			right_shift) & UV_ACT_STATUS_MASK)) !=
			DESC_STATUS_IDLE) {
		if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
			source_timeouts++;
			if (source_timeouts > SOURCE_TIMEOUT_LIMIT)
				source_timeouts = 0;
			__get_cpu_var(ptcstats).s_retry++;
			return FLUSH_RETRY;
		}
		/* spin here looking for progress at the destinations */
		if (descriptor_status == DESC_STATUS_DESTINATION_TIMEOUT) {
			destination_timeouts++;
			if (destination_timeouts > DESTINATION_TIMEOUT_LIMIT) {
				/* returns number of cpus not responding */
				if (uv_examine_destinations
				    (&bau_desc->distribution) == 0) {
					__get_cpu_var(ptcstats).d_retry++;
					return FLUSH_RETRY;
				}
				exams++;
				if (exams >= uv_bau_retry_limit) {
					printk(KERN_DEBUG
					"uv_flush_tlb_others: giving up on cpu %d\n",
					       smp_processor_id());
					return FLUSH_GIVEUP;
				}
				/*
				 * delays can hang the simulator
				 *   udelay(1000);
				 */
				destination_timeouts = 0;
			}
		}
	}
	return FLUSH_COMPLETE;
}
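
/*
 * Note on the status extraction above (illustrative): each sending cpu owns
 * a small field (UV_ACT_STATUS_SIZE bits) in one of the activation status
 * MMRs, so for a cpu whose field starts at bit 6 the current state is
 *	(uv_read_local_mmr(mmr_offset) >> 6) & UV_ACT_STATUS_MASK
 * and the loop exits only when that field returns to DESC_STATUS_IDLE.
 */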
/**
 * uv_flush_send_and_wait
 *
 * Send a broadcast and wait for a broadcast message to complete.
 *
 * The cpumaskp mask contains the cpus the broadcast was sent to.
 *
 * Returns 1 if all remote flushing was done. The mask is zeroed.
 * Returns 0 if some remote flushing remains to be done. The mask is left
 * unchanged.
 */
int uv_flush_send_and_wait(int cpu, int this_blade,
	struct bau_activation_descriptor *bau_desc, cpumask_t *cpumaskp)
{
	int completion_status = 0;
	int right_shift;
	int tries = 0;
	int bit, blade;
	unsigned long index;
	unsigned long mmr_offset;
	cycles_t time1, time2;

	/* each sending cpu's status field lives in one of two MMRs */
	if (cpu < UV_CPUS_PER_ACT_STATUS) {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
		right_shift = cpu * UV_ACT_STATUS_SIZE;
	} else {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		right_shift =
		    ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
	}
	time1 = get_cycles();
	do {
		tries++;
		index = ((unsigned long)
			1 << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) | cpu;
		uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
		completion_status = uv_wait_completion(bau_desc, mmr_offset,
						       right_shift);
	} while (completion_status == FLUSH_RETRY);
	time2 = get_cycles();
	__get_cpu_var(ptcstats).sflush += (time2 - time1);
	if (tries > 1)
		__get_cpu_var(ptcstats).retriesok++;

	if (completion_status == FLUSH_GIVEUP) {
		/*
		 * Cause the caller to do an IPI-style TLB shootdown on
		 * the cpu's, all of which are still in the mask.
		 */
		__get_cpu_var(ptcstats).ptc_i++;
		return 0;
	}

	/*
	 * Success, so clear the remote cpu's from the mask so we don't
	 * use the IPI method of shootdown on them.
	 */
	for_each_cpu_mask(bit, *cpumaskp) {
		blade = uv_cpu_to_blade_id(bit);
		if (blade == this_blade)
			continue;
		cpu_clear(bit, *cpumaskp);
	}
	if (!cpus_empty(*cpumaskp))
		return 0;
	return 1;
}
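
/*
 * Illustrative caller sketch (an assumption about the generic x86 shootdown
 * path, not code from this file): the arch flush code can try the BAU first
 * and fall back to IPIs only for the cpus still left in the mask:
 *
 *	if (uv_flush_tlb_others(&cpumask, mm, va))
 *		return;		(all remote TLBs were purged by the BAU)
 *	(otherwise send conventional IPIs to the cpus remaining in cpumask)
 */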
/**
 * uv_flush_tlb_others - globally purge translation cache of a virtual
 * address or all TLB's
 * @cpumaskp: mask of all cpu's in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumaskp from the mm_struct and has subtracted
 * the local cpu from the mask. This function is called only if there
 * are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumaskp is converted into a nodemask of the nodes containing
 * the cpu's.
 *
 * Returns 1 if all remote flushing was done.
 * Returns 0 if some remote flushing remains to be done.
 */
int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
			unsigned long va)
{
	int i = 0;
	int bit, blade;
	int cpu, this_blade;
	int locals = 0;
	struct bau_activation_descriptor *bau_desc;

	cpu = uv_blade_processor_id();
	this_blade = uv_numa_blade_id();
	bau_desc = __get_cpu_var(bau_control).descriptor_base;
	bau_desc += UV_ITEMS_PER_DESCRIPTOR * cpu;
	bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);

	/* mark every target blade in the descriptor's distribution map */
	for_each_cpu_mask(bit, *cpumaskp) {
		blade = uv_cpu_to_blade_id(bit);
		if (blade > (UV_DISTRIBUTION_SIZE - 1))
			BUG();
		if (blade == this_blade) {
			locals++;
			continue;
		}
		bau_node_set(blade, &bau_desc->distribution);
		i++;
	}
	if (i == 0) {
		/* no off_node flushing; return status for local node */
		return locals ? 0 : 1;
	}
	__get_cpu_var(ptcstats).requestor++;
	__get_cpu_var(ptcstats).ntargeted += i;

	bau_desc->payload.address = va;
	bau_desc->payload.sending_cpu = smp_processor_id();

	return uv_flush_send_and_wait(cpu, this_blade, bau_desc, cpumaskp);
}
/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 * See entry_64.S
 *
 * We received a broadcast assist message.
 *
 * Interrupts may have been disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this node get this interrupt.
 * The last one to see it does the s/w ack.
 * (the resource will not be freed until noninterruptable cpus see this
 *  interrupt; hardware will timeout the s/w ack and reply ERROR)
 */
void uv_bau_message_interrupt(struct pt_regs *regs)
{
	struct bau_payload_queue_entry *pqp;
	struct bau_payload_queue_entry *msg;
	struct pt_regs *old_regs = set_irq_regs(regs);
	cycles_t time1, time2;
	int msg_slot;
	int sw_ack_slot;
	int fw;
	int count = 0;
	unsigned long local_pnode;

	ack_APIC_irq();
	exit_idle();
	irq_enter();

	time1 = get_cycles();
	local_pnode = uv_blade_to_pnode(uv_numa_blade_id());

	pqp = __get_cpu_var(bau_control).va_queue_first;
	msg = __get_cpu_var(bau_control).bau_msg_head;
	while (msg->sw_ack_vector) {
		count++;
		fw = msg->sw_ack_vector;
		msg_slot = msg - pqp;
		sw_ack_slot = ffs(fw) - 1;
		uv_bau_process_message(msg, msg_slot, sw_ack_slot);
		msg++;
		if (msg > __get_cpu_var(bau_control).va_queue_last)
			msg = __get_cpu_var(bau_control).va_queue_first;
		__get_cpu_var(bau_control).bau_msg_head = msg;
	}
	if (!count)
		__get_cpu_var(ptcstats).nomsg++;
	else if (count > 1)
		__get_cpu_var(ptcstats).multmsg++;

	time2 = get_cycles();
	__get_cpu_var(ptcstats).dflush += (time2 - time1);

	irq_exit();
	set_irq_regs(old_regs);
}
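
/*
 * Note on the loop above (illustrative): the payload queue is a circular
 * array of DESTINATION_PAYLOAD_QUEUE_SIZE entries, so
 *	msg_slot = msg - pqp
 * is simply the index of the current entry and selects the matching
 * bau_msg_status; when msg runs past va_queue_last it wraps back to
 * va_queue_first.
 */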
static void uv_enable_timeouts(void)
{
	int i;
	int blade, last_blade = -1;
	int pnode, cur_cpu = 0;
	unsigned long apicid;

	/* visit each blade once (every cpu on a blade shares one hub) */
	for_each_online_node(i) {
		blade = uv_node_to_blade_id(i);
		if (blade == last_blade)
			continue;
		last_blade = blade;
		apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
		pnode = uv_blade_to_pnode(blade);
		cur_cpu += uv_blade_nr_possible_cpus(i);
	}
}
static void *uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
{
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void uv_ptc_seq_stop(struct seq_file *file, void *data)
{
}
/*
 * Display the statistics thru /proc
 * data points to the cpu number
 */
static int uv_ptc_seq_show(struct seq_file *file, void *data)
{
	struct ptc_stats *stat;
	int cpu;

	cpu = *(loff_t *)data;

	if (!cpu) {
		seq_printf(file,
		"# cpu requestor requestee one all sretry dretry ptc_i ");
		seq_printf(file,
		"sw_ack sflush dflush sok dnomsg dmult starget\n");
	}
	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
		stat = &per_cpu(ptcstats, cpu);
		seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld ",
			   cpu, stat->requestor,
			   stat->requestee, stat->onetlb, stat->alltlb,
			   stat->s_retry, stat->d_retry, stat->ptc_i);
		seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
			   uv_read_global_mmr64(uv_blade_to_pnode
					(uv_cpu_to_blade_id(cpu)),
				UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
			   stat->sflush, stat->dflush,
			   stat->retriesok, stat->nomsg,
			   stat->multmsg, stat->ntargeted);
	}
	return 0;
}
/*
 *  0: display meaning of the statistics
 * >0: retry limit
 */
static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
				 size_t count, loff_t *data)
{
	unsigned long newmode;
	char optstr[64];

	/* reject empty or oversized input so optstr stays in bounds */
	if (count == 0 || count > sizeof(optstr))
		return -EINVAL;
	if (copy_from_user(optstr, user, count))
		return -EFAULT;
	optstr[count - 1] = '\0';
	if (strict_strtoul(optstr, 10, &newmode) < 0) {
		printk(KERN_DEBUG "%s is invalid\n", optstr);
		return -EINVAL;
	}

	if (newmode == 0) {
		printk(KERN_DEBUG "# cpu: cpu number\n");
		printk(KERN_DEBUG
		"requestor: times this cpu was the flush requestor\n");
		printk(KERN_DEBUG
		"requestee: times this cpu was requested to flush its TLBs\n");
		printk(KERN_DEBUG
		"one: times requested to flush a single address\n");
		printk(KERN_DEBUG "all: times requested to flush all TLB's\n");
		printk(KERN_DEBUG
		"sretry: number of retries of source-side timeouts\n");
		printk(KERN_DEBUG
		"dretry: number of retries of destination-side timeouts\n");
		printk(KERN_DEBUG
		"ptc_i: times UV fell through to IPI-style flushes\n");
		printk(KERN_DEBUG
		"sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
		printk(KERN_DEBUG
		"sflush_us: cycles spent in uv_flush_tlb_others()\n");
		printk(KERN_DEBUG
		"dflush_us: cycles spent in handling flush requests\n");
		printk(KERN_DEBUG "sok: successes on retry\n");
		printk(KERN_DEBUG "dnomsg: interrupts with no message\n");
		printk(KERN_DEBUG "dmult: interrupts with multiple messages\n");
		printk(KERN_DEBUG "starget: nodes targeted\n");
	} else {
		uv_bau_retry_limit = newmode;
		printk(KERN_DEBUG "timeout retry limit:%d\n",
		       uv_bau_retry_limit);
	}

	return count;
}
static const struct seq_operations uv_ptc_seq_ops = {
	.start		= uv_ptc_seq_start,
	.next		= uv_ptc_seq_next,
	.stop		= uv_ptc_seq_stop,
	.show		= uv_ptc_seq_show
};

static int uv_ptc_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &uv_ptc_seq_ops);
}

static const struct file_operations proc_uv_ptc_operations = {
	.open		= uv_ptc_proc_open,
	.read		= seq_read,
	.write		= uv_ptc_proc_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
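
/*
 * Usage sketch (illustrative; assumes UV_PTC_BASENAME expands to
 * "sgi_uv/ptc_statistics"):
 *	cat /proc/sgi_uv/ptc_statistics       - one line of counters per cpu
 *	echo 0 > /proc/sgi_uv/ptc_statistics  - print the field legend
 *	echo 10 > /proc/sgi_uv/ptc_statistics - set the timeout retry limit
 */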
static int __init uv_ptc_init(void)
{
	struct proc_dir_entry *proc_uv_ptc;

	if (!proc_mkdir("sgi_uv", NULL))
		return -EINVAL;

	proc_uv_ptc = create_proc_entry(UV_PTC_BASENAME, 0444, NULL);
	if (!proc_uv_ptc) {
		printk(KERN_ERR "unable to create %s proc entry\n",
		       UV_PTC_BASENAME);
		return -EINVAL;
	}
	proc_uv_ptc->proc_fops = &proc_uv_ptc_operations;
	return 0;
}
/*
 * begin the initialization of the per-blade control structures
 */
static struct bau_control * __init uv_table_bases_init(int blade, int node)
{
	int i;
	int *ip;
	struct bau_msg_status *msp;
	struct bau_control *bau_tablesp;

	bau_tablesp =
	    kmalloc_node(sizeof(struct bau_control), GFP_KERNEL, node);
	if (!bau_tablesp)
		BUG();
	bau_tablesp->msg_statuses =
	    kmalloc_node(sizeof(struct bau_msg_status) *
			 DESTINATION_PAYLOAD_QUEUE_SIZE, GFP_KERNEL, node);
	if (!bau_tablesp->msg_statuses)
		BUG();
	for (i = 0, msp = bau_tablesp->msg_statuses;
	     i < DESTINATION_PAYLOAD_QUEUE_SIZE; i++, msp++) {
		bau_cpubits_clear(&msp->seen_by, (int)
				  uv_blade_nr_possible_cpus(blade));
	}
	bau_tablesp->watching =
	    kmalloc_node(sizeof(int) * DESTINATION_NUM_RESOURCES,
			 GFP_KERNEL, node);
	if (!bau_tablesp->watching)
		BUG();
	/* the watching array has only DESTINATION_NUM_RESOURCES entries */
	for (i = 0, ip = bau_tablesp->watching;
	     i < DESTINATION_NUM_RESOURCES; i++, ip++) {
		*ip = 0;
	}
	uv_bau_table_bases[blade] = bau_tablesp;
	return bau_tablesp;
}
/*
 * finish the initialization of the per-blade control structures
 */
static void __init uv_table_bases_finish(int blade, int node, int cur_cpu,
					 struct bau_control *bau_tablesp,
					 struct bau_activation_descriptor *adp)
{
	int i;
	struct bau_control *bcp;

	/* point each cpu on this blade at the blade-wide tables */
	for (i = cur_cpu; i < (cur_cpu + uv_blade_nr_possible_cpus(blade));
	     i++) {
		bcp = (struct bau_control *)&per_cpu(bau_control, i);
		bcp->bau_msg_head = bau_tablesp->va_queue_first;
		bcp->va_queue_first = bau_tablesp->va_queue_first;
		bcp->va_queue_last = bau_tablesp->va_queue_last;
		bcp->watching = bau_tablesp->watching;
		bcp->msg_statuses = bau_tablesp->msg_statuses;
		bcp->descriptor_base = adp;
	}
}
/*
 * initialize the sending side's sending buffers
 */
static struct bau_activation_descriptor * __init
uv_activation_descriptor_init(int node, int pnode)
{
	int i;
	unsigned long pa;
	unsigned long m;
	unsigned long n;
	unsigned long mmr_image;
	struct bau_activation_descriptor *adp;
	struct bau_activation_descriptor *ad2;

	adp = (struct bau_activation_descriptor *)
	    kmalloc_node(16384, GFP_KERNEL, node);
	if (!adp)
		BUG();
	pa = __pa((unsigned long)adp);
	n = pa >> uv_nshift;	/* pnode bits of the physical address */
	m = pa & uv_mmask;	/* node-local offset */
	mmr_image = uv_read_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE);
	if (mmr_image)
		uv_write_global_mmr64(pnode, (unsigned long)
				      UVH_LB_BAU_SB_DESCRIPTOR_BASE,
				      (n << UV_DESC_BASE_PNODE_SHIFT | m));
	for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
		memset(ad2, 0, sizeof(struct bau_activation_descriptor));
		ad2->header.sw_ack_flag = 1;
		ad2->header.base_dest_nodeid =
		    uv_blade_to_pnode(uv_cpu_to_blade_id(0));
		ad2->header.command = UV_NET_ENDPOINT_INTD;
		ad2->header.int_both = 1;
		/*
		 * all others need to be set to zero:
		 *   fairness chaining multilevel count replied_to
		 */
	}
	return adp;
}
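
/*
 * Illustrative note on the descriptor base written above: the hub takes the
 * physical address split into a pnode part and a node-local offset, so the
 * MMR value is composed as
 *	(pa >> uv_nshift) << UV_DESC_BASE_PNODE_SHIFT | (pa & uv_mmask)
 * where uv_nshift and uv_mmask are derived from uv_hub_info->n_val at init.
 */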
/*
 * initialize the destination side's receiving buffers
 */
static struct bau_payload_queue_entry * __init uv_payload_queue_init(int node,
				int pnode, struct bau_control *bau_tablesp)
{
	char *cp;
	struct bau_payload_queue_entry *pqp;

	pqp = (struct bau_payload_queue_entry *)
	    kmalloc_node((DESTINATION_PAYLOAD_QUEUE_SIZE + 1) *
			 sizeof(struct bau_payload_queue_entry),
			 GFP_KERNEL, node);
	if (!pqp)
		BUG();
	/* round the queue up to the next 32-byte boundary */
	cp = (char *)pqp + 31;
	pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
	bau_tablesp->va_queue_first = pqp;
	uv_write_global_mmr64(pnode,
			      UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
			      ((unsigned long)pnode <<
			       UV_PAYLOADQ_PNODE_SHIFT) |
			      uv_physnodeaddr(pqp));
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
			      uv_physnodeaddr(pqp));
	bau_tablesp->va_queue_last =
	    pqp + (DESTINATION_PAYLOAD_QUEUE_SIZE - 1);
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
			      (unsigned long)
			      uv_physnodeaddr(bau_tablesp->va_queue_last));
	memset(pqp, 0, sizeof(struct bau_payload_queue_entry) *
	       DESTINATION_PAYLOAD_QUEUE_SIZE);
	return pqp;
}
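
/*
 * Example of the alignment arithmetic above (illustrative): if kmalloc_node()
 * returned 0x...1234, then cp is 0x...1253 and
 *	((unsigned long)cp >> 5) << 5 == 0x...1240,
 * the first 32-byte-aligned address inside the buffer; the extra entry
 * allocated above provides the slack consumed by this round-up.
 */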
/*
 * Initialization of each UV blade's structures
 */
static int __init uv_init_blade(int blade, int node, int cur_cpu)
{
	int pnode;
	unsigned long pa;
	unsigned long apicid;
	struct bau_activation_descriptor *adp;
	struct bau_payload_queue_entry *pqp;
	struct bau_control *bau_tablesp;

	bau_tablesp = uv_table_bases_init(blade, node);
	pnode = uv_blade_to_pnode(blade);
	adp = uv_activation_descriptor_init(node, pnode);
	pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
	uv_table_bases_finish(blade, node, cur_cpu, bau_tablesp, adp);
	/*
	 * the below initialization can't be in firmware because the
	 * messaging IRQ will be determined by the OS
	 */
	apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
	pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
	if ((pa & 0xff) != UV_BAU_MESSAGE)
		uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
				      ((apicid << 32) | UV_BAU_MESSAGE));
	return 0;
}
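
/*
 * Illustrative reading of the MMR write above: as encoded by the code, the
 * low byte of UVH_BAU_DATA_CONFIG carries the interrupt vector
 * (UV_BAU_MESSAGE, checked via pa & 0xff) and the apicid of the blade's
 * chosen cpu is placed in the upper 32 bits.
 */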
/*
 * Initialization of BAU-related structures
 */
static int __init uv_bau_init(void)
{
	int blade, node;
	int nblades = 0, last_blade = -1;
	int cur_cpu = 0;

	if (!is_uv_system())
		return 0;

	uv_bau_retry_limit = 1;
	uv_nshift = uv_hub_info->n_val;
	uv_mmask = ((unsigned long)1 << uv_hub_info->n_val) - 1;

	/* count the blades; nodes on the same blade share one hub */
	for_each_online_node(node) {
		blade = uv_node_to_blade_id(node);
		if (blade == last_blade)
			continue;
		last_blade = blade;
		nblades++;
	}
	uv_bau_table_bases = (struct bau_control **)
	    kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
	if (!uv_bau_table_bases)
		BUG();

	last_blade = -1;
	for_each_online_node(node) {
		blade = uv_node_to_blade_id(node);
		if (blade == last_blade)
			continue;
		last_blade = blade;
		uv_init_blade(blade, node, cur_cpu);
		cur_cpu += uv_blade_nr_possible_cpus(blade);
	}
	set_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
	uv_enable_timeouts();

	return 0;
}
__initcall(uv_bau_init);
__initcall(uv_ptc_init);