2 * SGI UltraViolet TLB flush routines.
4 * (c) 2008-2014 Cliff Wickman <cpw@sgi.com>, SGI.
6 * This code is released under the GNU General Public License version 2 or
9 #include <linux/seq_file.h>
10 #include <linux/proc_fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14 #include <linux/delay.h>
16 #include <asm/mmu_context.h>
17 #include <asm/uv/uv.h>
18 #include <asm/uv/uv_mmrs.h>
19 #include <asm/uv/uv_hub.h>
20 #include <asm/uv/uv_bau.h>
23 #include <asm/irq_vectors.h>
24 #include <asm/timer.h>
26 static struct bau_operations ops __ro_after_init;
28 /* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
29 static int timeout_base_ns[] = {
40 static int timeout_us;
41 static bool nobau = true;
42 static int nobau_perm;
43 static cycles_t congested_cycles;
46 static int max_concurr = MAX_BAU_CONCURRENT;
47 static int max_concurr_const = MAX_BAU_CONCURRENT;
48 static int plugged_delay = PLUGGED_DELAY;
49 static int plugsb4reset = PLUGSB4RESET;
50 static int giveup_limit = GIVEUP_LIMIT;
51 static int timeoutsb4reset = TIMEOUTSB4RESET;
52 static int ipi_reset_limit = IPI_RESET_LIMIT;
53 static int complete_threshold = COMPLETE_THRESHOLD;
54 static int congested_respns_us = CONGESTED_RESPONSE_US;
55 static int congested_reps = CONGESTED_REPS;
56 static int disabled_period = DISABLED_PERIOD;
58 static struct tunables tunables[] = {
59 {&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */
60 {&plugged_delay, PLUGGED_DELAY},
61 {&plugsb4reset, PLUGSB4RESET},
62 {&timeoutsb4reset, TIMEOUTSB4RESET},
63 {&ipi_reset_limit, IPI_RESET_LIMIT},
64 {&complete_threshold, COMPLETE_THRESHOLD},
65 {&congested_respns_us, CONGESTED_RESPONSE_US},
66 {&congested_reps, CONGESTED_REPS},
67 {&disabled_period, DISABLED_PERIOD},
68 {&giveup_limit, GIVEUP_LIMIT}
71 static struct dentry *tunables_dir;
72 static struct dentry *tunables_file;
74 /* these correspond to the statistics printed by ptc_seq_show() */
75 static char *stat_description[] = {
76 "sent: number of shootdown messages sent",
77 "stime: time spent sending messages",
78 "numuvhubs: number of hubs targeted with shootdown",
79 "numuvhubs16: number times 16 or more hubs targeted",
80 "numuvhubs8: number times 8 or more hubs targeted",
81 "numuvhubs4: number times 4 or more hubs targeted",
82 "numuvhubs2: number times 2 or more hubs targeted",
83 "numuvhubs1: number times 1 hub targeted",
84 "numcpus: number of cpus targeted with shootdown",
85 "dto: number of destination timeouts",
86 "retries: destination timeout retries sent",
87 "rok: : destination timeouts successfully retried",
88 "resetp: ipi-style resource resets for plugs",
89 "resett: ipi-style resource resets for timeouts",
90 "giveup: fall-backs to ipi-style shootdowns",
91 "sto: number of source timeouts",
92 "bz: number of stay-busy's",
93 "throt: number times spun in throttle",
94 "swack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE",
95 "recv: shootdown messages received",
96 "rtime: time spent processing messages",
97 "all: shootdown all-tlb messages",
98 "one: shootdown one-tlb messages",
99 "mult: interrupts that found multiple messages",
100 "none: interrupts that found no messages",
101 "retry: number of retry messages processed",
102 "canc: number messages canceled by retries",
103 "nocan: number retries that found nothing to cancel",
104 "reset: number of ipi-style reset requests processed",
105 "rcan: number messages canceled by reset requests",
106 "disable: number times use of the BAU was disabled",
107 "enable: number times use of the BAU was re-enabled"
110 static int __init setup_bau(char *arg)
117 result = strtobool(arg, &nobau);
121 /* we need to flip the logic here, so that bau=y sets nobau to false */
125 pr_info("UV BAU Enabled\n");
127 pr_info("UV BAU Disabled\n");
131 early_param("bau", setup_bau);
133 /* base pnode in this partition */
134 static int uv_base_pnode __read_mostly;
136 static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
137 static DEFINE_PER_CPU(struct bau_control, bau_control);
138 static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
144 struct bau_control *bcp;
147 pr_info("BAU not initialized; cannot be turned on\n");
151 for_each_present_cpu(cpu) {
152 bcp = &per_cpu(bau_control, cpu);
155 pr_info("BAU turned on\n");
163 struct bau_control *bcp;
166 for_each_present_cpu(cpu) {
167 bcp = &per_cpu(bau_control, cpu);
170 pr_info("BAU turned off\n");
175 * Determine the first node on a uvhub. 'Nodes' are used for kernel
178 static int __init uvhub_to_first_node(int uvhub)
182 for_each_online_node(node) {
183 b = uv_node_to_blade_id(node);
191 * Determine the apicid of the first cpu on a uvhub.
193 static int __init uvhub_to_first_apicid(int uvhub)
197 for_each_present_cpu(cpu)
198 if (uvhub == uv_cpu_to_blade_id(cpu))
199 return per_cpu(x86_cpu_to_apicid, cpu);
204 * Free a software acknowledge hardware resource by clearing its Pending
205 * bit. This will return a reply to the sender.
206 * If the message has timed out, a reply has already been sent by the
207 * hardware but the resource has not been released. In that case our
208 * clear of the Timeout bit (as well) will free the resource. No reply will
209 * be sent (the hardware will only do one reply per message).
211 static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
215 struct bau_pq_entry *msg;
218 if (!msg->canceled && do_acknowledge) {
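/*
 * Clear both the Pending and the Timeout bit for this message's swack
 * resource; per the comment above, this frees the resource even if the
 * message has already timed out.
 */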
219 dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
220 ops.write_l_sw_ack(dw);
227 * Process the receipt of a RETRY message
229 static void bau_process_retry_msg(struct msg_desc *mdp,
230 struct bau_control *bcp)
233 int cancel_count = 0;
234 unsigned long msg_res;
235 unsigned long mmr = 0;
236 struct bau_pq_entry *msg = mdp->msg;
237 struct bau_pq_entry *msg2;
238 struct ptc_stats *stat = bcp->statp;
242 * cancel any message from msg+1 to the retry itself
244 for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
245 if (msg2 > mdp->queue_last)
246 msg2 = mdp->queue_first;
250 /* same conditions for cancellation as do_reset */
251 if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
252 (msg2->swack_vec) && ((msg2->swack_vec &
253 msg->swack_vec) == 0) &&
254 (msg2->sending_cpu == msg->sending_cpu) &&
255 (msg2->msg_type != MSG_NOOP)) {
256 mmr = ops.read_l_sw_ack();
257 msg_res = msg2->swack_vec;
259 * This is a message retry; clear the resources held
260 * by the previous message only if they timed out.
261 * If it has not timed out we have an unexpected
262 * situation to report.
264 if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
267 * Is the resource timed out?
268 * Make everyone ignore the cancelled message.
273 mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
274 ops.write_l_sw_ack(mr);
279 stat->d_nocanceled++;
283 * Do all the things a cpu should do for a TLB shootdown message.
284 * Other cpus may come here at the same time for this message.
286 static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
289 short socket_ack_count = 0;
291 struct atomic_short *asp;
292 struct ptc_stats *stat = bcp->statp;
293 struct bau_pq_entry *msg = mdp->msg;
294 struct bau_control *smaster = bcp->socket_master;
297 * This must be a normal message, or retry of a normal message
299 if (msg->address == TLB_FLUSH_ALL) {
303 __flush_tlb_one(msg->address);
309 * One cpu on each uvhub has the additional job on a RETRY
310 * of releasing the resource held by the message that is
311 * being retried. That message is identified by sending
314 if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
315 bau_process_retry_msg(mdp, bcp);
318 * This is a swack message, so we have to reply to it.
319 * Count each responding cpu on the socket. This avoids
320 * pinging the count's cache line back and forth between
323 sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
324 asp = (struct atomic_short *)sp;
325 socket_ack_count = atom_asr(1, asp);
326 if (socket_ack_count == bcp->cpus_in_socket) {
329 * Both sockets dump their completed count total into
330 * the message's count.
333 asp = (struct atomic_short *)&msg->acknowledge_count;
334 msg_ack_count = atom_asr(socket_ack_count, asp);
336 if (msg_ack_count == bcp->cpus_in_uvhub) {
338 * All cpus in uvhub saw it; reply
339 * (unless we are in the UV2 workaround)
341 reply_to_message(mdp, bcp, do_acknowledge);
349 * Determine the first cpu on a pnode.
351 static int pnode_to_first_cpu(int pnode, struct bau_control *smaster)
354 struct hub_and_pnode *hpp;
356 for_each_present_cpu(cpu) {
357 hpp = &smaster->thp[cpu];
358 if (pnode == hpp->pnode)
365 * Last resort when we get a large number of destination timeouts is
366 * to clear resources held by a given cpu.
367 * Do this with IPI so that all messages in the BAU message queue
368 * can be identified by their nonzero swack_vec field.
370 * This is entered for a single cpu on the uvhub.
371 * The sender wants this uvhub to free a specific message's
374 static void do_reset(void *ptr)
377 struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
378 struct reset_args *rap = (struct reset_args *)ptr;
379 struct bau_pq_entry *msg;
380 struct ptc_stats *stat = bcp->statp;
384 * We're looking for the given sender, and
385 * will free its swack resource.
386 * If all cpus finally responded after the timeout, its
387 * message 'replied_to' was set.
389 for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
390 unsigned long msg_res;
391 /* do_reset: same conditions for cancellation as
392 bau_process_retry_msg() */
393 if ((msg->replied_to == 0) &&
394 (msg->canceled == 0) &&
395 (msg->sending_cpu == rap->sender) &&
397 (msg->msg_type != MSG_NOOP)) {
401 * make everyone else ignore this message
405 * only reset the resource if it is still pending
407 mmr = ops.read_l_sw_ack();
408 msg_res = msg->swack_vec;
409 mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
412 ops.write_l_sw_ack(mr);
420 * Use IPI to get all target uvhubs to release resources held by
421 * a given sending cpu number.
423 static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
428 int sender = bcp->cpu;
429 cpumask_t *mask = bcp->uvhub_master->cpumask;
430 struct bau_control *smaster = bcp->socket_master;
431 struct reset_args reset_args;
433 reset_args.sender = sender;
435 /* find a single cpu for each uvhub in this distribution mask */
436 maskbits = sizeof(struct pnmask) * BITSPERBYTE;
437 /* each bit is a pnode relative to the partition base pnode */
438 for (pnode = 0; pnode < maskbits; pnode++) {
440 if (!bau_uvhub_isset(pnode, distribution))
442 apnode = pnode + bcp->partition_base_pnode;
443 cpu = pnode_to_first_cpu(apnode, smaster);
444 cpumask_set_cpu(cpu, mask);
447 /* IPI all cpus; preemption is already disabled */
448 smp_call_function_many(mask, do_reset, (void *)&reset_args, 1);
453 * Not to be confused with cycles_2_ns() from tsc.c; this gives a relative
454 * number, not an absolute. It converts a duration in cycles to a duration in
457 static inline unsigned long long cycles_2_ns(unsigned long long cyc)
459 struct cyc2ns_data data;
460 unsigned long long ns;
462 cyc2ns_read_begin(&data);
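/* scale: ns = (cyc * data.cyc2ns_mul) >> data.cyc2ns_shift */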
463 ns = mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
470 * The reverse of the above; converts a duration in ns to a duration in cycles.
472 static inline unsigned long long ns_2_cycles(unsigned long long ns)
474 struct cyc2ns_data data;
475 unsigned long long cyc;
477 cyc2ns_read_begin(&data);
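/* inverse of cycles_2_ns(): cyc = (ns << shift) / mul */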
478 cyc = (ns << data.cyc2ns_shift) / data.cyc2ns_mul;
484 static inline unsigned long cycles_2_us(unsigned long long cyc)
486 return cycles_2_ns(cyc) / NSEC_PER_USEC;
489 static inline cycles_t sec_2_cycles(unsigned long sec)
491 return ns_2_cycles(sec * NSEC_PER_SEC);
494 static inline unsigned long long usec_2_cycles(unsigned long usec)
496 return ns_2_cycles(usec * NSEC_PER_USEC);
500 * wait for all cpus on this hub to finish their sends and go quiet
501 * leaves uvhub_quiesce set so that no new broadcasts are started by
502 * uv_flush_send_and_wait()
504 static inline void quiesce_local_uvhub(struct bau_control *hmaster)
506 atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
510 * mark this quiet-requestor as done
512 static inline void end_uvhub_quiesce(struct bau_control *hmaster)
514 atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
517 static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
519 unsigned long descriptor_status;
521 descriptor_status = uv_read_local_mmr(mmr_offset);
522 descriptor_status >>= right_shift;
523 descriptor_status &= UV_ACT_STATUS_MASK;
524 return descriptor_status;
528 * Wait for completion of a broadcast software ack message
529 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
531 static int uv1_wait_completion(struct bau_desc *bau_desc,
532 struct bau_control *bcp, long try)
534 unsigned long descriptor_status;
536 u64 mmr_offset = bcp->status_mmr;
537 int right_shift = bcp->status_index;
538 struct ptc_stats *stat = bcp->statp;
540 descriptor_status = uv1_read_status(mmr_offset, right_shift);
541 /* spin on the status MMR, waiting for it to go idle */
542 while ((descriptor_status != DS_IDLE)) {
544 * Our software ack messages may be blocked because
545 * there are no swack resources available. As long
546 * as none of them has timed out hardware will NACK
547 * our message and its state will stay IDLE.
549 if (descriptor_status == DS_SOURCE_TIMEOUT) {
552 } else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
557 * Our retries may be blocked by all destination
558 * swack resources being consumed, and a timeout
559 * pending. In that case hardware returns the
560 * ERROR that looks like a destination timeout.
562 if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
563 bcp->conseccompletes = 0;
564 return FLUSH_RETRY_PLUGGED;
567 bcp->conseccompletes = 0;
568 return FLUSH_RETRY_TIMEOUT;
571 * descriptor_status is still BUSY
575 descriptor_status = uv1_read_status(mmr_offset, right_shift);
577 bcp->conseccompletes++;
578 return FLUSH_COMPLETE;
582 * UV2 could have an extra bit of status in the ACTIVATION_STATUS_2 register,
583 * but it is not currently used.
585 static unsigned long uv2_3_read_status(unsigned long offset, int rshft, int desc)
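/* shift left to leave bit 0 (the unused ACTIVATION_STATUS_2 bit) clear */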
587 return ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK) << 1;
591 * Return whether the status of the descriptor that is normally used for this
592 * cpu (the one indexed by its hub-relative cpu number) is busy.
593 * The status of the original 32 descriptors is always reflected in the 64
594 * bits of UVH_LB_BAU_SB_ACTIVATION_STATUS_0.
595 * The bit provided by the activation_status_2 register is irrelevant to
596 * the status if it is only being tested for busy or not busy.
598 int normal_busy(struct bau_control *bcp)
600 int cpu = bcp->uvhub_cpu;
604 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
605 right_shift = cpu * UV_ACT_STATUS_SIZE;
606 return (((((read_lmmr(mmr_offset) >> right_shift) &
607 UV_ACT_STATUS_MASK)) << 1) == UV2H_DESC_BUSY);
611 * Entered when a bau descriptor has gone into a permanent busy wait because
613 * Workaround the bug.
615 int handle_uv2_busy(struct bau_control *bcp)
617 struct ptc_stats *stat = bcp->statp;
624 static int uv2_3_wait_completion(struct bau_desc *bau_desc,
625 struct bau_control *bcp, long try)
627 unsigned long descriptor_stat;
629 u64 mmr_offset = bcp->status_mmr;
630 int right_shift = bcp->status_index;
631 int desc = bcp->uvhub_cpu;
633 struct ptc_stats *stat = bcp->statp;
635 descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);
637 /* spin on the status MMR, waiting for it to go idle */
638 while (descriptor_stat != UV2H_DESC_IDLE) {
639 if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT)) {
641 * A h/w bug on the destination side may
642 * have prevented the message from being marked
643 * pending, thus it doesn't get replied to
644 * and gets continually nacked until it times
645 * out with a SOURCE_TIMEOUT.
649 } else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
653 * Our retries may be blocked by all destination
654 * swack resources being consumed, and a timeout
655 * pending. In that case hardware returns the
656 * ERROR that looks like a destination timeout.
657 * Without using the extended status we have to
658 * deduce from the short time that this was a
661 if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
662 bcp->conseccompletes = 0;
664 /* FLUSH_RETRY_PLUGGED causes hang on boot */
668 bcp->conseccompletes = 0;
669 /* FLUSH_RETRY_TIMEOUT causes hang on boot */
673 if (busy_reps > 1000000) {
674 /* not to hammer on the clock */
677 if ((ttm - bcp->send_message) > bcp->timeout_interval)
678 return handle_uv2_busy(bcp);
681 * descriptor_stat is still BUSY
685 descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);
687 bcp->conseccompletes++;
688 return FLUSH_COMPLETE;
692 * Returns the status of the current BAU message for cpu desc as a bit field
695 static u64 read_status(u64 status_mmr, int index, int desc)
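/* combine the 2-bit STATUS_0/1 field with the extra bit from STATUS_2 */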
699 stat = ((read_lmmr(status_mmr) >> index) & UV_ACT_STATUS_MASK) << 1;
700 stat |= (read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_2) >> desc) & 0x1;
705 static int uv4_wait_completion(struct bau_desc *bau_desc,
706 struct bau_control *bcp, long try)
708 struct ptc_stats *stat = bcp->statp;
710 u64 mmr = bcp->status_mmr;
711 int index = bcp->status_index;
712 int desc = bcp->uvhub_cpu;
714 descriptor_stat = read_status(mmr, index, desc);
716 /* spin on the status MMR, waiting for it to go idle */
717 while (descriptor_stat != UV2H_DESC_IDLE) {
718 switch (descriptor_stat) {
719 case UV2H_DESC_SOURCE_TIMEOUT:
723 case UV2H_DESC_DEST_TIMEOUT:
725 bcp->conseccompletes = 0;
726 return FLUSH_RETRY_TIMEOUT;
728 case UV2H_DESC_DEST_STRONG_NACK:
730 bcp->conseccompletes = 0;
731 return FLUSH_RETRY_PLUGGED;
733 case UV2H_DESC_DEST_PUT_ERR:
734 bcp->conseccompletes = 0;
738 /* descriptor_stat is still BUSY */
741 descriptor_stat = read_status(mmr, index, desc);
743 bcp->conseccompletes++;
744 return FLUSH_COMPLETE;
748 * Our retries are blocked by all destination sw ack resources being
749 * in use, and a timeout is pending. In that case hardware immediately
750 * returns the ERROR that looks like a destination timeout.
752 static void destination_plugged(struct bau_desc *bau_desc,
753 struct bau_control *bcp,
754 struct bau_control *hmaster, struct ptc_stats *stat)
756 udelay(bcp->plugged_delay);
757 bcp->plugged_tries++;
759 if (bcp->plugged_tries >= bcp->plugsb4reset) {
760 bcp->plugged_tries = 0;
762 quiesce_local_uvhub(hmaster);
764 spin_lock(&hmaster->queue_lock);
765 reset_with_ipi(&bau_desc->distribution, bcp);
766 spin_unlock(&hmaster->queue_lock);
768 end_uvhub_quiesce(hmaster);
771 stat->s_resets_plug++;
775 static void destination_timeout(struct bau_desc *bau_desc,
776 struct bau_control *bcp, struct bau_control *hmaster,
777 struct ptc_stats *stat)
779 hmaster->max_concurr = 1;
780 bcp->timeout_tries++;
781 if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
782 bcp->timeout_tries = 0;
784 quiesce_local_uvhub(hmaster);
786 spin_lock(&hmaster->queue_lock);
787 reset_with_ipi(&bau_desc->distribution, bcp);
788 spin_unlock(&hmaster->queue_lock);
790 end_uvhub_quiesce(hmaster);
793 stat->s_resets_timeout++;
798 * Stop all cpus on a uvhub from using the BAU for a period of time.
799 * This is reversed by check_enable.
801 static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
804 struct bau_control *tbcp;
805 struct bau_control *hmaster;
808 hmaster = bcp->uvhub_master;
809 spin_lock(&hmaster->disable_lock);
810 if (!bcp->baudisabled) {
811 stat->s_bau_disabled++;
813 for_each_present_cpu(tcpu) {
814 tbcp = &per_cpu(bau_control, tcpu);
815 if (tbcp->uvhub_master == hmaster) {
816 tbcp->baudisabled = 1;
817 tbcp->set_bau_on_time =
818 tm1 + bcp->disabled_period;
822 spin_unlock(&hmaster->disable_lock);
825 static void count_max_concurr(int stat, struct bau_control *bcp,
826 struct bau_control *hmaster)
828 bcp->plugged_tries = 0;
829 bcp->timeout_tries = 0;
830 if (stat != FLUSH_COMPLETE)
832 if (bcp->conseccompletes <= bcp->complete_threshold)
834 if (hmaster->max_concurr >= hmaster->max_concurr_const)
836 hmaster->max_concurr++;
839 static void record_send_stats(cycles_t time1, cycles_t time2,
840 struct bau_control *bcp, struct ptc_stats *stat,
841 int completion_status, int try)
846 elapsed = time2 - time1;
847 stat->s_time += elapsed;
849 if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
850 bcp->period_requests++;
851 bcp->period_time += elapsed;
852 if ((elapsed > congested_cycles) &&
853 (bcp->period_requests > bcp->cong_reps) &&
854 ((bcp->period_time / bcp->period_requests) >
857 disable_for_period(bcp, stat);
863 if (completion_status == FLUSH_COMPLETE && try > 1)
865 else if (completion_status == FLUSH_GIVEUP) {
867 if (get_cycles() > bcp->period_end)
868 bcp->period_giveups = 0;
869 bcp->period_giveups++;
870 if (bcp->period_giveups == 1)
871 bcp->period_end = get_cycles() + bcp->disabled_period;
872 if (bcp->period_giveups > bcp->giveup_limit) {
873 disable_for_period(bcp, stat);
874 stat->s_giveuplimit++;
880 * Because of a uv1 hardware bug only a limited number of concurrent
881 * requests can be made.
883 static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
885 spinlock_t *lock = &hmaster->uvhub_lock;
888 v = &hmaster->active_descriptor_count;
889 if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {
893 } while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
898 * Handle the completion status of a message send.
900 static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
901 struct bau_control *bcp, struct bau_control *hmaster,
902 struct ptc_stats *stat)
904 if (completion_status == FLUSH_RETRY_PLUGGED)
905 destination_plugged(bau_desc, bcp, hmaster, stat);
906 else if (completion_status == FLUSH_RETRY_TIMEOUT)
907 destination_timeout(bau_desc, bcp, hmaster, stat);
911 * Send a broadcast and wait for it to complete.
913 * The flush_mask contains the cpus the broadcast is to be sent to, including
914 * cpus that are on the local uvhub.
916 * Returns 0 if all flushing represented in the mask was done.
917 * Returns 1 if it gives up entirely and the original cpu mask is to be
918 * returned to the kernel.
920 int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp,
921 struct bau_desc *bau_desc)
924 int completion_stat = 0;
930 struct ptc_stats *stat = bcp->statp;
931 struct bau_control *hmaster = bcp->uvhub_master;
932 struct uv1_bau_msg_header *uv1_hdr = NULL;
933 struct uv2_3_bau_msg_header *uv2_3_hdr = NULL;
935 if (bcp->uvhub_version == UV_BAU_V1) {
937 uv1_throttle(hmaster, stat);
940 while (hmaster->uvhub_quiesce)
943 time1 = get_cycles();
945 uv1_hdr = &bau_desc->header.uv1_hdr;
948 uv2_3_hdr = &bau_desc->header.uv2_3_hdr;
953 uv1_hdr->msg_type = MSG_REGULAR;
955 uv2_3_hdr->msg_type = MSG_REGULAR;
956 seq_number = bcp->message_number++;
959 uv1_hdr->msg_type = MSG_RETRY;
961 uv2_3_hdr->msg_type = MSG_RETRY;
962 stat->s_retry_messages++;
966 uv1_hdr->sequence = seq_number;
968 uv2_3_hdr->sequence = seq_number;
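/*
 * The write of this index to the activation MMR below selects this
 * cpu's descriptor, and the 'push' bit starts the broadcast.
 */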
969 index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
970 bcp->send_message = get_cycles();
972 write_mmr_activation(index);
975 completion_stat = ops.wait_completion(bau_desc, bcp, try);
977 handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
979 if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
980 bcp->ipi_attempts = 0;
981 stat->s_overipilimit++;
982 completion_stat = FLUSH_GIVEUP;
986 } while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
987 (completion_stat == FLUSH_RETRY_TIMEOUT));
989 time2 = get_cycles();
991 count_max_concurr(completion_stat, bcp, hmaster);
993 while (hmaster->uvhub_quiesce)
996 atomic_dec(&hmaster->active_descriptor_count);
998 record_send_stats(time1, time2, bcp, stat, completion_stat, try);
1000 if (completion_stat == FLUSH_GIVEUP)
1001 /* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
1007 * The BAU is disabled for this uvhub. When the disabled time period has
1008 * expired, re-enable it.
1009 * Return 0 if it is re-enabled for all cpus on this uvhub.
1011 static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
1014 struct bau_control *tbcp;
1015 struct bau_control *hmaster;
1017 hmaster = bcp->uvhub_master;
1018 spin_lock(&hmaster->disable_lock);
1019 if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
1020 stat->s_bau_reenabled++;
1021 for_each_present_cpu(tcpu) {
1022 tbcp = &per_cpu(bau_control, tcpu);
1023 if (tbcp->uvhub_master == hmaster) {
1024 tbcp->baudisabled = 0;
1025 tbcp->period_requests = 0;
1026 tbcp->period_time = 0;
1027 tbcp->period_giveups = 0;
1030 spin_unlock(&hmaster->disable_lock);
1033 spin_unlock(&hmaster->disable_lock);
1037 static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
1038 int remotes, struct bau_desc *bau_desc)
1040 stat->s_requestor++;
1041 stat->s_ntargcpu += remotes + locals;
1042 stat->s_ntargremotes += remotes;
1043 stat->s_ntarglocals += locals;
1045 /* uvhub statistics */
1046 hubs = bau_uvhub_weight(&bau_desc->distribution);
1048 stat->s_ntarglocaluvhub++;
1049 stat->s_ntargremoteuvhub += (hubs - 1);
1051 stat->s_ntargremoteuvhub += hubs;
1053 stat->s_ntarguvhub += hubs;
1056 stat->s_ntarguvhub16++;
1058 stat->s_ntarguvhub8++;
1060 stat->s_ntarguvhub4++;
1062 stat->s_ntarguvhub2++;
1064 stat->s_ntarguvhub1++;
1068 * Translate a cpu mask to the uvhub distribution mask in the BAU
1069 * activation descriptor.
1071 static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
1072 struct bau_desc *bau_desc, int *localsp, int *remotesp)
1077 struct hub_and_pnode *hpp;
1079 for_each_cpu(cpu, flush_mask) {
1081 * The distribution vector is a bit map of pnodes, relative
1082 * to the partition base pnode (and the partition base nasid
1084 * Translate cpu to pnode and hub using a local memory array.
1086 hpp = &bcp->socket_master->thp[cpu];
1087 pnode = hpp->pnode - bcp->partition_base_pnode;
1088 bau_uvhub_set(pnode, &bau_desc->distribution);
1090 if (hpp->uvhub == bcp->uvhub)
1101 * globally purge translation cache of a virtual address or all TLBs
1102 * @cpumask: mask of all cpu's in which the address is to be removed
1103 * @mm: mm_struct containing virtual address range
1104 * @start: start virtual address to be removed from TLB
1105 * @end: end virtual address to be removed from TLB
1106 * @cpu: the current cpu
1108 * This is the entry point for initiating any UV global TLB shootdown.
1110 * Purges the translation caches of all specified processors of the given
1111 * virtual address, or purges all TLBs on specified processors.
1113 * The caller has derived the cpumask from the mm_struct. This function
1114 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
1116 * The cpumask is converted into a uvhubmask of the uvhubs containing
1119 * Note that this function should be called with preemption disabled.
1121 * Returns NULL if all remote flushing was done.
1122 * Returns pointer to cpumask if some remote flushing remains to be
1123 * done. The returned pointer is valid till preemption is re-enabled.
1125 const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
1126 const struct flush_tlb_info *info)
1128 unsigned int cpu = smp_processor_id();
1129 int locals = 0, remotes = 0, hubs = 0;
1130 struct bau_desc *bau_desc;
1131 struct cpumask *flush_mask;
1132 struct ptc_stats *stat;
1133 struct bau_control *bcp;
1134 unsigned long descriptor_status, status, address;
1136 bcp = &per_cpu(bau_control, cpu);
1146 read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0);
1147 status = ((descriptor_status >> (bcp->uvhub_cpu *
1148 UV_ACT_STATUS_SIZE)) & UV_ACT_STATUS_MASK) << 1;
1149 if (status == UV2H_DESC_BUSY)
1154 /* bau was disabled due to slow response */
1155 if (bcp->baudisabled) {
1156 if (check_enable(bcp, stat)) {
1157 stat->s_ipifordisabled++;
1163 * Each sending cpu has a per-cpu mask which it fills from the caller's
1164 * cpu mask. All cpus are converted to uvhubs and copied to the
1165 * activation descriptor.
1167 flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
1168 /* don't actually do a shootdown of the local cpu */
1169 cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
1171 if (cpumask_test_cpu(cpu, cpumask))
1172 stat->s_ntargself++;
1174 bau_desc = bcp->descriptor_base;
1175 bau_desc += (ITEMS_PER_DESC * bcp->uvhub_cpu);
1176 bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
1177 if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
1180 record_send_statistics(stat, locals, hubs, remotes, bau_desc);
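/* flush a single page by address; anything larger flushes everything */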
1182 if (!info->end || (info->end - info->start) <= PAGE_SIZE)
1183 address = info->start;
1185 address = TLB_FLUSH_ALL;
1187 switch (bcp->uvhub_version) {
1191 bau_desc->payload.uv1_2_3.address = address;
1192 bau_desc->payload.uv1_2_3.sending_cpu = cpu;
1195 bau_desc->payload.uv4.address = address;
1196 bau_desc->payload.uv4.sending_cpu = cpu;
1197 bau_desc->payload.uv4.qualifier = BAU_DESC_QUALIFIER;
1202 * uv_flush_send_and_wait returns 0 if all cpus were messaged,
1203 * or 1 if it gave up and the original cpumask should be returned.
1205 if (!uv_flush_send_and_wait(flush_mask, bcp, bau_desc))
1212 * Search the message queue for any 'other' unprocessed message with the
1213 * same software acknowledge resource bit vector as the 'msg' message.
1215 struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
1216 struct bau_control *bcp)
1218 struct bau_pq_entry *msg_next = msg + 1;
1219 unsigned char swack_vec = msg->swack_vec;
1221 if (msg_next > bcp->queue_last)
1222 msg_next = bcp->queue_first;
1223 while (msg_next != msg) {
1224 if ((msg_next->canceled == 0) && (msg_next->replied_to == 0) &&
1225 (msg_next->swack_vec == swack_vec))
1228 if (msg_next > bcp->queue_last)
1229 msg_next = bcp->queue_first;
1235 * UV2 needs to work around a bug in which an arriving message has not
1236 * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
1237 * Such a message must be ignored.
1239 void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
1241 unsigned long mmr_image;
1242 unsigned char swack_vec;
1243 struct bau_pq_entry *msg = mdp->msg;
1244 struct bau_pq_entry *other_msg;
1246 mmr_image = ops.read_l_sw_ack();
1247 swack_vec = msg->swack_vec;
1249 if ((swack_vec & mmr_image) == 0) {
1251 * This message was assigned a swack resource, but no
1252 * reserved acknowledgment is pending.
1253 * The bug has prevented this message from setting the MMR.
1256 * Some message has set the MMR 'pending' bit; it might have
1257 * been another message. Look for that message.
1259 other_msg = find_another_by_swack(msg, bcp);
1262 * There is another. Process this one but do not
1265 bau_process_message(mdp, bcp, 0);
1267 * Let the natural processing of that other message
1268 * acknowledge it. Don't get the processing of sw_ack's
1276 * Either the MMR shows this one pending a reply or there is no
1277 * other message using this sw_ack, so it is safe to acknowledge it.
1279 bau_process_message(mdp, bcp, 1);
1285 * The BAU message interrupt comes here. (registered by set_intr_gate)
1288 * We received a broadcast assist message.
1290 * Interrupts are disabled; this interrupt could represent
1291 * the receipt of several messages.
1293 * All cores/threads on this hub get this interrupt.
1294 * The last one to see it does the software ack.
1295 * (the resource will not be freed until noninterruptible cpus see this
1296 * interrupt; hardware may time out the s/w ack and reply ERROR)
1298 void uv_bau_message_interrupt(struct pt_regs *regs)
1301 cycles_t time_start;
1302 struct bau_pq_entry *msg;
1303 struct bau_control *bcp;
1304 struct ptc_stats *stat;
1305 struct msg_desc msgdesc;
1308 time_start = get_cycles();
1310 bcp = &per_cpu(bau_control, smp_processor_id());
1313 msgdesc.queue_first = bcp->queue_first;
1314 msgdesc.queue_last = bcp->queue_last;
1316 msg = bcp->bau_msg_head;
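/* process each filled queue entry; a zero swack_vec marks an unused slot */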
1317 while (msg->swack_vec) {
1320 msgdesc.msg_slot = msg - msgdesc.queue_first;
1322 if (bcp->uvhub_version == UV_BAU_V2)
1323 process_uv2_message(&msgdesc, bcp);
1325 /* no error workaround for uv1 or uv3 */
1326 bau_process_message(&msgdesc, bcp, 1);
1329 if (msg > msgdesc.queue_last)
1330 msg = msgdesc.queue_first;
1331 bcp->bau_msg_head = msg;
1333 stat->d_time += (get_cycles() - time_start);
1341 * Each target uvhub (i.e. a uvhub that has cpus) needs to have
1342 * shootdown message timeouts enabled. The timeout does not cause
1343 * an interrupt, but causes an error message to be returned to
1346 static void __init enable_timeouts(void)
1351 unsigned long mmr_image;
1353 nuvhubs = uv_num_possible_blades();
1355 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
1356 if (!uv_blade_nr_possible_cpus(uvhub))
1359 pnode = uv_blade_to_pnode(uvhub);
1360 mmr_image = read_mmr_misc_control(pnode);
1362 * Set the timeout period and then lock it in, in three
1363 * steps; captures and locks in the period.
1365 * To program the period, the SOFT_ACK_MODE must be off.
1367 mmr_image &= ~(1L << SOFTACK_MSHIFT);
1368 write_mmr_misc_control(pnode, mmr_image);
1370 * Set the 4-bit period.
1372 mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
1373 mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
1374 write_mmr_misc_control(pnode, mmr_image);
1377 * Subsequent reversals of the timebase bit (3) cause an
1378 * immediate timeout of one or all INTD resources as
1379 * indicated in bits 2:0 (7 causes all of them to timeout).
1381 mmr_image |= (1L << SOFTACK_MSHIFT);
1383 /* do not touch the legacy mode bit */
1384 /* hw bug workaround; do not use extended status */
1385 mmr_image &= ~(1L << UV2_EXT_SHFT);
1386 } else if (is_uv3_hub()) {
1387 mmr_image &= ~(1L << PREFETCH_HINT_SHFT);
1388 mmr_image |= (1L << SB_STATUS_SHFT);
1390 write_mmr_misc_control(pnode, mmr_image);
1394 static void *ptc_seq_start(struct seq_file *file, loff_t *offset)
1396 if (*offset < num_possible_cpus())
1401 static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
1404 if (*offset < num_possible_cpus())
1409 static void ptc_seq_stop(struct seq_file *file, void *data)
1414 * Display the statistics thru /proc/sgi_uv/ptc_statistics
1415 * 'data' points to the cpu number
1416 * Note: see the descriptions in stat_description[].
1418 static int ptc_seq_show(struct seq_file *file, void *data)
1420 struct ptc_stats *stat;
1421 struct bau_control *bcp;
1424 cpu = *(loff_t *)data;
1427 "# cpu bauoff sent stime self locals remotes ncpus localhub ");
1428 seq_puts(file, "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
1430 "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries ");
1432 "rok resetp resett giveup sto bz throt disable ");
1434 "enable wars warshw warwaits enters ipidis plugged ");
1436 "ipiover glim cong swack recv rtime all one mult ");
1437 seq_puts(file, "none retry canc nocan reset rcan\n");
1439 if (cpu < num_possible_cpus() && cpu_online(cpu)) {
1440 bcp = &per_cpu(bau_control, cpu);
1442 seq_printf(file, "cpu %d bau disabled\n", cpu);
1446 /* source side statistics */
1448 "cpu %d %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
1449 cpu, bcp->nobau, stat->s_requestor,
1450 cycles_2_us(stat->s_time),
1451 stat->s_ntargself, stat->s_ntarglocals,
1452 stat->s_ntargremotes, stat->s_ntargcpu,
1453 stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
1454 stat->s_ntarguvhub, stat->s_ntarguvhub16);
1455 seq_printf(file, "%ld %ld %ld %ld %ld %ld ",
1456 stat->s_ntarguvhub8, stat->s_ntarguvhub4,
1457 stat->s_ntarguvhub2, stat->s_ntarguvhub1,
1458 stat->s_dtimeout, stat->s_strongnacks);
1459 seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
1460 stat->s_retry_messages, stat->s_retriesok,
1461 stat->s_resets_plug, stat->s_resets_timeout,
1462 stat->s_giveup, stat->s_stimeout,
1463 stat->s_busy, stat->s_throttles);
1464 seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
1465 stat->s_bau_disabled, stat->s_bau_reenabled,
1466 stat->s_uv2_wars, stat->s_uv2_wars_hw,
1467 stat->s_uv2_war_waits, stat->s_enters,
1468 stat->s_ipifordisabled, stat->s_plugged,
1469 stat->s_overipilimit, stat->s_giveuplimit,
1472 /* destination side statistics */
1474 "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
1475 ops.read_g_sw_ack(uv_cpu_to_pnode(cpu)),
1476 stat->d_requestee, cycles_2_us(stat->d_time),
1477 stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
1478 stat->d_nomsg, stat->d_retries, stat->d_canceled,
1479 stat->d_nocanceled, stat->d_resets,
1486 * Display the tunables thru debugfs
1488 static ssize_t tunables_read(struct file *file, char __user *userbuf,
1489 size_t count, loff_t *ppos)
1494 buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d %d\n",
1495 "max_concur plugged_delay plugsb4reset timeoutsb4reset",
1496 "ipi_reset_limit complete_threshold congested_response_us",
1497 "congested_reps disabled_period giveup_limit",
1498 max_concurr, plugged_delay, plugsb4reset,
1499 timeoutsb4reset, ipi_reset_limit, complete_threshold,
1500 congested_respns_us, congested_reps, disabled_period,
1506 ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
1512 * handle a write to /proc/sgi_uv/ptc_statistics
1513 * -1: reset the statistics
1514 * 0: display meaning of the statistics
1516 static ssize_t ptc_proc_write(struct file *file, const char __user *user,
1517 size_t count, loff_t *data)
1524 struct ptc_stats *stat;
1526 if (count == 0 || count > sizeof(optstr))
1528 if (copy_from_user(optstr, user, count))
1530 optstr[count - 1] = '\0';
1532 if (!strcmp(optstr, "on")) {
1535 } else if (!strcmp(optstr, "off")) {
1540 if (kstrtol(optstr, 10, &input_arg) < 0) {
1541 pr_debug("%s is invalid\n", optstr);
1545 if (input_arg == 0) {
1546 elements = ARRAY_SIZE(stat_description);
1547 pr_debug("# cpu: cpu number\n");
1548 pr_debug("Sender statistics:\n");
1549 for (i = 0; i < elements; i++)
1550 pr_debug("%s\n", stat_description[i]);
1551 } else if (input_arg == -1) {
1552 for_each_present_cpu(cpu) {
1553 stat = &per_cpu(ptcstats, cpu);
1554 memset(stat, 0, sizeof(struct ptc_stats));
1561 static int local_atoi(const char *name)
1568 val = 10*val+(*name-'0');
1577 * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables.
1578 * Zero values reset them to defaults.
1580 static int parse_tunables_write(struct bau_control *bcp, char *instr,
1587 int e = ARRAY_SIZE(tunables);
1589 p = instr + strspn(instr, WHITESPACE);
1591 for (; *p; p = q + strspn(q, WHITESPACE)) {
1592 q = p + strcspn(p, WHITESPACE);
1598 pr_info("bau tunable error: should be %d values\n", e);
1602 p = instr + strspn(instr, WHITESPACE);
1604 for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
1605 q = p + strcspn(p, WHITESPACE);
1606 val = local_atoi(p);
1610 max_concurr = MAX_BAU_CONCURRENT;
1611 max_concurr_const = MAX_BAU_CONCURRENT;
1614 if (val < 1 || val > bcp->cpus_in_uvhub) {
1616 "Error: BAU max concurrent %d is invalid\n",
1621 max_concurr_const = val;
1625 *tunables[cnt].tunp = tunables[cnt].deflt;
1627 *tunables[cnt].tunp = val;
1637 * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
1639 static ssize_t tunables_write(struct file *file, const char __user *user,
1640 size_t count, loff_t *data)
1645 struct bau_control *bcp;
1647 if (count == 0 || count > sizeof(instr)-1)
1649 if (copy_from_user(instr, user, count))
1652 instr[count] = '\0';
1655 bcp = &per_cpu(bau_control, cpu);
1656 ret = parse_tunables_write(bcp, instr, count);
1661 for_each_present_cpu(cpu) {
1662 bcp = &per_cpu(bau_control, cpu);
1663 bcp->max_concurr = max_concurr;
1664 bcp->max_concurr_const = max_concurr;
1665 bcp->plugged_delay = plugged_delay;
1666 bcp->plugsb4reset = plugsb4reset;
1667 bcp->timeoutsb4reset = timeoutsb4reset;
1668 bcp->ipi_reset_limit = ipi_reset_limit;
1669 bcp->complete_threshold = complete_threshold;
1670 bcp->cong_response_us = congested_respns_us;
1671 bcp->cong_reps = congested_reps;
1672 bcp->disabled_period = sec_2_cycles(disabled_period);
1673 bcp->giveup_limit = giveup_limit;
1678 static const struct seq_operations uv_ptc_seq_ops = {
1679 .start = ptc_seq_start,
1680 .next = ptc_seq_next,
1681 .stop = ptc_seq_stop,
1682 .show = ptc_seq_show
1685 static int ptc_proc_open(struct inode *inode, struct file *file)
1687 return seq_open(file, &uv_ptc_seq_ops);
1690 static int tunables_open(struct inode *inode, struct file *file)
1695 static const struct file_operations proc_uv_ptc_operations = {
1696 .open = ptc_proc_open,
1698 .write = ptc_proc_write,
1699 .llseek = seq_lseek,
1700 .release = seq_release,
1703 static const struct file_operations tunables_fops = {
1704 .open = tunables_open,
1705 .read = tunables_read,
1706 .write = tunables_write,
1707 .llseek = default_llseek,
1710 static int __init uv_ptc_init(void)
1712 struct proc_dir_entry *proc_uv_ptc;
1714 if (!is_uv_system())
1717 proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
1718 &proc_uv_ptc_operations);
1720 pr_err("unable to create %s proc entry\n",
1725 tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
1726 if (!tunables_dir) {
1727 pr_err("unable to create debugfs directory %s\n",
1728 UV_BAU_TUNABLES_DIR);
1731 tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
1732 tunables_dir, NULL, &tunables_fops);
1733 if (!tunables_file) {
1734 pr_err("unable to create debugfs file %s\n",
1735 UV_BAU_TUNABLES_FILE);
1742 * Initialize the sending side's sending buffers.
1744 static void activation_descriptor_init(int node, int pnode, int base_pnode)
1753 struct bau_desc *bau_desc;
1754 struct bau_desc *bd2;
1755 struct uv1_bau_msg_header *uv1_hdr;
1756 struct uv2_3_bau_msg_header *uv2_3_hdr;
1757 struct bau_control *bcp;
1760 * each bau_desc is 64 bytes; there are 8 (ITEMS_PER_DESC)
1761 * per cpu; and one per cpu on the uvhub (ADP_SZ)
1763 dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
1764 bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
1767 gpa = uv_gpa(bau_desc);
1768 n = uv_gpa_to_gnode(gpa);
1769 m = ops.bau_gpa_to_offset(gpa);
1773 /* the 14-bit pnode */
1774 write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
1776 * Initializing all 8 (ITEMS_PER_DESC) descriptors for each
1777 * cpu even though we only use the first one; one descriptor can
1778 * describe a broadcast to 256 uv hubs.
1780 for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
1781 memset(bd2, 0, sizeof(struct bau_desc));
1783 uv1_hdr = &bd2->header.uv1_hdr;
1784 uv1_hdr->swack_flag = 1;
1786 * The base_dest_nasid set in the message header
1787 * is the nasid of the first uvhub in the partition.
1788 * The bit map will indicate destination pnode numbers
1789 * relative to that base. They may not be consecutive
1790 * if nasid striding is being used.
1792 uv1_hdr->base_dest_nasid =
1793 UV_PNODE_TO_NASID(base_pnode);
1794 uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
1795 uv1_hdr->command = UV_NET_ENDPOINT_INTD;
1796 uv1_hdr->int_both = 1;
1798 * all others need to be set to zero:
1799 * fairness chaining multilevel count replied_to
1803 * BIOS uses legacy mode, but uv2 and uv3 hardware always
1804 * uses native mode for selective broadcasts.
1806 uv2_3_hdr = &bd2->header.uv2_3_hdr;
1807 uv2_3_hdr->swack_flag = 1;
1808 uv2_3_hdr->base_dest_nasid =
1809 UV_PNODE_TO_NASID(base_pnode);
1810 uv2_3_hdr->dest_subnodeid = UV_LB_SUBNODEID;
1811 uv2_3_hdr->command = UV_NET_ENDPOINT_INTD;
1814 for_each_present_cpu(cpu) {
1815 if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
1817 bcp = &per_cpu(bau_control, cpu);
1818 bcp->descriptor_base = bau_desc;
1823 * initialize the destination side's receiving buffers
1824 * entered for each uvhub in the partition
1825 * - node is first node (kernel memory notion) on the uvhub
1826 * - pnode is the uvhub's physical identifier
1828 static void pq_init(int node, int pnode)
1834 unsigned long gnode, first, last, tail;
1835 struct bau_pq_entry *pqp;
1836 struct bau_control *bcp;
1838 plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
1839 vp = kmalloc_node(plsize, GFP_KERNEL, node);
1840 pqp = (struct bau_pq_entry *)vp;
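/* round the queue start up to the next 32-byte boundary */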
1843 cp = (char *)pqp + 31;
1844 pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);
1846 for_each_present_cpu(cpu) {
1847 if (pnode != uv_cpu_to_pnode(cpu))
1849 /* for every cpu on this pnode: */
1850 bcp = &per_cpu(bau_control, cpu);
1851 bcp->queue_first = pqp;
1852 bcp->bau_msg_head = pqp;
1853 bcp->queue_last = pqp + (DEST_Q_SIZE - 1);
1856 first = ops.bau_gpa_to_offset(uv_gpa(pqp));
1857 last = ops.bau_gpa_to_offset(uv_gpa(pqp + (DEST_Q_SIZE - 1)));
1860 * Pre UV4, the gnode is required to locate the payload queue
1861 * and the payload queue tail must be maintained by the kernel.
1863 bcp = &per_cpu(bau_control, smp_processor_id());
1864 if (bcp->uvhub_version <= UV_BAU_V3) {
1866 gnode = uv_gpa_to_gnode(uv_gpa(pqp));
1867 first = (gnode << UV_PAYLOADQ_GNODE_SHIFT) | tail;
1868 write_mmr_payload_tail(pnode, tail);
1871 ops.write_payload_first(pnode, first);
1872 ops.write_payload_last(pnode, last);
1874 /* in effect, all msg_type's are set to MSG_NOOP */
1875 memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
1879 * Initialization of each UV hub's structures
1881 static void __init init_uvhub(int uvhub, int vector, int base_pnode)
1885 unsigned long apicid;
1887 node = uvhub_to_first_node(uvhub);
1888 pnode = uv_blade_to_pnode(uvhub);
1890 activation_descriptor_init(node, pnode, base_pnode);
1892 pq_init(node, pnode);
1894 * The below initialization can't be in firmware because the
1895 * messaging IRQ will be determined by the OS.
1897 apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
1898 write_mmr_data_config(pnode, ((apicid << 32) | vector));
1902 * We will set BAU_MISC_CONTROL with a timeout period.
1903 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
1904 * So the destination timeout period has to be calculated from them.
1906 static int calculate_destination_timeout(void)
1908 unsigned long mmr_image;
1914 unsigned long ts_ns;
1917 mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
1918 mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
1919 index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
1920 mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
1921 mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
1922 ts_ns = timeout_base_ns[index];
1923 ts_ns *= (mult1 * mult2);
1926 /* same destination timeout for uv2 and uv3 */
1927 /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
1928 mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
1929 mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
1930 if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
1934 mult1 = mmr_image & UV2_ACK_MASK;
1940 static void __init init_per_cpu_tunables(void)
1943 struct bau_control *bcp;
1945 for_each_present_cpu(cpu) {
1946 bcp = &per_cpu(bau_control, cpu);
1947 bcp->baudisabled = 0;
1950 bcp->statp = &per_cpu(ptcstats, cpu);
1951 /* time interval to catch a hardware stay-busy bug */
1952 bcp->timeout_interval = usec_2_cycles(2*timeout_us);
1953 bcp->max_concurr = max_concurr;
1954 bcp->max_concurr_const = max_concurr;
1955 bcp->plugged_delay = plugged_delay;
1956 bcp->plugsb4reset = plugsb4reset;
1957 bcp->timeoutsb4reset = timeoutsb4reset;
1958 bcp->ipi_reset_limit = ipi_reset_limit;
1959 bcp->complete_threshold = complete_threshold;
1960 bcp->cong_response_us = congested_respns_us;
1961 bcp->cong_reps = congested_reps;
1962 bcp->disabled_period = sec_2_cycles(disabled_period);
1963 bcp->giveup_limit = giveup_limit;
1964 spin_lock_init(&bcp->queue_lock);
1965 spin_lock_init(&bcp->uvhub_lock);
1966 spin_lock_init(&bcp->disable_lock);
1971 * Scan all cpus to collect blade and socket summaries.
1973 static int __init get_cpu_topology(int base_pnode,
1974 struct uvhub_desc *uvhub_descs,
1975 unsigned char *uvhub_mask)
1981 struct bau_control *bcp;
1982 struct uvhub_desc *bdp;
1983 struct socket_desc *sdp;
1985 for_each_present_cpu(cpu) {
1986 bcp = &per_cpu(bau_control, cpu);
1988 memset(bcp, 0, sizeof(struct bau_control));
1990 pnode = uv_cpu_hub_info(cpu)->pnode;
1991 if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) {
1993 "cpu %d pnode %d-%d beyond %d; BAU disabled\n",
1994 cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
1998 bcp->osnode = cpu_to_node(cpu);
1999 bcp->partition_base_pnode = base_pnode;
2001 uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
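/* record this uvhub in the bitmap of uvhubs that have cpus */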
2002 *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
2003 bdp = &uvhub_descs[uvhub];
2009 /* kludge: 'assuming' one node per socket, and assuming that
2010 disabling a socket just leaves a gap in node numbers */
2011 socket = bcp->osnode & 1;
2012 bdp->socket_mask |= (1 << socket);
2013 sdp = &bdp->socket[socket];
2014 sdp->cpu_number[sdp->num_cpus] = cpu;
2016 if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
2017 pr_emerg("%d cpus per socket invalid\n",
2026 * Each socket is to get a local array of pnodes/hubs.
2028 static void make_per_cpu_thp(struct bau_control *smaster)
2031 size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();
2033 smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
2034 memset(smaster->thp, 0, hpsz);
2035 for_each_present_cpu(cpu) {
2036 smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
2037 smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
2042 * Each uvhub is to get a local cpumask.
2044 static void make_per_hub_cpumask(struct bau_control *hmaster)
2046 int sz = sizeof(cpumask_t);
2048 hmaster->cpumask = kzalloc_node(sz, GFP_KERNEL, hmaster->osnode);
2052 * Initialize all the per_cpu information for the cpus on a given socket,
2053 * given what has been gathered into the socket_desc struct.
2054 * And report the chosen hub and socket masters back to the caller.
2056 static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
2057 struct bau_control **smasterp,
2058 struct bau_control **hmasterp)
2060 int i, cpu, uvhub_cpu;
2061 struct bau_control *bcp;
2063 for (i = 0; i < sdp->num_cpus; i++) {
2064 cpu = sdp->cpu_number[i];
2065 bcp = &per_cpu(bau_control, cpu);
2072 bcp->cpus_in_uvhub = bdp->num_cpus;
2073 bcp->cpus_in_socket = sdp->num_cpus;
2074 bcp->socket_master = *smasterp;
2075 bcp->uvhub = bdp->uvhub;
2077 bcp->uvhub_version = UV_BAU_V1;
2078 else if (is_uv2_hub())
2079 bcp->uvhub_version = UV_BAU_V2;
2080 else if (is_uv3_hub())
2081 bcp->uvhub_version = UV_BAU_V3;
2082 else if (is_uv4_hub())
2083 bcp->uvhub_version = UV_BAU_V4;
2085 pr_emerg("uvhub version not 1, 2, 3, or 4\n");
2088 bcp->uvhub_master = *hmasterp;
2089 uvhub_cpu = uv_cpu_blade_processor_id(cpu);
2090 bcp->uvhub_cpu = uvhub_cpu;
2093 * The ERROR and BUSY status registers are located pairwise over
2094 * the STATUS_0 and STATUS_1 mmrs; each an array[32] of 2 bits.
2096 if (uvhub_cpu < UV_CPUS_PER_AS) {
2097 bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
2098 bcp->status_index = uvhub_cpu * UV_ACT_STATUS_SIZE;
2100 bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
2101 bcp->status_index = (uvhub_cpu - UV_CPUS_PER_AS)
2102 * UV_ACT_STATUS_SIZE;
2105 if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
2106 pr_emerg("%d cpus per uvhub invalid\n",
2115 * Summarize the blade and socket topology into the per_cpu structures.
2117 static int __init summarize_uvhub_sockets(int nuvhubs,
2118 struct uvhub_desc *uvhub_descs,
2119 unsigned char *uvhub_mask)
2123 unsigned short socket_mask;
2125 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
2126 struct uvhub_desc *bdp;
2127 struct bau_control *smaster = NULL;
2128 struct bau_control *hmaster = NULL;
2130 if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
2133 bdp = &uvhub_descs[uvhub];
2134 socket_mask = bdp->socket_mask;
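/* walk the mask of sockets that were found to have cpus on this uvhub */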
2136 while (socket_mask) {
2137 struct socket_desc *sdp;
2138 if ((socket_mask & 1)) {
2139 sdp = &bdp->socket[socket];
2140 if (scan_sock(sdp, bdp, &smaster, &hmaster))
2142 make_per_cpu_thp(smaster);
2145 socket_mask = (socket_mask >> 1);
2147 make_per_hub_cpumask(hmaster);
2153 * initialize the bau_control structure for each cpu
2155 static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
2157 unsigned char *uvhub_mask;
2159 struct uvhub_desc *uvhub_descs;
2161 if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
2162 timeout_us = calculate_destination_timeout();
2164 vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
2165 uvhub_descs = (struct uvhub_desc *)vp;
2166 memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
2167 uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
2169 if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
2172 if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
2177 init_per_cpu_tunables();
2186 static const struct bau_operations uv1_bau_ops __initconst = {
2187 .bau_gpa_to_offset = uv_gpa_to_offset,
2188 .read_l_sw_ack = read_mmr_sw_ack,
2189 .read_g_sw_ack = read_gmmr_sw_ack,
2190 .write_l_sw_ack = write_mmr_sw_ack,
2191 .write_g_sw_ack = write_gmmr_sw_ack,
2192 .write_payload_first = write_mmr_payload_first,
2193 .write_payload_last = write_mmr_payload_last,
2194 .wait_completion = uv1_wait_completion,
2197 static const struct bau_operations uv2_3_bau_ops __initconst = {
2198 .bau_gpa_to_offset = uv_gpa_to_offset,
2199 .read_l_sw_ack = read_mmr_sw_ack,
2200 .read_g_sw_ack = read_gmmr_sw_ack,
2201 .write_l_sw_ack = write_mmr_sw_ack,
2202 .write_g_sw_ack = write_gmmr_sw_ack,
2203 .write_payload_first = write_mmr_payload_first,
2204 .write_payload_last = write_mmr_payload_last,
2205 .wait_completion = uv2_3_wait_completion,
2208 static const struct bau_operations uv4_bau_ops __initconst = {
2209 .bau_gpa_to_offset = uv_gpa_to_soc_phys_ram,
2210 .read_l_sw_ack = read_mmr_proc_sw_ack,
2211 .read_g_sw_ack = read_gmmr_proc_sw_ack,
2212 .write_l_sw_ack = write_mmr_proc_sw_ack,
2213 .write_g_sw_ack = write_gmmr_proc_sw_ack,
2214 .write_payload_first = write_mmr_proc_payload_first,
2215 .write_payload_last = write_mmr_proc_payload_last,
2216 .wait_completion = uv4_wait_completion,
2220 * Initialization of BAU-related structures
2222 static int __init uv_bau_init(void)
2230 cpumask_var_t *mask;
2232 if (!is_uv_system())
2237 else if (is_uv3_hub())
2238 ops = uv2_3_bau_ops;
2239 else if (is_uv2_hub())
2240 ops = uv2_3_bau_ops;
2241 else if (is_uv1_hub())
2244 for_each_possible_cpu(cur_cpu) {
2245 mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
2246 zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
2249 nuvhubs = uv_num_possible_blades();
2250 congested_cycles = usec_2_cycles(congested_respns_us);
2252 uv_base_pnode = 0x7fffffff;
2253 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
2254 cpus = uv_blade_nr_possible_cpus(uvhub);
2255 if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode))
2256 uv_base_pnode = uv_blade_to_pnode(uvhub);
2259 /* software timeouts are not supported on UV4 */
2260 if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
2263 if (init_per_cpu(nuvhubs, uv_base_pnode)) {
2269 vector = UV_BAU_MESSAGE;
2270 for_each_possible_blade(uvhub) {
2271 if (uv_blade_nr_possible_cpus(uvhub))
2272 init_uvhub(uvhub, vector, uv_base_pnode);
2275 alloc_intr_gate(vector, uv_bau_message_intr1);
2277 for_each_possible_blade(uvhub) {
2278 if (uv_blade_nr_possible_cpus(uvhub)) {
2281 pnode = uv_blade_to_pnode(uvhub);
2284 write_gmmr_activation(pnode, val);
2285 mmr = 1; /* should be 1 to broadcast to both sockets */
2287 write_mmr_data_broadcast(pnode, mmr);
2293 core_initcall(uv_bau_init);
2294 fs_initcall(uv_ptc_init);