2 * SGI UltraViolet TLB flush routines.
4 * (c) 2008-2010 Cliff Wickman <cpw@sgi.com>, SGI.
6 * This code is released under the GNU General Public License version 2 or
9 #include <linux/seq_file.h>
10 #include <linux/proc_fs.h>
11 #include <linux/kernel.h>
13 #include <asm/mmu_context.h>
14 #include <asm/uv/uv.h>
15 #include <asm/uv/uv_mmrs.h>
16 #include <asm/uv/uv_hub.h>
17 #include <asm/uv/uv_bau.h>
21 #include <asm/irq_vectors.h>
22 #include <asm/timer.h>
25 struct bau_payload_queue_entry *msg;
28 struct bau_payload_queue_entry *va_queue_first;
29 struct bau_payload_queue_entry *va_queue_last;
32 #define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
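/*
 * Default limit on concurrently active descriptors per uvhub (the
 * 'throttle').  It is set to MAX_BAU_CONCURRENT at init, copied into
 * each cpu's bau_control, and can be raised or lowered at run time by
 * writing a value > 0 through the /proc interface (see
 * uv_ptc_proc_write() below).
 */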
34 static int uv_bau_max_concurrent __read_mostly;
37 static int __init setup_nobau(char *arg)
42 early_param("nobau", setup_nobau);
44 /* base pnode in this partition */
45 static int uv_partition_base_pnode __read_mostly;
46 /* position of pnode (which is nasid>>1): */
47 static int uv_nshift __read_mostly;
48 static unsigned long uv_mmask __read_mostly;
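/*
 * Per-cpu state: shootdown statistics (reported through the /proc
 * interface), the BAU control block for each cpu, and a scratch
 * cpumask that uv_flush_tlb_others() fills from the caller's mask.
 */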
50 static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
51 static DEFINE_PER_CPU(struct bau_control, bau_control);
52 static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
59 * Determine the first node on a uvhub. 'Nodes' are used for kernel
62 static int __init uvhub_to_first_node(int uvhub)
66 for_each_online_node(node) {
67 b = uv_node_to_blade_id(node);
75 * Determine the apicid of the first cpu on a uvhub.
77 static int __init uvhub_to_first_apicid(int uvhub)
81 for_each_present_cpu(cpu)
82 if (uvhub == uv_cpu_to_blade_id(cpu))
83 return per_cpu(x86_cpu_to_apicid, cpu);
88 * Free a software acknowledge hardware resource by clearing its Pending
89 * bit. This will return a reply to the sender.
90 * If the message has timed out, a reply has already been sent by the
91 * hardware but the resource has not been released. In that case our
92 * clear of the Timeout bit (as well) will free the resource. No reply will
93 * be sent (the hardware will only do one reply per message).
95 static inline void uv_reply_to_message(struct msg_desc *mdp,
96 struct bau_control *bcp)
99 struct bau_payload_queue_entry *msg;
102 if (!msg->canceled) {
103 dw = (msg->sw_ack_vector << UV_SW_ACK_NPENDING) |
106 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
109 msg->sw_ack_vector = 0;
113 * Process the receipt of a RETRY message
115 static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
116 struct bau_control *bcp)
119 int cancel_count = 0;
121 unsigned long msg_res;
122 unsigned long mmr = 0;
123 struct bau_payload_queue_entry *msg;
124 struct bau_payload_queue_entry *msg2;
125 struct ptc_stats *stat;
128 stat = &per_cpu(ptcstats, bcp->cpu);
131 * cancel any message from msg+1 to the retry itself
133 for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
134 if (msg2 > mdp->va_queue_last)
135 msg2 = mdp->va_queue_first;
139 /* same conditions for cancellation as uv_do_reset */
140 if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
141 (msg2->sw_ack_vector) && ((msg2->sw_ack_vector &
142 msg->sw_ack_vector) == 0) &&
143 (msg2->sending_cpu == msg->sending_cpu) &&
144 (msg2->msg_type != MSG_NOOP)) {
145 slot2 = msg2 - mdp->va_queue_first;
146 mmr = uv_read_local_mmr
147 (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
148 msg_res = ((msg2->sw_ack_vector << 8) |
149 msg2->sw_ack_vector);
151 * This is a message retry; clear the resources held
152 * by the previous message only if they timed out.
153 * If it has not timed out we have an unexpected
154 * situation to report.
156 if (mmr & (msg_res << 8)) {
158 * is the resource timed out?
159 * make everyone ignore the cancelled message.
165 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
166 (msg_res << 8) | msg_res);
168 printk(KERN_INFO "note bau retry: no effect\n");
172 stat->d_nocanceled++;
176 * Do all the things a cpu should do for a TLB shootdown message.
177 * Other cpu's may come here at the same time for this message.
179 static void uv_bau_process_message(struct msg_desc *mdp,
180 struct bau_control *bcp)
183 short socket_ack_count = 0;
184 struct ptc_stats *stat;
185 struct bau_payload_queue_entry *msg;
186 struct bau_control *smaster = bcp->socket_master;
189 * This must be a normal message, or retry of a normal message
192 stat = &per_cpu(ptcstats, bcp->cpu);
193 if (msg->address == TLB_FLUSH_ALL) {
197 __flush_tlb_one(msg->address);
203 * One cpu on each uvhub has the additional job on a RETRY
204 * of releasing the resource held by the message that is
205 * being retried. That message is identified by sending
208 if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
209 uv_bau_process_retry_msg(mdp, bcp);
212 * This is a sw_ack message, so we have to reply to it.
213 * Count each responding cpu on the socket. This avoids
214 * pinging the count's cache line back and forth between
217 socket_ack_count = atomic_add_short_return(1, (struct atomic_short *)
218 &smaster->socket_acknowledge_count[mdp->msg_slot]);
219 if (socket_ack_count == bcp->cpus_in_socket) {
221 * Both sockets dump their completed count total into
222 * the message's count.
224 smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
225 msg_ack_count = atomic_add_short_return(socket_ack_count,
226 (struct atomic_short *)&msg->acknowledge_count);
228 if (msg_ack_count == bcp->cpus_in_uvhub) {
230 * All cpus in uvhub saw it; reply
232 uv_reply_to_message(mdp, bcp);
240 * Determine the first cpu on a uvhub.
242 static int uvhub_to_first_cpu(int uvhub)
245 for_each_present_cpu(cpu)
246 if (uvhub == uv_cpu_to_blade_id(cpu))
252 * Last resort when we get a large number of destination timeouts is
253 * to clear resources held by a given cpu.
254 * Do this with IPI so that all messages in the BAU message queue
255 * can be identified by their nonzero sw_ack_vector field.
257 * This is entered for a single cpu on the uvhub.
258 * The sender wants this uvhub to free a specific message's
262 uv_do_reset(void *ptr)
268 unsigned long msg_res;
269 struct bau_control *bcp;
270 struct reset_args *rap;
271 struct bau_payload_queue_entry *msg;
272 struct ptc_stats *stat;
274 bcp = &per_cpu(bau_control, smp_processor_id());
275 rap = (struct reset_args *)ptr;
276 stat = &per_cpu(ptcstats, bcp->cpu);
280 * We're looking for the given sender, and
281 * will free its sw_ack resource.
282 * If all cpu's finally responded after the timeout, its
283 * message 'replied_to' was set.
285 for (msg = bcp->va_queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
286 /* uv_do_reset: same conditions for cancellation as
287 uv_bau_process_retry_msg() */
288 if ((msg->replied_to == 0) &&
289 (msg->canceled == 0) &&
290 (msg->sending_cpu == rap->sender) &&
291 (msg->sw_ack_vector) &&
292 (msg->msg_type != MSG_NOOP)) {
294 * make everyone else ignore this message
297 slot = msg - bcp->va_queue_first;
300 * only reset the resource if it is still pending
302 mmr = uv_read_local_mmr
303 (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
304 msg_res = ((msg->sw_ack_vector << 8) |
309 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
318 * Use IPI to get all target uvhubs to release resources held by
319 * a given sending cpu number.
321 static void uv_reset_with_ipi(struct bau_target_uvhubmask *distribution,
327 struct reset_args reset_args;
329 reset_args.sender = sender;
332 /* find a single cpu for each uvhub in this distribution mask */
334 uvhub < sizeof(struct bau_target_uvhubmask) * BITSPERBYTE;
336 if (!bau_uvhub_isset(uvhub, distribution))
338 /* find a cpu for this uvhub */
339 cpu = uvhub_to_first_cpu(uvhub);
342 /* IPI all cpus; Preemption is already disabled */
343 smp_call_function_many(&mask, uv_do_reset, (void *)&reset_args, 1);
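/*
 * The trailing '1' is the 'wait' flag: the sender blocks here until
 * uv_do_reset() has finished on every cpu in the mask.
 */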
347 static inline unsigned long
348 cycles_2_us(unsigned long long cyc)
350 unsigned long long ns;
352 ns = (cyc * per_cpu(cyc2ns, smp_processor_id()))
353 >> CYC2NS_SCALE_FACTOR;
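/*
 * (remainder elided) cycles are first scaled to nanoseconds with the
 * per-cpu cyc2ns factor; the conversion to microseconds (a divide by
 * 1000) is presumably in the elided tail of this helper, as the name
 * cycles_2_us implies.
 */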
359 * wait for all cpus on this hub to finish their sends and go quiet
360 * leaves uvhub_quiesce set so that no new broadcasts are started by
361 * uv_flush_send_and_wait()
364 quiesce_local_uvhub(struct bau_control *hmaster)
366 atomic_add_short_return(1, (struct atomic_short *)
367 &hmaster->uvhub_quiesce);
371 * mark this quiet-requestor as done
374 end_uvhub_quiesce(struct bau_control *hmaster)
376 atomic_add_short_return(-1, (struct atomic_short *)
377 &hmaster->uvhub_quiesce);
381 * Wait for completion of a broadcast software ack message
382 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
384 static int uv_wait_completion(struct bau_desc *bau_desc,
385 unsigned long mmr_offset, int right_shift, int this_cpu,
386 struct bau_control *bcp, struct bau_control *smaster, long try)
389 unsigned long descriptor_status;
393 cycles_t timeout_time;
394 struct ptc_stats *stat = &per_cpu(ptcstats, this_cpu);
395 struct bau_control *hmaster;
397 hmaster = bcp->uvhub_master;
398 timeout_time = get_cycles() + bcp->timeout_interval;
400 /* spin on the status MMR, waiting for it to go idle */
401 while ((descriptor_status = (((unsigned long)
402 uv_read_local_mmr(mmr_offset) >>
403 right_shift) & UV_ACT_STATUS_MASK)) !=
406 * Our software ack messages may be blocked because there are
407 * no swack resources available. As long as none of them
408 * has timed out hardware will NACK our message and its
409 * state will stay IDLE.
411 if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
414 } else if (descriptor_status ==
415 DESC_STATUS_DESTINATION_TIMEOUT) {
417 ttime = get_cycles();
420 * Our retries may be blocked by all destination
421 * swack resources being consumed, and a timeout
422 * pending. In that case hardware returns the
423 * ERROR that looks like a destination timeout.
425 if (cycles_2_us(ttime - bcp->send_message) < BIOS_TO) {
426 bcp->conseccompletes = 0;
427 return FLUSH_RETRY_PLUGGED;
430 bcp->conseccompletes = 0;
431 return FLUSH_RETRY_TIMEOUT;
434 * descriptor_status is still BUSY
438 if (relaxes >= 10000) {
440 if (get_cycles() > timeout_time) {
441 quiesce_local_uvhub(hmaster);
443 /* single-thread the register change */
444 spin_lock(&hmaster->masks_lock);
445 mmr = uv_read_local_mmr(mmr_offset);
447 mask |= (3UL << right_shift);
450 uv_write_local_mmr(mmr_offset, mmr);
451 spin_unlock(&hmaster->masks_lock);
452 end_uvhub_quiesce(hmaster);
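/*
 * Stay-busy recovery: the descriptor has stayed BUSY past
 * timeout_interval, so the uvhub is quiesced, the two ACT_STATUS bits
 * for this cpu's descriptor are rewritten in the status MMR
 * (masks_lock single-threads the read-modify-write), and the quiesce
 * is then released.
 */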
459 bcp->conseccompletes++;
460 return FLUSH_COMPLETE;
463 static inline cycles_t
464 sec_2_cycles(unsigned long sec)
469 ns = sec * 1000000000;
470 cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
475 * conditionally add 1 to *v, unless *v is >= u
476 * return 0 if we cannot add 1 to *v because it is >= u
477 * return 1 if we can add 1 to *v because it is < u
480 * This is close to atomic_add_unless(), but this allows the 'u' value
481 * to be lowered below the current 'v'. atomic_add_unless can only stop
484 static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
487 if (atomic_read(v) >= u) {
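/*
 * (rest of the body elided) A sketch of the intended flow, per the
 * comment above and assuming 'lock' guards the whole test-and-increment:
 * if *v is already >= u, drop the lock and return 0; otherwise
 * atomic_inc(v), drop the lock, and return 1.
 */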
497 * uv_flush_send_and_wait
499 * Send a broadcast and wait for it to complete.
501 * The flush_mask contains the cpus the broadcast is to be sent to, plus
502 * cpus that are on the local uvhub.
504 * Returns NULL if all flushing represented in the mask was done. The mask
506 * Returns @flush_mask if some remote flushing remains to be done. The
507 * mask will have some bits still set, representing any cpus on the local
508 * uvhub (not current cpu) and any on remote uvhubs if the broadcast failed.
510 const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
511 struct cpumask *flush_mask,
512 struct bau_control *bcp)
517 int completion_status = 0;
520 int cpu = bcp->uvhub_cpu;
521 int this_cpu = bcp->cpu;
522 int this_uvhub = bcp->uvhub;
523 unsigned long mmr_offset;
527 struct ptc_stats *stat = &per_cpu(ptcstats, bcp->cpu);
528 struct bau_control *smaster = bcp->socket_master;
529 struct bau_control *hmaster = bcp->uvhub_master;
532 * Spin here while there are hmaster->max_concurrent or more active
533 * descriptors. This is the per-uvhub 'throttle'.
535 if (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
536 &hmaster->active_descriptor_count,
537 hmaster->max_concurrent)) {
541 } while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
542 &hmaster->active_descriptor_count,
543 hmaster->max_concurrent));
546 while (hmaster->uvhub_quiesce)
549 if (cpu < UV_CPUS_PER_ACT_STATUS) {
550 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
551 right_shift = cpu * UV_ACT_STATUS_SIZE;
553 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
555 ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
557 time1 = get_cycles();
560 * Every message from any given cpu gets a unique message
561 * sequence number. But retries use that same number.
562 * Our message may have timed out at the destination because
563 * all sw-ack resources are in use and there is a timeout
564 * pending there. In that case, our last send never got
565 * placed into the queue and we need to persist until it
568 * Make any retry a type MSG_RETRY so that the destination will
569 * free any resource held by a previous message from this cpu.
572 /* use message type set by the caller the first time */
573 seq_number = bcp->message_number++;
575 /* use RETRY type on all the rest; same sequence */
576 bau_desc->header.msg_type = MSG_RETRY;
577 stat->s_retry_messages++;
579 bau_desc->header.sequence = seq_number;
580 index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
582 bcp->send_message = get_cycles();
584 uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
587 completion_status = uv_wait_completion(bau_desc, mmr_offset,
588 right_shift, this_cpu, bcp, smaster, try);
590 if (completion_status == FLUSH_RETRY_PLUGGED) {
592 * Our retries may be blocked by all destination swack
593 * resources being consumed, and a timeout pending. In
594 * that case hardware immediately returns the ERROR
595 * that looks like a destination timeout.
597 udelay(TIMEOUT_DELAY);
598 bcp->plugged_tries++;
599 if (bcp->plugged_tries >= PLUGSB4RESET) {
600 bcp->plugged_tries = 0;
601 quiesce_local_uvhub(hmaster);
602 spin_lock(&hmaster->queue_lock);
603 uv_reset_with_ipi(&bau_desc->distribution,
605 spin_unlock(&hmaster->queue_lock);
606 end_uvhub_quiesce(hmaster);
608 stat->s_resets_plug++;
610 } else if (completion_status == FLUSH_RETRY_TIMEOUT) {
611 hmaster->max_concurrent = 1;
612 bcp->timeout_tries++;
613 udelay(TIMEOUT_DELAY);
614 if (bcp->timeout_tries >= TIMEOUTSB4RESET) {
615 bcp->timeout_tries = 0;
616 quiesce_local_uvhub(hmaster);
617 spin_lock(&hmaster->queue_lock);
618 uv_reset_with_ipi(&bau_desc->distribution,
620 spin_unlock(&hmaster->queue_lock);
621 end_uvhub_quiesce(hmaster);
623 stat->s_resets_timeout++;
626 if (bcp->ipi_attempts >= 3) {
627 bcp->ipi_attempts = 0;
628 completion_status = FLUSH_GIVEUP;
632 } while ((completion_status == FLUSH_RETRY_PLUGGED) ||
633 (completion_status == FLUSH_RETRY_TIMEOUT));
634 time2 = get_cycles();
636 if ((completion_status == FLUSH_COMPLETE) && (bcp->conseccompletes > 5)
637 && (hmaster->max_concurrent < hmaster->max_concurrent_constant))
638 hmaster->max_concurrent++;
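/*
 * The throttle is adaptive: a FLUSH_RETRY_TIMEOUT above drops
 * max_concurrent to 1, and a run of more than 5 consecutive
 * completions widens it again, one descriptor at a time, up to
 * max_concurrent_constant.
 */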
641 * hold any cpu not timing out here; no other cpu currently held by
642 * the 'throttle' should enter the activation code
644 while (hmaster->uvhub_quiesce)
646 atomic_dec(&hmaster->active_descriptor_count);
648 /* guard against cycles wrap */
650 stat->s_time += (time2 - time1);
652 stat->s_requestor--; /* don't count this one */
653 if (completion_status == FLUSH_COMPLETE && try > 1)
655 else if (completion_status == FLUSH_GIVEUP) {
657 * Cause the caller to do an IPI-style TLB shootdown on
658 * the target cpu's, all of which are still in the mask.
665 * Success, so clear the remote cpu's from the mask so we don't
666 * use the IPI method of shootdown on them.
668 for_each_cpu(bit, flush_mask) {
669 uvhub = uv_cpu_to_blade_id(bit);
670 if (uvhub == this_uvhub)
672 cpumask_clear_cpu(bit, flush_mask);
674 if (!cpumask_empty(flush_mask))
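/*
 * (return paths elided) per the description above
 * uv_flush_send_and_wait(): a non-empty flush_mask is handed back to
 * the caller for an IPI-style shootdown; NULL is returned when nothing
 * remains to be flushed.
 */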
681 * uv_flush_tlb_others - globally purge translation cache of a virtual
682 * address or all TLB's
683 * @cpumask: mask of all cpu's in which the address is to be removed
684 * @mm: mm_struct containing virtual address range
685 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
686 * @cpu: the current cpu
688 * This is the entry point for initiating any UV global TLB shootdown.
690 * Purges the translation caches of all specified processors of the given
691 * virtual address, or purges all TLB's on specified processors.
693 * The caller has derived the cpumask from the mm_struct. This function
694 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
696 * The cpumask is converted into a uvhubmask of the uvhubs containing
699 * Note that this function should be called with preemption disabled.
701 * Returns NULL if all remote flushing was done.
702 * Returns pointer to cpumask if some remote flushing remains to be
703 * done. The returned pointer is valid till preemption is re-enabled.
705 const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
706 struct mm_struct *mm,
707 unsigned long va, unsigned int cpu)
713 struct bau_desc *bau_desc;
714 struct cpumask *flush_mask;
715 struct ptc_stats *stat;
716 struct bau_control *bcp;
721 bcp = &per_cpu(bau_control, cpu);
723 * Each sending cpu has a per-cpu mask which it fills from the caller's
724 * cpu mask. Only remote cpus are converted to uvhubs and copied.
726 flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
728 * copy cpumask to flush_mask, removing current cpu
729 * (current cpu should already have been flushed by the caller and
730 * should never be returned if we return flush_mask)
732 cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
733 if (cpu_isset(cpu, *cpumask))
734 locals++; /* current cpu was targeted */
736 bau_desc = bcp->descriptor_base;
737 bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
739 bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
741 for_each_cpu(tcpu, flush_mask) {
742 uvhub = uv_cpu_to_blade_id(tcpu);
743 if (uvhub == bcp->uvhub) {
747 bau_uvhub_set(uvhub, &bau_desc->distribution);
752 * No off_hub flushing; return status for local hub.
753 * Return the caller's mask if all were local (the current
754 * cpu may be in that mask).
761 stat = &per_cpu(ptcstats, cpu);
763 stat->s_ntargcpu += remotes;
764 remotes = bau_uvhub_weight(&bau_desc->distribution);
765 stat->s_ntarguvhub += remotes;
767 stat->s_ntarguvhub16++;
768 else if (remotes >= 8)
769 stat->s_ntarguvhub8++;
770 else if (remotes >= 4)
771 stat->s_ntarguvhub4++;
772 else if (remotes >= 2)
773 stat->s_ntarguvhub2++;
775 stat->s_ntarguvhub1++;
777 bau_desc->payload.address = va;
778 bau_desc->payload.sending_cpu = cpu;
781 * uv_flush_send_and_wait returns null if all cpu's were messaged, or
782 * the adjusted flush_mask if any cpu's were not messaged.
784 return uv_flush_send_and_wait(bau_desc, flush_mask, bcp);
788 * The BAU message interrupt comes here. (registered by set_intr_gate)
791 * We received a broadcast assist message.
793 * Interrupts are disabled; this interrupt could represent
794 * the receipt of several messages.
796 * All cores/threads on this hub get this interrupt.
797 * The last one to see it does the software ack.
798 * (the resource will not be freed until noninterruptible cpus see this
799 * interrupt; hardware may timeout the s/w ack and reply ERROR)
801 void uv_bau_message_interrupt(struct pt_regs *regs)
805 struct bau_payload_queue_entry *msg;
806 struct bau_control *bcp;
807 struct ptc_stats *stat;
808 struct msg_desc msgdesc;
810 time_start = get_cycles();
811 bcp = &per_cpu(bau_control, smp_processor_id());
812 stat = &per_cpu(ptcstats, smp_processor_id());
813 msgdesc.va_queue_first = bcp->va_queue_first;
814 msgdesc.va_queue_last = bcp->va_queue_last;
815 msg = bcp->bau_msg_head;
816 while (msg->sw_ack_vector) {
818 msgdesc.msg_slot = msg - msgdesc.va_queue_first;
819 msgdesc.sw_ack_slot = ffs(msg->sw_ack_vector) - 1;
821 uv_bau_process_message(&msgdesc, bcp);
823 if (msg > msgdesc.va_queue_last)
824 msg = msgdesc.va_queue_first;
825 bcp->bau_msg_head = msg;
827 stat->d_time += (get_cycles() - time_start);
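/*
 * (tail elided) The destination-side statistics also track whether an
 * interrupt found no message or several (the 'none' and 'mult' counts
 * shown through /proc); that accounting presumably lives in the elided
 * remainder of this handler.
 */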
838 * Each target uvhub (i.e. a uvhub that has cpu's) needs to have
839 * shootdown message timeouts enabled. The timeout does not cause
840 * an interrupt, but causes an error message to be returned to
843 static void uv_enable_timeouts(void)
848 unsigned long mmr_image;
850 nuvhubs = uv_num_possible_blades();
852 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
853 if (!uv_blade_nr_possible_cpus(uvhub))
856 pnode = uv_blade_to_pnode(uvhub);
858 uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
860 * Set the timeout period and then lock it in, in three
861 * steps; captures and locks in the period.
863 * To program the period, the SOFT_ACK_MODE must be off.
865 mmr_image &= ~((unsigned long)1 <<
866 UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
867 uv_write_global_mmr64
868 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
870 * Set the 4-bit period.
872 mmr_image &= ~((unsigned long)0xf <<
873 UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
874 mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
875 UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
876 uv_write_global_mmr64
877 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
879 * Subsequent reversals of the timebase bit (3) cause an
880 * immediate timeout of one or all INTD resources as
881 * indicated in bits 2:0 (7 causes all of them to timeout).
883 mmr_image |= ((unsigned long)1 <<
884 UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
885 uv_write_global_mmr64
886 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
890 static void *uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
892 if (*offset < num_possible_cpus())
897 static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
900 if (*offset < num_possible_cpus())
905 static void uv_ptc_seq_stop(struct seq_file *file, void *data)
909 static inline unsigned long long
910 millisec_2_cycles(unsigned long millisec)
913 unsigned long long cyc;
915 ns = millisec * 1000000; /* milliseconds to nanoseconds */
916 cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
921 * Display the statistics thru /proc.
922 * 'data' points to the cpu number
924 static int uv_ptc_seq_show(struct seq_file *file, void *data)
926 struct ptc_stats *stat;
929 cpu = *(loff_t *)data;
933 "# cpu sent stime numuvhubs numuvhubs16 numuvhubs8 ");
935 "numuvhubs4 numuvhubs2 numuvhubs1 numcpus dto ");
937 "retries rok resetp resett giveup sto bz throt ");
939 "sw_ack recv rtime all ");
941 "one mult none retry canc nocan reset rcan\n");
943 if (cpu < num_possible_cpus() && cpu_online(cpu)) {
944 stat = &per_cpu(ptcstats, cpu);
945 /* source side statistics */
947 "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
948 cpu, stat->s_requestor, cycles_2_us(stat->s_time),
949 stat->s_ntarguvhub, stat->s_ntarguvhub16,
950 stat->s_ntarguvhub8, stat->s_ntarguvhub4,
951 stat->s_ntarguvhub2, stat->s_ntarguvhub1,
952 stat->s_ntargcpu, stat->s_dtimeout);
953 seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
954 stat->s_retry_messages, stat->s_retriesok,
955 stat->s_resets_plug, stat->s_resets_timeout,
956 stat->s_giveup, stat->s_stimeout,
957 stat->s_busy, stat->s_throttles);
958 /* destination side statistics */
960 "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
961 uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
962 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
963 stat->d_requestee, cycles_2_us(stat->d_time),
964 stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
965 stat->d_nomsg, stat->d_retries, stat->d_canceled,
966 stat->d_nocanceled, stat->d_resets,
974 * -1: reset the statistics
975 * 0: display meaning of the statistics
976 * >0: maximum concurrent active descriptors per uvhub (throttle)
978 static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
979 size_t count, loff_t *data)
984 struct ptc_stats *stat;
985 struct bau_control *bcp;
987 if (count == 0 || count > sizeof(optstr))
989 if (copy_from_user(optstr, user, count))
991 optstr[count - 1] = '\0';
992 if (strict_strtol(optstr, 10, &input_arg) < 0) {
993 printk(KERN_DEBUG "%s is invalid\n", optstr);
997 if (input_arg == 0) {
998 printk(KERN_DEBUG "# cpu: cpu number\n");
999 printk(KERN_DEBUG "Sender statistics:\n");
1001 "sent: number of shootdown messages sent\n");
1003 "stime: time spent sending messages\n");
1005 "numuvhubs: number of hubs targeted with shootdown\n");
1007 "numuvhubs16: number times 16 or more hubs targeted\n");
1009 "numuvhubs8: number times 8 or more hubs targeted\n");
1011 "numuvhubs4: number times 4 or more hubs targeted\n");
1013 "numuvhubs2: number times 2 or more hubs targeted\n");
1015 "numuvhubs1: number times 1 hub targeted\n");
1017 "numcpus: number of cpus targeted with shootdown\n");
1019 "dto: number of destination timeouts\n");
1021 "retries: destination timeout retries sent\n");
1023 "rok: : destination timeouts successfully retried\n");
1025 "resetp: ipi-style resource resets for plugs\n");
1027 "resett: ipi-style resource resets for timeouts\n");
1029 "giveup: fall-backs to ipi-style shootdowns\n");
1031 "sto: number of source timeouts\n");
1033 "bz: number of stay-busy's\n");
1035 "throt: number times spun in throttle\n");
1036 printk(KERN_DEBUG "Destination side statistics:\n");
1038 "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
1040 "recv: shootdown messages received\n");
1042 "rtime: time spent processing messages\n");
1044 "all: shootdown all-tlb messages\n");
1046 "one: shootdown one-tlb messages\n");
1048 "mult: interrupts that found multiple messages\n");
1050 "none: interrupts that found no messages\n");
1052 "retry: number of retry messages processed\n");
1054 "canc: number messages canceled by retries\n");
1056 "nocan: number retries that found nothing to cancel\n");
1058 "reset: number of ipi-style reset requests processed\n");
1060 "rcan: number messages canceled by reset requests\n");
1061 } else if (input_arg == -1) {
1062 for_each_present_cpu(cpu) {
1063 stat = &per_cpu(ptcstats, cpu);
1064 memset(stat, 0, sizeof(struct ptc_stats));
1067 uv_bau_max_concurrent = input_arg;
1068 bcp = &per_cpu(bau_control, smp_processor_id());
1069 if (uv_bau_max_concurrent < 1 ||
1070 uv_bau_max_concurrent > bcp->cpus_in_uvhub) {
1072 "Error: BAU max concurrent %d; %d is invalid\n",
1073 bcp->max_concurrent, uv_bau_max_concurrent);
1076 printk(KERN_DEBUG "Set BAU max concurrent:%d\n",
1077 uv_bau_max_concurrent);
1078 for_each_present_cpu(cpu) {
1079 bcp = &per_cpu(bau_control, cpu);
1080 bcp->max_concurrent = uv_bau_max_concurrent;
1087 static const struct seq_operations uv_ptc_seq_ops = {
1088 .start = uv_ptc_seq_start,
1089 .next = uv_ptc_seq_next,
1090 .stop = uv_ptc_seq_stop,
1091 .show = uv_ptc_seq_show
1094 static int uv_ptc_proc_open(struct inode *inode, struct file *file)
1096 return seq_open(file, &uv_ptc_seq_ops);
1099 static const struct file_operations proc_uv_ptc_operations = {
1100 .open = uv_ptc_proc_open,
1102 .write = uv_ptc_proc_write,
1103 .llseek = seq_lseek,
1104 .release = seq_release,
1107 static int __init uv_ptc_init(void)
1109 struct proc_dir_entry *proc_uv_ptc;
1111 if (!is_uv_system())
1114 proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
1115 &proc_uv_ptc_operations);
1117 printk(KERN_ERR "unable to create %s proc entry\n",
1125 * initialize the sending side's sending buffers
1128 uv_activation_descriptor_init(int node, int pnode)
1135 struct bau_desc *bau_desc;
1136 struct bau_desc *bd2;
1137 struct bau_control *bcp;
1140 * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
1141 * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub
1143 bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
1144 UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
1147 pa = uv_gpa(bau_desc); /* need the real nasid */
1148 n = pa >> uv_nshift;
1151 uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
1152 (n << UV_DESC_BASE_PNODE_SHIFT | m));
1155 * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
1156 * cpu even though we only use the first one; one descriptor can
1157 * describe a broadcast to 256 uv hubs.
1159 for (i = 0, bd2 = bau_desc; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR);
1161 memset(bd2, 0, sizeof(struct bau_desc));
1162 bd2->header.sw_ack_flag = 1;
1164 * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub
1165 * in the partition. The bit map will indicate uvhub numbers,
1166 * which are 0-N in a partition. Pnodes are unique system-wide.
1168 bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
1169 bd2->header.dest_subnodeid = 0x10; /* the LB */
1170 bd2->header.command = UV_NET_ENDPOINT_INTD;
1171 bd2->header.int_both = 1;
1173 * all others need to be set to zero:
1174 * fairness chaining multilevel count replied_to
1177 for_each_present_cpu(cpu) {
1178 if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
1180 bcp = &per_cpu(bau_control, cpu);
1181 bcp->descriptor_base = bau_desc;
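/*
 * Each cpu on this uvhub gets the same descriptor_base; a sender
 * indexes into it with UV_ITEMS_PER_DESCRIPTOR * uvhub_cpu (see
 * uv_flush_tlb_others()) to reach its own group of descriptors.
 */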
1186 * initialize the destination side's receiving buffers
1187 * entered for each uvhub in the partition
1188 * - node is first node (kernel memory notion) on the uvhub
1189 * - pnode is the uvhub's physical identifier
1192 uv_payload_queue_init(int node, int pnode)
1198 struct bau_payload_queue_entry *pqp;
1199 struct bau_payload_queue_entry *pqp_malloc;
1200 struct bau_control *bcp;
1202 pqp = (struct bau_payload_queue_entry *) kmalloc_node(
1203 (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
1208 cp = (char *)pqp + 31;
1209 pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
1211 for_each_present_cpu(cpu) {
1212 if (pnode != uv_cpu_to_pnode(cpu))
1214 /* for every cpu on this pnode: */
1215 bcp = &per_cpu(bau_control, cpu);
1216 bcp->va_queue_first = pqp;
1217 bcp->bau_msg_head = pqp;
1218 bcp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
1221 * need the pnode of where the memory was really allocated
1224 pn = pa >> uv_nshift;
1225 uv_write_global_mmr64(pnode,
1226 UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
1227 ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
1228 uv_physnodeaddr(pqp));
1229 uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
1230 uv_physnodeaddr(pqp));
1231 uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
1233 uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1)));
1234 /* in effect, all msg_type's are set to MSG_NOOP */
1235 memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);
1239 * Initialization of each UV hub's structures
1241 static void __init uv_init_uvhub(int uvhub, int vector)
1245 unsigned long apicid;
1247 node = uvhub_to_first_node(uvhub);
1248 pnode = uv_blade_to_pnode(uvhub);
1249 uv_activation_descriptor_init(node, pnode);
1250 uv_payload_queue_init(node, pnode);
1252 * the below initialization can't be in firmware because the
1253 * messaging IRQ will be determined by the OS
1255 apicid = uvhub_to_first_apicid(uvhub);
1256 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
1257 ((apicid << 32) | vector));
1261 * initialize the bau_control structure for each cpu
1263 static void uv_init_per_cpu(int nuvhubs)
1270 struct bau_control *bcp;
1271 struct uvhub_desc *bdp;
1272 struct socket_desc *sdp;
1273 struct bau_control *hmaster = NULL;
1274 struct bau_control *smaster = NULL;
1275 struct socket_desc {
1277 short cpu_number[16];
1284 struct socket_desc socket[2];
1286 struct uvhub_desc *uvhub_descs;
1288 uvhub_descs = (struct uvhub_desc *)
1289 kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
1290 memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
1291 for_each_present_cpu(cpu) {
1292 bcp = &per_cpu(bau_control, cpu);
1293 memset(bcp, 0, sizeof(struct bau_control));
1294 spin_lock_init(&bcp->masks_lock);
1295 bcp->max_concurrent = uv_bau_max_concurrent;
1296 pnode = uv_cpu_hub_info(cpu)->pnode;
1297 uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
1298 bdp = &uvhub_descs[uvhub];
1302 /* time interval to catch a hardware stay-busy bug */
1303 bcp->timeout_interval = millisec_2_cycles(3);
1304 /* kludge: assume uv_hub.h is constant */
1305 socket = (cpu_physical_id(cpu)>>5)&1;
1306 if (socket >= bdp->num_sockets)
1307 bdp->num_sockets = socket+1;
1308 sdp = &bdp->socket[socket];
1309 sdp->cpu_number[sdp->num_cpus] = cpu;
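/*
 * (elided) the socket's and uvhub's cpu counts are presumably
 * incremented here; they become cpus_in_socket and cpus_in_uvhub in
 * the second pass below.
 */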
1313 for_each_possible_blade(uvhub) {
1314 bdp = &uvhub_descs[uvhub];
1315 for (i = 0; i < bdp->num_sockets; i++) {
1316 sdp = &bdp->socket[i];
1317 for (j = 0; j < sdp->num_cpus; j++) {
1318 cpu = sdp->cpu_number[j];
1319 bcp = &per_cpu(bau_control, cpu);
1326 bcp->cpus_in_uvhub = bdp->num_cpus;
1327 bcp->cpus_in_socket = sdp->num_cpus;
1328 bcp->socket_master = smaster;
1329 bcp->uvhub_master = hmaster;
1330 for (k = 0; k < DEST_Q_SIZE; k++)
1331 bcp->socket_acknowledge_count[k] = 0;
1333 uv_cpu_hub_info(cpu)->blade_processor_id;
1342 * Initialization of BAU-related structures
1344 static int __init uv_bau_init(void)
1353 if (!is_uv_system())
1359 for_each_possible_cpu(cur_cpu)
1360 zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
1361 GFP_KERNEL, cpu_to_node(cur_cpu));
1363 uv_bau_max_concurrent = MAX_BAU_CONCURRENT;
1364 uv_nshift = uv_hub_info->m_val;
1365 uv_mmask = (1UL << uv_hub_info->m_val) - 1;
1366 nuvhubs = uv_num_possible_blades();
1368 uv_init_per_cpu(nuvhubs);
1370 uv_partition_base_pnode = 0x7fffffff;
1371 for (uvhub = 0; uvhub < nuvhubs; uvhub++)
1372 if (uv_blade_nr_possible_cpus(uvhub) &&
1373 (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
1374 uv_partition_base_pnode = uv_blade_to_pnode(uvhub);
1376 vector = UV_BAU_MESSAGE;
1377 for_each_possible_blade(uvhub)
1378 if (uv_blade_nr_possible_cpus(uvhub))
1379 uv_init_uvhub(uvhub, vector);
1381 uv_enable_timeouts();
1382 alloc_intr_gate(vector, uv_bau_message_intr1);
1384 for_each_possible_blade(uvhub) {
1385 pnode = uv_blade_to_pnode(uvhub);
1387 uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL,
1388 ((unsigned long)1 << 63));
1389 mmr = 1; /* should be 1 to broadcast to both sockets */
1390 uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST, mmr);
1395 core_initcall(uv_bau_init);
1396 core_initcall(uv_ptc_init);