diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 4cb14dbd7fa3e1e9d6c0185e806b6b17092baef4..312ef0292815f1a3a757c10d3f994d67702b3531 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -400,10 +400,7 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
        unsigned long mmr_offset, int right_shift, int this_cpu,
        struct bau_control *bcp, struct bau_control *smaster, long try)
 {
-       int relaxes = 0;
        unsigned long descriptor_status;
-       unsigned long mmr;
-       unsigned long mask;
        cycles_t ttime;
        struct ptc_stats *stat = bcp->statp;
        struct bau_control *hmaster;
@@ -487,6 +484,47 @@ static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
        return 1;
 }
 
+/*
+ * Our retries are blocked by all destination swack resources being
+ * in use, and a timeout is pending. In that case hardware immediately
+ * returns the ERROR that looks like a destination timeout.
+ */
+static void
+destination_plugged(struct bau_desc *bau_desc, struct bau_control *bcp,
+                       struct bau_control *hmaster, struct ptc_stats *stat)
+{
+       udelay(bcp->plugged_delay);
+       bcp->plugged_tries++;
+       if (bcp->plugged_tries >= bcp->plugsb4reset) {
+               bcp->plugged_tries = 0;
+               quiesce_local_uvhub(hmaster);
+               spin_lock(&hmaster->queue_lock);
+               uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
+               spin_unlock(&hmaster->queue_lock);
+               end_uvhub_quiesce(hmaster);
+               bcp->ipi_attempts++;
+               stat->s_resets_plug++;
+       }
+}
+
+static void
+destination_timeout(struct bau_desc *bau_desc, struct bau_control *bcp,
+                       struct bau_control *hmaster, struct ptc_stats *stat)
+{
+       hmaster->max_bau_concurrent = 1;
+       bcp->timeout_tries++;
+       if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
+               bcp->timeout_tries = 0;
+               quiesce_local_uvhub(hmaster);
+               spin_lock(&hmaster->queue_lock);
+               uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
+               spin_unlock(&hmaster->queue_lock);
+               end_uvhub_quiesce(hmaster);
+               bcp->ipi_attempts++;
+               stat->s_resets_timeout++;
+       }
+}
+
 /*
  * Completions are taking a very long time due to a congested numalink
  * network.
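
Both new helpers above follow the same escalation shape: tolerate a
transient failure a bounded number of times (plugged_tries/plugsb4reset,
timeout_tries/timeoutsb4reset), then quiesce the uvhub and reset the
outstanding messages with an IPI. A minimal userspace sketch of that
count-then-escalate pattern; retry_state, threshold and do_reset() are
illustrative names, not kernel API:

#include <stdio.h>

/* Illustrative stand-in for the quiesce/reset sequence in the patch. */
static void do_reset(void) { puts("escalate: quiesce, reset, resume"); }

struct retry_state {
	int tries;      /* consecutive soft failures seen            */
	int threshold;  /* soft failures tolerated before escalating */
};

/* Returns 1 if this failure escalated to a reset, 0 if we just retry. */
static int on_soft_failure(struct retry_state *rs)
{
	if (++rs->tries < rs->threshold)
		return 0;               /* keep retrying */
	rs->tries = 0;                  /* as plugged_tries is zeroed above */
	do_reset();
	return 1;
}

int main(void)
{
	struct retry_state rs = { .tries = 0, .threshold = 3 };
	for (int i = 0; i < 7; i++)
		printf("failure %d -> escalated=%d\n", i, on_soft_failure(&rs));
	return 0;
}
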
@@ -521,28 +559,22 @@ disable_for_congestion(struct bau_control *bcp, struct ptc_stats *stat)
  *
  * Send a broadcast and wait for it to complete.
  *
- * The flush_mask contains the cpus the broadcast is to be sent to, plus
+ * The flush_mask contains the cpus the broadcast is to be sent to including
  * cpus that are on the local uvhub.
  *
- * Returns NULL if all flushing represented in the mask was done. The mask
- * is zeroed.
- * Returns @flush_mask if some remote flushing remains to be done. The
- * mask will have some bits still set, representing any cpus on the local
- * uvhub (not current cpu) and any on remote uvhubs if the broadcast failed.
+ * Returns 0 if all flushing represented in the mask was done.
+ * Returns 1 if it gives up entirely and the original cpu mask is to be
+ * returned to the kernel.
  */
-const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
-                                            struct cpumask *flush_mask,
-                                            struct bau_control *bcp)
+int uv_flush_send_and_wait(struct bau_desc *bau_desc,
+                          struct cpumask *flush_mask, struct bau_control *bcp)
 {
        int right_shift;
-       int uvhub;
-       int bit;
        int completion_status = 0;
        int seq_number = 0;
        long try = 0;
        int cpu = bcp->uvhub_cpu;
        int this_cpu = bcp->cpu;
-       int this_uvhub = bcp->uvhub;
        unsigned long mmr_offset;
        unsigned long index;
        cycles_t time1;
@@ -552,10 +584,6 @@ const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
        struct bau_control *smaster = bcp->socket_master;
        struct bau_control *hmaster = bcp->uvhub_master;
 
-       /*
-        * Spin here while there are hmaster->max_bau_concurrent or more active
-        * descriptors. This is the per-uvhub 'throttle'.
-        */
        if (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
                        &hmaster->active_descriptor_count,
                        hmaster->max_bau_concurrent)) {
@@ -566,7 +594,6 @@ const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
                        &hmaster->active_descriptor_count,
                        hmaster->max_bau_concurrent));
        }
-
        while (hmaster->uvhub_quiesce)
                cpu_relax();
 
@@ -591,48 +618,15 @@ const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
                index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
                        bcp->uvhub_cpu;
                bcp->send_message = get_cycles();
-
                uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
-
                try++;
                completion_status = uv_wait_completion(bau_desc, mmr_offset,
                        right_shift, this_cpu, bcp, smaster, try);
 
                if (completion_status == FLUSH_RETRY_PLUGGED) {
-                       /*
-                        * Our retries may be blocked by all destination swack
-                        * resources being consumed, and a timeout pending. In
-                        * that case hardware immediately returns the ERROR
-                        * that looks like a destination timeout.
-                        */
-                       udelay(bcp->plugged_delay);
-                       bcp->plugged_tries++;
-                       if (bcp->plugged_tries >= bcp->plugsb4reset) {
-                               bcp->plugged_tries = 0;
-                               quiesce_local_uvhub(hmaster);
-                               spin_lock(&hmaster->queue_lock);
-                               uv_reset_with_ipi(&bau_desc->distribution,
-                                                       this_cpu);
-                               spin_unlock(&hmaster->queue_lock);
-                               end_uvhub_quiesce(hmaster);
-                               bcp->ipi_attempts++;
-                               stat->s_resets_plug++;
-                       }
+                       destination_plugged(bau_desc, bcp, hmaster, stat);
                } else if (completion_status == FLUSH_RETRY_TIMEOUT) {
-                       hmaster->max_bau_concurrent = 1;
-                       bcp->timeout_tries++;
-                       udelay(TIMEOUT_DELAY);
-                       if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
-                               bcp->timeout_tries = 0;
-                               quiesce_local_uvhub(hmaster);
-                               spin_lock(&hmaster->queue_lock);
-                               uv_reset_with_ipi(&bau_desc->distribution,
-                                                               this_cpu);
-                               spin_unlock(&hmaster->queue_lock);
-                               end_uvhub_quiesce(hmaster);
-                               bcp->ipi_attempts++;
-                               stat->s_resets_timeout++;
-                       }
+                       destination_timeout(bau_desc, bcp, hmaster, stat);
                }
                if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
                        bcp->ipi_attempts = 0;
@@ -643,25 +637,16 @@ const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
        } while ((completion_status == FLUSH_RETRY_PLUGGED) ||
                 (completion_status == FLUSH_RETRY_TIMEOUT));
        time2 = get_cycles();
-
        bcp->plugged_tries = 0;
        bcp->timeout_tries = 0;
-
        if ((completion_status == FLUSH_COMPLETE) &&
            (bcp->conseccompletes > bcp->complete_threshold) &&
            (hmaster->max_bau_concurrent <
                                        hmaster->max_bau_concurrent_constant))
                        hmaster->max_bau_concurrent++;
-
-       /*
-        * hold any cpu not timing out here; no other cpu currently held by
-        * the 'throttle' should enter the activation code
-        */
        while (hmaster->uvhub_quiesce)
                cpu_relax();
        atomic_dec(&hmaster->active_descriptor_count);
-
-       /* guard against cycles wrap */
        if (time2 > time1) {
                elapsed = time2 - time1;
                stat->s_time += elapsed;
@@ -674,32 +659,14 @@ const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
                        }
                }
        } else
-               stat->s_requestor--; /* don't count this one */
+               stat->s_requestor--;
        if (completion_status == FLUSH_COMPLETE && try > 1)
                stat->s_retriesok++;
        else if (completion_status == FLUSH_GIVEUP) {
-               /*
-                * Cause the caller to do an IPI-style TLB shootdown on
-                * the target cpu's, all of which are still in the mask.
-                */
                stat->s_giveup++;
-               return flush_mask;
-       }
-
-       /*
-        * Success, so clear the remote cpu's from the mask so we don't
-        * use the IPI method of shootdown on them.
-        */
-       for_each_cpu(bit, flush_mask) {
-               uvhub = uv_cpu_to_blade_id(bit);
-               if (uvhub == this_uvhub)
-                       continue;
-               cpumask_clear_cpu(bit, flush_mask);
+               return 1;
        }
-       if (!cpumask_empty(flush_mask))
-               return flush_mask;
-
-       return NULL;
+       return 0;
 }
 
 /**
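
The concurrency limit itself adapts: destination_timeout() collapses
max_bau_concurrent to 1, and the block above widens it again only after
more than complete_threshold consecutive clean completions. A standalone
sketch of that backoff-and-recover window; the struct and field names
are illustrative, not the kernel's bau_control layout:

#include <stdio.h>

struct window {
	int cur;        /* like max_bau_concurrent          */
	int max;        /* like max_bau_concurrent_constant */
	int clean;      /* like conseccompletes             */
	int threshold;  /* like complete_threshold          */
};

static void on_timeout(struct window *w)
{
	w->cur = 1;     /* back off hard, as destination_timeout() does */
	w->clean = 0;
}

static void on_complete(struct window *w)
{
	if (++w->clean > w->threshold && w->cur < w->max)
		w->cur++;       /* cautiously widen the window */
}

int main(void)
{
	struct window w = { .cur = 2, .max = 3, .clean = 0, .threshold = 2 };
	on_timeout(&w);
	for (int i = 0; i < 5; i++)
		on_complete(&w);
	printf("window=%d\n", w.cur);   /* 3: recovered after clean runs */
	return 0;
}
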
@@ -731,10 +698,11 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
                                          struct mm_struct *mm,
                                          unsigned long va, unsigned int cpu)
 {
-       int remotes;
        int tcpu;
        int uvhub;
        int locals = 0;
+       int remotes = 0;
+       int hubs = 0;
        struct bau_desc *bau_desc;
        struct cpumask *flush_mask;
        struct ptc_stats *stat;
@@ -768,54 +736,51 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 
        /*
         * Each sending cpu has a per-cpu mask which it fills from the caller's
-        * cpu mask.  Only remote cpus are converted to uvhubs and copied.
+        * cpu mask.  All cpus are converted to uvhubs and copied to the
+        * activation descriptor.
         */
        flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
-       /*
-        * copy cpumask to flush_mask, removing current cpu
-        * (current cpu should already have been flushed by the caller and
-        *  should never be returned if we return flush_mask)
-        */
+       /* don't actually do a shootdown of the local cpu */
        cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
        if (cpu_isset(cpu, *cpumask))
-               locals++;  /* current cpu was targeted */
+               stat->s_ntargself++;
 
        bau_desc = bcp->descriptor_base;
        bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
-
        bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
-       remotes = 0;
+
+       /* cpu statistics */
        for_each_cpu(tcpu, flush_mask) {
                uvhub = uv_cpu_to_blade_id(tcpu);
-               if (uvhub == bcp->uvhub) {
-                       locals++;
-                       continue;
-               }
                bau_uvhub_set(uvhub, &bau_desc->distribution);
-               remotes++;
-       }
-       if (remotes == 0) {
-               /*
-                * No off_hub flushing; return status for local hub.
-                * Return the caller's mask if all were local (the current
-                * cpu may be in that mask).
-                */
-               if (locals)
-                       return cpumask;
+               if (uvhub == bcp->uvhub)
+                       locals++;
                else
-                       return NULL;
+                       remotes++;
        }
+       if ((locals + remotes) == 0)
+               return NULL;
        stat->s_requestor++;
-       stat->s_ntargcpu += remotes;
+       stat->s_ntargcpu += remotes + locals;
+       stat->s_ntargremotes += remotes;
+       stat->s_ntarglocals += locals;
        remotes = bau_uvhub_weight(&bau_desc->distribution);
-       stat->s_ntarguvhub += remotes;
-       if (remotes >= 16)
+
+       /* uvhub statistics */
+       hubs = bau_uvhub_weight(&bau_desc->distribution);
+       if (locals) {
+               stat->s_ntarglocaluvhub++;
+               stat->s_ntargremoteuvhub += (hubs - 1);
+       } else
+               stat->s_ntargremoteuvhub += hubs;
+       stat->s_ntarguvhub += hubs;
+       if (hubs >= 16)
                stat->s_ntarguvhub16++;
-       else if (remotes >= 8)
+       else if (hubs >= 8)
                stat->s_ntarguvhub8++;
-       else if (remotes >= 4)
+       else if (hubs >= 4)
                stat->s_ntarguvhub4++;
-       else if (remotes >= 2)
+       else if (hubs >= 2)
                stat->s_ntarguvhub2++;
        else
                stat->s_ntarguvhub1++;
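
The reworked loop above classifies every target cpu as local or remote
while building the distribution map, then buckets the hub count (the
popcount of that map) into the numuvhubsN histogram. A self-contained
sketch of the counting step; cpu_to_hub() is an illustrative stand-in
for uv_cpu_to_blade_id(), and __builtin_popcountl (a GCC/Clang builtin)
stands in for bau_uvhub_weight():

#include <stdio.h>

/* illustrative: two cpus per hub */
static int cpu_to_hub(int cpu) { return cpu / 2; }

int main(void)
{
	int my_hub = 0, locals = 0, remotes = 0;
	unsigned long hub_map = 0;      /* like bau_desc->distribution */
	int targets[] = { 1, 2, 3, 6 }; /* cpus in the flush mask */

	for (unsigned i = 0; i < sizeof(targets) / sizeof(targets[0]); i++) {
		int hub = cpu_to_hub(targets[i]);
		hub_map |= 1UL << hub;  /* like bau_uvhub_set() */
		if (hub == my_hub)
			locals++;
		else
			remotes++;
	}
	printf("locals=%d remotes=%d hubs=%d\n",
	       locals, remotes, __builtin_popcountl(hub_map));
	return 0;
}
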
@@ -824,10 +789,13 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
        bau_desc->payload.sending_cpu = cpu;
 
        /*
-        * uv_flush_send_and_wait returns null if all cpu's were messaged, or
-        * the adjusted flush_mask if any cpu's were not messaged.
+        * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
+        * or 1 if it gave up and the original cpumask should be returned.
         */
-       return uv_flush_send_and_wait(bau_desc, flush_mask, bcp);
+       if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
+               return NULL;
+       else
+               return cpumask;
 }
 
 /*
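
The tail of uv_flush_tlb_others() above translates the new int status
back into the contract its own callers still expect: NULL means the BAU
flushed everything, a non-NULL mask means fall back to an IPI-style
shootdown of the whole mask. The old partial-success case (returning a
half-cleared mask) is gone; it is now all or nothing. A toy sketch of
that caller-side translation, with a string standing in for the cpumask:

#include <stddef.h>
#include <stdio.h>

/* Stand-in for uv_flush_send_and_wait() after this patch:
 * 0 = every target flushed via the BAU, 1 = gave up entirely. */
static int send_and_wait(int simulate_giveup) { return simulate_giveup; }

static const char *flush_others(const char *cpumask, int simulate_giveup)
{
	if (!send_and_wait(simulate_giveup))
		return NULL;    /* done: nothing left for the caller */
	return cpumask;         /* caller IPIs the original mask */
}

int main(void)
{
	printf("%s\n", flush_others("mask", 0) ? "fallback" : "done");
	printf("%s\n", flush_others("mask", 1) ? "fallback" : "done");
	return 0;
}
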
@@ -976,9 +944,11 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
 
        if (!cpu) {
                seq_printf(file,
-                       "# cpu sent stime numuvhubs numuvhubs16 numuvhubs8 ");
+                       "# cpu sent stime self locals remotes ncpus localhub ");
                seq_printf(file,
-                       "numuvhubs4 numuvhubs2 numuvhubs1 numcpus dto ");
+                       "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
+               seq_printf(file,
+                       "numuvhubs4 numuvhubs2 numuvhubs1 dto ");
                seq_printf(file,
                        "retries rok resetp resett giveup sto bz throt ");
                seq_printf(file,
@@ -994,10 +964,14 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
                seq_printf(file,
                        "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
                           cpu, stat->s_requestor, cycles_2_us(stat->s_time),
-                          stat->s_ntarguvhub, stat->s_ntarguvhub16,
+                          stat->s_ntargself, stat->s_ntarglocals,
+                          stat->s_ntargremotes, stat->s_ntargcpu,
+                          stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
+                          stat->s_ntarguvhub, stat->s_ntarguvhub16);
+               seq_printf(file, "%ld %ld %ld %ld %ld ",
                           stat->s_ntarguvhub8, stat->s_ntarguvhub4,
                           stat->s_ntarguvhub2, stat->s_ntarguvhub1,
-                          stat->s_ntargcpu, stat->s_dtimeout);
+                          stat->s_dtimeout);
                seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
                           stat->s_retry_messages, stat->s_retriesok,
                           stat->s_resets_plug, stat->s_resets_timeout,
@@ -1510,15 +1484,16 @@ calculate_destination_timeout(void)
 /*
  * initialize the bau_control structure for each cpu
  */
-static void uv_init_per_cpu(int nuvhubs)
+static void __init uv_init_per_cpu(int nuvhubs)
 {
        int i;
        int cpu;
        int pnode;
        int uvhub;
+       int have_hmaster;
        short socket = 0;
        unsigned short socket_mask;
-       unsigned int uvhub_mask;
+       unsigned char *uvhub_mask;
        struct bau_control *bcp;
        struct uvhub_desc *bdp;
        struct socket_desc *sdp;
@@ -1542,28 +1517,29 @@ static void uv_init_per_cpu(int nuvhubs)
        uvhub_descs = (struct uvhub_desc *)
                kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
        memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
+       uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
        for_each_present_cpu(cpu) {
                bcp = &per_cpu(bau_control, cpu);
                memset(bcp, 0, sizeof(struct bau_control));
                pnode = uv_cpu_hub_info(cpu)->pnode;
                uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
-               uvhub_mask |= (1 << uvhub);
+               *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
                bdp = &uvhub_descs[uvhub];
                bdp->num_cpus++;
                bdp->uvhub = uvhub;
                bdp->pnode = pnode;
                /* kludge: 'assuming' one node per socket, and assuming that
                   disabling a socket just leaves a gap in node numbers */
-               socket = (cpu_to_node(cpu) & 1);;
+               socket = (cpu_to_node(cpu) & 1);
                bdp->socket_mask |= (1 << socket);
                sdp = &bdp->socket[socket];
                sdp->cpu_number[sdp->num_cpus] = cpu;
                sdp->num_cpus++;
        }
-       uvhub = 0;
-       while (uvhub_mask) {
-               if (!(uvhub_mask & 1))
-                       goto nexthub;
+       for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
+               if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
+                       continue;
+               have_hmaster = 0;
                bdp = &uvhub_descs[uvhub];
                socket_mask = bdp->socket_mask;
                socket = 0;
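
uvhub_mask grows from a single unsigned int (at most 32 hubs) to a
kzalloc'ed byte array sized (nuvhubs+7)/8, with bit n kept in byte n/8
at position n%8, so the per-hub scan no longer shifts a word that could
run out of bits. A standalone sketch of exactly that indexing:

#include <stdio.h>
#include <stdlib.h>

/* bit n lives in byte n/8 at position n%8, as in the patch */
static void bitmap_set(unsigned char *map, int n)
{
	map[n / 8] |= 1 << (n % 8);
}

static int bitmap_test(const unsigned char *map, int n)
{
	return map[n / 8] & (1 << (n % 8));
}

int main(void)
{
	int nuvhubs = 70;       /* more hubs than one unsigned int can track */
	/* (nuvhubs + 7) / 8 rounds up to whole bytes, as in the patch */
	unsigned char *mask = calloc((nuvhubs + 7) / 8, 1);

	bitmap_set(mask, 0);
	bitmap_set(mask, 69);
	for (int hub = 0; hub < nuvhubs; hub++)
		if (bitmap_test(mask, hub))
			printf("hub %d present\n", hub);
	free(mask);
	return 0;
}
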
@@ -1577,8 +1553,10 @@ static void uv_init_per_cpu(int nuvhubs)
                                bcp->cpu = cpu;
                                if (i == 0) {
                                        smaster = bcp;
-                                       if (socket == 0)
+                                       if (!have_hmaster) {
+                                               have_hmaster++;
                                                hmaster = bcp;
+                                       }
                                }
                                bcp->cpus_in_uvhub = bdp->num_cpus;
                                bcp->cpus_in_socket = sdp->num_cpus;
@@ -1592,11 +1570,9 @@ nextsocket:
                        socket++;
                        socket_mask = (socket_mask >> 1);
                }
-nexthub:
-               uvhub++;
-               uvhub_mask = (uvhub_mask >> 1);
        }
        kfree(uvhub_descs);
+       kfree(uvhub_mask);
        for_each_present_cpu(cpu) {
                bcp = &per_cpu(bau_control, cpu);
                bcp->baudisabled = 0;
@@ -1661,12 +1637,16 @@ static int __init uv_bau_init(void)
        alloc_intr_gate(vector, uv_bau_message_intr1);
 
        for_each_possible_blade(uvhub) {
-               pnode = uv_blade_to_pnode(uvhub);
-               /* INIT the bau */
-               uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL,
-                                     ((unsigned long)1 << 63));
-               mmr = 1; /* should be 1 to broadcast to both sockets */
-               uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST, mmr);
+               if (uv_blade_nr_possible_cpus(uvhub)) {
+                       pnode = uv_blade_to_pnode(uvhub);
+                       /* INIT the bau */
+                       uv_write_global_mmr64(pnode,
+                                       UVH_LB_BAU_SB_ACTIVATION_CONTROL,
+                                       ((unsigned long)1 << 63));
+                       mmr = 1; /* should be 1 to broadcast to both sockets */
+                       uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST,
+                                               mmr);
+               }
        }
 
        return 0;