Merge branch 'master' into tk71
similarity index 97%
rename from arch/x86/kernel/tlb_uv.c
rename to arch/x86/platform/uv/tlb_uv.c
index 312ef0292815f1a3a757c10d3f994d67702b3531..a7b38d35c29a1ce7578d36e38cfe04616143b39f 100644
@@ -1001,10 +1001,10 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
 static ssize_t tunables_read(struct file *file, char __user *userbuf,
                                                size_t count, loff_t *ppos)
 {
-       char buf[300];
+       char *buf;
        int ret;
 
-       ret = snprintf(buf, 300, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
+       buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
                "max_bau_concurrent plugged_delay plugsb4reset",
                "timeoutsb4reset ipi_reset_limit complete_threshold",
                "congested_response_us congested_reps congested_period",
@@ -1012,7 +1012,12 @@ static ssize_t tunables_read(struct file *file, char __user *userbuf,
                timeoutsb4reset, ipi_reset_limit, complete_threshold,
                congested_response_us, congested_reps, congested_period);
 
-       return simple_read_from_buffer(userbuf, count, ppos, buf, ret);
+       if (!buf)
+               return -ENOMEM;
+
+       ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
+       kfree(buf);
+       return ret;
 }
 
 /*
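The hunk above drops the fixed 300-byte stack buffer in favour of a string allocated by kasprintf(), so the formatted tunables line can no longer be truncated or overflow the stack, at the cost of one kmalloc/kfree per read. A minimal sketch of the same pattern, with hypothetical example_* names and tunables, using only kasprintf(), simple_read_from_buffer() and kfree():

static ssize_t example_read(struct file *file, char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char *buf;
	ssize_t ret;

	/* allocate exactly as much as the formatted string needs */
	buf = kasprintf(GFP_KERNEL, "%d %d\n",
			example_tunable_a, example_tunable_b);
	if (!buf)
		return -ENOMEM;

	/* copy the window requested by userspace out of the heap string */
	ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
	kfree(buf);
	return ret;
}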
@@ -1285,6 +1290,7 @@ static const struct file_operations tunables_fops = {
        .open           = tunables_open,
        .read           = tunables_read,
        .write          = tunables_write,
+       .llseek         = default_llseek,
 };
 
 static int __init uv_ptc_init(void)
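The added .llseek line gives the fops an explicit seek method instead of relying on the old implicit default. A short sketch of the resulting initializer, with hypothetical example_* handlers:

static const struct file_operations example_fops = {
	.open	= example_open,
	.read	= example_read,
	.write	= example_write,
	.llseek	= default_llseek,	/* explicit, rather than the implicit default */
};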
@@ -1335,10 +1341,10 @@ uv_activation_descriptor_init(int node, int pnode)
 
        /*
         * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
-        * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub
+        * per cpu; and one per cpu on the uvhub (UV_ADP_SIZE)
         */
-       bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
-               UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
+       bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE
+                               * UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
        BUG_ON(!bau_desc);
 
        pa = uv_gpa(bau_desc); /* need the real nasid*/
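The comment above carries the sizing arithmetic behind this node-local allocation: 64-byte descriptors, UV_ITEMS_PER_DESCRIPTOR (8) of them per cpu, and one per-cpu slot out of UV_ADP_SIZE on the hub. A back-of-the-envelope sketch, taking the 32-slot figure quoted on the comment line being removed (the real constant may differ):

/* illustration only: figures from the comment, not from uv_bau.h */
#define EX_DESC_BYTES		64	/* sizeof(struct bau_desc)  */
#define EX_ITEMS_PER_CPU	8	/* UV_ITEMS_PER_DESCRIPTOR  */
#define EX_ADP_SIZE		32	/* per-cpu slots on the hub */

/* 64 * 8 * 32 = 16384 bytes, i.e. a 16 KiB block per uvhub */
static const size_t ex_desc_bytes_per_hub =
	EX_DESC_BYTES * EX_ITEMS_PER_CPU * EX_ADP_SIZE;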
@@ -1358,11 +1364,11 @@ uv_activation_descriptor_init(int node, int pnode)
                memset(bd2, 0, sizeof(struct bau_desc));
                bd2->header.sw_ack_flag = 1;
                /*
-                * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub
+                * base_dest_nodeid is the nasid of the first uvhub
                 * in the partition. The bit map will indicate uvhub numbers,
                 * which are 0-N in a partition. Pnodes are unique system-wide.
                 */
-               bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
+               bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode);
                bd2->header.dest_subnodeid = 0x10; /* the LB */
                bd2->header.command = UV_NET_ENDPOINT_INTD;
                bd2->header.int_both = 1;
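Replacing the open-coded "pnode << 1" with UV_PNODE_TO_NASID() routes the pnode-to-nasid conversion through the one helper that knows about any platform-specific extra node-id bits, instead of repeating the raw shift at the call site. A hypothetical illustration of the difference (this is not the kernel macro, only the shape of the problem):

/* hypothetical: a bare shift is only right when no extra bits exist */
static inline int ex_pnode_to_nasid(int pnode, int gnode_extra_bits)
{
	return (pnode | gnode_extra_bits) << 1;
}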
@@ -1396,9 +1402,9 @@ uv_payload_queue_init(int node, int pnode)
        struct bau_payload_queue_entry *pqp_malloc;
        struct bau_control *bcp;
 
-       pqp = (struct bau_payload_queue_entry *) kmalloc_node(
-               (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
-               GFP_KERNEL, node);
+       pqp = kmalloc_node((DEST_Q_SIZE + 1)
+                          * sizeof(struct bau_payload_queue_entry),
+                          GFP_KERNEL, node);
        BUG_ON(!pqp);
        pqp_malloc = pqp;
 
@@ -1449,7 +1455,7 @@ static void __init uv_init_uvhub(int uvhub, int vector)
         * the below initialization can't be in firmware because the
         * messaging IRQ will be determined by the OS
         */
-       apicid = uvhub_to_first_apicid(uvhub);
+       apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
        uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
                                      ((apicid << 32) | vector));
 }
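The value written to UVH_BAU_DATA_CONFIG packs the destination APIC id into the upper 32 bits and the interrupt vector into the low bits; OR-ing in uv_apicid_hibits first makes the write correct on systems whose APIC ids carry fixed high bits. A small sketch of the composition (the helper name is hypothetical, the layout follows the call above):

/* illustration of the 64-bit MMR value composed above */
static inline unsigned long ex_bau_data_config(unsigned long apicid,
					       unsigned long hibits,
					       unsigned long vector)
{
	apicid |= hibits;		/* fixed high APIC-id bits, if any       */
	return (apicid << 32) | vector;	/* apicid in 63:32, vector in the low bits */
}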
@@ -1484,7 +1490,7 @@ calculate_destination_timeout(void)
 /*
  * initialize the bau_control structure for each cpu
  */
-static void __init uv_init_per_cpu(int nuvhubs)
+static int __init uv_init_per_cpu(int nuvhubs)
 {
        int i;
        int cpu;
@@ -1501,7 +1507,7 @@ static void __init uv_init_per_cpu(int nuvhubs)
        struct bau_control *smaster = NULL;
        struct socket_desc {
                short num_cpus;
-               short cpu_number[16];
+               short cpu_number[MAX_CPUS_PER_SOCKET];
        };
        struct uvhub_desc {
                unsigned short socket_mask;
@@ -1514,8 +1520,7 @@ static void __init uv_init_per_cpu(int nuvhubs)
 
        timeout_us = calculate_destination_timeout();
 
-       uvhub_descs = (struct uvhub_desc *)
-               kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
+       uvhub_descs = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
        memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
        uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
        for_each_present_cpu(cpu) {
@@ -1535,6 +1540,10 @@ static void __init uv_init_per_cpu(int nuvhubs)
                sdp = &bdp->socket[socket];
                sdp->cpu_number[sdp->num_cpus] = cpu;
                sdp->num_cpus++;
+               if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
+                       printk(KERN_EMERG "%d cpus per socket invalid\n", sdp->num_cpus);
+                       return 1;
+               }
        }
        for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
                if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
@@ -1565,6 +1574,12 @@ static void __init uv_init_per_cpu(int nuvhubs)
                                bcp->uvhub_master = hmaster;
                                bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->
                                                blade_processor_id;
+                               if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
+                                       printk(KERN_EMERG
+                                               "%d cpus per uvhub invalid\n",
+                                               bcp->uvhub_cpu);
+                                       return 1;
+                               }
                        }
 nextsocket:
                        socket++;
@@ -1590,6 +1605,7 @@ nextsocket:
                bcp->congested_reps = congested_reps;
                bcp->congested_period = congested_period;
        }
+       return 0;
 }
 
 /*
@@ -1620,7 +1636,10 @@ static int __init uv_bau_init(void)
        spin_lock_init(&disable_lock);
        congested_cycles = microsec_2_cycles(congested_response_us);
 
-       uv_init_per_cpu(nuvhubs);
+       if (uv_init_per_cpu(nuvhubs)) {
+               nobau = 1;
+               return 0;
+       }
 
        uv_partition_base_pnode = 0x7fffffff;
        for (uvhub = 0; uvhub < nuvhubs; uvhub++)
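With uv_init_per_cpu() now returning a status, uv_bau_init() can react to an unsupported topology (too many cpus per socket or per uvhub) by setting nobau and returning success, so the machine falls back to non-BAU TLB shootdowns instead of tripping over bad per-cpu state later. A compressed sketch of that fallback shape, with hypothetical ex_* names standing in for the real checks:

static int __init ex_init_per_cpu(int nuvhubs)
{
	/* ... per-cpu setup ... */
	if (ex_topology_exceeds_limits())	/* hypothetical bounds check */
		return 1;
	return 0;
}

static int __init ex_bau_init(void)
{
	if (ex_init_per_cpu(ex_nuvhubs)) {
		ex_nobau = 1;	/* disable the Broadcast Assist Unit */
		return 0;	/* init still "succeeds", just without BAU */
	}
	/* ... descriptor and payload queue setup continues ... */
	return 0;
}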