x86: Add cpu_mask_to_apicid_and
author Mike Travis <travis@sgi.com>
Wed, 17 Dec 2008 01:33:54 +0000 (17:33 -0800)
committer Mike Travis <travis@sgi.com>
Wed, 17 Dec 2008 01:40:56 +0000 (17:40 -0800)
Impact: new API

Add a helper function that takes two cpumasks, ANDs them, and returns
the apicid of the result.  This removes the need in io_apic.c to use a
temporary cpumask to hold (mask & cfg->domain).

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
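
For context, a minimal sketch of the caller-side simplification this new
hook enables in io_apic.c; the variable and field names (mask, cfg, dest)
follow the commit message and are illustrative, not quoted from the patch:

        /* Before: an extra cpumask was needed just to hold the AND result. */
        cpumask_t tmp;

        cpus_and(tmp, *mask, cfg->domain);
        dest = cpu_mask_to_apicid(&tmp);

        /* After: the AND happens inside the genapic hook, no temporary mask. */
        dest = cpu_mask_to_apicid_and(mask, &cfg->domain);
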
12 files changed:
arch/x86/include/asm/bigsmp/apic.h
arch/x86/include/asm/es7000/apic.h
arch/x86/include/asm/genapic_32.h
arch/x86/include/asm/genapic_64.h
arch/x86/include/asm/mach-default/mach_apic.h
arch/x86/include/asm/mach-generic/mach_apic.h
arch/x86/include/asm/numaq/apic.h
arch/x86/include/asm/summit/apic.h
arch/x86/kernel/genapic_flat_64.c
arch/x86/kernel/genx2apic_cluster.c
arch/x86/kernel/genx2apic_phys.c
arch/x86/kernel/genx2apic_uv_x.c

index dc6225ca48ad58cdfda6618dbcd56412a9fecc81..99f9abacf6a2e3b3d77e6d75adfae5d052016238 100644 (file)
@@ -129,6 +129,22 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
        return apicid;
 }
 
+static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask,
+                                                 const cpumask_t *andmask)
+{
+       int cpu;
+
+       /*
+        * We're using fixed IRQ delivery, can only return one phys APIC ID.
+        * May as well be the first.
+        */
+       for_each_cpu_mask_nr(cpu, *cpumask)
+               if (cpu_isset(cpu, *andmask))
+                       return cpu_to_logical_apicid(cpu);
+
+       return BAD_APICID;
+}
+
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
 {
        return cpuid_apic >> index_msb;
index 4cac0837bb4023acaaae8a83c02ebbd567686fb7..c2bed772af887cb43f52cb50d3805bcf387bac42 100644 (file)
@@ -214,6 +214,53 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
        return apicid;
 }
 
+static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask,
+                                                 const cpumask_t *andmask)
+{
+       int num_bits_set;
+       int num_bits_set2;
+       int cpus_found = 0;
+       int cpu;
+       int apicid = 0;
+
+       num_bits_set = cpus_weight(*cpumask);
+       num_bits_set2 = cpus_weight(*andmask);
+       num_bits_set = min_t(int, num_bits_set, num_bits_set2);
+       /* Return id to all */
+       if (num_bits_set >= nr_cpu_ids)
+#if defined CONFIG_ES7000_CLUSTERED_APIC
+               return 0xFF;
+#else
+               return cpu_to_logical_apicid(0);
+#endif
+       /*
+        * The cpus in the mask must all be on the same apic cluster.  If they
+        * are not, return the default value of TARGET_CPUS.
+        */
+       cpu = first_cpu(*cpumask);
+       if (cpu < nr_cpu_ids && cpu_isset(cpu, *andmask))
+               apicid = cpu_to_logical_apicid(cpu);
+       while (cpus_found < num_bits_set && cpu < nr_cpu_ids) {
+               if (cpu_isset(cpu, *cpumask) && cpu_isset(cpu, *andmask)) {
+                       int new_apicid = cpu_to_logical_apicid(cpu);
+                       if (apicid_cluster(apicid) !=
+                                       apicid_cluster(new_apicid)) {
+                               printk(KERN_WARNING
+                                       "%s: Not a valid mask!\n", __func__);
+#if defined CONFIG_ES7000_CLUSTERED_APIC
+                               return 0xFF;
+#else
+                               return cpu_to_logical_apicid(0);
+#endif
+                       }
+                       apicid = new_apicid;
+                       cpus_found++;
+               }
+               cpu++;
+       }
+       return apicid;
+}
+
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
 {
        return cpuid_apic >> index_msb;
index b21ed21c574dca32c363dfa0025ce8a546666893..325298a82237766c8785de90de06ee1854f63f43 100644 (file)
@@ -58,6 +58,8 @@ struct genapic {
        unsigned (*get_apic_id)(unsigned long x);
        unsigned long apic_id_mask;
        unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
+       unsigned int (*cpu_mask_to_apicid_and)(const cpumask_t *cpumask,
+                                              const cpumask_t *andmask);
        void (*vector_allocation_domain)(int cpu, cpumask_t *retmask);
 
 #ifdef CONFIG_SMP
@@ -115,6 +117,7 @@ struct genapic {
        APICFUNC(get_apic_id)                           \
        .apic_id_mask = APIC_ID_MASK,                   \
        APICFUNC(cpu_mask_to_apicid)                    \
+       APICFUNC(cpu_mask_to_apicid_and)                \
        APICFUNC(vector_allocation_domain)              \
        APICFUNC(acpi_madt_oem_check)                   \
        IPIFUNC(send_IPI_mask)                          \
index a020e7d35a402c0a52cb8977d0e5b26e7dd59b20..301c7f41125dfaf466cc51013601354b70dfb493 100644 (file)
@@ -31,6 +31,8 @@ struct genapic {
        void (*send_IPI_self)(int vector);
        /* */
        unsigned int (*cpu_mask_to_apicid)(const cpumask_t *cpumask);
+       unsigned int (*cpu_mask_to_apicid_and)(const cpumask_t *cpumask,
+                                              const cpumask_t *andmask);
        unsigned int (*phys_pkg_id)(int index_msb);
        unsigned int (*get_apic_id)(unsigned long x);
        unsigned long (*set_apic_id)(unsigned int id);
index c18896b0508cc2fc3a1b9556fc9671e784126557..229b605d104e73c114243f19cfeeedff715d3a0a 100644 (file)
@@ -28,6 +28,7 @@ static inline const cpumask_t *target_cpus(void)
 #define apic_id_registered (genapic->apic_id_registered)
 #define init_apic_ldr (genapic->init_apic_ldr)
 #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
+#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
 #define phys_pkg_id    (genapic->phys_pkg_id)
 #define vector_allocation_domain    (genapic->vector_allocation_domain)
 #define read_apic_id()  (GET_APIC_ID(apic_read(APIC_ID)))
@@ -66,6 +67,15 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
        return cpus_addr(*cpumask)[0];
 }
 
+static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask,
+                                                 const cpumask_t *andmask)
+{
+       unsigned long mask1 = cpus_addr(*cpumask)[0];
+       unsigned long mask2 = cpus_addr(*andmask)[0];
+
+       return (unsigned int)(mask1 & mask2);
+}
+
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
 {
        return cpuid_apic >> index_msb;
index e430f47df667255e26bab25ecb123702426b6e63..48553e958ad517fc72ca40eac6dc145c2fbc5f77 100644 (file)
@@ -24,6 +24,7 @@
 #define check_phys_apicid_present (genapic->check_phys_apicid_present)
 #define check_apicid_used (genapic->check_apicid_used)
 #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
+#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
 #define vector_allocation_domain (genapic->vector_allocation_domain)
 #define enable_apic_mode (genapic->enable_apic_mode)
 #define phys_pkg_id (genapic->phys_pkg_id)
index 1df7ebe738e5842afa7e6eb98ab476e18378a3da..abf668ced5033c3103537711ca03e2a870db4873 100644 (file)
@@ -127,6 +127,12 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
        return (int) 0xF;
 }
 
+static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask,
+                                                 const cpumask_t *andmask)
+{
+       return (int) 0xF;
+}
+
 /* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
 {
index 437dc83725ca9bf0b19294d7a3d786be98f91e08..cbcc2c7eb1d56263c1fe7c79afb505b5883bb2fb 100644 (file)
@@ -170,6 +170,45 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
        return apicid;
 }
 
+static inline unsigned int cpu_mask_to_apicid_and(const cpumask_t *cpumask,
+                                                 const cpumask_t *andmask)
+{
+       int num_bits_set;
+       int num_bits_set2;
+       int cpus_found = 0;
+       int cpu;
+       int apicid = 0;
+
+       num_bits_set = cpus_weight(*cpumask);
+       num_bits_set2 = cpus_weight(*andmask);
+       num_bits_set = min_t(int, num_bits_set, num_bits_set2);
+       /* Return id to all */
+       if (num_bits_set >= nr_cpu_ids)
+               return 0xFF;
+       /*
+        * The cpus in the mask must all be on the same apic cluster.  If they
+        * are not, return the default value of TARGET_CPUS.
+        */
+       cpu = first_cpu(*cpumask);
+       if (cpu < nr_cpu_ids && cpu_isset(cpu, *andmask))
+               apicid = cpu_to_logical_apicid(cpu);
+       while (cpus_found < num_bits_set && cpu < nr_cpu_ids) {
+               if (cpu_isset(cpu, *cpumask) && cpu_isset(cpu, *andmask)) {
+                       int new_apicid = cpu_to_logical_apicid(cpu);
+                       if (apicid_cluster(apicid) !=
+                                       apicid_cluster(new_apicid)) {
+                               printk(KERN_WARNING
+                                       "%s: Not a valid mask!\n", __func__);
+                               return 0xFF;
+                       }
+                       apicid = apicid | new_apicid;
+                       cpus_found++;
+               }
+               cpu++;
+       }
+       return apicid;
+}
+
 /* cpuid returns the value latched in the HW at reset, not the APIC ID
  * register's value.  For any box whose BIOS changes APIC IDs, like
  * clustered APIC systems, we must use hard_smp_processor_id.
index 50eebd0328fe3c5f96bb9c67521d2613d5d39469..1efecd206a7495ac6166a0aafdc4ecbe212ad487 100644 (file)
@@ -158,6 +158,15 @@ static unsigned int flat_cpu_mask_to_apicid(const cpumask_t *cpumask)
        return cpus_addr(*cpumask)[0] & APIC_ALL_CPUS;
 }
 
+static unsigned int flat_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
+                                               const cpumask_t *andmask)
+{
+       unsigned long mask1 = cpus_addr(*cpumask)[0] & APIC_ALL_CPUS;
+       unsigned long mask2 = cpus_addr(*andmask)[0] & APIC_ALL_CPUS;
+
+       return (int)(mask1 & mask2);
+}
+
 static unsigned int phys_pkg_id(int index_msb)
 {
        return hard_smp_processor_id() >> index_msb;
@@ -178,6 +187,7 @@ struct genapic apic_flat =  {
        .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
        .send_IPI_self = apic_send_IPI_self,
        .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
        .phys_pkg_id = phys_pkg_id,
        .get_apic_id = get_apic_id,
        .set_apic_id = set_apic_id,
@@ -254,6 +264,21 @@ static unsigned int physflat_cpu_mask_to_apicid(const cpumask_t *cpumask)
                return BAD_APICID;
 }
 
+static unsigned int physflat_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
+                                                   const cpumask_t *andmask)
+{
+       int cpu;
+
+       /*
+        * We're using fixed IRQ delivery, can only return one phys APIC ID.
+        * May as well be the first.
+        */
+       for_each_cpu_mask_nr(cpu, *cpumask)
+               if (cpu_isset(cpu, *andmask))
+                       return per_cpu(x86_cpu_to_apicid, cpu);
+       return BAD_APICID;
+}
+
 struct genapic apic_physflat =  {
        .name = "physical flat",
        .acpi_madt_oem_check = physflat_acpi_madt_oem_check,
@@ -269,6 +294,7 @@ struct genapic apic_physflat =  {
        .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself,
        .send_IPI_self = apic_send_IPI_self,
        .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and,
        .phys_pkg_id = phys_pkg_id,
        .get_apic_id = get_apic_id,
        .set_apic_id = set_apic_id,
index f5fa9a91ad3818fa7ace73537a4c7d61b2dfcb32..fd8047f4e4555fc4dd89a4c0b9859597408ceb7a 100644 (file)
@@ -123,6 +123,21 @@ static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
                return BAD_APICID;
 }
 
+static unsigned int x2apic_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
+                                                 const cpumask_t *andmask)
+{
+       int cpu;
+
+       /*
+        * We're using fixed IRQ delivery, can only return one phys APIC ID.
+        * May as well be the first.
+        */
+       for_each_cpu_mask_nr(cpu, *cpumask)
+               if (cpu_isset(cpu, *andmask))
+                       return per_cpu(x86_cpu_to_apicid, cpu);
+       return BAD_APICID;
+}
+
 static unsigned int get_apic_id(unsigned long x)
 {
        unsigned int id;
@@ -172,6 +187,7 @@ struct genapic apic_x2apic_cluster = {
        .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
        .send_IPI_self = x2apic_send_IPI_self,
        .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
        .phys_pkg_id = phys_pkg_id,
        .get_apic_id = get_apic_id,
        .set_apic_id = set_apic_id,
index 41c27b2f3d01055677b4f64e5d3fa9cc40426a5b..d5578bb8f16530e0cfa370f3096159f47c8f9a6c 100644 (file)
@@ -122,6 +122,21 @@ static unsigned int x2apic_cpu_mask_to_apicid(const cpumask_t *cpumask)
                return BAD_APICID;
 }
 
+static unsigned int x2apic_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
+                                                 const cpumask_t *andmask)
+{
+       int cpu;
+
+       /*
+        * We're using fixed IRQ delivery, can only return one phys APIC ID.
+        * May as well be the first.
+        */
+       for_each_cpu_mask_nr(cpu, *cpumask)
+               if (cpu_isset(cpu, *andmask))
+                       return per_cpu(x86_cpu_to_apicid, cpu);
+       return BAD_APICID;
+}
+
 static unsigned int get_apic_id(unsigned long x)
 {
        unsigned int id;
@@ -168,6 +183,7 @@ struct genapic apic_x2apic_phys = {
        .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
        .send_IPI_self = x2apic_send_IPI_self,
        .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
        .phys_pkg_id = phys_pkg_id,
        .get_apic_id = get_apic_id,
        .set_apic_id = set_apic_id,
index 010659415ae46a5fba98d84d3d4242045c9838c5..53bd2570272c345d85b73d0f10b5ad5258c43e21 100644 (file)
@@ -179,6 +179,21 @@ static unsigned int uv_cpu_mask_to_apicid(const cpumask_t *cpumask)
                return BAD_APICID;
 }
 
+static unsigned int uv_cpu_mask_to_apicid_and(const cpumask_t *cpumask,
+                                             const cpumask_t *andmask)
+{
+       int cpu;
+
+       /*
+        * We're using fixed IRQ delivery, can only return one phys APIC ID.
+        * May as well be the first.
+        */
+       for_each_cpu_mask_nr(cpu, *cpumask)
+               if (cpu_isset(cpu, *andmask))
+                       return per_cpu(x86_cpu_to_apicid, cpu);
+       return BAD_APICID;
+}
+
 static unsigned int get_apic_id(unsigned long x)
 {
        unsigned int id;
@@ -229,6 +244,7 @@ struct genapic apic_x2apic_uv_x = {
        .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself,
        .send_IPI_self = uv_send_IPI_self,
        .cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,
        .phys_pkg_id = phys_pkg_id,
        .get_apic_id = get_apic_id,
        .set_apic_id = set_apic_id,