gru: send cross partition interrupts using the gru
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index d9ff0289a1c34e5beeef3532fa1c91593a498d04..34749ee88dfa8966c7c453aaaf8dcec980206223 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -31,6 +31,7 @@
 #include <linux/interrupt.h>
 #include <linux/uaccess.h>
 #include <linux/delay.h>
+#include <asm/io_apic.h>
 #include "gru.h"
 #include "grulib.h"
 #include "grutables.h"
@@ -97,9 +98,6 @@
 #define ASYNC_HAN_TO_BID(h)    ((h) - 1)
 #define ASYNC_BID_TO_HAN(b)    ((b) + 1)
 #define ASYNC_HAN_TO_BS(h)     gru_base[ASYNC_HAN_TO_BID(h)]
-#define KCB_TO_GID(cb)         ((cb - gru_start_vaddr) /               \
-                                       (GRU_SIZE * GRU_CHIPLETS_PER_BLADE))
-#define KCB_TO_BS(cb)          gru_base[KCB_TO_GID(cb)]
 
 #define GRU_NUM_KERNEL_CBR     1
 #define GRU_NUM_KERNEL_DSR_BYTES 256
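
Aside: the ASYNC_* handle macros kept above bias the blade id by one so
that a handle value of zero can mean "no async resources". A standalone
sketch of the same round trip, using hypothetical BID_TO_HAN/HAN_TO_BID
names:

    #include <assert.h>

    /* Same bias-by-one scheme as the ASYNC_* macros: handle 0 is
     * reserved to mean "none", so blade 0 maps to handle 1. */
    #define BID_TO_HAN(b)   ((b) + 1)
    #define HAN_TO_BID(h)   ((h) - 1)

    int main(void)
    {
            assert(BID_TO_HAN(0) != 0);             /* 0 stays free as "none" */
            assert(HAN_TO_BID(BID_TO_HAN(7)) == 7); /* lossless round trip */
            return 0;
    }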
@@ -224,13 +222,21 @@ static int gru_free_kernel_contexts(void)
 static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
 {
        struct gru_blade_state *bs;
+       int bid;
 
        STAT(lock_kernel_context);
-       bs = gru_base[blade_id];
+again:
+       bid = blade_id < 0 ? uv_numa_blade_id() : blade_id;
+       bs = gru_base[bid];
 
+       /* Handle the case where migration occurred while waiting for the sema */
        down_read(&bs->bs_kgts_sema);
+       if (blade_id < 0 && bid != uv_numa_blade_id()) {
+               up_read(&bs->bs_kgts_sema);
+               goto again;
+       }
        if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
-               gru_load_kernel_context(bs, blade_id);
+               gru_load_kernel_context(bs, bid);
        return bs;
 
 }
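
Note: the retry above handles the caller migrating to another blade
while sleeping in down_read(). The blade id is sampled before blocking
and re-checked once the semaphore is held; on a mismatch the lock is
dropped and the lookup restarts. A minimal userspace sketch of this
check-sleep-recheck idiom, where current_node() and node_lock[] are
hypothetical stand-ins for uv_numa_blade_id() and bs_kgts_sema:

    #include <pthread.h>

    extern int current_node(void);          /* stand-in, see note above */
    extern pthread_rwlock_t node_lock[];    /* one lock per node */

    static int lock_current_node(void)
    {
            int nid;
    again:
            nid = current_node();                   /* sample before blocking */
            pthread_rwlock_rdlock(&node_lock[nid]); /* may sleep and migrate */
            if (nid != current_node()) {            /* moved while asleep? */
                    pthread_rwlock_unlock(&node_lock[nid]);
                    goto again;                     /* retry on the new node */
            }
            return nid;                             /* lock matches our node */
    }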
@@ -259,7 +265,7 @@ static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
 
        BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
        preempt_disable();
-       bs = gru_lock_kernel_context(uv_numa_blade_id());
+       bs = gru_lock_kernel_context(-1);
        lcpu = uv_blade_processor_id();
        *cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
        *dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
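
With blade selection folded into gru_lock_kernel_context(-1), this
function only carves per-cpu slices out of the shared kernel context:
each blade-local cpu owns one control block and one DSR slice at a
fixed stride. A sketch of the offset arithmetic, where the stride
constant and the base pointers are assumptions for illustration:

    #define HANDLE_STRIDE    256    /* assumed CB handle stride */
    #define KERNEL_DSR_BYTES 256    /* per-cpu DSR bytes, as defined above */

    /* Mirrors the *cb/*dsr computation in gru_get_cpu_resources(). */
    static void cpu_slices(char *kernel_cb, char *kernel_dsr, int lcpu,
                           void **cb, void **dsr)
    {
            *cb  = kernel_cb  + lcpu * HANDLE_STRIDE;    /* this cpu's CB */
            *dsr = kernel_dsr + lcpu * KERNEL_DSR_BYTES; /* this cpu's DSR */
    }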
@@ -388,11 +394,28 @@ int gru_get_cb_exception_detail(void *cb,
                struct control_block_extended_exc_detail *excdet)
 {
        struct gru_control_block_extended *cbe;
-       struct gru_blade_state *bs;
-       int cbrnum;
-
-       bs = KCB_TO_BS(cb);
-       cbrnum = thread_cbr_number(bs->bs_kgts, get_cb_number(cb));
+       struct gru_thread_state *kgts = NULL;
+       unsigned long off;
+       int cbrnum, bid;
+
+       /*
+        * Locate the kgts for cb. This linear scan is SLOW, but
+        * the function is rarely called (i.e., almost never), so
+        * performance does not matter.
+        */
+       for_each_possible_blade(bid) {
+               if (!gru_base[bid])
+                       break;
+               kgts = gru_base[bid]->bs_kgts;
+               if (!kgts || !kgts->ts_gru)
+                       continue;
+               off = cb - kgts->ts_gru->gs_gru_base_vaddr;
+               if (off < GRU_SIZE)
+                       break;
+               kgts = NULL;
+       }
+       BUG_ON(!kgts);
+       cbrnum = thread_cbr_number(kgts, get_cb_number(cb));
        cbe = get_cbe(GRUBASE(cb), cbrnum);
        gru_flush_cache(cbe);   /* CBE not coherent */
        sync_core();
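
With KCB_TO_BS() gone, the owning context is found by containment: for
each blade, take the cb's offset from that blade's mapped GRU base and
accept it if the offset lands inside one GRU_SIZE window. Because off
is unsigned, a single compare also rejects addresses below the base
(the subtraction wraps to a huge value). A standalone sketch of that
test over a hypothetical region table:

    struct region { char *base; unsigned long size; };

    /* Returns the region containing addr, or NULL. The unsigned
     * subtraction makes one compare cover "below base" too. */
    static struct region *find_owner(struct region *r, int n, void *addr)
    {
            unsigned long off;
            int i;

            for (i = 0; i < n; i++) {
                    off = (char *)addr - r[i].base;
                    if (off < r[i].size)
                            return &r[i];
            }
            return NULL;
    }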
@@ -544,7 +567,7 @@ int gru_create_message_queue(struct gru_message_queue_desc *mqd,
        mqd->mq = mq;
        mqd->mq_gpa = uv_gpa(mq);
        mqd->qlines = qlines;
-       mqd->interrupt_pnode = UV_NASID_TO_PNODE(nasid);
+       mqd->interrupt_pnode = nasid >> 1;
        mqd->interrupt_vector = vector;
        mqd->interrupt_apicid = apicid;
        return 0;
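
The open-coded shift computes the same mapping UV_NASID_TO_PNODE()
did: UV hub NASIDs are even and step by two, so a NASID halves
directly to its pnode. A one-liner with illustrative values:

    /* Illustrative only: NASIDs 0, 2, 4, 6, ... map to pnodes 0-3. */
    static inline int nasid_to_pnode(int nasid)
    {
            return nasid >> 1;      /* e.g. NASID 6 -> pnode 3 */
    }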
@@ -680,18 +703,6 @@ cberr:
        return MQE_UNEXPECTED_CB_ERR;
 }
 
-/*
- * Send a cross-partition interrupt to the SSI that contains the target
- * message queue. Normally, the interrupt is automatically delivered by hardware
- * but some error conditions require explicit delivery.
- */
-static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
-{
-       if (mqd->interrupt_vector)
-               uv_hub_send_ipi(mqd->interrupt_pnode, mqd->interrupt_apicid,
-                               mqd->interrupt_vector);
-}
-
 /*
  * Handle a PUT failure. Note: if message was a 2-line message, one of the
 * lines might have been successfully written. Before sending the
@@ -701,7 +712,8 @@ static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
 static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
                        void *mesg, int lines)
 {
-       unsigned long m;
+       unsigned long m, *val = mesg, gpa, save;
+       int ret;
 
        m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
        if (lines == 2) {
@@ -712,7 +724,26 @@ static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
        gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
        if (gru_wait(cb) != CBS_IDLE)
                return MQE_UNEXPECTED_CB_ERR;
-       send_message_queue_interrupt(mqd);
+
+       if (!mqd->interrupt_vector)
+               return MQE_OK;
+
+       /*
+        * Send a cross-partition interrupt to the SSI that contains the target
+        * message queue. Normally, the interrupt is automatically delivered by
+        * hardware but some error conditions require explicit delivery.
+        * Use the GRU to deliver the interrupt. Otherwise partition failures
+        * could cause unrecoverable errors.
+        */
+       gpa = uv_global_gru_mmr_address(mqd->interrupt_pnode, UVH_IPI_INT);
+       save = *val;
+       *val = uv_hub_ipi_value(mqd->interrupt_apicid, mqd->interrupt_vector,
+                               dest_Fixed);
+       gru_vstore_phys(cb, gpa, gru_get_tri(mesg), IAA_REGISTER, IMA);
+       ret = gru_wait(cb);
+       *val = save;
+       if (ret != CBS_IDLE)
+               return MQE_UNEXPECTED_CB_ERR;
        return MQE_OK;
 }
 
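
Two details worth noting in the replacement above: the interrupt is
pushed through the GRU with gru_vstore_phys() instead of a cpu write,
and the first long of the caller's message buffer is borrowed as the
store operand, then restored, so no extra DSR space is needed. A sketch
of that borrow-store-restore pattern, with a hypothetical mmio_store()
standing in for the GRU transfer:

    extern int mmio_store(unsigned long gpa, unsigned long *src);

    /* Stage the IPI payload in the caller's buffer, push it to the
     * remote MMR, then put the original message word back. */
    static int send_ipi_via_buffer(unsigned long *mesg, unsigned long gpa,
                                   unsigned long ipi_value)
    {
            unsigned long save = *mesg;  /* preserve the message word */
            int ret;

            *mesg = ipi_value;           /* payload staged in place */
            ret = mmio_store(gpa, mesg); /* write to the remote MMR */
            *mesg = save;                /* restore caller's data */
            return ret;
    }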