/*
 * spu_switch.c
 *
 * (C) Copyright IBM Corp. 2005
 *
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * Host-side part of SPU context switch sequence outlined in
 * Synergistic Processor Element, Book IV.
 *
 * A fully preemptive switch of an SPE is very expensive in terms
 * of time and system resources.  SPE Book IV indicates that SPE
 * allocation should follow a "serially reusable device" model,
 * in which the SPE is assigned a task until it completes.  When
 * this is not possible, this sequence may be used to preemptively
 * save, and then later (optionally) restore the context of a
 * program executing on an SPE.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>

#include "spu_save_dump.h"
#include "spu_restore_dump.h"
#if 0
#define POLL_WHILE_TRUE(_c) {				\
    do {						\
    } while (_c);					\
  }
#else
#define RELAX_SPIN_COUNT				1000
#define POLL_WHILE_TRUE(_c) {				\
    do {						\
	int _i;						\
	for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \
	    cpu_relax();				\
	}						\
	if (unlikely(_c)) yield();			\
    } while (_c);					\
  }
#endif				/* debug */

#define POLL_WHILE_FALSE(_c)	POLL_WHILE_TRUE(!(_c))
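
/*
 * Illustrative usage (not itself part of the switch sequence): both
 * macros are used throughout this file to busy-wait on MMIO status
 * bits, e.g.
 *
 *	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
 *
 * spins (relaxing the CPU, then yielding if still busy) until
 * SPU_Status[R] reads back as 0.
 */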
static inline void acquire_spu_lock(struct spu *spu)
{
        /* Save, Step 1:
         * Restore, Step 1:
         * Acquire SPU-specific mutual exclusion lock.
         * TBD.
         */
}

static inline void release_spu_lock(struct spu *spu)
{
        /* Restore, Step 76:
         * Release SPU-specific mutual exclusion lock.
         * TBD.
         */
}

static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 isolate_state;

        /* Save, Step 2:
         * Save, Step 6:
         * If SPU_Status[E,L,IS] any field is '1', this
         * SPU is in isolate state and cannot be context
         * saved at this time.
         */
        isolate_state = SPU_STATUS_ISOLATED_STATE |
            SPU_STATUS_ISOLATED_LOAD_STAUTUS | SPU_STATUS_ISOLATED_EXIT_STAUTUS;
        return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
}
static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 3:
         * Restore, Step 2:
         * Save INT_Mask_class0 in CSA.
         * Write INT_MASK_class0 with value of 0.
         * Save INT_Mask_class1 in CSA.
         * Write INT_MASK_class1 with value of 0.
         * Save INT_Mask_class2 in CSA.
         * Write INT_MASK_class2 with value of 0.
         */
        spin_lock_irq(&spu->register_lock);
        if (csa) {
                csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
                csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
                csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
        }
        spu_int_mask_set(spu, 0, 0ul);
        spu_int_mask_set(spu, 1, 0ul);
        spu_int_mask_set(spu, 2, 0ul);
        eieio();
        spin_unlock_irq(&spu->register_lock);
}
static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 4:
         * Restore, Step 25.
         * Set a software watchdog timer, which specifies the
         * maximum allowable time for a context save sequence.
         *
         * For present, this implementation will not set a global
         * watchdog timer, as virtualization & variable system load
         * may cause unpredictable execution times.
         */
}

static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 5:
         * Restore, Step 3:
         * Inhibit user-space access (if provided) to this
         * SPU by unmapping the virtual pages assigned to
         * the SPU memory-mapped I/O (MMIO) for problem
         * state. TBD.
         */
}

static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 7:
         * Restore, Step 5:
         * Set a software context switch pending flag.
         */
        set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
        mb();
}
static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 8:
         * Suspend DMA and save MFC_CNTL.
         */
        switch (in_be64(&priv2->mfc_control_RW) &
                MFC_CNTL_SUSPEND_DMA_STATUS_MASK) {
        case MFC_CNTL_SUSPEND_IN_PROGRESS:
                POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
                                  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
                                 MFC_CNTL_SUSPEND_COMPLETE);
                /* fall through */
        case MFC_CNTL_SUSPEND_COMPLETE:
                if (csa) {
                        csa->priv2.mfc_control_RW =
                            in_be64(&priv2->mfc_control_RW) |
                            MFC_CNTL_SUSPEND_DMA_QUEUE;
                }
                break;
        case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
                out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
                POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
                                  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
                                 MFC_CNTL_SUSPEND_COMPLETE);
                if (csa) {
                        csa->priv2.mfc_control_RW =
                            in_be64(&priv2->mfc_control_RW) &
                            ~MFC_CNTL_SUSPEND_DMA_QUEUE;
                }
                break;
        }
}
static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 9:
         * Save SPU_Runcntl in the CSA. This value contains
         * the "Application Desired State".
         */
        csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW);
}

static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 10:
         * Save MFC_SR1 in the CSA.
         */
        csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);
}

static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 11:
         * Read SPU_Status[R], and save to CSA.
         */
        if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) {
                csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
        } else {
                u32 stopped;

                out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
                eieio();
                POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                SPU_STATUS_RUNNING);
                stopped =
                    SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
                    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
                if ((in_be32(&prob->spu_status_R) & stopped) == 0)
                        csa->prob.spu_status_R = SPU_STATUS_RUNNING;
                else
                        csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
        }
}
static inline void save_mfc_decr(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 12:
         * Read MFC_CNTL[Ds]. Update saved copy of
         * CSA.MFC_CNTL[Ds].
         */
        if (in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DECREMENTER_RUNNING) {
                csa->priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
                csa->suspend_time = get_cycles();
                out_be64(&priv2->spu_chnlcntptr_RW, 7ULL);
                eieio();
                csa->spu_chnldata_RW[7] = in_be64(&priv2->spu_chnldata_RW);
                eieio();
        } else {
                csa->priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
        }
}

static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 13:
         * Write MFC_CNTL[Dh] set to a '1' to halt
         * the decrementer.
         */
        out_be64(&priv2->mfc_control_RW, MFC_CNTL_DECREMENTER_HALTED);
        eieio();
}

static inline void save_timebase(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 14:
         * Read PPE Timebase High and Timebase low registers
         * and save in CSA. TBD.
         */
        csa->suspend_time = get_cycles();
}
static inline void remove_other_spu_access(struct spu_state *csa,
                                           struct spu *spu)
{
        /* Save, Step 15:
         * Remove other SPU access to this SPU by unmapping
         * this SPU's pages from their address space. TBD.
         */
}

static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 16:
         * Restore, Step 11.
         * Write SPU_MSSync register. Poll SPU_MSSync[P]
         * for a value of 0.
         */
        out_be64(&prob->spc_mssync_RW, 1UL);
        POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING);
}

static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 17:
         * Restore, Step 12.
         * Restore, Step 48.
         * Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register.
         * Then issue a PPE sync instruction.
         */
        spu_tlb_invalidate(spu);
        mb();
}

static inline void handle_pending_interrupts(struct spu_state *csa,
                                             struct spu *spu)
{
        /* Save, Step 18:
         * Handle any pending interrupts from this SPU
         * here. This is OS or hypervisor specific. One
         * option is to re-enable interrupts to handle any
         * pending interrupts, with the interrupt handlers
         * recognizing the software Context Switch Pending
         * flag, to ensure the SPU execution or MFC command
         * queue is not restarted. TBD.
         */
}
static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        int i;

        /* Save, Step 19:
         * If MFC_Cntl[Se]=0 then save
         * MFC command queues.
         */
        if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) {
                for (i = 0; i < 8; i++) {
                        csa->priv2.puq[i].mfc_cq_data0_RW =
                            in_be64(&priv2->puq[i].mfc_cq_data0_RW);
                        csa->priv2.puq[i].mfc_cq_data1_RW =
                            in_be64(&priv2->puq[i].mfc_cq_data1_RW);
                        csa->priv2.puq[i].mfc_cq_data2_RW =
                            in_be64(&priv2->puq[i].mfc_cq_data2_RW);
                        csa->priv2.puq[i].mfc_cq_data3_RW =
                            in_be64(&priv2->puq[i].mfc_cq_data3_RW);
                }
                for (i = 0; i < 16; i++) {
                        csa->priv2.spuq[i].mfc_cq_data0_RW =
                            in_be64(&priv2->spuq[i].mfc_cq_data0_RW);
                        csa->priv2.spuq[i].mfc_cq_data1_RW =
                            in_be64(&priv2->spuq[i].mfc_cq_data1_RW);
                        csa->priv2.spuq[i].mfc_cq_data2_RW =
                            in_be64(&priv2->spuq[i].mfc_cq_data2_RW);
                        csa->priv2.spuq[i].mfc_cq_data3_RW =
                            in_be64(&priv2->spuq[i].mfc_cq_data3_RW);
                }
        }
}
static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 20:
         * Save the PPU_QueryMask register
         * in the CSA.
         */
        csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW);
}

static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 21:
         * Save the PPU_QueryType register
         * in the CSA.
         */
        csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
}

static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 22:
         * Save the MFC_CSR_TSQ register
         * in the CSA.
         */
        csa->priv2.spu_tag_status_query_RW =
            in_be64(&priv2->spu_tag_status_query_RW);
}

static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 23:
         * Save the MFC_CSR_CMD1 and MFC_CSR_CMD2
         * registers in the CSA.
         */
        csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW);
        csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW);
}

static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 24:
         * Save the MFC_CSR_ATO register in
         * the CSA.
         */
        csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW);
}

static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 25:
         * Save the MFC_TCLASS_ID register in
         * the CSA.
         */
        csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);
}

static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 26:
         * Restore, Step 23.
         * Write the MFC_TCLASS_ID register with
         * the value 0x10000000.
         */
        spu_mfc_tclass_id_set(spu, 0x10000000);
        eieio();
}
static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 27:
         * Restore, Step 14.
         * Write MFC_CNTL[Pc]=1 (purge queue).
         */
        out_be64(&priv2->mfc_control_RW, MFC_CNTL_PURGE_DMA_REQUEST);
        eieio();
}

static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 28:
         * Restore, Step 15.
         * Poll MFC_CNTL[Ps] until value '11' is read
         * (purge complete).
         */
        POLL_WHILE_FALSE(in_be64(&priv2->mfc_control_RW) &
                         MFC_CNTL_PURGE_DMA_COMPLETE);
}

static inline void save_mfc_slbs(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        int i;

        /* Save, Step 29:
         * If MFC_SR1[R]='1', save SLBs in CSA.
         */
        if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) {
                csa->priv2.slb_index_W = in_be64(&priv2->slb_index_W);
                for (i = 0; i < 8; i++) {
                        out_be64(&priv2->slb_index_W, i);
                        eieio();
                        csa->slb_esid_RW[i] = in_be64(&priv2->slb_esid_RW);
                        csa->slb_vsid_RW[i] = in_be64(&priv2->slb_vsid_RW);
                        eieio();
                }
        }
}
static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 30:
         * Restore, Step 18:
         * Write MFC_SR1 with MFC_SR1[D=0,S=1] and
         * MFC_SR1[TL,R,Pr,T] set correctly for the
         * OS specific environment.
         *
         * Implementation note: The SPU-side code
         * for save/restore is privileged, so the
         * MFC_SR1[Pr] bit is not set.
         */
        spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
                              MFC_STATE1_RELOCATE_MASK |
                              MFC_STATE1_BUS_TLBIE_MASK));
}

static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 31:
         * Save SPU_NPC in the CSA.
         */
        csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);
}

static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 32:
         * Save SPU_PrivCntl in the CSA.
         */
        csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);
}

static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 33:
         * Restore, Step 16:
         * Write SPU_PrivCntl[S,Le,A] fields reset to 0.
         */
        out_be64(&priv2->spu_privcntl_RW, 0UL);
        eieio();
}
static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 34:
         * Save SPU_LSLR in the CSA.
         */
        csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);
}

static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 35:
         * Restore, Step 17.
         * Reset the SPU_LSLR register to its default value.
         */
        out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);
        eieio();
}

static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 36:
         * Save SPU_Cfg in the CSA.
         */
        csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);
}

static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 37:
         * Save PM_Trace_Tag_Wait_Mask in the CSA.
         * Not performed by this implementation.
         */
}
static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 38:
         * Save RA_GROUP_ID register and the
         * RA_ENABLE register in the CSA.
         */
        csa->priv1.resource_allocation_groupID_RW =
            spu_resource_allocation_groupID_get(spu);
        csa->priv1.resource_allocation_enable_RW =
            spu_resource_allocation_enable_get(spu);
}

static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 39:
         * Save MB_Stat register in the CSA.
         */
        csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);
}

static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 40:
         * Save the PPU_MB register in the CSA.
         */
        csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);
}

static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 41:
         * Save the PPUINT_MB register in the CSA.
         */
        csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);
}
static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 idx, ch_indices[7] = { 0UL, 1UL, 3UL, 4UL, 24UL, 25UL, 27UL };
        int i;

        /* Save, Step 42:
         * Save the following CH: [0,1,3,4,24,25,27]
         */
        for (i = 0; i < 7; i++) {
                idx = ch_indices[i];
                out_be64(&priv2->spu_chnlcntptr_RW, idx);
                eieio();
                csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);
                csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);
                out_be64(&priv2->spu_chnldata_RW, 0UL);
                out_be64(&priv2->spu_chnlcnt_RW, 0UL);
                eieio();
        }
}

static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        int i;

        /* Save, Step 43:
         * Save SPU Read Mailbox Channel.
         */
        out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
        eieio();
        csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
        for (i = 0; i < 4; i++) {
                csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
        }
        out_be64(&priv2->spu_chnlcnt_RW, 0UL);
        eieio();
}

static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 44:
         * Save MFC_CMD Channel.
         */
        out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
        eieio();
        csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
        eieio();
}

static inline void reset_ch(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
        u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
        u64 idx;
        int i;

        /* Save, Step 45:
         * Reset the following CH: [21, 23, 28, 30]
         */
        for (i = 0; i < 4; i++) {
                idx = ch_indices[i];
                out_be64(&priv2->spu_chnlcntptr_RW, idx);
                eieio();
                out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
                eieio();
        }
}
static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 46:
         * Restore, Step 25.
         * Write MFC_CNTL[Sc]=0 (resume queue processing).
         */
        out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
}

static inline void invalidate_slbs(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 45:
         * Restore, Step 19:
         * If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All.
         */
        if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) {
                out_be64(&priv2->slb_invalidate_all_W, 0UL);
                eieio();
        }
}

static inline void get_kernel_slb(u64 ea, u64 slb[2])
{
        u64 llp;

        if (REGION_ID(ea) == KERNEL_REGION_ID)
                llp = mmu_psize_defs[mmu_linear_psize].sllp;
        else
                llp = mmu_psize_defs[mmu_virtual_psize].sllp;
        slb[0] = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
            SLB_VSID_KERNEL | llp;
        slb[1] = (ea & ESID_MASK) | SLB_ESID_V;
}

static inline void load_mfc_slb(struct spu *spu, u64 slb[2], int slbe)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        out_be64(&priv2->slb_index_W, slbe);
        eieio();
        out_be64(&priv2->slb_vsid_RW, slb[0]);
        out_be64(&priv2->slb_esid_RW, slb[1]);
        eieio();
}
static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu)
{
        u64 code_slb[2];
        u64 lscsa_slb[2];

        /* Save, Step 47:
         * Restore, Step 30.
         * If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
         * register, then initialize SLB_VSID and SLB_ESID
         * to provide access to SPU context save code and
         * LSCSA.
         *
         * This implementation places both the context
         * switch code and LSCSA in kernel address space.
         *
         * Further this implementation assumes that the
         * MFC_SR1[R]=1 (in other words, assume that
         * translation is desired by OS environment).
         */
        invalidate_slbs(csa, spu);
        get_kernel_slb((unsigned long)&spu_save_code[0], code_slb);
        get_kernel_slb((unsigned long)csa->lscsa, lscsa_slb);
        load_mfc_slb(spu, code_slb, 0);
        if ((lscsa_slb[0] != code_slb[0]) || (lscsa_slb[1] != code_slb[1]))
                load_mfc_slb(spu, lscsa_slb, 1);
}

static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 48:
         * Restore, Step 23.
         * Change the software context switch pending flag
         * to context switch active.
         */
        set_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags);
        clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
        mb();
}
static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
{
        unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
            CLASS1_ENABLE_STORAGE_FAULT_INTR;

        /* Save, Step 49:
         * Restore, Step 22:
         * Reset and then enable interrupts, as
         * needed by OS.
         *
         * This implementation enables only class1
         * (translation) interrupts.
         */
        spin_lock_irq(&spu->register_lock);
        spu_int_stat_clear(spu, 0, ~0ul);
        spu_int_stat_clear(spu, 1, ~0ul);
        spu_int_stat_clear(spu, 2, ~0ul);
        spu_int_mask_set(spu, 0, 0ul);
        spu_int_mask_set(spu, 1, class1_mask);
        spu_int_mask_set(spu, 2, 0ul);
        spin_unlock_irq(&spu->register_lock);
}
static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
                               unsigned int ls_offset, unsigned int size,
                               unsigned int tag, unsigned int rclass,
                               unsigned int cmd)
{
        struct spu_problem __iomem *prob = spu->problem;
        union mfc_tag_size_class_cmd command;
        unsigned int transfer_size;
        volatile unsigned int status = 0x0;

        while (size > 0) {
                transfer_size =
                    (size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;
                command.u.mfc_size = transfer_size;
                command.u.mfc_tag = tag;
                command.u.mfc_rclassid = rclass;
                command.u.mfc_cmd = cmd;
                do {
                        out_be32(&prob->mfc_lsa_W, ls_offset);
                        out_be64(&prob->mfc_ea_W, ea);
                        out_be64(&prob->mfc_union_W.all64, command.all64);
                        status =
                            in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
                        if (unlikely(status & 0x2)) {
                                cpu_relax();
                        }
                } while (status & 0x3);
                size -= transfer_size;
                ea += transfer_size;
                ls_offset += transfer_size;
        }
        return 0;
}
static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
{
        unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
        unsigned int ls_offset = 0x0;
        unsigned int size = 16384;
        unsigned int tag = 0;
        unsigned int rclass = 0;
        unsigned int cmd = MFC_PUT_CMD;

        /* Save, Step 50:
         * Issue a DMA command to copy the first 16K bytes
         * of local storage to the CSA.
         */
        send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 51:
         * Restore, Step 31:
         * Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry
         * point address of context save code in local
         * storage.
         *
         * This implementation uses SPU-side save/restore
         * programs with entry points at LSA of 0.
         */
        out_be32(&prob->spu_npc_RW, 0);
        eieio();
}

static inline void set_signot1(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        union {
                u64 ull;
                u32 ui[2];
        } addr64;

        /* Save, Step 52:
         * Restore, Step 32:
         * Write SPU_Sig_Notify_1 register with upper 32-bits
         * of the CSA.LSCSA effective address.
         */
        addr64.ull = (u64) csa->lscsa;
        out_be32(&prob->signal_notify1, addr64.ui[0]);
}

static inline void set_signot2(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        union {
                u64 ull;
                u32 ui[2];
        } addr64;

        /* Save, Step 53:
         * Restore, Step 33:
         * Write SPU_Sig_Notify_2 register with lower 32-bits
         * of the CSA.LSCSA effective address.
         */
        addr64.ull = (u64) csa->lscsa;
        out_be32(&prob->signal_notify2, addr64.ui[1]);
}
static inline void send_save_code(struct spu_state *csa, struct spu *spu)
{
        unsigned long addr = (unsigned long)&spu_save_code[0];
        unsigned int ls_offset = 0x0;
        unsigned int size = sizeof(spu_save_code);
        unsigned int tag = 0;
        unsigned int rclass = 0;
        unsigned int cmd = MFC_GETFS_CMD;

        /* Save, Step 54:
         * Issue a DMA command to copy context save code
         * to local storage and start SPU.
         */
        send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 55:
         * Restore, Step 38.
         * Write PPU_QueryMask=1 (enable Tag Group 0)
         * and issue eieio instruction.
         */
        out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));
        eieio();
}

static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 mask = MFC_TAGID_TO_TAGMASK(0);
        unsigned long flags;

        /* Save, Step 56:
         * Restore, Step 39.
         * Restore, Step 45.
         * Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete)
         * or write PPU_QueryType[TS]=01 and wait for Tag Group
         * Complete Interrupt. Write INT_Stat_Class0 or
         * INT_Stat_Class2 with value of 'handled'.
         */
        POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);

        local_irq_save(flags);
        spu_int_stat_clear(spu, 0, ~(0ul));
        spu_int_stat_clear(spu, 2, ~(0ul));
        local_irq_restore(flags);
}
static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        unsigned long flags;

        /* Save, Step 57:
         * Restore, Step 40.
         * Poll until SPU_Status[R]=0 or wait for SPU Class 0
         * or SPU Class 2 interrupt. Write INT_Stat_class0
         * or INT_Stat_class2 with value of handled.
         */
        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

        local_irq_save(flags);
        spu_int_stat_clear(spu, 0, ~(0ul));
        spu_int_stat_clear(spu, 2, ~(0ul));
        local_irq_restore(flags);
}

static inline int check_save_status(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 complete;

        /* Save, Step 54:
         * If SPU_Status[P]=1 and SPU_Status[SC] = "success",
         * context save succeeded, otherwise context save
         * failed.
         */
        complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
                    SPU_STATUS_STOPPED_BY_STOP);
        return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 4:
         * If required, notify the "using application" that
         * the SPU task has been terminated. TBD.
         */
}
static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 7:
         * Restore, Step 46.
         * Write MFC_Cntl[Dh,Sc]='1','1' to suspend
         * the queue and halt the decrementer.
         */
        out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
                 MFC_CNTL_DECREMENTER_HALTED);
        eieio();
}

static inline void wait_suspend_mfc_complete(struct spu_state *csa,
                                             struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 8:
         * Restore, Step 47.
         * Poll MFC_CNTL[Ss] until 11 is returned.
         */
        POLL_WHILE_FALSE(in_be64(&priv2->mfc_control_RW) &
                         MFC_CNTL_SUSPEND_COMPLETE);
}

static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Restore, Step 9:
         * If SPU_Status[R]=1, stop SPU execution
         * and wait for stop to complete.
         *
         * Returns       1 if SPU_Status[R]=1 on entry.
         *               0 otherwise.
         */
        if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
                if (in_be32(&prob->spu_status_R) &
                    SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
                        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                        SPU_STATUS_RUNNING);
                }
                if ((in_be32(&prob->spu_status_R) &
                     SPU_STATUS_ISOLATED_LOAD_STAUTUS)
                    || (in_be32(&prob->spu_status_R) &
                        SPU_STATUS_ISOLATED_STATE)) {
                        out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
                        eieio();
                        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                        SPU_STATUS_RUNNING);
                        out_be32(&prob->spu_runcntl_RW, 0x2);
                        eieio();
                        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                        SPU_STATUS_RUNNING);
                }
                if (in_be32(&prob->spu_status_R) &
                    SPU_STATUS_WAITING_FOR_CHANNEL) {
                        out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
                        eieio();
                        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                        SPU_STATUS_RUNNING);
                }
                return 1;
        }
        return 0;
}
static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Restore, Step 10:
         * If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,
         * release SPU from isolate state.
         */
        if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
                if (in_be32(&prob->spu_status_R) &
                    SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
                        spu_mfc_sr1_set(spu,
                                        MFC_STATE1_MASTER_RUN_CONTROL_MASK);
                        eieio();
                        out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
                        eieio();
                        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                        SPU_STATUS_RUNNING);
                }
                if ((in_be32(&prob->spu_status_R) &
                     SPU_STATUS_ISOLATED_LOAD_STAUTUS)
                    || (in_be32(&prob->spu_status_R) &
                        SPU_STATUS_ISOLATED_STATE)) {
                        spu_mfc_sr1_set(spu,
                                        MFC_STATE1_MASTER_RUN_CONTROL_MASK);
                        eieio();
                        out_be32(&prob->spu_runcntl_RW, 0x2);
                        eieio();
                        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                        SPU_STATUS_RUNNING);
                }
        }
}
static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 ch_indices[7] = { 0UL, 1UL, 3UL, 4UL, 24UL, 25UL, 27UL };
        u64 idx;
        int i;

        /* Restore, Step 20:
         * Reset the following CH: [0,1,3,4,24,25,27]
         */
        for (i = 0; i < 7; i++) {
                idx = ch_indices[i];
                out_be64(&priv2->spu_chnlcntptr_RW, idx);
                eieio();
                out_be64(&priv2->spu_chnldata_RW, 0UL);
                out_be64(&priv2->spu_chnlcnt_RW, 0UL);
                eieio();
        }
}

static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };
        u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };
        u64 idx;
        int i;

        /* Restore, Step 21:
         * Reset the following CH: [21, 23, 28, 29, 30]
         */
        for (i = 0; i < 5; i++) {
                idx = ch_indices[i];
                out_be64(&priv2->spu_chnlcntptr_RW, idx);
                eieio();
                out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
                eieio();
        }
}
static inline void setup_spu_status_part1(struct spu_state *csa,
                                          struct spu *spu)
{
        u32 status_P = SPU_STATUS_STOPPED_BY_STOP;
        u32 status_I = SPU_STATUS_INVALID_INSTR;
        u32 status_H = SPU_STATUS_STOPPED_BY_HALT;
        u32 status_S = SPU_STATUS_SINGLE_STEP;
        u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;
        u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;
        u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
        u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR;
        u32 status_code;

        /* Restore, Step 27:
         * If the CSA.SPU_Status[I,S,H,P]=1 then add the correct
         * instruction sequence to the end of the SPU based restore
         * code (after the "context restored" stop and signal) to
         * restore the correct SPU status.
         *
         * NOTE: Rather than modifying the SPU executable, we
         * instead add a new 'stopped_status' field to the
         * LSCSA. The SPU-side restore reads this field and
         * takes the appropriate action when exiting.
         */
        status_code =
            (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;
        if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {

                /* SPU_Status[P,I]=1 - Illegal Instruction followed
                 * by Stop and Signal instruction, followed by 'br -4'.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;
                csa->lscsa->stopped_status.slot[1] = status_code;

        } else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {

                /* SPU_Status[P,H]=1 - Halt Conditional, followed
                 * by Stop and Signal instruction, followed by
                 * 'br -4'.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;
                csa->lscsa->stopped_status.slot[1] = status_code;

        } else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {

                /* SPU_Status[S,P]=1 - Stop and Signal instruction
                 * followed by 'br -4'.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;
                csa->lscsa->stopped_status.slot[1] = status_code;

        } else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {

                /* SPU_Status[S,I]=1 - Illegal instruction followed
                 * by 'br -4'.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;
                csa->lscsa->stopped_status.slot[1] = status_code;

        } else if ((csa->prob.spu_status_R & status_P) == status_P) {

                /* SPU_Status[P]=1 - Stop and Signal instruction
                 * followed by 'br -4'.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
                csa->lscsa->stopped_status.slot[1] = status_code;

        } else if ((csa->prob.spu_status_R & status_H) == status_H) {

                /* SPU_Status[H]=1 - Halt Conditional, followed
                 * by 'br -4'.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;

        } else if ((csa->prob.spu_status_R & status_S) == status_S) {

                /* SPU_Status[S]=1 - Two nop instructions.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;

        } else if ((csa->prob.spu_status_R & status_I) == status_I) {

                /* SPU_Status[I]=1 - Illegal instruction followed
                 * by 'br -4'.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;

        }
}
static inline void setup_spu_status_part2(struct spu_state *csa,
                                          struct spu *spu)
{
        u32 mask;

        /* Restore, Step 28:
         * If the CSA.SPU_Status[I,S,H,P,R]=0 then
         * add a 'br *' instruction to the end of
         * the SPU based restore code.
         *
         * NOTE: Rather than modifying the SPU executable, we
         * instead add a new 'stopped_status' field to the
         * LSCSA. The SPU-side restore reads this field and
         * takes the appropriate action when exiting.
         */
        mask = SPU_STATUS_INVALID_INSTR |
            SPU_STATUS_SINGLE_STEP |
            SPU_STATUS_STOPPED_BY_HALT |
            SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
        if (!(csa->prob.spu_status_R & mask)) {
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;
        }
}

static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 29:
         * Restore RA_GROUP_ID register and the
         * RA_ENABLE register from the CSA.
         */
        spu_resource_allocation_groupID_set(spu,
                csa->priv1.resource_allocation_groupID_RW);
        spu_resource_allocation_enable_set(spu,
                csa->priv1.resource_allocation_enable_RW);
}
static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
{
        unsigned long addr = (unsigned long)&spu_restore_code[0];
        unsigned int ls_offset = 0x0;
        unsigned int size = sizeof(spu_restore_code);
        unsigned int tag = 0;
        unsigned int rclass = 0;
        unsigned int cmd = MFC_GETFS_CMD;

        /* Restore, Step 37:
         * Issue MFC DMA command to copy context
         * restore code to local storage.
         */
        send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void setup_decr(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 34:
         * If CSA.MFC_CNTL[Ds]=1 (decrementer was
         * running) then adjust decrementer, set
         * decrementer running status in LSCSA,
         * and set decrementer "wrapped" status
         * in LSCSA.
         */
        if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {
                cycles_t resume_time = get_cycles();
                cycles_t delta_time = resume_time - csa->suspend_time;

                csa->lscsa->decr.slot[0] -= delta_time;
        }
}

static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 35:
         * Copy the CSA.PU_MB data into the LSCSA.
         */
        csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R;
}

static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 36:
         * Copy the CSA.PUINT_MB data into the LSCSA.
         */
        csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R;
}
static inline int check_restore_status(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 complete;

        /* Restore, Step 40:
         * If SPU_Status[P]=1 and SPU_Status[SC] = "success",
         * context restore succeeded, otherwise context restore
         * failed.
         */
        complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
                    SPU_STATUS_STOPPED_BY_STOP);
        return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 41:
         * Restore SPU_PrivCntl from the CSA.
         */
        out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW);
        eieio();
}

static inline void restore_status_part1(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 mask;

        /* Restore, Step 42:
         * If any CSA.SPU_Status[I,S,H,P]=1, then
         * restore the error or single step state.
         */
        mask = SPU_STATUS_INVALID_INSTR |
            SPU_STATUS_SINGLE_STEP |
            SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
        if (csa->prob.spu_status_R & mask) {
                out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
                eieio();
                POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                SPU_STATUS_RUNNING);
        }
}
static inline void restore_status_part2(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 mask;

        /* Restore, Step 43:
         * If all CSA.SPU_Status[I,S,H,P,R]=0 then write
         * SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1,
         * then write '00' to SPU_RunCntl[R0R1] and wait
         * for SPU_Status[R]=0.
         */
        mask = SPU_STATUS_INVALID_INSTR |
            SPU_STATUS_SINGLE_STEP |
            SPU_STATUS_STOPPED_BY_HALT |
            SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
        if (!(csa->prob.spu_status_R & mask)) {
                out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
                eieio();
                POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) &
                                 SPU_STATUS_RUNNING);
                out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
                eieio();
                POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                SPU_STATUS_RUNNING);
        }
}

static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
{
        unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
        unsigned int ls_offset = 0x0;
        unsigned int size = 16384;
        unsigned int tag = 0;
        unsigned int rclass = 0;
        unsigned int cmd = MFC_GET_CMD;

        /* Restore, Step 44:
         * Issue a DMA command to restore the first
         * 16kb of local storage from CSA.
         */
        send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}
static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 49:
         * Write INT_MASK_class0 with value of 0.
         * Write INT_MASK_class1 with value of 0.
         * Write INT_MASK_class2 with value of 0.
         * Write INT_STAT_class0 with value of -1.
         * Write INT_STAT_class1 with value of -1.
         * Write INT_STAT_class2 with value of -1.
         */
        spin_lock_irq(&spu->register_lock);
        spu_int_mask_set(spu, 0, 0ul);
        spu_int_mask_set(spu, 1, 0ul);
        spu_int_mask_set(spu, 2, 0ul);
        spu_int_stat_clear(spu, 0, ~0ul);
        spu_int_stat_clear(spu, 1, ~0ul);
        spu_int_stat_clear(spu, 2, ~0ul);
        spin_unlock_irq(&spu->register_lock);
}

static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        int i;

        /* Restore, Step 50:
         * If MFC_Cntl[Se]!=0 then restore
         * MFC command queues.
         */
        if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) {
                for (i = 0; i < 8; i++) {
                        out_be64(&priv2->puq[i].mfc_cq_data0_RW,
                                 csa->priv2.puq[i].mfc_cq_data0_RW);
                        out_be64(&priv2->puq[i].mfc_cq_data1_RW,
                                 csa->priv2.puq[i].mfc_cq_data1_RW);
                        out_be64(&priv2->puq[i].mfc_cq_data2_RW,
                                 csa->priv2.puq[i].mfc_cq_data2_RW);
                        out_be64(&priv2->puq[i].mfc_cq_data3_RW,
                                 csa->priv2.puq[i].mfc_cq_data3_RW);
                }
                for (i = 0; i < 16; i++) {
                        out_be64(&priv2->spuq[i].mfc_cq_data0_RW,
                                 csa->priv2.spuq[i].mfc_cq_data0_RW);
                        out_be64(&priv2->spuq[i].mfc_cq_data1_RW,
                                 csa->priv2.spuq[i].mfc_cq_data1_RW);
                        out_be64(&priv2->spuq[i].mfc_cq_data2_RW,
                                 csa->priv2.spuq[i].mfc_cq_data2_RW);
                        out_be64(&priv2->spuq[i].mfc_cq_data3_RW,
                                 csa->priv2.spuq[i].mfc_cq_data3_RW);
                }
        }
        eieio();
}
static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Restore, Step 51:
         * Restore the PPU_QueryMask register from CSA.
         */
        out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW);
        eieio();
}

static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Restore, Step 52:
         * Restore the PPU_QueryType register from CSA.
         */
        out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW);
        eieio();
}

static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 53:
         * Restore the MFC_CSR_TSQ register from CSA.
         */
        out_be64(&priv2->spu_tag_status_query_RW,
                 csa->priv2.spu_tag_status_query_RW);
        eieio();
}

static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 54:
         * Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2
         * registers from CSA.
         */
        out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW);
        out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW);
        eieio();
}

static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 55:
         * Restore the MFC_CSR_ATO register from CSA.
         */
        out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW);
}

static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 56:
         * Restore the MFC_TCLASS_ID register from CSA.
         */
        spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
        eieio();
}
static inline void set_llr_event(struct spu_state *csa, struct spu *spu)
{
        u64 ch0_cnt, ch0_data;
        u64 ch1_data;

        /* Restore, Step 57:
         * Set the Lock Line Reservation Lost Event by:
         * 1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1.
         * 2. If CSA.SPU_Channel_0_Count=0 and
         *    CSA.SPU_Wr_Event_Mask[Lr]=1 and
         *    CSA.SPU_Event_Status[Lr]=0 then set
         *    CSA.SPU_Event_Status_Count=1.
         */
        ch0_cnt = csa->spu_chnlcnt_RW[0];
        ch0_data = csa->spu_chnldata_RW[0];
        ch1_data = csa->spu_chnldata_RW[1];
        csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT;
        if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) &&
            (ch1_data & MFC_LLR_LOST_EVENT)) {
                csa->spu_chnlcnt_RW[0] = 1;
        }
}

static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 58:
         * If the status of the CSA software decrementer
         * "wrapped" flag is set, OR in a '1' to
         * CSA.SPU_Event_Status[Tm].
         */
        if (csa->lscsa->decr_status.slot[0] == 1) {
                csa->spu_chnldata_RW[0] |= 0x20;
        }
        if ((csa->lscsa->decr_status.slot[0] == 1) &&
            (csa->spu_chnlcnt_RW[0] == 0 &&
             ((csa->spu_chnldata_RW[2] & 0x20) == 0x0) &&
             ((csa->spu_chnldata_RW[0] & 0x20) != 0x1))) {
                csa->spu_chnlcnt_RW[0] = 1;
        }
}
static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 idx, ch_indices[7] = { 0UL, 1UL, 3UL, 4UL, 24UL, 25UL, 27UL };
        int i;

        /* Restore, Step 59:
         * Restore the following CH: [0,1,3,4,24,25,27]
         */
        for (i = 0; i < 7; i++) {
                idx = ch_indices[i];
                out_be64(&priv2->spu_chnlcntptr_RW, idx);
                eieio();
                out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]);
                out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]);
                eieio();
        }
}

static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 ch_indices[3] = { 9UL, 21UL, 23UL };
        u64 ch_counts[3] = { 1UL, 16UL, 1UL };
        u64 idx;
        int i;

        /* Restore, Step 60:
         * Restore the following CH: [9,21,23].
         */
        ch_counts[0] = 1UL;
        ch_counts[1] = csa->spu_chnlcnt_RW[21];
        ch_counts[2] = 1UL;
        for (i = 0; i < 3; i++) {
                idx = ch_indices[i];
                out_be64(&priv2->spu_chnlcntptr_RW, idx);
                eieio();
                out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
                eieio();
        }
}

static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 61:
         * Restore the SPU_LSLR register from CSA.
         */
        out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW);
        eieio();
}

static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 62:
         * Restore the SPU_Cfg register from CSA.
         */
        out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW);
        eieio();
}
static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 63:
         * Restore PM_Trace_Tag_Wait_Mask from CSA.
         * Not performed by this implementation.
         */
}

static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Restore, Step 64:
         * Restore SPU_NPC from CSA.
         */
        out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW);
        eieio();
}

static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        int i;

        /* Restore, Step 65:
         * Restore MFC_RdSPU_MB from CSA.
         */
        out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
        eieio();
        out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);
        for (i = 0; i < 4; i++) {
                out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);
        }
        eieio();
}

static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 dummy = 0;

        /* Restore, Step 66:
         * If CSA.MB_Stat[P]=0 (mailbox empty) then
         * read from the PPU_MB register.
         */
        if ((csa->prob.mb_stat_R & 0xFF) == 0) {
                dummy = in_be32(&prob->pu_mb_R);
                eieio();
        }
}

static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 dummy = 0UL;

        /* Restore, Step 67:
         * If CSA.MB_Stat[I]=0 (mailbox empty) then
         * read from the PPUINT_MB register.
         */
        if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
                dummy = in_be64(&priv2->puint_mb_R);
                eieio();
                spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
                eieio();
        }
}
static inline void restore_mfc_slbs(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        int i;

        /* Restore, Step 68:
         * If MFC_SR1[R]='1', restore SLBs from CSA.
         */
        if (csa->priv1.mfc_sr1_RW & MFC_STATE1_RELOCATE_MASK) {
                for (i = 0; i < 8; i++) {
                        out_be64(&priv2->slb_index_W, i);
                        eieio();
                        out_be64(&priv2->slb_esid_RW, csa->slb_esid_RW[i]);
                        out_be64(&priv2->slb_vsid_RW, csa->slb_vsid_RW[i]);
                        eieio();
                }
                out_be64(&priv2->slb_index_W, csa->priv2.slb_index_W);
                eieio();
        }
}

static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 69:
         * Restore the MFC_SR1 register from CSA.
         */
        spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
        eieio();
}

static inline void restore_other_spu_access(struct spu_state *csa,
                                            struct spu *spu)
{
        /* Restore, Step 70:
         * Restore other SPU mappings to this SPU. TBD.
         */
}

static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Restore, Step 71:
         * If CSA.SPU_Status[R]=1 then write
         * SPU_RunCntl[R0R1]='01'.
         */
        if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) {
                out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
                eieio();
        }
}

static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 72:
         * Restore the MFC_CNTL register from the CSA.
         */
        out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
        eieio();
}

static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 73:
         * Enable user-space access (if provided) to this
         * SPU by mapping the virtual pages assigned to
         * the SPU memory-mapped I/O (MMIO) for problem
         * state. TBD.
         */
}

static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 74:
         * Reset the "context switch active" flag.
         */
        clear_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags);
        mb();
}

static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 75:
         * Re-enable SPU interrupts.
         */
        spin_lock_irq(&spu->register_lock);
        spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
        spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
        spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
        spin_unlock_irq(&spu->register_lock);
}
static int quiece_spu(struct spu_state *prev, struct spu *spu)
{
        /*
         * Combined steps 2-18 of SPU context save sequence, which
         * quiesce the SPU state (disable SPU execution, MFC command
         * queues, decrementer, SPU interrupts, etc.).
         *
         * Returns      0 on success.
         *              2 if failed step 2.
         *              6 if failed step 6.
         */

        if (check_spu_isolate(prev, spu)) {     /* Step 2.  */
                return 2;
        }
        disable_interrupts(prev, spu);          /* Step 3.  */
        set_watchdog_timer(prev, spu);          /* Step 4.  */
        inhibit_user_access(prev, spu);         /* Step 5.  */
        if (check_spu_isolate(prev, spu)) {     /* Step 6.  */
                return 6;
        }
        set_switch_pending(prev, spu);          /* Step 7.  */
        save_mfc_cntl(prev, spu);               /* Step 8.  */
        save_spu_runcntl(prev, spu);            /* Step 9.  */
        save_mfc_sr1(prev, spu);                /* Step 10. */
        save_spu_status(prev, spu);             /* Step 11. */
        save_mfc_decr(prev, spu);               /* Step 12. */
        halt_mfc_decr(prev, spu);               /* Step 13. */
        save_timebase(prev, spu);               /* Step 14. */
        remove_other_spu_access(prev, spu);     /* Step 15. */
        do_mfc_mssync(prev, spu);               /* Step 16. */
        issue_mfc_tlbie(prev, spu);             /* Step 17. */
        handle_pending_interrupts(prev, spu);   /* Step 18. */

        return 0;
}
static void save_csa(struct spu_state *prev, struct spu *spu)
{
        /*
         * Combine steps 19-45 of SPU context save sequence, which
         * save regions of the privileged & problem state areas.
         */

        save_mfc_queues(prev, spu);     /* Step 19. */
        save_ppu_querymask(prev, spu);  /* Step 20. */
        save_ppu_querytype(prev, spu);  /* Step 21. */
        save_mfc_csr_tsq(prev, spu);    /* Step 22. */
        save_mfc_csr_cmd(prev, spu);    /* Step 23. */
        save_mfc_csr_ato(prev, spu);    /* Step 24. */
        save_mfc_tclass_id(prev, spu);  /* Step 25. */
        set_mfc_tclass_id(prev, spu);   /* Step 26. */
        purge_mfc_queue(prev, spu);     /* Step 27. */
        wait_purge_complete(prev, spu); /* Step 28. */
        save_mfc_slbs(prev, spu);       /* Step 29. */
        setup_mfc_sr1(prev, spu);       /* Step 30. */
        save_spu_npc(prev, spu);        /* Step 31. */
        save_spu_privcntl(prev, spu);   /* Step 32. */
        reset_spu_privcntl(prev, spu);  /* Step 33. */
        save_spu_lslr(prev, spu);       /* Step 34. */
        reset_spu_lslr(prev, spu);      /* Step 35. */
        save_spu_cfg(prev, spu);        /* Step 36. */
        save_pm_trace(prev, spu);       /* Step 37. */
        save_mfc_rag(prev, spu);        /* Step 38. */
        save_ppu_mb_stat(prev, spu);    /* Step 39. */
        save_ppu_mb(prev, spu);         /* Step 40. */
        save_ppuint_mb(prev, spu);      /* Step 41. */
        save_ch_part1(prev, spu);       /* Step 42. */
        save_spu_mb(prev, spu);         /* Step 43. */
        save_mfc_cmd(prev, spu);        /* Step 44. */
        reset_ch(prev, spu);            /* Step 45. */
}

static void save_lscsa(struct spu_state *prev, struct spu *spu)
{
        /*
         * Perform steps 46-57 of SPU context save sequence,
         * which save regions of the local store and register
         * file.
         */

        resume_mfc_queue(prev, spu);    /* Step 46. */
        setup_mfc_slbs(prev, spu);      /* Step 47. */
        set_switch_active(prev, spu);   /* Step 48. */
        enable_interrupts(prev, spu);   /* Step 49. */
        save_ls_16kb(prev, spu);        /* Step 50. */
        set_spu_npc(prev, spu);         /* Step 51. */
        set_signot1(prev, spu);         /* Step 52. */
        set_signot2(prev, spu);         /* Step 53. */
        send_save_code(prev, spu);      /* Step 54. */
        set_ppu_querymask(prev, spu);   /* Step 55. */
        wait_tag_complete(prev, spu);   /* Step 56. */
        wait_spu_stopped(prev, spu);    /* Step 57. */
}
static void harvest(struct spu_state *prev, struct spu *spu)
{
        /*
         * Perform steps 2-25 of SPU context restore sequence,
         * which resets an SPU either after a failed save, or
         * when using an SPU for the first time.
         */

        disable_interrupts(prev, spu);          /* Step 2.  */
        inhibit_user_access(prev, spu);         /* Step 3.  */
        terminate_spu_app(prev, spu);           /* Step 4.  */
        set_switch_pending(prev, spu);          /* Step 5.  */
        remove_other_spu_access(prev, spu);     /* Step 6.  */
        suspend_mfc(prev, spu);                 /* Step 7.  */
        wait_suspend_mfc_complete(prev, spu);   /* Step 8.  */
        if (!suspend_spe(prev, spu))            /* Step 9.  */
                clear_spu_status(prev, spu);    /* Step 10. */
        do_mfc_mssync(prev, spu);               /* Step 11. */
        issue_mfc_tlbie(prev, spu);             /* Step 12. */
        handle_pending_interrupts(prev, spu);   /* Step 13. */
        purge_mfc_queue(prev, spu);             /* Step 14. */
        wait_purge_complete(prev, spu);         /* Step 15. */
        reset_spu_privcntl(prev, spu);          /* Step 16. */
        reset_spu_lslr(prev, spu);              /* Step 17. */
        setup_mfc_sr1(prev, spu);               /* Step 18. */
        invalidate_slbs(prev, spu);             /* Step 19. */
        reset_ch_part1(prev, spu);              /* Step 20. */
        reset_ch_part2(prev, spu);              /* Step 21. */
        enable_interrupts(prev, spu);           /* Step 22. */
        set_switch_active(prev, spu);           /* Step 23. */
        set_mfc_tclass_id(prev, spu);           /* Step 24. */
        resume_mfc_queue(prev, spu);            /* Step 25. */
}

static void restore_lscsa(struct spu_state *next, struct spu *spu)
{
        /*
         * Perform steps 26-40 of SPU context restore sequence,
         * which restores regions of the local store and register
         * file.
         */

        set_watchdog_timer(next, spu);          /* Step 26. */
        setup_spu_status_part1(next, spu);      /* Step 27. */
        setup_spu_status_part2(next, spu);      /* Step 28. */
        restore_mfc_rag(next, spu);             /* Step 29. */
        setup_mfc_slbs(next, spu);              /* Step 30. */
        set_spu_npc(next, spu);                 /* Step 31. */
        set_signot1(next, spu);                 /* Step 32. */
        set_signot2(next, spu);                 /* Step 33. */
        setup_decr(next, spu);                  /* Step 34. */
        setup_ppu_mb(next, spu);                /* Step 35. */
        setup_ppuint_mb(next, spu);             /* Step 36. */
        send_restore_code(next, spu);           /* Step 37. */
        set_ppu_querymask(next, spu);           /* Step 38. */
        wait_tag_complete(next, spu);           /* Step 39. */
        wait_spu_stopped(next, spu);            /* Step 40. */
}
static void restore_csa(struct spu_state *next, struct spu *spu)
{
        /*
         * Combine steps 41-76 of SPU context restore sequence, which
         * restore regions of the privileged & problem state areas.
         */

        restore_spu_privcntl(next, spu);        /* Step 41. */
        restore_status_part1(next, spu);        /* Step 42. */
        restore_status_part2(next, spu);        /* Step 43. */
        restore_ls_16kb(next, spu);             /* Step 44. */
        wait_tag_complete(next, spu);           /* Step 45. */
        suspend_mfc(next, spu);                 /* Step 46. */
        wait_suspend_mfc_complete(next, spu);   /* Step 47. */
        issue_mfc_tlbie(next, spu);             /* Step 48. */
        clear_interrupts(next, spu);            /* Step 49. */
        restore_mfc_queues(next, spu);          /* Step 50. */
        restore_ppu_querymask(next, spu);       /* Step 51. */
        restore_ppu_querytype(next, spu);       /* Step 52. */
        restore_mfc_csr_tsq(next, spu);         /* Step 53. */
        restore_mfc_csr_cmd(next, spu);         /* Step 54. */
        restore_mfc_csr_ato(next, spu);         /* Step 55. */
        restore_mfc_tclass_id(next, spu);       /* Step 56. */
        set_llr_event(next, spu);               /* Step 57. */
        restore_decr_wrapped(next, spu);        /* Step 58. */
        restore_ch_part1(next, spu);            /* Step 59. */
        restore_ch_part2(next, spu);            /* Step 60. */
        restore_spu_lslr(next, spu);            /* Step 61. */
        restore_spu_cfg(next, spu);             /* Step 62. */
        restore_pm_trace(next, spu);            /* Step 63. */
        restore_spu_npc(next, spu);             /* Step 64. */
        restore_spu_mb(next, spu);              /* Step 65. */
        check_ppu_mb_stat(next, spu);           /* Step 66. */
        check_ppuint_mb_stat(next, spu);        /* Step 67. */
        restore_mfc_slbs(next, spu);            /* Step 68. */
        restore_mfc_sr1(next, spu);             /* Step 69. */
        restore_other_spu_access(next, spu);    /* Step 70. */
        restore_spu_runcntl(next, spu);         /* Step 71. */
        restore_mfc_cntl(next, spu);            /* Step 72. */
        enable_user_access(next, spu);          /* Step 73. */
        reset_switch_active(next, spu);         /* Step 74. */
        reenable_interrupts(next, spu);         /* Step 75. */
}
static int __do_spu_save(struct spu_state *prev, struct spu *spu)
{
        int rc;

        /*
         * SPU context save can be broken into three phases:
         *
         *     (a) quiesce [steps 2-16].
         *     (b) save of CSA, performed by PPE [steps 17-43].
         *     (c) save of LSCSA, mostly performed by SPU [steps 44-53].
         *
         * Returns      0 on success.
         *              2,6 if failed to quiesce SPU.
         *              1 if the SPU-side of the save failed.
         */

        rc = quiece_spu(prev, spu);             /* Steps 2-16.  */
        switch (rc) {
        default:
        case 2:
        case 6:
                harvest(prev, spu);
                return rc;
                break;
        case 0:
                break;
        }
        save_csa(prev, spu);                    /* Steps 17-43. */
        save_lscsa(prev, spu);                  /* Steps 44-53. */
        return check_save_status(prev, spu);    /* Step 54.     */
}

static int __do_spu_restore(struct spu_state *next, struct spu *spu)
{
        int rc;

        /*
         * SPU context restore can be broken into three phases:
         *
         *    (a) harvest (or reset) SPU [steps 2-24].
         *    (b) restore LSCSA [steps 25-40], mostly performed by SPU.
         *    (c) restore CSA [steps 41-76], performed by PPE.
         *
         * The 'harvest' step is not performed here, but rather
         * as needed by the caller (see spu_restore() below).
         */

        restore_lscsa(next, spu);               /* Steps 25-39. */
        rc = check_restore_status(next, spu);   /* Step 40.     */
        switch (rc) {
        default:
                /* Failed. Return now. */
                return rc;
                break;
        case 0:
                /* Fall through to next step. */
                break;
        }
        restore_csa(next, spu);                 /* Steps 41-76. */

        return 0;
}
/**
 * spu_save - SPU context save, with locking.
 * @prev: pointer to SPU context save area, to be saved.
 * @spu: pointer to SPU iomem structure.
 *
 * Acquire locks, perform the save operation then return.
 */
int spu_save(struct spu_state *prev, struct spu *spu)
{
        int rc;

        acquire_spu_lock(spu);          /* Step 1.     */
        rc = __do_spu_save(prev, spu);  /* Steps 2-53. */
        release_spu_lock(spu);
        if (rc) {
                panic("%s failed on SPU[%d], rc=%d.\n",
                      __func__, spu->number, rc);
        }
        return rc;
}
EXPORT_SYMBOL_GPL(spu_save);
/**
 * spu_restore - SPU context restore, with harvest and locking.
 * @new: pointer to SPU context save area, to be restored.
 * @spu: pointer to SPU iomem structure.
 *
 * Perform harvest + restore, as we may not be coming
 * from a previous successful save operation, and the
 * hardware state is unknown.
 */
int spu_restore(struct spu_state *new, struct spu *spu)
{
        int rc;

        acquire_spu_lock(spu);
        harvest(NULL, spu);
        spu->stop_code = 0;
        spu->dar = 0;
        spu->dsisr = 0;
        spu->slb_replace = 0;
        spu->class_0_pending = 0;
        rc = __do_spu_restore(new, spu);
        release_spu_lock(spu);
        if (rc) {
                panic("%s failed on SPU[%d] rc=%d.\n",
                      __func__, spu->number, rc);
        }
        return rc;
}
EXPORT_SYMBOL_GPL(spu_restore);
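
/*
 * Illustrative caller sketch (hypothetical, not part of this file): a
 * scheduler that time-slices SPE contexts might pair these entry
 * points roughly as follows, where 'spu', 'old_csa' and 'new_csa' are
 * assumed names belonging to the caller:
 *
 *	spu_save(&old_csa, spu);	// preemptively save current context
 *	spu_restore(&new_csa, spu);	// harvest, then restore next context
 *
 * Because spu_restore() always harvests (resets) the SPU first, it is
 * safe to call even when the prior hardware state of 'spu' is unknown,
 * e.g. on the very first dispatch of a context.
 */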
/**
 * spu_harvest - SPU harvest (reset) operation
 * @spu: pointer to SPU iomem structure.
 *
 * Perform SPU harvest (reset) operation.
 */
void spu_harvest(struct spu *spu)
{
        acquire_spu_lock(spu);
        harvest(NULL, spu);
        release_spu_lock(spu);
}
static void init_prob(struct spu_state *csa)
{
        csa->spu_chnlcnt_RW[9] = 1;
        csa->spu_chnlcnt_RW[21] = 16;
        csa->spu_chnlcnt_RW[23] = 1;
        csa->spu_chnlcnt_RW[28] = 1;
        csa->spu_chnlcnt_RW[30] = 1;
        csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;
        csa->prob.mb_stat_R = 0x000400;
}
static void init_priv1(struct spu_state *csa)
{
        /* Enable decode, relocate, tlbie response, master runcntl. */
        csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |
            MFC_STATE1_MASTER_RUN_CONTROL_MASK |
            MFC_STATE1_PROBLEM_STATE_MASK |
            MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;

        /* Set storage description. */
        csa->priv1.mfc_sdr_RW = mfspr(SPRN_SDR1);

        /* Enable OS-specific set of interrupts. */
        csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
            CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
            CLASS0_ENABLE_SPU_ERROR_INTR;
        csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
            CLASS1_ENABLE_STORAGE_FAULT_INTR;
        csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |
            CLASS2_ENABLE_SPU_HALT_INTR |
            CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR;
}

static void init_priv2(struct spu_state *csa)
{
        csa->priv2.spu_lslr_RW = LS_ADDR_MASK;
        csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE |
            MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION |
            MFC_CNTL_DMA_QUEUES_EMPTY_MASK;
}
/**
 * spu_init_csa - allocate and initialize an SPU context save area.
 *
 * Allocate and initialize the contents of an SPU context save area.
 * This includes enabling address translation, interrupt masks, etc.,
 * as appropriate for the given OS environment.
 *
 * Note that storage for the 'lscsa' is allocated separately,
 * as it is by far the largest of the context save regions,
 * and may need to be pinned or otherwise specially aligned.
 */
void spu_init_csa(struct spu_state *csa)
{
        struct spu_lscsa *lscsa;
        unsigned char *p;

        if (!csa)
                return;
        memset(csa, 0, sizeof(struct spu_state));

        lscsa = vmalloc(sizeof(struct spu_lscsa));
        if (!lscsa)
                return;

        memset(lscsa, 0, sizeof(struct spu_lscsa));
        csa->lscsa = lscsa;
        csa->register_lock = SPIN_LOCK_UNLOCKED;

        /* Set LS pages reserved to allow for user-space mapping. */
        for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
                SetPageReserved(vmalloc_to_page(p));

        init_prob(csa);
        init_priv1(csa);
        init_priv2(csa);
}
EXPORT_SYMBOL_GPL(spu_init_csa);
void spu_fini_csa(struct spu_state *csa)
{
        /* Clear reserved bit before vfree. */
        unsigned char *p;

        for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
                ClearPageReserved(vmalloc_to_page(p));

        vfree(csa->lscsa);
}
EXPORT_SYMBOL_GPL(spu_fini_csa);
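
/*
 * Illustrative lifecycle sketch (hypothetical caller, not part of this
 * file): a context save area is initialized once, reused across any
 * number of save/restore cycles, and then torn down:
 *
 *	struct spu_state csa;
 *
 *	spu_init_csa(&csa);		// allocate and reserve csa.lscsa
 *	...
 *	spu_save(&csa, spu);		// preemptively save a context
 *	...
 *	spu_restore(&csa, spu);		// later, restore that context
 *	...
 *	spu_fini_csa(&csa);		// unreserve LS pages and vfree
 *
 * spu_init_csa() zeroes the whole spu_state before use, so callers need
 * not pre-initialize it.
 */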