/*
 * spu_switch.c
 *
 * (C) Copyright IBM Corp. 2005
 *
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * Host-side part of SPU context switch sequence outlined in
 * Synergistic Processor Element, Book IV.
 *
 * A fully preemptive switch of an SPE is very expensive in terms
 * of time and system resources.  SPE Book IV indicates that SPE
 * allocation should follow a "serially reusable device" model,
 * in which the SPE is assigned a task until it completes.  When
 * this is not possible, this sequence may be used to preemptively
 * save, and then later (optionally) restore the context of a
 * program executing on an SPE.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>

#include "spufs.h"

#include "spu_save_dump.h"
#include "spu_restore_dump.h"
#define RELAX_SPIN_COUNT        1000
#define POLL_WHILE_TRUE(_c) {                                   \
        do {                                                    \
                int _i;                                         \
                for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \
                        cpu_relax();                            \
                }                                               \
                if (unlikely(_c)) yield();                      \
        } while (_c);                                           \
  }

#define POLL_WHILE_FALSE(_c)    POLL_WHILE_TRUE(!(_c))
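/*
 * The poll macros busy-wait on a hardware condition: each pass spins up
 * to RELAX_SPIN_COUNT iterations, calling cpu_relax() between reads, and
 * then yield()s if the condition still holds, so that a slow SPU status
 * transition does not monopolize the CPU.  Typical use against an MMIO
 * status register:
 *
 *      POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
 */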
static inline void acquire_spu_lock(struct spu *spu)
{
        /* Save, Step 1:
         * Restore, Step 1:
         *     Acquire SPU-specific mutual exclusion lock.
         *     TBD.
         */
}

static inline void release_spu_lock(struct spu *spu)
{
        /* Restore, Step 76:
         *     Release SPU-specific mutual exclusion lock.
         *     TBD.
         */
}

static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 isolate_state;

        /* Save, Step 2:
         * Save, Step 6:
         *     If any field of SPU_Status[E,L,IS] is '1', the
         *     SPU is in isolate state and cannot be context
         *     saved at this time.
         */
        isolate_state = SPU_STATUS_ISOLATED_STATE |
            SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS;
        return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
}
static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 3:
         * Restore, Step 2:
         *     Save INT_Mask_class0 in CSA.
         *     Write INT_MASK_class0 with value of 0.
         *     Save INT_Mask_class1 in CSA.
         *     Write INT_MASK_class1 with value of 0.
         *     Save INT_Mask_class2 in CSA.
         *     Write INT_MASK_class2 with value of 0.
         *     Synchronize all three interrupts to be sure
         *     we no longer execute a handler on another CPU.
         */
        spin_lock_irq(&spu->register_lock);
        if (csa) {
                csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
                csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
                csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
        }
        spu_int_mask_set(spu, 0, 0ul);
        spu_int_mask_set(spu, 1, 0ul);
        spu_int_mask_set(spu, 2, 0ul);
        eieio();
        spin_unlock_irq(&spu->register_lock);
        synchronize_irq(spu->irqs[0]);
        synchronize_irq(spu->irqs[1]);
        synchronize_irq(spu->irqs[2]);
}
static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 4:
         * Restore, Step 25.
         *    Set a software watchdog timer, which specifies the
         *    maximum allowable time for a context save sequence.
         *
         *    For the present, this implementation will not set a global
         *    watchdog timer, as virtualization & variable system load
         *    may cause unpredictable execution times.
         */
}

static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 5:
         * Restore, Step 3:
         *     Inhibit user-space access (if provided) to this
         *     SPU by unmapping the virtual pages assigned to
         *     the SPU memory-mapped I/O (MMIO) for problem
         *     state.  TBD.
         */
}

static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 7:
         * Restore, Step 5:
         *     Set a software context switch pending flag.
         */
        set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
        mb();
}
static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 8:
         *     Suspend DMA and save MFC_CNTL.
         */
        switch (in_be64(&priv2->mfc_control_RW) &
                MFC_CNTL_SUSPEND_DMA_STATUS_MASK) {
        case MFC_CNTL_SUSPEND_IN_PROGRESS:
                POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
                                  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
                                 MFC_CNTL_SUSPEND_COMPLETE);
                /* fall through */
        case MFC_CNTL_SUSPEND_COMPLETE:
                if (csa)
                        csa->priv2.mfc_control_RW =
                                in_be64(&priv2->mfc_control_RW) |
                                MFC_CNTL_SUSPEND_DMA_QUEUE;
                break;
        case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
                out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
                POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
                                  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
                                 MFC_CNTL_SUSPEND_COMPLETE);
                if (csa)
                        csa->priv2.mfc_control_RW =
                                in_be64(&priv2->mfc_control_RW) &
                                ~MFC_CNTL_SUSPEND_DMA_QUEUE &
                                ~MFC_CNTL_SUSPEND_MASK;
                break;
        }
}
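/*
 * Note on the switch above: the three cases cover every MFC suspend
 * state.  A suspend already in progress is waited out and then treated
 * as "complete"; only a queue in normal operation needs the explicit
 * MFC_CNTL_SUSPEND_DMA_QUEUE write.  In each case the MFC_CNTL value
 * saved in the CSA records whether the queue was suspended, which
 * restore_mfc_cntl() later uses to decide whether to restart DMA.
 */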
static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 9:
         *     Save SPU_Runcntl in the CSA.  This value contains
         *     the "Application Desired State".
         */
        csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW);
}

static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 10:
         *     Save MFC_SR1 in the CSA.
         */
        csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);
}
static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 11:
         *     Read SPU_Status[R], and save to CSA.
         */
        if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) {
                csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
        } else {
                u32 stopped;

                out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
                eieio();
                POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                SPU_STATUS_RUNNING);
                stopped =
                    SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
                    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
                if ((in_be32(&prob->spu_status_R) & stopped) == 0)
                        csa->prob.spu_status_R = SPU_STATUS_RUNNING;
                else
                        csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
        }
}
static inline void save_mfc_stopped_status(struct spu_state *csa,
                                           struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        const u64 mask = MFC_CNTL_DECREMENTER_RUNNING |
                         MFC_CNTL_DMA_QUEUES_EMPTY;

        /* Save, Step 12:
         *     Read MFC_CNTL[Ds].  Update saved copy of
         *     CSA.MFC_CNTL[Ds].
         *
         * update: do the same with MFC_CNTL[Q].
         */
        csa->priv2.mfc_control_RW &= ~mask;
        csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask;
}

static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 13:
         *     Write MFC_CNTL[Dh] set to a '1' to halt
         *     the decrementer.
         */
        out_be64(&priv2->mfc_control_RW,
                 MFC_CNTL_DECREMENTER_HALTED | MFC_CNTL_SUSPEND_MASK);
        eieio();
}
static inline void save_timebase(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 14:
         *    Read PPE Timebase High and Timebase low registers
         *    and save in CSA.  TBD.
         */
        csa->suspend_time = get_cycles();
}

static inline void remove_other_spu_access(struct spu_state *csa,
                                           struct spu *spu)
{
        /* Save, Step 15:
         *    Remove other SPU access to this SPU by unmapping
         *    this SPU's pages from their address space.  TBD.
         */
}

static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 16:
         * Restore, Step 11.
         *     Write SPU_MSSync register.  Poll SPU_MSSync[P]
         *     for a value of 0.
         */
        out_be64(&prob->spc_mssync_RW, 1UL);
        POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING);
}
static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 17:
         * Restore, Step 12.
         * Restore, Step 48.
         *     Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register.
         *     Then issue a PPE sync instruction.
         */
        spu_tlb_invalidate(spu);
        mb();
}

static inline void handle_pending_interrupts(struct spu_state *csa,
                                             struct spu *spu)
{
        /* Save, Step 18:
         *     Handle any pending interrupts from this SPU
         *     here.  This is OS or hypervisor specific.  One
         *     option is to re-enable interrupts to handle any
         *     pending interrupts, with the interrupt handlers
         *     recognizing the software Context Switch Pending
         *     flag, to ensure the SPU execution or MFC command
         *     queue is not restarted.  TBD.
         */
}
static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        int i;

        /* Save, Step 19:
         *     If MFC_Cntl[Se]=0 then save
         *     MFC command queues.
         */
        if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) {
                for (i = 0; i < 8; i++) {
                        csa->priv2.puq[i].mfc_cq_data0_RW =
                            in_be64(&priv2->puq[i].mfc_cq_data0_RW);
                        csa->priv2.puq[i].mfc_cq_data1_RW =
                            in_be64(&priv2->puq[i].mfc_cq_data1_RW);
                        csa->priv2.puq[i].mfc_cq_data2_RW =
                            in_be64(&priv2->puq[i].mfc_cq_data2_RW);
                        csa->priv2.puq[i].mfc_cq_data3_RW =
                            in_be64(&priv2->puq[i].mfc_cq_data3_RW);
                }
                for (i = 0; i < 16; i++) {
                        csa->priv2.spuq[i].mfc_cq_data0_RW =
                            in_be64(&priv2->spuq[i].mfc_cq_data0_RW);
                        csa->priv2.spuq[i].mfc_cq_data1_RW =
                            in_be64(&priv2->spuq[i].mfc_cq_data1_RW);
                        csa->priv2.spuq[i].mfc_cq_data2_RW =
                            in_be64(&priv2->spuq[i].mfc_cq_data2_RW);
                        csa->priv2.spuq[i].mfc_cq_data3_RW =
                            in_be64(&priv2->spuq[i].mfc_cq_data3_RW);
                }
        }
}
static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 20:
         *     Save the PPU_QueryMask register
         *     in the CSA.
         */
        csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW);
}

static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 21:
         *     Save the PPU_QueryType register
         *     in the CSA.
         */
        csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
}

static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save the Prxy_TagStatus register in the CSA.
         *
         * It is unnecessary to restore dma_tagstatus_R, however,
         * dma_tagstatus_R in the CSA is accessed via backing_ops, so
         * we must save it.
         */
        csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R);
}
static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 22:
         *     Save the MFC_CSR_TSQ register
         *     in the CSA.
         */
        csa->priv2.spu_tag_status_query_RW =
            in_be64(&priv2->spu_tag_status_query_RW);
}

static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 23:
         *     Save the MFC_CSR_CMD1 and MFC_CSR_CMD2
         *     registers in the CSA.
         */
        csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW);
        csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW);
}

static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 24:
         *     Save the MFC_CSR_ATO register in
         *     the CSA.
         */
        csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW);
}

static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 25:
         *     Save the MFC_TCLASS_ID register in
         *     the CSA.
         */
        csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);
}

static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 26:
         * Restore, Step 24.
         *     Write the MFC_TCLASS_ID register with
         *     the value 0x10000000.
         */
        spu_mfc_tclass_id_set(spu, 0x10000000);
        eieio();
}
static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 27:
         * Restore, Step 14.
         *     Write MFC_CNTL[Pc]=1 (purge queue).
         */
        out_be64(&priv2->mfc_control_RW,
                 MFC_CNTL_PURGE_DMA_REQUEST | MFC_CNTL_SUSPEND_MASK);
        eieio();
}

static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 28:
         * Restore, Step 15.
         *     Poll MFC_CNTL[Ps] until value '11' is read
         *     (purge complete).
         */
        POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
                          MFC_CNTL_PURGE_DMA_STATUS_MASK) ==
                         MFC_CNTL_PURGE_DMA_COMPLETE);
}
static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 30:
         * Restore, Step 18:
         *     Write MFC_SR1 with MFC_SR1[D=0,S=1] and
         *     MFC_SR1[TL,R,Pr,T] set correctly for the
         *     OS specific environment.
         *
         *     Implementation note: The SPU-side code
         *     for save/restore is privileged, so the
         *     MFC_SR1[Pr] bit is not set.
         */
        spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
                              MFC_STATE1_RELOCATE_MASK |
                              MFC_STATE1_BUS_TLBIE_MASK));
}

static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 31:
         *     Save SPU_NPC in the CSA.
         */
        csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);
}

static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 32:
         *     Save SPU_PrivCntl in the CSA.
         */
        csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);
}
static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 33:
         * Restore, Step 16:
         *     Write SPU_PrivCntl[S,Le,A] fields reset to 0.
         */
        out_be64(&priv2->spu_privcntl_RW, 0UL);
        eieio();
}

static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 34:
         *     Save SPU_LSLR in the CSA.
         */
        csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);
}

static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 35:
         * Restore, Step 17.
         *     Reset SPU_LSLR.
         */
        out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);
        eieio();
}

static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 36:
         *     Save SPU_Cfg in the CSA.
         */
        csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);
}
static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 37:
         *     Save PM_Trace_Tag_Wait_Mask in the CSA.
         *     Not performed by this implementation.
         */
}

static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 38:
         *     Save RA_GROUP_ID register and the
         *     RA_ENABLE register in the CSA.
         */
        csa->priv1.resource_allocation_groupID_RW =
            spu_resource_allocation_groupID_get(spu);
        csa->priv1.resource_allocation_enable_RW =
            spu_resource_allocation_enable_get(spu);
}

static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 39:
         *     Save MB_Stat register in the CSA.
         */
        csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);
}

static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 40:
         *     Save the PPU_MB register in the CSA.
         */
        csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);
}

static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 41:
         *     Save the PPUINT_MB register in the CSA.
         */
        csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);
}
static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
        int i;

        /* Save, Step 42:
         */

        /* Save CH 1, without channel count */
        out_be64(&priv2->spu_chnlcntptr_RW, 1);
        csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW);

        /* Save the following CH: [0,3,4,24,25,27] */
        for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
                idx = ch_indices[i];
                out_be64(&priv2->spu_chnlcntptr_RW, idx);
                eieio();
                csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);
                csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);
                out_be64(&priv2->spu_chnldata_RW, 0UL);
                out_be64(&priv2->spu_chnlcnt_RW, 0UL);
                eieio();
        }
}
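/*
 * Channel state is accessed indirectly: a channel index is first
 * written to SPU_ChnlCntPtr (spu_chnlcntptr_RW), after which reads and
 * writes of spu_chnldata_RW and spu_chnlcnt_RW operate on that
 * channel's data and count.  All of the channel save/reset/restore
 * helpers in this file use this idiom.
 */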
static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        int i;

        /* Save, Step 43:
         *     Save SPU Read Mailbox Channel.
         */
        out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
        eieio();
        csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
        for (i = 0; i < 4; i++) {
                csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
        }
        out_be64(&priv2->spu_chnlcnt_RW, 0UL);
        eieio();
}

static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 44:
         *     Save MFC_CMD Channel.
         */
        out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
        eieio();
        csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
        eieio();
}
static inline void reset_ch(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
        u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
        u64 idx;
        int i;

        /* Save, Step 45:
         *     Reset the following CH: [21, 23, 28, 30]
         */
        for (i = 0; i < 4; i++) {
                idx = ch_indices[i];
                out_be64(&priv2->spu_chnlcntptr_RW, idx);
                eieio();
                out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
                eieio();
        }
}

static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Save, Step 46:
         * Restore, Step 25.
         *     Write MFC_CNTL[Sc]=0 (resume queue processing).
         */
        out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
}
static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu,
                                  unsigned int *code, int code_size)
{
        /* Save, Step 47:
         * Restore, Step 30.
         *     If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
         *     register, then initialize SLB_VSID and SLB_ESID
         *     to provide access to SPU context save code and
         *     LSCSA.
         *
         *     This implementation places both the context
         *     switch code and LSCSA in kernel address space.
         *
         *     Further, this implementation assumes that the
         *     MFC_SR1[R]=1 (in other words, assume that
         *     translation is desired by OS environment).
         */
        spu_invalidate_slbs(spu);
        spu_setup_kernel_slbs(spu, csa->lscsa, code, code_size);
}

static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
{
        /* Save, Step 48:
         * Restore, Step 23.
         *     Change the software context switch pending flag
         *     to context switch active.
         *
         *     This implementation does not use a switch active flag.
         */
        clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
        mb();
}
static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
{
        unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
            CLASS1_ENABLE_STORAGE_FAULT_INTR;

        /* Save, Step 49:
         * Restore, Step 22:
         *     Reset and then enable interrupts, as
         *     needed by this implementation.
         *
         *     This implementation enables only class1
         *     (translation) interrupts.
         */
        spin_lock_irq(&spu->register_lock);
        spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
        spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
        spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
        spu_int_mask_set(spu, 0, 0ul);
        spu_int_mask_set(spu, 1, class1_mask);
        spu_int_mask_set(spu, 2, 0ul);
        spin_unlock_irq(&spu->register_lock);
}
static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
                               unsigned int ls_offset, unsigned int size,
                               unsigned int tag, unsigned int rclass,
                               unsigned int cmd)
{
        struct spu_problem __iomem *prob = spu->problem;
        union mfc_tag_size_class_cmd command;
        unsigned int transfer_size;
        volatile unsigned int status = 0x0;

        while (size > 0) {
                transfer_size =
                    (size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;
                command.u.mfc_size = transfer_size;
                command.u.mfc_tag = tag;
                command.u.mfc_rclassid = rclass;
                command.u.mfc_cmd = cmd;
                do {
                        out_be32(&prob->mfc_lsa_W, ls_offset);
                        out_be64(&prob->mfc_ea_W, ea);
                        out_be64(&prob->mfc_union_W.all64, command.all64);
                        status =
                            in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
                        if (unlikely(status & 0x2)) {
                                cpu_relax();
                        }
                } while (status & 0x3);
                size -= transfer_size;
                ea += transfer_size;
                ls_offset += transfer_size;
        }
        return 0;
}
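/*
 * send_mfc_dma() issues the transfer in MFC_MAX_DMA_SIZE chunks,
 * retrying each enqueue while the command status reports busy (low two
 * status bits set).  The helpers below use it for all four bulk moves:
 * saving and restoring the first 16kb of local storage, and downloading
 * the SPU-side save and restore programs.
 */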
static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
{
        unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
        unsigned int ls_offset = 0x0;
        unsigned int size = 16384;
        unsigned int tag = 0;
        unsigned int rclass = 0;
        unsigned int cmd = MFC_PUT_CMD;

        /* Save, Step 50:
         *     Issue a DMA command to copy the first 16K bytes
         *     of local storage to the CSA.
         */
        send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 51:
         * Restore, Step 31.
         *     Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry
         *     point address of context save code in local
         *     storage.
         *
         *     This implementation uses SPU-side save/restore
         *     programs with entry points at LSA of 0.
         */
        out_be32(&prob->spu_npc_RW, 0);
        eieio();
}
static inline void set_signot1(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        union {
                u64 ull;
                u32 ui[2];
        } addr64;

        /* Save, Step 52:
         * Restore, Step 32:
         *    Write SPU_Sig_Notify_1 register with upper 32-bits
         *    of the CSA.LSCSA effective address.
         */
        addr64.ull = (u64) csa->lscsa;
        out_be32(&prob->signal_notify1, addr64.ui[0]);
}

static inline void set_signot2(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        union {
                u64 ull;
                u32 ui[2];
        } addr64;

        /* Save, Step 53:
         * Restore, Step 33:
         *    Write SPU_Sig_Notify_2 register with lower 32-bits
         *    of the CSA.LSCSA effective address.
         */
        addr64.ull = (u64) csa->lscsa;
        out_be32(&prob->signal_notify2, addr64.ui[1]);
}
static inline void send_save_code(struct spu_state *csa, struct spu *spu)
{
        unsigned long addr = (unsigned long)&spu_save_code[0];
        unsigned int ls_offset = 0x0;
        unsigned int size = sizeof(spu_save_code);
        unsigned int tag = 0;
        unsigned int rclass = 0;
        unsigned int cmd = MFC_GETFS_CMD;

        /* Save, Step 54:
         *     Issue a DMA command to copy context save code
         *     to local storage and start SPU.
         */
        send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Save, Step 55:
         * Restore, Step 38.
         *     Write PPU_QueryMask=1 (enable Tag Group 0)
         *     and issue eieio instruction.
         */
        out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));
        eieio();
}
static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 mask = MFC_TAGID_TO_TAGMASK(0);
        unsigned long flags;

        /* Save, Step 56:
         * Restore, Step 39.
         * Restore, Step 45.
         *     Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete)
         *     or write PPU_QueryType[TS]=01 and wait for Tag Group
         *     Complete Interrupt.  Write INT_Stat_Class0 or
         *     INT_Stat_Class2 with value of 'handled'.
         */
        POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);

        local_irq_save(flags);
        spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
        spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
        local_irq_restore(flags);
}

static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        unsigned long flags;

        /* Save, Step 57:
         * Restore, Step 40.
         *     Poll until SPU_Status[R]=0 or wait for SPU Class 0
         *     or SPU Class 2 interrupt.  Write INT_Stat_class0
         *     or INT_Stat_class2 with value of handled.
         */
        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

        local_irq_save(flags);
        spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
        spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
        local_irq_restore(flags);
}
static inline int check_save_status(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 complete;

        /* Save, Step 54:
         *     If SPU_Status[P]=1 and SPU_Status[SC] = "success",
         *     context save succeeded, otherwise context save
         *     failed.
         */
        complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
                    SPU_STATUS_STOPPED_BY_STOP);
        return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}
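/*
 * The "complete" value tested above is the exact status word expected
 * after a clean save: SPU_Status[P] set (stopped by stop-and-signal)
 * with the SPU_SAVE_COMPLETE stop code in SPU_Status[SC].  Any other
 * value (still running, halted, invalid instruction, or a different
 * stop code) is reported as a failure.
 */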
static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 4:
         *    If required, notify the "using application" that
         *    the SPU task has been terminated.  TBD.
         */
}
static inline void suspend_mfc_and_halt_decr(struct spu_state *csa,
                                             struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 7:
         *     Write MFC_Cntl[Dh,Sc,Sm]='1','1','0' to suspend
         *     the queue and halt the decrementer.
         */
        out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
                 MFC_CNTL_DECREMENTER_HALTED);
        eieio();
}

static inline void wait_suspend_mfc_complete(struct spu_state *csa,
                                             struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 8:
         * Restore, Step 47.
         *     Poll MFC_CNTL[Ss] until 11 is returned.
         */
        POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
                          MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
                         MFC_CNTL_SUSPEND_COMPLETE);
}
static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Restore, Step 9:
         *    If SPU_Status[R]=1, stop SPU execution
         *    and wait for stop to complete.
         *
         *    Returns       1 if SPU_Status[R]=1 on entry.
         *                  0 otherwise.
         */
        if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
                if (in_be32(&prob->spu_status_R) &
                    SPU_STATUS_ISOLATED_EXIT_STATUS) {
                        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                        SPU_STATUS_RUNNING);
                }
                if ((in_be32(&prob->spu_status_R) &
                     SPU_STATUS_ISOLATED_LOAD_STATUS)
                    || (in_be32(&prob->spu_status_R) &
                        SPU_STATUS_ISOLATED_STATE)) {
                        out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
                        eieio();
                        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                        SPU_STATUS_RUNNING);
                        out_be32(&prob->spu_runcntl_RW, 0x2);
                        eieio();
                        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                        SPU_STATUS_RUNNING);
                }
                if (in_be32(&prob->spu_status_R) &
                    SPU_STATUS_WAITING_FOR_CHANNEL) {
                        out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
                        eieio();
                        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                        SPU_STATUS_RUNNING);
                }
                return 1;
        }
        return 0;
}
static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Restore, Step 10:
         *    If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,
         *    release SPU from isolate state.
         */
        if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
                if (in_be32(&prob->spu_status_R) &
                    SPU_STATUS_ISOLATED_EXIT_STATUS) {
                        spu_mfc_sr1_set(spu,
                                        MFC_STATE1_MASTER_RUN_CONTROL_MASK);
                        eieio();
                        out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
                        eieio();
                        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                        SPU_STATUS_RUNNING);
                }
                if ((in_be32(&prob->spu_status_R) &
                     SPU_STATUS_ISOLATED_LOAD_STATUS)
                    || (in_be32(&prob->spu_status_R) &
                        SPU_STATUS_ISOLATED_STATE)) {
                        spu_mfc_sr1_set(spu,
                                        MFC_STATE1_MASTER_RUN_CONTROL_MASK);
                        eieio();
                        out_be32(&prob->spu_runcntl_RW, 0x2);
                        eieio();
                        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                        SPU_STATUS_RUNNING);
                }
        }
}
static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
        u64 idx;
        int i;

        /* Restore, Step 20:
         */

        /* Reset CH 1 */
        out_be64(&priv2->spu_chnlcntptr_RW, 1);
        out_be64(&priv2->spu_chnldata_RW, 0UL);

        /* Reset the following CH: [0,3,4,24,25,27] */
        for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
                idx = ch_indices[i];
                out_be64(&priv2->spu_chnlcntptr_RW, idx);
                eieio();
                out_be64(&priv2->spu_chnldata_RW, 0UL);
                out_be64(&priv2->spu_chnlcnt_RW, 0UL);
                eieio();
        }
}

static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };
        u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };
        u64 idx;
        int i;

        /* Restore, Step 21:
         *     Reset the following CH: [21, 23, 28, 29, 30]
         */
        for (i = 0; i < 5; i++) {
                idx = ch_indices[i];
                out_be64(&priv2->spu_chnlcntptr_RW, idx);
                eieio();
                out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
                eieio();
        }
}
static inline void setup_spu_status_part1(struct spu_state *csa,
                                          struct spu *spu)
{
        u32 status_P = SPU_STATUS_STOPPED_BY_STOP;
        u32 status_I = SPU_STATUS_INVALID_INSTR;
        u32 status_H = SPU_STATUS_STOPPED_BY_HALT;
        u32 status_S = SPU_STATUS_SINGLE_STEP;
        u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;
        u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;
        u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
        u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR;
        u32 status_code;

        /* Restore, Step 27:
         *     If the CSA.SPU_Status[I,S,H,P]=1 then add the correct
         *     instruction sequence to the end of the SPU based restore
         *     code (after the "context restored" stop and signal) to
         *     restore the correct SPU status.
         *
         *     NOTE: Rather than modifying the SPU executable, we
         *     instead add a new 'stopped_status' field to the
         *     LSCSA.  The SPU-side restore reads this field and
         *     takes the appropriate action when exiting.
         */
        status_code =
            (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;
        if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {
                /* SPU_Status[P,I]=1 - Illegal Instruction followed
                 * by Stop and Signal instruction, followed by 'br -4'.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;
                csa->lscsa->stopped_status.slot[1] = status_code;
        } else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {
                /* SPU_Status[P,H]=1 - Halt Conditional, followed
                 * by Stop and Signal instruction, followed by 'br -4'.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;
                csa->lscsa->stopped_status.slot[1] = status_code;
        } else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {
                /* SPU_Status[S,P]=1 - Stop and Signal instruction
                 * followed by 'br -4'.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;
                csa->lscsa->stopped_status.slot[1] = status_code;
        } else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {
                /* SPU_Status[S,I]=1 - Illegal instruction followed
                 * by 'br -4'.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;
                csa->lscsa->stopped_status.slot[1] = status_code;
        } else if ((csa->prob.spu_status_R & status_P) == status_P) {
                /* SPU_Status[P]=1 - Stop and Signal instruction
                 * followed by 'br -4'.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
                csa->lscsa->stopped_status.slot[1] = status_code;
        } else if ((csa->prob.spu_status_R & status_H) == status_H) {
                /* SPU_Status[H]=1 - Halt Conditional, followed
                 * by 'br -4'.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;
        } else if ((csa->prob.spu_status_R & status_S) == status_S) {
                /* SPU_Status[S]=1 - Two nop instructions.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;
        } else if ((csa->prob.spu_status_R & status_I) == status_I) {
                /* SPU_Status[I]=1 - Illegal instruction followed
                 * by 'br -4'.
                 */
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;
        }
}
static inline void setup_spu_status_part2(struct spu_state *csa,
                                          struct spu *spu)
{
        u32 mask;

        /* Restore, Step 28:
         *     If the CSA.SPU_Status[I,S,H,P,R]=0 then
         *     add a 'br *' instruction to the end of
         *     the SPU based restore code.
         *
         *     NOTE: Rather than modifying the SPU executable, we
         *     instead add a new 'stopped_status' field to the
         *     LSCSA.  The SPU-side restore reads this field and
         *     takes the appropriate action when exiting.
         */
        mask = SPU_STATUS_INVALID_INSTR |
            SPU_STATUS_SINGLE_STEP |
            SPU_STATUS_STOPPED_BY_HALT |
            SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
        if (!(csa->prob.spu_status_R & mask)) {
                csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;
        }
}
static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 29:
         *     Restore RA_GROUP_ID register and the
         *     RA_ENABLE register from the CSA.
         */
        spu_resource_allocation_groupID_set(spu,
                        csa->priv1.resource_allocation_groupID_RW);
        spu_resource_allocation_enable_set(spu,
                        csa->priv1.resource_allocation_enable_RW);
}

static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
{
        unsigned long addr = (unsigned long)&spu_restore_code[0];
        unsigned int ls_offset = 0x0;
        unsigned int size = sizeof(spu_restore_code);
        unsigned int tag = 0;
        unsigned int rclass = 0;
        unsigned int cmd = MFC_GETFS_CMD;

        /* Restore, Step 37:
         *     Issue MFC DMA command to copy context
         *     restore code to local storage.
         */
        send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}
static inline void setup_decr(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 34:
         *    If CSA.MFC_CNTL[Ds]=1 (decrementer was
         *    running) then adjust decrementer, set
         *    decrementer running status in LSCSA,
         *    and set decrementer "wrapped" status
         *    in LSCSA.
         */
        if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {
                cycles_t resume_time = get_cycles();
                cycles_t delta_time = resume_time - csa->suspend_time;

                csa->lscsa->decr_status.slot[0] = SPU_DECR_STATUS_RUNNING;
                if (csa->lscsa->decr.slot[0] < delta_time) {
                        csa->lscsa->decr_status.slot[0] |=
                                 SPU_DECR_STATUS_WRAPPED;
                }

                csa->lscsa->decr.slot[0] -= delta_time;
        } else {
                csa->lscsa->decr_status.slot[0] = 0;
        }
}
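/*
 * The adjustment above charges the time spent switched out against the
 * saved decrementer: delta_time is in the same timebase units recorded
 * by save_timebase(), and a wrap is flagged when the saved count is
 * smaller than the elapsed delta, so the SPU-side restore can raise the
 * corresponding timer event (see restore_decr_wrapped()).
 */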
static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 35:
         *     Copy the CSA.PU_MB data into the LSCSA.
         */
        csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R;
}

static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 36:
         *     Copy the CSA.PUINT_MB data into the LSCSA.
         */
        csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R;
}

static inline int check_restore_status(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 complete;

        /* Restore, Step 40:
         *     If SPU_Status[P]=1 and SPU_Status[SC] = "success",
         *     context restore succeeded, otherwise context restore
         *     failed.
         */
        complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
                    SPU_STATUS_STOPPED_BY_STOP);
        return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 41:
         *     Restore SPU_PrivCntl from the CSA.
         */
        out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW);
        eieio();
}
static inline void restore_status_part1(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 mask;

        /* Restore, Step 42:
         *     If any CSA.SPU_Status[I,S,H,P]=1, then
         *     restore the error or single step state.
         */
        mask = SPU_STATUS_INVALID_INSTR |
            SPU_STATUS_SINGLE_STEP |
            SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
        if (csa->prob.spu_status_R & mask) {
                out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
                eieio();
                POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                SPU_STATUS_RUNNING);
        }
}

static inline void restore_status_part2(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 mask;

        /* Restore, Step 43:
         *     If all CSA.SPU_Status[I,S,H,P,R]=0 then write
         *     SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1,
         *     then write '00' to SPU_RunCntl[R0R1] and wait
         *     for SPU_Status[R]=0.
         */
        mask = SPU_STATUS_INVALID_INSTR |
            SPU_STATUS_SINGLE_STEP |
            SPU_STATUS_STOPPED_BY_HALT |
            SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
        if (!(csa->prob.spu_status_R & mask)) {
                out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
                eieio();
                POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) &
                                 SPU_STATUS_RUNNING);
                out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
                eieio();
                POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                SPU_STATUS_RUNNING);
        }
}
static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
{
        unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
        unsigned int ls_offset = 0x0;
        unsigned int size = 16384;
        unsigned int tag = 0;
        unsigned int rclass = 0;
        unsigned int cmd = MFC_GET_CMD;

        /* Restore, Step 44:
         *     Issue a DMA command to restore the first
         *     16kb of local storage from CSA.
         */
        send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 47.
         *     Write MFC_Cntl[Sc,Sm]='1','0' to suspend
         *     the MFC command queues.
         */
        out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
        eieio();
}
static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 49:
         *     Write INT_MASK_class0 with value of 0.
         *     Write INT_MASK_class1 with value of 0.
         *     Write INT_MASK_class2 with value of 0.
         *     Write INT_STAT_class0 with value of -1.
         *     Write INT_STAT_class1 with value of -1.
         *     Write INT_STAT_class2 with value of -1.
         */
        spin_lock_irq(&spu->register_lock);
        spu_int_mask_set(spu, 0, 0ul);
        spu_int_mask_set(spu, 1, 0ul);
        spu_int_mask_set(spu, 2, 0ul);
        spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
        spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
        spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
        spin_unlock_irq(&spu->register_lock);
}
static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        int i;

        /* Restore, Step 50:
         *     If MFC_Cntl[Se]!=0 then restore
         *     MFC command queues.
         */
        if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) {
                for (i = 0; i < 8; i++) {
                        out_be64(&priv2->puq[i].mfc_cq_data0_RW,
                                 csa->priv2.puq[i].mfc_cq_data0_RW);
                        out_be64(&priv2->puq[i].mfc_cq_data1_RW,
                                 csa->priv2.puq[i].mfc_cq_data1_RW);
                        out_be64(&priv2->puq[i].mfc_cq_data2_RW,
                                 csa->priv2.puq[i].mfc_cq_data2_RW);
                        out_be64(&priv2->puq[i].mfc_cq_data3_RW,
                                 csa->priv2.puq[i].mfc_cq_data3_RW);
                }
                for (i = 0; i < 16; i++) {
                        out_be64(&priv2->spuq[i].mfc_cq_data0_RW,
                                 csa->priv2.spuq[i].mfc_cq_data0_RW);
                        out_be64(&priv2->spuq[i].mfc_cq_data1_RW,
                                 csa->priv2.spuq[i].mfc_cq_data1_RW);
                        out_be64(&priv2->spuq[i].mfc_cq_data2_RW,
                                 csa->priv2.spuq[i].mfc_cq_data2_RW);
                        out_be64(&priv2->spuq[i].mfc_cq_data3_RW,
                                 csa->priv2.spuq[i].mfc_cq_data3_RW);
                }
        }
        eieio();
}
static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Restore, Step 51:
         *     Restore the PPU_QueryMask register from CSA.
         */
        out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW);
        eieio();
}

static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Restore, Step 52:
         *     Restore the PPU_QueryType register from CSA.
         */
        out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW);
        eieio();
}

static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 53:
         *     Restore the MFC_CSR_TSQ register from CSA.
         */
        out_be64(&priv2->spu_tag_status_query_RW,
                 csa->priv2.spu_tag_status_query_RW);
        eieio();
}

static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 54:
         *     Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2
         *     registers from CSA.
         */
        out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW);
        out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW);
        eieio();
}

static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 55:
         *     Restore the MFC_CSR_ATO register from CSA.
         */
        out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW);
}

static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 56:
         *     Restore the MFC_TCLASS_ID register from CSA.
         */
        spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
        eieio();
}
static inline void set_llr_event(struct spu_state *csa, struct spu *spu)
{
        u64 ch0_cnt, ch0_data;
        u64 ch1_data;

        /* Restore, Step 57:
         *    Set the Lock Line Reservation Lost Event by:
         *      1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1.
         *      2. If CSA.SPU_Channel_0_Count=0 and
         *         CSA.SPU_Wr_Event_Mask[Lr]=1 and
         *         CSA.SPU_Event_Status[Lr]=0 then set
         *         CSA.SPU_Event_Status_Count=1.
         */
        ch0_cnt = csa->spu_chnlcnt_RW[0];
        ch0_data = csa->spu_chnldata_RW[0];
        ch1_data = csa->spu_chnldata_RW[1];
        csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT;
        if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) &&
            (ch1_data & MFC_LLR_LOST_EVENT)) {
                csa->spu_chnlcnt_RW[0] = 1;
        }
}

static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 58:
         *    If the status of the CSA software decrementer
         *    "wrapped" flag is set, OR in a '1' to
         *    CSA.SPU_Event_Status[Tm].
         */
        if (!(csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED))
                return;

        if ((csa->spu_chnlcnt_RW[0] == 0) &&
            (csa->spu_chnldata_RW[1] & 0x20) &&
            !(csa->spu_chnldata_RW[0] & 0x20))
                csa->spu_chnlcnt_RW[0] = 1;

        csa->spu_chnldata_RW[0] |= 0x20;
}
static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
        int i;

        /* Restore, Step 59:
         *     Restore the following CH: [0,3,4,24,25,27]
         */
        for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
                idx = ch_indices[i];
                out_be64(&priv2->spu_chnlcntptr_RW, idx);
                eieio();
                out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]);
                out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]);
                eieio();
        }
}

static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 ch_indices[3] = { 9UL, 21UL, 23UL };
        u64 ch_counts[3] = { 1UL, 16UL, 1UL };
        u64 idx;
        int i;

        /* Restore, Step 60:
         *     Restore the following CH: [9,21,23].
         */
        ch_counts[0] = 1UL;
        ch_counts[1] = csa->spu_chnlcnt_RW[21];
        ch_counts[2] = 1UL;
        for (i = 0; i < 3; i++) {
                idx = ch_indices[i];
                out_be64(&priv2->spu_chnlcntptr_RW, idx);
                eieio();
                out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
                eieio();
        }
}
static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 61:
         *     Restore the SPU_LSLR register from CSA.
         */
        out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW);
        eieio();
}

static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 62:
         *     Restore the SPU_Cfg register from CSA.
         */
        out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW);
        eieio();
}

static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 63:
         *     Restore PM_Trace_Tag_Wait_Mask from CSA.
         *     Not performed by this implementation.
         */
}

static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Restore, Step 64:
         *     Restore SPU_NPC from CSA.
         */
        out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW);
        eieio();
}

static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        int i;

        /* Restore, Step 65:
         *     Restore MFC_RdSPU_MB from CSA.
         */
        out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
        eieio();
        out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);
        for (i = 0; i < 4; i++) {
                out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);
        }
        eieio();
}
static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 dummy = 0;

        /* Restore, Step 66:
         *     If CSA.MB_Stat[P]=0 (mailbox empty) then
         *     read from the PPU_MB register.
         */
        if ((csa->prob.mb_stat_R & 0xFF) == 0) {
                dummy = in_be32(&prob->pu_mb_R);
                eieio();
        }
}

static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 dummy = 0UL;

        /* Restore, Step 67:
         *     If CSA.MB_Stat[I]=0 (mailbox empty) then
         *     read from the PPUINT_MB register.
         */
        if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
                dummy = in_be64(&priv2->puint_mb_R);
                eieio();
                spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
                eieio();
        }
}
static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 69:
         *     Restore the MFC_SR1 register from CSA.
         */
        spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
        eieio();
}

static inline void restore_other_spu_access(struct spu_state *csa,
                                            struct spu *spu)
{
        /* Restore, Step 70:
         *     Restore other SPU mappings to this SPU.  TBD.
         */
}

static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Restore, Step 71:
         *     If CSA.SPU_Status[R]=1 then write
         *     SPU_RunCntl[R0R1]='01'.
         */
        if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) {
                out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
                eieio();
        }
}
static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 72:
         *    Restore the MFC_CNTL register from the CSA.
         */
        out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
        eieio();

        /*
         * FIXME: this is to restart a DMA that we were processing
         *        before the save.  Better: remember the fault
         *        information in the csa instead.
         */
        if ((csa->priv2.mfc_control_RW & MFC_CNTL_SUSPEND_DMA_QUEUE_MASK)) {
                out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
                eieio();
        }
}

static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 73:
         *     Enable user-space access (if provided) to this
         *     SPU by mapping the virtual pages assigned to
         *     the SPU memory-mapped I/O (MMIO) for problem
         *     state.  TBD.
         */
}
static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 74:
         *     Reset the "context switch active" flag.
         *     Not performed by this implementation.
         */
}

static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 75:
         *     Re-enable SPU interrupts.
         */
        spin_lock_irq(&spu->register_lock);
        spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
        spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
        spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
        spin_unlock_irq(&spu->register_lock);
}
static int quiesce_spu(struct spu_state *prev, struct spu *spu)
{
        /*
         * Combined steps 2-18 of SPU context save sequence, which
         * quiesce the SPU state (disable SPU execution, MFC command
         * queues, decrementer, SPU interrupts, etc.).
         *
         * Returns      0 on success.
         *              2 if failed step 2.
         *              6 if failed step 6.
         */

        if (check_spu_isolate(prev, spu)) {     /* Step 2.  */
                return 2;
        }
        disable_interrupts(prev, spu);          /* Step 3.  */
        set_watchdog_timer(prev, spu);          /* Step 4.  */
        inhibit_user_access(prev, spu);         /* Step 5.  */
        if (check_spu_isolate(prev, spu)) {     /* Step 6.  */
                return 6;
        }
        set_switch_pending(prev, spu);          /* Step 7.  */
        save_mfc_cntl(prev, spu);               /* Step 8.  */
        save_spu_runcntl(prev, spu);            /* Step 9.  */
        save_mfc_sr1(prev, spu);                /* Step 10. */
        save_spu_status(prev, spu);             /* Step 11. */
        save_mfc_stopped_status(prev, spu);     /* Step 12. */
        halt_mfc_decr(prev, spu);               /* Step 13. */
        save_timebase(prev, spu);               /* Step 14. */
        remove_other_spu_access(prev, spu);     /* Step 15. */
        do_mfc_mssync(prev, spu);               /* Step 16. */
        issue_mfc_tlbie(prev, spu);             /* Step 17. */
        handle_pending_interrupts(prev, spu);   /* Step 18. */

        return 0;
}
static void save_csa(struct spu_state *prev, struct spu *spu)
{
        /*
         * Combine steps 19-44 of SPU context save sequence, which
         * save regions of the privileged & problem state areas.
         */

        save_mfc_queues(prev, spu);     /* Step 19. */
        save_ppu_querymask(prev, spu);  /* Step 20. */
        save_ppu_querytype(prev, spu);  /* Step 21. */
        save_ppu_tagstatus(prev, spu);  /* NEW.     */
        save_mfc_csr_tsq(prev, spu);    /* Step 22. */
        save_mfc_csr_cmd(prev, spu);    /* Step 23. */
        save_mfc_csr_ato(prev, spu);    /* Step 24. */
        save_mfc_tclass_id(prev, spu);  /* Step 25. */
        set_mfc_tclass_id(prev, spu);   /* Step 26. */
        save_mfc_cmd(prev, spu);        /* Step 26a - moved from 44. */
        purge_mfc_queue(prev, spu);     /* Step 27. */
        wait_purge_complete(prev, spu); /* Step 28. */
        setup_mfc_sr1(prev, spu);       /* Step 30. */
        save_spu_npc(prev, spu);        /* Step 31. */
        save_spu_privcntl(prev, spu);   /* Step 32. */
        reset_spu_privcntl(prev, spu);  /* Step 33. */
        save_spu_lslr(prev, spu);       /* Step 34. */
        reset_spu_lslr(prev, spu);      /* Step 35. */
        save_spu_cfg(prev, spu);        /* Step 36. */
        save_pm_trace(prev, spu);       /* Step 37. */
        save_mfc_rag(prev, spu);        /* Step 38. */
        save_ppu_mb_stat(prev, spu);    /* Step 39. */
        save_ppu_mb(prev, spu);         /* Step 40. */
        save_ppuint_mb(prev, spu);      /* Step 41. */
        save_ch_part1(prev, spu);       /* Step 42. */
        save_spu_mb(prev, spu);         /* Step 43. */
        reset_ch(prev, spu);            /* Step 45. */
}
static void save_lscsa(struct spu_state *prev, struct spu *spu)
{
        /*
         * Perform steps 46-57 of SPU context save sequence,
         * which save regions of the local store and register
         * file.
         */

        resume_mfc_queue(prev, spu);    /* Step 46. */
        /* Step 47. */
        setup_mfc_slbs(prev, spu, spu_save_code, sizeof(spu_save_code));
        set_switch_active(prev, spu);   /* Step 48. */
        enable_interrupts(prev, spu);   /* Step 49. */
        save_ls_16kb(prev, spu);        /* Step 50. */
        set_spu_npc(prev, spu);         /* Step 51. */
        set_signot1(prev, spu);         /* Step 52. */
        set_signot2(prev, spu);         /* Step 53. */
        send_save_code(prev, spu);      /* Step 54. */
        set_ppu_querymask(prev, spu);   /* Step 55. */
        wait_tag_complete(prev, spu);   /* Step 56. */
        wait_spu_stopped(prev, spu);    /* Step 57. */
}
static void force_spu_isolate_exit(struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Stop SPE execution and wait for completion. */
        out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
        iobarrier_rw();
        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

        /* Restart SPE master runcntl. */
        spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK);
        iobarrier_w();

        /* Initiate isolate exit request and wait for completion. */
        out_be64(&priv2->spu_privcntl_RW, 4LL);
        iobarrier_w();
        out_be32(&prob->spu_runcntl_RW, 2);
        iobarrier_rw();
        POLL_WHILE_FALSE((in_be32(&prob->spu_status_R)
                          & SPU_STATUS_STOPPED_BY_STOP));

        /* Reset load request to normal. */
        out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL);
        iobarrier_w();
}

/**
 * stop_spu_isolate
 *      Check SPU run-control state and force isolated
 *      exit function as necessary.
 */
static void stop_spu_isolate(struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) {
                /* The SPU is in isolated state; the only way
                 * to get it out is to perform an isolated
                 * exit (clean) operation.
                 */
                force_spu_isolate_exit(spu);
        }
}
static void harvest(struct spu_state *prev, struct spu *spu)
{
        /*
         * Perform steps 2-25 of SPU context restore sequence,
         * which resets an SPU either after a failed save, or
         * when using an SPU for the first time.
         */

        disable_interrupts(prev, spu);          /* Step 2.  */
        inhibit_user_access(prev, spu);         /* Step 3.  */
        terminate_spu_app(prev, spu);           /* Step 4.  */
        set_switch_pending(prev, spu);          /* Step 5.  */
        stop_spu_isolate(spu);                  /* NEW.     */
        remove_other_spu_access(prev, spu);     /* Step 6.  */
        suspend_mfc_and_halt_decr(prev, spu);   /* Step 7.  */
        wait_suspend_mfc_complete(prev, spu);   /* Step 8.  */
        if (!suspend_spe(prev, spu))            /* Step 9.  */
                clear_spu_status(prev, spu);    /* Step 10. */
        do_mfc_mssync(prev, spu);               /* Step 11. */
        issue_mfc_tlbie(prev, spu);             /* Step 12. */
        handle_pending_interrupts(prev, spu);   /* Step 13. */
        purge_mfc_queue(prev, spu);             /* Step 14. */
        wait_purge_complete(prev, spu);         /* Step 15. */
        reset_spu_privcntl(prev, spu);          /* Step 16. */
        reset_spu_lslr(prev, spu);              /* Step 17. */
        setup_mfc_sr1(prev, spu);               /* Step 18. */
        spu_invalidate_slbs(spu);               /* Step 19. */
        reset_ch_part1(prev, spu);              /* Step 20. */
        reset_ch_part2(prev, spu);              /* Step 21. */
        enable_interrupts(prev, spu);           /* Step 22. */
        set_switch_active(prev, spu);           /* Step 23. */
        set_mfc_tclass_id(prev, spu);           /* Step 24. */
        resume_mfc_queue(prev, spu);            /* Step 25. */
}
static void restore_lscsa(struct spu_state *next, struct spu *spu)
{
        /*
         * Perform steps 26-40 of SPU context restore sequence,
         * which restores regions of the local store and register
         * file.
         */

        set_watchdog_timer(next, spu);          /* Step 26. */
        setup_spu_status_part1(next, spu);      /* Step 27. */
        setup_spu_status_part2(next, spu);      /* Step 28. */
        restore_mfc_rag(next, spu);             /* Step 29. */
        /* Step 30. */
        setup_mfc_slbs(next, spu, spu_restore_code, sizeof(spu_restore_code));
        set_spu_npc(next, spu);                 /* Step 31. */
        set_signot1(next, spu);                 /* Step 32. */
        set_signot2(next, spu);                 /* Step 33. */
        setup_decr(next, spu);                  /* Step 34. */
        setup_ppu_mb(next, spu);                /* Step 35. */
        setup_ppuint_mb(next, spu);             /* Step 36. */
        send_restore_code(next, spu);           /* Step 37. */
        set_ppu_querymask(next, spu);           /* Step 38. */
        wait_tag_complete(next, spu);           /* Step 39. */
        wait_spu_stopped(next, spu);            /* Step 40. */
}
static void restore_csa(struct spu_state *next, struct spu *spu)
{
        /*
         * Combine steps 41-76 of SPU context restore sequence, which
         * restore regions of the privileged & problem state areas.
         */

        restore_spu_privcntl(next, spu);        /* Step 41. */
        restore_status_part1(next, spu);        /* Step 42. */
        restore_status_part2(next, spu);        /* Step 43. */
        restore_ls_16kb(next, spu);             /* Step 44. */
        wait_tag_complete(next, spu);           /* Step 45. */
        suspend_mfc(next, spu);                 /* Step 46. */
        wait_suspend_mfc_complete(next, spu);   /* Step 47. */
        issue_mfc_tlbie(next, spu);             /* Step 48. */
        clear_interrupts(next, spu);            /* Step 49. */
        restore_mfc_queues(next, spu);          /* Step 50. */
        restore_ppu_querymask(next, spu);       /* Step 51. */
        restore_ppu_querytype(next, spu);       /* Step 52. */
        restore_mfc_csr_tsq(next, spu);         /* Step 53. */
        restore_mfc_csr_cmd(next, spu);         /* Step 54. */
        restore_mfc_csr_ato(next, spu);         /* Step 55. */
        restore_mfc_tclass_id(next, spu);       /* Step 56. */
        set_llr_event(next, spu);               /* Step 57. */
        restore_decr_wrapped(next, spu);        /* Step 58. */
        restore_ch_part1(next, spu);            /* Step 59. */
        restore_ch_part2(next, spu);            /* Step 60. */
        restore_spu_lslr(next, spu);            /* Step 61. */
        restore_spu_cfg(next, spu);             /* Step 62. */
        restore_pm_trace(next, spu);            /* Step 63. */
        restore_spu_npc(next, spu);             /* Step 64. */
        restore_spu_mb(next, spu);              /* Step 65. */
        check_ppu_mb_stat(next, spu);           /* Step 66. */
        check_ppuint_mb_stat(next, spu);        /* Step 67. */
        spu_invalidate_slbs(spu);               /* Modified Step 68. */
        restore_mfc_sr1(next, spu);             /* Step 69. */
        restore_other_spu_access(next, spu);    /* Step 70. */
        restore_spu_runcntl(next, spu);         /* Step 71. */
        restore_mfc_cntl(next, spu);            /* Step 72. */
        enable_user_access(next, spu);          /* Step 73. */
        reset_switch_active(next, spu);         /* Step 74. */
        reenable_interrupts(next, spu);         /* Step 75. */
}
static int __do_spu_save(struct spu_state *prev, struct spu *spu)
{
        int rc;

        /*
         * SPU context save can be broken into three phases:
         *
         *     (a) quiesce [steps 2-16].
         *     (b) save of CSA, performed by PPE [steps 17-42].
         *     (c) save of LSCSA, mostly performed by SPU [steps 43-52].
         *
         * Returns      0 on success.
         *              2,6 if failed to quiesce SPU.
         *              53 if SPU-side of save failed.
         */

        rc = quiesce_spu(prev, spu);            /* Steps 2-16.  */
        switch (rc) {
        default:
        case 2:
        case 6:
                harvest(prev, spu);
                return rc;
        case 0:
                break;
        }
        save_csa(prev, spu);                    /* Steps 17-43. */
        save_lscsa(prev, spu);                  /* Steps 44-53. */
        return check_save_status(prev, spu);    /* Step 54.     */
}
static int __do_spu_restore(struct spu_state *next, struct spu *spu)
{
        int rc;

        /*
         * SPU context restore can be broken into three phases:
         *
         *     (a) harvest (or reset) SPU [steps 2-24].
         *     (b) restore LSCSA [steps 25-40], mostly performed by SPU.
         *     (c) restore CSA [steps 41-76], performed by PPE.
         *
         * The 'harvest' step is not performed here, but rather
         * as needed below.
         */

        restore_lscsa(next, spu);               /* Steps 24-39. */
        rc = check_restore_status(next, spu);   /* Step 40.     */
        switch (rc) {
        default:
                /* Failed.  Return now. */
                return rc;
        case 0:
                /* Fall through to next step. */
                break;
        }
        restore_csa(next, spu);

        return 0;
}
/**
 * spu_save - SPU context save, with locking.
 * @prev: pointer to SPU context save area, to be saved.
 * @spu: pointer to SPU iomem structure.
 *
 * Acquire locks, perform the save operation then return.
 */
int spu_save(struct spu_state *prev, struct spu *spu)
{
        int rc;

        acquire_spu_lock(spu);          /* Step 1.     */
        rc = __do_spu_save(prev, spu);  /* Steps 2-53. */
        release_spu_lock(spu);
        if (rc != 0 && rc != 2 && rc != 6) {
                panic("%s failed on SPU[%d], rc=%d.\n",
                      __func__, spu->number, rc);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(spu_save);
/**
 * spu_restore - SPU context restore, with harvest and locking.
 * @new: pointer to SPU context save area, to be restored.
 * @spu: pointer to SPU iomem structure.
 *
 * Perform harvest + restore, as we may not be coming
 * from a previous successful save operation, and the
 * hardware state is unknown.
 */
int spu_restore(struct spu_state *new, struct spu *spu)
{
        int rc;

        acquire_spu_lock(spu);
        harvest(NULL, spu);
        spu->slb_replace = 0;
        rc = __do_spu_restore(new, spu);
        release_spu_lock(spu);
        if (rc) {
                panic("%s failed on SPU[%d] rc=%d.\n",
                      __func__, spu->number, rc);
        }
        return rc;
}
EXPORT_SYMBOL_GPL(spu_restore);
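/*
 * A minimal usage sketch, assuming a scheduler that binds contexts to
 * physical SPUs (the spufs scheduler follows this pattern; the context
 * structure and field names here are illustrative, not part of this
 * file):
 *
 *      spu_save(&old_ctx->csa, spu);     // unbind: capture old context
 *      spu_restore(&new_ctx->csa, spu);  // bind: load new context
 *
 * spu_restore() always harvests first, so it is safe even when the
 * SPU's prior hardware state is unknown (e.g. on first use).
 */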
static void init_prob(struct spu_state *csa)
{
        csa->spu_chnlcnt_RW[9] = 1;
        csa->spu_chnlcnt_RW[21] = 16;
        csa->spu_chnlcnt_RW[23] = 1;
        csa->spu_chnlcnt_RW[28] = 1;
        csa->spu_chnlcnt_RW[30] = 1;
        csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;
        csa->prob.mb_stat_R = 0x000400;
}
static void init_priv1(struct spu_state *csa)
{
        /* Enable decode, relocate, tlbie response, master runcntl. */
        csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |
            MFC_STATE1_MASTER_RUN_CONTROL_MASK |
            MFC_STATE1_PROBLEM_STATE_MASK |
            MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;

        /* Enable OS-specific set of interrupts. */
        csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
            CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
            CLASS0_ENABLE_SPU_ERROR_INTR;
        csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
            CLASS1_ENABLE_STORAGE_FAULT_INTR;
        csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |
            CLASS2_ENABLE_SPU_HALT_INTR |
            CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR;
}
static void init_priv2(struct spu_state *csa)
{
        csa->priv2.spu_lslr_RW = LS_ADDR_MASK;
        csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE |
            MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION |
            MFC_CNTL_DMA_QUEUES_EMPTY_MASK;
}
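/*
 * Taken together, init_prob()/init_priv1()/init_priv2() make a freshly
 * initialized CSA look like a stopped SPU in a sane state: default
 * channel counts, run control set to stop, translation and master run
 * control enabled in MFC_SR1, the OS's chosen interrupt classes
 * unmasked, and an empty, resumed MFC command queue.  The first
 * spu_restore() of a new context then proceeds exactly like the
 * restore of a previously saved one.
 */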
/**
 * spu_init_csa - allocate and initialize an SPU context save area.
 *
 * Allocate and initialize the contents of an SPU context save area.
 * This includes enabling address translation, interrupt masks, etc.,
 * as appropriate for the given OS environment.
 *
 * Note that storage for the 'lscsa' is allocated separately,
 * as it is by far the largest of the context save regions,
 * and may need to be pinned or otherwise specially aligned.
 */
int spu_init_csa(struct spu_state *csa)
{
        int rc;

        if (!csa)
                return -EINVAL;
        memset(csa, 0, sizeof(struct spu_state));

        rc = spu_alloc_lscsa(csa);
        if (rc)
                return rc;

        spin_lock_init(&csa->register_lock);

        init_prob(csa);
        init_priv1(csa);
        init_priv2(csa);

        return 0;
}

void spu_fini_csa(struct spu_state *csa)
{
        spu_free_lscsa(csa);
}