/*
 * Copyright Altera Corporation (C) 2012-2015
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <common.h>
#include <asm/io.h>
#include <asm/arch/sdram.h>
#include <errno.h>
#include "sequencer.h"
#include "sequencer_auto.h"
#include "sequencer_auto_ac_init.h"
#include "sequencer_auto_inst_init.h"
#include "sequencer_defines.h"
static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
	(struct socfpga_sdr_rw_load_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);

static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs =
	(struct socfpga_sdr_rw_load_jump_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);

static struct socfpga_sdr_reg_file *sdr_reg_file =
	(struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;

static struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
	(struct socfpga_sdr_scc_mgr *)(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);

static struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
	(struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;

static struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
	(struct socfpga_phy_mgr_cfg *)(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);

static struct socfpga_data_mgr *data_mgr =
	(struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;

static struct socfpga_sdr_ctrl *sdr_ctrl =
	(struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;

/*
 * In order to reduce ROM size, most of the selectable calibration steps are
 * decided at compile time based on the user's calibration mode selection,
 * as captured by the STATIC_CALIB_STEPS selection below.
 *
 * However, to support simulation-time selection of fast simulation mode, where
 * we skip everything except the bare minimum, we need a few of the steps to
 * be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS for the
 * check, which is based on the rtl-supplied value, or we dynamically compute
 * the value to use based on the dynamically-chosen calibration mode.
 */

#define STATIC_IN_RTL_SIM 0
#define STATIC_SKIP_DELAY_LOOPS 0

#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
	STATIC_SKIP_DELAY_LOOPS)

/* calibration steps requested by the rtl */
uint16_t dyn_calib_steps;

/*
 * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option
 * instead of static, we use boolean logic to select between
 * non-skip and skip values.
 *
 * The mask is set to include all bits when not-skipping, but is
 * zero when skipping.
 */
uint16_t skip_delay_mask;	/* mask off bits when skipping/not-skipping */

#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
	((non_skip_value) & skip_delay_mask)
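
/*
 * For example, with skip_delay_mask = 0xffff (not skipping),
 * SKIP_DELAY_LOOP_VALUE_OR_ZERO(0x6a) evaluates to 0x6a; with
 * skip_delay_mask = 0 (skipping), it collapses to 0, so the delay
 * loops degenerate to a single pass.
 */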

struct gbl_type *gbl;
struct param_type *param;
uint32_t curr_shadow_reg;

static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
	uint32_t write_group, uint32_t use_dm,
	uint32_t all_correct, uint32_t *bit_chk, uint32_t all_ranks);

static void set_failing_group_stage(uint32_t group, uint32_t stage,
	uint32_t substage)
{
	/*
	 * Only set the global stage if there has not been any other
	 * failing group.
	 */
	if (gbl->error_stage == CAL_STAGE_NIL)	{
		gbl->error_substage = substage;
		gbl->error_stage = stage;
		gbl->error_group = group;
	}
}

static void reg_file_set_group(u16 set_group)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
}

static void reg_file_set_stage(u8 set_stage)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
}

static void reg_file_set_sub_stage(u8 set_sub_stage)
{
	set_sub_stage &= 0xff;
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
}

/**
 * phy_mgr_initialize() - Initialize PHY Manager
 *
 * Initialize PHY Manager.
 */
static void phy_mgr_initialize(void)
{
	u32 ratio;

	debug("%s:%d\n", __func__, __LINE__);
	/* Calibration has control over path to memory */
	/*
	 * In Hard PHY this is a 2-bit control:
	 * 0: AFI Mux Select
	 * 1: DDIO Mux Select
	 */
	writel(0x3, &phy_mgr_cfg->mux_sel);

	/* Memory clock is not stable; we begin initialization. */
	writel(0, &phy_mgr_cfg->reset_mem_stbl);

	/* Calibration status: all set to zero. */
	writel(0, &phy_mgr_cfg->cal_status);

	writel(0, &phy_mgr_cfg->cal_debug_info);

	/* Init params only if we do NOT skip calibration. */
	if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL)
		return;

	ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
		RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
	param->read_correct_mask_vg = (1 << ratio) - 1;
	param->write_correct_mask_vg = (1 << ratio) - 1;
	param->read_correct_mask = (1 << RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
	param->write_correct_mask = (1 << RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
	ratio = RW_MGR_MEM_DATA_WIDTH /
		RW_MGR_MEM_DATA_MASK_WIDTH;
	param->dm_correct_mask = (1 << ratio) - 1;
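
	/*
	 * For example (illustrative values): with 8 DQ pins per read DQS
	 * and 2 virtual groups per read DQS, ratio = 4, so the
	 * per-virtual-group masks are 0xf while read_correct_mask = 0xff.
	 */
}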

/**
 * set_rank_and_odt_mask() - Set Rank and ODT mask
 * @rank:	Rank mask
 * @odt_mode:	ODT mode, OFF or READ_WRITE
 *
 * Set Rank and ODT mask (On-Die Termination).
 */
static void set_rank_and_odt_mask(const u32 rank, const u32 odt_mode)
{
	u32 odt_mask_0 = 0;
	u32 odt_mask_1 = 0;
	u32 cs_and_odt_mask;

	if (odt_mode == RW_MGR_ODT_MODE_OFF) {
		odt_mask_0 = 0x0;
		odt_mask_1 = 0x0;
	} else {	/* RW_MGR_ODT_MODE_READ_WRITE */
		switch (RW_MGR_MEM_NUMBER_OF_RANKS) {
		case 1:	/* 1 Rank */
			/* Read: ODT = 0 ; Write: ODT = 1 */
			odt_mask_0 = 0x0;
			odt_mask_1 = 0x1;
			break;
		case 2:	/* 2 Ranks */
			if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
				/*
				 * - Dual-Slot , Single-Rank (1 CS per DIMM)
				 *   OR
				 * - RDIMM, 4 total CS (2 CS per DIMM, 2 DIMM)
				 *
				 * Since MEM_NUMBER_OF_RANKS is 2, they
				 * are both single rank with 2 CS each
				 * (special for RDIMM).
				 *
				 * Read: Turn on ODT on the opposite rank
				 * Write: Turn on ODT on all ranks
				 */
				odt_mask_0 = 0x3 & ~(1 << rank);
				odt_mask_1 = 0x3;
			} else {
				/*
				 * - Single-Slot , Dual-Rank (2 CS per DIMM)
				 *
				 * Read: Turn off ODT on all ranks
				 * Write: Turn on ODT on active rank
				 */
				odt_mask_0 = 0x0;
				odt_mask_1 = 0x3 & (1 << rank);
			}
			break;
		case 4:	/* 4 Ranks */
			/*
			 * Read:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 * Read From +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  0  |
			 *     1     |  1  |  0  |  0  |  0  |
			 *     2     |  0  |  0  |  0  |  1  |
			 *     3     |  0  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *
			 * Write:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 * Write To  +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  1  |
			 *     1     |  1  |  0  |  1  |  0  |
			 *     2     |  0  |  1  |  0  |  1  |
			 *     3     |  1  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 */
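			/*
			 * The masks below follow directly from the tables
			 * above: e.g. a read from rank 0 asserts ODT on
			 * rank 2 only (0x4), while a write to rank 0
			 * asserts ODT on ranks 0 and 2 (0x5).
			 */
			switch (rank) {
			case 0:
				odt_mask_0 = 0x4;
				odt_mask_1 = 0x5;
				break;
			case 1:
				odt_mask_0 = 0x8;
				odt_mask_1 = 0xA;
				break;
			case 2:
				odt_mask_0 = 0x1;
				odt_mask_1 = 0x5;
				break;
			case 3:
				odt_mask_0 = 0x2;
				odt_mask_1 = 0xA;
				break;
			}
			break;
		}
	}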

	/*
	 * The selected rank's chip-select bit is cleared in bits [7:0];
	 * odt_mask_0 (read) occupies bits [15:8] and odt_mask_1 (write)
	 * bits [23:16].
	 */
	cs_and_odt_mask = (0xFF & ~(1 << rank)) |
			  ((0xFF & odt_mask_0) << 8) |
			  ((0xFF & odt_mask_1) << 16);
	writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}

/**
 * scc_mgr_set() - Set SCC Manager register
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register.
 */
static void scc_mgr_set(u32 off, u32 grp, u32 val)
{
	writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
}

/**
 * scc_mgr_initialize() - Initialize SCC Manager registers
 *
 * Initialize SCC Manager registers.
 */
static void scc_mgr_initialize(void)
{
	/*
	 * Clear register file for HPS. 16 (2^4) is the size of the
	 * full register file in the scc mgr:
	 *	RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
	 *			       MEM_IF_READ_DQS_WIDTH - 1);
	 */
	int i;

	for (i = 0; i < 16; i++) {
		debug_cond(DLEVEL == 1, "%s:%d: Clearing SCC RFILE index %u\n",
			   __func__, __LINE__, i);
		scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, 0, i);
	}
}

static void scc_mgr_set_dqdqs_output_phase(uint32_t write_group, uint32_t phase)
{
	scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
}

static void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase)
{
	scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
}

static void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_io_in_delay(uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dq_in_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dq_out1_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dqs_out1_delay(uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dm_out1_delay(uint32_t dm, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
		    RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm,
		    delay);
}

/* load up dqs config settings */
static void scc_mgr_load_dqs(uint32_t dqs)
{
	writel(dqs, &sdr_scc_mgr->dqs_ena);
}

/* load up dqs io config settings */
static void scc_mgr_load_dqs_io(void)
{
	writel(0, &sdr_scc_mgr->dqs_io_ena);
}

/* load up dq config settings */
static void scc_mgr_load_dq(uint32_t dq_in_group)
{
	writel(dq_in_group, &sdr_scc_mgr->dq_ena);
}

/* load up dm config settings */
static void scc_mgr_load_dm(uint32_t dm)
{
	writel(dm, &sdr_scc_mgr->dm_ena);
}

/**
 * scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 * @update:	If non-zero, trigger SCC Manager update for all ranks
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register
 * and optionally triggers the SCC update for all ranks.
 */
static void scc_mgr_set_all_ranks(const u32 off, const u32 grp, const u32 val,
				  const int update)
{
	u32 r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_set(off, grp, val);

		if (update || (r == 0)) {
			writel(grp, &sdr_scc_mgr->dqs_ena);
			writel(0, &sdr_scc_mgr->update);
		}
	}
}

static void scc_mgr_set_dqs_en_phase_all_ranks(u32 read_group, u32 phase)
{
	/*
	 * Although the h/w doesn't support different phases per
	 * shadow register, for simplicity our scc manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * For efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_PHASE_OFFSET,
			      read_group, phase, 0);
}

static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group,
						     uint32_t phase)
{
	/*
	 * Although the h/w doesn't support different phases per
	 * shadow register, for simplicity our scc manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * For efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQDQS_OUT_PHASE_OFFSET,
			      write_group, phase, 0);
}

static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group,
					       uint32_t delay)
{
	/*
	 * In shadow register mode, the T11 settings are stored in
	 * registers in the core, which are updated by the DQS_ENA
	 * signals. Not issuing the SCC_MGR_UPD command allows us to
	 * save lots of rank switching overhead, by calling
	 * select_shadow_regs_for_update with update_scan_chains
	 * set to 0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_DELAY_OFFSET,
			      read_group, delay, 1);
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_mgr_set_oct_out1_delay() - Set OCT output delay
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * This function sets the OCT output delay in the SCC manager.
 */
static void scc_mgr_set_oct_out1_delay(const u32 write_group, const u32 delay)
{
	const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	const int base = write_group * ratio;
	u32 i;

	/*
	 * Load the setting in the SCC manager.
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be applied multiple times.
	 */
	for (i = 0; i < ratio; i++)
		scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay);
}

/**
 * scc_mgr_set_hhp_extras() - Set HHP extras.
 *
 * Load the fixed setting in the SCC manager HHP extras.
 */
static void scc_mgr_set_hhp_extras(void)
{
	/*
	 * Load the fixed setting in the SCC manager:
	 * bits: 0:0 = 1'b1	- DQS bypass
	 * bits: 1:1 = 1'b1	- DQ bypass
	 * bits: 4:2 = 3'b001	- rfifo_mode
	 * bits: 6:5 = 2'b01	- rfifo clock_select
	 * bits: 7:7 = 1'b0	- separate gating from ungating setting
	 * bits: 8:8 = 1'b0	- separate OE from Output delay setting
	 */
	const u32 value = (0 << 8) | (0 << 7) | (1 << 5) |
			  (1 << 2) | (1 << 1) | (1 << 0);
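	/* I.e. value == 0x27, matching the bit assignments above. */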
	const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS |
			 SCC_MGR_HHP_GLOBALS_OFFSET |
			 SCC_MGR_HHP_EXTRAS_OFFSET;

	debug_cond(DLEVEL == 1, "%s:%d Setting HHP Extras\n",
		   __func__, __LINE__);
	writel(value, addr);
	debug_cond(DLEVEL == 1, "%s:%d Done Setting HHP Extras\n",
		   __func__, __LINE__);
}

/**
 * scc_mgr_zero_all() - Zero all DQS config
 *
 * Zero all DQS config.
 */
static void scc_mgr_zero_all(void)
{
	int i, r;

	/*
	 * Zero all DQS config settings, across all groups and all
	 * shadow registers.
	 */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
			/*
			 * The phases actually don't exist on a per-rank basis,
			 * but there's no harm updating them several times, so
			 * let's keep the code simple.
			 */
			scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE);
			scc_mgr_set_dqs_en_phase(i, 0);
			scc_mgr_set_dqs_en_delay(i, 0);
		}

		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
			scc_mgr_set_dqdqs_output_phase(i, 0);
			/* Arria V/Cyclone V don't have out2. */
			scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE);
		}
	}

	/* Multicast to all DQS group enables. */
	writel(0xff, &sdr_scc_mgr->dqs_ena);
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_set_bypass_mode() - Set bypass mode and trigger SCC update
 * @write_group:	Write group
 *
 * Set bypass mode and trigger SCC update.
 */
static void scc_set_bypass_mode(const u32 write_group)
{
	/* Multicast to all DQ enables. */
	writel(0xff, &sdr_scc_mgr->dq_ena);
	writel(0xff, &sdr_scc_mgr->dm_ena);

	/* Update current DQS IO enable. */
	writel(0, &sdr_scc_mgr->dqs_io_ena);

	/* Update the DQS logic. */
	writel(write_group, &sdr_scc_mgr->dqs_ena);

	/* Hit update. */
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group
 * @write_group:	Write group
 *
 * Load DQS settings for Write Group, do not trigger SCC update.
 */
static void scc_mgr_load_dqs_for_write_group(const u32 write_group)
{
	const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	const int base = write_group * ratio;
	u32 i;

	/*
	 * Load the setting in the SCC manager.
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be applied multiple times.
	 */
	for (i = 0; i < ratio; i++)
		writel(base + i, &sdr_scc_mgr->dqs_ena);
}

/**
 * scc_mgr_zero_group() - Zero all configs for a group
 * @write_group:	Write group
 * @out_only:		If set, only zero the output-side settings
 *
 * Zero DQ, DM, DQS and OCT configs for a group.
 */
static void scc_mgr_zero_group(const u32 write_group, const int out_only)
{
	int i, r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/* Zero all DQ config settings. */
		for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
			scc_mgr_set_dq_out1_delay(i, 0);
			if (!out_only)
				scc_mgr_set_dq_in_delay(i, 0);
		}

		/* Multicast to all DQ enables. */
		writel(0xff, &sdr_scc_mgr->dq_ena);

		/* Zero all DM config settings. */
		for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
			scc_mgr_set_dm_out1_delay(i, 0);

		/* Multicast to all DM enables. */
		writel(0xff, &sdr_scc_mgr->dm_ena);

		/* Zero all DQS IO settings. */
		if (!out_only)
			scc_mgr_set_dqs_io_in_delay(0);

		/* Arria V/Cyclone V don't have out2. */
		scc_mgr_set_dqs_out1_delay(IO_DQS_OUT_RESERVE);
		scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE);
		scc_mgr_load_dqs_for_write_group(write_group);

		/* Multicast to all DQS IO enables (only 1 in total). */
		writel(0, &sdr_scc_mgr->dqs_io_ena);

		/* Hit update to zero everything. */
		writel(0, &sdr_scc_mgr->update);
	}
}

/*
 * apply and load a particular input delay for the DQ pins in a group
 * group_bgn is the index of the first dq pin (in the write group)
 */
static void scc_mgr_apply_group_dq_in_delay(uint32_t group_bgn, uint32_t delay)
{
	uint32_t i, p;

	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
		scc_mgr_set_dq_in_delay(p, delay);
		scc_mgr_load_dq(p);
	}
}

/**
 * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the DQ pins in a group
 * @delay:		Delay value
 *
 * Apply and load a particular output delay for the DQ pins in a group.
 */
static void scc_mgr_apply_group_dq_out1_delay(const u32 delay)
{
	int i;

	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
		scc_mgr_set_dq_out1_delay(i, delay);
		scc_mgr_load_dq(i);
	}
}

/* apply and load a particular output delay for the DM pins in a group */
static void scc_mgr_apply_group_dm_out1_delay(uint32_t delay1)
{
	uint32_t i;

	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
		scc_mgr_set_dm_out1_delay(i, delay1);
		scc_mgr_load_dm(i);
	}
}

/* apply and load delay on both DQS and OCT out1 */
static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group,
						    uint32_t delay)
{
	scc_mgr_set_dqs_out1_delay(delay);
	scc_mgr_load_dqs_io();

	scc_mgr_set_oct_out1_delay(write_group, delay);
	scc_mgr_load_dqs_for_write_group(write_group);
}

/**
 * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output side: DQ, DM, DQS, OCT
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * Apply a delay to the entire output side: DQ, DM, DQS, OCT.
 */
static void scc_mgr_apply_group_all_out_delay_add(const u32 write_group,
						  const u32 delay)
{
	u32 i, new_delay;

	/* DQ shift */
	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++)
		scc_mgr_load_dq(i);

	/* DM shift */
	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
		scc_mgr_load_dm(i);

	/* DQS shift */
	new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay;
	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay, new_delay,
			   IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		new_delay -= IO_IO_OUT2_DELAY_MAX;
		scc_mgr_set_dqs_out1_delay(new_delay);
	}

	scc_mgr_load_dqs_io();

	/* OCT shift */
	new_delay = READ_SCC_OCT_OUT2_DELAY + delay;
	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u) OCT: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay,
			   new_delay, IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		new_delay -= IO_IO_OUT2_DELAY_MAX;
		scc_mgr_set_oct_out1_delay(write_group, new_delay);
	}

	scc_mgr_load_dqs_for_write_group(write_group);
}

/**
 * scc_mgr_apply_group_all_out_delay_add_all_ranks() - Apply a delay to the entire output side to all ranks
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * Apply a delay to the entire output side (DQ, DM, DQS, OCT) to all ranks.
 */
static void
scc_mgr_apply_group_all_out_delay_add_all_ranks(const u32 write_group,
						const u32 delay)
{
	int r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_apply_group_all_out_delay_add(write_group, delay);
		writel(0, &sdr_scc_mgr->update);
	}
}

/**
 * set_jump_as_return() - Return instruction optimization
 *
 * Optimization used to recover some slots in ddr3 inst_rom; could be
 * applied to other protocols if we wanted to.
 */
static void set_jump_as_return(void)
{
	/*
	 * To save space, we replace return with jump to special shared
	 * RETURN instruction, so we set the counter to a large value so
	 * that we always jump.
	 */
	writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
	writel(RW_MGR_RETURN, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
}

/*
 * should always use constants as argument to ensure all computations are
 * performed at compile time
 */
static void delay_for_n_mem_clocks(const uint32_t clocks)
{
	uint32_t afi_clocks;
	uint8_t inner = 0;
	uint8_t outer = 0;
	uint16_t c_loop = 0;

	debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);

	/* Scale (rounding up) to get afi clocks. */
	afi_clocks = (clocks + AFI_RATE_RATIO - 1) / AFI_RATE_RATIO;
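
	/*
	 * For example, with AFI_RATE_RATIO = 2 (half-rate interface,
	 * illustrative value), a request for 3 memory clocks rounds
	 * up to afi_clocks = 2.
	 */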

	/*
	 * Note, we don't bother accounting for being off a little bit
	 * because of a few extra instructions in outer loops.
	 * Note, the loops have a test at the end, and do the test before
	 * the decrement, and so always perform the loop
	 * 1 time more than the counter value.
	 */
	if (afi_clocks == 0) {
		/* Nothing to do. */
	} else if (afi_clocks <= 0x100) {
		inner = afi_clocks - 1;
	} else if (afi_clocks <= 0x10000) {
		inner = 0xff;
		outer = (afi_clocks - 1) >> 8;
	} else {
		inner = 0xff;
		outer = 0xff;
		c_loop = (afi_clocks - 1) >> 16;
	}
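
	/*
	 * Worked example: afi_clocks = 0x12345 takes the last branch,
	 * giving inner = 0xff, outer = 0xff and
	 * c_loop = (0x12345 - 1) >> 16 = 1. Each counter runs one pass
	 * more than its loaded value, as noted above.
	 */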

	/*
	 * rom instructions are structured as follows:
	 *
	 * IDLE_LOOP2: jnz cntr0, TARGET_A
	 * IDLE_LOOP1: jnz cntr1, TARGET_B
	 *             return
	 *
	 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
	 * TARGET_B is set to IDLE_LOOP2 as well.
	 *
	 * if we have no outer loop, though, then we can use IDLE_LOOP1 only,
	 * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely.
	 *
	 * a little confusing, but it helps save precious space in the inst_rom
	 * and sequencer rom and keeps the delays more accurate and reduces
	 * overhead.
	 */
	if (afi_clocks <= 0x100) {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_IDLE_LOOP1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(RW_MGR_IDLE_LOOP1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	} else {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr0);

		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_IDLE_LOOP2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(RW_MGR_IDLE_LOOP2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		/* hack to get around compiler not being smart enough */
		if (afi_clocks <= 0x10000) {
			/* only need to run once */
			writel(RW_MGR_IDLE_LOOP2, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
		} else {
			do {
				writel(RW_MGR_IDLE_LOOP2,
				       SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
			} while (c_loop-- != 0);
		}
	}

	debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
}

/**
 * rw_mgr_mem_init_load_regs() - Load instruction registers
 * @cntr0:	Counter 0 value
 * @cntr1:	Counter 1 value
 * @cntr2:	Counter 2 value
 * @jump:	Jump instruction value
 *
 * Load instruction registers.
 */
static void rw_mgr_mem_init_load_regs(u32 cntr0, u32 cntr1, u32 cntr2, u32 jump)
{
	uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			   RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	/* Load counters */
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr0),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr1),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr2),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	/* Execute count instruction */
	writel(jump, grpaddr);
}

/**
 * rw_mgr_mem_load_user() - Load user calibration values
 * @fin1:	Final instruction 1
 * @fin2:	Final instruction 2
 * @precharge:	If 1, precharge the banks at the end
 *
 * Load user calibration values and optionally precharge the banks.
 */
static void rw_mgr_mem_load_user(const u32 fin1, const u32 fin2,
				 const int precharge)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	u32 r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		if (param->skip_ranks[r]) {
			/* request to skip the rank */
			continue;
		}

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		if (precharge)
			writel(RW_MGR_PRECHARGE_ALL, grpaddr);

		/*
		 * Use mirrored commands for odd ranks if address
		 * mirroring is on.
		 */
		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
			set_jump_as_return();
			writel(RW_MGR_MRS2_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(fin1, grpaddr);
		} else {
			set_jump_as_return();
			writel(RW_MGR_MRS2, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1, grpaddr);
			set_jump_as_return();
			writel(fin2, grpaddr);
		}

		if (precharge)
			continue;

		set_jump_as_return();
		writel(RW_MGR_ZQCL, grpaddr);

		/* tZQinit = tDLLK = 512 ck cycles */
		delay_for_n_mem_clocks(512);
	}
}

/**
 * rw_mgr_mem_initialize() - Initialize RW Manager
 *
 * Initialize RW Manager.
 */
static void rw_mgr_mem_initialize(void)
{
	debug("%s:%d\n", __func__, __LINE__);

	/* The reset / cke part of initialization is broadcast to all ranks */
	writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);

	/*
	 * Here's how you load register for a loop
	 * Counters are located @ 0x800
	 * Jump address are located @ 0xC00
	 * For both, registers 0 to 3 are selected using bits 3 and 2, like
	 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
	 * I know this ain't pretty, but Avalon bus throws away the 2 least
	 * significant bits
	 */

	/* Start with memory RESET activated */

	/* tINIT = 200us */

	/*
	 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
	 * If a and b are the number of iterations in 2 nested loops,
	 * it takes the following number of cycles to complete the operation:
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop
	 * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
	 * b = 6A
	 */
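
	/*
	 * Worked check: ((2 + 0) * 256 + 2) * 106 = 54,484 cycles,
	 * comfortably above the ~54,000 cycles needed for 200us.
	 */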
	rw_mgr_mem_init_load_regs(SEQ_TINIT_CNTR0_VAL, SEQ_TINIT_CNTR1_VAL,
				  SEQ_TINIT_CNTR2_VAL,
				  RW_MGR_INIT_RESET_0_CKE_0);

	/* Indicate that memory is stable. */
	writel(1, &phy_mgr_cfg->reset_mem_stbl);

	/*
	 * Transition the RESET to high.
	 * Wait for 500us.
	 */

	/*
	 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
	 * If a and b are the number of iterations in 2 nested loops,
	 * it takes the following number of cycles to complete the operation:
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop
	 * One possible solution is n = 2 , a = 131 , b = 256 => a = 83,
	 * b = FF
	 */
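
	/*
	 * Worked check: ((2 + 2) * 131 + 2) * 256 = 134,656 cycles,
	 * just above the ~134,000 cycles needed for 500us.
	 */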
	rw_mgr_mem_init_load_regs(SEQ_TRESET_CNTR0_VAL, SEQ_TRESET_CNTR1_VAL,
				  SEQ_TRESET_CNTR2_VAL,
				  RW_MGR_INIT_RESET_1_CKE_0);

	/* Bring up clock enable. */

	/* tXRP < 250 ck cycles */
	delay_for_n_mem_clocks(250);

	rw_mgr_mem_load_user(RW_MGR_MRS0_DLL_RESET_MIRR, RW_MGR_MRS0_DLL_RESET,
			     0);
}

/*
 * At the end of calibration we have to program the user settings in and
 * hand off the memory to the user.
 */
static void rw_mgr_mem_handoff(void)
{
	rw_mgr_mem_load_user(RW_MGR_MRS0_USER_MIRR, RW_MGR_MRS0_USER, 1);
	/*
	 * Need to wait tMOD (12CK or 15ns) time before issuing other
	 * commands, but we will have plenty of NIOS cycles before actual
	 * handoff, so it's okay.
	 */
}

/**
 * rw_mgr_mem_calibrate_read_test_patterns() - Read back test patterns
 * @rank_bgn:	Rank number
 * @group:	Read/Write Group
 * @all_ranks:	Test all ranks
 *
 * Performs a guaranteed read on the patterns we are going to use during a
 * read test to ensure memory works.
 */
static int
rw_mgr_mem_calibrate_read_test_patterns(const u32 rank_bgn, const u32 group,
					const u32 all_ranks)
{
	const u32 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			 RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	const u32 addr_offset =
		(group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS) << 2;
	const u32 rank_end = all_ranks ?
				RW_MGR_MEM_NUMBER_OF_RANKS :
				(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	const u32 shift_ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
				RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
	const u32 correct_mask_vg = param->read_correct_mask_vg;

	u32 tmp_bit_chk, base_rw_mgr, bit_chk;
	int vg, r;
	int ret = 0;

	bit_chk = param->read_correct_mask;

	for (r = rank_bgn; r < rank_end; r++) {
		/* Request to skip the rank */
		if (param->skip_ranks[r])
			continue;

		/* Set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant burst of read commands */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(RW_MGR_GUARANTEED_READ,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_GUARANTEED_READ_CONT,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1;
		     vg >= 0; vg--) {
			/* Reset the FIFOs to get pointers to known state. */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);
			writel(RW_MGR_GUARANTEED_READ,
			       addr + addr_offset + (vg << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk <<= shift_ratio;
			tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr;
		}

		bit_chk &= tmp_bit_chk;
	}

	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);

	if (bit_chk != param->read_correct_mask)
		ret = -EIO;

	debug_cond(DLEVEL == 1,
		   "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %i\n",
		   __func__, __LINE__, group, bit_chk,
		   param->read_correct_mask, ret);

	return ret;
}

/**
 * rw_mgr_mem_calibrate_read_load_patterns() - Load up the patterns for read test
 * @rank_bgn:	Rank number
 * @all_ranks:	Test all ranks
 *
 * Load up the patterns we are going to use during a read test.
 */
static void rw_mgr_mem_calibrate_read_load_patterns(const u32 rank_bgn,
						    const int all_ranks)
{
	const u32 rank_end = all_ranks ?
				RW_MGR_MEM_NUMBER_OF_RANKS :
				(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	u32 r;

	debug("%s:%d\n", __func__, __LINE__);

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant burst */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT0,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT3,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add3);

		writel(RW_MGR_GUARANTEED_WRITE, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	}

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
}

/*
 * try a read and see if it returns correct data back. has dummy reads
 * inserted into the mix used to align dqs enable. has more thorough checks
 * than the regular read test.
 */
static uint32_t rw_mgr_mem_calibrate_read_test(uint32_t rank_bgn, uint32_t group,
	uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
	uint32_t all_groups, uint32_t all_ranks)
{
	uint32_t r, vg;
	uint32_t correct_mask_vg;
	uint32_t tmp_bit_chk;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	uint32_t addr;
	uint32_t base_rw_mgr;
	uint32_t quick_read_mode;

	*bit_chk = param->read_correct_mask;
	correct_mask_vg = param->read_correct_mask_vg;

	quick_read_mode = (((STATIC_CALIB_STEPS) &
		CALIB_SKIP_DELAY_SWEEPS) && ENABLE_SUPER_QUICK_CALIBRATION);

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_READ_B2B_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
		writel(RW_MGR_READ_B2B_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		if (quick_read_mode)
			writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
			/* need at least two (1+1) reads to capture failures */
		else if (all_groups)
			writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
		else
			writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(RW_MGR_READ_B2B,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
		if (all_groups)
			writel(RW_MGR_MEM_IF_READ_DQS_WIDTH *
			       RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1,
			       &sdr_rw_load_mgr_regs->load_cntr3);
		else
			writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(RW_MGR_READ_B2B,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add3);

		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; ; vg--) {
			/* reset the fifos to get pointers to known state */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);

			tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
				/ RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);

			if (all_groups)
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_ALL_GROUPS_OFFSET;
			else
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_SINGLE_GROUP_OFFSET;

			writel(RW_MGR_READ_B2B, addr +
			       ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
				 vg) << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr));

			if (vg == 0)
				break;
		}
		*bit_chk &= tmp_bit_chk;
	}

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	if (all_correct) {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2,
			   "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %i\n",
			   __func__, __LINE__, group, all_groups, *bit_chk,
			   param->read_correct_mask,
			   *bit_chk == param->read_correct_mask);
		return *bit_chk == param->read_correct_mask;
	} else	{
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2,
			   "%s:%d read_test(%u,ONE,%u) => (%u != %u) => %i\n",
			   __func__, __LINE__, group, all_groups, *bit_chk,
			   0, *bit_chk != 0x00);
		return *bit_chk != 0x00;
	}
}

static uint32_t rw_mgr_mem_calibrate_read_test_all_ranks(uint32_t group,
	uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
	uint32_t all_groups)
{
	return rw_mgr_mem_calibrate_read_test(0, group, num_tries, all_correct,
					      bit_chk, all_groups, 1);
}

/**
 * rw_mgr_incr_vfifo() - Increase VFIFO value
 * @grp:	Read/Write group
 *
 * Increase VFIFO value.
 */
static void rw_mgr_incr_vfifo(const u32 grp)
{
	writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
}

/**
 * rw_mgr_decr_vfifo() - Decrease VFIFO value
 * @grp:	Read/Write group
 *
 * Decrease VFIFO value.
 */
static void rw_mgr_decr_vfifo(const u32 grp)
{
	u32 i;

	for (i = 0; i < VFIFO_SIZE - 1; i++)
		rw_mgr_incr_vfifo(grp);
}

/**
 * find_vfifo_failing_read() - Push VFIFO to get a failing read
 * @grp:	Read/Write group
 *
 * Push VFIFO until a failing read happens.
 */
static int find_vfifo_failing_read(const u32 grp)
{
	u32 v, ret, bit_chk, fail_cnt = 0;

	for (v = 0; v < VFIFO_SIZE; v++) {
		debug_cond(DLEVEL == 2, "%s:%d: vfifo %u\n",
			   __func__, __LINE__, v);
		ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
				PASS_ONE_BIT, &bit_chk, 0);
		if (!ret) {
			fail_cnt++;

			if (fail_cnt == 2)
				return v;
		}

		/* Fiddle with FIFO. */
		rw_mgr_incr_vfifo(grp);
	}

	/* No failing read found! Something must have gone wrong. */
	debug_cond(DLEVEL == 2, "%s:%d: vfifo failed\n", __func__, __LINE__);
	return 0;
}

/**
 * sdr_find_phase() - Find DQS enable phase
 * @working:	If 1, look for working phase, if 0, look for non-working phase
 * @grp:	Read/Write group
 * @work:	Working window position
 * @i:		Iterator
 * @p:		DQS Phase Iterator
 *
 * Find working or non-working DQS enable phase setting.
 */
static int sdr_find_phase(int working, const u32 grp, u32 *work,
			  u32 *i, u32 *p)
{
	u32 ret, bit_chk;
	const u32 end = VFIFO_SIZE + (working ? 0 : 1);

	for (; *i < end; (*i)++) {
		if (working)
			*p = 0;

		for (; *p <= IO_DQS_EN_PHASE_MAX; (*p)++) {
			scc_mgr_set_dqs_en_phase_all_ranks(grp, *p);

			ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
					PASS_ONE_BIT, &bit_chk, 0);
			if (!working)
				ret = !ret;

			if (ret)
				return 0;

			*work += IO_DELAY_PER_OPA_TAP;
		}

		if (*p > IO_DQS_EN_PHASE_MAX) {
			/* Fiddle with FIFO. */
			rw_mgr_incr_vfifo(grp);
			if (!working)
				*p = 0;
		}
	}

	return -EINVAL;
}

/**
 * sdr_working_phase() - Find working DQS enable phase
 * @grp:	Read/Write group
 * @work_bgn:	Working window start position
 * @d:		dtaps output value
 * @p:		DQS Phase Iterator
 * @i:		Iterator
 *
 * Find working DQS enable phase setting.
 */
static int sdr_working_phase(const u32 grp, u32 *work_bgn, u32 *d,
			     u32 *p, u32 *i)
{
	const u32 dtaps_per_ptap = IO_DELAY_PER_OPA_TAP /
				   IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
	int ret;

	*work_bgn = 0;

	for (*d = 0; *d <= dtaps_per_ptap; (*d)++) {
		*i = 0;
		scc_mgr_set_dqs_en_delay_all_ranks(grp, *d);
		ret = sdr_find_phase(1, grp, work_bgn, i, p);
		if (!ret)
			return 0;
		*work_bgn += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
	}

	/* Cannot find working solution */
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
		   __func__, __LINE__);

	return -EINVAL;
}

/**
 * sdr_backup_phase() - Find DQS enable backup phase
 * @grp:	Read/Write group
 * @work_bgn:	Working window start position
 * @p:		DQS Phase Iterator
 *
 * Find DQS enable backup phase setting.
 */
static void sdr_backup_phase(const u32 grp, u32 *work_bgn, u32 *p)
{
	u32 tmp_delay, bit_chk, d;
	int ret;

	/* Special case code for backing up a phase */
	if (*p == 0) {
		*p = IO_DQS_EN_PHASE_MAX;
		rw_mgr_decr_vfifo(grp);
	} else {
		(*p)--;
	}
	tmp_delay = *work_bgn - IO_DELAY_PER_OPA_TAP;
	scc_mgr_set_dqs_en_phase_all_ranks(grp, *p);

	for (d = 0; d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn; d++) {
		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

		ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
					PASS_ONE_BIT, &bit_chk, 0);
		if (ret) {
			*work_bgn = tmp_delay;
			break;
		}

		tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
	}

	/* Restore VFIFO to old state before we decremented it (if needed). */
	(*p)++;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		*p = 0;
		rw_mgr_incr_vfifo(grp);
	}

	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
}

/**
 * sdr_nonworking_phase() - Find non-working DQS enable phase
 * @grp:	Read/Write group
 * @work_end:	Working window end position
 * @p:		DQS Phase Iterator
 * @i:		Iterator
 *
 * Find non-working DQS enable phase setting.
 */
static int sdr_nonworking_phase(const u32 grp, u32 *work_end, u32 *p, u32 *i)
{
	int ret;

	(*p)++;
	*work_end += IO_DELAY_PER_OPA_TAP;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		/* Fiddle with FIFO. */
		*p = 0;
		rw_mgr_incr_vfifo(grp);
	}

	ret = sdr_find_phase(0, grp, work_end, i, p);
	if (ret) {
		/* Cannot see edge of failing read. */
		debug_cond(DLEVEL == 2, "%s:%d: end: failed\n",
			   __func__, __LINE__);
	}

	return ret;
}

/**
 * sdr_find_window_center() - Find center of the working DQS window.
 * @grp:	Read/Write group
 * @work_bgn:	First working settings
 * @work_end:	Last working settings
 *
 * Find center of the working DQS enable window.
 */
static int sdr_find_window_center(const u32 grp, const u32 work_bgn,
				  const u32 work_end)
{
	u32 bit_chk, work_mid;
	int tmp_delay = 0;
	int i, p, d;

	work_mid = (work_bgn + work_end) / 2;

	debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
		   work_bgn, work_end, work_mid);
	/* Get the middle delay to be less than a VFIFO delay */
	tmp_delay = (IO_DQS_EN_PHASE_MAX + 1) * IO_DELAY_PER_OPA_TAP;

	debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
	work_mid %= tmp_delay;
	debug_cond(DLEVEL == 2, "new work_mid %d\n", work_mid);

	tmp_delay = rounddown(work_mid, IO_DELAY_PER_OPA_TAP);
	if (tmp_delay > IO_DQS_EN_PHASE_MAX * IO_DELAY_PER_OPA_TAP)
		tmp_delay = IO_DQS_EN_PHASE_MAX * IO_DELAY_PER_OPA_TAP;
	p = tmp_delay / IO_DELAY_PER_OPA_TAP;

	debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", p, tmp_delay);

	d = DIV_ROUND_UP(work_mid - tmp_delay, IO_DELAY_PER_DQS_EN_DCHAIN_TAP);
	if (d > IO_DQS_EN_DELAY_MAX)
		d = IO_DQS_EN_DELAY_MAX;
	tmp_delay += d * IO_DELAY_PER_DQS_EN_DCHAIN_TAP;

	debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", d, tmp_delay);

	scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
	scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

	/*
	 * Push vfifo until we can successfully calibrate. We can do this
	 * because the largest possible margin is 1 VFIFO cycle.
	 */
	for (i = 0; i < VFIFO_SIZE; i++) {
		debug_cond(DLEVEL == 2, "find_dqs_en_phase: center\n");
		if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							     PASS_ONE_BIT,
							     &bit_chk, 0)) {
			debug_cond(DLEVEL == 2,
				   "%s:%d center: found: ptap=%u dtap=%u\n",
				   __func__, __LINE__, p, d);
			return 0;
		}

		/* Fiddle with FIFO. */
		rw_mgr_incr_vfifo(grp);
	}

	debug_cond(DLEVEL == 2, "%s:%d center: failed.\n",
		   __func__, __LINE__);
	return -EINVAL;
}

/* find a good dqs enable to use */
static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp)
{
	uint32_t d, p, i;
	uint32_t dtaps_per_ptap;
	uint32_t work_bgn, work_end;
	uint32_t found_passing_read, found_failing_read, initial_failing_dtap;
	uint32_t bit_chk;

	debug("%s:%d %u\n", __func__, __LINE__, grp);

	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);

	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
	scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);

	/* ************************************************************** */
	/* * Step 0 : Determine number of delay taps for each phase tap * */
	dtaps_per_ptap = IO_DELAY_PER_OPA_TAP / IO_DELAY_PER_DQS_EN_DCHAIN_TAP;

	/* ********************************************************* */
	/* * Step 1 : First push vfifo until we get a failing read * */
	find_vfifo_failing_read(grp);

	/* ******************************************************** */
	/* * step 2: find first working phase, increment in ptaps * */
	work_bgn = 0;
	if (sdr_working_phase(grp, &work_bgn, &d, &p, &i))
		return 0;

	work_end = work_bgn;

	/*
	 * If d is 0 then the working window covers a phase tap and we can
	 * follow the old procedure; otherwise, we've found the beginning
	 * and we need to increment the dtaps until we find the end.
	 */
	if (d == 0) {
		/* ********************************************************* */
		/* * step 3a: if we have room, back off by one and
		increment in dtaps * */
		sdr_backup_phase(grp, &work_bgn, &p);

		/* ********************************************************* */
		/* * step 4a: go forward from working phase to non working
		phase, increment in ptaps * */
		if (sdr_nonworking_phase(grp, &work_end, &p, &i))
			return 0;

		/* ********************************************************* */
		/* * step 5a: back off one from last, increment in dtaps * */

		/* Special case code for backing up a phase */
		if (p == 0) {
			p = IO_DQS_EN_PHASE_MAX;
			rw_mgr_decr_vfifo(grp);
		} else {
			p = p - 1;
		}

		work_end -= IO_DELAY_PER_OPA_TAP;
		scc_mgr_set_dqs_en_phase_all_ranks(grp, p);

		/* * The actual increment of dtaps is done outside of
		the if/else loop to share code */
		d = 0;

		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: p: ptap=%u\n",
			   __func__, __LINE__, p);
	} else {
		/* ******************************************************* */
		/* * step 3-5b: Find the right edge of the window using
		delay taps * */
		debug_cond(DLEVEL == 2,
			   "%s:%d find_dqs_en_phase: ptap=%u dtap=%u bgn=%u\n",
			   __func__, __LINE__, p, d, work_bgn);

		work_end = work_bgn;
	}

	/* The dtap increment to find the failing edge is done here */
	for (; d <= IO_DQS_EN_DELAY_MAX; d++, work_end +=
	     IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end-2: dtap=%u\n",
			   __func__, __LINE__, d);
		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

		if (!rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							      PASS_ONE_BIT,
							      &bit_chk, 0))
			break;
	}

	/* Go back to working dtap */
	if (d != 0)
		work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP;

	debug_cond(DLEVEL == 2,
		   "%s:%d find_dqs_en_phase: p/d: ptap=%u dtap=%u end=%u\n",
		   __func__, __LINE__, p, d - 1, work_end);

	if (work_end < work_bgn) {
		/* nil range */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end-2: failed\n",
			   __func__, __LINE__);
		return 0;
	}

	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: found range [%u,%u]\n",
		   __func__, __LINE__, work_bgn, work_end);

	/* *************************************************************** */
	/*
	 * * We need to calculate the number of dtaps that equal a ptap.
	 * * To do that we'll back up a ptap and re-find the edge of the
	 * * window using dtaps
	 */
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: calculate dtaps_per_ptap for tracking\n",
		   __func__, __LINE__);

	/* Special case code for backing up a phase */
	if (p == 0) {
		p = IO_DQS_EN_PHASE_MAX;
		rw_mgr_decr_vfifo(grp);
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup cycle/phase: p=%u\n",
			   __func__, __LINE__, p);
	} else {
		p = p - 1;
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup phase only: p=%u",
			   __func__, __LINE__, p);
	}

	scc_mgr_set_dqs_en_phase_all_ranks(grp, p);

	/*
	 * Increase dtap until we first see a passing read (in case the
	 * window is smaller than a ptap), and then a failing read to
	 * mark the edge of the window again.
	 */

	/* Find a passing read */
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find passing read\n",
		   __func__, __LINE__);
	found_passing_read = 0;
	found_failing_read = 0;
	initial_failing_dtap = d;
	for (; d <= IO_DQS_EN_DELAY_MAX; d++) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: testing read d=%u\n",
			   __func__, __LINE__, d);
		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

		if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							     PASS_ONE_BIT,
							     &bit_chk, 0)) {
			found_passing_read = 1;
			break;
		}
	}

	if (found_passing_read) {
		/* Find a failing read */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find failing read\n",
			   __func__, __LINE__);
		for (d = d + 1; d <= IO_DQS_EN_DELAY_MAX; d++) {
			debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: testing read d=%u\n",
				   __func__, __LINE__, d);
			scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

			if (!rw_mgr_mem_calibrate_read_test_all_ranks
				(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) {
				found_failing_read = 1;
				break;
			}
		}
	} else {
		debug_cond(DLEVEL == 1, "%s:%d find_dqs_en_phase: failed to calculate dtaps per ptap. Fall back on static value\n",
			   __func__, __LINE__);
	}

	/*
	 * The dynamically calculated dtaps_per_ptap is only valid if we
	 * found a passing/failing read. If we didn't, it means d hit the max
	 * (IO_DQS_EN_DELAY_MAX). Otherwise, dtaps_per_ptap retains its
	 * statically calculated value.
	 */
	if (found_passing_read && found_failing_read)
		dtaps_per_ptap = d - initial_failing_dtap;

	writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: dtaps_per_ptap=%u - %u = %u",
		   __func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap);

	/* ******************************************** */
	/* * step 6:  Find the centre of the window   * */
	if (sdr_find_window_center(grp, work_bgn, work_end))
		return 0; /* FIXME: Old code, return 0 means failure :-( */

	return 1;
}

/* per-bit deskew DQ and center */
static uint32_t rw_mgr_mem_calibrate_vfifo_center(uint32_t rank_bgn,
	uint32_t write_group, uint32_t read_group, uint32_t test_bgn,
	uint32_t use_read_test, uint32_t update_fom)
{
	uint32_t i, p, d, min_index;
	/*
	 * Store these as signed since there are comparisons with
	 * signed numbers.
	 */
	uint32_t bit_chk;
	uint32_t sticky_bit_chk;
	int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t final_dq[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t mid;
	int32_t orig_mid_min, mid_min;
	int32_t new_dqs, start_dqs, start_dqs_en, shift_dq, final_dqs,
		final_dqs_en;
	int32_t dq_margin, dqs_margin;
	uint32_t stop;
	uint32_t temp_dq_in_delay1, temp_dq_in_delay2;
	uint32_t addr;

	debug("%s:%d: %u %u", __func__, __LINE__, read_group, test_bgn);

	addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_IN_DELAY_OFFSET;
	start_dqs = readl(addr + (read_group << 2));
	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
		start_dqs_en = readl(addr + ((read_group << 2)
				     - IO_DQS_EN_DELAY_OFFSET));

	/* set the left and right edge of each bit to an illegal value */
	/* use (IO_IO_IN_DELAY_MAX + 1) as an illegal value */
	sticky_bit_chk = 0;
	for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
		left_edge[i] = IO_IO_IN_DELAY_MAX + 1;
		right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
	}

	/* Search for the left edge of the window for each bit */
	for (d = 0; d <= IO_IO_IN_DELAY_MAX; d++) {
		scc_mgr_apply_group_dq_in_delay(test_bgn, d);

		writel(0, &sdr_scc_mgr->update);

		/*
		 * Stop searching when the read test doesn't pass AND when
		 * we've seen a passing read on every bit.
		 */
		if (use_read_test) {
			stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
				read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
				&bit_chk, 0, 0);
		} else {
			rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
							0, PASS_ONE_BIT,
							&bit_chk, 0);
			bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
				(read_group - (write_group *
					RW_MGR_MEM_IF_READ_DQS_WIDTH /
					RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
			stop = (bit_chk == 0);
		}
		sticky_bit_chk = sticky_bit_chk | bit_chk;
		stop = stop && (sticky_bit_chk == param->read_correct_mask);
		debug_cond(DLEVEL == 2, "%s:%d vfifo_center(left): dtap=%u => %u == %u && %u",
			   __func__, __LINE__, d,
			   sticky_bit_chk, param->read_correct_mask, stop);

		if (stop == 1)
			break;

		for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
			if (bit_chk & 1) {
				/*
				 * Remember a passing test as the
				 * left_edge.
				 */
				left_edge[i] = d;
			} else {
				/*
				 * If a left edge has not been seen yet,
				 * then a future passing test will mark
				 * this edge as the right edge.
				 */
				if (left_edge[i] == IO_IO_IN_DELAY_MAX + 1)
					right_edge[i] = -(d + 1);
			}
			bit_chk = bit_chk >> 1;
		}
	}

	/* Reset DQ delay chains to 0 */
	scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
	sticky_bit_chk = 0;
	for (i = RW_MGR_MEM_DQ_PER_READ_DQS - 1;; i--) {
		debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n",
			   __func__, __LINE__, i, left_edge[i], i, right_edge[i]);

		/*
		 * Check for cases where we haven't found the left edge,
		 * which makes our assignment of the right edge invalid.
		 * Reset it to the illegal value.
		 */
		if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) &&
		    (right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) {
			right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
			debug_cond(DLEVEL == 2, "%s:%d vfifo_center: reset right_edge[%u]: %d\n",
				   __func__, __LINE__, i, right_edge[i]);
		}

		/*
		 * Reset sticky bit (except for bits where we have seen
		 * both the left and right edge).
		 */
		sticky_bit_chk = sticky_bit_chk << 1;
		if ((left_edge[i] != IO_IO_IN_DELAY_MAX + 1) &&
		    (right_edge[i] != IO_IO_IN_DELAY_MAX + 1))
			sticky_bit_chk = sticky_bit_chk | 1;

		if (i == 0)
			break;
	}

	/* Search for the right edge of the window for each bit */
	for (d = 0; d <= IO_DQS_IN_DELAY_MAX - start_dqs; d++) {
		scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
		if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
			uint32_t delay = d + start_dqs_en;
			if (delay > IO_DQS_EN_DELAY_MAX)
				delay = IO_DQS_EN_DELAY_MAX;
			scc_mgr_set_dqs_en_delay(read_group, delay);
		}
		scc_mgr_load_dqs(read_group);

		writel(0, &sdr_scc_mgr->update);

		/*
		 * Stop searching when the read test doesn't pass AND when
		 * we've seen a passing read on every bit.
		 */
		if (use_read_test) {
			stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
				read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
				&bit_chk, 0, 0);
		} else {
			rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
							0, PASS_ONE_BIT,
							&bit_chk, 0);
			bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
				(read_group - (write_group *
					RW_MGR_MEM_IF_READ_DQS_WIDTH /
					RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
			stop = (bit_chk == 0);
		}
		sticky_bit_chk = sticky_bit_chk | bit_chk;
		stop = stop && (sticky_bit_chk == param->read_correct_mask);

		debug_cond(DLEVEL == 2, "%s:%d vfifo_center(right): dtap=%u => %u == %u && %u",
			   __func__, __LINE__, d,
			   sticky_bit_chk, param->read_correct_mask, stop);

		if (stop == 1)
			break;

		for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
			if (bit_chk & 1) {
				/*
				 * Remember a passing test as
				 * the right_edge.
				 */
				right_edge[i] = d;
			} else if (d != 0) {
				/*
				 * If a right edge has not been
				 * seen yet, then a future passing
				 * test will mark this edge as the
				 * left edge.
				 */
				if (right_edge[i] == IO_IO_IN_DELAY_MAX + 1)
					left_edge[i] = -(d + 1);
			} else {
				/*
				 * d = 0 failed, but it passed
				 * when testing the left edge,
				 * so it must be marginal;
				 * set it to -1.
				 */
				if (right_edge[i] == IO_IO_IN_DELAY_MAX + 1 &&
				    left_edge[i] != IO_IO_IN_DELAY_MAX + 1)
					right_edge[i] = -1;
				/*
				 * If a right edge has not been
				 * seen yet, then a future passing
				 * test will mark this edge as the
				 * left edge.
				 */
				else if (right_edge[i] == IO_IO_IN_DELAY_MAX + 1)
					left_edge[i] = -(d + 1);
			}

			debug_cond(DLEVEL == 2, "%s:%d vfifo_center[r,d=%u]: ",
				   __func__, __LINE__, d);
			debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d ",
				   (int)(bit_chk & 1), i, left_edge[i]);
			debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
				   right_edge[i]);
			bit_chk = bit_chk >> 1;
		}
	}

	/* Check that all bits have a window */
	for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
		debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d",
			   __func__, __LINE__, i, left_edge[i], i, right_edge[i]);
		if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) ||
		    (right_edge[i] == IO_IO_IN_DELAY_MAX + 1)) {
			/*
			 * Restore delay chain settings before letting the loop
			 * in rw_mgr_mem_calibrate_vfifo retry different
			 * dqs/ck relationships.
			 */
			scc_mgr_set_dqs_bus_in_delay(read_group, start_dqs);
			if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
				scc_mgr_set_dqs_en_delay(read_group,
							 start_dqs_en);
			}
			scc_mgr_load_dqs(read_group);
			writel(0, &sdr_scc_mgr->update);

			debug_cond(DLEVEL == 1, "%s:%d vfifo_center: failed to find edge [%u]: %d %d",
				   __func__, __LINE__, i, left_edge[i], right_edge[i]);
			if (use_read_test) {
				set_failing_group_stage(read_group *
					RW_MGR_MEM_DQ_PER_READ_DQS + i,
					CAL_STAGE_VFIFO,
					CAL_SUBSTAGE_VFIFO_CENTER);
			} else {
				set_failing_group_stage(read_group *
					RW_MGR_MEM_DQ_PER_READ_DQS + i,
					CAL_STAGE_VFIFO_AFTER_WRITES,
					CAL_SUBSTAGE_VFIFO_CENTER);
			}
			return 0;
		}
	}

	/* Find middle of window for each DQ bit */
	mid_min = left_edge[0] - right_edge[0];
	min_index = 0;
	for (i = 1; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
		mid = left_edge[i] - right_edge[i];
		if (mid < mid_min) {
			mid_min = mid;
			min_index = i;
		}
	}

	/*
	 * -mid_min/2 represents the amount that we need to move DQS.
	 * If mid_min is odd and positive we'll need to add one to
	 * make sure the rounding in further calculations is correct
	 * (always bias to the right), so just add 1 for all positive values.
	 */
	if (mid_min > 0)
		mid_min++;

	mid_min = mid_min / 2;
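
	/*
	 * E.g. mid_min = 5 becomes (5 + 1) / 2 = 3 rather than 2, keeping
	 * the DQS shift biased to the right as described above.
	 */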

	debug_cond(DLEVEL == 1, "%s:%d vfifo_center: mid_min=%d (index=%u)\n",
		   __func__, __LINE__, mid_min, min_index);

	/* Determine the amount we can change DQS (which is -mid_min) */
	orig_mid_min = mid_min;
	new_dqs = start_dqs - mid_min;
	if (new_dqs > IO_DQS_IN_DELAY_MAX)
		new_dqs = IO_DQS_IN_DELAY_MAX;
	else if (new_dqs < 0)
		new_dqs = 0;

	mid_min = start_dqs - new_dqs;
	debug_cond(DLEVEL == 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
		   mid_min, new_dqs);

	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
		if (start_dqs_en - mid_min > IO_DQS_EN_DELAY_MAX)
			mid_min += start_dqs_en - mid_min - IO_DQS_EN_DELAY_MAX;
		else if (start_dqs_en - mid_min < 0)
			mid_min += start_dqs_en - mid_min;
	}
	new_dqs = start_dqs - mid_min;

	debug_cond(DLEVEL == 1, "vfifo_center: start_dqs=%d start_dqs_en=%d new_dqs=%d mid_min=%d\n",
		   start_dqs,
		   IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? start_dqs_en : -1,
		   new_dqs, mid_min);

	/* Initialize data for export structures */
	dqs_margin = IO_IO_IN_DELAY_MAX + 1;
	dq_margin = IO_IO_IN_DELAY_MAX + 1;

	/* add delay to bring centre of all DQ windows to the same "level" */
	for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
		/* Use values before divide by 2 to reduce round off error */
		shift_dq = (left_edge[i] - right_edge[i] -
			(left_edge[min_index] - right_edge[min_index]))/2 +
			(orig_mid_min - mid_min);

		debug_cond(DLEVEL == 2, "vfifo_center: before: shift_dq[%u]=%d\n",
			   i, shift_dq);

		addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_IN_DELAY_OFFSET;
		temp_dq_in_delay1 = readl(addr + (p << 2));
		temp_dq_in_delay2 = readl(addr + (i << 2));

		if (shift_dq + (int32_t)temp_dq_in_delay1 >
			(int32_t)IO_IO_IN_DELAY_MAX) {
			shift_dq = (int32_t)IO_IO_IN_DELAY_MAX - temp_dq_in_delay2;
		} else if (shift_dq + (int32_t)temp_dq_in_delay1 < 0) {
			shift_dq = -(int32_t)temp_dq_in_delay1;
		}
		debug_cond(DLEVEL == 2, "vfifo_center: after: shift_dq[%u]=%d\n",
			   i, shift_dq);
		final_dq[i] = temp_dq_in_delay1 + shift_dq;
		scc_mgr_set_dq_in_delay(p, final_dq[i]);
		scc_mgr_load_dq(p);

		debug_cond(DLEVEL == 2, "vfifo_center: margin[%u]=[%d,%d]\n", i,
			   left_edge[i] - shift_dq + (-mid_min),
			   right_edge[i] + shift_dq - (-mid_min));
		/* To determine values for export structures */
		if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
			dq_margin = left_edge[i] - shift_dq + (-mid_min);

		if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
			dqs_margin = right_edge[i] + shift_dq - (-mid_min);
	}

	final_dqs = new_dqs;
	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
		final_dqs_en = start_dqs_en - mid_min;

	/* Move DQS-en */
	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
		scc_mgr_set_dqs_en_delay(read_group, final_dqs_en);
		scc_mgr_load_dqs(read_group);
	}

	/* Move DQS */
	scc_mgr_set_dqs_bus_in_delay(read_group, final_dqs);
	scc_mgr_load_dqs(read_group);
	debug_cond(DLEVEL == 2, "%s:%d vfifo_center: dq_margin=%d dqs_margin=%d",
		   __func__, __LINE__, dq_margin, dqs_margin);

	/*
	 * Do not remove this line as it makes sure all of our decisions
	 * have been applied. Apply the update bit.
	 */
	writel(0, &sdr_scc_mgr->update);

	return (dq_margin >= 0) && (dqs_margin >= 0);
}

/**
 * rw_mgr_mem_calibrate_guaranteed_write() - Perform guaranteed write into the device
 * @rw_group:	Read/Write Group
 * @phase:	DQ/DQS phase
 *
 * Because initially no communication can be reliably performed with the memory
 * device, the sequencer uses a guaranteed write mechanism to write data into
 * the memory device.
 */
static int rw_mgr_mem_calibrate_guaranteed_write(const u32 rw_group,
						 const u32 phase)
{
	int ret;

	/* Set a particular DQ/DQS phase. */
	scc_mgr_set_dqdqs_output_phase_all_ranks(rw_group, phase);

	debug_cond(DLEVEL == 1, "%s:%d guaranteed write: g=%u p=%u\n",
		   __func__, __LINE__, rw_group, phase);

	/*
	 * Altera EMI_RM 2015.05.04 :: Figure 1-25
	 * Load up the patterns used by read calibration using the
	 * current DQDQS phase.
	 */
	rw_mgr_mem_calibrate_read_load_patterns(0, 1);

	if (gbl->phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ)
		return 0;

	/*
	 * Altera EMI_RM 2015.05.04 :: Figure 1-26
	 * Back-to-Back reads of the patterns used for calibration.
	 */
	ret = rw_mgr_mem_calibrate_read_test_patterns(0, rw_group, 1);
	if (ret)
		debug_cond(DLEVEL == 1,
			   "%s:%d Guaranteed read test failed: g=%u p=%u\n",
			   __func__, __LINE__, rw_group, phase);

	return ret;
}

/**
 * rw_mgr_mem_calibrate_dqs_enable_calibration() - DQS Enable Calibration
 * @rw_group:	Read/Write Group
 * @test_bgn:	Rank at which the test begins
 *
 * DQS enable calibration ensures reliable capture of the DQ signal without
 * glitches on the DQS line.
 */
static int rw_mgr_mem_calibrate_dqs_enable_calibration(const u32 rw_group,
						       const u32 test_bgn)
{
	/*
	 * Altera EMI_RM 2015.05.04 :: Figure 1-27
	 * DQS and DQS Enable Signal Relationships.
	 */

	/* We start at zero, so we have one less dq to divide among. */
	const u32 delay_step = IO_IO_IN_DELAY_MAX /
			       (RW_MGR_MEM_DQ_PER_READ_DQS - 1);
	int found;
	u32 i, p, d, r;
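	/*
	 * For instance, with common Cyclone V settings (assumed here)
	 * IO_IO_IN_DELAY_MAX = 31 and RW_MGR_MEM_DQ_PER_READ_DQS = 8, the
	 * eight DQ pins get staggered input delays of 0, 4, 8, ... 28 taps
	 * (delay_step = 31 / 7 = 4), spreading the bits across the delay
	 * range for the DQS enable search below.
	 */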

	debug("%s:%d (%u,%u)\n", __func__, __LINE__, rw_group, test_bgn);

	/* Try different dq_in_delays since the DQ path is shorter than DQS. */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0, p = test_bgn, d = 0;
		     i < RW_MGR_MEM_DQ_PER_READ_DQS;
		     i++, p++, d += delay_step) {
			debug_cond(DLEVEL == 1,
				   "%s:%d: g=%u r=%u i=%u p=%u d=%u\n",
				   __func__, __LINE__, rw_group, r, i, p, d);
			scc_mgr_set_dq_in_delay(p, d);
		}

		writel(0, &sdr_scc_mgr->update);
	}

	/*
	 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
	 * dq_in_delay values
	 */
	found = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(rw_group);

	debug_cond(DLEVEL == 1,
		   "%s:%d: g=%u found=%u; Resetting delay chain to zero\n",
		   __func__, __LINE__, rw_group, found);

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
		writel(0, &sdr_scc_mgr->update);
	}

	if (!found)
		return -EINVAL;

	return 0;
}

/**
 * rw_mgr_mem_calibrate_dq_dqs_centering() - Centering DQ/DQS
 * @rw_group:		Read/Write Group
 * @test_bgn:		Rank at which the test begins
 * @use_read_test:	Perform a read test
 * @update_fom:		Update FOM
 *
 * The centering DQ/DQS stage attempts to align DQ and DQS signals on reads
 * within a group.
 */
static int
rw_mgr_mem_calibrate_dq_dqs_centering(const u32 rw_group, const u32 test_bgn,
				      const int use_read_test,
				      const int update_fom)
{
	int ret, grp_calibrated;
	u32 rank_bgn, sr;

	/*
	 * Altera EMI_RM 2015.05.04 :: Figure 1-28
	 * Read per-bit deskew can be done on a per shadow register basis.
	 */
	grp_calibrated = 1;
	for (rank_bgn = 0, sr = 0;
	     rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
	     rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
		/* Check if this set of ranks should be skipped entirely. */
		if (param->skip_shadow_regs[sr])
			continue;

		ret = rw_mgr_mem_calibrate_vfifo_center(rank_bgn, rw_group,
							test_bgn, use_read_test,
							update_fom);
		if (ret)
			continue;

		grp_calibrated = 0;
	}

	if (!grp_calibrated)
		return -EIO;

	return 0;
}

/**
 * rw_mgr_mem_calibrate_vfifo() - Calibrate the read valid prediction FIFO
 * @rw_group:	Read/Write Group
 * @test_bgn:	Rank at which the test begins
 *
 * Stage 1: Calibrate the read valid prediction FIFO.
 *
 * This function implements UniPHY calibration Stage 1, as explained in
 * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
 *
 * - read valid prediction will consist of finding:
 *   - DQS enable phase and DQS enable delay (DQS Enable Calibration)
 *   - DQS input phase and DQS input delay (DQ/DQS Centering)
 * - we also do a per-bit deskew on the DQ lines.
 */
static int rw_mgr_mem_calibrate_vfifo(const u32 rw_group, const u32 test_bgn)
{
	uint32_t p, d;
	uint32_t dtaps_per_ptap;
	uint32_t failed_substage;

	int ret;

	debug("%s:%d: %u %u\n", __func__, __LINE__, rw_group, test_bgn);

	/* Update info for sims */
	reg_file_set_group(rw_group);
	reg_file_set_stage(CAL_STAGE_VFIFO);
	reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);

	failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;

	/* USER Determine number of delay taps for each phase tap. */
	dtaps_per_ptap = DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP,
				      IO_DELAY_PER_DQS_EN_DCHAIN_TAP) - 1;
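	/*
	 * Example with assumed generation-time constants: if one phase tap
	 * (IO_DELAY_PER_OPA_TAP) is 416 ps and one DQS-enable delay-chain
	 * tap (IO_DELAY_PER_DQS_EN_DCHAIN_TAP) is 100 ps, then
	 * DIV_ROUND_UP(416, 100) - 1 = 5 - 1 = 4, so the sweep below steps
	 * the delay 'd' through 0, 2, 4 to cover one full phase tap.
	 */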

	for (d = 0; d <= dtaps_per_ptap; d += 2) {
		/*
		 * In RLDRAMX we may be messing up the delay of pins in
		 * the same write group but outside of the current read
		 * group, but that's ok because we haven't calibrated the
		 * output side yet.
		 */
		if (d > 0) {
			scc_mgr_apply_group_all_out_delay_add_all_ranks(
								rw_group, d);
		}

		for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX; p++) {
			/* 1) Guaranteed Write */
			ret = rw_mgr_mem_calibrate_guaranteed_write(rw_group, p);
			if (ret)
				break;

			/* 2) DQS Enable Calibration */
			ret = rw_mgr_mem_calibrate_dqs_enable_calibration(rw_group,
									  test_bgn);
			if (ret) {
				failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
				continue;
			}

			/* 3) Centering DQ/DQS */
			/*
			 * If doing read after write calibration, do not update
			 * FOM now. Do it then.
			 */
			ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group,
								    test_bgn,
								    1, 0);
			if (ret) {
				failed_substage = CAL_SUBSTAGE_VFIFO_CENTER;
				continue;
			}

			/* All tests passed. */
			goto cal_done_ok;
		}
	}

	/* Calibration Stage 1 failed. */
	set_failing_group_stage(rw_group, CAL_STAGE_VFIFO, failed_substage);
	return 0;

	/* Calibration Stage 1 completed OK. */
cal_done_ok:
	/*
	 * Reset the delay chains back to zero if they have moved > 1
	 * (check for > 1 because loop will increase d even when pass in
	 * first case).
	 */
	if (d > 2)
		scc_mgr_zero_group(rw_group, 1);

	return 1;
}

/* VFIFO Calibration -- Read Deskew Calibration after write deskew */
static uint32_t rw_mgr_mem_calibrate_vfifo_end(uint32_t read_group,
					       uint32_t test_bgn)
{
	uint32_t rank_bgn, sr;
	uint32_t grp_calibrated;
	uint32_t write_group;

	debug("%s:%d %u %u", __func__, __LINE__, read_group, test_bgn);

	/* update info for sims */
	reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);

	write_group = read_group;

	/* update info for sims */
	reg_file_set_group(read_group);

	grp_calibrated = 1;
	/* Read per-bit deskew can be done on a per shadow register basis */
	for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
	     rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) {
		/* Determine if this set of ranks should be skipped entirely */
		if (!param->skip_shadow_regs[sr]) {
			/* This is the last calibration round, update FOM here */
			if (!rw_mgr_mem_calibrate_vfifo_center(rank_bgn,
							       read_group,
							       test_bgn, 0, 1))
				grp_calibrated = 0;
		}
	}

	if (grp_calibrated == 0) {
		set_failing_group_stage(write_group,
					CAL_STAGE_VFIFO_AFTER_WRITES,
					CAL_SUBSTAGE_VFIFO_CENTER);
		return 0;
	}

	return 1;
}

/* Calibrate LFIFO to find smallest read latency */
static uint32_t rw_mgr_mem_calibrate_lfifo(void)
{
	uint32_t found_one;

	debug("%s:%d\n", __func__, __LINE__);

	/* update info for sims */
	reg_file_set_stage(CAL_STAGE_LFIFO);
	reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);

	/* Load up the patterns used by read calibration for all ranks */
	rw_mgr_mem_calibrate_read_load_patterns(0, 1);
	found_one = 0;

	do {
		writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
		debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u",
			   __func__, __LINE__, gbl->curr_read_lat);

		if (!rw_mgr_mem_calibrate_read_test_all_ranks(0,
							      NUM_READ_TESTS,
							      PASS_ALL_BITS,
							      1))
			break;

		found_one = 1;
		/* Reduce read latency and see if things are working correctly. */
		gbl->curr_read_lat--;
	} while (gbl->curr_read_lat > 0);

	/* reset the fifos to get pointers to known state */
	writel(0, &phy_mgr_cmd->fifo_reset);

	if (found_one) {
		/* add a fudge factor to the read latency that was determined */
		gbl->curr_read_lat += 2;
		writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
		debug_cond(DLEVEL == 2, "%s:%d lfifo: success: using \
			   read_lat=%u\n", __func__, __LINE__,
			   gbl->curr_read_lat);
		return 1;
	} else {
		set_failing_group_stage(0xff, CAL_STAGE_LFIFO,
					CAL_SUBSTAGE_READ_LATENCY);

		debug_cond(DLEVEL == 2, "%s:%d lfifo: failed at initial \
			   read_lat=%u\n", __func__, __LINE__,
			   gbl->curr_read_lat);
		return 0;
	}
}

/*
 * issue write test command.
 * two variants are provided. one that just tests a write pattern and
 * another that tests datamask functionality.
 */
static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group,
						  uint32_t test_dm)
{
	uint32_t mcc_instruction;
	uint32_t quick_write_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES) &&
		ENABLE_SUPER_QUICK_CALIBRATION);
	uint32_t rw_wl_nop_cycles;
	uint32_t addr;

	/*
	 * Set counter and jump addresses for the right
	 * number of NOP cycles.
	 * The number of supported NOP cycles can range from -1 to infinity.
	 * Three different cases are handled:
	 *
	 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
	 *    mechanism will be used to insert the right number of NOPs.
	 *
	 * 2. For a number of NOP cycles equal to 0, the micro-instruction
	 *    issuing the write command will jump straight to the
	 *    micro-instruction that turns on DQS (for DDRx), or outputs write
	 *    data (for RLD), skipping
	 *    the NOP micro-instruction all together.
	 *
	 * 3. A number of NOP cycles equal to -1 indicates that DQS must be
	 *    turned on in the same micro-instruction that issues the write
	 *    command. Then we need
	 *    to directly jump to the micro-instruction that sends out the data.
	 *
	 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
	 *       (2 and 3). One jump-counter (0) is used to perform multiple
	 *       write-read operations.
	 *       one counter left to issue this command in "multiple-group" mode
	 */
	rw_wl_nop_cycles = gbl->rw_wl_nop_cycles;
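	/*
	 * Quick reference for the three cases handled below (counter/jump
	 * register usage as described in the comment above):
	 *
	 *   rw_wl_nop_cycles == -1: CNTR2 jumps to the DQS-on-with-write
	 *                           instruction, CNTR3 jumps to the data
	 *                           instruction (both always taken, 0xFF).
	 *   rw_wl_nop_cycles ==  0: CNTR2 (always taken) skips the NOP and
	 *                           jumps straight to the DQS instruction.
	 *   rw_wl_nop_cycles  >  0: CNTR2 = 0 (fall through), CNTR3 loops
	 *                           the NOP instruction rw_wl_nop_cycles - 1
	 *                           extra times.
	 */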

	if (rw_wl_nop_cycles == -1) {
		/*
		 * CNTR 2 - We want to execute the special write operation that
		 * turns on DQS right away and then skip directly to the
		 * instruction that sends out the data. We set the counter to a
		 * large number so that the jump is always taken.
		 */
		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);

		/* CNTR 3 - Not used */
		if (test_dm) {
			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1;
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		} else {
			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0_WL_1;
			writel(RW_MGR_LFSR_WR_RD_BANK_0_DATA,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
			writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		}
	} else if (rw_wl_nop_cycles == 0) {
		/*
		 * CNTR 2 - We want to skip the NOP operation and go straight
		 * to the DQS enable instruction. We set the counter to a large
		 * number so that the jump is always taken.
		 */
		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);

		/* CNTR 3 - Not used */
		if (test_dm) {
			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
		} else {
			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_BANK_0_DQS,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
		}
	} else {
		/*
		 * CNTR 2 - In this case we want to execute the next instruction
		 * and NOT take the jump. So we set the counter to 0. The jump
		 * address doesn't count.
		 */
		writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
		writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		/*
		 * CNTR 3 - Set the nop counter to the number of cycles we
		 * need to loop for, minus 1.
		 */
		writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
		if (test_dm) {
			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		} else {
			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		}
	}

	writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
		  RW_MGR_RESET_READ_DATAPATH_OFFSET);

	if (quick_write_mode)
		writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
	else
		writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);

	writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);

	/*
	 * CNTR 1 - This is used to ensure enough time elapses
	 * for read data to come back.
	 */
	writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);

	if (test_dm) {
		writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	} else {
		writel(RW_MGR_LFSR_WR_RD_BANK_0_WAIT,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	}

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(mcc_instruction, addr + (group << 2));
}

/* Test writes, can check for a single bit pass or multiple bit pass */
static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
	uint32_t write_group, uint32_t use_dm, uint32_t all_correct,
	uint32_t *bit_chk, uint32_t all_ranks)
{
	uint32_t r;
	uint32_t correct_mask_vg;
	uint32_t tmp_bit_chk;
	uint32_t vg;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	uint32_t addr_rw_mgr;
	uint32_t base_rw_mgr;

	*bit_chk = param->write_correct_mask;
	correct_mask_vg = param->write_correct_mask_vg;

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r]) {
			/* request to skip the rank */
			continue;
		}

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		tmp_bit_chk = 0;
		addr_rw_mgr = SDR_PHYGRP_RWMGRGRP_ADDRESS;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS-1; ; vg--) {
			/* reset the fifos to get pointers to known state */
			writel(0, &phy_mgr_cmd->fifo_reset);

			tmp_bit_chk = tmp_bit_chk <<
				(RW_MGR_MEM_DQ_PER_WRITE_DQS /
				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
			rw_mgr_mem_calibrate_write_test_issue(write_group *
				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS+vg,
				use_dm);

			base_rw_mgr = readl(addr_rw_mgr);
			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr));
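			/*
			 * Illustration with assumed widths (8 DQ per write
			 * DQS, 2 virtual groups): each pass shifts the
			 * accumulator left by 8 / 2 = 4 bits and ORs in the
			 * 4-bit pass mask of the current virtual group, so
			 * after both passes tmp_bit_chk holds one pass/fail
			 * bit per DQ pin of the write group.
			 */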
			if (vg == 0)
				break;
		}
		*bit_chk &= tmp_bit_chk;
	}

	if (all_correct) {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2, "write_test(%u,%u,ALL) : %u == \
			   %u => %lu", write_group, use_dm,
			   *bit_chk, param->write_correct_mask,
			   (long unsigned int)(*bit_chk ==
			   param->write_correct_mask));
		return *bit_chk == param->write_correct_mask;
	} else {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2, "write_test(%u,%u,ONE) : %u != ",
			   write_group, use_dm, *bit_chk);
		debug_cond(DLEVEL == 2, "%lu" " => %lu", (long unsigned int)0,
			   (long unsigned int)(*bit_chk != 0));
		return *bit_chk != 0x00;
	}
}

/*
 * center all windows. do per-bit-deskew to possibly increase size of
 * certain windows.
 */
static uint32_t rw_mgr_mem_calibrate_writes_center(uint32_t rank_bgn,
	uint32_t write_group, uint32_t test_bgn)
{
	uint32_t i, p, min_index;
	int32_t d;
	/*
	 * Store these as signed since there are comparisons with
	 * signed numbers.
	 */
	uint32_t bit_chk;
	uint32_t sticky_bit_chk;
	int32_t left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
	int32_t right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
	int32_t mid;
	int32_t mid_min, orig_mid_min;
	int32_t new_dqs, start_dqs, shift_dq;
	int32_t dq_margin, dqs_margin, dm_margin;
	uint32_t stop;
	uint32_t temp_dq_out1_delay;
	uint32_t addr;

	debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn);

	dm_margin = 0;

	addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
	start_dqs = readl(addr +
			  (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2));

	/* per-bit deskew */

	/*
	 * set the left and right edge of each bit to an illegal value
	 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value.
	 */
	sticky_bit_chk = 0;
	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
		left_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
		right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
	}

	/* Search for the left edge of the window for each bit */
	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX; d++) {
		scc_mgr_apply_group_dq_out1_delay(write_group, d);

		writel(0, &sdr_scc_mgr->update);

		/*
		 * Stop searching when the read test doesn't pass AND when
		 * we've seen a passing read on every bit.
		 */
		stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
			0, PASS_ONE_BIT, &bit_chk, 0);
		sticky_bit_chk = sticky_bit_chk | bit_chk;
		stop = stop && (sticky_bit_chk == param->write_correct_mask);
		debug_cond(DLEVEL == 2, "write_center(left): dtap=%d => %u \
			   == %u && %u [bit_chk= %u ]\n",
			   d, sticky_bit_chk, param->write_correct_mask,
			   stop, bit_chk);

		if (stop == 1) {
			break;
		} else {
			for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
				if (bit_chk & 1) {
					/*
					 * Remember a passing test as the
					 * left_edge.
					 */
					left_edge[i] = d;
				} else {
					/*
					 * If a left edge has not been seen
					 * yet, then a future passing test will
					 * mark this edge as the right edge.
					 */
					if (left_edge[i] ==
					    IO_IO_OUT1_DELAY_MAX + 1) {
						right_edge[i] = -(d + 1);
					}
				}
				debug_cond(DLEVEL == 2, "write_center[l,d=%d):", d);
				debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
					   (int)(bit_chk & 1), i, left_edge[i]);
				debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
					   right_edge[i]);
				bit_chk = bit_chk >> 1;
			}
		}
	}

	/* Reset DQ delay chains to 0 */
	scc_mgr_apply_group_dq_out1_delay(0);
	sticky_bit_chk = 0;
	for (i = RW_MGR_MEM_DQ_PER_WRITE_DQS - 1;; i--) {
		debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \
			   %d right_edge[%u]: %d\n", __func__, __LINE__,
			   i, left_edge[i], i, right_edge[i]);

		/*
		 * Check for cases where we haven't found the left edge,
		 * which makes our assignment of the right edge invalid.
		 * Reset it to the illegal value.
		 */
		if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) &&
		    (right_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) {
			right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
			debug_cond(DLEVEL == 2, "%s:%d write_center: reset \
				   right_edge[%u]: %d\n", __func__, __LINE__,
				   i, right_edge[i]);
		}

		/*
		 * Reset sticky bit (except for bits where we have
		 * seen the left edge).
		 */
		sticky_bit_chk = sticky_bit_chk << 1;
		if ((left_edge[i] != IO_IO_OUT1_DELAY_MAX + 1))
			sticky_bit_chk = sticky_bit_chk | 1;

		if (i == 0)
			break;
	}

	/* Search for the right edge of the window for each bit */
	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - start_dqs; d++) {
		scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
							d + start_dqs);

		writel(0, &sdr_scc_mgr->update);

		/*
		 * Stop searching when the read test doesn't pass AND when
		 * we've seen a passing read on every bit.
		 */
		stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
			0, PASS_ONE_BIT, &bit_chk, 0);

		sticky_bit_chk = sticky_bit_chk | bit_chk;
		stop = stop && (sticky_bit_chk == param->write_correct_mask);

		debug_cond(DLEVEL == 2, "write_center (right): dtap=%u => %u == \
			   %u && %u\n", d, sticky_bit_chk,
			   param->write_correct_mask, stop);

		if (stop == 1) {
			if (d == 0) {
				for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS;
				     i++) {
					/*
					 * d = 0 failed, but it passed when
					 * testing the left edge, so it must be
					 * marginal, set it to -1.
					 */
					if (right_edge[i] ==
					    IO_IO_OUT1_DELAY_MAX + 1 &&
					    left_edge[i] !=
					    IO_IO_OUT1_DELAY_MAX + 1)
						right_edge[i] = -1;
				}
			}
			break;
		} else {
			for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
				if (bit_chk & 1) {
					/*
					 * Remember a passing test as
					 * the right_edge.
					 */
					right_edge[i] = d;
				} else {
					if (d != 0) {
						/*
						 * If a right edge has not
						 * been seen yet, then a future
						 * passing test will mark this
						 * edge as the left edge.
						 */
						if (right_edge[i] ==
						    IO_IO_OUT1_DELAY_MAX + 1)
							left_edge[i] = -(d + 1);
					} else {
						/*
						 * d = 0 failed, but it passed
						 * when testing the left edge,
						 * so it must be marginal, set
						 * it to -1.
						 */
						if (right_edge[i] ==
						    IO_IO_OUT1_DELAY_MAX + 1 &&
						    left_edge[i] !=
						    IO_IO_OUT1_DELAY_MAX + 1)
							right_edge[i] = -1;
						/*
						 * If a right edge has not been
						 * seen yet, then a future
						 * passing test will mark this
						 * edge as the left edge.
						 */
						else if (right_edge[i] ==
							 IO_IO_OUT1_DELAY_MAX + 1)
							left_edge[i] = -(d + 1);
					}
				}
				debug_cond(DLEVEL == 2, "write_center[r,d=%d):", d);
				debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
					   (int)(bit_chk & 1), i, left_edge[i]);
				debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
					   right_edge[i]);
				bit_chk = bit_chk >> 1;
			}
		}
	}

	/* Check that all bits have a window */
	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
		debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \
			   %d right_edge[%u]: %d", __func__, __LINE__,
			   i, left_edge[i], i, right_edge[i]);
		if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) ||
		    (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1)) {
			set_failing_group_stage(test_bgn + i,
						CAL_STAGE_WRITES,
						CAL_SUBSTAGE_WRITES_CENTER);
			return 0;
		}
	}

	/* Find middle of window for each DQ bit */
	mid_min = left_edge[0] - right_edge[0];
	min_index = 0;
	for (i = 1; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
		mid = left_edge[i] - right_edge[i];
		if (mid < mid_min) {
			mid_min = mid;
			min_index = i;
		}
	}

	/*
	 * -mid_min/2 represents the amount that we need to move DQS.
	 * If mid_min is odd and positive we'll need to add one to
	 * make sure the rounding in further calculations is correct
	 * (always bias to the right), so just add 1 for all positive values.
	 */
	if (mid_min > 0)
		mid_min++;
	mid_min = mid_min / 2;
	debug_cond(DLEVEL == 1, "%s:%d write_center: mid_min=%d\n", __func__,
		   __LINE__, mid_min);

	/* Determine the amount we can change DQS (which is -mid_min) */
	orig_mid_min = mid_min;
	new_dqs = start_dqs;
	mid_min = 0;

	debug_cond(DLEVEL == 1, "%s:%d write_center: start_dqs=%d new_dqs=%d \
		   mid_min=%d\n", __func__, __LINE__, start_dqs, new_dqs, mid_min);
	/* Initialize data for export structures */
	dqs_margin = IO_IO_OUT1_DELAY_MAX + 1;
	dq_margin = IO_IO_OUT1_DELAY_MAX + 1;

	/* add delay to bring centre of all DQ windows to the same "level" */
	for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
		/* Use values before divide by 2 to reduce round off error */
		shift_dq = (left_edge[i] - right_edge[i] -
			(left_edge[min_index] - right_edge[min_index]))/2 +
			(orig_mid_min - mid_min);

		debug_cond(DLEVEL == 2, "%s:%d write_center: before: shift_dq \
			   [%u]=%d\n", __func__, __LINE__, i, shift_dq);

		addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
		temp_dq_out1_delay = readl(addr + (i << 2));
		if (shift_dq + (int32_t)temp_dq_out1_delay >
			(int32_t)IO_IO_OUT1_DELAY_MAX) {
			shift_dq = (int32_t)IO_IO_OUT1_DELAY_MAX - temp_dq_out1_delay;
		} else if (shift_dq + (int32_t)temp_dq_out1_delay < 0) {
			shift_dq = -(int32_t)temp_dq_out1_delay;
		}
		debug_cond(DLEVEL == 2, "write_center: after: shift_dq[%u]=%d\n",
			   i, shift_dq);
		scc_mgr_set_dq_out1_delay(i, temp_dq_out1_delay + shift_dq);
		scc_mgr_load_dq(i);

		debug_cond(DLEVEL == 2, "write_center: margin[%u]=[%d,%d]\n", i,
			   left_edge[i] - shift_dq + (-mid_min),
			   right_edge[i] + shift_dq - (-mid_min));
		/* To determine values for export structures */
		if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
			dq_margin = left_edge[i] - shift_dq + (-mid_min);

		if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
			dqs_margin = right_edge[i] + shift_dq - (-mid_min);
	}

	/* Move DQS */
	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
	writel(0, &sdr_scc_mgr->update);

	/* Centre DM */
	debug_cond(DLEVEL == 2, "%s:%d write_center: DM\n", __func__, __LINE__);

	/*
	 * set the left and right edge of each bit to an illegal value,
	 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value,
	 */
	left_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
	right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t end_curr = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t bgn_best = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t end_best = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t win_best = 0;
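	/*
	 * The two loops below slide a one-dimensional window: on every
	 * passing test the current window [bgn_curr, end_curr] is extended
	 * and compared against the best window seen so far; a failing test
	 * resets the current window to the illegal value. For example
	 * (made-up results), passes at d = 30 down to d = 21 followed by a
	 * failure at d = 20 record a best window of bgn_best = -30,
	 * end_best = -21, win_best = 10.
	 */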

	/* Search for the/part of the window with DM shift */
	for (d = IO_IO_OUT1_DELAY_MAX; d >= 0; d -= DELTA_D) {
		scc_mgr_apply_group_dm_out1_delay(d);
		writel(0, &sdr_scc_mgr->update);

		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
						    PASS_ALL_BITS, &bit_chk,
						    0)) {
			/* USE Set current end of the window */
			end_curr = -d;
			/*
			 * If a starting edge of our window has not been seen
			 * this is our current start of the DM window.
			 */
			if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
				bgn_curr = -d;

			/*
			 * If current window is bigger than best seen.
			 * Set best seen to be current window.
			 */
			if ((end_curr-bgn_curr+1) > win_best) {
				win_best = end_curr-bgn_curr+1;
				bgn_best = bgn_curr;
				end_best = end_curr;
			}
		} else {
			/* We just saw a failing test. Reset temp edge */
			bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
			end_curr = IO_IO_OUT1_DELAY_MAX + 1;
		}
	}

	/* Reset DM delay chains to 0 */
	scc_mgr_apply_group_dm_out1_delay(0);

	/*
	 * Check to see if the current window nudges up against 0 delay.
	 * If so we need to continue the search by shifting DQS otherwise DQS
	 * search begins as a new search.
	 */
	if (end_curr != 0) {
		bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
		end_curr = IO_IO_OUT1_DELAY_MAX + 1;
	}

	/* Search for the/part of the window with DQS shifts */
	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - new_dqs; d += DELTA_D) {
		/*
		 * Note: This only shifts DQS, so we are limiting ourselves to
		 * the width of DQ unnecessarily.
		 */
		scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
							d + new_dqs);

		writel(0, &sdr_scc_mgr->update);
		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
						    PASS_ALL_BITS, &bit_chk,
						    0)) {
			/* USE Set current end of the window */
			end_curr = d;
			/*
			 * If a beginning edge of our window has not been seen
			 * this is our current begin of the DM window.
			 */
			if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
				bgn_curr = d;

			/*
			 * If current window is bigger than best seen. Set best
			 * seen to be current window.
			 */
			if ((end_curr-bgn_curr+1) > win_best) {
				win_best = end_curr-bgn_curr+1;
				bgn_best = bgn_curr;
				end_best = end_curr;
			}
		} else {
			/* We just saw a failing test. Reset temp edge */
			bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
			end_curr = IO_IO_OUT1_DELAY_MAX + 1;

			/*
			 * Early exit optimization: if the remaining delay
			 * chain space is less than the largest window seen
			 * so far, we can exit.
			 */
			if ((win_best-1) >
				(IO_IO_OUT1_DELAY_MAX - new_dqs - d))
				break;
		}
	}

	/* assign left and right edge for cal and reporting; */
	left_edge[0] = -1*bgn_best;
	right_edge[0] = end_best;

	debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n", __func__,
		   __LINE__, left_edge[0], right_edge[0]);

	/* Move DQS (back to orig) */
	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);

	/* Move DM */

	/* Find middle of window for the DM bit */
	mid = (left_edge[0] - right_edge[0]) / 2;

	/* only move right, since we are not moving DQS/DQ */
	if (mid < 0)
		mid = 0;

	/* dm_margin should fail if we never find a window */
	if (win_best == 0)
		dm_margin = -1;
	else
		dm_margin = left_edge[0] - mid;

	scc_mgr_apply_group_dm_out1_delay(mid);
	writel(0, &sdr_scc_mgr->update);

	debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d mid=%d \
		   dm_margin=%d\n", __func__, __LINE__, left_edge[0],
		   right_edge[0], mid, dm_margin);

	/* Export values */
	gbl->fom_out += dq_margin + dqs_margin;

	debug_cond(DLEVEL == 2, "%s:%d write_center: dq_margin=%d \
		   dqs_margin=%d dm_margin=%d\n", __func__, __LINE__,
		   dq_margin, dqs_margin, dm_margin);

	/*
	 * Do not remove this line as it makes sure all of our
	 * decisions have been applied.
	 */
	writel(0, &sdr_scc_mgr->update);
	return (dq_margin >= 0) && (dqs_margin >= 0) && (dm_margin >= 0);
}

/* calibrate the write operations */
static uint32_t rw_mgr_mem_calibrate_writes(uint32_t rank_bgn, uint32_t g,
					    uint32_t test_bgn)
{
	/* update info for sims */
	debug("%s:%d %u %u\n", __func__, __LINE__, g, test_bgn);

	reg_file_set_stage(CAL_STAGE_WRITES);
	reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);

	reg_file_set_group(g);

	if (!rw_mgr_mem_calibrate_writes_center(rank_bgn, g, test_bgn)) {
		set_failing_group_stage(g, CAL_STAGE_WRITES,
					CAL_SUBSTAGE_WRITES_CENTER);
		return 0;
	}

	return 1;
}

/**
 * mem_precharge_and_activate() - Precharge all banks and activate
 *
 * Precharge all banks and activate row 0 in bank "000..." and bank "111...".
 */
static void mem_precharge_and_activate(void)
{
	int r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		/* Test if the rank should be skipped. */
		if (param->skip_ranks[r])
			continue;

		/* Set rank. */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* Precharge all banks. */
		writel(RW_MGR_PRECHARGE_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					     RW_MGR_RUN_SINGLE_GROUP_OFFSET);

		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(RW_MGR_ACTIVATE_0_AND_1_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_ACTIVATE_0_AND_1_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		/* Activate rows. */
		writel(RW_MGR_ACTIVATE_0_AND_1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	}
}

/**
 * mem_init_latency() - Configure memory RLAT and WLAT settings
 *
 * Configure memory RLAT and WLAT parameters.
 */
static void mem_init_latency(void)
{
	/*
	 * For AV/CV, LFIFO is hardened and always runs at full rate
	 * so max latency in AFI clocks, used here, is correspondingly
	 * smaller.
	 */
	const u32 max_latency = (1 << MAX_LATENCY_COUNT_WIDTH) - 1;
	u32 rlat, wlat;

	debug("%s:%d\n", __func__, __LINE__);

	/*
	 * Read in write latency.
	 * WL for Hard PHY does not include additive latency.
	 */
	wlat = readl(&data_mgr->t_wl_add);
	wlat += readl(&data_mgr->mem_t_add);

	gbl->rw_wl_nop_cycles = wlat - 1;
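	/*
	 * Illustrative numbers (assumed, not read from hardware): if
	 * t_wl_add reads 5 and mem_t_add reads 2, the effective write
	 * latency is 7 AFI clocks, so the write test sequences insert
	 * rw_wl_nop_cycles = 6 NOPs between the write command and the
	 * write data phase.
	 */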

	/* Read in read latency. */
	rlat = readl(&data_mgr->t_rl_add);

	/* Set a pretty high read latency initially. */
	gbl->curr_read_lat = rlat + 16;
	if (gbl->curr_read_lat > max_latency)
		gbl->curr_read_lat = max_latency;

	writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);

	/* Advertise write latency. */
	writel(wlat, &phy_mgr_cfg->afi_wlat);
}

/**
 * mem_skip_calibrate() - Set VFIFO and LFIFO to instant-on settings
 *
 * Set VFIFO and LFIFO to instant-on settings in skip calibration mode.
 */
static void mem_skip_calibrate(void)
{
	uint32_t vfifo_offset;
	uint32_t i, j, r;

	debug("%s:%d\n", __func__, __LINE__);
	/* Need to update every shadow register set used by the interface */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/*
		 * Set output phase alignment settings appropriate for
		 * skip calibration.
		 */
		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
			scc_mgr_set_dqs_en_phase(i, 0);
#if IO_DLL_CHAIN_LENGTH == 6
			scc_mgr_set_dqdqs_output_phase(i, 6);
#else
			scc_mgr_set_dqdqs_output_phase(i, 7);
#endif
			/*
			 * Write data arrives to the I/O two cycles before write
			 * latency is reached (720 deg).
			 *   -> due to bit-slip in a/c bus
			 *   -> to allow board skew where dqs is longer than ck
			 *      -> how often can this happen!?
			 *      -> can claim back some ptaps for high freq
			 *         support if we can relax this, but i digress...
			 *
			 * The write_clk leads mem_ck by 90 deg
			 * The minimum ptap of the OPA is 180 deg
			 * Each ptap has (360 / IO_DLL_CHAIN_LENGTH) deg of delay
			 * The write_clk is always delayed by 2 ptaps
			 *
			 * Hence, to make DQS aligned to CK, we need to delay
			 * DQS by:
			 *    (720 - 90 - 180 - 2 * (360 / IO_DLL_CHAIN_LENGTH))
			 *
			 * Dividing the above by (360 / IO_DLL_CHAIN_LENGTH)
			 * gives us the number of ptaps, which simplifies to:
			 *
			 *    (1.25 * IO_DLL_CHAIN_LENGTH - 2)
			 */
			scc_mgr_set_dqdqs_output_phase(i,
					1.25 * IO_DLL_CHAIN_LENGTH - 2);
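			/*
			 * Sanity check assuming IO_DLL_CHAIN_LENGTH = 8: one
			 * ptap is then 360 / 8 = 45 deg, so the delay above is
			 * (720 - 90 - 180 - 2 * 45) / 45 = 8 ptaps, matching
			 * 1.25 * 8 - 2 = 8.
			 */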
		}
		writel(0xff, &sdr_scc_mgr->dqs_ena);
		writel(0xff, &sdr_scc_mgr->dqs_io_ena);

		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
			writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
				  SCC_MGR_GROUP_COUNTER_OFFSET);
		}
		writel(0xff, &sdr_scc_mgr->dq_ena);
		writel(0xff, &sdr_scc_mgr->dm_ena);
		writel(0, &sdr_scc_mgr->update);
	}

	/* Compensate for simulation model behaviour */
	for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
		scc_mgr_set_dqs_bus_in_delay(i, 10);
		scc_mgr_load_dqs(i);
	}
	writel(0, &sdr_scc_mgr->update);

	/*
	 * ArriaV has hard FIFOs that can only be initialized by incrementing
	 * in sequencer.
	 */
	vfifo_offset = CALIB_VFIFO_OFFSET;
	for (j = 0; j < vfifo_offset; j++)
		writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
	writel(0, &phy_mgr_cmd->fifo_reset);

	/*
	 * For Arria V and Cyclone V with hard LFIFO, we get the skip-cal
	 * setting from generation-time constant.
	 */
	gbl->curr_read_lat = CALIB_LFIFO_OFFSET;
	writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
}

/**
 * mem_calibrate() - Memory calibration entry point.
 *
 * Perform memory calibration.
 */
static uint32_t mem_calibrate(void)
{
	uint32_t i;
	uint32_t rank_bgn, sr;
	uint32_t write_group, write_test_bgn;
	uint32_t read_group, read_test_bgn;
	uint32_t run_groups, current_run;
	uint32_t failing_groups = 0;
	uint32_t group_failed = 0;

	const u32 rwdqs_ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
				RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
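	/*
	 * Example with assumed interface widths: 4 read DQS groups over
	 * 2 write DQS groups gives rwdqs_ratio = 2, so the read loops
	 * below visit read groups 0-1 while calibrating write group 0 and
	 * read groups 2-3 for write group 1.
	 */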

	debug("%s:%d\n", __func__, __LINE__);

	/* Initialize the data settings */
	gbl->error_substage = CAL_SUBSTAGE_NIL;
	gbl->error_stage = CAL_STAGE_NIL;
	gbl->error_group = 0xff;
	gbl->fom_in = 0;
	gbl->fom_out = 0;

	/* Initialize WLAT and RLAT. */
	mem_init_latency();

	/* Initialize bit slips. */
	mem_precharge_and_activate();

	for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
		writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
			  SCC_MGR_GROUP_COUNTER_OFFSET);
		/* Only needed once to set all groups, pins, DQ, DQS, DM. */
		if (i == 0)
			scc_mgr_set_hhp_extras();

		scc_set_bypass_mode(i);
	}

	/* Calibration is skipped. */
	if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
		/*
		 * Set VFIFO and LFIFO to instant-on settings in skip
		 * calibration mode.
		 */
		mem_skip_calibrate();

		/*
		 * Do not remove this line as it makes sure all of our
		 * decisions have been applied.
		 */
		writel(0, &sdr_scc_mgr->update);
		return 1;
	}

	/* Calibration is not skipped. */
	for (i = 0; i < NUM_CALIB_REPEAT; i++) {
		/*
		 * Zero all delay chain/phase settings for all
		 * groups and all shadow register sets.
		 */
		scc_mgr_zero_all();

		run_groups = ~param->skip_groups;

		for (write_group = 0, write_test_bgn = 0; write_group
			< RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++,
			write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) {

			/* Initialize the group failure */
			group_failed = 0;

			current_run = run_groups & ((1 <<
					RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
			run_groups = run_groups >>
				     RW_MGR_NUM_DQS_PER_WRITE_GROUP;

			if (current_run == 0)
				continue;

			writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
					    SCC_MGR_GROUP_COUNTER_OFFSET);
			scc_mgr_zero_group(write_group, 0);

			for (read_group = write_group * rwdqs_ratio,
			     read_test_bgn = 0;
			     read_group < (write_group + 1) * rwdqs_ratio;
			     read_group++,
			     read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
				if (STATIC_CALIB_STEPS & CALIB_SKIP_VFIFO)
					continue;

				/* Calibrate the VFIFO */
				if (rw_mgr_mem_calibrate_vfifo(read_group,
							       read_test_bgn))
					continue;

				if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
					return 0;

				/* The group failed, we're done. */
				goto grp_failed;
			}

			/* Calibrate the output side */
			for (rank_bgn = 0, sr = 0;
			     rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
			     rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
				if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
					continue;

				/* Not needed in quick mode! */
				if (STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS)
					continue;

				/*
				 * Determine if this set of ranks
				 * should be skipped entirely.
				 */
				if (param->skip_shadow_regs[sr])
					continue;

				/* Calibrate WRITEs */
				if (rw_mgr_mem_calibrate_writes(rank_bgn,
						write_group, write_test_bgn))
					continue;

				group_failed = 1;
				if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
					return 0;
			}

			/* Some group failed, we're done. */
			if (group_failed)
				goto grp_failed;

			for (read_group = write_group * rwdqs_ratio,
			     read_test_bgn = 0;
			     read_group < (write_group + 1) * rwdqs_ratio;
			     read_group++,
			     read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
				if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
					continue;

				if (rw_mgr_mem_calibrate_vfifo_end(read_group,
								read_test_bgn))
					continue;

				if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
					return 0;

				/* The group failed, we're done. */
				goto grp_failed;
			}

			/* No group failed, continue as usual. */
			continue;

grp_failed:		/* A group failed, increment the counter. */
			failing_groups++;
		}

		/*
		 * USER If there are any failing groups then report
		 * the failure.
		 */
		if (failing_groups != 0)
			return 0;

		if (STATIC_CALIB_STEPS & CALIB_SKIP_LFIFO)
			continue;

		/*
		 * If we're skipping groups as part of debug,
		 * don't calibrate LFIFO.
		 */
		if (param->skip_groups != 0)
			continue;

		/* Calibrate the LFIFO */
		if (!rw_mgr_mem_calibrate_lfifo())
			return 0;
	}

	/*
	 * Do not remove this line as it makes sure all of our decisions
	 * have been applied.
	 */
	writel(0, &sdr_scc_mgr->update);
	return 1;
}

/**
 * run_mem_calibrate() - Perform memory calibration
 *
 * This function triggers the entire memory calibration procedure.
 */
static int run_mem_calibrate(void)
{
	int pass;

	debug("%s:%d\n", __func__, __LINE__);

	/* Reset pass/fail status shown on afi_cal_success/fail */
	writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status);

	/* Stop tracking manager. */
	clrbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22);

	phy_mgr_initialize();
	rw_mgr_mem_initialize();

	/* Perform the actual memory calibration. */
	pass = mem_calibrate();

	mem_precharge_and_activate();
	writel(0, &phy_mgr_cmd->fifo_reset);

	/* Handoff. */
	rw_mgr_mem_handoff();
	/*
	 * In Hard PHY this is a 2-bit control:
	 * 0: AFI Mux Select
	 * 1: DDIO Mux Select
	 */
	writel(0x2, &phy_mgr_cfg->mux_sel);

	/* Start tracking manager. */
	setbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22);

	return pass;
}

/**
 * debug_mem_calibrate() - Report result of memory calibration
 * @pass:	Value indicating whether calibration passed or failed
 *
 * This function reports the results of the memory calibration
 * and writes debug information into the register file.
 */
static void debug_mem_calibrate(int pass)
{
	uint32_t debug_info;

	if (pass) {
		printf("%s: CALIBRATION PASSED\n", __FILE__);

		gbl->fom_in /= 2;
		gbl->fom_out /= 2;

		if (gbl->fom_in > 0xff)
			gbl->fom_in = 0xff;

		if (gbl->fom_out > 0xff)
			gbl->fom_out = 0xff;

		/* Update the FOM in the register file */
		debug_info = gbl->fom_in;
		debug_info |= gbl->fom_out << 8;
		writel(debug_info, &sdr_reg_file->fom);

		writel(debug_info, &phy_mgr_cfg->cal_debug_info);
		writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status);
	} else {
		printf("%s: CALIBRATION FAILED\n", __FILE__);

		debug_info = gbl->error_stage;
		debug_info |= gbl->error_substage << 8;
		debug_info |= gbl->error_group << 16;

		writel(debug_info, &sdr_reg_file->failing_stage);
		writel(debug_info, &phy_mgr_cfg->cal_debug_info);
		writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status);

		/* Update the failing group/stage in the register file */
		debug_info = gbl->error_stage;
		debug_info |= gbl->error_substage << 8;
		debug_info |= gbl->error_group << 16;
		writel(debug_info, &sdr_reg_file->failing_stage);
	}

	printf("%s: Calibration complete\n", __FILE__);
}

/**
 * hc_initialize_rom_data() - Initialize ROM data
 *
 * Initialize ROM data.
 */
static void hc_initialize_rom_data(void)
{
	u32 i, addr;

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET;
	for (i = 0; i < ARRAY_SIZE(inst_rom_init); i++)
		writel(inst_rom_init[i], addr + (i << 2));

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET;
	for (i = 0; i < ARRAY_SIZE(ac_rom_init); i++)
		writel(ac_rom_init[i], addr + (i << 2));
}

/**
 * initialize_reg_file() - Initialize SDR register file
 *
 * Initialize SDR register file.
 */
static void initialize_reg_file(void)
{
	/* Initialize the register file with the correct data */
	writel(REG_FILE_INIT_SEQ_SIGNATURE, &sdr_reg_file->signature);
	writel(0, &sdr_reg_file->debug_data_addr);
	writel(0, &sdr_reg_file->cur_stage);
	writel(0, &sdr_reg_file->fom);
	writel(0, &sdr_reg_file->failing_stage);
	writel(0, &sdr_reg_file->debug1);
	writel(0, &sdr_reg_file->debug2);
}

/**
 * initialize_hps_phy() - Initialize HPS PHY
 *
 * Initialize HPS PHY.
 */
static void initialize_hps_phy(void)
{
	uint32_t reg;
	/*
	 * Tracking also gets configured here because it's in the
	 * same register.
	 */
	uint32_t trk_sample_count = 7500;
	uint32_t trk_long_idle_sample_count = (10 << 16) | 100;
	/*
	 * Format is number of outer loops in the 16 MSB, sample
	 * count in 16 LSB.
	 */

	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1);
	/*
	 * This field selects the intrinsic latency to RDATA_EN/FULL path.
	 * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles.
	 */
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(
		trk_sample_count);
	writel(reg, &sdr_ctrl->phy_ctrl0);

	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(
		trk_sample_count >>
		SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(
		trk_long_idle_sample_count);
	writel(reg, &sdr_ctrl->phy_ctrl1);

	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(
		trk_long_idle_sample_count >>
		SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH);
	writel(reg, &sdr_ctrl->phy_ctrl2);
}

/**
 * initialize_tracking() - Initialize tracking
 *
 * Initialize the register file with usable initial data.
 */
static void initialize_tracking(void)
{
	/*
	 * Initialize the register file with the correct data.
	 * Compute usable version of value in case we skip full
	 * computation later.
	 */
	writel(DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP) - 1,
	       &sdr_reg_file->dtaps_per_ptap);

	/* trk_sample_count */
	writel(7500, &sdr_reg_file->trk_sample_count);

	/* longidle outer loop [15:0] */
	writel((10 << 16) | (100 << 0), &sdr_reg_file->trk_longidle);

	/*
	 * longidle sample count [31:24]
	 * trfc, worst case of 933MHz 4Gb [23:16]
	 * trcd, worst case [15:8]
	 * vfifo wait [7:0]
	 */
	writel((243 << 24) | (14 << 16) | (10 << 8) | (4 << 0),
	       &sdr_reg_file->delays);
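	/*
	 * Decoded, the packed word above is 0xF30E0A04: longidle sample
	 * count 243 (0xF3) in bits [31:24], trfc 14 in [23:16], trcd 10
	 * in [15:8] and vfifo wait 4 in [7:0], matching the field layout
	 * in the comment.
	 */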

	/* mux delay */
	writel((RW_MGR_IDLE << 24) | (RW_MGR_ACTIVATE_1 << 16) |
	       (RW_MGR_SGLE_READ << 8) | (RW_MGR_PRECHARGE_ALL << 0),
	       &sdr_reg_file->trk_rw_mgr_addr);

	writel(RW_MGR_MEM_IF_READ_DQS_WIDTH,
	       &sdr_reg_file->trk_read_dqs_width);

	/* trefi [7:0] */
	writel((RW_MGR_REFRESH_ALL << 24) | (1000 << 0),
	       &sdr_reg_file->trk_rfsh);
}

int sdram_calibration_full(void)
{
	struct param_type my_param;
	struct gbl_type my_gbl;
	uint32_t pass;

	memset(&my_param, 0, sizeof(my_param));
	memset(&my_gbl, 0, sizeof(my_gbl));

	param = &my_param;
	gbl = &my_gbl;

	/* Set the calibration enabled by default */
	gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
	/*
	 * Only sweep all groups (regardless of fail state) by default
	 * Set enabled read test by default.
	 */
#if DISABLE_GUARANTEED_READ
	gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ;
#endif

	/* Initialize the register file */
	initialize_reg_file();

	/* Initialize any PHY CSR */
	initialize_hps_phy();

	scc_mgr_initialize();

	initialize_tracking();

	printf("%s: Preparing to start memory calibration\n", __FILE__);

	debug("%s:%d\n", __func__, __LINE__);
	debug_cond(DLEVEL == 1,
		   "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ",
		   RW_MGR_MEM_NUMBER_OF_RANKS, RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM,
		   RW_MGR_MEM_DQ_PER_READ_DQS, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		   RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS,
		   RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
	debug_cond(DLEVEL == 1,
		   "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ",
		   RW_MGR_MEM_IF_READ_DQS_WIDTH, RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
		   RW_MGR_MEM_DATA_WIDTH, RW_MGR_MEM_DATA_MASK_WIDTH,
		   IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP);
	debug_cond(DLEVEL == 1, "dtap_dqsen_delay=%u, dll=%u",
		   IO_DELAY_PER_DQS_EN_DCHAIN_TAP, IO_DLL_CHAIN_LENGTH);
	debug_cond(DLEVEL == 1, "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ",
		   IO_DQS_EN_PHASE_MAX, IO_DQDQS_OUT_PHASE_MAX,
		   IO_DQS_EN_DELAY_MAX, IO_DQS_IN_DELAY_MAX);
	debug_cond(DLEVEL == 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ",
		   IO_IO_IN_DELAY_MAX, IO_IO_OUT1_DELAY_MAX,
		   IO_IO_OUT2_DELAY_MAX);
	debug_cond(DLEVEL == 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n",
		   IO_DQS_IN_RESERVE, IO_DQS_OUT_RESERVE);

	hc_initialize_rom_data();

	/* update info for sims */
	reg_file_set_stage(CAL_STAGE_NIL);
	reg_file_set_group(0);

	/*
	 * Load global needed for those actions that require
	 * some dynamic calibration support.
	 */
	dyn_calib_steps = STATIC_CALIB_STEPS;
	/*
	 * Load global to allow dynamic selection of delay loop settings
	 * based on calibration mode.
	 */
	if (!(dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS))
		skip_delay_mask = 0xff;
	else
		skip_delay_mask = 0x0;

	pass = run_mem_calibrate();
	debug_mem_calibrate(pass);
	return pass;
}