#include <common.h>
#include <asm/io.h>
#include <asm/arch/sdram.h>
+#include <errno.h>
#include "sequencer.h"
#include "sequencer_auto.h"
#include "sequencer_auto_ac_init.h"
struct param_type *param;
uint32_t curr_shadow_reg;
-static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
- uint32_t write_group, uint32_t use_dm,
- uint32_t all_correct, uint32_t *bit_chk, uint32_t all_ranks);
-
static void set_failing_group_stage(uint32_t group, uint32_t stage,
uint32_t substage)
{
clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, substage << 8);
}
-static void initialize(void)
+/**
+ * phy_mgr_initialize() - Initialize PHY Manager
+ *
+ * Initialize PHY Manager.
+ */
+static void phy_mgr_initialize(void)
{
+ u32 ratio;
+
debug("%s:%d\n", __func__, __LINE__);
- /* USER calibration has control over path to memory */
+ /* Calibration has control over path to memory */
/*
* In Hard PHY this is a 2-bit control:
* 0: AFI Mux Select
writel(0, &phy_mgr_cfg->cal_debug_info);
- if ((dyn_calib_steps & CALIB_SKIP_ALL) != CALIB_SKIP_ALL) {
- param->read_correct_mask_vg = ((uint32_t)1 <<
- (RW_MGR_MEM_DQ_PER_READ_DQS /
- RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
- param->write_correct_mask_vg = ((uint32_t)1 <<
- (RW_MGR_MEM_DQ_PER_READ_DQS /
- RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
- param->read_correct_mask = ((uint32_t)1 <<
- RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
- param->write_correct_mask = ((uint32_t)1 <<
- RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
- param->dm_correct_mask = ((uint32_t)1 <<
- (RW_MGR_MEM_DATA_WIDTH / RW_MGR_MEM_DATA_MASK_WIDTH))
- - 1;
- }
+ /* Init params only if we do NOT skip calibration. */
+ if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL)
+ return;
+
+ ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
+ RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
+ param->read_correct_mask_vg = (1 << ratio) - 1;
+ param->write_correct_mask_vg = (1 << ratio) - 1;
+ param->read_correct_mask = (1 << RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
+ param->write_correct_mask = (1 << RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
+ ratio = RW_MGR_MEM_DATA_WIDTH /
+ RW_MGR_MEM_DATA_MASK_WIDTH;
+ param->dm_correct_mask = (1 << ratio) - 1;
}
-static void set_rank_and_odt_mask(uint32_t rank, uint32_t odt_mode)
+/**
+ * set_rank_and_odt_mask() - Set Rank and ODT mask
+ * @rank: Rank mask
+ * @odt_mode: ODT mode, OFF or READ_WRITE
+ *
+ * Set Rank and ODT mask (On-Die Termination).
+ */
+static void set_rank_and_odt_mask(const u32 rank, const u32 odt_mode)
{
- uint32_t odt_mask_0 = 0;
- uint32_t odt_mask_1 = 0;
- uint32_t cs_and_odt_mask;
+ u32 odt_mask_0 = 0;
+ u32 odt_mask_1 = 0;
+ u32 cs_and_odt_mask;
- if (odt_mode == RW_MGR_ODT_MODE_READ_WRITE) {
- if (RW_MGR_MEM_NUMBER_OF_RANKS == 1) {
- /*
- * 1 Rank
- * Read: ODT = 0
- * Write: ODT = 1
- */
+ if (odt_mode == RW_MGR_ODT_MODE_OFF) {
+ odt_mask_0 = 0x0;
+ odt_mask_1 = 0x0;
+ } else { /* RW_MGR_ODT_MODE_READ_WRITE */
+ switch (RW_MGR_MEM_NUMBER_OF_RANKS) {
+ case 1: /* 1 Rank */
+ /* Read: ODT = 0 ; Write: ODT = 1 */
odt_mask_0 = 0x0;
odt_mask_1 = 0x1;
- } else if (RW_MGR_MEM_NUMBER_OF_RANKS == 2) {
- /* 2 Ranks */
+ break;
+ case 2: /* 2 Ranks */
if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
- /* - Dual-Slot , Single-Rank
- * (1 chip-select per DIMM)
- * OR
- * - RDIMM, 4 total CS (2 CS per DIMM)
- * means 2 DIMM
- * Since MEM_NUMBER_OF_RANKS is 2 they are
- * both single rank
- * with 2 CS each (special for RDIMM)
+ /*
+ * - Dual-Slot , Single-Rank (1 CS per DIMM)
+ * OR
+ * - RDIMM, 4 total CS (2 CS per DIMM, 2 DIMM)
+ *
+ * Since MEM_NUMBER_OF_RANKS is 2, they
+ * are both single rank with 2 CS each
+ * (special for RDIMM).
+ *
* Read: Turn on ODT on the opposite rank
* Write: Turn on ODT on all ranks
*/
odt_mask_1 = 0x3;
} else {
/*
- * USER - Single-Slot , Dual-rank DIMMs
- * (2 chip-selects per DIMM)
- * USER Read: Turn on ODT off on all ranks
- * USER Write: Turn on ODT on active rank
+ * - Single-Slot , Dual-Rank (2 CS per DIMM)
+ *
+ * Read: Turn ODT off on all ranks
+ * Write: Turn on ODT on active rank
*/
odt_mask_0 = 0x0;
odt_mask_1 = 0x3 & (1 << rank);
}
- } else {
- /* 4 Ranks
- * Read:
+ break;
+ case 4: /* 4 Ranks */
+ /* Read:
* ----------+-----------------------+
- * | |
* | ODT |
* Read From +-----------------------+
* Rank | 3 | 2 | 1 | 0 |
*
* Write:
* ----------+-----------------------+
- * | |
* | ODT |
* Write To +-----------------------+
* Rank | 3 | 2 | 1 | 0 |
odt_mask_1 = 0xA;
break;
}
+ break;
}
- } else {
- odt_mask_0 = 0x0;
- odt_mask_1 = 0x0;
}
- cs_and_odt_mask =
- (0xFF & ~(1 << rank)) |
- ((0xFF & odt_mask_0) << 8) |
- ((0xFF & odt_mask_1) << 16);
+ cs_and_odt_mask = (0xFF & ~(1 << rank)) |
+ ((0xFF & odt_mask_0) << 8) |
+ ((0xFF & odt_mask_1) << 16);
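+ /*
+ * Example, assuming the layout implied above: rank = 0 with
+ * odt_mask_0 = 0x0 and odt_mask_1 = 0x1 encodes as 0x0100FE, i.e.
+ * active-low CS in bits [7:0], read ODT in [15:8] and write ODT
+ * in [23:16].
+ */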
writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}
}
}
-/* optimization used to recover some slots in ddr3 inst_rom */
-/* could be applied to other protocols if we wanted to */
+/**
+ * set_jump_as_return() - Return instruction optimization
+ *
+ * Optimization used to recover some slots in ddr3 inst_rom; could be
+ * applied to other protocols if we wanted to.
+ */
static void set_jump_as_return(void)
{
/*
- * to save space, we replace return with jump to special shared
+ * To save space, we replace return with jump to special shared
* RETURN instruction so we set the counter to large value so that
- * we always jump
+ * we always jump.
*/
writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
writel(RW_MGR_RETURN, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
}
-static void rw_mgr_mem_initialize(void)
+/**
+ * rw_mgr_mem_init_load_regs() - Load instruction registers
+ * @cntr0: Counter 0 value
+ * @cntr1: Counter 1 value
+ * @cntr2: Counter 2 value
+ * @jump: Jump instruction value
+ *
+ * Load instruction registers.
+ */
+static void rw_mgr_mem_init_load_regs(u32 cntr0, u32 cntr1, u32 cntr2, u32 jump)
{
- uint32_t r;
uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RUN_SINGLE_GROUP_OFFSET;
- debug("%s:%d\n", __func__, __LINE__);
-
- /* The reset / cke part of initialization is broadcasted to all ranks */
- writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
- RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
-
- /*
- * Here's how you load register for a loop
- * Counters are located @ 0x800
- * Jump address are located @ 0xC00
- * For both, registers 0 to 3 are selected using bits 3 and 2, like
- * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
- * I know this ain't pretty, but Avalon bus throws away the 2 least
- * significant bits
- */
-
- /* start with memory RESET activated */
-
- /* tINIT = 200us */
-
- /*
- * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
- * If a and b are the number of iteration in 2 nested loops
- * it takes the following number of cycles to complete the operation:
- * number_of_cycles = ((2 + n) * a + 2) * b
- * where n is the number of instruction in the inner loop
- * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
- * b = 6A
- */
-
/* Load counters */
- writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR0_VAL),
+ writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr0),
&sdr_rw_load_mgr_regs->load_cntr0);
- writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR1_VAL),
+ writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr1),
&sdr_rw_load_mgr_regs->load_cntr1);
- writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR2_VAL),
+ writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr2),
&sdr_rw_load_mgr_regs->load_cntr2);
/* Load jump address */
- writel(RW_MGR_INIT_RESET_0_CKE_0,
- &sdr_rw_load_jump_mgr_regs->load_jump_add0);
- writel(RW_MGR_INIT_RESET_0_CKE_0,
- &sdr_rw_load_jump_mgr_regs->load_jump_add1);
- writel(RW_MGR_INIT_RESET_0_CKE_0,
- &sdr_rw_load_jump_mgr_regs->load_jump_add2);
+ writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
+ writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add1);
+ writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add2);
/* Execute count instruction */
- writel(RW_MGR_INIT_RESET_0_CKE_0, grpaddr);
-
- /* indicate that memory is stable */
- writel(1, &phy_mgr_cfg->reset_mem_stbl);
-
- /*
- * transition the RESET to high
- * Wait for 500us
- */
-
- /*
- * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
- * If a and b are the number of iteration in 2 nested loops
- * it takes the following number of cycles to complete the operation
- * number_of_cycles = ((2 + n) * a + 2) * b
- * where n is the number of instruction in the inner loop
- * One possible solution is n = 2 , a = 131 , b = 256 => a = 83,
- * b = FF
- */
-
- /* Load counters */
- writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR0_VAL),
- &sdr_rw_load_mgr_regs->load_cntr0);
- writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR1_VAL),
- &sdr_rw_load_mgr_regs->load_cntr1);
- writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR2_VAL),
- &sdr_rw_load_mgr_regs->load_cntr2);
-
- /* Load jump address */
- writel(RW_MGR_INIT_RESET_1_CKE_0,
- &sdr_rw_load_jump_mgr_regs->load_jump_add0);
- writel(RW_MGR_INIT_RESET_1_CKE_0,
- &sdr_rw_load_jump_mgr_regs->load_jump_add1);
- writel(RW_MGR_INIT_RESET_1_CKE_0,
- &sdr_rw_load_jump_mgr_regs->load_jump_add2);
-
- writel(RW_MGR_INIT_RESET_1_CKE_0, grpaddr);
-
- /* bring up clock enable */
+ writel(jump, grpaddr);
+}
- /* tXRP < 250 ck cycles */
- delay_for_n_mem_clocks(250);
+/**
+ * rw_mgr_mem_load_user() - Load user calibration values
+ * @fin1: Final instruction 1
+ * @fin2: Final instruction 2
+ * @precharge: If 1, precharge the banks at the end
+ *
+ * Load user calibration values and optionally precharge the banks.
+ */
+static void rw_mgr_mem_load_user(const u32 fin1, const u32 fin2,
+ const int precharge)
+{
+ u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
+ RW_MGR_RUN_SINGLE_GROUP_OFFSET;
+ u32 r;
for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
if (param->skip_ranks[r]) {
/* request to skip the rank */
continue;
}

/* set rank */
set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
+ /* precharge all banks ... */
+ if (precharge)
+ writel(RW_MGR_PRECHARGE_ALL, grpaddr);
+
/*
* Use mirrored commands for odd ranks if address
* mirroring is on
writel(RW_MGR_MRS1_MIRR, grpaddr);
delay_for_n_mem_clocks(4);
set_jump_as_return();
- writel(RW_MGR_MRS0_DLL_RESET_MIRR, grpaddr);
+ writel(fin1, grpaddr);
} else {
set_jump_as_return();
writel(RW_MGR_MRS2, grpaddr);
set_jump_as_return();
writel(RW_MGR_MRS1, grpaddr);
set_jump_as_return();
- writel(RW_MGR_MRS0_DLL_RESET, grpaddr);
+ writel(fin2, grpaddr);
}
+
+ if (precharge)
+ continue;
+
set_jump_as_return();
writel(RW_MGR_ZQCL, grpaddr);
}
}
-/*
- * At the end of calibration we have to program the user settings in, and
- * USER hand off the memory to the user.
+/**
+ * rw_mgr_mem_initialize() - Initialize RW Manager
+ *
+ * Initialize RW Manager.
*/
-static void rw_mgr_mem_handoff(void)
+static void rw_mgr_mem_initialize(void)
{
- uint32_t r;
- uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
- RW_MGR_RUN_SINGLE_GROUP_OFFSET;
-
debug("%s:%d\n", __func__, __LINE__);
- for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
- if (param->skip_ranks[r])
- /* request to skip the rank */
- continue;
- /* set rank */
- set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
-
- /* precharge all banks ... */
- writel(RW_MGR_PRECHARGE_ALL, grpaddr);
-
- /* load up MR settings specified by user */
-
- /*
- * Use Mirror-ed commands for odd ranks if address
- * mirrorring is on
- */
- if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
- set_jump_as_return();
- writel(RW_MGR_MRS2_MIRR, grpaddr);
- delay_for_n_mem_clocks(4);
- set_jump_as_return();
- writel(RW_MGR_MRS3_MIRR, grpaddr);
- delay_for_n_mem_clocks(4);
- set_jump_as_return();
- writel(RW_MGR_MRS1_MIRR, grpaddr);
- delay_for_n_mem_clocks(4);
- set_jump_as_return();
- writel(RW_MGR_MRS0_USER_MIRR, grpaddr);
- } else {
- set_jump_as_return();
- writel(RW_MGR_MRS2, grpaddr);
- delay_for_n_mem_clocks(4);
- set_jump_as_return();
- writel(RW_MGR_MRS3, grpaddr);
- delay_for_n_mem_clocks(4);
- set_jump_as_return();
- writel(RW_MGR_MRS1, grpaddr);
- delay_for_n_mem_clocks(4);
- set_jump_as_return();
- writel(RW_MGR_MRS0_USER, grpaddr);
- }
- /*
- * USER need to wait tMOD (12CK or 15ns) time before issuing
- * other commands, but we will have plenty of NIOS cycles before
- * actual handoff so its okay.
- */
- }
-}
-
-/*
- * performs a guaranteed read on the patterns we are going to use during a
- * read test to ensure memory works
- */
-static uint32_t rw_mgr_mem_calibrate_read_test_patterns(uint32_t rank_bgn,
- uint32_t group, uint32_t num_tries, uint32_t *bit_chk,
- uint32_t all_ranks)
-{
- uint32_t r, vg;
- uint32_t correct_mask_vg;
- uint32_t tmp_bit_chk;
- uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
- (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
- uint32_t addr;
- uint32_t base_rw_mgr;
- *bit_chk = param->read_correct_mask;
- correct_mask_vg = param->read_correct_mask_vg;
-
- for (r = rank_bgn; r < rank_end; r++) {
- if (param->skip_ranks[r])
- /* request to skip the rank */
- continue;
-
- /* set rank */
- set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
+ /* The reset / cke part of initialization is broadcast to all ranks */
+ writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
+ RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
- /* Load up a constant bursts of read commands */
- writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
- writel(RW_MGR_GUARANTEED_READ,
- &sdr_rw_load_jump_mgr_regs->load_jump_add0);
+ /*
+ * Here's how you load registers for a loop
+ * Counters are located @ 0x800
+ * Jump addresses are located @ 0xC00
+ * For both, registers 0 to 3 are selected using bits 3 and 2, like
+ * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
+ * I know this ain't pretty, but Avalon bus throws away the 2 least
+ * significant bits
+ */
- writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
- writel(RW_MGR_GUARANTEED_READ_CONT,
- &sdr_rw_load_jump_mgr_regs->load_jump_add1);
+ /* Start with memory RESET activated */
- tmp_bit_chk = 0;
- for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS-1; ; vg--) {
- /* reset the fifos to get pointers to known state */
+ /* tINIT = 200us */
- writel(0, &phy_mgr_cmd->fifo_reset);
- writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
- RW_MGR_RESET_READ_DATAPATH_OFFSET);
+ /*
+ * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
+ * If a and b are the numbers of iterations in the 2 nested loops,
+ * it takes the following number of cycles to complete the operation:
+ * number_of_cycles = ((2 + n) * a + 2) * b
+ * where n is the number of instructions in the inner loop.
+ * One possible solution is n = 0, a = 256, b = 106 => a = 0xFF,
+ * b = 0x6A
+ */
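+ /*
+ * Sanity check: ((2 + 0) * 256 + 2) * 106 = 54484 cycles, which at
+ * 3.75 ns per cycle is roughly 204 us and covers tINIT = 200 us.
+ */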
+ rw_mgr_mem_init_load_regs(SEQ_TINIT_CNTR0_VAL, SEQ_TINIT_CNTR1_VAL,
+ SEQ_TINIT_CNTR2_VAL,
+ RW_MGR_INIT_RESET_0_CKE_0);
- tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
- / RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);
+ /* Indicate that memory is stable. */
+ writel(1, &phy_mgr_cfg->reset_mem_stbl);
- addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
- writel(RW_MGR_GUARANTEED_READ, addr +
- ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
- vg) << 2));
+ /*
+ * Transition the RESET to high.
+ * Wait for 500us.
+ */
- base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
- tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & (~base_rw_mgr));
+ /*
+ * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
+ * If a and b are the numbers of iterations in the 2 nested loops,
+ * it takes the following number of cycles to complete the operation
+ * number_of_cycles = ((2 + n) * a + 2) * b
+ * where n is the number of instructions in the inner loop.
+ * One possible solution is n = 2, a = 131, b = 256 => a = 0x83,
+ * b = 0xFF
+ */
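+ /*
+ * Sanity check: ((2 + 2) * 131 + 2) * 256 = 134656 cycles, which at
+ * 3.75 ns per cycle is roughly 505 us and covers the 500 us wait.
+ */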
+ rw_mgr_mem_init_load_regs(SEQ_TRESET_CNTR0_VAL, SEQ_TRESET_CNTR1_VAL,
+ SEQ_TRESET_CNTR2_VAL,
+ RW_MGR_INIT_RESET_1_CKE_0);
- if (vg == 0)
- break;
- }
- *bit_chk &= tmp_bit_chk;
- }
+ /* Bring up clock enable. */
- addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
- writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));
+ /* tXRP < 250 ck cycles */
+ delay_for_n_mem_clocks(250);
- set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
- debug_cond(DLEVEL == 1, "%s:%d test_load_patterns(%u,ALL) => (%u == %u) =>\
- %lu\n", __func__, __LINE__, group, *bit_chk, param->read_correct_mask,
- (long unsigned int)(*bit_chk == param->read_correct_mask));
- return *bit_chk == param->read_correct_mask;
+ rw_mgr_mem_load_user(RW_MGR_MRS0_DLL_RESET_MIRR, RW_MGR_MRS0_DLL_RESET,
+ 0);
}
-static uint32_t rw_mgr_mem_calibrate_read_test_patterns_all_ranks
- (uint32_t group, uint32_t num_tries, uint32_t *bit_chk)
+/**
+ * rw_mgr_mem_handoff() - Hand off the memory to user
+ *
+ * At the end of calibration we have to program the user settings in
+ * and hand off the memory to the user.
+ */
+static void rw_mgr_mem_handoff(void)
{
- return rw_mgr_mem_calibrate_read_test_patterns(0, group,
- num_tries, bit_chk, 1);
+ rw_mgr_mem_load_user(RW_MGR_MRS0_USER_MIRR, RW_MGR_MRS0_USER, 1);
+ /*
+ * Need to wait tMOD (12CK or 15ns) time before issuing other
+ * commands, but we will have plenty of NIOS cycles before actual
+ * handoff, so it's okay.
+ */
}
-/* load up the patterns we are going to use during a read test */
-static void rw_mgr_mem_calibrate_read_load_patterns(uint32_t rank_bgn,
- uint32_t all_ranks)
-{
- uint32_t r;
- uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
- (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
-
- debug("%s:%d\n", __func__, __LINE__);
- for (r = rank_bgn; r < rank_end; r++) {
- if (param->skip_ranks[r])
- /* request to skip the rank */
- continue;
-
- /* set rank */
- set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
-
- /* Load up a constant bursts */
- writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
-
- writel(RW_MGR_GUARANTEED_WRITE_WAIT0,
- &sdr_rw_load_jump_mgr_regs->load_jump_add0);
-
- writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
-
- writel(RW_MGR_GUARANTEED_WRITE_WAIT1,
- &sdr_rw_load_jump_mgr_regs->load_jump_add1);
-
- writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);
- writel(RW_MGR_GUARANTEED_WRITE_WAIT2,
- &sdr_rw_load_jump_mgr_regs->load_jump_add2);
+/**
+ * rw_mgr_mem_calibrate_write_test_issue() - Issue write test command
+ * @group: Write Group
+ * @test_dm: Use DM
+ *
+ * Issue write test command. Two variants are provided, one that just tests
+ * a write pattern and another that tests datamask functionality.
+ */
+static void rw_mgr_mem_calibrate_write_test_issue(u32 group,
+ u32 test_dm)
+{
+ const u32 quick_write_mode =
+ (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES) &&
+ ENABLE_SUPER_QUICK_CALIBRATION;
+ u32 mcc_instruction;
+ u32 rw_wl_nop_cycles;
+
+ /*
+ * Set counter and jump addresses for the right
+ * number of NOP cycles.
+ * The number of supported NOP cycles can range from -1 to infinity
+ * Three different cases are handled:
+ *
+ * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
+ * mechanism will be used to insert the right number of NOPs
+ *
+ * 2. For a number of NOP cycles equal to 0, the micro-instruction
+ * issuing the write command will jump straight to the
+ * micro-instruction that turns on DQS (for DDRx), or outputs write
+ * data (for RLD), skipping the NOP micro-instruction altogether.
+ *
+ * 3. A number of NOP cycles equal to -1 indicates that DQS must be
+ * turned on in the same micro-instruction that issues the write
+ * command. Then we need to directly jump to the micro-instruction
+ * that sends out the data.
+ *
+ * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
+ * (2 and 3). One jump-counter (0) is used to perform multiple
+ * write-read operations, which leaves one counter to issue this
+ * command in "multiple-group" mode.
+ */
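+ /*
+ * Summary of the cases below: -1 makes CNTR2 jump straight to the
+ * ..._DATA instruction, 0 makes CNTR2 jump to the ..._DQS
+ * instruction, and N > 0 uses CNTR3 to repeat the ..._NOP
+ * instruction N times.
+ */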
+
+ rw_wl_nop_cycles = gbl->rw_wl_nop_cycles;
+
+ if (rw_wl_nop_cycles == -1) {
+ /*
+ * CNTR 2 - We want to execute the special write operation that
+ * turns on DQS right away and then skip directly to the
+ * instruction that sends out the data. We set the counter to a
+ * large number so that the jump is always taken.
+ */
+ writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
+
+ /* CNTR 3 - Not used */
+ if (test_dm) {
+ mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1;
+ writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA,
+ &sdr_rw_load_jump_mgr_regs->load_jump_add2);
+ writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
+ &sdr_rw_load_jump_mgr_regs->load_jump_add3);
+ } else {
+ mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0_WL_1;
+ writel(RW_MGR_LFSR_WR_RD_BANK_0_DATA,
+ &sdr_rw_load_jump_mgr_regs->load_jump_add2);
+ writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
+ &sdr_rw_load_jump_mgr_regs->load_jump_add3);
+ }
+ } else if (rw_wl_nop_cycles == 0) {
+ /*
+ * CNTR 2 - We want to skip the NOP operation and go straight
+ * to the DQS enable instruction. We set the counter to a large
+ * number so that the jump is always taken.
+ */
+ writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
+
+ /* CNTR 3 - Not used */
+ if (test_dm) {
+ mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
+ writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS,
+ &sdr_rw_load_jump_mgr_regs->load_jump_add2);
+ } else {
+ mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
+ writel(RW_MGR_LFSR_WR_RD_BANK_0_DQS,
+ &sdr_rw_load_jump_mgr_regs->load_jump_add2);
+ }
+ } else {
+ /*
+ * CNTR 2 - In this case we want to execute the next instruction
+ * and NOT take the jump. So we set the counter to 0. The jump
+ * address doesn't count.
+ */
+ writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
+ writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);
+
+ /*
+ * CNTR 3 - Set the nop counter to the number of cycles we
+ * need to loop for, minus 1.
+ */
+ writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
+ if (test_dm) {
+ mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
+ writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
+ &sdr_rw_load_jump_mgr_regs->load_jump_add3);
+ } else {
+ mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
+ writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
+ &sdr_rw_load_jump_mgr_regs->load_jump_add3);
+ }
+ }
+
+ writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
+ RW_MGR_RESET_READ_DATAPATH_OFFSET);
+
+ if (quick_write_mode)
+ writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
+ else
+ writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);
+
+ writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
+
+ /*
+ * CNTR 1 - This is used to ensure enough time elapses
+ * for read data to come back.
+ */
+ writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);
+
+ if (test_dm) {
+ writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT,
+ &sdr_rw_load_jump_mgr_regs->load_jump_add1);
+ } else {
+ writel(RW_MGR_LFSR_WR_RD_BANK_0_WAIT,
+ &sdr_rw_load_jump_mgr_regs->load_jump_add1);
+ }
+
+ writel(mcc_instruction, (SDR_PHYGRP_RWMGRGRP_ADDRESS |
+ RW_MGR_RUN_SINGLE_GROUP_OFFSET) +
+ (group << 2));
+}
+
+/**
+ * rw_mgr_mem_calibrate_write_test() - Test writes, check for single/multiple pass
+ * @rank_bgn: Rank number
+ * @write_group: Write Group
+ * @use_dm: Use DM
+ * @all_correct: All bits must be correct in the mask
+ * @bit_chk: Resulting bit mask after the test
+ * @all_ranks: Test all ranks
+ *
+ * Test writes, can check for a single bit pass or multiple bit pass.
+ */
+static int
+rw_mgr_mem_calibrate_write_test(const u32 rank_bgn, const u32 write_group,
+ const u32 use_dm, const u32 all_correct,
+ u32 *bit_chk, const u32 all_ranks)
+{
+ const u32 rank_end = all_ranks ?
+ RW_MGR_MEM_NUMBER_OF_RANKS :
+ (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
+ const u32 shift_ratio = RW_MGR_MEM_DQ_PER_WRITE_DQS /
+ RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS;
+ const u32 correct_mask_vg = param->write_correct_mask_vg;
+
+ u32 tmp_bit_chk, base_rw_mgr;
+ int vg, r;
+
+ *bit_chk = param->write_correct_mask;
+
+ for (r = rank_bgn; r < rank_end; r++) {
+ /* Request to skip the rank */
+ if (param->skip_ranks[r])
+ continue;
+
+ /* Set rank */
+ set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
+
+ tmp_bit_chk = 0;
+ for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS - 1;
+ vg >= 0; vg--) {
+ /* Reset the FIFOs to get pointers to known state. */
+ writel(0, &phy_mgr_cmd->fifo_reset);
+
+ rw_mgr_mem_calibrate_write_test_issue(
+ write_group *
+ RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS + vg,
+ use_dm);
+
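+ /*
+ * A set bit in the RW manager status presumably flags a failing
+ * DQ, so invert and mask to accumulate the passing bits of this
+ * virtual group.
+ */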
+ base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
+ tmp_bit_chk <<= shift_ratio;
+ tmp_bit_chk |= (correct_mask_vg & ~(base_rw_mgr));
+ }
+
+ *bit_chk &= tmp_bit_chk;
+ }
+
+ set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
+ if (all_correct) {
+ debug_cond(DLEVEL == 2,
+ "write_test(%u,%u,ALL) : %u == %u => %i\n",
+ write_group, use_dm, *bit_chk,
+ param->write_correct_mask,
+ *bit_chk == param->write_correct_mask);
+ return *bit_chk == param->write_correct_mask;
+ } else {
+ debug_cond(DLEVEL == 2,
+ "write_test(%u,%u,ONE) : %u != %i => %i\n",
+ write_group, use_dm, *bit_chk, 0, *bit_chk != 0);
+ return *bit_chk != 0x00;
+ }
+}
+
+/**
+ * rw_mgr_mem_calibrate_read_test_patterns() - Read back test patterns
+ * @rank_bgn: Rank number
+ * @group: Read/Write Group
+ * @all_ranks: Test all ranks
+ *
+ * Performs a guaranteed read on the patterns we are going to use during a
+ * read test to ensure memory works.
+ */
+static int
+rw_mgr_mem_calibrate_read_test_patterns(const u32 rank_bgn, const u32 group,
+ const u32 all_ranks)
+{
+ const u32 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
+ RW_MGR_RUN_SINGLE_GROUP_OFFSET;
+ const u32 addr_offset =
+ (group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS) << 2;
+ const u32 rank_end = all_ranks ?
+ RW_MGR_MEM_NUMBER_OF_RANKS :
+ (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
+ const u32 shift_ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
+ RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
+ const u32 correct_mask_vg = param->read_correct_mask_vg;
+
+ u32 tmp_bit_chk, base_rw_mgr, bit_chk;
+ int vg, r;
+ int ret = 0;
+
+ bit_chk = param->read_correct_mask;
+
+ for (r = rank_bgn; r < rank_end; r++) {
+ /* Request to skip the rank */
+ if (param->skip_ranks[r])
+ continue;
+
+ /* Set rank */
+ set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
+
+ /* Load up a constant burst of read commands */
+ writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
+ writel(RW_MGR_GUARANTEED_READ,
+ &sdr_rw_load_jump_mgr_regs->load_jump_add0);
+
+ writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
+ writel(RW_MGR_GUARANTEED_READ_CONT,
+ &sdr_rw_load_jump_mgr_regs->load_jump_add1);
+
+ tmp_bit_chk = 0;
+ for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1;
+ vg >= 0; vg--) {
+ /* Reset the FIFOs to get pointers to known state. */
+ writel(0, &phy_mgr_cmd->fifo_reset);
+ writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
+ RW_MGR_RESET_READ_DATAPATH_OFFSET);
+ writel(RW_MGR_GUARANTEED_READ,
+ addr + addr_offset + (vg << 2));
+
+ base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
+ tmp_bit_chk <<= shift_ratio;
+ tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr;
+ }
+
+ bit_chk &= tmp_bit_chk;
+ }
+
+ writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));
+
+ set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
+
+ if (bit_chk != param->read_correct_mask)
+ ret = -EIO;
+
+ debug_cond(DLEVEL == 1,
+ "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %i\n",
+ __func__, __LINE__, group, bit_chk,
+ param->read_correct_mask, ret);
+
+ return ret;
+}
+
+/**
+ * rw_mgr_mem_calibrate_read_load_patterns() - Load up the patterns for read test
+ * @rank_bgn: Rank number
+ * @all_ranks: Test all ranks
+ *
+ * Load up the patterns we are going to use during a read test.
+ */
+static void rw_mgr_mem_calibrate_read_load_patterns(const u32 rank_bgn,
+ const int all_ranks)
+{
+ const u32 rank_end = all_ranks ?
+ RW_MGR_MEM_NUMBER_OF_RANKS :
+ (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
+ u32 r;
+
+ debug("%s:%d\n", __func__, __LINE__);
+
+ for (r = rank_bgn; r < rank_end; r++) {
+ if (param->skip_ranks[r])
+ /* request to skip the rank */
+ continue;
+
+ /* set rank */
+ set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
+
+ /* Load up a constant bursts */
+ writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
+
+ writel(RW_MGR_GUARANTEED_WRITE_WAIT0,
+ &sdr_rw_load_jump_mgr_regs->load_jump_add0);
+
+ writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
+
+ writel(RW_MGR_GUARANTEED_WRITE_WAIT1,
+ &sdr_rw_load_jump_mgr_regs->load_jump_add1);
+
+ writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);
+
+ writel(RW_MGR_GUARANTEED_WRITE_WAIT2,
+ &sdr_rw_load_jump_mgr_regs->load_jump_add2);
writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);
set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
}
-/*
- * try a read and see if it returns correct data back. has dummy reads
- * inserted into the mix used to align dqs enable. has more thorough checks
- * than the regular read test.
- */
-static uint32_t rw_mgr_mem_calibrate_read_test(uint32_t rank_bgn, uint32_t group,
- uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
- uint32_t all_groups, uint32_t all_ranks)
-{
- uint32_t r, vg;
- uint32_t correct_mask_vg;
- uint32_t tmp_bit_chk;
- uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
+/**
+ * rw_mgr_mem_calibrate_read_test() - Perform READ test on single rank
+ * @rank_bgn: Rank number
+ * @group: Read/Write group
+ * @num_tries: Number of retries of the test
+ * @all_correct: All bits must be correct in the mask
+ * @bit_chk: Resulting bit mask after the test
+ * @all_groups: Test all R/W groups
+ * @all_ranks: Test all ranks
+ *
+ * Try a read and see if it returns correct data back. Test has dummy reads
+ * inserted into the mix used to align DQS enable. Test has more thorough
+ * checks than the regular read test.
+ */
+static int
+rw_mgr_mem_calibrate_read_test(const u32 rank_bgn, const u32 group,
+ const u32 num_tries, const u32 all_correct,
+ u32 *bit_chk,
+ const u32 all_groups, const u32 all_ranks)
+{
+ const u32 rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
- uint32_t addr;
- uint32_t base_rw_mgr;
+ const u32 quick_read_mode =
+ ((STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS) &&
+ ENABLE_SUPER_QUICK_CALIBRATION);
+ u32 correct_mask_vg = param->read_correct_mask_vg;
+ u32 tmp_bit_chk;
+ u32 base_rw_mgr;
+ u32 addr;
- *bit_chk = param->read_correct_mask;
- correct_mask_vg = param->read_correct_mask_vg;
+ int r, vg, ret;
- uint32_t quick_read_mode = (((STATIC_CALIB_STEPS) &
- CALIB_SKIP_DELAY_SWEEPS) && ENABLE_SUPER_QUICK_CALIBRATION);
+ *bit_chk = param->read_correct_mask;
for (r = rank_bgn; r < rank_end; r++) {
if (param->skip_ranks[r])
&sdr_rw_load_jump_mgr_regs->load_jump_add3);
tmp_bit_chk = 0;
- for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS-1; ; vg--) {
- /* reset the fifos to get pointers to known state */
+ for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; vg >= 0;
+ vg--) {
+ /* Reset the FIFOs to get pointers to known state. */
writel(0, &phy_mgr_cmd->fifo_reset);
writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RESET_READ_DATAPATH_OFFSET);
- tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
- / RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);
-
- if (all_groups)
- addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_ALL_GROUPS_OFFSET;
- else
- addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
+ if (all_groups) {
+ addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
+ RW_MGR_RUN_ALL_GROUPS_OFFSET;
+ } else {
+ addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
+ RW_MGR_RUN_SINGLE_GROUP_OFFSET;
+ }
writel(RW_MGR_READ_B2B, addr +
((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
vg) << 2));
base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
- tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr));
-
- if (vg == 0)
- break;
+ tmp_bit_chk <<= RW_MGR_MEM_DQ_PER_READ_DQS /
+ RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
+ tmp_bit_chk |= correct_mask_vg & ~(base_rw_mgr);
}
+
*bit_chk &= tmp_bit_chk;
}
addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));
+ set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
+
if (all_correct) {
- set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
- debug_cond(DLEVEL == 2, "%s:%d read_test(%u,ALL,%u) =>\
- (%u == %u) => %lu", __func__, __LINE__, group,
- all_groups, *bit_chk, param->read_correct_mask,
- (long unsigned int)(*bit_chk ==
- param->read_correct_mask));
- return *bit_chk == param->read_correct_mask;
+ ret = (*bit_chk == param->read_correct_mask);
+ debug_cond(DLEVEL == 2,
+ "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %i\n",
+ __func__, __LINE__, group, all_groups, *bit_chk,
+ param->read_correct_mask, ret);
} else {
- set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
- debug_cond(DLEVEL == 2, "%s:%d read_test(%u,ONE,%u) =>\
- (%u != %lu) => %lu\n", __func__, __LINE__,
- group, all_groups, *bit_chk, (long unsigned int)0,
- (long unsigned int)(*bit_chk != 0x00));
- return *bit_chk != 0x00;
+ ret = (*bit_chk != 0x00);
+ debug_cond(DLEVEL == 2,
+ "%s:%d read_test(%u,ONE,%u) => (%u != %u) => %i\n",
+ __func__, __LINE__, group, all_groups, *bit_chk,
+ 0, ret);
}
+
+ return ret;
}
-static uint32_t rw_mgr_mem_calibrate_read_test_all_ranks(uint32_t group,
- uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
- uint32_t all_groups)
+/**
+ * rw_mgr_mem_calibrate_read_test_all_ranks() - Perform READ test on all ranks
+ * @grp: Read/Write group
+ * @num_tries: Number of retries of the test
+ * @all_correct: All bits must be correct in the mask
+ * @all_groups: Test all R/W groups
+ *
+ * Perform a READ test across all memory ranks.
+ */
+static int
+rw_mgr_mem_calibrate_read_test_all_ranks(const u32 grp, const u32 num_tries,
+ const u32 all_correct,
+ const u32 all_groups)
{
- return rw_mgr_mem_calibrate_read_test(0, group, num_tries, all_correct,
- bit_chk, all_groups, 1);
+ u32 bit_chk;
+ return rw_mgr_mem_calibrate_read_test(0, grp, num_tries, all_correct,
+ &bit_chk, all_groups, 1);
}
-static void rw_mgr_incr_vfifo(uint32_t grp, uint32_t *v)
+/**
+ * rw_mgr_incr_vfifo() - Increase VFIFO value
+ * @grp: Read/Write group
+ *
+ * Increase VFIFO value.
+ */
+static void rw_mgr_incr_vfifo(const u32 grp)
{
writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
- (*v)++;
}
-static void rw_mgr_decr_vfifo(uint32_t grp, uint32_t *v)
+/**
+ * rw_mgr_decr_vfifo() - Decrease VFIFO value
+ * @grp: Read/Write group
+ *
+ * Decrease VFIFO value.
+ */
+static void rw_mgr_decr_vfifo(const u32 grp)
{
- uint32_t i;
+ u32 i;
- for (i = 0; i < VFIFO_SIZE-1; i++)
- rw_mgr_incr_vfifo(grp, v);
+ for (i = 0; i < VFIFO_SIZE - 1; i++)
+ rw_mgr_incr_vfifo(grp);
}
-static int find_vfifo_read(uint32_t grp, uint32_t *bit_chk)
-{
- uint32_t v;
- uint32_t fail_cnt = 0;
- uint32_t test_status;
+/**
+ * find_vfifo_failing_read() - Push VFIFO to get a failing read
+ * @grp: Read/Write group
+ *
+ * Push VFIFO until a failing read happens.
+ */
+static int find_vfifo_failing_read(const u32 grp)
+{
+ u32 v, ret, fail_cnt = 0;
- for (v = 0; v < VFIFO_SIZE; ) {
- debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo %u\n",
+ for (v = 0; v < VFIFO_SIZE; v++) {
+ debug_cond(DLEVEL == 2, "%s:%d: vfifo %u\n",
__func__, __LINE__, v);
- test_status = rw_mgr_mem_calibrate_read_test_all_ranks
- (grp, 1, PASS_ONE_BIT, bit_chk, 0);
- if (!test_status) {
+ ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
+ PASS_ONE_BIT, 0);
+ if (!ret) {
fail_cnt++;
if (fail_cnt == 2)
- break;
+ return v;
}
- /* fiddle with FIFO */
- rw_mgr_incr_vfifo(grp, &v);
+ /* Fiddle with FIFO. */
+ rw_mgr_incr_vfifo(grp);
}
- if (v >= VFIFO_SIZE) {
- /* no failing read found!! Something must have gone wrong */
- debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo failed\n",
- __func__, __LINE__);
- return 0;
- } else {
- return v;
- }
+ /* No failing read found! Something must have gone wrong. */
+ debug_cond(DLEVEL == 2, "%s:%d: vfifo failed\n", __func__, __LINE__);
+ return 0;
}
-static int find_working_phase(uint32_t *grp, uint32_t *bit_chk,
- uint32_t dtaps_per_ptap, uint32_t *work_bgn,
- uint32_t *v, uint32_t *d, uint32_t *p,
- uint32_t *i, uint32_t *max_working_cnt)
+/**
+ * sdr_find_phase_delay() - Find DQS enable phase or delay
+ * @working: If 1, look for working phase/delay, if 0, look for non-working
+ * @delay: If 1, look for delay, if 0, look for phase
+ * @grp: Read/Write group
+ * @work: Working window position
+ * @work_inc: Working window increment
+ * @pd: DQS Phase/Delay Iterator
+ *
+ * Find working or non-working DQS enable phase or delay setting.
+ */
+static int sdr_find_phase_delay(int working, int delay, const u32 grp,
+ u32 *work, const u32 work_inc, u32 *pd)
{
- uint32_t found_begin = 0;
- uint32_t tmp_delay = 0;
- uint32_t test_status;
+ const u32 max = delay ? IO_DQS_EN_DELAY_MAX : IO_DQS_EN_PHASE_MAX;
+ u32 ret;
+
+ for (; *pd <= max; (*pd)++) {
+ if (delay)
+ scc_mgr_set_dqs_en_delay_all_ranks(grp, *pd);
+ else
+ scc_mgr_set_dqs_en_phase_all_ranks(grp, *pd);
- for (*d = 0; *d <= dtaps_per_ptap; (*d)++, tmp_delay +=
- IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
- *work_bgn = tmp_delay;
- scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);
+ ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
+ PASS_ONE_BIT, 0);
+ if (!working)
+ ret = !ret;
- for (*i = 0; *i < VFIFO_SIZE; (*i)++) {
- for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX; (*p)++, *work_bgn +=
- IO_DELAY_PER_OPA_TAP) {
- scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);
+ if (ret)
+ return 0;
- test_status =
- rw_mgr_mem_calibrate_read_test_all_ranks
- (*grp, 1, PASS_ONE_BIT, bit_chk, 0);
+ if (work)
+ *work += work_inc;
+ }
- if (test_status) {
- *max_working_cnt = 1;
- found_begin = 1;
- break;
- }
- }
+ return -EINVAL;
+}
+/**
+ * sdr_find_phase() - Find DQS enable phase
+ * @working: If 1, look for working phase, if 0, look for non-working phase
+ * @grp: Read/Write group
+ * @work: Working window position
+ * @i: Iterator
+ * @p: DQS Phase Iterator
+ *
+ * Find working or non-working DQS enable phase setting.
+ */
+static int sdr_find_phase(int working, const u32 grp, u32 *work,
+ u32 *i, u32 *p)
+{
+ const u32 end = VFIFO_SIZE + (working ? 0 : 1);
+ int ret;
- if (found_begin)
- break;
+ for (; *i < end; (*i)++) {
+ if (working)
+ *p = 0;
- if (*p > IO_DQS_EN_PHASE_MAX)
- /* fiddle with FIFO */
- rw_mgr_incr_vfifo(*grp, v);
- }
+ ret = sdr_find_phase_delay(working, 0, grp, work,
+ IO_DELAY_PER_OPA_TAP, p);
+ if (!ret)
+ return 0;
- if (found_begin)
- break;
+ if (*p > IO_DQS_EN_PHASE_MAX) {
+ /* Fiddle with FIFO. */
+ rw_mgr_incr_vfifo(grp);
+ if (!working)
+ *p = 0;
+ }
}
- if (*i >= VFIFO_SIZE) {
- /* cannot find working solution */
- debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/\
- ptap/dtap\n", __func__, __LINE__);
- return 0;
- } else {
- return 1;
+ return -EINVAL;
+}
+
+/**
+ * sdr_working_phase() - Find working DQS enable phase
+ * @grp: Read/Write group
+ * @work_bgn: Working window start position
+ * @d: dtaps output value
+ * @p: DQS Phase Iterator
+ * @i: Iterator
+ *
+ * Find working DQS enable phase setting.
+ */
+static int sdr_working_phase(const u32 grp, u32 *work_bgn, u32 *d,
+ u32 *p, u32 *i)
+{
+ const u32 dtaps_per_ptap = IO_DELAY_PER_OPA_TAP /
+ IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
+ int ret;
+
+ *work_bgn = 0;
+
+ for (*d = 0; *d <= dtaps_per_ptap; (*d)++) {
+ *i = 0;
+ scc_mgr_set_dqs_en_delay_all_ranks(grp, *d);
+ ret = sdr_find_phase(1, grp, work_bgn, i, p);
+ if (!ret)
+ return 0;
+ *work_bgn += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
}
+
+ /* Cannot find working solution */
+ debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
+ __func__, __LINE__);
+ return -EINVAL;
}
-static void sdr_backup_phase(uint32_t *grp, uint32_t *bit_chk,
- uint32_t *work_bgn, uint32_t *v, uint32_t *d,
- uint32_t *p, uint32_t *max_working_cnt)
+/**
+ * sdr_backup_phase() - Find DQS enable backup phase
+ * @grp: Read/Write group
+ * @work_bgn: Working window start position
+ * @p: DQS Phase Iterator
+ *
+ * Find DQS enable backup phase setting.
+ */
+static void sdr_backup_phase(const u32 grp, u32 *work_bgn, u32 *p)
{
- uint32_t found_begin = 0;
- uint32_t tmp_delay;
+ u32 tmp_delay, d;
+ int ret;
/* Special case code for backing up a phase */
if (*p == 0) {
*p = IO_DQS_EN_PHASE_MAX;
- rw_mgr_decr_vfifo(*grp, v);
+ rw_mgr_decr_vfifo(grp);
} else {
(*p)--;
}
tmp_delay = *work_bgn - IO_DELAY_PER_OPA_TAP;
- scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);
+ scc_mgr_set_dqs_en_phase_all_ranks(grp, *p);
- for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn;
- (*d)++, tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
- scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);
+ for (d = 0; d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn; d++) {
+ scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
- if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1,
- PASS_ONE_BIT,
- bit_chk, 0)) {
- found_begin = 1;
+ ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
+ PASS_ONE_BIT, 0);
+ if (ret) {
*work_bgn = tmp_delay;
break;
}
- }
- /* We have found a working dtap before the ptap found above */
- if (found_begin == 1)
- (*max_working_cnt)++;
+ tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
+ }
- /*
- * Restore VFIFO to old state before we decremented it
- * (if needed).
- */
+ /* Restore VFIFO to old state before we decremented it (if needed). */
(*p)++;
if (*p > IO_DQS_EN_PHASE_MAX) {
*p = 0;
- rw_mgr_incr_vfifo(*grp, v);
+ rw_mgr_incr_vfifo(grp);
}
- scc_mgr_set_dqs_en_delay_all_ranks(*grp, 0);
+ scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
}
-static int sdr_nonworking_phase(uint32_t *grp, uint32_t *bit_chk,
- uint32_t *work_bgn, uint32_t *v, uint32_t *d,
- uint32_t *p, uint32_t *i, uint32_t *max_working_cnt,
- uint32_t *work_end)
+/**
+ * sdr_nonworking_phase() - Find non-working DQS enable phase
+ * @grp: Read/Write group
+ * @work_end: Working window end position
+ * @p: DQS Phase Iterator
+ * @i: Iterator
+ *
+ * Find non-working DQS enable phase setting.
+ */
+static int sdr_nonworking_phase(const u32 grp, u32 *work_end, u32 *p, u32 *i)
{
- uint32_t found_end = 0;
+ int ret;
(*p)++;
*work_end += IO_DELAY_PER_OPA_TAP;
if (*p > IO_DQS_EN_PHASE_MAX) {
- /* fiddle with FIFO */
+ /* Fiddle with FIFO. */
*p = 0;
- rw_mgr_incr_vfifo(*grp, v);
+ rw_mgr_incr_vfifo(grp);
}
- for (; *i < VFIFO_SIZE + 1; (*i)++) {
- for (; *p <= IO_DQS_EN_PHASE_MAX; (*p)++, *work_end
- += IO_DELAY_PER_OPA_TAP) {
- scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);
-
- if (!rw_mgr_mem_calibrate_read_test_all_ranks
- (*grp, 1, PASS_ONE_BIT, bit_chk, 0)) {
- found_end = 1;
- break;
- } else {
- (*max_working_cnt)++;
- }
- }
-
- if (found_end)
- break;
-
- if (*p > IO_DQS_EN_PHASE_MAX) {
- /* fiddle with FIFO */
- rw_mgr_incr_vfifo(*grp, v);
- *p = 0;
- }
+ ret = sdr_find_phase(0, grp, work_end, i, p);
+ if (ret) {
+ /* Cannot see edge of failing read. */
+ debug_cond(DLEVEL == 2, "%s:%d: end: failed\n",
+ __func__, __LINE__);
}
- if (*i >= VFIFO_SIZE + 1) {
- /* cannot see edge of failing read */
- debug_cond(DLEVEL == 2, "%s:%d sdr_nonworking_phase: end:\
- failed\n", __func__, __LINE__);
- return 0;
- } else {
- return 1;
- }
+ return ret;
}
-static int sdr_find_window_centre(uint32_t *grp, uint32_t *bit_chk,
- uint32_t *work_bgn, uint32_t *v, uint32_t *d,
- uint32_t *p, uint32_t *work_mid,
- uint32_t *work_end)
+/**
+ * sdr_find_window_center() - Find center of the working DQS window.
+ * @grp: Read/Write group
+ * @work_bgn: First working settings
+ * @work_end: Last working settings
+ *
+ * Find center of the working DQS enable window.
+ */
+static int sdr_find_window_center(const u32 grp, const u32 work_bgn,
+ const u32 work_end)
{
- int i;
+ u32 work_mid;
int tmp_delay = 0;
+ int i, p, d;
- *work_mid = (*work_bgn + *work_end) / 2;
+ work_mid = (work_bgn + work_end) / 2;
debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
- *work_bgn, *work_end, *work_mid);
+ work_bgn, work_end, work_mid);
/* Get the middle delay to be less than a VFIFO delay */
- for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX;
- (*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP)
- ;
+ tmp_delay = (IO_DQS_EN_PHASE_MAX + 1) * IO_DELAY_PER_OPA_TAP;
+
debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
- while (*work_mid > tmp_delay)
- *work_mid -= tmp_delay;
- debug_cond(DLEVEL == 2, "new work_mid %d\n", *work_mid);
+ work_mid %= tmp_delay;
+ debug_cond(DLEVEL == 2, "new work_mid %d\n", work_mid);
- tmp_delay = 0;
- for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX && tmp_delay < *work_mid;
- (*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP)
- ;
- tmp_delay -= IO_DELAY_PER_OPA_TAP;
- debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", (*p) - 1, tmp_delay);
- for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_mid; (*d)++,
- tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP)
- ;
- debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", *d, tmp_delay);
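+ /*
+ * Decompose work_mid into coarse phase taps (p) plus the remaining
+ * fine delay-chain taps (d).
+ */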
+ tmp_delay = rounddown(work_mid, IO_DELAY_PER_OPA_TAP);
+ if (tmp_delay > IO_DQS_EN_PHASE_MAX * IO_DELAY_PER_OPA_TAP)
+ tmp_delay = IO_DQS_EN_PHASE_MAX * IO_DELAY_PER_OPA_TAP;
+ p = tmp_delay / IO_DELAY_PER_OPA_TAP;
+
+ debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", p, tmp_delay);
+
+ d = DIV_ROUND_UP(work_mid - tmp_delay, IO_DELAY_PER_DQS_EN_DCHAIN_TAP);
+ if (d > IO_DQS_EN_DELAY_MAX)
+ d = IO_DQS_EN_DELAY_MAX;
+ tmp_delay += d * IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
+
+ debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", d, tmp_delay);
- scc_mgr_set_dqs_en_phase_all_ranks(*grp, (*p) - 1);
- scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);
+ scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
+ scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
/*
* Push vfifo until we can successfully calibrate. We can do this
* because the largest possible margin is 1 VFIFO cycle.
*/
for (i = 0; i < VFIFO_SIZE; i++) {
- debug_cond(DLEVEL == 2, "find_dqs_en_phase: center: vfifo=%u\n",
- *v);
- if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1,
+ debug_cond(DLEVEL == 2, "find_dqs_en_phase: center\n");
+ if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
PASS_ONE_BIT,
- bit_chk, 0)) {
- break;
+ 0)) {
+ debug_cond(DLEVEL == 2,
+ "%s:%d center: found: ptap=%u dtap=%u\n",
+ __func__, __LINE__, p, d);
+ return 0;
}
- /* fiddle with FIFO */
- rw_mgr_incr_vfifo(*grp, v);
+ /* Fiddle with FIFO. */
+ rw_mgr_incr_vfifo(grp);
}
- if (i >= VFIFO_SIZE) {
- debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center: \
- failed\n", __func__, __LINE__);
- return 0;
- } else {
- return 1;
- }
+ debug_cond(DLEVEL == 2, "%s:%d center: failed.\n",
+ __func__, __LINE__);
+ return -EINVAL;
}
-/* find a good dqs enable to use */
-static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp)
+/**
+ * rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase() - Find a good DQS enable to use
+ * @grp: Read/Write Group
+ *
+ * Find a good DQS enable to use.
+ */
+static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp)
{
- uint32_t v, d, p, i;
- uint32_t max_working_cnt;
- uint32_t bit_chk;
- uint32_t dtaps_per_ptap;
- uint32_t work_bgn, work_mid, work_end;
- uint32_t found_passing_read, found_failing_read, initial_failing_dtap;
+ u32 d, p, i;
+ u32 dtaps_per_ptap;
+ u32 work_bgn, work_end;
+ u32 found_passing_read, found_failing_read, initial_failing_dtap;
+ int ret;
debug("%s:%d %u\n", __func__, __LINE__, grp);
scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);
- /* ************************************************************** */
- /* * Step 0 : Determine number of delay taps for each phase tap * */
- dtaps_per_ptap = IO_DELAY_PER_OPA_TAP/IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
-
- /* ********************************************************* */
- /* * Step 1 : First push vfifo until we get a failing read * */
- v = find_vfifo_read(grp, &bit_chk);
+ /* Step 0: Determine number of delay taps for each phase tap. */
+ dtaps_per_ptap = IO_DELAY_PER_OPA_TAP / IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
- max_working_cnt = 0;
+ /* Step 1: First push vfifo until we get a failing read. */
+ find_vfifo_failing_read(grp);
- /* ******************************************************** */
- /* * step 2: find first working phase, increment in ptaps * */
+ /* Step 2: Find first working phase, increment in ptaps. */
work_bgn = 0;
- if (find_working_phase(&grp, &bit_chk, dtaps_per_ptap, &work_bgn, &v, &d,
- &p, &i, &max_working_cnt) == 0)
- return 0;
+ ret = sdr_working_phase(grp, &work_bgn, &d, &p, &i);
+ if (ret)
+ return ret;
work_end = work_bgn;
/*
- * If d is 0 then the working window covers a phase tap and
- * we can follow the old procedure otherwise, we've found the beginning,
+ * If d is 0 then the working window covers a phase tap and we can
+ * follow the old procedure. Otherwise, we've found the beginning
* and we need to increment the dtaps until we find the end.
*/
if (d == 0) {
- /* ********************************************************* */
- /* * step 3a: if we have room, back off by one and
- increment in dtaps * */
-
- sdr_backup_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p,
- &max_working_cnt);
-
- /* ********************************************************* */
- /* * step 4a: go forward from working phase to non working
- phase, increment in ptaps * */
- if (sdr_nonworking_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p,
- &i, &max_working_cnt, &work_end) == 0)
- return 0;
+ /*
+ * Step 3a: If we have room, back off by one and
+ * increment in dtaps.
+ */
+ sdr_backup_phase(grp, &work_bgn, &p);
+
+ /*
+ * Step 4a: go forward from working phase to non working
+ * phase, increment in ptaps.
+ */
+ ret = sdr_nonworking_phase(grp, &work_end, &p, &i);
+ if (ret)
+ return ret;
- /* ********************************************************* */
- /* * step 5a: back off one from last, increment in dtaps * */
+ /* Step 5a: Back off one from last, increment in dtaps. */
/* Special case code for backing up a phase */
if (p == 0) {
p = IO_DQS_EN_PHASE_MAX;
- rw_mgr_decr_vfifo(grp, &v);
+ rw_mgr_decr_vfifo(grp);
} else {
p = p - 1;
}
work_end -= IO_DELAY_PER_OPA_TAP;
scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
- /* * The actual increment of dtaps is done outside of
- the if/else loop to share code */
d = 0;
- debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p: \
- vfifo=%u ptap=%u\n", __func__, __LINE__,
- v, p);
- } else {
- /* ******************************************************* */
- /* * step 3-5b: Find the right edge of the window using
- delay taps * */
- debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase:vfifo=%u \
- ptap=%u dtap=%u bgn=%u\n", __func__, __LINE__,
- v, p, d, work_bgn);
-
- work_end = work_bgn;
-
- /* * The actual increment of dtaps is done outside of the
- if/else loop to share code */
-
- /* Only here to counterbalance a subtract later on which is
- not needed if this branch of the algorithm is taken */
- max_working_cnt++;
+ debug_cond(DLEVEL == 2, "%s:%d p: ptap=%u\n",
+ __func__, __LINE__, p);
}
- /* The dtap increment to find the failing edge is done here */
- for (; d <= IO_DQS_EN_DELAY_MAX; d++, work_end +=
- IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
- debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: \
- end-2: dtap=%u\n", __func__, __LINE__, d);
- scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
-
- if (!rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
- PASS_ONE_BIT,
- &bit_chk, 0)) {
- break;
- }
- }
+ /* The dtap increment to find the failing edge is done here. */
+ sdr_find_phase_delay(0, 1, grp, &work_end,
+ IO_DELAY_PER_DQS_EN_DCHAIN_TAP, &d);
/* Go back to working dtap */
if (d != 0)
work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
- debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p/d: vfifo=%u \
- ptap=%u dtap=%u end=%u\n", __func__, __LINE__,
- v, p, d-1, work_end);
+ debug_cond(DLEVEL == 2,
+ "%s:%d p/d: ptap=%u dtap=%u end=%u\n",
+ __func__, __LINE__, p, d - 1, work_end);
if (work_end < work_bgn) {
/* nil range */
- debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end-2: \
- failed\n", __func__, __LINE__);
- return 0;
+ debug_cond(DLEVEL == 2, "%s:%d end-2: failed\n",
+ __func__, __LINE__);
+ return -EINVAL;
}
- debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: found range [%u,%u]\n",
+ debug_cond(DLEVEL == 2, "%s:%d found range [%u,%u]\n",
__func__, __LINE__, work_bgn, work_end);
- /* *************************************************************** */
/*
- * * We need to calculate the number of dtaps that equal a ptap
- * * To do that we'll back up a ptap and re-find the edge of the
- * * window using dtaps
+ * We need to calculate the number of dtaps that equal a ptap.
+ * To do that we'll back up a ptap and re-find the edge of the
+ * window using dtaps
*/
-
- debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: calculate dtaps_per_ptap \
- for tracking\n", __func__, __LINE__);
+ debug_cond(DLEVEL == 2, "%s:%d calculate dtaps_per_ptap for tracking\n",
+ __func__, __LINE__);
/* Special case code for backing up a phase */
if (p == 0) {
p = IO_DQS_EN_PHASE_MAX;
- rw_mgr_decr_vfifo(grp, &v);
- debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup \
- cycle/phase: v=%u p=%u\n", __func__, __LINE__,
- v, p);
+ rw_mgr_decr_vfifo(grp);
+ debug_cond(DLEVEL == 2, "%s:%d backedup cycle/phase: p=%u\n",
+ __func__, __LINE__, p);
} else {
p = p - 1;
- debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup \
- phase only: v=%u p=%u", __func__, __LINE__,
- v, p);
+ debug_cond(DLEVEL == 2, "%s:%d backedup phase only: p=%u",
+ __func__, __LINE__, p);
}
scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
/*
* Increase dtap until we first see a passing read (in case the
- * window is smaller than a ptap),
- * and then a failing read to mark the edge of the window again
+ * window is smaller than a ptap), and then a failing read to
+ * mark the edge of the window again.
*/
- /* Find a passing read */
- debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find passing read\n",
+ /* Find a passing read. */
+ debug_cond(DLEVEL == 2, "%s:%d find passing read\n",
__func__, __LINE__);
- found_passing_read = 0;
- found_failing_read = 0;
- initial_failing_dtap = d;
- for (; d <= IO_DQS_EN_DELAY_MAX; d++) {
- debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: testing \
- read d=%u\n", __func__, __LINE__, d);
- scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
- if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
- PASS_ONE_BIT,
- &bit_chk, 0)) {
- found_passing_read = 1;
- break;
- }
- }
+ initial_failing_dtap = d;
+ found_passing_read = !sdr_find_phase_delay(1, 1, grp, NULL, 0, &d);
if (found_passing_read) {
- /* Find a failing read */
- debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find failing \
- read\n", __func__, __LINE__);
- for (d = d + 1; d <= IO_DQS_EN_DELAY_MAX; d++) {
- debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: \
- testing read d=%u\n", __func__, __LINE__, d);
- scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
-
- if (!rw_mgr_mem_calibrate_read_test_all_ranks
- (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) {
- found_failing_read = 1;
- break;
- }
- }
+ /* Find a failing read. */
+ debug_cond(DLEVEL == 2, "%s:%d find failing read\n",
+ __func__, __LINE__);
+ d++;
+ found_failing_read = !sdr_find_phase_delay(0, 1, grp, NULL, 0,
+ &d);
} else {
- debug_cond(DLEVEL == 1, "%s:%d find_dqs_en_phase: failed to \
- calculate dtaps", __func__, __LINE__);
- debug_cond(DLEVEL == 1, "per ptap. Fall back on static value\n");
+ debug_cond(DLEVEL == 1,
+ "%s:%d failed to calculate dtaps per ptap. Fall back on static value\n",
+ __func__, __LINE__);
}
/*
dtaps_per_ptap = d - initial_failing_dtap;
writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
- debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: dtaps_per_ptap=%u \
- - %u = %u", __func__, __LINE__, d,
- initial_failing_dtap, dtaps_per_ptap);
-
- /* ******************************************** */
- /* * step 6: Find the centre of the window * */
- if (sdr_find_window_centre(&grp, &bit_chk, &work_bgn, &v, &d, &p,
- &work_mid, &work_end) == 0)
- return 0;
-
- debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center found: \
- vfifo=%u ptap=%u dtap=%u\n", __func__, __LINE__,
- v, p-1, d);
- return 1;
-}
-
-/*
- * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
- * dq_in_delay values
- */
-static uint32_t
-rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
-(uint32_t write_group, uint32_t read_group, uint32_t test_bgn)
-{
- uint32_t found;
- uint32_t i;
- uint32_t p;
- uint32_t d;
- uint32_t r;
-
- const uint32_t delay_step = IO_IO_IN_DELAY_MAX /
- (RW_MGR_MEM_DQ_PER_READ_DQS-1);
- /* we start at zero, so have one less dq to devide among */
-
- debug("%s:%d (%u,%u,%u)", __func__, __LINE__, write_group, read_group,
- test_bgn);
-
- /* try different dq_in_delays since the dq path is shorter than dqs */
-
- for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
- r += NUM_RANKS_PER_SHADOW_REG) {
- for (i = 0, p = test_bgn, d = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++, d += delay_step) {
- debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_\
- vfifo_find_dqs_", __func__, __LINE__);
- debug_cond(DLEVEL == 1, "en_phase_sweep_dq_in_delay: g=%u/%u ",
- write_group, read_group);
- debug_cond(DLEVEL == 1, "r=%u, i=%u p=%u d=%u\n", r, i , p, d);
- scc_mgr_set_dq_in_delay(p, d);
- scc_mgr_load_dq(p);
- }
- writel(0, &sdr_scc_mgr->update);
- }
-
- found = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(read_group);
+ debug_cond(DLEVEL == 2, "%s:%d dtaps_per_ptap=%u - %u = %u",
+ __func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap);
- debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_vfifo_find_dqs_\
- en_phase_sweep_dq", __func__, __LINE__);
- debug_cond(DLEVEL == 1, "_in_delay: g=%u/%u found=%u; Reseting delay \
- chain to zero\n", write_group, read_group, found);
+ /* Step 6: Find the centre of the window. */
+ ret = sdr_find_window_center(grp, work_bgn, work_end);
- for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
- r += NUM_RANKS_PER_SHADOW_REG) {
- for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS;
- i++, p++) {
- scc_mgr_set_dq_in_delay(p, 0);
- scc_mgr_load_dq(p);
- }
- writel(0, &sdr_scc_mgr->update);
- }
-
- return found;
+ return ret;
}
-/* per-bit deskew DQ and center */
-static uint32_t rw_mgr_mem_calibrate_vfifo_center(uint32_t rank_bgn,
- uint32_t write_group, uint32_t read_group, uint32_t test_bgn,
- uint32_t use_read_test, uint32_t update_fom)
+/**
+ * search_stop_check() - Check if the detected edge is valid
+ * @write: Perform read (Stage 2) or write (Stage 3) calibration
+ * @d: DQS delay
+ * @rank_bgn: Rank number
+ * @write_group: Write Group
+ * @read_group: Read Group
+ * @bit_chk: Resulting bit mask after the test
+ * @sticky_bit_chk: Resulting sticky bit mask after the test
+ * @use_read_test: Perform read test
+ *
+ * Test if the found edge is valid.
+ */
+static u32 search_stop_check(const int write, const int d, const int rank_bgn,
+ const u32 write_group, const u32 read_group,
+ u32 *bit_chk, u32 *sticky_bit_chk,
+ const u32 use_read_test)
{
- uint32_t i, p, d, min_index;
+ const u32 ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
+ RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
+ const u32 correct_mask = write ? param->write_correct_mask :
+ param->read_correct_mask;
+ const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
+ RW_MGR_MEM_DQ_PER_READ_DQS;
+ u32 ret;
/*
- * Store these as signed since there are comparisons with
- * signed numbers.
+ * Stop searching when the read test doesn't pass AND when
+ * we've seen a passing read on every bit.
*/
- uint32_t bit_chk;
- uint32_t sticky_bit_chk;
- int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
- int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
- int32_t final_dq[RW_MGR_MEM_DQ_PER_READ_DQS];
- int32_t mid;
- int32_t orig_mid_min, mid_min;
- int32_t new_dqs, start_dqs, start_dqs_en, shift_dq, final_dqs,
- final_dqs_en;
- int32_t dq_margin, dqs_margin;
- uint32_t stop;
- uint32_t temp_dq_in_delay1, temp_dq_in_delay2;
- uint32_t addr;
-
- debug("%s:%d: %u %u", __func__, __LINE__, read_group, test_bgn);
-
- addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_IN_DELAY_OFFSET;
- start_dqs = readl(addr + (read_group << 2));
- if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
- start_dqs_en = readl(addr + ((read_group << 2)
- - IO_DQS_EN_DELAY_OFFSET));
-
- /* set the left and right edge of each bit to an illegal value */
- /* use (IO_IO_IN_DELAY_MAX + 1) as an illegal value */
- sticky_bit_chk = 0;
- for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
- left_edge[i] = IO_IO_IN_DELAY_MAX + 1;
- right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
- }
+ if (write) { /* WRITE-ONLY */
+ ret = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
+ 0, PASS_ONE_BIT,
+ bit_chk, 0);
+ } else if (use_read_test) { /* READ-ONLY */
+ ret = !rw_mgr_mem_calibrate_read_test(rank_bgn, read_group,
+ NUM_READ_PB_TESTS,
+ PASS_ONE_BIT, bit_chk,
+ 0, 0);
+ } else { /* READ-ONLY */
+ rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 0,
+ PASS_ONE_BIT, bit_chk, 0);
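+		/*
+		 * The write test exercises all read groups that belong to
+		 * this write group; shift the result down so that only the
+		 * bits of the read group under test remain ('ratio' read
+		 * groups map onto one write group).
+		 */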
+ *bit_chk = *bit_chk >> (per_dqs *
+ (read_group - (write_group * ratio)));
+ ret = (*bit_chk == 0);
+ }
+ *sticky_bit_chk = *sticky_bit_chk | *bit_chk;
+ ret = ret && (*sticky_bit_chk == correct_mask);
+ debug_cond(DLEVEL == 2,
+ "%s:%d center(left): dtap=%u => %u == %u && %u",
+ __func__, __LINE__, d,
+ *sticky_bit_chk, correct_mask, ret);
+ return ret;
+}
- /* Search for the left edge of the window for each bit */
- for (d = 0; d <= IO_IO_IN_DELAY_MAX; d++) {
- scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, d);
+/**
+ * search_left_edge() - Find left edge of DQ/DQS working phase
+ * @write: Perform read (Stage 2) or write (Stage 3) calibration
+ * @rank_bgn: Rank number
+ * @write_group: Write Group
+ * @read_group: Read Group
+ * @test_bgn: Rank number to begin the test
+ * @sticky_bit_chk: Resulting sticky bit mask after the test
+ * @left_edge: Left edge of the DQ/DQS phase
+ * @right_edge: Right edge of the DQ/DQS phase
+ * @use_read_test: Perform read test
+ *
+ * Find left edge of DQ/DQS working phase.
+ */
+static void search_left_edge(const int write, const int rank_bgn,
+ const u32 write_group, const u32 read_group, const u32 test_bgn,
+ u32 *sticky_bit_chk,
+ int *left_edge, int *right_edge, const u32 use_read_test)
+{
+ const u32 delay_max = write ? IO_IO_OUT1_DELAY_MAX : IO_IO_IN_DELAY_MAX;
+ const u32 dqs_max = write ? IO_IO_OUT1_DELAY_MAX : IO_DQS_IN_DELAY_MAX;
+ const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
+ RW_MGR_MEM_DQ_PER_READ_DQS;
+ u32 stop, bit_chk;
+ int i, d;
+
+ for (d = 0; d <= dqs_max; d++) {
+ if (write)
+ scc_mgr_apply_group_dq_out1_delay(d);
+ else
+ scc_mgr_apply_group_dq_in_delay(test_bgn, d);
writel(0, &sdr_scc_mgr->update);
- /*
- * Stop searching when the read test doesn't pass AND when
- * we've seen a passing read on every bit.
- */
- if (use_read_test) {
- stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
- read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
- &bit_chk, 0, 0);
- } else {
- rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
- 0, PASS_ONE_BIT,
- &bit_chk, 0);
- bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
- (read_group - (write_group *
- RW_MGR_MEM_IF_READ_DQS_WIDTH /
- RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
- stop = (bit_chk == 0);
- }
- sticky_bit_chk = sticky_bit_chk | bit_chk;
- stop = stop && (sticky_bit_chk == param->read_correct_mask);
- debug_cond(DLEVEL == 2, "%s:%d vfifo_center(left): dtap=%u => %u == %u \
- && %u", __func__, __LINE__, d,
- sticky_bit_chk,
- param->read_correct_mask, stop);
-
- if (stop == 1) {
+ stop = search_stop_check(write, d, rank_bgn, write_group,
+ read_group, &bit_chk, sticky_bit_chk,
+ use_read_test);
+ if (stop == 1)
break;
- } else {
- for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
- if (bit_chk & 1) {
- /* Remember a passing test as the
- left_edge */
- left_edge[i] = d;
- } else {
- /* If a left edge has not been seen yet,
- then a future passing test will mark
- this edge as the right edge */
- if (left_edge[i] ==
- IO_IO_IN_DELAY_MAX + 1) {
- right_edge[i] = -(d + 1);
- }
- }
- bit_chk = bit_chk >> 1;
+
+ /* stop != 1 */
+ for (i = 0; i < per_dqs; i++) {
+ if (bit_chk & 1) {
+ /*
+ * Remember a passing test as
+ * the left_edge.
+ */
+ left_edge[i] = d;
+ } else {
+ /*
+ * If a left edge has not been seen
+ * yet, then a future passing test
+ * will mark this edge as the right
+ * edge.
+ */
+ if (left_edge[i] == delay_max + 1)
+ right_edge[i] = -(d + 1);
}
+ bit_chk >>= 1;
}
}
/* Reset DQ delay chains to 0 */
- scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
- sticky_bit_chk = 0;
- for (i = RW_MGR_MEM_DQ_PER_READ_DQS - 1;; i--) {
- debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: \
- %d right_edge[%u]: %d\n", __func__, __LINE__,
- i, left_edge[i], i, right_edge[i]);
+ if (write)
+ scc_mgr_apply_group_dq_out1_delay(0);
+ else
+ scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
+
+ *sticky_bit_chk = 0;
+ for (i = per_dqs - 1; i >= 0; i--) {
+ debug_cond(DLEVEL == 2,
+ "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n",
+ __func__, __LINE__, i, left_edge[i],
+ i, right_edge[i]);
/*
* Check for cases where we haven't found the left edge,
	 * which makes our assignment of the right edge invalid.
* Reset it to the illegal value.
*/
- if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) && (
- right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) {
- right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
- debug_cond(DLEVEL == 2, "%s:%d vfifo_center: reset \
- right_edge[%u]: %d\n", __func__, __LINE__,
- i, right_edge[i]);
+ if ((left_edge[i] == delay_max + 1) &&
+ (right_edge[i] != delay_max + 1)) {
+ right_edge[i] = delay_max + 1;
+ debug_cond(DLEVEL == 2,
+ "%s:%d vfifo_center: reset right_edge[%u]: %d\n",
+ __func__, __LINE__, i, right_edge[i]);
}
/*
- * Reset sticky bit (except for bits where we have seen
- * both the left and right edge).
+ * Reset sticky bit
+ * READ: except for bits where we have seen both
+ * the left and right edge.
+ * WRITE: except for bits where we have seen the
+ * left edge.
*/
- sticky_bit_chk = sticky_bit_chk << 1;
- if ((left_edge[i] != IO_IO_IN_DELAY_MAX + 1) &&
- (right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) {
- sticky_bit_chk = sticky_bit_chk | 1;
+ *sticky_bit_chk <<= 1;
+ if (write) {
+ if (left_edge[i] != delay_max + 1)
+ *sticky_bit_chk |= 1;
+ } else {
+ if ((left_edge[i] != delay_max + 1) &&
+ (right_edge[i] != delay_max + 1))
+ *sticky_bit_chk |= 1;
}
-
- if (i == 0)
- break;
}
- /* Search for the right edge of the window for each bit */
- for (d = 0; d <= IO_DQS_IN_DELAY_MAX - start_dqs; d++) {
- scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
- if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
- uint32_t delay = d + start_dqs_en;
- if (delay > IO_DQS_EN_DELAY_MAX)
- delay = IO_DQS_EN_DELAY_MAX;
- scc_mgr_set_dqs_en_delay(read_group, delay);
- }
- scc_mgr_load_dqs(read_group);
- writel(0, &sdr_scc_mgr->update);
+}
- /*
- * Stop searching when the read test doesn't pass AND when
- * we've seen a passing read on every bit.
- */
- if (use_read_test) {
- stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
- read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
- &bit_chk, 0, 0);
- } else {
- rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
- 0, PASS_ONE_BIT,
- &bit_chk, 0);
- bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
- (read_group - (write_group *
- RW_MGR_MEM_IF_READ_DQS_WIDTH /
- RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
- stop = (bit_chk == 0);
+/**
+ * search_right_edge() - Find right edge of DQ/DQS working phase
+ * @write: Perform read (Stage 2) or write (Stage 3) calibration
+ * @rank_bgn: Rank number
+ * @write_group: Write Group
+ * @read_group: Read Group
+ * @start_dqs: DQS start phase
+ * @start_dqs_en: DQS enable start phase
+ * @sticky_bit_chk: Resulting sticky bit mask after the test
+ * @left_edge: Left edge of the DQ/DQS phase
+ * @right_edge: Right edge of the DQ/DQS phase
+ * @use_read_test: Perform read test
+ *
+ * Find right edge of DQ/DQS working phase.
+ */
+static int search_right_edge(const int write, const int rank_bgn,
+ const u32 write_group, const u32 read_group,
+ const int start_dqs, const int start_dqs_en,
+ u32 *sticky_bit_chk,
+ int *left_edge, int *right_edge, const u32 use_read_test)
+{
+ const u32 delay_max = write ? IO_IO_OUT1_DELAY_MAX : IO_IO_IN_DELAY_MAX;
+ const u32 dqs_max = write ? IO_IO_OUT1_DELAY_MAX : IO_DQS_IN_DELAY_MAX;
+ const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
+ RW_MGR_MEM_DQ_PER_READ_DQS;
+ u32 stop, bit_chk;
+ int i, d;
+
+ for (d = 0; d <= dqs_max - start_dqs; d++) {
+ if (write) { /* WRITE-ONLY */
+ scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
+ d + start_dqs);
+ } else { /* READ-ONLY */
+ scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
+ if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
+ uint32_t delay = d + start_dqs_en;
+ if (delay > IO_DQS_EN_DELAY_MAX)
+ delay = IO_DQS_EN_DELAY_MAX;
+ scc_mgr_set_dqs_en_delay(read_group, delay);
+ }
+ scc_mgr_load_dqs(read_group);
}
- sticky_bit_chk = sticky_bit_chk | bit_chk;
- stop = stop && (sticky_bit_chk == param->read_correct_mask);
- debug_cond(DLEVEL == 2, "%s:%d vfifo_center(right): dtap=%u => %u == \
- %u && %u", __func__, __LINE__, d,
- sticky_bit_chk, param->read_correct_mask, stop);
+ writel(0, &sdr_scc_mgr->update);
+ stop = search_stop_check(write, d, rank_bgn, write_group,
+ read_group, &bit_chk, sticky_bit_chk,
+ use_read_test);
if (stop == 1) {
+ if (write && (d == 0)) { /* WRITE-ONLY */
+ for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
+ /*
+ * d = 0 failed, but it passed when
+ * testing the left edge, so it must be
+ * marginal, set it to -1
+ */
+ if (right_edge[i] == delay_max + 1 &&
+ left_edge[i] != delay_max + 1)
+ right_edge[i] = -1;
+ }
+ }
break;
- } else {
- for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
- if (bit_chk & 1) {
- /* Remember a passing test as
- the right_edge */
- right_edge[i] = d;
+ }
+
+ /* stop != 1 */
+ for (i = 0; i < per_dqs; i++) {
+ if (bit_chk & 1) {
+ /*
+ * Remember a passing test as
+ * the right_edge.
+ */
+ right_edge[i] = d;
+ } else {
+ if (d != 0) {
+ /*
+ * If a right edge has not
+ * been seen yet, then a future
+ * passing test will mark this
+ * edge as the left edge.
+ */
+ if (right_edge[i] == delay_max + 1)
+ left_edge[i] = -(d + 1);
} else {
- if (d != 0) {
- /* If a right edge has not been
- seen yet, then a future passing
- test will mark this edge as the
- left edge */
- if (right_edge[i] ==
- IO_IO_IN_DELAY_MAX + 1) {
- left_edge[i] = -(d + 1);
- }
- } else {
- /* d = 0 failed, but it passed
- when testing the left edge,
- so it must be marginal,
- set it to -1 */
- if (right_edge[i] ==
- IO_IO_IN_DELAY_MAX + 1 &&
- left_edge[i] !=
- IO_IO_IN_DELAY_MAX
- + 1) {
- right_edge[i] = -1;
- }
- /* If a right edge has not been
- seen yet, then a future passing
- test will mark this edge as the
- left edge */
- else if (right_edge[i] ==
- IO_IO_IN_DELAY_MAX +
- 1) {
- left_edge[i] = -(d + 1);
- }
- }
+ /*
+ * d = 0 failed, but it passed
+ * when testing the left edge,
+ * so it must be marginal, set
+ * it to -1
+ */
+ if (right_edge[i] == delay_max + 1 &&
+ left_edge[i] != delay_max + 1)
+ right_edge[i] = -1;
+ /*
+ * If a right edge has not been
+ * seen yet, then a future
+ * passing test will mark this
+ * edge as the left edge.
+ */
+ else if (right_edge[i] == delay_max + 1)
+ left_edge[i] = -(d + 1);
}
-
- debug_cond(DLEVEL == 2, "%s:%d vfifo_center[r,\
- d=%u]: ", __func__, __LINE__, d);
- debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d ",
- (int)(bit_chk & 1), i, left_edge[i]);
- debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
- right_edge[i]);
- bit_chk = bit_chk >> 1;
}
+
+ debug_cond(DLEVEL == 2, "%s:%d center[r,d=%u]: ",
+ __func__, __LINE__, d);
+ debug_cond(DLEVEL == 2,
+ "bit_chk_test=%i left_edge[%u]: %d ",
+ bit_chk & 1, i, left_edge[i]);
+ debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
+ right_edge[i]);
+ bit_chk >>= 1;
}
}
/* Check that all bits have a window */
- for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
- debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: \
- %d right_edge[%u]: %d", __func__, __LINE__,
- i, left_edge[i], i, right_edge[i]);
- if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) || (right_edge[i]
- == IO_IO_IN_DELAY_MAX + 1)) {
- /*
- * Restore delay chain settings before letting the loop
- * in rw_mgr_mem_calibrate_vfifo to retry different
- * dqs/ck relationships.
- */
- scc_mgr_set_dqs_bus_in_delay(read_group, start_dqs);
- if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
- scc_mgr_set_dqs_en_delay(read_group,
- start_dqs_en);
- }
- scc_mgr_load_dqs(read_group);
- writel(0, &sdr_scc_mgr->update);
-
- debug_cond(DLEVEL == 1, "%s:%d vfifo_center: failed to \
- find edge [%u]: %d %d", __func__, __LINE__,
- i, left_edge[i], right_edge[i]);
- if (use_read_test) {
- set_failing_group_stage(read_group *
- RW_MGR_MEM_DQ_PER_READ_DQS + i,
- CAL_STAGE_VFIFO,
- CAL_SUBSTAGE_VFIFO_CENTER);
- } else {
- set_failing_group_stage(read_group *
- RW_MGR_MEM_DQ_PER_READ_DQS + i,
- CAL_STAGE_VFIFO_AFTER_WRITES,
- CAL_SUBSTAGE_VFIFO_CENTER);
- }
- return 0;
- }
+ for (i = 0; i < per_dqs; i++) {
+ debug_cond(DLEVEL == 2,
+ "%s:%d write_center: left_edge[%u]: %d right_edge[%u]: %d",
+ __func__, __LINE__, i, left_edge[i],
+ i, right_edge[i]);
+ if ((left_edge[i] == dqs_max + 1) ||
+ (right_edge[i] == dqs_max + 1))
+ return i + 1; /* FIXME: If we fail, retval > 0 */
}
+ return 0;
+}
+
+/**
+ * get_window_mid_index() - Find the best middle setting of DQ/DQS phase
+ * @write: Perform read (Stage 2) or write (Stage 3) calibration
+ * @left_edge: Left edge of the DQ/DQS phase
+ * @right_edge: Right edge of the DQ/DQS phase
+ * @mid_min: Best DQ/DQS phase middle setting
+ *
+ * Find index and value of the middle of the DQ/DQS working phase.
+ */
+static int get_window_mid_index(const int write, int *left_edge,
+ int *right_edge, int *mid_min)
+{
+ const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
+ RW_MGR_MEM_DQ_PER_READ_DQS;
+ int i, mid, min_index;
+
/* Find middle of window for each DQ bit */
- mid_min = left_edge[0] - right_edge[0];
+ *mid_min = left_edge[0] - right_edge[0];
min_index = 0;
- for (i = 1; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
+ for (i = 1; i < per_dqs; i++) {
mid = left_edge[i] - right_edge[i];
- if (mid < mid_min) {
- mid_min = mid;
+ if (mid < *mid_min) {
+ *mid_min = mid;
min_index = i;
}
}
/*
* -mid_min/2 represents the amount that we need to move DQS.
- * If mid_min is odd and positive we'll need to add one to
- * make sure the rounding in further calculations is correct
- * (always bias to the right), so just add 1 for all positive values.
+ * If mid_min is odd and positive we'll need to add one to make
+ * sure the rounding in further calculations is correct (always
+ * bias to the right), so just add 1 for all positive values.
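+	 * For example, mid_min = 5 becomes (5 + 1) / 2 = 3, so DQS moves by
+	 * three taps instead of two; negative values simply truncate
+	 * towards zero.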
*/
- if (mid_min > 0)
- mid_min++;
+ if (*mid_min > 0)
+ (*mid_min)++;
+ *mid_min = *mid_min / 2;
- mid_min = mid_min / 2;
+ debug_cond(DLEVEL == 1, "%s:%d vfifo_center: *mid_min=%d (index=%u)\n",
+ __func__, __LINE__, *mid_min, min_index);
+ return min_index;
+}
- debug_cond(DLEVEL == 1, "%s:%d vfifo_center: mid_min=%d (index=%u)\n",
- __func__, __LINE__, mid_min, min_index);
+/**
+ * center_dq_windows() - Center the DQ/DQS windows
+ * @write: Perform read (Stage 2) or write (Stage 3) calibration
+ * @left_edge: Left edge of the DQ/DQS phase
+ * @right_edge: Right edge of the DQ/DQS phase
+ * @mid_min: Adjusted DQ/DQS phase middle setting
+ * @orig_mid_min: Original DQ/DQS phase middle setting
+ * @min_index: DQ/DQS phase middle setting index
+ * @test_bgn: Rank number to begin the test
+ * @dq_margin: Amount of shift for the DQ
+ * @dqs_margin: Amount of shift for the DQS
+ *
+ * Align the DQ/DQS windows in each group.
+ */
+static void center_dq_windows(const int write, int *left_edge, int *right_edge,
+ const int mid_min, const int orig_mid_min,
+ const int min_index, const int test_bgn,
+ int *dq_margin, int *dqs_margin)
+{
+ const u32 delay_max = write ? IO_IO_OUT1_DELAY_MAX : IO_IO_IN_DELAY_MAX;
+ const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
+ RW_MGR_MEM_DQ_PER_READ_DQS;
+ const u32 delay_off = write ? SCC_MGR_IO_OUT1_DELAY_OFFSET :
+ SCC_MGR_IO_IN_DELAY_OFFSET;
+ const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | delay_off;
+
+ u32 temp_dq_io_delay1, temp_dq_io_delay2;
+ int shift_dq, i, p;
+
+ /* Initialize data for export structures */
+ *dqs_margin = delay_max + 1;
+ *dq_margin = delay_max + 1;
+
+ /* add delay to bring centre of all DQ windows to the same "level" */
+ for (i = 0, p = test_bgn; i < per_dqs; i++, p++) {
+ /* Use values before divide by 2 to reduce round off error */
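+		/*
+		 * shift_dq lines this bit's window centre up with the centre
+		 * of the narrowest window (min_index); the (orig_mid_min -
+		 * mid_min) term compensates for the difference between the
+		 * original and the applied DQS centring shift.
+		 */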
+ shift_dq = (left_edge[i] - right_edge[i] -
+ (left_edge[min_index] - right_edge[min_index]))/2 +
+ (orig_mid_min - mid_min);
+
+ debug_cond(DLEVEL == 2,
+ "vfifo_center: before: shift_dq[%u]=%d\n",
+ i, shift_dq);
+
+ temp_dq_io_delay1 = readl(addr + (p << 2));
+ temp_dq_io_delay2 = readl(addr + (i << 2));
+
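+		/*
+		 * Clamp shift_dq so the programmed delay stays within
+		 * [0, delay_max]. Note the upper clamp uses the delay read
+		 * back at index i (temp_dq_io_delay2), as in the original
+		 * vfifo_center code.
+		 */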
+ if (shift_dq + temp_dq_io_delay1 > delay_max)
+ shift_dq = delay_max - temp_dq_io_delay2;
+ else if (shift_dq + temp_dq_io_delay1 < 0)
+ shift_dq = -temp_dq_io_delay1;
+
+ debug_cond(DLEVEL == 2,
+ "vfifo_center: after: shift_dq[%u]=%d\n",
+ i, shift_dq);
+
+ if (write)
+ scc_mgr_set_dq_out1_delay(i, temp_dq_io_delay1 + shift_dq);
+ else
+ scc_mgr_set_dq_in_delay(p, temp_dq_io_delay1 + shift_dq);
+
+ scc_mgr_load_dq(p);
+
+ debug_cond(DLEVEL == 2,
+ "vfifo_center: margin[%u]=[%d,%d]\n", i,
+ left_edge[i] - shift_dq + (-mid_min),
+ right_edge[i] + shift_dq - (-mid_min));
+
+ /* To determine values for export structures */
+ if (left_edge[i] - shift_dq + (-mid_min) < *dq_margin)
+ *dq_margin = left_edge[i] - shift_dq + (-mid_min);
+
+ if (right_edge[i] + shift_dq - (-mid_min) < *dqs_margin)
+ *dqs_margin = right_edge[i] + shift_dq - (-mid_min);
+ }
+}
+
+/**
+ * rw_mgr_mem_calibrate_vfifo_center() - Per-bit deskew DQ and centering
+ * @rank_bgn: Rank number
+ * @rw_group: Read/Write Group
+ * @test_bgn: Rank at which the test begins
+ * @use_read_test: Perform a read test
+ * @update_fom: Update FOM
+ *
+ * Per-bit deskew DQ and centering.
+ */
+static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn,
+ const u32 rw_group, const u32 test_bgn,
+ const int use_read_test, const int update_fom)
+{
+ const u32 addr =
+ SDR_PHYGRP_SCCGRP_ADDRESS + SCC_MGR_DQS_IN_DELAY_OFFSET +
+ (rw_group << 2);
+ /*
+ * Store these as signed since there are comparisons with
+ * signed numbers.
+ */
+ uint32_t sticky_bit_chk;
+ int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
+ int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
+ int32_t orig_mid_min, mid_min;
+ int32_t new_dqs, start_dqs, start_dqs_en, final_dqs_en;
+ int32_t dq_margin, dqs_margin;
+ int i, min_index;
+ int ret;
+
+ debug("%s:%d: %u %u", __func__, __LINE__, rw_group, test_bgn);
+
+ start_dqs = readl(addr);
+ if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
+ start_dqs_en = readl(addr - IO_DQS_EN_DELAY_OFFSET);
+
+ /* set the left and right edge of each bit to an illegal value */
+ /* use (IO_IO_IN_DELAY_MAX + 1) as an illegal value */
+ sticky_bit_chk = 0;
+ for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
+ left_edge[i] = IO_IO_IN_DELAY_MAX + 1;
+ right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
+ }
+
+ /* Search for the left edge of the window for each bit */
+ search_left_edge(0, rank_bgn, rw_group, rw_group, test_bgn,
+ &sticky_bit_chk,
+ left_edge, right_edge, use_read_test);
+
+ /* Search for the right edge of the window for each bit */
+ ret = search_right_edge(0, rank_bgn, rw_group, rw_group,
+ start_dqs, start_dqs_en,
+ &sticky_bit_chk,
+ left_edge, right_edge, use_read_test);
+ if (ret) {
+ /*
+ * Restore delay chain settings before letting the loop
+ * in rw_mgr_mem_calibrate_vfifo to retry different
+ * dqs/ck relationships.
+ */
+ scc_mgr_set_dqs_bus_in_delay(rw_group, start_dqs);
+ if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
+ scc_mgr_set_dqs_en_delay(rw_group, start_dqs_en);
+
+ scc_mgr_load_dqs(rw_group);
+ writel(0, &sdr_scc_mgr->update);
+
+ debug_cond(DLEVEL == 1,
+ "%s:%d vfifo_center: failed to find edge [%u]: %d %d",
+ __func__, __LINE__, i, left_edge[i], right_edge[i]);
+ if (use_read_test) {
+ set_failing_group_stage(rw_group *
+ RW_MGR_MEM_DQ_PER_READ_DQS + i,
+ CAL_STAGE_VFIFO,
+ CAL_SUBSTAGE_VFIFO_CENTER);
+ } else {
+ set_failing_group_stage(rw_group *
+ RW_MGR_MEM_DQ_PER_READ_DQS + i,
+ CAL_STAGE_VFIFO_AFTER_WRITES,
+ CAL_SUBSTAGE_VFIFO_CENTER);
+ }
+ return -EIO;
+ }
+
+ min_index = get_window_mid_index(0, left_edge, right_edge, &mid_min);
/* Determine the amount we can change DQS (which is -mid_min) */
orig_mid_min = mid_min;
}
new_dqs = start_dqs - mid_min;
- debug_cond(DLEVEL == 1, "vfifo_center: start_dqs=%d start_dqs_en=%d \
- new_dqs=%d mid_min=%d\n", start_dqs,
+ debug_cond(DLEVEL == 1,
+ "vfifo_center: start_dqs=%d start_dqs_en=%d new_dqs=%d mid_min=%d\n",
+ start_dqs,
IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? start_dqs_en : -1,
new_dqs, mid_min);
- /* Initialize data for export structures */
- dqs_margin = IO_IO_IN_DELAY_MAX + 1;
- dq_margin = IO_IO_IN_DELAY_MAX + 1;
-
- /* add delay to bring centre of all DQ windows to the same "level" */
- for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
- /* Use values before divide by 2 to reduce round off error */
- shift_dq = (left_edge[i] - right_edge[i] -
- (left_edge[min_index] - right_edge[min_index]))/2 +
- (orig_mid_min - mid_min);
-
- debug_cond(DLEVEL == 2, "vfifo_center: before: \
- shift_dq[%u]=%d\n", i, shift_dq);
-
- addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_IN_DELAY_OFFSET;
- temp_dq_in_delay1 = readl(addr + (p << 2));
- temp_dq_in_delay2 = readl(addr + (i << 2));
-
- if (shift_dq + (int32_t)temp_dq_in_delay1 >
- (int32_t)IO_IO_IN_DELAY_MAX) {
- shift_dq = (int32_t)IO_IO_IN_DELAY_MAX - temp_dq_in_delay2;
- } else if (shift_dq + (int32_t)temp_dq_in_delay1 < 0) {
- shift_dq = -(int32_t)temp_dq_in_delay1;
- }
- debug_cond(DLEVEL == 2, "vfifo_center: after: \
- shift_dq[%u]=%d\n", i, shift_dq);
- final_dq[i] = temp_dq_in_delay1 + shift_dq;
- scc_mgr_set_dq_in_delay(p, final_dq[i]);
- scc_mgr_load_dq(p);
-
- debug_cond(DLEVEL == 2, "vfifo_center: margin[%u]=[%d,%d]\n", i,
- left_edge[i] - shift_dq + (-mid_min),
- right_edge[i] + shift_dq - (-mid_min));
- /* To determine values for export structures */
- if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
- dq_margin = left_edge[i] - shift_dq + (-mid_min);
-
- if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
- dqs_margin = right_edge[i] + shift_dq - (-mid_min);
- }
-
- final_dqs = new_dqs;
- if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
- final_dqs_en = start_dqs_en - mid_min;
+ /* Add delay to bring centre of all DQ windows to the same "level". */
+ center_dq_windows(0, left_edge, right_edge, mid_min, orig_mid_min,
+ min_index, test_bgn, &dq_margin, &dqs_margin);
/* Move DQS-en */
if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
- scc_mgr_set_dqs_en_delay(read_group, final_dqs_en);
- scc_mgr_load_dqs(read_group);
+ final_dqs_en = start_dqs_en - mid_min;
+ scc_mgr_set_dqs_en_delay(rw_group, final_dqs_en);
+ scc_mgr_load_dqs(rw_group);
}
/* Move DQS */
- scc_mgr_set_dqs_bus_in_delay(read_group, final_dqs);
- scc_mgr_load_dqs(read_group);
- debug_cond(DLEVEL == 2, "%s:%d vfifo_center: dq_margin=%d \
- dqs_margin=%d", __func__, __LINE__,
- dq_margin, dqs_margin);
+ scc_mgr_set_dqs_bus_in_delay(rw_group, new_dqs);
+ scc_mgr_load_dqs(rw_group);
+ debug_cond(DLEVEL == 2,
+ "%s:%d vfifo_center: dq_margin=%d dqs_margin=%d",
+ __func__, __LINE__, dq_margin, dqs_margin);
/*
 	 * Do not remove this line as it makes sure all of our decisions
 	 * have been applied. Apply the update bit.
 	 */
writel(0, &sdr_scc_mgr->update);
- return (dq_margin >= 0) && (dqs_margin >= 0);
+ if ((dq_margin < 0) || (dqs_margin < 0))
+ return -EINVAL;
+
+ return 0;
}
-/*
- * calibrate the read valid prediction FIFO.
+/**
+ * rw_mgr_mem_calibrate_guaranteed_write() - Perform guaranteed write into the device
+ * @rw_group: Read/Write Group
+ * @phase: DQ/DQS phase
*
- * - read valid prediction will consist of finding a good DQS enable phase,
- * DQS enable delay, DQS input phase, and DQS input delay.
- * - we also do a per-bit deskew on the DQ lines.
+ * Because initially no communication can be reliably performed with the memory
+ * device, the sequencer uses a guaranteed write mechanism to write data into
+ * the memory device.
*/
-static uint32_t rw_mgr_mem_calibrate_vfifo(uint32_t read_group,
- uint32_t test_bgn)
+static int rw_mgr_mem_calibrate_guaranteed_write(const u32 rw_group,
+ const u32 phase)
{
- uint32_t p, d, rank_bgn, sr;
- uint32_t dtaps_per_ptap;
- uint32_t tmp_delay;
- uint32_t bit_chk;
- uint32_t grp_calibrated;
- uint32_t write_group, write_test_bgn;
- uint32_t failed_substage;
+ int ret;
- debug("%s:%d: %u %u\n", __func__, __LINE__, read_group, test_bgn);
+ /* Set a particular DQ/DQS phase. */
+ scc_mgr_set_dqdqs_output_phase_all_ranks(rw_group, phase);
- /* update info for sims */
- reg_file_set_stage(CAL_STAGE_VFIFO);
+ debug_cond(DLEVEL == 1, "%s:%d guaranteed write: g=%u p=%u\n",
+ __func__, __LINE__, rw_group, phase);
- write_group = read_group;
- write_test_bgn = test_bgn;
+ /*
+ * Altera EMI_RM 2015.05.04 :: Figure 1-25
+ * Load up the patterns used by read calibration using the
+ * current DQDQS phase.
+ */
+ rw_mgr_mem_calibrate_read_load_patterns(0, 1);
- /* USER Determine number of delay taps for each phase tap */
- dtaps_per_ptap = 0;
- tmp_delay = 0;
- while (tmp_delay < IO_DELAY_PER_OPA_TAP) {
- dtaps_per_ptap++;
- tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
+ if (gbl->phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ)
+ return 0;
+
+ /*
+ * Altera EMI_RM 2015.05.04 :: Figure 1-26
+ * Back-to-Back reads of the patterns used for calibration.
+ */
+ ret = rw_mgr_mem_calibrate_read_test_patterns(0, rw_group, 1);
+ if (ret)
+ debug_cond(DLEVEL == 1,
+ "%s:%d Guaranteed read test failed: g=%u p=%u\n",
+ __func__, __LINE__, rw_group, phase);
+ return ret;
+}
+
+/**
+ * rw_mgr_mem_calibrate_dqs_enable_calibration() - DQS Enable Calibration
+ * @rw_group: Read/Write Group
+ * @test_bgn: Rank at which the test begins
+ *
+ * DQS enable calibration ensures reliable capture of the DQ signal without
+ * glitches on the DQS line.
+ */
+static int rw_mgr_mem_calibrate_dqs_enable_calibration(const u32 rw_group,
+ const u32 test_bgn)
+{
+ /*
+ * Altera EMI_RM 2015.05.04 :: Figure 1-27
+	 * DQS and DQS Enable Signal Relationships.
+ */
+
+	/* We start at zero, so have one less DQ to divide among */
+ const u32 delay_step = IO_IO_IN_DELAY_MAX /
+ (RW_MGR_MEM_DQ_PER_READ_DQS - 1);
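+	/*
+	 * Bit i of the group thus gets an input delay of i * delay_step,
+	 * spreading the DQ delays evenly between 0 and (roughly)
+	 * IO_IO_IN_DELAY_MAX.
+	 */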
+ int ret;
+ u32 i, p, d, r;
+
+ debug("%s:%d (%u,%u)\n", __func__, __LINE__, rw_group, test_bgn);
+
+ /* Try different dq_in_delays since the DQ path is shorter than DQS. */
+ for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+ r += NUM_RANKS_PER_SHADOW_REG) {
+ for (i = 0, p = test_bgn, d = 0;
+ i < RW_MGR_MEM_DQ_PER_READ_DQS;
+ i++, p++, d += delay_step) {
+ debug_cond(DLEVEL == 1,
+ "%s:%d: g=%u r=%u i=%u p=%u d=%u\n",
+ __func__, __LINE__, rw_group, r, i, p, d);
+
+ scc_mgr_set_dq_in_delay(p, d);
+ scc_mgr_load_dq(p);
+ }
+
+ writel(0, &sdr_scc_mgr->update);
}
- dtaps_per_ptap--;
- tmp_delay = 0;
- /* update info for sims */
- reg_file_set_group(read_group);
+ /*
+ * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
+ * dq_in_delay values
+ */
+ ret = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(rw_group);
+
+ debug_cond(DLEVEL == 1,
+		   "%s:%d: g=%u found=%u; Resetting delay chain to zero\n",
+ __func__, __LINE__, rw_group, !ret);
+
+ for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
+ r += NUM_RANKS_PER_SHADOW_REG) {
+ scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
+ writel(0, &sdr_scc_mgr->update);
+ }
+
+ return ret;
+}
+
+/**
+ * rw_mgr_mem_calibrate_dq_dqs_centering() - Centering DQ/DQS
+ * @rw_group: Read/Write Group
+ * @test_bgn: Rank at which the test begins
+ * @use_read_test: Perform a read test
+ * @update_fom: Update FOM
+ *
+ * The centering DQ/DQS stage attempts to align DQ and DQS signals on reads
+ * within a group.
+ */
+static int
+rw_mgr_mem_calibrate_dq_dqs_centering(const u32 rw_group, const u32 test_bgn,
+ const int use_read_test,
+ const int update_fom)
+{
+ int ret, grp_calibrated;
+ u32 rank_bgn, sr;
+
+ /*
+ * Altera EMI_RM 2015.05.04 :: Figure 1-28
+ * Read per-bit deskew can be done on a per shadow register basis.
+ */
+ grp_calibrated = 1;
+ for (rank_bgn = 0, sr = 0;
+ rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
+ rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
+ /* Check if this set of ranks should be skipped entirely. */
+ if (param->skip_shadow_regs[sr])
+ continue;
+
+ ret = rw_mgr_mem_calibrate_vfifo_center(rank_bgn, rw_group,
+ test_bgn,
+ use_read_test,
+ update_fom);
+ if (!ret)
+ continue;
+
+ grp_calibrated = 0;
+ }
+
+ if (!grp_calibrated)
+ return -EIO;
- grp_calibrated = 0;
+ return 0;
+}
+
+/**
+ * rw_mgr_mem_calibrate_vfifo() - Calibrate the read valid prediction FIFO
+ * @rw_group: Read/Write Group
+ * @test_bgn: Rank at which the test begins
+ *
+ * Stage 1: Calibrate the read valid prediction FIFO.
+ *
+ * This function implements UniPHY calibration Stage 1, as explained in
+ * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
+ *
+ * - read valid prediction will consist of finding:
+ * - DQS enable phase and DQS enable delay (DQS Enable Calibration)
+ * - DQS input phase and DQS input delay (DQ/DQS Centering)
+ * - we also do a per-bit deskew on the DQ lines.
+ */
+static int rw_mgr_mem_calibrate_vfifo(const u32 rw_group, const u32 test_bgn)
+{
+ uint32_t p, d;
+ uint32_t dtaps_per_ptap;
+ uint32_t failed_substage;
+
+ int ret;
+
+ debug("%s:%d: %u %u\n", __func__, __LINE__, rw_group, test_bgn);
+
+ /* Update info for sims */
+ reg_file_set_group(rw_group);
+ reg_file_set_stage(CAL_STAGE_VFIFO);
reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);
+
failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;
- for (d = 0; d <= dtaps_per_ptap && grp_calibrated == 0; d += 2) {
+ /* USER Determine number of delay taps for each phase tap. */
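+	/*
+	 * I.e. count how many DQS-enable delay-chain taps fit into one
+	 * phase tap (rounded up) and subtract one; this is equivalent to
+	 * the iterative computation it replaces.
+	 */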
+ dtaps_per_ptap = DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP,
+ IO_DELAY_PER_DQS_EN_DCHAIN_TAP) - 1;
+
+ for (d = 0; d <= dtaps_per_ptap; d += 2) {
/*
* In RLDRAMX we may be messing the delay of pins in
- * the same write group but outside of the current read
- * the group, but that's ok because we haven't
- * calibrated output side yet.
+		 * the same write rw_group but outside of the current read
+		 * rw_group, but that's OK because we haven't calibrated the
+		 * output side yet.
*/
if (d > 0) {
scc_mgr_apply_group_all_out_delay_add_all_ranks(
- write_group, d);
+ rw_group, d);
}
- for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX && grp_calibrated == 0;
- p++) {
- /* set a particular dqdqs phase */
- scc_mgr_set_dqdqs_output_phase_all_ranks(read_group, p);
+ for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX; p++) {
+ /* 1) Guaranteed Write */
+ ret = rw_mgr_mem_calibrate_guaranteed_write(rw_group, p);
+ if (ret)
+ break;
- debug_cond(DLEVEL == 1, "%s:%d calibrate_vfifo: g=%u \
- p=%u d=%u\n", __func__, __LINE__,
- read_group, p, d);
+ /* 2) DQS Enable Calibration */
+ ret = rw_mgr_mem_calibrate_dqs_enable_calibration(rw_group,
+ test_bgn);
+ if (ret) {
+ failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
+ continue;
+ }
+ /* 3) Centering DQ/DQS */
/*
- * Load up the patterns used by read calibration
- * using current DQDQS phase.
+			 * If doing read after write calibration, do not update
+			 * the FOM now; it will be updated then.
*/
- rw_mgr_mem_calibrate_read_load_patterns(0, 1);
- if (!(gbl->phy_debug_mode_flags &
- PHY_DEBUG_DISABLE_GUARANTEED_READ)) {
- if (!rw_mgr_mem_calibrate_read_test_patterns_all_ranks
- (read_group, 1, &bit_chk)) {
- debug_cond(DLEVEL == 1, "%s:%d Guaranteed read test failed:",
- __func__, __LINE__);
- debug_cond(DLEVEL == 1, " g=%u p=%u d=%u\n",
- read_group, p, d);
- break;
- }
+ ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group,
+ test_bgn, 1, 0);
+ if (ret) {
+ failed_substage = CAL_SUBSTAGE_VFIFO_CENTER;
+ continue;
}
-/* case:56390 */
- grp_calibrated = 1;
- if (rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
- (write_group, read_group, test_bgn)) {
- /*
- * USER Read per-bit deskew can be done on a
- * per shadow register basis.
- */
- for (rank_bgn = 0, sr = 0;
- rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
- rank_bgn += NUM_RANKS_PER_SHADOW_REG,
- ++sr) {
- /*
- * Determine if this set of ranks
- * should be skipped entirely.
- */
- if (!param->skip_shadow_regs[sr]) {
- /*
- * If doing read after write
- * calibration, do not update
- * FOM, now - do it then.
- */
- if (!rw_mgr_mem_calibrate_vfifo_center
- (rank_bgn, write_group,
- read_group, test_bgn, 1, 0)) {
- grp_calibrated = 0;
- failed_substage =
- CAL_SUBSTAGE_VFIFO_CENTER;
- }
- }
- }
- } else {
- grp_calibrated = 0;
- failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
- }
+ /* All done. */
+ goto cal_done_ok;
}
}
- if (grp_calibrated == 0) {
- set_failing_group_stage(write_group, CAL_STAGE_VFIFO,
- failed_substage);
- return 0;
- }
+ /* Calibration Stage 1 failed. */
+ set_failing_group_stage(rw_group, CAL_STAGE_VFIFO, failed_substage);
+ return 0;
+ /* Calibration Stage 1 completed OK. */
+cal_done_ok:
/*
* Reset the delay chains back to zero if they have moved > 1
* (check for > 1 because loop will increase d even when pass in
* first case).
*/
if (d > 2)
- scc_mgr_zero_group(write_group, 1);
+ scc_mgr_zero_group(rw_group, 1);
return 1;
}
-/* VFIFO Calibration -- Read Deskew Calibration after write deskew */
-static uint32_t rw_mgr_mem_calibrate_vfifo_end(uint32_t read_group,
- uint32_t test_bgn)
+/**
+ * rw_mgr_mem_calibrate_vfifo_end() - DQ/DQS Centering.
+ * @rw_group: Read/Write Group
+ * @test_bgn: Rank at which the test begins
+ *
+ * Stage 3: DQ/DQS Centering.
+ *
+ * This function implements UniPHY calibration Stage 3, as explained in
+ * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
+ */
+static int rw_mgr_mem_calibrate_vfifo_end(const u32 rw_group,
+ const u32 test_bgn)
{
- uint32_t rank_bgn, sr;
- uint32_t grp_calibrated;
- uint32_t write_group;
+ int ret;
- debug("%s:%d %u %u", __func__, __LINE__, read_group, test_bgn);
-
- /* update info for sims */
+ debug("%s:%d %u %u", __func__, __LINE__, rw_group, test_bgn);
+ /* Update info for sims. */
+ reg_file_set_group(rw_group);
reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
- write_group = read_group;
-
- /* update info for sims */
- reg_file_set_group(read_group);
-
- grp_calibrated = 1;
- /* Read per-bit deskew can be done on a per shadow register basis */
- for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
- rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) {
- /* Determine if this set of ranks should be skipped entirely */
- if (!param->skip_shadow_regs[sr]) {
- /* This is the last calibration round, update FOM here */
- if (!rw_mgr_mem_calibrate_vfifo_center(rank_bgn,
- write_group,
- read_group,
- test_bgn, 0,
- 1)) {
- grp_calibrated = 0;
- }
- }
- }
-
-
- if (grp_calibrated == 0) {
- set_failing_group_stage(write_group,
+ ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group, test_bgn, 0, 1);
+ if (ret)
+ set_failing_group_stage(rw_group,
CAL_STAGE_VFIFO_AFTER_WRITES,
CAL_SUBSTAGE_VFIFO_CENTER);
- return 0;
- }
-
- return 1;
+ return ret;
}
-/* Calibrate LFIFO to find smallest read latency */
+/**
+ * rw_mgr_mem_calibrate_lfifo() - Minimize latency
+ *
+ * Stage 4: Minimize latency.
+ *
+ * This function implements UniPHY calibration Stage 4, as explained in
+ * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
+ * Calibrate LFIFO to find smallest read latency.
+ */
static uint32_t rw_mgr_mem_calibrate_lfifo(void)
{
- uint32_t found_one;
- uint32_t bit_chk;
+ int found_one = 0;
debug("%s:%d\n", __func__, __LINE__);
- /* update info for sims */
+ /* Update info for sims. */
reg_file_set_stage(CAL_STAGE_LFIFO);
reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);
/* Load up the patterns used by read calibration for all ranks */
rw_mgr_mem_calibrate_read_load_patterns(0, 1);
- found_one = 0;
do {
writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u",
__func__, __LINE__, gbl->curr_read_lat);
- if (!rw_mgr_mem_calibrate_read_test_all_ranks(0,
- NUM_READ_TESTS,
- PASS_ALL_BITS,
- &bit_chk, 1)) {
+ if (!rw_mgr_mem_calibrate_read_test_all_ranks(0, NUM_READ_TESTS,
+ PASS_ALL_BITS, 1))
break;
- }
found_one = 1;
- /* reduce read latency and see if things are working */
- /* correctly */
+ /*
+ * Reduce read latency and see if things are
+ * working correctly.
+ */
gbl->curr_read_lat--;
} while (gbl->curr_read_lat > 0);
- /* reset the fifos to get pointers to known state */
-
+ /* Reset the fifos to get pointers to known state. */
writel(0, &phy_mgr_cmd->fifo_reset);
if (found_one) {
- /* add a fudge factor to the read latency that was determined */
+ /* Add a fudge factor to the read latency that was determined */
gbl->curr_read_lat += 2;
writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
- debug_cond(DLEVEL == 2, "%s:%d lfifo: success: using \
- read_lat=%u\n", __func__, __LINE__,
- gbl->curr_read_lat);
- return 1;
+ debug_cond(DLEVEL == 2,
+ "%s:%d lfifo: success: using read_lat=%u\n",
+ __func__, __LINE__, gbl->curr_read_lat);
} else {
set_failing_group_stage(0xff, CAL_STAGE_LFIFO,
CAL_SUBSTAGE_READ_LATENCY);
- debug_cond(DLEVEL == 2, "%s:%d lfifo: failed at initial \
- read_lat=%u\n", __func__, __LINE__,
- gbl->curr_read_lat);
- return 0;
+ debug_cond(DLEVEL == 2,
+ "%s:%d lfifo: failed at initial read_lat=%u\n",
+ __func__, __LINE__, gbl->curr_read_lat);
}
+
+ return found_one;
}
-/*
- * issue write test command.
- * two variants are provided. one that just tests a write pattern and
- * another that tests datamask functionality.
+/**
+ * search_window() - Search for the/part of the window with DM/DQS shift
+ * @search_dm:	If 1, search for the DM shift; if 0, search for the DQS shift
+ * @rank_bgn: Rank number
+ * @write_group: Write Group
+ * @bgn_curr: Current window begin
+ * @end_curr: Current window end
+ * @bgn_best: Current best window begin
+ * @end_best: Current best window end
+ * @win_best: Size of the best window
+ * @new_dqs: New DQS value (only applicable if search_dm = 0).
+ *
+ * Search for the/part of the window with DM/DQS shift.
*/
-static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group,
- uint32_t test_dm)
-{
- uint32_t mcc_instruction;
- uint32_t quick_write_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES) &&
- ENABLE_SUPER_QUICK_CALIBRATION);
- uint32_t rw_wl_nop_cycles;
- uint32_t addr;
-
- /*
- * Set counter and jump addresses for the right
- * number of NOP cycles.
- * The number of supported NOP cycles can range from -1 to infinity
- * Three different cases are handled:
- *
- * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
- * mechanism will be used to insert the right number of NOPs
- *
- * 2. For a number of NOP cycles equals to 0, the micro-instruction
- * issuing the write command will jump straight to the
- * micro-instruction that turns on DQS (for DDRx), or outputs write
- * data (for RLD), skipping
- * the NOP micro-instruction all together
- *
- * 3. A number of NOP cycles equal to -1 indicates that DQS must be
- * turned on in the same micro-instruction that issues the write
- * command. Then we need
- * to directly jump to the micro-instruction that sends out the data
- *
- * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
- * (2 and 3). One jump-counter (0) is used to perform multiple
- * write-read operations.
- * one counter left to issue this command in "multiple-group" mode
- */
-
- rw_wl_nop_cycles = gbl->rw_wl_nop_cycles;
-
- if (rw_wl_nop_cycles == -1) {
- /*
- * CNTR 2 - We want to execute the special write operation that
- * turns on DQS right away and then skip directly to the
- * instruction that sends out the data. We set the counter to a
- * large number so that the jump is always taken.
- */
- writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
-
- /* CNTR 3 - Not used */
- if (test_dm) {
- mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1;
- writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA,
- &sdr_rw_load_jump_mgr_regs->load_jump_add2);
- writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
- &sdr_rw_load_jump_mgr_regs->load_jump_add3);
- } else {
- mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0_WL_1;
- writel(RW_MGR_LFSR_WR_RD_BANK_0_DATA,
- &sdr_rw_load_jump_mgr_regs->load_jump_add2);
- writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
- &sdr_rw_load_jump_mgr_regs->load_jump_add3);
- }
- } else if (rw_wl_nop_cycles == 0) {
- /*
- * CNTR 2 - We want to skip the NOP operation and go straight
- * to the DQS enable instruction. We set the counter to a large
- * number so that the jump is always taken.
- */
- writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
-
- /* CNTR 3 - Not used */
- if (test_dm) {
- mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
- writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS,
- &sdr_rw_load_jump_mgr_regs->load_jump_add2);
- } else {
- mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
- writel(RW_MGR_LFSR_WR_RD_BANK_0_DQS,
- &sdr_rw_load_jump_mgr_regs->load_jump_add2);
- }
- } else {
- /*
- * CNTR 2 - In this case we want to execute the next instruction
- * and NOT take the jump. So we set the counter to 0. The jump
- * address doesn't count.
- */
- writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
- writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);
-
- /*
- * CNTR 3 - Set the nop counter to the number of cycles we
- * need to loop for, minus 1.
- */
- writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
- if (test_dm) {
- mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
- writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
- &sdr_rw_load_jump_mgr_regs->load_jump_add3);
+static void search_window(const int search_dm,
+ const u32 rank_bgn, const u32 write_group,
+ int *bgn_curr, int *end_curr, int *bgn_best,
+ int *end_best, int *win_best, int new_dqs)
+{
+ u32 bit_chk;
+ const int max = IO_IO_OUT1_DELAY_MAX - new_dqs;
+ int d, di;
+
+ /* Search for the/part of the window with DM/DQS shift. */
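+	/*
+	 * di counts down so that the DM search sweeps the delay from max
+	 * down to 0, while the DQS search (d = max - di) sweeps up from 0.
+	 */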
+ for (di = max; di >= 0; di -= DELTA_D) {
+ if (search_dm) {
+ d = di;
+ scc_mgr_apply_group_dm_out1_delay(d);
} else {
- mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
- writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
- &sdr_rw_load_jump_mgr_regs->load_jump_add3);
- }
- }
-
- writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
- RW_MGR_RESET_READ_DATAPATH_OFFSET);
-
- if (quick_write_mode)
- writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
- else
- writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);
-
- writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
-
- /*
- * CNTR 1 - This is used to ensure enough time elapses
- * for read data to come back.
- */
- writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);
-
- if (test_dm) {
- writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT,
- &sdr_rw_load_jump_mgr_regs->load_jump_add1);
- } else {
- writel(RW_MGR_LFSR_WR_RD_BANK_0_WAIT,
- &sdr_rw_load_jump_mgr_regs->load_jump_add1);
- }
-
- addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
- writel(mcc_instruction, addr + (group << 2));
-}
-
-/* Test writes, can check for a single bit pass or multiple bit pass */
-static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
- uint32_t write_group, uint32_t use_dm, uint32_t all_correct,
- uint32_t *bit_chk, uint32_t all_ranks)
-{
- uint32_t r;
- uint32_t correct_mask_vg;
- uint32_t tmp_bit_chk;
- uint32_t vg;
- uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
- (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
- uint32_t addr_rw_mgr;
- uint32_t base_rw_mgr;
-
- *bit_chk = param->write_correct_mask;
- correct_mask_vg = param->write_correct_mask_vg;
-
- for (r = rank_bgn; r < rank_end; r++) {
- if (param->skip_ranks[r]) {
- /* request to skip the rank */
- continue;
- }
-
- /* set rank */
- set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
-
- tmp_bit_chk = 0;
- addr_rw_mgr = SDR_PHYGRP_RWMGRGRP_ADDRESS;
- for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS-1; ; vg--) {
- /* reset the fifos to get pointers to known state */
- writel(0, &phy_mgr_cmd->fifo_reset);
-
- tmp_bit_chk = tmp_bit_chk <<
- (RW_MGR_MEM_DQ_PER_WRITE_DQS /
- RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
- rw_mgr_mem_calibrate_write_test_issue(write_group *
- RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS+vg,
- use_dm);
-
- base_rw_mgr = readl(addr_rw_mgr);
- tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr));
- if (vg == 0)
- break;
+ /* For DQS, we go from 0...max */
+ d = max - di;
+ /*
+			 * Note: This only shifts DQS, so we may be limiting
+			 * ourselves to the width of DQ unnecessarily.
+ */
+ scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
+ d + new_dqs);
}
- *bit_chk &= tmp_bit_chk;
- }
-
- if (all_correct) {
- set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
- debug_cond(DLEVEL == 2, "write_test(%u,%u,ALL) : %u == \
- %u => %lu", write_group, use_dm,
- *bit_chk, param->write_correct_mask,
- (long unsigned int)(*bit_chk ==
- param->write_correct_mask));
- return *bit_chk == param->write_correct_mask;
- } else {
- set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
- debug_cond(DLEVEL == 2, "write_test(%u,%u,ONE) : %u != ",
- write_group, use_dm, *bit_chk);
- debug_cond(DLEVEL == 2, "%lu" " => %lu", (long unsigned int)0,
- (long unsigned int)(*bit_chk != 0));
- return *bit_chk != 0x00;
- }
-}
-
-/*
- * center all windows. do per-bit-deskew to possibly increase size of
- * certain windows.
- */
-static uint32_t rw_mgr_mem_calibrate_writes_center(uint32_t rank_bgn,
- uint32_t write_group, uint32_t test_bgn)
-{
- uint32_t i, p, min_index;
- int32_t d;
- /*
- * Store these as signed since there are comparisons with
- * signed numbers.
- */
- uint32_t bit_chk;
- uint32_t sticky_bit_chk;
- int32_t left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
- int32_t right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
- int32_t mid;
- int32_t mid_min, orig_mid_min;
- int32_t new_dqs, start_dqs, shift_dq;
- int32_t dq_margin, dqs_margin, dm_margin;
- uint32_t stop;
- uint32_t temp_dq_out1_delay;
- uint32_t addr;
-
- debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn);
-
- dm_margin = 0;
-
- addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
- start_dqs = readl(addr +
- (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2));
-
- /* per-bit deskew */
-
- /*
- * set the left and right edge of each bit to an illegal value
- * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value.
- */
- sticky_bit_chk = 0;
- for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
- left_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
- right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
- }
-
- /* Search for the left edge of the window for each bit */
- for (d = 0; d <= IO_IO_OUT1_DELAY_MAX; d++) {
- scc_mgr_apply_group_dq_out1_delay(write_group, d);
writel(0, &sdr_scc_mgr->update);
- /*
- * Stop searching when the read test doesn't pass AND when
- * we've seen a passing read on every bit.
- */
- stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
- 0, PASS_ONE_BIT, &bit_chk, 0);
- sticky_bit_chk = sticky_bit_chk | bit_chk;
- stop = stop && (sticky_bit_chk == param->write_correct_mask);
- debug_cond(DLEVEL == 2, "write_center(left): dtap=%d => %u \
- == %u && %u [bit_chk= %u ]\n",
- d, sticky_bit_chk, param->write_correct_mask,
- stop, bit_chk);
-
- if (stop == 1) {
- break;
- } else {
- for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
- if (bit_chk & 1) {
- /*
- * Remember a passing test as the
- * left_edge.
- */
- left_edge[i] = d;
- } else {
- /*
- * If a left edge has not been seen
- * yet, then a future passing test will
- * mark this edge as the right edge.
- */
- if (left_edge[i] ==
- IO_IO_OUT1_DELAY_MAX + 1) {
- right_edge[i] = -(d + 1);
- }
- }
- debug_cond(DLEVEL == 2, "write_center[l,d=%d):", d);
- debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
- (int)(bit_chk & 1), i, left_edge[i]);
- debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
- right_edge[i]);
- bit_chk = bit_chk >> 1;
- }
- }
- }
+ if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
+ PASS_ALL_BITS, &bit_chk,
+ 0)) {
+ /* Set current end of the window. */
+ *end_curr = search_dm ? -d : d;
- /* Reset DQ delay chains to 0 */
- scc_mgr_apply_group_dq_out1_delay(0);
- sticky_bit_chk = 0;
- for (i = RW_MGR_MEM_DQ_PER_WRITE_DQS - 1;; i--) {
- debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \
- %d right_edge[%u]: %d\n", __func__, __LINE__,
- i, left_edge[i], i, right_edge[i]);
+ /*
+			 * If a starting edge of our window has not been seen
+			 * yet, this is the current start of the DM/DQS window.
+ */
+ if (*bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
+ *bgn_curr = search_dm ? -d : d;
- /*
- * Check for cases where we haven't found the left edge,
- * which makes our assignment of the the right edge invalid.
- * Reset it to the illegal value.
- */
- if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) &&
- (right_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) {
- right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
- debug_cond(DLEVEL == 2, "%s:%d write_center: reset \
- right_edge[%u]: %d\n", __func__, __LINE__,
- i, right_edge[i]);
- }
+ /*
+			 * If the current window is bigger than the best seen
+			 * so far, make it the new best seen window.
+ */
+ if ((*end_curr - *bgn_curr + 1) > *win_best) {
+ *win_best = *end_curr - *bgn_curr + 1;
+ *bgn_best = *bgn_curr;
+ *end_best = *end_curr;
+ }
+ } else {
+ /* We just saw a failing test. Reset temp edge. */
+ *bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
+ *end_curr = IO_IO_OUT1_DELAY_MAX + 1;
- /*
- * Reset sticky bit (except for bits where we have
- * seen the left edge).
- */
- sticky_bit_chk = sticky_bit_chk << 1;
- if ((left_edge[i] != IO_IO_OUT1_DELAY_MAX + 1))
- sticky_bit_chk = sticky_bit_chk | 1;
+ /* Early exit is only applicable to DQS. */
+ if (search_dm)
+ continue;
- if (i == 0)
- break;
+ /*
+ * Early exit optimization: if the remaining delay
+ * chain space is less than already seen largest
+ * window we can exit.
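+			 * With d counting up from 0, at most
+			 * (IO_IO_OUT1_DELAY_MAX - new_dqs - d) taps remain,
+			 * which can no longer produce a window larger than
+			 * win_best.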
+ */
+ if (*win_best - 1 > IO_IO_OUT1_DELAY_MAX - new_dqs - d)
+ break;
+ }
}
+}
- /* Search for the right edge of the window for each bit */
- for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - start_dqs; d++) {
- scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
- d + start_dqs);
-
- writel(0, &sdr_scc_mgr->update);
+/**
+ * rw_mgr_mem_calibrate_writes_center() - Center all windows
+ * @rank_bgn: Rank number
+ * @write_group: Write group
+ * @test_bgn: Rank at which the test begins
+ *
+ * Center all windows. Do per-bit-deskew to possibly increase size of
+ * certain windows.
+ */
+static int
+rw_mgr_mem_calibrate_writes_center(const u32 rank_bgn, const u32 write_group,
+ const u32 test_bgn)
+{
+ int i;
+ u32 sticky_bit_chk;
+ u32 min_index;
+ int left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
+ int right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
+ int mid;
+ int mid_min, orig_mid_min;
+ int new_dqs, start_dqs;
+ int dq_margin, dqs_margin, dm_margin;
+ int bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
+ int end_curr = IO_IO_OUT1_DELAY_MAX + 1;
+ int bgn_best = IO_IO_OUT1_DELAY_MAX + 1;
+ int end_best = IO_IO_OUT1_DELAY_MAX + 1;
+ int win_best = 0;
+
+ int ret;
- /*
- * Stop searching when the read test doesn't pass AND when
- * we've seen a passing read on every bit.
- */
- stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
- 0, PASS_ONE_BIT, &bit_chk, 0);
+ debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn);
- sticky_bit_chk = sticky_bit_chk | bit_chk;
- stop = stop && (sticky_bit_chk == param->write_correct_mask);
+ dm_margin = 0;
- debug_cond(DLEVEL == 2, "write_center (right): dtap=%u => %u == \
- %u && %u\n", d, sticky_bit_chk,
- param->write_correct_mask, stop);
+ start_dqs = readl((SDR_PHYGRP_SCCGRP_ADDRESS |
+ SCC_MGR_IO_OUT1_DELAY_OFFSET) +
+ (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2));
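+	/*
+	 * The DQS entry is addressed at index RW_MGR_MEM_DQ_PER_WRITE_DQS,
+	 * i.e. right after the per-DQ entries in the OUT1 delay bank.
+	 */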
- if (stop == 1) {
- if (d == 0) {
- for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS;
- i++) {
- /* d = 0 failed, but it passed when
- testing the left edge, so it must be
- marginal, set it to -1 */
- if (right_edge[i] ==
- IO_IO_OUT1_DELAY_MAX + 1 &&
- left_edge[i] !=
- IO_IO_OUT1_DELAY_MAX + 1) {
- right_edge[i] = -1;
- }
- }
- }
- break;
- } else {
- for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
- if (bit_chk & 1) {
- /*
- * Remember a passing test as
- * the right_edge.
- */
- right_edge[i] = d;
- } else {
- if (d != 0) {
- /*
- * If a right edge has not
- * been seen yet, then a future
- * passing test will mark this
- * edge as the left edge.
- */
- if (right_edge[i] ==
- IO_IO_OUT1_DELAY_MAX + 1)
- left_edge[i] = -(d + 1);
- } else {
- /*
- * d = 0 failed, but it passed
- * when testing the left edge,
- * so it must be marginal, set
- * it to -1.
- */
- if (right_edge[i] ==
- IO_IO_OUT1_DELAY_MAX + 1 &&
- left_edge[i] !=
- IO_IO_OUT1_DELAY_MAX + 1)
- right_edge[i] = -1;
- /*
- * If a right edge has not been
- * seen yet, then a future
- * passing test will mark this
- * edge as the left edge.
- */
- else if (right_edge[i] ==
- IO_IO_OUT1_DELAY_MAX +
- 1)
- left_edge[i] = -(d + 1);
- }
- }
- debug_cond(DLEVEL == 2, "write_center[r,d=%d):", d);
- debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
- (int)(bit_chk & 1), i, left_edge[i]);
- debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
- right_edge[i]);
- bit_chk = bit_chk >> 1;
- }
- }
- }
+ /* Per-bit deskew. */
- /* Check that all bits have a window */
+ /*
+ * Set the left and right edge of each bit to an illegal value.
+ * Use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value.
+ */
+ sticky_bit_chk = 0;
for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
- debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \
- %d right_edge[%u]: %d", __func__, __LINE__,
- i, left_edge[i], i, right_edge[i]);
- if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) ||
- (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1)) {
- set_failing_group_stage(test_bgn + i,
- CAL_STAGE_WRITES,
- CAL_SUBSTAGE_WRITES_CENTER);
- return 0;
- }
+ left_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
+ right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
}
- /* Find middle of window for each DQ bit */
- mid_min = left_edge[0] - right_edge[0];
- min_index = 0;
- for (i = 1; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
- mid = left_edge[i] - right_edge[i];
- if (mid < mid_min) {
- mid_min = mid;
- min_index = i;
- }
+ /* Search for the left edge of the window for each bit. */
+ search_left_edge(1, rank_bgn, write_group, 0, test_bgn,
+ &sticky_bit_chk,
+ left_edge, right_edge, 0);
+
+ /* Search for the right edge of the window for each bit. */
+ ret = search_right_edge(1, rank_bgn, write_group, 0,
+ start_dqs, 0,
+ &sticky_bit_chk,
+ left_edge, right_edge, 0);
+ if (ret) {
+ set_failing_group_stage(test_bgn + ret - 1, CAL_STAGE_WRITES,
+ CAL_SUBSTAGE_WRITES_CENTER);
+ return -EINVAL;
}
- /*
- * -mid_min/2 represents the amount that we need to move DQS.
- * If mid_min is odd and positive we'll need to add one to
- * make sure the rounding in further calculations is correct
- * (always bias to the right), so just add 1 for all positive values.
- */
- if (mid_min > 0)
- mid_min++;
- mid_min = mid_min / 2;
- debug_cond(DLEVEL == 1, "%s:%d write_center: mid_min=%d\n", __func__,
- __LINE__, mid_min);
+ min_index = get_window_mid_index(1, left_edge, right_edge, &mid_min);
- /* Determine the amount we can change DQS (which is -mid_min) */
+ /* Determine the amount we can change DQS (which is -mid_min). */
orig_mid_min = mid_min;
new_dqs = start_dqs;
mid_min = 0;
- debug_cond(DLEVEL == 1, "%s:%d write_center: start_dqs=%d new_dqs=%d \
- mid_min=%d\n", __func__, __LINE__, start_dqs, new_dqs, mid_min);
- /* Initialize data for export structures */
- dqs_margin = IO_IO_OUT1_DELAY_MAX + 1;
- dq_margin = IO_IO_OUT1_DELAY_MAX + 1;
-
- /* add delay to bring centre of all DQ windows to the same "level" */
- for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
- /* Use values before divide by 2 to reduce round off error */
- shift_dq = (left_edge[i] - right_edge[i] -
- (left_edge[min_index] - right_edge[min_index]))/2 +
- (orig_mid_min - mid_min);
-
- debug_cond(DLEVEL == 2, "%s:%d write_center: before: shift_dq \
- [%u]=%d\n", __func__, __LINE__, i, shift_dq);
-
- addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
- temp_dq_out1_delay = readl(addr + (i << 2));
- if (shift_dq + (int32_t)temp_dq_out1_delay >
- (int32_t)IO_IO_OUT1_DELAY_MAX) {
- shift_dq = (int32_t)IO_IO_OUT1_DELAY_MAX - temp_dq_out1_delay;
- } else if (shift_dq + (int32_t)temp_dq_out1_delay < 0) {
- shift_dq = -(int32_t)temp_dq_out1_delay;
- }
- debug_cond(DLEVEL == 2, "write_center: after: shift_dq[%u]=%d\n",
- i, shift_dq);
- scc_mgr_set_dq_out1_delay(i, temp_dq_out1_delay + shift_dq);
- scc_mgr_load_dq(i);
-
- debug_cond(DLEVEL == 2, "write_center: margin[%u]=[%d,%d]\n", i,
- left_edge[i] - shift_dq + (-mid_min),
- right_edge[i] + shift_dq - (-mid_min));
- /* To determine values for export structures */
- if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
- dq_margin = left_edge[i] - shift_dq + (-mid_min);
+ debug_cond(DLEVEL == 1,
+ "%s:%d write_center: start_dqs=%d new_dqs=%d mid_min=%d\n",
+ __func__, __LINE__, start_dqs, new_dqs, mid_min);
- if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
- dqs_margin = right_edge[i] + shift_dq - (-mid_min);
- }
+ /* Add delay to bring centre of all DQ windows to the same "level". */
+ center_dq_windows(1, left_edge, right_edge, mid_min, orig_mid_min,
+ min_index, 0, &dq_margin, &dqs_margin);
/* Move DQS */
scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
debug_cond(DLEVEL == 2, "%s:%d write_center: DM\n", __func__, __LINE__);
/*
- * set the left and right edge of each bit to an illegal value,
- * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value,
+ * Set the left and right edge of each bit to an illegal value.
+ * Use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value.
*/
left_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
- int32_t bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
- int32_t end_curr = IO_IO_OUT1_DELAY_MAX + 1;
- int32_t bgn_best = IO_IO_OUT1_DELAY_MAX + 1;
- int32_t end_best = IO_IO_OUT1_DELAY_MAX + 1;
- int32_t win_best = 0;
-
- /* Search for the/part of the window with DM shift */
- for (d = IO_IO_OUT1_DELAY_MAX; d >= 0; d -= DELTA_D) {
- scc_mgr_apply_group_dm_out1_delay(d);
- writel(0, &sdr_scc_mgr->update);
-
- if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
- PASS_ALL_BITS, &bit_chk,
- 0)) {
- /* USE Set current end of the window */
- end_curr = -d;
- /*
- * If a starting edge of our window has not been seen
- * this is our current start of the DM window.
- */
- if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
- bgn_curr = -d;
-
- /*
- * If current window is bigger than best seen.
- * Set best seen to be current window.
- */
- if ((end_curr-bgn_curr+1) > win_best) {
- win_best = end_curr-bgn_curr+1;
- bgn_best = bgn_curr;
- end_best = end_curr;
- }
- } else {
- /* We just saw a failing test. Reset temp edge */
- bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
- end_curr = IO_IO_OUT1_DELAY_MAX + 1;
- }
- }
+ /* Search for the/part of the window with DM shift. */
+ search_window(1, rank_bgn, write_group, &bgn_curr, &end_curr,
+ &bgn_best, &end_best, &win_best, 0);
- /* Reset DM delay chains to 0 */
+ /* Reset DM delay chains to 0. */
scc_mgr_apply_group_dm_out1_delay(0);
/*
* Check to see if the current window nudges up against 0 delay.
* If so we need to continue the search by shifting DQS otherwise DQS
- * search begins as a new search. */
+ * search begins as a new search.
+ */
if (end_curr != 0) {
bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
end_curr = IO_IO_OUT1_DELAY_MAX + 1;
}
- /* Search for the/part of the window with DQS shifts */
- for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - new_dqs; d += DELTA_D) {
- /*
- * Note: This only shifts DQS, so are we limiting ourselve to
- * width of DQ unnecessarily.
- */
- scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
- d + new_dqs);
-
- writel(0, &sdr_scc_mgr->update);
- if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
- PASS_ALL_BITS, &bit_chk,
- 0)) {
- /* USE Set current end of the window */
- end_curr = d;
- /*
- * If a beginning edge of our window has not been seen
- * this is our current begin of the DM window.
- */
- if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
- bgn_curr = d;
-
- /*
- * If current window is bigger than best seen. Set best
- * seen to be current window.
- */
- if ((end_curr-bgn_curr+1) > win_best) {
- win_best = end_curr-bgn_curr+1;
- bgn_best = bgn_curr;
- end_best = end_curr;
- }
- } else {
- /* We just saw a failing test. Reset temp edge */
- bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
- end_curr = IO_IO_OUT1_DELAY_MAX + 1;
-
- /* Early exit optimization: if ther remaining delay
- chain space is less than already seen largest window
- we can exit */
- if ((win_best-1) >
- (IO_IO_OUT1_DELAY_MAX - new_dqs - d)) {
- break;
- }
- }
- }
+ /* Search for the/part of the window with DQS shifts. */
+ search_window(0, rank_bgn, write_group, &bgn_curr, &end_curr,
+ &bgn_best, &end_best, &win_best, new_dqs);
- /* assign left and right edge for cal and reporting; */
- left_edge[0] = -1*bgn_best;
+ /* Assign left and right edge for cal and reporting. */
+ left_edge[0] = -1 * bgn_best;
right_edge[0] = end_best;
- debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n", __func__,
- __LINE__, left_edge[0], right_edge[0]);
+ debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n",
+ __func__, __LINE__, left_edge[0], right_edge[0]);
- /* Move DQS (back to orig) */
+ /* Move DQS (back to orig). */
scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
/* Move DM */
- /* Find middle of window for the DM bit */
+ /* Find middle of window for the DM bit. */
mid = (left_edge[0] - right_edge[0]) / 2;
- /* only move right, since we are not moving DQS/DQ */
+ /* Only move right, since we are not moving DQS/DQ. */
if (mid < 0)
mid = 0;
- /* dm_marign should fail if we never find a window */
+ /* dm_margin should fail if we never find a window. */
if (win_best == 0)
dm_margin = -1;
else
scc_mgr_apply_group_dm_out1_delay(mid);
writel(0, &sdr_scc_mgr->update);
- debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d mid=%d \
- dm_margin=%d\n", __func__, __LINE__, left_edge[0],
- right_edge[0], mid, dm_margin);
- /* Export values */
+ debug_cond(DLEVEL == 2,
+ "%s:%d dm_calib: left=%d right=%d mid=%d dm_margin=%d\n",
+ __func__, __LINE__, left_edge[0], right_edge[0],
+ mid, dm_margin);
+ /* Export values. */
gbl->fom_out += dq_margin + dqs_margin;
- debug_cond(DLEVEL == 2, "%s:%d write_center: dq_margin=%d \
- dqs_margin=%d dm_margin=%d\n", __func__, __LINE__,
- dq_margin, dqs_margin, dm_margin);
+ debug_cond(DLEVEL == 2,
+ "%s:%d write_center: dq_margin=%d dqs_margin=%d dm_margin=%d\n",
+ __func__, __LINE__, dq_margin, dqs_margin, dm_margin);
/*
* Do not remove this line as it makes sure all of our
* decisions have been applied.
*/
writel(0, &sdr_scc_mgr->update);
- return (dq_margin >= 0) && (dqs_margin >= 0) && (dm_margin >= 0);
+
+ if ((dq_margin < 0) || (dqs_margin < 0) || (dm_margin < 0))
+ return -EINVAL;
+
+ return 0;
}
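For illustration, a minimal standalone sketch of the per-bit centering arithmetic that get_window_mid_index() and center_dq_windows() now encapsulate. The edge values, NUM_DQ and IO_IO_OUT1_DELAY_MAX below are invented stand-ins, and the clamp against the delay-chain limit plus the orig_mid_min correction are omitted; this is a sketch of the idea, not the driver code.

#include <stdio.h>

#define NUM_DQ 8                  /* stand-in for RW_MGR_MEM_DQ_PER_WRITE_DQS */
#define IO_IO_OUT1_DELAY_MAX 31   /* assumed value, illustration only */

int main(void)
{
	/* Hypothetical per-bit window edges, in delay-chain taps. */
	int left_edge[NUM_DQ]  = { 10, 11, 9, 12, 10, 11, 10, 9 };
	int right_edge[NUM_DQ] = {  8,  7, 9,  6,  8,  7,  8, 9 };
	int mid, mid_min, min_index, shift_dq, i;

	/* Find the smallest (left - right) offset, as get_window_mid_index() does. */
	mid_min = left_edge[0] - right_edge[0];
	min_index = 0;
	for (i = 1; i < NUM_DQ; i++) {
		mid = left_edge[i] - right_edge[i];
		if (mid < mid_min) {
			mid_min = mid;
			min_index = i;
		}
	}

	/*
	 * -mid_min/2 is the amount DQS needs to move; bias odd positive
	 * values to the right before halving, as the calibration code does.
	 */
	if (mid_min > 0)
		mid_min++;
	mid_min /= 2;
	printf("min_index=%d, DQS moves by %d taps\n", min_index, -mid_min);

	/* Per-bit shift that lines each window centre up with min_index's. */
	for (i = 0; i < NUM_DQ; i++) {
		shift_dq = (left_edge[i] - right_edge[i] -
			    (left_edge[min_index] - right_edge[min_index])) / 2;
		printf("DQ%d: out1 delay shift %d\n", i, shift_dq);
	}
	return 0;
}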
-/* calibrate the write operations */
-static uint32_t rw_mgr_mem_calibrate_writes(uint32_t rank_bgn, uint32_t g,
- uint32_t test_bgn)
+/**
+ * rw_mgr_mem_calibrate_writes() - Write Calibration Part One
+ * @rank_bgn: Rank number
+ * @group: Read/Write Group
+ * @test_bgn: Bit at which the test begins
+ *
+ * Stage 2: Write Calibration Part One.
+ *
+ * This function implements UniPHY calibration Stage 2, as explained in
+ * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
+ */
+static int rw_mgr_mem_calibrate_writes(const u32 rank_bgn, const u32 group,
+ const u32 test_bgn)
{
- /* update info for sims */
- debug("%s:%d %u %u\n", __func__, __LINE__, g, test_bgn);
+ int ret;
+
+ /* Update info for sims */
+ debug("%s:%d %u %u\n", __func__, __LINE__, group, test_bgn);
+ reg_file_set_group(group);
reg_file_set_stage(CAL_STAGE_WRITES);
reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);
- reg_file_set_group(g);
-
- if (!rw_mgr_mem_calibrate_writes_center(rank_bgn, g, test_bgn)) {
- set_failing_group_stage(g, CAL_STAGE_WRITES,
+ ret = rw_mgr_mem_calibrate_writes_center(rank_bgn, group, test_bgn);
+ if (ret)
+ set_failing_group_stage(group, CAL_STAGE_WRITES,
CAL_SUBSTAGE_WRITES_CENTER);
- return 0;
- }
- return 1;
+ return ret;
}
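The two search_window() calls in rw_mgr_mem_calibrate_writes_center() above replace the inline DM/DQS sweep loops shown removed. A minimal sketch of that run-tracking bookkeeping, with the hardware write test replaced by a hypothetical passes() predicate and an assumed 32-tap delay chain (the early-exit optimization of the DQS pass is left out):

#include <stdio.h>

#define IO_IO_OUT1_DELAY_MAX 31   /* assumed tap count, illustration only */
#define DELTA_D 1

/* Hypothetical stand-in for rw_mgr_mem_calibrate_write_test(). */
static int passes(int d)
{
	return d >= 5 && d <= 20;  /* pretend taps 5..20 work */
}

int main(void)
{
	int bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
	int end_curr = IO_IO_OUT1_DELAY_MAX + 1;
	int bgn_best = IO_IO_OUT1_DELAY_MAX + 1;
	int end_best = IO_IO_OUT1_DELAY_MAX + 1;
	int win_best = 0;
	int d;

	/* Sweep the DM delay from max down to 0, as the DM pass does. */
	for (d = IO_IO_OUT1_DELAY_MAX; d >= 0; d -= DELTA_D) {
		if (passes(d)) {
			/* Current end of the window. */
			end_curr = -d;
			/* First passing tap of this run is its begin. */
			if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
				bgn_curr = -d;
			/* Remember the widest run seen so far. */
			if (end_curr - bgn_curr + 1 > win_best) {
				win_best = end_curr - bgn_curr + 1;
				bgn_best = bgn_curr;
				end_best = end_curr;
			}
		} else {
			/* A failing tap ends the current run. */
			bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
			end_curr = IO_IO_OUT1_DELAY_MAX + 1;
		}
	}

	printf("best window: [%d, %d], width %d\n",
	       bgn_best, end_best, win_best);
	return 0;
}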
-/* precharge all banks and activate row 0 in bank "000..." and bank "111..." */
+/**
+ * mem_precharge_and_activate() - Precharge all banks and activate
+ *
+ * Precharge all banks and activate row 0 in bank "000..." and bank "111...".
+ */
static void mem_precharge_and_activate(void)
{
- uint32_t r;
+ int r;
for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
- if (param->skip_ranks[r]) {
- /* request to skip the rank */
+ /* Test if the rank should be skipped. */
+ if (param->skip_ranks[r])
continue;
- }
- /* set rank */
+ /* Set rank. */
set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
- /* precharge all banks ... */
+ /* Precharge all banks. */
writel(RW_MGR_PRECHARGE_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RUN_SINGLE_GROUP_OFFSET);
writel(RW_MGR_ACTIVATE_0_AND_1_WAIT2,
&sdr_rw_load_jump_mgr_regs->load_jump_add1);
- /* activate rows */
+ /* Activate rows. */
writel(RW_MGR_ACTIVATE_0_AND_1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RUN_SINGLE_GROUP_OFFSET);
}
}
-/* Configure various memory related parameters. */
-static void mem_config(void)
+/**
+ * mem_init_latency() - Configure memory RLAT and WLAT settings
+ *
+ * Configure memory RLAT and WLAT parameters.
+ */
+static void mem_init_latency(void)
{
- uint32_t rlat, wlat;
- uint32_t rw_wl_nop_cycles;
- uint32_t max_latency;
-
- debug("%s:%d\n", __func__, __LINE__);
- /* read in write and read latency */
- wlat = readl(&data_mgr->t_wl_add);
- wlat += readl(&data_mgr->mem_t_add);
-
- /* WL for hard phy does not include additive latency */
-
/*
- * add addtional write latency to offset the address/command extra
- * clock cycle. We change the AC mux setting causing AC to be delayed
- * by one mem clock cycle. Only do this for DDR3
+ * For AV/CV, LFIFO is hardened and always runs at full rate
+ * so max latency in AFI clocks, used here, is correspondingly
+ * smaller.
*/
- wlat = wlat + 1;
-
- rlat = readl(&data_mgr->t_rl_add);
+ const u32 max_latency = (1 << MAX_LATENCY_COUNT_WIDTH) - 1;
+ u32 rlat, wlat;
- rw_wl_nop_cycles = wlat - 2;
- gbl->rw_wl_nop_cycles = rw_wl_nop_cycles;
+ debug("%s:%d\n", __func__, __LINE__);
/*
- * For AV/CV, lfifo is hardened and always runs at full rate so
- * max latency in AFI clocks, used here, is correspondingly smaller.
+ * Read in write latency.
+ * WL for Hard PHY does not include additive latency.
*/
- max_latency = (1<<MAX_LATENCY_COUNT_WIDTH)/1 - 1;
- /* configure for a burst length of 8 */
+ wlat = readl(&data_mgr->t_wl_add);
+ wlat += readl(&data_mgr->mem_t_add);
- /* write latency */
- /* Adjust Write Latency for Hard PHY */
- wlat = wlat + 1;
+ gbl->rw_wl_nop_cycles = wlat - 1;
- /* set a pretty high read latency initially */
- gbl->curr_read_lat = rlat + 16;
+ * Read in read latency.
+ rlat = readl(&data_mgr->t_rl_add);
+ /* Set a pretty high read latency initially. */
+ gbl->curr_read_lat = rlat + 16;
if (gbl->curr_read_lat > max_latency)
gbl->curr_read_lat = max_latency;
writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
- /* advertise write latency */
- gbl->curr_write_lat = wlat;
- writel(wlat - 2, &phy_mgr_cfg->afi_wlat);
-
- /* initialize bit slips */
- mem_precharge_and_activate();
+ /* Advertise write latency. */
+ writel(wlat, &phy_mgr_cfg->afi_wlat);
}
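A small numerical illustration of the latency setup in mem_init_latency(); the register values and MAX_LATENCY_COUNT_WIDTH used below are made-up assumptions, not values read from any real configuration:

#include <stdio.h>

#define MAX_LATENCY_COUNT_WIDTH 5   /* assumed width, illustration only */

int main(void)
{
	/* Hypothetical values standing in for the data_mgr registers. */
	unsigned int t_wl_add = 5, mem_t_add = 2, t_rl_add = 7;
	unsigned int max_latency = (1 << MAX_LATENCY_COUNT_WIDTH) - 1; /* 31 */

	unsigned int wlat = t_wl_add + mem_t_add;      /* 7 */
	unsigned int rw_wl_nop_cycles = wlat - 1;      /* 6 */
	unsigned int curr_read_lat = t_rl_add + 16;    /* 23, start high */

	/* Clamp against the hardened-LFIFO maximum, as the driver does. */
	if (curr_read_lat > max_latency)
		curr_read_lat = max_latency;

	printf("wlat=%u nop_cycles=%u rlat=%u (max %u)\n",
	       wlat, rw_wl_nop_cycles, curr_read_lat, max_latency);
	return 0;
}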
-/* Set VFIFO and LFIFO to instant-on settings in skip calibration mode */
+/**
+ * mem_skip_calibrate() - Set VFIFO and LFIFO to instant-on settings
+ *
+ * Set VFIFO and LFIFO to instant-on settings in skip calibration mode.
+ */
static void mem_skip_calibrate(void)
{
uint32_t vfifo_offset;
debug("%s:%d\n", __func__, __LINE__);
/* Need to update every shadow register set used by the interface */
for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
- r += NUM_RANKS_PER_SHADOW_REG) {
+ r += NUM_RANKS_PER_SHADOW_REG) {
/*
* Set output phase alignment settings appropriate for
* skip calibration.
*
* (1.25 * IO_DLL_CHAIN_LENGTH - 2)
*/
- scc_mgr_set_dqdqs_output_phase(i, (1.25 *
- IO_DLL_CHAIN_LENGTH - 2));
+ scc_mgr_set_dqdqs_output_phase(i,
+ 1.25 * IO_DLL_CHAIN_LENGTH - 2);
}
writel(0xff, &sdr_scc_mgr->dqs_ena);
writel(0xff, &sdr_scc_mgr->dqs_io_ena);
* in sequencer.
*/
vfifo_offset = CALIB_VFIFO_OFFSET;
- for (j = 0; j < vfifo_offset; j++) {
+ for (j = 0; j < vfifo_offset; j++)
writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
- }
writel(0, &phy_mgr_cmd->fifo_reset);
/*
- * For ACV with hard lfifo, we get the skip-cal setting from
- * generation-time constant.
+ * For Arria V and Cyclone V with hard LFIFO, we get the skip-cal
+ * setting from a generation-time constant.
*/
gbl->curr_read_lat = CALIB_LFIFO_OFFSET;
writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
}
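The skip-calibration path above computes the DQ/DQS output phase as 1.25 * IO_DLL_CHAIN_LENGTH - 2 in floating point. A tiny sketch with made-up chain lengths, assuming the phase argument is ultimately an unsigned integer as elsewhere in this file, shows the truncation that results from the conversion:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical DLL chain lengths; the real value is device specific. */
	unsigned int lengths[] = { 6, 8, 12 };
	unsigned int i;

	for (i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
		/* Same expression as above; the double result truncates toward zero. */
		uint32_t phase = 1.25 * lengths[i] - 2;
		printf("IO_DLL_CHAIN_LENGTH=%u -> output phase %u\n",
		       lengths[i], phase);
	}
	return 0;
}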
-/* Memory calibration entry point */
+/**
+ * mem_calibrate() - Memory calibration entry point.
+ *
+ * Perform memory calibration.
+ */
static uint32_t mem_calibrate(void)
{
uint32_t i;
uint32_t run_groups, current_run;
uint32_t failing_groups = 0;
uint32_t group_failed = 0;
- uint32_t sr_failed = 0;
+
+ const u32 rwdqs_ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
+ RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
debug("%s:%d\n", __func__, __LINE__);
- /* Initialize the data settings */
+ /* Initialize the data settings */
gbl->error_substage = CAL_SUBSTAGE_NIL;
gbl->error_stage = CAL_STAGE_NIL;
gbl->error_group = 0xff;
gbl->fom_in = 0;
gbl->fom_out = 0;
- mem_config();
+ /* Initialize WLAT and RLAT. */
+ mem_init_latency();
+
+ /* Initialize bit slips. */
+ mem_precharge_and_activate();
for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
scc_set_bypass_mode(i);
}
+ /* Calibration is skipped. */
if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
/*
* Set VFIFO and LFIFO to instant-on settings in skip
* calibration mode.
*/
mem_skip_calibrate();
- } else {
- for (i = 0; i < NUM_CALIB_REPEAT; i++) {
- /*
- * Zero all delay chain/phase settings for all
- * groups and all shadow register sets.
- */
- scc_mgr_zero_all();
- run_groups = ~param->skip_groups;
+ /*
+ * Do not remove this line as it makes sure all of our
+ * decisions have been applied.
+ */
+ writel(0, &sdr_scc_mgr->update);
+ return 1;
+ }
- for (write_group = 0, write_test_bgn = 0; write_group
- < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++,
- write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) {
- /* Initialized the group failure */
- group_failed = 0;
+ /* Calibration is not skipped. */
+ for (i = 0; i < NUM_CALIB_REPEAT; i++) {
+ /*
+ * Zero all delay chain/phase settings for all
+ * groups and all shadow register sets.
+ */
+ scc_mgr_zero_all();
- current_run = run_groups & ((1 <<
- RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
- run_groups = run_groups >>
- RW_MGR_NUM_DQS_PER_WRITE_GROUP;
+ run_groups = ~param->skip_groups;
- if (current_run == 0)
- continue;
+ for (write_group = 0, write_test_bgn = 0; write_group
+ < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++,
+ write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) {
- writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
- SCC_MGR_GROUP_COUNTER_OFFSET);
- scc_mgr_zero_group(write_group, 0);
-
- for (read_group = write_group *
- RW_MGR_MEM_IF_READ_DQS_WIDTH /
- RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
- read_test_bgn = 0;
- read_group < (write_group + 1) *
- RW_MGR_MEM_IF_READ_DQS_WIDTH /
- RW_MGR_MEM_IF_WRITE_DQS_WIDTH &&
- group_failed == 0;
- read_group++, read_test_bgn +=
- RW_MGR_MEM_DQ_PER_READ_DQS) {
- /* Calibrate the VFIFO */
- if (!((STATIC_CALIB_STEPS) &
- CALIB_SKIP_VFIFO)) {
- if (!rw_mgr_mem_calibrate_vfifo
- (read_group,
- read_test_bgn)) {
- group_failed = 1;
-
- if (!(gbl->
- phy_debug_mode_flags &
- PHY_DEBUG_SWEEP_ALL_GROUPS)) {
- return 0;
- }
- }
- }
- }
+ /* Initialize the group failure */
+ group_failed = 0;
- /* Calibrate the output side */
- if (group_failed == 0) {
- for (rank_bgn = 0, sr = 0; rank_bgn
- < RW_MGR_MEM_NUMBER_OF_RANKS;
- rank_bgn +=
- NUM_RANKS_PER_SHADOW_REG,
- ++sr) {
- sr_failed = 0;
- if (!((STATIC_CALIB_STEPS) &
- CALIB_SKIP_WRITES)) {
- if ((STATIC_CALIB_STEPS)
- & CALIB_SKIP_DELAY_SWEEPS) {
- /* not needed in quick mode! */
- } else {
- /*
- * Determine if this set of
- * ranks should be skipped
- * entirely.
- */
- if (!param->skip_shadow_regs[sr]) {
- if (!rw_mgr_mem_calibrate_writes
- (rank_bgn, write_group,
- write_test_bgn)) {
- sr_failed = 1;
- if (!(gbl->
- phy_debug_mode_flags &
- PHY_DEBUG_SWEEP_ALL_GROUPS)) {
- return 0;
- }
- }
- }
- }
- }
- if (sr_failed != 0)
- group_failed = 1;
- }
- }
+ current_run = run_groups & ((1 <<
+ RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
+ run_groups = run_groups >>
+ RW_MGR_NUM_DQS_PER_WRITE_GROUP;
- if (group_failed == 0) {
- for (read_group = write_group *
- RW_MGR_MEM_IF_READ_DQS_WIDTH /
- RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
- read_test_bgn = 0;
- read_group < (write_group + 1)
- * RW_MGR_MEM_IF_READ_DQS_WIDTH
- / RW_MGR_MEM_IF_WRITE_DQS_WIDTH &&
- group_failed == 0;
- read_group++, read_test_bgn +=
- RW_MGR_MEM_DQ_PER_READ_DQS) {
- if (!((STATIC_CALIB_STEPS) &
- CALIB_SKIP_WRITES)) {
- if (!rw_mgr_mem_calibrate_vfifo_end
- (read_group, read_test_bgn)) {
- group_failed = 1;
-
- if (!(gbl->phy_debug_mode_flags
- & PHY_DEBUG_SWEEP_ALL_GROUPS)) {
- return 0;
- }
- }
- }
- }
- }
+ if (current_run == 0)
+ continue;
- if (group_failed != 0)
- failing_groups++;
+ writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
+ SCC_MGR_GROUP_COUNTER_OFFSET);
+ scc_mgr_zero_group(write_group, 0);
+
+ for (read_group = write_group * rwdqs_ratio,
+ read_test_bgn = 0;
+ read_group < (write_group + 1) * rwdqs_ratio;
+ read_group++,
+ read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
+ if (STATIC_CALIB_STEPS & CALIB_SKIP_VFIFO)
+ continue;
+
+ /* Calibrate the VFIFO */
+ if (rw_mgr_mem_calibrate_vfifo(read_group,
+ read_test_bgn))
+ continue;
+
+ if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
+ return 0;
+
+ /* The group failed, we're done. */
+ goto grp_failed;
}
- /*
- * USER If there are any failing groups then report
- * the failure.
- */
- if (failing_groups != 0)
- return 0;
+ /* Calibrate the output side */
+ for (rank_bgn = 0, sr = 0;
+ rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
+ rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
+ if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
+ continue;
+
+ /* Not needed in quick mode! */
+ if (STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS)
+ continue;
- /* Calibrate the LFIFO */
- if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_LFIFO)) {
/*
- * If we're skipping groups as part of debug,
- * don't calibrate LFIFO.
+ * Determine if this set of ranks
+ * should be skipped entirely.
*/
- if (param->skip_groups == 0) {
- if (!rw_mgr_mem_calibrate_lfifo())
- return 0;
- }
+ if (param->skip_shadow_regs[sr])
+ continue;
+
+ /* Calibrate WRITEs */
+ if (!rw_mgr_mem_calibrate_writes(rank_bgn,
+ write_group, write_test_bgn))
+ continue;
+
+ group_failed = 1;
+ if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
+ return 0;
}
+
+ /* Some group failed, we're done. */
+ if (group_failed)
+ goto grp_failed;
+
+ for (read_group = write_group * rwdqs_ratio,
+ read_test_bgn = 0;
+ read_group < (write_group + 1) * rwdqs_ratio;
+ read_group++,
+ read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
+ if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
+ continue;
+
+ if (!rw_mgr_mem_calibrate_vfifo_end(read_group,
+ read_test_bgn))
+ continue;
+
+ if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
+ return 0;
+
+ /* The group failed, we're done. */
+ goto grp_failed;
+ }
+
+ /* No group failed, continue as usual. */
+ continue;
+
+grp_failed: /* A group failed, increment the counter. */
+ failing_groups++;
}
+
+ /*
+ * If there are any failing groups then report
+ * the failure.
+ */
+ if (failing_groups != 0)
+ return 0;
+
+ if (STATIC_CALIB_STEPS & CALIB_SKIP_LFIFO)
+ continue;
+
+ /*
+ * If we're skipping groups as part of debug,
+ * don't calibrate LFIFO.
+ */
+ if (param->skip_groups != 0)
+ continue;
+
+ /* Calibrate the LFIFO */
+ if (!rw_mgr_mem_calibrate_lfifo())
+ return 0;
}
/*
return 1;
}
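The rwdqs_ratio constant introduced above expresses how many read (DQS capture) groups belong to each write group. A short sketch with hypothetical interface widths, just to make the group mapping used by the two inner loops explicit:

#include <stdio.h>

int main(void)
{
	/* Hypothetical interface widths; real values come from sequencer_auto.h. */
	unsigned int read_dqs_width = 8, write_dqs_width = 4;
	unsigned int rwdqs_ratio = read_dqs_width / write_dqs_width;  /* 2 */
	unsigned int write_group, read_group;

	for (write_group = 0; write_group < write_dqs_width; write_group++)
		for (read_group = write_group * rwdqs_ratio;
		     read_group < (write_group + 1) * rwdqs_ratio;
		     read_group++)
			printf("write group %u covers read group %u\n",
			       write_group, read_group);
	return 0;
}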
-static uint32_t run_mem_calibrate(void)
+/**
+ * run_mem_calibrate() - Perform memory calibration
+ *
+ * This function triggers the entire memory calibration procedure.
+ */
+static int run_mem_calibrate(void)
{
- uint32_t pass;
- uint32_t debug_info;
+ int pass;
debug("%s:%d\n", __func__, __LINE__);
/* Reset pass/fail status shown on afi_cal_success/fail */
writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status);
- /* stop tracking manger */
- uint32_t ctrlcfg = readl(&sdr_ctrl->ctrl_cfg);
+ /* Stop tracking manager. */
+ clrbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22);
- writel(ctrlcfg & 0xFFBFFFFF, &sdr_ctrl->ctrl_cfg);
-
- initialize();
+ phy_mgr_initialize();
rw_mgr_mem_initialize();
+ /* Perform the actual memory calibration. */
pass = mem_calibrate();
mem_precharge_and_activate();
writel(0, &phy_mgr_cmd->fifo_reset);
+ /* Handoff. */
+ rw_mgr_mem_handoff();
/*
- * Handoff:
- * Don't return control of the PHY back to AFI when in debug mode.
+ * In Hard PHY this is a 2-bit control:
+ * 0: AFI Mux Select
+ * 1: DDIO Mux Select
*/
- if ((gbl->phy_debug_mode_flags & PHY_DEBUG_IN_DEBUG_MODE) == 0) {
- rw_mgr_mem_handoff();
- /*
- * In Hard PHY this is a 2-bit control:
- * 0: AFI Mux Select
- * 1: DDIO Mux Select
- */
- writel(0x2, &phy_mgr_cfg->mux_sel);
- }
+ writel(0x2, &phy_mgr_cfg->mux_sel);
+
+ /* Start tracking manager. */
+ setbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22);
- writel(ctrlcfg, &sdr_ctrl->ctrl_cfg);
+ return pass;
+}
+
+/**
+ * debug_mem_calibrate() - Report result of memory calibration
+ * @pass: Value indicating whether calibration passed or failed
+ *
+ * This function reports the results of the memory calibration
+ * and writes debug information into the register file.
+ */
+static void debug_mem_calibrate(int pass)
+{
+ uint32_t debug_info;
if (pass) {
printf("%s: CALIBRATION PASSED\n", __FILE__);
writel(debug_info, &sdr_reg_file->failing_stage);
}
- return pass;
+ printf("%s: Calibration complete\n", __FILE__);
}
/**
writel(reg, &sdr_ctrl->phy_ctrl2);
}
+/**
+ * initialize_tracking() - Initialize tracking
+ *
+ * Initialize the register file with usable initial data.
+ */
static void initialize_tracking(void)
{
- uint32_t concatenated_longidle = 0x0;
- uint32_t concatenated_delays = 0x0;
- uint32_t concatenated_rw_addr = 0x0;
- uint32_t concatenated_refresh = 0x0;
- uint32_t trk_sample_count = 7500;
- uint32_t dtaps_per_ptap;
- uint32_t tmp_delay;
+ /*
+ * Initialize the register file with the correct data.
+ * Compute usable version of value in case we skip full
+ * computation later.
+ */
+ writel(DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP) - 1,
+ &sdr_reg_file->dtaps_per_ptap);
+
+ /* trk_sample_count */
+ writel(7500, &sdr_reg_file->trk_sample_count);
+
+ /* longidle outer loop [31:16], longidle sample count [15:0] */
+ writel((10 << 16) | (100 << 0), &sdr_reg_file->trk_longidle);
/*
- * compute usable version of value in case we skip full
- * computation later
+ * trfc, worst case of 933MHz 4Gb [31:24]
+ * trcd, worst case [23:16]
+ * vfifo wait [15:8]
+ * mux delay [7:0]
*/
- dtaps_per_ptap = 0;
- tmp_delay = 0;
- while (tmp_delay < IO_DELAY_PER_OPA_TAP) {
- dtaps_per_ptap++;
- tmp_delay += IO_DELAY_PER_DCHAIN_TAP;
- }
- dtaps_per_ptap--;
-
- concatenated_longidle = concatenated_longidle ^ 10;
- /*longidle outer loop */
- concatenated_longidle = concatenated_longidle << 16;
- concatenated_longidle = concatenated_longidle ^ 100;
- /*longidle sample count */
- concatenated_delays = concatenated_delays ^ 243;
- /* trfc, worst case of 933Mhz 4Gb */
- concatenated_delays = concatenated_delays << 8;
- concatenated_delays = concatenated_delays ^ 14;
- /* trcd, worst case */
- concatenated_delays = concatenated_delays << 8;
- concatenated_delays = concatenated_delays ^ 10;
- /* vfifo wait */
- concatenated_delays = concatenated_delays << 8;
- concatenated_delays = concatenated_delays ^ 4;
- /* mux delay */
-
- concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_IDLE;
- concatenated_rw_addr = concatenated_rw_addr << 8;
- concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_ACTIVATE_1;
- concatenated_rw_addr = concatenated_rw_addr << 8;
- concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_SGLE_READ;
- concatenated_rw_addr = concatenated_rw_addr << 8;
- concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_PRECHARGE_ALL;
-
- concatenated_refresh = concatenated_refresh ^ RW_MGR_REFRESH_ALL;
- concatenated_refresh = concatenated_refresh << 24;
- concatenated_refresh = concatenated_refresh ^ 1000; /* trefi */
+ writel((243 << 24) | (14 << 16) | (10 << 8) | (4 << 0),
+ &sdr_reg_file->delays);
- /* Initialize the register file with the correct data */
- writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
- writel(trk_sample_count, &sdr_reg_file->trk_sample_count);
- writel(concatenated_longidle, &sdr_reg_file->trk_longidle);
- writel(concatenated_delays, &sdr_reg_file->delays);
- writel(concatenated_rw_addr, &sdr_reg_file->trk_rw_mgr_addr);
- writel(RW_MGR_MEM_IF_READ_DQS_WIDTH, &sdr_reg_file->trk_read_dqs_width);
- writel(concatenated_refresh, &sdr_reg_file->trk_rfsh);
+ /* RW manager command addresses for the tracking manager. */
+ writel((RW_MGR_IDLE << 24) | (RW_MGR_ACTIVATE_1 << 16) |
+ (RW_MGR_SGLE_READ << 8) | (RW_MGR_PRECHARGE_ALL << 0),
+ &sdr_reg_file->trk_rw_mgr_addr);
+
+ writel(RW_MGR_MEM_IF_READ_DQS_WIDTH,
+ &sdr_reg_file->trk_read_dqs_width);
+
+ /* RW_MGR_REFRESH_ALL [31:24], trefi [23:0] */
+ writel((RW_MGR_REFRESH_ALL << 24) | (1000 << 0),
+ &sdr_reg_file->trk_rfsh);
}
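A small sketch of the bit-field packing used by initialize_tracking() above: it re-derives the removed shift-and-xor sequence with explicit masks so the layout of the delays word is easy to check, and confirms that DIV_ROUND_UP(a, b) - 1 matches the counting loop it replaced. The tap-delay numbers are invented for the example:

#include <stdio.h>
#include <stdint.h>

/* Extract an 8-bit field starting at bit position 'pos'. */
static unsigned int field8(uint32_t word, unsigned int pos)
{
	return (word >> pos) & 0xff;
}

int main(void)
{
	/* Same packing as the writel() above: trfc | trcd | vfifo | mux. */
	uint32_t delays = (243 << 24) | (14 << 16) | (10 << 8) | (4 << 0);
	unsigned int opa = 2500, dchain = 78;   /* made-up tap delays */
	unsigned int by_div, by_loop = 0, tmp = 0;

	printf("trfc=%u trcd=%u vfifo_wait=%u mux_delay=%u\n",
	       field8(delays, 24), field8(delays, 16),
	       field8(delays, 8), field8(delays, 0));

	/* dtaps_per_ptap: DIV_ROUND_UP(opa, dchain) - 1 ... */
	by_div = (opa + dchain - 1) / dchain - 1;

	/* ... equals what the removed while loop counted. */
	while (tmp < opa) {
		by_loop++;
		tmp += dchain;
	}
	by_loop--;

	printf("dtaps_per_ptap: %u (DIV_ROUND_UP) vs %u (loop)\n",
	       by_div, by_loop);
	return 0;
}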
int sdram_calibration_full(void)
struct param_type my_param;
struct gbl_type my_gbl;
uint32_t pass;
- uint32_t i;
+
+ memset(&my_param, 0, sizeof(my_param));
+ memset(&my_gbl, 0, sizeof(my_gbl));
param = &my_param;
gbl = &my_gbl;
- /* Initialize the debug mode flags */
- gbl->phy_debug_mode_flags = 0;
/* Set the calibration enabled by default */
gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
/*
initialize_tracking();
- /* USER Enable all ranks, groups */
- for (i = 0; i < RW_MGR_MEM_NUMBER_OF_RANKS; i++)
- param->skip_ranks[i] = 0;
- for (i = 0; i < NUM_SHADOW_REGS; ++i)
- param->skip_shadow_regs[i] = 0;
- param->skip_groups = 0;
-
printf("%s: Preparing to start memory calibration\n", __FILE__);
debug("%s:%d\n", __func__, __LINE__);
skip_delay_mask = 0x0;
pass = run_mem_calibrate();
-
- printf("%s: Calibration complete\n", __FILE__);
+ debug_mem_calibrate(pass);
return pass;
}