static void initialize_config(void *config)
{
- struct rsxx_card_cfg *cfg = (struct rsxx_card_cfg *) config;
+ struct rsxx_card_cfg *cfg = config;
cfg->hdr.version = RSXX_CFG_VERSION;
/*----------------- Config Operations ------------------*/
-int rsxx_save_config(struct rsxx_cardinfo *card)
+static int rsxx_save_config(struct rsxx_cardinfo *card)
{
struct rsxx_card_cfg cfg;
int st;
iowrite32(card->ier_mask, card->regmap + IER);
}
-irqreturn_t rsxx_isr(int irq, void *pdata)
+static irqreturn_t rsxx_isr(int irq, void *pdata)
{
- struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) pdata;
+ struct rsxx_cardinfo *card = pdata;
unsigned int isr;
int handled = 0;
int reread_isr;
}
/*----------------- Card Event Handler -------------------*/
+static char *rsxx_card_state_to_str(unsigned int state)
+{
+ static char *state_strings[] = {
+ "Unknown", "Shutdown", "Starting", "Formatting",
+ "Uninitialized", "Good", "Shutting Down",
+ "Fault", "Read Only Fault", "dStroying"
+ };
+
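+ /*
+ * Card states are expected to be one-hot bit flags, so ffs() maps the
+ * lowest set bit to its 1-based slot; a state of 0 gives ffs() == 0
+ * and falls back to "Unknown".
+ */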
+ return state_strings[ffs(state)];
+}
+
static void card_state_change(struct rsxx_cardinfo *card,
unsigned int new_state)
{
rsxx_read_hw_log(card);
}
-
-char *rsxx_card_state_to_str(unsigned int state)
-{
- static char *state_strings[] = {
- "Unknown", "Shutdown", "Starting", "Formatting",
- "Uninitialized", "Good", "Shutting Down",
- "Fault", "Read Only Fault", "dStroying"
- };
-
- return state_strings[ffs(state)];
-}
-
/*----------------- Card Operations -------------------*/
static int card_shutdown(struct rsxx_cardinfo *card)
{
const struct pci_device_id *id)
{
struct rsxx_cardinfo *card;
- unsigned long flags;
int st;
dev_info(&dev->dev, "PCI-Flash SSD discovered\n");
spin_lock_init(&card->irq_lock);
card->halt = 0;
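+ /*
+ * Probe runs in process context with interrupts enabled, so the
+ * plain spin_lock_irq() variant suffices; saving and restoring the
+ * interrupt flags is unnecessary here.
+ */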
- spin_lock_irqsave(&card->irq_lock, flags);
+ spin_lock_irq(&card->irq_lock);
rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
- spin_unlock_irqrestore(&card->irq_lock, flags);
+ spin_unlock_irq(&card->irq_lock);
if (!force_legacy) {
st = pci_enable_msi(dev);
/************* Setup Processor Command Interface *************/
rsxx_creg_setup(card);
- spin_lock_irqsave(&card->irq_lock, flags);
+ spin_lock_irq(&card->irq_lock);
rsxx_enable_ier_and_isr(card, CR_INTR_CREG);
- spin_unlock_irqrestore(&card->irq_lock, flags);
+ spin_unlock_irq(&card->irq_lock);
st = rsxx_compatibility_check(card);
if (st) {
* we can enable the event interrupt(it kicks off actions in
* those layers so we couldn't enable it right away.)
*/
- spin_lock_irqsave(&card->irq_lock, flags);
+ spin_lock_irq(&card->irq_lock);
rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
- spin_unlock_irqrestore(&card->irq_lock, flags);
+ spin_unlock_irq(&card->irq_lock);
if (card->state == CARD_STATE_SHUTDOWN) {
st = rsxx_issue_card_cmd(card, CARD_CMD_STARTUP);
rsxx_dma_destroy(card);
failed_dma_setup:
failed_compatiblity_check:
- spin_lock_irqsave(&card->irq_lock, flags);
+ spin_lock_irq(&card->irq_lock);
rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
- spin_unlock_irqrestore(&card->irq_lock, flags);
+ spin_unlock_irq(&card->irq_lock);
free_irq(dev->irq, card);
if (!force_legacy)
pci_disable_msi(dev);
* Spin lock is needed because this can be called in atomic/interrupt
* context.
*/
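+ /*
+ * The _bh variant is presumably what keeps the creg command timer,
+ * which runs in softirq context, from deadlocking against a
+ * process-context holder of this lock.
+ */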
- spin_lock_bh(&card->creg_ctrl.pop_lock);
+ spin_lock_bh(&card->creg_ctrl.lock);
cmd = card->creg_ctrl.active_cmd;
card->creg_ctrl.active_cmd = NULL;
- spin_unlock_bh(&card->creg_ctrl.pop_lock);
+ spin_unlock_bh(&card->creg_ctrl.lock);
return cmd;
}
cmd->buf, cmd->stream);
}
- /* Data copy must complete before initiating the command. */
+ /*
+ * Data copy must complete before initiating the command. This is
+ * needed for weakly ordered processors (e.g. PowerPC), so that all
+ * necessary registers are written before we kick the hardware.
+ */
wmb();
/* Setting the valid bit will kick off the command. */
cmd->cb_private = cb_private;
cmd->status = 0;
- mutex_lock(&card->creg_ctrl.lock);
+ spin_lock(&card->creg_ctrl.lock);
list_add_tail(&cmd->list, &card->creg_ctrl.queue);
card->creg_ctrl.q_depth++;
creg_kick_queue(card);
- mutex_unlock(&card->creg_ctrl.lock);
+ spin_unlock(&card->creg_ctrl.lock);
return 0;
}
kmem_cache_free(creg_cmd_pool, cmd);
- spin_lock(&card->creg_ctrl.pop_lock);
+
+ spin_lock(&card->creg_ctrl.lock);
card->creg_ctrl.active = 0;
creg_kick_queue(card);
- spin_unlock(&card->creg_ctrl.pop_lock);
+ spin_unlock(&card->creg_ctrl.lock);
}
kmem_cache_free(creg_cmd_pool, cmd);
- mutex_lock(&card->creg_ctrl.lock);
+ spin_lock(&card->creg_ctrl.lock);
card->creg_ctrl.active = 0;
creg_kick_queue(card);
- mutex_unlock(&card->creg_ctrl.lock);
+ spin_unlock(&card->creg_ctrl.lock);
}
static void creg_reset(struct rsxx_cardinfo *card)
struct creg_cmd *tmp;
unsigned long flags;
+ /*
+ * mutex_trylock is used here because if reset_lock is taken then a
+ * reset is already happening, so we can simply return.
+ */
if (!mutex_trylock(&card->creg_ctrl.reset_lock))
return;
"Resetting creg interface for recovery\n");
/* Cancel outstanding commands */
- mutex_lock(&card->creg_ctrl.lock);
+ spin_lock(&card->creg_ctrl.lock);
list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
list_del(&cmd->list);
card->creg_ctrl.q_depth--;
card->creg_ctrl.active = 0;
}
- mutex_unlock(&card->creg_ctrl.lock);
+ spin_unlock(&card->creg_ctrl.lock);
card->creg_ctrl.reset = 0;
spin_lock_irqsave(&card->irq_lock, flags);
{
struct creg_completion *cmd_completion;
- cmd_completion = (struct creg_completion *)cmd->cb_private;
+ cmd_completion = cmd->cb_private;
BUG_ON(!cmd_completion);
cmd_completion->st = st;
unsigned long timeout;
int st;
- INIT_COMPLETION(cmd_done);
completion.cmd_done = &cmd_done;
completion.st = 0;
completion.creg_status = 0;
if (st)
return st;
+ /*
+ * This timeout is necessary for unresponsive hardware. The additional
+ * 20 seconds are used to guarantee that each creg request has time to
+ * complete.
+ */
timeout = msecs_to_jiffies((CREG_TIMEOUT_MSEC *
- card->creg_ctrl.q_depth) + 20000);
+ card->creg_ctrl.q_depth) + 20000);
/*
* The creg interface is guaranteed to complete. It has a timeout
if (st)
return st;
- data = (void *)((char *)data + xfer);
+ data = (char *)data + xfer;
addr += xfer;
size8 -= xfer;
} while (size8);
}
/*
- * The substrncpy() function copies to string(up to count bytes) point to by src
- * (including the terminating '\0' character) to dest. Returns the number of
- * bytes copied to dest.
+ * The substrncpy function copies up to count bytes of the src string
+ * (including the terminating '\0' character) into dest. Returns the
+ * number of bytes copied to dest.
*/
static int substrncpy(char *dest, const char *src, int count)
{
if (st)
return -EFAULT;
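+ /* Reject counts that would overrun the data[] buffer in rsxx_reg_access. */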
+ if (cmd.cnt > RSXX_MAX_REG_CNT)
+ return -EFAULT;
+
st = issue_reg_cmd(card, &cmd, read);
if (st)
return st;
INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
mutex_init(&card->creg_ctrl.reset_lock);
INIT_LIST_HEAD(&card->creg_ctrl.queue);
- mutex_init(&card->creg_ctrl.lock);
- spin_lock_init(&card->creg_ctrl.pop_lock);
+ spin_lock_init(&card->creg_ctrl.lock);
setup_timer(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out,
(unsigned long) card);
int cnt = 0;
/* Cancel outstanding commands */
- mutex_lock(&card->creg_ctrl.lock);
+ spin_lock(&card->creg_ctrl.lock);
list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
list_del(&cmd->list);
if (cmd->cb)
"Canceled active creg command\n");
kmem_cache_free(creg_cmd_pool, cmd);
}
- mutex_unlock(&card->creg_ctrl.lock);
+ spin_unlock(&card->creg_ctrl.lock);
cancel_work_sync(&card->creg_ctrl.done_work);
}
void *cb_data,
unsigned int error)
{
- struct rsxx_bio_meta *meta = (struct rsxx_bio_meta *)cb_data;
+ struct rsxx_bio_meta *meta = cb_data;
if (error)
atomic_set(&meta->error, 1);
/*----------------- Misc Utility Functions -------------------*/
-unsigned int rsxx_addr8_to_laddr(u64 addr8, struct rsxx_cardinfo *card)
+static unsigned int rsxx_addr8_to_laddr(u64 addr8, struct rsxx_cardinfo *card)
{
unsigned long long tgt_addr8;
return tgt_addr8;
}
-unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
+static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
{
unsigned int tgt;
INIT_LIST_HEAD(&ctrl->queue);
setup_timer(&ctrl->activity_timer, dma_engine_stalled,
- (unsigned long)ctrl);
+ (unsigned long)ctrl);
ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
if (!ctrl->issue_wq)
return 0;
}
-int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
+static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
unsigned int stripe_size8)
{
if (!is_power_of_2(stripe_size8)) {
return 0;
}
-int rsxx_dma_configure(struct rsxx_cardinfo *card)
+static int rsxx_dma_configure(struct rsxx_cardinfo *card)
{
u32 intr_coal;
__u32 data[8];
};
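+/* Maximum creg access size in bytes: the size of the data[] array above. */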
+#define RSXX_MAX_REG_CNT (8 * (sizeof(__u32)))
+
#define RSXX_IOC_MAGIC 'r'
#define RSXX_GETREG _IOWR(RSXX_IOC_MAGIC, 0x20, struct rsxx_reg_access)
/* Embedded CPU Communication */
struct {
- struct mutex lock;
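+ /* Guards queue, q_depth, active, and active_cmd. */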
+ spinlock_t lock;
bool active;
struct creg_cmd *active_cmd;
struct work_struct done_work;
} creg_stats;
struct timer_list cmd_timer;
struct mutex reset_lock;
- spinlock_t pop_lock;
int reset;
} creg_ctrl;
/***** config.c *****/
int rsxx_load_config(struct rsxx_cardinfo *card);
-int rsxx_save_config(struct rsxx_cardinfo *card);
/***** core.c *****/
void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr);
unsigned int intr);
void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
unsigned int intr);
-char *rsxx_card_state_to_str(unsigned int state);
-irqreturn_t rsxx_isr(int irq, void *pdata);
/***** dev.c *****/
int rsxx_attach_dev(struct rsxx_cardinfo *card);
void rsxx_dma_destroy(struct rsxx_cardinfo *card);
int rsxx_dma_init(void);
void rsxx_dma_cleanup(void);
-int rsxx_dma_configure(struct rsxx_cardinfo *card);
int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
struct bio *bio,
atomic_t *n_dmas,
rsxx_dma_cb cb,
void *cb_data);
-int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
- unsigned int stripe_size8);
-unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8);
-unsigned int rsxx_addr8_to_laddr(u64 addr8, struct rsxx_cardinfo *card);
/***** cregs.c *****/
int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr,