1 /*
2  * Universal Flash Storage Host controller driver Core
3  *
4  * This code is based on drivers/scsi/ufs/ufshcd.c
5  * Copyright (C) 2011-2013 Samsung India Software Operations
6  *
7  * Authors:
8  *      Santosh Yaraganavi <santosh.sy@samsung.com>
9  *      Vinayak Holikatti <h.vinayak@samsung.com>
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License
13  * as published by the Free Software Foundation; either version 2
14  * of the License, or (at your option) any later version.
15  * See the COPYING file in the top-level directory or visit
16  * <http://www.gnu.org/licenses/gpl-2.0.html>
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * This program is provided "AS IS" and "WITH ALL FAULTS" and
24  * without warranty of any kind. You are solely responsible for
25  * determining the appropriateness of using and distributing
26  * the program and assume all risks associated with your exercise
27  * of rights with respect to the program, including but not limited
28  * to infringement of third party rights, the risks and costs of
29  * program errors, damage to or loss of data, programs or equipment,
30  * and unavailability or interruption of operations. Under no
31  * circumstances will the contributor of this Program be liable for
32  * any damages of any kind arising from your use or distribution of
33  * this program.
34  */
35
36 #include <linux/async.h>
37
38 #include "ufshcd.h"
39
40 #define UFSHCD_ENABLE_INTRS     (UTP_TRANSFER_REQ_COMPL |\
41                                  UTP_TASK_REQ_COMPL |\
42                                  UFSHCD_ERROR_MASK)
43 /* UIC command timeout, unit: ms */
44 #define UIC_CMD_TIMEOUT 500
45
46 /* NOP OUT retries waiting for NOP IN response */
47 #define NOP_OUT_RETRIES    10
48 /* Timeout after 30 msecs if NOP OUT hangs without response */
49 #define NOP_OUT_TIMEOUT    30 /* msecs */
50
51 enum {
52         UFSHCD_MAX_CHANNEL      = 0,
53         UFSHCD_MAX_ID           = 1,
54         UFSHCD_MAX_LUNS         = 8,
55         UFSHCD_CMD_PER_LUN      = 32,
56         UFSHCD_CAN_QUEUE        = 32,
57 };
58
59 /* UFSHCD states */
60 enum {
61         UFSHCD_STATE_OPERATIONAL,
62         UFSHCD_STATE_RESET,
63         UFSHCD_STATE_ERROR,
64 };
65
66 /* Interrupt configuration options */
67 enum {
68         UFSHCD_INT_DISABLE,
69         UFSHCD_INT_ENABLE,
70         UFSHCD_INT_CLEAR,
71 };
72
73 /* Interrupt aggregation options */
74 enum {
75         INT_AGGR_RESET,
76         INT_AGGR_CONFIG,
77 };
78
79 /*
80  * ufshcd_wait_for_register - wait until a register reaches the given value
81  * @hba - per-adapter interface
82  * @reg - mmio register offset
83  * @mask - mask to apply to read register value
84  * @val - wait condition
85  * @interval_us - polling interval in microsecs
86  * @timeout_ms - timeout in millisecs
87  *
88  * Returns -ETIMEDOUT on error, zero on success
89  */
90 static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
91                 u32 val, unsigned long interval_us, unsigned long timeout_ms)
92 {
93         int err = 0;
94         unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
95
96         /* ignore bits that we don't intend to wait on */
97         val = val & mask;
98
99         while ((ufshcd_readl(hba, reg) & mask) != val) {
100                 /* wakeup within 50us of expiry */
101                 usleep_range(interval_us, interval_us + 50);
102
103                 if (time_after(jiffies, timeout)) {
104                         if ((ufshcd_readl(hba, reg) & mask) != val)
105                                 err = -ETIMEDOUT;
106                         break;
107                 }
108         }
109
110         return err;
111 }
112
113 /**
114  * ufshcd_get_intr_mask - Get the interrupt bit mask
115  * @hba - Pointer to adapter instance
116  *
117  * Returns interrupt bit mask per version
118  */
119 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
120 {
121         if (hba->ufs_version == UFSHCI_VERSION_10)
122                 return INTERRUPT_MASK_ALL_VER_10;
123         else
124                 return INTERRUPT_MASK_ALL_VER_11;
125 }
126
127 /**
128  * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
129  * @hba - Pointer to adapter instance
130  *
131  * Returns UFSHCI version supported by the controller
132  */
133 static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
134 {
135         return ufshcd_readl(hba, REG_UFS_VERSION);
136 }
137
138 /**
139  * ufshcd_is_device_present - Check if any device is connected to
140  *                            the host controller
141  * @reg_hcs - host controller status register value
142  *
143  * Returns 1 if device present, 0 if no device detected
144  */
145 static inline int ufshcd_is_device_present(u32 reg_hcs)
146 {
147         return (DEVICE_PRESENT & reg_hcs) ? 1 : 0;
148 }
149
150 /**
151  * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
152  * @lrb: pointer to local command reference block
153  *
154  * This function is used to get the OCS field from UTRD
155  * Returns the OCS field in the UTRD
156  */
157 static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
158 {
159         return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS;
160 }
161
162 /**
163  * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
164  * @task_req_descp: pointer to utp_task_req_desc structure
165  *
166  * This function is used to get the OCS field from UTMRD
167  * Returns the OCS field in the UTMRD
168  */
169 static inline int
170 ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
171 {
172         return task_req_descp->header.dword_2 & MASK_OCS;
173 }
174
175 /**
176  * ufshcd_get_tm_free_slot - get a free slot for task management request
177  * @hba: per adapter instance
178  *
179  * Returns the free slot number, or the maximum number of task management
180  * request slots if the task management queue is full
181  */
182 static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba)
183 {
184         return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs);
185 }
186
187 /**
188  * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
189  * @hba: per adapter instance
190  * @pos: position of the bit to be cleared
191  */
192 static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
193 {
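        /*
         * UTRLCLR is cleared by writing '0' to the bit position to be
         * cleared, so write the inverted single-bit mask.
         */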
194         ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
195 }
196
197 /**
198  * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
199  * @reg: Register value of host controller status
200  *
201  * Returns 0 on success and a positive value on failure
202  */
203 static inline int ufshcd_get_lists_status(u32 reg)
204 {
205         /*
206          * The mask 0xFF is for the following HCS register bits
207          * Bit          Description
208          *  0           Device Present
209          *  1           UTRLRDY
210          *  2           UTMRLRDY
211          *  3           UCRDY
212          *  4           HEI
213          *  5           DEI
214          * 6-7          reserved
215          */
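        /*
         * Shifting out the Device Present bit and XOR-ing with 0x07 yields
         * zero only when UTRLRDY, UTMRLRDY and UCRDY are all set and the
         * HEI/DEI bits are clear.
         */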
216         return (((reg) & (0xFF)) >> 1) ^ (0x07);
217 }
218
219 /**
220  * ufshcd_get_uic_cmd_result - Get the UIC command result
221  * @hba: Pointer to adapter instance
222  *
223  * This function gets the result of UIC command completion
224  * Returns 0 on success, non-zero value on error
225  */
226 static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
227 {
228         return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
229                MASK_UIC_COMMAND_RESULT;
230 }
231
232 /**
233  * ufshcd_get_req_rsp - returns the TR response transaction type
234  * @ucd_rsp_ptr: pointer to response UPIU
235  */
236 static inline int
237 ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
238 {
239         return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
240 }
241
242 /**
243  * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
244  * @ucd_rsp_ptr: pointer to response UPIU
245  *
246  * This function gets the response status and scsi_status from response UPIU
247  * Returns the response result code.
248  */
249 static inline int
250 ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
251 {
252         return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
253 }
254
255 /**
256  * ufshcd_config_int_aggr - Configure interrupt aggregation values.
257  *              Currently there is no use case where we want to configure
258  *              interrupt aggregation dynamically. So to configure interrupt
259  *              aggregation, #define INT_AGGR_COUNTER_THRESHOLD_VALUE and
260  *              INT_AGGR_TIMEOUT_VALUE are used.
261  * @hba: per adapter instance
262  * @option: Interrupt aggregation option
263  */
264 static inline void
265 ufshcd_config_int_aggr(struct ufs_hba *hba, int option)
266 {
267         switch (option) {
268         case INT_AGGR_RESET:
269                 ufshcd_writel(hba, INT_AGGR_ENABLE |
270                               INT_AGGR_COUNTER_AND_TIMER_RESET,
271                               REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
272                 break;
273         case INT_AGGR_CONFIG:
274                 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
275                               INT_AGGR_COUNTER_THRESHOLD_VALUE |
276                               INT_AGGR_TIMEOUT_VALUE,
277                               REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
278                 break;
279         }
280 }
281
282 /**
283  * ufshcd_enable_run_stop_reg - Enable run-stop registers.
284  *                      When the run-stop registers are set to 1, it indicates
285  *                      to the host controller that it can process requests
286  * @hba: per adapter instance
287  */
288 static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
289 {
290         ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
291                       REG_UTP_TASK_REQ_LIST_RUN_STOP);
292         ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
293                       REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
294 }
295
296 /**
297  * ufshcd_hba_start - Start controller initialization sequence
298  * @hba: per adapter instance
299  */
300 static inline void ufshcd_hba_start(struct ufs_hba *hba)
301 {
302         ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
303 }
304
305 /**
306  * ufshcd_is_hba_active - Get controller state
307  * @hba: per adapter instance
308  *
309  * Returns zero if controller is active, 1 otherwise
310  */
311 static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
312 {
313         return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
314 }
315
316 /**
317  * ufshcd_send_command - Send SCSI or device management commands
318  * @hba: per adapter instance
319  * @task_tag: Task tag of the command
320  */
321 static inline
322 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
323 {
324         __set_bit(task_tag, &hba->outstanding_reqs);
325         ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
326 }
327
328 /**
329  * ufshcd_copy_sense_data - Copy sense data in case of check condition
330  * @lrb - pointer to local reference block
331  */
332 static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
333 {
334         int len;
335         if (lrbp->sense_buffer) {
336                 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
337                 memcpy(lrbp->sense_buffer,
338                         lrbp->ucd_rsp_ptr->sr.sense_data,
339                         min_t(int, len, SCSI_SENSE_BUFFERSIZE));
340         }
341 }
342
343 /**
344  * ufshcd_hba_capabilities - Read controller capabilities
345  * @hba: per adapter instance
346  */
347 static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
348 {
349         hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
350
351         /* nutrs and nutmrs are 0 based values */
352         hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
353         hba->nutmrs =
354         ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
355 }
356
357 /**
358  * ufshcd_ready_for_uic_cmd - Check if controller is ready
359  *                            to accept UIC commands
360  * @hba: per adapter instance
361  * Return true on success, else false
362  */
363 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
364 {
365         if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
366                 return true;
367         else
368                 return false;
369 }
370
371 /**
372  * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
373  * @hba: per adapter instance
374  * @uic_cmd: UIC command
375  *
376  * Mutex must be held.
377  */
378 static inline void
379 ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
380 {
381         WARN_ON(hba->active_uic_cmd);
382
383         hba->active_uic_cmd = uic_cmd;
384
385         /* Write Args */
386         ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
387         ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
388         ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
389
390         /* Write UIC Cmd */
391         ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
392                       REG_UIC_COMMAND);
393 }
394
395 /**
396  * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
397  * @hba: per adapter instance
398  * @uic_command: UIC command
399  *
400  * Must be called with mutex held.
401  * Returns 0 only if success.
402  */
403 static int
404 ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
405 {
406         int ret;
407         unsigned long flags;
408
409         if (wait_for_completion_timeout(&uic_cmd->done,
410                                         msecs_to_jiffies(UIC_CMD_TIMEOUT)))
411                 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
412         else
413                 ret = -ETIMEDOUT;
414
415         spin_lock_irqsave(hba->host->host_lock, flags);
416         hba->active_uic_cmd = NULL;
417         spin_unlock_irqrestore(hba->host->host_lock, flags);
418
419         return ret;
420 }
421
422 /**
423  * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
424  * @hba: per adapter instance
425  * @uic_cmd: UIC command
426  *
427  * Identical to ufshcd_send_uic_cmd() except that it does not acquire the
428  * mutex itself. Must be called with the mutex held.
429  * Returns 0 only if success.
430  */
431 static int
432 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
433 {
434         int ret;
435         unsigned long flags;
436
437         if (!ufshcd_ready_for_uic_cmd(hba)) {
438                 dev_err(hba->dev,
439                         "Controller not ready to accept UIC commands\n");
440                 return -EIO;
441         }
442
443         init_completion(&uic_cmd->done);
444
445         spin_lock_irqsave(hba->host->host_lock, flags);
446         ufshcd_dispatch_uic_cmd(hba, uic_cmd);
447         spin_unlock_irqrestore(hba->host->host_lock, flags);
448
449         ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
450
451         return ret;
452 }
453
454 /**
455  * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
456  * @hba: per adapter instance
457  * @uic_cmd: UIC command
458  *
459  * Returns 0 only if success.
460  */
461 static int
462 ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
463 {
464         int ret;
465
466         mutex_lock(&hba->uic_cmd_mutex);
467         ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
468         mutex_unlock(&hba->uic_cmd_mutex);
469
470         return ret;
471 }
472
473 /**
474  * ufshcd_map_sg - Map scatter-gather list to prdt
475  * @lrbp - pointer to local reference block
476  *
477  * Returns 0 in case of success, non-zero value in case of failure
478  */
479 static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
480 {
481         struct ufshcd_sg_entry *prd_table;
482         struct scatterlist *sg;
483         struct scsi_cmnd *cmd;
484         int sg_segments;
485         int i;
486
487         cmd = lrbp->cmd;
488         sg_segments = scsi_dma_map(cmd);
489         if (sg_segments < 0)
490                 return sg_segments;
491
492         if (sg_segments) {
493                 lrbp->utr_descriptor_ptr->prd_table_length =
494                                         cpu_to_le16((u16) (sg_segments));
495
496                 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
497
498                 scsi_for_each_sg(cmd, sg, sg_segments, i) {
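                        /* PRDT size field holds the segment length minus one */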
499                         prd_table[i].size  =
500                                 cpu_to_le32(((u32) sg_dma_len(sg))-1);
501                         prd_table[i].base_addr =
502                                 cpu_to_le32(lower_32_bits(sg->dma_address));
503                         prd_table[i].upper_addr =
504                                 cpu_to_le32(upper_32_bits(sg->dma_address));
505                 }
506         } else {
507                 lrbp->utr_descriptor_ptr->prd_table_length = 0;
508         }
509
510         return 0;
511 }
512
513 /**
514  * ufshcd_enable_intr - enable interrupts
515  * @hba: per adapter instance
516  * @intrs: interrupt bits
517  */
518 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
519 {
520         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
521
522         if (hba->ufs_version == UFSHCI_VERSION_10) {
523                 u32 rw;
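                /*
                 * UFSHCI 1.0: keep the currently enabled read/write interrupt
                 * bits and additionally set only the requested bits that are
                 * not already enabled.
                 */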
524                 rw = set & INTERRUPT_MASK_RW_VER_10;
525                 set = rw | ((set ^ intrs) & intrs);
526         } else {
527                 set |= intrs;
528         }
529
530         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
531 }
532
533 /**
534  * ufshcd_disable_intr - disable interrupts
535  * @hba: per adapter instance
536  * @intrs: interrupt bits
537  */
538 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
539 {
540         u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
541
542         if (hba->ufs_version == UFSHCI_VERSION_10) {
543                 u32 rw;
544                 rw = (set & INTERRUPT_MASK_RW_VER_10) &
545                         ~(intrs & INTERRUPT_MASK_RW_VER_10);
546                 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
547
548         } else {
549                 set &= ~intrs;
550         }
551
552         ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
553 }
554
555 /**
556  * ufshcd_prepare_req_desc_hdr() - Fill the request descriptor header
557  * according to the request
558  * @lrbp: pointer to local reference block
559  * @upiu_flags: flags required in the header
560  * @cmd_dir: data direction of the request
561  */
562 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
563                 u32 *upiu_flags, enum dma_data_direction cmd_dir)
564 {
565         struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
566         u32 data_direction;
567         u32 dword_0;
568
569         if (cmd_dir == DMA_FROM_DEVICE) {
570                 data_direction = UTP_DEVICE_TO_HOST;
571                 *upiu_flags = UPIU_CMD_FLAGS_READ;
572         } else if (cmd_dir == DMA_TO_DEVICE) {
573                 data_direction = UTP_HOST_TO_DEVICE;
574                 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
575         } else {
576                 data_direction = UTP_NO_DATA_TRANSFER;
577                 *upiu_flags = UPIU_CMD_FLAGS_NONE;
578         }
579
580         dword_0 = data_direction | (lrbp->command_type
581                                 << UPIU_COMMAND_TYPE_OFFSET);
582         if (lrbp->intr_cmd)
583                 dword_0 |= UTP_REQ_DESC_INT_CMD;
584
585         /* Transfer request descriptor header fields */
586         req_desc->header.dword_0 = cpu_to_le32(dword_0);
587
588         /*
589          * Assign an invalid value to the command status. The controller
590          * updates OCS with the actual command status when the command
591          * completes.
592          */
593         req_desc->header.dword_2 =
594                 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
595 }
596
597 /**
598  * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
599  * for scsi commands
600  * @lrbp - local reference block pointer
601  * @upiu_flags - flags
602  */
603 static
604 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
605 {
606         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
607
608         /* command descriptor fields */
609         ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
610                                 UPIU_TRANSACTION_COMMAND, upiu_flags,
611                                 lrbp->lun, lrbp->task_tag);
612         ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
613                                 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
614
615         /* Total EHS length and Data segment length will be zero */
616         ucd_req_ptr->header.dword_2 = 0;
617
618         ucd_req_ptr->sc.exp_data_transfer_len =
619                 cpu_to_be32(lrbp->cmd->sdb.length);
620
621         memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
622                 (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
623 }
624
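/**
 * ufshcd_prepare_utp_nop_upiu() - fill the command UPIU for a NOP OUT request
 * @lrbp: pointer to local reference block
 */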
625 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
626 {
627         struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
628
629         memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
630
631         /* command descriptor fields */
632         ucd_req_ptr->header.dword_0 =
633                 UPIU_HEADER_DWORD(
634                         UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
635 }
636
637 /**
638  * ufshcd_compose_upiu - form UFS Protocol Information Unit (UPIU)
639  * @hba - per adapter instance
640  * @lrb - pointer to local reference block
641  */
642 static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
643 {
644         u32 upiu_flags;
645         int ret = 0;
646
647         switch (lrbp->command_type) {
648         case UTP_CMD_TYPE_SCSI:
649                 if (likely(lrbp->cmd)) {
650                         ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
651                                         lrbp->cmd->sc_data_direction);
652                         ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
653                 } else {
654                         ret = -EINVAL;
655                 }
656                 break;
657         case UTP_CMD_TYPE_DEV_MANAGE:
658                 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
659                 if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
660                         ufshcd_prepare_utp_nop_upiu(lrbp);
661                 else
662                         ret = -EINVAL;
663                 break;
664         case UTP_CMD_TYPE_UFS:
665                 /* For UFS native command implementation */
666                 ret = -ENOTSUPP;
667                 dev_err(hba->dev, "%s: UFS native commands are not supported\n",
668                         __func__);
669                 break;
670         default:
671                 ret = -ENOTSUPP;
672                 dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
673                                 __func__, lrbp->command_type);
674                 break;
675         } /* end of switch */
676
677         return ret;
678 }
679
680 /**
681  * ufshcd_queuecommand - main entry point for SCSI requests
682  * @host: SCSI host pointer
683  * @cmd: command from SCSI Midlayer
684  *
685  * Returns 0 for success, non-zero in case of failure
686  */
687 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
688 {
689         struct ufshcd_lrb *lrbp;
690         struct ufs_hba *hba;
691         unsigned long flags;
692         int tag;
693         int err = 0;
694
695         hba = shost_priv(host);
696
697         tag = cmd->request->tag;
698
699         if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
700                 err = SCSI_MLQUEUE_HOST_BUSY;
701                 goto out;
702         }
703
704         /* acquire the tag to make sure device cmds don't use it */
705         if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
706                 /*
707                  * Dev manage command in progress, requeue the command.
708                  * Requeuing the command helps in cases where the request *may*
709                  * find different tag instead of waiting for dev manage command
710                  * completion.
711                  */
712                 err = SCSI_MLQUEUE_HOST_BUSY;
713                 goto out;
714         }
715
716         lrbp = &hba->lrb[tag];
717
718         WARN_ON(lrbp->cmd);
719         lrbp->cmd = cmd;
720         lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
721         lrbp->sense_buffer = cmd->sense_buffer;
722         lrbp->task_tag = tag;
723         lrbp->lun = cmd->device->lun;
724         lrbp->intr_cmd = false;
725         lrbp->command_type = UTP_CMD_TYPE_SCSI;
726
727         /* form UPIU before issuing the command */
728         ufshcd_compose_upiu(hba, lrbp);
729         err = ufshcd_map_sg(lrbp);
730         if (err) {
731                 lrbp->cmd = NULL;
732                 clear_bit_unlock(tag, &hba->lrb_in_use);
733                 goto out;
734         }
735
736         /* issue command to the controller */
737         spin_lock_irqsave(hba->host->host_lock, flags);
738         ufshcd_send_command(hba, tag);
739         spin_unlock_irqrestore(hba->host->host_lock, flags);
740 out:
741         return err;
742 }
743
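/**
 * ufshcd_compose_dev_cmd - prepare the LRB for a device management command
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 * @cmd_type: device management command type (e.g. DEV_CMD_TYPE_NOP)
 * @tag: tag (slot) reserved for this command
 */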
744 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
745                 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
746 {
747         lrbp->cmd = NULL;
748         lrbp->sense_bufflen = 0;
749         lrbp->sense_buffer = NULL;
750         lrbp->task_tag = tag;
751         lrbp->lun = 0; /* device management cmd is not specific to any LUN */
752         lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
753         lrbp->intr_cmd = true; /* No interrupt aggregation */
754         hba->dev_cmd.type = cmd_type;
755
756         return ufshcd_compose_upiu(hba, lrbp);
757 }
758
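/**
 * ufshcd_clear_cmd - clear an outstanding transfer request in the controller
 * @hba: per adapter instance
 * @tag: tag of the request to be cleared
 *
 * Returns 0 if the corresponding doorbell bit clears, -ETIMEDOUT otherwise
 */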
759 static int
760 ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
761 {
762         int err = 0;
763         unsigned long flags;
764         u32 mask = 1 << tag;
765
766         /* clear outstanding transaction before retry */
767         spin_lock_irqsave(hba->host->host_lock, flags);
768         ufshcd_utrl_clear(hba, tag);
769         spin_unlock_irqrestore(hba->host->host_lock, flags);
770
771         /*
772          * wait for the h/w to clear the corresponding bit in the door-bell.
773          * max. wait is 1 sec.
774          */
775         err = ufshcd_wait_for_register(hba,
776                         REG_UTP_TRANSFER_REQ_DOOR_BELL,
777                         mask, ~mask, 1000, 1000);
778
779         return err;
780 }
781
782 /**
783  * ufshcd_dev_cmd_completion() - handles device management command responses
784  * @hba: per adapter instance
785  * @lrbp: pointer to local reference block
786  */
787 static int
788 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
789 {
790         int resp;
791         int err = 0;
792
793         resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
794
795         switch (resp) {
796         case UPIU_TRANSACTION_NOP_IN:
797                 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
798                         err = -EINVAL;
799                         dev_err(hba->dev, "%s: unexpected response %x\n",
800                                         __func__, resp);
801                 }
802                 break;
803         case UPIU_TRANSACTION_REJECT_UPIU:
804                 /* TODO: handle Reject UPIU Response */
805                 err = -EPERM;
806                 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
807                                 __func__);
808                 break;
809         default:
810                 err = -EINVAL;
811                 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
812                                 __func__, resp);
813                 break;
814         }
815
816         return err;
817 }
818
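/**
 * ufshcd_wait_for_dev_cmd - wait for a device management command to complete
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 * @max_timeout: timeout in milliseconds
 *
 * Returns 0 on success, a non-zero OCS/response error on command failure;
 * on timeout, returns -EAGAIN if the command could be cleared from the
 * controller, -ETIMEDOUT otherwise.
 */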
819 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
820                 struct ufshcd_lrb *lrbp, int max_timeout)
821 {
822         int err = 0;
823         unsigned long time_left;
824         unsigned long flags;
825
826         time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
827                         msecs_to_jiffies(max_timeout));
828
829         spin_lock_irqsave(hba->host->host_lock, flags);
830         hba->dev_cmd.complete = NULL;
831         if (likely(time_left)) {
832                 err = ufshcd_get_tr_ocs(lrbp);
833                 if (!err)
834                         err = ufshcd_dev_cmd_completion(hba, lrbp);
835         }
836         spin_unlock_irqrestore(hba->host->host_lock, flags);
837
838         if (!time_left) {
839                 err = -ETIMEDOUT;
840                 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
841                         /* successfully cleared the command, retry if needed */
842                         err = -EAGAIN;
843         }
844
845         return err;
846 }
847
848 /**
849  * ufshcd_get_dev_cmd_tag - Get device management command tag
850  * @hba: per-adapter instance
851  * @tag_out: pointer to variable with available slot value
852  *
853  * Get a free slot and lock it until device management command
854  * completes.
855  *
856  * Returns false if a free slot is unavailable for locking, else
857  * returns true with the tag value in @tag_out.
858  */
859 static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
860 {
861         int tag;
862         bool ret = false;
863         unsigned long tmp;
864
865         if (!tag_out)
866                 goto out;
867
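        /*
         * Pick the highest-numbered free slot; loop again if another context
         * locks it before test_and_set_bit_lock() succeeds here.
         */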
868         do {
869                 tmp = ~hba->lrb_in_use;
870                 tag = find_last_bit(&tmp, hba->nutrs);
871                 if (tag >= hba->nutrs)
872                         goto out;
873         } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
874
875         *tag_out = tag;
876         ret = true;
877 out:
878         return ret;
879 }
880
881 static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
882 {
883         clear_bit_unlock(tag, &hba->lrb_in_use);
884 }
885
886 /**
887  * ufshcd_exec_dev_cmd - API for sending device management requests
888  * @hba - UFS hba
889  * @cmd_type - specifies the type (NOP, Query...)
890  * @timeout - timeout in milliseconds
891  *
892  * NOTE: There is only one available tag for device management commands. Thus
893  * synchronisation is the responsibility of the user.
894  */
895 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
896                 enum dev_cmd_type cmd_type, int timeout)
897 {
898         struct ufshcd_lrb *lrbp;
899         int err;
900         int tag;
901         struct completion wait;
902         unsigned long flags;
903
904         /*
905          * Get free slot, sleep if slots are unavailable.
906          * Even though we use wait_event() which sleeps indefinitely,
907          * the maximum wait time is bounded by SCSI request timeout.
908          */
909         wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
910
911         init_completion(&wait);
912         lrbp = &hba->lrb[tag];
913         WARN_ON(lrbp->cmd);
914         err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
915         if (unlikely(err))
916                 goto out_put_tag;
917
918         hba->dev_cmd.complete = &wait;
919
920         spin_lock_irqsave(hba->host->host_lock, flags);
921         ufshcd_send_command(hba, tag);
922         spin_unlock_irqrestore(hba->host->host_lock, flags);
923
924         err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
925
926 out_put_tag:
927         ufshcd_put_dev_cmd_tag(hba, tag);
928         wake_up(&hba->dev_cmd.tag_wq);
929         return err;
930 }
931
932 /**
933  * ufshcd_memory_alloc - allocate memory for host memory space data structures
934  * @hba: per adapter instance
935  *
936  * 1. Allocate DMA memory for Command Descriptor array
937  *      Each command descriptor consists of Command UPIU, Response UPIU and PRDT
938  * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
939  * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
940  *      (UTMRDL)
941  * 4. Allocate memory for local reference block(lrb).
942  *
943  * Returns 0 for success, non-zero in case of failure
944  */
945 static int ufshcd_memory_alloc(struct ufs_hba *hba)
946 {
947         size_t utmrdl_size, utrdl_size, ucdl_size;
948
949         /* Allocate memory for UTP command descriptors */
950         ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
951         hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
952                                                   ucdl_size,
953                                                   &hba->ucdl_dma_addr,
954                                                   GFP_KERNEL);
955
956         /*
957          * UFSHCI requires UTP command descriptors to be 128-byte aligned.
958          * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE;
959          * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
960          * be aligned to 128 bytes as well.
961          */
962         if (!hba->ucdl_base_addr ||
963             WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
964                 dev_err(hba->dev,
965                         "Command Descriptor Memory allocation failed\n");
966                 goto out;
967         }
968
969         /*
970          * Allocate memory for UTP Transfer descriptors
971          * UFSHCI requires 1024 byte alignment of UTRD
972          */
973         utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
974         hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
975                                                    utrdl_size,
976                                                    &hba->utrdl_dma_addr,
977                                                    GFP_KERNEL);
978         if (!hba->utrdl_base_addr ||
979             WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
980                 dev_err(hba->dev,
981                         "Transfer Descriptor Memory allocation failed\n");
982                 goto out;
983         }
984
985         /*
986          * Allocate memory for UTP Task Management descriptors
987          * UFSHCI requires 1024 byte alignment of UTMRD
988          */
989         utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
990         hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
991                                                     utmrdl_size,
992                                                     &hba->utmrdl_dma_addr,
993                                                     GFP_KERNEL);
994         if (!hba->utmrdl_base_addr ||
995             WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
996                 dev_err(hba->dev,
997                 "Task Management Descriptor Memory allocation failed\n");
998                 goto out;
999         }
1000
1001         /* Allocate memory for local reference block */
1002         hba->lrb = devm_kzalloc(hba->dev,
1003                                 hba->nutrs * sizeof(struct ufshcd_lrb),
1004                                 GFP_KERNEL);
1005         if (!hba->lrb) {
1006                 dev_err(hba->dev, "LRB Memory allocation failed\n");
1007                 goto out;
1008         }
1009         return 0;
1010 out:
1011         return -ENOMEM;
1012 }
1013
1014 /**
1015  * ufshcd_host_memory_configure - configure local reference block with
1016  *                              memory offsets
1017  * @hba: per adapter instance
1018  *
1019  * Configure Host memory space
1020  * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
1021  * address.
1022  * 2. Update each UTRD with Response UPIU offset, Response UPIU length
1023  * and PRDT offset.
1024  * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
1025  * into local reference block.
1026  */
1027 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
1028 {
1029         struct utp_transfer_cmd_desc *cmd_descp;
1030         struct utp_transfer_req_desc *utrdlp;
1031         dma_addr_t cmd_desc_dma_addr;
1032         dma_addr_t cmd_desc_element_addr;
1033         u16 response_offset;
1034         u16 prdt_offset;
1035         int cmd_desc_size;
1036         int i;
1037
1038         utrdlp = hba->utrdl_base_addr;
1039         cmd_descp = hba->ucdl_base_addr;
1040
1041         response_offset =
1042                 offsetof(struct utp_transfer_cmd_desc, response_upiu);
1043         prdt_offset =
1044                 offsetof(struct utp_transfer_cmd_desc, prd_table);
1045
1046         cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
1047         cmd_desc_dma_addr = hba->ucdl_dma_addr;
1048
1049         for (i = 0; i < hba->nutrs; i++) {
1050                 /* Configure UTRD with command descriptor base address */
1051                 cmd_desc_element_addr =
1052                                 (cmd_desc_dma_addr + (cmd_desc_size * i));
1053                 utrdlp[i].command_desc_base_addr_lo =
1054                                 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
1055                 utrdlp[i].command_desc_base_addr_hi =
1056                                 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
1057
1058                 /* Response upiu and prdt offset should be in double words */
1059                 utrdlp[i].response_upiu_offset =
1060                                 cpu_to_le16((response_offset >> 2));
1061                 utrdlp[i].prd_table_offset =
1062                                 cpu_to_le16((prdt_offset >> 2));
1063                 utrdlp[i].response_upiu_length =
1064                                 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
1065
1066                 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
1067                 hba->lrb[i].ucd_req_ptr =
1068                         (struct utp_upiu_req *)(cmd_descp + i);
1069                 hba->lrb[i].ucd_rsp_ptr =
1070                         (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
1071                 hba->lrb[i].ucd_prdt_ptr =
1072                         (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
1073         }
1074 }
1075
1076 /**
1077  * ufshcd_dme_link_startup - Notify Unipro to perform link startup
1078  * @hba: per adapter instance
1079  *
1080  * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
1081  * in order to initialize the Unipro link startup procedure.
1082  * Once the Unipro links are up, the device connected to the controller
1083  * is detected.
1084  *
1085  * Returns 0 on success, non-zero value on failure
1086  */
1087 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
1088 {
1089         struct uic_command uic_cmd = {0};
1090         int ret;
1091
1092         uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
1093
1094         ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
1095         if (ret)
1096                 dev_err(hba->dev,
1097                         "dme-link-startup: error code %d\n", ret);
1098         return ret;
1099 }
1100
1101 /**
1102  * ufshcd_make_hba_operational - Make UFS controller operational
1103  * @hba: per adapter instance
1104  *
1105  * To bring UFS host controller to operational state,
1106  * 1. Check if device is present
1107  * 2. Enable required interrupts
1108  * 3. Configure interrupt aggregation
1109  * 4. Program UTRL and UTMRL base address registers
1110  * 5. Configure run-stop-registers
1111  *
1112  * Returns 0 on success, non-zero value on failure
1113  */
1114 static int ufshcd_make_hba_operational(struct ufs_hba *hba)
1115 {
1116         int err = 0;
1117         u32 reg;
1118
1119         /* check if device present */
1120         reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
1121         if (!ufshcd_is_device_present(reg)) {
1122                 dev_err(hba->dev, "cc: Device not present\n");
1123                 err = -ENXIO;
1124                 goto out;
1125         }
1126
1127         /* Enable required interrupts */
1128         ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
1129
1130         /* Configure interrupt aggregation */
1131         ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG);
1132
1133         /* Configure UTRL and UTMRL base address registers */
1134         ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
1135                         REG_UTP_TRANSFER_REQ_LIST_BASE_L);
1136         ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
1137                         REG_UTP_TRANSFER_REQ_LIST_BASE_H);
1138         ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
1139                         REG_UTP_TASK_REQ_LIST_BASE_L);
1140         ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
1141                         REG_UTP_TASK_REQ_LIST_BASE_H);
1142
1143         /*
1144          * UCRDY, UTMRLRDY and UTRLRDY bits must be 1
1145          * DEI, HEI bits must be 0
1146          */
1147         if (!(ufshcd_get_lists_status(reg))) {
1148                 ufshcd_enable_run_stop_reg(hba);
1149         } else {
1150                 dev_err(hba->dev,
1151                         "Host controller not ready to process requests");
1152                 err = -EIO;
1153                 goto out;
1154         }
1155
1156         if (hba->ufshcd_state == UFSHCD_STATE_RESET)
1157                 scsi_unblock_requests(hba->host);
1158
1159         hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
1160
1161 out:
1162         return err;
1163 }
1164
1165 /**
1166  * ufshcd_hba_enable - initialize the controller
1167  * @hba: per adapter instance
1168  *
1169  * The controller resets itself and controller firmware initialization
1170  * sequence kicks off. When controller is ready it will set
1171  * the Host Controller Enable bit to 1.
1172  *
1173  * Returns 0 on success, non-zero value on failure
1174  */
1175 static int ufshcd_hba_enable(struct ufs_hba *hba)
1176 {
1177         int retry;
1178
1179         /*
1180          * The msleep(1) and msleep(5) calls in this function might actually
1181          * sleep for up to 20 ms, but they were necessary to send the UFS FPGA
1182          * to reset mode during development and testing of this driver. msleep
1183          * can be changed to mdelay and the retry count reduced per controller.
1184          */
1185         if (!ufshcd_is_hba_active(hba)) {
1186
1187                 /* change controller state to "reset state" */
1188                 ufshcd_hba_stop(hba);
1189
1190                 /*
1191                  * This delay is based on the testing done with UFS host
1192                  * controller FPGA. The delay can be changed based on the
1193                  * host controller used.
1194                  */
1195                 msleep(5);
1196         }
1197
1198         /* start controller initialization sequence */
1199         ufshcd_hba_start(hba);
1200
1201         /*
1202          * To initialize a UFS host controller HCE bit must be set to 1.
1203          * During initialization the HCE bit value changes from 1->0->1.
1204          * When the host controller completes initialization sequence
1205          * it sets the value of HCE bit to 1. The same HCE bit is read back
1206          * to check if the controller has completed initialization sequence.
1207          * So without this delay, the value HCE = 1 set by the previous
1208          * instruction might be read back.
1209          * This delay can be changed based on the controller.
1210          */
1211         msleep(1);
1212
1213         /* wait for the host controller to complete initialization */
1214         retry = 10;
1215         while (ufshcd_is_hba_active(hba)) {
1216                 if (retry) {
1217                         retry--;
1218                 } else {
1219                         dev_err(hba->dev,
1220                                 "Controller enable failed\n");
1221                         return -EIO;
1222                 }
1223                 msleep(5);
1224         }
1225         return 0;
1226 }
1227
1228 /**
1229  * ufshcd_link_startup - Initialize unipro link startup
1230  * @hba: per adapter instance
1231  *
1232  * Returns 0 for success, non-zero in case of failure
1233  */
1234 static int ufshcd_link_startup(struct ufs_hba *hba)
1235 {
1236         int ret;
1237
1238         /* enable UIC related interrupts */
1239         ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
1240
1241         ret = ufshcd_dme_link_startup(hba);
1242         if (ret)
1243                 goto out;
1244
1245         ret = ufshcd_make_hba_operational(hba);
1246
1247 out:
1248         if (ret)
1249                 dev_err(hba->dev, "link startup failed %d\n", ret);
1250         return ret;
1251 }
1252
1253 /**
1254  * ufshcd_verify_dev_init() - Verify device initialization
1255  * @hba: per-adapter instance
1256  *
1257  * Send NOP OUT UPIU and wait for NOP IN response to check whether the
1258  * device Transport Protocol (UTP) layer is ready after a reset.
1259  * If the UTP layer at the device side is not initialized, it may
1260  * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
1261  * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
1262  */
1263 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
1264 {
1265         int err = 0;
1266         int retries;
1267
1268         mutex_lock(&hba->dev_cmd.lock);
1269         for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
1270                 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
1271                                                NOP_OUT_TIMEOUT);
1272
1273                 if (!err || err == -ETIMEDOUT)
1274                         break;
1275
1276                 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
1277         }
1278         mutex_unlock(&hba->dev_cmd.lock);
1279
1280         if (err)
1281                 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
1282         return err;
1283 }
1284
1285 /**
1286  * ufshcd_do_reset - reset the host controller
1287  * @hba: per adapter instance
1288  *
1289  * Returns SUCCESS/FAILED
1290  */
1291 static int ufshcd_do_reset(struct ufs_hba *hba)
1292 {
1293         struct ufshcd_lrb *lrbp;
1294         unsigned long flags;
1295         int tag;
1296
1297         /* block commands from midlayer */
1298         scsi_block_requests(hba->host);
1299
1300         spin_lock_irqsave(hba->host->host_lock, flags);
1301         hba->ufshcd_state = UFSHCD_STATE_RESET;
1302
1303         /* send controller to reset state */
1304         ufshcd_hba_stop(hba);
1305         spin_unlock_irqrestore(hba->host->host_lock, flags);
1306
1307         /* abort outstanding commands */
1308         for (tag = 0; tag < hba->nutrs; tag++) {
1309                 if (test_bit(tag, &hba->outstanding_reqs)) {
1310                         lrbp = &hba->lrb[tag];
1311                         if (lrbp->cmd) {
1312                                 scsi_dma_unmap(lrbp->cmd);
1313                                 lrbp->cmd->result = DID_RESET << 16;
1314                                 lrbp->cmd->scsi_done(lrbp->cmd);
1315                                 lrbp->cmd = NULL;
1316                                 clear_bit_unlock(tag, &hba->lrb_in_use);
1317                         }
1318                 }
1319         }
1320
1321         /* complete device management command */
1322         if (hba->dev_cmd.complete)
1323                 complete(hba->dev_cmd.complete);
1324
1325         /* clear outstanding request/task bit maps */
1326         hba->outstanding_reqs = 0;
1327         hba->outstanding_tasks = 0;
1328
1329         /* Host controller enable */
1330         if (ufshcd_hba_enable(hba)) {
1331                 dev_err(hba->dev,
1332                         "Reset: Controller initialization failed\n");
1333                 return FAILED;
1334         }
1335
1336         if (ufshcd_link_startup(hba)) {
1337                 dev_err(hba->dev,
1338                         "Reset: Link start-up failed\n");
1339                 return FAILED;
1340         }
1341
1342         return SUCCESS;
1343 }
1344
1345 /**
1346  * ufshcd_slave_alloc - handle initial SCSI device configurations
1347  * @sdev: pointer to SCSI device
1348  *
1349  * Returns success
1350  */
1351 static int ufshcd_slave_alloc(struct scsi_device *sdev)
1352 {
1353         struct ufs_hba *hba;
1354
1355         hba = shost_priv(sdev->host);
1356         sdev->tagged_supported = 1;
1357
1358         /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
1359         sdev->use_10_for_ms = 1;
1360         scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
1361
1362         /*
1363          * Inform SCSI Midlayer that the LUN queue depth is the same as the
1364          * controller queue depth. If a LUN queue depth is less than the
1365          * controller queue depth and if the LUN reports
1366          * SAM_STAT_TASK_SET_FULL, the LUN queue depth will be adjusted
1367          * with scsi_adjust_queue_depth.
1368          */
1369         scsi_activate_tcq(sdev, hba->nutrs);
1370         return 0;
1371 }
1372
1373 /**
1374  * ufshcd_slave_destroy - remove SCSI device configurations
1375  * @sdev: pointer to SCSI device
1376  */
1377 static void ufshcd_slave_destroy(struct scsi_device *sdev)
1378 {
1379         struct ufs_hba *hba;
1380
1381         hba = shost_priv(sdev->host);
1382         scsi_deactivate_tcq(sdev, hba->nutrs);
1383 }
1384
1385 /**
1386  * ufshcd_task_req_compl - handle task management request completion
1387  * @hba: per adapter instance
1388  * @index: index of the completed request
1389  *
1390  * Returns SUCCESS/FAILED
1391  */
1392 static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
1393 {
1394         struct utp_task_req_desc *task_req_descp;
1395         struct utp_upiu_task_rsp *task_rsp_upiup;
1396         unsigned long flags;
1397         int ocs_value;
1398         int task_result;
1399
1400         spin_lock_irqsave(hba->host->host_lock, flags);
1401
1402         /* Clear completed tasks from outstanding_tasks */
1403         __clear_bit(index, &hba->outstanding_tasks);
1404
1405         task_req_descp = hba->utmrdl_base_addr;
1406         ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
1407
1408         if (ocs_value == OCS_SUCCESS) {
1409                 task_rsp_upiup = (struct utp_upiu_task_rsp *)
1410                                 task_req_descp[index].task_rsp_upiu;
1411                 task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
1412                 task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
1413
1414                 if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
1415                     task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
1416                         task_result = FAILED;
1417                 else
1418                         task_result = SUCCESS;
1419         } else {
1420                 task_result = FAILED;
1421                 dev_err(hba->dev,
1422                         "trc: Invalid ocs = %x\n", ocs_value);
1423         }
1424         spin_unlock_irqrestore(hba->host->host_lock, flags);
1425         return task_result;
1426 }
1427
1428 /**
1429  * ufshcd_adjust_lun_qdepth - Update LUN queue depth if device responds with
1430  *                            SAM_STAT_TASK_SET_FULL SCSI command status.
1431  * @cmd: pointer to SCSI command
1432  */
1433 static void ufshcd_adjust_lun_qdepth(struct scsi_cmnd *cmd)
1434 {
1435         struct ufs_hba *hba;
1436         int i;
1437         int lun_qdepth = 0;
1438
1439         hba = shost_priv(cmd->device->host);
1440
1441         /*
1442          * LUN queue depth can be obtained by counting outstanding commands
1443          * on the LUN.
1444          */
1445         for (i = 0; i < hba->nutrs; i++) {
1446                 if (test_bit(i, &hba->outstanding_reqs)) {
1447
1448                         /*
1449                          * Check if the outstanding command belongs
1450                          * to the LUN which reported SAM_STAT_TASK_SET_FULL.
1451                          */
1452                         if (cmd->device->lun == hba->lrb[i].lun)
1453                                 lun_qdepth++;
1454                 }
1455         }
1456
1457         /*
1458          * LUN queue depth will be the total number of outstanding commands,
1459          * except the one for which the LUN reported SAM_STAT_TASK_SET_FULL.
1460          */
1461         scsi_adjust_queue_depth(cmd->device, MSG_SIMPLE_TAG, lun_qdepth - 1);
1462 }
1463
1464 /**
1465  * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
1466  * @lrb: pointer to local reference block of completed command
1467  * @scsi_status: SCSI command status
1468  *
1469  * Returns value based on the SCSI command status
1470  */
1471 static inline int
1472 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
1473 {
1474         int result = 0;
1475
1476         switch (scsi_status) {
1477         case SAM_STAT_GOOD:
1478                 result |= DID_OK << 16 |
1479                           COMMAND_COMPLETE << 8 |
1480                           SAM_STAT_GOOD;
1481                 break;
1482         case SAM_STAT_CHECK_CONDITION:
1483                 result |= DID_OK << 16 |
1484                           COMMAND_COMPLETE << 8 |
1485                           SAM_STAT_CHECK_CONDITION;
1486                 ufshcd_copy_sense_data(lrbp);
1487                 break;
1488         case SAM_STAT_BUSY:
1489                 result |= SAM_STAT_BUSY;
1490                 break;
1491         case SAM_STAT_TASK_SET_FULL:
1492
1493                 /*
1494                  * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
1495                  * depth needs to be adjusted to the exact number of
1496                  * outstanding commands the LUN can handle at any given time.
1497                  */
1498                 ufshcd_adjust_lun_qdepth(lrbp->cmd);
1499                 result |= SAM_STAT_TASK_SET_FULL;
1500                 break;
1501         case SAM_STAT_TASK_ABORTED:
1502                 result |= SAM_STAT_TASK_ABORTED;
1503                 break;
1504         default:
1505                 result |= DID_ERROR << 16;
1506                 break;
1507         } /* end of switch */
1508
1509         return result;
1510 }
1511
1512 /**
1513  * ufshcd_transfer_rsp_status - Get overall status of the response
1514  * @hba: per adapter instance
1515  * @lrb: pointer to local reference block of completed command
1516  *
1517  * Returns result of the command to notify SCSI midlayer
1518  */
1519 static inline int
1520 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1521 {
1522         int result = 0;
1523         int scsi_status;
1524         int ocs;
1525
1526         /* overall command status of utrd */
1527         ocs = ufshcd_get_tr_ocs(lrbp);
1528
1529         switch (ocs) {
1530         case OCS_SUCCESS:
1531                 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
1532
1533                 switch (result) {
1534                 case UPIU_TRANSACTION_RESPONSE:
1535                         /*
1536                          * get the response UPIU result to extract
1537                          * the SCSI command status
1538                          */
1539                         result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
1540
1541                         /*
1542                          * get the result based on SCSI status response
1543                          * to notify the SCSI midlayer of the command status
1544                          */
1545                         scsi_status = result & MASK_SCSI_STATUS;
1546                         result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
1547                         break;
1548                 case UPIU_TRANSACTION_REJECT_UPIU:
1549                         /* TODO: handle Reject UPIU Response */
1550                         result = DID_ERROR << 16;
1551                         dev_err(hba->dev,
1552                                 "Reject UPIU not fully implemented\n");
1553                         break;
1554                 default:
                        dev_err(hba->dev,
                                "Unexpected request response code = %x\n",
                                result);
                        result = DID_ERROR << 16;
1559                         break;
1560                 }
1561                 break;
1562         case OCS_ABORTED:
1563                 result |= DID_ABORT << 16;
1564                 break;
1565         case OCS_INVALID_CMD_TABLE_ATTR:
1566         case OCS_INVALID_PRDT_ATTR:
1567         case OCS_MISMATCH_DATA_BUF_SIZE:
1568         case OCS_MISMATCH_RESP_UPIU_SIZE:
1569         case OCS_PEER_COMM_FAILURE:
1570         case OCS_FATAL_ERROR:
1571         default:
1572                 result |= DID_ERROR << 16;
1573                 dev_err(hba->dev,
1574                 "OCS error from controller = %x\n", ocs);
1575                 break;
1576         } /* end of switch */
1577
1578         return result;
1579 }
1580
1581 /**
1582  * ufshcd_uic_cmd_compl - handle completion of uic command
1583  * @hba: per adapter instance
1584  */
1585 static void ufshcd_uic_cmd_compl(struct ufs_hba *hba)
1586 {
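        /*
         * The controller latches the UIC command result in the UIC
         * command argument registers on completion; fold it into the
         * active command's argument2 before waking whoever is sleeping
         * on the command's 'done' completion.
         */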
1587         if (hba->active_uic_cmd) {
1588                 hba->active_uic_cmd->argument2 |=
1589                         ufshcd_get_uic_cmd_result(hba);
1590                 complete(&hba->active_uic_cmd->done);
1591         }
1592 }
1593
1594 /**
1595  * ufshcd_transfer_req_compl - handle SCSI and query command completion
1596  * @hba: per adapter instance
1597  */
1598 static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
1599 {
1600         struct ufshcd_lrb *lrbp;
1601         struct scsi_cmnd *cmd;
1602         unsigned long completed_reqs;
1603         u32 tr_doorbell;
1604         int result;
1605         int index;
1606         bool int_aggr_reset = false;
1607
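        /*
         * A doorbell bit stays set for as long as the controller owns the
         * corresponding request, so XOR-ing the doorbell register with the
         * driver's outstanding_reqs mask yields exactly the requests that
         * have completed since the last invocation.
         */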
1608         tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1609         completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
1610
1611         for (index = 0; index < hba->nutrs; index++) {
1612                 if (test_bit(index, &completed_reqs)) {
1613                         lrbp = &hba->lrb[index];
1614                         cmd = lrbp->cmd;
1615                         /*
1616                          * Don't skip resetting interrupt aggregation counters
1617                          * if a regular command is present.
1618                          */
1619                         int_aggr_reset |= !lrbp->intr_cmd;
1620
1621                         if (cmd) {
1622                                 result = ufshcd_transfer_rsp_status(hba, lrbp);
1623                                 scsi_dma_unmap(cmd);
1624                                 cmd->result = result;
1625                                 /* Mark completed command as NULL in LRB */
1626                                 lrbp->cmd = NULL;
1627                                 clear_bit_unlock(index, &hba->lrb_in_use);
1628                                 /* Do not touch lrbp after scsi done */
1629                                 cmd->scsi_done(cmd);
1630                         } else if (lrbp->command_type ==
1631                                         UTP_CMD_TYPE_DEV_MANAGE) {
1632                                 if (hba->dev_cmd.complete)
1633                                         complete(hba->dev_cmd.complete);
1634                         }
1635                 } /* end of if */
1636         } /* end of for */
1637
1638         /* clear corresponding bits of completed commands */
1639         hba->outstanding_reqs ^= completed_reqs;
1640
1641         /* we might have free'd some tags above */
1642         wake_up(&hba->dev_cmd.tag_wq);
1643
1644         /* Reset interrupt aggregation counters */
1645         if (int_aggr_reset)
1646                 ufshcd_config_int_aggr(hba, INT_AGGR_RESET);
1647 }
1648
1649 /**
1650  * ufshcd_fatal_err_handler - handle fatal errors
 * @work: pointer to the fatal error handler work structure
1652  */
1653 static void ufshcd_fatal_err_handler(struct work_struct *work)
1654 {
1655         struct ufs_hba *hba;
1656         hba = container_of(work, struct ufs_hba, feh_workq);
1657
1658         /* check if reset is already in progress */
1659         if (hba->ufshcd_state != UFSHCD_STATE_RESET)
1660                 ufshcd_do_reset(hba);
1661 }
1662
1663 /**
1664  * ufshcd_err_handler - Check for fatal errors
 * @hba: per adapter instance
1666  */
1667 static void ufshcd_err_handler(struct ufs_hba *hba)
1668 {
1669         u32 reg;
1670
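        /*
         * Controller-fatal errors, as well as a PA_INIT error reported by
         * the data link layer, are recovered by a full host reset that is
         * deferred to the fatal error handler work queue.
         */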
1671         if (hba->errors & INT_FATAL_ERRORS)
1672                 goto fatal_eh;
1673
1674         if (hba->errors & UIC_ERROR) {
1675                 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
1676                 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
1677                         goto fatal_eh;
1678         }
1679         return;
1680 fatal_eh:
1681         hba->ufshcd_state = UFSHCD_STATE_ERROR;
1682         schedule_work(&hba->feh_workq);
1683 }
1684
1685 /**
1686  * ufshcd_tmc_handler - handle task management function completion
1687  * @hba: per adapter instance
1688  */
1689 static void ufshcd_tmc_handler(struct ufs_hba *hba)
1690 {
1691         u32 tm_doorbell;
1692
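        /*
         * As with transfer requests, XOR-ing the task management doorbell
         * with outstanding_tasks marks the slots that have completed;
         * ufshcd_issue_tm_cmd() sleeps on these bits in tm_condition.
         */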
1693         tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1694         hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
1695         wake_up_interruptible(&hba->ufshcd_tm_wait_queue);
1696 }
1697
1698 /**
1699  * ufshcd_sl_intr - Interrupt service routine
1700  * @hba: per adapter instance
1701  * @intr_status: contains interrupts generated by the controller
1702  */
1703 static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
1704 {
1705         hba->errors = UFSHCD_ERROR_MASK & intr_status;
1706         if (hba->errors)
1707                 ufshcd_err_handler(hba);
1708
1709         if (intr_status & UIC_COMMAND_COMPL)
1710                 ufshcd_uic_cmd_compl(hba);
1711
1712         if (intr_status & UTP_TASK_REQ_COMPL)
1713                 ufshcd_tmc_handler(hba);
1714
1715         if (intr_status & UTP_TRANSFER_REQ_COMPL)
1716                 ufshcd_transfer_req_compl(hba);
1717 }
1718
1719 /**
1720  * ufshcd_intr - Main interrupt service routine
1721  * @irq: irq number
1722  * @__hba: pointer to adapter instance
1723  *
 * Returns IRQ_HANDLED if a valid interrupt was serviced,
 *         IRQ_NONE if the interrupt did not originate from this controller
1726  */
1727 static irqreturn_t ufshcd_intr(int irq, void *__hba)
1728 {
1729         u32 intr_status;
1730         irqreturn_t retval = IRQ_NONE;
1731         struct ufs_hba *hba = __hba;
1732
1733         spin_lock(hba->host->host_lock);
1734         intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
1735
1736         if (intr_status) {
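                /*
                 * Writing back the bits that were just read acknowledges
                 * (clears) only the events serviced in this pass; anything
                 * raised afterwards will trigger a fresh interrupt.
                 */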
1737                 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
1738                 ufshcd_sl_intr(hba, intr_status);
1739                 retval = IRQ_HANDLED;
1740         }
1741         spin_unlock(hba->host->host_lock);
1742         return retval;
1743 }
1744
1745 /**
1746  * ufshcd_issue_tm_cmd - issues task management commands to controller
1747  * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 * @tm_function: task management function opcode
1749  *
1750  * Returns SUCCESS/FAILED
1751  */
1752 static int
1753 ufshcd_issue_tm_cmd(struct ufs_hba *hba,
1754                     struct ufshcd_lrb *lrbp,
1755                     u8 tm_function)
1756 {
1757         struct utp_task_req_desc *task_req_descp;
1758         struct utp_upiu_task_req *task_req_upiup;
1759         struct Scsi_Host *host;
1760         unsigned long flags;
1761         int free_slot = 0;
1762         int err;
1763
1764         host = hba->host;
1765
1766         spin_lock_irqsave(host->host_lock, flags);
1767
1768         /* If task management queue is full */
1769         free_slot = ufshcd_get_tm_free_slot(hba);
1770         if (free_slot >= hba->nutmrs) {
1771                 spin_unlock_irqrestore(host->host_lock, flags);
1772                 dev_err(hba->dev, "Task management queue full\n");
1773                 err = FAILED;
1774                 goto out;
1775         }
1776
1777         task_req_descp = hba->utmrdl_base_addr;
1778         task_req_descp += free_slot;
1779
1780         /* Configure task request descriptor */
1781         task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
1782         task_req_descp->header.dword_2 =
1783                         cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
1784
1785         /* Configure task request UPIU */
1786         task_req_upiup =
1787                 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
1788         task_req_upiup->header.dword_0 =
1789                 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
1790                                               lrbp->lun, lrbp->task_tag);
1791         task_req_upiup->header.dword_1 =
1792                 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
1793
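        /*
         * The task request UPIU carries its parameters big-endian on the
         * wire, hence the cpu_to_be32() conversions below.
         */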
1794         task_req_upiup->input_param1 = lrbp->lun;
1795         task_req_upiup->input_param1 =
1796                 cpu_to_be32(task_req_upiup->input_param1);
1797         task_req_upiup->input_param2 = lrbp->task_tag;
1798         task_req_upiup->input_param2 =
1799                 cpu_to_be32(task_req_upiup->input_param2);
1800
1801         /* send command to the controller */
1802         __set_bit(free_slot, &hba->outstanding_tasks);
1803         ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
1804
1805         spin_unlock_irqrestore(host->host_lock, flags);
1806
1807         /* wait until the task management command is completed */
        err = wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue,
                        test_bit(free_slot, &hba->tm_condition) != 0,
                        60 * HZ);
1813         if (!err) {
1814                 dev_err(hba->dev,
                        "Task management command timed out\n");
1816                 err = FAILED;
1817                 goto out;
1818         }
1819         clear_bit(free_slot, &hba->tm_condition);
1820         err = ufshcd_task_req_compl(hba, free_slot);
1821 out:
1822         return err;
1823 }
1824
1825 /**
1826  * ufshcd_device_reset - reset device and abort all the pending commands
1827  * @cmd: SCSI command pointer
1828  *
1829  * Returns SUCCESS/FAILED
1830  */
1831 static int ufshcd_device_reset(struct scsi_cmnd *cmd)
1832 {
1833         struct Scsi_Host *host;
1834         struct ufs_hba *hba;
1835         unsigned int tag;
1836         u32 pos;
1837         int err;
1838
1839         host = cmd->device->host;
1840         hba = shost_priv(host);
1841         tag = cmd->request->tag;
1842
1843         err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET);
1844         if (err == FAILED)
1845                 goto out;
1846
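        /*
         * The logical unit reset succeeded, so every transfer request
         * still outstanding on the same LUN is dead: clear its doorbell
         * bit and complete it back to the SCSI midlayer as aborted.
         */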
1847         for (pos = 0; pos < hba->nutrs; pos++) {
1848                 if (test_bit(pos, &hba->outstanding_reqs) &&
1849                     (hba->lrb[tag].lun == hba->lrb[pos].lun)) {
1850
1851                         /* clear the respective UTRLCLR register bit */
1852                         ufshcd_utrl_clear(hba, pos);
1853
1854                         clear_bit(pos, &hba->outstanding_reqs);
1855
                        if (hba->lrb[pos].cmd) {
                                struct scsi_cmnd *scmd = hba->lrb[pos].cmd;

                                scsi_dma_unmap(scmd);
                                scmd->result = DID_ABORT << 16;
                                /* Mark completed command as NULL in LRB */
                                hba->lrb[pos].cmd = NULL;
                                clear_bit_unlock(pos, &hba->lrb_in_use);
                                wake_up(&hba->dev_cmd.tag_wq);
                                /*
                                 * Complete the command that was actually
                                 * aborted, not the command that triggered
                                 * this device reset.
                                 */
                                scmd->scsi_done(scmd);
                        }
1865                 }
1866         } /* end of for */
1867 out:
1868         return err;
1869 }
1870
1871 /**
1872  * ufshcd_host_reset - Main reset function registered with scsi layer
1873  * @cmd: SCSI command pointer
1874  *
1875  * Returns SUCCESS/FAILED
1876  */
1877 static int ufshcd_host_reset(struct scsi_cmnd *cmd)
1878 {
1879         struct ufs_hba *hba;
1880
1881         hba = shost_priv(cmd->device->host);
1882
1883         if (hba->ufshcd_state == UFSHCD_STATE_RESET)
1884                 return SUCCESS;
1885
1886         return ufshcd_do_reset(hba);
1887 }
1888
1889 /**
1890  * ufshcd_abort - abort a specific command
1891  * @cmd: SCSI command pointer
1892  *
1893  * Returns SUCCESS/FAILED
1894  */
1895 static int ufshcd_abort(struct scsi_cmnd *cmd)
1896 {
1897         struct Scsi_Host *host;
1898         struct ufs_hba *hba;
1899         unsigned long flags;
1900         unsigned int tag;
1901         int err;
1902
1903         host = cmd->device->host;
1904         hba = shost_priv(host);
1905         tag = cmd->request->tag;
1906
1907         spin_lock_irqsave(host->host_lock, flags);
1908
1909         /* check if command is still pending */
1910         if (!(test_bit(tag, &hba->outstanding_reqs))) {
1911                 err = FAILED;
1912                 spin_unlock_irqrestore(host->host_lock, flags);
1913                 goto out;
1914         }
1915         spin_unlock_irqrestore(host->host_lock, flags);
1916
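        /*
         * The command is still outstanding; ask the device to abort it
         * with an ABORT TASK task management request before reclaiming
         * the transfer request slot locally.
         */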
1917         err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK);
1918         if (err == FAILED)
1919                 goto out;
1920
1921         scsi_dma_unmap(cmd);
1922
1923         spin_lock_irqsave(host->host_lock, flags);
1924
1925         /* clear the respective UTRLCLR register bit */
1926         ufshcd_utrl_clear(hba, tag);
1927
1928         __clear_bit(tag, &hba->outstanding_reqs);
1929         hba->lrb[tag].cmd = NULL;
1930         spin_unlock_irqrestore(host->host_lock, flags);
1931
1932         clear_bit_unlock(tag, &hba->lrb_in_use);
1933         wake_up(&hba->dev_cmd.tag_wq);
1934 out:
1935         return err;
1936 }
1937
1938 /**
1939  * ufshcd_async_scan - asynchronous execution for link startup
 * @data: pointer to the per-adapter instance (struct ufs_hba)
 * @cookie: async cookie, unused here
1942  */
1943 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
1944 {
1945         struct ufs_hba *hba = (struct ufs_hba *)data;
1946         int ret;
1947
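        /*
         * Link bring-up and the NOP OUT/NOP IN handshake with the device
         * can take a while, so they run from this async context instead of
         * blocking the probe path; the SCSI scan starts only once the
         * device has answered.
         */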
1948         ret = ufshcd_link_startup(hba);
1949         if (ret)
1950                 goto out;
1951
1952         ret = ufshcd_verify_dev_init(hba);
1953         if (ret)
1954                 goto out;
1955
1956         scsi_scan_host(hba->host);
1957 out:
1958         return;
1959 }
1960
1961 static struct scsi_host_template ufshcd_driver_template = {
1962         .module                 = THIS_MODULE,
1963         .name                   = UFSHCD,
1964         .proc_name              = UFSHCD,
1965         .queuecommand           = ufshcd_queuecommand,
1966         .slave_alloc            = ufshcd_slave_alloc,
1967         .slave_destroy          = ufshcd_slave_destroy,
1968         .eh_abort_handler       = ufshcd_abort,
1969         .eh_device_reset_handler = ufshcd_device_reset,
1970         .eh_host_reset_handler  = ufshcd_host_reset,
1971         .this_id                = -1,
1972         .sg_tablesize           = SG_ALL,
1973         .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
1974         .can_queue              = UFSHCD_CAN_QUEUE,
1975 };
1976
1977 /**
1978  * ufshcd_suspend - suspend power management function
1979  * @hba: per adapter instance
1980  * @state: power state
1981  *
1982  * Returns -ENOSYS
1983  */
1984 int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state)
1985 {
1986         /*
1987          * TODO:
1988          * 1. Block SCSI requests from SCSI midlayer
1989          * 2. Change the internal driver state to non operational
1990          * 3. Set UTRLRSR and UTMRLRSR bits to zero
1991          * 4. Wait until outstanding commands are completed
1992          * 5. Set HCE to zero to send the UFS host controller to reset state
1993          */
1994
1995         return -ENOSYS;
1996 }
1997 EXPORT_SYMBOL_GPL(ufshcd_suspend);
1998
1999 /**
2000  * ufshcd_resume - resume power management function
2001  * @hba: per adapter instance
2002  *
2003  * Returns -ENOSYS
2004  */
2005 int ufshcd_resume(struct ufs_hba *hba)
2006 {
2007         /*
2008          * TODO:
2009          * 1. Set HCE to 1, to start the UFS host controller
2010          * initialization process
2011          * 2. Set UTRLRSR and UTMRLRSR bits to 1
2012          * 3. Change the internal driver state to operational
2013          * 4. Unblock SCSI requests from SCSI midlayer
2014          */
2015
2016         return -ENOSYS;
2017 }
2018 EXPORT_SYMBOL_GPL(ufshcd_resume);
2019
2020 /**
 * ufshcd_remove - de-allocate SCSI host and host memory space
 *              data structures
 * @hba: per adapter instance
2024  */
2025 void ufshcd_remove(struct ufs_hba *hba)
2026 {
2027         /* disable interrupts */
2028         ufshcd_disable_intr(hba, hba->intr_mask);
2029         ufshcd_hba_stop(hba);
2030
2031         scsi_remove_host(hba->host);
2032         scsi_host_put(hba->host);
2033 }
2034 EXPORT_SYMBOL_GPL(ufshcd_remove);
2035
2036 /**
2037  * ufshcd_init - Driver initialization routine
2038  * @dev: pointer to device handle
2039  * @hba_handle: driver private handle
2040  * @mmio_base: base register address
2041  * @irq: Interrupt line of device
2042  * Returns 0 on success, non-zero value on failure
2043  */
2044 int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
2045                  void __iomem *mmio_base, unsigned int irq)
2046 {
2047         struct Scsi_Host *host;
2048         struct ufs_hba *hba;
2049         int err;
2050
2051         if (!dev) {
                dev_err(dev, "Invalid reference: dev is NULL\n");
2054                 err = -ENODEV;
2055                 goto out_error;
2056         }
2057
2058         if (!mmio_base) {
                dev_err(dev, "Invalid reference: mmio_base is NULL\n");
2061                 err = -ENODEV;
2062                 goto out_error;
2063         }
2064
2065         host = scsi_host_alloc(&ufshcd_driver_template,
2066                                 sizeof(struct ufs_hba));
2067         if (!host) {
2068                 dev_err(dev, "scsi_host_alloc failed\n");
2069                 err = -ENOMEM;
2070                 goto out_error;
2071         }
2072         hba = shost_priv(host);
2073         hba->host = host;
2074         hba->dev = dev;
2075         hba->mmio_base = mmio_base;
2076         hba->irq = irq;
2077
2078         /* Read capabilities registers */
2079         ufshcd_hba_capabilities(hba);
2080
2081         /* Get UFS version supported by the controller */
2082         hba->ufs_version = ufshcd_get_ufs_version(hba);
2083
2084         /* Get Interrupt bit mask per version */
2085         hba->intr_mask = ufshcd_get_intr_mask(hba);
2086
2087         /* Allocate memory for host memory space */
2088         err = ufshcd_memory_alloc(hba);
2089         if (err) {
2090                 dev_err(hba->dev, "Memory allocation failed\n");
2091                 goto out_disable;
2092         }
2093
2094         /* Configure LRB */
2095         ufshcd_host_memory_configure(hba);
2096
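        /* Export the controller's queueing limits to the SCSI midlayer */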
2097         host->can_queue = hba->nutrs;
2098         host->cmd_per_lun = hba->nutrs;
2099         host->max_id = UFSHCD_MAX_ID;
2100         host->max_lun = UFSHCD_MAX_LUNS;
2101         host->max_channel = UFSHCD_MAX_CHANNEL;
2102         host->unique_id = host->host_no;
2103         host->max_cmd_len = MAX_CDB_SIZE;
2104
        /* Initialize wait queue for task management */
2106         init_waitqueue_head(&hba->ufshcd_tm_wait_queue);
2107
2108         /* Initialize work queues */
2109         INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
2110
2111         /* Initialize UIC command mutex */
2112         mutex_init(&hba->uic_cmd_mutex);
2113
2114         /* Initialize mutex for device management commands */
2115         mutex_init(&hba->dev_cmd.lock);
2116
2117         /* Initialize device management tag acquire wait queue */
2118         init_waitqueue_head(&hba->dev_cmd.tag_wq);
2119
2120         /* IRQ registration */
2121         err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
2122         if (err) {
2123                 dev_err(hba->dev, "request irq failed\n");
2124                 goto out_disable;
2125         }
2126
2127         /* Enable SCSI tag mapping */
2128         err = scsi_init_shared_tag_map(host, host->can_queue);
2129         if (err) {
2130                 dev_err(hba->dev, "init shared queue failed\n");
2131                 goto out_disable;
2132         }
2133
2134         err = scsi_add_host(host, hba->dev);
2135         if (err) {
2136                 dev_err(hba->dev, "scsi_add_host failed\n");
2137                 goto out_disable;
2138         }
2139
2140         /* Host controller enable */
2141         err = ufshcd_hba_enable(hba);
2142         if (err) {
2143                 dev_err(hba->dev, "Host controller enable failed\n");
2144                 goto out_remove_scsi_host;
2145         }
2146
2147         *hba_handle = hba;
2148
2149         async_schedule(ufshcd_async_scan, hba);
2150
2151         return 0;
2152
2153 out_remove_scsi_host:
2154         scsi_remove_host(hba->host);
2155 out_disable:
2156         scsi_host_put(host);
2157 out_error:
2158         return err;
2159 }
2160 EXPORT_SYMBOL_GPL(ufshcd_init);
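
/*
 * Illustrative sketch only, not part of this file: a bus glue driver
 * (PCI or platform) is expected to map the UFSHCI register space, obtain
 * the interrupt line and hand both to ufshcd_init(); the matching remove
 * path simply calls ufshcd_remove(hba).  The probe function name and the
 * use of a platform device below are hypothetical.
 *
 *      static int my_ufs_probe(struct platform_device *pdev)
 *      {
 *              struct ufs_hba *hba;
 *              struct resource *res;
 *              void __iomem *mmio;
 *              int irq, err;
 *
 *              res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *              mmio = devm_ioremap_resource(&pdev->dev, res);
 *              if (IS_ERR(mmio))
 *                      return PTR_ERR(mmio);
 *
 *              irq = platform_get_irq(pdev, 0);
 *              if (irq < 0)
 *                      return irq;
 *
 *              err = ufshcd_init(&pdev->dev, &hba, mmio, irq);
 *              if (err)
 *                      return err;
 *
 *              platform_set_drvdata(pdev, hba);
 *              return 0;
 *      }
 */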
2161
2162 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
2163 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
2164 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
2165 MODULE_LICENSE("GPL");
2166 MODULE_VERSION(UFSHCD_DRIVER_VERSION);