1 /*
2  * Copyright (c) 2015 Linaro Ltd.
3  * Copyright (c) 2015 Hisilicon Limited.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  */
11
12 #include "hisi_sas.h"
13 #define DRV_NAME "hisi_sas"
14
15 #define DEV_IS_EXPANDER(type) \
16         ((type == SAS_EDGE_EXPANDER_DEVICE) || \
17         (type == SAS_FANOUT_EXPANDER_DEVICE))
18
19 #define DEV_IS_GONE(dev) \
20         ((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
21
22 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
23 {
24         return device->port->ha->lldd_ha;
25 }
26
27 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
28 {
29         void *bitmap = hisi_hba->slot_index_tags;
30
31         clear_bit(slot_idx, bitmap);
32 }
33
34 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
35 {
36         hisi_sas_slot_index_clear(hisi_hba, slot_idx);
37 }
38
39 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
40 {
41         void *bitmap = hisi_hba->slot_index_tags;
42
43         set_bit(slot_idx, bitmap);
44 }
45
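/*
 * Allocate a free command slot tag by scanning the tag bitmap.  Returns 0
 * and writes the tag to *slot_idx, or -SAS_QUEUE_FULL when every slot is
 * already in use.
 */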
46 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
47 {
48         unsigned int index;
49         void *bitmap = hisi_hba->slot_index_tags;
50
51         index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count);
52         if (index >= hisi_hba->slot_index_count)
53                 return -SAS_QUEUE_FULL;
54         hisi_sas_slot_index_set(hisi_hba, index);
55         *slot_idx = index;
56         return 0;
57 }
58
59 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
60 {
61         int i;
62
63         for (i = 0; i < hisi_hba->slot_index_count; ++i)
64                 hisi_sas_slot_index_clear(hisi_hba, i);
65 }
66
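/**
 * hisi_sas_slot_task_free() - release all resources held by a command slot
 * @hisi_hba: host controller instance
 * @task: the sas_task associated with the slot
 * @slot: the slot to tear down
 *
 * Unmaps the scatterlist (non-ATA only), returns the command table, status
 * buffer and SGE page to their DMA pools, unlinks the slot from its port
 * list and frees the slot tag.
 */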
67 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
68                              struct hisi_sas_slot *slot)
69 {
70         struct device *dev = &hisi_hba->pdev->dev;
71
72         if (!slot->task)
73                 return;
74
75         if (!sas_protocol_ata(task->task_proto))
76                 if (slot->n_elem)
77                         dma_unmap_sg(dev, task->scatter, slot->n_elem,
78                                      task->data_dir);
79
80         if (slot->command_table)
81                 dma_pool_free(hisi_hba->command_table_pool,
82                               slot->command_table, slot->command_table_dma);
83
84         if (slot->status_buffer)
85                 dma_pool_free(hisi_hba->status_buffer_pool,
86                               slot->status_buffer, slot->status_buffer_dma);
87
88         if (slot->sge_page)
89                 dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page,
90                               slot->sge_page_dma);
91
92         list_del_init(&slot->entry);
93         task->lldd_task = NULL;
94         slot->task = NULL;
95         slot->port = NULL;
96         hisi_sas_slot_index_free(hisi_hba, slot->idx);
97         memset(slot, 0, sizeof(*slot));
98 }
99 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
100
101 static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
102                                   struct hisi_sas_slot *slot)
103 {
104         return hisi_hba->hw->prep_smp(hisi_hba, slot);
105 }
106
107 static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
108                                   struct hisi_sas_slot *slot, int is_tmf,
109                                   struct hisi_sas_tmf_task *tmf)
110 {
111         return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
112 }
113
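/*
 * Build a command slot for @task: check that the port and device are
 * usable, DMA-map the scatterlist for non-ATA protocols, allocate a slot
 * tag and a delivery-queue entry, carve a status buffer and command table
 * out of the DMA pools, then hand off to the protocol-specific prep
 * routine (SMP or SSP; SATA/STP is rejected here).  On success *pass is
 * incremented so the caller knows there is work to deliver.
 */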
114 static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
115                               int is_tmf, struct hisi_sas_tmf_task *tmf,
116                               int *pass)
117 {
118         struct domain_device *device = task->dev;
119         struct hisi_sas_device *sas_dev = device->lldd_dev;
120         struct hisi_sas_port *port;
121         struct hisi_sas_slot *slot;
122         struct hisi_sas_cmd_hdr *cmd_hdr_base;
123         struct device *dev = &hisi_hba->pdev->dev;
124         int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
125
126         if (!device->port) {
127                 struct task_status_struct *ts = &task->task_status;
128
129                 ts->resp = SAS_TASK_UNDELIVERED;
130                 ts->stat = SAS_PHY_DOWN;
131                         /*
132                          * libsas will use dev->port, so don't
133                          * call task_done() for SATA devices.
134                          */
135                 if (device->dev_type != SAS_SATA_DEV)
136                         task->task_done(task);
137                 return 0;
138         }
139
140         if (DEV_IS_GONE(sas_dev)) {
141                 if (sas_dev)
142                         dev_info(dev, "task prep: device %llu not ready\n",
143                                  sas_dev->device_id);
144                 else
145                         dev_info(dev, "task prep: device %016llx not ready\n",
146                                  SAS_ADDR(device->sas_addr));
147
148                 rc = SAS_PHY_DOWN;
149                 return rc;
150         }
151         port = device->port->lldd_port;
152         if (port && !port->port_attached && !tmf) {
153                 if (sas_protocol_ata(task->task_proto)) {
154                         struct task_status_struct *ts = &task->task_status;
155
156                         dev_info(dev,
157                                  "task prep: SATA/STP port%d has no attached device\n",
158                                  device->port->id);
159                         ts->resp = SAS_TASK_COMPLETE;
160                         ts->stat = SAS_PHY_DOWN;
161                         task->task_done(task);
162                 } else {
163                         struct task_status_struct *ts = &task->task_status;
164
165                         dev_info(dev,
166                                  "task prep: SAS port%d has no attached device\n",
167                                  device->port->id);
168                         ts->resp = SAS_TASK_UNDELIVERED;
169                         ts->stat = SAS_PHY_DOWN;
170                         task->task_done(task);
171                 }
172                 return 0;
173         }
174
175         if (!sas_protocol_ata(task->task_proto)) {
176                 if (task->num_scatter) {
177                         n_elem = dma_map_sg(dev, task->scatter,
178                                             task->num_scatter, task->data_dir);
179                         if (!n_elem) {
180                                 rc = -ENOMEM;
181                                 goto prep_out;
182                         }
183                 }
184         } else
185                 n_elem = task->num_scatter;
186
187         rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
188         if (rc)
189                 goto err_out;
190         rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue,
191                                          &dlvry_queue_slot);
192         if (rc)
193                 goto err_out_tag;
194
195         slot = &hisi_hba->slot_info[slot_idx];
196         memset(slot, 0, sizeof(struct hisi_sas_slot));
197
198         slot->idx = slot_idx;
199         slot->n_elem = n_elem;
200         slot->dlvry_queue = dlvry_queue;
201         slot->dlvry_queue_slot = dlvry_queue_slot;
202         cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
203         slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
204         slot->task = task;
205         slot->port = port;
206         task->lldd_task = slot;
207
208         slot->status_buffer = dma_pool_alloc(hisi_hba->status_buffer_pool,
209                                              GFP_ATOMIC,
210                                              &slot->status_buffer_dma);
211         if (!slot->status_buffer) {
212                 rc = -ENOMEM;
213                 goto err_out_slot_buf;
214         }
215         memset(slot->status_buffer, 0, HISI_SAS_STATUS_BUF_SZ);
216
217         slot->command_table = dma_pool_alloc(hisi_hba->command_table_pool,
218                                              GFP_ATOMIC,
219                                              &slot->command_table_dma);
220         if (!slot->command_table) {
221                 rc = -ENOMEM;
222                 goto err_out_status_buf;
223         }
224         memset(slot->command_table, 0, HISI_SAS_COMMAND_TABLE_SZ);
225         memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
226
227         switch (task->task_proto) {
228         case SAS_PROTOCOL_SMP:
229                 rc = hisi_sas_task_prep_smp(hisi_hba, slot);
230                 break;
231         case SAS_PROTOCOL_SSP:
232                 rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf);
233                 break;
234         case SAS_PROTOCOL_SATA:
235         case SAS_PROTOCOL_STP:
236         case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
237         default:
238                 dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
239                         task->task_proto);
240                 rc = -EINVAL;
241                 break;
242         }
243
244         if (rc) {
245                 dev_err(dev, "task prep: rc = 0x%x\n", rc);
246                 if (slot->sge_page)
247                         goto err_out_sge;
248                 goto err_out_command_table;
249         }
250
251         list_add_tail(&slot->entry, &port->list);
252         spin_lock(&task->task_state_lock);
253         task->task_state_flags |= SAS_TASK_AT_INITIATOR;
254         spin_unlock(&task->task_state_lock);
255
256         hisi_hba->slot_prep = slot;
257
258         sas_dev->running_req++;
259         ++(*pass);
260
261         return 0;
262
263 err_out_sge:
264         dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page,
265                 slot->sge_page_dma);
266 err_out_command_table:
267         dma_pool_free(hisi_hba->command_table_pool, slot->command_table,
268                 slot->command_table_dma);
269 err_out_status_buf:
270         dma_pool_free(hisi_hba->status_buffer_pool, slot->status_buffer,
271                 slot->status_buffer_dma);
272 err_out_slot_buf:
273         /* Nothing to be done */
274 err_out_tag:
275         hisi_sas_slot_index_free(hisi_hba, slot_idx);
276 err_out:
277         dev_err(dev, "task prep: failed[%d]!\n", rc);
278         if (!sas_protocol_ata(task->task_proto))
279                 if (n_elem)
280                         dma_unmap_sg(dev, task->scatter, n_elem,
281                                      task->data_dir);
282 prep_out:
283         return rc;
284 }
285
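/*
 * Queue a task to the hardware.  hisi_hba->lock serialises slot
 * preparation with start_delivery; start_delivery() is only invoked when
 * at least one command was actually prepared.
 */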
286 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
287                               int is_tmf, struct hisi_sas_tmf_task *tmf)
288 {
289         u32 rc;
290         u32 pass = 0;
291         unsigned long flags;
292         struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
293         struct device *dev = &hisi_hba->pdev->dev;
294
295         /* protect task_prep and start_delivery sequence */
296         spin_lock_irqsave(&hisi_hba->lock, flags);
297         rc = hisi_sas_task_prep(task, hisi_hba, is_tmf, tmf, &pass);
298         if (rc)
299                 dev_err(dev, "task exec: failed[%d]!\n", rc);
300
301         if (likely(pass))
302                 hisi_hba->hw->start_delivery(hisi_hba);
303         spin_unlock_irqrestore(&hisi_hba->lock, flags);
304
305         return rc;
306 }
307
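/*
 * Report an attached phy up to libsas: signal OOB completion, copy the
 * negotiated link rates, build the identify frame for SAS phys (nothing
 * extra is needed for SATA) and raise PORTE_BYTES_DMAED.
 */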
308 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
309 {
310         struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
311         struct asd_sas_phy *sas_phy = &phy->sas_phy;
312         struct sas_ha_struct *sas_ha;
313
314         if (!phy->phy_attached)
315                 return;
316
317         sas_ha = &hisi_hba->sha;
318         sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
319
320         if (sas_phy->phy) {
321                 struct sas_phy *sphy = sas_phy->phy;
322
323                 sphy->negotiated_linkrate = sas_phy->linkrate;
324                 sphy->minimum_linkrate = phy->minimum_linkrate;
325                 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
326                 sphy->maximum_linkrate = phy->maximum_linkrate;
327         }
328
329         if (phy->phy_type & PORT_TYPE_SAS) {
330                 struct sas_identify_frame *id;
331
332                 id = (struct sas_identify_frame *)phy->frame_rcvd;
333                 id->dev_type = phy->identify.device_type;
334                 id->initiator_bits = SAS_PROTOCOL_ALL;
335                 id->target_bits = phy->identify.target_port_protocols;
336         } else if (phy->phy_type & PORT_TYPE_SATA) {
337                 /* Nothing to do for SATA phys */
338         }
339
340         sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
341         sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
342 }
343
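/*
 * Claim the first unused entry in hisi_hba->devices[] under the hba lock
 * and initialise it for @device.  Returns NULL when all
 * HISI_SAS_MAX_DEVICES entries are in use.
 */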
344 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
345 {
346         struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
347         struct hisi_sas_device *sas_dev = NULL;
348         int i;
349
350         spin_lock(&hisi_hba->lock);
351         for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
352                 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
353                         hisi_hba->devices[i].device_id = i;
354                         sas_dev = &hisi_hba->devices[i];
355                         sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
356                         sas_dev->dev_type = device->dev_type;
357                         sas_dev->hisi_hba = hisi_hba;
358                         sas_dev->sas_device = device;
359                         break;
360                 }
361         }
362         spin_unlock(&hisi_hba->lock);
363
364         return sas_dev;
365 }
366
367 static int hisi_sas_dev_found(struct domain_device *device)
368 {
369         struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
370         struct domain_device *parent_dev = device->parent;
371         struct hisi_sas_device *sas_dev;
372         struct device *dev = &hisi_hba->pdev->dev;
373
374         sas_dev = hisi_sas_alloc_dev(device);
375         if (!sas_dev) {
376                 dev_err(dev, "failed to alloc dev: only %d devices are supported\n",
377                         HISI_SAS_MAX_DEVICES);
378                 return -EINVAL;
379         }
380
381         device->lldd_dev = sas_dev;
382         hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
383
384         if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
385                 int phy_no;
386                 u8 phy_num = parent_dev->ex_dev.num_phys;
387                 struct ex_phy *phy;
388
389                 for (phy_no = 0; phy_no < phy_num; phy_no++) {
390                         phy = &parent_dev->ex_dev.ex_phy[phy_no];
391                         if (SAS_ADDR(phy->attached_sas_addr) ==
392                                 SAS_ADDR(device->sas_addr)) {
393                                 sas_dev->attached_phy = phy_no;
394                                 break;
395                         }
396                 }
397
398                 if (phy_no == phy_num) {
399                         dev_info(dev, "dev found: no attached "
400                                  "dev:%016llx at ex:%016llx\n",
401                                  SAS_ADDR(device->sas_addr),
402                                  SAS_ADDR(parent_dev->sas_addr));
403                         return -EINVAL;
404                 }
405         }
406
407         return 0;
408 }
409
410 static void hisi_sas_scan_start(struct Scsi_Host *shost)
411 {
412         struct hisi_hba *hisi_hba = shost_priv(shost);
413         int i;
414
415         for (i = 0; i < hisi_hba->n_phy; ++i)
416                 hisi_sas_bytes_dmaed(hisi_hba, i);
417
418         hisi_hba->scan_finished = 1;
419 }
420
421 static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
422 {
423         struct hisi_hba *hisi_hba = shost_priv(shost);
424         struct sas_ha_struct *sha = &hisi_hba->sha;
425
426         if (hisi_hba->scan_finished == 0)
427                 return 0;
428
429         sas_drain_work(sha);
430         return 1;
431 }
432
433 static void hisi_sas_phyup_work(struct work_struct *work)
434 {
435         struct hisi_sas_phy *phy =
436                 container_of(work, struct hisi_sas_phy, phyup_ws);
437         struct hisi_hba *hisi_hba = phy->hisi_hba;
438         struct asd_sas_phy *sas_phy = &phy->sas_phy;
439         int phy_no = sas_phy->id;
440
441         hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* sl_notify() may sleep */
442         hisi_sas_bytes_dmaed(hisi_hba, phy_no);
443 }
444
445 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
446 {
447         struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
448         struct asd_sas_phy *sas_phy = &phy->sas_phy;
449
450         phy->hisi_hba = hisi_hba;
451         phy->port = NULL;
452         init_timer(&phy->timer);
453         sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
454         sas_phy->class = SAS;
455         sas_phy->iproto = SAS_PROTOCOL_ALL;
456         sas_phy->tproto = 0;
457         sas_phy->type = PHY_TYPE_PHYSICAL;
458         sas_phy->role = PHY_ROLE_INITIATOR;
459         sas_phy->oob_mode = OOB_NOT_CONNECTED;
460         sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
461         sas_phy->id = phy_no;
462         sas_phy->sas_addr = &hisi_hba->sas_addr[0];
463         sas_phy->frame_rcvd = &phy->frame_rcvd[0];
464         sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
465         sas_phy->lldd_phy = phy;
466
467         INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work);
468 }
469
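/*
 * Called when libsas has formed a port on this phy: record the hardware
 * port id and cross-link the phy, the libsas port and the LLDD port under
 * the hba lock.
 */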
470 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
471 {
472         struct sas_ha_struct *sas_ha = sas_phy->ha;
473         struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
474         struct hisi_sas_phy *phy = sas_phy->lldd_phy;
475         struct asd_sas_port *sas_port = sas_phy->port;
476         struct hisi_sas_port *port = &hisi_hba->port[sas_phy->id];
477         unsigned long flags;
478
479         if (!sas_port)
480                 return;
481
482         spin_lock_irqsave(&hisi_hba->lock, flags);
483         port->port_attached = 1;
484         port->id = phy->port_id;
485         phy->port = port;
486         sas_port->lldd_port = port;
487         spin_unlock_irqrestore(&hisi_hba->lock, flags);
488 }
489
490 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, int phy_no,
491                                      struct domain_device *device)
492 {
493         struct hisi_sas_phy *phy;
494         struct hisi_sas_port *port;
495         struct hisi_sas_slot *slot, *slot2;
496         struct device *dev = &hisi_hba->pdev->dev;
497
498         phy = &hisi_hba->phy[phy_no];
499         port = phy->port;
500         if (!port)
501                 return;
502
503         list_for_each_entry_safe(slot, slot2, &port->list, entry) {
504                 struct sas_task *task;
505
506                 task = slot->task;
507                 if (device && task->dev != device)
508                         continue;
509
510                 dev_info(dev, "Release slot [%d:%d], task [%p]:\n",
511                          slot->dlvry_queue, slot->dlvry_queue_slot, task);
512                 hisi_hba->hw->slot_complete(hisi_hba, slot, 1);
513         }
514 }
515
516 static void hisi_sas_port_notify_deformed(struct asd_sas_phy *sas_phy)
517 {
518         struct domain_device *device;
519         struct hisi_sas_phy *phy = sas_phy->lldd_phy;
520         struct asd_sas_port *sas_port = sas_phy->port;
521
522         list_for_each_entry(device, &sas_port->dev_list, dev_list_node)
523                 hisi_sas_do_release_task(phy->hisi_hba, sas_phy->id, device);
524 }
525
526 static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
527                         struct domain_device *device)
528 {
529         struct asd_sas_port *port = device->port;
530         struct asd_sas_phy *sas_phy;
531
532         list_for_each_entry(sas_phy, &port->phy_list, port_phy_el)
533                 hisi_sas_do_release_task(hisi_hba, sas_phy->id, device);
534 }
535
536 static void hisi_sas_dev_gone(struct domain_device *device)
537 {
538         struct hisi_sas_device *sas_dev = device->lldd_dev;
539         struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
540         struct device *dev = &hisi_hba->pdev->dev;
541         u64 dev_id = sas_dev->device_id;
542
543         dev_info(dev, "found dev[%lld:%x] is gone\n",
544                  sas_dev->device_id, sas_dev->dev_type);
545
546         hisi_hba->hw->free_device(hisi_hba, sas_dev);
547         device->lldd_dev = NULL;
548         memset(sas_dev, 0, sizeof(*sas_dev));
549         sas_dev->device_id = dev_id;
550         sas_dev->dev_type = SAS_PHY_UNUSED;
551         sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
552 }
553
554 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
555 {
556         return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
557 }
558
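/*
 * Dispatch libsas phy control requests to the hw layer.  Only hard reset,
 * link reset and disable are implemented; everything else returns
 * -EOPNOTSUPP.
 */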
559 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
560                                 void *funcdata)
561 {
562         struct sas_ha_struct *sas_ha = sas_phy->ha;
563         struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
564         int phy_no = sas_phy->id;
565
566         switch (func) {
567         case PHY_FUNC_HARD_RESET:
568                 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
569                 break;
570
571         case PHY_FUNC_LINK_RESET:
572                 hisi_hba->hw->phy_enable(hisi_hba, phy_no);
573                 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
574                 break;
575
576         case PHY_FUNC_DISABLE:
577                 hisi_hba->hw->phy_disable(hisi_hba, phy_no);
578                 break;
579
580         case PHY_FUNC_SET_LINK_RATE:
581         case PHY_FUNC_RELEASE_SPINUP_HOLD:
582         default:
583                 return -EOPNOTSUPP;
584         }
585         return 0;
586 }
587
588 static void hisi_sas_task_done(struct sas_task *task)
589 {
590         if (!del_timer(&task->slow_task->timer))
591                 return;
592         complete(&task->slow_task->completion);
593 }
594
595 static void hisi_sas_tmf_timedout(unsigned long data)
596 {
597         struct sas_task *task = (struct sas_task *)data;
598
599         task->task_state_flags |= SAS_TASK_STATE_ABORTED;
600         complete(&task->slow_task->completion);
601 }
602
603 #define TASK_TIMEOUT 20
604 #define TASK_RETRY 3
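/*
 * Issue a TMF as an internal slow task: retry up to TASK_RETRY times,
 * arm a TASK_TIMEOUT-second timer, execute the task and interpret the
 * completion status (complete, underrun, overrun or failure).
 */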
605 static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
606                                            void *parameter, u32 para_len,
607                                            struct hisi_sas_tmf_task *tmf)
608 {
609         struct hisi_sas_device *sas_dev = device->lldd_dev;
610         struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
611         struct device *dev = &hisi_hba->pdev->dev;
612         struct sas_task *task;
613         int res, retry;
614
615         for (retry = 0; retry < TASK_RETRY; retry++) {
616                 task = sas_alloc_slow_task(GFP_KERNEL);
617                 if (!task)
618                         return -ENOMEM;
619
620                 task->dev = device;
621                 task->task_proto = device->tproto;
622
623                 memcpy(&task->ssp_task, parameter, para_len);
624                 task->task_done = hisi_sas_task_done;
625
626                 task->slow_task->timer.data = (unsigned long) task;
627                 task->slow_task->timer.function = hisi_sas_tmf_timedout;
628                 task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
629                 add_timer(&task->slow_task->timer);
630
631                 res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
632
633                 if (res) {
634                         del_timer(&task->slow_task->timer);
635                         dev_err(dev, "abort tmf: executing internal task failed: %d\n",
636                                 res);
637                         goto ex_err;
638                 }
639
640                 wait_for_completion(&task->slow_task->completion);
641                 res = TMF_RESP_FUNC_FAILED;
642                 /* Even if the TMF timed out, return directly. */
643                 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
644                         if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
645                                 dev_err(dev, "abort tmf: TMF task[%d] timeout\n",
646                                         tmf->tag_of_task_to_be_managed);
647                                 if (task->lldd_task) {
648                                         struct hisi_sas_slot *slot =
649                                                 task->lldd_task;
650
651                                         hisi_sas_slot_task_free(hisi_hba,
652                                                                 task, slot);
653                                 }
654
655                                 goto ex_err;
656                         }
657                 }
658
659                 if (task->task_status.resp == SAS_TASK_COMPLETE &&
660                     task->task_status.stat == SAM_STAT_GOOD) {
661                         res = TMF_RESP_FUNC_COMPLETE;
662                         break;
663                 }
664
665                 if (task->task_status.resp == SAS_TASK_COMPLETE &&
666                       task->task_status.stat == SAS_DATA_UNDERRUN) {
667                         /* no error, but return the number of bytes of
668                          * underrun
669                          */
670                         dev_warn(dev, "abort tmf: task to dev %016llx "
671                                  "resp: 0x%x sts 0x%x underrun\n",
672                                  SAS_ADDR(device->sas_addr),
673                                  task->task_status.resp,
674                                  task->task_status.stat);
675                         res = task->task_status.residual;
676                         break;
677                 }
678
679                 if (task->task_status.resp == SAS_TASK_COMPLETE &&
680                         task->task_status.stat == SAS_DATA_OVERRUN) {
681                         dev_warn(dev, "abort tmf: blocked task error\n");
682                         res = -EMSGSIZE;
683                         break;
684                 }
685
686                 dev_warn(dev, "abort tmf: task to dev "
687                          "%016llx resp: 0x%x status 0x%x\n",
688                          SAS_ADDR(device->sas_addr), task->task_status.resp,
689                          task->task_status.stat);
690                 sas_free_task(task);
691                 task = NULL;
692         }
693 ex_err:
694         WARN_ON(retry == TASK_RETRY);
695         sas_free_task(task);
696         return res;
697 }
698
699 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
700                                 u8 *lun, struct hisi_sas_tmf_task *tmf)
701 {
702         struct sas_ssp_task ssp_task;
703
704         if (!(device->tproto & SAS_PROTOCOL_SSP))
705                 return TMF_RESP_FUNC_ESUPP;
706
707         memcpy(ssp_task.LUN, lun, 8);
708
709         return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
710                                 sizeof(ssp_task), tmf);
711 }
712
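/*
 * Abort a single task.  For SSP commands a TMF_ABORT_TASK is sent and, if
 * it completes, the corresponding slot is completed; for SATA/STP on a
 * directly attached disk the task is simply marked aborted.
 */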
713 static int hisi_sas_abort_task(struct sas_task *task)
714 {
715         struct scsi_lun lun;
716         struct hisi_sas_tmf_task tmf_task;
717         struct domain_device *device = task->dev;
718         struct hisi_sas_device *sas_dev = device->lldd_dev;
719         struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
720         struct device *dev = &hisi_hba->pdev->dev;
721         int rc = TMF_RESP_FUNC_FAILED;
722         unsigned long flags;
723
724         if (!sas_dev) {
725                 dev_warn(dev, "Device has been removed\n");
726                 return TMF_RESP_FUNC_FAILED;
727         }
728
729         spin_lock_irqsave(&task->task_state_lock, flags);
730         if (task->task_state_flags & SAS_TASK_STATE_DONE) {
731                 spin_unlock_irqrestore(&task->task_state_lock, flags);
732                 rc = TMF_RESP_FUNC_COMPLETE;
733                 goto out;
734         }
735
736         spin_unlock_irqrestore(&task->task_state_lock, flags);
737         sas_dev->dev_status = HISI_SAS_DEV_EH;
738         if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
739                 struct scsi_cmnd *cmnd = task->uldd_task;
740                 struct hisi_sas_slot *slot = task->lldd_task;
741                 u32 tag = slot->idx;
742
743                 int_to_scsilun(cmnd->device->lun, &lun);
744                 tmf_task.tmf = TMF_ABORT_TASK;
745                 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
746
747                 rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
748                                                   &tmf_task);
749
750                 /* If successful, complete the slot so the callback is forwarded. */
751                 if (rc == TMF_RESP_FUNC_COMPLETE) {
752                         if (task->lldd_task) {
753                                 struct hisi_sas_slot *slot;
754
755                                 slot = &hisi_hba->slot_info
756                                         [tmf_task.tag_of_task_to_be_managed];
757                                 spin_lock_irqsave(&hisi_hba->lock, flags);
758                                 hisi_hba->hw->slot_complete(hisi_hba, slot, 1);
759                                 spin_unlock_irqrestore(&hisi_hba->lock, flags);
760                         }
761                 }
762
763         } else if (task->task_proto & SAS_PROTOCOL_SATA ||
764                 task->task_proto & SAS_PROTOCOL_STP) {
765                 if (task->dev->dev_type == SAS_SATA_DEV) {
766                         struct hisi_slot_info *slot = task->lldd_task;
767
768                         dev_notice(dev, "abort task: hba=%p task=%p slot=%p\n",
769                                    hisi_hba, task, slot);
770                         task->task_state_flags |= SAS_TASK_STATE_ABORTED;
771                         rc = TMF_RESP_FUNC_COMPLETE;
772                         goto out;
773                 }
774
775         }
776
777 out:
778         if (rc != TMF_RESP_FUNC_COMPLETE)
779                 dev_notice(dev, "abort task: rc=%d\n", rc);
780         return rc;
781 }
782
783 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
784 {
785         struct hisi_sas_tmf_task tmf_task;
786         int rc = TMF_RESP_FUNC_FAILED;
787
788         tmf_task.tmf = TMF_ABORT_TASK_SET;
789         rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
790
791         return rc;
792 }
793
794 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
795 {
796         int rc = TMF_RESP_FUNC_FAILED;
797         struct hisi_sas_tmf_task tmf_task;
798
799         tmf_task.tmf = TMF_CLEAR_ACA;
800         rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
801
802         return rc;
803 }
804
805 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
806 {
807         struct sas_phy *phy = sas_get_local_phy(device);
808         int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
809                         (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
810         rc = sas_phy_reset(phy, reset_type);
811         sas_put_local_phy(phy);
812         msleep(2000);
813         return rc;
814 }
815
816 static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
817 {
818         struct hisi_sas_device *sas_dev = device->lldd_dev;
819         struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
820         unsigned long flags;
821         int rc = TMF_RESP_FUNC_FAILED;
822
823         if (sas_dev->dev_status != HISI_SAS_DEV_EH)
824                 return TMF_RESP_FUNC_FAILED;
825         sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
826
827         rc = hisi_sas_debug_I_T_nexus_reset(device);
828
829         spin_lock_irqsave(&hisi_hba->lock, flags);
830         hisi_sas_release_task(hisi_hba, device);
831         spin_unlock_irqrestore(&hisi_hba->lock, flags);
832
833         return 0;
834 }
835
836 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
837 {
838         struct hisi_sas_tmf_task tmf_task;
839         struct hisi_sas_device *sas_dev = device->lldd_dev;
840         struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
841         struct device *dev = &hisi_hba->pdev->dev;
842         unsigned long flags;
843         int rc = TMF_RESP_FUNC_FAILED;
844
845         tmf_task.tmf = TMF_LU_RESET;
846         sas_dev->dev_status = HISI_SAS_DEV_EH;
847         rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
848         if (rc == TMF_RESP_FUNC_COMPLETE) {
849                 spin_lock_irqsave(&hisi_hba->lock, flags);
850                 hisi_sas_release_task(hisi_hba, device);
851                 spin_unlock_irqrestore(&hisi_hba->lock, flags);
852         }
853
854         /* If this failed, fall through to I_T nexus reset */
855         dev_err(dev, "lu_reset: for device[%llx]: rc=%d\n",
856                 sas_dev->device_id, rc);
857         return rc;
858 }
859
860 static int hisi_sas_query_task(struct sas_task *task)
861 {
862         struct scsi_lun lun;
863         struct hisi_sas_tmf_task tmf_task;
864         int rc = TMF_RESP_FUNC_FAILED;
865
866         if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
867                 struct scsi_cmnd *cmnd = task->uldd_task;
868                 struct domain_device *device = task->dev;
869                 struct hisi_sas_slot *slot = task->lldd_task;
870                 u32 tag = slot->idx;
871
872                 int_to_scsilun(cmnd->device->lun, &lun);
873                 tmf_task.tmf = TMF_QUERY_TASK;
874                 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
875
876                 rc = hisi_sas_debug_issue_ssp_tmf(device,
877                                                   lun.scsi_lun,
878                                                   &tmf_task);
879                 switch (rc) {
880                 /* The task is still in the LUN; release it */
881                 case TMF_RESP_FUNC_SUCC:
882                 /* The task is not in the LUN or the TMF failed; reset the phy */
883                 case TMF_RESP_FUNC_FAILED:
884                 case TMF_RESP_FUNC_COMPLETE:
885                         break;
886                 }
887         }
888         return rc;
889 }
890
891 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
892 {
893         hisi_sas_port_notify_formed(sas_phy);
894 }
895
896 static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
897 {
898         hisi_sas_port_notify_deformed(sas_phy);
899 }
900
901 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
902 {
903         phy->phy_attached = 0;
904         phy->phy_type = 0;
905         phy->port = NULL;
906 }
907
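/**
 * hisi_sas_phy_down() - handle a phy down event reported by the hw layer
 * @hisi_hba: host controller instance
 * @phy_no: number of the phy that went down
 * @rdy: non-zero if the phy is down but still ready
 *
 * A ready phy is reported up again so its port can be (re)formed;
 * otherwise libsas is notified of the loss of signal and the phy and port
 * bookkeeping is cleared.
 */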
908 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
909 {
910         struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
911         struct asd_sas_phy *sas_phy = &phy->sas_phy;
912         struct sas_ha_struct *sas_ha = &hisi_hba->sha;
913
914         if (rdy) {
915                 /* Phy down but ready */
916                 hisi_sas_bytes_dmaed(hisi_hba, phy_no);
917                 hisi_sas_port_notify_formed(sas_phy);
918         } else {
919                 struct hisi_sas_port *port = phy->port;
920
921                 /* Phy down and not ready */
922                 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
923                 sas_phy_disconnected(sas_phy);
924
925                 if (port) {
926                         if (phy->phy_type & PORT_TYPE_SAS) {
927                                 int port_id = port->id;
928
929                                 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
930                                                                        port_id))
931                                         port->port_attached = 0;
932                         } else if (phy->phy_type & PORT_TYPE_SATA)
933                                 port->port_attached = 0;
934                 }
935                 hisi_sas_phy_disconnected(phy);
936         }
937 }
938 EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
939
940 static struct scsi_transport_template *hisi_sas_stt;
941
942 static struct scsi_host_template hisi_sas_sht = {
943         .module                 = THIS_MODULE,
944         .name                   = DRV_NAME,
945         .queuecommand           = sas_queuecommand,
946         .target_alloc           = sas_target_alloc,
947         .slave_configure        = sas_slave_configure,
948         .scan_finished          = hisi_sas_scan_finished,
949         .scan_start             = hisi_sas_scan_start,
950         .change_queue_depth     = sas_change_queue_depth,
951         .bios_param             = sas_bios_param,
952         .can_queue              = 1,
953         .this_id                = -1,
954         .sg_tablesize           = SG_ALL,
955         .max_sectors            = SCSI_DEFAULT_MAX_SECTORS,
956         .use_clustering         = ENABLE_CLUSTERING,
957         .eh_device_reset_handler = sas_eh_device_reset_handler,
958         .eh_bus_reset_handler   = sas_eh_bus_reset_handler,
959         .target_destroy         = sas_target_destroy,
960         .ioctl                  = sas_ioctl,
961 };
962
963 static struct sas_domain_function_template hisi_sas_transport_ops = {
964         .lldd_dev_found         = hisi_sas_dev_found,
965         .lldd_dev_gone          = hisi_sas_dev_gone,
966         .lldd_execute_task      = hisi_sas_queue_command,
967         .lldd_control_phy       = hisi_sas_control_phy,
968         .lldd_abort_task        = hisi_sas_abort_task,
969         .lldd_abort_task_set    = hisi_sas_abort_task_set,
970         .lldd_clear_aca         = hisi_sas_clear_aca,
971         .lldd_I_T_nexus_reset   = hisi_sas_I_T_nexus_reset,
972         .lldd_lu_reset          = hisi_sas_lu_reset,
973         .lldd_query_task        = hisi_sas_query_task,
974         .lldd_port_formed       = hisi_sas_port_formed,
975         .lldd_port_deformed     = hisi_sas_port_deformed,
976 };
977
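/*
 * Allocate the per-HBA resources: delivery and completion queues, DMA
 * pools for status buffers, command tables and SGE pages, the ITCT, IOST,
 * breakpoint and initial FIS areas, the slot and tag arrays, and the
 * phy-up workqueue.
 */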
978 static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
979 {
980         int i, s;
981         struct platform_device *pdev = hisi_hba->pdev;
982         struct device *dev = &pdev->dev;
983
984         spin_lock_init(&hisi_hba->lock);
985         for (i = 0; i < hisi_hba->n_phy; i++) {
986                 hisi_sas_phy_init(hisi_hba, i);
987                 hisi_hba->port[i].port_attached = 0;
988                 hisi_hba->port[i].id = -1;
989                 INIT_LIST_HEAD(&hisi_hba->port[i].list);
990         }
991
992         for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
993                 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
994                 hisi_hba->devices[i].device_id = i;
995                 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
996         }
997
998         for (i = 0; i < hisi_hba->queue_count; i++) {
999                 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1000
1001                 /* Completion queue structure */
1002                 cq->id = i;
1003                 cq->hisi_hba = hisi_hba;
1004
1005                 /* Delivery queue */
1006                 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1007                 hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
1008                                         &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
1009                 if (!hisi_hba->cmd_hdr[i])
1010                         goto err_out;
1011                 memset(hisi_hba->cmd_hdr[i], 0, s);
1012
1013                 /* Completion queue */
1014                 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1015                 hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
1016                                 &hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
1017                 if (!hisi_hba->complete_hdr[i])
1018                         goto err_out;
1019                 memset(hisi_hba->complete_hdr[i], 0, s);
1020         }
1021
1022         s = HISI_SAS_STATUS_BUF_SZ;
1023         hisi_hba->status_buffer_pool = dma_pool_create("status_buffer",
1024                                                        dev, s, 16, 0);
1025         if (!hisi_hba->status_buffer_pool)
1026                 goto err_out;
1027
1028         s = HISI_SAS_COMMAND_TABLE_SZ;
1029         hisi_hba->command_table_pool = dma_pool_create("command_table",
1030                                                        dev, s, 16, 0);
1031         if (!hisi_hba->command_table_pool)
1032                 goto err_out;
1033
1034         s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1035         hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma,
1036                                             GFP_KERNEL);
1037         if (!hisi_hba->itct)
1038                 goto err_out;
1039
1040         memset(hisi_hba->itct, 0, s);
1041
1042         hisi_hba->slot_info = devm_kcalloc(dev, HISI_SAS_COMMAND_ENTRIES,
1043                                            sizeof(struct hisi_sas_slot),
1044                                            GFP_KERNEL);
1045         if (!hisi_hba->slot_info)
1046                 goto err_out;
1047
1048         s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_iost);
1049         hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
1050                                             GFP_KERNEL);
1051         if (!hisi_hba->iost)
1052                 goto err_out;
1053
1054         memset(hisi_hba->iost, 0, s);
1055
1056         s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_breakpoint);
1057         hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
1058                                 &hisi_hba->breakpoint_dma, GFP_KERNEL);
1059         if (!hisi_hba->breakpoint)
1060                 goto err_out;
1061
1062         memset(hisi_hba->breakpoint, 0, s);
1063
1064         hisi_hba->slot_index_count = HISI_SAS_COMMAND_ENTRIES;
1065         s = hisi_hba->slot_index_count / sizeof(unsigned long);
1066         hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
1067         if (!hisi_hba->slot_index_tags)
1068                 goto err_out;
1069
1070         hisi_hba->sge_page_pool = dma_pool_create("status_sge", dev,
1071                                 sizeof(struct hisi_sas_sge_page), 16, 0);
1072         if (!hisi_hba->sge_page_pool)
1073                 goto err_out;
1074
1075         s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1076         hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
1077                                 &hisi_hba->initial_fis_dma, GFP_KERNEL);
1078         if (!hisi_hba->initial_fis)
1079                 goto err_out;
1080         memset(hisi_hba->initial_fis, 0, s);
1081
1082         s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_breakpoint) * 2;
1083         hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
1084                                 &hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
1085         if (!hisi_hba->sata_breakpoint)
1086                 goto err_out;
1087         memset(hisi_hba->sata_breakpoint, 0, s);
1088
1089         hisi_sas_slot_index_init(hisi_hba);
1090
1091         hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
1092         if (!hisi_hba->wq) {
1093                 dev_err(dev, "sas_alloc: failed to create workqueue\n");
1094                 goto err_out;
1095         }
1096
1097         return 0;
1098 err_out:
1099         return -ENOMEM;
1100 }
1101
1102 static void hisi_sas_free(struct hisi_hba *hisi_hba)
1103 {
1104         struct device *dev = &hisi_hba->pdev->dev;
1105         int i, s;
1106
1107         for (i = 0; i < hisi_hba->queue_count; i++) {
1108                 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1109                 if (hisi_hba->cmd_hdr[i])
1110                         dma_free_coherent(dev, s,
1111                                           hisi_hba->cmd_hdr[i],
1112                                           hisi_hba->cmd_hdr_dma[i]);
1113
1114                 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
1115                 if (hisi_hba->complete_hdr[i])
1116                         dma_free_coherent(dev, s,
1117                                           hisi_hba->complete_hdr[i],
1118                                           hisi_hba->complete_hdr_dma[i]);
1119         }
1120
1121         dma_pool_destroy(hisi_hba->status_buffer_pool);
1122         dma_pool_destroy(hisi_hba->command_table_pool);
1123         dma_pool_destroy(hisi_hba->sge_page_pool);
1124
1125         s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
1126         if (hisi_hba->itct)
1127                 dma_free_coherent(dev, s,
1128                                   hisi_hba->itct, hisi_hba->itct_dma);
1129
1130         s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_iost);
1131         if (hisi_hba->iost)
1132                 dma_free_coherent(dev, s,
1133                                   hisi_hba->iost, hisi_hba->iost_dma);
1134
1135         s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_breakpoint);
1136         if (hisi_hba->breakpoint)
1137                 dma_free_coherent(dev, s,
1138                                   hisi_hba->breakpoint,
1139                                   hisi_hba->breakpoint_dma);
1140
1141
1142         s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
1143         if (hisi_hba->initial_fis)
1144                 dma_free_coherent(dev, s,
1145                                   hisi_hba->initial_fis,
1146                                   hisi_hba->initial_fis_dma);
1147
1148         s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_breakpoint) * 2;
1149         if (hisi_hba->sata_breakpoint)
1150                 dma_free_coherent(dev, s,
1151                                   hisi_hba->sata_breakpoint,
1152                                   hisi_hba->sata_breakpoint_dma);
1153
1154         if (hisi_hba->wq)
1155                 destroy_workqueue(hisi_hba->wq);
1156 }
1157
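/*
 * Allocate the Scsi_Host and read the controller description from the
 * device tree: SAS address, reset/clock syscon registers, phy and queue
 * counts.  The register space and syscon are mapped before the per-HBA
 * memory is set up with hisi_sas_alloc().
 */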
1158 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
1159                                               const struct hisi_sas_hw *hw)
1160 {
1161         struct resource *res;
1162         struct Scsi_Host *shost;
1163         struct hisi_hba *hisi_hba;
1164         struct device *dev = &pdev->dev;
1165         struct device_node *np = pdev->dev.of_node;
1166         struct property *sas_addr_prop;
1167
1168         shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba));
1169         if (!shost)
1170                 goto err_out;
1171         hisi_hba = shost_priv(shost);
1172
1173         hisi_hba->hw = hw;
1174         hisi_hba->pdev = pdev;
1175         hisi_hba->shost = shost;
1176         SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
1177
1178         init_timer(&hisi_hba->timer);
1179
1180         sas_addr_prop = of_find_property(np, "sas-addr", NULL);
1181         if (!sas_addr_prop || (sas_addr_prop->length != SAS_ADDR_SIZE))
1182                 goto err_out;
1183         memcpy(hisi_hba->sas_addr, sas_addr_prop->value, SAS_ADDR_SIZE);
1184
1185         if (of_property_read_u32(np, "ctrl-reset-reg",
1186                                  &hisi_hba->ctrl_reset_reg))
1187                 goto err_out;
1188
1189         if (of_property_read_u32(np, "ctrl-reset-sts-reg",
1190                                  &hisi_hba->ctrl_reset_sts_reg))
1191                 goto err_out;
1192
1193         if (of_property_read_u32(np, "ctrl-clock-ena-reg",
1194                                  &hisi_hba->ctrl_clock_ena_reg))
1195                 goto err_out;
1196
1197         if (of_property_read_u32(np, "phy-count", &hisi_hba->n_phy))
1198                 goto err_out;
1199
1200         if (of_property_read_u32(np, "queue-count", &hisi_hba->queue_count))
1201                 goto err_out;
1202
1203         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1204         hisi_hba->regs = devm_ioremap_resource(dev, res);
1205         if (IS_ERR(hisi_hba->regs))
1206                 goto err_out;
1207
1208         hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(
1209                                 np, "hisilicon,sas-syscon");
1210         if (IS_ERR(hisi_hba->ctrl))
1211                 goto err_out;
1212
1213         if (hisi_sas_alloc(hisi_hba, shost)) {
1214                 hisi_sas_free(hisi_hba);
1215                 goto err_out;
1216         }
1217
1218         return shost;
1219 err_out:
1220         dev_err(dev, "shost alloc failed\n");
1221         return NULL;
1222 }
1223
1224 static void hisi_sas_init_add(struct hisi_hba *hisi_hba)
1225 {
1226         int i;
1227
1228         for (i = 0; i < hisi_hba->n_phy; i++)
1229                 memcpy(&hisi_hba->phy[i].dev_sas_addr,
1230                        hisi_hba->sas_addr,
1231                        SAS_ADDR_SIZE);
1232 }
1233
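/**
 * hisi_sas_probe() - common probe routine for the hw-specific drivers
 * @pdev: platform device describing the SAS controller
 * @hw: hardware-specific operation table
 *
 * Allocates the Scsi_Host and libsas structures, configures the DMA mask,
 * initialises the hardware through hw->hw_init() and registers the host
 * with the SCSI midlayer and libsas before starting the initial scan.
 */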
1234 int hisi_sas_probe(struct platform_device *pdev,
1235                          const struct hisi_sas_hw *hw)
1236 {
1237         struct Scsi_Host *shost;
1238         struct hisi_hba *hisi_hba;
1239         struct device *dev = &pdev->dev;
1240         struct asd_sas_phy **arr_phy;
1241         struct asd_sas_port **arr_port;
1242         struct sas_ha_struct *sha;
1243         int rc, phy_nr, port_nr, i;
1244
1245         shost = hisi_sas_shost_alloc(pdev, hw);
1246         if (!shost) {
1247                 rc = -ENOMEM;
1248                 goto err_out_ha;
1249         }
1250
1251         sha = SHOST_TO_SAS_HA(shost);
1252         hisi_hba = shost_priv(shost);
1253         platform_set_drvdata(pdev, sha);
1254
1255         if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
1256             dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
1257                 dev_err(dev, "No usable DMA addressing method\n");
1258                 rc = -EIO;
1259                 goto err_out_ha;
1260         }
1261
1262         phy_nr = port_nr = hisi_hba->n_phy;
1263
1264         arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
1265         arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
1266         if (!arr_phy || !arr_port)
1267                 return -ENOMEM;
1268
1269         sha->sas_phy = arr_phy;
1270         sha->sas_port = arr_port;
1271         sha->core.shost = shost;
1272         sha->lldd_ha = hisi_hba;
1273
1274         shost->transportt = hisi_sas_stt;
1275         shost->max_id = HISI_SAS_MAX_DEVICES;
1276         shost->max_lun = ~0;
1277         shost->max_channel = 1;
1278         shost->max_cmd_len = 16;
1279         shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
1280         shost->can_queue = HISI_SAS_COMMAND_ENTRIES;
1281         shost->cmd_per_lun = HISI_SAS_COMMAND_ENTRIES;
1282
1283         sha->sas_ha_name = DRV_NAME;
1284         sha->dev = &hisi_hba->pdev->dev;
1285         sha->lldd_module = THIS_MODULE;
1286         sha->sas_addr = &hisi_hba->sas_addr[0];
1287         sha->num_phys = hisi_hba->n_phy;
1288         sha->core.shost = hisi_hba->shost;
1289
1290         for (i = 0; i < hisi_hba->n_phy; i++) {
1291                 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
1292                 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
1293         }
1294
1295         hisi_sas_init_add(hisi_hba);
1296
1297         rc = hisi_hba->hw->hw_init(hisi_hba);
1298         if (rc)
1299                 goto err_out_ha;
1300
1301         rc = scsi_add_host(shost, &pdev->dev);
1302         if (rc)
1303                 goto err_out_ha;
1304
1305         rc = sas_register_ha(sha);
1306         if (rc)
1307                 goto err_out_register_ha;
1308
1309         scsi_scan_host(shost);
1310
1311         return 0;
1312
1313 err_out_register_ha:
1314         scsi_remove_host(shost);
1315 err_out_ha:
1316         kfree(shost);
1317         return rc;
1318 }
1319 EXPORT_SYMBOL_GPL(hisi_sas_probe);
1320
1321 int hisi_sas_remove(struct platform_device *pdev)
1322 {
1323         struct sas_ha_struct *sha = platform_get_drvdata(pdev);
1324         struct hisi_hba *hisi_hba = sha->lldd_ha;
1325
1326         scsi_remove_host(sha->core.shost);
1327         sas_unregister_ha(sha);
1328         sas_remove_host(sha->core.shost);
1329
1330         hisi_sas_free(hisi_hba);
1331         return 0;
1332 }
1333 EXPORT_SYMBOL_GPL(hisi_sas_remove);
1334
1335 static __init int hisi_sas_init(void)
1336 {
1337         pr_info("hisi_sas: driver version %s\n", DRV_VERSION);
1338
1339         hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
1340         if (!hisi_sas_stt)
1341                 return -ENOMEM;
1342
1343         return 0;
1344 }
1345
1346 static __exit void hisi_sas_exit(void)
1347 {
1348         sas_release_transport(hisi_sas_stt);
1349 }
1350
1351 module_init(hisi_sas_init);
1352 module_exit(hisi_sas_exit);
1353
1354 MODULE_VERSION(DRV_VERSION);
1355 MODULE_LICENSE("GPL");
1356 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
1357 MODULE_DESCRIPTION("HISILICON SAS controller driver");
1358 MODULE_ALIAS("platform:" DRV_NAME);