drivers/crypto/caam/ctrl.c
/*
 * CAAM control-plane driver backend
 * Controller-level driver, kernel property detection, initialization
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 */

#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "desc_constr.h"
#include "error.h"
#include "ctrl.h"

static int caam_remove(struct platform_device *pdev)
{
        struct device *ctrldev;
        struct caam_drv_private *ctrlpriv;
        struct caam_drv_private_jr *jrpriv;
        struct caam_full __iomem *topregs;
        int ring, ret = 0;

        ctrldev = &pdev->dev;
        ctrlpriv = dev_get_drvdata(ctrldev);
        topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;

        /* shut down JobRs */
        for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
                ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]);
                jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
                irq_dispose_mapping(jrpriv->irq);
        }

        /* Shut down debug views */
#ifdef CONFIG_DEBUG_FS
        debugfs_remove_recursive(ctrlpriv->dfs_root);
#endif

        /* Unmap controller region */
        iounmap(&topregs->ctrl);

        kfree(ctrlpriv->jrdev);
        kfree(ctrlpriv);

        return ret;
}

/*
 * Descriptor to instantiate RNG State Handle 0 in normal mode and
 * load the JDKEK, TDKEK and TDSK registers
 */
static void build_instantiation_desc(u32 *desc)
{
        u32 *jump_cmd;

        init_job_desc(desc, 0);

        /* INIT RNG in non-test mode */
        append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
                         OP_ALG_AS_INIT);

        /* wait for done */
        jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
        set_jump_tgt_here(desc, jump_cmd);

        /*
         * load 1 to clear written reg:
         * resets the done interrupt and returns the RNG to idle.
         */
        append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);

        /* generate secure keys (non-test) */
        append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
                         OP_ALG_RNG4_SK);
}

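/*
 * Context handed to the job-ring completion callback: carries a completion
 * for the caller to sleep on and the job status reported by the hardware.
 */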
struct instantiate_result {
        struct completion completion;
        int err;
};

static void rng4_init_done(struct device *dev, u32 *desc, u32 err,
                           void *context)
{
        struct instantiate_result *instantiation = context;

        if (err) {
                char tmp[CAAM_ERROR_STR_MAX];

                dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
        }

        instantiation->err = err;
        complete(&instantiation->completion);
}

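/*
 * Synchronously run the RNG4 instantiation descriptor on the given job
 * ring: build the descriptor, DMA-map it, enqueue it, then sleep until
 * rng4_init_done() reports the job's status.
 */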
static int instantiate_rng(struct device *jrdev)
{
        struct instantiate_result instantiation;

        dma_addr_t desc_dma;
        u32 *desc;
        int ret;

        desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA);
        if (!desc) {
                dev_err(jrdev, "cannot allocate RNG init descriptor memory\n");
                return -ENOMEM;
        }

        build_instantiation_desc(desc);
        desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, desc_dma)) {
                dev_err(jrdev, "unable to map RNG init descriptor\n");
                kfree(desc);
                return -ENOMEM;
        }
        init_completion(&instantiation.completion);
        ret = caam_jr_enqueue(jrdev, desc, rng4_init_done, &instantiation);
        if (!ret) {
                wait_for_completion_interruptible(&instantiation.completion);
                ret = instantiation.err;
                if (ret)
                        dev_err(jrdev, "unable to instantiate RNG\n");
        }

        dma_unmap_single(jrdev, desc_dma, desc_bytes(desc), DMA_TO_DEVICE);

        kfree(desc);

        return ret;
}

/*
 * By default, the TRNG runs for 200 clocks per sample;
 * 1600 clocks per sample generates better entropy.
 */
static void kick_trng(struct platform_device *pdev)
{
        struct device *ctrldev = &pdev->dev;
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
        struct caam_full __iomem *topregs;
        struct rng4tst __iomem *r4tst;
        u32 val;

        topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
        r4tst = &topregs->ctrl.r4tst[0];

        /* put RNG4 into program mode */
        setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
        /* 1600 clocks per sample */
        val = rd_reg32(&r4tst->rtsdctl);
        val = (val & ~RTSDCTL_ENT_DLY_MASK) | (1600 << RTSDCTL_ENT_DLY_SHIFT);
        wr_reg32(&r4tst->rtsdctl, val);
        /* min. freq. count */
        wr_reg32(&r4tst->rtfrqmin, 400);
        /* max. freq. count */
        wr_reg32(&r4tst->rtfrqmax, 6400);
        /* put RNG4 into run mode */
        clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
}

/**
 * caam_get_era() - Return the ERA of the SEC on the SoC, based
 * on the SEC_VID register.
 * @caam_id: the value of the SEC_VID register
 *
 * Returns the ERA number (1..4) or -ENOTSUPP if the ERA is unknown.
 */
int caam_get_era(u64 caam_id)
{
        struct sec_vid *sec_vid = (struct sec_vid *)&caam_id;
        static const struct {
                u16 ip_id;
                u8 maj_rev;
                u8 era;
        } caam_eras[] = {
                {0x0A10, 1, 1},
                {0x0A10, 2, 2},
                {0x0A12, 1, 3},
                {0x0A14, 1, 3},
                {0x0A14, 2, 4},
                {0x0A16, 1, 4},
                {0x0A11, 1, 4}
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(caam_eras); i++)
                if (caam_eras[i].ip_id == sec_vid->ip_id &&
                    caam_eras[i].maj_rev == sec_vid->maj_rev)
                        return caam_eras[i].era;

        return -ENOTSUPP;
}
EXPORT_SYMBOL(caam_get_era);

/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
        int ret, ring, rspec;
        u64 caam_id;
        struct device *dev;
        struct device_node *nprop, *np;
        struct caam_ctrl __iomem *ctrl;
        struct caam_full __iomem *topregs;
        struct caam_drv_private *ctrlpriv;
#ifdef CONFIG_DEBUG_FS
        struct caam_perfmon *perfmon;
#endif
        u64 cha_vid;

        ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
        if (!ctrlpriv)
                return -ENOMEM;

        dev = &pdev->dev;
        dev_set_drvdata(dev, ctrlpriv);
        ctrlpriv->pdev = pdev;
        nprop = pdev->dev.of_node;

        /* Get configuration properties from device tree */
        /* First, get register page */
        ctrl = of_iomap(nprop, 0);
        if (ctrl == NULL) {
                dev_err(dev, "caam: of_iomap() failed\n");
                kfree(ctrlpriv);
                return -ENOMEM;
        }
        ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;

        /* topregs used to derive pointers to CAAM sub-blocks only */
        topregs = (struct caam_full __iomem *)ctrl;

        /* Get the IRQ of the controller (for security violations only) */
        ctrlpriv->secvio_irq = of_irq_to_resource(nprop, 0, NULL);

        /*
         * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
         * long pointers in master configuration register
         */
        setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
                  (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));

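        /*
         * Select the DMA mask: 40 bits for SEC v5.0 nodes, 36 bits for
         * other parts when dma_addr_t is 64-bit, otherwise 32 bits.
         */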
        if (sizeof(dma_addr_t) == sizeof(u64)) {
                if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
                        dma_set_mask(dev, DMA_BIT_MASK(40));
                else
                        dma_set_mask(dev, DMA_BIT_MASK(36));
        } else {
                dma_set_mask(dev, DMA_BIT_MASK(32));
        }

        /*
         * Detect and enable JobRs
         * First, find out how many job rings the device tree specifies,
         * allocate references for all of them, then go probe each one.
         */
        rspec = 0;
        for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring")
                rspec++;
        if (!rspec) {
                /* for backward compatibility with older device trees */
                for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring")
                        rspec++;
        }

        ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL);
        if (ctrlpriv->jrdev == NULL) {
                iounmap(&topregs->ctrl);
                kfree(ctrlpriv);
                return -ENOMEM;
        }

        ring = 0;
        ctrlpriv->total_jobrs = 0;
        for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
                caam_jr_probe(pdev, np, ring);
                ctrlpriv->total_jobrs++;
                ring++;
        }
        if (!ring) {
                for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") {
                        caam_jr_probe(pdev, np, ring);
                        ctrlpriv->total_jobrs++;
                        ring++;
                }
        }

        /* Check to see if QI is present; if so, enable it */
        ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
                                  CTPR_QI_MASK);
        if (ctrlpriv->qi_present) {
                ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
                /* This is all that's required to physically enable QI */
                wr_reg32(&topregs->qi.qi_control_lo, QICTL_DQEN);
        }

        /* If no QI and no rings specified, quit and go home */
        if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
                dev_err(dev, "no queues configured, terminating\n");
                caam_remove(pdev);
                return -ENOMEM;
        }

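        /* Read the CHA version IDs to determine the RNG block revision */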
        cha_vid = rd_reg64(&topregs->ctrl.perfmon.cha_id);

        /*
         * If the SEC has an RNG version >= 4 and the RNG state handle has
         * not already been instantiated, do the RNG instantiation.
         */
        if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4 &&
            !(rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IF0)) {
                kick_trng(pdev);
                ret = instantiate_rng(ctrlpriv->jrdev[0]);
                if (ret) {
                        caam_remove(pdev);
                        return ret;
                }

                /* Enable RDB bit so that RNG works faster */
                setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE);
        }

        /* NOTE: RTIC detection ought to go here, around Si time */

        /* Initialize queue allocator lock */
        spin_lock_init(&ctrlpriv->jr_alloc_lock);

        caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id);

        /* Report "alive" for developer to see */
        dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
                 caam_get_era(caam_id));
        dev_info(dev, "job rings = %d, qi = %d\n",
                 ctrlpriv->total_jobrs, ctrlpriv->qi_present);

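        /*
         * Expose the controller's performance-monitor counters, fault state
         * and covering keys as read-only debugfs entries under caam/ctl.
         */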
#ifdef CONFIG_DEBUG_FS
        /*
         * FIXME: needs a better naming distinction, e.g. some amalgamation
         * of "caam" and nprop->full_name. The OF name isn't distinctive,
         * but it does separate instances.
         */
        perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;

        ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL);
        ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);

        /* Controller-level - performance monitor counters */
        ctrlpriv->ctl_rq_dequeued =
                debugfs_create_u64("rq_dequeued",
                                   S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->req_dequeued);
        ctrlpriv->ctl_ob_enc_req =
                debugfs_create_u64("ob_rq_encrypted",
                                   S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->ob_enc_req);
        ctrlpriv->ctl_ib_dec_req =
                debugfs_create_u64("ib_rq_decrypted",
                                   S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->ib_dec_req);
        ctrlpriv->ctl_ob_enc_bytes =
                debugfs_create_u64("ob_bytes_encrypted",
                                   S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->ob_enc_bytes);
        ctrlpriv->ctl_ob_prot_bytes =
                debugfs_create_u64("ob_bytes_protected",
                                   S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->ob_prot_bytes);
        ctrlpriv->ctl_ib_dec_bytes =
                debugfs_create_u64("ib_bytes_decrypted",
                                   S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->ib_dec_bytes);
        ctrlpriv->ctl_ib_valid_bytes =
                debugfs_create_u64("ib_bytes_validated",
                                   S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->ib_valid_bytes);

        /* Controller level - global status values */
        ctrlpriv->ctl_faultaddr =
                debugfs_create_u64("fault_addr",
                                   S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->faultaddr);
        ctrlpriv->ctl_faultdetail =
                debugfs_create_u32("fault_detail",
                                   S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->faultdetail);
        ctrlpriv->ctl_faultstatus =
                debugfs_create_u32("fault_status",
                                   S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->status);

        /* Internal covering keys (useful in non-secure mode only) */
        ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
        ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
        ctrlpriv->ctl_kek = debugfs_create_blob("kek",
                                                S_IRUSR |
                                                S_IRGRP | S_IROTH,
                                                ctrlpriv->ctl,
                                                &ctrlpriv->ctl_kek_wrap);

        ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
        ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
        ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
                                                 S_IRUSR |
                                                 S_IRGRP | S_IROTH,
                                                 ctrlpriv->ctl,
                                                 &ctrlpriv->ctl_tkek_wrap);

        ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
        ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
        ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
                                                 S_IRUSR |
                                                 S_IRGRP | S_IROTH,
                                                 ctrlpriv->ctl,
                                                 &ctrlpriv->ctl_tdsk_wrap);
#endif
        return 0;
}

static struct of_device_id caam_match[] = {
        {
                .compatible = "fsl,sec-v4.0",
        },
        {
                .compatible = "fsl,sec4.0",
        },
        {},
};
MODULE_DEVICE_TABLE(of, caam_match);

static struct platform_driver caam_driver = {
        .driver = {
                .name = "caam",
                .owner = THIS_MODULE,
                .of_match_table = caam_match,
        },
        .probe       = caam_probe,
        .remove      = caam_remove,
};

module_platform_driver(caam_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");