2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
24 #include <subdev/timer.h>
/*
 * Human-readable names for the falcons secure boot can manage, indexed by
 * enum nvkm_secboot_falcon.  The NVKM_SECBOOT_FALCON_END slot doubles as a
 * catch-all label for out-of-range ids.
 */
27 managed_falcons_names[] = {
28 [NVKM_SECBOOT_FALCON_PMU] = "PMU",
29 [NVKM_SECBOOT_FALCON_RESERVED] = "<reserved>",
30 [NVKM_SECBOOT_FALCON_FECS] = "FECS",
31 [NVKM_SECBOOT_FALCON_GPCCS] = "GPCCS",
32 [NVKM_SECBOOT_FALCON_END] = "<invalid>",
36 * Helper falcon functions
/*
 * Clear the falcon's halt interrupt and wait (up to 10ms) for the pending
 * bit to drop.  @base is the falcon's register window base address.
 * NOTE(review): base + 0x004 / base + 0x008 look like the falcon IRQ
 * set/status registers, bit 0x10 being the halt interrupt — confirm
 * against the falcon register documentation.
 */
40 falcon_clear_halt_interrupt(struct nvkm_device *device, u32 base)
44 /* clear halt interrupt */
45 nvkm_mask(device, base + 0x004, 0x10, 0x10);
46 /* wait until halt interrupt is cleared */
47 ret = nvkm_wait_msec(device, 10, base + 0x008, 0x10, 0x0);
/*
 * Wait (up to 10ms) for the falcon at @base to report idle, i.e. for the
 * low 16 bits of base + 0x04c to read back as zero.
 */
55 falcon_wait_idle(struct nvkm_device *device, u32 base)
59 ret = nvkm_wait_msec(device, 10, base + 0x04c, 0xffff, 0x0);
/*
 * Power up the boot falcon: ungate it via its bit(s) in register 0x200,
 * wait for its memory scrubbing to finish, wait for idle, then clear any
 * stale interrupts and route its interrupt lines.
 * NOTE(review): 0x200 is presumably PMC_ENABLE and 0x640/0x644 the PMC
 * interrupt routing registers — confirm against the PMC documentation.
 */
67 nvkm_secboot_falcon_enable(struct nvkm_secboot *sb)
69 struct nvkm_device *device = sb->subdev.device;
/* Set the falcon's enable bit(s), then read back to flush the write. */
73 nvkm_mask(device, 0x200, sb->enable_mask, sb->enable_mask);
74 nvkm_rd32(device, 0x200);
/* Wait up to 10ms for mem scrubbing (bits 0x6 of base + 0x10c) to end. */
75 ret = nvkm_wait_msec(device, 10, sb->base + 0x10c, 0x6, 0x0);
/* On scrub timeout, gate the falcon again before reporting the error. */
77 nvkm_mask(device, 0x200, sb->enable_mask, 0x0);
78 nvkm_error(&sb->subdev, "Falcon mem scrubbing timeout\n");
82 ret = falcon_wait_idle(device, sb->base);
/* Clear any leftover interrupt state, then enable the falcon's IRQs. */
87 nvkm_wr32(device, sb->base + 0x010, 0xff);
88 nvkm_mask(device, 0x640, sb->irq_mask, sb->irq_mask);
89 nvkm_mask(device, 0x644, sb->irq_mask, sb->irq_mask);
/*
 * Power down the boot falcon: mask its IRQ routing, wait for any running
 * code to go idle, then gate it via register 0x200.  Mirrors the sequence
 * performed by nvkm_secboot_falcon_enable() in reverse.
 */
95 nvkm_secboot_falcon_disable(struct nvkm_secboot *sb)
97 struct nvkm_device *device = sb->subdev.device;
99 /* disable IRQs and wait for any previous code to complete */
100 nvkm_mask(device, 0x644, sb->irq_mask, 0x0);
101 nvkm_mask(device, 0x640, sb->irq_mask, 0x0);
/* Mask all falcon-local interrupt sources as well. */
102 nvkm_wr32(device, sb->base + 0x014, 0xff);
104 falcon_wait_idle(device, sb->base);
/* Finally gate the falcon off. */
107 nvkm_mask(device, 0x200, sb->enable_mask, 0x0);
/*
 * Reset the boot falcon by cycling it off and back on.
 */
113 nvkm_secboot_falcon_reset(struct nvkm_secboot *sb)
117 ret = nvkm_secboot_falcon_disable(sb);
121 ret = nvkm_secboot_falcon_enable(sb);
129 * nvkm_secboot_falcon_run - run the falcon that will perform secure boot
131 * This function is to be called after all chip-specific preparations have
132 * been completed. It will start the falcon to perform secure boot, wait for
133 * it to halt, and report if an error occurred.
136 nvkm_secboot_falcon_run(struct nvkm_secboot *sb)
138 struct nvkm_device *device = sb->subdev.device;
/* Start the falcon CPU (write 0x2 to base + 0x100). */
142 nvkm_wr32(device, sb->base + 0x100, 0x2);
144 /* Wait for falcon halt */
/* Halt is signalled by bit 0x10 of base + 0x100; give it up to 100ms. */
145 ret = nvkm_wait_msec(device, 100, sb->base + 0x100, 0x10, 0x10);
149 /* If mailbox register contains an error code, then ACR has failed */
/* NOTE(review): mailbox value is reused as the (int) return; a large
 * unsigned error code would go negative — flagged, not changed here. */
150 ret = nvkm_rd32(device, sb->base + 0x040);
152 nvkm_error(&sb->subdev, "ACR boot failed, ret 0x%08x", ret);
/* Ack the halt interrupt so a later run starts from a clean state. */
153 falcon_clear_halt_interrupt(device, sb->base);
162 * nvkm_secboot_reset() - reset specified falcon
/*
 * Rejects falcons not listed in the chip's managed_falcons bitmask, then
 * delegates to the chip-specific reset hook.
 */
165 nvkm_secboot_reset(struct nvkm_secboot *sb, u32 falcon)
167 /* Unmanaged falcon? */
168 if (!(BIT(falcon) & sb->func->managed_falcons)) {
169 nvkm_error(&sb->subdev, "cannot reset unmanaged falcon!\n");
173 return sb->func->reset(sb, falcon);
177 * nvkm_secboot_start() - start specified falcon
/*
 * Same managed-falcon guard as nvkm_secboot_reset(), then delegates to the
 * chip-specific start hook.
 */
180 nvkm_secboot_start(struct nvkm_secboot *sb, u32 falcon)
182 /* Unmanaged falcon? */
183 if (!(BIT(falcon) & sb->func->managed_falcons)) {
184 nvkm_error(&sb->subdev, "cannot start unmanaged falcon!\n");
188 return sb->func->start(sb, falcon);
192 * nvkm_secboot_is_managed() - check whether a given falcon is securely-managed
/*
 * Returns non-zero iff @fid's bit is set in the chip's managed_falcons
 * bitmask.
 */
195 nvkm_secboot_is_managed(struct nvkm_secboot *secboot,
196 enum nvkm_secboot_falcon fid)
201 return secboot->func->managed_falcons & BIT(fid);
/*
 * One-time subdev initialization: run the chip-specific init hook, then
 * (if provided) build the secure-boot blobs.  The blobs are prepared once
 * here so later boots can reuse them.
 */
205 nvkm_secboot_oneinit(struct nvkm_subdev *subdev)
207 struct nvkm_secboot *sb = nvkm_secboot(subdev);
210 /* Call chip-specific init function */
212 ret = sb->func->init(sb);
214 nvkm_error(subdev, "Secure Boot initialization failed: %d\n",
220 * Build all blobs - the same blobs can be used to perform secure boot
/* prepare_blobs is optional; skip when the chip does not provide it. */
223 if (sb->func->prepare_blobs)
224 ret = sb->func->prepare_blobs(sb);
/*
 * Subdev teardown on fini/suspend: forwards to the chip-specific fini
 * hook, passing @suspend through unchanged.
 */
230 nvkm_secboot_fini(struct nvkm_subdev *subdev, bool suspend)
232 struct nvkm_secboot *sb = nvkm_secboot(subdev);
236 ret = sb->func->fini(sb, suspend);
/*
 * Subdev destructor: delegates to the chip-specific dtor, which owns the
 * actual freeing (and returns the pointer the subdev core should free).
 */
242 nvkm_secboot_dtor(struct nvkm_subdev *subdev)
244 struct nvkm_secboot *sb = nvkm_secboot(subdev);
248 ret = sb->func->dtor(sb);
/*
 * Subdev vtable wiring the generic hooks above into the nvkm subdev core.
 */
253 static const struct nvkm_subdev_func
255 .oneinit = nvkm_secboot_oneinit,
256 .fini = nvkm_secboot_fini,
257 .dtor = nvkm_secboot_dtor,
/*
 * Common secboot constructor: initializes the base subdev, then derives
 * the boot falcon's register base and PMC enable/IRQ masks from
 * func->boot_falcon.  Only the PMU falcon is supported as boot falcon
 * here; anything else is reported as an error.
 */
261 nvkm_secboot_ctor(const struct nvkm_secboot_func *func,
262 struct nvkm_device *device, int index,
263 struct nvkm_secboot *sb)
267 nvkm_subdev_ctor(&nvkm_secboot, device, index, &sb->subdev);
270 /* setup the performing falcon's base address and masks */
271 switch (func->boot_falcon) {
272 case NVKM_SECBOOT_FALCON_PMU:
/* PMU falcon: PMC interrupt bit 0x1000000, enable bit 0x2000. */
274 sb->irq_mask = 0x1000000;
275 sb->enable_mask = 0x2000;
278 nvkm_error(&sb->subdev, "invalid secure boot falcon\n");
/* Log which falcons this chip manages securely (debug aid only). */
282 nvkm_debug(&sb->subdev, "securely managed falcons:\n");
283 for_each_set_bit(fid, &sb->func->managed_falcons,
284 NVKM_SECBOOT_FALCON_END)
285 nvkm_debug(&sb->subdev, "- %s\n", managed_falcons_names[fid]);