/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <asm/cputable.h>
#include <misc/cxl-base.h>

#include "cxl.h"

static DEFINE_SPINLOCK(adapter_idr_lock);
static DEFINE_IDR(cxl_adapter_idr);

uint cxl_verbose;
module_param_named(verbose, cxl_verbose, uint, 0600);
MODULE_PARM_DESC(verbose, "Enable verbose dmesg output");

const struct cxl_backend_ops *cxl_ops;
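
/*
 * Issue an SLB invalidate-all (SLBIA) to the AFU and poll until the
 * hardware clears the pending bit, giving up after CXL_TIMEOUT seconds
 * or once the adapter link is reported down.
 */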
int cxl_afu_slbia(struct cxl_afu *afu)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("cxl_afu_slbia issuing SLBIA command\n");
	cxl_p2n_write(afu, CXL_SLBIA_An, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p2n_read(afu, CXL_SLBIA_An) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n");
			return -EBUSY;
		}
		/* If the adapter has gone down, we can assume that we
		 * will PERST it and that will invalidate everything.
		 */
		if (!cxl_ops->link_ok(afu->adapter, afu))
			return -EIO;
		cpu_relax();
	}
	return 0;
}
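
/*
 * Invalidate one context's segment table if its task is using the mm
 * being flushed: zero the SST under the SSTE lock, then SLBIA the AFU.
 */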
static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm)
{
	struct task_struct *task;
	unsigned long flags;

	if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
		pr_devel("%s unable to get task %i\n",
			 __func__, pid_nr(ctx->pid));
		return;
	}
	if (task->mm != mm)
		goto out_put;

	pr_devel("%s matched mm - card: %i afu: %i pe: %i\n", __func__,
		 ctx->afu->adapter->adapter_num, ctx->afu->slice, ctx->pe);

	spin_lock_irqsave(&ctx->sste_lock, flags);
	memset(ctx->sstp, 0, ctx->sst_size);
	spin_unlock_irqrestore(&ctx->sste_lock, flags);
	cxl_afu_slbia(ctx->afu);
out_put:
	put_task_struct(task);
}
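
/*
 * Walk every adapter, AFU slice and attached context, invalidating the
 * segment tables of contexts that use the given mm. This is the hook
 * installed in cxl_calls below and registered with the core at init.
 */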
static inline void cxl_slbia_core(struct mm_struct *mm)
{
	struct cxl *adapter;
	struct cxl_afu *afu;
	struct cxl_context *ctx;
	int card, slice, id;

	pr_devel("%s called\n", __func__);

	spin_lock(&adapter_idr_lock);
	idr_for_each_entry(&cxl_adapter_idr, adapter, card) {
		/* XXX: Make this lookup faster with link from mm to ctx */
		spin_lock(&adapter->afu_list_lock);
		for (slice = 0; slice < adapter->slices; slice++) {
			afu = adapter->afu[slice];
			if (!afu || !afu->enabled)
				continue;
			rcu_read_lock();
			idr_for_each_entry(&afu->contexts_idr, ctx, id)
				_cxl_slbia(ctx, mm);
			rcu_read_unlock();
		}
		spin_unlock(&adapter->afu_list_lock);
	}
	spin_unlock(&adapter_idr_lock);
}

static struct cxl_calls cxl_calls = {
	.cxl_slbia = cxl_slbia_core,
	.owner = THIS_MODULE,
};
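
/*
 * Allocate one zeroed page for the context's segment table and precompute
 * the SSTP0/SSTP1 register values (segment size, table size, VSID and
 * table address) for later programming into the hardware.
 */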
int cxl_alloc_sst(struct cxl_context *ctx)
{
	unsigned long vsid;
	u64 ea_mask, size, sstp0, sstp1;

	sstp0 = 0;
	sstp1 = 0;

	ctx->sst_size = PAGE_SIZE;
	ctx->sstp = (struct cxl_sste *)get_zeroed_page(GFP_KERNEL);
	if (!ctx->sstp) {
		pr_err("cxl_alloc_sst: Unable to allocate segment table\n");
		return -ENOMEM;
	}
	pr_devel("SSTP allocated at 0x%p\n", ctx->sstp);

	vsid = get_kernel_vsid((u64)ctx->sstp, mmu_kernel_ssize) << 12;

	sstp0 |= (u64)mmu_kernel_ssize << CXL_SSTP0_An_B_SHIFT;
	sstp0 |= (SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp) << 50;

	size = (((u64)ctx->sst_size >> 8) - 1) << CXL_SSTP0_An_SegTableSize_SHIFT;
	if (unlikely(size & ~CXL_SSTP0_An_SegTableSize_MASK)) {
		WARN(1, "Impossible segment table size\n");
		return -EINVAL;
	}
	sstp0 |= size;

	if (mmu_kernel_ssize == MMU_SEGSIZE_256M)
		ea_mask = 0xfffff00ULL;
	else
		ea_mask = 0xffffffff00ULL;

	sstp0 |= vsid >> (50-14);	/* Top 14 bits of VSID */
	sstp1 |= (vsid << (64-(50-14))) & ~ea_mask;
	sstp1 |= (u64)ctx->sstp & ea_mask;
	sstp1 |= CXL_SSTP1_An_V;

	pr_devel("Looked up %#llx: slbfee. %#llx (ssize: %x, vsid: %#lx), copied to SSTP0: %#llx, SSTP1: %#llx\n",
			(u64)ctx->sstp, (u64)ctx->sstp & ESID_MASK, mmu_kernel_ssize, vsid, sstp0, sstp1);

	/* Store the calculated SSTP hardware register values for later use */
	ctx->sstp0 = sstp0;
	ctx->sstp1 = sstp1;

	return 0;
}

/* print buffer content as integers when debugging */
void cxl_dump_debug_buffer(void *buf, size_t buf_len)
{
	int i, *ptr;

	/*
	 * We want to regroup up to 4 integers per line, which means they
	 * need to be in the same pr_devel() statement
	 */
	ptr = (int *) buf;
	for (i = 0; i * 4 < buf_len; i += 4) {
		if ((i + 3) * 4 < buf_len)
			pr_devel("%.8x %.8x %.8x %.8x\n", ptr[i], ptr[i + 1],
				ptr[i + 2], ptr[i + 3]);
		else if ((i + 2) * 4 < buf_len)
			pr_devel("%.8x %.8x %.8x\n", ptr[i], ptr[i + 1],
				ptr[i + 2]);
		else if ((i + 1) * 4 < buf_len)
			pr_devel("%.8x %.8x\n", ptr[i], ptr[i + 1]);
		else
			pr_devel("%.8x\n", ptr[i]);
	}
}

/* Find a CXL adapter by its number and increase its refcount */
struct cxl *get_cxl_adapter(int num)
{
	struct cxl *adapter;

	spin_lock(&adapter_idr_lock);
	if ((adapter = idr_find(&cxl_adapter_idr, num)))
		get_device(&adapter->dev);
	spin_unlock(&adapter_idr_lock);

	return adapter;
}
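
/* Allocate a unique adapter number from the global adapter IDR */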
static int cxl_alloc_adapter_nr(struct cxl *adapter)
{
	int i;

	idr_preload(GFP_KERNEL);
	spin_lock(&adapter_idr_lock);
	i = idr_alloc(&cxl_adapter_idr, adapter, 0, 0, GFP_NOWAIT);
	spin_unlock(&adapter_idr_lock);
	idr_preload_end();
	if (i < 0)
		return i;

	adapter->adapter_num = i;
	return 0;
}

void cxl_remove_adapter_nr(struct cxl *adapter)
{
	idr_remove(&cxl_adapter_idr, adapter->adapter_num);
}
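
/*
 * Allocate and minimally initialise a struct cxl: AFU list lock, adapter
 * number and device name. The IDR entry is released again on failure.
 */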
struct cxl *cxl_alloc_adapter(void)
{
	struct cxl *adapter;

	if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL)))
		return NULL;

	spin_lock_init(&adapter->afu_list_lock);

	if (cxl_alloc_adapter_nr(adapter))
		goto err1;

	if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
		goto err2;

	return adapter;
err2:
	cxl_remove_adapter_nr(adapter);
err1:
	kfree(adapter);
	return NULL;
}
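
/*
 * Allocate a struct cxl_afu for one slice of the adapter and initialise
 * its locks, context IDR and default prefault/IRQ settings.
 */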
struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
{
	struct cxl_afu *afu;

	if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL)))
		return NULL;

	afu->adapter = adapter;
	afu->dev.parent = &adapter->dev;
	afu->dev.release = cxl_ops->release_afu;
	afu->slice = slice;
	idr_init(&afu->contexts_idr);
	mutex_init(&afu->contexts_lock);
	spin_lock_init(&afu->afu_cntl_lock);

	afu->prefault_mode = CXL_PREFAULT_NONE;
	afu->irqs_max = afu->adapter->user_irqs;

	return afu;
}
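
/*
 * Prefer AFU directed mode over dedicated process mode; if neither is
 * supported the AFU is left unconfigured but still visible in sysfs.
 */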
int cxl_afu_select_best_mode(struct cxl_afu *afu)
{
	if (afu->modes_supported & CXL_MODE_DIRECTED)
		return cxl_ops->afu_activate_mode(afu, CXL_MODE_DIRECTED);

	if (afu->modes_supported & CXL_MODE_DEDICATED)
		return cxl_ops->afu_activate_mode(afu, CXL_MODE_DEDICATED);

	dev_warn(&afu->dev, "No supported programming modes available\n");
	/* We don't fail this so the user can inspect sysfs */
	return 0;
}
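
/*
 * Module init: set up the character device layer, register the cxl_calls
 * hooks, then register the native PCI driver on bare metal (hypervisor
 * mode) or the guest platform driver on pseries.
 */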
static int __init init_cxl(void)
{
	int rc = 0;

	if ((rc = cxl_file_init()))
		return rc;

	cxl_debugfs_init();

	if ((rc = register_cxl_calls(&cxl_calls)))
		goto err;

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		cxl_ops = &cxl_native_ops;
		rc = pci_register_driver(&cxl_pci_driver);
	}
#ifdef CONFIG_PPC_PSERIES
	else {
		cxl_ops = &cxl_guest_ops;
		rc = platform_driver_register(&cxl_of_driver);
	}
#endif
	if (rc)
		goto err1;

	return 0;
err1:
	unregister_cxl_calls(&cxl_calls);
err:
	cxl_debugfs_exit();
	cxl_file_exit();
	return rc;
}
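
/* Module exit: unregister the active bus driver and tear down shared state */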
static void exit_cxl(void)
{
	if (cpu_has_feature(CPU_FTR_HVMODE))
		pci_unregister_driver(&cxl_pci_driver);
#ifdef CONFIG_PPC_PSERIES
	else
		platform_driver_unregister(&cxl_of_driver);
#endif

	cxl_debugfs_exit();
	cxl_file_exit();
	unregister_cxl_calls(&cxl_calls);
	idr_destroy(&cxl_adapter_idr);
}

module_init(init_cxl);
module_exit(exit_cxl);

MODULE_DESCRIPTION("IBM Coherent Accelerator");
MODULE_AUTHOR("Ian Munsie <imunsie@au1.ibm.com>");
MODULE_LICENSE("GPL");