/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/poll.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"
#include "trace.h"

#define CXL_NUM_MINORS 256 /* Total to reserve */

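/*
 * Minor number layout: each card claims a block of CXL_DEV_MINORS minors.
 * Minor 0 within a block is the card device itself; each AFU slice then
 * takes three consecutive minors for its dedicated ("d"), master ("m")
 * and shared ("s") character devices. The macros below encode that
 * layout, and CXL_DEVT_ADAPTER()/CXL_DEVT_AFU() invert it.
 */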
#define CXL_AFU_MINOR_D(afu) (CXL_CARD_MINOR(afu->adapter) + 1 + (3 * afu->slice))
#define CXL_AFU_MINOR_M(afu) (CXL_AFU_MINOR_D(afu) + 1)
#define CXL_AFU_MINOR_S(afu) (CXL_AFU_MINOR_D(afu) + 2)
#define CXL_AFU_MKDEV_D(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_D(afu))
#define CXL_AFU_MKDEV_M(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_M(afu))
#define CXL_AFU_MKDEV_S(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_S(afu))

#define CXL_DEVT_ADAPTER(dev) (MINOR(dev) / CXL_DEV_MINORS)
#define CXL_DEVT_AFU(dev) ((MINOR(dev) % CXL_DEV_MINORS - 1) / 3)

#define CXL_DEVT_IS_CARD(dev) (MINOR(dev) % CXL_DEV_MINORS == 0)

static dev_t cxl_dev;

static struct class *cxl_class;

static int __afu_open(struct inode *inode, struct file *file, bool master)
{
        struct cxl *adapter;
        struct cxl_afu *afu;
        struct cxl_context *ctx;
        int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev);
        int slice = CXL_DEVT_AFU(inode->i_rdev);
        int rc = -ENODEV;

        pr_devel("afu_open afu%i.%i\n", slice, adapter_num);

        if (!(adapter = get_cxl_adapter(adapter_num)))
                return -ENODEV;

        if (slice > adapter->slices)
                goto err_put_adapter;

        spin_lock(&adapter->afu_list_lock);
        if (!(afu = adapter->afu[slice])) {
                spin_unlock(&adapter->afu_list_lock);
                goto err_put_adapter;
        }

        /*
         * taking a ref to the afu so that it doesn't go away
         * for the rest of the function. This ref is released before
         * we return.
         */
        cxl_afu_get(afu);
        spin_unlock(&adapter->afu_list_lock);

        if (!afu->current_mode)
                goto err_put_afu;

        if (!cxl_ops->link_ok(adapter, afu)) {
                rc = -EIO;
                goto err_put_afu;
        }

        if (!(ctx = cxl_context_alloc())) {
                rc = -ENOMEM;
                goto err_put_afu;
        }

        rc = cxl_context_init(ctx, afu, master);
        if (rc)
                goto err_put_afu;

        cxl_context_set_mapping(ctx, inode->i_mapping);

        pr_devel("afu_open pe: %i\n", ctx->pe);
        file->private_data = ctx;
        cxl_ctx_get();

        /* indicate success */
        rc = 0;

err_put_afu:
        /* release the ref taken earlier */
        cxl_afu_put(afu);
err_put_adapter:
        put_device(&adapter->dev);
        return rc;
}

int afu_open(struct inode *inode, struct file *file)
{
        return __afu_open(inode, file, false);
}

static int afu_master_open(struct inode *inode, struct file *file)
{
        return __afu_open(inode, file, true);
}

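/*
 * The two open paths differ only in the "master" flag handed to
 * cxl_context_init(). For an AFU in directed mode, the master device
 * ("m") is conventionally the context with visibility of the AFU's
 * entire MMIO space, while shared ("s") contexts see only their own
 * per-process problem state area (hence CXL_AFUID_FLAG_SLAVE below);
 * the file API itself is identical for both.
 */
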
int afu_release(struct inode *inode, struct file *file)
{
        struct cxl_context *ctx = file->private_data;

        pr_devel("%s: closing cxl file descriptor. pe: %i\n",
                 __func__, ctx->pe);
        cxl_context_detach(ctx);

        /*
         * Delete the context's mapping pointer, unless it's created by the
         * kernel API, in which case leave it so it can be freed by reclaim_ctx()
         */
        if (!ctx->kernelapi) {
                mutex_lock(&ctx->mapping_lock);
                ctx->mapping = NULL;
                mutex_unlock(&ctx->mapping_lock);
        }

        /*
         * At this point all bottom halves have finished and we should be
         * getting no more IRQs from the hardware for this context. Once it's
         * removed from the IDR (and RCU synchronised) it's safe to free the
         * sstp and context.
         */
        cxl_context_free(ctx);

        return 0;
}

static long afu_ioctl_start_work(struct cxl_context *ctx,
                                 struct cxl_ioctl_start_work __user *uwork)
{
        struct cxl_ioctl_start_work work;
        u64 amr = 0;
        int rc;

        pr_devel("%s: pe: %i\n", __func__, ctx->pe);

        /* Do this outside the status_mutex to avoid a circular dependency with
         * the locking in cxl_mmap_fault() */
        if (copy_from_user(&work, uwork,
                           sizeof(struct cxl_ioctl_start_work))) {
                /* status_mutex is not held yet, so return rather than goto out */
                return -EFAULT;
        }

        mutex_lock(&ctx->status_mutex);
        if (ctx->status != OPENED) {
                rc = -EIO;
                goto out;
        }

        /*
         * if any of the reserved fields are set or any of the unused
         * flags are set it's invalid
         */
        if (work.reserved1 || work.reserved2 || work.reserved3 ||
            work.reserved4 || work.reserved5 || work.reserved6 ||
            (work.flags & ~CXL_START_WORK_ALL)) {
                rc = -EINVAL;
                goto out;
        }

        if (!(work.flags & CXL_START_WORK_NUM_IRQS))
                work.num_interrupts = ctx->afu->pp_irqs;
        else if ((work.num_interrupts < ctx->afu->pp_irqs) ||
                 (work.num_interrupts > ctx->afu->irqs_max)) {
                rc = -EINVAL;
                goto out;
        }

        if ((rc = afu_register_irqs(ctx, work.num_interrupts)))
                goto out;

        if (work.flags & CXL_START_WORK_AMR)
                amr = work.amr & mfspr(SPRN_UAMOR);

        ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);

        /*
         * Increment the mapped context count for adapter. This also checks
         * if adapter_context_lock is taken.
         */
        rc = cxl_adapter_context_get(ctx->afu->adapter);
        if (rc) {
                afu_release_irqs(ctx, ctx);
                goto out;
        }

        /*
         * We grab the PID here and not in the file open to allow for the case
         * where a process (master, some daemon, etc) has opened the chardev on
         * behalf of another process, so the AFU's mm gets bound to the process
         * that performs this ioctl and not the process that opened the file.
         * We also take a count on the mm_struct below, so that it remains
         * accessible even if the task that performed the attach exits.
         */
        ctx->pid = get_task_pid(current, PIDTYPE_PID);

        /* acquire a reference to the task's mm */
        ctx->mm = get_task_mm(current);

        /* ensure this mm_struct can't be freed */
        cxl_context_mm_count_get(ctx);

        /* decrement the use count taken above */
        if (ctx->mm)
                mmput(ctx->mm);

        trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);

        if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
                                          amr))) {
                afu_release_irqs(ctx, ctx);
                cxl_adapter_context_put(ctx->afu->adapter);
                put_pid(ctx->pid);
                ctx->pid = NULL;
                cxl_context_mm_count_put(ctx);
                goto out;
        }

        ctx->status = STARTED;
        rc = 0;
out:
        mutex_unlock(&ctx->status_mutex);
        return rc;
}

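/*
 * Illustrative sketch (not part of this file) of the userspace side of
 * the attach flow above, assuming the uapi definitions from misc/cxl.h;
 * the device path, WED encoding and problem state size ("wed", "ps_size")
 * are AFU-specific placeholders:
 *
 *	int fd = open("/dev/cxl/afu0.0s", O_RDWR);
 *	struct cxl_ioctl_start_work work = { 0 };
 *	work.work_element_descriptor = wed;
 *	if (ioctl(fd, CXL_IOCTL_START_WORK, &work) == -1)
 *		err(1, "CXL_IOCTL_START_WORK");
 *	// only once the context is STARTED can the problem state be mapped
 *	void *ps = mmap(NULL, ps_size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 */
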
static long afu_ioctl_process_element(struct cxl_context *ctx,
                                      __u32 __user *upe)
{
        pr_devel("%s: pe: %i\n", __func__, ctx->pe);

        if (copy_to_user(upe, &ctx->external_pe, sizeof(__u32)))
                return -EFAULT;

        return 0;
}

static long afu_ioctl_get_afu_id(struct cxl_context *ctx,
                                 struct cxl_afu_id __user *upafuid)
{
        struct cxl_afu_id afuid = { 0 };

        afuid.card_id = ctx->afu->adapter->adapter_num;
        afuid.afu_offset = ctx->afu->slice;
        afuid.afu_mode = ctx->afu->current_mode;

        /* set the flag bit in case the afu is a slave */
        if (ctx->afu->current_mode == CXL_MODE_DIRECTED && !ctx->master)
                afuid.flags |= CXL_AFUID_FLAG_SLAVE;

        if (copy_to_user(upafuid, &afuid, sizeof(afuid)))
                return -EFAULT;

        return 0;
}

long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct cxl_context *ctx = file->private_data;

        if (ctx->status == CLOSED)
                return -EIO;

        if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
                return -EIO;

        pr_devel("afu_ioctl\n");
        switch (cmd) {
        case CXL_IOCTL_START_WORK:
                return afu_ioctl_start_work(ctx, (struct cxl_ioctl_start_work __user *)arg);
        case CXL_IOCTL_GET_PROCESS_ELEMENT:
                return afu_ioctl_process_element(ctx, (__u32 __user *)arg);
        case CXL_IOCTL_GET_AFU_ID:
                return afu_ioctl_get_afu_id(ctx, (struct cxl_afu_id __user *)
                                            arg);
        }
        return -EINVAL;
}

static long afu_compat_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
{
        return afu_ioctl(file, cmd, arg);
}

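/*
 * No compat translation is needed: the cxl ioctl payloads are declared
 * in the uapi header with fixed-size types (see the BUILD_BUG_ON size
 * checks in cxl_file_init()), so 32-bit and 64-bit userspace share the
 * same layout and the compat path simply forwards to afu_ioctl().
 */
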
int afu_mmap(struct file *file, struct vm_area_struct *vm)
{
        struct cxl_context *ctx = file->private_data;

        /* AFU must be started before we can MMIO */
        if (ctx->status != STARTED)
                return -EIO;

        if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
                return -EIO;

        return cxl_context_iomap(ctx, vm);
}

static inline bool ctx_event_pending(struct cxl_context *ctx)
{
        if (ctx->pending_irq || ctx->pending_fault || ctx->pending_afu_err)
                return true;

        if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events))
                return true;

        return false;
}

unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
{
        struct cxl_context *ctx = file->private_data;
        int mask = 0;
        unsigned long flags;

        poll_wait(file, &ctx->wq, poll);

        pr_devel("afu_poll wait done pe: %i\n", ctx->pe);

        spin_lock_irqsave(&ctx->lock, flags);
        if (ctx_event_pending(ctx))
                mask |= POLLIN | POLLRDNORM;
        else if (ctx->status == CLOSED)
                /* Only error on closed when there are no further events pending */
                mask |= POLLERR;
        spin_unlock_irqrestore(&ctx->lock, flags);

        pr_devel("afu_poll pe: %i returning %#x\n", ctx->pe, mask);

        return mask;
}

static ssize_t afu_driver_event_copy(struct cxl_context *ctx,
                                     char __user *buf,
                                     struct cxl_event *event,
                                     struct cxl_event_afu_driver_reserved *pl)
{
        /* Check event */
        if (!pl) {
                ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
                return -EFAULT;
        }

        /* Check event size */
        event->header.size += pl->data_size;
        if (event->header.size > CXL_READ_MIN_SIZE) {
                ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
                return -EFAULT;
        }

        /* Copy event header */
        if (copy_to_user(buf, event, sizeof(struct cxl_event_header))) {
                ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
                return -EFAULT;
        }

        /* Copy event data */
        buf += sizeof(struct cxl_event_header);
        if (copy_to_user(buf, &pl->data, pl->data_size)) {
                ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
                return -EFAULT;
        }

        ctx->afu_driver_ops->event_delivered(ctx, pl, 0); /* Success */
        return event->header.size;
}

ssize_t afu_read(struct file *file, char __user *buf, size_t count,
                 loff_t *off)
{
        struct cxl_context *ctx = file->private_data;
        struct cxl_event_afu_driver_reserved *pl = NULL;
        struct cxl_event event;
        unsigned long flags;
        int rc;
        DEFINE_WAIT(wait);

        if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
                return -EIO;

        if (count < CXL_READ_MIN_SIZE)
                return -EINVAL;

        spin_lock_irqsave(&ctx->lock, flags);

        for (;;) {
                prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE);
                if (ctx_event_pending(ctx) || (ctx->status == CLOSED))
                        break;

                if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
                        rc = -EIO;
                        goto out;
                }

                if (file->f_flags & O_NONBLOCK) {
                        rc = -EAGAIN;
                        goto out;
                }

                if (signal_pending(current)) {
                        rc = -ERESTARTSYS;
                        goto out;
                }

                spin_unlock_irqrestore(&ctx->lock, flags);
                pr_devel("afu_read going to sleep...\n");
                schedule();
                pr_devel("afu_read woken up\n");
                spin_lock_irqsave(&ctx->lock, flags);
        }

        finish_wait(&ctx->wq, &wait);

        memset(&event, 0, sizeof(event));
        event.header.process_element = ctx->pe;
        event.header.size = sizeof(struct cxl_event_header);
        if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events)) {
                pr_devel("afu_read delivering AFU driver specific event\n");
                pl = ctx->afu_driver_ops->fetch_event(ctx);
                atomic_dec(&ctx->afu_driver_events);
                event.header.type = CXL_EVENT_AFU_DRIVER;
        } else if (ctx->pending_irq) {
                pr_devel("afu_read delivering AFU interrupt\n");
                event.header.size += sizeof(struct cxl_event_afu_interrupt);
                event.header.type = CXL_EVENT_AFU_INTERRUPT;
                event.irq.irq = find_first_bit(ctx->irq_bitmap, ctx->irq_count) + 1;
                clear_bit(event.irq.irq - 1, ctx->irq_bitmap);
                if (bitmap_empty(ctx->irq_bitmap, ctx->irq_count))
                        ctx->pending_irq = false;
        } else if (ctx->pending_fault) {
                pr_devel("afu_read delivering data storage fault\n");
                event.header.size += sizeof(struct cxl_event_data_storage);
                event.header.type = CXL_EVENT_DATA_STORAGE;
                event.fault.addr = ctx->fault_addr;
                event.fault.dsisr = ctx->fault_dsisr;
                ctx->pending_fault = false;
        } else if (ctx->pending_afu_err) {
                pr_devel("afu_read delivering afu error\n");
                event.header.size += sizeof(struct cxl_event_afu_error);
                event.header.type = CXL_EVENT_AFU_ERROR;
                event.afu_error.error = ctx->afu_err;
                ctx->pending_afu_err = false;
        } else if (ctx->status == CLOSED) {
                pr_devel("afu_read fatal error\n");
                spin_unlock_irqrestore(&ctx->lock, flags);
                return -EIO;
        } else
                WARN(1, "afu_read must be buggy\n");

        spin_unlock_irqrestore(&ctx->lock, flags);

        if (event.header.type == CXL_EVENT_AFU_DRIVER)
                return afu_driver_event_copy(ctx, buf, &event, pl);

        if (copy_to_user(buf, &event, event.header.size))
                return -EFAULT;
        return event.header.size;

out:
        finish_wait(&ctx->wq, &wait);
        spin_unlock_irqrestore(&ctx->lock, flags);
        return rc;
}

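/*
 * Delivery priority implemented above: AFU-driver-specific events drain
 * first, then pending AFU interrupts (lowest numbered IRQ first), then
 * data storage faults, then AFU errors. A CLOSED context ends the stream
 * with -EIO once nothing else is pending. Each read() returns exactly one
 * event, which is why callers must supply at least CXL_READ_MIN_SIZE.
 */
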
/*
 * Note: if this is updated, we need to update api.c to patch the new ones in
 * too
 */
const struct file_operations afu_fops = {
        .owner          = THIS_MODULE,
        .open           = afu_open,
        .poll           = afu_poll,
        .read           = afu_read,
        .release        = afu_release,
        .unlocked_ioctl = afu_ioctl,
        .compat_ioctl   = afu_compat_ioctl,
        .mmap           = afu_mmap,
};

static const struct file_operations afu_master_fops = {
        .owner          = THIS_MODULE,
        .open           = afu_master_open,
        .poll           = afu_poll,
        .read           = afu_read,
        .release        = afu_release,
        .unlocked_ioctl = afu_ioctl,
        .compat_ioctl   = afu_compat_ioctl,
        .mmap           = afu_mmap,
};

static char *cxl_devnode(struct device *dev, umode_t *mode)
{
        if (cpu_has_feature(CPU_FTR_HVMODE) &&
            CXL_DEVT_IS_CARD(dev->devt)) {
                /*
                 * These minor numbers will eventually be used to program the
                 * PSL and AFUs once we have dynamic reprogramming support
                 */
                return NULL;
        }
        return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
}

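/*
 * With this devnode callback the AFU nodes appear under /dev/cxl/
 * (e.g. /dev/cxl/afu0.0d, /dev/cxl/afu0.0m, /dev/cxl/afu0.0s). Returning
 * NULL for the card device on bare-metal (HV mode) leaves it with the
 * default device node name until dynamic reprogramming support exists.
 */
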
extern struct class *cxl_class;

static int cxl_add_chardev(struct cxl_afu *afu, dev_t devt, struct cdev *cdev,
                           struct device **chardev, char *postfix, char *desc,
                           const struct file_operations *fops)
{
        struct device *dev;
        int rc;

        cdev_init(cdev, fops);
        if ((rc = cdev_add(cdev, devt, 1))) {
                dev_err(&afu->dev, "Unable to add %s chardev: %i\n", desc, rc);
                return rc;
        }

        dev = device_create(cxl_class, &afu->dev, devt, afu,
                            "afu%i.%i%s", afu->adapter->adapter_num, afu->slice, postfix);
        if (IS_ERR(dev)) {
                /* set rc before logging so the message shows the real error */
                rc = PTR_ERR(dev);
                dev_err(&afu->dev, "Unable to create %s chardev in sysfs: %i\n", desc, rc);
                goto err;
        }

        *chardev = dev;

        return 0;
err:
        cdev_del(cdev);
        return rc;
}

int cxl_chardev_d_afu_add(struct cxl_afu *afu)
{
        return cxl_add_chardev(afu, CXL_AFU_MKDEV_D(afu), &afu->afu_cdev_d,
                               &afu->chardev_d, "d", "dedicated",
                               &afu_master_fops); /* Uses master fops */
}

int cxl_chardev_m_afu_add(struct cxl_afu *afu)
{
        return cxl_add_chardev(afu, CXL_AFU_MKDEV_M(afu), &afu->afu_cdev_m,
                               &afu->chardev_m, "m", "master",
                               &afu_master_fops);
}

int cxl_chardev_s_afu_add(struct cxl_afu *afu)
{
        return cxl_add_chardev(afu, CXL_AFU_MKDEV_S(afu), &afu->afu_cdev_s,
                               &afu->chardev_s, "s", "shared",
                               &afu_fops);
}

void cxl_chardev_afu_remove(struct cxl_afu *afu)
{
        if (afu->chardev_d) {
                cdev_del(&afu->afu_cdev_d);
                device_unregister(afu->chardev_d);
                afu->chardev_d = NULL;
        }
        if (afu->chardev_m) {
                cdev_del(&afu->afu_cdev_m);
                device_unregister(afu->chardev_m);
                afu->chardev_m = NULL;
        }
        if (afu->chardev_s) {
                cdev_del(&afu->afu_cdev_s);
                device_unregister(afu->chardev_s);
                afu->chardev_s = NULL;
        }
}

int cxl_register_afu(struct cxl_afu *afu)
{
        afu->dev.class = cxl_class;

        return device_register(&afu->dev);
}

int cxl_register_adapter(struct cxl *adapter)
{
        adapter->dev.class = cxl_class;

        /*
         * Future: When we support dynamically reprogramming the PSL & AFU we
         * will expose the interface to do that via a chardev:
         * adapter->dev.devt = CXL_CARD_MKDEV(adapter);
         */

        return device_register(&adapter->dev);
}

dev_t cxl_get_dev(void)
{
        return cxl_dev;
}

int __init cxl_file_init(void)
{
        int rc;

        /*
         * If these change we really need to update API. Either change some
         * flags or update API version number CXL_API_VERSION.
         */
        BUILD_BUG_ON(CXL_API_VERSION != 3);
        BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64);
        BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8);
        BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8);
        BUILD_BUG_ON(sizeof(struct cxl_event_data_storage) != 32);
        BUILD_BUG_ON(sizeof(struct cxl_event_afu_error) != 16);

        if ((rc = alloc_chrdev_region(&cxl_dev, 0, CXL_NUM_MINORS, "cxl"))) {
                pr_err("Unable to allocate CXL major number: %i\n", rc);
                return rc;
        }

        pr_devel("CXL device allocated, MAJOR %i\n", MAJOR(cxl_dev));

        cxl_class = class_create(THIS_MODULE, "cxl");
        if (IS_ERR(cxl_class)) {
                pr_err("Unable to create CXL class\n");
                rc = PTR_ERR(cxl_class);
                goto err;
        }
        cxl_class->devnode = cxl_devnode;

        return 0;

err:
        unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
        return rc;
}

void cxl_file_exit(void)
{
        unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
        class_destroy(cxl_class);
}