/*
 * Copyright 2015 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>

#include "cxl.h"
#include "hcalls.h"
#include "trace.h"
#define CXL_ERROR_DETECTED_EVENT	1
#define CXL_SLOT_RESET_EVENT		2
#define CXL_RESUME_EVENT		3
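
/*
 * Notify the AFU drivers sitting on the virtual PHB of a bus error
 * event, by invoking the matching PCI error handler (error_detected,
 * slot_reset or resume) of each device's driver, when registered.
 */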
static void pci_error_handlers(struct cxl_afu *afu,
				int bus_error_event,
				pci_channel_state_t state)
{
	struct pci_dev *afu_dev;

	if (afu->phb == NULL)
		return;

	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
		if (!afu_dev->driver)
			continue;

		switch (bus_error_event) {
		case CXL_ERROR_DETECTED_EVENT:
			afu_dev->error_state = state;

			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->error_detected)
				afu_dev->driver->err_handler->error_detected(afu_dev, state);
			break;
		case CXL_SLOT_RESET_EVENT:
			afu_dev->error_state = state;

			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->slot_reset)
				afu_dev->driver->err_handler->slot_reset(afu_dev);
			break;
		case CXL_RESUME_EVENT:
			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->resume)
				afu_dev->driver->err_handler->resume(afu_dev);
		}
	}
}
static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr,
						u64 errstat)
{
	pr_devel("in %s\n", __func__);
	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);

	return cxl_ops->ack_irq(ctx, 0, errstat);
}
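
/*
 * Collect the VPD (Vital Product Data) of the adapter or of one AFU
 * via the cxl_h_collect_vpd_adapter()/cxl_h_collect_vpd() hypervisor
 * calls. The data is gathered through a scatter/gather list of zeroed
 * pages and copied back into the caller's buffer. Returns the number
 * of bytes copied, or a negative error code.
 */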
static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu,
			void *buf, size_t len)
{
	unsigned int entries, mod;
	unsigned long **vpd_buf = NULL;
	struct sg_list *le;
	int rc = 0, i, tocopy;
	u64 out = 0;

	if (buf == NULL)
		return -EINVAL;

	/* number of entries in the list */
	entries = len / SG_BUFFER_SIZE;
	mod = len % SG_BUFFER_SIZE;
	if (mod)
		entries++;

	if (entries > SG_MAX_ENTRIES) {
		entries = SG_MAX_ENTRIES;
		len = SG_MAX_ENTRIES * SG_BUFFER_SIZE;
		mod = 0;
	}

	vpd_buf = kzalloc(entries * sizeof(unsigned long *), GFP_KERNEL);
	if (!vpd_buf)
		return -ENOMEM;

	le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
	if (!le) {
		rc = -ENOMEM;
		goto err1;
	}

	for (i = 0; i < entries; i++) {
		vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
		if (!vpd_buf[i]) {
			rc = -ENOMEM;
			goto err2;
		}
		le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i]));
		le[i].len = cpu_to_be64(SG_BUFFER_SIZE);
		if ((i == (entries - 1)) && mod)
			le[i].len = cpu_to_be64(mod);
	}

	if (adapter)
		rc = cxl_h_collect_vpd_adapter(adapter->guest->handle,
					virt_to_phys(le), entries, &out);
	else
		rc = cxl_h_collect_vpd(afu->guest->handle, 0,
				virt_to_phys(le), entries, &out);
	pr_devel("length of available (entries: %i), vpd: %#llx\n",
		entries, out);

	if (!rc) {
		/*
		 * hcall returns in 'out' the size of available VPDs.
		 * It fills the buffer with as much data as possible.
		 */
		if (out < len)
			len = out;
		rc = len;
		if (out) {
			for (i = 0; i < entries; i++) {
				if (len < SG_BUFFER_SIZE)
					tocopy = len;
				else
					tocopy = SG_BUFFER_SIZE;
				memcpy(buf, vpd_buf[i], tocopy);
				buf += tocopy;
				len -= tocopy;
			}
		}
	}
err2:
	for (i = 0; i < entries; i++) {
		if (vpd_buf[i])
			free_page((unsigned long) vpd_buf[i]);
	}
	free_page((unsigned long) le);
err1:
	kfree(vpd_buf);
	return rc;
}
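
/* Ask the hypervisor for the interrupt information of a context */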
static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info)
{
	return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info);
}
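
/*
 * Handler for AFU interrupts: fetch the interrupt details from the
 * hypervisor and hand them over to the common cxl_irq() code.
 */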
static irqreturn_t guest_psl_irq(int irq, void *data)
{
	struct cxl_context *ctx = data;
	struct cxl_irq_info irq_info;
	int rc;

	pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq);
	rc = guest_get_irq_info(ctx, &irq_info);
	if (rc) {
		WARN(1, "Unable to get IRQ info: %i\n", rc);
		return IRQ_HANDLED;
	}

	rc = cxl_irq(irq, ctx, &irq_info);
	return rc;
}
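
/*
 * Read the AFU error state from the hypervisor. The returned state is
 * expected to be one of the four H_STATE_* values.
 */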
static int afu_read_error_state(struct cxl_afu *afu, int *state_out)
{
	u64 state;
	int rc = 0;

	if (!afu)
		return -EIO;

	rc = cxl_h_read_error_state(afu->guest->handle, &state);
	if (!rc) {
		WARN_ON(state != H_STATE_NORMAL &&
			state != H_STATE_DISABLE &&
			state != H_STATE_TEMP_UNAVAILABLE &&
			state != H_STATE_PERM_UNAVAILABLE);
		*state_out = state & 0xffffffff;
	}
	return rc;
}
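
/* Handler for the slice error interrupt: log and acknowledge PSL_SERR_An */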
static irqreturn_t guest_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	int rc;
	u64 serr;

	WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);
	rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr);
	if (rc) {
		dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc);
		return IRQ_HANDLED;
	}
	dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);

	rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr);
	if (rc)
		dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n",
			rc);

	return IRQ_HANDLED;
}
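
/*
 * Allocate a contiguous range of 'len' IRQs from the ranges donated by
 * the hypervisor. The caller must hold the IRQ allocation lock.
 */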
static int irq_alloc_range(struct cxl *adapter, int len, int *irq)
{
	int i, n;
	struct irq_avail *cur;

	for (i = 0; i < adapter->guest->irq_nranges; i++) {
		cur = &adapter->guest->irq_avail[i];
		n = bitmap_find_next_zero_area(cur->bitmap, cur->range,
					0, len, 0);
		if (n < cur->range) {
			bitmap_set(cur->bitmap, n, len);
			*irq = cur->offset + n;
			pr_devel("guest: allocate IRQs %#x->%#x\n",
				*irq, *irq + len - 1);

			return 0;
		}
	}
	return -ENOSPC;
}
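
/* Return a previously allocated IRQ range to the bitmap allocator */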
static int irq_free_range(struct cxl *adapter, int irq, int len)
{
	int i, n;
	struct irq_avail *cur;

	if (len == 0)
		return -ENOENT;

	for (i = 0; i < adapter->guest->irq_nranges; i++) {
		cur = &adapter->guest->irq_avail[i];
		if (irq >= cur->offset &&
			(irq + len) <= (cur->offset + cur->range)) {
			n = irq - cur->offset;
			bitmap_clear(cur->bitmap, n, len);
			pr_devel("guest: release IRQs %#x->%#x\n",
				irq, irq + len - 1);
			return 0;
		}
	}
	return -ENOENT;
}
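
/*
 * Reset the adapter: report the device as frozen to the AFU drivers,
 * detach all contexts, issue the reset hcall, then signal slot reset
 * and resume events if the reset succeeded.
 */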
static int guest_reset(struct cxl *adapter)
{
	struct cxl_afu *afu = NULL;
	int i, rc;

	pr_devel("Adapter reset request\n");
	for (i = 0; i < adapter->slices; i++) {
		if ((afu = adapter->afu[i])) {
			pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
					pci_channel_io_frozen);
			cxl_context_detach_all(afu);
		}
	}

	rc = cxl_h_reset_adapter(adapter->guest->handle);
	for (i = 0; i < adapter->slices; i++) {
		if (!rc && (afu = adapter->afu[i])) {
			pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
					pci_channel_io_normal);
			pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
		}
	}
	return rc;
}
static int guest_alloc_one_irq(struct cxl *adapter)
{
	int irq;

	spin_lock(&adapter->guest->irq_alloc_lock);
	if (irq_alloc_range(adapter, 1, &irq))
		irq = -ENOSPC;
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return irq;
}
static void guest_release_one_irq(struct cxl *adapter, int irq)
{
	spin_lock(&adapter->guest->irq_alloc_lock);
	irq_free_range(adapter, irq, 1);
	spin_unlock(&adapter->guest->irq_alloc_lock);
}
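
/*
 * Allocate 'num' IRQs, split across up to CXL_IRQ_RANGES contiguous
 * ranges. The requested size is halved until an allocation succeeds;
 * on failure, everything allocated so far is released.
 */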
static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
				struct cxl *adapter, unsigned int num)
{
	int i, try, irq;

	memset(irqs, 0, sizeof(struct cxl_irq_ranges));

	spin_lock(&adapter->guest->irq_alloc_lock);
	for (i = 0; i < CXL_IRQ_RANGES && num; i++) {
		try = num;
		while (try) {
			if (irq_alloc_range(adapter, try, &irq) == 0)
				break;
			try /= 2;
		}
		if (!try)
			goto error;
		irqs->offset[i] = irq;
		irqs->range[i] = try;
		num -= try;
	}
	if (num)
		goto error;
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return 0;

error:
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return -ENOSPC;
}
static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs,
				struct cxl *adapter)
{
	int i;

	spin_lock(&adapter->guest->irq_alloc_lock);
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
	spin_unlock(&adapter->guest->irq_alloc_lock);
}
static int guest_register_serr_irq(struct cxl_afu *afu)
{
	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&afu->dev));
	if (!afu->err_irq_name)
		return -ENOMEM;

	if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq,
				 guest_slice_irq_err, afu, afu->err_irq_name))) {
		kfree(afu->err_irq_name);
		afu->err_irq_name = NULL;
		return -ENOMEM;
	}

	return 0;
}
static void guest_release_serr_irq(struct cxl_afu *afu)
{
	cxl_unmap_irq(afu->serr_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
	kfree(afu->err_irq_name);
}
static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token,
				tfc >> 32, (psl_reset_mask != 0));
}
static void disable_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice);
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			disable_irq(virq);
		}
	}
}
static void enable_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice);
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			enable_irq(virq);
		}
	}
}
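
/*
 * Read 'sz' bytes (1, 2, 4 or 8) from an AFU configuration record.
 * The hypervisor copies the data into a scratch page, from which the
 * value is then converted from little-endian.
 */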
static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx,
			u64 offset, u64 *val)
{
	unsigned long cr;
	char c;
	int rc = 0;

	if (afu->crs_len < sz)
		return -ENOENT;

	if (unlikely(offset >= afu->crs_len))
		return -ERANGE;

	cr = get_zeroed_page(GFP_KERNEL);
	if (!cr)
		return -ENOMEM;

	rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset,
			virt_to_phys((void *)cr), sz);
	if (rc)
		goto err;

	switch (sz) {
	case 1:
		c = *((char *) cr);
		*val = c;
		break;
	case 2:
		*val = in_le16((u16 *)cr);
		break;
	case 4:
		*val = in_le32((unsigned *)cr);
		break;
	case 8:
		*val = in_le64((u64 *)cr);
		break;
	default:
		WARN_ON(1);
	}
err:
	free_page(cr);
	return rc;
}
static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset,
			u32 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u32) val;
	return rc;
}

static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset,
			u16 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u16) val;
	return rc;
}

static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset,
			u8 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u8) val;
	return rc;
}

static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset,
			u64 *out)
{
	return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out);
}
static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}

static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}

static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}
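
/*
 * Build a process element describing the context (translation mode,
 * PID, segment table pointers, interrupt bitmap, AMR, WED) and attach
 * it to the AFU through the hypervisor.
 */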
static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_process_element_hcall *elem;
	struct cxl *adapter = ctx->afu->adapter;
	const struct cred *cred;
	u32 pid, idx;
	int rc, r, i;
	u64 mmio_addr, mmio_size;
	u64 flags = 0;

	/* Must be 8 byte aligned and cannot cross a 4096 byte boundary */
	if (!(elem = (struct cxl_process_element_hcall *)
			get_zeroed_page(GFP_KERNEL)))
		return -ENOMEM;

	elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION);
	if (ctx->kernel) {
		pid = 0;
		flags |= CXL_PE_TRANSLATION_ENABLED;
		flags |= CXL_PE_PRIVILEGED_PROCESS;
		if (mfmsr() & MSR_SF)
			flags |= CXL_PE_64_BIT;
	} else {
		pid = current->pid;
		flags |= CXL_PE_PROBLEM_STATE;
		flags |= CXL_PE_TRANSLATION_ENABLED;
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			flags |= CXL_PE_64_BIT;
		cred = get_current_cred();
		if (uid_eq(cred->euid, GLOBAL_ROOT_UID))
			flags |= CXL_PE_PRIVILEGED_PROCESS;
		put_cred(cred);
	}
	elem->flags         = cpu_to_be64(flags);
	elem->common.tid    = cpu_to_be32(0); /* Unused */
	elem->common.pid    = cpu_to_be32(pid);
	elem->common.csrp   = cpu_to_be64(0); /* disable */
	elem->common.aurp0  = cpu_to_be64(0); /* disable */
	elem->common.aurp1  = cpu_to_be64(0); /* disable */

	cxl_prefault(ctx, wed);

	elem->common.sstp0  = cpu_to_be64(ctx->sstp0);
	elem->common.sstp1  = cpu_to_be64(ctx->sstp1);

	/*
	 * Ensure we have at least one interrupt allocated to take faults for
	 * kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		rc = afu_register_irqs(ctx, 0);
		if (rc)
			goto out_free;
	}

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		for (i = 0; i < ctx->irqs.range[r]; i++) {
			if (r == 0 && i == 0) {
				elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]);
			} else {
				idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset;
				elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8);
			}
		}
	}
	elem->common.amr = cpu_to_be64(amr);
	elem->common.wed = cpu_to_be64(wed);

	disable_afu_irqs(ctx);

	rc = cxl_h_attach_process(ctx->afu->guest->handle, elem,
				&ctx->process_token, &mmio_addr, &mmio_size);
	if (rc == H_SUCCESS) {
		if (ctx->master || !ctx->afu->pp_psa) {
			ctx->psn_phys = ctx->afu->psn_phys;
			ctx->psn_size = ctx->afu->adapter->ps_size;
		} else {
			ctx->psn_phys = mmio_addr;
			ctx->psn_size = mmio_size;
		}
		if (ctx->afu->pp_psa && mmio_size &&
			ctx->afu->pp_size == 0) {
			/*
			 * There's no property in the device tree to read the
			 * pp_size. We only find out at the 1st attach.
			 * Compared to bare-metal, it is too late and we
			 * should really lock here. However, on powerVM,
			 * pp_size is really only used to display in /sys.
			 * Being discussed with pHyp for their next release.
			 */
			ctx->afu->pp_size = mmio_size;
		}
		/* from PAPR: process element is bytes 4-7 of process token */
		ctx->external_pe = ctx->process_token & 0xFFFFFFFF;
		pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx",
			ctx->pe, ctx->external_pe, ctx->psn_size);
		ctx->pe_inserted = true;
		enable_afu_irqs(ctx);
	}

out_free:
	free_page((u64)elem);
	return rc;
}
static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
{
	pr_devel("in %s\n", __func__);

	ctx->kernel = kernel;
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return attach_afu_directed(ctx, wed, amr);

	/* dedicated mode not supported on FW840 */

	return -EINVAL;
}
static int detach_afu_directed(struct cxl_context *ctx)
{
	if (!ctx->pe_inserted)
		return 0;
	if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token))
		return -1;
	return 0;
}
static int guest_detach_process(struct cxl_context *ctx)
{
	pr_devel("in %s\n", __func__);
	trace_cxl_detach(ctx);

	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		return -EIO;

	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return detach_afu_directed(ctx);

	return -EINVAL;
}
static void guest_release_afu(struct device *dev)
{
	struct cxl_afu *afu = to_cxl_afu(dev);

	pr_devel("%s\n", __func__);

	idr_destroy(&afu->contexts_idr);

	kfree(afu->guest);
	kfree(afu);
}
ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len)
{
	return guest_collect_vpd(NULL, afu, buf, len);
}
#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
					loff_t off, size_t count)
{
	void *tbuf = NULL;
	int rc = 0;

	tbuf = (void *) get_zeroed_page(GFP_KERNEL);
	if (!tbuf)
		return -ENOMEM;

	rc = cxl_h_get_afu_err(afu->guest->handle,
			       off & 0x7,
			       virt_to_phys(tbuf),
			       count);
	if (rc)
		goto err;

	if (count > ERR_BUFF_MAX_COPY_SIZE)
		count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
	memcpy(buf, tbuf, count);
err:
	free_page((u64)tbuf);
	return rc;
}
static int guest_afu_check_and_enable(struct cxl_afu *afu)
{
	return 0;
}
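
/*
 * Hide the sysfs attributes that do not apply when running as a guest:
 * image loading is under hypervisor control, and the master AFU MMIO
 * offset is not available.
 */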
static bool guest_support_attributes(const char *attr_name,
				     enum cxl_attrs type)
{
	switch (type) {
	case CXL_ADAPTER_ATTRS:
		if ((strcmp(attr_name, "base_image") == 0) ||
			(strcmp(attr_name, "load_image_on_perst") == 0) ||
			(strcmp(attr_name, "perst_reloads_same_image") == 0) ||
			(strcmp(attr_name, "image_loaded") == 0))
			return false;
		break;
	case CXL_AFU_MASTER_ATTRS:
		if ((strcmp(attr_name, "pp_mmio_off") == 0))
			return false;
		break;
	case CXL_AFU_ATTRS:
		break;
	default:
		break;
	}

	return true;
}
static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice);

	afu->current_mode = CXL_MODE_DIRECTED;

	afu->num_procs = afu->max_procs_virtualised;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		goto err;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err1;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err2;

	return 0;
err2:
	cxl_sysfs_afu_m_remove(afu);
err1:
	cxl_chardev_afu_remove(afu);
err:
	return rc;
}
static int guest_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);

	if (mode == CXL_MODE_DEDICATED)
		dev_err(&afu->dev, "Dedicated mode not supported\n");

	return -EINVAL;
}
static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice);

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	cxl_ops->afu_reset(afu);

	return 0;
}
static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	return 0;
}
static int guest_afu_reset(struct cxl_afu *afu)
{
	pr_devel("AFU(%d) reset request\n", afu->slice);
	return cxl_h_reset_afu(afu->guest->handle);
}
static int guest_map_slice_regs(struct cxl_afu *afu)
{
	if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) {
		dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n",
			afu->slice);
		return -ENOMEM;
	}
	return 0;
}
static void guest_unmap_slice_regs(struct cxl_afu *afu)
{
	if (afu->p2n_mmio)
		iounmap(afu->p2n_mmio);
}
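
/*
 * Re-read the AFU error state and react to changes: freeze, detach and
 * reset on H_STATE_DISABLE, report a permanent failure on
 * H_STATE_PERM_UNAVAILABLE. Called from the error work queue.
 */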
static int afu_update_state(struct cxl_afu *afu)
{
	int rc, cur_state;

	rc = afu_read_error_state(afu, &cur_state);
	if (rc)
		return rc;

	if (afu->guest->previous_state == cur_state)
		return 0;

	pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state);

	switch (cur_state) {
	case H_STATE_NORMAL:
		afu->guest->previous_state = cur_state;
		break;

	case H_STATE_DISABLE:
		pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
				pci_channel_io_frozen);

		cxl_context_detach_all(afu);
		if ((rc = cxl_ops->afu_reset(afu)))
			pr_devel("reset hcall failed %d\n", rc);

		rc = afu_read_error_state(afu, &cur_state);
		if (!rc && cur_state == H_STATE_NORMAL) {
			pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
					pci_channel_io_normal);
			pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
		}
		afu->guest->previous_state = 0;
		break;

	case H_STATE_TEMP_UNAVAILABLE:
		afu->guest->previous_state = cur_state;
		break;

	case H_STATE_PERM_UNAVAILABLE:
		dev_err(&afu->dev, "AFU is in permanent error state\n");
		pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
				pci_channel_io_perm_failure);
		afu->guest->previous_state = cur_state;
		break;

	default:
		pr_err("Unexpected AFU(%d) error state: %#x\n",
		       afu->slice, cur_state);
		return -EINVAL;
	}

	return rc;
}
static void afu_handle_errstate(struct work_struct *work)
{
	struct cxl_afu_guest *afu_guest =
		container_of(to_delayed_work(work), struct cxl_afu_guest, work_err);

	if (!afu_update_state(afu_guest->parent) &&
	    afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE)
		return;

	if (afu_guest->handle_err == true)
		schedule_delayed_work(&afu_guest->work_err,
				      msecs_to_jiffies(3000));
}
static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu)
{
	int state;

	if (afu && (!afu_read_error_state(afu, &state))) {
		if (state == H_STATE_NORMAL)
			return true;
	}

	return false;
}
static int afu_properties_look_ok(struct cxl_afu *afu)
{
	if (afu->pp_irqs < 0) {
		dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n");
		return -EINVAL;
	}

	if (afu->max_procs_virtualised < 1) {
		dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n");
		return -EINVAL;
	}

	if (afu->crs_len < 0) {
		dev_err(&afu->dev, "Unexpected configuration record size value\n");
		return -EINVAL;
	}

	return 0;
}
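
/*
 * Initialize one AFU slice: read its handle and properties from the
 * device tree, map the MMIO region, register the slice error interrupt
 * and start the periodic error-state polling job.
 */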
int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np)
{
	struct cxl_afu *afu;
	bool free = true;
	int rc;

	pr_devel("in %s - AFU(%d)\n", __func__, slice);
	if (!(afu = cxl_alloc_afu(adapter, slice)))
		return -ENOMEM;

	if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) {
		kfree(afu);
		return -ENOMEM;
	}

	if ((rc = dev_set_name(&afu->dev, "afu%i.%i",
					  adapter->adapter_num,
					  slice)))
		goto err1;

	adapter->slices++;

	if ((rc = cxl_of_read_afu_handle(afu, afu_np)))
		goto err1;

	if ((rc = cxl_ops->afu_reset(afu)))
		goto err1;

	if ((rc = cxl_of_read_afu_properties(afu, afu_np)))
		goto err1;

	if ((rc = afu_properties_look_ok(afu)))
		goto err1;

	if ((rc = guest_map_slice_regs(afu)))
		goto err1;

	if ((rc = guest_register_serr_irq(afu)))
		goto err2;

	/*
	 * After we call this function we must not free the afu directly, even
	 * if it returns an error!
	 */
	if ((rc = cxl_register_afu(afu)))
		goto err_put1;

	if ((rc = cxl_sysfs_afu_add(afu)))
		goto err_put1;

	/*
	 * pHyp doesn't expose the programming models supported by the
	 * AFU. pHyp currently only supports directed mode. If it adds
	 * dedicated mode later, this version of cxl has no way to
	 * detect it. So we'll initialize the driver, but the first
	 * attach will fail.
	 * Being discussed with pHyp to do better (likely new property)
	 */
	if (afu->max_procs_virtualised == 1)
		afu->modes_supported = CXL_MODE_DEDICATED;
	else
		afu->modes_supported = CXL_MODE_DIRECTED;

	if ((rc = cxl_afu_select_best_mode(afu)))
		goto err_put2;

	adapter->afu[afu->slice] = afu;

	/*
	 * wake up the cpu periodically to check the state
	 * of the AFU using "afu" stored in the guest structure.
	 */
	afu->guest->parent = afu;
	afu->guest->handle_err = true;
	INIT_DELAYED_WORK(&afu->guest->work_err, afu_handle_errstate);
	schedule_delayed_work(&afu->guest->work_err, msecs_to_jiffies(1000));

	if ((rc = cxl_pci_vphb_add(afu)))
		dev_info(&afu->dev, "Can't register vPHB\n");

	return 0;

err_put2:
	cxl_sysfs_afu_remove(afu);
err_put1:
	device_unregister(&afu->dev);
	free = false;
	guest_release_serr_irq(afu);
err2:
	guest_unmap_slice_regs(afu);
err1:
	if (free) {
		kfree(afu->guest);
		kfree(afu);
	}
	return rc;
}
void cxl_guest_remove_afu(struct cxl_afu *afu)
{
	pr_devel("in %s - AFU(%d)\n", __func__, afu->slice);

	if (!afu)
		return;

	/* flush and stop pending job */
	afu->guest->handle_err = false;
	flush_delayed_work(&afu->guest->work_err);

	cxl_pci_vphb_remove(afu);
	cxl_sysfs_afu_remove(afu);

	spin_lock(&afu->adapter->afu_list_lock);
	afu->adapter->afu[afu->slice] = NULL;
	spin_unlock(&afu->adapter->afu_list_lock);

	cxl_context_detach_all(afu);
	cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
	guest_release_serr_irq(afu);
	guest_unmap_slice_regs(afu);

	device_unregister(&afu->dev);
}
static void free_adapter(struct cxl *adapter)
{
	struct irq_avail *cur;
	int i;

	if (adapter->guest->irq_avail) {
		for (i = 0; i < adapter->guest->irq_nranges; i++) {
			cur = &adapter->guest->irq_avail[i];
			kfree(cur->bitmap);
		}
		kfree(adapter->guest->irq_avail);
	}
	kfree(adapter->guest->status);
	cxl_remove_adapter_nr(adapter);
	kfree(adapter->guest);
	kfree(adapter);
}
static int properties_look_ok(struct cxl *adapter)
{
	/* The absence of this property means that the operational
	 * status is unknown or okay
	 */
	if (strlen(adapter->guest->status) &&
	    strcmp(adapter->guest->status, "okay")) {
		pr_err("ABORTING: Bad operational status of the device\n");
		return -EINVAL;
	}

	return 0;
}
ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
{
	return guest_collect_vpd(adapter, NULL, buf, len);
}
void cxl_guest_remove_adapter(struct cxl *adapter)
{
	pr_devel("in %s\n", __func__);

	cxl_sysfs_adapter_remove(adapter);

	cxl_guest_remove_chardev(adapter);
	device_unregister(&adapter->dev);
}
static void release_adapter(struct device *dev)
{
	free_adapter(to_cxl_adapter(dev));
}
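
/*
 * Probe an adapter: read the handle and properties from the device
 * tree, then register the character device and the sysfs entries.
 */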
struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev)
{
	struct cxl *adapter;
	bool free = true;
	int rc;

	if (!(adapter = cxl_alloc_adapter()))
		return ERR_PTR(-ENOMEM);

	if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) {
		free_adapter(adapter);
		return ERR_PTR(-ENOMEM);
	}

	adapter->slices = 0;
	adapter->guest->pdev = pdev;
	adapter->dev.parent = &pdev->dev;
	adapter->dev.release = release_adapter;
	dev_set_drvdata(&pdev->dev, adapter);

	/*
	 * Hypervisor controls PSL timebase initialization (p1 register).
	 * On FW840, PSL is initialized.
	 */
	adapter->psl_timebase_synced = true;

	if ((rc = cxl_of_read_adapter_handle(adapter, np)))
		goto err1;

	if ((rc = cxl_of_read_adapter_properties(adapter, np)))
		goto err1;

	if ((rc = properties_look_ok(adapter)))
		goto err1;

	if ((rc = cxl_guest_add_chardev(adapter)))
		goto err1;

	/*
	 * After we call this function we must not free the adapter directly,
	 * even if it returns an error!
	 */
	if ((rc = cxl_register_adapter(adapter)))
		goto err_put1;

	if ((rc = cxl_sysfs_adapter_add(adapter)))
		goto err_put1;

	return adapter;

err_put1:
	device_unregister(&adapter->dev);
	free = false;
	cxl_guest_remove_chardev(adapter);
err1:
	if (free)
		free_adapter(adapter);
	return ERR_PTR(rc);
}
void cxl_guest_reload_module(struct cxl *adapter)
{
	struct platform_device *pdev;

	pdev = adapter->guest->pdev;
	cxl_guest_remove_adapter(adapter);

	cxl_of_probe(pdev);
}
const struct cxl_backend_ops cxl_guest_ops = {
	.module = THIS_MODULE,
	.adapter_reset = guest_reset,
	.alloc_one_irq = guest_alloc_one_irq,
	.release_one_irq = guest_release_one_irq,
	.alloc_irq_ranges = guest_alloc_irq_ranges,
	.release_irq_ranges = guest_release_irq_ranges,
	.handle_psl_slice_error = guest_handle_psl_slice_error,
	.psl_interrupt = guest_psl_irq,
	.ack_irq = guest_ack_irq,
	.attach_process = guest_attach_process,
	.detach_process = guest_detach_process,
	.support_attributes = guest_support_attributes,
	.link_ok = guest_link_ok,
	.release_afu = guest_release_afu,
	.afu_read_err_buffer = guest_afu_read_err_buffer,
	.afu_check_and_enable = guest_afu_check_and_enable,
	.afu_activate_mode = guest_afu_activate_mode,
	.afu_deactivate_mode = guest_afu_deactivate_mode,
	.afu_reset = guest_afu_reset,
	.afu_cr_read8 = guest_afu_cr_read8,
	.afu_cr_read16 = guest_afu_cr_read16,
	.afu_cr_read32 = guest_afu_cr_read32,
	.afu_cr_read64 = guest_afu_cr_read64,
	.afu_cr_write8 = guest_afu_cr_write8,
	.afu_cr_write16 = guest_afu_cr_write16,
	.afu_cr_write32 = guest_afu_cr_write32,
	.read_adapter_vpd = cxl_guest_read_adapter_vpd,
};