/*
 * Physical device callbacks for vfio_ccw
 *
 * Copyright IBM Corp. 2017
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 */

#include <linux/vfio.h>
#include <linux/mdev.h>

#include "vfio_ccw_private.h"

static int vfio_ccw_mdev_reset(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private;
	struct subchannel *sch;
	int ret;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	sch = private->sch;
	/*
	 * TODO:
	 * In the current stage, some things like "no I/O running" and "no
	 * interrupt pending" are clear, but we are not sure what other
	 * state we need to care about. There are still a lot more
	 * instructions that need to be handled. We should come back here
	 * later.
	 */
	ret = vfio_ccw_sch_quiesce(sch);
	if (ret)
		return ret;

	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (!ret)
		private->state = VFIO_CCW_STATE_IDLE;

	return ret;
}

static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
				  unsigned long action,
				  void *data)
{
	struct vfio_ccw_private *private =
		container_of(nb, struct vfio_ccw_private, nb);

	/*
	 * Vendor drivers MUST unpin pages in response to an
	 * invalidation.
	 */
	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;

		if (!cp_iova_pinned(&private->cp, unmap->iova))
			return NOTIFY_OK;

		if (vfio_ccw_mdev_reset(private->mdev))
			return NOTIFY_BAD;

		cp_free(&private->cp);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
{
	return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
}
static MDEV_TYPE_ATTR_RO(name);

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
			       char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
}
static MDEV_TYPE_ATTR_RO(device_api);

static ssize_t available_instances_show(struct kobject *kobj,
					struct device *dev, char *buf)
{
	struct vfio_ccw_private *private = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", atomic_read(&private->avail));
}
static MDEV_TYPE_ATTR_RO(available_instances);

static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

static struct attribute_group mdev_type_group = {
	.name  = "io",
	.attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group,
	NULL,
};

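/*
 * These type attributes surface through the standard mdev sysfs hierarchy
 * (an illustration, not code from this file; the parent device path varies
 * by machine, and the "vfio_ccw-io" directory name derives from the driver
 * name plus the "io" group name above):
 *
 *	.../mdev_supported_types/vfio_ccw-io/name
 *	.../mdev_supported_types/vfio_ccw-io/device_api
 *	.../mdev_supported_types/vfio_ccw-io/available_instances
 */
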
108 static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
110 struct vfio_ccw_private *private =
111 dev_get_drvdata(mdev_parent_dev(mdev));
113 if (private->state == VFIO_CCW_STATE_NOT_OPER)
116 if (atomic_dec_if_positive(&private->avail) < 0)
119 private->mdev = mdev;
120 private->state = VFIO_CCW_STATE_IDLE;
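/*
 * A sketch of the corresponding userspace step (paths are illustrative;
 * <subchannel> stands for the parent subchannel's bus ID):
 *
 *	uuidgen > /sys/bus/css/devices/<subchannel>/mdev_supported_types/ \
 *		  vfio_ccw-io/create
 *
 * which reaches this callback via the mdev core and binds the new mdev to
 * the subchannel's private state.
 */
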
static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
	    (private->state != VFIO_CCW_STATE_STANDBY)) {
		if (!vfio_ccw_mdev_reset(mdev))
			private->state = VFIO_CCW_STATE_STANDBY;
		/* The state will be NOT_OPER on error. */
	}

	private->mdev = NULL;
	atomic_inc(&private->avail);

	return 0;
}

static int vfio_ccw_mdev_open(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));
	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;

	private->nb.notifier_call = vfio_ccw_mdev_notifier;

	return vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				      &events, &private->nb);
}

static void vfio_ccw_mdev_release(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				 &private->nb);
}

static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
				  char __user *buf,
				  size_t count,
				  loff_t *ppos)
{
	struct vfio_ccw_private *private;
	struct ccw_io_region *region;

	if (*ppos + count > sizeof(*region))
		return -EINVAL;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	region = &private->io_region;
	if (copy_to_user(buf, (void *)region + *ppos, count))
		return -EFAULT;

	return count;
}

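/*
 * A minimal userspace sketch (hypothetical caller, not part of this
 * driver; "device_fd" is an open vfio device fd): after the I/O trigger
 * eventfd fires, the IRB is fetched by reading the region back:
 *
 *	struct ccw_io_region region;
 *	struct irb irb;
 *
 *	if (pread(device_fd, &region, sizeof(region), 0) == sizeof(region))
 *		memcpy(&irb, region.irb_area, sizeof(irb));
 */
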
static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
				   const char __user *buf,
				   size_t count,
				   loff_t *ppos)
{
	struct vfio_ccw_private *private;
	struct ccw_io_region *region;

	if (*ppos + count > sizeof(*region))
		return -EINVAL;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	if (private->state != VFIO_CCW_STATE_IDLE)
		return -EACCES;

	region = &private->io_region;
	if (copy_from_user((void *)region + *ppos, buf, count))
		return -EFAULT;

	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
	if (region->ret_code != 0) {
		private->state = VFIO_CCW_STATE_IDLE;
		return region->ret_code;
	}

	return count;
}

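/*
 * A minimal userspace sketch of the other direction (hypothetical caller;
 * "orb" holds a valid ORB for the guest channel program): an I/O request
 * is started with a plain pwrite() of the region, which funnels into the
 * FSM event above:
 *
 *	struct ccw_io_region region = {};
 *
 *	memcpy(region.orb_area, &orb, sizeof(orb));
 *	if (pwrite(device_fd, &region, sizeof(region), 0) != sizeof(region))
 *		; // rejected, e.g. while the device is not idle
 */
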
static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info)
{
	info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
	info->num_regions = VFIO_CCW_NUM_REGIONS;
	info->num_irqs = VFIO_CCW_NUM_IRQS;

	return 0;
}

static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
					 u16 *cap_type_id,
					 void **cap_type)
{
	switch (info->index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		info->offset = 0;
		info->size = sizeof(struct ccw_io_region);
		info->flags = VFIO_REGION_INFO_FLAG_READ
			      | VFIO_REGION_INFO_FLAG_WRITE;
		return 0;
	default:
		return -EINVAL;
	}
}

static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
{
	if (info->index != VFIO_CCW_IO_IRQ_INDEX)
		return -EINVAL;

	info->count = 1;
	info->flags = VFIO_IRQ_INFO_EVENTFD;

	return 0;
}

static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
				  uint32_t flags,
				  void __user *data)
{
	struct vfio_ccw_private *private;
	struct eventfd_ctx **ctx;

	if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
		return -EINVAL;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	ctx = &private->io_trigger;

	switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
	{
		if (*ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	}
	case VFIO_IRQ_SET_DATA_BOOL:
	{
		uint8_t trigger;

		if (get_user(trigger, (uint8_t __user *)data))
			return -EFAULT;

		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	}
	case VFIO_IRQ_SET_DATA_EVENTFD:
	{
		int32_t fd;

		if (get_user(fd, (int32_t __user *)data))
			return -EFAULT;

		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		} else
			return -EINVAL;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

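/*
 * A minimal userspace sketch (hypothetical caller, using the standard
 * vfio SET_IRQS ABI): registering an eventfd as the I/O completion
 * trigger:
 *
 *	struct {
 *		struct vfio_irq_set set;
 *		int32_t fd;
 *	} trigger = {
 *		.set = {
 *			.argsz = sizeof(trigger),
 *			.flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *				 VFIO_IRQ_SET_ACTION_TRIGGER,
 *			.index = VFIO_CCW_IO_IRQ_INDEX,
 *			.count = 1,
 *		},
 *		.fd = eventfd(0, EFD_CLOEXEC),
 *	};
 *
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &trigger.set);
 */
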
static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
				   unsigned int cmd,
				   unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_device_info(&info);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &info, minsz) ?
		       -EFAULT : 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;
		u16 cap_type_id = 0;
		void *cap_type = NULL;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_region_info(&info, &cap_type_id,
						    &cap_type);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &info, minsz) ?
		       -EFAULT : 0;
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_CCW_NUM_IRQS)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_irq_info(&info);
		if (ret)
			return ret;

		if (info.count == -1)
			return -EINVAL;

		return copy_to_user((void __user *)arg, &info, minsz) ?
		       -EFAULT : 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		size_t data_size;
		void __user *data;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr, 1,
							 VFIO_CCW_NUM_IRQS,
							 &data_size);
		if (ret)
			return ret;

		data = (void __user *)(arg + minsz);
		return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, data);
	}
	case VFIO_DEVICE_RESET:
		return vfio_ccw_mdev_reset(mdev);
	default:
		return -ENOTTY;
	}
}

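/*
 * A minimal userspace sketch (hypothetical caller): discovering where the
 * I/O region lives before driving it with pread()/pwrite():
 *
 *	struct vfio_region_info info = {
 *		.argsz = sizeof(info),
 *		.index = VFIO_CCW_CONFIG_REGION_INDEX,
 *	};
 *
 *	ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info);
 *	// info.offset (0 here) is the file offset of the ccw_io_region
 */
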
static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
	.owner			= THIS_MODULE,
	.supported_type_groups	= mdev_type_groups,
	.create			= vfio_ccw_mdev_create,
	.remove			= vfio_ccw_mdev_remove,
	.open			= vfio_ccw_mdev_open,
	.release		= vfio_ccw_mdev_release,
	.read			= vfio_ccw_mdev_read,
	.write			= vfio_ccw_mdev_write,
	.ioctl			= vfio_ccw_mdev_ioctl,
};

int vfio_ccw_mdev_reg(struct subchannel *sch)
{
	return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops);
}

void vfio_ccw_mdev_unreg(struct subchannel *sch)
{
	mdev_unregister_device(&sch->dev);
}