* physical interrupt lines
- * @dev_info: the iio device for which the is an interrupt line
+ * @dev_info: the iio device for which this is an interrupt line
* @line_number: associated line number
- * @id: idr allocated unique id number
+ * @id: ida allocated unique id number
- * @irq: associate interrupt number
+ * @irq: associated interrupt number
* @ev_list: event handler list for associated events
* @ev_list_lock: ensure only one access to list at a time
| INDIO_RING_HARDWARE_BUFFER);
};
-struct idr;
+struct ida;
-int iio_get_new_idr_val(struct idr *this_idr);
-void iio_free_idr_val(struct idr *this_idr, int id);
+int iio_get_new_ida_val(struct ida *this_ida);
+void iio_free_ida_val(struct ida *this_ida, int id);
#endif /* _INDUSTRIAL_IO_H_ */
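Worth spelling out why the header change is more than a rename: an IDR maps every allocated id to a caller-supplied pointer, while an IDA hands out bare integers, which is all the IIO core ever needed (note the NULL pointer argument that disappears from the get_new call below). A hedged contrast sketch, kernel context assumed; the example_* names are invented here:

#include <linux/idr.h>

static DEFINE_IDR(example_idr);
static DEFINE_IDA(example_ida);

static int example_contrast(void)
{
	int id, ret;

	/* IDR: every id carries a pointer slot (NULL here, i.e. wasted).
	 * pre_get elided for brevity; real callers loop as the patch does. */
	ret = idr_get_new(&example_idr, NULL, &id);
	if (ret)
		return ret;
	idr_remove(&example_idr, id);

	/* IDA: pure id allocation, no pointer to store or look up. */
	ret = ida_get_new(&example_ida, &id);
	if (ret)
		return ret;
	ida_remove(&example_ida, id);
	return 0;
}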
#define IIO_ID_FORMAT IIO_ID_PREFIX "%d"
-/* IDR to assign each registered device a unique id*/
+/* IDA to assign each registered device a unique id */
-static DEFINE_IDR(iio_idr);
+static DEFINE_IDA(iio_ida);
-/* IDR to allocate character device minor numbers */
+/* IDA to allocate character device minor numbers */
-static DEFINE_IDR(iio_chrdev_idr);
+static DEFINE_IDA(iio_chrdev_ida);
/* Lock used to protect both of the above */
-static DEFINE_SPINLOCK(iio_idr_lock);
+static DEFINE_SPINLOCK(iio_ida_lock);
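A note on the locking discipline the conversion keeps (my reading, not patch text): ida_pre_get() may sleep under GFP_KERNEL, so it has to run before iio_ida_lock is taken, while ida_get_new() and ida_remove() only consume preallocated memory and are safe inside the spinlock. The retry idiom used twice below, annotated as a sketch:

static int example_get_id(void)
{
	int ret, val;

again:
	/* May sleep with GFP_KERNEL, so called with no spinlock held. */
	if (unlikely(ida_pre_get(&iio_chrdev_ida, GFP_KERNEL) == 0))
		return -ENOMEM;
	/* One lock serialises both IDAs; get_new allocates nothing here. */
	spin_lock(&iio_ida_lock);
	ret = ida_get_new(&iio_chrdev_ida, &val);
	spin_unlock(&iio_ida_lock);
	/* -EAGAIN: another CPU consumed the preallocated node; retry. */
	if (unlikely(ret == -EAGAIN))
		goto again;
	return ret ? ret : val;
}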
dev_t iio_devt;
EXPORT_SYMBOL(iio_devt);
{
int ret, val;
-idr_again:
- if (unlikely(idr_pre_get(&iio_chrdev_idr, GFP_KERNEL) == 0))
+ida_again:
+ if (unlikely(ida_pre_get(&iio_chrdev_ida, GFP_KERNEL) == 0))
return -ENOMEM;
- spin_lock(&iio_idr_lock);
- ret = idr_get_new(&iio_chrdev_idr, NULL, &val);
- spin_unlock(&iio_idr_lock);
+ spin_lock(&iio_ida_lock);
+ ret = ida_get_new(&iio_chrdev_ida, &val);
+ spin_unlock(&iio_ida_lock);
if (unlikely(ret == -EAGAIN))
- goto idr_again;
+ goto ida_again;
else if (unlikely(ret))
return ret;
if (val > IIO_DEV_MAX)
void iio_device_free_chrdev_minor(int val)
{
- spin_lock(&iio_idr_lock);
- idr_remove(&iio_chrdev_idr, val);
- spin_unlock(&iio_idr_lock);
+ spin_lock(&iio_ida_lock);
+ ida_remove(&iio_chrdev_ida, val);
+ spin_unlock(&iio_ida_lock);
}
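A hedged modernisation aside, not part of the patch: on kernels that provide ida_simple_get()/ida_simple_remove(), the pre_get/get_new/-EAGAIN loop and the external spinlock collapse into single calls; the _simple function names below are invented for illustration:

static int iio_device_get_chrdev_minor_simple(void)
{
	/* ids in [0, IIO_DEV_MAX]; returns the id or a negative errno */
	return ida_simple_get(&iio_chrdev_ida, 0, IIO_DEV_MAX + 1, GFP_KERNEL);
}

static void iio_device_free_chrdev_minor_simple(int val)
{
	ida_simple_remove(&iio_chrdev_ida, val);
}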
int iio_setup_ev_int(struct iio_event_interface *ev_int,
}
/* Return a negative errno on failure */
-int iio_get_new_idr_val(struct idr *this_idr)
+int iio_get_new_ida_val(struct ida *this_ida)
{
int ret;
int val;
-idr_again:
- if (unlikely(idr_pre_get(this_idr, GFP_KERNEL) == 0))
+ida_again:
+ if (unlikely(ida_pre_get(this_ida, GFP_KERNEL) == 0))
return -ENOMEM;
- spin_lock(&iio_idr_lock);
- ret = idr_get_new(this_idr, NULL, &val);
- spin_unlock(&iio_idr_lock);
+ spin_lock(&iio_ida_lock);
+ ret = ida_get_new(this_ida, &val);
+ spin_unlock(&iio_ida_lock);
if (unlikely(ret == -EAGAIN))
- goto idr_again;
+ goto ida_again;
else if (unlikely(ret))
return ret;
return val;
}
-EXPORT_SYMBOL(iio_get_new_idr_val);
+EXPORT_SYMBOL(iio_get_new_ida_val);
-void iio_free_idr_val(struct idr *this_idr, int id)
+void iio_free_ida_val(struct ida *this_ida, int id)
{
- spin_lock(&iio_idr_lock);
- idr_remove(this_idr, id);
- spin_unlock(&iio_idr_lock);
+ spin_lock(&iio_ida_lock);
+ ida_remove(this_ida, id);
+ spin_unlock(&iio_ida_lock);
}
-EXPORT_SYMBOL(iio_free_idr_val);
+EXPORT_SYMBOL(iio_free_ida_val);
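For callers outside this file, the two exported helpers pair up as below; a hedged usage sketch where struct my_trig and its IDA are invented names, not patch code:

static DEFINE_IDA(example_trig_ida);

struct my_trig {
	int id;
};

static int example_trig_register(struct my_trig *t)
{
	t->id = iio_get_new_ida_val(&example_trig_ida);	/* id or -errno */
	return t->id < 0 ? t->id : 0;
}

static void example_trig_unregister(struct my_trig *t)
{
	iio_free_ida_val(&example_trig_ida, t->id);
}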
static int iio_device_register_id(struct iio_dev *dev_info,
- struct idr *this_idr)
+ struct ida *this_ida)
{
-
- dev_info->id = iio_get_new_idr_val(&iio_idr);
+ dev_info->id = iio_get_new_ida_val(this_ida);
if (dev_info->id < 0)
return dev_info->id;
return 0;
static void iio_device_unregister_id(struct iio_dev *dev_info)
{
- iio_free_idr_val(&iio_idr, dev_info->id);
+ iio_free_ida_val(&iio_ida, dev_info->id);
}
static inline int __iio_add_event_config_attrs(struct iio_dev *dev_info, int i)
{
int ret;
- ret = iio_device_register_id(dev_info, &iio_idr);
+ ret = iio_device_register_id(dev_info, &iio_ida);
if (ret) {
dev_err(&dev_info->dev, "Failed to get id\n");
goto error_ret;
ret = device_add(&dev_info->dev);
if (ret)
- goto error_free_idr;
+ goto error_free_ida;
ret = iio_device_register_sysfs(dev_info);
if (ret) {
dev_err(dev_info->dev.parent,
iio_device_unregister_sysfs(dev_info);
error_del_device:
device_del(&dev_info->dev);
-error_free_idr:
+error_free_ida:
iio_device_unregister_id(dev_info);
error_ret:
return ret;
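Pulling the error labels together: the registration path unwinds in exact reverse of setup, each label undoing only the steps that already succeeded. An abridged sketch of the flow the patch touches (sysfs failure handling shortened):

int example_iio_device_register(struct iio_dev *dev_info)
{
	int ret;

	ret = iio_device_register_id(dev_info, &iio_ida);	/* step 1 */
	if (ret)
		return ret;
	ret = device_add(&dev_info->dev);			/* step 2 */
	if (ret)
		goto error_free_ida;
	ret = iio_device_register_sysfs(dev_info);		/* step 3 */
	if (ret)
		goto error_del_device;
	return 0;

error_del_device:
	device_del(&dev_info->dev);		/* undo step 2 */
error_free_ida:
	iio_device_unregister_id(dev_info);	/* undo step 1 */
	return ret;
}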