/*
 * GPIO Greybus driver.
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 #include <linux/gpio.h>
14 #include <linux/irq.h>
15 #include <linux/irqdomain.h>
16 #include <linux/mutex.h>
22 /* The following has to be an array of line_max entries */
23 /* --> make them just a flags field */
25 direction: 1, /* 0 = output, 1 = input */
26 value: 1; /* 0 = low, 1 = high */
30 bool irq_type_pending;
35 struct gb_gpio_controller {
36 struct gbphy_device *gbphy_dev;
37 struct gb_connection *connection;
38 u8 line_max; /* max line number */
39 struct gb_gpio_line *lines;
41 struct gpio_chip chip;
43 struct irq_chip *irqchip;
44 struct irq_domain *irqdomain;
45 unsigned int irq_base;
46 irq_flow_handler_t irq_handler;
47 unsigned int irq_default_type;
48 struct mutex irq_lock;
/* Map the embedded gpio_chip back to its containing controller. */
#define gpio_chip_to_gb_gpio_controller(chip) \
	container_of(chip, struct gb_gpio_controller, chip)
/*
 * The irq domain's host_data is the gpio_chip (set up in
 * gb_gpio_irqchip_add()).  Argument parenthesized for macro hygiene.
 */
#define irq_data_to_gpio_chip(d) ((d)->domain->host_data)
54 static int gb_gpio_line_count_operation(struct gb_gpio_controller *ggc)
56 struct gb_gpio_line_count_response response;
59 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_LINE_COUNT,
60 NULL, 0, &response, sizeof(response));
62 ggc->line_max = response.count;
66 static int gb_gpio_activate_operation(struct gb_gpio_controller *ggc, u8 which)
68 struct gb_gpio_activate_request request;
71 request.which = which;
72 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_ACTIVATE,
73 &request, sizeof(request), NULL, 0);
75 ggc->lines[which].active = true;
79 static void gb_gpio_deactivate_operation(struct gb_gpio_controller *ggc,
82 struct device *dev = &ggc->gbphy_dev->dev;
83 struct gb_gpio_deactivate_request request;
86 request.which = which;
87 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DEACTIVATE,
88 &request, sizeof(request), NULL, 0);
90 dev_err(dev, "failed to deactivate gpio %u\n", which);
94 ggc->lines[which].active = false;
97 static int gb_gpio_get_direction_operation(struct gb_gpio_controller *ggc,
100 struct device *dev = &ggc->gbphy_dev->dev;
101 struct gb_gpio_get_direction_request request;
102 struct gb_gpio_get_direction_response response;
106 request.which = which;
107 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_DIRECTION,
108 &request, sizeof(request),
109 &response, sizeof(response));
113 direction = response.direction;
114 if (direction && direction != 1) {
115 dev_warn(dev, "gpio %u direction was %u (should be 0 or 1)\n",
118 ggc->lines[which].direction = direction ? 1 : 0;
122 static int gb_gpio_direction_in_operation(struct gb_gpio_controller *ggc,
125 struct gb_gpio_direction_in_request request;
128 request.which = which;
129 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_IN,
130 &request, sizeof(request), NULL, 0);
132 ggc->lines[which].direction = 1;
136 static int gb_gpio_direction_out_operation(struct gb_gpio_controller *ggc,
137 u8 which, bool value_high)
139 struct gb_gpio_direction_out_request request;
142 request.which = which;
143 request.value = value_high ? 1 : 0;
144 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_OUT,
145 &request, sizeof(request), NULL, 0);
147 ggc->lines[which].direction = 0;
151 static int gb_gpio_get_value_operation(struct gb_gpio_controller *ggc,
154 struct device *dev = &ggc->gbphy_dev->dev;
155 struct gb_gpio_get_value_request request;
156 struct gb_gpio_get_value_response response;
160 request.which = which;
161 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_VALUE,
162 &request, sizeof(request),
163 &response, sizeof(response));
165 dev_err(dev, "failed to get value of gpio %u\n", which);
169 value = response.value;
170 if (value && value != 1) {
171 dev_warn(dev, "gpio %u value was %u (should be 0 or 1)\n",
174 ggc->lines[which].value = value ? 1 : 0;
178 static void gb_gpio_set_value_operation(struct gb_gpio_controller *ggc,
179 u8 which, bool value_high)
181 struct device *dev = &ggc->gbphy_dev->dev;
182 struct gb_gpio_set_value_request request;
185 if (ggc->lines[which].direction == 1) {
186 dev_warn(dev, "refusing to set value of input gpio %u\n",
191 request.which = which;
192 request.value = value_high ? 1 : 0;
193 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_VALUE,
194 &request, sizeof(request), NULL, 0);
196 dev_err(dev, "failed to set value of gpio %u\n", which);
200 ggc->lines[which].value = request.value;
203 static int gb_gpio_set_debounce_operation(struct gb_gpio_controller *ggc,
204 u8 which, u16 debounce_usec)
206 struct gb_gpio_set_debounce_request request;
209 request.which = which;
210 request.usec = cpu_to_le16(debounce_usec);
211 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_DEBOUNCE,
212 &request, sizeof(request), NULL, 0);
214 ggc->lines[which].debounce_usec = debounce_usec;
218 static void _gb_gpio_irq_mask(struct gb_gpio_controller *ggc, u8 hwirq)
220 struct device *dev = &ggc->gbphy_dev->dev;
221 struct gb_gpio_irq_mask_request request;
224 request.which = hwirq;
225 ret = gb_operation_sync(ggc->connection,
226 GB_GPIO_TYPE_IRQ_MASK,
227 &request, sizeof(request), NULL, 0);
229 dev_err(dev, "failed to mask irq: %d\n", ret);
232 static void _gb_gpio_irq_unmask(struct gb_gpio_controller *ggc, u8 hwirq)
234 struct device *dev = &ggc->gbphy_dev->dev;
235 struct gb_gpio_irq_unmask_request request;
238 request.which = hwirq;
239 ret = gb_operation_sync(ggc->connection,
240 GB_GPIO_TYPE_IRQ_UNMASK,
241 &request, sizeof(request), NULL, 0);
243 dev_err(dev, "failed to unmask irq: %d\n", ret);
246 static void _gb_gpio_irq_set_type(struct gb_gpio_controller *ggc,
249 struct device *dev = &ggc->gbphy_dev->dev;
250 struct gb_gpio_irq_type_request request;
253 request.which = hwirq;
256 ret = gb_operation_sync(ggc->connection,
257 GB_GPIO_TYPE_IRQ_TYPE,
258 &request, sizeof(request), NULL, 0);
260 dev_err(dev, "failed to set irq type: %d\n", ret);
263 static void gb_gpio_irq_mask(struct irq_data *d)
265 struct gpio_chip *chip = irq_data_to_gpio_chip(d);
266 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
267 struct gb_gpio_line *line = &ggc->lines[d->hwirq];
270 line->masked_pending = true;
273 static void gb_gpio_irq_unmask(struct irq_data *d)
275 struct gpio_chip *chip = irq_data_to_gpio_chip(d);
276 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
277 struct gb_gpio_line *line = &ggc->lines[d->hwirq];
279 line->masked = false;
280 line->masked_pending = true;
283 static int gb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
285 struct gpio_chip *chip = irq_data_to_gpio_chip(d);
286 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
287 struct gb_gpio_line *line = &ggc->lines[d->hwirq];
288 struct device *dev = &ggc->gbphy_dev->dev;
293 irq_type = GB_GPIO_IRQ_TYPE_NONE;
295 case IRQ_TYPE_EDGE_RISING:
296 irq_type = GB_GPIO_IRQ_TYPE_EDGE_RISING;
298 case IRQ_TYPE_EDGE_FALLING:
299 irq_type = GB_GPIO_IRQ_TYPE_EDGE_FALLING;
301 case IRQ_TYPE_EDGE_BOTH:
302 irq_type = GB_GPIO_IRQ_TYPE_EDGE_BOTH;
304 case IRQ_TYPE_LEVEL_LOW:
305 irq_type = GB_GPIO_IRQ_TYPE_LEVEL_LOW;
307 case IRQ_TYPE_LEVEL_HIGH:
308 irq_type = GB_GPIO_IRQ_TYPE_LEVEL_HIGH;
311 dev_err(dev, "unsupported irq type: %u\n", type);
315 line->irq_type = irq_type;
316 line->irq_type_pending = true;
321 static void gb_gpio_irq_bus_lock(struct irq_data *d)
323 struct gpio_chip *chip = irq_data_to_gpio_chip(d);
324 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
326 mutex_lock(&ggc->irq_lock);
329 static void gb_gpio_irq_bus_sync_unlock(struct irq_data *d)
331 struct gpio_chip *chip = irq_data_to_gpio_chip(d);
332 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
333 struct gb_gpio_line *line = &ggc->lines[d->hwirq];
335 if (line->irq_type_pending) {
336 _gb_gpio_irq_set_type(ggc, d->hwirq, line->irq_type);
337 line->irq_type_pending = false;
340 if (line->masked_pending) {
342 _gb_gpio_irq_mask(ggc, d->hwirq);
344 _gb_gpio_irq_unmask(ggc, d->hwirq);
345 line->masked_pending = false;
348 mutex_unlock(&ggc->irq_lock);
351 static int gb_gpio_request_handler(struct gb_operation *op)
353 struct gb_connection *connection = op->connection;
354 struct gb_gpio_controller *ggc = gb_connection_get_data(connection);
355 struct device *dev = &ggc->gbphy_dev->dev;
356 struct gb_message *request;
357 struct gb_gpio_irq_event_request *event;
360 struct irq_desc *desc;
362 if (type != GB_GPIO_TYPE_IRQ_EVENT) {
363 dev_err(dev, "unsupported unsolicited request: %u\n", type);
367 request = op->request;
369 if (request->payload_size < sizeof(*event)) {
370 dev_err(dev, "short event received (%zu < %zu)\n",
371 request->payload_size, sizeof(*event));
375 event = request->payload;
376 if (event->which > ggc->line_max) {
377 dev_err(dev, "invalid hw irq: %d\n", event->which);
381 irq = irq_find_mapping(ggc->irqdomain, event->which);
383 dev_err(dev, "failed to find IRQ\n");
386 desc = irq_to_desc(irq);
388 dev_err(dev, "failed to look up irq\n");
393 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
394 generic_handle_irq_desc(irq, desc);
396 generic_handle_irq_desc(desc);
403 static int gb_gpio_request(struct gpio_chip *chip, unsigned offset)
405 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
407 return gb_gpio_activate_operation(ggc, (u8)offset);
410 static void gb_gpio_free(struct gpio_chip *chip, unsigned offset)
412 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
414 gb_gpio_deactivate_operation(ggc, (u8)offset);
417 static int gb_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
419 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
424 ret = gb_gpio_get_direction_operation(ggc, which);
428 return ggc->lines[which].direction ? 1 : 0;
431 static int gb_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
433 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
435 return gb_gpio_direction_in_operation(ggc, (u8)offset);
438 static int gb_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
441 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
443 return gb_gpio_direction_out_operation(ggc, (u8)offset, !!value);
446 static int gb_gpio_get(struct gpio_chip *chip, unsigned offset)
448 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
453 ret = gb_gpio_get_value_operation(ggc, which);
457 return ggc->lines[which].value;
460 static void gb_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
462 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
464 gb_gpio_set_value_operation(ggc, (u8)offset, !!value);
467 static int gb_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
470 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
473 if (debounce > U16_MAX)
475 usec = (u16)debounce;
477 return gb_gpio_set_debounce_operation(ggc, (u8)offset, usec);
480 static int gb_gpio_controller_setup(struct gb_gpio_controller *ggc)
484 /* Now find out how many lines there are */
485 ret = gb_gpio_line_count_operation(ggc);
489 ggc->lines = kcalloc(ggc->line_max + 1, sizeof(*ggc->lines),
498 * gb_gpio_irq_map() - maps an IRQ into a GB gpio irqchip
499 * @d: the irqdomain used by this irqchip
500 * @irq: the global irq number used by this GB gpio irqchip irq
501 * @hwirq: the local IRQ/GPIO line offset on this GB gpio
503 * This function will set up the mapping for a certain IRQ line on a
504 * GB gpio by assigning the GB gpio as chip data, and using the irqchip
505 * stored inside the GB gpio.
507 static int gb_gpio_irq_map(struct irq_domain *domain, unsigned int irq,
508 irq_hw_number_t hwirq)
510 struct gpio_chip *chip = domain->host_data;
511 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
513 irq_set_chip_data(irq, ggc);
514 irq_set_chip_and_handler(irq, ggc->irqchip, ggc->irq_handler);
515 irq_set_noprobe(irq);
517 * No set-up of the hardware will happen if IRQ_TYPE_NONE
518 * is passed as default type.
520 if (ggc->irq_default_type != IRQ_TYPE_NONE)
521 irq_set_irq_type(irq, ggc->irq_default_type);
526 static void gb_gpio_irq_unmap(struct irq_domain *d, unsigned int irq)
528 irq_set_chip_and_handler(irq, NULL, NULL);
529 irq_set_chip_data(irq, NULL);
532 static const struct irq_domain_ops gb_gpio_domain_ops = {
533 .map = gb_gpio_irq_map,
534 .unmap = gb_gpio_irq_unmap,
538 * gb_gpio_irqchip_remove() - removes an irqchip added to a gb_gpio_controller
539 * @ggc: the gb_gpio_controller to remove the irqchip from
541 * This is called only from gb_gpio_remove()
543 static void gb_gpio_irqchip_remove(struct gb_gpio_controller *ggc)
547 /* Remove all IRQ mappings and delete the domain */
548 if (ggc->irqdomain) {
549 for (offset = 0; offset < (ggc->line_max + 1); offset++)
550 irq_dispose_mapping(irq_find_mapping(ggc->irqdomain, offset));
551 irq_domain_remove(ggc->irqdomain);
561 * gb_gpio_irqchip_add() - adds an irqchip to a gpio chip
562 * @chip: the gpio chip to add the irqchip to
563 * @irqchip: the irqchip to add to the adapter
564 * @first_irq: if not dynamically assigned, the base (first) IRQ to
565 * allocate gpio irqs from
566 * @handler: the irq handler to use (often a predefined irq core function)
567 * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
568 * to have the core avoid setting up any default type in the hardware.
570 * This function closely associates a certain irqchip with a certain
571 * gpio chip, providing an irq domain to translate the local IRQs to
572 * global irqs, and making sure that the gpio chip
573 * is passed as chip data to all related functions. Driver callbacks
574 * need to use container_of() to get their local state containers back
575 * from the gpio chip passed as chip data. An irqdomain will be stored
576 * in the gpio chip that shall be used by the driver to handle IRQ number
577 * translation. The gpio chip will need to be initialized and registered
578 * before calling this function.
580 static int gb_gpio_irqchip_add(struct gpio_chip *chip,
581 struct irq_chip *irqchip,
582 unsigned int first_irq,
583 irq_flow_handler_t handler,
586 struct gb_gpio_controller *ggc;
590 if (!chip || !irqchip)
593 ggc = gpio_chip_to_gb_gpio_controller(chip);
595 ggc->irqchip = irqchip;
596 ggc->irq_handler = handler;
597 ggc->irq_default_type = type;
598 ggc->irqdomain = irq_domain_add_simple(NULL,
599 ggc->line_max + 1, first_irq,
600 &gb_gpio_domain_ops, chip);
601 if (!ggc->irqdomain) {
607 * Prepare the mapping since the irqchip shall be orthogonal to
608 * any gpio calls. If the first_irq was zero, this is
609 * necessary to allocate descriptors for all IRQs.
611 for (offset = 0; offset < (ggc->line_max + 1); offset++) {
612 irq_base = irq_create_mapping(ggc->irqdomain, offset);
614 ggc->irq_base = irq_base;
620 static int gb_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
622 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
624 return irq_find_mapping(ggc->irqdomain, offset);
627 static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
628 const struct gbphy_device_id *id)
630 struct gb_connection *connection;
631 struct gb_gpio_controller *ggc;
632 struct gpio_chip *gpio;
633 struct irq_chip *irqc;
636 ggc = kzalloc(sizeof(*ggc), GFP_KERNEL);
640 connection = gb_connection_create(gbphy_dev->bundle,
641 le16_to_cpu(gbphy_dev->cport_desc->id),
642 gb_gpio_request_handler);
643 if (IS_ERR(connection)) {
644 ret = PTR_ERR(connection);
648 ggc->connection = connection;
649 gb_connection_set_data(connection, ggc);
650 ggc->gbphy_dev = gbphy_dev;
651 gb_gbphy_set_data(gbphy_dev, ggc);
653 ret = gb_connection_enable_tx(connection);
655 goto exit_connection_destroy;
657 ret = gb_gbphy_get_version(connection);
659 goto exit_connection_disable;
661 ret = gb_gpio_controller_setup(ggc);
663 goto exit_connection_disable;
666 irqc->irq_mask = gb_gpio_irq_mask;
667 irqc->irq_unmask = gb_gpio_irq_unmask;
668 irqc->irq_set_type = gb_gpio_irq_set_type;
669 irqc->irq_bus_lock = gb_gpio_irq_bus_lock;
670 irqc->irq_bus_sync_unlock = gb_gpio_irq_bus_sync_unlock;
671 irqc->name = "greybus_gpio";
673 mutex_init(&ggc->irq_lock);
677 gpio->label = "greybus_gpio";
678 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
679 gpio->parent = &gbphy_dev->dev;
681 gpio->dev = &gbphy_dev->dev;
683 gpio->owner = THIS_MODULE;
685 gpio->request = gb_gpio_request;
686 gpio->free = gb_gpio_free;
687 gpio->get_direction = gb_gpio_get_direction;
688 gpio->direction_input = gb_gpio_direction_input;
689 gpio->direction_output = gb_gpio_direction_output;
690 gpio->get = gb_gpio_get;
691 gpio->set = gb_gpio_set;
692 gpio->set_debounce = gb_gpio_set_debounce;
693 gpio->to_irq = gb_gpio_to_irq;
694 gpio->base = -1; /* Allocate base dynamically */
695 gpio->ngpio = ggc->line_max + 1;
696 gpio->can_sleep = true;
698 ret = gb_connection_enable(connection);
702 ret = gpiochip_add(gpio);
704 dev_err(&connection->bundle->dev,
705 "failed to add gpio chip: %d\n", ret);
709 ret = gb_gpio_irqchip_add(gpio, irqc, 0,
710 handle_level_irq, IRQ_TYPE_NONE);
712 dev_err(&connection->bundle->dev,
713 "failed to add irq chip: %d\n", ret);
714 goto exit_gpiochip_remove;
719 exit_gpiochip_remove:
720 gb_gpiochip_remove(gpio);
723 exit_connection_disable:
724 gb_connection_disable(connection);
725 exit_connection_destroy:
726 gb_connection_destroy(connection);
732 static void gb_gpio_remove(struct gbphy_device *gbphy_dev)
734 struct gb_gpio_controller *ggc = gb_gbphy_get_data(gbphy_dev);
735 struct gb_connection *connection = ggc->connection;
737 gb_connection_disable_rx(connection);
738 gb_gpio_irqchip_remove(ggc);
739 gb_gpiochip_remove(&ggc->chip);
740 gb_connection_disable(connection);
741 gb_connection_destroy(connection);
746 static const struct gbphy_device_id gb_gpio_id_table[] = {
747 { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_GPIO) },
750 MODULE_DEVICE_TABLE(gbphy, gb_gpio_id_table);
752 static struct gbphy_driver gpio_driver = {
754 .probe = gb_gpio_probe,
755 .remove = gb_gpio_remove,
756 .id_table = gb_gpio_id_table,
759 module_gbphy_driver(gpio_driver);
760 MODULE_LICENSE("GPL v2");