/*
 * SVC Greybus driver.
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/debugfs.h>
#include <linux/input.h>
#include <linux/workqueue.h>

#include "greybus.h"

#define SVC_KEY_ARA_BUTTON	KEY_A

#define SVC_INTF_EJECT_TIMEOUT	9000
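
/*
 * SVC requests that may sleep for a long time (interface hotplug and
 * hot-unplug) are wrapped in one of these and processed on svc->wq; see
 * gb_svc_queue_deferred_request().
 */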

struct gb_svc_deferred_request {
	struct work_struct work;
	struct gb_operation *operation;
};

static ssize_t endo_id_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "0x%04x\n", svc->endo_id);
}
static DEVICE_ATTR_RO(endo_id);

static ssize_t ap_intf_id_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "%u\n", svc->ap_intf_id);
}
static DEVICE_ATTR_RO(ap_intf_id);

/*
 * This is a hack; we need to do this "right" and clean the interface up
 * properly, not just forcibly yank the thing out of the system and hope for
 * the best. But for now, people want their modules to come out without having
 * to throw the thing to the ground or get out a screwdriver.
 */
static ssize_t intf_eject_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t len)
{
	struct gb_svc *svc = to_gb_svc(dev);
	unsigned short intf_id;
	int ret;

	ret = kstrtou16(buf, 10, &intf_id);
	if (ret < 0)
		return ret;

	dev_warn(dev, "Forcibly trying to eject interface %u\n", intf_id);

	ret = gb_svc_intf_eject(svc, intf_id);
	if (ret < 0)
		return ret;

	return len;
}
static DEVICE_ATTR_WO(intf_eject);
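
/*
 * Usage sketch (the path below is illustrative; the exact sysfs location
 * depends on how the bus registers this device, typically something like
 * /sys/bus/greybus/devices/N-svc):
 *
 *	echo 2 > intf_eject	# forcibly eject interface 2
 */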

static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "%s\n",
		       gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
}

static ssize_t watchdog_store(struct device *dev,
			      struct device_attribute *attr, const char *buf,
			      size_t len)
{
	struct gb_svc *svc = to_gb_svc(dev);
	int retval;
	bool user_request;

	retval = strtobool(buf, &user_request);
	if (retval)
		return retval;

	if (user_request)
		retval = gb_svc_watchdog_enable(svc);
	else
		retval = gb_svc_watchdog_disable(svc);
	if (retval)
		return retval;

	return len;
}
static DEVICE_ATTR_RW(watchdog);
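
/*
 * Power-monitor (pwrmon) helpers. These query the SVC over the SVC
 * connection for the number of monitored power rails, their names, and
 * individual voltage/current/power samples. The results back the debugfs
 * files created in svc_pwrmon_debugfs_init().
 */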

static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value)
{
	struct gb_svc_pwrmon_rail_count_get_response response;
	int ret;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0,
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get rail count: %d\n", ret);
		return ret;
	}

	*value = response.rail_count;

	return 0;
}

static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc,
		struct gb_svc_pwrmon_rail_names_get_response *response,
		size_t bufsize)
{
	int ret;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0,
				response, bufsize);
	if (ret) {
		dev_err(&svc->dev, "failed to get rail names: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id,
				    u8 measurement_type, u32 *value)
{
	struct gb_svc_pwrmon_sample_get_request request;
	struct gb_svc_pwrmon_sample_get_response response;
	int ret;

	request.rail_id = rail_id;
	request.measurement_type = measurement_type;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get rail sample: %d\n", ret);
		return ret;
	}

	if (response.result) {
		dev_err(&svc->dev,
			"UniPro error while getting rail power sample (%d %d): %d\n",
			rail_id, measurement_type, response.result);
		switch (response.result) {
		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
			return -EINVAL;
		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
			return -ENOMSG;
		default:
			return -EREMOTEIO;
		}
	}

	*value = le32_to_cpu(response.measurement);

	return 0;
}

int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
				  u8 measurement_type, u32 *value)
{
	struct gb_svc_pwrmon_intf_sample_get_request request;
	struct gb_svc_pwrmon_intf_sample_get_response response;
	int ret;

	request.intf_id = intf_id;
	request.measurement_type = measurement_type;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
		return ret;
	}

	if (response.result) {
		dev_err(&svc->dev,
			"UniPro error while getting intf power sample (%d %d): %d\n",
			intf_id, measurement_type, response.result);
		switch (response.result) {
		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
			return -EINVAL;
		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
			return -ENOMSG;
		default:
			return -EREMOTEIO;
		}
	}

	*value = le32_to_cpu(response.measurement);

	return 0;
}

static struct attribute *svc_attrs[] = {
	&dev_attr_endo_id.attr,
	&dev_attr_ap_intf_id.attr,
	&dev_attr_intf_eject.attr,
	&dev_attr_watchdog.attr,
	NULL,
};
ATTRIBUTE_GROUPS(svc);

int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
{
	struct gb_svc_intf_device_id_request request;

	request.intf_id = intf_id;
	request.device_id = device_id;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
				 &request, sizeof(request), NULL, 0);
}

int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_eject_request request;
	int ret;

	request.intf_id = intf_id;

	/*
	 * The pulse width for module release in the SVC is long, so we need
	 * to increase the timeout so that the operation will not return too
	 * soon.
	 */
	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_EJECT, &request,
					sizeof(request), NULL, 0,
					SVC_INTF_EJECT_TIMEOUT);
	if (ret) {
		dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
		return ret;
	}

	return 0;
}
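
/*
 * DME (UniPro Device Management Entity) peer accessors: read or write a
 * single DME attribute on the interface's UniPro peer via the SVC. A
 * non-zero result_code in the response indicates a UniPro-level error.
 */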

int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 *value)
{
	struct gb_svc_dme_peer_get_request request;
	struct gb_svc_dme_peer_get_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
			intf_id, attr, selector, ret);
		return ret;
	}

	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
			intf_id, attr, selector, result);
		return -EREMOTEIO;
	}

	if (value)
		*value = le32_to_cpu(response.attr_value);

	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_dme_peer_get);

int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 value)
{
	struct gb_svc_dme_peer_set_request request;
	struct gb_svc_dme_peer_set_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);
	request.value = cpu_to_le32(value);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
			intf_id, attr, selector, value, ret);
		return ret;
	}

	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
			intf_id, attr, selector, value, result);
		return -EREMOTEIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_dme_peer_set);

int gb_svc_connection_create(struct gb_svc *svc,
			     u8 intf1_id, u16 cport1_id,
			     u8 intf2_id, u16 cport2_id,
			     u8 cport_flags)
{
	struct gb_svc_conn_create_request request;

	request.intf1_id = intf1_id;
	request.cport1_id = cpu_to_le16(cport1_id);
	request.intf2_id = intf2_id;
	request.cport2_id = cpu_to_le16(cport2_id);
	request.tc = 0;		/* TC0 */
	request.flags = cport_flags;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
				 &request, sizeof(request), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_svc_connection_create);

void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
			       u8 intf2_id, u16 cport2_id)
{
	struct gb_svc_conn_destroy_request request;
	struct gb_connection *connection = svc->connection;
	int ret;

	request.intf1_id = intf1_id;
	request.cport1_id = cpu_to_le16(cport1_id);
	request.intf2_id = intf2_id;
	request.cport2_id = cpu_to_le16(cport2_id);

	ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
			intf1_id, cport1_id, intf2_id, cport2_id, ret);
	}
}
EXPORT_SYMBOL_GPL(gb_svc_connection_destroy);

/* Creates bi-directional routes between the devices */
int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
			u8 intf2_id, u8 dev2_id)
{
	struct gb_svc_route_create_request request;

	request.intf1_id = intf1_id;
	request.dev1_id = dev1_id;
	request.intf2_id = intf2_id;
	request.dev2_id = dev2_id;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
				 &request, sizeof(request), NULL, 0);
}

/* Destroys bi-directional routes between the devices */
void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
{
	struct gb_svc_route_destroy_request request;
	int ret;

	request.intf1_id = intf1_id;
	request.intf2_id = intf2_id;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
			intf1_id, intf2_id, ret);
	}
}

int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
			       u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
			       u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
			       u8 flags, u32 quirks)
{
	struct gb_svc_intf_set_pwrm_request request;
	struct gb_svc_intf_set_pwrm_response response;
	int ret;

	request.intf_id = intf_id;
	request.hs_series = hs_series;
	request.tx_mode = tx_mode;
	request.tx_gear = tx_gear;
	request.tx_nlanes = tx_nlanes;
	request.rx_mode = rx_mode;
	request.rx_gear = rx_gear;
	request.rx_nlanes = rx_nlanes;
	request.flags = flags;
	request.quirks = cpu_to_le32(quirks);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	return le16_to_cpu(response.result_code);
}
EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);

int gb_svc_ping(struct gb_svc *svc)
{
	return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
					 NULL, 0, NULL, 0,
					 GB_OPERATION_TIMEOUT_DEFAULT * 2);
}
EXPORT_SYMBOL_GPL(gb_svc_ping);
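
/*
 * The SVC drives the initial handshake: it sends a version request first
 * and an SVC Hello afterwards (see gb_svc_request_handler() for the state
 * machine enforcing this order). We refuse a major version newer than
 * GB_SVC_VERSION_MAJOR and otherwise echo back the requested version.
 */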

static int gb_svc_version_request(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_protocol_version_request *request;
	struct gb_protocol_version_response *response;

	if (op->request->payload_size < sizeof(*request)) {
		dev_err(&svc->dev, "short version request (%zu < %zu)\n",
			op->request->payload_size,
			sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	if (request->major > GB_SVC_VERSION_MAJOR) {
		dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
			 request->major, GB_SVC_VERSION_MAJOR);
		return -ENOTSUPP;
	}

	svc->protocol_major = request->major;
	svc->protocol_minor = request->minor;

	if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
		return -ENOMEM;

	response = op->response->payload;
	response->major = svc->protocol_major;
	response->minor = svc->protocol_minor;

	return 0;
}
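
/*
 * The three debugfs readers below share one pattern: fetch a single
 * voltage, current or power sample for the rail behind the file and
 * return it to user space as a decimal string.
 */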

static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
					size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_VOL, &value);
	if (ret) {
		dev_err(&svc->dev,
			"failed to get voltage sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}

static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
					size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_CURR, &value);
	if (ret) {
		dev_err(&svc->dev,
			"failed to get current sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}

static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
				      size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_PWR, &value);
	if (ret) {
		dev_err(&svc->dev, "failed to get power sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}

static const struct file_operations pwrmon_debugfs_voltage_fops = {
	.read		= pwr_debugfs_voltage_read,
};

static const struct file_operations pwrmon_debugfs_current_fops = {
	.read		= pwr_debugfs_current_read,
};

static const struct file_operations pwrmon_debugfs_power_fops = {
	.read		= pwr_debugfs_power_read,
};

static void svc_pwrmon_debugfs_init(struct gb_svc *svc)
{
	int i;
	size_t bufsize;
	struct dentry *dent;

	dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
	if (IS_ERR_OR_NULL(dent))
		return;

	if (gb_svc_pwrmon_rail_count_get(svc, &svc->rail_count))
		goto err_pwrmon_debugfs;

	if (!svc->rail_count || svc->rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
		goto err_pwrmon_debugfs;

	bufsize = GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * svc->rail_count;

	svc->rail_names = kzalloc(bufsize, GFP_KERNEL);
	if (!svc->rail_names)
		goto err_pwrmon_debugfs;

	svc->pwrmon_rails = kcalloc(svc->rail_count, sizeof(*svc->pwrmon_rails),
				    GFP_KERNEL);
	if (!svc->pwrmon_rails)
		goto err_pwrmon_debugfs_free;

	if (gb_svc_pwrmon_rail_names_get(svc, svc->rail_names, bufsize))
		goto err_pwrmon_debugfs_free;

	for (i = 0; i < svc->rail_count; i++) {
		struct dentry *dir;
		struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
		char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];

		snprintf(fname, sizeof(fname), "%s",
			 (char *)&svc->rail_names->name[i]);

		rail->id = i;
		rail->svc = svc;

		dir = debugfs_create_dir(fname, dent);
		debugfs_create_file("voltage_now", S_IRUGO, dir, rail,
				    &pwrmon_debugfs_voltage_fops);
		debugfs_create_file("current_now", S_IRUGO, dir, rail,
				    &pwrmon_debugfs_current_fops);
		debugfs_create_file("power_now", S_IRUGO, dir, rail,
				    &pwrmon_debugfs_power_fops);
	}

	return;

err_pwrmon_debugfs_free:
	kfree(svc->rail_names);
	svc->rail_names = NULL;

	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;

err_pwrmon_debugfs:
	debugfs_remove(dent);
}

static void svc_debugfs_init(struct gb_svc *svc)
{
	svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
						 gb_debugfs_get());
	svc_pwrmon_debugfs_init(svc);
}

static void svc_debugfs_exit(struct gb_svc *svc)
{
	debugfs_remove_recursive(svc->debugfs_dentry);
	kfree(svc->rail_names);
}
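
/*
 * The SVC Hello request carries the endo id and the id of the interface the
 * AP is attached to. Only at this point do we know enough to register the
 * svc device, the input device and the watchdog.
 */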

static int gb_svc_hello(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_hello_request *hello_request;
	int ret;

	if (op->request->payload_size < sizeof(*hello_request)) {
		dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
			 op->request->payload_size,
			 sizeof(*hello_request));
		return -EINVAL;
	}

	hello_request = op->request->payload;
	svc->endo_id = le16_to_cpu(hello_request->endo_id);
	svc->ap_intf_id = hello_request->interface_id;

	ret = device_add(&svc->dev);
	if (ret) {
		dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
		return ret;
	}

	ret = input_register_device(svc->input);
	if (ret) {
		dev_err(&svc->dev, "failed to register input: %d\n", ret);
		device_del(&svc->dev);
		return ret;
	}

	ret = gb_svc_watchdog_create(svc);
	if (ret) {
		dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
		input_unregister_device(svc->input);
		device_del(&svc->dev);
		return ret;
	}

	svc_debugfs_init(svc);

	return 0;
}
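
/*
 * Runs in workqueue context (via gb_svc_process_deferred_request()), so it
 * may sleep while the interface is activated and enabled. A hotplug event
 * for an interface that already exists is treated as a mode switch: the
 * interface is disabled and then re-enabled in place.
 */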

static void gb_svc_process_intf_hotplug(struct gb_operation *operation)
{
	struct gb_svc_intf_hotplug_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_host_device *hd = connection->hd;
	struct gb_interface *intf;
	u8 intf_id;
	int ret;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);

	intf = gb_interface_find(hd, intf_id);
	if (intf) {
		dev_info(&svc->dev, "mode switch detected on interface %u\n",
			 intf_id);

		/* Mark as disconnected to prevent I/O during disable. */
		intf->disconnected = true;
		gb_interface_disable(intf);
		intf->disconnected = false;

		goto enable_interface;
	}

	intf = gb_interface_create(hd, intf_id);
	if (!intf) {
		dev_err(&svc->dev, "failed to create interface %u\n",
			intf_id);
		return;
	}

	ret = gb_interface_activate(intf);
	if (ret) {
		dev_err(&svc->dev, "failed to activate interface %u: %d\n",
			intf_id, ret);
		gb_interface_add(intf);
		return;
	}

	ret = gb_interface_add(intf);
	if (ret)
		goto err_interface_deactivate;

enable_interface:
	ret = gb_interface_enable(intf);
	if (ret) {
		dev_err(&svc->dev, "failed to enable interface %u: %d\n",
			intf_id, ret);
		goto err_interface_deactivate;
	}

	return;

err_interface_deactivate:
	gb_interface_deactivate(intf);
}

static void gb_svc_process_intf_hot_unplug(struct gb_operation *operation)
{
	struct gb_svc *svc = gb_connection_get_data(operation->connection);
	struct gb_svc_intf_hot_unplug_request *request;
	struct gb_host_device *hd = operation->connection->hd;
	struct gb_interface *intf;
	u8 intf_id;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);

	intf = gb_interface_find(hd, intf_id);
	if (!intf) {
		dev_warn(&svc->dev, "could not find hot-unplug interface %u\n",
			 intf_id);
		return;
	}

	/* Mark as disconnected to prevent I/O during disable. */
	intf->disconnected = true;

	gb_interface_disable(intf);
	gb_interface_deactivate(intf);
	gb_interface_remove(intf);
}
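
/*
 * Worker for the deferred requests queued below: dispatch on the request
 * type, then drop the operation reference taken when the request was
 * queued and free the wrapper.
 */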

static void gb_svc_process_deferred_request(struct work_struct *work)
{
	struct gb_svc_deferred_request *dr;
	struct gb_operation *operation;
	struct gb_svc *svc;
	u8 type;

	dr = container_of(work, struct gb_svc_deferred_request, work);
	operation = dr->operation;
	svc = gb_connection_get_data(operation->connection);
	type = operation->request->header->type;

	switch (type) {
	case GB_SVC_TYPE_INTF_HOTPLUG:
		gb_svc_process_intf_hotplug(operation);
		break;
	case GB_SVC_TYPE_INTF_HOT_UNPLUG:
		gb_svc_process_intf_hot_unplug(operation);
		break;
	default:
		dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
	}

	gb_operation_put(operation);
	kfree(dr);
}

static int gb_svc_queue_deferred_request(struct gb_operation *operation)
{
	struct gb_svc *svc = gb_connection_get_data(operation->connection);
	struct gb_svc_deferred_request *dr;

	dr = kmalloc(sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	/* Hold a reference to the operation until the worker has run. */
	gb_operation_get(operation);

	dr->operation = operation;
	INIT_WORK(&dr->work, gb_svc_process_deferred_request);

	queue_work(svc->wq, &dr->work);

	return 0;
}

/*
 * Bringing up a module can be time consuming, as that may require lots of
 * initialization on the module side. On top of that, we may also need to
 * download new firmware to the module and flash it first.
 *
 * In order not to make other SVC events wait for all this to finish, handle
 * most of the module hotplug work outside of the hotplug callback, with the
 * help of a workqueue.
 */
static int gb_svc_intf_hotplug_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_intf_hotplug_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short hotplug request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_intf_hot_unplug_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_intf_hot_unplug_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short hot unplug request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_message *request = op->request;
	struct gb_svc_intf_reset_request *reset;
	u8 intf_id;

	if (request->payload_size < sizeof(*reset)) {
		dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
			 request->payload_size, sizeof(*reset));
		return -EINVAL;
	}
	reset = request->payload;

	intf_id = reset->intf_id;

	/* FIXME Reset the interface here */

	return 0;
}
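
/*
 * Module button presses are reported by the SVC as key events; map the
 * wire keycode to an input keycode and forward press/release events to
 * the input subsystem.
 */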

static int gb_svc_key_code_map(struct gb_svc *svc, u16 key_code, u16 *code)
{
	switch (key_code) {
	case GB_KEYCODE_ARA:
		*code = SVC_KEY_ARA_BUTTON;
		break;
	default:
		dev_warn(&svc->dev, "unknown keycode received: %u\n", key_code);
		return -EINVAL;
	}

	return 0;
}

static int gb_svc_key_event_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_message *request = op->request;
	struct gb_svc_key_event_request *key;
	u16 code;
	u8 event;
	int ret;

	if (request->payload_size < sizeof(*key)) {
		dev_warn(&svc->dev, "short key request received (%zu < %zu)\n",
			 request->payload_size, sizeof(*key));
		return -EINVAL;
	}

	key = request->payload;

	ret = gb_svc_key_code_map(svc, le16_to_cpu(key->key_code), &code);
	if (ret < 0)
		return ret;

	event = key->key_event;
	if ((event != GB_SVC_KEY_PRESSED) && (event != GB_SVC_KEY_RELEASED)) {
		dev_warn(&svc->dev, "unknown key event received: %u\n", event);
		return -EINVAL;
	}

	input_report_key(svc->input, code, (event == GB_SVC_KEY_PRESSED));
	input_sync(svc->input);

	return 0;
}

static int gb_svc_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	u8 type = op->type;
	int ret = 0;

	/*
	 * SVC requests need to follow a specific order (at least initially),
	 * and the code below enforces that. The expected order is:
	 * - PROTOCOL_VERSION
	 * - SVC_HELLO
	 * - Any other request, except the two above.
	 *
	 * Incoming requests are guaranteed to be serialized, so we don't
	 * need to protect 'state' against races.
	 */
	switch (type) {
	case GB_REQUEST_TYPE_PROTOCOL_VERSION:
		if (svc->state != GB_SVC_STATE_RESET)
			ret = -EINVAL;
		break;
	case GB_SVC_TYPE_SVC_HELLO:
		if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
			ret = -EINVAL;
		break;
	default:
		if (svc->state != GB_SVC_STATE_SVC_HELLO)
			ret = -EINVAL;
		break;
	}

	if (ret) {
		dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
			 type, svc->state);
		return ret;
	}

	switch (type) {
	case GB_REQUEST_TYPE_PROTOCOL_VERSION:
		ret = gb_svc_version_request(op);
		if (!ret)
			svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
		return ret;
	case GB_SVC_TYPE_SVC_HELLO:
		ret = gb_svc_hello(op);
		if (!ret)
			svc->state = GB_SVC_STATE_SVC_HELLO;
		return ret;
	case GB_SVC_TYPE_INTF_HOTPLUG:
		return gb_svc_intf_hotplug_recv(op);
	case GB_SVC_TYPE_INTF_HOT_UNPLUG:
		return gb_svc_intf_hot_unplug_recv(op);
	case GB_SVC_TYPE_INTF_RESET:
		return gb_svc_intf_reset_recv(op);
	case GB_SVC_TYPE_KEY_EVENT:
		return gb_svc_key_event_recv(op);
	default:
		dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
		return -EINVAL;
	}
}

static struct input_dev *gb_svc_input_create(struct gb_svc *svc)
{
	struct input_dev *input_dev;

	input_dev = input_allocate_device();
	if (!input_dev)
		return ERR_PTR(-ENOMEM);

	input_dev->name = dev_name(&svc->dev);
	svc->input_phys = kasprintf(GFP_KERNEL, "greybus-%s/input0",
				    input_dev->name);
	if (!svc->input_phys)
		goto err_free_input;

	input_dev->phys = svc->input_phys;
	input_dev->dev.parent = &svc->dev;

	input_set_drvdata(input_dev, svc);

	input_set_capability(input_dev, EV_KEY, SVC_KEY_ARA_BUTTON);

	return input_dev;

err_free_input:
	/* svc->input has not been assigned yet, so free the local device. */
	input_free_device(input_dev);
	return ERR_PTR(-ENOMEM);
}

static void gb_svc_release(struct device *dev)
{
	struct gb_svc *svc = to_gb_svc(dev);

	if (svc->connection)
		gb_connection_destroy(svc->connection);
	ida_destroy(&svc->device_id_map);
	destroy_workqueue(svc->wq);
	kfree(svc->input_phys);
	kfree(svc);
}

struct device_type greybus_svc_type = {
	.name		= "greybus_svc",
	.release	= gb_svc_release,
};
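
/*
 * Lifecycle: gb_svc_create() allocates the svc and its static SVC
 * connection, gb_svc_add() enables the connection (registration of the
 * device itself is deferred until the SVC hello arrives), gb_svc_del()
 * tears everything down, and gb_svc_put() drops the final reference so
 * that gb_svc_release() frees the structure.
 */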

struct gb_svc *gb_svc_create(struct gb_host_device *hd)
{
	struct gb_svc *svc;

	svc = kzalloc(sizeof(*svc), GFP_KERNEL);
	if (!svc)
		return NULL;

	svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
	if (!svc->wq) {
		kfree(svc);
		return NULL;
	}

	svc->dev.parent = &hd->dev;
	svc->dev.bus = &greybus_bus_type;
	svc->dev.type = &greybus_svc_type;
	svc->dev.groups = svc_groups;
	svc->dev.dma_mask = svc->dev.parent->dma_mask;
	device_initialize(&svc->dev);

	dev_set_name(&svc->dev, "%d-svc", hd->bus_id);

	ida_init(&svc->device_id_map);
	svc->state = GB_SVC_STATE_RESET;
	svc->hd = hd;

	svc->input = gb_svc_input_create(svc);
	if (IS_ERR(svc->input)) {
		dev_err(&svc->dev, "failed to create input device: %ld\n",
			PTR_ERR(svc->input));
		goto err_put_device;
	}

	svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
						      gb_svc_request_handler);
	if (IS_ERR(svc->connection)) {
		dev_err(&svc->dev, "failed to create connection: %ld\n",
			PTR_ERR(svc->connection));
		goto err_free_input;
	}

	gb_connection_set_data(svc->connection, svc);

	return svc;

err_free_input:
	input_free_device(svc->input);
err_put_device:
	put_device(&svc->dev);
	return NULL;
}

int gb_svc_add(struct gb_svc *svc)
{
	int ret;

	/*
	 * The SVC protocol is currently driven by the SVC, so the SVC device
	 * is added from the connection request handler when enough
	 * information has been received.
	 */
	ret = gb_connection_enable(svc->connection);
	if (ret)
		return ret;

	return 0;
}

static void gb_svc_remove_interfaces(struct gb_svc *svc)
{
	struct gb_interface *intf, *tmp;

	list_for_each_entry_safe(intf, tmp, &svc->hd->interfaces, links) {
		gb_interface_disable(intf);
		gb_interface_deactivate(intf);
		gb_interface_remove(intf);
	}
}

void gb_svc_del(struct gb_svc *svc)
{
	gb_connection_disable(svc->connection);

	/*
	 * The SVC device and input device may have been registered
	 * from the request handler.
	 */
	if (device_is_registered(&svc->dev)) {
		svc_debugfs_exit(svc);
		gb_svc_watchdog_destroy(svc);
		input_unregister_device(svc->input);
		device_del(&svc->dev);
	}

	flush_workqueue(svc->wq);

	gb_svc_remove_interfaces(svc);
}

void gb_svc_put(struct gb_svc *svc)
{
	put_device(&svc->dev);
}