/*
 * SVC Greybus driver.
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/workqueue.h>

#include "greybus.h"

#define CPORT_FLAGS_E2EFC       BIT(0)
#define CPORT_FLAGS_CSD_N       BIT(1)
#define CPORT_FLAGS_CSV_N       BIT(2)


struct svc_hotplug {
        struct work_struct work;
        struct gb_connection *connection;
        struct gb_svc_intf_hotplug_request data;
};


static ssize_t endo_id_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct gb_svc *svc = to_gb_svc(dev);

        return sprintf(buf, "0x%04x\n", svc->endo_id);
}
static DEVICE_ATTR_RO(endo_id);

static ssize_t ap_intf_id_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct gb_svc *svc = to_gb_svc(dev);

        return sprintf(buf, "%u\n", svc->ap_intf_id);
}
static DEVICE_ATTR_RO(ap_intf_id);

static struct attribute *svc_attrs[] = {
        &dev_attr_endo_id.attr,
        &dev_attr_ap_intf_id.attr,
        NULL,
};
ATTRIBUTE_GROUPS(svc);

static int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
{
        struct gb_svc_intf_device_id_request request;

        request.intf_id = intf_id;
        request.device_id = device_id;

        return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
                                 &request, sizeof(request), NULL, 0);
}

int gb_svc_intf_reset(struct gb_svc *svc, u8 intf_id)
{
        struct gb_svc_intf_reset_request request;

        request.intf_id = intf_id;

        return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_RESET,
                                 &request, sizeof(request), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_svc_intf_reset);

int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
                        u32 *value)
{
        struct gb_svc_dme_peer_get_request request;
        struct gb_svc_dme_peer_get_response response;
        u16 result;
        int ret;

        request.intf_id = intf_id;
        request.attr = cpu_to_le16(attr);
        request.selector = cpu_to_le16(selector);

        ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
                                &request, sizeof(request),
                                &response, sizeof(response));
        if (ret) {
                dev_err(&svc->dev, "failed to get DME attribute (%hhu %hx %hu): %d\n",
                                intf_id, attr, selector, ret);
                return ret;
        }

        result = le16_to_cpu(response.result_code);
        if (result) {
                dev_err(&svc->dev, "UniPro error while getting DME attribute (%hhu %hx %hu): %hu\n",
                                intf_id, attr, selector, result);
                return -EINVAL;
        }

        if (value)
                *value = le32_to_cpu(response.attr_value);

        return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_dme_peer_get);

int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
                        u32 value)
{
        struct gb_svc_dme_peer_set_request request;
        struct gb_svc_dme_peer_set_response response;
        u16 result;
        int ret;

        request.intf_id = intf_id;
        request.attr = cpu_to_le16(attr);
        request.selector = cpu_to_le16(selector);
        request.value = cpu_to_le32(value);

        ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
                                &request, sizeof(request),
                                &response, sizeof(response));
        if (ret) {
                dev_err(&svc->dev, "failed to set DME attribute (%hhu %hx %hu %u): %d\n",
                                intf_id, attr, selector, value, ret);
                return ret;
        }

        result = le16_to_cpu(response.result_code);
        if (result) {
                dev_err(&svc->dev, "UniPro error while setting DME attribute (%hhu %hx %hu %u): %hu\n",
                                intf_id, attr, selector, value, result);
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_dme_peer_set);
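
/*
 * Illustrative sketch (not part of the driver): another Greybus driver
 * holding a struct gb_interface could use the two exported DME peer
 * helpers above to read an attribute and then clear it, much like the
 * boot-status handling further down in this file does:
 *
 *      u32 value;
 *      int ret;
 *
 *      ret = gb_svc_dme_peer_get(intf->hd->svc, intf->interface_id,
 *                                DME_ATTR_T_TST_SRC_INCREMENT,
 *                                DME_ATTR_SELECTOR_INDEX, &value);
 *      if (!ret && value)
 *              ret = gb_svc_dme_peer_set(intf->hd->svc, intf->interface_id,
 *                                        DME_ATTR_T_TST_SRC_INCREMENT,
 *                                        DME_ATTR_SELECTOR_INDEX, 0);
 *
 * Both helpers return 0 on success or a negative errno, and map a
 * non-zero UniPro result code to -EINVAL.
 */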

/*
 * T_TstSrcIncrement is written by the module on ES2 as a stand-in for the
 * boot status attribute. The AP needs to read it and, after reading a
 * non-zero value, clear it.
 *
 * FIXME: This is module-hardware dependent and needs to be extended for every
 * type of module we want to support.
 */
static int gb_svc_read_and_clear_module_boot_status(struct gb_interface *intf)
{
        struct gb_host_device *hd = intf->hd;
        int ret;
        u32 value;

        /* Read and clear boot status in T_TstSrcIncrement */
        ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id,
                                  DME_ATTR_T_TST_SRC_INCREMENT,
                                  DME_ATTR_SELECTOR_INDEX, &value);

        if (ret)
                return ret;

        /*
         * A nonzero boot status indicates the module has finished
         * booting. Clear it.
         */
        if (!value) {
                dev_err(&intf->dev, "Module not ready yet\n");
                return -ENODEV;
        }

        /*
         * Check if the module needs to boot over UniPro.
         * For ES2: we need to check the lowest 8 bits of 'value'.
         * For ES3: we need to check the highest 8 bits of the 32-bit 'value'.
         *
         * FIXME: Add code to find out if we are on ES2 or ES3 to have
         * separate checks.
         */
        if (value == DME_TSI_UNIPRO_BOOT_STARTED ||
            value == DME_TSI_FALLBACK_UNIPRO_BOOT_STARTED)
                intf->boot_over_unipro = true;

        return gb_svc_dme_peer_set(hd->svc, intf->interface_id,
                                   DME_ATTR_T_TST_SRC_INCREMENT,
                                   DME_ATTR_SELECTOR_INDEX, 0);
}

int gb_svc_connection_create(struct gb_svc *svc,
                                u8 intf1_id, u16 cport1_id,
                                u8 intf2_id, u16 cport2_id,
                                bool boot_over_unipro)
{
        struct gb_svc_conn_create_request request;

        request.intf1_id = intf1_id;
        request.cport1_id = cpu_to_le16(cport1_id);
        request.intf2_id = intf2_id;
        request.cport2_id = cpu_to_le16(cport2_id);
        /*
         * XXX: fix connection parameters to TC0 and all CPort flags
         * for now.
         */
        request.tc = 0;

        /*
         * We need to skip setting E2EFC and other flags in the connection
         * create request, for all CPorts, on an interface that needs to boot
         * over UniPro, i.e. an interface that still has to download firmware.
         */
        if (boot_over_unipro)
                request.flags = CPORT_FLAGS_CSV_N | CPORT_FLAGS_CSD_N;
        else
                request.flags = CPORT_FLAGS_CSV_N | CPORT_FLAGS_E2EFC;

        return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
                                 &request, sizeof(request), NULL, 0);
}
EXPORT_SYMBOL_GPL(gb_svc_connection_create);
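
/*
 * Illustrative sketch (not part of the driver): creating and later
 * destroying a connection between an AP CPort and a module CPort pairs
 * the helper above with gb_svc_connection_destroy() below. The
 * ap_cport_id, intf_id and cport_id names here are placeholders for
 * whatever the caller has allocated:
 *
 *      ret = gb_svc_connection_create(svc, svc->ap_intf_id, ap_cport_id,
 *                                     intf_id, cport_id,
 *                                     intf->boot_over_unipro);
 *      ...
 *      gb_svc_connection_destroy(svc, svc->ap_intf_id, ap_cport_id,
 *                                intf_id, cport_id);
 *
 * The boot_over_unipro flag selects the reduced set of CPort flags used
 * while a module still has to download its firmware over UniPro.
 */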

void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
                               u8 intf2_id, u16 cport2_id)
{
        struct gb_svc_conn_destroy_request request;
        struct gb_connection *connection = svc->connection;
        int ret;

        request.intf1_id = intf1_id;
        request.cport1_id = cpu_to_le16(cport1_id);
        request.intf2_id = intf2_id;
        request.cport2_id = cpu_to_le16(cport2_id);

        ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
                                &request, sizeof(request), NULL, 0);
        if (ret) {
                dev_err(&svc->dev, "failed to destroy connection (%hhu:%hu %hhu:%hu): %d\n",
                                intf1_id, cport1_id, intf2_id, cport2_id, ret);
        }
}
EXPORT_SYMBOL_GPL(gb_svc_connection_destroy);

/* Creates bi-directional routes between the devices */
static int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
                               u8 intf2_id, u8 dev2_id)
{
        struct gb_svc_route_create_request request;

        request.intf1_id = intf1_id;
        request.dev1_id = dev1_id;
        request.intf2_id = intf2_id;
        request.dev2_id = dev2_id;

        return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
                                 &request, sizeof(request), NULL, 0);
}

/* Destroys bi-directional routes between the devices */
static void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
{
        struct gb_svc_route_destroy_request request;
        int ret;

        request.intf1_id = intf1_id;
        request.intf2_id = intf2_id;

        ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
                                &request, sizeof(request), NULL, 0);
        if (ret) {
                dev_err(&svc->dev, "failed to destroy route (%hhu %hhu): %d\n",
                                intf1_id, intf2_id, ret);
        }
}
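
/*
 * Illustrative sketch (not part of the driver): the route helpers above
 * are used in matched pairs around an interface's lifetime, together
 * with its device id, roughly as the hotplug path below does:
 *
 *      ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_DEVICE_ID_AP,
 *                                intf_id, device_id);
 *      ...
 *      gb_svc_route_destroy(svc, svc->ap_intf_id, intf_id);
 *
 * Note that only the interface ids are needed to destroy a route; the
 * device ids matter only when the route is created.
 */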

static int gb_svc_version_request(struct gb_operation *op)
{
        struct gb_connection *connection = op->connection;
        struct gb_svc *svc = connection->private;
        struct gb_protocol_version_request *request;
        struct gb_protocol_version_response *response;

        if (op->request->payload_size < sizeof(*request)) {
                dev_err(&svc->dev, "short version request (%zu < %zu)\n",
                                op->request->payload_size,
                                sizeof(*request));
                return -EINVAL;
        }

        request = op->request->payload;

        if (request->major > GB_SVC_VERSION_MAJOR) {
                dev_warn(&svc->dev, "unsupported major version (%hhu > %hhu)\n",
                                request->major, GB_SVC_VERSION_MAJOR);
                return -ENOTSUPP;
        }

        connection->module_major = request->major;
        connection->module_minor = request->minor;

        if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
                return -ENOMEM;

        response = op->response->payload;
        response->major = connection->module_major;
        response->minor = connection->module_minor;

        return 0;
}

static int gb_svc_hello(struct gb_operation *op)
{
        struct gb_connection *connection = op->connection;
        struct gb_svc *svc = connection->private;
        struct gb_svc_hello_request *hello_request;
        int ret;

        if (op->request->payload_size < sizeof(*hello_request)) {
                dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
                                op->request->payload_size,
                                sizeof(*hello_request));
                return -EINVAL;
        }

        hello_request = op->request->payload;
        svc->endo_id = le16_to_cpu(hello_request->endo_id);
        svc->ap_intf_id = hello_request->interface_id;

        ret = device_add(&svc->dev);
        if (ret) {
                dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
                return ret;
        }

        return 0;
}

static void gb_svc_intf_remove(struct gb_svc *svc, struct gb_interface *intf)
{
        u8 intf_id = intf->interface_id;
        u8 device_id;

        device_id = intf->device_id;
        gb_interface_remove(intf);

        /*
         * Destroy the two-way route between the AP and the interface.
         */
        gb_svc_route_destroy(svc, svc->ap_intf_id, intf_id);

        ida_simple_remove(&svc->device_id_map, device_id);
}

/*
 * 'struct svc_hotplug' should be freed by svc_process_hotplug() before it
 * returns, irrespective of success or failure in bringing up the module.
 */
static void svc_process_hotplug(struct work_struct *work)
{
        struct svc_hotplug *svc_hotplug = container_of(work, struct svc_hotplug,
                                                       work);
        struct gb_svc_intf_hotplug_request *request;
        struct gb_connection *connection = svc_hotplug->connection;
        struct gb_svc *svc = connection->private;
        struct gb_host_device *hd = connection->hd;
        struct gb_interface *intf;
        u8 intf_id, device_id;
        int ret;

        /* The request message size has already been verified. */
        request = &svc_hotplug->data;
        intf_id = request->intf_id;

        dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);

        intf = gb_interface_find(hd, intf_id);
        if (intf) {
                /*
                 * We have received a hotplug request for an interface that
                 * already exists.
                 *
                 * This can happen in cases like:
                 * - the bootrom loading the firmware image and booting into
                 *   it, which only generates a hotplug event, i.e. no
                 *   hot-unplug event, or
                 * - the firmware on the module crashing and sending the
                 *   hotplug request to the SVC again, which got propagated
                 *   to the AP.
                 *
                 * Remove the interface and add it again, and let the user
                 * know about this with a log message.
                 */
                dev_info(&svc->dev, "removing interface %hhu to add it again\n",
                                intf_id);
                gb_svc_intf_remove(svc, intf);
        }

        intf = gb_interface_create(hd, intf_id);
        if (!intf) {
                dev_err(&svc->dev, "failed to create interface %hhu\n",
                                intf_id);
                goto free_svc_hotplug;
        }

        ret = gb_svc_read_and_clear_module_boot_status(intf);
        if (ret)
                goto destroy_interface;

        intf->unipro_mfg_id = le32_to_cpu(request->data.unipro_mfg_id);
        intf->unipro_prod_id = le32_to_cpu(request->data.unipro_prod_id);
        intf->vendor_id = le32_to_cpu(request->data.ara_vend_id);
        intf->product_id = le32_to_cpu(request->data.ara_prod_id);

        /*
         * Create a device id for the interface:
         * - device id 0 (GB_DEVICE_ID_SVC) belongs to the SVC
         * - device id 1 (GB_DEVICE_ID_AP) belongs to the AP
         *
         * XXX Do we need to allocate a device id for the SVC or the AP here?
         * XXX And what about an AP with multiple interface blocks?
         */
        ret = ida_simple_get(&svc->device_id_map,
                             GB_DEVICE_ID_MODULES_START, 0, GFP_KERNEL);
        if (ret < 0) {
                dev_err(&svc->dev, "failed to allocate device id for interface %hhu: %d\n",
                                intf_id, ret);
                goto destroy_interface;
        }
        device_id = ret;

        ret = gb_svc_intf_device_id(svc, intf_id, device_id);
        if (ret) {
                dev_err(&svc->dev, "failed to set device id %hhu for interface %hhu: %d\n",
                                device_id, intf_id, ret);
                goto ida_put;
        }

        /*
         * Create a two-way route between the AP and the new interface
         */
        ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_DEVICE_ID_AP,
                                  intf_id, device_id);
        if (ret) {
                dev_err(&svc->dev, "failed to create route to interface %hhu (device id %hhu): %d\n",
                                intf_id, device_id, ret);
                goto svc_id_free;
        }

        ret = gb_interface_init(intf, device_id);
        if (ret) {
                dev_err(&svc->dev, "failed to initialize interface %hhu (device id %hhu): %d\n",
                                intf_id, device_id, ret);
                goto destroy_route;
        }

        goto free_svc_hotplug;

destroy_route:
        gb_svc_route_destroy(svc, svc->ap_intf_id, intf_id);
svc_id_free:
        /*
         * XXX Should we tell the SVC that this device id no longer belongs
         * XXX to the interface?
         */
ida_put:
        ida_simple_remove(&svc->device_id_map, device_id);
destroy_interface:
        gb_interface_remove(intf);
free_svc_hotplug:
        kfree(svc_hotplug);
}

/*
 * Bringing up a module can be time consuming, as it may require a lot of
 * initialization on the module side. On top of that, we may also need to
 * download the firmware first and flash it to the module.
 *
 * So that other hotplug events don't have to wait for all of this to finish,
 * handle most of the module hotplug work outside of the hotplug callback,
 * with the help of a workqueue.
 */
static int gb_svc_intf_hotplug_recv(struct gb_operation *op)
{
        struct gb_svc *svc = op->connection->private;
        struct gb_svc_intf_hotplug_request *request;
        struct svc_hotplug *svc_hotplug;

        if (op->request->payload_size < sizeof(*request)) {
                dev_warn(&svc->dev, "short hotplug request received (%zu < %zu)\n",
                                op->request->payload_size, sizeof(*request));
                return -EINVAL;
        }

        request = op->request->payload;

        dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);

        svc_hotplug = kmalloc(sizeof(*svc_hotplug), GFP_KERNEL);
        if (!svc_hotplug)
                return -ENOMEM;

        svc_hotplug->connection = op->connection;
        memcpy(&svc_hotplug->data, request, sizeof(svc_hotplug->data));

        INIT_WORK(&svc_hotplug->work, svc_process_hotplug);
        queue_work(system_unbound_wq, &svc_hotplug->work);

        return 0;
}

static int gb_svc_intf_hot_unplug_recv(struct gb_operation *op)
{
        struct gb_svc *svc = op->connection->private;
        struct gb_svc_intf_hot_unplug_request *request;
        struct gb_host_device *hd = op->connection->hd;
        struct gb_interface *intf;
        u8 intf_id;

        if (op->request->payload_size < sizeof(*request)) {
                dev_warn(&svc->dev, "short hot unplug request received (%zu < %zu)\n",
                                op->request->payload_size, sizeof(*request));
                return -EINVAL;
        }

        request = op->request->payload;
        intf_id = request->intf_id;

        dev_dbg(&svc->dev, "%s - id = %u\n", __func__, intf_id);

        intf = gb_interface_find(hd, intf_id);
        if (!intf) {
                dev_warn(&svc->dev, "could not find hot-unplug interface %hhu\n",
                                intf_id);
                return -EINVAL;
        }

        gb_svc_intf_remove(svc, intf);

        return 0;
}

static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
        struct gb_svc *svc = op->connection->private;
        struct gb_message *request = op->request;
        struct gb_svc_intf_reset_request *reset;
        u8 intf_id;

        if (request->payload_size < sizeof(*reset)) {
                dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
                                request->payload_size, sizeof(*reset));
                return -EINVAL;
        }
        reset = request->payload;

        intf_id = reset->intf_id;

        /* FIXME Reset the interface here */

        return 0;
}

static int gb_svc_request_recv(u8 type, struct gb_operation *op)
{
        struct gb_connection *connection = op->connection;
        struct gb_svc *svc = connection->private;
        int ret = 0;

        /*
         * SVC requests need to follow a specific order (at least initially),
         * and the code below takes care of enforcing that. The expected order
         * is:
         * - PROTOCOL_VERSION
         * - SVC_HELLO
         * - Any other request, but only after the two above.
         *
         * Incoming requests are guaranteed to be serialized, so we don't
         * need to protect 'state' against races.
         */
        switch (type) {
        case GB_REQUEST_TYPE_PROTOCOL_VERSION:
                if (svc->state != GB_SVC_STATE_RESET)
                        ret = -EINVAL;
                break;
        case GB_SVC_TYPE_SVC_HELLO:
                if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
                        ret = -EINVAL;
                break;
        default:
                if (svc->state != GB_SVC_STATE_SVC_HELLO)
                        ret = -EINVAL;
                break;
        }

        if (ret) {
                dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
                                type, svc->state);
                return ret;
        }

        switch (type) {
        case GB_REQUEST_TYPE_PROTOCOL_VERSION:
                ret = gb_svc_version_request(op);
                if (!ret)
                        svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
                return ret;
        case GB_SVC_TYPE_SVC_HELLO:
                ret = gb_svc_hello(op);
                if (!ret)
                        svc->state = GB_SVC_STATE_SVC_HELLO;
                return ret;
        case GB_SVC_TYPE_INTF_HOTPLUG:
                return gb_svc_intf_hotplug_recv(op);
        case GB_SVC_TYPE_INTF_HOT_UNPLUG:
                return gb_svc_intf_hot_unplug_recv(op);
        case GB_SVC_TYPE_INTF_RESET:
                return gb_svc_intf_reset_recv(op);
        default:
                dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
                return -EINVAL;
        }
}

static void gb_svc_release(struct device *dev)
{
        struct gb_svc *svc = to_gb_svc(dev);

        ida_destroy(&svc->device_id_map);
        kfree(svc);
}

struct device_type greybus_svc_type = {
        .name           = "greybus_svc",
        .release        = gb_svc_release,
};

static int gb_svc_connection_init(struct gb_connection *connection)
{
        struct gb_host_device *hd = connection->hd;
        struct gb_svc *svc;

        svc = kzalloc(sizeof(*svc), GFP_KERNEL);
        if (!svc)
                return -ENOMEM;

        svc->dev.parent = &hd->dev;
        svc->dev.bus = &greybus_bus_type;
        svc->dev.type = &greybus_svc_type;
        svc->dev.groups = svc_groups;
        svc->dev.dma_mask = svc->dev.parent->dma_mask;
        device_initialize(&svc->dev);

        dev_set_name(&svc->dev, "%d-svc", hd->bus_id);

        ida_init(&svc->device_id_map);
        svc->state = GB_SVC_STATE_RESET;
        svc->connection = connection;
        connection->private = svc;

        hd->svc = svc;

        return 0;
}

static void gb_svc_connection_exit(struct gb_connection *connection)
{
        struct gb_svc *svc = connection->private;

        if (device_is_registered(&svc->dev))
                device_del(&svc->dev);

        connection->hd->svc = NULL;
        connection->private = NULL;

        put_device(&svc->dev);
}

static struct gb_protocol svc_protocol = {
        .name                   = "svc",
        .id                     = GREYBUS_PROTOCOL_SVC,
        .major                  = GB_SVC_VERSION_MAJOR,
        .minor                  = GB_SVC_VERSION_MINOR,
        .connection_init        = gb_svc_connection_init,
        .connection_exit        = gb_svc_connection_exit,
        .request_recv           = gb_svc_request_recv,
        .flags                  = GB_PROTOCOL_SKIP_CONTROL_CONNECTED |
                                  GB_PROTOCOL_SKIP_CONTROL_DISCONNECTED |
                                  GB_PROTOCOL_SKIP_VERSION,
};
gb_builtin_protocol_driver(svc_protocol);