/*
 * Greybus operations
 *
 * Copyright 2014 Google Inc.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/workqueue.h>

#include "greybus.h"

/*
 * The top bit of the type in an operation message header indicates
 * whether the message is a request (bit clear) or response (bit set)
 */
#define GB_OPERATION_TYPE_RESPONSE      0x80

#define OPERATION_TIMEOUT_DEFAULT       1000    /* milliseconds */

/*
 * XXX This needs to be coordinated with host driver parameters
 */
#define GB_OPERATION_MESSAGE_SIZE_MAX   4096

static struct kmem_cache *gb_operation_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_recv_workqueue;

/*
 * All operation messages (both requests and responses) begin with
 * a common header that encodes the size of the data (header
 * included).  This header also contains a unique identifier, which
 * is used to keep track of in-flight operations.  Finally, the
 * header contains an operation type field, whose interpretation is
 * dependent on what type of device lies on the other end of the
 * connection.  Response messages are distinguished from request
 * messages by setting the high bit (0x80) in the operation type
 * value.
 *
 * The wire format for all numeric fields in the header is little
 * endian.  Any operation-specific data begins immediately after the
 * header, and is 64-bit aligned.
 */
struct gb_operation_msg_hdr {
        __le16  size;   /* Size in bytes of header + payload */
        __le16  id;     /* Operation unique id */
        __u8    type;   /* E.g. GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
        /* 3 bytes pad, must be zero (ignore when read) */
} __aligned(sizeof(u64));
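
/*
 * As a worked example of the wire format above (the type value and
 * operation id are hypothetical), a request carrying a 4-byte payload
 * with operation id 5 and type 0x02 would be laid out as:
 *
 *   offset  bytes        field
 *   0       0c 00        size = 12 (8-byte header + 4-byte payload)
 *   2       05 00        id = 5, little endian
 *   4       02           type (the matching response would be 0x82)
 *   5       00 00 00     pad, must be zero
 *   8       ...          payload, 64-bit aligned
 */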

/* XXX Could be per-host device, per-module, or even per-connection */
static DEFINE_SPINLOCK(gb_operations_lock);

static void gb_operation_insert(struct gb_operation *operation)
{
        struct gb_connection *connection = operation->connection;
        struct rb_root *root = &connection->pending;
        struct rb_node *node = &operation->node;
        struct rb_node **link = &root->rb_node;
        struct rb_node *above = NULL;
        struct gb_operation_msg_hdr *header;
        unsigned long timeout;
        bool start_timer;
        __le16 wire_id;

        /*
         * Assign the operation's id and store it in the request
         * message header.
         */
        operation->id = gb_connection_operation_id(connection);
        wire_id = cpu_to_le16(operation->id);
        header = operation->request->transfer_buffer;
        header->id = wire_id;

        /* OK, insert the operation into its connection's tree */
        spin_lock_irq(&gb_operations_lock);

        /*
         * We impose a time limit for requests to complete.  If
         * there are no requests pending there is no need for a
         * timer.  So if this will be the only one in flight we'll
         * need to start the timer.  Otherwise we just update the
         * existing one to give this request a full timeout period
         * to complete.
         */
        start_timer = RB_EMPTY_ROOT(root);

        while (*link) {
                struct gb_operation *other;

                above = *link;
                other = rb_entry(above, struct gb_operation, node);
                if (other->id > operation->id)
                        link = &above->rb_left;
                else if (other->id < operation->id)
                        link = &above->rb_right;
        }
        rb_link_node(node, above, link);
        rb_insert_color(node, root);
        spin_unlock_irq(&gb_operations_lock);

        timeout = msecs_to_jiffies(OPERATION_TIMEOUT_DEFAULT);
        if (start_timer)
                schedule_delayed_work(&operation->timeout_work, timeout);
        else
                mod_delayed_work(system_wq, &operation->timeout_work, timeout);
}

static void gb_operation_remove(struct gb_operation *operation)
{
        struct gb_connection *connection = operation->connection;

        /* Shut down our timeout timer */
        cancel_delayed_work(&operation->timeout_work);

        /* Take us off of the list of pending operations */
        spin_lock_irq(&gb_operations_lock);
        rb_erase(&operation->node, &connection->pending);
        spin_unlock_irq(&gb_operations_lock);
}

static struct gb_operation *
gb_operation_find(struct gb_connection *connection, u16 id)
{
        struct gb_operation *operation = NULL;
        struct rb_node *node;
        bool found = false;

        spin_lock_irq(&gb_operations_lock);
        node = connection->pending.rb_node;
        while (node && !found) {
                operation = rb_entry(node, struct gb_operation, node);
                if (operation->id > id)
                        node = node->rb_left;
                else if (operation->id < id)
                        node = node->rb_right;
                else
                        found = true;
        }
        spin_unlock_irq(&gb_operations_lock);

        return found ? operation : NULL;
}

/*
 * An operation's response message has arrived.  If no callback was
 * supplied the request was synchronous, so we notify any waiters.
 * Otherwise we assume calling the callback is enough and nobody
 * else will be waiting.
 */
static void gb_operation_complete(struct gb_operation *operation)
{
        if (operation->callback)
                operation->callback(operation);
        else
                complete_all(&operation->completion);
}

/* Wait for a submitted operation to complete */
int gb_operation_wait(struct gb_operation *operation)
{
        int ret;

        ret = wait_for_completion_interruptible(&operation->completion);
        /* If interrupted, cancel the in-flight buffer */
        if (ret < 0)
                greybus_kill_gbuf(operation->request);
        return ret;
}

/*
 * This handler is used if no operation response messages are ever
 * expected for a given protocol.
 */
static void gb_operation_recv_none(struct gb_operation *operation)
{
        /* Nothing to do! */
}

typedef void (*gb_operation_recv_handler)(struct gb_operation *operation);
static gb_operation_recv_handler gb_operation_recv_handlers[] = {
        [GREYBUS_PROTOCOL_CONTROL]      = NULL,
        [GREYBUS_PROTOCOL_AP]           = NULL,
        [GREYBUS_PROTOCOL_GPIO]         = NULL,
        [GREYBUS_PROTOCOL_I2C]          = gb_operation_recv_none,
        [GREYBUS_PROTOCOL_UART]         = NULL,
        [GREYBUS_PROTOCOL_HID]          = NULL,
        [GREYBUS_PROTOCOL_BATTERY]      = gb_operation_recv_none,
        [GREYBUS_PROTOCOL_LED]          = NULL,
        [GREYBUS_PROTOCOL_VENDOR]       = NULL,
};
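
#if 0   /* Illustrative sketch only; not built. */
/*
 * A minimal sketch of what a protocol-specific entry in the table
 * above might look like.  The handler name and the single-byte
 * request payload are hypothetical; a real handler would parse
 * operation->request_payload according to its protocol and, where
 * the protocol calls for it, build a response.
 */
static void gb_operation_recv_example(struct gb_operation *operation)
{
        u8 *payload = operation->request_payload;

        /* request_payload points just past the message header */
        pr_info("example request payload byte: %u\n", *payload);
        operation->result = GB_OP_SUCCESS;
}
#endif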

static void gb_operation_request_handle(struct gb_operation *operation)
{
        u8 protocol_id = operation->connection->protocol_id;

        /* Subtract one from array size to stay within u8 range */
        if (protocol_id <= (u8)(ARRAY_SIZE(gb_operation_recv_handlers) - 1)) {
                gb_operation_recv_handler handler;

                handler = gb_operation_recv_handlers[protocol_id];
                if (handler) {
                        handler(operation);     /* Handle the request */
                        return;
                }
        }

        gb_connection_err(operation->connection,
                "unrecognized protocol id %hhu\n", protocol_id);
        operation->result = GB_OP_PROTOCOL_BAD;
        gb_operation_complete(operation);
}

/*
 * Either this operation contains an incoming request, or its
 * response has arrived.  An incoming request will have a null
 * response buffer pointer (it is the responsibility of the request
 * handler to allocate and fill in the response buffer).
 */
static void gb_operation_recv_work(struct work_struct *recv_work)
{
        struct gb_operation *operation;
        bool incoming_request;

        operation = container_of(recv_work, struct gb_operation, recv_work);
        incoming_request = operation->response == NULL;
        if (incoming_request)
                gb_operation_request_handle(operation);
        gb_operation_complete(operation);

        /* We're finished with the buffer we read into */
        if (incoming_request)
                greybus_gbuf_finished(operation->request);
        else
                greybus_gbuf_finished(operation->response);
}

/*
 * Timeout call for the operation.
 *
 * If this fires, something went wrong, so mark the result as timed out, and
 * run the completion handler, which (hopefully) should clean up the operation
 * properly.
 */
static void operation_timeout(struct work_struct *work)
{
        struct gb_operation *operation;

        operation = container_of(work, struct gb_operation, timeout_work.work);
        pr_err("operation timed out\n");

        operation->result = GB_OP_TIMEOUT;
        gb_operation_complete(operation);
}

/*
 * Buffer completion function.  We get notified whenever any buffer
 * completes.  For outbound messages, this tells us that the message
 * has been sent.  For inbound messages, it means the data has
 * landed in the buffer and is ready to be processed.
 *
 * Either way, we don't do anything.  We don't really care when an
 * outbound message has been sent, and for incoming messages we'll
 * be done with everything we need to do before we mark the buffer
 * finished.
 *
 * XXX We may want to record that a request is (or is no longer) in flight.
 */
static void gb_operation_gbuf_complete(struct gbuf *gbuf)
{
        if (gbuf->status) {
                struct gb_operation *operation = gbuf->context;
                struct gb_operation_msg_hdr *header;
                int id;
                int type;

                /* The message header sits at the start of the transfer buffer */
                if (gbuf == operation->request)
                        header = operation->request->transfer_buffer;
                else if (gbuf == operation->response)
                        header = operation->response->transfer_buffer;
                else
                        header = NULL;

                if (header) {
                        id = le16_to_cpu(header->id);
                        type = header->type;
                } else {
                        id = -1;
                        type = -1;
                }

                gb_connection_err(operation->connection,
                        "operation %d type %d gbuf error %d",
                        id, type, gbuf->status);
        }
}

/*
 * Allocate a buffer to be used for an operation request or response
 * message.  For outgoing messages, both types of message contain a
 * common header, which is filled in here.  Incoming requests or
 * responses also contain the same header, but there's no need to
 * initialize it here (it'll be overwritten by the incoming
 * message).
 */
static struct gbuf *gb_operation_gbuf_create(struct gb_operation *operation,
                                             u8 type, size_t size,
                                             bool data_out)
{
        struct gb_connection *connection = operation->connection;
        struct gb_operation_msg_hdr *header;
        struct gbuf *gbuf;
        gfp_t gfp_flags = data_out ? GFP_KERNEL : GFP_ATOMIC;

        size += sizeof(*header);
        gbuf = greybus_alloc_gbuf(connection, gb_operation_gbuf_complete,
                                        size, data_out, gfp_flags, operation);
        if (!gbuf)
                return NULL;

        /* Fill in the header structure */
        header = (struct gb_operation_msg_hdr *)gbuf->transfer_buffer;
        header->size = cpu_to_le16(size);
        header->id = 0;         /* Filled in when submitted */
        header->type = type;

        return gbuf;
}

/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size.  Outgoing requests must specify the size of the response
 * buffer, which must be sufficient to hold all expected response
 * data.
 *
 * Incoming requests will supply a response size of 0, and in that
 * case no response buffer is allocated.  (A response always
 * includes a status byte, so 0 is not a valid size.)  Whatever
 * handles the operation request is responsible for allocating the
 * response buffer.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
struct gb_operation *gb_operation_create(struct gb_connection *connection,
                                        u8 type, size_t request_size,
                                        size_t response_size)
{
        struct gb_operation *operation;
        gfp_t gfp_flags = response_size ? GFP_KERNEL : GFP_ATOMIC;
        bool outgoing = response_size != 0;

        operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
        if (!operation)
                return NULL;
        operation->connection = connection;

        operation->request = gb_operation_gbuf_create(operation, type,
                                                        request_size,
                                                        outgoing);
        if (!operation->request)
                goto err_cache;
        operation->request_payload = operation->request->transfer_buffer +
                                        sizeof(struct gb_operation_msg_hdr);
        /* We always use the full request buffer */
        operation->request->actual_length = request_size;

        if (outgoing) {
                type |= GB_OPERATION_TYPE_RESPONSE;
                operation->response = gb_operation_gbuf_create(operation,
                                                type, response_size,
                                                false);
                if (!operation->response)
                        goto err_request;
                operation->response_payload =
                                operation->response->transfer_buffer +
                                sizeof(struct gb_operation_msg_hdr);
        }

        INIT_WORK(&operation->recv_work, gb_operation_recv_work);
        operation->callback = NULL;     /* set at submit time */
        init_completion(&operation->completion);
        INIT_DELAYED_WORK(&operation->timeout_work, operation_timeout);

        spin_lock_irq(&gb_operations_lock);
        list_add_tail(&operation->links, &connection->operations);
        spin_unlock_irq(&gb_operations_lock);

        return operation;

err_request:
        greybus_free_gbuf(operation->request);
err_cache:
        kmem_cache_free(gb_operation_cache, operation);

        return NULL;
}

/*
 * Destroy a previously created operation.
 */
void gb_operation_destroy(struct gb_operation *operation)
{
        if (WARN_ON(!operation))
                return;

        /* XXX Make sure it's not in flight */
        spin_lock_irq(&gb_operations_lock);
        list_del(&operation->links);
        spin_unlock_irq(&gb_operations_lock);

        greybus_free_gbuf(operation->response);
        greybus_free_gbuf(operation->request);

        kmem_cache_free(gb_operation_cache, operation);
}

/*
 * Send an operation request message.  The caller has filled in
 * any payload so the request message is ready to go.  If non-null,
 * the callback function supplied will be called when the response
 * message has arrived, indicating the operation is complete.  A null
 * callback function is used for a synchronous request; this function
 * won't return until the operation is complete (or an interrupt
 * occurs).
 */
int gb_operation_request_send(struct gb_operation *operation,
                                gb_operation_callback callback)
{
        int ret;

        if (operation->connection->state != GB_CONNECTION_STATE_ENABLED)
                return -ENOTCONN;

        /*
         * XXX
         * I think the order of operations is going to be
         * significant, and if so, we may need a mutex to surround
         * setting the operation id and submitting the gbuf.
         */
        operation->callback = callback;
        gb_operation_insert(operation);
        ret = greybus_submit_gbuf(operation->request, GFP_KERNEL);
        if (ret)
                return ret;
        if (!callback)
                ret = gb_operation_wait(operation);

        return ret;
}
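
#if 0   /* Illustrative sketch only; not built. */
/*
 * A minimal sketch of how a caller might use the API above, both
 * synchronously and asynchronously.  The operation type
 * GB_EXAMPLE_TYPE_PING, its one-byte request payload, and both
 * function names are hypothetical.
 */
#define GB_EXAMPLE_TYPE_PING    0x02

static void gb_example_ping_callback(struct gb_operation *operation)
{
        /* Runs when the response arrives (or the operation times out) */
        pr_info("ping finished, result %d\n", operation->result);
        gb_operation_destroy(operation);
}

static int gb_example_ping(struct gb_connection *connection, u8 arg,
                           bool async)
{
        struct gb_operation *operation;
        u8 *request;
        int ret;

        /* One-byte request payload, one-byte response expected back */
        operation = gb_operation_create(connection, GB_EXAMPLE_TYPE_PING,
                                        sizeof(u8), sizeof(u8));
        if (!operation)
                return -ENOMEM;

        request = operation->request_payload;
        *request = arg;

        if (async)
                return gb_operation_request_send(operation,
                                                gb_example_ping_callback);

        /* Null callback: blocks until the response is processed */
        ret = gb_operation_request_send(operation, NULL);
        gb_operation_destroy(operation);

        return ret;
}
#endif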

/*
 * Send a response for an incoming operation request.
 */
int gb_operation_response_send(struct gb_operation *operation)
{
        /* XXX
         * Caller needs to have set operation->response->actual_length
         */
        gb_operation_remove(operation);
        gb_operation_destroy(operation);

        return 0;
}

/*
 * Handle data arriving on a connection.  This is called in
 * interrupt context, so just copy the incoming data into a buffer
 * and do remaining handling via a work queue.
 */
void gb_connection_operation_recv(struct gb_connection *connection,
                                void *data, size_t size)
{
        struct gb_operation_msg_hdr *header;
        struct gb_operation *operation;
        struct gbuf *gbuf;
        u16 msg_size;

        if (connection->state != GB_CONNECTION_STATE_ENABLED)
                return;

        if (size > GB_OPERATION_MESSAGE_SIZE_MAX) {
                gb_connection_err(connection, "message too big");
                return;
        }

        header = data;
        msg_size = le16_to_cpu(header->size);
        if (header->type & GB_OPERATION_TYPE_RESPONSE) {
                u16 id = le16_to_cpu(header->id);

                operation = gb_operation_find(connection, id);
                if (!operation) {
                        gb_connection_err(connection, "operation not found");
                        return;
                }
                gb_operation_remove(operation);
                gbuf = operation->response;
                gbuf->status = GB_OP_SUCCESS;   /* If we got here we're good */
                if (size > gbuf->transfer_buffer_length) {
                        gb_connection_err(connection, "recv buffer too small");
                        return;
                }
        } else {
                WARN_ON(msg_size != size);
                operation = gb_operation_create(connection, header->type,
                                                        msg_size, 0);
                if (!operation) {
                        gb_connection_err(connection, "can't create operation");
                        return;
                }
                gbuf = operation->request;
        }

        memcpy(gbuf->transfer_buffer, data, msg_size);
        gbuf->actual_length = msg_size;

        /* The rest will be handled in work queue context */
        queue_work(gb_operation_recv_workqueue, &operation->recv_work);
}
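
#if 0   /* Illustrative sketch only; not built. */
/*
 * A sketch of how a host driver's receive path might hand incoming
 * message data to the function above.  The function name and the way
 * the connection and data are obtained are hypothetical; the point is
 * that the call is safe from interrupt context, because the data is
 * copied into an operation buffer and further handling is deferred to
 * the receive workqueue.
 */
static void gb_example_host_irq_rx(struct gb_connection *connection,
                                   void *buf, size_t len)
{
        gb_connection_operation_recv(connection, buf, len);
}
#endif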

/*
 * Cancel an operation.
 */
void gb_operation_cancel(struct gb_operation *operation)
{
        operation->canceled = true;
        greybus_kill_gbuf(operation->request);
        if (operation->response)
                greybus_kill_gbuf(operation->response);
}

int gb_operation_init(void)
{
        gb_operation_cache = kmem_cache_create("gb_operation_cache",
                                sizeof(struct gb_operation), 0, 0, NULL);
        if (!gb_operation_cache)
                return -ENOMEM;

        gb_operation_recv_workqueue = alloc_workqueue("greybus_recv", 0, 1);
        if (!gb_operation_recv_workqueue) {
                kmem_cache_destroy(gb_operation_cache);
                gb_operation_cache = NULL;
                return -ENOMEM;
        }

        return 0;
}
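
#if 0   /* Illustrative sketch only; not built. */
/*
 * A sketch of how the Greybus core might pair the init and exit
 * routines in this file during module load and unload;
 * gb_example_init and gb_example_exit are hypothetical names.
 */
static int __init gb_example_init(void)
{
        int ret;

        ret = gb_operation_init();
        if (ret)
                return ret;

        /* ... register host drivers, buses, etc. ... */

        return 0;
}

static void __exit gb_example_exit(void)
{
        gb_operation_exit();
}
#endif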

void gb_operation_exit(void)
{
        /*
         * Drain the receive workqueue before tearing down the
         * operation cache its pending work items still reference.
         */
        destroy_workqueue(gb_operation_recv_workqueue);
        gb_operation_recv_workqueue = NULL;
        kmem_cache_destroy(gb_operation_cache);
        gb_operation_cache = NULL;
}