drivers/staging/greybus/operation.c
/*
 * Greybus operations
 *
 * Copyright 2014 Google Inc.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/workqueue.h>

#include "greybus.h"

/*
 * The top bit of the type in an operation message header indicates
 * whether the message is a request (bit clear) or response (bit set).
 */
#define GB_OPERATION_TYPE_RESPONSE      0x80
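
/*
 * For example, a request of type 0x02 has its response sent with
 * type 0x82 (0x02 | GB_OPERATION_TYPE_RESPONSE), and the request
 * type can be recovered by masking the bit back off:
 *
 *      u8 request_type = header->type & ~GB_OPERATION_TYPE_RESPONSE;
 */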

#define OPERATION_TIMEOUT_DEFAULT       1000    /* milliseconds */

/*
 * XXX This needs to be coordinated with host driver parameters
 */
#define GB_OPERATION_MESSAGE_SIZE_MAX   4096

static struct kmem_cache *gb_operation_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_recv_workqueue;

/*
 * All operation messages (both requests and responses) begin with
 * a common header that encodes the size of the data (header
 * included).  This header also contains a unique identifier, which
 * is used to keep track of in-flight operations.  Finally, the
 * header contains an operation type field, whose interpretation is
 * dependent on what type of device lies on the other end of the
 * connection.  Response messages are distinguished from request
 * messages by setting the high bit (0x80) in the operation type
 * value.
 *
 * The wire format for all numeric fields in the header is little
 * endian.  Any operation-specific data begins immediately after the
 * header, and is 64-bit aligned.
 */
struct gb_operation_msg_hdr {
        __le16  size;   /* Size in bytes of header + payload */
        __le16  id;     /* Operation unique id */
        __u8    type;   /* E.g. GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
        /* 3 bytes pad, must be zero (ignore when read) */
} __aligned(sizeof(u64));
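
/*
 * As a concrete example, a request with an 8-byte payload, id 1 and
 * type 0x02 is laid out on the wire (little endian, header first) as:
 *
 *      10 00  01 00  02  00 00 00      size=0x0010, id=0x0001, type, pad
 *      xx xx xx xx xx xx xx xx         8 bytes of payload
 *
 * The size field covers header plus payload: 8 + 8 = 16 (0x0010) bytes.
 */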

/* XXX Could be per-host device, per-module, or even per-connection */
static DEFINE_SPINLOCK(gb_operations_lock);

static void gb_operation_insert(struct gb_operation *operation)
{
        struct gb_connection *connection = operation->connection;
        struct rb_root *root = &connection->pending;
        struct rb_node *node = &operation->node;
        struct rb_node **link = &root->rb_node;
        struct rb_node *above = NULL;
        struct gb_operation_msg_hdr *header;
        unsigned long timeout;
        bool start_timer;
        __le16 wire_id;

        /*
         * Assign the operation's id and store it in the request
         * message header.
         */
        operation->id = gb_connection_operation_id(connection);
        wire_id = cpu_to_le16(operation->id);
        header = operation->request->transfer_buffer;
        header->id = wire_id;

        /* OK, insert the operation into its connection's tree */
        spin_lock_irq(&gb_operations_lock);

        /*
         * We impose a time limit for requests to complete.  If
         * there are no requests pending there is no need for a
         * timer.  So if this will be the only one in flight we'll
         * need to start the timer.  Otherwise we just update the
         * existing one to give this request a full timeout period
         * to complete.
         */
        start_timer = RB_EMPTY_ROOT(root);

        while (*link) {
                struct gb_operation *other;

                above = *link;
                other = rb_entry(above, struct gb_operation, node);
                if (other->id > operation->id)
                        link = &above->rb_left;
                else    /* Ids are unique, so an equal id can't occur */
                        link = &above->rb_right;
        }
        rb_link_node(node, above, link);
        rb_insert_color(node, root);
        spin_unlock_irq(&gb_operations_lock);

        timeout = msecs_to_jiffies(OPERATION_TIMEOUT_DEFAULT);
        if (start_timer)
                schedule_delayed_work(&operation->timeout_work, timeout);
        else
                mod_delayed_work(system_wq, &operation->timeout_work, timeout);
}

static void gb_operation_remove(struct gb_operation *operation)
{
        struct gb_connection *connection = operation->connection;

        /* Shut down our timeout timer */
        cancel_delayed_work(&operation->timeout_work);

        /* Take us off of the list of pending operations */
        spin_lock_irq(&gb_operations_lock);
        rb_erase(&operation->node, &connection->pending);
        spin_unlock_irq(&gb_operations_lock);
}

static struct gb_operation *
gb_operation_find(struct gb_connection *connection, u16 id)
{
        struct gb_operation *operation = NULL;
        struct rb_node *node;
        bool found = false;

        spin_lock_irq(&gb_operations_lock);
        node = connection->pending.rb_node;
        while (node && !found) {
                operation = rb_entry(node, struct gb_operation, node);
                if (operation->id > id)
                        node = node->rb_left;
                else if (operation->id < id)
                        node = node->rb_right;
                else
                        found = true;
        }
        spin_unlock_irq(&gb_operations_lock);

        return found ? operation : NULL;
}

/*
 * An operation's response message has arrived.  If no callback was
 * supplied the request was submitted synchronously, so we notify
 * any waiters.  Otherwise we assume calling the callback is enough
 * and nobody else will be waiting.
 */
static void gb_operation_complete(struct gb_operation *operation)
{
        if (operation->callback)
                operation->callback(operation);
        else
                complete_all(&operation->completion);
}

/* Wait for a submitted operation to complete */
int gb_operation_wait(struct gb_operation *operation)
{
        int ret;

        ret = wait_for_completion_interruptible(&operation->completion);
        /* If interrupted, cancel the in-flight buffer */
        if (ret < 0)
                greybus_kill_gbuf(operation->request);
        return ret;
}

static void gb_operation_request_handle(struct gb_operation *operation)
{
        struct gb_protocol *protocol = operation->connection->protocol;
        struct gb_operation_msg_hdr *header;

        /*
         * If the protocol has no incoming request handler, report
         * an error and mark the request bad.
         */
        if (protocol->request_recv) {
                protocol->request_recv(operation);
                goto out;
        }

        header = operation->request->transfer_buffer;
        gb_connection_err(operation->connection,
                "unexpected incoming request type 0x%02hhx\n", header->type);
        operation->result = GB_OP_PROTOCOL_BAD;
out:
        gb_operation_complete(operation);
}

/*
 * Either this operation contains an incoming request, or its
 * response has arrived.  An incoming request will have a null
 * response buffer pointer (it is the responsibility of the request
 * handler to allocate and fill in the response buffer).  Note that
 * gb_operation_request_handle() completes the operation itself, so
 * we only complete it here when a response has arrived.
 */
static void gb_operation_recv_work(struct work_struct *recv_work)
{
        struct gb_operation *operation;
        bool incoming_request;

        operation = container_of(recv_work, struct gb_operation, recv_work);
        incoming_request = operation->response == NULL;
        if (incoming_request)
                gb_operation_request_handle(operation);
        else
                gb_operation_complete(operation);

        /* We're finished with the buffer we read into */
        if (incoming_request)
                greybus_gbuf_finished(operation->request);
        else
                greybus_gbuf_finished(operation->response);
}

/*
 * Timeout call for the operation.
 *
 * If this fires, something went wrong, so mark the result as timed out, and
 * run the completion handler, which (hopefully) should clean up the operation
 * properly.
 */
static void operation_timeout(struct work_struct *work)
{
        struct gb_operation *operation;

        operation = container_of(work, struct gb_operation, timeout_work.work);
        gb_connection_err(operation->connection, "operation timed out");

        operation->result = GB_OP_TIMEOUT;
        gb_operation_complete(operation);
}

/*
 * Buffer completion function.  We get notified whenever any buffer
 * completes.  For outbound messages, this tells us that the message
 * has been sent.  For inbound messages, it means the data has
 * landed in the buffer and is ready to be processed.
 *
 * Either way, we don't do anything.  We don't really care when an
 * outbound message has been sent, and for incoming messages we'll
 * be done with everything we need to do before we mark it
 * finished.
 *
 * XXX We may want to record that a request is (or is no longer) in flight.
 */
static void gb_operation_gbuf_complete(struct gbuf *gbuf)
{
        if (gbuf->status) {
                struct gb_operation *operation = gbuf->context;
                struct gb_operation_msg_hdr *header;
                int id;
                int type;

                if (gbuf == operation->request)
                        header = operation->request->transfer_buffer;
                else if (gbuf == operation->response)
                        header = operation->response->transfer_buffer;
                else
                        header = NULL;

                if (header) {
                        id = le16_to_cpu(header->id);
                        type = header->type;
                } else {
                        id = -1;
                        type = -1;
                }

                gb_connection_err(operation->connection,
                        "operation %d type %d gbuf error %d",
                        id, type, gbuf->status);
        }
}

/*
 * Allocate a buffer to be used for an operation request or response
 * message.  For outgoing messages, both types of message contain a
 * common header, which is filled in here.  Incoming requests or
 * responses also contain the same header, but there's no need to
 * initialize it here (it'll be overwritten by the incoming
 * message).
 */
static struct gbuf *gb_operation_gbuf_create(struct gb_operation *operation,
                                             u8 type, size_t size,
                                             bool data_out)
{
        struct gb_connection *connection = operation->connection;
        struct gb_operation_msg_hdr *header;
        struct gbuf *gbuf;
        gfp_t gfp_flags = data_out ? GFP_KERNEL : GFP_ATOMIC;

        size += sizeof(*header);
        gbuf = greybus_alloc_gbuf(connection, gb_operation_gbuf_complete,
                                        size, data_out, gfp_flags, operation);
        if (!gbuf)
                return NULL;

        /* Fill in the header structure */
        header = (struct gb_operation_msg_hdr *)gbuf->transfer_buffer;
        header->size = cpu_to_le16(size);
        header->id = 0;         /* Filled in when submitted */
        header->type = type;

        return gbuf;
}

/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size.  Outgoing requests must specify the size of the response
 * buffer, which must be sufficient to hold all expected response
 * data.
 *
 * Incoming requests will supply a response size of 0, and in that
 * case no response buffer is allocated.  (A response always
 * includes a status byte, so 0 is not a valid size.)  Whatever
 * handles the operation request is responsible for allocating the
 * response buffer.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
struct gb_operation *gb_operation_create(struct gb_connection *connection,
                                        u8 type, size_t request_size,
                                        size_t response_size)
{
        struct gb_operation *operation;
        gfp_t gfp_flags = response_size ? GFP_KERNEL : GFP_ATOMIC;
        bool outgoing = response_size != 0;

        operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
        if (!operation)
                return NULL;
        operation->connection = connection;

        operation->request = gb_operation_gbuf_create(operation, type,
                                                        request_size,
                                                        outgoing);
        if (!operation->request)
                goto err_cache;
        operation->request_payload = operation->request->transfer_buffer +
                                        sizeof(struct gb_operation_msg_hdr);

        if (outgoing) {
                type |= GB_OPERATION_TYPE_RESPONSE;
                operation->response = gb_operation_gbuf_create(operation,
                                                type, response_size,
                                                false);
                if (!operation->response)
                        goto err_request;
                operation->response_payload =
                                operation->response->transfer_buffer +
                                sizeof(struct gb_operation_msg_hdr);
        }

        INIT_WORK(&operation->recv_work, gb_operation_recv_work);
        operation->callback = NULL;     /* set at submit time */
        init_completion(&operation->completion);
        INIT_DELAYED_WORK(&operation->timeout_work, operation_timeout);

        spin_lock_irq(&gb_operations_lock);
        list_add_tail(&operation->links, &connection->operations);
        spin_unlock_irq(&gb_operations_lock);

        return operation;

err_request:
        greybus_free_gbuf(operation->request);
err_cache:
        kmem_cache_free(gb_operation_cache, operation);

        return NULL;
}
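
/*
 * A minimal usage sketch (the type value and the payload structures
 * are hypothetical, for illustration only):
 *
 *      operation = gb_operation_create(connection, 0x02,
 *                                      sizeof(struct foo_request),
 *                                      sizeof(struct foo_response));
 *      if (!operation)
 *              return -ENOMEM;
 *      request = operation->request_payload;
 *      ...fill in the request payload...
 */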

/*
 * Destroy a previously created operation.
 */
void gb_operation_destroy(struct gb_operation *operation)
{
        if (WARN_ON(!operation))
                return;

        /* XXX Make sure it's not in flight */
        spin_lock_irq(&gb_operations_lock);
        list_del(&operation->links);
        spin_unlock_irq(&gb_operations_lock);

        greybus_free_gbuf(operation->response);
        greybus_free_gbuf(operation->request);

        kmem_cache_free(gb_operation_cache, operation);
}

/*
 * Send an operation request message.  The caller has filled in
 * any payload so the request message is ready to go.  If non-null,
 * the callback function supplied will be called when the response
 * message has arrived indicating the operation is complete.  A null
 * callback function is used for a synchronous request; this
 * function won't return until the operation is complete (or the
 * wait is interrupted by a signal).
 */
int gb_operation_request_send(struct gb_operation *operation,
                                gb_operation_callback callback)
{
        int ret;

        if (operation->connection->state != GB_CONNECTION_STATE_ENABLED)
                return -ENOTCONN;

        /*
         * XXX
         * I think the order of operations is going to be
         * significant, and if so, we may need a mutex to surround
         * setting the operation id and submitting the gbuf.
         */
        operation->callback = callback;
        gb_operation_insert(operation);
        ret = greybus_submit_gbuf(operation->request, GFP_KERNEL);
        if (ret)
                return ret;
        if (!callback)
                ret = gb_operation_wait(operation);

        return ret;
}
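
/*
 * Continuing the sketch above (foo_done is a hypothetical callback):
 * a synchronous caller passes a null callback and blocks until the
 * response arrives or a signal interrupts the wait; an asynchronous
 * caller supplies a callback that runs on completion instead.
 *
 *      ret = gb_operation_request_send(operation, NULL);        sync
 *      ret = gb_operation_request_send(operation, foo_done);    async
 */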

/*
 * Send a response for an incoming operation request.  Note that no
 * response message is actually transmitted yet; for now the
 * operation is just removed from the pending tree and destroyed.
 */
int gb_operation_response_send(struct gb_operation *operation)
{
        gb_operation_remove(operation);
        gb_operation_destroy(operation);

        return 0;
}

/*
 * Handle data arriving on a connection.  This is called in
 * interrupt context, so just copy the incoming data into a buffer
 * and do remaining handling via a work queue.
 */
void gb_connection_operation_recv(struct gb_connection *connection,
                                void *data, size_t size)
{
        struct gb_operation_msg_hdr *header;
        struct gb_operation *operation;
        struct gbuf *gbuf;
        u16 msg_size;

        if (connection->state != GB_CONNECTION_STATE_ENABLED)
                return;

        if (size < sizeof(*header)) {
                gb_connection_err(connection, "message too small");
                return;
        }
        if (size > GB_OPERATION_MESSAGE_SIZE_MAX) {
                gb_connection_err(connection, "message too big");
                return;
        }

        header = data;
        msg_size = le16_to_cpu(header->size);
        if (msg_size > size) {
                gb_connection_err(connection, "incomplete message");
                return;
        }

        if (header->type & GB_OPERATION_TYPE_RESPONSE) {
                u16 id = le16_to_cpu(header->id);

                operation = gb_operation_find(connection, id);
                if (!operation) {
                        gb_connection_err(connection, "operation not found");
                        return;
                }
                gb_operation_remove(operation);
                gbuf = operation->response;
                gbuf->status = GB_OP_SUCCESS;   /* If we got here we're good */
                if (msg_size > gbuf->transfer_buffer_length) {
                        gb_connection_err(connection, "recv buffer too small");
                        return;
                }
        } else {
                WARN_ON(msg_size != size);
                operation = gb_operation_create(connection, header->type,
                                                        msg_size, 0);
                if (!operation) {
                        gb_connection_err(connection, "can't create operation");
                        return;
                }
                gbuf = operation->request;
        }

        memcpy(gbuf->transfer_buffer, data, msg_size);

        /* The rest will be handled in work queue context */
        queue_work(gb_operation_recv_workqueue, &operation->recv_work);
}
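
/*
 * To summarize the receive path: a message with the response bit set
 * is matched to its pending operation by id and lands in that
 * operation's preallocated response buffer; anything else becomes a
 * freshly created incoming-request operation.  Either way, the
 * remaining processing happens in gb_operation_recv_work() above.
 */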

/*
 * Cancel an operation.
 */
void gb_operation_cancel(struct gb_operation *operation)
{
        operation->canceled = true;
        greybus_kill_gbuf(operation->request);
        if (operation->response)
                greybus_kill_gbuf(operation->response);
}

int gb_operation_init(void)
{
        gb_operation_cache = kmem_cache_create("gb_operation_cache",
                                sizeof(struct gb_operation), 0, 0, NULL);
        if (!gb_operation_cache)
                return -ENOMEM;

        gb_operation_recv_workqueue = alloc_workqueue("greybus_recv", 0, 1);
        if (!gb_operation_recv_workqueue) {
                kmem_cache_destroy(gb_operation_cache);
                gb_operation_cache = NULL;
                return -ENOMEM;
        }

        return 0;
}

void gb_operation_exit(void)
{
        /* Drain the workqueue before freeing the cache its items live in */
        destroy_workqueue(gb_operation_recv_workqueue);
        gb_operation_recv_workqueue = NULL;
        kmem_cache_destroy(gb_operation_cache);
        gb_operation_cache = NULL;
}