drivers/staging/greybus/operation.c
1 /*
2  * Greybus operations
3  *
4  * Copyright 2014 Google Inc.
5  *
6  * Released under the GPLv2 only.
7  */
8
9 #include <linux/kernel.h>
10 #include <linux/slab.h>
11 #include <linux/module.h>
12 #include <linux/workqueue.h>
13
14 #include "greybus.h"
15
16 /*
17  * The top bit of the type in an operation message header indicates
18  * whether the message is a request (bit clear) or response (bit set)
19  */
20 #define GB_OPERATION_TYPE_RESPONSE      0x80
21
22 #define OPERATION_TIMEOUT_DEFAULT       1000    /* milliseconds */
23
24 /*
25  * XXX This needs to be coordinated with host driver parameters
26  * XXX May need to reduce to allow for message header within a page
27  */
28 #define GB_OPERATION_MESSAGE_SIZE_MAX   4096
29
30 static struct kmem_cache *gb_operation_cache;
31
32 /* Workqueue to handle Greybus operation completions. */
33 static struct workqueue_struct *gb_operation_recv_workqueue;
34
35 /*
36  * All operation messages (both requests and responses) begin with
37  * a header that encodes the size of the data (header included).
38  * This header also contains a unique identifier, which is used to
39  * keep track of in-flight operations.  The header contains an
40  * operation type field, whose interpretation is dependent on what
41  * type of protocol is used over the connection.
42  *
43  * The high bit (0x80) of the operation type field is used to
44  * indicate whether the message is a request (clear) or a response
45  * (set).
46  *
47  * Response messages include an additional status byte, which
48  * communicates the result of the corresponding request.  A zero
49  * status value means the operation completed successfully.  Any
50  * other value indicates an error; in this case, the payload of the
51  * response message (if any) is ignored.  The status byte must be
52  * zero in the header for a request message.
53  *
54  * The wire format for all numeric fields in the header is little
55  * endian.  Any operation-specific data begins immediately after the
56  * header, and is 64-bit aligned.
57  */
58 struct gb_operation_msg_hdr {
59         __le16  size;           /* Size in bytes of header + payload */
60         __le16  operation_id;   /* Operation unique id */
61         __u8    type;           /* E.g. GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
62         __u8    result;         /* Result of request (in responses only) */
63         /* 2 bytes pad, must be zero (ignore when read) */
64 } __aligned(sizeof(u64));
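/*
 * Illustrative example (the type value and payload are arbitrary, not
 * taken from any particular protocol): a request of type 0x02 carrying
 * a 4-byte payload is encoded as a 12-byte message -- the 8-byte header
 * above followed immediately by the payload, all multi-byte fields
 * little endian:
 *
 *	0c 00		size = 12 (header + payload)
 *	01 00		operation_id = 1 (assigned at submit time)
 *	02		type (request: bit 0x80 clear)
 *	00		result (must be zero in a request)
 *	00 00		pad
 *	xx xx xx xx	payload
 *
 * The matching response reuses the same operation_id, sets the type to
 * 0x82 (0x02 | GB_OPERATION_TYPE_RESPONSE), and reports its status in
 * the result byte.
 */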
65
66 /* XXX Could be per-host device, per-module, or even per-connection */
67 static DEFINE_SPINLOCK(gb_operations_lock);
68
69 static void gb_pending_operation_insert(struct gb_operation *operation)
70 {
71         struct gb_connection *connection = operation->connection;
72         struct gb_operation_msg_hdr *header;
73
74         /*
75          * Assign the operation's id and move it into its
76          * connection's pending list.
77          */
78         spin_lock_irq(&gb_operations_lock);
79         operation->id = ++connection->op_cycle;
80         list_move_tail(&operation->links, &connection->pending);
81         spin_unlock_irq(&gb_operations_lock);
82
83         /* Store the operation id in the request header */
84         header = operation->request.buffer;
85         header->operation_id = cpu_to_le16(operation->id);
86 }
87
88 static void gb_pending_operation_remove(struct gb_operation *operation)
89 {
90         struct gb_connection *connection = operation->connection;
91
92         /* Take us off of the list of pending operations */
93         spin_lock_irq(&gb_operations_lock);
94         list_move_tail(&operation->links, &connection->operations);
95         spin_unlock_irq(&gb_operations_lock);
96 }
97
98 static struct gb_operation *
99 gb_pending_operation_find(struct gb_connection *connection, u16 operation_id)
100 {
101         struct gb_operation *operation;
102         bool found = false;
103
104         spin_lock_irq(&gb_operations_lock);
105         list_for_each_entry(operation, &connection->pending, links)
106                 if (operation->id == operation_id) {
107                         found = true;
108                         break;
109                 }
110         spin_unlock_irq(&gb_operations_lock);
111
112         return found ? operation : NULL;
113 }
114
115 static int gb_message_send(struct gb_message *message, gfp_t gfp_mask)
116 {
117         struct gb_connection *connection = message->operation->connection;
118         u16 dest_cport_id = connection->interface_cport_id;
119         int ret = 0;
120
121         message->cookie = connection->hd->driver->buffer_send(connection->hd,
122                                         dest_cport_id,
123                                         message->buffer,
124                                         message->buffer_size,
125                                         gfp_mask);
126         if (IS_ERR(message->cookie)) {
127                 ret = PTR_ERR(message->cookie);
128                 message->cookie = NULL;
129         }
130         return ret;
131 }
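/*
 * Illustrative sketch only (the function and helper names below are
 * hypothetical, not part of any real host driver): a host driver's
 * buffer_send() method is expected to queue the buffer for transmission
 * to the destination CPort and return an opaque cookie that can later
 * be handed to buffer_cancel(), or an ERR_PTR() value on failure:
 *
 *	static void *example_buffer_send(struct greybus_host_device *hd,
 *					 u16 dest_cport_id, void *buffer,
 *					 size_t buffer_size, gfp_t gfp_mask)
 *	{
 *		struct example_tx *tx;
 *
 *		tx = example_queue_tx(hd, dest_cport_id, buffer,
 *				      buffer_size, gfp_mask);
 *		if (!tx)
 *			return ERR_PTR(-ENOMEM);
 *
 *		return tx;
 *	}
 *
 * The returned pointer is only used here as the cookie stored in the
 * message and later passed to buffer_cancel().
 */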
132
133 /*
134  * Cancel a message whose buffer we have passed to the host device
135  * layer to be sent.
136  */
137 static void gb_message_cancel(struct gb_message *message)
138 {
139         struct greybus_host_device *hd;
140
141         if (!message->cookie)
142                 return; /* Don't bother if the message isn't in flight */
143
144         hd = message->operation->connection->hd;
145         hd->driver->buffer_cancel(message->cookie);
146 }
147
148 /*
149  * An operation's response message has arrived.  If no callback was
150  * supplied the request was synchronous, so we notify any waiters.
151  * Otherwise we assume calling the callback is enough and nobody
152  * else will be waiting.
153  */
154 static void gb_operation_complete(struct gb_operation *operation)
155 {
156         if (operation->callback)
157                 operation->callback(operation);
158         else
159                 complete_all(&operation->completion);
160 }
161
162 /* Wait for a submitted operation to complete */
163 int gb_operation_wait(struct gb_operation *operation)
164 {
165         int ret;
166
167         ret = wait_for_completion_interruptible(&operation->completion);
168         /* If interrupted, cancel the in-flight buffer */
169         if (ret < 0)
170                 gb_message_cancel(&operation->request);
171         return ret;
172
173 }
174
175 static void gb_operation_request_handle(struct gb_operation *operation)
176 {
177         struct gb_protocol *protocol = operation->connection->protocol;
178         struct gb_operation_msg_hdr *header;
179
180         header = operation->request.buffer;
181
182         /*
183          * If the protocol has no incoming request handler, report
184          * an error and mark the request bad.
185          */
186         if (protocol->request_recv) {
187                 protocol->request_recv(header->type, operation);
188                 return;
189         }
190
191         gb_connection_err(operation->connection,
192                 "unexpected incoming request type 0x%02hhx\n", header->type);
193         operation->result = GB_OP_PROTOCOL_BAD;
194 }
195
196 /*
197  * Either this operation contains an incoming request, or its
198  * response has arrived.  An incoming request will have a null
199  * response buffer pointer (it is the responsibility of the request
200  * handler to allocate and fill in the response buffer).
201  */
202 static void gb_operation_recv_work(struct work_struct *recv_work)
203 {
204         struct gb_operation *operation;
205         bool incoming_request;
206
207         operation = container_of(recv_work, struct gb_operation, recv_work);
208         incoming_request = operation->response.buffer == NULL;
209         if (incoming_request)
210                 gb_operation_request_handle(operation);
211         gb_operation_complete(operation);
212 }
213
214 /*
215  * Timeout call for the operation.
216  *
217  * If this fires, something went wrong, so mark the result as timed out, and
218  * run the completion handler, which (hopefully) should clean up the operation
219  * properly.
220  */
221 static void operation_timeout(struct work_struct *work)
222 {
223         struct gb_operation *operation;
224
225         operation = container_of(work, struct gb_operation, timeout_work.work);
226         pr_debug("%s: timeout!\n", __func__);
227
228         operation->result = GB_OP_TIMEOUT;
229         gb_operation_complete(operation);
230 }
231
232 /*
233  * Allocate a buffer to be used for an operation request or response
234  * message.  For outgoing messages, both types of message contain a
235  * common header, which is filled in here.  Incoming requests or
236  * responses also contain the same header, but there's no need to
237  * initialize it here (it'll be overwritten by the incoming
238  * message).
239  */
240 static int gb_operation_message_init(struct gb_operation *operation,
241                                         u8 type, size_t size,
242                                         bool request, gfp_t gfp_flags)
243 {
244         struct gb_connection *connection = operation->connection;
245         struct greybus_host_device *hd = connection->hd;
246         struct gb_message *message;
247         struct gb_operation_msg_hdr *header;
248
249         if (size > GB_OPERATION_MESSAGE_SIZE_MAX)
250                 return -E2BIG;
251         size += sizeof(*header);
252
253         if (request) {
254                 message = &operation->request;
255         } else {
256                 message = &operation->response;
257                 type |= GB_OPERATION_TYPE_RESPONSE;
258         }
259
260         message->buffer = hd->driver->buffer_alloc(size, gfp_flags);
261         if (!message->buffer)
262                 return -ENOMEM;
263         message->buffer_size = size;
264
265         /* Fill in the header structure */
266         header = message->buffer;
267         header->size = cpu_to_le16(size);
268         header->operation_id = 0;       /* Filled in when submitted */
269         header->type = type;
270
271         message->payload = header + 1;
272         message->operation = operation;
273
274         return 0;
275 }
276
277 static void gb_operation_message_exit(struct gb_message *message)
278 {
279         struct greybus_host_device *hd;
280
281         hd = message->operation->connection->hd;
282         hd->driver->buffer_free(message->buffer);
283
284         message->operation = NULL;
285         message->payload = NULL;
286         message->buffer = NULL;
287         message->buffer_size = 0;
288 }
289
290 /*
291  * Map an enum gb_operation_status value (which is represted in a
292  * message as a single back a single byte) to an appropriate Linux
293  * negative errno.
294  */
295 int gb_operation_status_map(u8 status)
296 {
297         switch (status) {
298         case GB_OP_SUCCESS:
299                 return 0;
300         case GB_OP_INVALID:
301                 return -EINVAL;
302         case GB_OP_NO_MEMORY:
303                 return -ENOMEM;
304         case GB_OP_INTERRUPTED:
305                 return -EINTR;
306         case GB_OP_RETRY:
307                 return -EAGAIN;
308         case GB_OP_PROTOCOL_BAD:
309                 return -EPROTONOSUPPORT;
310         case GB_OP_OVERFLOW:
311                 return -E2BIG;
312         case GB_OP_TIMEOUT:
313                 return -ETIMEDOUT;
314         default:
315                 return -EIO;
316         }
317 }
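/*
 * For example (illustrative only), a protocol driver that has just
 * completed an operation could convert the one-byte status into a
 * normal kernel error code like this; on any nonzero result the
 * response payload must be ignored:
 *
 *	int ret = gb_operation_status_map(operation->result);
 *	if (ret)
 *		return ret;
 */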
318
319 /*
320  * Create a Greybus operation to be sent over the given connection.
321  * The request buffer will be big enough for a payload of the given
322  * size.  Outgoing requests must specify the size of the response
323  * buffer, which must be sufficient to hold all expected
324  * response data.
325  *
326  * Incoming requests will supply a response size of 0, and in that
327  * case no response buffer is allocated.  (A response always
328  * includes a status byte, so 0 is not a valid size.)  Whatever
329  * handles the operation request is responsible for allocating the
330  * response buffer.
331  *
332  * Returns a pointer to the new operation or a null pointer if an
333  * error occurs.
334  */
335 static struct gb_operation *
336 gb_operation_create_common(struct gb_connection *connection, bool outgoing,
337                                 u8 type, size_t request_size,
338                                 size_t response_size)
339 {
340         struct gb_operation *operation;
341         gfp_t gfp_flags = response_size ? GFP_KERNEL : GFP_ATOMIC;
342         int ret;
343
344         operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
345         if (!operation)
346                 return NULL;
347         operation->connection = connection;
348
349         ret = gb_operation_message_init(operation, type, request_size,
350                                                 true, gfp_flags);
351         if (ret)
352                 goto err_cache;
353
354         if (outgoing) {
355                 ret = gb_operation_message_init(operation, type, response_size,
356                                                 false, GFP_KERNEL);
357                 if (ret)
358                         goto err_request;
359         }
360
361         INIT_WORK(&operation->recv_work, gb_operation_recv_work);
362         operation->callback = NULL;     /* set at submit time */
363         init_completion(&operation->completion);
364         INIT_DELAYED_WORK(&operation->timeout_work, operation_timeout);
365         kref_init(&operation->kref);
366
367         spin_lock_irq(&gb_operations_lock);
368         list_add_tail(&operation->links, &connection->operations);
369         spin_unlock_irq(&gb_operations_lock);
370
371         return operation;
372
373 err_request:
374         gb_operation_message_exit(&operation->request);
375 err_cache:
376         kmem_cache_free(gb_operation_cache, operation);
377
378         return NULL;
379 }
380
381 struct gb_operation *gb_operation_create(struct gb_connection *connection,
382                                         u8 type, size_t request_size,
383                                         size_t response_size)
384 {
385         return gb_operation_create_common(connection, true, type,
386                                         request_size, response_size);
387 }
388
389 static struct gb_operation *
390 gb_operation_create_incoming(struct gb_connection *connection,
391                                         u8 type, size_t request_size,
392                                         size_t response_size)
393 {
394         return gb_operation_create_common(connection, false, type,
395                                         request_size, response_size);
396 }
397
398 /*
399  * Destroy a previously created operation.
400  */
401 static void _gb_operation_destroy(struct kref *kref)
402 {
403         struct gb_operation *operation;
404
405         operation = container_of(kref, struct gb_operation, kref);
406
407         /* XXX Make sure it's not in flight */
408         spin_lock_irq(&gb_operations_lock);
409         list_del(&operation->links);
410         spin_unlock_irq(&gb_operations_lock);
411
412         gb_operation_message_exit(&operation->response);
413         gb_operation_message_exit(&operation->request);
414
415         kmem_cache_free(gb_operation_cache, operation);
416 }
417
418 void gb_operation_put(struct gb_operation *operation)
419 {
420         if (!WARN_ON(!operation))
421                 kref_put(&operation->kref, _gb_operation_destroy);
422 }
423
424 /*
425  * Send an operation request message.  The caller has filled in
426  * any payload so the request message is ready to go.  If non-null,
427  * the callback function supplied will be called when the response
428  * message has arrived indicating the operation is complete.  A null
429  * callback function is used for a synchronous request; return from
430  * this function won't occur until the operation is complete (or the
431  * wait is interrupted by a signal).
432  */
433 int gb_operation_request_send(struct gb_operation *operation,
434                                 gb_operation_callback callback)
435 {
436         unsigned long timeout;
437         int ret;
438
439         if (operation->connection->state != GB_CONNECTION_STATE_ENABLED)
440                 return -ENOTCONN;
441
442         /*
443          * XXX
444          * I think the order of operations is going to be
445          * significant, and if so, we may need a mutex to surround
446          * setting the operation id and submitting the buffer.
447          */
448         operation->callback = callback;
449         gb_pending_operation_insert(operation);
450
451         /*
452          * We impose a time limit for requests to complete.  We need
453          * to set the timer before we send the request though, so we
454          * don't lose a race with the receipt of the response.
455          */
456         timeout = msecs_to_jiffies(OPERATION_TIMEOUT_DEFAULT);
457         schedule_delayed_work(&operation->timeout_work, timeout);
458
459         /* All set, send the request */
460         ret = gb_message_send(&operation->request, GFP_KERNEL);
461         if (ret)
462                 return ret;
463
464         if (!callback)
465                 ret = gb_operation_wait(operation);
466
467         return ret;
468 }
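/*
 * Putting the pieces together, a protocol driver might issue a
 * synchronous operation roughly as follows.  This is only a sketch:
 * the request/response structures and GB_EXAMPLE_TYPE_FOO are
 * hypothetical, and passing a NULL callback is what makes the send
 * synchronous.
 *
 *	struct gb_example_request *request;
 *	struct gb_example_response *response;
 *	struct gb_operation *operation;
 *	int ret;
 *
 *	operation = gb_operation_create(connection, GB_EXAMPLE_TYPE_FOO,
 *					sizeof(*request), sizeof(*response));
 *	if (!operation)
 *		return -ENOMEM;
 *
 *	request = operation->request.payload;
 *	request->some_field = cpu_to_le16(some_value);
 *
 *	ret = gb_operation_request_send(operation, NULL);
 *	if (!ret)
 *		ret = gb_operation_status_map(operation->result);
 *	if (!ret) {
 *		response = operation->response.payload;
 *		... use the little-endian response fields ...
 *	}
 *
 *	gb_operation_put(operation);
 *	return ret;
 */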
469
470 /*
471  * Send a response for an incoming operation request.
472  */
473 int gb_operation_response_send(struct gb_operation *operation)
474 {
475         gb_operation_destroy(operation);
476
477         return 0;
478 }
479
480 /*
481  * We've received data on a connection, and it doesn't look like a
482  * response, so we assume it's a request.
483  *
484  * This is called in interrupt context, so just copy the incoming
485  * data into the request buffer and handle the rest via workqueue.
486  */
487 void gb_connection_recv_request(struct gb_connection *connection,
488         u16 operation_id, u8 type, void *data, size_t size)
489 {
490         struct gb_operation *operation;
491
492         operation = gb_operation_create_incoming(connection, type, size, 0);
493         if (!operation) {
494                 gb_connection_err(connection, "can't create operation");
495                 return;         /* XXX Respond with pre-allocated ENOMEM */
496         }
497         operation->id = operation_id;
498         memcpy(operation->request.buffer, data, size);
499
500         /* The rest will be handled in work queue context */
501         queue_work(gb_operation_recv_workqueue, &operation->recv_work);
502 }
503
504 /*
505  * We've received data that appears to be an operation response
506  * message.  Look up the operation, and record that we've received
507  * its response.
508  *
509  * This is called in interrupt context, so just copy the incoming
510  * data into the response buffer and handle the rest via workqueue.
511  */
512 static void gb_connection_recv_response(struct gb_connection *connection,
513                                 u16 operation_id, void *data, size_t size)
514 {
515         struct gb_operation *operation;
516         struct gb_message *message;
517         struct gb_operation_msg_hdr *header;
518
519         operation = gb_pending_operation_find(connection, operation_id);
520         if (!operation) {
521                 gb_connection_err(connection, "operation not found");
522                 return;
523         }
524
525         cancel_delayed_work(&operation->timeout_work);
526         gb_pending_operation_remove(operation);
527
528         message = &operation->response;
529         if (size <= message->buffer_size) {
530                 /* Transfer the operation result from the response header */
531                 header = message->buffer;
532                 operation->result = header->result;
533         } else {
534                 gb_connection_err(connection, "recv buffer too small");
535                 operation->result = GB_OP_OVERFLOW;
536         }
537
538         /* We must ignore the payload if a bad status is returned */
539         if (operation->result == GB_OP_SUCCESS)
540                 memcpy(message->buffer, data, size);
541
542         /* The rest will be handled in work queue context */
543         queue_work(gb_operation_recv_workqueue, &operation->recv_work);
544 }
545
546 /*
547  * Handle data arriving on a connection.  As soon as we return the
548  * supplied data buffer will be reused (so unless we do something
549  * with, it's effectively dropped).
550  */
551 void gb_connection_recv(struct gb_connection *connection,
552                                 void *data, size_t size)
553 {
554         struct gb_operation_msg_hdr *header;
555         size_t msg_size;
556         u16 operation_id;
557
558         if (connection->state != GB_CONNECTION_STATE_ENABLED) {
559                 gb_connection_err(connection, "dropping %zu received bytes",
560                         size);
561                 return;
562         }
563
564         if (size < sizeof(*header)) {
565                 gb_connection_err(connection, "message too small");
566                 return;
567         }
568
569         header = data;
570         msg_size = (size_t)le16_to_cpu(header->size);
571         if (msg_size > size) {
572                 gb_connection_err(connection, "incomplete message");
573                 return;         /* XXX Should still complete operation */
574         }
575
576         operation_id = le16_to_cpu(header->operation_id);
577         if (header->type & GB_OPERATION_TYPE_RESPONSE)
578                 gb_connection_recv_response(connection, operation_id,
579                                                 data, msg_size);
580         else
581                 gb_connection_recv_request(connection, operation_id,
582                                                 header->type, data, msg_size);
583 }
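/*
 * Illustrative only (the buffer and length names are hypothetical): a
 * host driver's receive path would hand each message it pulls off the
 * wire to the connection layer like this, from interrupt context, and
 * may reuse its receive buffer as soon as the call returns:
 *
 *	gb_connection_recv(connection, rx_buffer, rx_length);
 */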
584
585 /*
586  * Cancel an operation.
587  */
588 void gb_operation_cancel(struct gb_operation *operation)
589 {
590         operation->canceled = true;
591         gb_message_cancel(&operation->request);
592         if (operation->response.buffer)
593                 gb_message_cancel(&operation->response);
594 }
595
596 int gb_operation_init(void)
597 {
598         gb_operation_cache = kmem_cache_create("gb_operation_cache",
599                                 sizeof(struct gb_operation), 0, 0, NULL);
600         if (!gb_operation_cache)
601                 return -ENOMEM;
602
603         gb_operation_recv_workqueue = alloc_workqueue("greybus_recv", 0, 1);
604         if (!gb_operation_recv_workqueue) {
605                 kmem_cache_destroy(gb_operation_cache);
606                 gb_operation_cache = NULL;
607                 return -ENOMEM;
608         }
609
610         return 0;
611 }
612
613 void gb_operation_exit(void)
614 {
615         destroy_workqueue(gb_operation_recv_workqueue);
616         gb_operation_recv_workqueue = NULL;
617         kmem_cache_destroy(gb_operation_cache);
618         gb_operation_cache = NULL;
619 }