/*
 * Greybus operations
 *
 * Copyright 2014 Google Inc.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/workqueue.h>

#include "greybus.h"

/*
 * The top bit of the type in an operation message header indicates
 * whether the message is a request (bit clear) or response (bit set)
 */
#define GB_OPERATION_TYPE_RESPONSE      0x80

#define OPERATION_TIMEOUT_DEFAULT       1000    /* milliseconds */

/*
 * XXX This needs to be coordinated with host driver parameters
 * XXX May need to reduce to allow for message header within a page
 */
#define GB_OPERATION_MESSAGE_SIZE_MAX   4096

static struct kmem_cache *gb_operation_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_recv_workqueue;

/*
 * All operation messages (both requests and responses) begin with
 * a header that encodes the size of the data (header included).
 * This header also contains a unique identifier, which is used to
 * keep track of in-flight operations.  The header contains an
 * operation type field, whose interpretation is dependent on what
 * type of protocol is used over the connection.
 *
 * The high bit (0x80) of the operation type field is used to
 * indicate whether the message is a request (clear) or a response
 * (set).
 *
 * Response messages include an additional status byte, which
 * communicates the result of the corresponding request.  A zero
 * status value means the operation completed successfully.  Any
 * other value indicates an error; in this case, the payload of the
 * response message (if any) is ignored.  The status byte must be
 * zero in the header for a request message.
 *
 * The wire format for all numeric fields in the header is little
 * endian.  Any operation-specific data begins immediately after the
 * header, and is 64-bit aligned.
 */
struct gb_operation_msg_hdr {
        __le16  size;           /* Size in bytes of header + payload */
        __le16  operation_id;   /* Operation unique id */
        __u8    type;           /* E.g. GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
        __u8    result;         /* Result of request (in responses only) */
        /* 2 bytes pad, must be zero (ignore when read) */
} __aligned(sizeof(u64));

/* XXX Could be per-host device, per-module, or even per-connection */
static DEFINE_SPINLOCK(gb_operations_lock);

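/*
 * Assign the next operation id on a connection to an outgoing
 * operation, move the operation onto the connection's pending list,
 * and record the id in the request header so the eventual response
 * can be matched back to this operation.
 */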
static void gb_pending_operation_insert(struct gb_operation *operation)
{
        struct gb_connection *connection = operation->connection;
        struct gb_operation_msg_hdr *header;

        /*
         * Assign the operation's id and move it into its
         * connection's pending list.
         */
        spin_lock_irq(&gb_operations_lock);
        operation->id = ++connection->op_cycle;
        list_move_tail(&operation->links, &connection->pending);
        spin_unlock_irq(&gb_operations_lock);

        /* Store the operation id in the request header */
        header = operation->request->header;
        header->operation_id = cpu_to_le16(operation->id);
}

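/*
 * Take an operation off the pending list, moving it back onto its
 * connection's list of operations.
 */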
static void gb_pending_operation_remove(struct gb_operation *operation)
{
        struct gb_connection *connection = operation->connection;

        /* Take us off of the list of pending operations */
        spin_lock_irq(&gb_operations_lock);
        list_move_tail(&operation->links, &connection->operations);
        spin_unlock_irq(&gb_operations_lock);
}

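/*
 * Find the pending operation on a connection whose id matches the
 * one carried in an incoming response header.  Returns NULL if no
 * pending operation has that id.
 */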
static struct gb_operation *
gb_pending_operation_find(struct gb_connection *connection, u16 operation_id)
{
        struct gb_operation *operation;
        bool found = false;

        spin_lock_irq(&gb_operations_lock);
        list_for_each_entry(operation, &connection->pending, links)
                if (operation->id == operation_id) {
                        found = true;
                        break;
                }
        spin_unlock_irq(&gb_operations_lock);

        return found ? operation : NULL;
}

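/*
 * Hand a message buffer to the host driver to be sent to the
 * operation's destination CPort.  The cookie the driver returns is
 * saved so an in-flight buffer can later be canceled.
 */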
static int gb_message_send(struct gb_message *message, gfp_t gfp_mask)
{
        struct gb_connection *connection = message->operation->connection;
        u16 dest_cport_id = connection->interface_cport_id;
        int ret = 0;

        message->cookie = connection->hd->driver->buffer_send(connection->hd,
                                        dest_cport_id,
                                        message->header,
                                        message->size,
                                        gfp_mask);
        if (IS_ERR(message->cookie)) {
                ret = PTR_ERR(message->cookie);
                message->cookie = NULL;
        }
        return ret;
}

/*
 * Cancel a message whose buffer we have passed to the host device
 * layer to be sent.
 */
static void gb_message_cancel(struct gb_message *message)
{
        struct greybus_host_device *hd;

        if (!message->cookie)
                return; /* Don't bother if the message isn't in flight */

        hd = message->operation->connection->hd;
        hd->driver->buffer_cancel(message->cookie);
}

/*
 * An operation's response message has arrived.  If a callback was
 * supplied the operation was submitted for asynchronous completion,
 * so we invoke the callback.  Otherwise the submitter is waiting
 * synchronously, so we notify any waiters via the completion.
 */
static void gb_operation_complete(struct gb_operation *operation)
{
        if (operation->callback)
                operation->callback(operation);
        else
                complete_all(&operation->completion);
}

/* Wait for a submitted operation to complete */
int gb_operation_wait(struct gb_operation *operation)
{
        int ret;

        ret = wait_for_completion_interruptible(&operation->completion);
        /* If interrupted, cancel the in-flight buffer */
        if (ret < 0)
                gb_message_cancel(operation->request);
        return ret;
}

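/*
 * Dispatch an incoming request to the connection protocol's request
 * handler, based on the operation type in the request header.
 */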
static void gb_operation_request_handle(struct gb_operation *operation)
{
        struct gb_protocol *protocol = operation->connection->protocol;
        struct gb_operation_msg_hdr *header;

        header = operation->request->header;

        /*
         * If the protocol has an incoming request handler, hand the
         * request off to it.  Otherwise report an error and mark the
         * request bad.
         */
        if (protocol->request_recv) {
                protocol->request_recv(header->type, operation);
                return;
        }

        gb_connection_err(operation->connection,
                "unexpected incoming request type 0x%02hhx\n", header->type);
        operation->result = GB_OP_PROTOCOL_BAD;
}

/*
 * Either this operation contains an incoming request, or its
 * response has arrived.  An incoming request will have a null
 * response buffer pointer (it is the responsibility of the request
 * handler to allocate and fill in the response buffer).
 */
static void gb_operation_recv_work(struct work_struct *recv_work)
{
        struct gb_operation *operation;
        bool incoming_request;

        operation = container_of(recv_work, struct gb_operation, recv_work);
        /* Incoming requests have no response message allocated */
        incoming_request = !operation->response;
        if (incoming_request)
                gb_operation_request_handle(operation);
        gb_operation_complete(operation);
}

/*
 * Timeout call for the operation.
 *
 * If this fires, something went wrong, so mark the result as timed out, and
 * run the completion handler, which (hopefully) should clean up the operation
 * properly.
 */
static void operation_timeout(struct work_struct *work)
{
        struct gb_operation *operation;

        operation = container_of(work, struct gb_operation, timeout_work.work);
        pr_debug("%s: timeout!\n", __func__);

        operation->result = GB_OP_TIMEOUT;
        gb_operation_complete(operation);
}

/*
 * Given a pointer to the header in a message sent on a given host
 * device, return the associated message structure.  (This "header"
 * is just the buffer pointer we supply to the host device for
 * sending.)
 */
static struct gb_message *
gb_hd_message_find(struct greybus_host_device *hd, void *header)
{
        struct gb_message *message;
        u8 *result;

        result = (u8 *)header - hd->buffer_headroom - sizeof(*message);
        message = (struct gb_message *)result;

        return message;
}

/*
 * Allocate a message to be used for an operation request or
 * response.  For outgoing messages, both types of message contain a
 * common header, which is filled in here.  Incoming requests or
 * responses also contain the same header, but there's no need to
 * initialize it here (it'll be overwritten by the incoming
 * message).
 *
 * Our message structure consists of:
 *      message structure
 *      headroom
 *      message header  \_ these combined are
 *      message payload /  the message size
 */
static struct gb_message *
gb_operation_message_alloc(struct greybus_host_device *hd, u8 type,
                                size_t payload_size, gfp_t gfp_flags)
{
        struct gb_message *message;
        struct gb_operation_msg_hdr *header;
        size_t message_size = payload_size + sizeof(*header);
        size_t size;
        u8 *buffer;

        if (message_size > hd->buffer_size_max)
                return NULL;

        size = sizeof(*message) + hd->buffer_headroom + message_size;
        message = kzalloc(size, gfp_flags);
        if (!message)
                return NULL;
        buffer = &message->buffer[0];
        header = (struct gb_operation_msg_hdr *)(buffer + hd->buffer_headroom);

        /* Fill in the header structure */
        header->size = cpu_to_le16(message_size);
        header->operation_id = 0;       /* Filled in when submitted */
        header->type = type;

        message->header = header;
        message->payload = header + 1;
        message->size = message_size;

        return message;
}

static void gb_operation_message_free(struct gb_message *message)
{
        kfree(message);
}

/*
 * Map an enum gb_operation_status value (which is represented in a
 * message as a single byte) to an appropriate Linux negative errno.
 */
int gb_operation_status_map(u8 status)
{
        switch (status) {
        case GB_OP_SUCCESS:
                return 0;
        case GB_OP_INVALID:
                return -EINVAL;
        case GB_OP_NO_MEMORY:
                return -ENOMEM;
        case GB_OP_INTERRUPTED:
                return -EINTR;
        case GB_OP_RETRY:
                return -EAGAIN;
        case GB_OP_PROTOCOL_BAD:
                return -EPROTONOSUPPORT;
        case GB_OP_OVERFLOW:
                return -E2BIG;
        case GB_OP_TIMEOUT:
                return -ETIMEDOUT;
        default:
                return -EIO;
        }
}

/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size.  Outgoing requests must specify the size of the response
 * buffer, which must be sufficient to hold all expected response
 * data.
 *
 * Incoming requests will supply a response size of 0, and in that
 * case no response buffer is allocated.  (A response always
 * includes a status byte, so 0 is not a valid size.)  Whatever
 * handles the operation request is responsible for allocating the
 * response buffer.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
static struct gb_operation *
gb_operation_create_common(struct gb_connection *connection, bool outgoing,
                                u8 type, size_t request_size,
                                size_t response_size)
{
        struct greybus_host_device *hd = connection->hd;
        struct gb_operation *operation;
        gfp_t gfp_flags = response_size ? GFP_KERNEL : GFP_ATOMIC;

        operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
        if (!operation)
                return NULL;
        operation->connection = connection;

        operation->request = gb_operation_message_alloc(hd, type, request_size,
                                                        gfp_flags);
        if (!operation->request)
                goto err_cache;
        operation->request->operation = operation;

        if (outgoing) {
                type |= GB_OPERATION_TYPE_RESPONSE;
                operation->response = gb_operation_message_alloc(hd, type,
                                                response_size, GFP_KERNEL);
                if (!operation->response)
                        goto err_request;
                operation->response->operation = operation;
        }

        INIT_WORK(&operation->recv_work, gb_operation_recv_work);
        operation->callback = NULL;     /* set at submit time */
        init_completion(&operation->completion);
        INIT_DELAYED_WORK(&operation->timeout_work, operation_timeout);
        kref_init(&operation->kref);

        spin_lock_irq(&gb_operations_lock);
        list_add_tail(&operation->links, &connection->operations);
        spin_unlock_irq(&gb_operations_lock);

        return operation;

err_request:
        gb_operation_message_free(operation->request);
err_cache:
        kmem_cache_free(gb_operation_cache, operation);

        return NULL;
}

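/*
 * Create an operation for an outgoing request on the given
 * connection; both the request and response messages are allocated
 * up front.
 */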
struct gb_operation *gb_operation_create(struct gb_connection *connection,
                                        u8 type, size_t request_size,
                                        size_t response_size)
{
        return gb_operation_create_common(connection, true, type,
                                        request_size, response_size);
}

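/*
 * Create an operation to represent an incoming request; no response
 * message is allocated here (that is left to whatever handles the
 * request).
 */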
static struct gb_operation *
gb_operation_create_incoming(struct gb_connection *connection,
                                        u8 type, size_t request_size,
                                        size_t response_size)
{
        return gb_operation_create_common(connection, false, type,
                                        request_size, response_size);
}

/*
 * Destroy a previously created operation.
 */
static void _gb_operation_destroy(struct kref *kref)
{
        struct gb_operation *operation;

        operation = container_of(kref, struct gb_operation, kref);

        /* XXX Make sure it's not in flight */
        spin_lock_irq(&gb_operations_lock);
        list_del(&operation->links);
        spin_unlock_irq(&gb_operations_lock);

        gb_operation_message_free(operation->response);
        gb_operation_message_free(operation->request);

        kmem_cache_free(gb_operation_cache, operation);
}

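/*
 * Drop a reference to an operation; when the last reference is
 * dropped the operation is destroyed.
 */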
void gb_operation_put(struct gb_operation *operation)
{
        if (!WARN_ON(!operation))
                kref_put(&operation->kref, _gb_operation_destroy);
}

/*
 * Send an operation request message.  The caller has filled in
 * any payload so the request message is ready to go.  If non-null,
 * the callback function supplied will be called when the response
 * message has arrived, indicating the operation is complete.  A null
 * callback function is used for a synchronous request; in that case
 * this function won't return until the operation is complete (or an
 * interrupt occurs).
 */
int gb_operation_request_send(struct gb_operation *operation,
                                gb_operation_callback callback)
{
        unsigned long timeout;
        int ret;

        if (operation->connection->state != GB_CONNECTION_STATE_ENABLED)
                return -ENOTCONN;

        /*
         * XXX
         * I think the order of operations is going to be
         * significant, and if so, we may need a mutex to surround
         * setting the operation id and submitting the buffer.
         */
        operation->callback = callback;
        gb_pending_operation_insert(operation);

        /*
         * We impose a time limit for requests to complete.  We need
         * to set the timer before we send the request though, so we
         * don't lose a race with the receipt of the response.
         */
        timeout = msecs_to_jiffies(OPERATION_TIMEOUT_DEFAULT);
        schedule_delayed_work(&operation->timeout_work, timeout);

        /* All set, send the request */
        ret = gb_message_send(operation->request, GFP_KERNEL);
        if (ret)
                return ret;

        if (!callback)
                ret = gb_operation_wait(operation);

        return ret;
}

/*
 * Send a response for an incoming operation request.
 */
int gb_operation_response_send(struct gb_operation *operation)
{
        gb_operation_destroy(operation);

        return 0;
}

/*
 * This function is called when a buffer send request has completed.
 * The "header" is the message header--the beginning of what we
 * asked to have sent.
 *
 * XXX Mismatch between errno here and operation result code
 */
void
greybus_data_sent(struct greybus_host_device *hd, void *header, int status)
{
        struct gb_message *message;
        struct gb_operation *operation;

        /* If there's no error, there's really nothing to do */
        if (!status)
                return; /* Mark it complete? */

        /* XXX Right now we assume we're an outgoing request */
        message = gb_hd_message_find(hd, header);
        operation = message->operation;
        gb_connection_err(operation->connection, "send error %d\n", status);
        operation->result = status;     /* XXX */
        gb_operation_complete(operation);
}
EXPORT_SYMBOL_GPL(greybus_data_sent);

/*
 * We've received data on a connection, and it doesn't look like a
 * response, so we assume it's a request.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the request buffer and handle the rest via workqueue.
 */
void gb_connection_recv_request(struct gb_connection *connection,
        u16 operation_id, u8 type, void *data, size_t size)
{
        struct gb_operation *operation;

        operation = gb_operation_create_incoming(connection, type, size, 0);
        if (!operation) {
                gb_connection_err(connection, "can't create operation");
                return;         /* XXX Respond with pre-allocated ENOMEM */
        }
        operation->id = operation_id;
        memcpy(operation->request->header, data, size);

        /* The rest will be handled in work queue context */
        queue_work(gb_operation_recv_workqueue, &operation->recv_work);
}

/*
 * We've received data that appears to be an operation response
 * message.  Look up the operation, and record that we've received
 * its response.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the response buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_response(struct gb_connection *connection,
                                u16 operation_id, void *data, size_t size)
{
        struct gb_operation *operation;
        struct gb_message *message;
        struct gb_operation_msg_hdr *header;

        operation = gb_pending_operation_find(connection, operation_id);
        if (!operation) {
                gb_connection_err(connection, "operation not found");
                return;
        }

        cancel_delayed_work(&operation->timeout_work);
        gb_pending_operation_remove(operation);

        message = operation->response;
        if (size <= message->size) {
                /* Transfer the operation result from the incoming header */
                header = data;
                operation->result = header->result;
        } else {
                gb_connection_err(connection, "recv buffer too small");
                operation->result = GB_OP_OVERFLOW;
        }

        /* We must ignore the payload if a bad status is returned */
        if (operation->result == GB_OP_SUCCESS)
                memcpy(message->header, data, size);

        /* The rest will be handled in work queue context */
        queue_work(gb_operation_recv_workqueue, &operation->recv_work);
}

/*
 * Handle data arriving on a connection.  As soon as we return, the
 * supplied data buffer will be reused (so unless we do something
 * with it, it's effectively dropped).
 */
void gb_connection_recv(struct gb_connection *connection,
                                void *data, size_t size)
{
        struct gb_operation_msg_hdr *header;
        size_t msg_size;
        u16 operation_id;

        if (connection->state != GB_CONNECTION_STATE_ENABLED) {
                gb_connection_err(connection, "dropping %zu received bytes",
                        size);
                return;
        }

        if (size < sizeof(*header)) {
                gb_connection_err(connection, "message too small");
                return;
        }

        header = data;
        msg_size = (size_t)le16_to_cpu(header->size);
        if (msg_size > size) {
                gb_connection_err(connection, "incomplete message");
                return;         /* XXX Should still complete operation */
        }

        operation_id = le16_to_cpu(header->operation_id);
        if (header->type & GB_OPERATION_TYPE_RESPONSE)
                gb_connection_recv_response(connection, operation_id,
                                                data, msg_size);
        else
                gb_connection_recv_request(connection, operation_id,
                                                header->type, data, msg_size);
}

/*
 * Cancel an operation.
 */
void gb_operation_cancel(struct gb_operation *operation)
{
        operation->canceled = true;
        gb_message_cancel(operation->request);
        /* Incoming requests have no response message allocated */
        if (operation->response)
                gb_message_cancel(operation->response);
}

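/*
 * One-time initialization: create the slab cache used for operation
 * structures and the workqueue used to handle received messages.
 */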
int gb_operation_init(void)
{
        gb_operation_cache = kmem_cache_create("gb_operation_cache",
                                sizeof(struct gb_operation), 0, 0, NULL);
        if (!gb_operation_cache)
                return -ENOMEM;

        gb_operation_recv_workqueue = alloc_workqueue("greybus_recv", 0, 1);
        if (!gb_operation_recv_workqueue) {
                kmem_cache_destroy(gb_operation_cache);
                gb_operation_cache = NULL;
                return -ENOMEM;
        }

        return 0;
}

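/*
 * Tear down the workqueue and slab cache set up by gb_operation_init().
 */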
void gb_operation_exit(void)
{
        destroy_workqueue(gb_operation_recv_workqueue);
        gb_operation_recv_workqueue = NULL;
        kmem_cache_destroy(gb_operation_cache);
        gb_operation_cache = NULL;
}