/*
 * Greybus operations
 *
 * Copyright 2014 Google Inc.
 *
 * Released under the GPLv2 only.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/workqueue.h>

#include "greybus.h"
/*
 * The top bit of the type in an operation message header indicates
 * whether the message is a request (bit clear) or response (bit set).
 */
#define GB_OPERATION_TYPE_RESPONSE	0x80

#define OPERATION_TIMEOUT_DEFAULT	1000	/* milliseconds */

/*
 * XXX This needs to be coordinated with host driver parameters
 */
#define GB_OPERATION_MESSAGE_SIZE_MAX	4096

static struct kmem_cache *gb_operation_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_recv_workqueue;
/*
 * All operation messages (both requests and responses) begin with
 * a common header that encodes the size of the data (header
 * included).  This header also contains a unique identifier, which
 * is used to keep track of in-flight operations.  Finally, the
 * header contains an operation type field, whose interpretation is
 * dependent on what type of device lies on the other end of the
 * connection.  Response messages are distinguished from request
 * messages by setting the high bit (0x80) in the operation type
 * field.
 *
 * The wire format for all numeric fields in the header is little
 * endian.  Any operation-specific data begins immediately after the
 * header, and is 64-bit aligned.
 */
struct gb_operation_msg_hdr {
	__le16	size;	/* Size in bytes of header + payload */
	__le16	id;	/* Operation unique id */
	__u8	type;	/* E.g. GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
	/* 3 bytes pad, must be zero (ignore when read) */
} __aligned(sizeof(u64));
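/*
 * For illustration only (this example is not part of the driver):
 * given the layout above, a request with a hypothetical type of 0x02,
 * id 1, and a four-byte payload would appear on the wire as a 12-byte
 * little-endian message:
 *
 *	0c 00   01 00   02    00 00 00   xx xx xx xx
 *	size    id      type  pad        payload
 *
 * The matching response would carry the same id and type 0x82
 * (0x02 with the high bit set).
 */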
/* XXX Could be per-host device, per-module, or even per-connection */
static DEFINE_SPINLOCK(gb_operations_lock);
static void gb_operation_insert(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct rb_root *root = &connection->pending;
	struct rb_node *node = &operation->node;
	struct rb_node **link = &root->rb_node;
	struct rb_node *above = NULL;
	struct gb_operation_msg_hdr *header;
	unsigned long timeout;
	bool start_timer;
	__le16 wire_id;

	/*
	 * Assign the operation's id and store it in the request
	 * message header; the response arriving from the other end
	 * carries the same id back.
	 */
	operation->id = gb_connection_operation_id(connection);
	wire_id = cpu_to_le16(operation->id);
	header = operation->request->transfer_buffer;
	header->id = wire_id;

	/* OK, insert the operation into its connection's tree */
	spin_lock_irq(&gb_operations_lock);

	/*
	 * We impose a time limit for requests to complete.  If
	 * there are no requests pending there is no need for a
	 * timer.  So if this will be the only one in flight we'll
	 * need to start the timer.  Otherwise we just update the
	 * existing one to give this request a full timeout period
	 * to complete.
	 */
	start_timer = RB_EMPTY_ROOT(root);

	while (*link) {
		struct gb_operation *other;

		above = *link;
		other = rb_entry(above, struct gb_operation, node);
		if (other->id > operation->id)
			link = &above->rb_left;
		else if (other->id < operation->id)
			link = &above->rb_right;
	}
	rb_link_node(node, above, link);
	rb_insert_color(node, root);
	spin_unlock_irq(&gb_operations_lock);

	timeout = msecs_to_jiffies(OPERATION_TIMEOUT_DEFAULT);
	if (start_timer)
		schedule_delayed_work(&operation->timeout_work, timeout);
	else
		mod_delayed_work(system_wq, &operation->timeout_work, timeout);
}
static void gb_operation_remove(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;

	/* Shut down our timeout timer */
	cancel_delayed_work(&operation->timeout_work);

	/* Take us off the list of pending operations */
	spin_lock_irq(&gb_operations_lock);
	rb_erase(&operation->node, &connection->pending);
	spin_unlock_irq(&gb_operations_lock);
}
static struct gb_operation *
gb_operation_find(struct gb_connection *connection, u16 id)
{
	struct gb_operation *operation = NULL;
	struct rb_node *node;
	bool found = false;

	spin_lock_irq(&gb_operations_lock);
	node = connection->pending.rb_node;
	while (node && !found) {
		operation = rb_entry(node, struct gb_operation, node);
		if (operation->id > id)
			node = node->rb_left;
		else if (operation->id < id)
			node = node->rb_right;
		else
			found = true;
	}
	spin_unlock_irq(&gb_operations_lock);

	return found ? operation : NULL;
}
/*
 * An operation's response message has arrived.  If no callback was
 * supplied the request was submitted for synchronous completion, so
 * we notify any waiters.  Otherwise we assume calling the callback
 * is enough and nobody else will be waiting.
 */
static void gb_operation_complete(struct gb_operation *operation)
{
	if (operation->callback)
		operation->callback(operation);
	else
		complete_all(&operation->completion);
}
/* Wait for a submitted operation to complete */
int gb_operation_wait(struct gb_operation *operation)
{
	int ret;

	ret = wait_for_completion_interruptible(&operation->completion);
	/* If interrupted, cancel the in-flight buffer */
	if (ret < 0)
		greybus_kill_gbuf(operation->request);

	return ret;
}
static void gb_operation_request_handle(struct gb_operation *operation)
{
	struct gb_protocol *protocol = operation->connection->protocol;
	struct gb_operation_msg_hdr *header;

	/*
	 * If the protocol has no incoming request handler, report
	 * an error and mark the request bad.
	 */
	if (protocol->request_recv) {
		protocol->request_recv(operation);
		goto out;
	}

	header = operation->request->transfer_buffer;
	gb_connection_err(operation->connection,
		"unexpected incoming request type 0x%02hhx\n", header->type);
	operation->result = GB_OP_PROTOCOL_BAD;
out:
	gb_operation_complete(operation);
}
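/*
 * A sketch of what a protocol's incoming-request handler might look
 * like (the foo_* names are hypothetical, not from this driver):
 *
 *	static void foo_request_recv(struct gb_operation *operation)
 *	{
 *		struct gb_operation_msg_hdr *header;
 *
 *		header = operation->request->transfer_buffer;
 *		switch (header->type) {
 *		...
 *		}
 *	}
 *
 * The handler is responsible for allocating and filling in the
 * response buffer before a response is sent.
 */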
/*
 * Either this operation contains an incoming request, or its
 * response has arrived.  An incoming request will have a null
 * response buffer pointer (it is the responsibility of the request
 * handler to allocate and fill in the response buffer).
 */
static void gb_operation_recv_work(struct work_struct *recv_work)
{
	struct gb_operation *operation;
	bool incoming_request;

	operation = container_of(recv_work, struct gb_operation, recv_work);
	incoming_request = operation->response == NULL;
	if (incoming_request)
		gb_operation_request_handle(operation);
	else
		gb_operation_complete(operation);

	/* We're finished with the buffer we read into */
	if (incoming_request)
		greybus_gbuf_finished(operation->request);
	else
		greybus_gbuf_finished(operation->response);
}
/*
 * Timeout call for the operation.
 *
 * If this fires, something went wrong, so mark the result as timed out, and
 * run the completion handler, which (hopefully) should clean up the
 * operation properly.
 */
static void operation_timeout(struct work_struct *work)
{
	struct gb_operation *operation;

	operation = container_of(work, struct gb_operation, timeout_work.work);
	pr_warn("operation %u timed out\n", operation->id);

	operation->result = GB_OP_TIMEOUT;
	gb_operation_complete(operation);
}
/*
 * Buffer completion function.  We get notified whenever any buffer
 * completes.  For outbound messages, this tells us that the message
 * has been sent.  For inbound messages, it means the data has
 * landed in the buffer and is ready to be processed.
 *
 * Either way, we don't do anything.  We don't really care when an
 * outbound message has been sent, and for incoming messages we'll
 * be done with everything we need to do before we mark the buffer
 * finished.
 *
 * XXX We may want to record that a request is (or is no longer) in flight.
 */
static void gb_operation_gbuf_complete(struct gbuf *gbuf)
{
	if (gbuf->status) {
		struct gb_operation *operation = gbuf->context;
		struct gb_operation_msg_hdr *header;
		int id;
		int type;

		if (gbuf == operation->request)
			header = operation->request->transfer_buffer;
		else if (gbuf == operation->response)
			header = operation->response->transfer_buffer;
		else
			header = NULL;

		if (header) {
			id = le16_to_cpu(header->id);
			type = header->type;
		} else {
			id = -1;
			type = -1;
		}

		gb_connection_err(operation->connection,
			"operation %d type %d gbuf error %d",
			id, type, gbuf->status);
	}
}
/*
 * Allocate a buffer to be used for an operation request or response
 * message.  For outgoing messages, both types of message contain a
 * common header, which is filled in here.  Incoming requests or
 * responses also contain the same header, but there's no need to
 * initialize it here (it'll be overwritten by the incoming
 * message).
 */
static struct gbuf *gb_operation_gbuf_create(struct gb_operation *operation,
					     u8 type, size_t size,
					     bool data_out)
{
	struct gb_connection *connection = operation->connection;
	struct gb_operation_msg_hdr *header;
	struct gbuf *gbuf;
	gfp_t gfp_flags = data_out ? GFP_KERNEL : GFP_ATOMIC;

	size += sizeof(*header);
	gbuf = greybus_alloc_gbuf(connection, gb_operation_gbuf_complete,
				  size, data_out, gfp_flags, operation);
	if (!gbuf)
		return NULL;

	/* Fill in the header structure */
	header = (struct gb_operation_msg_hdr *)gbuf->transfer_buffer;
	header->size = cpu_to_le16(size);
	header->id = 0;	/* Filled in when submitted */
	header->type = type;

	return gbuf;
}
/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size.  Outgoing requests must specify the size of the response
 * buffer, which must be sufficient to hold all expected response
 * data.
 *
 * Incoming requests will supply a response size of 0, and in that
 * case no response buffer is allocated.  (A response always
 * includes a status byte, so 0 is not a valid size.)  Whatever
 * handles the operation request is responsible for allocating the
 * response buffer.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
struct gb_operation *gb_operation_create(struct gb_connection *connection,
					 u8 type, size_t request_size,
					 size_t response_size)
{
	struct gb_operation *operation;
	gfp_t gfp_flags = response_size ? GFP_KERNEL : GFP_ATOMIC;
	bool outgoing = response_size != 0;

	operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
	if (!operation)
		return NULL;
	operation->connection = connection;

	operation->request = gb_operation_gbuf_create(operation, type,
						      request_size, outgoing);
	if (!operation->request)
		goto err_cache;
	operation->request_payload = operation->request->transfer_buffer +
					sizeof(struct gb_operation_msg_hdr);

	if (outgoing) {
		type |= GB_OPERATION_TYPE_RESPONSE;
		operation->response = gb_operation_gbuf_create(operation,
						type, response_size, false);
		if (!operation->response)
			goto err_request;
		operation->response_payload =
				operation->response->transfer_buffer +
				sizeof(struct gb_operation_msg_hdr);
	}

	INIT_WORK(&operation->recv_work, gb_operation_recv_work);
	operation->callback = NULL;	/* set at submit time */
	init_completion(&operation->completion);
	INIT_DELAYED_WORK(&operation->timeout_work, operation_timeout);

	spin_lock_irq(&gb_operations_lock);
	list_add_tail(&operation->links, &connection->operations);
	spin_unlock_irq(&gb_operations_lock);

	return operation;

err_request:
	greybus_free_gbuf(operation->request);
err_cache:
	kmem_cache_free(gb_operation_cache, operation);

	return NULL;
}
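/*
 * A minimal caller sketch (the operation type and payload structs are
 * hypothetical, not part of this file), showing the intended
 * lifecycle: create, fill in the request payload, send synchronously,
 * consume the response payload, destroy.
 *
 *	operation = gb_operation_create(connection, FOO_TYPE_BAR,
 *					sizeof(*request),
 *					sizeof(*response));
 *	if (!operation)
 *		return -ENOMEM;
 *	request = operation->request_payload;
 *	request->value = cpu_to_le32(value);
 *	ret = gb_operation_request_send(operation, NULL);
 *	if (!ret)
 *		response = operation->response_payload;
 *	gb_operation_destroy(operation);
 */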
/*
 * Destroy a previously created operation.
 */
void gb_operation_destroy(struct gb_operation *operation)
{
	if (WARN_ON(!operation))
		return;

	/* XXX Make sure it's not in flight */
	spin_lock_irq(&gb_operations_lock);
	list_del(&operation->links);
	spin_unlock_irq(&gb_operations_lock);

	greybus_free_gbuf(operation->response);
	greybus_free_gbuf(operation->request);

	kmem_cache_free(gb_operation_cache, operation);
}
/*
 * Send an operation request message.  The caller has filled in
 * any payload so the request message is ready to go.  If non-null,
 * the callback function supplied will be called when the response
 * message has arrived, indicating the operation is complete.  A null
 * callback function is used for a synchronous request; this function
 * won't return until the operation is complete (or an interrupt
 * occurs).
 */
int gb_operation_request_send(struct gb_operation *operation,
			      gb_operation_callback callback)
{
	int ret;

	if (operation->connection->state != GB_CONNECTION_STATE_ENABLED)
		return -ENOTCONN;

	/*
	 * XXX The order of operations here may be significant, and
	 * if so, we may need a mutex to surround setting the
	 * operation id and submitting the gbuf.
	 */
	operation->callback = callback;
	gb_operation_insert(operation);
	ret = greybus_submit_gbuf(operation->request, GFP_KERNEL);
	if (ret)
		return ret;
	if (!callback)
		ret = gb_operation_wait(operation);

	return ret;
}
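/*
 * Sketch of asynchronous use (foo_operation_done is hypothetical):
 * supply a callback and return immediately; the callback runs when
 * the response arrives, or when the operation times out or fails.
 *
 *	static void foo_operation_done(struct gb_operation *operation)
 *	{
 *		if (operation->result != GB_OP_SUCCESS)
 *			return;
 *		... consume operation->response_payload ...
 *	}
 *
 *	ret = gb_operation_request_send(operation, foo_operation_done);
 */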
/*
 * Send a response for an incoming operation request.
 */
int gb_operation_response_send(struct gb_operation *operation)
{
	gb_operation_remove(operation);
	gb_operation_destroy(operation);

	return 0;
}
/*
 * Handle data arriving on a connection.  This is called in
 * interrupt context, so just copy the incoming data into a buffer
 * and do the remaining handling via a work queue.
 */
void gb_connection_operation_recv(struct gb_connection *connection,
				  void *data, size_t size)
{
	struct gb_operation_msg_hdr *header;
	struct gb_operation *operation;
	struct gbuf *gbuf;
	u16 msg_size;

	if (connection->state != GB_CONNECTION_STATE_ENABLED)
		return;

	if (size > GB_OPERATION_MESSAGE_SIZE_MAX) {
		gb_connection_err(connection, "message too big");
		return;
	}

	header = data;
	msg_size = le16_to_cpu(header->size);
	if (header->type & GB_OPERATION_TYPE_RESPONSE) {
		u16 id = le16_to_cpu(header->id);

		operation = gb_operation_find(connection, id);
		if (!operation) {
			gb_connection_err(connection, "operation not found");
			return;
		}
		gb_operation_remove(operation);
		gbuf = operation->response;
		gbuf->status = GB_OP_SUCCESS;	/* If we got here we're good */
		if (size > gbuf->transfer_buffer_length) {
			gb_connection_err(connection, "recv buffer too small");
			return;
		}
	} else {
		WARN_ON(msg_size != size);
		operation = gb_operation_create(connection, header->type,
						msg_size, 0);
		if (!operation) {
			gb_connection_err(connection, "can't create operation");
			return;
		}
		gbuf = operation->request;
	}

	memcpy(gbuf->transfer_buffer, data, msg_size);

	/* The rest will be handled in work queue context */
	queue_work(gb_operation_recv_workqueue, &operation->recv_work);
}
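/*
 * For reference (hypothetical host-driver code, not part of this
 * file): a host driver would call the function above from its
 * receive-completion path, for example from a USB URB callback:
 *
 *	gb_connection_operation_recv(connection, urb->transfer_buffer,
 *				     urb->actual_length);
 */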
/*
 * Cancel an operation.
 */
void gb_operation_cancel(struct gb_operation *operation)
{
	operation->canceled = true;
	greybus_kill_gbuf(operation->request);
	if (operation->response)
		greybus_kill_gbuf(operation->response);
}
int gb_operation_init(void)
{
	gb_operation_cache = kmem_cache_create("gb_operation_cache",
					       sizeof(struct gb_operation),
					       0, 0, NULL);
	if (!gb_operation_cache)
		return -ENOMEM;

	gb_operation_recv_workqueue = alloc_workqueue("greybus_recv", 0, 1);
	if (!gb_operation_recv_workqueue) {
		kmem_cache_destroy(gb_operation_cache);
		gb_operation_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}
void gb_operation_exit(void)
{
	/* Drain the workqueue before destroying the cache it allocates from */
	destroy_workqueue(gb_operation_recv_workqueue);
	gb_operation_recv_workqueue = NULL;
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
}