4 * Copyright 2014 Google Inc.
6 * Released under the GPLv2 only.
9 #include <linux/kernel.h>
10 #include <linux/slab.h>
11 #include <linux/module.h>
12 #include <linux/workqueue.h>
17 * The top bit of the type in an operation message header indicates
18 * whether the message is a request (bit clear) or response (bit set)
20 #define GB_OPERATION_TYPE_RESPONSE 0x80
23 * All operation messages (both requests and responses) begin with
24 * a common header that encodes the size of the data (header
25 * included). This header also contains a unique identifier, which
26 * is used to keep track of in-flight operations. Finally, the
27 * header contains an operation type field, whose interpretation is
28 * dependent on what type of device lies on the other end of the
29 * connection. Response messages are distinguished from request
30 * messages by setting the high bit (0x80) in the operation type
33 * The wire format for all numeric fields in the header is little
34 * endian. Any operation-specific data begins immediately after the
35 * header, and is 64-bit aligned.
/*
 * On-the-wire message header (see layout notes above).  All numeric
 * fields are little-endian; the struct is 8-byte aligned so any
 * payload that follows it is 64-bit aligned.
 */
37 struct gb_operation_msg_hdr {
38 __le16 size; /* Size in bytes of header + payload */
39 __le16 id; /* Operation unique id */
40 __u8 type; /* E.g. GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
41 /* 3 bytes pad, must be zero (ignore when read) */
42 } __aligned(sizeof(u64));
44 /* XXX Could be per-host device, per-module, or even per-connection */
/*
 * Single global lock guarding every connection's pending rb-tree and
 * operations list (taken with irqs disabled throughout this file).
 */
45 static DEFINE_SPINLOCK(gb_operations_lock);
/*
 * Assign the next operation id for @operation's connection, record it
 * in the request message header, and insert the operation into the
 * connection's rb-tree of pending (in-flight) operations, keyed by id.
 * NOTE(review): this listing appears truncated -- the declaration of
 * wire_id, the header->id store, and the rb-tree descent loop header
 * are not visible here.
 */
47 static void gb_operation_insert(struct gb_operation *operation)
49 struct gb_connection *connection = operation->connection;
50 struct rb_root *root = &connection->pending;
51 struct rb_node *node = &operation->node;
52 struct rb_node **link = &root->rb_node;
53 struct rb_node *above = NULL;
54 struct gb_operation_msg_hdr *header;
58 * Assign the operation's id, and store it in the header of
59 * both request and response message headers.
61 operation->id = gb_connection_operation_id(connection);
62 wire_id = cpu_to_le16(operation->id);
63 header = operation->request->transfer_buffer;
66 /* OK, insert the operation into its connection's tree */
67 spin_lock_irq(&gb_operations_lock);
/* Descend the tree comparing ids to find the insertion point */
70 struct gb_operation *other;
73 other = rb_entry(above, struct gb_operation, node);
74 header = other->request->transfer_buffer;
75 if (other->id > operation->id)
76 link = &above->rb_left;
77 else if (other->id < operation->id)
78 link = &above->rb_right;
80 rb_link_node(node, above, link);
81 rb_insert_color(node, root);
83 spin_unlock_irq(&gb_operations_lock);
/*
 * Remove @operation from its connection's tree of pending operations.
 * Counterpart of gb_operation_insert().
 */
86 static void gb_operation_remove(struct gb_operation *operation)
88 spin_lock_irq(&gb_operations_lock);
89 rb_erase(&operation->node, &operation->connection->pending);
90 spin_unlock_irq(&gb_operations_lock);
/*
 * Look up the pending operation with the given @id on @connection,
 * walking the rb-tree under the operations lock.
 * Returns the matching operation, or NULL if none is pending.
 * NOTE(review): the declarations of node/found and the branch that
 * sets 'found' on an exact id match are not visible in this listing.
 */
93 static struct gb_operation *
94 gb_operation_find(struct gb_connection *connection, u16 id)
96 struct gb_operation *operation;
100 spin_lock_irq(&gb_operations_lock);
101 node = connection->pending.rb_node;
102 while (node && !found) {
103 operation = rb_entry(node, struct gb_operation, node);
104 if (operation->id > id)
105 node = node->rb_left;
106 else if (operation->id < id)
107 node = node->rb_right;
111 spin_unlock_irq(&gb_operations_lock);
113 return found ? operation : NULL;
117 * An operation's response message has arrived. If no callback was
118 * supplied it was submitted for asynchronous completion, so we notify
119 * any waiters. Otherwise we assume calling the completion is enough
120 * and nobody else will be waiting.
122 void gb_operation_complete(struct gb_operation *operation)
124 if (operation->callback)
125 operation->callback(operation);
/* No callback: wake anyone blocked in gb_operation_wait() */
127 complete_all(&operation->completion);
/*
 * Block (interruptibly) until a submitted operation completes.
 * NOTE(review): the check of the wait's return value that guards the
 * kill path is not visible in this listing -- presumably only an
 * interrupted wait cancels the in-flight request gbuf.
 */
131 * Wait for a submitted operation to complete */
132 int gb_operation_wait(struct gb_operation *operation)
136 ret = wait_for_completion_interruptible(&operation->completion);
137 /* If interrupted, cancel the in-flight buffer */
139 ret = greybus_kill_gbuf(operation->request);
145 * Submit an outbound operation. The caller has filled in any
146 * payload so the request message is ready to go. If non-null,
147 * the callback function supplied will be called when the response
148 * message has arrived indicating the operation is complete. A null
149 * callback function is used for a synchronous request; return from
150 * this function won't occur until the operation is complete (or an
153 int gb_operation_submit(struct gb_operation *operation,
154 gb_operation_callback callback)
160 * I think the order of operations is going to be
161 * significant, and if so, we may need a mutex to surround
162 * setting the operation id and submitting the gbuf.
164 operation->callback = callback;
/* Assign an id and make the operation findable before sending */
165 gb_operation_insert(operation);
166 ret = greybus_submit_gbuf(operation->request, GFP_KERNEL);
/* Synchronous (NULL-callback) path: block until completion.
 * NOTE(review): the guards around this call (submit-error handling,
 * callback check) are not visible in this listing. */
170 ret = gb_operation_wait(operation);
176 * Called when an operation buffer completes.
/*
 * Recover the operation from the id carried in the completed buffer's
 * message header and drop it from the connection's pending tree; log a
 * connection error if no matching operation is found.
 * NOTE(review): the declaration of 'id' and the if/else around the
 * remove-vs-error paths are not visible in this listing.
 */
178 static void gb_operation_gbuf_complete(struct gbuf *gbuf)
180 struct gb_operation *operation;
181 struct gb_operation_msg_hdr *header;
185 * This isn't right, but it keeps things balanced until we
186 * can set up operation response handling.
188 header = gbuf->transfer_buffer;
189 id = le16_to_cpu(header->id);
190 operation = gb_operation_find(gbuf->connection, id);
192 gb_operation_remove(operation);
194 gb_connection_err(gbuf->connection, "operation not found");
198 * Allocate a buffer to be used for an operation request or response
199 * message. Both types of message contain a header, which is filled
/*
 * @size is the payload size on entry; the header is added on top.
 * Outbound buffers may sleep (GFP_KERNEL); inbound allocation is
 * atomic.  The header id is zeroed here and assigned at submit time.
 * NOTE(review): the allocation-failure check, the header->type store,
 * and the return statement are not visible in this listing.
 */
202 struct gbuf *gb_operation_gbuf_create(struct gb_operation *operation,
203 u8 type, size_t size, bool outbound)
205 struct gb_connection *connection = operation->connection;
206 struct gb_operation_msg_hdr *header;
208 gfp_t gfp_flags = outbound ? GFP_KERNEL : GFP_ATOMIC;
210 /* Operation buffers hold a header in addition to their payload */
211 size += sizeof(*header);
212 gbuf = greybus_alloc_gbuf(connection, gb_operation_gbuf_complete,
213 size, outbound, gfp_flags, operation);
217 /* Fill in the header structure */
218 header = (struct gb_operation_msg_hdr *)gbuf->transfer_buffer;
219 header->size = cpu_to_le16(size);
220 header->id = 0; /* Filled in when submitted */
227 * Create a Greybus operation to be sent over the given connection.
228 * The request buffer will be big enough for a payload of the given
229 * size. Outgoing requests must specify the size of the response
230 * buffer size, which must be sufficient to hold all expected
233 * Incoming requests will supply a response size of 0, and in that
234 * case no response buffer is allocated. (A response always
235 * includes a status byte, so 0 is not a valid size.) Whatever
236 * handles the operation request is responsible for allocating the
239 * Returns a pointer to the new operation or a null pointer if an
/*
 * NOTE(review): this listing is truncated -- the request_size==0
 * early-return, the kzalloc-failure check, several error-path
 * returns, and the final return of 'operation' are not visible.
 */
242 struct gb_operation *gb_operation_create(struct gb_connection *connection,
243 u8 type, size_t request_size,
244 size_t response_size)
246 struct gb_operation *operation;
/* Outgoing (response expected) creation may sleep; incoming is atomic */
247 gfp_t gfp_flags = response_size ? GFP_KERNEL : GFP_ATOMIC;
250 gb_connection_err(connection, "zero-sized request");
254 /* XXX Use a slab cache */
255 operation = kzalloc(sizeof(*operation), gfp_flags);
258 operation->connection = connection; /* XXX refcount? */
260 operation->request = gb_operation_gbuf_create(operation, type,
262 if (!operation->request) {
266 operation->request_payload = operation->request->transfer_buffer +
267 sizeof(struct gb_operation_msg_hdr);
268 /* We always use the full request buffer */
269 operation->request->actual_length = request_size;
/* Outgoing request: pre-allocate the response buffer, with the
 * response bit set in its message type */
272 type |= GB_OPERATION_TYPE_RESPONSE;
273 operation->response = gb_operation_gbuf_create(operation,
274 type, response_size, false);
275 if (!operation->response) {
276 greybus_free_gbuf(operation->request);
280 operation->response_payload =
281 operation->response->transfer_buffer +
282 sizeof(struct gb_operation_msg_hdr);
285 operation->callback = NULL; /* set at submit time */
286 init_completion(&operation->completion);
/* Track the operation on its connection's list */
288 spin_lock_irq(&gb_operations_lock);
289 list_add_tail(&operation->links, &connection->operations);
290 spin_unlock_irq(&gb_operations_lock);
296 * Destroy a previously created operation.
/*
 * Unlink @operation from its connection's list and free both message
 * buffers (greybus_free_gbuf presumably tolerates a NULL response).
 * NOTE(review): the listing ends here mid-function -- the final free
 * of the operation struct itself is not visible.
 */
298 void gb_operation_destroy(struct gb_operation *operation)
300 if (WARN_ON(!operation))
303 /* XXX Make sure it's not in flight */
304 spin_lock_irq(&gb_operations_lock);
305 list_del(&operation->links);
306 spin_unlock_irq(&gb_operations_lock);
308 greybus_free_gbuf(operation->response);
309 greybus_free_gbuf(operation->request);