Source: drivers/staging/greybus/spi.c, exported from the karo-tx-linux.git web viewer (blob shown at commit "greybus: spi: validate spi master register").
1 /*
2  * SPI bridge driver for the Greybus "generic" SPI module.
3  *
4  * Copyright 2014-2015 Google Inc.
5  * Copyright 2014-2015 Linaro Ltd.
6  *
7  * Released under the GPLv2 only.
8  */
9
10 #include <linux/bitops.h>
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/spi/spi.h>
15
16 #include "greybus.h"
17
/* Per-connection state for one Greybus SPI master. */
struct gb_spi {
	struct gb_connection	*connection;	/* Greybus connection used for all operations */
	u16			mode;		/* SPI mode bits reported by the module */
	u16			flags;		/* controller flags reported by the module */
	u32			bits_per_word_mask;
	u8			num_chipselect;	/* number of chip selects the module exposes */
	u32			min_speed_hz;
	u32			max_speed_hz;
	/* array of num_chipselect entries, allocated in gb_spi_init() */
	struct spi_device	*spi_devices;
};
28
29 static struct spi_master *get_master_from_spi(struct gb_spi *spi)
30 {
31         return spi->connection->private;
32 }
33
34 static int tx_header_fit_operation(u32 tx_size, u32 count, size_t data_max)
35 {
36         size_t headers_size;
37
38         data_max -= sizeof(struct gb_spi_transfer_request);
39         headers_size = (count + 1) * sizeof(struct gb_spi_transfer);
40
41         return tx_size + headers_size > data_max ? 0 : 1;
42 }
43
44 static size_t calc_rx_xfer_size(u32 rx_size, u32 *tx_xfer_size, u32 len,
45                                 size_t data_max)
46 {
47         size_t rx_xfer_size;
48
49         data_max -= sizeof(struct gb_spi_transfer_response);
50
51         if (rx_size + len > data_max)
52                 rx_xfer_size = data_max - rx_size;
53         else
54                 rx_xfer_size = len;
55
56         /* if this is a write_read, for symmetry read the same as write */
57         if (*tx_xfer_size && rx_xfer_size > *tx_xfer_size)
58                 rx_xfer_size = *tx_xfer_size;
59         if (*tx_xfer_size && rx_xfer_size < *tx_xfer_size)
60                 *tx_xfer_size = rx_xfer_size;
61
62         return rx_xfer_size;
63 }
64
65 static size_t calc_tx_xfer_size(u32 tx_size, u32 count, size_t len,
66                                 size_t data_max)
67 {
68         size_t headers_size;
69
70         data_max -= sizeof(struct gb_spi_transfer_request);
71         headers_size = (count + 1) * sizeof(struct gb_spi_transfer);
72
73         if (tx_size + headers_size + len > data_max)
74                 return data_max - (tx_size + sizeof(struct gb_spi_transfer));
75
76         return len;
77 }
78
/* Routines to transfer data */

/*
 * Build a Greybus SPI transfer operation for @msg.
 *
 * First pass: walk the message's transfer list counting how many transfers
 * (and how much tx/rx payload) fit within the connection's maximum operation
 * payload; a message that does not fit entirely is truncated at the last
 * transfer that fits, possibly with a shortened length for that transfer.
 * Second pass: serialize the transfer descriptors followed by all tx data
 * into the allocated request buffer.
 *
 * On success, stores the number of payload bytes described by the request in
 * *total_len and returns the operation; returns NULL on failure.
 */
static struct gb_operation *
gb_spi_operation_create(struct gb_connection *connection,
			struct spi_message *msg, u32 *total_len)
{
	struct gb_spi_transfer_request *request;
	struct spi_device *dev = msg->spi;
	struct spi_transfer *xfer;
	struct gb_spi_transfer *gb_xfer;
	struct gb_operation *operation;
	struct spi_transfer *last_xfer = NULL;
	u32 tx_size = 0, rx_size = 0, count = 0, xfer_len = 0, request_size;
	u32 tx_xfer_size = 0, rx_xfer_size = 0, last_xfer_size = 0;
	size_t data_max;
	void *tx_data;

	data_max = gb_operation_get_payload_size_max(connection);

	/* Find number of transfers queued and tx/rx length in the message */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!xfer->tx_buf && !xfer->rx_buf) {
			dev_err(&connection->bundle->dev,
				"bufferless transfer, length %u\n", xfer->len);
			return NULL;
		}
		/* remember where counting stopped for the second pass */
		last_xfer = xfer;

		tx_xfer_size = 0;
		rx_xfer_size = 0;

		if (xfer->tx_buf) {
			/* stop before this transfer if its descriptor no longer fits */
			if (!tx_header_fit_operation(tx_size, count, data_max))
				break;
			tx_xfer_size = calc_tx_xfer_size(tx_size, count,
							 xfer->len, data_max);
			last_xfer_size = tx_xfer_size;
		}

		if (xfer->rx_buf) {
			/* may also shrink tx_xfer_size for write/read symmetry */
			rx_xfer_size = calc_rx_xfer_size(rx_size, &tx_xfer_size,
							 xfer->len, data_max);
			last_xfer_size = rx_xfer_size;
		}

		tx_size += tx_xfer_size;
		rx_size += rx_xfer_size;

		*total_len += last_xfer_size;
		count++;

		/* a truncated transfer must be the last one included */
		if (xfer->len != last_xfer_size)
			break;
	}

	/*
	 * In addition to space for all message descriptors we need
	 * to have enough to hold all tx data.
	 */
	request_size = sizeof(*request);
	request_size += count * sizeof(*gb_xfer);
	request_size += tx_size;

	/* Response consists only of incoming data */
	operation = gb_operation_create(connection, GB_SPI_TYPE_TRANSFER,
					request_size, rx_size, GFP_KERNEL);
	if (!operation)
		return NULL;

	request = operation->request->payload;
	request->count = cpu_to_le16(count);
	request->mode = dev->mode;
	request->chip_select = dev->chip_select;

	gb_xfer = &request->transfers[0];
	tx_data = gb_xfer + count;	/* place tx data after last gb_xfer */

	/* Fill in the transfers array */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* only the transfer where the first pass stopped may be shortened */
		if (last_xfer && xfer == last_xfer)
			xfer_len = last_xfer_size;
		else
			xfer_len = xfer->len;

		gb_xfer->speed_hz = cpu_to_le32(xfer->speed_hz);
		gb_xfer->len = cpu_to_le32(xfer_len);
		gb_xfer->delay_usecs = cpu_to_le16(xfer->delay_usecs);
		gb_xfer->cs_change = xfer->cs_change;
		gb_xfer->bits_per_word = xfer->bits_per_word;

		/* Copy tx data */
		if (xfer->tx_buf) {
			gb_xfer->rdwr |= GB_SPI_XFER_WRITE;
			memcpy(tx_data, xfer->tx_buf, xfer_len);
			tx_data += xfer_len;
		}

		if (xfer->rx_buf)
			gb_xfer->rdwr |= GB_SPI_XFER_READ;

		/* stop at the transfer where counting stopped */
		if (last_xfer && xfer == last_xfer)
			break;

		gb_xfer++;
	}

	return operation;
}
186
187 static void gb_spi_decode_response(struct spi_message *msg,
188                                    struct gb_spi_transfer_response *response)
189 {
190         struct spi_transfer *xfer;
191         void *rx_data = response->data;
192
193         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
194                 /* Copy rx data */
195                 if (xfer->rx_buf) {
196                         memcpy(xfer->rx_buf, rx_data, xfer->len);
197                         rx_data += xfer->len;
198                 }
199         }
200 }
201
202 static int gb_spi_transfer_one_message(struct spi_master *master,
203                                        struct spi_message *msg)
204 {
205         struct gb_spi *spi = spi_master_get_devdata(master);
206         struct gb_connection *connection = spi->connection;
207         struct gb_spi_transfer_response *response;
208         struct gb_operation *operation;
209         u32 len = 0;
210         int ret;
211
212         operation = gb_spi_operation_create(connection, msg, &len);
213         if (!operation)
214                 return -ENOMEM;
215
216         ret = gb_operation_request_send_sync(operation);
217         if (!ret) {
218                 response = operation->response->payload;
219                 if (response)
220                         gb_spi_decode_response(msg, response);
221         } else {
222                 pr_err("transfer operation failed (%d)\n", ret);
223         }
224
225         gb_operation_put(operation);
226
227         msg->actual_length = len;
228         msg->status = 0;
229         spi_finalize_current_message(master);
230
231         return ret;
232 }
233
/* spi_master->setup() hook: no per-device configuration is needed yet. */
static int gb_spi_setup(struct spi_device *spi)
{
	return 0;
}
239
/* spi_master->cleanup() hook: no per-device state to tear down yet. */
static void gb_spi_cleanup(struct spi_device *spi)
{
}
244
245
/* Routines to get controller information */

/*
 * Map Greybus spi mode bits/flags/bpw into Linux ones.
 * All bits are the same for now, so these macros return their argument
 * unchanged; they exist as a single point of change should the Greybus
 * and Linux encodings ever diverge.
 */
#define gb_spi_mode_map(mode) mode
#define gb_spi_flags_map(flags) flags
254
255 static int gb_spi_get_master_config(struct gb_spi *spi)
256 {
257         struct gb_spi_master_config_response response;
258         u16 mode, flags;
259         int ret;
260
261         ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_MASTER_CONFIG,
262                                 NULL, 0, &response, sizeof(response));
263         if (ret < 0)
264                 return ret;
265
266         mode = le16_to_cpu(response.mode);
267         spi->mode = gb_spi_mode_map(mode);
268
269         flags = le16_to_cpu(response.flags);
270         spi->flags = gb_spi_flags_map(flags);
271
272         spi->bits_per_word_mask = le32_to_cpu(response.bits_per_word_mask);
273         spi->num_chipselect = response.num_chipselect;
274
275         spi->min_speed_hz = le32_to_cpu(response.min_speed_hz);
276         spi->max_speed_hz = le32_to_cpu(response.max_speed_hz);
277
278         return 0;
279 }
280
281 static int gb_spi_setup_device(struct gb_spi *spi, u8 cs)
282 {
283         struct spi_master *master = get_master_from_spi(spi);
284         struct gb_spi_device_config_request request;
285         struct gb_spi_device_config_response response;
286         struct spi_board_info spi_board = { {0} };
287         struct spi_device *spidev = &spi->spi_devices[cs];
288         int ret;
289
290         request.chip_select = cs;
291
292         ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_DEVICE_CONFIG,
293                                 &request, sizeof(request),
294                                 &response, sizeof(response));
295         if (ret < 0)
296                 return ret;
297
298         memcpy(spi_board.modalias, response.name, sizeof(spi_board.modalias));
299         spi_board.mode          = le16_to_cpu(response.mode);
300         spi_board.bus_num       = master->bus_num;
301         spi_board.chip_select   = cs;
302         spi_board.max_speed_hz  = le32_to_cpu(response.max_speed_hz);
303
304         spidev = spi_new_device(master, &spi_board);
305         if (!spidev)
306                 ret = -EINVAL;
307
308         return 0;
309 }
310
311 static int gb_spi_init(struct gb_spi *spi)
312 {
313         int ret;
314
315         /* get master configuration */
316         ret = gb_spi_get_master_config(spi);
317         if (ret)
318                 return ret;
319
320         spi->spi_devices = kcalloc(spi->num_chipselect,
321                                    sizeof(struct spi_device), GFP_KERNEL);
322         if (!spi->spi_devices)
323                 return -ENOMEM;
324
325         return ret;
326 }
327
328
329 static int gb_spi_connection_init(struct gb_connection *connection)
330 {
331         struct gb_spi *spi;
332         struct spi_master *master;
333         int ret;
334         u8 i;
335
336         /* Allocate master with space for data */
337         master = spi_alloc_master(&connection->bundle->dev, sizeof(*spi));
338         if (!master) {
339                 dev_err(&connection->bundle->dev, "cannot alloc SPI master\n");
340                 return -ENOMEM;
341         }
342
343         spi = spi_master_get_devdata(master);
344         spi->connection = connection;
345         connection->private = master;
346
347         ret = gb_spi_init(spi);
348         if (ret)
349                 goto out_put_master;
350
351         master->bus_num = -1; /* Allow spi-core to allocate it dynamically */
352         master->num_chipselect = spi->num_chipselect;
353         master->mode_bits = spi->mode;
354         master->flags = spi->flags;
355         master->bits_per_word_mask = spi->bits_per_word_mask;
356
357         /* Attach methods */
358         master->cleanup = gb_spi_cleanup;
359         master->setup = gb_spi_setup;
360         master->transfer_one_message = gb_spi_transfer_one_message;
361
362         ret = spi_register_master(master);
363         if (ret < 0)
364                 goto out_put_master;
365
366         /* now, fetch the devices configuration */
367         for (i = 0; i < spi->num_chipselect; i++) {
368                 ret = gb_spi_setup_device(spi, i);
369                 if (ret < 0)
370                         break;
371         }
372
373         return ret;
374
375 out_put_master:
376         spi_master_put(master);
377
378         return ret;
379 }
380
381 static void gb_spi_connection_exit(struct gb_connection *connection)
382 {
383         struct spi_master *master = connection->private;
384
385         spi_unregister_master(master);
386 }
387
/* Greybus protocol descriptor for the SPI bridge. */
static struct gb_protocol spi_protocol = {
	.name			= "spi",
	.id			= GREYBUS_PROTOCOL_SPI,
	.major			= GB_SPI_VERSION_MAJOR,
	.minor			= GB_SPI_VERSION_MINOR,
	.connection_init	= gb_spi_connection_init,
	.connection_exit	= gb_spi_connection_exit,
	.request_recv		= NULL,	/* no unsolicited requests handled */
};

gb_builtin_protocol_driver(spi_protocol);