/*
 * Copyright (c) 2011-2016 Synaptics Incorporated
 * Copyright (c) 2011 Unixphere
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rmi.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
#include "rmi_driver.h"

#define RMI_SPI_DEFAULT_XFER_BUF_SIZE   64

#define RMI_PAGE_SELECT_REGISTER        0x00FF
#define RMI_SPI_PAGE(addr)              (((addr) >> 8) & 0x80)
#define RMI_SPI_XFER_SIZE_LIMIT         255

#define BUFFER_SIZE_INCREMENT 32

enum rmi_spi_op {
        RMI_SPI_WRITE = 0,
        RMI_SPI_READ,
        RMI_SPI_V2_READ_UNIFIED,
        RMI_SPI_V2_READ_SPLIT,
        RMI_SPI_V2_WRITE,
};

struct rmi_spi_cmd {
        enum rmi_spi_op op;
        u16 addr;
};

struct rmi_spi_xport {
        struct rmi_transport_dev xport;
        struct spi_device *spi;

        struct mutex page_mutex;
        int page;

        u8 *rx_buf;
        u8 *tx_buf;
        int xfer_buf_size;

        struct spi_transfer *rx_xfers;
        struct spi_transfer *tx_xfers;
        int rx_xfer_count;
        int tx_xfer_count;
};

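/*
 * rmi_spi_manage_pools - size the DMA-safe transfer buffers
 *
 * Grows the shared rx/tx buffer (allocated with GFP_DMA so it is safe to
 * hand to the SPI core) until it can hold @len bytes, capped at
 * RMI_SPI_XFER_SIZE_LIMIT, and allocates the pool of spi_transfer
 * structures used when the platform data requests per-byte read/write
 * delays. Previously allocated buffers are released once the new ones are
 * in place.
 */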
static int rmi_spi_manage_pools(struct rmi_spi_xport *rmi_spi, int len)
{
        struct spi_device *spi = rmi_spi->spi;
        int buf_size = rmi_spi->xfer_buf_size
                ? rmi_spi->xfer_buf_size : RMI_SPI_DEFAULT_XFER_BUF_SIZE;
        struct spi_transfer *xfer_buf;
        void *buf;
        void *tmp;

        while (buf_size < len)
                buf_size *= 2;

        if (buf_size > RMI_SPI_XFER_SIZE_LIMIT)
                buf_size = RMI_SPI_XFER_SIZE_LIMIT;

        tmp = rmi_spi->rx_buf;
        buf = devm_kzalloc(&spi->dev, buf_size * 2,
                                GFP_KERNEL | GFP_DMA);
        if (!buf)
                return -ENOMEM;

        rmi_spi->rx_buf = buf;
        rmi_spi->tx_buf = &rmi_spi->rx_buf[buf_size];
        rmi_spi->xfer_buf_size = buf_size;

        if (tmp)
                devm_kfree(&spi->dev, tmp);

        if (rmi_spi->xport.pdata.spi_data.read_delay_us)
                rmi_spi->rx_xfer_count = buf_size;
        else
                rmi_spi->rx_xfer_count = 1;

        if (rmi_spi->xport.pdata.spi_data.write_delay_us)
                rmi_spi->tx_xfer_count = buf_size;
        else
                rmi_spi->tx_xfer_count = 1;

        /*
         * Allocate a pool of spi_transfer buffers for devices which need
         * per byte delays.
         */
        tmp = rmi_spi->rx_xfers;
        xfer_buf = devm_kzalloc(&spi->dev,
                (rmi_spi->rx_xfer_count + rmi_spi->tx_xfer_count)
                * sizeof(struct spi_transfer), GFP_KERNEL);
        if (!xfer_buf)
                return -ENOMEM;

        rmi_spi->rx_xfers = xfer_buf;
        rmi_spi->tx_xfers = &xfer_buf[rmi_spi->rx_xfer_count];

        if (tmp)
                devm_kfree(&spi->dev, tmp);

        return 0;
}

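/*
 * rmi_spi_xfer - build and run a single SPI message
 *
 * Prepends the command/address header required by @cmd->op to the data in
 * @tx_buf, queues either one transfer or a chain of single-byte transfers
 * (when per-byte delays are configured), runs the message synchronously,
 * and copies any received data back into @rx_buf.
 */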
static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
                        const struct rmi_spi_cmd *cmd, const u8 *tx_buf,
                        int tx_len, u8 *rx_buf, int rx_len)
{
        struct spi_device *spi = rmi_spi->spi;
        struct rmi_device_platform_data_spi *spi_data =
                                        &rmi_spi->xport.pdata.spi_data;
        struct spi_message msg;
        struct spi_transfer *xfer;
        int ret = 0;
        int len;
        int cmd_len = 0;
        int total_tx_len;
        int i;
        u16 addr = cmd->addr;

        spi_message_init(&msg);

        switch (cmd->op) {
        case RMI_SPI_WRITE:
        case RMI_SPI_READ:
                cmd_len += 2;
                break;
        case RMI_SPI_V2_READ_UNIFIED:
        case RMI_SPI_V2_READ_SPLIT:
        case RMI_SPI_V2_WRITE:
                cmd_len += 4;
                break;
        }

        total_tx_len = cmd_len + tx_len;
        len = max(total_tx_len, rx_len);

        if (len > RMI_SPI_XFER_SIZE_LIMIT)
                return -EINVAL;

        if (rmi_spi->xfer_buf_size < len) {
                ret = rmi_spi_manage_pools(rmi_spi, len);
                if (ret < 0)
                        return ret;
        }

        if (addr == 0)
                /*
                 * SPI needs an address. Use 0x7FF if we want to keep
                 * reading from the last position of the register pointer.
                 */
                addr = 0x7FF;

        switch (cmd->op) {
        case RMI_SPI_WRITE:
                rmi_spi->tx_buf[0] = (addr >> 8);
                rmi_spi->tx_buf[1] = addr & 0xFF;
                break;
        case RMI_SPI_READ:
                rmi_spi->tx_buf[0] = (addr >> 8) | 0x80;
                rmi_spi->tx_buf[1] = addr & 0xFF;
                break;
        case RMI_SPI_V2_READ_UNIFIED:
                break;
        case RMI_SPI_V2_READ_SPLIT:
                break;
        case RMI_SPI_V2_WRITE:
                rmi_spi->tx_buf[0] = 0x40;
                rmi_spi->tx_buf[1] = (addr >> 8) & 0xFF;
                rmi_spi->tx_buf[2] = addr & 0xFF;
                rmi_spi->tx_buf[3] = tx_len;
                break;
        }

        if (tx_buf)
                memcpy(&rmi_spi->tx_buf[cmd_len], tx_buf, tx_len);

        if (rmi_spi->tx_xfer_count > 1) {
                for (i = 0; i < total_tx_len; i++) {
                        xfer = &rmi_spi->tx_xfers[i];
                        memset(xfer, 0, sizeof(struct spi_transfer));
                        xfer->tx_buf = &rmi_spi->tx_buf[i];
                        xfer->len = 1;
                        xfer->delay_usecs = spi_data->write_delay_us;
                        spi_message_add_tail(xfer, &msg);
                }
        } else {
                xfer = rmi_spi->tx_xfers;
                memset(xfer, 0, sizeof(struct spi_transfer));
                xfer->tx_buf = rmi_spi->tx_buf;
                xfer->len = total_tx_len;
                spi_message_add_tail(xfer, &msg);
        }

        rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: cmd: %s tx_buf len: %d tx_buf: %*ph\n",
                __func__, cmd->op == RMI_SPI_WRITE ? "WRITE" : "READ",
                total_tx_len, total_tx_len, rmi_spi->tx_buf);

        if (rx_buf) {
                if (rmi_spi->rx_xfer_count > 1) {
                        for (i = 0; i < rx_len; i++) {
                                xfer = &rmi_spi->rx_xfers[i];
                                memset(xfer, 0, sizeof(struct spi_transfer));
                                xfer->rx_buf = &rmi_spi->rx_buf[i];
                                xfer->len = 1;
                                xfer->delay_usecs = spi_data->read_delay_us;
                                spi_message_add_tail(xfer, &msg);
                        }
                } else {
                        xfer = rmi_spi->rx_xfers;
                        memset(xfer, 0, sizeof(struct spi_transfer));
                        xfer->rx_buf = rmi_spi->rx_buf;
                        xfer->len = rx_len;
                        spi_message_add_tail(xfer, &msg);
                }
        }

        ret = spi_sync(spi, &msg);
        if (ret < 0) {
                dev_err(&spi->dev, "spi xfer failed: %d\n", ret);
                return ret;
        }

        if (rx_buf) {
                memcpy(rx_buf, rmi_spi->rx_buf, rx_len);
                rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: (%d) %*ph\n",
                        __func__, rx_len, rx_len, rx_buf);
        }

        return 0;
}

/*
 * rmi_set_page - Set RMI page
 * @rmi_spi: The pointer to the rmi_spi_xport struct
 * @page: The new page address.
 *
 * RMI devices have 16-bit addressing, but some of the transport
 * implementations (like SMBus) only have 8-bit addressing. So RMI implements
 * a page select register at 0xff of every page so that addressing can be
 * reliably paged every 256 registers.
 *
 * The page_mutex lock must be held when this function is entered.
 *
 * Returns zero on success, non-zero on failure.
 */
static int rmi_set_page(struct rmi_spi_xport *rmi_spi, u8 page)
{
        struct rmi_spi_cmd cmd;
        int ret;

        cmd.op = RMI_SPI_WRITE;
        cmd.addr = RMI_PAGE_SELECT_REGISTER;

        ret = rmi_spi_xfer(rmi_spi, &cmd, &page, 1, NULL, 0);

        if (!ret)
                rmi_spi->page = page;

        return ret;
}

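/*
 * rmi_spi_write_block - write a block of registers starting at @addr,
 * switching the page select register first when @addr lives on a different
 * page than the one currently cached. Serialized against concurrent
 * transfers by page_mutex. rmi_spi_read_block() below is its read-side
 * counterpart.
 */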
static int rmi_spi_write_block(struct rmi_transport_dev *xport, u16 addr,
                               const void *buf, size_t len)
{
        struct rmi_spi_xport *rmi_spi =
                container_of(xport, struct rmi_spi_xport, xport);
        struct rmi_spi_cmd cmd;
        int ret;

        mutex_lock(&rmi_spi->page_mutex);

        if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
                ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
                if (ret)
                        goto exit;
        }

        cmd.op = RMI_SPI_WRITE;
        cmd.addr = addr;

        ret = rmi_spi_xfer(rmi_spi, &cmd, buf, len, NULL, 0);

exit:
        mutex_unlock(&rmi_spi->page_mutex);
        return ret;
}

static int rmi_spi_read_block(struct rmi_transport_dev *xport, u16 addr,
                              void *buf, size_t len)
{
        struct rmi_spi_xport *rmi_spi =
                container_of(xport, struct rmi_spi_xport, xport);
        struct rmi_spi_cmd cmd;
        int ret;

        mutex_lock(&rmi_spi->page_mutex);

        if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
                ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
                if (ret)
                        goto exit;
        }

        cmd.op = RMI_SPI_READ;
        cmd.addr = addr;

        ret = rmi_spi_xfer(rmi_spi, &cmd, NULL, 0, buf, len);

exit:
        mutex_unlock(&rmi_spi->page_mutex);
        return ret;
}

static const struct rmi_transport_ops rmi_spi_ops = {
        .write_block    = rmi_spi_write_block,
        .read_block     = rmi_spi_read_block,
};

#ifdef CONFIG_OF
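/*
 * rmi_spi_of_probe - read the "spi-rx-delay-us" and "spi-tx-delay-us"
 * devicetree properties into the platform data; they set the per-byte
 * read/write delays used when building SPI messages above.
 */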
static int rmi_spi_of_probe(struct spi_device *spi,
                        struct rmi_device_platform_data *pdata)
{
        struct device *dev = &spi->dev;
        int retval;

        retval = rmi_of_property_read_u32(dev,
                        &pdata->spi_data.read_delay_us,
                        "spi-rx-delay-us", 1);
        if (retval)
                return retval;

        retval = rmi_of_property_read_u32(dev,
                        &pdata->spi_data.write_delay_us,
                        "spi-tx-delay-us", 1);
        if (retval)
                return retval;

        return 0;
}

static const struct of_device_id rmi_spi_of_match[] = {
        { .compatible = "syna,rmi4-spi" },
        {},
};
MODULE_DEVICE_TABLE(of, rmi_spi_of_match);
#else
static inline int rmi_spi_of_probe(struct spi_device *spi,
                                struct rmi_device_platform_data *pdata)
{
        return -ENODEV;
}
#endif

static void rmi_spi_unregister_transport(void *data)
{
        struct rmi_spi_xport *rmi_spi = data;

        rmi_unregister_transport_device(&rmi_spi->xport);
}

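/*
 * rmi_spi_probe - set up the SPI transport: reject half-duplex controllers,
 * gather platform data (from devicetree or legacy platform data), configure
 * the SPI link and verify it by selecting page 0, then register the
 * transport device with the RMI core.
 */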
static int rmi_spi_probe(struct spi_device *spi)
{
        struct rmi_spi_xport *rmi_spi;
        struct rmi_device_platform_data *pdata;
        struct rmi_device_platform_data *spi_pdata = spi->dev.platform_data;
        int error;

        if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
                return -EINVAL;

        rmi_spi = devm_kzalloc(&spi->dev, sizeof(struct rmi_spi_xport),
                        GFP_KERNEL);
        if (!rmi_spi)
                return -ENOMEM;

        pdata = &rmi_spi->xport.pdata;

        if (spi->dev.of_node) {
                error = rmi_spi_of_probe(spi, pdata);
                if (error)
                        return error;
        } else if (spi_pdata) {
                *pdata = *spi_pdata;
        }

        if (pdata->spi_data.bits_per_word)
                spi->bits_per_word = pdata->spi_data.bits_per_word;

        if (pdata->spi_data.mode)
                spi->mode = pdata->spi_data.mode;

        error = spi_setup(spi);
        if (error < 0) {
                dev_err(&spi->dev, "spi_setup failed: %d\n", error);
                return error;
        }

        pdata->irq = spi->irq;

        rmi_spi->spi = spi;
        mutex_init(&rmi_spi->page_mutex);

        rmi_spi->xport.dev = &spi->dev;
        rmi_spi->xport.proto_name = "spi";
        rmi_spi->xport.ops = &rmi_spi_ops;

        spi_set_drvdata(spi, rmi_spi);

        error = rmi_spi_manage_pools(rmi_spi, RMI_SPI_DEFAULT_XFER_BUF_SIZE);
        if (error)
                return error;

        /*
         * Setting the page to zero will (a) make sure the PSR is in a
         * known state, and (b) make sure we can talk to the device.
         */
        error = rmi_set_page(rmi_spi, 0);
        if (error) {
                dev_err(&spi->dev, "Failed to set page select to 0.\n");
                return error;
        }

        dev_info(&spi->dev, "registering SPI-connected sensor\n");

        error = rmi_register_transport_device(&rmi_spi->xport);
        if (error) {
                dev_err(&spi->dev, "failed to register sensor: %d\n", error);
                return error;
        }

        error = devm_add_action_or_reset(&spi->dev,
                                          rmi_spi_unregister_transport,
                                          rmi_spi);
        if (error)
                return error;

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int rmi_spi_suspend(struct device *dev)
{
        struct spi_device *spi = to_spi_device(dev);
        struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
        int ret;

        ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, true);
        if (ret)
                dev_warn(dev, "Failed to suspend device: %d\n", ret);

        return ret;
}

static int rmi_spi_resume(struct device *dev)
{
        struct spi_device *spi = to_spi_device(dev);
        struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
        int ret;

        ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, true);
        if (ret)
                dev_warn(dev, "Failed to resume device: %d\n", ret);

        return ret;
}
#endif

#ifdef CONFIG_PM
static int rmi_spi_runtime_suspend(struct device *dev)
{
        struct spi_device *spi = to_spi_device(dev);
        struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
        int ret;

        ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, false);
        if (ret)
                dev_warn(dev, "Failed to suspend device: %d\n", ret);

        return 0;
}

static int rmi_spi_runtime_resume(struct device *dev)
{
        struct spi_device *spi = to_spi_device(dev);
        struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
        int ret;

        ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, false);
        if (ret)
                dev_warn(dev, "Failed to resume device: %d\n", ret);

        return 0;
}
#endif

static const struct dev_pm_ops rmi_spi_pm = {
        SET_SYSTEM_SLEEP_PM_OPS(rmi_spi_suspend, rmi_spi_resume)
        SET_RUNTIME_PM_OPS(rmi_spi_runtime_suspend, rmi_spi_runtime_resume,
                           NULL)
};

static const struct spi_device_id rmi_id[] = {
        { "rmi4_spi", 0 },
        { }
};
MODULE_DEVICE_TABLE(spi, rmi_id);

static struct spi_driver rmi_spi_driver = {
        .driver = {
                .name   = "rmi4_spi",
                .pm     = &rmi_spi_pm,
                .of_match_table = of_match_ptr(rmi_spi_of_match),
        },
        .id_table       = rmi_id,
        .probe          = rmi_spi_probe,
};

module_spi_driver(rmi_spi_driver);

MODULE_AUTHOR("Christopher Heiny <cheiny@synaptics.com>");
MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
MODULE_DESCRIPTION("RMI SPI driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(RMI_DRIVER_VERSION);