/*
 * NVEC: NVIDIA compliant embedded controller interface
 *
 * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.launchpad.net>
 *
 * Authors:  Pierre-Hugues Husson <phhusson@free.fr>
 *           Ilya Petrov <ilya.muromec@gmail.com>
 *           Marc Dietrich <marvin24@gmx.de>
 *           Julian Andres Klode <jak@jak-linux.org>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/atomic.h>
22 #include <linux/clk.h>
23 #include <linux/completion.h>
24 #include <linux/delay.h>
25 #include <linux/err.h>
26 #include <linux/gpio.h>
27 #include <linux/interrupt.h>
29 #include <linux/irq.h>
31 #include <linux/of_gpio.h>
32 #include <linux/list.h>
33 #include <linux/mfd/core.h>
34 #include <linux/mutex.h>
35 #include <linux/notifier.h>
36 #include <linux/platform_device.h>
37 #include <linux/slab.h>
38 #include <linux/spinlock.h>
39 #include <linux/workqueue.h>
42 #include <mach/iomap.h>
/* Tegra I2C controller register offsets and bits used by the slave
 * interface (see tegra_init_i2c_slave() and nvec_interrupt()). */

/* master configuration register and its bits */
#define I2C_CNFG			0x00
#define I2C_CNFG_PACKET_MODE_EN		(1<<10)
#define I2C_CNFG_NEW_MASTER_SFM		(1<<11)
#define I2C_CNFG_DEBOUNCE_CNT_SHIFT	12

/* slave configuration register and its bits */
#define I2C_SL_CNFG		0x20
#define I2C_SL_NEWSL		(1<<2)
#define I2C_SL_NACK		(1<<1)
#define I2C_SL_RESP		(1<<0)

/* status flags reported in I2C_SL_STATUS */
#define I2C_SL_IRQ		(1<<3)
#define END_TRANS		(1<<4)
/* NOTE(review): RCVD/RNW are referenced by nvec_interrupt() but their
 * definitions were elided in this excerpt; values restored from the
 * upstream driver — confirm against the original file. */
#define RCVD			(1<<2)
#define RNW			(1<<1)

/* slave data/address registers */
#define I2C_SL_RCVD		0x24
#define I2C_SL_STATUS		0x28
#define I2C_SL_ADDR1		0x2c
#define I2C_SL_ADDR2		0x30
#define I2C_SL_DELAY_COUNT	0x3c
67 * enum nvec_msg_category - Message categories for nvec_msg_alloc()
68 * @NVEC_MSG_RX: The message is an incoming message (from EC)
69 * @NVEC_MSG_TX: The message is an outgoing message (to EC)
71 enum nvec_msg_category {
76 static const unsigned char EC_DISABLE_EVENT_REPORTING[3] = "\x04\x00\x00";
77 static const unsigned char EC_ENABLE_EVENT_REPORTING[3] = "\x04\x00\x01";
78 static const unsigned char EC_GET_FIRMWARE_VERSION[2] = "\x07\x15";
80 static struct nvec_chip *nvec_power_handle;
82 static struct mfd_cell nvec_devices[] = {
100 .name = "nvec-paz00",
106 * nvec_register_notifier - Register a notifier with nvec
107 * @nvec: A &struct nvec_chip
108 * @nb: The notifier block to register
110 * Registers a notifier with @nvec. The notifier will be added to an atomic
111 * notifier chain that is called for all received messages except those that
112 * correspond to a request initiated by nvec_write_sync().
114 int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
117 return atomic_notifier_chain_register(&nvec->notifier_list, nb);
119 EXPORT_SYMBOL_GPL(nvec_register_notifier);
122 * nvec_status_notifier - The final notifier
124 * Prints a message about control events not handled in the notifier
127 static int nvec_status_notifier(struct notifier_block *nb,
128 unsigned long event_type, void *data)
130 struct nvec_chip *nvec = container_of(nb, struct nvec_chip,
131 nvec_status_notifier);
132 unsigned char *msg = (unsigned char *)data;
134 if (event_type != NVEC_CNTL)
137 dev_warn(nvec->dev, "unhandled msg type %ld\n", event_type);
138 print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1,
139 msg, msg[1] + 2, true);
146 * @nvec: A &struct nvec_chip
147 * @category: Pool category, see &enum nvec_msg_category
149 * Allocate a single &struct nvec_msg object from the message pool of
150 * @nvec. The result shall be passed to nvec_msg_free() if no longer
153 * Outgoing messages are placed in the upper 75% of the pool, keeping the
154 * lower 25% available for RX buffers only. The reason is to prevent a
155 * situation where all buffers are full and a message is thus endlessly
156 * retried because the response could never be processed.
158 static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
159 enum nvec_msg_category category)
161 int i = (category == NVEC_MSG_TX) ? (NVEC_POOL_SIZE / 4) : 0;
163 for (; i < NVEC_POOL_SIZE; i++) {
164 if (atomic_xchg(&nvec->msg_pool[i].used, 1) == 0) {
165 dev_vdbg(nvec->dev, "INFO: Allocate %i\n", i);
166 return &nvec->msg_pool[i];
170 dev_err(nvec->dev, "could not allocate %s buffer\n",
171 (category == NVEC_MSG_TX) ? "TX" : "RX");
178 * @nvec: A &struct nvec_chip
179 * @msg: A message (must be allocated by nvec_msg_alloc() and belong to @nvec)
181 * Free the given message
183 inline void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
185 if (msg != &nvec->tx_scratch)
186 dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
187 atomic_set(&msg->used, 0);
189 EXPORT_SYMBOL_GPL(nvec_msg_free);
192 * nvec_msg_is_event - Return %true if @msg is an event
195 static bool nvec_msg_is_event(struct nvec_msg *msg)
197 return msg->data[0] >> 7;
201 * nvec_msg_size - Get the size of a message
202 * @msg: The message to get the size for
204 * This only works for received messages, not for outgoing messages.
206 static size_t nvec_msg_size(struct nvec_msg *msg)
208 bool is_event = nvec_msg_is_event(msg);
209 int event_length = (msg->data[0] & 0x60) >> 5;
211 /* for variable size, payload size in byte 1 + count (1) + cmd (1) */
212 if (!is_event || event_length == NVEC_VAR_SIZE)
213 return (msg->pos || msg->size) ? (msg->data[1] + 2) : 0;
214 else if (event_length == NVEC_2BYTES)
216 else if (event_length == NVEC_3BYTES)
223 * nvec_gpio_set_value - Set the GPIO value
224 * @nvec: A &struct nvec_chip
225 * @value: The value to write (0 or 1)
227 * Like gpio_set_value(), but generating debugging information
229 static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
231 dev_dbg(nvec->dev, "GPIO changed from %u to %u\n",
232 gpio_get_value(nvec->gpio), value);
233 gpio_set_value(nvec->gpio, value);
237 * nvec_write_async - Asynchronously write a message to NVEC
238 * @nvec: An nvec_chip instance
239 * @data: The message data, starting with the request type
240 * @size: The size of @data
242 * Queue a single message to be transferred to the embedded controller
243 * and return immediately.
245 * Returns: 0 on success, a negative error code on failure. If a failure
246 * occured, the nvec driver may print an error.
248 int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
251 struct nvec_msg *msg;
254 msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);
260 memcpy(msg->data + 1, data, size);
261 msg->size = size + 1;
263 spin_lock_irqsave(&nvec->tx_lock, flags);
264 list_add_tail(&msg->node, &nvec->tx_data);
265 spin_unlock_irqrestore(&nvec->tx_lock, flags);
267 queue_work(system_nrt_wq, &nvec->tx_work);
271 EXPORT_SYMBOL(nvec_write_async);
274 * nvec_write_sync - Write a message to nvec and read the response
275 * @nvec: An &struct nvec_chip
276 * @data: The data to write
277 * @size: The size of @data
279 * This is similar to nvec_write_async(), but waits for the
280 * request to be answered before returning. This function
281 * uses a mutex and can thus not be called from e.g.
282 * interrupt handlers.
284 * Returns: A pointer to the response message on success,
285 * %NULL on failure. Free with nvec_msg_free() once no longer
288 struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
289 const unsigned char *data, short size)
291 struct nvec_msg *msg;
293 mutex_lock(&nvec->sync_write_mutex);
295 nvec->sync_write_pending = (data[1] << 8) + data[0];
297 if (nvec_write_async(nvec, data, size) < 0) {
298 mutex_unlock(&nvec->sync_write_mutex);
302 dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n",
303 nvec->sync_write_pending);
304 if (!(wait_for_completion_timeout(&nvec->sync_write,
305 msecs_to_jiffies(2000)))) {
306 dev_warn(nvec->dev, "timeout waiting for sync write to complete\n");
307 mutex_unlock(&nvec->sync_write_mutex);
311 dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");
313 msg = nvec->last_sync_msg;
315 mutex_unlock(&nvec->sync_write_mutex);
319 EXPORT_SYMBOL(nvec_write_sync);
322 * nvec_request_master - Process outgoing messages
323 * @work: A &struct work_struct (the tx_worker member of &struct nvec_chip)
325 * Processes all outgoing requests by sending the request and awaiting the
326 * response, then continuing with the next request. Once a request has a
327 * matching response, it will be freed and removed from the list.
329 static void nvec_request_master(struct work_struct *work)
331 struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work);
334 struct nvec_msg *msg;
336 spin_lock_irqsave(&nvec->tx_lock, flags);
337 while (!list_empty(&nvec->tx_data)) {
338 msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node);
339 spin_unlock_irqrestore(&nvec->tx_lock, flags);
340 nvec_gpio_set_value(nvec, 0);
341 err = wait_for_completion_interruptible_timeout(
342 &nvec->ec_transfer, msecs_to_jiffies(5000));
345 dev_warn(nvec->dev, "timeout waiting for ec transfer\n");
346 nvec_gpio_set_value(nvec, 1);
350 spin_lock_irqsave(&nvec->tx_lock, flags);
353 list_del_init(&msg->node);
354 nvec_msg_free(nvec, msg);
357 spin_unlock_irqrestore(&nvec->tx_lock, flags);
361 * parse_msg - Print some information and call the notifiers on an RX message
362 * @nvec: A &struct nvec_chip
363 * @msg: A message received by @nvec
365 * Paarse some pieces of the message and then call the chain of notifiers
366 * registered via nvec_register_notifier.
368 static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg)
370 if ((msg->data[0] & 1 << 7) == 0 && msg->data[3]) {
371 dev_err(nvec->dev, "ec responded %*ph\n", 4, msg->data);
375 if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5)
376 print_hex_dump(KERN_WARNING, "ec system event ",
377 DUMP_PREFIX_NONE, 16, 1, msg->data,
378 msg->data[1] + 2, true);
380 atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f,
387 * nvec_dispatch - Process messages received from the EC
388 * @work: A &struct work_struct (the tx_worker member of &struct nvec_chip)
390 * Process messages previously received from the EC and put into the RX
391 * queue of the &struct nvec_chip instance associated with @work.
393 static void nvec_dispatch(struct work_struct *work)
395 struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work);
397 struct nvec_msg *msg;
399 spin_lock_irqsave(&nvec->rx_lock, flags);
400 while (!list_empty(&nvec->rx_data)) {
401 msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node);
402 list_del_init(&msg->node);
403 spin_unlock_irqrestore(&nvec->rx_lock, flags);
405 if (nvec->sync_write_pending ==
406 (msg->data[2] << 8) + msg->data[0]) {
407 dev_dbg(nvec->dev, "sync write completed!\n");
408 nvec->sync_write_pending = 0;
409 nvec->last_sync_msg = msg;
410 complete(&nvec->sync_write);
412 parse_msg(nvec, msg);
413 nvec_msg_free(nvec, msg);
415 spin_lock_irqsave(&nvec->rx_lock, flags);
417 spin_unlock_irqrestore(&nvec->rx_lock, flags);
421 * nvec_tx_completed - Complete the current transfer
422 * @nvec: A &struct nvec_chip
424 * This is called when we have received an END_TRANS on a TX transfer.
426 static void nvec_tx_completed(struct nvec_chip *nvec)
428 /* We got an END_TRANS, let's skip this, maybe there's an event */
429 if (nvec->tx->pos != nvec->tx->size) {
430 dev_err(nvec->dev, "premature END_TRANS, resending\n");
432 nvec_gpio_set_value(nvec, 0);
439 * nvec_rx_completed - Complete the current transfer
440 * @nvec: A &struct nvec_chip
442 * This is called when we have received an END_TRANS on a RX transfer.
444 static void nvec_rx_completed(struct nvec_chip *nvec)
446 if (nvec->rx->pos != nvec_msg_size(nvec->rx)) {
447 dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n",
448 (uint) nvec_msg_size(nvec->rx),
449 (uint) nvec->rx->pos);
451 nvec_msg_free(nvec, nvec->rx);
454 /* Battery quirk - Often incomplete, and likes to crash */
455 if (nvec->rx->data[0] == NVEC_BAT)
456 complete(&nvec->ec_transfer);
461 spin_lock(&nvec->rx_lock);
463 /* add the received data to the work list
464 and move the ring buffer pointer to the next entry */
465 list_add_tail(&nvec->rx->node, &nvec->rx_data);
467 spin_unlock(&nvec->rx_lock);
471 if (!nvec_msg_is_event(nvec->rx))
472 complete(&nvec->ec_transfer);
474 queue_work(system_nrt_wq, &nvec->rx_work);
478 * nvec_invalid_flags - Send an error message about invalid flags and jump
479 * @nvec: The nvec device
480 * @status: The status flags
481 * @reset: Whether we shall jump to state 0.
483 static void nvec_invalid_flags(struct nvec_chip *nvec, unsigned int status,
486 dev_err(nvec->dev, "unexpected status flags 0x%02x during state %i\n",
487 status, nvec->state);
493 * nvec_tx_set - Set the message to transfer (nvec->tx)
494 * @nvec: A &struct nvec_chip
496 * Gets the first entry from the tx_data list of @nvec and sets the
497 * tx member to it. If the tx_data list is empty, this uses the
498 * tx_scratch message to send a no operation message.
500 static void nvec_tx_set(struct nvec_chip *nvec)
502 spin_lock(&nvec->tx_lock);
503 if (list_empty(&nvec->tx_data)) {
504 dev_err(nvec->dev, "empty tx - sending no-op\n");
505 memcpy(nvec->tx_scratch.data, "\x02\x07\x02", 3);
506 nvec->tx_scratch.size = 3;
507 nvec->tx_scratch.pos = 0;
508 nvec->tx = &nvec->tx_scratch;
509 list_add_tail(&nvec->tx->node, &nvec->tx_data);
511 nvec->tx = list_first_entry(&nvec->tx_data, struct nvec_msg,
515 spin_unlock(&nvec->tx_lock);
517 dev_dbg(nvec->dev, "Sending message of length %u, command 0x%x\n",
518 (uint)nvec->tx->size, nvec->tx->data[1]);
522 * nvec_interrupt - Interrupt handler
524 * @dev: The nvec device
526 * Interrupt handler that fills our RX buffers and empties our TX
527 * buffers. This uses a finite state machine with ridiculous amounts
528 * of error checking, in order to be fairly reliable.
530 static irqreturn_t nvec_interrupt(int irq, void *dev)
532 unsigned long status;
533 unsigned int received = 0;
534 unsigned char to_send = 0xff;
535 const unsigned long irq_mask = I2C_SL_IRQ | END_TRANS | RCVD | RNW;
536 struct nvec_chip *nvec = dev;
537 unsigned int state = nvec->state;
539 status = readl(nvec->base + I2C_SL_STATUS);
541 /* Filter out some errors */
542 if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) {
543 dev_err(nvec->dev, "unexpected irq mask %lx\n", status);
546 if ((status & I2C_SL_IRQ) == 0) {
547 dev_err(nvec->dev, "Spurious IRQ\n");
551 /* The EC did not request a read, so it send us something, read it */
552 if ((status & RNW) == 0) {
553 received = readl(nvec->base + I2C_SL_RCVD);
555 writel(0, nvec->base + I2C_SL_RCVD);
558 if (status == (I2C_SL_IRQ | RCVD))
561 switch (nvec->state) {
562 case 0: /* Verify that its a transfer start, the rest later */
563 if (status != (I2C_SL_IRQ | RCVD))
564 nvec_invalid_flags(nvec, status, false);
566 case 1: /* command byte */
567 if (status != I2C_SL_IRQ) {
568 nvec_invalid_flags(nvec, status, true);
570 nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
571 /* Should not happen in a normal world */
572 if (unlikely(nvec->rx == NULL)) {
576 nvec->rx->data[0] = received;
581 case 2: /* first byte after command */
582 if (status == (I2C_SL_IRQ | RNW | RCVD)) {
584 if (nvec->rx->data[0] != 0x01) {
586 "Read without prior read command\n");
590 nvec_msg_free(nvec, nvec->rx);
593 BUG_ON(nvec->tx->size < 1);
594 to_send = nvec->tx->data[0];
596 } else if (status == (I2C_SL_IRQ)) {
597 BUG_ON(nvec->rx == NULL);
598 nvec->rx->data[1] = received;
602 nvec_invalid_flags(nvec, status, true);
605 case 3: /* EC does a block read, we transmit data */
606 if (status & END_TRANS) {
607 nvec_tx_completed(nvec);
608 } else if ((status & RNW) == 0 || (status & RCVD)) {
609 nvec_invalid_flags(nvec, status, true);
610 } else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
611 to_send = nvec->tx->data[nvec->tx->pos++];
613 dev_err(nvec->dev, "tx buffer underflow on %p (%u > %u)\n",
615 (uint) (nvec->tx ? nvec->tx->pos : 0),
616 (uint) (nvec->tx ? nvec->tx->size : 0));
620 case 4: /* EC does some write, we read the data */
621 if ((status & (END_TRANS | RNW)) == END_TRANS)
622 nvec_rx_completed(nvec);
623 else if (status & (RNW | RCVD))
624 nvec_invalid_flags(nvec, status, true);
625 else if (nvec->rx && nvec->rx->pos < NVEC_MSG_SIZE)
626 nvec->rx->data[nvec->rx->pos++] = received;
629 "RX buffer overflow on %p: "
630 "Trying to write byte %u of %u\n",
631 nvec->rx, nvec->rx->pos, NVEC_MSG_SIZE);
637 /* If we are told that a new transfer starts, verify it */
638 if ((status & (RCVD | RNW)) == RCVD) {
639 if (received != nvec->i2c_addr)
641 "received address 0x%02x, expected 0x%02x\n",
642 received, nvec->i2c_addr);
646 /* Send data if requested, but not on end of transmission */
647 if ((status & (RNW | END_TRANS)) == RNW)
648 writel(to_send, nvec->base + I2C_SL_RCVD);
650 /* If we have send the first byte */
651 if (status == (I2C_SL_IRQ | RNW | RCVD))
652 nvec_gpio_set_value(nvec, 1);
655 "Handled: %s 0x%02x, %s 0x%02x in state %u [%s%s%s]\n",
656 (status & RNW) == 0 ? "received" : "R=",
658 (status & (RNW | END_TRANS)) ? "sent" : "S=",
661 status & END_TRANS ? " END_TRANS" : "",
662 status & RCVD ? " RCVD" : "",
663 status & RNW ? " RNW" : "");
667 * TODO: A correct fix needs to be found for this.
669 * We experience less incomplete messages with this delay than without
670 * it, but we don't know why. Help is appreciated.
677 static void tegra_init_i2c_slave(struct nvec_chip *nvec)
681 clk_prepare_enable(nvec->i2c_clk);
683 tegra_periph_reset_assert(nvec->i2c_clk);
685 tegra_periph_reset_deassert(nvec->i2c_clk);
687 val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN |
688 (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
689 writel(val, nvec->base + I2C_CNFG);
691 clk_set_rate(nvec->i2c_clk, 8 * 80000);
693 writel(I2C_SL_NEWSL, nvec->base + I2C_SL_CNFG);
694 writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);
696 writel(nvec->i2c_addr>>1, nvec->base + I2C_SL_ADDR1);
697 writel(0, nvec->base + I2C_SL_ADDR2);
699 enable_irq(nvec->irq);
701 clk_disable_unprepare(nvec->i2c_clk);
#ifdef CONFIG_PM_SLEEP
/* Quiesce the slave interface for suspend: mask the IRQ, NACK further
 * transfers from the EC, and gate the controller clock. */
static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
{
	disable_irq(nvec->irq);
	writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
	clk_disable_unprepare(nvec->i2c_clk);
}
#endif
713 static void nvec_power_off(void)
715 nvec_write_async(nvec_power_handle, EC_DISABLE_EVENT_REPORTING, 3);
716 nvec_write_async(nvec_power_handle, "\x04\x01", 2);
719 static int __devinit tegra_nvec_probe(struct platform_device *pdev)
723 struct nvec_platform_data *pdata = pdev->dev.platform_data;
724 struct nvec_chip *nvec;
725 struct nvec_msg *msg;
726 struct resource *res;
729 nvec = devm_kzalloc(&pdev->dev, sizeof(struct nvec_chip), GFP_KERNEL);
731 dev_err(&pdev->dev, "failed to reserve memory\n");
734 platform_set_drvdata(pdev, nvec);
735 nvec->dev = &pdev->dev;
738 nvec->gpio = pdata->gpio;
739 nvec->i2c_addr = pdata->i2c_addr;
740 } else if (nvec->dev->of_node) {
741 nvec->gpio = of_get_named_gpio(nvec->dev->of_node,
743 if (nvec->gpio < 0) {
744 dev_err(&pdev->dev, "no gpio specified");
747 if (of_property_read_u32(nvec->dev->of_node,
748 "slave-addr", &nvec->i2c_addr)) {
749 dev_err(&pdev->dev, "no i2c address specified");
753 dev_err(&pdev->dev, "no platform data\n");
757 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
759 dev_err(&pdev->dev, "no mem resource?\n");
763 base = devm_request_and_ioremap(&pdev->dev, res);
765 dev_err(&pdev->dev, "Can't ioremap I2C region\n");
769 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
771 dev_err(&pdev->dev, "no irq resource?\n");
775 i2c_clk = clk_get_sys("tegra-i2c.2", "div-clk");
776 if (IS_ERR(i2c_clk)) {
777 dev_err(nvec->dev, "failed to get controller clock\n");
782 nvec->irq = res->start;
783 nvec->i2c_clk = i2c_clk;
784 nvec->rx = &nvec->msg_pool[0];
786 ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);
788 init_completion(&nvec->sync_write);
789 init_completion(&nvec->ec_transfer);
790 mutex_init(&nvec->sync_write_mutex);
791 spin_lock_init(&nvec->tx_lock);
792 spin_lock_init(&nvec->rx_lock);
793 INIT_LIST_HEAD(&nvec->rx_data);
794 INIT_LIST_HEAD(&nvec->tx_data);
795 INIT_WORK(&nvec->rx_work, nvec_dispatch);
796 INIT_WORK(&nvec->tx_work, nvec_request_master);
798 err = devm_gpio_request_one(&pdev->dev, nvec->gpio, GPIOF_OUT_INIT_HIGH,
801 dev_err(nvec->dev, "couldn't request gpio\n");
805 err = devm_request_irq(&pdev->dev, nvec->irq, nvec_interrupt, 0,
808 dev_err(nvec->dev, "couldn't request irq\n");
811 disable_irq(nvec->irq);
813 tegra_init_i2c_slave(nvec);
815 clk_prepare_enable(i2c_clk);
818 /* enable event reporting */
819 nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING,
820 sizeof(EC_ENABLE_EVENT_REPORTING));
822 nvec->nvec_status_notifier.notifier_call = nvec_status_notifier;
823 nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0);
825 nvec_power_handle = nvec;
826 pm_power_off = nvec_power_off;
828 /* Get Firmware Version */
829 msg = nvec_write_sync(nvec, EC_GET_FIRMWARE_VERSION,
830 sizeof(EC_GET_FIRMWARE_VERSION));
833 dev_warn(nvec->dev, "ec firmware version %02x.%02x.%02x / %02x\n",
834 msg->data[4], msg->data[5], msg->data[6], msg->data[7]);
836 nvec_msg_free(nvec, msg);
839 ret = mfd_add_devices(nvec->dev, -1, nvec_devices,
840 ARRAY_SIZE(nvec_devices), base, 0, NULL);
842 dev_err(nvec->dev, "error adding subdevices\n");
844 /* unmute speakers? */
845 nvec_write_async(nvec, "\x0d\x10\x59\x95", 4);
847 /* enable lid switch event */
848 nvec_write_async(nvec, "\x01\x01\x01\x00\x00\x02\x00", 7);
850 /* enable power button event */
851 nvec_write_async(nvec, "\x01\x01\x01\x00\x00\x80\x00", 7);
856 static int __devexit tegra_nvec_remove(struct platform_device *pdev)
858 struct nvec_chip *nvec = platform_get_drvdata(pdev);
860 nvec_write_async(nvec, EC_DISABLE_EVENT_REPORTING, 3);
861 mfd_remove_devices(nvec->dev);
862 cancel_work_sync(&nvec->rx_work);
863 cancel_work_sync(&nvec->tx_work);
868 #ifdef CONFIG_PM_SLEEP
869 static int nvec_suspend(struct device *dev)
871 struct platform_device *pdev = to_platform_device(dev);
872 struct nvec_chip *nvec = platform_get_drvdata(pdev);
873 struct nvec_msg *msg;
875 dev_dbg(nvec->dev, "suspending\n");
877 /* keep these sync or you'll break suspend */
878 msg = nvec_write_sync(nvec, EC_DISABLE_EVENT_REPORTING, 3);
879 nvec_msg_free(nvec, msg);
880 msg = nvec_write_sync(nvec, "\x04\x02", 2);
881 nvec_msg_free(nvec, msg);
883 nvec_disable_i2c_slave(nvec);
888 static int nvec_resume(struct device *dev)
890 struct platform_device *pdev = to_platform_device(dev);
891 struct nvec_chip *nvec = platform_get_drvdata(pdev);
893 dev_dbg(nvec->dev, "resuming\n");
894 tegra_init_i2c_slave(nvec);
895 nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING, 3);
901 static const SIMPLE_DEV_PM_OPS(nvec_pm_ops, nvec_suspend, nvec_resume);
903 /* Match table for of_platform binding */
904 static const struct of_device_id nvidia_nvec_of_match[] __devinitconst = {
905 { .compatible = "nvidia,nvec", },
908 MODULE_DEVICE_TABLE(of, nvidia_nvec_of_match);
910 static struct platform_driver nvec_device_driver = {
911 .probe = tegra_nvec_probe,
912 .remove = __devexit_p(tegra_nvec_remove),
915 .owner = THIS_MODULE,
917 .of_match_table = nvidia_nvec_of_match,
921 module_platform_driver(nvec_device_driver);
923 MODULE_ALIAS("platform:nvec");
924 MODULE_DESCRIPTION("NVIDIA compliant embedded controller interface");
925 MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
926 MODULE_LICENSE("GPL");