/*
 * Copyright (C) 2005, 2006 IBM Corporation
 *
 * Authors:
 * Leendert van Doorn <leendert@watson.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * Maintained by: <tpmdd-devel@lists.sourceforge.net>
 *
 * Device driver for TCG/TCPA TPM (trusted platform module).
 * Specifications at www.trustedcomputinggroup.org
 *
 * This device driver implements the TPM interface as defined in
 * the TCG TPM Interface Spec version 1.2, revision 1.0.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
#include "tpm.h"
/* Fixed size of the tag/paramsize/result header of every TPM response */
#define TPM_HEADER_SIZE 10

/* TPM_ACCESS register bits (one register per locality) */
enum tis_access {
	TPM_ACCESS_VALID = 0x80,
	TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
	TPM_ACCESS_REQUEST_PENDING = 0x04,
	TPM_ACCESS_REQUEST_USE = 0x02,
};

/* TPM_STS register bits */
enum tis_status {
	TPM_STS_VALID = 0x80,
	TPM_STS_COMMAND_READY = 0x40,
	TPM_STS_GO = 0x20,
	TPM_STS_DATA_AVAIL = 0x10,
	TPM_STS_DATA_EXPECT = 0x08,
};

/* TPM_INT_ENABLE / TPM_INTF_CAPS register bits */
enum tis_int_flags {
	TPM_GLOBAL_INT_ENABLE = 0x80000000,
	TPM_INTF_BURST_COUNT_STATIC = 0x100,
	TPM_INTF_CMD_READY_INT = 0x080,
	TPM_INTF_INT_EDGE_FALLING = 0x040,
	TPM_INTF_INT_EDGE_RISING = 0x020,
	TPM_INTF_INT_LEVEL_LOW = 0x010,
	TPM_INTF_INT_LEVEL_HIGH = 0x008,
	TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
	TPM_INTF_STS_VALID_INT = 0x002,
	TPM_INTF_DATA_AVAIL_INT = 0x001,
};

/* MMIO window and polling timeouts used when no device entry is found */
enum tis_defaults {
	TIS_MEM_BASE = 0xFED40000,
	TIS_MEM_LEN = 0x5000,
	TIS_SHORT_TIMEOUT = 750,	/* ms */
	TIS_LONG_TIMEOUT = 2000,	/* 2 sec */
};
/*
 * Register offsets within the MMIO window; each locality (l) occupies a
 * 4 KiB page, so the locality index selects bits 12..15 of the offset.
 */
#define TPM_ACCESS(l)			(0x0000 | ((l) << 12))
#define TPM_INT_ENABLE(l)		(0x0008 | ((l) << 12))
#define TPM_INT_VECTOR(l)		(0x000C | ((l) << 12))
#define TPM_INT_STATUS(l)		(0x0010 | ((l) << 12))
#define TPM_INTF_CAPS(l)		(0x0014 | ((l) << 12))
#define TPM_STS(l)			(0x0018 | ((l) << 12))
#define TPM_DATA_FIFO(l)		(0x0024 | ((l) << 12))

#define TPM_DID_VID(l)			(0x0F00 | ((l) << 12))
#define TPM_RID(l)			(0x0F04 | ((l) << 12))
/* List of all chips this driver has registered, guarded by tis_lock */
static LIST_HEAD(tis_chips);
static DEFINE_SPINLOCK(tis_lock);
#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
/*
 * Detect Intel's integrated TPM by its ACPI hardware id so the
 * STS_DATA_EXPECT workaround can be enabled automatically.
 */
static int is_itpm(struct pnp_dev *dev)
{
	struct acpi_device *acpi = pnp_acpi_device(dev);
	struct acpi_hardware_id *id;

	list_for_each_entry(id, &acpi->pnp.ids, list) {
		if (!strcmp("INTC0102", id->id))
			return 1;
	}

	return 0;
}
#else
/* Without ACPI there is no way to identify an iTPM here */
static inline int is_itpm(struct pnp_dev *dev)
{
	return 0;
}
#endif
103 static int check_locality(struct tpm_chip *chip, int l)
105 if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
106 (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
107 (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
108 return chip->vendor.locality = l;
113 static void release_locality(struct tpm_chip *chip, int l, int force)
115 if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
116 (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
117 (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
118 iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
119 chip->vendor.iobase + TPM_ACCESS(l));
122 static int request_locality(struct tpm_chip *chip, int l)
124 unsigned long stop, timeout;
127 if (check_locality(chip, l) >= 0)
130 iowrite8(TPM_ACCESS_REQUEST_USE,
131 chip->vendor.iobase + TPM_ACCESS(l));
133 stop = jiffies + chip->vendor.timeout_a;
135 if (chip->vendor.irq) {
137 timeout = stop - jiffies;
138 if ((long)timeout <= 0)
140 rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
146 if (rc == -ERESTARTSYS && freezing(current)) {
147 clear_thread_flag(TIF_SIGPENDING);
151 /* wait for burstcount */
153 if (check_locality(chip, l) >= 0)
157 while (time_before(jiffies, stop));
162 static u8 tpm_tis_status(struct tpm_chip *chip)
164 return ioread8(chip->vendor.iobase +
165 TPM_STS(chip->vendor.locality));
168 static void tpm_tis_ready(struct tpm_chip *chip)
170 /* this causes the current command to be aborted */
171 iowrite8(TPM_STS_COMMAND_READY,
172 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
175 static int get_burstcount(struct tpm_chip *chip)
180 /* wait for burstcount */
181 /* which timeout value, spec has 2 answers (c & d) */
182 stop = jiffies + chip->vendor.timeout_d;
184 burstcnt = ioread8(chip->vendor.iobase +
185 TPM_STS(chip->vendor.locality) + 1);
186 burstcnt += ioread8(chip->vendor.iobase +
187 TPM_STS(chip->vendor.locality) +
192 } while (time_before(jiffies, stop));
196 static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
197 wait_queue_head_t *queue)
203 /* check current status */
204 status = tpm_tis_status(chip);
205 if ((status & mask) == mask)
208 stop = jiffies + timeout;
210 if (chip->vendor.irq) {
212 timeout = stop - jiffies;
213 if ((long)timeout <= 0)
215 rc = wait_event_interruptible_timeout(*queue,
221 if (rc == -ERESTARTSYS && freezing(current)) {
222 clear_thread_flag(TIF_SIGPENDING);
228 status = tpm_tis_status(chip);
229 if ((status & mask) == mask)
231 } while (time_before(jiffies, stop));
236 static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
238 int size = 0, burstcnt;
239 while (size < count &&
241 TPM_STS_DATA_AVAIL | TPM_STS_VALID,
242 chip->vendor.timeout_c,
243 &chip->vendor.read_queue)
245 burstcnt = get_burstcount(chip);
246 for (; burstcnt > 0 && size < count; burstcnt--)
247 buf[size++] = ioread8(chip->vendor.iobase +
248 TPM_DATA_FIFO(chip->vendor.
254 static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
257 int expected, status;
259 if (count < TPM_HEADER_SIZE) {
264 /* read first 10 bytes, including tag, paramsize, and result */
266 recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
267 dev_err(chip->dev, "Unable to read header\n");
271 expected = be32_to_cpu(*(__be32 *) (buf + 2));
272 if (expected > count) {
278 recv_data(chip, &buf[TPM_HEADER_SIZE],
279 expected - TPM_HEADER_SIZE)) < expected) {
280 dev_err(chip->dev, "Unable to read remainder of result\n");
285 wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
286 &chip->vendor.int_queue);
287 status = tpm_tis_status(chip);
288 if (status & TPM_STS_DATA_AVAIL) { /* retry? */
289 dev_err(chip->dev, "Error left over data\n");
296 release_locality(chip, chip->vendor.locality, 0);
301 module_param(itpm, bool, 0444);
302 MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
305 * If interrupts are used (signaled by an irq set in the vendor structure)
306 * tpm.c can skip polling for the data to be available as the interrupt is
309 static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
311 int rc, status, burstcnt;
314 if (request_locality(chip, 0) < 0)
317 status = tpm_tis_status(chip);
318 if ((status & TPM_STS_COMMAND_READY) == 0) {
321 (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
322 &chip->vendor.int_queue) < 0) {
328 while (count < len - 1) {
329 burstcnt = get_burstcount(chip);
330 for (; burstcnt > 0 && count < len - 1; burstcnt--) {
331 iowrite8(buf[count], chip->vendor.iobase +
332 TPM_DATA_FIFO(chip->vendor.locality));
336 wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
337 &chip->vendor.int_queue);
338 status = tpm_tis_status(chip);
339 if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
345 /* write last byte */
347 chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
348 wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
349 &chip->vendor.int_queue);
350 status = tpm_tis_status(chip);
351 if ((status & TPM_STS_DATA_EXPECT) != 0) {
360 release_locality(chip, chip->vendor.locality, 0);
365 * If interrupts are used (signaled by an irq set in the vendor structure)
366 * tpm.c can skip polling for the data to be available as the interrupt is
369 static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
374 rc = tpm_tis_send_data(chip, buf, len);
380 chip->vendor.iobase + TPM_STS(chip->vendor.locality));
382 if (chip->vendor.irq) {
383 ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
385 (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
386 tpm_calc_ordinal_duration(chip, ordinal),
387 &chip->vendor.read_queue) < 0) {
395 release_locality(chip, chip->vendor.locality, 0);
400 * Early probing for iTPM with STS_DATA_EXPECT flaw.
401 * Try sending command without itpm flag set and if that
402 * fails, repeat with itpm flag set.
404 static int probe_itpm(struct tpm_chip *chip)
407 u8 cmd_getticks[] = {
408 0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
409 0x00, 0x00, 0x00, 0xf1
411 size_t len = sizeof(cmd_getticks);
416 rc = tpm_tis_send_data(chip, cmd_getticks, len);
421 release_locality(chip, chip->vendor.locality, 0);
425 rc = tpm_tis_send_data(chip, cmd_getticks, len);
427 dev_info(chip->dev, "Detected an iTPM.\n");
435 release_locality(chip, chip->vendor.locality, 0);
440 static const struct file_operations tis_ops = {
441 .owner = THIS_MODULE,
446 .release = tpm_release,
449 static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
450 static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
451 static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
452 static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
453 static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
454 static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
456 static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
457 static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
458 static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
459 static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
461 static struct attribute *tis_attrs[] = {
462 &dev_attr_pubek.attr,
464 &dev_attr_enabled.attr,
465 &dev_attr_active.attr,
466 &dev_attr_owned.attr,
467 &dev_attr_temp_deactivated.attr,
469 &dev_attr_cancel.attr,
470 &dev_attr_durations.attr,
471 &dev_attr_timeouts.attr, NULL,
474 static struct attribute_group tis_attr_grp = {
478 static struct tpm_vendor_specific tpm_tis = {
479 .status = tpm_tis_status,
480 .recv = tpm_tis_recv,
481 .send = tpm_tis_send,
482 .cancel = tpm_tis_ready,
483 .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
484 .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
485 .req_canceled = TPM_STS_COMMAND_READY,
486 .attr_group = &tis_attr_grp,
491 static irqreturn_t tis_int_probe(int irq, void *dev_id)
493 struct tpm_chip *chip = dev_id;
496 interrupt = ioread32(chip->vendor.iobase +
497 TPM_INT_STATUS(chip->vendor.locality));
502 chip->vendor.probed_irq = irq;
504 /* Clear interrupts handled with TPM_EOI */
506 chip->vendor.iobase +
507 TPM_INT_STATUS(chip->vendor.locality));
511 static irqreturn_t tis_int_handler(int dummy, void *dev_id)
513 struct tpm_chip *chip = dev_id;
517 interrupt = ioread32(chip->vendor.iobase +
518 TPM_INT_STATUS(chip->vendor.locality));
523 if (interrupt & TPM_INTF_DATA_AVAIL_INT)
524 wake_up_interruptible(&chip->vendor.read_queue);
525 if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
526 for (i = 0; i < 5; i++)
527 if (check_locality(chip, i) >= 0)
530 (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
531 TPM_INTF_CMD_READY_INT))
532 wake_up_interruptible(&chip->vendor.int_queue);
534 /* Clear interrupts handled with TPM_EOI */
536 chip->vendor.iobase +
537 TPM_INT_STATUS(chip->vendor.locality));
538 ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
542 static int interrupts = 1;
543 module_param(interrupts, bool, 0444);
544 MODULE_PARM_DESC(interrupts, "Enable interrupts");
546 static int tpm_tis_init(struct device *dev, resource_size_t start,
547 resource_size_t len, unsigned int irq)
549 u32 vendor, intfcaps, intmask;
550 int rc, i, irq_s, irq_e;
551 struct tpm_chip *chip;
553 if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
556 chip->vendor.iobase = ioremap(start, len);
557 if (!chip->vendor.iobase) {
562 /* Default timeouts */
563 chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
564 chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
565 chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
566 chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
568 if (request_locality(chip, 0) != 0) {
573 vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
576 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
577 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
580 itpm = probe_itpm(chip);
588 dev_info(dev, "Intel iTPM workaround enabled\n");
591 /* Figure out the capabilities */
593 ioread32(chip->vendor.iobase +
594 TPM_INTF_CAPS(chip->vendor.locality));
595 dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
597 if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
598 dev_dbg(dev, "\tBurst Count Static\n");
599 if (intfcaps & TPM_INTF_CMD_READY_INT)
600 dev_dbg(dev, "\tCommand Ready Int Support\n");
601 if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
602 dev_dbg(dev, "\tInterrupt Edge Falling\n");
603 if (intfcaps & TPM_INTF_INT_EDGE_RISING)
604 dev_dbg(dev, "\tInterrupt Edge Rising\n");
605 if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
606 dev_dbg(dev, "\tInterrupt Level Low\n");
607 if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
608 dev_dbg(dev, "\tInterrupt Level High\n");
609 if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
610 dev_dbg(dev, "\tLocality Change Int Support\n");
611 if (intfcaps & TPM_INTF_STS_VALID_INT)
612 dev_dbg(dev, "\tSts Valid Int Support\n");
613 if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
614 dev_dbg(dev, "\tData Avail Int Support\n");
616 /* get the timeouts before testing for irqs */
617 tpm_get_timeouts(chip);
619 if (tpm_do_selftest(chip)) {
620 dev_err(dev, "TPM self test failed\n");
625 /* INTERRUPT Setup */
626 init_waitqueue_head(&chip->vendor.read_queue);
627 init_waitqueue_head(&chip->vendor.int_queue);
630 ioread32(chip->vendor.iobase +
631 TPM_INT_ENABLE(chip->vendor.locality));
633 intmask |= TPM_INTF_CMD_READY_INT
634 | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
635 | TPM_INTF_STS_VALID_INT;
638 chip->vendor.iobase +
639 TPM_INT_ENABLE(chip->vendor.locality));
641 chip->vendor.irq = irq;
642 if (interrupts && !chip->vendor.irq) {
644 ioread8(chip->vendor.iobase +
645 TPM_INT_VECTOR(chip->vendor.locality));
653 for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) {
654 iowrite8(i, chip->vendor.iobase +
655 TPM_INT_VECTOR(chip->vendor.locality));
657 (i, tis_int_probe, IRQF_SHARED,
658 chip->vendor.miscdev.name, chip) != 0) {
660 "Unable to request irq: %d for probe\n",
665 /* Clear all existing */
667 (chip->vendor.iobase +
668 TPM_INT_STATUS(chip->vendor.locality)),
669 chip->vendor.iobase +
670 TPM_INT_STATUS(chip->vendor.locality));
673 iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
674 chip->vendor.iobase +
675 TPM_INT_ENABLE(chip->vendor.locality));
677 chip->vendor.probed_irq = 0;
679 /* Generate Interrupts */
680 tpm_gen_interrupt(chip);
682 chip->vendor.irq = chip->vendor.probed_irq;
684 /* free_irq will call into tis_int_probe;
685 clear all irqs we haven't seen while doing
688 (chip->vendor.iobase +
689 TPM_INT_STATUS(chip->vendor.locality)),
690 chip->vendor.iobase +
691 TPM_INT_STATUS(chip->vendor.locality));
695 chip->vendor.iobase +
696 TPM_INT_ENABLE(chip->vendor.locality));
700 if (chip->vendor.irq) {
701 iowrite8(chip->vendor.irq,
702 chip->vendor.iobase +
703 TPM_INT_VECTOR(chip->vendor.locality));
705 (chip->vendor.irq, tis_int_handler, IRQF_SHARED,
706 chip->vendor.miscdev.name, chip) != 0) {
708 "Unable to request irq: %d for use\n",
710 chip->vendor.irq = 0;
712 /* Clear all existing */
714 (chip->vendor.iobase +
715 TPM_INT_STATUS(chip->vendor.locality)),
716 chip->vendor.iobase +
717 TPM_INT_STATUS(chip->vendor.locality));
720 iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
721 chip->vendor.iobase +
722 TPM_INT_ENABLE(chip->vendor.locality));
726 INIT_LIST_HEAD(&chip->vendor.list);
727 spin_lock(&tis_lock);
728 list_add(&chip->vendor.list, &tis_chips);
729 spin_unlock(&tis_lock);
734 if (chip->vendor.iobase)
735 iounmap(chip->vendor.iobase);
736 tpm_remove_hardware(chip->dev);
740 static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
744 /* reenable interrupts that device may have lost or
745 BIOS/firmware may have disabled */
746 iowrite8(chip->vendor.irq, chip->vendor.iobase +
747 TPM_INT_VECTOR(chip->vendor.locality));
750 ioread32(chip->vendor.iobase +
751 TPM_INT_ENABLE(chip->vendor.locality));
753 intmask |= TPM_INTF_CMD_READY_INT
754 | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
755 | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;
758 chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
763 static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
764 const struct pnp_device_id *pnp_id)
766 resource_size_t start, len;
767 unsigned int irq = 0;
769 start = pnp_mem_start(pnp_dev, 0);
770 len = pnp_mem_len(pnp_dev, 0);
772 if (pnp_irq_valid(pnp_dev, 0))
773 irq = pnp_irq(pnp_dev, 0);
777 if (is_itpm(pnp_dev))
780 return tpm_tis_init(&pnp_dev->dev, start, len, irq);
783 static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
785 return tpm_pm_suspend(&dev->dev, msg);
788 static int tpm_tis_pnp_resume(struct pnp_dev *dev)
790 struct tpm_chip *chip = pnp_get_drvdata(dev);
793 if (chip->vendor.irq)
794 tpm_tis_reenable_interrupts(chip);
796 ret = tpm_pm_resume(&dev->dev);
798 tpm_do_selftest(chip);
803 static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
804 {"PNP0C31", 0}, /* TPM */
805 {"ATM1200", 0}, /* Atmel */
806 {"IFX0102", 0}, /* Infineon */
807 {"BCM0101", 0}, /* Broadcom */
808 {"BCM0102", 0}, /* Broadcom */
809 {"NSC1200", 0}, /* National */
810 {"ICO0102", 0}, /* Intel */
812 {"", 0}, /* User Specified */
813 {"", 0} /* Terminator */
815 MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
817 static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev)
819 struct tpm_chip *chip = pnp_get_drvdata(dev);
821 tpm_dev_vendor_release(chip);
827 static struct pnp_driver tis_pnp_driver = {
829 .id_table = tpm_pnp_tbl,
830 .probe = tpm_tis_pnp_init,
831 .suspend = tpm_tis_pnp_suspend,
832 .resume = tpm_tis_pnp_resume,
833 .remove = tpm_tis_pnp_remove,
836 #define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
837 module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
838 sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
839 MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
841 static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
843 return tpm_pm_suspend(&dev->dev, msg);
846 static int tpm_tis_resume(struct platform_device *dev)
848 struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
850 if (chip->vendor.irq)
851 tpm_tis_reenable_interrupts(chip);
853 return tpm_pm_resume(&dev->dev);
855 static struct platform_driver tis_drv = {
858 .owner = THIS_MODULE,
860 .suspend = tpm_tis_suspend,
861 .resume = tpm_tis_resume,
864 static struct platform_device *pdev;
867 module_param(force, bool, 0444);
868 MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
869 static int __init init_tis(void)
874 return pnp_register_driver(&tis_pnp_driver);
877 rc = platform_driver_register(&tis_drv);
880 if (IS_ERR(pdev=platform_device_register_simple("tpm_tis", -1, NULL, 0)))
881 return PTR_ERR(pdev);
882 if((rc=tpm_tis_init(&pdev->dev, TIS_MEM_BASE, TIS_MEM_LEN, 0)) != 0) {
883 platform_device_unregister(pdev);
884 platform_driver_unregister(&tis_drv);
889 static void __exit cleanup_tis(void)
891 struct tpm_vendor_specific *i, *j;
892 struct tpm_chip *chip;
893 spin_lock(&tis_lock);
894 list_for_each_entry_safe(i, j, &tis_chips, list) {
895 chip = to_tpm_chip(i);
896 tpm_remove_hardware(chip->dev);
897 iowrite32(~TPM_GLOBAL_INT_ENABLE &
898 ioread32(chip->vendor.iobase +
899 TPM_INT_ENABLE(chip->vendor.
901 chip->vendor.iobase +
902 TPM_INT_ENABLE(chip->vendor.locality));
903 release_locality(chip, chip->vendor.locality, 1);
904 if (chip->vendor.irq)
905 free_irq(chip->vendor.irq, chip);
909 spin_unlock(&tis_lock);
912 pnp_unregister_driver(&tis_pnp_driver);
916 platform_device_unregister(pdev);
917 platform_driver_unregister(&tis_drv);
920 module_init(init_tis);
921 module_exit(cleanup_tis);
922 MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
923 MODULE_DESCRIPTION("TPM Driver");
924 MODULE_VERSION("2.0");
925 MODULE_LICENSE("GPL");