*
*/
+#include <linux/export.h>
#include <linux/device.h>
+#include <linux/gpio.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
*/
#define pr_fmt(fmt) "hw perfevents: " fmt
+#include <linux/bitmap.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
*****************************************************************************/
#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <asm/page.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
#include <linux/delay.h>
#include <linux/io.h>
-#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
+#include <linux/clk.h>
#include <plat/common.h>
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
#include <linux/types.h>
#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/signal.h>
*/
#include <linux/types.h>
+#include <linux/export.h>
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_embedded.h>
+#include <linux/bcma/bcma_soc.h>
#include <asm/bootinfo.h>
#include <asm/reboot.h>
#include <asm/time.h>
*
*/
+#include <linux/module.h>
#include <crypto/aes.h>
+#include <asm/aes.h>
asmlinkage void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
asmlinkage void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/bitops.h>
+#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
-#include <linux/edac_mce.h>
#include <linux/irq_work.h>
+#include <linux/export.h>
#include <asm/processor.h>
#include <asm/mce.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/bsg-lib.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <scsi/scsi_cmnd.h>
/**
--- /dev/null
+/*
+ * drivers/base/power/common.c - Common device power management code.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/pm_clock.h>
+
+/**
+ * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
+ * @dev: Device to handle.
+ *
+ * If power.subsys_data is NULL, point it to a new object, otherwise increment
+ * its reference counter. Return 1 if a new object has been created, otherwise
+ * return 0 or an error code.
+ */
+int dev_pm_get_subsys_data(struct device *dev)
+{
+ struct pm_subsys_data *psd;
+ int ret = 0;
+
+ psd = kzalloc(sizeof(*psd), GFP_KERNEL);
+ if (!psd)
+ return -ENOMEM;
+
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.subsys_data) {
+ dev->power.subsys_data->refcount++;
+ } else {
+ spin_lock_init(&psd->lock);
+ psd->refcount = 1;
+ dev->power.subsys_data = psd;
+ pm_clk_init(dev);
+ psd = NULL;
+ ret = 1;
+ }
+
+ spin_unlock_irq(&dev->power.lock);
+
+ /* kfree() verifies that its argument is nonzero. */
+ kfree(psd);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
+
+/**
+ * dev_pm_put_subsys_data - Drop reference to power.subsys_data.
+ * @dev: Device to handle.
+ *
+ * If the reference counter of power.subsys_data is zero after dropping the
+ * reference, power.subsys_data is removed. Return 1 if that happens or 0
+ * otherwise.
+ */
+int dev_pm_put_subsys_data(struct device *dev)
+{
+ struct pm_subsys_data *psd;
+ int ret = 0;
+
+ spin_lock_irq(&dev->power.lock);
+
+ psd = dev_to_psd(dev);
+ if (!psd) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (--psd->refcount == 0) {
+ dev->power.subsys_data = NULL;
+ kfree(psd);
+ ret = 1;
+ }
+
+ out:
+ spin_unlock_irq(&dev->power.lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
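A usage sketch for the pair above (hypothetical subsystem code, not part of
this patch; the my_subsys_* names are invented and the declarations are
assumed to come from <linux/pm.h>):

#include <linux/device.h>
#include <linux/pm.h>

static int my_subsys_attach(struct device *dev)
{
	/*
	 * Creates power.subsys_data on first use and refcounts it after
	 * that: returns 1 if created, 0 if reused, negative on error.
	 */
	int ret = dev_pm_get_subsys_data(dev);

	if (ret < 0)
		return ret;

	/* ... subsystem state now reachable via dev->power.subsys_data ... */
	return 0;
}

static void my_subsys_detach(struct device *dev)
{
	/* Frees power.subsys_data once the last reference is dropped. */
	dev_pm_put_subsys_data(dev);
}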
--- /dev/null
+/*
+ * Devices PM QoS constraints management
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *
+ * This module exposes the interface to kernel space for specifying
+ * per-device PM QoS dependencies. It provides infrastructure for registration
+ * of:
+ *
+ * Dependents on a QoS value : register requests
+ * Watchers of QoS value : get notified when target QoS value changes
+ *
+ * This QoS design is best effort based. Dependents register their QoS needs.
+ * Watchers register to keep track of the current QoS needs of the system.
+ * Watchers can register different types of notification callbacks:
+ * . a per-device notification callback using the dev_pm_qos_*_notifier API.
+ * The notification chain data is stored in the per-device constraint
+ * data struct.
+ * . a system-wide notification callback using the dev_pm_qos_*_global_notifier
+ * API. The notification chain data is stored in a static variable.
+ *
+ * Note about the per-device constraint data struct allocation:
+ * . The per-device constraints data struct ptr is stored into the device
+ * dev_pm_info.
+ * . To minimize the data usage by the per-device constraints, the data struct
+ * is only allocated at the first call to dev_pm_qos_add_request.
+ * . The data is later free'd when the device is removed from the system.
+ * . A global mutex protects the constraints users from the data being
+ * allocated and free'd.
+ */
+
+#include <linux/pm_qos.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/export.h>
+
+
+static DEFINE_MUTEX(dev_pm_qos_mtx);
+
+static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
+
+/**
+ * dev_pm_qos_read_value - Get PM QoS constraint for a given device.
+ * @dev: Device to get the PM QoS constraint value for.
+ */
+s32 dev_pm_qos_read_value(struct device *dev)
+{
+ struct pm_qos_constraints *c;
+ unsigned long flags;
+ s32 ret = 0;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ c = dev->power.constraints;
+ if (c)
+ ret = pm_qos_read_value(c);
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return ret;
+}
+
+/*
+ * apply_constraint
+ * @req: constraint request to apply
+ * @action: action to perform (add/update/remove), of type enum pm_qos_req_action
+ * @value: defines the qos request
+ *
+ * Internal function to update the constraints list using the PM QoS core
+ * code and, if needed, call the per-device and the global notification
+ * callbacks.
+ */
+static int apply_constraint(struct dev_pm_qos_request *req,
+ enum pm_qos_req_action action, int value)
+{
+ int ret, curr_value;
+
+ ret = pm_qos_update_target(req->dev->power.constraints,
+ &req->node, action, value);
+
+ if (ret) {
+ /* Call the global callbacks if needed */
+ curr_value = pm_qos_read_value(req->dev->power.constraints);
+ blocking_notifier_call_chain(&dev_pm_notifiers,
+ (unsigned long)curr_value,
+ req);
+ }
+
+ return ret;
+}
+
+/*
+ * dev_pm_qos_constraints_allocate
+ * @dev: device to allocate data for
+ *
+ * Called at the first call to add_request, for constraint data allocation.
+ * Must be called with the dev_pm_qos_mtx mutex held.
+ */
+static int dev_pm_qos_constraints_allocate(struct device *dev)
+{
+ struct pm_qos_constraints *c;
+ struct blocking_notifier_head *n;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return -ENOMEM;
+
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
+ if (!n) {
+ kfree(c);
+ return -ENOMEM;
+ }
+ BLOCKING_INIT_NOTIFIER_HEAD(n);
+
+ plist_head_init(&c->list);
+ c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
+ c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
+ c->type = PM_QOS_MIN;
+ c->notifiers = n;
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.constraints = c;
+ spin_unlock_irq(&dev->power.lock);
+
+ return 0;
+}
+
+/**
+ * dev_pm_qos_constraints_init - Initialize device's PM QoS constraints pointer.
+ * @dev: target device
+ *
+ * Called from the device PM subsystem during device insertion under
+ * device_pm_lock().
+ */
+void dev_pm_qos_constraints_init(struct device *dev)
+{
+ mutex_lock(&dev_pm_qos_mtx);
+ dev->power.constraints = NULL;
+ dev->power.power_state = PMSG_ON;
+ mutex_unlock(&dev_pm_qos_mtx);
+}
+
+/**
+ * dev_pm_qos_constraints_destroy
+ * @dev: target device
+ *
+ * Called from the device PM subsystem on device removal under device_pm_lock().
+ */
+void dev_pm_qos_constraints_destroy(struct device *dev)
+{
+ struct dev_pm_qos_request *req, *tmp;
+ struct pm_qos_constraints *c;
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ dev->power.power_state = PMSG_INVALID;
+ c = dev->power.constraints;
+ if (!c)
+ goto out;
+
+ /* Flush the constraints list for the device */
+ plist_for_each_entry_safe(req, tmp, &c->list, node) {
+ /*
+ * Update constraints list and call the notification
+ * callbacks if needed
+ */
+ apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.constraints = NULL;
+ spin_unlock_irq(&dev->power.lock);
+
+ kfree(c->notifiers);
+ kfree(c);
+
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
+}
+
+/**
+ * dev_pm_qos_add_request - inserts new qos request into the list
+ * @dev: target device for the constraint
+ * @req: pointer to a preallocated handle
+ * @value: defines the qos request
+ *
+ * This function inserts a new entry in the device constraints list of
+ * requested qos performance characteristics. It recomputes the aggregate
+ * QoS expectations of parameters and initializes the dev_pm_qos_request
+ * handle. Caller needs to save this handle for later use in updates and
+ * removal.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
+ * to allocate for data structures, -ENODEV if the device has just been removed
+ * from the system.
+ */
+int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
+ s32 value)
+{
+ int ret = 0;
+
+	if (!dev || !req) /* guard against callers passing in NULL */
+ return -EINVAL;
+
+ if (dev_pm_qos_request_active(req)) {
+ WARN(1, KERN_ERR "dev_pm_qos_add_request() called for already "
+ "added request\n");
+ return -EINVAL;
+ }
+
+ req->dev = dev;
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (!dev->power.constraints) {
+ if (dev->power.power_state.event == PM_EVENT_INVALID) {
+ /* The device has been removed from the system. */
+ req->dev = NULL;
+ ret = -ENODEV;
+ goto out;
+ } else {
+ /*
+ * Allocate the constraints data on the first call to
+ * add_request, i.e. only if the data is not already
+ * allocated and if the device has not been removed.
+ */
+ ret = dev_pm_qos_constraints_allocate(dev);
+ }
+ }
+
+ if (!ret)
+ ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
+
+/**
+ * dev_pm_qos_update_request - modifies an existing qos request
+ * @req : handle to list element holding a dev_pm_qos request to use
+ * @new_value: defines the qos request
+ *
+ * Updates an existing dev PM qos request along with updating the
+ * target value.
+ *
+ * Attempts are made to make this code callable on hot code paths.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENODEV if the device has been
+ * removed from the system
+ */
+int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
+ s32 new_value)
+{
+ int ret = 0;
+
+	if (!req) /* guard against callers passing in NULL */
+ return -EINVAL;
+
+ if (!dev_pm_qos_request_active(req)) {
+ WARN(1, KERN_ERR "dev_pm_qos_update_request() called for "
+ "unknown object\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (req->dev->power.constraints) {
+ if (new_value != req->node.prio)
+ ret = apply_constraint(req, PM_QOS_UPDATE_REQ,
+ new_value);
+ } else {
+ /* Return if the device has been removed */
+ ret = -ENODEV;
+ }
+
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
+
+/**
+ * dev_pm_qos_remove_request - removes an existing qos request
+ * @req: handle to request list element
+ *
+ * Will remove pm qos request from the list of constraints and
+ * recompute the current target value. Call this on slow code paths.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENODEV if the device has been
+ * removed from the system
+ */
+int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+ int ret = 0;
+
+	if (!req) /* guard against callers passing in NULL */
+ return -EINVAL;
+
+ if (!dev_pm_qos_request_active(req)) {
+ WARN(1, KERN_ERR "dev_pm_qos_remove_request() called for "
+ "unknown object\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (req->dev->power.constraints) {
+ ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
+ PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ } else {
+ /* Return if the device has been removed */
+ ret = -ENODEV;
+ }
+
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
+
+/**
+ * dev_pm_qos_add_notifier - sets notification entry for changes to target value
+ * of per-device PM QoS constraints
+ *
+ * @dev: target device for the constraint
+ * @notifier: notifier block managed by caller.
+ *
+ * Will register the notifier into a notification chain that gets called
+ * upon changes to the target value for the device.
+ */
+int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
+{
+ int retval = 0;
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ /* Silently return if the constraints object is not present. */
+ if (dev->power.constraints)
+ retval = blocking_notifier_chain_register(
+ dev->power.constraints->notifiers,
+ notifier);
+
+ mutex_unlock(&dev_pm_qos_mtx);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
+
+/**
+ * dev_pm_qos_remove_notifier - deletes notification for changes to target value
+ * of per-device PM QoS constraints
+ *
+ * @dev: target device for the constraint
+ * @notifier: notifier block to be removed.
+ *
+ * Will remove the notifier from the notification chain that gets called
+ * upon changes to the target value.
+ */
+int dev_pm_qos_remove_notifier(struct device *dev,
+ struct notifier_block *notifier)
+{
+ int retval = 0;
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ /* Silently return if the constraints object is not present. */
+ if (dev->power.constraints)
+ retval = blocking_notifier_chain_unregister(
+ dev->power.constraints->notifiers,
+ notifier);
+
+ mutex_unlock(&dev_pm_qos_mtx);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
+
+/**
+ * dev_pm_qos_add_global_notifier - sets notification entry for changes to
+ * target value of the PM QoS constraints for any device
+ *
+ * @notifier: notifier block managed by caller.
+ *
+ * Will register the notifier into a notification chain that gets called
+ * upon changes to the target value for any device.
+ */
+int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
+{
+ return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);
+
+/**
+ * dev_pm_qos_remove_global_notifier - deletes notification for changes to
+ * target value of PM QoS constraints for any device
+ *
+ * @notifier: notifier block to be removed.
+ *
+ * Will remove the notifier from the notification chain that gets called
+ * upon changes to the target value for any device.
+ */
+int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
+{
+ return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
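End to end, a driver-side consumer of the request API above might look like
this (illustrative sketch only; my_driver_set_latency and the 100/50 values
are invented):

#include <linux/pm_qos.h>

static int my_driver_set_latency(struct device *dev,
				 struct dev_pm_qos_request *req)
{
	int ret;

	/* Register an initial constraint of 100 against this device. */
	ret = dev_pm_qos_add_request(dev, req, 100);
	if (ret < 0)
		return ret;

	/* Tighten it; a return of 1 means the aggregate value changed. */
	ret = dev_pm_qos_update_request(req, 50);
	if (ret < 0) {
		dev_pm_qos_remove_request(req);
		return ret;
	}

	return 0;
}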
*/
#include <linux/sched.h>
+#include <linux/export.h>
#include <linux/pm_runtime.h>
+#include <trace/events/rpm.h>
#include "power.h"
static int rpm_resume(struct device *dev, int rpmflags);
--- /dev/null
+/*
+ * Register cache access API
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <trace/events/regmap.h>
+#include <linux/sort.h>
+
+#include "internal.h"
+
+static const struct regcache_ops *cache_types[] = {
+	&regcache_indexed_ops,
+	&regcache_rbtree_ops,
+	&regcache_lzo_ops,
+};
+
+static int regcache_hw_init(struct regmap *map)
+{
+ int i, j;
+ int ret;
+ int count;
+ unsigned int val;
+ void *tmp_buf;
+
+ if (!map->num_reg_defaults_raw)
+ return -EINVAL;
+
+ if (!map->reg_defaults_raw) {
+ dev_warn(map->dev, "No cache defaults, reading back from HW\n");
+ tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
+ if (!tmp_buf)
+			return -ENOMEM;
+ ret = regmap_bulk_read(map, 0, tmp_buf,
+ map->num_reg_defaults_raw);
+ if (ret < 0) {
+ kfree(tmp_buf);
+ return ret;
+ }
+ map->reg_defaults_raw = tmp_buf;
+ map->cache_free = 1;
+ }
+
+ /* calculate the size of reg_defaults */
+ for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
+ val = regcache_get_val(map->reg_defaults_raw,
+ i, map->cache_word_size);
+ if (!val)
+ continue;
+ count++;
+ }
+
+ map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
+ GFP_KERNEL);
+ if (!map->reg_defaults)
+ return -ENOMEM;
+
+ /* fill the reg_defaults */
+ map->num_reg_defaults = count;
+ for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
+ val = regcache_get_val(map->reg_defaults_raw,
+ i, map->cache_word_size);
+ if (!val)
+ continue;
+ map->reg_defaults[j].reg = i;
+ map->reg_defaults[j].def = val;
+ j++;
+ }
+
+ return 0;
+}
+
+int regcache_init(struct regmap *map)
+{
+ int ret;
+ int i;
+ void *tmp_buf;
+
+ if (map->cache_type == REGCACHE_NONE) {
+ map->cache_bypass = true;
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cache_types); i++)
+ if (cache_types[i]->type == map->cache_type)
+ break;
+
+ if (i == ARRAY_SIZE(cache_types)) {
+ dev_err(map->dev, "Could not match compress type: %d\n",
+ map->cache_type);
+ return -EINVAL;
+ }
+
+ map->cache = NULL;
+ map->cache_ops = cache_types[i];
+
+ if (!map->cache_ops->read ||
+ !map->cache_ops->write ||
+ !map->cache_ops->name)
+ return -EINVAL;
+
+ /* We still need to ensure that the reg_defaults
+ * won't vanish from under us. We'll need to make
+ * a copy of it.
+ */
+ if (map->reg_defaults) {
+ if (!map->num_reg_defaults)
+ return -EINVAL;
+ tmp_buf = kmemdup(map->reg_defaults, map->num_reg_defaults *
+ sizeof(struct reg_default), GFP_KERNEL);
+ if (!tmp_buf)
+ return -ENOMEM;
+ map->reg_defaults = tmp_buf;
+ } else {
+ /* Some devices such as PMICs don't have cache defaults,
+ * we cope with this by reading back the HW registers and
+ * crafting the cache defaults by hand.
+ */
+ ret = regcache_hw_init(map);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!map->max_register)
+ map->max_register = map->num_reg_defaults_raw;
+
+ if (map->cache_ops->init) {
+ dev_dbg(map->dev, "Initializing %s cache\n",
+ map->cache_ops->name);
+ return map->cache_ops->init(map);
+ }
+ return 0;
+}
+
+void regcache_exit(struct regmap *map)
+{
+ if (map->cache_type == REGCACHE_NONE)
+ return;
+
+ BUG_ON(!map->cache_ops);
+
+ kfree(map->reg_defaults);
+ if (map->cache_free)
+ kfree(map->reg_defaults_raw);
+
+ if (map->cache_ops->exit) {
+ dev_dbg(map->dev, "Destroying %s cache\n",
+ map->cache_ops->name);
+ map->cache_ops->exit(map);
+ }
+}
+
+/**
+ * regcache_read: Fetch the value of a given register from the cache.
+ *
+ * @map: map to configure.
+ * @reg: The register index.
+ * @value: The value to be returned.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_read(struct regmap *map,
+ unsigned int reg, unsigned int *value)
+{
+ if (map->cache_type == REGCACHE_NONE)
+ return -ENOSYS;
+
+ BUG_ON(!map->cache_ops);
+
+ if (!regmap_readable(map, reg))
+ return -EIO;
+
+ if (!regmap_volatile(map, reg))
+ return map->cache_ops->read(map, reg, value);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(regcache_read);
+
+/**
+ * regcache_write: Set the value of a given register in the cache.
+ *
+ * @map: map to configure.
+ * @reg: The register index.
+ * @value: The new register value.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_write(struct regmap *map,
+ unsigned int reg, unsigned int value)
+{
+ if (map->cache_type == REGCACHE_NONE)
+ return 0;
+
+ BUG_ON(!map->cache_ops);
+
+ if (!regmap_writeable(map, reg))
+ return -EIO;
+
+ if (!regmap_volatile(map, reg))
+ return map->cache_ops->write(map, reg, value);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regcache_write);
+
+/**
+ * regcache_sync: Sync the register cache with the hardware.
+ *
+ * @map: map to configure.
+ *
+ * Any registers that should not be synced should be marked as
+ * volatile. In general drivers can choose not to use the provided
+ * syncing functionality if they so require.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_sync(struct regmap *map)
+{
+ int ret = 0;
+ unsigned int val;
+ unsigned int i;
+ const char *name;
+ unsigned int bypass;
+
+ BUG_ON(!map->cache_ops);
+
+ mutex_lock(&map->lock);
+ /* Remember the initial bypass state */
+ bypass = map->cache_bypass;
+ dev_dbg(map->dev, "Syncing %s cache\n",
+ map->cache_ops->name);
+ name = map->cache_ops->name;
+ trace_regcache_sync(map->dev, name, "start");
+ if (map->cache_ops->sync) {
+ ret = map->cache_ops->sync(map);
+ } else {
+ for (i = 0; i < map->num_reg_defaults; i++) {
+ ret = regcache_read(map, i, &val);
+ if (ret < 0)
+ goto out;
+ map->cache_bypass = 1;
+ ret = _regmap_write(map, i, val);
+ map->cache_bypass = 0;
+ if (ret < 0)
+ goto out;
+ dev_dbg(map->dev, "Synced register %#x, value %#x\n",
+ map->reg_defaults[i].reg,
+ map->reg_defaults[i].def);
+ }
+
+ }
+out:
+ trace_regcache_sync(map->dev, name, "stop");
+ /* Restore the bypass state */
+ map->cache_bypass = bypass;
+ mutex_unlock(&map->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regcache_sync);
+
+/**
+ * regcache_cache_only: Put a register map into cache only mode
+ *
+ * @map: map to configure
+ * @enable: flag if writes should only update the cache and not the hardware
+ *
+ * When a register map is marked as cache only writes to the register
+ * map API will only update the register cache, they will not cause
+ * any hardware changes. This is useful for allowing portions of
+ * drivers to act as though the device were functioning as normal when
+ * it is disabled for power saving reasons.
+ */
+void regcache_cache_only(struct regmap *map, bool enable)
+{
+ mutex_lock(&map->lock);
+ WARN_ON(map->cache_bypass && enable);
+ map->cache_only = enable;
+ mutex_unlock(&map->lock);
+}
+EXPORT_SYMBOL_GPL(regcache_cache_only);
+
+/**
+ * regcache_cache_bypass: Put a register map into cache bypass mode
+ *
+ * @map: map to configure
+ * @enable: flag if writes should bypass the cache and go straight to the hardware
+ *
+ * When a register map is marked with the cache bypass option, writes
+ * to the register map API will only update the hardware and not the
+ * cache directly. This is useful when syncing the cache back to
+ * the hardware.
+ */
+void regcache_cache_bypass(struct regmap *map, bool enable)
+{
+ mutex_lock(&map->lock);
+ WARN_ON(map->cache_only && enable);
+ map->cache_bypass = enable;
+ mutex_unlock(&map->lock);
+}
+EXPORT_SYMBOL_GPL(regcache_cache_bypass);
+
+bool regcache_set_val(void *base, unsigned int idx,
+ unsigned int val, unsigned int word_size)
+{
+ switch (word_size) {
+ case 1: {
+ u8 *cache = base;
+ if (cache[idx] == val)
+ return true;
+ cache[idx] = val;
+ break;
+ }
+ case 2: {
+ u16 *cache = base;
+ if (cache[idx] == val)
+ return true;
+ cache[idx] = val;
+ break;
+ }
+ default:
+ BUG();
+ }
+ /* unreachable */
+ return false;
+}
+
+unsigned int regcache_get_val(const void *base, unsigned int idx,
+ unsigned int word_size)
+{
+ if (!base)
+ return -EINVAL;
+
+ switch (word_size) {
+ case 1: {
+ const u8 *cache = base;
+ return cache[idx];
+ }
+ case 2: {
+ const u16 *cache = base;
+ return cache[idx];
+ }
+ default:
+ BUG();
+ }
+ /* unreachable */
+ return -1;
+}
+
+int regcache_lookup_reg(struct regmap *map, unsigned int reg)
+{
+	unsigned int min, max, index;
+
+	if (!map->num_reg_defaults)
+		return -1;
+
+	min = 0;
+	max = map->num_reg_defaults - 1;
+	while (min <= max) {
+		index = min + (max - min) / 2;
+		if (map->reg_defaults[index].reg == reg)
+			return index;
+		if (map->reg_defaults[index].reg < reg) {
+			min = index + 1;
+		} else {
+			/* min/max are unsigned; avoid wrapping below zero */
+			if (index == 0)
+				break;
+			max = index - 1;
+		}
+	}
+ return -1;
+}
+
+static int regcache_insert_cmp(const void *a, const void *b)
+{
+ const struct reg_default *_a = a;
+ const struct reg_default *_b = b;
+
+ return _a->reg - _b->reg;
+}
+
+int regcache_insert_reg(struct regmap *map, unsigned int reg,
+ unsigned int val)
+{
+ void *tmp;
+
+ tmp = krealloc(map->reg_defaults,
+ (map->num_reg_defaults + 1) * sizeof(struct reg_default),
+ GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+ map->reg_defaults = tmp;
+ map->num_reg_defaults++;
+ map->reg_defaults[map->num_reg_defaults - 1].reg = reg;
+ map->reg_defaults[map->num_reg_defaults - 1].def = val;
+ sort(map->reg_defaults, map->num_reg_defaults,
+ sizeof(struct reg_default), regcache_insert_cmp, NULL);
+ return 0;
+}
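A typical cache consumer toggles cache-only mode around power transitions and
then replays the cache; a minimal sketch (the my_chip_* names are invented;
assumes a regmap created with a cache type other than REGCACHE_NONE):

#include <linux/regmap.h>

static void my_chip_suspend(struct regmap *map)
{
	/* Route writes into the cache while the part is powered down. */
	regcache_cache_only(map, true);
}

static int my_chip_resume(struct regmap *map)
{
	regcache_cache_only(map, false);
	/* Replay the cached register values into the re-powered hardware. */
	return regcache_sync(map);
}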
*/
#include "bcma_private.h"
+#include <linux/export.h>
#include <linux/bcma/bcma.h>
-static void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc,
- u32 offset, u32 mask, u32 set)
+static u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset)
{
- u32 value;
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
+ bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
+ return bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
+}
- bcma_cc_read32(cc, BCMA_CC_CHIPCTL_ADDR);
+void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value)
+{
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
+ bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value);
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_pll_write);
+
+void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
+ u32 set)
+{
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
+ bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
+ bcma_cc_maskset32(cc, BCMA_CC_PLLCTL_DATA, mask, set);
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_pll_maskset);
+
+void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc,
+ u32 offset, u32 mask, u32 set)
+{
bcma_cc_write32(cc, BCMA_CC_CHIPCTL_ADDR, offset);
bcma_cc_read32(cc, BCMA_CC_CHIPCTL_ADDR);
- value = bcma_cc_read32(cc, BCMA_CC_CHIPCTL_DATA);
- value &= mask;
- value |= set;
- bcma_cc_write32(cc, BCMA_CC_CHIPCTL_DATA, value);
- bcma_cc_read32(cc, BCMA_CC_CHIPCTL_DATA);
+ bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL_DATA, mask, set);
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_chipctl_maskset);
+
+void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
+ u32 set)
+{
+ bcma_cc_write32(cc, BCMA_CC_REGCTL_ADDR, offset);
+ bcma_cc_read32(cc, BCMA_CC_REGCTL_ADDR);
+ bcma_cc_maskset32(cc, BCMA_CC_REGCTL_DATA, mask, set);
}
+EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset);
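+
+/*
+ * All three maskset helpers above share one indirect-access idiom
+ * (illustrative shape only; ADDR_REG and DATA_REG stand for the
+ * respective ADDR/DATA register pairs): select the target via the
+ * address register, read it back to flush the posted write, then
+ * read-modify-write the data register:
+ *
+ *	bcma_cc_write32(cc, ADDR_REG, offset);
+ *	bcma_cc_read32(cc, ADDR_REG);
+ *	bcma_cc_maskset32(cc, DATA_REG, mask, set);
+ */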
static void bcma_pmu_pll_init(struct bcma_drv_cc *cc)
{
--- /dev/null
+/*
+ * Driver for the Micron P320 SSD
+ * Copyright (C) 2011 Micron Technology, Inc.
+ *
+ * Portions of this code were derived from works subjected to the
+ * following copyright:
+ * Copyright (C) 2009 Integrated Device Technology, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/ata.h>
+#include <linux/delay.h>
+#include <linux/hdreg.h>
+#include <linux/uaccess.h>
+#include <linux/random.h>
+#include <linux/smp.h>
+#include <linux/compat.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/genhd.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <../drivers/ata/ahci.h>
+#include "mtip32xx.h"
+
+#define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
+#define HW_CMD_TBL_SZ (AHCI_CMD_TBL_HDR_SZ + (MTIP_MAX_SG * 16))
+#define HW_CMD_TBL_AR_SZ (HW_CMD_TBL_SZ * MTIP_MAX_COMMAND_SLOTS)
+#define HW_PORT_PRIV_DMA_SZ \
+ (HW_CMD_SLOT_SZ + HW_CMD_TBL_AR_SZ + AHCI_RX_FIS_SZ)
+
+#define HOST_HSORG 0xFC
+#define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
+#define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
+#define HSORG_HWREV 0xFF00
+#define HSORG_STYLE 0x8
+#define HSORG_SLOTGROUPS 0x7
+
+#define PORT_COMMAND_ISSUE 0x38
+#define PORT_SDBV 0x7C
+
+#define PORT_OFFSET 0x100
+#define PORT_MEM_SIZE 0x80
+
+#define PORT_IRQ_ERR \
+ (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
+ PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
+ PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
+ PORT_IRQ_OVERFLOW)
+#define PORT_IRQ_LEGACY \
+ (PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
+#define PORT_IRQ_HANDLED \
+ (PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
+ PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
+ PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
+#define DEF_PORT_IRQ \
+ (PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)
+
+/* product numbers */
+#define MTIP_PRODUCT_UNKNOWN 0x00
+#define MTIP_PRODUCT_ASICFPGA 0x11
+
+/* Device instance number, incremented each time a device is probed. */
+static int instance;
+
+/*
+ * Global variable used to hold the major block device number
+ * allocated in mtip_init().
+ */
+static int mtip_major;
+
+static DEFINE_SPINLOCK(rssd_index_lock);
+static DEFINE_IDA(rssd_index_ida);
+
+#ifdef CONFIG_COMPAT
+struct mtip_compat_ide_task_request_s {
+ __u8 io_ports[8];
+ __u8 hob_ports[8];
+ ide_reg_valid_t out_flags;
+ ide_reg_valid_t in_flags;
+ int data_phase;
+ int req_cmd;
+ compat_ulong_t out_size;
+ compat_ulong_t in_size;
+};
+#endif
+
+static int mtip_exec_internal_command(struct mtip_port *port,
+ void *fis,
+ int fisLen,
+ dma_addr_t buffer,
+ int bufLen,
+ u32 opts,
+ gfp_t atomic,
+ unsigned long timeout);
+
+/*
+ * This function is called when a surprise removal of the card is
+ * suspected; it reads the vendor ID from the configuration space,
+ * which reads back as all ones once the device is gone.
+ *
+ * @pdev Pointer to the pci_dev structure.
+ *
+ * return value
+ * true if device removed, else false
+ */
+static bool mtip_check_surprise_removal(struct pci_dev *pdev)
+{
+ u16 vendor_id = 0;
+
+ /* Read the vendorID from the configuration space */
+ pci_read_config_word(pdev, 0x00, &vendor_id);
+ if (vendor_id == 0xFFFF)
+ return true; /* device removed */
+
+ return false; /* device present */
+}
+
+/*
+ * This function is called to clean up the pending commands in the
+ * command slots after a surprise removal of the device, returning an
+ * error to the upper layer for each of them.
+ *
+ * @dd Pointer to the DRIVER_DATA structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_command_cleanup(struct driver_data *dd)
+{
+ int group = 0, commandslot = 0, commandindex = 0;
+ struct mtip_cmd *command;
+ struct mtip_port *port = dd->port;
+
+ for (group = 0; group < 4; group++) {
+ for (commandslot = 0; commandslot < 32; commandslot++) {
+ if (!(port->allocated[group] & (1 << commandslot)))
+ continue;
+
+ commandindex = group << 5 | commandslot;
+ command = &port->commands[commandindex];
+
+ if (atomic_read(&command->active)
+ && (command->async_callback)) {
+ command->async_callback(command->async_data,
+ -ENODEV);
+ command->async_callback = NULL;
+ command->async_data = NULL;
+ }
+
+ dma_unmap_sg(&port->dd->pdev->dev,
+ command->sg,
+ command->scatter_ents,
+ command->direction);
+ }
+ }
+
+ up(&port->cmd_slot);
+
+ atomic_set(&dd->drv_cleanup_done, true);
+}
+
+/*
+ * Obtain an empty command slot.
+ *
+ * This function needs to be reentrant since it could be called
+ * at the same time on multiple CPUs. The allocation of the
+ * command slot must be atomic.
+ *
+ * @port Pointer to the port data structure.
+ *
+ * return value
+ * >= 0 Index of command slot obtained.
+ * -1 No command slots available.
+ */
+static int get_slot(struct mtip_port *port)
+{
+ int slot, i;
+ unsigned int num_command_slots = port->dd->slot_groups * 32;
+
+	/*
+	 * Try 10 times, because there is a small race here.
+	 * That's OK, because it's still cheaper than a lock.
+	 *
+	 * Race: since this section is not protected by a lock, the same
+	 * bit could be chosen by different process contexts running on
+	 * different processors. Instead of a costly lock, we retry in a
+	 * bounded loop.
+	 */
+ for (i = 0; i < 10; i++) {
+ slot = find_next_zero_bit(port->allocated,
+ num_command_slots, 1);
+ if ((slot < num_command_slots) &&
+ (!test_and_set_bit(slot, port->allocated)))
+ return slot;
+ }
+ dev_warn(&port->dd->pdev->dev, "Failed to get a tag.\n");
+
+ if (mtip_check_surprise_removal(port->dd->pdev)) {
+ /* Device not present, clean outstanding commands */
+ mtip_command_cleanup(port->dd);
+ }
+ return -1;
+}
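+
+/*
+ * In miniature, the lock-free allocation idiom used above is
+ * (illustrative only, ignoring the bounded retry):
+ *
+ *	do {
+ *		bit = find_next_zero_bit(bitmap, nbits, 0);
+ *	} while (bit < nbits && test_and_set_bit(bit, bitmap));
+ *
+ * test_and_set_bit() is atomic, so two CPUs racing for the same free
+ * bit cannot both claim it; the loser simply retries.
+ */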
+
+/*
+ * Release a command slot.
+ *
+ * @port Pointer to the port data structure.
+ * @tag Tag of command to release
+ *
+ * return value
+ * None
+ */
+static inline void release_slot(struct mtip_port *port, int tag)
+{
+ smp_mb__before_clear_bit();
+ clear_bit(tag, port->allocated);
+ smp_mb__after_clear_bit();
+}
+
+/*
+ * Reset the HBA (without sleeping)
+ *
+ * Just like hba_reset, except it does not sleep, so it can be
+ * run from interrupt/tasklet context.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0 The reset was successful.
+ * -1 The HBA Reset bit did not clear.
+ */
+static int hba_reset_nosleep(struct driver_data *dd)
+{
+ unsigned long timeout;
+
+ /* Chip quirk: quiesce any chip function */
+ mdelay(10);
+
+ /* Set the reset bit */
+ writel(HOST_RESET, dd->mmio + HOST_CTL);
+
+ /* Flush */
+ readl(dd->mmio + HOST_CTL);
+
+ /*
+ * Wait 10ms then spin for up to 1 second
+ * waiting for reset acknowledgement
+ */
+ timeout = jiffies + msecs_to_jiffies(1000);
+ mdelay(10);
+ while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
+ && time_before(jiffies, timeout))
+ mdelay(1);
+
+ if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
+ return -1;
+
+ return 0;
+}
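+
+/*
+ * The poll-with-deadline idiom above recurs throughout this driver;
+ * its general shape (illustrative only, "condition" is a placeholder):
+ *
+ *	timeout = jiffies + msecs_to_jiffies(ms);
+ *	while (!condition && time_before(jiffies, timeout))
+ *		mdelay(1);
+ *	if (!condition)
+ *		handle the timeout;
+ */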
+
+/*
+ * Issue a command to the hardware.
+ *
+ * Set the appropriate bit in the s_active and Command Issue hardware
+ * registers, causing hardware command processing to begin.
+ *
+ * @port Pointer to the port structure.
+ * @tag The tag of the command to be issued.
+ *
+ * return value
+ * None
+ */
+static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
+{
+ unsigned long flags = 0;
+
+ atomic_set(&port->commands[tag].active, 1);
+
+ spin_lock_irqsave(&port->cmd_issue_lock, flags);
+
+ writel((1 << MTIP_TAG_BIT(tag)),
+ port->s_active[MTIP_TAG_INDEX(tag)]);
+ writel((1 << MTIP_TAG_BIT(tag)),
+ port->cmd_issue[MTIP_TAG_INDEX(tag)]);
+
+ spin_unlock_irqrestore(&port->cmd_issue_lock, flags);
+}
+
+/*
+ * Enable/disable the reception of FIS
+ *
+ * @port Pointer to the port data structure
+ * @enable 1 to enable, 0 to disable
+ *
+ * return value
+ * Previous state: 1 enabled, 0 disabled
+ */
+static int mtip_enable_fis(struct mtip_port *port, int enable)
+{
+ u32 tmp;
+
+	/* enable/disable FIS reception */
+ tmp = readl(port->mmio + PORT_CMD);
+ if (enable)
+ writel(tmp | PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
+ else
+ writel(tmp & ~PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
+
+ /* Flush */
+ readl(port->mmio + PORT_CMD);
+
+ return (((tmp & PORT_CMD_FIS_RX) == PORT_CMD_FIS_RX));
+}
+
+/*
+ * Enable/disable the DMA engine
+ *
+ * @port Pointer to the port data structure
+ * @enable 1 to enable, 0 to disable
+ *
+ * return value
+ * Previous state: 1 enabled, 0 disabled.
+ */
+static int mtip_enable_engine(struct mtip_port *port, int enable)
+{
+ u32 tmp;
+
+	/* enable/disable the DMA engine */
+ tmp = readl(port->mmio + PORT_CMD);
+ if (enable)
+ writel(tmp | PORT_CMD_START, port->mmio + PORT_CMD);
+ else
+ writel(tmp & ~PORT_CMD_START, port->mmio + PORT_CMD);
+
+ readl(port->mmio + PORT_CMD);
+ return (((tmp & PORT_CMD_START) == PORT_CMD_START));
+}
+
+/*
+ * Enables the port DMA engine and FIS reception.
+ *
+ * return value
+ * None
+ */
+static inline void mtip_start_port(struct mtip_port *port)
+{
+ /* Enable FIS reception */
+ mtip_enable_fis(port, 1);
+
+ /* Enable the DMA engine */
+ mtip_enable_engine(port, 1);
+}
+
+/*
+ * Deinitialize a port by disabling port interrupts, the DMA engine,
+ * and FIS reception.
+ *
+ * @port Pointer to the port structure
+ *
+ * return value
+ * None
+ */
+static inline void mtip_deinit_port(struct mtip_port *port)
+{
+ /* Disable interrupts on this port */
+ writel(0, port->mmio + PORT_IRQ_MASK);
+
+ /* Disable the DMA engine */
+ mtip_enable_engine(port, 0);
+
+ /* Disable FIS reception */
+ mtip_enable_fis(port, 0);
+}
+
+/*
+ * Initialize a port.
+ *
+ * This function deinitializes the port by calling mtip_deinit_port() and
+ * then initializes it by setting the command header and RX FIS addresses,
+ * clearing the SError register and any pending port interrupts before
+ * re-enabling the default set of port interrupts.
+ *
+ * @port Pointer to the port structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_init_port(struct mtip_port *port)
+{
+ int i;
+ mtip_deinit_port(port);
+
+ /* Program the command list base and FIS base addresses */
+ if (readl(port->dd->mmio + HOST_CAP) & HOST_CAP_64) {
+ writel((port->command_list_dma >> 16) >> 16,
+ port->mmio + PORT_LST_ADDR_HI);
+ writel((port->rxfis_dma >> 16) >> 16,
+ port->mmio + PORT_FIS_ADDR_HI);
+ }
+
+ writel(port->command_list_dma & 0xffffffff,
+ port->mmio + PORT_LST_ADDR);
+ writel(port->rxfis_dma & 0xffffffff, port->mmio + PORT_FIS_ADDR);
+
+ /* Clear SError */
+ writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);
+
+ /* reset the completed registers.*/
+ for (i = 0; i < port->dd->slot_groups; i++)
+ writel(0xFFFFFFFF, port->completed[i]);
+
+ /* Clear any pending interrupts for this port */
+ writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);
+
+ /* Enable port interrupts */
+ writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK);
+}
+
+/*
+ * Restart a port
+ *
+ * @port Pointer to the port data structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_restart_port(struct mtip_port *port)
+{
+ unsigned long timeout;
+
+ /* Disable the DMA engine */
+ mtip_enable_engine(port, 0);
+
+ /* Chip quirk: wait up to 500ms for PxCMD.CR == 0 */
+ timeout = jiffies + msecs_to_jiffies(500);
+ while ((readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON)
+ && time_before(jiffies, timeout))
+ ;
+
+ /*
+ * Chip quirk: escalate to hba reset if
+ * PxCMD.CR not clear after 500 ms
+ */
+ if (readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON) {
+ dev_warn(&port->dd->pdev->dev,
+ "PxCMD.CR not clear, escalating reset\n");
+
+ if (hba_reset_nosleep(port->dd))
+ dev_err(&port->dd->pdev->dev,
+ "HBA reset escalation failed.\n");
+
+ /* 30 ms delay before com reset to quiesce chip */
+ mdelay(30);
+ }
+
+ dev_warn(&port->dd->pdev->dev, "Issuing COM reset\n");
+
+ /* Set PxSCTL.DET */
+ writel(readl(port->mmio + PORT_SCR_CTL) |
+ 1, port->mmio + PORT_SCR_CTL);
+ readl(port->mmio + PORT_SCR_CTL);
+
+ /* Wait 1 ms to quiesce chip function */
+ timeout = jiffies + msecs_to_jiffies(1);
+ while (time_before(jiffies, timeout))
+ ;
+
+ /* Clear PxSCTL.DET */
+ writel(readl(port->mmio + PORT_SCR_CTL) & ~1,
+ port->mmio + PORT_SCR_CTL);
+ readl(port->mmio + PORT_SCR_CTL);
+
+ /* Wait 500 ms for bit 0 of PORT_SCR_STS to be set */
+ timeout = jiffies + msecs_to_jiffies(500);
+ while (((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
+ && time_before(jiffies, timeout))
+ ;
+
+ if ((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
+ dev_warn(&port->dd->pdev->dev,
+ "COM reset failed\n");
+
+ /* Clear SError, the PxSERR.DIAG.x should be set so clear it */
+ writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);
+
+ /* Enable the DMA engine */
+ mtip_enable_engine(port, 1);
+}
+
+/*
+ * Called periodically to see if any read/write commands are
+ * taking too long to complete.
+ *
+ * @data Pointer to the PORT data structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_timeout_function(unsigned long int data)
+{
+	struct mtip_port *port = (struct mtip_port *) data;
+	struct host_to_dev_fis *fis;
+	struct mtip_cmd *command;
+	int tag, cmdto_cnt = 0;
+	unsigned int bit, group;
+	unsigned int num_command_slots;
+
+	if (unlikely(!port))
+		return;
+
+	/* Dereference port only after the NULL check above. */
+	num_command_slots = port->dd->slot_groups * 32;
+
+ if (atomic_read(&port->dd->resumeflag) == true) {
+ mod_timer(&port->cmd_timer,
+ jiffies + msecs_to_jiffies(30000));
+ return;
+ }
+
+ for (tag = 0; tag < num_command_slots; tag++) {
+ /*
+ * Skip internal command slot as it has
+ * its own timeout mechanism
+ */
+ if (tag == MTIP_TAG_INTERNAL)
+ continue;
+
+ if (atomic_read(&port->commands[tag].active) &&
+ (time_after(jiffies, port->commands[tag].comp_time))) {
+ group = tag >> 5;
+ bit = tag & 0x1f;
+
+ command = &port->commands[tag];
+ fis = (struct host_to_dev_fis *) command->command;
+
+ dev_warn(&port->dd->pdev->dev,
+ "Timeout for command tag %d\n", tag);
+
+ cmdto_cnt++;
+ if (cmdto_cnt == 1)
+ atomic_inc(&port->dd->eh_active);
+
+ /*
+ * Clear the completed bit. This should prevent
+ * any interrupt handlers from trying to retire
+ * the command.
+ */
+ writel(1 << bit, port->completed[group]);
+
+ /* Call the async completion callback. */
+ if (likely(command->async_callback))
+ command->async_callback(command->async_data,
+ -EIO);
+ command->async_callback = NULL;
+ command->comp_func = NULL;
+
+ /* Unmap the DMA scatter list entries */
+ dma_unmap_sg(&port->dd->pdev->dev,
+ command->sg,
+ command->scatter_ents,
+ command->direction);
+
+ /*
+ * Clear the allocated bit and active tag for the
+ * command.
+ */
+ atomic_set(&port->commands[tag].active, 0);
+ release_slot(port, tag);
+
+ up(&port->cmd_slot);
+ }
+ }
+
+ if (cmdto_cnt) {
+ dev_warn(&port->dd->pdev->dev,
+ "%d commands timed out: restarting port",
+ cmdto_cnt);
+ mtip_restart_port(port);
+ atomic_dec(&port->dd->eh_active);
+ }
+
+ /* Restart the timer */
+ mod_timer(&port->cmd_timer,
+ jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
+}
+
+/*
+ * IO completion function.
+ *
+ * This completion function is called by the driver ISR when a
+ * command that was issued by the kernel completes. It first calls the
+ * asynchronous completion function which normally calls back into the block
+ * layer passing the asynchronous callback data, then unmaps the
+ * scatter list associated with the completed command, and finally
+ * clears the allocated bit associated with the completed command.
+ *
+ * @port Pointer to the port data structure.
+ * @tag Tag of the command.
+ * @data Pointer to driver_data.
+ * @status Completion status.
+ *
+ * return value
+ * None
+ */
+static void mtip_async_complete(struct mtip_port *port,
+ int tag,
+ void *data,
+ int status)
+{
+ struct mtip_cmd *command;
+ struct driver_data *dd = data;
+ int cb_status = status ? -EIO : 0;
+
+ if (unlikely(!dd) || unlikely(!port))
+ return;
+
+ command = &port->commands[tag];
+
+ if (unlikely(status == PORT_IRQ_TF_ERR)) {
+ dev_warn(&port->dd->pdev->dev,
+ "Command tag %d failed due to TFE\n", tag);
+ }
+
+ /* Upper layer callback */
+ if (likely(command->async_callback))
+ command->async_callback(command->async_data, cb_status);
+
+ command->async_callback = NULL;
+ command->comp_func = NULL;
+
+ /* Unmap the DMA scatter list entries */
+ dma_unmap_sg(&dd->pdev->dev,
+ command->sg,
+ command->scatter_ents,
+ command->direction);
+
+ /* Clear the allocated and active bits for the command */
+ atomic_set(&port->commands[tag].active, 0);
+ release_slot(port, tag);
+
+ up(&port->cmd_slot);
+}
+
+/*
+ * Internal command completion callback function.
+ *
+ * This function is normally called by the driver ISR when an internal
+ * command completed. This function signals the command completion by
+ * calling complete().
+ *
+ * @port Pointer to the port data structure.
+ * @tag Tag of the command that has completed.
+ * @data Pointer to a completion structure.
+ * @status Completion status.
+ *
+ * return value
+ * None
+ */
+static void mtip_completion(struct mtip_port *port,
+ int tag,
+ void *data,
+ int status)
+{
+ struct mtip_cmd *command = &port->commands[tag];
+ struct completion *waiting = data;
+ if (unlikely(status == PORT_IRQ_TF_ERR))
+ dev_warn(&port->dd->pdev->dev,
+ "Internal command %d completed with TFE\n", tag);
+
+ command->async_callback = NULL;
+ command->comp_func = NULL;
+
+ complete(waiting);
+}
+
+/*
+ * Helper function for tag logging
+ */
+static void print_tags(struct driver_data *dd,
+ char *msg,
+ unsigned long *tagbits)
+{
+ unsigned int tag, count = 0;
+
+ for (tag = 0; tag < (dd->slot_groups) * 32; tag++) {
+ if (test_bit(tag, tagbits))
+ count++;
+ }
+ if (count)
+ dev_info(&dd->pdev->dev, "%s [%i tags]\n", msg, count);
+}
+
+/*
+ * Handle an error.
+ *
+ * @dd Pointer to the DRIVER_DATA structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_handle_tfe(struct driver_data *dd)
+{
+ int group, tag, bit, reissue;
+ struct mtip_port *port;
+ struct mtip_cmd *command;
+ u32 completed;
+ struct host_to_dev_fis *fis;
+ unsigned long tagaccum[SLOTBITS_IN_LONGS];
+
+ dev_warn(&dd->pdev->dev, "Taskfile error\n");
+
+ port = dd->port;
+
+ /* Stop the timer to prevent command timeouts. */
+ del_timer(&port->cmd_timer);
+
+ /* Set eh_active */
+ atomic_inc(&dd->eh_active);
+
+ /* Loop through all the groups */
+ for (group = 0; group < dd->slot_groups; group++) {
+ completed = readl(port->completed[group]);
+
+ /* clear completed status register in the hardware.*/
+ writel(completed, port->completed[group]);
+
+ /* clear the tag accumulator */
+ memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
+
+ /* Process successfully completed commands */
+ for (bit = 0; bit < 32 && completed; bit++) {
+ if (!(completed & (1<<bit)))
+ continue;
+ tag = (group << 5) + bit;
+
+ /* Skip the internal command slot */
+ if (tag == MTIP_TAG_INTERNAL)
+ continue;
+
+ command = &port->commands[tag];
+ if (likely(command->comp_func)) {
+ set_bit(tag, tagaccum);
+ atomic_set(&port->commands[tag].active, 0);
+ command->comp_func(port,
+ tag,
+ command->comp_data,
+ 0);
+ } else {
+ dev_err(&port->dd->pdev->dev,
+ "Missing completion func for tag %d",
+ tag);
+ if (mtip_check_surprise_removal(dd->pdev)) {
+ mtip_command_cleanup(dd);
+ /* don't proceed further */
+ return;
+ }
+ }
+ }
+ }
+ print_tags(dd, "TFE tags completed:", tagaccum);
+
+ /* Restart the port */
+ mdelay(20);
+ mtip_restart_port(port);
+
+ /* clear the tag accumulator */
+ memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
+
+ /* Loop through all the groups */
+ for (group = 0; group < dd->slot_groups; group++) {
+ for (bit = 0; bit < 32; bit++) {
+ reissue = 1;
+ tag = (group << 5) + bit;
+
+ /* If the active bit is set re-issue the command */
+ if (atomic_read(&port->commands[tag].active) == 0)
+ continue;
+
+ fis = (struct host_to_dev_fis *)
+ port->commands[tag].command;
+
+ /* Should re-issue? */
+ if (tag == MTIP_TAG_INTERNAL ||
+ fis->command == ATA_CMD_SET_FEATURES)
+ reissue = 0;
+
+ /*
+ * First check if this command has
+ * exceeded its retries.
+ */
+ if (reissue &&
+ (port->commands[tag].retries-- > 0)) {
+
+ set_bit(tag, tagaccum);
+
+ /* Update the timeout value. */
+ port->commands[tag].comp_time =
+ jiffies + msecs_to_jiffies(
+ MTIP_NCQ_COMMAND_TIMEOUT_MS);
+ /* Re-issue the command. */
+ mtip_issue_ncq_command(port, tag);
+
+ continue;
+ }
+
+ /* Retire a command that will not be reissued */
+ dev_warn(&port->dd->pdev->dev,
+ "retiring tag %d\n", tag);
+ atomic_set(&port->commands[tag].active, 0);
+
+ if (port->commands[tag].comp_func)
+ port->commands[tag].comp_func(
+ port,
+ tag,
+ port->commands[tag].comp_data,
+ PORT_IRQ_TF_ERR);
+ else
+ dev_warn(&port->dd->pdev->dev,
+ "Bad completion for tag %d\n",
+ tag);
+ }
+ }
+ print_tags(dd, "TFE tags reissued:", tagaccum);
+
+ /* Decrement eh_active */
+ atomic_dec(&dd->eh_active);
+
+ mod_timer(&port->cmd_timer,
+ jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
+}
+
+/*
+ * Handle a set device bits interrupt
+ */
+static inline void mtip_process_sdbf(struct driver_data *dd)
+{
+ struct mtip_port *port = dd->port;
+ int group, tag, bit;
+ u32 completed;
+ struct mtip_cmd *command;
+
+ /* walk all bits in all slot groups */
+ for (group = 0; group < dd->slot_groups; group++) {
+ completed = readl(port->completed[group]);
+
+ /* clear completed status register in the hardware.*/
+ writel(completed, port->completed[group]);
+
+ /* Process completed commands. */
+ for (bit = 0;
+ (bit < 32) && completed;
+ bit++, completed >>= 1) {
+ if (completed & 0x01) {
+ tag = (group << 5) | bit;
+
+ /* skip internal command slot. */
+ if (unlikely(tag == MTIP_TAG_INTERNAL))
+ continue;
+
+ command = &port->commands[tag];
+
+ /* make internal callback */
+ if (likely(command->comp_func)) {
+ command->comp_func(
+ port,
+ tag,
+ command->comp_data,
+ 0);
+ } else {
+ dev_warn(&dd->pdev->dev,
+ "Null completion "
+ "for tag %d",
+ tag);
+
+ if (mtip_check_surprise_removal(
+ dd->pdev)) {
+ mtip_command_cleanup(dd);
+ return;
+ }
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Process legacy pio and d2h interrupts
+ */
+static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
+{
+ struct mtip_port *port = dd->port;
+ struct mtip_cmd *cmd = &port->commands[MTIP_TAG_INTERNAL];
+
+ if (port->internal_cmd_in_progress &&
+ cmd != NULL &&
+ !(readl(port->cmd_issue[MTIP_TAG_INTERNAL])
+ & (1 << MTIP_TAG_INTERNAL))) {
+ if (cmd->comp_func) {
+ cmd->comp_func(port,
+ MTIP_TAG_INTERNAL,
+ cmd->comp_data,
+ 0);
+ return;
+ }
+ }
+
+ dev_warn(&dd->pdev->dev, "IRQ status 0x%x ignored.\n", port_stat);
+
+ return;
+}
+
+/*
+ * Demux and handle errors
+ */
+static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
+{
+ if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR)))
+ mtip_handle_tfe(dd);
+
+ if (unlikely(port_stat & PORT_IRQ_CONNECT)) {
+ dev_warn(&dd->pdev->dev,
+ "Clearing PxSERR.DIAG.x\n");
+ writel((1 << 26), dd->port->mmio + PORT_SCR_ERR);
+ }
+
+ if (unlikely(port_stat & PORT_IRQ_PHYRDY)) {
+ dev_warn(&dd->pdev->dev,
+ "Clearing PxSERR.DIAG.n\n");
+ writel((1 << 16), dd->port->mmio + PORT_SCR_ERR);
+ }
+
+ if (unlikely(port_stat & ~PORT_IRQ_HANDLED)) {
+ dev_warn(&dd->pdev->dev,
+ "Port stat errors %x unhandled\n",
+ (port_stat & ~PORT_IRQ_HANDLED));
+ }
+}
+
+static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
+{
+	struct driver_data *dd = data;
+ struct mtip_port *port = dd->port;
+ u32 hba_stat, port_stat;
+ int rv = IRQ_NONE;
+
+ hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
+ if (hba_stat) {
+ rv = IRQ_HANDLED;
+
+ /* Acknowledge the interrupt status on the port.*/
+ port_stat = readl(port->mmio + PORT_IRQ_STAT);
+ writel(port_stat, port->mmio + PORT_IRQ_STAT);
+
+ /* Demux port status */
+ if (likely(port_stat & PORT_IRQ_SDB_FIS))
+ mtip_process_sdbf(dd);
+
+ if (unlikely(port_stat & PORT_IRQ_ERR)) {
+ if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
+ mtip_command_cleanup(dd);
+ /* don't proceed further */
+ return IRQ_HANDLED;
+ }
+
+ mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
+ }
+
+ if (unlikely(port_stat & PORT_IRQ_LEGACY))
+ mtip_process_legacy(dd, port_stat & PORT_IRQ_LEGACY);
+ }
+
+ /* acknowledge interrupt */
+ writel(hba_stat, dd->mmio + HOST_IRQ_STAT);
+
+ return rv;
+}
+
+/*
+ * Wrapper for mtip_handle_irq
+ * (ignores return code)
+ */
+static void mtip_tasklet(unsigned long data)
+{
+ mtip_handle_irq((struct driver_data *) data);
+}
+
+/*
+ * HBA interrupt subroutine.
+ *
+ * @irq IRQ number.
+ * @instance Pointer to the driver data structure.
+ *
+ * return value
+ * IRQ_HANDLED A HBA interrupt was pending and handled.
+ * IRQ_NONE This interrupt was not for the HBA.
+ */
+static irqreturn_t mtip_irq_handler(int irq, void *instance)
+{
+ struct driver_data *dd = instance;
+ tasklet_schedule(&dd->tasklet);
+ return IRQ_HANDLED;
+}
+
+static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
+{
+ atomic_set(&port->commands[tag].active, 1);
+ writel(1 << MTIP_TAG_BIT(tag),
+ port->cmd_issue[MTIP_TAG_INDEX(tag)]);
+}
+
+/*
+ * Wait for port to quiesce
+ *
+ * @port Pointer to port data structure
+ * @timeout Max duration to wait (ms)
+ *
+ * return value
+ * 0 Success
+ * -EBUSY Commands still active
+ */
+static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
+{
+ unsigned long to;
+ unsigned int n, active;
+
+ to = jiffies + msecs_to_jiffies(timeout);
+ do {
+ /*
+ * Ignore s_active bit 0 of array element 0.
+ * This bit will always be set
+ */
+ active = readl(port->s_active[0]) & 0xfffffffe;
+ for (n = 1; n < port->dd->slot_groups; n++)
+ active |= readl(port->s_active[n]);
+
+ if (!active)
+ break;
+
+ msleep(20);
+ } while (time_before(jiffies, to));
+
+ return active ? -EBUSY : 0;
+}
+
+/*
+ * Execute an internal command and wait for the completion.
+ *
+ * @port Pointer to the port data structure.
+ * @fis Pointer to the FIS that describes the command.
+ * @fisLen Length in WORDS of the FIS.
+ * @buffer DMA accessible for command data.
+ * @bufLen Length, in bytes, of the data buffer.
+ * @opts Command header options, excluding the FIS length
+ * and the number of PRD entries.
+ * @timeout Time in ms to wait for the command to complete.
+ *
+ * return value
+ * 0 Command completed successfully.
+ * -EFAULT The buffer address is not correctly aligned.
+ * -EBUSY Internal command or other IO in progress.
+ * -EAGAIN Time out waiting for command to complete.
+ */
+static int mtip_exec_internal_command(struct mtip_port *port,
+ void *fis,
+ int fisLen,
+ dma_addr_t buffer,
+ int bufLen,
+ u32 opts,
+ gfp_t atomic,
+ unsigned long timeout)
+{
+ struct mtip_cmd_sg *command_sg;
+ DECLARE_COMPLETION_ONSTACK(wait);
+ int rv = 0;
+ struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL];
+
+ /* Make sure the buffer is 8 byte aligned. This is asic specific. */
+ if (buffer & 0x00000007) {
+ dev_err(&port->dd->pdev->dev,
+ "SG buffer is not 8 byte aligned\n");
+ return -EFAULT;
+ }
+
+ /* Only one internal command should be running at a time */
+ if (test_and_set_bit(MTIP_TAG_INTERNAL, port->allocated)) {
+ dev_warn(&port->dd->pdev->dev,
+ "Internal command already active\n");
+ return -EBUSY;
+ }
+ port->internal_cmd_in_progress = 1;
+
+ if (atomic == GFP_KERNEL) {
+ /* wait for io to complete if non atomic */
+ if (mtip_quiesce_io(port, 5000) < 0) {
+ dev_warn(&port->dd->pdev->dev,
+ "Failed to quiesce IO\n");
+ release_slot(port, MTIP_TAG_INTERNAL);
+ port->internal_cmd_in_progress = 0;
+ return -EBUSY;
+ }
+
+ /* Set the completion function and data for the command. */
+ int_cmd->comp_data = &wait;
+ int_cmd->comp_func = mtip_completion;
+
+ } else {
+ /* Clear completion - we're going to poll */
+ int_cmd->comp_data = NULL;
+ int_cmd->comp_func = NULL;
+ }
+
+ /* Copy the command to the command table */
+ memcpy(int_cmd->command, fis, fisLen*4);
+
+ /* Populate the SG list */
+ int_cmd->command_header->opts =
+ cpu_to_le32(opts | fisLen);
+ if (bufLen) {
+ command_sg = int_cmd->command + AHCI_CMD_TBL_HDR_SZ;
+
+ command_sg->info = cpu_to_le32((bufLen-1) & 0x3fffff);
+ command_sg->dba = cpu_to_le32(buffer & 0xffffffff);
+ command_sg->dba_upper = cpu_to_le32((buffer >> 16) >> 16);
+
+ int_cmd->command_header->opts |= cpu_to_le32((1 << 16));
+ }
+
+ /* Populate the command header */
+ int_cmd->command_header->byte_count = 0;
+
+ /* Issue the command to the hardware */
+ mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
+
+ /* Poll if atomic, wait_for_completion otherwise */
+ if (atomic == GFP_KERNEL) {
+ /* Wait for the command to complete or timeout. */
+ if (wait_for_completion_timeout(
+ &wait,
+ msecs_to_jiffies(timeout)) == 0) {
+ dev_err(&port->dd->pdev->dev,
+ "Internal command did not complete [%d]\n",
+ atomic);
+ rv = -EAGAIN;
+ }
+
+ if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
+ & (1 << MTIP_TAG_INTERNAL)) {
+ dev_warn(&port->dd->pdev->dev,
+ "Retiring internal command but CI is 1.\n");
+ }
+
+ } else {
+ /* Spin for <timeout> checking if command still outstanding */
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while ((readl(
+ port->cmd_issue[MTIP_TAG_INTERNAL])
+ & (1 << MTIP_TAG_INTERNAL))
+ && time_before(jiffies, timeout))
+ ;
+
+ if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
+ & (1 << MTIP_TAG_INTERNAL)) {
+ dev_err(&port->dd->pdev->dev,
+ "Internal command did not complete [%d]\n",
+ atomic);
+ rv = -EAGAIN;
+ }
+ }
+
+ /* Clear the allocated and active bits for the internal command. */
+ atomic_set(&int_cmd->active, 0);
+ release_slot(port, MTIP_TAG_INTERNAL);
+ port->internal_cmd_in_progress = 0;
+
+ return rv;
+}
+
+/*
+ * Byte-swap ATA ID strings.
+ *
+ * ATA identify data contains strings in byte-swapped 16-bit words.
+ * They must be swapped (on all architectures) to be usable as C strings.
+ * This function swaps bytes in-place.
+ *
+ * @buf The buffer location of the string
+ * @len The number of bytes to swap
+ *
+ * return value
+ * None
+ */
+static inline void ata_swap_string(u16 *buf, unsigned int len)
+{
+ int i;
+ for (i = 0; i < (len/2); i++)
+ be16_to_cpus(&buf[i]);
+}
+
+/*
+ * Request the device identity information.
+ *
+ * If a user space buffer is not specified, i.e. is NULL, the
+ * identify information is still read from the drive and placed
+ * into the identify data buffer (@e port->identify) in the
+ * port data structure.
+ * When the identify buffer contains valid identify information @e
+ * port->identify_valid is non-zero.
+ *
+ * @port Pointer to the port structure.
+ * @user_buffer A user space buffer where the identify data should be
+ * copied.
+ *
+ * return value
+ * 0 Command completed successfully.
+ * -EFAULT An error occurred while copying data to the user buffer.
+ * -1 Command failed.
+ */
+static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
+{
+ int rv = 0;
+ struct host_to_dev_fis fis;
+
+ down_write(&port->dd->internal_sem);
+
+ /* Build the FIS. */
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
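+ /* Register Host-to-Device FIS (type 0x27) with the C (command) bit set. */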
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = ATA_CMD_ID_ATA;
+
+ /* Set the identify information as invalid. */
+ port->identify_valid = 0;
+
+ /* Clear the identify information. */
+ memset(port->identify, 0, sizeof(u16) * ATA_ID_WORDS);
+
+ /* Execute the command. */
+ if (mtip_exec_internal_command(port,
+ &fis,
+ 5,
+ port->identify_dma,
+ sizeof(u16) * ATA_ID_WORDS,
+ 0,
+ GFP_KERNEL,
+ MTIP_INTERNAL_COMMAND_TIMEOUT_MS)
+ < 0) {
+ rv = -1;
+ goto out;
+ }
+
+ /*
+ * Perform any necessary byte-swapping. Yes, the kernel does in fact
+ * perform field-sensitive swapping on the string fields.
+ * See the kernel use of ata_id_string() for proof of this.
+ */
+#ifdef __LITTLE_ENDIAN
+ ata_swap_string(port->identify + 27, 40); /* model string*/
+ ata_swap_string(port->identify + 23, 8); /* firmware string*/
+ ata_swap_string(port->identify + 10, 20); /* serial# string*/
+#else
+ {
+ int i;
+ for (i = 0; i < ATA_ID_WORDS; i++)
+ port->identify[i] = le16_to_cpu(port->identify[i]);
+ }
+#endif
+
+ /* Set the identify buffer as valid. */
+ port->identify_valid = 1;
+
+ if (user_buffer) {
+ if (copy_to_user(
+ user_buffer,
+ port->identify,
+ ATA_ID_WORDS * sizeof(u16))) {
+ rv = -EFAULT;
+ goto out;
+ }
+ }
+
+out:
+ up_write(&port->dd->internal_sem);
+ return rv;
+}
+
+/*
+ * Issue a standby immediate command to the device.
+ *
+ * @port Pointer to the port structure.
+ *
+ * return value
+ * 0 Command was executed successfully.
+ * -1 An error occurred while executing the command.
+ */
+static int mtip_standby_immediate(struct mtip_port *port)
+{
+ int rv;
+ struct host_to_dev_fis fis;
+
+ down_write(&port->dd->internal_sem);
+
+ /* Build the FIS. */
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = ATA_CMD_STANDBYNOW1;
+
+ /* Execute the command. Use a 15-second timeout for large drives. */
+ rv = mtip_exec_internal_command(port,
+ &fis,
+ 5,
+ 0,
+ 0,
+ 0,
+ GFP_KERNEL,
+ 15000);
+
+ up_write(&port->dd->internal_sem);
+
+ return rv;
+}
+
+/*
+ * Get the drive capacity.
+ *
+ * @dd Pointer to the device data structure.
+ * @sectors Pointer to the variable that will receive the sector count.
+ *
+ * return value
+ * 1 Capacity was returned successfully.
+ * 0 The identify information is invalid.
+ */
+static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
+{
+ struct mtip_port *port = dd->port;
+ u64 total, raw0, raw1, raw2, raw3;
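+
+ /* Identify words 100-103 hold the total number of 48-bit LBA sectors. */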
+ raw0 = port->identify[100];
+ raw1 = port->identify[101];
+ raw2 = port->identify[102];
+ raw3 = port->identify[103];
+ total = raw0 | raw1<<16 | raw2<<32 | raw3<<48;
+ *sectors = total;
+ return (bool) !!port->identify_valid;
+}
+
+/*
+ * Reset the HBA.
+ *
+ * Resets the HBA by setting the HBA Reset bit in the Global
+ * HBA Control register. After setting the HBA Reset bit the
+ * function waits for 1 second before reading the HBA Reset
+ * bit to make sure it has cleared. If HBA Reset is still set,
+ * an error is returned. This function sleeps, so it must not be
+ * called from atomic context.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0 The reset was successful.
+ * -1 The HBA Reset bit did not clear.
+ */
+static int mtip_hba_reset(struct driver_data *dd)
+{
+ mtip_deinit_port(dd->port);
+
+ /* Set the reset bit */
+ writel(HOST_RESET, dd->mmio + HOST_CTL);
+
+ /* Flush */
+ readl(dd->mmio + HOST_CTL);
+
+ /* Wait for reset to clear */
+ ssleep(1);
+
+ /* Check the bit has cleared */
+ if (readl(dd->mmio + HOST_CTL) & HOST_RESET) {
+ dev_err(&dd->pdev->dev,
+ "Reset bit did not clear.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Display the identify command data.
+ *
+ * @port Pointer to the port data structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_dump_identify(struct mtip_port *port)
+{
+ sector_t sectors;
+ unsigned short revid;
+ char cbuf[42];
+
+ if (!port->identify_valid)
+ return;
+
+ strlcpy(cbuf, (char *)(port->identify+10), 21);
+ dev_info(&port->dd->pdev->dev,
+ "Serial No.: %s\n", cbuf);
+
+ strlcpy(cbuf, (char *)(port->identify+23), 9);
+ dev_info(&port->dd->pdev->dev,
+ "Firmware Ver.: %s\n", cbuf);
+
+ strlcpy(cbuf, (char *)(port->identify+27), 41);
+ dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);
+
+ if (mtip_hw_get_capacity(port->dd, &sectors))
+ dev_info(&port->dd->pdev->dev,
+ "Capacity: %llu sectors (%llu MB)\n",
+ (u64)sectors,
+ ((u64)sectors) * ATA_SECT_SIZE >> 20);
+
+ pci_read_config_word(port->dd->pdev, PCI_REVISION_ID, &revid);
+ switch (revid & 0xff) {
+ case 0x1:
+ strlcpy(cbuf, "A0", 3);
+ break;
+ case 0x3:
+ strlcpy(cbuf, "A2", 3);
+ break;
+ default:
+ strlcpy(cbuf, "?", 2);
+ break;
+ }
+ dev_info(&port->dd->pdev->dev,
+ "Card Type: %s\n", cbuf);
+}
+
+/*
+ * Map the commands scatter list into the command table.
+ *
+ * @command Pointer to the command.
+ * @nents Number of scatter list entries.
+ *
+ * return value
+ * None
+ */
+static inline void fill_command_sg(struct driver_data *dd,
+ struct mtip_cmd *command,
+ int nents)
+{
+ int n;
+ unsigned int dma_len;
+ struct mtip_cmd_sg *command_sg;
+ struct scatterlist *sg = command->sg;
+
+ command_sg = command->command + AHCI_CMD_TBL_HDR_SZ;
+
+ for (n = 0; n < nents; n++) {
+ dma_len = sg_dma_len(sg);
+ if (dma_len > 0x400000)
+ dev_err(&dd->pdev->dev,
+ "DMA segment length truncated\n");
+ command_sg->info = cpu_to_le32((dma_len-1) & 0x3fffff);
+#if (BITS_PER_LONG == 64)
+ *((unsigned long *) &command_sg->dba) =
+ cpu_to_le64(sg_dma_address(sg));
+#else
+ command_sg->dba = cpu_to_le32(sg_dma_address(sg));
+ command_sg->dba_upper =
+ cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
+#endif
+ command_sg++;
+ sg++;
+ }
+}
+
+/*
+ * Execute a drive command.
+ *
+ * @port Pointer to the port data structure.
+ * @command Pointer to the user specified command parameters.
+ *
+ * return value
+ * 0 The command completed successfully.
+ * -1 An error occurred while executing the command.
+ */
+static int exec_drive_task(struct mtip_port *port, u8 *command)
+{
+ struct host_to_dev_fis fis;
+ struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
+
+ /* Lock the internal command semaphore. */
+ down_write(&port->dd->internal_sem);
+
+ /* Build the FIS. */
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = command[0];
+ fis.features = command[1];
+ fis.sect_count = command[2];
+ fis.sector = command[3];
+ fis.cyl_low = command[4];
+ fis.cyl_hi = command[5];
+ fis.device = command[6] & ~0x10; /* Clear the dev bit*/
+
+ dbg_printk(MTIP_DRV_NAME "%s: User Command: cmd %x, feat %x, "
+ "nsect %x, sect %x, lcyl %x, "
+ "hcyl %x, sel %x\n",
+ __func__,
+ command[0],
+ command[1],
+ command[2],
+ command[3],
+ command[4],
+ command[5],
+ command[6]);
+
+ /* Execute the command. */
+ if (mtip_exec_internal_command(port,
+ &fis,
+ 5,
+ 0,
+ 0,
+ 0,
+ GFP_KERNEL,
+ MTIP_IOCTL_COMMAND_TIMEOUT_MS) < 0) {
+ up_write(&port->dd->internal_sem);
+ return -1;
+ }
+
+ command[0] = reply->command; /* Status*/
+ command[1] = reply->features; /* Error*/
+ command[4] = reply->cyl_low;
+ command[5] = reply->cyl_hi;
+
+ dbg_printk(MTIP_DRV_NAME "%s: Completion Status: stat %x, "
+ "err %x , cyl_lo %x cyl_hi %x\n",
+ __func__,
+ command[0],
+ command[1],
+ command[4],
+ command[5]);
+
+ up_write(&port->dd->internal_sem);
+ return 0;
+}
+
+/*
+ * Execute a drive command.
+ *
+ * @port Pointer to the port data structure.
+ * @command Pointer to the user specified command parameters.
+ * @user_buffer Pointer to the user space buffer where read sector
+ * data should be copied.
+ *
+ * return value
+ * 0 The command completed successfully.
+ * -EFAULT An error occurred while copying the completion
+ * data to the user space buffer.
+ * -1 An error occurred while executing the command.
+ */
+static int exec_drive_command(struct mtip_port *port, u8 *command,
+ void __user *user_buffer)
+{
+ struct host_to_dev_fis fis;
+ struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
+
+ /* Lock the internal command semaphore. */
+ down_write(&port->dd->internal_sem);
+
+ /* Build the FIS. */
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = command[0];
+ fis.features = command[2];
+ fis.sect_count = command[3];
+ if (fis.command == ATA_CMD_SMART) {
+ fis.sector = command[1];
+ fis.cyl_low = 0x4f;
+ fis.cyl_hi = 0xc2;
+ }
+
+ dbg_printk(MTIP_DRV_NAME
+ "%s: User Command: cmd %x, sect %x, "
+ "feat %x, sectcnt %x\n",
+ __func__,
+ command[0],
+ command[1],
+ command[2],
+ command[3]);
+
+ memset(port->sector_buffer, 0x00, ATA_SECT_SIZE);
+
+ /* Execute the command. */
+ if (mtip_exec_internal_command(port,
+ &fis,
+ 5,
+ port->sector_buffer_dma,
+ (command[3] != 0) ? ATA_SECT_SIZE : 0,
+ 0,
+ GFP_KERNEL,
+ MTIP_IOCTL_COMMAND_TIMEOUT_MS)
+ < 0) {
+ up_write(&port->dd->internal_sem);
+ return -1;
+ }
+
+ /* Collect the completion status. */
+ command[0] = reply->command; /* Status*/
+ command[1] = reply->features; /* Error*/
+ command[2] = command[3];
+
+ dbg_printk(MTIP_DRV_NAME
+ "%s: Completion Status: stat %x, "
+ "err %x, cmd %x\n",
+ __func__,
+ command[0],
+ command[1],
+ command[2]);
+
+ if (user_buffer && command[3]) {
+ /* Only a single sector of data was transferred into sector_buffer. */
+ if (copy_to_user(user_buffer,
+ port->sector_buffer,
+ ATA_SECT_SIZE)) {
+ up_write(&port->dd->internal_sem);
+ return -EFAULT;
+ }
+ }
+
+ up_write(&port->dd->internal_sem);
+ return 0;
+}
+
+/*
+ * Indicates whether a command has a single sector payload.
+ *
+ * @command Command byte of the taskfile.
+ * @features Features byte of the taskfile.
+ *
+ * return value
+ * 1 command is one that always has a single sector payload,
+ * regardless of the value in the Sector Count field.
+ * 0 otherwise
+ *
+ */
+static unsigned int implicit_sector(unsigned char command,
+ unsigned char features)
+{
+ unsigned int rv = 0;
+
+ /* list of commands that have an implicit sector count of 1 */
+ switch (command) {
+ case 0xF1: /* SECURITY SET PASSWORD */
+ case 0xF2: /* SECURITY UNLOCK */
+ case 0xF3: /* SECURITY ERASE PREPARE */
+ case 0xF4: /* SECURITY ERASE UNIT */
+ case 0xF5: /* SECURITY FREEZE LOCK */
+ case 0xF6: /* SECURITY DISABLE PASSWORD */
+ case 0xE4: /* READ BUFFER */
+ case 0xE8: /* WRITE BUFFER */
+ rv = 1;
+ break;
+ case 0xF9:
+ if (features == 0x03)
+ rv = 1;
+ break;
+ case 0xB0: /* SMART */
+ if ((features == 0xD0) || (features == 0xD1))
+ rv = 1;
+ break;
+ case 0xB1: /* DEVICE CONFIGURATION OVERLAY */
+ if ((features == 0xC2) || (features == 0xC3))
+ rv = 1;
+ break;
+ }
+ return rv;
+}
+
+/*
+ * Executes a taskfile
+ * See ide_taskfile_ioctl() for derivation
+ */
+static int exec_drive_taskfile(struct driver_data *dd,
+ void __user *buf,
+ ide_task_request_t *req_task,
+ int outtotal)
+{
+ struct host_to_dev_fis fis;
+ struct host_to_dev_fis *reply;
+ u8 *outbuf = NULL;
+ u8 *inbuf = NULL;
+ dma_addr_t outbuf_dma = 0;
+ dma_addr_t inbuf_dma = 0;
+ dma_addr_t dma_buffer = 0;
+ int err = 0;
+ unsigned int taskin = 0;
+ unsigned int taskout = 0;
+ u8 nsect = 0;
+ unsigned int timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
+ unsigned int force_single_sector;
+ unsigned int transfer_size;
+ unsigned long task_file_data;
+ int intotal = outtotal + req_task->out_size;
+
+ taskout = req_task->out_size;
+ taskin = req_task->in_size;
+ /* 130560 = 512 * 0xFF*/
+ if (taskin > 130560 || taskout > 130560) {
+ err = -EINVAL;
+ goto abort;
+ }
+
+ if (taskout) {
+ outbuf = kzalloc(taskout, GFP_KERNEL);
+ if (outbuf == NULL) {
+ err = -ENOMEM;
+ goto abort;
+ }
+ if (copy_from_user(outbuf, buf + outtotal, taskout)) {
+ err = -EFAULT;
+ goto abort;
+ }
+ outbuf_dma = pci_map_single(dd->pdev,
+ outbuf,
+ taskout,
+ DMA_TO_DEVICE);
+ if (outbuf_dma == 0) {
+ err = -ENOMEM;
+ goto abort;
+ }
+ dma_buffer = outbuf_dma;
+ }
+
+ if (taskin) {
+ inbuf = kzalloc(taskin, GFP_KERNEL);
+ if (inbuf == NULL) {
+ err = -ENOMEM;
+ goto abort;
+ }
+
+ if (copy_from_user(inbuf, buf + intotal, taskin)) {
+ err = -EFAULT;
+ goto abort;
+ }
+ inbuf_dma = pci_map_single(dd->pdev,
+ inbuf,
+ taskin, DMA_FROM_DEVICE);
+ if (inbuf_dma == 0) {
+ err = -ENOMEM;
+ goto abort;
+ }
+ dma_buffer = inbuf_dma;
+ }
+
+ /* Only PIO and non-data commands are supported by this ioctl. */
+ switch (req_task->data_phase) {
+ case TASKFILE_OUT:
+ nsect = taskout / ATA_SECT_SIZE;
+ reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
+ break;
+ case TASKFILE_IN:
+ reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
+ break;
+ case TASKFILE_NO_DATA:
+ reply = (dd->port->rxfis + RX_FIS_D2H_REG);
+ break;
+ default:
+ err = -EINVAL;
+ goto abort;
+ }
+
+ /* Lock the internal command semaphore. */
+ down_write(&dd->internal_sem);
+
+ /* Build the FIS. */
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
+
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = req_task->io_ports[7];
+ fis.features = req_task->io_ports[1];
+ fis.sect_count = req_task->io_ports[2];
+ fis.lba_low = req_task->io_ports[3];
+ fis.lba_mid = req_task->io_ports[4];
+ fis.lba_hi = req_task->io_ports[5];
+ /* Clear the dev bit*/
+ fis.device = req_task->io_ports[6] & ~0x10;
+
+ if ((req_task->in_flags.all == 0) && (req_task->out_flags.all & 1)) {
+ req_task->in_flags.all =
+ IDE_TASKFILE_STD_IN_FLAGS |
+ (IDE_HOB_STD_IN_FLAGS << 8);
+ fis.lba_low_ex = req_task->hob_ports[3];
+ fis.lba_mid_ex = req_task->hob_ports[4];
+ fis.lba_hi_ex = req_task->hob_ports[5];
+ fis.features_ex = req_task->hob_ports[1];
+ fis.sect_cnt_ex = req_task->hob_ports[2];
+
+ } else {
+ req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
+ }
+
+ force_single_sector = implicit_sector(fis.command, fis.features);
+
+ if ((taskin || taskout) && (!fis.sect_count)) {
+ if (nsect)
+ fis.sect_count = nsect;
+ else {
+ if (!force_single_sector) {
+ dev_warn(&dd->pdev->dev,
+ "data movement but "
+ "sect_count is 0\n");
+ up_write(&dd->internal_sem);
+ err = -EINVAL;
+ goto abort;
+ }
+ }
+ }
+
+ dbg_printk(MTIP_DRV_NAME
+ "taskfile: cmd %x, feat %x, nsect %x,"
+ " sect/lbal %x, lcyl/lbam %x, hcyl/lbah %x,"
+ " head/dev %x\n",
+ fis.command,
+ fis.features,
+ fis.sect_count,
+ fis.lba_low,
+ fis.lba_mid,
+ fis.lba_hi,
+ fis.device);
+
+ switch (fis.command) {
+ case 0x92: /* Change timeout for Download Microcode to 60 seconds.*/
+ timeout = 60000;
+ break;
+ case 0xf4: /* Change timeout for Security Erase Unit to 4 minutes.*/
+ timeout = 240000;
+ break;
+ case 0xe0: /* Change timeout for standby immediate to 10 seconds.*/
+ timeout = 10000;
+ break;
+ case 0xf7: /* Change timeout for vendor unique commands to 10 secs */
+ case 0xfa:
+ timeout = 10000;
+ break;
+ default:
+ timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
+ break;
+ }
+
+ /* Determine the correct transfer size.*/
+ if (force_single_sector)
+ transfer_size = ATA_SECT_SIZE;
+ else
+ transfer_size = ATA_SECT_SIZE * fis.sect_count;
+
+ /* Execute the command.*/
+ if (mtip_exec_internal_command(dd->port,
+ &fis,
+ 5,
+ dma_buffer,
+ transfer_size,
+ 0,
+ GFP_KERNEL,
+ timeout) < 0) {
+ up_write(&dd->internal_sem);
+ err = -EIO;
+ goto abort;
+ }
+
+ task_file_data = readl(dd->port->mmio+PORT_TFDATA);
+
+ if ((req_task->data_phase == TASKFILE_IN) && !(task_file_data & 1)) {
+ reply = dd->port->rxfis + RX_FIS_PIO_SETUP;
+ req_task->io_ports[7] = reply->control;
+ } else {
+ reply = dd->port->rxfis + RX_FIS_D2H_REG;
+ req_task->io_ports[7] = reply->command;
+ }
+
+ /* reclaim the DMA buffers.*/
+ if (inbuf_dma)
+ pci_unmap_single(dd->pdev, inbuf_dma,
+ taskin, DMA_FROM_DEVICE);
+ if (outbuf_dma)
+ pci_unmap_single(dd->pdev, outbuf_dma,
+ taskout, DMA_TO_DEVICE);
+ inbuf_dma = 0;
+ outbuf_dma = 0;
+
+ /* return the ATA registers to the caller.*/
+ req_task->io_ports[1] = reply->features;
+ req_task->io_ports[2] = reply->sect_count;
+ req_task->io_ports[3] = reply->lba_low;
+ req_task->io_ports[4] = reply->lba_mid;
+ req_task->io_ports[5] = reply->lba_hi;
+ req_task->io_ports[6] = reply->device;
+
+ if (req_task->out_flags.all & 1) {
+
+ req_task->hob_ports[3] = reply->lba_low_ex;
+ req_task->hob_ports[4] = reply->lba_mid_ex;
+ req_task->hob_ports[5] = reply->lba_hi_ex;
+ req_task->hob_ports[1] = reply->features_ex;
+ req_task->hob_ports[2] = reply->sect_cnt_ex;
+ }
+
+ /* COMRESET after secure erase or low-level format */
+ if (((fis.command == 0xF4) ||
+ ((fis.command == 0xFC) &&
+ (fis.features == 0x27 || fis.features == 0x72 ||
+ fis.features == 0x62 || fis.features == 0x26))) &&
+ !(reply->command & 1)) {
+ mtip_restart_port(dd->port);
+ }
+
+ dbg_printk(MTIP_DRV_NAME
+ "%s: Completion: stat %x,"
+ "err %x, sect_cnt %x, lbalo %x,"
+ "lbamid %x, lbahi %x, dev %x\n",
+ __func__,
+ req_task->io_ports[7],
+ req_task->io_ports[1],
+ req_task->io_ports[2],
+ req_task->io_ports[3],
+ req_task->io_ports[4],
+ req_task->io_ports[5],
+ req_task->io_ports[6]);
+
+ up_write(&dd->internal_sem);
+
+ if (taskout) {
+ if (copy_to_user(buf + outtotal, outbuf, taskout)) {
+ err = -EFAULT;
+ goto abort;
+ }
+ }
+ if (taskin) {
+ if (copy_to_user(buf + intotal, inbuf, taskin)) {
+ err = -EFAULT;
+ goto abort;
+ }
+ }
+abort:
+ if (inbuf_dma)
+ pci_unmap_single(dd->pdev, inbuf_dma,
+ taskin, DMA_FROM_DEVICE);
+ if (outbuf_dma)
+ pci_unmap_single(dd->pdev, outbuf_dma,
+ taskout, DMA_TO_DEVICE);
+ kfree(outbuf);
+ kfree(inbuf);
+
+ return err;
+}
+
+/*
+ * Handle IOCTL calls from the Block Layer.
+ *
+ * This function is called by the Block Layer when it receives an IOCTL
+ * command that it does not understand. If the IOCTL command is not
+ * supported, this function returns -ENOTTY.
+ *
+ * @dd Pointer to the driver data structure.
+ * @cmd IOCTL command passed from the Block Layer.
+ * @arg IOCTL argument passed from the Block Layer.
+ *
+ * return value
+ * 0 The IOCTL completed successfully.
+ * -ENOTTY The specified command is not supported.
+ * -EFAULT An error occurred copying data to a user space buffer.
+ * -EIO An error occurred while executing the command.
+ */
+static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case HDIO_GET_IDENTITY:
+ if (mtip_get_identify(dd->port, (void __user *) arg) < 0) {
+ dev_warn(&dd->pdev->dev,
+ "Unable to read identity\n");
+ return -EIO;
+ }
+
+ break;
+ case HDIO_DRIVE_CMD:
+ {
+ u8 drive_command[4];
+
+ /* Copy the user command info to our buffer. */
+ if (copy_from_user(drive_command,
+ (void __user *) arg,
+ sizeof(drive_command)))
+ return -EFAULT;
+
+ /* Execute the drive command. */
+ if (exec_drive_command(dd->port,
+ drive_command,
+ (void __user *) (arg+4)))
+ return -EIO;
+
+ /* Copy the status back to the users buffer. */
+ if (copy_to_user((void __user *) arg,
+ drive_command,
+ sizeof(drive_command)))
+ return -EFAULT;
+
+ break;
+ }
+ case HDIO_DRIVE_TASK:
+ {
+ u8 drive_command[7];
+
+ /* Copy the user command info to our buffer. */
+ if (copy_from_user(drive_command,
+ (void __user *) arg,
+ sizeof(drive_command)))
+ return -EFAULT;
+
+ /* Execute the drive command. */
+ if (exec_drive_task(dd->port, drive_command))
+ return -EIO;
+
+ /* Copy the status back to the users buffer. */
+ if (copy_to_user((void __user *) arg,
+ drive_command,
+ sizeof(drive_command)))
+ return -EFAULT;
+
+ break;
+ }
+ case HDIO_DRIVE_TASKFILE: {
+ ide_task_request_t req_task;
+ int ret, outtotal;
+
+ if (copy_from_user(&req_task, (void __user *) arg,
+ sizeof(req_task)))
+ return -EFAULT;
+
+ outtotal = sizeof(req_task);
+
+ ret = exec_drive_taskfile(dd, (void __user *) arg,
+ &req_task, outtotal);
+
+ if (copy_to_user((void __user *) arg, &req_task, sizeof(req_task)))
+ return -EFAULT;
+
+ return ret;
+ }
+
+ default:
+ return -ENOTTY;
+ }
+ return 0;
+}
+
+/*
+ * Submit an IO to the hw
+ *
+ * This function is called by the block layer to issue an io
+ * to the device. Upon completion, the callback function will
+ * be called with the data parameter passed as the callback data.
+ *
+ * @dd Pointer to the driver data structure.
+ * @start First sector to transfer.
+ * @nsect Number of sectors to transfer.
+ * @nents Number of entries in the scatter list for this command.
+ * @tag The tag of this command.
+ * @callback Pointer to the function that should be called
+ * when the transfer completes.
+ * @data Callback data passed to the callback function
+ * when the transfer completes.
+ * @barrier If non-zero, this command must be completed before
+ * issuing any other commands.
+ * @dir Direction (read or write)
+ *
+ * return value
+ * None
+ */
+static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
+ int nsect, int nents, int tag, void *callback,
+ void *data, int barrier, int dir)
+{
+ struct host_to_dev_fis *fis;
+ struct mtip_port *port = dd->port;
+ struct mtip_cmd *command = &port->commands[tag];
+
+ /* Map the scatter list for DMA access */
+ if (dir == READ)
+ nents = dma_map_sg(&dd->pdev->dev, command->sg,
+ nents, DMA_FROM_DEVICE);
+ else
+ nents = dma_map_sg(&dd->pdev->dev, command->sg,
+ nents, DMA_TO_DEVICE);
+
+ command->scatter_ents = nents;
+
+ /*
+ * The number of retries for this command before it is
+ * reported as a failure to the upper layers.
+ */
+ command->retries = MTIP_MAX_RETRIES;
+
+ /* Fill out fis */
+ fis = command->command;
+ fis->type = 0x27;
+ fis->opts = 1 << 7;
+ fis->command =
+ (dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE);
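+ /* Split the 48-bit starting LBA across the low and extended LBA fields. */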
+ *((unsigned int *) &fis->lba_low) = (start & 0xffffff);
+ *((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xffffff);
+ fis->device = 1 << 6;
+ if (barrier)
+ fis->device |= FUA_BIT;
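+ /*
+ * For FPDMA (NCQ) commands the sector count travels in the
+ * Features fields and the tag in bits 7:3 of the Count field.
+ */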
+ fis->features = nsect & 0xff;
+ fis->features_ex = (nsect >> 8) & 0xff;
+ fis->sect_count = ((tag << 3) | (tag >> 5));
+ fis->sect_cnt_ex = 0;
+ fis->control = 0;
+ fis->res2 = 0;
+ fis->res3 = 0;
+ fill_command_sg(dd, command, nents);
+
+ /* Populate the command header */
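+ /* PRDT entry count in bits 31:16; FIS length (5 DWORDs) in bits 4:0. */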
+ command->command_header->opts = cpu_to_le32(
+ (nents << 16) | 5 | AHCI_CMD_PREFETCH);
+ command->command_header->byte_count = 0;
+
+ /*
+ * Set the completion function and data for the command
+ * within this layer.
+ */
+ command->comp_data = dd;
+ command->comp_func = mtip_async_complete;
+ command->direction = (dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+
+ /*
+ * Set the completion function and data for the command passed
+ * from the upper layer.
+ */
+ command->async_data = data;
+ command->async_callback = callback;
+
+ /*
+ * Lock used to prevent this command from being issued
+ * if an internal command is in progress.
+ */
+ down_read(&port->dd->internal_sem);
+
+ /* Issue the command to the hardware */
+ mtip_issue_ncq_command(port, tag);
+
+ /* Set the command's timeout value.*/
+ port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
+ MTIP_NCQ_COMMAND_TIMEOUT_MS);
+
+ up_read(&port->dd->internal_sem);
+}
+
+/*
+ * Release a command slot.
+ *
+ * @dd Pointer to the driver data structure.
+ * @tag Slot tag
+ *
+ * return value
+ * None
+ */
+static void mtip_hw_release_scatterlist(struct driver_data *dd, int tag)
+{
+ release_slot(dd->port, tag);
+}
+
+/*
+ * Obtain a command slot and return its associated scatter list.
+ *
+ * @dd Pointer to the driver data structure.
+ * @tag Pointer to an int that will receive the allocated command
+ * slot tag.
+ *
+ * return value
+ * Pointer to the scatter list for the allocated command slot
+ * or NULL if no command slots are available.
+ */
+static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
+ int *tag)
+{
+ /*
+ * It is possible that, even with this semaphore, a thread
+ * may think that no command slots are available. Therefore, we
+ * need to make an attempt to get_slot().
+ */
+ down(&dd->port->cmd_slot);
+ *tag = get_slot(dd->port);
+
+ if (unlikely(*tag < 0))
+ return NULL;
+
+ return dd->port->commands[*tag].sg;
+}
+
+/*
+ * Sysfs register/status dump.
+ *
+ * @dev Pointer to the device structure, passed by the kernel.
+ * @attr Pointer to the device_attribute structure passed by the kernel.
+ * @buf Pointer to the char buffer that will receive the stats info.
+ *
+ * return value
+ * The size, in bytes, of the data copied into buf.
+ */
+static ssize_t hw_show_registers(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 group_allocated;
+ struct driver_data *dd = dev_to_disk(dev)->private_data;
+ int size = 0;
+ int n;
+
+ size += sprintf(&buf[size], "%s:\ns_active:\n", __func__);
+
+ for (n = 0; n < dd->slot_groups; n++)
+ size += sprintf(&buf[size], "0x%08x\n",
+ readl(dd->port->s_active[n]));
+
+ size += sprintf(&buf[size], "Command Issue:\n");
+
+ for (n = 0; n < dd->slot_groups; n++)
+ size += sprintf(&buf[size], "0x%08x\n",
+ readl(dd->port->cmd_issue[n]));
+
+ size += sprintf(&buf[size], "Allocated:\n");
+
+ for (n = 0; n < dd->slot_groups; n++) {
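+ /* On 64-bit kernels two slot groups share each long in the bitmap. */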
+ if (sizeof(long) > sizeof(u32))
+ group_allocated =
+ dd->port->allocated[n/2] >> (32*(n&1));
+ else
+ group_allocated = dd->port->allocated[n];
+ size += sprintf(&buf[size], "0x%08x\n",
+ group_allocated);
+ }
+
+ size += sprintf(&buf[size], "completed:\n");
+
+ for (n = 0; n < dd->slot_groups; n++)
+ size += sprintf(&buf[size], "0x%08x\n",
+ readl(dd->port->completed[n]));
+
+ size += sprintf(&buf[size], "PORT_IRQ_STAT 0x%08x\n",
+ readl(dd->port->mmio + PORT_IRQ_STAT));
+ size += sprintf(&buf[size], "HOST_IRQ_STAT 0x%08x\n",
+ readl(dd->mmio + HOST_IRQ_STAT));
+
+ return size;
+}
+static DEVICE_ATTR(registers, S_IRUGO, hw_show_registers, NULL);
+
+/*
+ * Create the sysfs related attributes.
+ *
+ * @dd Pointer to the driver data structure.
+ * @kobj Pointer to the kobj for the block device.
+ *
+ * return value
+ * 0 Operation completed successfully.
+ * -EINVAL Invalid parameter.
+ */
+static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
+{
+ if (!kobj || !dd)
+ return -EINVAL;
+
+ if (sysfs_create_file(kobj, &dev_attr_registers.attr))
+ dev_warn(&dd->pdev->dev,
+ "Error creating registers sysfs entry\n");
+ return 0;
+}
+
+/*
+ * Remove the sysfs related attributes.
+ *
+ * @dd Pointer to the driver data structure.
+ * @kobj Pointer to the kobj for the block device.
+ *
+ * return value
+ * 0 Operation completed successfully.
+ * -EINVAL Invalid parameter.
+ */
+static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
+{
+ if (!kobj || !dd)
+ return -EINVAL;
+
+ sysfs_remove_file(kobj, &dev_attr_registers.attr);
+
+ return 0;
+}
+
+/*
+ * Perform any init/resume time hardware setup
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * None
+ */
+static inline void hba_setup(struct driver_data *dd)
+{
+ u32 hwdata;
+ hwdata = readl(dd->mmio + HOST_HSORG);
+
+ /* interrupt bug workaround: use only 1 IS bit.*/
+ writel(hwdata |
+ HSORG_DISABLE_SLOTGRP_INTR |
+ HSORG_DISABLE_SLOTGRP_PXIS,
+ dd->mmio + HOST_HSORG);
+}
+
+/*
+ * Detect the details of the product, and store anything needed
+ * into the driver data structure. This includes product type and
+ * version and number of slot groups.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_detect_product(struct driver_data *dd)
+{
+ u32 hwdata;
+ unsigned int rev, slotgroups;
+
+ /*
+ * HBA base + 0xFC [15:0] - vendor-specific hardware interface
+ * info register:
+ * [15:8] hardware/software interface rev#
+ * [ 3] asic-style interface
+ * [ 2:0] number of slot groups, minus 1 (only valid for asic-style).
+ */
+ hwdata = readl(dd->mmio + HOST_HSORG);
+
+ dd->product_type = MTIP_PRODUCT_UNKNOWN;
+ dd->slot_groups = 1;
+
+ if (hwdata & 0x8) {
+ dd->product_type = MTIP_PRODUCT_ASICFPGA;
+ rev = (hwdata & HSORG_HWREV) >> 8;
+ slotgroups = (hwdata & HSORG_SLOTGROUPS) + 1;
+ dev_info(&dd->pdev->dev,
+ "ASIC-FPGA design, HS rev 0x%x, "
+ "%i slot groups [%i slots]\n",
+ rev,
+ slotgroups,
+ slotgroups * 32);
+
+ if (slotgroups > MTIP_MAX_SLOT_GROUPS) {
+ dev_warn(&dd->pdev->dev,
+ "Warning: driver only supports "
+ "%i slot groups.\n", MTIP_MAX_SLOT_GROUPS);
+ slotgroups = MTIP_MAX_SLOT_GROUPS;
+ }
+ dd->slot_groups = slotgroups;
+ return;
+ }
+
+ dev_warn(&dd->pdev->dev, "Unrecognized product id\n");
+}
+
+/*
+ * Blocking wait for FTL rebuild to complete
+ *
+ * @dd Pointer to the DRIVER_DATA structure.
+ *
+ * return value
+ * 0 FTL rebuild completed successfully
+ * -EFAULT FTL rebuild error/timeout/interruption
+ */
+static int mtip_ftl_rebuild_poll(struct driver_data *dd)
+{
+ unsigned long timeout, cnt = 0, start;
+
+ dev_warn(&dd->pdev->dev,
+ "FTL rebuild in progress. Polling for completion.\n");
+
+ start = jiffies;
+ dd->ftlrebuildflag = 1;
+ timeout = jiffies + msecs_to_jiffies(MTIP_FTL_REBUILD_TIMEOUT_MS);
+
+ do {
+#ifdef CONFIG_HOTPLUG
+ if (mtip_check_surprise_removal(dd->pdev))
+ return -EFAULT;
+#endif
+ if (mtip_get_identify(dd->port, NULL) < 0)
+ return -EFAULT;
+
+ if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
+ MTIP_FTL_REBUILD_MAGIC) {
+ ssleep(1);
+ /* Print message every 3 minutes */
+ if (cnt++ >= 180) {
+ dev_warn(&dd->pdev->dev,
+ "FTL rebuild in progress (%d secs).\n",
+ jiffies_to_msecs(jiffies - start) / 1000);
+ cnt = 0;
+ }
+ } else {
+ dev_warn(&dd->pdev->dev,
+ "FTL rebuild complete (%d secs).\n",
+ jiffies_to_msecs(jiffies - start) / 1000);
+ dd->ftlrebuildflag = 0;
+ break;
+ }
+ ssleep(10);
+ } while (time_before(jiffies, timeout));
+
+ /* Check for timeout */
+ if (dd->ftlrebuildflag) {
+ dev_err(&dd->pdev->dev,
+ "Timed out waiting for FTL rebuild to complete (%d secs).\n",
+ jiffies_to_msecs(jiffies - start) / 1000);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * Called once for each card.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0 on success, else an error code.
+ */
+static int mtip_hw_init(struct driver_data *dd)
+{
+ int i;
+ int rv;
+ unsigned int num_command_slots;
+
+ dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
+
+ mtip_detect_product(dd);
+ if (dd->product_type == MTIP_PRODUCT_UNKNOWN) {
+ rv = -EIO;
+ goto out1;
+ }
+ num_command_slots = dd->slot_groups * 32;
+
+ hba_setup(dd);
+
+ /*
+ * Initialize the internal semaphore.
+ * Use a rw semaphore to enable prioritization of
+ * management ioctl traffic during heavy IO load.
+ */
+ init_rwsem(&dd->internal_sem);
+
+ tasklet_init(&dd->tasklet, mtip_tasklet, (unsigned long)dd);
+
+ dd->port = kzalloc(sizeof(struct mtip_port), GFP_KERNEL);
+ if (!dd->port) {
+ dev_err(&dd->pdev->dev,
+ "Memory allocation: port structure\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Counting semaphore to track command slot usage;
+ * one slot is reserved for the internal command.
+ */
+ sema_init(&dd->port->cmd_slot, num_command_slots - 1);
+
+ /* Spinlock to prevent concurrent issue */
+ spin_lock_init(&dd->port->cmd_issue_lock);
+
+ /* Set the port mmio base address. */
+ dd->port->mmio = dd->mmio + PORT_OFFSET;
+ dd->port->dd = dd;
+
+ /* Allocate memory for the command list. */
+ dd->port->command_list =
+ dmam_alloc_coherent(&dd->pdev->dev,
+ HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
+ &dd->port->command_list_dma,
+ GFP_KERNEL);
+ if (!dd->port->command_list) {
+ dev_err(&dd->pdev->dev,
+ "Memory allocation: command list\n");
+ rv = -ENOMEM;
+ goto out1;
+ }
+
+ /* Clear the memory we have allocated. */
+ memset(dd->port->command_list,
+ 0,
+ HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2));
+
+ /* Setup the address of the RX FIS. */
+ dd->port->rxfis = dd->port->command_list + HW_CMD_SLOT_SZ;
+ dd->port->rxfis_dma = dd->port->command_list_dma + HW_CMD_SLOT_SZ;
+
+ /* Setup the address of the command tables. */
+ dd->port->command_table = dd->port->rxfis + AHCI_RX_FIS_SZ;
+ dd->port->command_tbl_dma = dd->port->rxfis_dma + AHCI_RX_FIS_SZ;
+
+ /* Setup the address of the identify data. */
+ dd->port->identify = dd->port->command_table +
+ HW_CMD_TBL_AR_SZ;
+ dd->port->identify_dma = dd->port->command_tbl_dma +
+ HW_CMD_TBL_AR_SZ;
+
+ /* Setup the address of the sector buffer. */
+ dd->port->sector_buffer = (void *) dd->port->identify + ATA_SECT_SIZE;
+ dd->port->sector_buffer_dma = dd->port->identify_dma + ATA_SECT_SIZE;
+
+ /* Point the command headers at the command tables. */
+ for (i = 0; i < num_command_slots; i++) {
+ dd->port->commands[i].command_header =
+ dd->port->command_list +
+ (sizeof(struct mtip_cmd_hdr) * i);
+ dd->port->commands[i].command_header_dma =
+ dd->port->command_list_dma +
+ (sizeof(struct mtip_cmd_hdr) * i);
+
+ dd->port->commands[i].command =
+ dd->port->command_table + (HW_CMD_TBL_SZ * i);
+ dd->port->commands[i].command_dma =
+ dd->port->command_tbl_dma + (HW_CMD_TBL_SZ * i);
+
+ if (readl(dd->mmio + HOST_CAP) & HOST_CAP_64)
+ dd->port->commands[i].command_header->ctbau =
+ cpu_to_le32(
+ (dd->port->commands[i].command_dma >> 16) >> 16);
+ dd->port->commands[i].command_header->ctba = cpu_to_le32(
+ dd->port->commands[i].command_dma & 0xffffffff);
+
+ /*
+ * If this is not done, a bug is reported by stock
+ * Fedora Core 11 i386 kernels, which have lots of kernel
+ * debugging enabled.
+ */
+ sg_init_table(dd->port->commands[i].sg, MTIP_MAX_SG);
+
+ /* Mark all commands as currently inactive.*/
+ atomic_set(&dd->port->commands[i].active, 0);
+ }
+
+ /* Setup the pointers to the extended s_active and CI registers. */
+ for (i = 0; i < dd->slot_groups; i++) {
+ dd->port->s_active[i] =
+ dd->port->mmio + i*0x80 + PORT_SCR_ACT;
+ dd->port->cmd_issue[i] =
+ dd->port->mmio + i*0x80 + PORT_COMMAND_ISSUE;
+ dd->port->completed[i] =
+ dd->port->mmio + i*0x80 + PORT_SDBV;
+ }
+
+ /* Reset the HBA. */
+ if (mtip_hba_reset(dd) < 0) {
+ dev_err(&dd->pdev->dev,
+ "Card did not reset within timeout\n");
+ rv = -EIO;
+ goto out2;
+ }
+
+ mtip_init_port(dd->port);
+ mtip_start_port(dd->port);
+
+ /* Setup the ISR and enable interrupts. */
+ rv = devm_request_irq(&dd->pdev->dev,
+ dd->pdev->irq,
+ mtip_irq_handler,
+ IRQF_SHARED,
+ dev_driver_string(&dd->pdev->dev),
+ dd);
+
+ if (rv) {
+ dev_err(&dd->pdev->dev,
+ "Unable to allocate IRQ %d\n", dd->pdev->irq);
+ goto out2;
+ }
+
+ /* Enable interrupts on the HBA. */
+ writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
+ dd->mmio + HOST_CTL);
+
+ init_timer(&dd->port->cmd_timer);
+ dd->port->cmd_timer.data = (unsigned long int) dd->port;
+ dd->port->cmd_timer.function = mtip_timeout_function;
+ mod_timer(&dd->port->cmd_timer,
+ jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
+
+ if (mtip_get_identify(dd->port, NULL) < 0) {
+ rv = -EFAULT;
+ goto out3;
+ }
+ mtip_dump_identify(dd->port);
+
+ if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
+ MTIP_FTL_REBUILD_MAGIC) {
+ return mtip_ftl_rebuild_poll(dd);
+ }
+ return rv;
+
+out3:
+ del_timer_sync(&dd->port->cmd_timer);
+
+ /* Disable interrupts on the HBA. */
+ writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
+ dd->mmio + HOST_CTL);
+
+ /* Release the IRQ. */
+ devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
+
+out2:
+ mtip_deinit_port(dd->port);
+
+ /* Free the command/command header memory. */
+ dmam_free_coherent(&dd->pdev->dev,
+ HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
+ dd->port->command_list,
+ dd->port->command_list_dma);
+out1:
+ /* Free the memory allocated for the port structure. */
+ kfree(dd->port);
+
+ return rv;
+}
+
+/*
+ * Called to deinitialize an interface.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0
+ */
+static int mtip_hw_exit(struct driver_data *dd)
+{
+ /*
+ * Send standby immediate (E0h) to the drive so that it
+ * saves its state.
+ */
+ if (atomic_read(&dd->drv_cleanup_done) != true) {
+
+ mtip_standby_immediate(dd->port);
+
+ /* de-initialize the port. */
+ mtip_deinit_port(dd->port);
+
+ /* Disable interrupts on the HBA. */
+ writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
+ dd->mmio + HOST_CTL);
+ }
+
+ del_timer_sync(&dd->port->cmd_timer);
+
+ /* Stop the bottom half tasklet. */
+ tasklet_kill(&dd->tasklet);
+
+ /* Release the IRQ. */
+ devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
+
+ /* Free the command/command header memory. */
+ dmam_free_coherent(&dd->pdev->dev,
+ HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
+ dd->port->command_list,
+ dd->port->command_list_dma);
+ /* Free the memory allocated for the port structure. */
+ kfree(dd->port);
+
+ return 0;
+}
+
+/*
+ * Issue a Standby Immediate command to the device.
+ *
+ * This function is called by the Block Layer just before the
+ * system powers off during a shutdown.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0
+ */
+static int mtip_hw_shutdown(struct driver_data *dd)
+{
+ /*
+ * Send standby immediate (E0h) to the drive so that it
+ * saves its state.
+ */
+ mtip_standby_immediate(dd->port);
+
+ return 0;
+}
+
+/*
+ * Suspend function
+ *
+ * This function is called by the Block Layer just before the
+ * system hibernates.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0 Suspend was successful
+ * -EFAULT Suspend was not successful
+ */
+static int mtip_hw_suspend(struct driver_data *dd)
+{
+ /*
+ * Send standby immediate (E0h) to the drive
+ * so that it saves its state.
+ */
+ if (mtip_standby_immediate(dd->port) != 0) {
+ dev_err(&dd->pdev->dev,
+ "Failed standby-immediate command\n");
+ return -EFAULT;
+ }
+
+ /* Disable interrupts on the HBA.*/
+ writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
+ dd->mmio + HOST_CTL);
+ mtip_deinit_port(dd->port);
+
+ return 0;
+}
+
+/*
+ * Resume function
+ *
+ * This function is called by the Block Layer as the
+ * system resumes.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0 Resume was successful
+ * -EFAULT Resume was not successful
+ */
+static int mtip_hw_resume(struct driver_data *dd)
+{
+ /* Perform any needed hardware setup steps */
+ hba_setup(dd);
+
+ /* Reset the HBA */
+ if (mtip_hba_reset(dd) != 0) {
+ dev_err(&dd->pdev->dev,
+ "Unable to reset the HBA\n");
+ return -EFAULT;
+ }
+
+ /*
+ * Enable the port, DMA engine, and FIS reception specific
+ * h/w in controller.
+ */
+ mtip_init_port(dd->port);
+ mtip_start_port(dd->port);
+
+ /* Enable interrupts on the HBA.*/
+ writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
+ dd->mmio + HOST_CTL);
+
+ return 0;
+}
+
+/*
+ * Helper function for reusing disk name
+ * upon hot insertion.
+ */
+static int rssd_disk_name_format(char *prefix,
+ int index,
+ char *buf,
+ int buflen)
+{
+ const int base = 'z' - 'a' + 1;
+ char *begin = buf + strlen(prefix);
+ char *end = buf + buflen;
+ char *p;
+ int unit;
+
+ p = end - 1;
+ *p = '\0';
+ unit = base;
+ do {
+ if (p == begin)
+ return -EINVAL;
+ *--p = 'a' + (index % unit);
+ index = (index / unit) - 1;
+ } while (index >= 0);
+
+ memmove(begin, p, end - p);
+ memcpy(buf, prefix, strlen(prefix));
+
+ return 0;
+}
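+
+/*
+ * Illustrative examples of the sd.c-style base-26 naming implemented
+ * above: index 0 maps to "rssda", index 25 to "rssdz", and index 26
+ * to "rssdaa".
+ */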
+
+/*
+ * Block layer IOCTL handler.
+ *
+ * @dev Pointer to the block_device structure.
+ * @mode ignored
+ * @cmd IOCTL command passed from the user application.
+ * @arg Argument passed from the user application.
+ *
+ * return value
+ * 0 IOCTL completed successfully.
+ * -ENOTTY IOCTL not supported or invalid driver data
+ * structure pointer.
+ */
+static int mtip_block_ioctl(struct block_device *dev,
+ fmode_t mode,
+ unsigned cmd,
+ unsigned long arg)
+{
+ struct driver_data *dd = dev->bd_disk->private_data;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (!dd)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case BLKFLSBUF:
+ return 0;
+ default:
+ return mtip_hw_ioctl(dd, cmd, arg);
+ }
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * Block layer compat IOCTL handler.
+ *
+ * @dev Pointer to the block_device structure.
+ * @mode ignored
+ * @cmd IOCTL command passed from the user application.
+ * @arg Argument passed from the user application.
+ *
+ * return value
+ * 0 IOCTL completed successfully.
+ * -ENOTTY IOCTL not supported or invalid driver data
+ * structure pointer.
+ */
+static int mtip_block_compat_ioctl(struct block_device *dev,
+ fmode_t mode,
+ unsigned cmd,
+ unsigned long arg)
+{
+ struct driver_data *dd = dev->bd_disk->private_data;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (!dd)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case BLKFLSBUF:
+ return 0;
+ case HDIO_DRIVE_TASKFILE: {
+ struct mtip_compat_ide_task_request_s __user *compat_req_task;
+ ide_task_request_t req_task;
+ int compat_tasksize, outtotal, ret;
+
+ compat_tasksize = sizeof(struct mtip_compat_ide_task_request_s);
+
+ compat_req_task =
+ (struct mtip_compat_ide_task_request_s __user *) arg;
+
+ if (copy_from_user(&req_task, (void __user *) arg,
+ compat_tasksize - (2 * sizeof(compat_long_t))))
+ return -EFAULT;
+
+ if (get_user(req_task.out_size, &compat_req_task->out_size))
+ return -EFAULT;
+
+ if (get_user(req_task.in_size, &compat_req_task->in_size))
+ return -EFAULT;
+
+ outtotal = sizeof(struct mtip_compat_ide_task_request_s);
+
+ ret = exec_drive_taskfile(dd, (void __user *) arg,
+ &req_task, outtotal);
+
+ if (copy_to_user((void __user *) arg, &req_task,
+ compat_tasksize -
+ (2 * sizeof(compat_long_t))))
+ return -EFAULT;
+
+ if (put_user(req_task.out_size, &compat_req_task->out_size))
+ return -EFAULT;
+
+ if (put_user(req_task.in_size, &compat_req_task->in_size))
+ return -EFAULT;
+
+ return ret;
+ }
+ default:
+ return mtip_hw_ioctl(dd, cmd, arg);
+ }
+}
+#endif
+
+/*
+ * Obtain the geometry of the device.
+ *
+ * You may think that this function is obsolete, but some applications,
+ * fdisk for example, still use CHS values. This function describes the
+ * device as having 224 heads and 56 sectors per cylinder. These values are
+ * chosen so that each cylinder is aligned on a 4KB boundary. Since a
+ * partition is described in terms of a start and end cylinder this means
+ * that each partition is also 4KB aligned. Non-aligned partitions adversely
+ * affect performance.
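+ *
+ * For example: 224 heads * 56 sectors/track = 12544 sectors per cylinder,
+ * and 12544 * 512 bytes = 6422528 bytes, a multiple of 4096, so every
+ * cylinder (and hence partition) boundary stays 4KB-aligned.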
+ *
+ * @dev Pointer to the block_device structure.
+ * @geo Pointer to a hd_geometry structure.
+ *
+ * return value
+ * 0 Operation completed successfully.
+ * -ENOTTY An error occurred while reading the drive capacity.
+ */
+static int mtip_block_getgeo(struct block_device *dev,
+ struct hd_geometry *geo)
+{
+ struct driver_data *dd = dev->bd_disk->private_data;
+ sector_t capacity;
+
+ if (!dd)
+ return -ENOTTY;
+
+ if (!(mtip_hw_get_capacity(dd, &capacity))) {
+ dev_warn(&dd->pdev->dev,
+ "Could not get drive capacity.\n");
+ return -ENOTTY;
+ }
+
+ geo->heads = 224;
+ geo->sectors = 56;
+#if BITS_PER_LONG == 64
+ geo->cylinders = capacity / (geo->heads * geo->sectors);
+#else
+ do_div(capacity, (geo->heads * geo->sectors));
+ geo->cylinders = capacity;
+#endif
+ return 0;
+}
+
+/*
+ * Block device operation function.
+ *
+ * This structure contains pointers to the functions required by the block
+ * layer.
+ */
+static const struct block_device_operations mtip_block_ops = {
+ .ioctl = mtip_block_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mtip_block_compat_ioctl,
+#endif
+ .getgeo = mtip_block_getgeo,
+ .owner = THIS_MODULE
+};
+
+/*
+ * Block layer make request function.
+ *
+ * This function is called by the kernel to process a BIO for
+ * the P320 device.
+ *
+ * @queue Pointer to the request queue. Unused other than to obtain
+ * the driver data structure.
+ * @bio Pointer to the BIO.
+ *
+ * return value
+ * None
+ */
+static void mtip_make_request(struct request_queue *queue, struct bio *bio)
+{
+ struct driver_data *dd = queue->queuedata;
+ struct scatterlist *sg;
+ struct bio_vec *bvec;
+ int nents = 0;
+ int tag = 0;
+
+ if (unlikely(!bio_has_data(bio))) {
+ blk_queue_flush(queue, 0);
+ bio_endio(bio, 0);
+ return;
+ }
+
+ if (unlikely(atomic_read(&dd->eh_active))) {
+ bio_endio(bio, -EBUSY);
+ return;
+ }
+
+ sg = mtip_hw_get_scatterlist(dd, &tag);
+ if (likely(sg != NULL)) {
+ blk_queue_bounce(queue, &bio);
+
+ if (unlikely((bio)->bi_vcnt > MTIP_MAX_SG)) {
+ dev_warn(&dd->pdev->dev,
+ "Maximum number of SGL entries exceeded");
+ bio_io_error(bio);
+ mtip_hw_release_scatterlist(dd, tag);
+ return;
+ }
+
+ /* Create the scatter list for this bio. */
+ bio_for_each_segment(bvec, bio, nents) {
+ sg_set_page(&sg[nents],
+ bvec->bv_page,
+ bvec->bv_len,
+ bvec->bv_offset);
+ }
+
+ /* Issue the read/write. */
+ mtip_hw_submit_io(dd,
+ bio->bi_sector,
+ bio_sectors(bio),
+ nents,
+ tag,
+ bio_endio,
+ bio,
+ bio->bi_rw & REQ_FLUSH,
+ bio_data_dir(bio));
+ } else {
+ bio_io_error(bio);
+ }
+}
+
+/*
+ * Block layer initialization function.
+ *
+ * This function is called once by the PCI layer for each P320
+ * device that is connected to the system.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0 on success else an error code.
+ */
+static int mtip_block_initialize(struct driver_data *dd)
+{
+ int rv = 0;
+ sector_t capacity;
+ unsigned int index = 0;
+ struct kobject *kobj;
+
+ /* Initialize the protocol layer. */
+ rv = mtip_hw_init(dd);
+ if (rv < 0) {
+ dev_err(&dd->pdev->dev,
+ "Protocol layer initialization failed\n");
+ rv = -EINVAL;
+ goto protocol_init_error;
+ }
+
+ /* Allocate the request queue. */
+ dd->queue = blk_alloc_queue(GFP_KERNEL);
+ if (dd->queue == NULL) {
+ dev_err(&dd->pdev->dev,
+ "Unable to allocate request queue\n");
+ rv = -ENOMEM;
+ goto block_queue_alloc_init_error;
+ }
+
+ /* Attach our request function to the request queue. */
+ blk_queue_make_request(dd->queue, mtip_make_request);
+
+ /* Set device limits. */
+ set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
+ blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
+ blk_queue_physical_block_size(dd->queue, 4096);
+ blk_queue_io_min(dd->queue, 4096);
+
+ dd->disk = alloc_disk(MTIP_MAX_MINORS);
+ if (dd->disk == NULL) {
+ dev_err(&dd->pdev->dev,
+ "Unable to allocate gendisk structure\n");
+ rv = -ENOMEM;
+ goto alloc_disk_error;
+ }
+
+ /* Generate the disk name, implemented same as in sd.c */
+ do {
+ if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL)) {
+ rv = -ENOMEM;
+ goto ida_get_error;
+ }
+
+ spin_lock(&rssd_index_lock);
+ rv = ida_get_new(&rssd_index_ida, &index);
+ spin_unlock(&rssd_index_lock);
+ } while (rv == -EAGAIN);
+
+ if (rv)
+ goto ida_get_error;
+
+ rv = rssd_disk_name_format("rssd",
+ index,
+ dd->disk->disk_name,
+ DISK_NAME_LEN);
+ if (rv)
+ goto disk_index_error;
+
+ dd->disk->driverfs_dev = &dd->pdev->dev;
+ dd->disk->major = dd->major;
+ dd->disk->first_minor = dd->instance * MTIP_MAX_MINORS;
+ dd->disk->fops = &mtip_block_ops;
+ dd->disk->queue = dd->queue;
+ dd->disk->private_data = dd;
+ dd->queue->queuedata = dd;
+ dd->index = index;
+
+ /* Set the capacity of the device in 512 byte sectors. */
+ if (!(mtip_hw_get_capacity(dd, &capacity))) {
+ dev_warn(&dd->pdev->dev,
+ "Could not read drive capacity\n");
+ rv = -EIO;
+ goto read_capacity_error;
+ }
+ set_capacity(dd->disk, capacity);
+
+ /* Enable the block device and add it to /dev */
+ add_disk(dd->disk);
+
+ /*
+ * Now that the disk is active, initialize any sysfs attributes
+ * managed by the protocol layer.
+ */
+ kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
+ if (kobj) {
+ mtip_hw_sysfs_init(dd, kobj);
+ kobject_put(kobj);
+ }
+
+ return rv;
+
+read_capacity_error:
+ /*
+ * Delete our gendisk structure. This also removes the device
+ * from /dev
+ */
+ del_gendisk(dd->disk);
+
+disk_index_error:
+ spin_lock(&rssd_index_lock);
+ ida_remove(&rssd_index_ida, index);
+ spin_unlock(&rssd_index_lock);
+
+ida_get_error:
+ put_disk(dd->disk);
+
+alloc_disk_error:
+ blk_cleanup_queue(dd->queue);
+
+block_queue_alloc_init_error:
+ /* De-initialize the protocol layer. */
+ mtip_hw_exit(dd);
+
+protocol_init_error:
+ return rv;
+}
+
+/*
+ * Block layer deinitialization function.
+ *
+ * Called by the PCI layer as each P320 device is removed.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0
+ */
+static int mtip_block_remove(struct driver_data *dd)
+{
+ struct kobject *kobj;
+ /* Clean up the sysfs attributes managed by the protocol layer. */
+ kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
+ if (kobj) {
+ mtip_hw_sysfs_exit(dd, kobj);
+ kobject_put(kobj);
+ }
+
+ /*
+ * Delete our gendisk structure. This also removes the device
+ * from /dev
+ */
+ del_gendisk(dd->disk);
+ blk_cleanup_queue(dd->queue);
+ dd->disk = NULL;
+ dd->queue = NULL;
+
+ /* De-initialize the protocol layer. */
+ mtip_hw_exit(dd);
+
+ return 0;
+}
+
+/*
+ * Function called by the PCI layer when just before the
+ * machine shuts down.
+ *
+ * If a protocol layer shutdown function is present it will be called
+ * by this function.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0
+ */
+static int mtip_block_shutdown(struct driver_data *dd)
+{
+ dev_info(&dd->pdev->dev,
+ "Shutting down %s ...\n", dd->disk->disk_name);
+
+ /* Delete our gendisk structure, and cleanup the blk queue. */
+ del_gendisk(dd->disk);
+ blk_cleanup_queue(dd->queue);
+ dd->disk = NULL;
+ dd->queue = NULL;
+
+ mtip_hw_shutdown(dd);
+ return 0;
+}
+
+static int mtip_block_suspend(struct driver_data *dd)
+{
+ dev_info(&dd->pdev->dev,
+ "Suspending %s ...\n", dd->disk->disk_name);
+ mtip_hw_suspend(dd);
+ return 0;
+}
+
+static int mtip_block_resume(struct driver_data *dd)
+{
+ dev_info(&dd->pdev->dev, "Resuming %s ...\n",
+ dd->disk->disk_name);
+ mtip_hw_resume(dd);
+ return 0;
+}
+
+/*
+ * Called for each supported PCI device detected.
+ *
+ * This function allocates the private data structure, enables the
+ * PCI device and then calls the block layer initialization function.
+ *
+ * return value
+ * 0 on success else an error code.
+ */
+static int mtip_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rv = 0;
+ struct driver_data *dd = NULL;
+
+ /* Allocate memory for this devices private data. */
+ dd = kzalloc(sizeof(struct driver_data), GFP_KERNEL);
+ if (dd == NULL) {
+ dev_err(&pdev->dev,
+ "Unable to allocate memory for driver data\n");
+ return -ENOMEM;
+ }
+
+ /* Set the atomic variable as 1 in case of SRSI */
+ atomic_set(&dd->drv_cleanup_done, true);
+
+ atomic_set(&dd->resumeflag, false);
+ atomic_set(&dd->eh_active, 0);
+
+ /* Attach the private data to this PCI device. */
+ pci_set_drvdata(pdev, dd);
+
+ rv = pcim_enable_device(pdev);
+ if (rv < 0) {
+ dev_err(&pdev->dev, "Unable to enable device\n");
+ goto iomap_err;
+ }
+
+ /* Map BAR5 to memory. */
+ rv = pcim_iomap_regions(pdev, 1 << MTIP_ABAR, MTIP_DRV_NAME);
+ if (rv < 0) {
+ dev_err(&pdev->dev, "Unable to map regions\n");
+ goto iomap_err;
+ }
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ rv = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+
+ if (rv) {
+ rv = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ if (rv) {
+ dev_warn(&pdev->dev,
+ "64-bit DMA enable failed\n");
+ goto setmask_err;
+ }
+ }
+ }
+
+ pci_set_master(pdev);
+
+ if (pci_enable_msi(pdev)) {
+ dev_warn(&pdev->dev,
+ "Unable to enable MSI interrupt.\n");
+ goto block_initialize_err;
+ }
+
+ /* Copy the info we may need later into the private data structure. */
+ dd->major = mtip_major;
+ dd->protocol = ent->driver_data;
+ dd->instance = instance;
+ dd->pdev = pdev;
+
+ /* Initialize the block layer. */
+ rv = mtip_block_initialize(dd);
+ if (rv < 0) {
+ dev_err(&pdev->dev,
+ "Unable to initialize block layer\n");
+ goto block_initialize_err;
+ }
+
+ /*
+ * Increment the instance count so that each device has a unique
+ * instance number.
+ */
+ instance++;
+
+ goto done;
+
+block_initialize_err:
+ pci_disable_msi(pdev);
+
+setmask_err:
+ pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
+
+iomap_err:
+ kfree(dd);
+ pci_set_drvdata(pdev, NULL);
+ return rv;
+done:
+ /* Set the atomic variable as 0 in case of SRSI */
+ atomic_set(&dd->drv_cleanup_done, false);
+
+ return rv;
+}
+
+/*
+ * Called for each probed device when the device is removed or the
+ * driver is unloaded.
+ *
+ * return value
+ * None
+ */
+static void mtip_pci_remove(struct pci_dev *pdev)
+{
+ struct driver_data *dd = pci_get_drvdata(pdev);
+ int counter = 0;
+
+ if (mtip_check_surprise_removal(pdev)) {
+ while (atomic_read(&dd->drv_cleanup_done) == false) {
+ counter++;
+ msleep(20);
+ if (counter == 10) {
+ /* Cleanup the outstanding commands */
+ mtip_command_cleanup(dd);
+ break;
+ }
+ }
+ }
+ /* Set the atomic variable as 1 in case of SRSI */
+ atomic_set(&dd->drv_cleanup_done, true);
+
+ /* Clean up the block layer. */
+ mtip_block_remove(dd);
+
+ pci_disable_msi(pdev);
+
+ kfree(dd);
+ pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
+}
+
+/*
+ * Called for each probed device when the device is suspended.
+ *
+ * return value
+ * 0 Success
+ * <0 Error
+ */
+static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+ int rv = 0;
+ struct driver_data *dd = pci_get_drvdata(pdev);
+
+ if (!dd) {
+ dev_err(&pdev->dev,
+ "Driver private datastructure is NULL\n");
+ return -EFAULT;
+ }
+
+ atomic_set(&dd->resumeflag, true);
+
+ /* Disable ports & interrupts then send standby immediate */
+ rv = mtip_block_suspend(dd);
+ if (rv < 0) {
+ dev_err(&pdev->dev,
+ "Failed to suspend controller\n");
+ return rv;
+ }
+
+ /*
+ * Save the pci config space to pdev structure &
+ * disable the device
+ */
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ /* Move to Low power state*/
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return rv;
+}
+
+/*
+ * Called for each probed device when the device is resumed.
+ *
+ * return value
+ * 0 Success
+ * <0 Error
+ */
+static int mtip_pci_resume(struct pci_dev *pdev)
+{
+ int rv = 0;
+ struct driver_data *dd;
+
+ dd = pci_get_drvdata(pdev);
+ if (!dd) {
+ dev_err(&pdev->dev,
+ "Driver private datastructure is NULL\n");
+ return -EFAULT;
+ }
+
+ /* Move the device to active State */
+ pci_set_power_state(pdev, PCI_D0);
+
+ /* Restore PCI configuration space */
+ pci_restore_state(pdev);
+
+ /* Enable the PCI device*/
+ rv = pcim_enable_device(pdev);
+ if (rv < 0) {
+ dev_err(&pdev->dev,
+ "Failed to enable card during resume\n");
+ goto err;
+ }
+ pci_set_master(pdev);
+
+ /*
+ * Calls mtip_hba_reset(), mtip_init_port() and mtip_start_port(),
+ * then enables interrupts
+ */
+ rv = mtip_block_resume(dd);
+ if (rv < 0)
+ dev_err(&pdev->dev, "Unable to resume\n");
+
+err:
+ atomic_set(&dd->resumeflag, false);
+
+ return rv;
+}
+
+/*
+ * Shutdown routine
+ *
+ * return value
+ * None
+ */
+static void mtip_pci_shutdown(struct pci_dev *pdev)
+{
+ struct driver_data *dd = pci_get_drvdata(pdev);
+ if (dd)
+ mtip_block_shutdown(dd);
+}
+
+/* Table of device ids supported by this driver. */
+static DEFINE_PCI_DEVICE_TABLE(mtip_pci_tbl) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320_DEVICE_ID) },
+ { 0 }
+};
+
+/* Structure that describes the PCI driver functions. */
+static struct pci_driver mtip_pci_driver = {
+ .name = MTIP_DRV_NAME,
+ .id_table = mtip_pci_tbl,
+ .probe = mtip_pci_probe,
+ .remove = mtip_pci_remove,
+ .suspend = mtip_pci_suspend,
+ .resume = mtip_pci_resume,
+ .shutdown = mtip_pci_shutdown,
+};
+
+MODULE_DEVICE_TABLE(pci, mtip_pci_tbl);
+
+/*
+ * Module initialization function.
+ *
+ * Called once when the module is loaded. This function allocates a major
+ * block device number to the Cyclone devices and registers the PCI layer
+ * of the driver.
+ *
+ * Return value
+ * 0 on success else error code.
+ */
+static int __init mtip_init(void)
+{
+ printk(KERN_INFO MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
+
+ /* Allocate a major block device number to use with this driver. */
+ mtip_major = register_blkdev(0, MTIP_DRV_NAME);
+ if (mtip_major < 0) {
+ printk(KERN_ERR "Unable to register block device (%d)\n",
+ mtip_major);
+ return -EBUSY;
+ }
+
+ /* Register our PCI operations. */
+ return pci_register_driver(&mtip_pci_driver);
+}
+
+/*
+ * Module de-initialization function.
+ *
+ * Called once when the module is unloaded. This function deallocates
+ * the major block device number allocated by mtip_init() and
+ * unregisters the PCI layer of the driver.
+ *
+ * Return value
+ * none
+ */
+static void __exit mtip_exit(void)
+{
+ /* Release the allocated major block device number. */
+ unregister_blkdev(mtip_major, MTIP_DRV_NAME);
+
+ /* Unregister the PCI driver. */
+ pci_unregister_driver(&mtip_pci_driver);
+}
+
+MODULE_AUTHOR("Micron Technology, Inc");
+MODULE_DESCRIPTION("Micron RealSSD PCIe Block Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MTIP_DRV_VERSION);
+
+module_init(mtip_init);
+module_exit(mtip_exit);
#include <linux/kernel.h>
#include <linux/cpuidle.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>
+ #include <linux/module.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/basic_mmio_gpio.h>
#include <linux/of.h>
#include <linux/of_device.h>
+ #include <linux/module.h>
#include <asm-generic/bug.h>
+#define irq_to_gpio(irq) ((irq) - MXC_GPIO_IRQ_START)
+
enum mxc_gpio_hwtype {
IMX1_GPIO, /* runs on i.mx1 */
IMX21_GPIO, /* runs on i.mx21 and i.mx27 */
--- /dev/null
- #include <linux/module.h>
+/*
+ * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-btree.h"
+#include "dm-btree-internal.h"
+#include "dm-transaction-manager.h"
+
++#include <linux/export.h>
+
+/*
+ * Removing an entry from a btree
+ * ==============================
+ *
+ * A very important constraint for our btree is that no node, except the
+ * root, may have fewer than a certain number of entries.
+ * (MIN_ENTRIES <= nr_entries <= MAX_ENTRIES).
+ *
+ * Ensuring this is complicated by the way we want to only ever hold the
+ * locks on 2 nodes concurrently, and only change nodes in a top to bottom
+ * fashion.
+ *
+ * Each node may have a left or right sibling. When descending the spine,
+ * if a node contains only MIN_ENTRIES then we try to increase this to at
+ * least MIN_ENTRIES + 1. We do this in the following ways:
+ *
+ * [A] No siblings => this can only happen if the node is the root, in which
+ * case we copy the child's contents over the root.
+ *
+ * [B] No left sibling
+ * ==> rebalance(node, right sibling)
+ *
+ * [C] No right sibling
+ * ==> rebalance(left sibling, node)
+ *
+ * [D] Both siblings, total_entries(left, node, right) <= DEL_THRESHOLD
+ * ==> delete the node, adding its contents to left and right
+ *
+ * [E] Both siblings, total_entries(left, node, right) > DEL_THRESHOLD
+ * ==> rebalance(left, node, right)
+ *
+ * After these operations it's possible that our original node no
+ * longer contains the desired subtree. For this reason the rebalancing
+ * is performed on the children of the current node. This also avoids
+ * having a special case for the root.
+ *
+ * Once this rebalancing has occurred we can then step into the child
+ * node (for internal nodes), or delete the entry (for leaf nodes).
+ */
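+
+/*
+ * In the code below, cases [B] and [C] are handled by rebalance2(),
+ * which merges or redistributes a pair of children, and cases [D] and
+ * [E] by rebalance3(); rebalance_children() chooses between them based
+ * on which siblings exist, and handles case [A] itself when the node
+ * has a single entry.
+ */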
+
+/*
+ * Some little utilities for moving node data around.
+ */
+static void node_shift(struct node *n, int shift)
+{
+ uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
+
+ if (shift < 0) {
+ shift = -shift;
+ memmove(key_ptr(n, 0),
+ key_ptr(n, shift),
+ (nr_entries - shift) * sizeof(__le64));
+ memmove(value_ptr(n, 0, sizeof(__le64)),
+ value_ptr(n, shift, sizeof(__le64)),
+ (nr_entries - shift) * sizeof(__le64));
+ } else {
+ memmove(key_ptr(n, shift),
+ key_ptr(n, 0),
+ nr_entries * sizeof(__le64));
+ memmove(value_ptr(n, shift, sizeof(__le64)),
+ value_ptr(n, 0, sizeof(__le64)),
+ nr_entries * sizeof(__le64));
+ }
+}
+
+static void node_copy(struct node *left, struct node *right, int shift)
+{
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+
+ if (shift < 0) {
+ shift = -shift;
+ memcpy(key_ptr(left, nr_left),
+ key_ptr(right, 0),
+ shift * sizeof(__le64));
+ memcpy(value_ptr(left, nr_left, sizeof(__le64)),
+ value_ptr(right, 0, sizeof(__le64)),
+ shift * sizeof(__le64));
+ } else {
+ memcpy(key_ptr(right, 0),
+ key_ptr(left, nr_left - shift),
+ shift * sizeof(__le64));
+ memcpy(value_ptr(right, 0, sizeof(__le64)),
+ value_ptr(left, nr_left - shift, sizeof(__le64)),
+ shift * sizeof(__le64));
+ }
+}
+
+/*
+ * Delete a specific entry from a leaf node.
+ */
+static void delete_at(struct node *n, unsigned index, size_t value_size)
+{
+ unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
+ unsigned nr_to_copy = nr_entries - (index + 1);
+
+ if (nr_to_copy) {
+ memmove(key_ptr(n, index),
+ key_ptr(n, index + 1),
+ nr_to_copy * sizeof(__le64));
+
+ memmove(value_ptr(n, index, value_size),
+ value_ptr(n, index + 1, value_size),
+ nr_to_copy * value_size);
+ }
+
+ n->header.nr_entries = cpu_to_le32(nr_entries - 1);
+}
+
+static unsigned del_threshold(struct node *n)
+{
+ return le32_to_cpu(n->header.max_entries) / 3;
+}
+
+static unsigned merge_threshold(struct node *n)
+{
+ /*
+ * The extra one is because we know we're potentially going to
+ * delete an entry.
+ */
+ return 2 * (le32_to_cpu(n->header.max_entries) / 3) + 1;
+}
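+
+/*
+ * For example, with max_entries = 100: del_threshold() is 33, so a
+ * child is only rebalanced once it has dropped to 33 entries or fewer,
+ * and merge_threshold() is 67, so two such siblings (at most 66
+ * entries between them) can always be merged into a single node.
+ */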
+
+struct child {
+ unsigned index;
+ struct dm_block *block;
+ struct node *n;
+};
+
+static struct dm_btree_value_type le64_type = {
+ .context = NULL,
+ .size = sizeof(__le64),
+ .inc = NULL,
+ .dec = NULL,
+ .equal = NULL
+};
+
+static int init_child(struct dm_btree_info *info, struct node *parent,
+ unsigned index, struct child *result)
+{
+ int r, inc;
+ dm_block_t root;
+
+ result->index = index;
+ root = value64(parent, index);
+
+ r = dm_tm_shadow_block(info->tm, root, &btree_node_validator,
+ &result->block, &inc);
+ if (r)
+ return r;
+
+ result->n = dm_block_data(result->block);
+
+ if (inc)
+ inc_children(info->tm, result->n, &le64_type);
+
+ return 0;
+}
+
+static int exit_child(struct dm_btree_info *info, struct child *c)
+{
+ return dm_tm_unlock(info->tm, c->block);
+}
+
+static void shift(struct node *left, struct node *right, int count)
+{
+ if (!count)
+ return;
+
+ if (count > 0) {
+ node_shift(right, count);
+ node_copy(left, right, count);
+ } else {
+ node_copy(left, right, count);
+ node_shift(right, count);
+ }
+
+ left->header.nr_entries =
+ cpu_to_le32(le32_to_cpu(left->header.nr_entries) - count);
+
+ right->header.nr_entries =
+ cpu_to_le32(le32_to_cpu(right->header.nr_entries) + count);
+}
+
+static void __rebalance2(struct dm_btree_info *info, struct node *parent,
+ struct child *l, struct child *r)
+{
+ struct node *left = l->n;
+ struct node *right = r->n;
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+
+ if (nr_left + nr_right <= merge_threshold(left)) {
+ /*
+ * Merge
+ */
+ node_copy(left, right, -nr_right);
+ left->header.nr_entries = cpu_to_le32(nr_left + nr_right);
+
+ *((__le64 *) value_ptr(parent, l->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(l->block));
+ delete_at(parent, r->index, sizeof(__le64));
+
+ /*
+		 * We need to decrement the right block, but not its
+		 * children, since they're still referenced by left.
+ */
+ dm_tm_dec(info->tm, dm_block_location(r->block));
+ } else {
+ /*
+ * Rebalance.
+ */
+ unsigned target_left = (nr_left + nr_right) / 2;
+
+ shift(left, right, nr_left - target_left);
+ *((__le64 *) value_ptr(parent, l->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(l->block));
+ *((__le64 *) value_ptr(parent, r->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(r->block));
+ *key_ptr(parent, r->index) = right->keys[0];
+ }
+}
+
+static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
+ unsigned left_index)
+{
+ int r;
+ struct node *parent;
+ struct child left, right;
+
+ parent = dm_block_data(shadow_current(s));
+
+ r = init_child(info, parent, left_index, &left);
+ if (r)
+ return r;
+
+ r = init_child(info, parent, left_index + 1, &right);
+ if (r) {
+ exit_child(info, &left);
+ return r;
+ }
+
+ __rebalance2(info, parent, &left, &right);
+
+ r = exit_child(info, &left);
+ if (r) {
+ exit_child(info, &right);
+ return r;
+ }
+
+ r = exit_child(info, &right);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void __rebalance3(struct dm_btree_info *info, struct node *parent,
+ struct child *l, struct child *c, struct child *r)
+{
+ struct node *left = l->n;
+ struct node *center = c->n;
+ struct node *right = r->n;
+
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
+ uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+ uint32_t max_entries = le32_to_cpu(left->header.max_entries);
+
+ unsigned target;
+
+ if (((nr_left + nr_center + nr_right) / 2) < merge_threshold(center)) {
+ /*
+ * Delete center node:
+ *
+ * We dump as many entries from center as possible into
+ * left, then the rest in right, then rebalance2. This
+ * wastes some cpu, but I want something simple atm.
+ */
+ unsigned shift = min(max_entries - nr_left, nr_center);
+
+ node_copy(left, center, -shift);
+ left->header.nr_entries = cpu_to_le32(nr_left + shift);
+
+ if (shift != nr_center) {
+ shift = nr_center - shift;
+ node_shift(right, shift);
+ node_copy(center, right, shift);
+ right->header.nr_entries = cpu_to_le32(nr_right + shift);
+ }
+
+ *((__le64 *) value_ptr(parent, l->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(l->block));
+ *((__le64 *) value_ptr(parent, r->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(r->block));
+ *key_ptr(parent, r->index) = right->keys[0];
+
+ delete_at(parent, c->index, sizeof(__le64));
+ r->index--;
+
+ dm_tm_dec(info->tm, dm_block_location(c->block));
+ __rebalance2(info, parent, l, r);
+
+ return;
+ }
+
+ /*
+ * Rebalance
+ */
+ target = (nr_left + nr_center + nr_right) / 3;
+ BUG_ON(target == nr_center);
+
+ /*
+ * Adjust the left node
+ */
+ shift(left, center, nr_left - target);
+
+ /*
+ * Adjust the right node
+ */
+ shift(center, right, target - nr_right);
+
+ *((__le64 *) value_ptr(parent, l->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(l->block));
+ *((__le64 *) value_ptr(parent, c->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(c->block));
+ *((__le64 *) value_ptr(parent, r->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(r->block));
+
+ *key_ptr(parent, c->index) = center->keys[0];
+ *key_ptr(parent, r->index) = right->keys[0];
+}
+
+static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
+ unsigned left_index)
+{
+ int r;
+ struct node *parent = dm_block_data(shadow_current(s));
+ struct child left, center, right;
+
+ /*
+ * FIXME: fill out an array?
+ */
+ r = init_child(info, parent, left_index, &left);
+ if (r)
+ return r;
+
+	r = init_child(info, parent, left_index + 1, &center);
+ if (r) {
+ exit_child(info, &left);
+ return r;
+ }
+
+ r = init_child(info, parent, left_index + 2, &right);
+ if (r) {
+ exit_child(info, &left);
+		exit_child(info, &center);
+ return r;
+ }
+
+	__rebalance3(info, parent, &left, &center, &right);
+
+ r = exit_child(info, &left);
+ if (r) {
+		exit_child(info, &center);
+ exit_child(info, &right);
+ return r;
+ }
+
+	r = exit_child(info, &center);
+ if (r) {
+ exit_child(info, &right);
+ return r;
+ }
+
+ r = exit_child(info, &right);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int get_nr_entries(struct dm_transaction_manager *tm,
+ dm_block_t b, uint32_t *result)
+{
+ int r;
+ struct dm_block *block;
+ struct node *n;
+
+ r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
+ if (r)
+ return r;
+
+ n = dm_block_data(block);
+ *result = le32_to_cpu(n->header.nr_entries);
+
+ return dm_tm_unlock(tm, block);
+}
+
+static int rebalance_children(struct shadow_spine *s,
+ struct dm_btree_info *info, uint64_t key)
+{
+ int i, r, has_left_sibling, has_right_sibling;
+ uint32_t child_entries;
+ struct node *n;
+
+ n = dm_block_data(shadow_current(s));
+
+ if (le32_to_cpu(n->header.nr_entries) == 1) {
+ struct dm_block *child;
+ dm_block_t b = value64(n, 0);
+
+ r = dm_tm_read_lock(info->tm, b, &btree_node_validator, &child);
+ if (r)
+ return r;
+
+ memcpy(n, dm_block_data(child),
+ dm_bm_block_size(dm_tm_get_bm(info->tm)));
+ r = dm_tm_unlock(info->tm, child);
+ dm_tm_dec(info->tm, dm_block_location(child));
+
+ return r;
+ }
+
+ i = lower_bound(n, key);
+ if (i < 0)
+ return -ENODATA;
+
+ r = get_nr_entries(info->tm, value64(n, i), &child_entries);
+ if (r)
+ return r;
+
+ if (child_entries > del_threshold(n))
+ return 0;
+
+ has_left_sibling = i > 0 ? 1 : 0;
+ has_right_sibling =
+ (i >= (le32_to_cpu(n->header.nr_entries) - 1)) ? 0 : 1;
+
+ if (!has_left_sibling)
+ r = rebalance2(s, info, i);
+
+ else if (!has_right_sibling)
+ r = rebalance2(s, info, i - 1);
+
+ else
+ r = rebalance3(s, info, i - 1);
+
+ return r;
+}
+
+static int do_leaf(struct node *n, uint64_t key, unsigned *index)
+{
+ int i = lower_bound(n, key);
+
+ if ((i < 0) ||
+ (i >= le32_to_cpu(n->header.nr_entries)) ||
+ (le64_to_cpu(n->keys[i]) != key))
+ return -ENODATA;
+
+ *index = i;
+
+ return 0;
+}
+
+/*
+ * Prepares for removal from one level of the hierarchy. The caller must
+ * actually call delete_at() to remove the entry at index.
+ */
+static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
+ struct dm_btree_value_type *vt, dm_block_t root,
+ uint64_t key, unsigned *index)
+{
+ int i = *index, inc, r;
+ struct node *n;
+
+ for (;;) {
+ r = shadow_step(s, root, vt, &inc);
+ if (r < 0)
+ break;
+
+ /*
+ * We have to patch up the parent node, ugly, but I don't
+ * see a way to do this automatically as part of the spine
+ * op.
+ */
+ if (shadow_has_parent(s)) {
+ __le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
+ memcpy(value_ptr(dm_block_data(shadow_parent(s)), i, sizeof(uint64_t)),
+ &location, sizeof(__le64));
+ }
+
+ n = dm_block_data(shadow_current(s));
+ if (inc)
+ inc_children(info->tm, n, vt);
+
+ if (le32_to_cpu(n->header.flags) & LEAF_NODE)
+ return do_leaf(n, key, index);
+
+ r = rebalance_children(s, info, key);
+ if (r)
+ break;
+
+ n = dm_block_data(shadow_current(s));
+ if (le32_to_cpu(n->header.flags) & LEAF_NODE)
+ return do_leaf(n, key, index);
+
+ i = lower_bound(n, key);
+
+ /*
+ * We know the key is present, or else
+ * rebalance_children would have returned
+ * -ENODATA
+ */
+ root = value64(n, i);
+ }
+
+ return r;
+}
+
+int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, dm_block_t *new_root)
+{
+ unsigned level, last_level = info->levels - 1;
+ int index = 0, r = 0;
+ struct shadow_spine spine;
+ struct node *n;
+
+ init_shadow_spine(&spine, info);
+ for (level = 0; level < info->levels; level++) {
+ r = remove_raw(&spine, info,
+ (level == last_level ?
+ &info->value_type : &le64_type),
+ root, keys[level], (unsigned *)&index);
+ if (r < 0)
+ break;
+
+ n = dm_block_data(shadow_current(&spine));
+ if (level != last_level) {
+ root = value64(n, index);
+ continue;
+ }
+
+ BUG_ON(index < 0 || index >= le32_to_cpu(n->header.nr_entries));
+
+ if (info->value_type.dec)
+ info->value_type.dec(info->value_type.context,
+ value_ptr(n, index, info->value_type.size));
+
+ delete_at(n, index, info->value_type.size);
+
+ r = 0;
+ *new_root = shadow_root(&spine);
+ }
+
+ exit_shadow_spine(&spine);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_btree_remove);
--- /dev/null
- #include <linux/module.h>
+/*
+ * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-btree-internal.h"
+#include "dm-space-map.h"
+#include "dm-transaction-manager.h"
+
++#include <linux/export.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "btree"
+
+/*----------------------------------------------------------------
+ * Array manipulation
+ *--------------------------------------------------------------*/
+static void memcpy_disk(void *dest, const void *src, size_t len)
+ __dm_written_to_disk(src)
+{
+ memcpy(dest, src, len);
+ __dm_unbless_for_disk(src);
+}
+
+static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
+ unsigned index, void *elt)
+ __dm_written_to_disk(elt)
+{
+ if (index < nr_elts)
+ memmove(base + (elt_size * (index + 1)),
+ base + (elt_size * index),
+ (nr_elts - index) * elt_size);
+
+ memcpy_disk(base + (elt_size * index), elt, elt_size);
+}
+
+/*----------------------------------------------------------------*/
+
+/* makes the assumption that no two keys are the same. */
+static int bsearch(struct node *n, uint64_t key, int want_hi)
+{
+ int lo = -1, hi = le32_to_cpu(n->header.nr_entries);
+
+ while (hi - lo > 1) {
+ int mid = lo + ((hi - lo) / 2);
+ uint64_t mid_key = le64_to_cpu(n->keys[mid]);
+
+ if (mid_key == key)
+ return mid;
+
+ if (mid_key < key)
+ lo = mid;
+ else
+ hi = mid;
+ }
+
+ return want_hi ? hi : lo;
+}
+
+int lower_bound(struct node *n, uint64_t key)
+{
+ return bsearch(n, key, 0);
+}
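+
+/*
+ * For example, given keys [10, 20, 30]: lower_bound(n, 20) returns 1,
+ * lower_bound(n, 25) also returns 1 (the last entry <= the key), and
+ * lower_bound(n, 5) returns -1 because no entry is <= the key.
+ */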
+
+void inc_children(struct dm_transaction_manager *tm, struct node *n,
+ struct dm_btree_value_type *vt)
+{
+ unsigned i;
+ uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
+
+ if (le32_to_cpu(n->header.flags) & INTERNAL_NODE)
+ for (i = 0; i < nr_entries; i++)
+ dm_tm_inc(tm, value64(n, i));
+ else if (vt->inc)
+ for (i = 0; i < nr_entries; i++)
+ vt->inc(vt->context,
+ value_ptr(n, i, vt->size));
+}
+
+static int insert_at(size_t value_size, struct node *node, unsigned index,
+ uint64_t key, void *value)
+ __dm_written_to_disk(value)
+{
+ uint32_t nr_entries = le32_to_cpu(node->header.nr_entries);
+ __le64 key_le = cpu_to_le64(key);
+
+ if (index > nr_entries ||
+ index >= le32_to_cpu(node->header.max_entries)) {
+ DMERR("too many entries in btree node for insert");
+ __dm_unbless_for_disk(value);
+ return -ENOMEM;
+ }
+
+ __dm_bless_for_disk(&key_le);
+
+ array_insert(node->keys, sizeof(*node->keys), nr_entries, index, &key_le);
+ array_insert(value_base(node), value_size, nr_entries, index, value);
+ node->header.nr_entries = cpu_to_le32(nr_entries + 1);
+
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * We want 3n entries (for some n). This works more nicely for repeated
+ * insert/remove loops than (2n + 1).
+ */
+static uint32_t calc_max_entries(size_t value_size, size_t block_size)
+{
+ uint32_t total, n;
+ size_t elt_size = sizeof(uint64_t) + value_size; /* key + value */
+
+ block_size -= sizeof(struct node_header);
+ total = block_size / elt_size;
+ n = total / 3; /* rounds down */
+
+ return 3 * n;
+}
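+
+/*
+ * For example, with a 4096 byte block, 8 byte values and (say) a
+ * 32 byte node header: elt_size = 16, total = 4064 / 16 = 254 and
+ * n = 84, giving 252 entries per node.
+ */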
+
+int dm_btree_create(struct dm_btree_info *info, dm_block_t *root)
+{
+ int r;
+ struct dm_block *b;
+ struct node *n;
+ size_t block_size;
+ uint32_t max_entries;
+
+ r = new_block(info, &b);
+ if (r < 0)
+ return r;
+
+ block_size = dm_bm_block_size(dm_tm_get_bm(info->tm));
+ max_entries = calc_max_entries(info->value_type.size, block_size);
+
+ n = dm_block_data(b);
+ memset(n, 0, block_size);
+ n->header.flags = cpu_to_le32(LEAF_NODE);
+ n->header.nr_entries = cpu_to_le32(0);
+ n->header.max_entries = cpu_to_le32(max_entries);
+ n->header.value_size = cpu_to_le32(info->value_type.size);
+
+ *root = dm_block_location(b);
+
+ return unlock_block(info, b);
+}
+EXPORT_SYMBOL_GPL(dm_btree_create);
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Deletion uses a recursive algorithm; since we have limited stack
+ * space, we explicitly manage our own stack on the heap.
+ */
+#define MAX_SPINE_DEPTH 64
+struct frame {
+ struct dm_block *b;
+ struct node *n;
+ unsigned level;
+ unsigned nr_children;
+ unsigned current_child;
+};
+
+struct del_stack {
+ struct dm_transaction_manager *tm;
+ int top;
+ struct frame spine[MAX_SPINE_DEPTH];
+};
+
+static int top_frame(struct del_stack *s, struct frame **f)
+{
+ if (s->top < 0) {
+ DMERR("btree deletion stack empty");
+ return -EINVAL;
+ }
+
+ *f = s->spine + s->top;
+
+ return 0;
+}
+
+static int unprocessed_frames(struct del_stack *s)
+{
+ return s->top >= 0;
+}
+
+static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
+{
+ int r;
+ uint32_t ref_count;
+
+ if (s->top >= MAX_SPINE_DEPTH - 1) {
+ DMERR("btree deletion stack out of memory");
+ return -ENOMEM;
+ }
+
+ r = dm_tm_ref(s->tm, b, &ref_count);
+ if (r)
+ return r;
+
+ if (ref_count > 1)
+ /*
+ * This is a shared node, so we can just decrement its
+ * reference counter and leave the children.
+ */
+ dm_tm_dec(s->tm, b);
+
+ else {
+ struct frame *f = s->spine + ++s->top;
+
+ r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b);
+ if (r) {
+ s->top--;
+ return r;
+ }
+
+ f->n = dm_block_data(f->b);
+ f->level = level;
+ f->nr_children = le32_to_cpu(f->n->header.nr_entries);
+ f->current_child = 0;
+ }
+
+ return 0;
+}
+
+static void pop_frame(struct del_stack *s)
+{
+ struct frame *f = s->spine + s->top--;
+
+ dm_tm_dec(s->tm, dm_block_location(f->b));
+ dm_tm_unlock(s->tm, f->b);
+}
+
+int dm_btree_destroy(struct dm_btree_info *info, dm_block_t root)
+{
+ int r;
+ struct del_stack *s;
+
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ s->tm = info->tm;
+ s->top = -1;
+
+ r = push_frame(s, root, 1);
+ if (r)
+ goto out;
+
+ while (unprocessed_frames(s)) {
+ uint32_t flags;
+ struct frame *f;
+ dm_block_t b;
+
+ r = top_frame(s, &f);
+ if (r)
+ goto out;
+
+ if (f->current_child >= f->nr_children) {
+ pop_frame(s);
+ continue;
+ }
+
+ flags = le32_to_cpu(f->n->header.flags);
+ if (flags & INTERNAL_NODE) {
+ b = value64(f->n, f->current_child);
+ f->current_child++;
+ r = push_frame(s, b, f->level);
+ if (r)
+ goto out;
+
+ } else if (f->level != (info->levels - 1)) {
+ b = value64(f->n, f->current_child);
+ f->current_child++;
+ r = push_frame(s, b, f->level + 1);
+ if (r)
+ goto out;
+
+ } else {
+ if (info->value_type.dec) {
+ unsigned i;
+
+ for (i = 0; i < f->nr_children; i++)
+ info->value_type.dec(info->value_type.context,
+ value_ptr(f->n, i, info->value_type.size));
+ }
+ f->current_child = f->nr_children;
+ }
+ }
+
+out:
+ kfree(s);
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_btree_destroy);
+
+/* FIXME: implement or remove this function before final submission. */
+int dm_btree_delete_gt(struct dm_btree_info *info, dm_block_t root, uint64_t *key,
+ dm_block_t *new_root)
+{
+ /* FIXME: implement */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_btree_delete_gt);
+
+/*----------------------------------------------------------------*/
+
+static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
+ int (*search_fn)(struct node *, uint64_t),
+ uint64_t *result_key, void *v, size_t value_size)
+{
+ int i, r;
+ uint32_t flags, nr_entries;
+
+ do {
+ r = ro_step(s, block);
+ if (r < 0)
+ return r;
+
+ i = search_fn(ro_node(s), key);
+
+ flags = le32_to_cpu(ro_node(s)->header.flags);
+ nr_entries = le32_to_cpu(ro_node(s)->header.nr_entries);
+ if (i < 0 || i >= nr_entries)
+ return -ENODATA;
+
+ if (flags & INTERNAL_NODE)
+ block = value64(ro_node(s), i);
+
+ } while (!(flags & LEAF_NODE));
+
+ *result_key = le64_to_cpu(ro_node(s)->keys[i]);
+ memcpy(v, value_ptr(ro_node(s), i, value_size), value_size);
+
+ return 0;
+}
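+
+/*
+ * A btree with info->levels > 1 is a tree of trees: at every level but
+ * the last, the 64-bit value stored in a leaf is the root block of the
+ * next level's btree. dm_btree_lookup() below therefore runs one
+ * btree_lookup_raw() per level, feeding each internal value back in as
+ * the next root.
+ */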
+
+int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, void *value_le)
+{
+ unsigned level, last_level = info->levels - 1;
+ int r = -ENODATA;
+ uint64_t rkey;
+ __le64 internal_value_le;
+ struct ro_spine spine;
+
+ init_ro_spine(&spine, info);
+ for (level = 0; level < info->levels; level++) {
+ size_t size;
+ void *value_p;
+
+ if (level == last_level) {
+ value_p = value_le;
+ size = info->value_type.size;
+
+ } else {
+ value_p = &internal_value_le;
+ size = sizeof(uint64_t);
+ }
+
+ r = btree_lookup_raw(&spine, root, keys[level],
+ lower_bound, &rkey,
+ value_p, size);
+
+ if (!r) {
+ if (rkey != keys[level]) {
+ exit_ro_spine(&spine);
+ return -ENODATA;
+ }
+ } else {
+ exit_ro_spine(&spine);
+ return r;
+ }
+
+ root = le64_to_cpu(internal_value_le);
+ }
+ exit_ro_spine(&spine);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_btree_lookup);
+
+/*
+ * Splits a node by creating a sibling node and shifting half the node's
+ * contents across. Assumes there is a parent node, and that it has room
+ * for another child.
+ *
+ * Before:
+ * +--------+
+ * | Parent |
+ * +--------+
+ * |
+ * v
+ * +----------+
+ * | A ++++++ |
+ * +----------+
+ *
+ *
+ * After:
+ * +--------+
+ * | Parent |
+ * +--------+
+ * | |
+ * v +------+
+ * +---------+ |
+ * | A* +++ | v
+ * +---------+ +-------+
+ * | B +++ |
+ * +-------+
+ *
+ * Where A* is a shadow of A.
+ */
+static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
+ unsigned parent_index, uint64_t key)
+{
+ int r;
+ size_t size;
+ unsigned nr_left, nr_right;
+ struct dm_block *left, *right, *parent;
+ struct node *ln, *rn, *pn;
+ __le64 location;
+
+ left = shadow_current(s);
+
+ r = new_block(s->info, &right);
+ if (r < 0)
+ return r;
+
+ ln = dm_block_data(left);
+ rn = dm_block_data(right);
+
+ nr_left = le32_to_cpu(ln->header.nr_entries) / 2;
+ nr_right = le32_to_cpu(ln->header.nr_entries) - nr_left;
+
+ ln->header.nr_entries = cpu_to_le32(nr_left);
+
+ rn->header.flags = ln->header.flags;
+ rn->header.nr_entries = cpu_to_le32(nr_right);
+ rn->header.max_entries = ln->header.max_entries;
+ rn->header.value_size = ln->header.value_size;
+ memcpy(rn->keys, ln->keys + nr_left, nr_right * sizeof(rn->keys[0]));
+
+ size = le32_to_cpu(ln->header.flags) & INTERNAL_NODE ?
+ sizeof(uint64_t) : s->info->value_type.size;
+ memcpy(value_ptr(rn, 0, size), value_ptr(ln, nr_left, size),
+ size * nr_right);
+
+ /*
+ * Patch up the parent
+ */
+ parent = shadow_parent(s);
+
+ pn = dm_block_data(parent);
+ location = cpu_to_le64(dm_block_location(left));
+ __dm_bless_for_disk(&location);
+ memcpy_disk(value_ptr(pn, parent_index, sizeof(__le64)),
+ &location, sizeof(__le64));
+
+ location = cpu_to_le64(dm_block_location(right));
+ __dm_bless_for_disk(&location);
+
+ r = insert_at(sizeof(__le64), pn, parent_index + 1,
+ le64_to_cpu(rn->keys[0]), &location);
+ if (r)
+ return r;
+
+ if (key < le64_to_cpu(rn->keys[0])) {
+ unlock_block(s->info, right);
+ s->nodes[1] = left;
+ } else {
+ unlock_block(s->info, left);
+ s->nodes[1] = right;
+ }
+
+ return 0;
+}
+
+/*
+ * Splits a node by creating two new children beneath the given node.
+ *
+ * Before:
+ * +----------+
+ * | A ++++++ |
+ * +----------+
+ *
+ *
+ * After:
+ * +------------+
+ * | A (shadow) |
+ * +------------+
+ * | |
+ * +------+ +----+
+ * | |
+ * v v
+ * +-------+ +-------+
+ * | B +++ | | C +++ |
+ * +-------+ +-------+
+ */
+static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
+{
+ int r;
+ size_t size;
+ unsigned nr_left, nr_right;
+ struct dm_block *left, *right, *new_parent;
+ struct node *pn, *ln, *rn;
+ __le64 val;
+
+ new_parent = shadow_current(s);
+
+ r = new_block(s->info, &left);
+ if (r < 0)
+ return r;
+
+ r = new_block(s->info, &right);
+ if (r < 0) {
+ /* FIXME: put left */
+ return r;
+ }
+
+ pn = dm_block_data(new_parent);
+ ln = dm_block_data(left);
+ rn = dm_block_data(right);
+
+ nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
+ nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
+
+ ln->header.flags = pn->header.flags;
+ ln->header.nr_entries = cpu_to_le32(nr_left);
+ ln->header.max_entries = pn->header.max_entries;
+ ln->header.value_size = pn->header.value_size;
+
+ rn->header.flags = pn->header.flags;
+ rn->header.nr_entries = cpu_to_le32(nr_right);
+ rn->header.max_entries = pn->header.max_entries;
+ rn->header.value_size = pn->header.value_size;
+
+ memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
+ memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
+
+ size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
+ sizeof(__le64) : s->info->value_type.size;
+ memcpy(value_ptr(ln, 0, size), value_ptr(pn, 0, size), nr_left * size);
+ memcpy(value_ptr(rn, 0, size), value_ptr(pn, nr_left, size),
+ nr_right * size);
+
+ /* new_parent should just point to l and r now */
+ pn->header.flags = cpu_to_le32(INTERNAL_NODE);
+ pn->header.nr_entries = cpu_to_le32(2);
+ pn->header.max_entries = cpu_to_le32(
+ calc_max_entries(sizeof(__le64),
+ dm_bm_block_size(
+ dm_tm_get_bm(s->info->tm))));
+ pn->header.value_size = cpu_to_le32(sizeof(__le64));
+
+ val = cpu_to_le64(dm_block_location(left));
+ __dm_bless_for_disk(&val);
+ pn->keys[0] = ln->keys[0];
+ memcpy_disk(value_ptr(pn, 0, sizeof(__le64)), &val, sizeof(__le64));
+
+ val = cpu_to_le64(dm_block_location(right));
+ __dm_bless_for_disk(&val);
+ pn->keys[1] = rn->keys[0];
+ memcpy_disk(value_ptr(pn, 1, sizeof(__le64)), &val, sizeof(__le64));
+
+ /*
+	 * Rejig the spine. This is ugly, since it knows too
+	 * much about the spine.
+ */
+ if (s->nodes[0] != new_parent) {
+ unlock_block(s->info, s->nodes[0]);
+ s->nodes[0] = new_parent;
+ }
+ if (key < le64_to_cpu(rn->keys[0])) {
+ unlock_block(s->info, right);
+ s->nodes[1] = left;
+ } else {
+ unlock_block(s->info, left);
+ s->nodes[1] = right;
+ }
+ s->count = 2;
+
+ return 0;
+}
+
+static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
+ struct dm_btree_value_type *vt,
+ uint64_t key, unsigned *index)
+{
+ int r, i = *index, inc, top = 1;
+ struct node *node;
+
+ for (;;) {
+ r = shadow_step(s, root, vt, &inc);
+ if (r < 0)
+ return r;
+
+ node = dm_block_data(shadow_current(s));
+ if (inc)
+ inc_children(s->info->tm, node, vt);
+
+ /*
+ * We have to patch up the parent node, ugly, but I don't
+ * see a way to do this automatically as part of the spine
+ * op.
+ */
+		if (shadow_has_parent(s) && i >= 0) { /* FIXME: second clause unnecessary. */
+ __le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
+
+ __dm_bless_for_disk(&location);
+ memcpy_disk(value_ptr(dm_block_data(shadow_parent(s)), i, sizeof(uint64_t)),
+ &location, sizeof(__le64));
+ }
+
+ node = dm_block_data(shadow_current(s));
+
+ if (node->header.nr_entries == node->header.max_entries) {
+ if (top)
+ r = btree_split_beneath(s, key);
+ else
+ r = btree_split_sibling(s, root, i, key);
+
+ if (r < 0)
+ return r;
+ }
+
+ node = dm_block_data(shadow_current(s));
+
+ i = lower_bound(node, key);
+
+ if (le32_to_cpu(node->header.flags) & LEAF_NODE)
+ break;
+
+ if (i < 0) {
+ /* change the bounds on the lowest key */
+ node->keys[0] = cpu_to_le64(key);
+ i = 0;
+ }
+
+ root = value64(node, i);
+ top = 0;
+ }
+
+ if (i < 0 || le64_to_cpu(node->keys[i]) != key)
+ i++;
+
+ /* we're about to overwrite this value, so undo the increment for it */
+ /* FIXME: shame that inc information is leaking outside the spine.
+ * Plus inc is just plain wrong in the event of a split */
+ if (le64_to_cpu(node->keys[i]) == key && inc)
+ if (vt->dec)
+ vt->dec(vt->context, value_ptr(node, i, vt->size));
+
+ *index = i;
+ return 0;
+}
+
+static int insert(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, void *value, dm_block_t *new_root,
+ int *inserted)
+ __dm_written_to_disk(value)
+{
+ int r, need_insert;
+ unsigned level, index = -1, last_level = info->levels - 1;
+ dm_block_t block = root;
+ struct shadow_spine spine;
+ struct node *n;
+ struct dm_btree_value_type le64_type;
+
+ le64_type.context = NULL;
+ le64_type.size = sizeof(__le64);
+ le64_type.inc = NULL;
+ le64_type.dec = NULL;
+ le64_type.equal = NULL;
+
+ init_shadow_spine(&spine, info);
+
+ for (level = 0; level < (info->levels - 1); level++) {
+ r = btree_insert_raw(&spine, block, &le64_type, keys[level], &index);
+ if (r < 0)
+ goto bad;
+
+ n = dm_block_data(shadow_current(&spine));
+ need_insert = ((index >= le32_to_cpu(n->header.nr_entries)) ||
+ (le64_to_cpu(n->keys[index]) != keys[level]));
+
+ if (need_insert) {
+ dm_block_t new_tree;
+ __le64 new_le;
+
+ r = dm_btree_create(info, &new_tree);
+ if (r < 0)
+ goto bad;
+
+ new_le = cpu_to_le64(new_tree);
+ __dm_bless_for_disk(&new_le);
+
+ r = insert_at(sizeof(uint64_t), n, index,
+ keys[level], &new_le);
+ if (r)
+ goto bad;
+ }
+
+ if (level < last_level)
+ block = value64(n, index);
+ }
+
+ r = btree_insert_raw(&spine, block, &info->value_type,
+ keys[level], &index);
+ if (r < 0)
+ goto bad;
+
+ n = dm_block_data(shadow_current(&spine));
+ need_insert = ((index >= le32_to_cpu(n->header.nr_entries)) ||
+ (le64_to_cpu(n->keys[index]) != keys[level]));
+
+ if (need_insert) {
+ if (inserted)
+ *inserted = 1;
+
+ r = insert_at(info->value_type.size, n, index,
+ keys[level], value);
+ if (r)
+ goto bad_unblessed;
+ } else {
+ if (inserted)
+ *inserted = 0;
+
+ if (info->value_type.dec &&
+ (!info->value_type.equal ||
+ !info->value_type.equal(
+ info->value_type.context,
+ value_ptr(n, index, info->value_type.size),
+ value))) {
+ info->value_type.dec(info->value_type.context,
+ value_ptr(n, index, info->value_type.size));
+ }
+ memcpy_disk(value_ptr(n, index, info->value_type.size),
+ value, info->value_type.size);
+ }
+
+ *new_root = shadow_root(&spine);
+ exit_shadow_spine(&spine);
+
+ return 0;
+
+bad:
+ __dm_unbless_for_disk(value);
+bad_unblessed:
+ exit_shadow_spine(&spine);
+ return r;
+}
+
+int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, void *value, dm_block_t *new_root)
+ __dm_written_to_disk(value)
+{
+ return insert(info, root, keys, value, new_root, NULL);
+}
+EXPORT_SYMBOL_GPL(dm_btree_insert);
+
+int dm_btree_insert_notify(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, void *value, dm_block_t *new_root,
+ int *inserted)
+ __dm_written_to_disk(value)
+{
+ return insert(info, root, keys, value, new_root, inserted);
+}
+EXPORT_SYMBOL_GPL(dm_btree_insert_notify);
+
+/*----------------------------------------------------------------*/
+
+int dm_btree_clone(struct dm_btree_info *info, dm_block_t root,
+ dm_block_t *clone)
+{
+ int r;
+ struct dm_block *b, *orig_b;
+ struct node *b_node, *orig_node;
+
+ /* Copy the root node */
+ r = new_block(info, &b);
+ if (r < 0)
+ return r;
+
+ r = dm_tm_read_lock(info->tm, root, &btree_node_validator, &orig_b);
+ if (r < 0) {
+ dm_block_t location = dm_block_location(b);
+
+ unlock_block(info, b);
+		dm_tm_dec(info->tm, location);
+
+		return r;
+	}
+
+ *clone = dm_block_location(b);
+ b_node = dm_block_data(b);
+ orig_node = dm_block_data(orig_b);
+
+ memcpy(b_node, orig_node,
+ dm_bm_block_size(dm_tm_get_bm(info->tm)));
+ dm_tm_unlock(info->tm, orig_b);
+ inc_children(info->tm, b_node, &info->value_type);
+ dm_tm_unlock(info->tm, b);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_btree_clone);
+
+/*----------------------------------------------------------------*/
+
+static int find_highest_key(struct ro_spine *s, dm_block_t block,
+ uint64_t *result_key, dm_block_t *next_block)
+{
+ int i, r;
+ uint32_t flags;
+
+ do {
+ r = ro_step(s, block);
+ if (r < 0)
+ return r;
+
+ flags = le32_to_cpu(ro_node(s)->header.flags);
+ i = le32_to_cpu(ro_node(s)->header.nr_entries);
+ if (!i)
+ return -ENODATA;
+ else
+ i--;
+
+ *result_key = le64_to_cpu(ro_node(s)->keys[i]);
+ if (next_block || flags & INTERNAL_NODE)
+ block = value64(ro_node(s), i);
+
+ } while (flags & INTERNAL_NODE);
+
+ if (next_block)
+ *next_block = block;
+ return 0;
+}
+
+int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *result_keys)
+{
+ int r = 0, count = 0, level;
+ struct ro_spine spine;
+
+ init_ro_spine(&spine, info);
+ for (level = 0; level < info->levels; level++) {
+ r = find_highest_key(&spine, root, result_keys + level,
+ level == info->levels - 1 ? NULL : &root);
+ if (r == -ENODATA) {
+ r = 0;
+ break;
+
+ } else if (r)
+ break;
+
+ count++;
+ }
+ exit_ro_spine(&spine);
+
+ return r ? r : count;
+}
+EXPORT_SYMBOL_GPL(dm_btree_find_highest_key);
--- /dev/null
- #include <linux/module.h>
+/*
+ * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-space-map-common.h"
+#include "dm-space-map-disk.h"
+#include "dm-space-map.h"
+#include "dm-transaction-manager.h"
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
++#include <linux/export.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "space map disk"
+
+/*
+ * Bitmap validator
+ */
+static void bitmap_prepare_for_write(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size)
+{
+ struct disk_bitmap_header *disk_header = dm_block_data(b);
+
+ disk_header->blocknr = cpu_to_le64(dm_block_location(b));
+ disk_header->csum = cpu_to_le32(dm_block_csum_data(&disk_header->not_used, block_size - sizeof(__le32)));
+}
+
+static int bitmap_check(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size)
+{
+ struct disk_bitmap_header *disk_header = dm_block_data(b);
+ __le32 csum_disk;
+
+ if (dm_block_location(b) != le64_to_cpu(disk_header->blocknr)) {
+ DMERR("bitmap check failed blocknr %llu wanted %llu",
+ le64_to_cpu(disk_header->blocknr), dm_block_location(b));
+ return -ENOTBLK;
+ }
+
+ csum_disk = cpu_to_le32(dm_block_csum_data(&disk_header->not_used, block_size - sizeof(__le32)));
+ if (csum_disk != disk_header->csum) {
+ DMERR("bitmap check failed csum %u wanted %u",
+ le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum));
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+struct dm_block_validator dm_sm_bitmap_validator = {
+ .name = "sm_bitmap",
+ .prepare_for_write = bitmap_prepare_for_write,
+ .check = bitmap_check
+};
+
+/*----------------------------------------------------------------*/
+
+#define ENTRIES_PER_WORD 32
+#define ENTRIES_SHIFT 5
+
+void *dm_bitmap_data(struct dm_block *b)
+{
+ return dm_block_data(b) + sizeof(struct disk_bitmap_header);
+}
+
+#define WORD_MASK_LOW 0x5555555555555555ULL
+#define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL
+#define WORD_MASK_ALL 0xFFFFFFFFFFFFFFFFULL
+
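+/*
+ * Each bitmap entry is 2 bits wide, so a 64-bit word holds
+ * ENTRIES_PER_WORD = 32 entries. The values 0-2 are literal reference
+ * counts; the value 3 means the real count lives in the ref_count
+ * btree (see disk_ll_lookup() and disk_ll_insert() below).
+ * WORD_MASK_LOW and WORD_MASK_HIGH let bitmap_word_used() cheaply
+ * detect words with no zero (i.e. free) entries: if either alternating
+ * mask is fully set, every 2-bit entry in the word is non-zero.
+ */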
+static unsigned bitmap_word_used(void *addr, unsigned b)
+{
+ __le64 *words_le = addr;
+ __le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
+
+ uint64_t bits = le64_to_cpu(*w_le);
+
+ return ((bits & WORD_MASK_LOW) == WORD_MASK_LOW ||
+ (bits & WORD_MASK_HIGH) == WORD_MASK_HIGH ||
+ (bits & WORD_MASK_ALL) == WORD_MASK_ALL);
+}
+
+unsigned sm_lookup_bitmap(void *addr, unsigned b)
+{
+ __le64 *words_le = addr;
+ __le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
+
+ b = (b & (ENTRIES_PER_WORD - 1)) << 1;
+
+ return (!!test_bit_le(b, (void *) w_le) << 1) |
+ (!!test_bit_le(b + 1, (void *) w_le));
+}
+
+void sm_set_bitmap(void *addr, unsigned b, unsigned val)
+{
+ __le64 *words_le = addr;
+ __le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
+
+ b = (b & (ENTRIES_PER_WORD - 1)) << 1;
+
+ if (val & 2)
+ __set_bit_le(b, (void *) w_le);
+ else
+ __clear_bit_le(b, (void *) w_le);
+
+ if (val & 1)
+ __set_bit_le(b + 1, (void *) w_le);
+ else
+ __clear_bit_le(b + 1, (void *) w_le);
+}
+
+int sm_find_free(void *addr, unsigned begin, unsigned end,
+ unsigned *result)
+{
+ while (begin < end) {
+ if (!(begin & (ENTRIES_PER_WORD - 1)) &&
+ bitmap_word_used(addr, begin)) {
+ begin += ENTRIES_PER_WORD;
+ continue;
+ }
+
+ if (!sm_lookup_bitmap(addr, begin)) {
+ *result = begin;
+ return 0;
+ }
+
+ begin++;
+ }
+
+ return -ENOSPC;
+}
+
+static int disk_ll_init(struct ll_disk *io, struct dm_transaction_manager *tm)
+{
+ io->tm = tm;
+ io->bitmap_info.tm = tm;
+ io->bitmap_info.levels = 1;
+
+ /*
+ * Because the new bitmap blocks are created via a shadow
+ * operation, the old entry has already had its reference count
+ * decremented and we don't need the btree to do any bookkeeping.
+ */
+ io->bitmap_info.value_type.size = sizeof(struct disk_index_entry);
+ io->bitmap_info.value_type.inc = NULL;
+ io->bitmap_info.value_type.dec = NULL;
+ io->bitmap_info.value_type.equal = NULL;
+
+ io->ref_count_info.tm = tm;
+ io->ref_count_info.levels = 1;
+ io->ref_count_info.value_type.size = sizeof(uint32_t);
+ io->ref_count_info.value_type.inc = NULL;
+ io->ref_count_info.value_type.dec = NULL;
+ io->ref_count_info.value_type.equal = NULL;
+
+ io->block_size = dm_bm_block_size(dm_tm_get_bm(tm));
+
+ if (io->block_size > (1 << 30)) {
+ DMERR("block size too big to hold bitmaps");
+ return -EINVAL;
+ }
+
+ io->entries_per_block = (io->block_size - sizeof(struct disk_bitmap_header)) *
+ ENTRIES_PER_BYTE;
+ io->nr_blocks = 0;
+ io->bitmap_root = 0;
+ io->ref_count_root = 0;
+
+ return 0;
+}
+
+static int disk_ll_new(struct ll_disk *io, struct dm_transaction_manager *tm)
+{
+ int r;
+
+ r = disk_ll_init(io, tm);
+ if (r < 0)
+ return r;
+
+ io->nr_blocks = 0;
+ io->nr_allocated = 0;
+ r = dm_btree_create(&io->bitmap_info, &io->bitmap_root);
+ if (r < 0)
+ return r;
+
+ r = dm_btree_create(&io->ref_count_info, &io->ref_count_root);
+ if (r < 0) {
+ dm_btree_destroy(&io->bitmap_info, io->bitmap_root);
+ return r;
+ }
+
+ return 0;
+}
+
+static int disk_ll_extend(struct ll_disk *io, dm_block_t extra_blocks)
+{
+ int r;
+ dm_block_t i, nr_blocks;
+ unsigned old_blocks, blocks;
+
+ nr_blocks = io->nr_blocks + extra_blocks;
+ old_blocks = dm_sector_div_up(io->nr_blocks, io->entries_per_block);
+ blocks = dm_sector_div_up(nr_blocks, io->entries_per_block);
+
+ for (i = old_blocks; i < blocks; i++) {
+ struct dm_block *b;
+ struct disk_index_entry idx;
+
+ r = dm_tm_new_block(io->tm, &dm_sm_bitmap_validator, &b);
+ if (r < 0)
+ return r;
+ idx.blocknr = cpu_to_le64(dm_block_location(b));
+
+ r = dm_tm_unlock(io->tm, b);
+ if (r < 0)
+ return r;
+
+ idx.nr_free = cpu_to_le32(io->entries_per_block);
+ idx.none_free_before = 0;
+ __dm_bless_for_disk(&idx);
+
+ r = dm_btree_insert(&io->bitmap_info, io->bitmap_root,
+ &i, &idx, &io->bitmap_root);
+ if (r < 0)
+ return r;
+ }
+
+ io->nr_blocks = nr_blocks;
+ return 0;
+}
+
+static int disk_ll_open(struct ll_disk *ll, struct dm_transaction_manager *tm,
+ void *root_le, size_t len)
+{
+ int r;
+ struct disk_sm_root *smr = root_le;
+
+ if (len < sizeof(struct disk_sm_root)) {
+ DMERR("sm_disk root too small");
+ return -ENOMEM;
+ }
+
+ r = disk_ll_init(ll, tm);
+ if (r < 0)
+ return r;
+
+ ll->nr_blocks = le64_to_cpu(smr->nr_blocks);
+ ll->nr_allocated = le64_to_cpu(smr->nr_allocated);
+ ll->bitmap_root = le64_to_cpu(smr->bitmap_root);
+ ll->ref_count_root = le64_to_cpu(smr->ref_count_root);
+
+ return 0;
+}
+
+static int disk_ll_lookup_bitmap(struct ll_disk *io, dm_block_t b, uint32_t *result)
+{
+ int r;
+ dm_block_t index = b;
+ struct disk_index_entry ie_disk;
+ struct dm_block *blk;
+
+ do_div(index, io->entries_per_block);
+ r = dm_btree_lookup(&io->bitmap_info, io->bitmap_root, &index, &ie_disk);
+ if (r < 0)
+ return r;
+
+ r = dm_tm_read_lock(io->tm, le64_to_cpu(ie_disk.blocknr), &dm_sm_bitmap_validator, &blk);
+ if (r < 0)
+ return r;
+
+ *result = sm_lookup_bitmap(dm_bitmap_data(blk), do_div(b, io->entries_per_block));
+
+ return dm_tm_unlock(io->tm, blk);
+}
+
+static int disk_ll_lookup(struct ll_disk *io, dm_block_t b, uint32_t *result)
+{
+ __le32 rc_le;
+ int r = disk_ll_lookup_bitmap(io, b, result);
+
+ if (r)
+ return r;
+
+ if (*result != 3)
+ return r;
+
+ r = dm_btree_lookup(&io->ref_count_info, io->ref_count_root, &b, &rc_le);
+ if (r < 0)
+ return r;
+
+ *result = le32_to_cpu(rc_le);
+
+ return r;
+}
+
+static int disk_ll_find_free_block(struct ll_disk *io, dm_block_t begin,
+ dm_block_t end, dm_block_t *result)
+{
+ int r;
+ struct disk_index_entry ie_disk;
+ dm_block_t i, index_begin = begin;
+ dm_block_t index_end = dm_sector_div_up(end, io->entries_per_block);
+
+ begin = do_div(index_begin, io->entries_per_block);
+
+ for (i = index_begin; i < index_end; i++, begin = 0) {
+ struct dm_block *blk;
+ unsigned position;
+ uint32_t bit_end;
+
+ r = dm_btree_lookup(&io->bitmap_info, io->bitmap_root, &i, &ie_disk);
+ if (r < 0)
+ return r;
+
+ if (le32_to_cpu(ie_disk.nr_free) <= 0)
+ continue;
+
+ r = dm_tm_read_lock(io->tm, le64_to_cpu(ie_disk.blocknr),
+ &dm_sm_bitmap_validator, &blk);
+ if (r < 0)
+ return r;
+
+ bit_end = (i == index_end - 1) ?
+ do_div(end, io->entries_per_block) : io->entries_per_block;
+
+ r = sm_find_free(dm_bitmap_data(blk),
+ max((unsigned)begin, (unsigned)le32_to_cpu(ie_disk.none_free_before)),
+ bit_end, &position);
+ if (r < 0) {
+ dm_tm_unlock(io->tm, blk);
+ continue;
+ }
+
+ r = dm_tm_unlock(io->tm, blk);
+ if (r < 0)
+ return r;
+
+ *result = i * io->entries_per_block + (dm_block_t) position;
+
+ return 0;
+ }
+
+ return -ENOSPC;
+}
+
+static int disk_ll_insert(struct ll_disk *io, dm_block_t b, uint32_t ref_count)
+{
+ int r;
+ uint32_t bit, old;
+ struct dm_block *nb;
+ dm_block_t index = b;
+ struct disk_index_entry ie_disk;
+ void *bm_le;
+ int inc;
+
+ do_div(index, io->entries_per_block);
+ r = dm_btree_lookup(&io->bitmap_info, io->bitmap_root, &index, &ie_disk);
+ if (r < 0)
+ return r;
+
+ r = dm_tm_shadow_block(io->tm, le64_to_cpu(ie_disk.blocknr),
+ &dm_sm_bitmap_validator, &nb, &inc);
+ if (r < 0) {
+ DMERR("dm_tm_shadow_block() failed");
+ return r;
+ }
+ ie_disk.blocknr = cpu_to_le64(dm_block_location(nb));
+
+ bm_le = dm_bitmap_data(nb);
+ bit = do_div(b, io->entries_per_block);
+ old = sm_lookup_bitmap(bm_le, bit);
+
+ if (ref_count <= 2) {
+ sm_set_bitmap(bm_le, bit, ref_count);
+
+ if (old > 2) {
+ r = dm_btree_remove(&io->ref_count_info, io->ref_count_root,
+ &b, &io->ref_count_root);
+ if (r) {
+ dm_tm_unlock(io->tm, nb);
+ return r;
+ }
+ }
+ } else {
+ __le32 rc_le = cpu_to_le32(ref_count);
+
+ __dm_bless_for_disk(&rc_le);
+
+ sm_set_bitmap(bm_le, bit, 3);
+ r = dm_btree_insert(&io->ref_count_info, io->ref_count_root,
+ &b, &rc_le, &io->ref_count_root);
+ if (r < 0) {
+ dm_tm_unlock(io->tm, nb);
+ DMERR("ref count insert failed");
+ return r;
+ }
+ }
+
+ r = dm_tm_unlock(io->tm, nb);
+ if (r < 0)
+ return r;
+
+ if (ref_count && !old) {
+ io->nr_allocated++;
+ ie_disk.nr_free = cpu_to_le32(le32_to_cpu(ie_disk.nr_free) - 1);
+ if (le32_to_cpu(ie_disk.none_free_before) == b)
+ ie_disk.none_free_before = cpu_to_le32(b + 1);
+
+ } else if (old && !ref_count) {
+ io->nr_allocated--;
+ ie_disk.nr_free = cpu_to_le32(le32_to_cpu(ie_disk.nr_free) + 1);
+ ie_disk.none_free_before = cpu_to_le32(min((dm_block_t) le32_to_cpu(ie_disk.none_free_before), b));
+ }
+
+ __dm_bless_for_disk(&ie_disk);
+
+ r = dm_btree_insert(&io->bitmap_info, io->bitmap_root, &index, &ie_disk, &io->bitmap_root);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+static int disk_ll_inc(struct ll_disk *ll, dm_block_t b)
+{
+ int r;
+ uint32_t rc;
+
+ r = disk_ll_lookup(ll, b, &rc);
+ if (r)
+ return r;
+
+ return disk_ll_insert(ll, b, rc + 1);
+}
+
+static int disk_ll_dec(struct ll_disk *ll, dm_block_t b)
+{
+ int r;
+ uint32_t rc;
+
+ r = disk_ll_lookup(ll, b, &rc);
+ if (r)
+ return r;
+
+ if (!rc)
+ return -EINVAL;
+
+ return disk_ll_insert(ll, b, rc - 1);
+}
+
+/*--------------------------------------------------------------*/
+
+/*
+ * Space map interface.
+ */
+struct sm_disk {
+ struct dm_space_map sm;
+
+ struct ll_disk ll;
+};
+
+static void sm_disk_destroy(struct dm_space_map *sm)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ kfree(smd);
+}
+
+static int sm_disk_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ return disk_ll_extend(&smd->ll, extra_blocks);
+}
+
+static int sm_disk_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ *count = smd->ll.nr_blocks;
+
+ return 0;
+}
+
+static int sm_disk_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ *count = smd->ll.nr_blocks - smd->ll.nr_allocated;
+
+ return 0;
+}
+
+static int sm_disk_get_count(struct dm_space_map *sm, dm_block_t b,
+ uint32_t *result)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ return disk_ll_lookup(&smd->ll, b, result);
+}
+
+static int sm_disk_count_is_more_than_one(struct dm_space_map *sm, dm_block_t b,
+ int *result)
+{
+ int r;
+ uint32_t count;
+
+ r = sm_disk_get_count(sm, b, &count);
+ if (r)
+ return r;
+
+	*result = count > 1;
+
+	return 0;
+}
+
+static int sm_disk_set_count(struct dm_space_map *sm, dm_block_t b,
+ uint32_t count)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ return disk_ll_insert(&smd->ll, b, count);
+}
+
+static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ return disk_ll_inc(&smd->ll, b);
+}
+
+static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ return disk_ll_dec(&smd->ll, b);
+}
+
+static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
+{
+ int r;
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ /*
+ * FIXME: We should start the search where we left off.
+ */
+ r = disk_ll_find_free_block(&smd->ll, 0, smd->ll.nr_blocks, b);
+ if (r)
+ return r;
+
+ return disk_ll_inc(&smd->ll, *b);
+}
+
+static int sm_disk_commit(struct dm_space_map *sm)
+{
+ return 0;
+}
+
+static int sm_disk_root_size(struct dm_space_map *sm, size_t *result)
+{
+ *result = sizeof(struct disk_sm_root);
+
+ return 0;
+}
+
+static int sm_disk_copy_root(struct dm_space_map *sm, void *where_le, size_t max)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+ struct disk_sm_root root_le;
+
+ root_le.nr_blocks = cpu_to_le64(smd->ll.nr_blocks);
+ root_le.nr_allocated = cpu_to_le64(smd->ll.nr_allocated);
+ root_le.bitmap_root = cpu_to_le64(smd->ll.bitmap_root);
+ root_le.ref_count_root = cpu_to_le64(smd->ll.ref_count_root);
+
+ if (max < sizeof(root_le))
+ return -ENOSPC;
+
+ memcpy(where_le, &root_le, sizeof(root_le));
+
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+
+static struct dm_space_map ops = {
+ .destroy = sm_disk_destroy,
+ .extend = sm_disk_extend,
+ .get_nr_blocks = sm_disk_get_nr_blocks,
+ .get_nr_free = sm_disk_get_nr_free,
+ .get_count = sm_disk_get_count,
+ .count_is_more_than_one = sm_disk_count_is_more_than_one,
+ .set_count = sm_disk_set_count,
+ .inc_block = sm_disk_inc_block,
+ .dec_block = sm_disk_dec_block,
+ .new_block = sm_disk_new_block,
+ .commit = sm_disk_commit,
+ .root_size = sm_disk_root_size,
+ .copy_root = sm_disk_copy_root
+};
+
+struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
+ dm_block_t nr_blocks)
+{
+ int r;
+ struct sm_disk *smd;
+
+ smd = kmalloc(sizeof(*smd), GFP_KERNEL);
+ if (!smd)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&smd->sm, &ops, sizeof(smd->sm));
+
+ r = disk_ll_new(&smd->ll, tm);
+ if (r)
+ goto bad;
+
+ r = disk_ll_extend(&smd->ll, nr_blocks);
+ if (r)
+ goto bad;
+
+ r = sm_disk_commit(&smd->sm);
+ if (r)
+ goto bad;
+
+ return &smd->sm;
+
+bad:
+ kfree(smd);
+ return ERR_PTR(r);
+}
+EXPORT_SYMBOL_GPL(dm_sm_disk_create);
+
+struct dm_space_map *dm_sm_disk_open(struct dm_transaction_manager *tm,
+ void *root_le, size_t len)
+{
+ int r;
+ struct sm_disk *smd;
+
+ smd = kmalloc(sizeof(*smd), GFP_KERNEL);
+ if (!smd)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&smd->sm, &ops, sizeof(smd->sm));
+
+ r = disk_ll_open(&smd->ll, tm, root_le, len);
+ if (r)
+ goto bad;
+
+ r = sm_disk_commit(&smd->sm);
+ if (r)
+ goto bad;
+
+ return &smd->sm;
+
+bad:
+ kfree(smd);
+ return ERR_PTR(r);
+}
+EXPORT_SYMBOL_GPL(dm_sm_disk_open);
--- /dev/null
- #include <linux/module.h>
+/*
+ * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+#include "dm-transaction-manager.h"
+#include "dm-space-map.h"
+#include "dm-space-map-disk.h"
+#include "dm-space-map-metadata.h"
+#include "dm-persistent-data-internal.h"
+
++#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "transaction manager"
+
+/*----------------------------------------------------------------*/
+
+struct shadow_info {
+ struct hlist_node hlist;
+ dm_block_t where;
+};
+
+/*
+ * It would be nice if we scaled with the size of the transaction.
+ */
+#define HASH_SIZE 256
+#define HASH_MASK (HASH_SIZE - 1)
+
+struct dm_transaction_manager {
+ int is_clone;
+ struct dm_transaction_manager *real;
+
+ struct dm_block_manager *bm;
+ struct dm_space_map *sm;
+
+ spinlock_t lock;
+ struct hlist_head buckets[HASH_SIZE];
+};
+
+/*----------------------------------------------------------------*/
+
+static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
+{
+ int r = 0;
+ unsigned bucket = dm_hash_block(b, HASH_MASK);
+ struct shadow_info *si;
+ struct hlist_node *n;
+
+ spin_lock(&tm->lock);
+
+ hlist_for_each_entry(si, n, tm->buckets + bucket, hlist)
+ if (si->where == b) {
+ r = 1;
+ break;
+ }
+
+ spin_unlock(&tm->lock);
+
+ return r;
+}
+
+/*
+ * This can silently fail if there's no memory. We're ok with this since
+ * creating redundant shadows causes no harm.
+ */
+static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
+{
+ unsigned bucket;
+ struct shadow_info *si;
+
+ si = kmalloc(sizeof(*si), GFP_NOIO);
+ if (si) {
+ si->where = b;
+ bucket = dm_hash_block(b, HASH_MASK);
+
+ spin_lock(&tm->lock);
+ hlist_add_head(&si->hlist, tm->buckets + bucket);
+ spin_unlock(&tm->lock);
+ }
+}
+
+static void wipe_shadow_table(struct dm_transaction_manager *tm)
+{
+ struct shadow_info *si;
+ struct hlist_node *n, *tmp;
+ struct hlist_head *bucket;
+ int i;
+
+ spin_lock(&tm->lock);
+ for (i = 0; i < HASH_SIZE; i++) {
+ bucket = tm->buckets + i;
+ hlist_for_each_entry_safe(si, n, tmp, bucket, hlist)
+ kfree(si);
+
+ INIT_HLIST_HEAD(bucket);
+ }
+ spin_unlock(&tm->lock);
+}
+
+/*----------------------------------------------------------------*/
+
+static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
+ struct dm_space_map *sm)
+{
+ int i;
+ struct dm_transaction_manager *tm;
+
+ tm = kmalloc(sizeof(*tm), GFP_KERNEL);
+ if (!tm)
+ return ERR_PTR(-ENOMEM);
+
+ tm->is_clone = 0;
+ tm->real = NULL;
+ tm->bm = bm;
+ tm->sm = sm;
+
+ spin_lock_init(&tm->lock);
+ for (i = 0; i < HASH_SIZE; i++)
+ INIT_HLIST_HEAD(tm->buckets + i);
+
+ return tm;
+}
+
+struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transaction_manager *real)
+{
+ struct dm_transaction_manager *tm;
+
+ tm = kmalloc(sizeof(*tm), GFP_KERNEL);
+ if (tm) {
+ tm->is_clone = 1;
+ tm->real = real;
+ }
+
+ return tm;
+}
+EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
+
+void dm_tm_destroy(struct dm_transaction_manager *tm)
+{
+ kfree(tm);
+}
+EXPORT_SYMBOL_GPL(dm_tm_destroy);
+
+int dm_tm_pre_commit(struct dm_transaction_manager *tm)
+{
+ int r;
+
+ if (tm->is_clone)
+ return -EWOULDBLOCK;
+
+ r = dm_sm_commit(tm->sm);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_tm_pre_commit);
+
+int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root)
+{
+ if (tm->is_clone)
+ return -EWOULDBLOCK;
+
+ wipe_shadow_table(tm);
+
+ return dm_bm_flush_and_unlock(tm->bm, root);
+}
+EXPORT_SYMBOL_GPL(dm_tm_commit);
+
+int dm_tm_new_block(struct dm_transaction_manager *tm,
+ struct dm_block_validator *v,
+ struct dm_block **result)
+{
+ int r;
+ dm_block_t new_block;
+
+ if (tm->is_clone)
+ return -EWOULDBLOCK;
+
+ r = dm_sm_new_block(tm->sm, &new_block);
+ if (r < 0)
+ return r;
+
+ r = dm_bm_write_lock_zero(tm->bm, new_block, v, result);
+ if (r < 0) {
+ dm_sm_dec_block(tm->sm, new_block);
+ return r;
+ }
+
+ /*
+ * New blocks count as shadows in that they don't need to be
+ * shadowed again.
+ */
+ insert_shadow(tm, new_block);
+
+ return 0;
+}
+
+static int __shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
+ struct dm_block_validator *v,
+ struct dm_block **result, int *inc_children)
+{
+ int r;
+ dm_block_t new;
+ uint32_t count;
+ struct dm_block *orig_block;
+
+ r = dm_sm_new_block(tm->sm, &new);
+ if (r < 0)
+ return r;
+
+ r = dm_bm_write_lock_zero(tm->bm, new, v, result);
+ if (r < 0)
+ goto bad_dec_block;
+
+ r = dm_bm_read_lock(tm->bm, orig, v, &orig_block);
+ if (r < 0)
+ goto bad_dec_block;
+
+ memcpy(dm_block_data(*result), dm_block_data(orig_block),
+ dm_bm_block_size(tm->bm));
+
+ r = dm_bm_unlock(orig_block);
+ if (r < 0)
+ goto bad_dec_block;
+
+ r = dm_sm_get_count(tm->sm, orig, &count);
+ if (r < 0)
+ goto bad;
+
+ r = dm_sm_dec_block(tm->sm, orig);
+ if (r < 0)
+ goto bad;
+
+ *inc_children = count > 1;
+
+ return 0;
+
+bad:
+ dm_bm_unlock(*result);
+bad_dec_block:
+ dm_sm_dec_block(tm->sm, new);
+
+ return r;
+}
+
+int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
+ struct dm_block_validator *v, struct dm_block **result,
+ int *inc_children)
+{
+ int r, more_than_one;
+
+ if (tm->is_clone)
+ return -EWOULDBLOCK;
+
+ if (is_shadow(tm, orig)) {
+ r = dm_sm_count_is_more_than_one(tm->sm, orig, &more_than_one);
+ if (r < 0)
+ return r;
+
+ if (!more_than_one) {
+ *inc_children = 0;
+ return dm_bm_write_lock(tm->bm, orig, v, result);
+ }
+ /* fall through */
+ }
+
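+	/*
+	 * Either the block has never been shadowed in this transaction,
+	 * or it has been but is still shared, so take a copy:
+	 * __shadow_block() allocates a fresh block, copies the data
+	 * across and drops one reference on the original.
+	 */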
+ r = __shadow_block(tm, orig, v, result, inc_children);
+ if (r < 0)
+ return r;
+
+ insert_shadow(tm, dm_block_location(*result));
+
+ return r;
+}
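+
+/*
+ * A minimal usage sketch of the shadowing API above, assuming the
+ * caller holds a transaction manager 'tm', a validator 'v' and a block
+ * 'b' (illustrative only, not part of this patch):
+ *
+ *	struct dm_block *blk;
+ *	int inc_children, r;
+ *
+ *	r = dm_tm_shadow_block(tm, b, v, &blk, &inc_children);
+ *	if (r < 0)
+ *		return r;
+ *
+ *	... modify dm_block_data(blk) here; if inc_children came back
+ *	set, the block was shared, so the caller must increment the
+ *	reference counts of any child blocks it references ...
+ *
+ *	r = dm_tm_unlock(tm, blk);
+ */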
+
+int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
+ struct dm_block_validator *v,
+ struct dm_block **blk)
+{
+ if (tm->is_clone)
+ return dm_bm_read_try_lock(tm->real->bm, b, v, blk);
+
+ return dm_bm_read_lock(tm->bm, b, v, blk);
+}
+
+int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
+{
+ return dm_bm_unlock(b);
+}
+EXPORT_SYMBOL_GPL(dm_tm_unlock);
+
+void dm_tm_inc(struct dm_transaction_manager *tm, dm_block_t b)
+{
+ /*
+ * The non-blocking clone doesn't support this.
+ */
+ BUG_ON(tm->is_clone);
+
+ dm_sm_inc_block(tm->sm, b);
+}
+EXPORT_SYMBOL_GPL(dm_tm_inc);
+
+void dm_tm_dec(struct dm_transaction_manager *tm, dm_block_t b)
+{
+ /*
+ * The non-blocking clone doesn't support this.
+ */
+ BUG_ON(tm->is_clone);
+
+ dm_sm_dec_block(tm->sm, b);
+}
+
+int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b,
+ uint32_t *result)
+{
+ if (tm->is_clone)
+ return -EWOULDBLOCK;
+
+ return dm_sm_get_count(tm->sm, b, result);
+}
+
+struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm)
+{
+ return tm->bm;
+}
+
+/*----------------------------------------------------------------*/
+
+static int dm_tm_create_internal(struct dm_block_manager *bm,
+ dm_block_t sb_location,
+ struct dm_block_validator *sb_validator,
+ size_t root_offset, size_t root_max_len,
+ struct dm_transaction_manager **tm,
+ struct dm_space_map **sm,
+ struct dm_block **sblock,
+ int create)
+{
+ int r;
+
+ *sm = dm_sm_metadata_init();
+ if (IS_ERR(*sm))
+ return PTR_ERR(*sm);
+
+ *tm = dm_tm_create(bm, *sm);
+ if (IS_ERR(*tm)) {
+ dm_sm_destroy(*sm);
+ return PTR_ERR(*tm);
+ }
+
+ if (create) {
+ r = dm_bm_write_lock_zero(dm_tm_get_bm(*tm), sb_location,
+ sb_validator, sblock);
+ if (r < 0) {
+ DMERR("couldn't lock superblock");
+ goto bad1;
+ }
+
+ r = dm_sm_metadata_create(*sm, *tm, dm_bm_nr_blocks(bm),
+ sb_location);
+ if (r) {
+ DMERR("couldn't create metadata space map");
+ goto bad2;
+ }
+
+ } else {
+ r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
+ sb_validator, sblock);
+ if (r < 0) {
+ DMERR("couldn't lock superblock");
+ goto bad1;
+ }
+
+ r = dm_sm_metadata_open(*sm, *tm,
+ dm_block_data(*sblock) + root_offset,
+ root_max_len);
+		if (r) {
+ DMERR("couldn't open metadata space map");
+ goto bad2;
+ }
+ }
+
+ return 0;
+
+bad2:
+ dm_tm_unlock(*tm, *sblock);
+bad1:
+ dm_tm_destroy(*tm);
+ dm_sm_destroy(*sm);
+ return r;
+}
+
+int dm_tm_create_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
+ struct dm_block_validator *sb_validator,
+ struct dm_transaction_manager **tm,
+ struct dm_space_map **sm, struct dm_block **sblock)
+{
+ return dm_tm_create_internal(bm, sb_location, sb_validator,
+ 0, 0, tm, sm, sblock, 1);
+}
+EXPORT_SYMBOL_GPL(dm_tm_create_with_sm);
+
+int dm_tm_open_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
+ struct dm_block_validator *sb_validator,
+ size_t root_offset, size_t root_max_len,
+ struct dm_transaction_manager **tm,
+ struct dm_space_map **sm, struct dm_block **sblock)
+{
+ return dm_tm_create_internal(bm, sb_location, sb_validator, root_offset,
+ root_max_len, tm, sm, sblock, 0);
+}
+EXPORT_SYMBOL_GPL(dm_tm_open_with_sm);
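+
+/*
+ * A minimal bring-up sketch for a fresh device, assuming 'bm' and
+ * 'sb_validator' come from the caller and SUPERBLOCK_LOCATION is a
+ * placeholder block number (illustrative only, not part of this patch):
+ *
+ *	struct dm_transaction_manager *tm;
+ *	struct dm_space_map *sm;
+ *	struct dm_block *sblock;
+ *	int r;
+ *
+ *	r = dm_tm_create_with_sm(bm, SUPERBLOCK_LOCATION, sb_validator,
+ *				 &tm, &sm, &sblock);
+ *	if (r < 0)
+ *		return r;
+ *
+ *	... fill in the superblock via dm_block_data(sblock) ...
+ *
+ *	r = dm_tm_pre_commit(tm);
+ *	if (!r)
+ *		r = dm_tm_commit(tm, sblock);
+ */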
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <media/saa7146.h>
+ #include <linux/module.h>
LIST_HEAD(saa7146_devices);
DEFINE_MUTEX(saa7146_devices_lock);
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <media/saa7146_vv.h>
+ #include <linux/module.h>
/****************************************************************************/
/* resource management functions, shamelessly stolen from saa7134 driver */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
+ #include <linux/export.h>
#include <media/saa7146_vv.h>
static void calculate_output_format_register(struct saa7146_dev* saa, u32 palette, u32* clip_format)
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <media/saa7146_vv.h>
#include <media/v4l2-chip-ident.h>
+ #include <linux/module.h>
static int max_memory = 32;
#include <linux/i2c.h>
+#include <linux/mutex.h>
+ #include <linux/module.h>
#include "dibx000_common.h"
*/
#include <linux/delay.h>
+ #include <linux/module.h>
#include <linux/i2c.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <media/adp1653.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
+ #include <linux/module.h>
#include <linux/mutex.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max8997.h>
#include <linux/mfd/core.h>
#include <linux/mfd/wm8400-private.h>
#include <linux/mfd/wm8400-audio.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
+ #include <linux/module.h>
static struct {
u16 readable; /* Mask of readable bits */
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
++#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
+ #include <linux/export.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/gpio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
+ #include <linux/module.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
+
+#include <mach/gpio-tegra.h>
#include <mach/sdhci.h>
#include "sdhci-pltfm.h"
#include "pch_gbe.h"
#include "pch_gbe_api.h"
-#include <linux/prefetch.h>
+ #include <linux/module.h>
#define DRV_VERSION "1.00"
const char pch_driver_version[] = DRV_VERSION;
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
-
- #include <linux/module.h>
+ #include <linux/export.h>
+ #include <linux/moduleparam.h>
-
-#include "base.h"
+#include <linux/seq_file.h>
+#include <linux/list.h>
#include "debug.h"
+#include "ath5k.h"
+#include "reg.h"
+#include "base.h"
static unsigned int ath5k_debug;
module_param_named(debug, ath5k_debug, uint, 0);
--- /dev/null
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
++#include <linux/moduleparam.h>
++
+#include "core.h"
+#include "cfg80211.h"
+#include "debug.h"
+#include "hif-ops.h"
+#include "testmode.h"
+
+static unsigned int ath6kl_p2p;
+
+module_param(ath6kl_p2p, uint, 0644);
+
+#define RATETAB_ENT(_rate, _rateid, _flags) { \
+ .bitrate = (_rate), \
+ .flags = (_flags), \
+ .hw_value = (_rateid), \
+}
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _flags) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = 5000 + (5 * (_channel)), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+static struct ieee80211_rate ath6kl_rates[] = {
+ RATETAB_ENT(10, 0x1, 0),
+ RATETAB_ENT(20, 0x2, 0),
+ RATETAB_ENT(55, 0x4, 0),
+ RATETAB_ENT(110, 0x8, 0),
+ RATETAB_ENT(60, 0x10, 0),
+ RATETAB_ENT(90, 0x20, 0),
+ RATETAB_ENT(120, 0x40, 0),
+ RATETAB_ENT(180, 0x80, 0),
+ RATETAB_ENT(240, 0x100, 0),
+ RATETAB_ENT(360, 0x200, 0),
+ RATETAB_ENT(480, 0x400, 0),
+ RATETAB_ENT(540, 0x800, 0),
+};
+
+#define ath6kl_a_rates (ath6kl_rates + 4)
+#define ath6kl_a_rates_size 8
+#define ath6kl_g_rates (ath6kl_rates + 0)
+#define ath6kl_g_rates_size 12
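+
+/*
+ * ath6kl_rates[0..3] are the 802.11b CCK rates (1, 2, 5.5 and 11 Mbps),
+ * so ath6kl_a_rates skips them and exposes only the eight OFDM rates
+ * valid on the 5 GHz band, while the 2.4 GHz band advertises all twelve.
+ */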
+
+static struct ieee80211_channel ath6kl_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static struct ieee80211_channel ath6kl_5ghz_a_channels[] = {
+ CHAN5G(34, 0), CHAN5G(36, 0),
+ CHAN5G(38, 0), CHAN5G(40, 0),
+ CHAN5G(42, 0), CHAN5G(44, 0),
+ CHAN5G(46, 0), CHAN5G(48, 0),
+ CHAN5G(52, 0), CHAN5G(56, 0),
+ CHAN5G(60, 0), CHAN5G(64, 0),
+ CHAN5G(100, 0), CHAN5G(104, 0),
+ CHAN5G(108, 0), CHAN5G(112, 0),
+ CHAN5G(116, 0), CHAN5G(120, 0),
+ CHAN5G(124, 0), CHAN5G(128, 0),
+ CHAN5G(132, 0), CHAN5G(136, 0),
+ CHAN5G(140, 0), CHAN5G(149, 0),
+ CHAN5G(153, 0), CHAN5G(157, 0),
+ CHAN5G(161, 0), CHAN5G(165, 0),
+ CHAN5G(184, 0), CHAN5G(188, 0),
+ CHAN5G(192, 0), CHAN5G(196, 0),
+ CHAN5G(200, 0), CHAN5G(204, 0),
+ CHAN5G(208, 0), CHAN5G(212, 0),
+ CHAN5G(216, 0),
+};
+
+static struct ieee80211_supported_band ath6kl_band_2ghz = {
+ .n_channels = ARRAY_SIZE(ath6kl_2ghz_channels),
+ .channels = ath6kl_2ghz_channels,
+ .n_bitrates = ath6kl_g_rates_size,
+ .bitrates = ath6kl_g_rates,
+};
+
+static struct ieee80211_supported_band ath6kl_band_5ghz = {
+ .n_channels = ARRAY_SIZE(ath6kl_5ghz_a_channels),
+ .channels = ath6kl_5ghz_a_channels,
+ .n_bitrates = ath6kl_a_rates_size,
+ .bitrates = ath6kl_a_rates,
+};
+
+static int ath6kl_set_wpa_version(struct ath6kl *ar,
+ enum nl80211_wpa_versions wpa_version)
+{
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: %u\n", __func__, wpa_version);
+
+ if (!wpa_version) {
+ ar->auth_mode = NONE_AUTH;
+ } else if (wpa_version & NL80211_WPA_VERSION_2) {
+ ar->auth_mode = WPA2_AUTH;
+ } else if (wpa_version & NL80211_WPA_VERSION_1) {
+ ar->auth_mode = WPA_AUTH;
+ } else {
+ ath6kl_err("%s: %u not supported\n", __func__, wpa_version);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ath6kl_set_auth_type(struct ath6kl *ar,
+ enum nl80211_auth_type auth_type)
+{
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, auth_type);
+
+ switch (auth_type) {
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+ ar->dot11_auth_mode = OPEN_AUTH;
+ break;
+ case NL80211_AUTHTYPE_SHARED_KEY:
+ ar->dot11_auth_mode = SHARED_AUTH;
+ break;
+ case NL80211_AUTHTYPE_NETWORK_EAP:
+ ar->dot11_auth_mode = LEAP_AUTH;
+ break;
+
+ case NL80211_AUTHTYPE_AUTOMATIC:
+ ar->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH;
+ break;
+
+ default:
+		ath6kl_err("%s: 0x%x not supported\n", __func__, auth_type);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast)
+{
+ u8 *ar_cipher = ucast ? &ar->prwise_crypto : &ar->grp_crypto;
+ u8 *ar_cipher_len = ucast ? &ar->prwise_crypto_len :
+ &ar->grp_crypto_len;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: cipher 0x%x, ucast %u\n",
+ __func__, cipher, ucast);
+
+ switch (cipher) {
+ case 0:
+ /* our own hack to use value 0 as no crypto used */
+ *ar_cipher = NONE_CRYPT;
+ *ar_cipher_len = 0;
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ *ar_cipher = WEP_CRYPT;
+ *ar_cipher_len = 5;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ *ar_cipher = WEP_CRYPT;
+ *ar_cipher_len = 13;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ *ar_cipher = TKIP_CRYPT;
+ *ar_cipher_len = 0;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ *ar_cipher = AES_CRYPT;
+ *ar_cipher_len = 0;
+ break;
+ default:
+ ath6kl_err("cipher 0x%x not supported\n", cipher);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
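+
+/*
+ * The WEP key lengths above are in octets: 5 octets is WEP-40 and 13
+ * octets is WEP-104.  TKIP and CCMP key sizes are implied by the cipher
+ * itself, so their length fields stay 0.
+ */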
+
+static void ath6kl_set_key_mgmt(struct ath6kl *ar, u32 key_mgmt)
+{
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, key_mgmt);
+
+ if (key_mgmt == WLAN_AKM_SUITE_PSK) {
+ if (ar->auth_mode == WPA_AUTH)
+ ar->auth_mode = WPA_PSK_AUTH;
+ else if (ar->auth_mode == WPA2_AUTH)
+ ar->auth_mode = WPA2_PSK_AUTH;
+ } else if (key_mgmt != WLAN_AKM_SUITE_8021X) {
+ ar->auth_mode = NONE_AUTH;
+ }
+}
+
+static bool ath6kl_cfg80211_ready(struct ath6kl *ar)
+{
+ if (!test_bit(WMI_READY, &ar->flag)) {
+ ath6kl_err("wmi is not ready\n");
+ return false;
+ }
+
+ if (!test_bit(WLAN_ENABLED, &ar->flag)) {
+ ath6kl_err("wlan disabled\n");
+ return false;
+ }
+
+ return true;
+}
+
+static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ int status;
+
+ ar->sme_state = SME_CONNECTING;
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
+ ath6kl_err("destroy in progress\n");
+ return -EBUSY;
+ }
+
+ if (test_bit(SKIP_SCAN, &ar->flag) &&
+ ((sme->channel && sme->channel->center_freq == 0) ||
+ (sme->bssid && is_zero_ether_addr(sme->bssid)))) {
+ ath6kl_err("SkipScan: channel or bssid invalid\n");
+ return -EINVAL;
+ }
+
+ if (down_interruptible(&ar->sem)) {
+ ath6kl_err("busy, couldn't get access\n");
+ return -ERESTARTSYS;
+ }
+
+ if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
+ ath6kl_err("busy, destroy in progress\n");
+ up(&ar->sem);
+ return -EBUSY;
+ }
+
+ if (ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)]) {
+ /*
+ * sleep until the command queue drains
+ */
+ wait_event_interruptible_timeout(ar->event_wq,
+ ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)] == 0,
+ WMI_TIMEOUT);
+ if (signal_pending(current)) {
+ ath6kl_err("cmd queue drain timeout\n");
+ up(&ar->sem);
+ return -EINTR;
+ }
+ }
+
+ if (test_bit(CONNECTED, &ar->flag) &&
+ ar->ssid_len == sme->ssid_len &&
+ !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
+ ar->reconnect_flag = true;
+ status = ath6kl_wmi_reconnect_cmd(ar->wmi, ar->req_bssid,
+ ar->ch_hint);
+
+ up(&ar->sem);
+ if (status) {
+ ath6kl_err("wmi_reconnect_cmd failed\n");
+ return -EIO;
+ }
+ return 0;
+ } else if (ar->ssid_len == sme->ssid_len &&
+ !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
+ ath6kl_disconnect(ar);
+ }
+
+ memset(ar->ssid, 0, sizeof(ar->ssid));
+ ar->ssid_len = sme->ssid_len;
+ memcpy(ar->ssid, sme->ssid, sme->ssid_len);
+
+ if (sme->channel)
+ ar->ch_hint = sme->channel->center_freq;
+
+ memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+ if (sme->bssid && !is_broadcast_ether_addr(sme->bssid))
+ memcpy(ar->req_bssid, sme->bssid, sizeof(ar->req_bssid));
+
+ ath6kl_set_wpa_version(ar, sme->crypto.wpa_versions);
+
+ status = ath6kl_set_auth_type(ar, sme->auth_type);
+ if (status) {
+ up(&ar->sem);
+ return status;
+ }
+
+ if (sme->crypto.n_ciphers_pairwise)
+ ath6kl_set_cipher(ar, sme->crypto.ciphers_pairwise[0], true);
+ else
+ ath6kl_set_cipher(ar, 0, true);
+
+ ath6kl_set_cipher(ar, sme->crypto.cipher_group, false);
+
+ if (sme->crypto.n_akm_suites)
+ ath6kl_set_key_mgmt(ar, sme->crypto.akm_suites[0]);
+
+ if ((sme->key_len) &&
+ (ar->auth_mode == NONE_AUTH) && (ar->prwise_crypto == WEP_CRYPT)) {
+ struct ath6kl_key *key = NULL;
+
+ if (sme->key_idx < WMI_MIN_KEY_INDEX ||
+ sme->key_idx > WMI_MAX_KEY_INDEX) {
+ ath6kl_err("key index %d out of bounds\n",
+ sme->key_idx);
+ up(&ar->sem);
+ return -ENOENT;
+ }
+
+ key = &ar->keys[sme->key_idx];
+ key->key_len = sme->key_len;
+ memcpy(key->key, sme->key, key->key_len);
+ key->cipher = ar->prwise_crypto;
+ ar->def_txkey_index = sme->key_idx;
+
+ ath6kl_wmi_addkey_cmd(ar->wmi, sme->key_idx,
+ ar->prwise_crypto,
+ GROUP_USAGE | TX_USAGE,
+ key->key_len,
+ NULL,
+ key->key, KEY_OP_INIT_VAL, NULL,
+ NO_SYNC_WMIFLAG);
+ }
+
+ if (!ar->usr_bss_filter) {
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
+ if (ath6kl_wmi_bssfilter_cmd(ar->wmi, ALL_BSS_FILTER, 0) != 0) {
+ ath6kl_err("couldn't set bss filtering\n");
+ up(&ar->sem);
+ return -EIO;
+ }
+ }
+
+ ar->nw_type = ar->next_mode;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: connect called with authmode %d dot11 auth %d"
+ " PW crypto %d PW crypto len %d GRP crypto %d"
+ " GRP crypto len %d channel hint %u\n",
+ __func__,
+ ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
+ ar->prwise_crypto_len, ar->grp_crypto,
+ ar->grp_crypto_len, ar->ch_hint);
+
+ ar->reconnect_flag = 0;
+ status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
+ ar->dot11_auth_mode, ar->auth_mode,
+ ar->prwise_crypto,
+ ar->prwise_crypto_len,
+ ar->grp_crypto, ar->grp_crypto_len,
+ ar->ssid_len, ar->ssid,
+ ar->req_bssid, ar->ch_hint,
+ ar->connect_ctrl_flags);
+
+ up(&ar->sem);
+
+ if (status == -EINVAL) {
+ memset(ar->ssid, 0, sizeof(ar->ssid));
+ ar->ssid_len = 0;
+ ath6kl_err("invalid request\n");
+ return -ENOENT;
+ } else if (status) {
+ ath6kl_err("ath6kl_wmi_connect_cmd failed\n");
+ return -EIO;
+ }
+
+ if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) &&
+ ((ar->auth_mode == WPA_PSK_AUTH)
+ || (ar->auth_mode == WPA2_PSK_AUTH))) {
+ mod_timer(&ar->disconnect_timer,
+ jiffies + msecs_to_jiffies(DISCON_TIMER_INTVAL));
+ }
+
+ ar->connect_ctrl_flags &= ~CONNECT_DO_WPA_OFFLOAD;
+ set_bit(CONNECT_PEND, &ar->flag);
+
+ return 0;
+}
+
+static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid,
+ struct ieee80211_channel *chan,
+ const u8 *beacon_ie, size_t beacon_ie_len)
+{
+ struct cfg80211_bss *bss;
+ u8 *ie;
+
+ bss = cfg80211_get_bss(ar->wdev->wiphy, chan, bssid,
+ ar->ssid, ar->ssid_len, WLAN_CAPABILITY_ESS,
+ WLAN_CAPABILITY_ESS);
+ if (bss == NULL) {
+ /*
+ * Since cfg80211 may not yet know about the BSS,
+ * generate a partial entry until the first BSS info
+ * event becomes available.
+ *
+ * Prepend SSID element since it is not included in the Beacon
+ * IEs from the target.
+ */
+ ie = kmalloc(2 + ar->ssid_len + beacon_ie_len, GFP_KERNEL);
+ if (ie == NULL)
+ return -ENOMEM;
+ ie[0] = WLAN_EID_SSID;
+ ie[1] = ar->ssid_len;
+ memcpy(ie + 2, ar->ssid, ar->ssid_len);
+ memcpy(ie + 2 + ar->ssid_len, beacon_ie, beacon_ie_len);
+ bss = cfg80211_inform_bss(ar->wdev->wiphy, chan,
+ bssid, 0, WLAN_CAPABILITY_ESS, 100,
+ ie, 2 + ar->ssid_len + beacon_ie_len,
+ 0, GFP_KERNEL);
+ if (bss)
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "added dummy bss for "
+ "%pM prior to indicating connect/roamed "
+ "event\n", bssid);
+ kfree(ie);
+ } else
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss "
+ "entry\n");
+
+ if (bss == NULL)
+ return -ENOMEM;
+
+ cfg80211_put_bss(bss);
+
+ return 0;
+}
+
+void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
+ u8 *bssid, u16 listen_intvl,
+ u16 beacon_intvl,
+ enum network_type nw_type,
+ u8 beacon_ie_len, u8 assoc_req_len,
+ u8 assoc_resp_len, u8 *assoc_info)
+{
+ struct ieee80211_channel *chan;
+
+ /* capinfo + listen interval */
+ u8 assoc_req_ie_offset = sizeof(u16) + sizeof(u16);
+
+ /* capinfo + status code + associd */
+ u8 assoc_resp_ie_offset = sizeof(u16) + sizeof(u16) + sizeof(u16);
+
+ u8 *assoc_req_ie = assoc_info + beacon_ie_len + assoc_req_ie_offset;
+ u8 *assoc_resp_ie = assoc_info + beacon_ie_len + assoc_req_len +
+ assoc_resp_ie_offset;
+
+ assoc_req_len -= assoc_req_ie_offset;
+ assoc_resp_len -= assoc_resp_ie_offset;
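+
+	/*
+	 * assoc_info is laid out as the Beacon IEs, followed by the
+	 * (Re)Association Request body and then the (Re)Association
+	 * Response body.  The fixed fields skipped above are two octets
+	 * each: capability info and listen interval in the request;
+	 * capability info, status code and AID in the response.
+	 */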
+
+ /*
+ * Store Beacon interval here; DTIM period will be available only once
+ * a Beacon frame from the AP is seen.
+ */
+ ar->assoc_bss_beacon_int = beacon_intvl;
+ clear_bit(DTIM_PERIOD_AVAIL, &ar->flag);
+
+ if (nw_type & ADHOC_NETWORK) {
+ if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: ath6k not in ibss mode\n", __func__);
+ return;
+ }
+ }
+
+ if (nw_type & INFRA_NETWORK) {
+ if (ar->wdev->iftype != NL80211_IFTYPE_STATION &&
+ ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: ath6k not in station mode\n", __func__);
+ return;
+ }
+ }
+
+ chan = ieee80211_get_channel(ar->wdev->wiphy, (int) channel);
+
+ if (nw_type & ADHOC_NETWORK) {
+ cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL);
+ return;
+ }
+
+ if (ath6kl_add_bss_if_needed(ar, bssid, chan, assoc_info,
+ beacon_ie_len) < 0) {
+ ath6kl_err("could not add cfg80211 bss entry for "
+ "connect/roamed notification\n");
+ return;
+ }
+
+ if (ar->sme_state == SME_CONNECTING) {
+ /* inform connect result to cfg80211 */
+ ar->sme_state = SME_CONNECTED;
+ cfg80211_connect_result(ar->net_dev, bssid,
+ assoc_req_ie, assoc_req_len,
+ assoc_resp_ie, assoc_resp_len,
+ WLAN_STATUS_SUCCESS, GFP_KERNEL);
+ } else if (ar->sme_state == SME_CONNECTED) {
+ /* inform roam event to cfg80211 */
+ cfg80211_roamed(ar->net_dev, chan, bssid,
+ assoc_req_ie, assoc_req_len,
+ assoc_resp_ie, assoc_resp_len, GFP_KERNEL);
+ }
+}
+
+static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy,
+ struct net_device *dev, u16 reason_code)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: reason=%u\n", __func__,
+ reason_code);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
+ ath6kl_err("busy, destroy in progress\n");
+ return -EBUSY;
+ }
+
+ if (down_interruptible(&ar->sem)) {
+ ath6kl_err("busy, couldn't get access\n");
+ return -ERESTARTSYS;
+ }
+
+ ar->reconnect_flag = 0;
+ ath6kl_disconnect(ar);
+ memset(ar->ssid, 0, sizeof(ar->ssid));
+ ar->ssid_len = 0;
+
+ if (!test_bit(SKIP_SCAN, &ar->flag))
+ memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+
+ up(&ar->sem);
+
+ ar->sme_state = SME_DISCONNECTED;
+
+ return 0;
+}
+
+void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
+ u8 *bssid, u8 assoc_resp_len,
+ u8 *assoc_info, u16 proto_reason)
+{
+ if (ar->scan_req) {
+ cfg80211_scan_done(ar->scan_req, true);
+ ar->scan_req = NULL;
+ }
+
+ if (ar->nw_type & ADHOC_NETWORK) {
+ if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: ath6k not in ibss mode\n", __func__);
+ return;
+ }
+ memset(bssid, 0, ETH_ALEN);
+ cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL);
+ return;
+ }
+
+ if (ar->nw_type & INFRA_NETWORK) {
+ if (ar->wdev->iftype != NL80211_IFTYPE_STATION &&
+ ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: ath6k not in station mode\n", __func__);
+ return;
+ }
+ }
+
+ /*
+ * Send a disconnect command to target when a disconnect event is
+ * received with reason code other than 3 (DISCONNECT_CMD - disconnect
+ * request from host) to make the firmware stop trying to connect even
+ * after giving disconnect event. There will be one more disconnect
+ * event for this disconnect command with reason code DISCONNECT_CMD
+ * which will be notified to cfg80211.
+ */
+
+ if (reason != DISCONNECT_CMD) {
+ ath6kl_wmi_disconnect_cmd(ar->wmi);
+ return;
+ }
+
+ clear_bit(CONNECT_PEND, &ar->flag);
+
+ if (ar->sme_state == SME_CONNECTING) {
+ cfg80211_connect_result(ar->net_dev,
+ bssid, NULL, 0,
+ NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ } else if (ar->sme_state == SME_CONNECTED) {
+ cfg80211_disconnected(ar->net_dev, reason,
+ NULL, 0, GFP_KERNEL);
+ }
+
+ ar->sme_state = SME_DISCONNECTED;
+}
+
+static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ s8 n_channels = 0;
+ u16 *channels = NULL;
+ int ret = 0;
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (!ar->usr_bss_filter) {
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
+ ret = ath6kl_wmi_bssfilter_cmd(
+ ar->wmi,
+ (test_bit(CONNECTED, &ar->flag) ?
+ ALL_BUT_BSS_FILTER : ALL_BSS_FILTER), 0);
+ if (ret) {
+ ath6kl_err("couldn't set bss filtering\n");
+ return ret;
+ }
+ }
+
+ if (request->n_ssids && request->ssids[0].ssid_len) {
+ u8 i;
+
+ if (request->n_ssids > (MAX_PROBED_SSID_INDEX - 1))
+ request->n_ssids = MAX_PROBED_SSID_INDEX - 1;
+
+ for (i = 0; i < request->n_ssids; i++)
+ ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1,
+ SPECIFIC_SSID_FLAG,
+ request->ssids[i].ssid_len,
+ request->ssids[i].ssid);
+ }
+
+ if (request->ie) {
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_REQ,
+ request->ie, request->ie_len);
+ if (ret) {
+			ath6kl_err("failed to set Probe Request appie for "
+				   "scan\n");
+ return ret;
+ }
+ }
+
+ /*
+ * Scan only the requested channels if the request specifies a set of
+ * channels. If the list is longer than the target supports, do not
+ * configure the list and instead, scan all available channels.
+ */
+ if (request->n_channels > 0 &&
+ request->n_channels <= WMI_MAX_CHANNELS) {
+ u8 i;
+
+ n_channels = request->n_channels;
+
+ channels = kzalloc(n_channels * sizeof(u16), GFP_KERNEL);
+ if (channels == NULL) {
+			ath6kl_warn("failed to set scan channels, "
+				    "scanning all channels\n");
+ n_channels = 0;
+ }
+
+ for (i = 0; i < n_channels; i++)
+ channels[i] = request->channels[i]->center_freq;
+ }
+
+ ret = ath6kl_wmi_startscan_cmd(ar->wmi, WMI_LONG_SCAN, 0,
+ false, 0, 0, n_channels, channels);
+ if (ret)
+ ath6kl_err("wmi_startscan_cmd failed\n");
+ else
+ ar->scan_req = request;
+
+ kfree(channels);
+
+ return ret;
+}
+
+void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status)
+{
+ int i;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status %d\n", __func__, status);
+
+ if (!ar->scan_req)
+ return;
+
+ if ((status == -ECANCELED) || (status == -EBUSY)) {
+ cfg80211_scan_done(ar->scan_req, true);
+ goto out;
+ }
+
+ cfg80211_scan_done(ar->scan_req, false);
+
+ if (ar->scan_req->n_ssids && ar->scan_req->ssids[0].ssid_len) {
+ for (i = 0; i < ar->scan_req->n_ssids; i++) {
+ ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1,
+ DISABLE_SSID_FLAG,
+ 0, NULL);
+ }
+ }
+
+out:
+ ar->scan_req = NULL;
+}
+
+static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr,
+ struct key_params *params)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl_key *key = NULL;
+ u8 key_usage;
+ u8 key_type;
+ int status = 0;
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: key index %d out of bounds\n", __func__,
+ key_index);
+ return -ENOENT;
+ }
+
+ key = &ar->keys[key_index];
+ memset(key, 0, sizeof(struct ath6kl_key));
+
+ if (pairwise)
+ key_usage = PAIRWISE_USAGE;
+ else
+ key_usage = GROUP_USAGE;
+
+ if (params) {
+ if (params->key_len > WLAN_MAX_KEY_LEN ||
+ params->seq_len > sizeof(key->seq))
+ return -EINVAL;
+
+ key->key_len = params->key_len;
+ memcpy(key->key, params->key, key->key_len);
+ key->seq_len = params->seq_len;
+ memcpy(key->seq, params->seq, key->seq_len);
+ key->cipher = params->cipher;
+ }
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ key_type = WEP_CRYPT;
+ break;
+
+ case WLAN_CIPHER_SUITE_TKIP:
+ key_type = TKIP_CRYPT;
+ break;
+
+ case WLAN_CIPHER_SUITE_CCMP:
+ key_type = AES_CRYPT;
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (((ar->auth_mode == WPA_PSK_AUTH)
+ || (ar->auth_mode == WPA2_PSK_AUTH))
+ && (key_usage & GROUP_USAGE))
+ del_timer(&ar->disconnect_timer);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: index %d, key_len %d, key_type 0x%x, key_usage 0x%x, seq_len %d\n",
+ __func__, key_index, key->key_len, key_type,
+ key_usage, key->seq_len);
+
+ ar->def_txkey_index = key_index;
+
+ if (ar->nw_type == AP_NETWORK && !pairwise &&
+ (key_type == TKIP_CRYPT || key_type == AES_CRYPT) && params) {
+ ar->ap_mode_bkey.valid = true;
+ ar->ap_mode_bkey.key_index = key_index;
+ ar->ap_mode_bkey.key_type = key_type;
+ ar->ap_mode_bkey.key_len = key->key_len;
+ memcpy(ar->ap_mode_bkey.key, key->key, key->key_len);
+ if (!test_bit(CONNECTED, &ar->flag)) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay initial group "
+ "key configuration until AP mode has been "
+ "started\n");
+ /*
+ * The key will be set in ath6kl_connect_ap_mode() once
+ * the connected event is received from the target.
+ */
+ return 0;
+ }
+ }
+
+ if (ar->next_mode == AP_NETWORK && key_type == WEP_CRYPT &&
+ !test_bit(CONNECTED, &ar->flag)) {
+ /*
+ * Store the key locally so that it can be re-configured after
+ * the AP mode has properly started
+ * (ath6kl_install_statioc_wep_keys).
+ */
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay WEP key configuration "
+ "until AP mode has been started\n");
+ ar->wep_key_list[key_index].key_len = key->key_len;
+ memcpy(ar->wep_key_list[key_index].key, key->key, key->key_len);
+ return 0;
+ }
+
+ status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index,
+ key_type, key_usage, key->key_len,
+ key->seq, key->key, KEY_OP_INIT_VAL,
+ (u8 *) mac_addr, SYNC_BOTH_WMIFLAG);
+
+ if (status)
+ return -EIO;
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: key index %d out of bounds\n", __func__,
+ key_index);
+ return -ENOENT;
+ }
+
+ if (!ar->keys[key_index].key_len) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: index %d is empty\n", __func__, key_index);
+ return 0;
+ }
+
+ ar->keys[key_index].key_len = 0;
+
+ return ath6kl_wmi_deletekey_cmd(ar->wmi, key_index);
+}
+
+static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr, void *cookie,
+ void (*callback) (void *cookie,
+ struct key_params *))
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl_key *key = NULL;
+ struct key_params params;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: key index %d out of bounds\n", __func__,
+ key_index);
+ return -ENOENT;
+ }
+
+ key = &ar->keys[key_index];
+	memset(&params, 0, sizeof(params));
+ params.cipher = key->cipher;
+ params.key_len = key->key_len;
+ params.seq_len = key->seq_len;
+ params.seq = key->seq;
+ params.key = key->key;
+
+	callback(cookie, &params);
+
+ return key->key_len ? 0 : -ENOENT;
+}
+
+static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 key_index, bool unicast,
+ bool multicast)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl_key *key = NULL;
+ int status = 0;
+ u8 key_usage;
+ enum crypto_type key_type = NONE_CRYPT;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: key index %d out of bounds\n",
+ __func__, key_index);
+ return -ENOENT;
+ }
+
+ if (!ar->keys[key_index].key_len) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: invalid key index %d\n",
+ __func__, key_index);
+ return -EINVAL;
+ }
+
+ ar->def_txkey_index = key_index;
+ key = &ar->keys[ar->def_txkey_index];
+ key_usage = GROUP_USAGE;
+ if (ar->prwise_crypto == WEP_CRYPT)
+ key_usage |= TX_USAGE;
+ if (unicast)
+ key_type = ar->prwise_crypto;
+ if (multicast)
+ key_type = ar->grp_crypto;
+
+ if (ar->next_mode == AP_NETWORK && !test_bit(CONNECTED, &ar->flag))
+ return 0; /* Delay until AP mode has been started */
+
+ status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index,
+ key_type, key_usage,
+ key->key_len, key->seq, key->key,
+ KEY_OP_INIT_VAL, NULL,
+ SYNC_BOTH_WMIFLAG);
+ if (status)
+ return -EIO;
+
+ return 0;
+}
+
+void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
+ bool ismcast)
+{
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: keyid %d, ismcast %d\n", __func__, keyid, ismcast);
+
+ cfg80211_michael_mic_failure(ar->net_dev, ar->bssid,
+ (ismcast ? NL80211_KEYTYPE_GROUP :
+ NL80211_KEYTYPE_PAIRWISE), keyid, NULL,
+ GFP_KERNEL);
+}
+
+static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+ struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+ int ret;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: changed 0x%x\n", __func__,
+ changed);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
+ ret = ath6kl_wmi_set_rts_cmd(ar->wmi, wiphy->rts_threshold);
+ if (ret != 0) {
+ ath6kl_err("ath6kl_wmi_set_rts_cmd failed\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * The type nl80211_tx_power_setting replaces the older tx_power_setting
+ * enum from 2.6.36 onwards.
+ */
+static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
+ enum nl80211_tx_power_setting type,
+ int dbm)
+{
+ struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+ u8 ath6kl_dbm;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x, dbm %d\n", __func__,
+ type, dbm);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ switch (type) {
+ case NL80211_TX_POWER_AUTOMATIC:
+ return 0;
+ case NL80211_TX_POWER_LIMITED:
+ ar->tx_pwr = ath6kl_dbm = dbm;
+ break;
+ default:
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x not supported\n",
+ __func__, type);
+ return -EOPNOTSUPP;
+ }
+
+ ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, ath6kl_dbm);
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
+{
+ struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (test_bit(CONNECTED, &ar->flag)) {
+ ar->tx_pwr = 0;
+
+ if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi) != 0) {
+ ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n");
+ return -EIO;
+ }
+
+ wait_event_interruptible_timeout(ar->event_wq, ar->tx_pwr != 0,
+ 5 * HZ);
+
+ if (signal_pending(current)) {
+ ath6kl_err("target did not respond\n");
+ return -EINTR;
+ }
+ }
+
+ *dbm = ar->tx_pwr;
+ return 0;
+}
+
+static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+ struct net_device *dev,
+ bool pmgmt, int timeout)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct wmi_power_mode_cmd mode;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: pmgmt %d, timeout %d\n",
+ __func__, pmgmt, timeout);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+	if (pmgmt) {
+		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: rec power\n", __func__);
+		mode.pwr_mode = REC_POWER;
+	} else {
+		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: max perf\n", __func__);
+		mode.pwr_mode = MAX_PERF_POWER;
+	}
+
+ if (ath6kl_wmi_powermode_cmd(ar->wmi, mode.pwr_mode) != 0) {
+ ath6kl_err("wmi_powermode_cmd failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
+ struct net_device *ndev,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ struct ath6kl *ar = ath6kl_priv(ndev);
+ struct wireless_dev *wdev = ar->wdev;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ ar->next_mode = INFRA_NETWORK;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ar->next_mode = ADHOC_NETWORK;
+ break;
+ case NL80211_IFTYPE_AP:
+ ar->next_mode = AP_NETWORK;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ ar->next_mode = INFRA_NETWORK;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ ar->next_mode = AP_NETWORK;
+ break;
+ default:
+ ath6kl_err("invalid interface type %u\n", type);
+ return -EOPNOTSUPP;
+ }
+
+ wdev->iftype = type;
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_ibss_params *ibss_param)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ int status;
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ ar->ssid_len = ibss_param->ssid_len;
+ memcpy(ar->ssid, ibss_param->ssid, ar->ssid_len);
+
+ if (ibss_param->channel)
+ ar->ch_hint = ibss_param->channel->center_freq;
+
+ if (ibss_param->channel_fixed) {
+ /*
+ * TODO: channel_fixed: The channel should be fixed, do not
+ * search for IBSSs to join on other channels. Target
+ * firmware does not support this feature, needs to be
+ * updated.
+ */
+ return -EOPNOTSUPP;
+ }
+
+ memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+ if (ibss_param->bssid && !is_broadcast_ether_addr(ibss_param->bssid))
+ memcpy(ar->req_bssid, ibss_param->bssid, sizeof(ar->req_bssid));
+
+ ath6kl_set_wpa_version(ar, 0);
+
+ status = ath6kl_set_auth_type(ar, NL80211_AUTHTYPE_OPEN_SYSTEM);
+ if (status)
+ return status;
+
+ if (ibss_param->privacy) {
+ ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, true);
+ ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, false);
+ } else {
+ ath6kl_set_cipher(ar, 0, true);
+ ath6kl_set_cipher(ar, 0, false);
+ }
+
+ ar->nw_type = ar->next_mode;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: connect called with authmode %d dot11 auth %d"
+ " PW crypto %d PW crypto len %d GRP crypto %d"
+ " GRP crypto len %d channel hint %u\n",
+ __func__,
+ ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
+ ar->prwise_crypto_len, ar->grp_crypto,
+ ar->grp_crypto_len, ar->ch_hint);
+
+ status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
+ ar->dot11_auth_mode, ar->auth_mode,
+ ar->prwise_crypto,
+ ar->prwise_crypto_len,
+ ar->grp_crypto, ar->grp_crypto_len,
+ ar->ssid_len, ar->ssid,
+ ar->req_bssid, ar->ch_hint,
+ ar->connect_ctrl_flags);
+	if (status)
+		return status;
+
+	set_bit(CONNECT_PEND, &ar->flag);
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_leave_ibss(struct wiphy *wiphy,
+ struct net_device *dev)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ ath6kl_disconnect(ar);
+ memset(ar->ssid, 0, sizeof(ar->ssid));
+ ar->ssid_len = 0;
+
+ return 0;
+}
+
+static const u32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+};
+
+static bool is_rate_legacy(s32 rate)
+{
+ static const s32 legacy[] = { 1000, 2000, 5500, 11000,
+ 6000, 9000, 12000, 18000, 24000,
+ 36000, 48000, 54000
+ };
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(legacy); i++)
+ if (rate == legacy[i])
+ return true;
+
+ return false;
+}
+
+static bool is_rate_ht20(s32 rate, u8 *mcs, bool *sgi)
+{
+ static const s32 ht20[] = { 6500, 13000, 19500, 26000, 39000,
+ 52000, 58500, 65000, 72200
+ };
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(ht20); i++) {
+ if (rate == ht20[i]) {
+ if (i == ARRAY_SIZE(ht20) - 1)
+ /* last rate uses sgi */
+ *sgi = true;
+ else
+ *sgi = false;
+
+ *mcs = i;
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool is_rate_ht40(s32 rate, u8 *mcs, bool *sgi)
+{
+ static const s32 ht40[] = { 13500, 27000, 40500, 54000,
+ 81000, 108000, 121500, 135000,
+ 150000
+ };
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(ht40); i++) {
+ if (rate == ht40[i]) {
+ if (i == ARRAY_SIZE(ht40) - 1)
+ /* last rate uses sgi */
+ *sgi = true;
+ else
+ *sgi = false;
+
+ *mcs = i;
+ return true;
+ }
+ }
+
+ return false;
+}
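+
+/*
+ * The tables above map the target's reported unicast tx rate (in kbps)
+ * onto HT MCS indices 0-7.  The last entry of each table is the MCS 7
+ * short-GI rate, which is why the caller reports mcs - 1 whenever *sgi
+ * comes back true.
+ */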
+
+static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
+ u8 *mac, struct station_info *sinfo)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ long left;
+ bool sgi;
+ s32 rate;
+ int ret;
+ u8 mcs;
+
+ if (memcmp(mac, ar->bssid, ETH_ALEN) != 0)
+ return -ENOENT;
+
+ if (down_interruptible(&ar->sem))
+ return -EBUSY;
+
+ set_bit(STATS_UPDATE_PEND, &ar->flag);
+
+ ret = ath6kl_wmi_get_stats_cmd(ar->wmi);
+
+ if (ret != 0) {
+ up(&ar->sem);
+ return -EIO;
+ }
+
+ left = wait_event_interruptible_timeout(ar->event_wq,
+ !test_bit(STATS_UPDATE_PEND,
+ &ar->flag),
+ WMI_TIMEOUT);
+
+ up(&ar->sem);
+
+ if (left == 0)
+ return -ETIMEDOUT;
+ else if (left < 0)
+ return left;
+
+ if (ar->target_stats.rx_byte) {
+ sinfo->rx_bytes = ar->target_stats.rx_byte;
+ sinfo->filled |= STATION_INFO_RX_BYTES;
+ sinfo->rx_packets = ar->target_stats.rx_pkt;
+ sinfo->filled |= STATION_INFO_RX_PACKETS;
+ }
+
+ if (ar->target_stats.tx_byte) {
+ sinfo->tx_bytes = ar->target_stats.tx_byte;
+ sinfo->filled |= STATION_INFO_TX_BYTES;
+ sinfo->tx_packets = ar->target_stats.tx_pkt;
+ sinfo->filled |= STATION_INFO_TX_PACKETS;
+ }
+
+ sinfo->signal = ar->target_stats.cs_rssi;
+ sinfo->filled |= STATION_INFO_SIGNAL;
+
+ rate = ar->target_stats.tx_ucast_rate;
+
+ if (is_rate_legacy(rate)) {
+ sinfo->txrate.legacy = rate / 100;
+ } else if (is_rate_ht20(rate, &mcs, &sgi)) {
+ if (sgi) {
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ sinfo->txrate.mcs = mcs - 1;
+ } else {
+ sinfo->txrate.mcs = mcs;
+ }
+
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
+ } else if (is_rate_ht40(rate, &mcs, &sgi)) {
+ if (sgi) {
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ sinfo->txrate.mcs = mcs - 1;
+ } else {
+ sinfo->txrate.mcs = mcs;
+ }
+
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
+ } else {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "invalid rate from stats: %d\n", rate);
+ ath6kl_debug_war(ar, ATH6KL_WAR_INVALID_RATE);
+ return 0;
+ }
+
+ sinfo->filled |= STATION_INFO_TX_BITRATE;
+
+ if (test_bit(CONNECTED, &ar->flag) &&
+ test_bit(DTIM_PERIOD_AVAIL, &ar->flag) &&
+ ar->nw_type == INFRA_NETWORK) {
+ sinfo->filled |= STATION_INFO_BSS_PARAM;
+ sinfo->bss_param.flags = 0;
+ sinfo->bss_param.dtim_period = ar->assoc_bss_dtim_period;
+ sinfo->bss_param.beacon_interval = ar->assoc_bss_beacon_int;
+ }
+
+ return 0;
+}
+
+static int ath6kl_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct ath6kl *ar = ath6kl_priv(netdev);
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid,
+ pmksa->pmkid, true);
+}
+
+static int ath6kl_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct ath6kl *ar = ath6kl_priv(netdev);
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid,
+ pmksa->pmkid, false);
+}
+
+static int ath6kl_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
+{
+ struct ath6kl *ar = ath6kl_priv(netdev);
+ if (test_bit(CONNECTED, &ar->flag))
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, ar->bssid, NULL, false);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ar6k_cfg80211_suspend(struct wiphy *wiphy,
+ struct cfg80211_wowlan *wow)
+{
+ struct ath6kl *ar = wiphy_priv(wiphy);
+
+ return ath6kl_hif_suspend(ar);
+}
+#endif
+
+static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n",
+ __func__, chan->center_freq, chan->hw_value);
+ ar->next_chan = chan->center_freq;
+
+ return 0;
+}
+
+static bool ath6kl_is_p2p_ie(const u8 *pos)
+{
+ return pos[0] == WLAN_EID_VENDOR_SPECIFIC && pos[1] >= 4 &&
+ pos[2] == 0x50 && pos[3] == 0x6f &&
+ pos[4] == 0x9a && pos[5] == 0x09;
+}
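+
+/*
+ * A vendor-specific element (ID 221) carrying the Wi-Fi Alliance OUI
+ * 50:6f:9a with OUI type 9 identifies a P2P IE; the length check above
+ * ensures the OUI and type octets are actually present.
+ */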
+
+static int ath6kl_set_ap_probe_resp_ies(struct ath6kl *ar, const u8 *ies,
+ size_t ies_len)
+{
+ const u8 *pos;
+ u8 *buf = NULL;
+ size_t len = 0;
+ int ret;
+
+ /*
+ * Filter out P2P IE(s) since they will be included depending on
+ * the Probe Request frame in ath6kl_send_go_probe_resp().
+ */
+
+ if (ies && ies_len) {
+ buf = kmalloc(ies_len, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ pos = ies;
+ while (pos + 1 < ies + ies_len) {
+ if (pos + 2 + pos[1] > ies + ies_len)
+ break;
+ if (!ath6kl_is_p2p_ie(pos)) {
+ memcpy(buf + len, pos, 2 + pos[1]);
+ len += 2 + pos[1];
+ }
+ pos += 2 + pos[1];
+ }
+ }
+
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_RESP,
+ buf, len);
+ kfree(buf);
+ return ret;
+}
+
+static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
+ struct beacon_parameters *info, bool add)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ieee80211_mgmt *mgmt;
+ u8 *ies;
+ int ies_len;
+ struct wmi_connect_cmd p;
+ int res;
+ int i;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: add=%d\n", __func__, add);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (ar->next_mode != AP_NETWORK)
+ return -EOPNOTSUPP;
+
+ if (info->beacon_ies) {
+ res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_BEACON,
+ info->beacon_ies,
+ info->beacon_ies_len);
+ if (res)
+ return res;
+ }
+ if (info->proberesp_ies) {
+ res = ath6kl_set_ap_probe_resp_ies(ar, info->proberesp_ies,
+ info->proberesp_ies_len);
+ if (res)
+ return res;
+ }
+ if (info->assocresp_ies) {
+ res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_ASSOC_RESP,
+ info->assocresp_ies,
+ info->assocresp_ies_len);
+ if (res)
+ return res;
+ }
+
+ if (!add)
+ return 0;
+
+ ar->ap_mode_bkey.valid = false;
+
+ /* TODO:
+ * info->interval
+ * info->dtim_period
+ */
+
+ if (info->head == NULL)
+ return -EINVAL;
+ mgmt = (struct ieee80211_mgmt *) info->head;
+ ies = mgmt->u.beacon.variable;
+ if (ies > info->head + info->head_len)
+ return -EINVAL;
+ ies_len = info->head + info->head_len - ies;
+
+ if (info->ssid == NULL)
+ return -EINVAL;
+ memcpy(ar->ssid, info->ssid, info->ssid_len);
+ ar->ssid_len = info->ssid_len;
+ if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE)
+ return -EOPNOTSUPP; /* TODO */
+
+ ar->dot11_auth_mode = OPEN_AUTH;
+
+ memset(&p, 0, sizeof(p));
+
+ for (i = 0; i < info->crypto.n_akm_suites; i++) {
+ switch (info->crypto.akm_suites[i]) {
+ case WLAN_AKM_SUITE_8021X:
+ if (info->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+ p.auth_mode |= WPA_AUTH;
+ if (info->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+ p.auth_mode |= WPA2_AUTH;
+ break;
+ case WLAN_AKM_SUITE_PSK:
+ if (info->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+ p.auth_mode |= WPA_PSK_AUTH;
+ if (info->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+ p.auth_mode |= WPA2_PSK_AUTH;
+ break;
+ }
+ }
+ if (p.auth_mode == 0)
+ p.auth_mode = NONE_AUTH;
+ ar->auth_mode = p.auth_mode;
+
+ for (i = 0; i < info->crypto.n_ciphers_pairwise; i++) {
+ switch (info->crypto.ciphers_pairwise[i]) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ p.prwise_crypto_type |= WEP_CRYPT;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ p.prwise_crypto_type |= TKIP_CRYPT;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ p.prwise_crypto_type |= AES_CRYPT;
+ break;
+ }
+ }
+ if (p.prwise_crypto_type == 0) {
+ p.prwise_crypto_type = NONE_CRYPT;
+ ath6kl_set_cipher(ar, 0, true);
+ } else if (info->crypto.n_ciphers_pairwise == 1)
+ ath6kl_set_cipher(ar, info->crypto.ciphers_pairwise[0], true);
+
+ switch (info->crypto.cipher_group) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ p.grp_crypto_type = WEP_CRYPT;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ p.grp_crypto_type = TKIP_CRYPT;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ p.grp_crypto_type = AES_CRYPT;
+ break;
+ default:
+ p.grp_crypto_type = NONE_CRYPT;
+ break;
+ }
+ ath6kl_set_cipher(ar, info->crypto.cipher_group, false);
+
+ p.nw_type = AP_NETWORK;
+ ar->nw_type = ar->next_mode;
+
+ p.ssid_len = ar->ssid_len;
+ memcpy(p.ssid, ar->ssid, ar->ssid_len);
+ p.dot11_auth_mode = ar->dot11_auth_mode;
+ p.ch = cpu_to_le16(ar->next_chan);
+
+ res = ath6kl_wmi_ap_profile_commit(ar->wmi, &p);
+ if (res < 0)
+ return res;
+
+ return 0;
+}
+
+static int ath6kl_add_beacon(struct wiphy *wiphy, struct net_device *dev,
+ struct beacon_parameters *info)
+{
+ return ath6kl_ap_beacon(wiphy, dev, info, true);
+}
+
+static int ath6kl_set_beacon(struct wiphy *wiphy, struct net_device *dev,
+ struct beacon_parameters *info)
+{
+ return ath6kl_ap_beacon(wiphy, dev, info, false);
+}
+
+static int ath6kl_del_beacon(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ if (ar->nw_type != AP_NETWORK)
+ return -EOPNOTSUPP;
+ if (!test_bit(CONNECTED, &ar->flag))
+ return -ENOTCONN;
+
+ ath6kl_wmi_disconnect_cmd(ar->wmi);
+ clear_bit(CONNECTED, &ar->flag);
+
+ return 0;
+}
+
+static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
+ u8 *mac, struct station_parameters *params)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ if (ar->nw_type != AP_NETWORK)
+ return -EOPNOTSUPP;
+
+ /* Use this only for authorizing/unauthorizing a station */
+ if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)))
+ return -EOPNOTSUPP;
+
+ if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))
+ return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_AUTHORIZE,
+ mac, 0);
+ return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_UNAUTHORIZE, mac,
+ 0);
+}
+
+static int ath6kl_remain_on_channel(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration,
+ u64 *cookie)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ /* TODO: if already pending or ongoing remain-on-channel,
+ * return -EBUSY */
+ *cookie = 1; /* only a single pending request is supported */
+
+ return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, chan->center_freq,
+ duration);
+}
+
+static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy,
+ struct net_device *dev,
+ u64 cookie)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ if (cookie != 1)
+ return -ENOENT;
+
+ return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi);
+}
+
+static int ath6kl_send_go_probe_resp(struct ath6kl *ar, const u8 *buf,
+ size_t len, unsigned int freq)
+{
+ const u8 *pos;
+ u8 *p2p;
+ int p2p_len;
+ int ret;
+ const struct ieee80211_mgmt *mgmt;
+
+ mgmt = (const struct ieee80211_mgmt *) buf;
+
+ /* Include P2P IE(s) from the frame generated in user space. */
+
+ p2p = kmalloc(len, GFP_KERNEL);
+ if (p2p == NULL)
+ return -ENOMEM;
+ p2p_len = 0;
+
+ pos = mgmt->u.probe_resp.variable;
+ while (pos + 1 < buf + len) {
+ if (pos + 2 + pos[1] > buf + len)
+ break;
+ if (ath6kl_is_p2p_ie(pos)) {
+ memcpy(p2p + p2p_len, pos, 2 + pos[1]);
+ p2p_len += 2 + pos[1];
+ }
+ pos += 2 + pos[1];
+ }
+
+ ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, freq, mgmt->da,
+ p2p, p2p_len);
+ kfree(p2p);
+ return ret;
+}
+
+static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
+ struct ieee80211_channel *chan, bool offchan,
+ enum nl80211_channel_type channel_type,
+ bool channel_type_valid, unsigned int wait,
+ const u8 *buf, size_t len, u64 *cookie)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ u32 id;
+ const struct ieee80211_mgmt *mgmt;
+
+ mgmt = (const struct ieee80211_mgmt *) buf;
+ if (buf + len >= mgmt->u.probe_resp.variable &&
+ ar->nw_type == AP_NETWORK && test_bit(CONNECTED, &ar->flag) &&
+ ieee80211_is_probe_resp(mgmt->frame_control)) {
+ /*
+ * Send Probe Response frame in AP mode using a separate WMI
+ * command to allow the target to fill in the generic IEs.
+ */
+ *cookie = 0; /* TX status not supported */
+ return ath6kl_send_go_probe_resp(ar, buf, len,
+ chan->center_freq);
+ }
+
+ id = ar->send_action_id++;
+ if (id == 0) {
+ /*
+ * 0 is a reserved value in the WMI command and shall not be
+ * used for the command.
+ */
+ id = ar->send_action_id++;
+ }
+
+ *cookie = id;
+ return ath6kl_wmi_send_action_cmd(ar->wmi, id, chan->center_freq, wait,
+ buf, len);
+}
+
+static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
+ struct net_device *dev,
+ u16 frame_type, bool reg)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: frame_type=0x%x reg=%d\n",
+ __func__, frame_type, reg);
+ if (frame_type == IEEE80211_STYPE_PROBE_REQ) {
+ /*
+ * Note: This notification callback is not allowed to sleep, so
+ * we cannot send WMI_PROBE_REQ_REPORT_CMD here. Instead, we
+ * hardcode target to report Probe Request frames all the time.
+ */
+ ar->probe_req_report = reg;
+ }
+}
+
+static const struct ieee80211_txrx_stypes
+ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+ [NL80211_IFTYPE_STATION] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_CLIENT] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_GO] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+};
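+
+/*
+ * Management frame subtypes occupy bits 4-7 of the frame control field,
+ * so shifting an IEEE80211_STYPE_* value right by four turns it into a
+ * compact bit index for the tx/rx bitmaps above.
+ */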
+
+static struct cfg80211_ops ath6kl_cfg80211_ops = {
+ .change_virtual_intf = ath6kl_cfg80211_change_iface,
+ .scan = ath6kl_cfg80211_scan,
+ .connect = ath6kl_cfg80211_connect,
+ .disconnect = ath6kl_cfg80211_disconnect,
+ .add_key = ath6kl_cfg80211_add_key,
+ .get_key = ath6kl_cfg80211_get_key,
+ .del_key = ath6kl_cfg80211_del_key,
+ .set_default_key = ath6kl_cfg80211_set_default_key,
+ .set_wiphy_params = ath6kl_cfg80211_set_wiphy_params,
+ .set_tx_power = ath6kl_cfg80211_set_txpower,
+ .get_tx_power = ath6kl_cfg80211_get_txpower,
+ .set_power_mgmt = ath6kl_cfg80211_set_power_mgmt,
+ .join_ibss = ath6kl_cfg80211_join_ibss,
+ .leave_ibss = ath6kl_cfg80211_leave_ibss,
+ .get_station = ath6kl_get_station,
+ .set_pmksa = ath6kl_set_pmksa,
+ .del_pmksa = ath6kl_del_pmksa,
+ .flush_pmksa = ath6kl_flush_pmksa,
+ CFG80211_TESTMODE_CMD(ath6kl_tm_cmd)
+#ifdef CONFIG_PM
+ .suspend = ar6k_cfg80211_suspend,
+#endif
+ .set_channel = ath6kl_set_channel,
+ .add_beacon = ath6kl_add_beacon,
+ .set_beacon = ath6kl_set_beacon,
+ .del_beacon = ath6kl_del_beacon,
+ .change_station = ath6kl_change_station,
+ .remain_on_channel = ath6kl_remain_on_channel,
+ .cancel_remain_on_channel = ath6kl_cancel_remain_on_channel,
+ .mgmt_tx = ath6kl_mgmt_tx,
+ .mgmt_frame_register = ath6kl_mgmt_frame_register,
+};
+
+struct wireless_dev *ath6kl_cfg80211_init(struct device *dev)
+{
+ int ret = 0;
+ struct wireless_dev *wdev;
+ struct ath6kl *ar;
+
+ wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+ if (!wdev) {
+ ath6kl_err("couldn't allocate wireless device\n");
+ return NULL;
+ }
+
+ /* create a new wiphy for use with cfg80211 */
+ wdev->wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
+ if (!wdev->wiphy) {
+ ath6kl_err("couldn't allocate wiphy device\n");
+ kfree(wdev);
+ return NULL;
+ }
+
+ ar = wiphy_priv(wdev->wiphy);
+ ar->p2p = !!ath6kl_p2p;
+
+ wdev->wiphy->mgmt_stypes = ath6kl_mgmt_stypes;
+
+ wdev->wiphy->max_remain_on_channel_duration = 5000;
+
+ /* set device pointer for wiphy */
+ set_wiphy_dev(wdev->wiphy, dev);
+
+ wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
+ if (ar->p2p) {
+ wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT);
+ }
+ /* max num of ssids that can be probed during scanning */
+ wdev->wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
+ wdev->wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
+ wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
+ wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
+ wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+ wdev->wiphy->cipher_suites = cipher_suites;
+ wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ ret = wiphy_register(wdev->wiphy);
+ if (ret < 0) {
+ ath6kl_err("couldn't register wiphy device\n");
+ wiphy_free(wdev->wiphy);
+ kfree(wdev);
+ return NULL;
+ }
+
+ return wdev;
+}
+
+void ath6kl_cfg80211_deinit(struct ath6kl *ar)
+{
+ struct wireless_dev *wdev = ar->wdev;
+
+ if (ar->scan_req) {
+ cfg80211_scan_done(ar->scan_req, true);
+ ar->scan_req = NULL;
+ }
+
+ if (!wdev)
+ return;
+
+ wiphy_unregister(wdev->wiphy);
+ wiphy_free(wdev->wiphy);
+ kfree(wdev);
+}
--- /dev/null
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+
+#include <linux/circ_buf.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
++#include <linux/export.h>
+
+#include "debug.h"
+#include "target.h"
+
+struct ath6kl_fwlog_slot {
+ __le32 timestamp;
+ __le32 length;
+
+ /* max ATH6KL_FWLOG_PAYLOAD_SIZE bytes */
+ u8 payload[0];
+};
+
+#define ATH6KL_FWLOG_SIZE 32768
+#define ATH6KL_FWLOG_SLOT_SIZE (sizeof(struct ath6kl_fwlog_slot) + \
+ ATH6KL_FWLOG_PAYLOAD_SIZE)
+#define ATH6KL_FWLOG_VALID_MASK 0x1ffff
+
+int ath6kl_printk(const char *level, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int rtn;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ rtn = printk("%sath6kl: %pV", level, &vaf);
+
+ va_end(args);
+
+ return rtn;
+}
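
The wrapper above forwards a caller's varargs to printk() in one shot via the kernel's %pV extension, which expands a struct va_format in place. As a rough userspace sketch of the same wrapper pattern (there is no %pV outside the kernel, so this splits the prefix and body into two calls; the my_log name is an illustrative assumption):

#include <stdarg.h>
#include <stdio.h>

/* Prefix every message, then forward the caller's format string and
 * arguments unchanged - the same shape as ath6kl_printk() above. */
static int my_log(const char *level, const char *fmt, ...)
{
	va_list args;
	int ret;

	ret = printf("%smylib: ", level);

	va_start(args, fmt);
	ret += vprintf(fmt, args);
	va_end(args);

	return ret;
}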
+
+#ifdef CONFIG_ATH6KL_DEBUG
+
+#define REG_OUTPUT_LEN_PER_LINE 25
+#define REGTYPE_STR_LEN 100
+
+struct ath6kl_diag_reg_info {
+ u32 reg_start;
+ u32 reg_end;
+ const char *reg_info;
+};
+
+static const struct ath6kl_diag_reg_info diag_reg[] = {
+ { 0x20000, 0x200fc, "General DMA and Rx registers" },
+ { 0x28000, 0x28900, "MAC PCU register & keycache" },
+ { 0x20800, 0x20a40, "QCU" },
+ { 0x21000, 0x212f0, "DCU" },
+ { 0x4000, 0x42e4, "RTC" },
+ { 0x540000, 0x540000 + (256 * 1024), "RAM" },
+ { 0x29800, 0x2B210, "Base Band" },
+ { 0x1C000, 0x1C748, "Analog" },
+};
+
+void ath6kl_dump_registers(struct ath6kl_device *dev,
+ struct ath6kl_irq_proc_registers *irq_proc_reg,
+ struct ath6kl_irq_enable_reg *irq_enable_reg)
+{
+
+ ath6kl_dbg(ATH6KL_DBG_ANY, ("<------- Register Table -------->\n"));
+
+ if (irq_proc_reg != NULL) {
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Host Int status: 0x%x\n",
+ irq_proc_reg->host_int_status);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "CPU Int status: 0x%x\n",
+ irq_proc_reg->cpu_int_status);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Error Int status: 0x%x\n",
+ irq_proc_reg->error_int_status);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Counter Int status: 0x%x\n",
+ irq_proc_reg->counter_int_status);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Mbox Frame: 0x%x\n",
+ irq_proc_reg->mbox_frame);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Rx Lookahead Valid: 0x%x\n",
+ irq_proc_reg->rx_lkahd_valid);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Rx Lookahead 0: 0x%x\n",
+ irq_proc_reg->rx_lkahd[0]);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Rx Lookahead 1: 0x%x\n",
+ irq_proc_reg->rx_lkahd[1]);
+
+ if (dev->ar->mbox_info.gmbox_addr != 0) {
+ /*
+ * If the target supports GMBOX hardware, dump some
+ * additional state.
+ */
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "GMBOX Host Int status 2: 0x%x\n",
+ irq_proc_reg->host_int_status2);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "GMBOX RX Avail: 0x%x\n",
+ irq_proc_reg->gmbox_rx_avail);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "GMBOX lookahead alias 0: 0x%x\n",
+ irq_proc_reg->rx_gmbox_lkahd_alias[0]);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "GMBOX lookahead alias 1: 0x%x\n",
+ irq_proc_reg->rx_gmbox_lkahd_alias[1]);
+ }
+
+ }
+
+ if (irq_enable_reg != NULL) {
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Int status Enable: 0x%x\n",
+ irq_enable_reg->int_status_en);
+ ath6kl_dbg(ATH6KL_DBG_ANY, "Counter Int status Enable: 0x%x\n",
+ irq_enable_reg->cntr_int_status_en);
+ }
+ ath6kl_dbg(ATH6KL_DBG_ANY, "<------------------------------->\n");
+}
+
+static void dump_cred_dist(struct htc_endpoint_credit_dist *ep_dist)
+{
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "--- endpoint: %d svc_id: 0x%X ---\n",
+ ep_dist->endpoint, ep_dist->svc_id);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " dist_flags : 0x%X\n",
+ ep_dist->dist_flags);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_norm : %d\n",
+ ep_dist->cred_norm);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_min : %d\n",
+ ep_dist->cred_min);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " credits : %d\n",
+ ep_dist->credits);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_assngd : %d\n",
+ ep_dist->cred_assngd);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " seek_cred : %d\n",
+ ep_dist->seek_cred);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_sz : %d\n",
+ ep_dist->cred_sz);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_per_msg : %d\n",
+ ep_dist->cred_per_msg);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_to_dist : %d\n",
+ ep_dist->cred_to_dist);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " txq_depth : %d\n",
+ get_queue_depth(&((struct htc_endpoint *)
+ ep_dist->htc_rsvd)->txq));
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "----------------------------------\n");
+}
+
+void dump_cred_dist_stats(struct htc_target *target)
+{
+ struct htc_endpoint_credit_dist *ep_list;
+
+ if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_TRC))
+ return;
+
+ list_for_each_entry(ep_list, &target->cred_dist_list, list)
+ dump_cred_dist(ep_list);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:%p dist:%p\n",
+ target->cred_dist_cntxt, NULL);
+ ath6kl_dbg(ATH6KL_DBG_TRC, "credit distribution, total : %d, free : %d\n",
+ target->cred_dist_cntxt->total_avail_credits,
+ target->cred_dist_cntxt->cur_free_credits);
+}
+
+static int ath6kl_debugfs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war)
+{
+ switch (war) {
+ case ATH6KL_WAR_INVALID_RATE:
+ ar->debug.war_stats.invalid_rate++;
+ break;
+ }
+}
+
+static ssize_t read_file_war_stats(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char *buf;
+ unsigned int len = 0, buf_len = 1500;
+ ssize_t ret_cnt;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "Workaround stats");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n\n",
+ "=================");
+ len += scnprintf(buf + len, buf_len - len, "%20s %10u\n",
+ "Invalid rates", ar->debug.war_stats.invalid_rate);
+
+ if (WARN_ON(len > buf_len))
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_war_stats = {
+ .read = read_file_war_stats,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath6kl_debug_fwlog_add(struct ath6kl *ar, const void *buf,
+ size_t buf_len)
+{
+ struct circ_buf *fwlog = &ar->debug.fwlog_buf;
+ size_t space;
+ int i;
+
+ /* entries must all be equal size */
+ if (WARN_ON(buf_len != ATH6KL_FWLOG_SLOT_SIZE))
+ return;
+
+ space = CIRC_SPACE(fwlog->head, fwlog->tail, ATH6KL_FWLOG_SIZE);
+ if (space < buf_len)
+ /* discard oldest slot */
+ fwlog->tail = (fwlog->tail + ATH6KL_FWLOG_SLOT_SIZE) &
+ (ATH6KL_FWLOG_SIZE - 1);
+
+ for (i = 0; i < buf_len; i += space) {
+ space = CIRC_SPACE_TO_END(fwlog->head, fwlog->tail,
+ ATH6KL_FWLOG_SIZE);
+
+ if ((size_t) space > buf_len - i)
+ space = buf_len - i;
+
+ memcpy(&fwlog->buf[fwlog->head], buf + i, space);
+ fwlog->head = (fwlog->head + space) & (ATH6KL_FWLOG_SIZE - 1);
+ }
+
+}
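
The slot writer above relies on the usual kernel circular-buffer identities: with a power-of-two ring size, occupancy and free space fall out of masked subtraction on head and tail. A minimal userspace model of that index math (the macro definitions match linux/circ_buf.h; the byte-granularity ring and sizes are simplifications of the slot-granularity buffer used here):

#include <stdio.h>

#define RING_SIZE 16	/* must be a power of two */

/* Same definitions as linux/circ_buf.h */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

struct ring {
	char buf[RING_SIZE];
	int head;	/* next write index */
	int tail;	/* next read index */
};

/* Push one byte, discarding the oldest byte when full - the same
 * "drop the oldest entry" policy ath6kl_debug_fwlog_add() applies
 * at slot granularity. */
static void ring_push(struct ring *r, char c)
{
	if (CIRC_SPACE(r->head, r->tail, RING_SIZE) == 0)
		r->tail = (r->tail + 1) & (RING_SIZE - 1);
	r->buf[r->head] = c;
	r->head = (r->head + 1) & (RING_SIZE - 1);
}

int main(void)
{
	struct ring r = { .head = 0, .tail = 0 };
	int i;

	for (i = 0; i < 40; i++)	/* overfill to force wraparound */
		ring_push(&r, 'a' + (i % 26));
	printf("count %d\n", CIRC_CNT(r.head, r.tail, RING_SIZE));
	return 0;
}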
+
+void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len)
+{
+ struct ath6kl_fwlog_slot *slot = ar->debug.fwlog_tmp;
+ size_t slot_len;
+
+ if (WARN_ON(len > ATH6KL_FWLOG_PAYLOAD_SIZE))
+ return;
+
+ spin_lock_bh(&ar->debug.fwlog_lock);
+
+ slot->timestamp = cpu_to_le32(jiffies);
+ slot->length = cpu_to_le32(len);
+ memcpy(slot->payload, buf, len);
+
+ slot_len = sizeof(*slot) + len;
+
+ if (slot_len < ATH6KL_FWLOG_SLOT_SIZE)
+ memset(slot->payload + len, 0,
+ ATH6KL_FWLOG_SLOT_SIZE - slot_len);
+
+ ath6kl_debug_fwlog_add(ar, slot, ATH6KL_FWLOG_SLOT_SIZE);
+
+ spin_unlock_bh(&ar->debug.fwlog_lock);
+}
+
+static bool ath6kl_debug_fwlog_empty(struct ath6kl *ar)
+{
+ return CIRC_CNT(ar->debug.fwlog_buf.head,
+ ar->debug.fwlog_buf.tail,
+ ATH6KL_FWLOG_SIZE) == 0;
+}
+
+static ssize_t ath6kl_fwlog_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct circ_buf *fwlog = &ar->debug.fwlog_buf;
+ size_t len = 0, buf_len = count;
+ ssize_t ret_cnt;
+ char *buf;
+ int ccnt;
+
+ buf = vmalloc(buf_len);
+ if (!buf)
+ return -ENOMEM;
+
+ /* read undelivered logs from firmware */
+ ath6kl_read_fwlogs(ar);
+
+ spin_lock_bh(&ar->debug.fwlog_lock);
+
+ while (len < buf_len && !ath6kl_debug_fwlog_empty(ar)) {
+ ccnt = CIRC_CNT_TO_END(fwlog->head, fwlog->tail,
+ ATH6KL_FWLOG_SIZE);
+
+ if ((size_t) ccnt > buf_len - len)
+ ccnt = buf_len - len;
+
+ memcpy(buf + len, &fwlog->buf[fwlog->tail], ccnt);
+ len += ccnt;
+
+ fwlog->tail = (fwlog->tail + ccnt) &
+ (ATH6KL_FWLOG_SIZE - 1);
+ }
+
+ spin_unlock_bh(&ar->debug.fwlog_lock);
+
+ if (WARN_ON(len > buf_len))
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ vfree(buf);
+
+ return ret_cnt;
+}
+
+static const struct file_operations fops_fwlog = {
+ .open = ath6kl_debugfs_open,
+ .read = ath6kl_fwlog_read,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_fwlog_mask_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[16];
+ int len;
+
+ len = snprintf(buf, sizeof(buf), "0x%x\n", ar->debug.fwlog_mask);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_fwlog_mask_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &ar->debug.fwlog_mask);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_wmi_config_debug_module_cmd(ar->wmi,
+ ATH6KL_FWLOG_VALID_MASK,
+ ar->debug.fwlog_mask);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_fwlog_mask = {
+ .open = ath6kl_debugfs_open,
+ .read = ath6kl_fwlog_mask_read,
+ .write = ath6kl_fwlog_mask_write,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct target_stats *tgt_stats = &ar->target_stats;
+ char *buf;
+ unsigned int len = 0, buf_len = 1500;
+ int i;
+ long left;
+ ssize_t ret_cnt;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (down_interruptible(&ar->sem)) {
+ kfree(buf);
+ return -EBUSY;
+ }
+
+ set_bit(STATS_UPDATE_PEND, &ar->flag);
+
+ if (ath6kl_wmi_get_stats_cmd(ar->wmi)) {
+ up(&ar->sem);
+ kfree(buf);
+ return -EIO;
+ }
+
+ left = wait_event_interruptible_timeout(ar->event_wq,
+ !test_bit(STATS_UPDATE_PEND,
+ &ar->flag), WMI_TIMEOUT);
+
+ up(&ar->sem);
+
+ if (left <= 0) {
+ kfree(buf);
+ return -ETIMEDOUT;
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "Target Tx stats");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n\n",
+ "=================");
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Ucast packets", tgt_stats->tx_ucast_pkt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Bcast packets", tgt_stats->tx_bcast_pkt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Ucast byte", tgt_stats->tx_ucast_byte);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Bcast byte", tgt_stats->tx_bcast_byte);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Rts success cnt", tgt_stats->tx_rts_success_cnt);
+ for (i = 0; i < 4; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%18s %d %10llu\n", "PER on ac",
+ i, tgt_stats->tx_pkt_per_ac[i]);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Error", tgt_stats->tx_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Fail count", tgt_stats->tx_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Retry count", tgt_stats->tx_retry_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Multi retry cnt", tgt_stats->tx_mult_retry_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Rts fail cnt", tgt_stats->tx_rts_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%25s %10llu\n\n",
+ "TKIP counter measure used",
+ tgt_stats->tkip_cnter_measures_invoked);
+
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "Target Rx stats");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Ucast packets", tgt_stats->rx_ucast_pkt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
+ "Ucast Rate", tgt_stats->rx_ucast_rate);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Bcast packets", tgt_stats->rx_bcast_pkt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Ucast byte", tgt_stats->rx_ucast_byte);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Bcast byte", tgt_stats->rx_bcast_byte);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Fragmented pkt", tgt_stats->rx_frgment_pkt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Error", tgt_stats->rx_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "CRC Err", tgt_stats->rx_crc_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Key chache miss", tgt_stats->rx_key_cache_miss);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Decrypt Err", tgt_stats->rx_decrypt_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Duplicate frame", tgt_stats->rx_dupl_frame);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Tkip Mic failure", tgt_stats->tkip_local_mic_fail);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "TKIP format err", tgt_stats->tkip_fmt_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "CCMP format Err", tgt_stats->ccmp_fmt_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n\n",
+ "CCMP Replay Err", tgt_stats->ccmp_replays);
+
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "Misc Target stats");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "=================");
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Beacon Miss count", tgt_stats->cs_bmiss_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Num Connects", tgt_stats->cs_connect_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Num disconnects", tgt_stats->cs_discon_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
+ "Beacon avg rssi", tgt_stats->cs_ave_beacon_rssi);
+
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_tgt_stats = {
+ .read = read_file_tgt_stats,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+#define print_credit_info(fmt_str, ep_list_field) \
+ (len += scnprintf(buf + len, buf_len - len, fmt_str, \
+ ep_list->ep_list_field))
+#define CREDIT_INFO_DISPLAY_STRING_LEN 200
+#define CREDIT_INFO_LEN 128
+
+static ssize_t read_file_credit_dist_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct htc_target *target = ar->htc_target;
+ struct htc_endpoint_credit_dist *ep_list;
+ char *buf;
+ unsigned int buf_len, len = 0;
+ ssize_t ret_cnt;
+
+ buf_len = CREDIT_INFO_DISPLAY_STRING_LEN +
+ get_queue_depth(&target->cred_dist_list) * CREDIT_INFO_LEN;
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, buf_len - len, "%25s%5d\n",
+ "Total Avail Credits: ",
+ target->cred_dist_cntxt->total_avail_credits);
+ len += scnprintf(buf + len, buf_len - len, "%25s%5d\n",
+ "Free credits :",
+ target->cred_dist_cntxt->cur_free_credits);
+
+ len += scnprintf(buf + len, buf_len - len,
+ " Epid Flags Cred_norm Cred_min Credits Cred_assngd"
+ " Seek_cred Cred_sz Cred_per_msg Cred_to_dist"
+ " qdepth\n");
+
+ list_for_each_entry(ep_list, &target->cred_dist_list, list) {
+ print_credit_info(" %2d", endpoint);
+ print_credit_info("%10x", dist_flags);
+ print_credit_info("%8d", cred_norm);
+ print_credit_info("%9d", cred_min);
+ print_credit_info("%9d", credits);
+ print_credit_info("%10d", cred_assngd);
+ print_credit_info("%13d", seek_cred);
+ print_credit_info("%12d", cred_sz);
+ print_credit_info("%9d", cred_per_msg);
+ print_credit_info("%14d", cred_to_dist);
+ len += scnprintf(buf + len, buf_len - len, "%12d\n",
+ get_queue_depth(&((struct htc_endpoint *)
+ ep_list->htc_rsvd)->txq));
+ }
+
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_credit_dist_stats = {
+ .read = read_file_credit_dist_stats,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static unsigned long ath6kl_get_num_reg(void)
+{
+ int i;
+ unsigned long n_reg = 0;
+
+ for (i = 0; i < ARRAY_SIZE(diag_reg); i++)
+ n_reg = n_reg +
+ (diag_reg[i].reg_end - diag_reg[i].reg_start) / 4 + 1;
+
+ return n_reg;
+}
+
+static bool ath6kl_dbg_is_diag_reg_valid(u32 reg_addr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(diag_reg); i++) {
+ if (reg_addr >= diag_reg[i].reg_start &&
+ reg_addr <= diag_reg[i].reg_end)
+ return true;
+ }
+
+ return false;
+}
+
+static ssize_t ath6kl_regread_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ u8 buf[50];
+ unsigned int len = 0;
+
+ if (ar->debug.dbgfs_diag_reg)
+ len += scnprintf(buf + len, sizeof(buf) - len, "0x%x\n",
+ ar->debug.dbgfs_diag_reg);
+ else
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "All diag registers\n");
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_regread_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ u8 buf[50];
+ unsigned int len;
+ unsigned long reg_addr;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+
+ if (strict_strtoul(buf, 0, &reg_addr))
+ return -EINVAL;
+
+ if ((reg_addr % 4) != 0)
+ return -EINVAL;
+
+ if (reg_addr && !ath6kl_dbg_is_diag_reg_valid(reg_addr))
+ return -EINVAL;
+
+ ar->debug.dbgfs_diag_reg = reg_addr;
+
+ return count;
+}
+
+static const struct file_operations fops_diag_reg_read = {
+ .read = ath6kl_regread_read,
+ .write = ath6kl_regread_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath6kl_regdump_open(struct inode *inode, struct file *file)
+{
+ struct ath6kl *ar = inode->i_private;
+ u8 *buf;
+ unsigned long int reg_len;
+ unsigned int len = 0, n_reg;
+ u32 addr;
+ __le32 reg_val;
+ int i, status;
+
+ /* Dump all the registers if no register is specified */
+ if (!ar->debug.dbgfs_diag_reg)
+ n_reg = ath6kl_get_num_reg();
+ else
+ n_reg = 1;
+
+ reg_len = n_reg * REG_OUTPUT_LEN_PER_LINE;
+ if (n_reg > 1)
+ reg_len += REGTYPE_STR_LEN;
+
+ buf = vmalloc(reg_len);
+ if (!buf)
+ return -ENOMEM;
+
+ if (n_reg == 1) {
+ addr = ar->debug.dbgfs_diag_reg;
+
+ status = ath6kl_diag_read32(ar,
+ TARG_VTOP(ar->target_type, addr),
+ (u32 *)&reg_val);
+ if (status)
+ goto fail_reg_read;
+
+ len += scnprintf(buf + len, reg_len - len,
+ "0x%06x 0x%08x\n", addr, le32_to_cpu(reg_val));
+ goto done;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(diag_reg); i++) {
+ len += scnprintf(buf + len, reg_len - len,
+ "%s\n", diag_reg[i].reg_info);
+ for (addr = diag_reg[i].reg_start;
+ addr <= diag_reg[i].reg_end; addr += 4) {
+ status = ath6kl_diag_read32(ar,
+ TARG_VTOP(ar->target_type, addr),
+ (u32 *)&reg_val);
+ if (status)
+ goto fail_reg_read;
+
+ len += scnprintf(buf + len, reg_len - len,
+ "0x%06x 0x%08x\n",
+ addr, le32_to_cpu(reg_val));
+ }
+ }
+
+done:
+ file->private_data = buf;
+ return 0;
+
+fail_reg_read:
+ ath6kl_warn("Unable to read memory:%u\n", addr);
+ vfree(buf);
+ return -EIO;
+}
+
+static ssize_t ath6kl_regdump_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ u8 *buf = file->private_data;
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+static int ath6kl_regdump_release(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+ return 0;
+}
+
+static const struct file_operations fops_reg_dump = {
+ .open = ath6kl_regdump_open,
+ .read = ath6kl_regdump_read,
+ .release = ath6kl_regdump_release,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_lrssi_roam_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ unsigned long lrssi_roam_threshold;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (strict_strtoul(buf, 0, &lrssi_roam_threshold))
+ return -EINVAL;
+
+ ar->lrssi_roam_threshold = lrssi_roam_threshold;
+
+ ath6kl_wmi_set_roam_lrssi_cmd(ar->wmi, ar->lrssi_roam_threshold);
+
+ return count;
+}
+
+static ssize_t ath6kl_lrssi_roam_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = snprintf(buf, sizeof(buf), "%u\n", ar->lrssi_roam_threshold);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_lrssi_roam_threshold = {
+ .read = ath6kl_lrssi_roam_read,
+ .write = ath6kl_lrssi_roam_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_regwrite_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ u8 buf[32];
+ unsigned int len = 0;
+
+ len = scnprintf(buf, sizeof(buf), "Addr: 0x%x Val: 0x%x\n",
+ ar->debug.diag_reg_addr_wr, ar->debug.diag_reg_val_wr);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_regwrite_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[32];
+ char *sptr, *token;
+ unsigned int len = 0;
+ u32 reg_addr, reg_val;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ sptr = buf;
+
+ token = strsep(&sptr, "=");
+ if (!token)
+ return -EINVAL;
+
+ if (kstrtou32(token, 0, &reg_addr))
+ return -EINVAL;
+
+ if (!ath6kl_dbg_is_diag_reg_valid(reg_addr))
+ return -EINVAL;
+
+ if (kstrtou32(sptr, 0, &reg_val))
+ return -EINVAL;
+
+ ar->debug.diag_reg_addr_wr = reg_addr;
+ ar->debug.diag_reg_val_wr = reg_val;
+
+ if (ath6kl_diag_write32(ar, ar->debug.diag_reg_addr_wr,
+ cpu_to_le32(ar->debug.diag_reg_val_wr)))
+ return -EIO;
+
+ return count;
+}
+
+static const struct file_operations fops_diag_reg_write = {
+ .read = ath6kl_regwrite_read,
+ .write = ath6kl_regwrite_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+int ath6kl_debug_init(struct ath6kl *ar)
+{
+ ar->debug.fwlog_buf.buf = vmalloc(ATH6KL_FWLOG_SIZE);
+ if (ar->debug.fwlog_buf.buf == NULL)
+ return -ENOMEM;
+
+ ar->debug.fwlog_tmp = kmalloc(ATH6KL_FWLOG_SLOT_SIZE, GFP_KERNEL);
+ if (ar->debug.fwlog_tmp == NULL) {
+ vfree(ar->debug.fwlog_buf.buf);
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&ar->debug.fwlog_lock);
+
+ /*
+ * Report zero as a guess: there is no known way to read the
+ * current debug mask back from the firmware.
+ */
+ ar->debug.fwlog_mask = 0;
+
+ ar->debugfs_phy = debugfs_create_dir("ath6kl",
+ ar->wdev->wiphy->debugfsdir);
+ if (!ar->debugfs_phy) {
+ vfree(ar->debug.fwlog_buf.buf);
+ kfree(ar->debug.fwlog_tmp);
+ return -ENOMEM;
+ }
+
+ debugfs_create_file("tgt_stats", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_tgt_stats);
+
+ debugfs_create_file("credit_dist_stats", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_credit_dist_stats);
+
+ debugfs_create_file("fwlog", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_fwlog);
+
+ debugfs_create_file("fwlog_mask", S_IRUSR | S_IWUSR, ar->debugfs_phy,
+ ar, &fops_fwlog_mask);
+
+ debugfs_create_file("reg_addr", S_IRUSR | S_IWUSR, ar->debugfs_phy, ar,
+ &fops_diag_reg_read);
+
+ debugfs_create_file("reg_dump", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_reg_dump);
+
+ debugfs_create_file("lrssi_roam_threshold", S_IRUSR | S_IWUSR,
+ ar->debugfs_phy, ar, &fops_lrssi_roam_threshold);
+
+ debugfs_create_file("reg_write", S_IRUSR | S_IWUSR,
+ ar->debugfs_phy, ar, &fops_diag_reg_write);
+
+ debugfs_create_file("war_stats", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_war_stats);
+
+ return 0;
+}
+
+void ath6kl_debug_cleanup(struct ath6kl *ar)
+{
+ vfree(ar->debug.fwlog_buf.buf);
+ kfree(ar->debug.fwlog_tmp);
+}
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
++#include <linux/module.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sd.h>
+#include "htc_hif.h"
+#include "hif-ops.h"
+#include "target.h"
+#include "debug.h"
+#include "cfg80211.h"
+
+struct ath6kl_sdio {
+ struct sdio_func *func;
+
+ spinlock_t lock;
+
+ /* free list */
+ struct list_head bus_req_freeq;
+
+ /* available bus requests */
+ struct bus_request bus_req[BUS_REQUEST_MAX_NUM];
+
+ struct ath6kl *ar;
+ u8 *dma_buffer;
+
+ /* scatter request list head */
+ struct list_head scat_req;
+
+ spinlock_t scat_lock;
+ bool is_disabled;
+ atomic_t irq_handling;
+ const struct sdio_device_id *id;
+ struct work_struct wr_async_work;
+ struct list_head wr_asyncq;
+ spinlock_t wr_async_lock;
+};
+
+#define CMD53_ARG_READ 0
+#define CMD53_ARG_WRITE 1
+#define CMD53_ARG_BLOCK_BASIS 1
+#define CMD53_ARG_FIXED_ADDRESS 0
+#define CMD53_ARG_INCR_ADDRESS 1
+
+static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
+{
+ return ar->hif_priv;
+}
+
+/*
+ * Macro to check if DMA buffer is WORD-aligned and DMA-able.
+ * Most host controllers assume the buffer is DMA'able and will
+ * bug-check otherwise (i.e. buffers on the stack). virt_addr_valid
+ * check fails on stack memory.
+ */
+static inline bool buf_needs_bounce(u8 *buf)
+{
+ return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
+}
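
The alignment half of this test is just a mask of the two low address bits; virt_addr_valid() is what rules out stack and vmalloc memory the SDIO host cannot DMA from. A small hedged demo of the alignment check alone (virt_addr_valid() has no userspace analogue, so it is omitted):

#include <stdint.h>
#include <stdio.h>

/* Word-alignment half of buf_needs_bounce(): a buffer is directly
 * usable only if its two low address bits are clear. */
static int needs_bounce(const void *buf)
{
	return ((uintptr_t)buf & 0x3) != 0;
}

int main(void)
{
	char raw[8];

	printf("%d %d\n", needs_bounce(raw), needs_bounce(raw + 1));
	return 0;
}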
+
+static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
+{
+ struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;
+
+ /* EP1 has an extended range */
+ mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
+ mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
+ mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
+ mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
+ mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
+ mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
+}
+
+static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
+ u8 mode, u8 opcode, u32 addr,
+ u16 blksz)
+{
+ *arg = (((rw & 1) << 31) |
+ ((func & 0x7) << 28) |
+ ((mode & 1) << 27) |
+ ((opcode & 1) << 26) |
+ ((addr & 0x1FFFF) << 9) |
+ (blksz & 0x1FF));
+}
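
The helper above packs the standard SDIO CMD53 argument layout: RW in bit 31, function number in bits 30:28, block mode in bit 27, opcode (address increment) in bit 26, a 17-bit register address in bits 25:9 and the block/byte count in bits 8:0. A self-contained sketch that reproduces the packing and prints one worked value (the sample transfer parameters are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Same bit layout as ath6kl_sdio_set_cmd53_arg() above:
 * RW[31] FUNC[30:28] BLOCK[27] OPCODE[26] ADDR[25:9] COUNT[8:0] */
static uint32_t cmd53_arg(uint8_t rw, uint8_t func, uint8_t mode,
			  uint8_t opcode, uint32_t addr, uint16_t count)
{
	return ((uint32_t)(rw & 1) << 31) |
	       ((uint32_t)(func & 0x7) << 28) |
	       ((uint32_t)(mode & 1) << 27) |
	       ((uint32_t)(opcode & 1) << 26) |
	       ((addr & 0x1FFFF) << 9) |
	       (count & 0x1FF);
}

int main(void)
{
	/* write, function 1, block mode, incrementing addr 0x800, 4 blocks */
	printf("0x%08x\n", cmd53_arg(1, 1, 1, 1, 0x800, 4));
	return 0;
}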
+
+static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
+ unsigned int address,
+ unsigned char val)
+{
+ const u8 func = 0;
+
+ *arg = ((write & 1) << 31) |
+ ((func & 0x7) << 28) |
+ ((raw & 1) << 27) |
+ (1 << 26) |
+ ((address & 0x1FFFF) << 9) |
+ (1 << 8) |
+ (val & 0xFF);
+}
+
+static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
+ unsigned int address,
+ unsigned char byte)
+{
+ struct mmc_command io_cmd;
+
+ memset(&io_cmd, 0, sizeof(io_cmd));
+ ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
+ io_cmd.opcode = SD_IO_RW_DIRECT;
+ io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+
+ return mmc_wait_for_cmd(card->host, &io_cmd, 0);
+}
+
+static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
+ u8 *buf, u32 len)
+{
+ int ret = 0;
+
+ if (request & HIF_WRITE) {
+ /* FIXME: looks like ugly workaround for something */
+ if (addr >= HIF_MBOX_BASE_ADDR &&
+ addr <= HIF_MBOX_END_ADDR)
+ addr += (HIF_MBOX_WIDTH - len);
+
+ /* FIXME: this also looks like ugly workaround */
+ if (addr == HIF_MBOX0_EXT_BASE_ADDR)
+ addr += HIF_MBOX0_EXT_WIDTH - len;
+
+ if (request & HIF_FIXED_ADDRESS)
+ ret = sdio_writesb(func, addr, buf, len);
+ else
+ ret = sdio_memcpy_toio(func, addr, buf, len);
+ } else {
+ if (request & HIF_FIXED_ADDRESS)
+ ret = sdio_readsb(func, buf, addr, len);
+ else
+ ret = sdio_memcpy_fromio(func, buf, addr, len);
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
+ request & HIF_WRITE ? "wr" : "rd", addr,
+ request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
+ ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len);
+
+ return ret;
+}
+
+static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
+{
+ struct bus_request *bus_req;
+ unsigned long flag;
+
+ spin_lock_irqsave(&ar_sdio->lock, flag);
+
+ if (list_empty(&ar_sdio->bus_req_freeq)) {
+ spin_unlock_irqrestore(&ar_sdio->lock, flag);
+ return NULL;
+ }
+
+ bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
+ struct bus_request, list);
+ list_del(&bus_req->list);
+
+ spin_unlock_irqrestore(&ar_sdio->lock, flag);
+ ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
+ __func__, bus_req);
+
+ return bus_req;
+}
+
+static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
+ struct bus_request *bus_req)
+{
+ unsigned long flag;
+
+ ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
+ __func__, bus_req);
+
+ spin_lock_irqsave(&ar_sdio->lock, flag);
+ list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
+ spin_unlock_irqrestore(&ar_sdio->lock, flag);
+}
+
+static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
+ struct mmc_data *data)
+{
+ struct scatterlist *sg;
+ int i;
+
+ data->blksz = HIF_MBOX_BLOCK_SIZE;
+ data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;
+
+ ath6kl_dbg(ATH6KL_DBG_SCATTER,
+ "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d) , (tot:%d,sg:%d)\n",
+ (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
+ data->blksz, data->blocks, scat_req->len,
+ scat_req->scat_entries);
+
+ data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
+ MMC_DATA_READ;
+
+ /* fill SG entries */
+ sg = scat_req->sgentries;
+ sg_init_table(sg, scat_req->scat_entries);
+
+ /* assemble SG list */
+ for (i = 0; i < scat_req->scat_entries; i++, sg++) {
+ ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
+ i, scat_req->scat_list[i].buf,
+ scat_req->scat_list[i].len);
+
+ sg_set_buf(sg, scat_req->scat_list[i].buf,
+ scat_req->scat_list[i].len);
+ }
+
+ /* set scatter-gather table for request */
+ data->sg = scat_req->sgentries;
+ data->sg_len = scat_req->scat_entries;
+}
+
+static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
+ struct bus_request *req)
+{
+ struct mmc_request mmc_req;
+ struct mmc_command cmd;
+ struct mmc_data data;
+ struct hif_scatter_req *scat_req;
+ u8 opcode, rw;
+ int status, len;
+
+ scat_req = req->scat_req;
+
+ if (scat_req->virt_scat) {
+ len = scat_req->len;
+ if (scat_req->req & HIF_BLOCK_BASIS)
+ len = round_down(len, HIF_MBOX_BLOCK_SIZE);
+
+ status = ath6kl_sdio_io(ar_sdio->func, scat_req->req,
+ scat_req->addr, scat_req->virt_dma_buf,
+ len);
+ goto scat_complete;
+ }
+
+ memset(&mmc_req, 0, sizeof(struct mmc_request));
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ memset(&data, 0, sizeof(struct mmc_data));
+
+ ath6kl_sdio_setup_scat_data(scat_req, &data);
+
+ opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
+ CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;
+
+ rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;
+
+ /* Fixup the address so that the last byte will fall on MBOX EOM */
+ if (scat_req->req & HIF_WRITE) {
+ if (scat_req->addr == HIF_MBOX_BASE_ADDR)
+ scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
+ else
+ /* Uses extended address range */
+ scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
+ }
+
+ /* set command argument */
+ ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
+ CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr,
+ data.blocks);
+
+ cmd.opcode = SD_IO_RW_EXTENDED;
+ cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+
+ mmc_req.cmd = &cmd;
+ mmc_req.data = &data;
+
+ mmc_set_data_timeout(&data, ar_sdio->func->card);
+ /* synchronous call to process request */
+ mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);
+
+ status = cmd.error ? cmd.error : data.error;
+
+scat_complete:
+ scat_req->status = status;
+
+ if (scat_req->status)
+ ath6kl_err("Scatter write request failed:%d\n",
+ scat_req->status);
+
+ if (scat_req->req & HIF_ASYNCHRONOUS)
+ scat_req->complete(ar_sdio->ar->htc_target, scat_req);
+
+ return status;
+}
+
+static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
+ int n_scat_entry, int n_scat_req,
+ bool virt_scat)
+{
+ struct hif_scatter_req *s_req;
+ struct bus_request *bus_req;
+ int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz;
+ u8 *virt_buf;
+
+ scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
+ scat_req_sz = sizeof(*s_req) + scat_list_sz;
+
+ if (!virt_scat)
+ sg_sz = sizeof(struct scatterlist) * n_scat_entry;
+ else
+ buf_sz = 2 * L1_CACHE_BYTES +
+ ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
+
+ for (i = 0; i < n_scat_req; i++) {
+ /* allocate the scatter request */
+ s_req = kzalloc(scat_req_sz, GFP_KERNEL);
+ if (!s_req)
+ return -ENOMEM;
+
+ if (virt_scat) {
+ virt_buf = kzalloc(buf_sz, GFP_KERNEL);
+ if (!virt_buf) {
+ kfree(s_req);
+ return -ENOMEM;
+ }
+
+ s_req->virt_dma_buf =
+ (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
+ } else {
+ /* allocate sglist */
+ s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL);
+
+ if (!s_req->sgentries) {
+ kfree(s_req);
+ return -ENOMEM;
+ }
+ }
+
+ /* allocate a bus request for this scatter request */
+ bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
+ if (!bus_req) {
+ kfree(s_req->sgentries);
+ kfree(s_req->virt_dma_buf);
+ kfree(s_req);
+ return -ENOMEM;
+ }
+
+ /* assign the scatter request to this bus request */
+ bus_req->scat_req = s_req;
+ s_req->busrequest = bus_req;
+
+ s_req->virt_scat = virt_scat;
+
+ /* add it to the scatter pool */
+ hif_scatter_req_add(ar_sdio->ar, s_req);
+ }
+
+ return 0;
+}
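
The virtual-scatter branch above over-allocates by two cache lines and then rounds the buffer start up with L1_CACHE_ALIGN() so the DMA buffer never shares a cache line with neighbouring data. The trick in isolation, as a hedged userspace sketch (the 64-byte line size and helper name are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHE_LINE 64	/* assumed L1 line size */

/* Round an address up to the next cache-line boundary, mirroring
 * the kernel's L1_CACHE_ALIGN(). */
static void *cache_align(void *p)
{
	return (void *)(((uintptr_t)p + CACHE_LINE - 1) &
			~(uintptr_t)(CACHE_LINE - 1));
}

int main(void)
{
	size_t want = 100;
	/* over-allocate, keep the raw pointer around for free() */
	void *raw = malloc(want + 2 * CACHE_LINE);
	void *buf = raw ? cache_align(raw) : NULL;

	printf("raw %p aligned %p\n", raw, buf);
	free(raw);
	return 0;
}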
+
+static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
+ u32 len, u32 request)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ u8 *tbuf = NULL;
+ int ret;
+ bool bounced = false;
+
+ if (request & HIF_BLOCK_BASIS)
+ len = round_down(len, HIF_MBOX_BLOCK_SIZE);
+
+ if (buf_needs_bounce(buf)) {
+ if (!ar_sdio->dma_buffer)
+ return -ENOMEM;
+ tbuf = ar_sdio->dma_buffer;
+ memcpy(tbuf, buf, len);
+ bounced = true;
+ } else
+ tbuf = buf;
+
+ sdio_claim_host(ar_sdio->func);
+ ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
+ if ((request & HIF_READ) && bounced)
+ memcpy(buf, tbuf, len);
+ sdio_release_host(ar_sdio->func);
+
+ return ret;
+}
+
+static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
+ struct bus_request *req)
+{
+ if (req->scat_req)
+ ath6kl_sdio_scat_rw(ar_sdio, req);
+ else {
+ void *context;
+ int status;
+
+ status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
+ req->buffer, req->length,
+ req->request);
+ context = req->packet;
+ ath6kl_sdio_free_bus_req(ar_sdio, req);
+ ath6kldev_rw_comp_handler(context, status);
+ }
+}
+
+static void ath6kl_sdio_write_async_work(struct work_struct *work)
+{
+ struct ath6kl_sdio *ar_sdio;
+ unsigned long flags;
+ struct bus_request *req, *tmp_req;
+
+ ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
+ sdio_claim_host(ar_sdio->func);
+
+ spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
+ list_del(&req->list);
+ spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ __ath6kl_sdio_write_async(ar_sdio, req);
+ spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ }
+ spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+
+ sdio_release_host(ar_sdio->func);
+}
+
+static void ath6kl_sdio_irq_handler(struct sdio_func *func)
+{
+ int status;
+ struct ath6kl_sdio *ar_sdio;
+
+ ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n");
+
+ ar_sdio = sdio_get_drvdata(func);
+ atomic_set(&ar_sdio->irq_handling, 1);
+
+ /*
+ * Release the host during interrupts so we can pick it back up when
+ * we process commands.
+ */
+ sdio_release_host(ar_sdio->func);
+
+ status = ath6kldev_intr_bh_handler(ar_sdio->ar);
+ sdio_claim_host(ar_sdio->func);
+ atomic_set(&ar_sdio->irq_handling, 0);
+ WARN_ON(status && status != -ECANCELED);
+}
+
+static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
+{
+ struct sdio_func *func = ar_sdio->func;
+ int ret = 0;
+
+ if (!ar_sdio->is_disabled)
+ return 0;
+
+ sdio_claim_host(func);
+
+ ret = sdio_enable_func(func);
+ if (ret) {
+ ath6kl_err("Unable to enable sdio func: %d)\n", ret);
+ sdio_release_host(func);
+ return ret;
+ }
+
+ sdio_release_host(func);
+
+ /*
+ * Wait for hardware to initialise. It should take a lot less than
+ * 10 ms but let's be conservative here.
+ */
+ msleep(10);
+
+ ar_sdio->is_disabled = false;
+
+ return ret;
+}
+
+static int ath6kl_sdio_power_off(struct ath6kl_sdio *ar_sdio)
+{
+ int ret;
+
+ if (ar_sdio->is_disabled)
+ return 0;
+
+ /* Disable the card */
+ sdio_claim_host(ar_sdio->func);
+ ret = sdio_disable_func(ar_sdio->func);
+ sdio_release_host(ar_sdio->func);
+
+ if (ret)
+ return ret;
+
+ ar_sdio->is_disabled = true;
+
+ return ret;
+}
+
+static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
+ u32 length, u32 request,
+ struct htc_packet *packet)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct bus_request *bus_req;
+ unsigned long flags;
+
+ bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
+
+ if (!bus_req)
+ return -ENOMEM;
+
+ bus_req->address = address;
+ bus_req->buffer = buffer;
+ bus_req->length = length;
+ bus_req->request = request;
+ bus_req->packet = packet;
+
+ spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
+ spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
+
+ return 0;
+}
+
+static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ int ret;
+
+ sdio_claim_host(ar_sdio->func);
+
+ /* Register the isr */
+ ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
+ if (ret)
+ ath6kl_err("Failed to claim sdio irq: %d\n", ret);
+
+ sdio_release_host(ar_sdio->func);
+}
+
+static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ int ret;
+
+ sdio_claim_host(ar_sdio->func);
+
+ /* Mask our function IRQ */
+ while (atomic_read(&ar_sdio->irq_handling)) {
+ sdio_release_host(ar_sdio->func);
+ schedule_timeout_interruptible(HZ / 10);
+ sdio_claim_host(ar_sdio->func);
+ }
+
+ ret = sdio_release_irq(ar_sdio->func);
+ if (ret)
+ ath6kl_err("Failed to release sdio irq: %d\n", ret);
+
+ sdio_release_host(ar_sdio->func);
+}
+
+static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct hif_scatter_req *node = NULL;
+ unsigned long flag;
+
+ spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+
+ if (!list_empty(&ar_sdio->scat_req)) {
+ node = list_first_entry(&ar_sdio->scat_req,
+ struct hif_scatter_req, list);
+ list_del(&node->list);
+ }
+
+ spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+
+ return node;
+}
+
+static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
+ struct hif_scatter_req *s_req)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ unsigned long flag;
+
+ spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+
+ list_add_tail(&s_req->list, &ar_sdio->scat_req);
+
+ spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+
+}
+
+/* scatter gather read write request */
+static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
+ struct hif_scatter_req *scat_req)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ u32 request = scat_req->req;
+ int status = 0;
+ unsigned long flags;
+
+ if (!scat_req->len)
+ return -EINVAL;
+
+ ath6kl_dbg(ATH6KL_DBG_SCATTER,
+ "hif-scatter: total len: %d scatter entries: %d\n",
+ scat_req->len, scat_req->scat_entries);
+
+ if (request & HIF_SYNCHRONOUS) {
+ sdio_claim_host(ar_sdio->func);
+ status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
+ sdio_release_host(ar_sdio->func);
+ } else {
+ spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
+ spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
+ }
+
+ return status;
+}
+
+/* clean up scatter support */
+static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct hif_scatter_req *s_req, *tmp_req;
+ unsigned long flag;
+
+ /* empty the free list */
+ spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
+ list_del(&s_req->list);
+ spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+
+ if (s_req->busrequest)
+ ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
+ kfree(s_req->virt_dma_buf);
+ kfree(s_req->sgentries);
+ kfree(s_req);
+
+ spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ }
+ spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+}
+
+/* setup of HIF scatter resources */
+static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct htc_target *target = ar->htc_target;
+ int ret;
+ bool virt_scat = false;
+
+ /* check if host supports scatter and it meets our requirements */
+ if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
+ ath6kl_err("host only supports scatter of :%d entries, need: %d\n",
+ ar_sdio->func->card->host->max_segs,
+ MAX_SCATTER_ENTRIES_PER_REQ);
+ virt_scat = true;
+ }
+
+ if (!virt_scat) {
+ ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
+ MAX_SCATTER_ENTRIES_PER_REQ,
+ MAX_SCATTER_REQUESTS, virt_scat);
+
+ if (!ret) {
+ ath6kl_dbg(ATH6KL_DBG_SCATTER,
+ "hif-scatter enabled: max scatter req : %d entries: %d\n",
+ MAX_SCATTER_REQUESTS,
+ MAX_SCATTER_ENTRIES_PER_REQ);
+
+ target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
+ target->max_xfer_szper_scatreq =
+ MAX_SCATTER_REQ_TRANSFER_SIZE;
+ } else {
+ ath6kl_sdio_cleanup_scatter(ar);
+ ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n");
+ }
+ }
+
+ if (virt_scat || ret) {
+ ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
+ ATH6KL_SCATTER_ENTRIES_PER_REQ,
+ ATH6KL_SCATTER_REQS, virt_scat);
+
+ if (ret) {
+ ath6kl_err("failed to alloc virtual scatter resources !\n");
+ ath6kl_sdio_cleanup_scatter(ar);
+ return ret;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_SCATTER,
+ "Vitual scatter enabled, max_scat_req:%d, entries:%d\n",
+ ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);
+
+ target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
+ target->max_xfer_szper_scatreq =
+ ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
+ }
+
+ return 0;
+}
+
+static int ath6kl_sdio_suspend(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ mmc_pm_flag_t flags;
+ int ret;
+
+ flags = sdio_get_host_pm_caps(func);
+
+ if (!(flags & MMC_PM_KEEP_POWER)) {
+ /* as the host doesn't support keep power we need to bail out */
+ ath6kl_dbg(ATH6KL_DBG_SDIO,
+ "func %d doesn't support MMC_PM_KEEP_POWER\n",
+ func->num);
+ return -EINVAL;
+ }
+
+ ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+ if (ret) {
+ printk(KERN_ERR "ath6kl: set sdio pm flags failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ ath6kl_deep_sleep_enable(ar);
+
+ return 0;
+}
+
+static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
+ .read_write_sync = ath6kl_sdio_read_write_sync,
+ .write_async = ath6kl_sdio_write_async,
+ .irq_enable = ath6kl_sdio_irq_enable,
+ .irq_disable = ath6kl_sdio_irq_disable,
+ .scatter_req_get = ath6kl_sdio_scatter_req_get,
+ .scatter_req_add = ath6kl_sdio_scatter_req_add,
+ .enable_scatter = ath6kl_sdio_enable_scatter,
+ .scat_req_rw = ath6kl_sdio_async_rw_scatter,
+ .cleanup_scatter = ath6kl_sdio_cleanup_scatter,
+ .suspend = ath6kl_sdio_suspend,
+};
+
+static int ath6kl_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ int ret;
+ struct ath6kl_sdio *ar_sdio;
+ struct ath6kl *ar;
+ int count;
+
+ ath6kl_dbg(ATH6KL_DBG_SDIO,
+ "new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
+ func->num, func->vendor, func->device,
+ func->max_blksize, func->cur_blksize);
+
+ ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
+ if (!ar_sdio)
+ return -ENOMEM;
+
+ ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
+ if (!ar_sdio->dma_buffer) {
+ ret = -ENOMEM;
+ goto err_hif;
+ }
+
+ ar_sdio->func = func;
+ sdio_set_drvdata(func, ar_sdio);
+
+ ar_sdio->id = id;
+ ar_sdio->is_disabled = true;
+
+ spin_lock_init(&ar_sdio->lock);
+ spin_lock_init(&ar_sdio->scat_lock);
+ spin_lock_init(&ar_sdio->wr_async_lock);
+
+ INIT_LIST_HEAD(&ar_sdio->scat_req);
+ INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
+ INIT_LIST_HEAD(&ar_sdio->wr_asyncq);
+
+ INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);
+
+ for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
+ ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);
+
+ ar = ath6kl_core_alloc(&ar_sdio->func->dev);
+ if (!ar) {
+ ath6kl_err("Failed to alloc ath6kl core\n");
+ ret = -ENOMEM;
+ goto err_dma;
+ }
+
+ ar_sdio->ar = ar;
+ ar->hif_priv = ar_sdio;
+ ar->hif_ops = &ath6kl_sdio_ops;
+
+ ath6kl_sdio_set_mbox_info(ar);
+
+ sdio_claim_host(func);
+
+ if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
+ MANUFACTURER_ID_AR6003_BASE) {
+ /* enable 4-bit ASYNC interrupt on AR6003 or later */
+ ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
+ CCCR_SDIO_IRQ_MODE_REG,
+ SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
+ if (ret) {
+ ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
+ ret);
+ sdio_release_host(func);
+ goto err_cfg80211;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_SDIO, "4-bit async irq mode enabled\n");
+ }
+
+ /* give us some time to enable, in ms */
+ func->enable_timeout = 100;
+
+ sdio_release_host(func);
+
+ ret = ath6kl_sdio_power_on(ar_sdio);
+ if (ret)
+ goto err_cfg80211;
+
+ sdio_claim_host(func);
+
+ ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
+ if (ret) {
+ ath6kl_err("Set sdio block size %d failed: %d)\n",
+ HIF_MBOX_BLOCK_SIZE, ret);
+ sdio_release_host(func);
+ goto err_off;
+ }
+
+ sdio_release_host(func);
+
+ ret = ath6kl_core_init(ar);
+ if (ret) {
+ ath6kl_err("Failed to init ath6kl core\n");
+ goto err_off;
+ }
+
+ return ret;
+
+err_off:
+ ath6kl_sdio_power_off(ar_sdio);
+err_cfg80211:
+ ath6kl_cfg80211_deinit(ar_sdio->ar);
+err_dma:
+ kfree(ar_sdio->dma_buffer);
+err_hif:
+ kfree(ar_sdio);
+
+ return ret;
+}
+
+static void ath6kl_sdio_remove(struct sdio_func *func)
+{
+ struct ath6kl_sdio *ar_sdio;
+
+ ath6kl_dbg(ATH6KL_DBG_SDIO,
+ "removed func %d vendor 0x%x device 0x%x\n",
+ func->num, func->vendor, func->device);
+
+ ar_sdio = sdio_get_drvdata(func);
+
+ ath6kl_stop_txrx(ar_sdio->ar);
+ cancel_work_sync(&ar_sdio->wr_async_work);
+
+ ath6kl_unavail_ev(ar_sdio->ar);
+
+ ath6kl_sdio_power_off(ar_sdio);
+
+ kfree(ar_sdio->dma_buffer);
+ kfree(ar_sdio);
+}
+
+static const struct sdio_device_id ath6kl_sdio_devices[] = {
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
+ {},
+};
+
+MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);
+
+static struct sdio_driver ath6kl_sdio_driver = {
+ .name = "ath6kl_sdio",
+ .id_table = ath6kl_sdio_devices,
+ .probe = ath6kl_sdio_probe,
+ .remove = ath6kl_sdio_remove,
+};
+
+static int __init ath6kl_sdio_init(void)
+{
+ int ret;
+
+ ret = sdio_register_driver(&ath6kl_sdio_driver);
+ if (ret)
+ ath6kl_err("sdio driver registration failed: %d\n", ret);
+
+ return ret;
+}
+
+static void __exit ath6kl_sdio_exit(void)
+{
+ sdio_unregister_driver(&ath6kl_sdio_driver);
+}
+
+module_init(ath6kl_sdio_init);
+module_exit(ath6kl_sdio_exit);
+
+MODULE_AUTHOR("Atheros Communications, Inc.");
+MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
+MODULE_LICENSE("Dual BSD/GPL");
+
+MODULE_FIRMWARE(AR6003_REV2_OTP_FILE);
+MODULE_FIRMWARE(AR6003_REV2_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6003_REV2_PATCH_FILE);
+MODULE_FIRMWARE(AR6003_REV2_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_REV2_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_REV3_OTP_FILE);
+MODULE_FIRMWARE(AR6003_REV3_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6003_REV3_PATCH_FILE);
+MODULE_FIRMWARE(AR6003_REV3_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_REV3_DEFAULT_BOARD_DATA_FILE);
*****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
+ #include <linux/module.h>
#include "iwl-bus.h"
-#include "iwl-agn.h"
-#include "iwl-core.h"
#include "iwl-io.h"
+#include "iwl-shared.h"
+#include "iwl-trans.h"
+#include "iwl-csr.h"
+#include "iwl-cfg.h"
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
* Larry Finger <Larry.Finger@lwfinger.net>
*****************************************************************************/
++#include <linux/moduleparam.h>
++
#include "wifi.h"
+static unsigned int debug = DBG_EMERG;
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "Set global debug level for rtlwifi (0,2-5)");
+
void rtl_dbgp_flag_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
--- /dev/null
+/*
+ * Texas Instrument's NFC Driver For Shared Transport.
+ *
+ * NFC Driver acts as interface between NCI core and
+ * TI Shared Transport Layer.
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * Written by Ilan Elias <ilane@ti.com>
+ *
+ * Acknowledgements:
+ * This file is based on btwilink.c, which was written
+ * by Raja Mani and Pavan Savoy.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/platform_device.h>
++#include <linux/module.h>
+#include <linux/nfc.h>
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include <linux/ti_wilink_st.h>
+
+#define NFCWILINK_CHNL 12
+#define NFCWILINK_OPCODE 7
+#define NFCWILINK_MAX_FRAME_SIZE 300
+#define NFCWILINK_HDR_LEN 4
+#define NFCWILINK_OFFSET_LEN_IN_HDR 1
+#define NFCWILINK_LEN_SIZE 2
+#define NFCWILINK_REGISTER_TIMEOUT 8000 /* 8 sec */
+
+struct nfcwilink_hdr {
+ u8 chnl;
+ u8 opcode;
+ u16 len;
+} __packed;
+
+struct nfcwilink {
+ struct platform_device *pdev;
+ struct nci_dev *ndev;
+ unsigned long flags;
+
+ char st_register_cb_status;
+ long (*st_write) (struct sk_buff *);
+ struct completion st_register_completed;
+};
+
+/* NFCWILINK driver flags */
+enum {
+ NFCWILINK_RUNNING,
+};
+
+/* Called by ST when registration is complete */
+static void nfcwilink_register_complete(void *priv_data, char data)
+{
+ struct nfcwilink *drv = priv_data;
+
+ nfc_dev_dbg(&drv->pdev->dev, "register_complete entry");
+
+ /* store ST registration status */
+ drv->st_register_cb_status = data;
+
+ /* complete the wait in nfc_st_open() */
+ complete(&drv->st_register_completed);
+}
+
+/* Called by ST when receive data is available */
+static long nfcwilink_receive(void *priv_data, struct sk_buff *skb)
+{
+ struct nfcwilink *drv = priv_data;
+ int rc;
+
+ nfc_dev_dbg(&drv->pdev->dev, "receive entry, len %d", skb->len);
+
+ if (!skb)
+ return -EFAULT;
+
+ if (!drv) {
+ kfree_skb(skb);
+ return -EFAULT;
+ }
+
+ /*
+ * Strip the ST header (apart from the chnl byte, which is not
+ * received in the hdr).
+ */
+ skb_pull(skb, (NFCWILINK_HDR_LEN-1));
+
+ skb->dev = (void *) drv->ndev;
+
+ /* Forward skb to NCI core layer */
+ rc = nci_recv_frame(skb);
+ if (rc < 0) {
+ nfc_dev_err(&drv->pdev->dev, "nci_recv_frame failed %d", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/* protocol structure registered with ST */
+static struct st_proto_s nfcwilink_proto = {
+ .chnl_id = NFCWILINK_CHNL,
+ .max_frame_size = NFCWILINK_MAX_FRAME_SIZE,
+ .hdr_len = (NFCWILINK_HDR_LEN-1), /* not including chnl byte */
+ .offset_len_in_hdr = NFCWILINK_OFFSET_LEN_IN_HDR,
+ .len_size = NFCWILINK_LEN_SIZE,
+ .reserve = 0,
+ .recv = nfcwilink_receive,
+ .reg_complete_cb = nfcwilink_register_complete,
+ .write = NULL,
+};
+
+static int nfcwilink_open(struct nci_dev *ndev)
+{
+ struct nfcwilink *drv = nci_get_drvdata(ndev);
+ unsigned long comp_ret;
+ int rc;
+
+ nfc_dev_dbg(&drv->pdev->dev, "open entry");
+
+ if (test_and_set_bit(NFCWILINK_RUNNING, &drv->flags)) {
+ rc = -EBUSY;
+ goto exit;
+ }
+
+ nfcwilink_proto.priv_data = drv;
+
+ init_completion(&drv->st_register_completed);
+ drv->st_register_cb_status = -EINPROGRESS;
+
+ rc = st_register(&nfcwilink_proto);
+ if (rc < 0) {
+ if (rc == -EINPROGRESS) {
+ comp_ret = wait_for_completion_timeout(
+ &drv->st_register_completed,
+ msecs_to_jiffies(NFCWILINK_REGISTER_TIMEOUT));
+
+ nfc_dev_dbg(&drv->pdev->dev,
+ "wait_for_completion_timeout returned %ld",
+ comp_ret);
+
+ if (comp_ret == 0) {
+ /* timeout */
+ rc = -ETIMEDOUT;
+ goto clear_exit;
+ } else if (drv->st_register_cb_status != 0) {
+ rc = drv->st_register_cb_status;
+ nfc_dev_err(&drv->pdev->dev,
+ "st_register_cb failed %d", rc);
+ goto clear_exit;
+ }
+ } else {
+ nfc_dev_err(&drv->pdev->dev,
+ "st_register failed %d", rc);
+ goto clear_exit;
+ }
+ }
+
+ /* st_register MUST fill the write callback */
+ BUG_ON(nfcwilink_proto.write == NULL);
+ drv->st_write = nfcwilink_proto.write;
+
+ goto exit;
+
+clear_exit:
+ clear_bit(NFCWILINK_RUNNING, &drv->flags);
+
+exit:
+ return rc;
+}
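
The open path above is a classic completion handshake: arm the completion, start the asynchronous registration, and block until reg_complete_cb fires. A rough userspace sketch of the same rendezvous, with a pthread condition variable standing in for struct completion (names illustrative; the kernel version also bounds the wait via wait_for_completion_timeout()):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int done;

/* Callback side: the equivalent of complete() in
 * nfcwilink_register_complete(). */
static void register_complete(void)
{
	pthread_mutex_lock(&lock);
	done = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

/* Opener side: the equivalent of wait_for_completion(). */
static void wait_for_registration(void)
{
	pthread_mutex_lock(&lock);
	while (!done)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	register_complete();	/* normally fired from another context */
	wait_for_registration();
	return 0;
}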
+
+static int nfcwilink_close(struct nci_dev *ndev)
+{
+ struct nfcwilink *drv = nci_get_drvdata(ndev);
+ int rc;
+
+ nfc_dev_dbg(&drv->pdev->dev, "close entry");
+
+ if (!test_and_clear_bit(NFCWILINK_RUNNING, &drv->flags))
+ return 0;
+
+ rc = st_unregister(&nfcwilink_proto);
+ if (rc)
+ nfc_dev_err(&drv->pdev->dev, "st_unregister failed %d", rc);
+
+ drv->st_write = NULL;
+
+ return rc;
+}
+
+static int nfcwilink_send(struct sk_buff *skb)
+{
+ struct nci_dev *ndev = (struct nci_dev *)skb->dev;
+ struct nfcwilink *drv = nci_get_drvdata(ndev);
+ struct nfcwilink_hdr hdr = {NFCWILINK_CHNL, NFCWILINK_OPCODE, 0x0000};
+ long len;
+
+ nfc_dev_dbg(&drv->pdev->dev, "send entry, len %d", skb->len);
+
+ if (!test_bit(NFCWILINK_RUNNING, &drv->flags))
+ return -EBUSY;
+
+ /* add the ST hdr to the start of the buffer */
+ hdr.len = skb->len;
+ memcpy(skb_push(skb, NFCWILINK_HDR_LEN), &hdr, NFCWILINK_HDR_LEN);
+
+ /* Insert skb to shared transport layer's transmit queue.
+ * Freeing skb memory is taken care in shared transport layer,
+ * so don't free skb memory here.
+ */
+ len = drv->st_write(skb);
+ if (len < 0) {
+ kfree_skb(skb);
+ nfc_dev_err(&drv->pdev->dev, "st_write failed %ld", len);
+ return -EFAULT;
+ }
+
+ return 0;
+}
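
skb_push() can prepend the 4-byte ST header here without copying the payload only because the driver asked nci_allocate_device() for NFCWILINK_HDR_LEN bytes of headroom. The idea in miniature, with plain buffers standing in for sk_buffs (sizes and sample header values are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HDR_LEN 4

struct pkt {
	uint8_t storage[HDR_LEN + 64];	/* headroom + payload area */
	uint8_t *data;			/* start of valid data */
	size_t len;
};

/* Equivalent of skb_push(): grow the packet at the front by moving
 * the data pointer back into the reserved headroom. */
static uint8_t *pkt_push(struct pkt *p, size_t n)
{
	p->data -= n;
	p->len += n;
	return p->data;
}

int main(void)
{
	struct pkt p;
	uint8_t hdr[HDR_LEN] = { 12, 7, 3, 0 };	/* chnl, opcode, len */

	p.data = p.storage + HDR_LEN;	/* reserve headroom up front */
	memcpy(p.data, "abc", 3);
	p.len = 3;

	memcpy(pkt_push(&p, HDR_LEN), hdr, HDR_LEN);
	printf("len %zu chnl %u\n", p.len, p.data[0]);
	return 0;
}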
+
+static struct nci_ops nfcwilink_ops = {
+ .open = nfcwilink_open,
+ .close = nfcwilink_close,
+ .send = nfcwilink_send,
+};
+
+static int nfcwilink_probe(struct platform_device *pdev)
+{
+ struct nfcwilink *drv;
+ int rc;
+ u32 protocols;
+
+ nfc_dev_dbg(&pdev->dev, "probe entry");
+
+ drv = kzalloc(sizeof(struct nfcwilink), GFP_KERNEL);
+ if (!drv) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ drv->pdev = pdev;
+
+ protocols = NFC_PROTO_JEWEL_MASK
+ | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK
+ | NFC_PROTO_ISO14443_MASK
+ | NFC_PROTO_NFC_DEP_MASK;
+
+ drv->ndev = nci_allocate_device(&nfcwilink_ops,
+ protocols,
+ NFCWILINK_HDR_LEN,
+ 0);
+ if (!drv->ndev) {
+ nfc_dev_err(&pdev->dev, "nci_allocate_device failed");
+ rc = -ENOMEM;
+ goto free_exit;
+ }
+
+ nci_set_parent_dev(drv->ndev, &pdev->dev);
+ nci_set_drvdata(drv->ndev, drv);
+
+ rc = nci_register_device(drv->ndev);
+ if (rc < 0) {
+ nfc_dev_err(&pdev->dev, "nci_register_device failed %d", rc);
+ goto free_dev_exit;
+ }
+
+ dev_set_drvdata(&pdev->dev, drv);
+
+ goto exit;
+
+free_dev_exit:
+ nci_free_device(drv->ndev);
+
+free_exit:
+ kfree(drv);
+
+exit:
+ return rc;
+}
+
+static int nfcwilink_remove(struct platform_device *pdev)
+{
+ struct nfcwilink *drv = dev_get_drvdata(&pdev->dev);
+ struct nci_dev *ndev;
+
+ nfc_dev_dbg(&pdev->dev, "remove entry");
+
+ if (!drv)
+ return -EFAULT;
+
+ ndev = drv->ndev;
+
+ nci_unregister_device(ndev);
+ nci_free_device(ndev);
+
+ kfree(drv);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver nfcwilink_driver = {
+ .probe = nfcwilink_probe,
+ .remove = nfcwilink_remove,
+ .driver = {
+ .name = "nfcwilink",
+ .owner = THIS_MODULE,
+ },
+};
+
+/* ------- Module Init/Exit interfaces ------ */
+static int __init nfcwilink_init(void)
+{
+	printk(KERN_INFO "NFC Driver for TI WiLink\n");
+
+ return platform_driver_register(&nfcwilink_driver);
+}
+
+static void __exit nfcwilink_exit(void)
+{
+ platform_driver_unregister(&nfcwilink_driver);
+}
+
+module_init(nfcwilink_init);
+module_exit(nfcwilink_exit);
+
+/* ------ Module Info ------ */
+
+MODULE_AUTHOR("Ilan Elias <ilane@ti.com>");
+MODULE_DESCRIPTION("NFC Driver for TI Shared Transport");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * Core driver for the pin control subsystem
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#define pr_fmt(fmt) "pinctrl core: " fmt
+
+#include <linux/kernel.h>
++#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/radix-tree.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/machine.h>
+#include "core.h"
+#include "pinmux.h"
+
+/* Global list of pin control devices */
+static DEFINE_MUTEX(pinctrldev_list_mutex);
+static LIST_HEAD(pinctrldev_list);
+
+static void pinctrl_dev_release(struct device *dev)
+{
+ struct pinctrl_dev *pctldev = dev_get_drvdata(dev);
+ kfree(pctldev);
+}
+
+const char *pctldev_get_name(struct pinctrl_dev *pctldev)
+{
+	/* We're not allowed to register devices without a name */
+ return pctldev->desc->name;
+}
+EXPORT_SYMBOL_GPL(pctldev_get_name);
+
+void *pctldev_get_drvdata(struct pinctrl_dev *pctldev)
+{
+ return pctldev->driver_data;
+}
+EXPORT_SYMBOL_GPL(pctldev_get_drvdata);
+
+/**
+ * get_pctldev_from_dev() - look up pin controller device
+ * @dev: a device pointer, this may be NULL but then devname needs to be
+ * defined instead
+ * @devname: the name of a device instance, as returned by dev_name(), this
+ * may be NULL but then dev needs to be defined instead
+ *
+ * Looks up a pin control device matching a certain device name or pure device
+ * pointer, the pure device pointer will take precedence.
+ */
+struct pinctrl_dev *get_pctldev_from_dev(struct device *dev,
+ const char *devname)
+{
+ struct pinctrl_dev *pctldev = NULL;
+ bool found = false;
+
+ mutex_lock(&pinctrldev_list_mutex);
+ list_for_each_entry(pctldev, &pinctrldev_list, node) {
+ if (dev && &pctldev->dev == dev) {
+ /* Matched on device pointer */
+ found = true;
+ break;
+ }
+
+ if (devname &&
+ !strcmp(dev_name(&pctldev->dev), devname)) {
+ /* Matched on device name */
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&pinctrldev_list_mutex);
+
+ if (found)
+ return pctldev;
+
+ return NULL;
+}
+
+struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, int pin)
+{
+ struct pin_desc *pindesc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pctldev->pin_desc_tree_lock, flags);
+ pindesc = radix_tree_lookup(&pctldev->pin_desc_tree, pin);
+ spin_unlock_irqrestore(&pctldev->pin_desc_tree_lock, flags);
+
+ return pindesc;
+}
+
+/**
+ * pin_is_valid() - check if pin exists on controller
+ * @pctldev: the pin control device to check the pin on
+ * @pin: pin to check, use the local pin controller index number
+ *
+ * This tells us whether a certain pin exists on a certain pin controller.
+ * Pin lists may be sparse, so some pins may not exist.
+ */
+bool pin_is_valid(struct pinctrl_dev *pctldev, int pin)
+{
+ struct pin_desc *pindesc;
+
+ if (pin < 0)
+ return false;
+
+ pindesc = pin_desc_get(pctldev, pin);
+ if (pindesc == NULL)
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(pin_is_valid);
+
+/* Deletes a range of pin descriptors */
+static void pinctrl_free_pindescs(struct pinctrl_dev *pctldev,
+ const struct pinctrl_pin_desc *pins,
+ unsigned num_pins)
+{
+ int i;
+
+ spin_lock(&pctldev->pin_desc_tree_lock);
+ for (i = 0; i < num_pins; i++) {
+ struct pin_desc *pindesc;
+
+ pindesc = radix_tree_lookup(&pctldev->pin_desc_tree,
+ pins[i].number);
+ if (pindesc != NULL) {
+ radix_tree_delete(&pctldev->pin_desc_tree,
+ pins[i].number);
+ }
+ kfree(pindesc);
+ }
+ spin_unlock(&pctldev->pin_desc_tree_lock);
+}
+
+static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
+ unsigned number, const char *name)
+{
+ struct pin_desc *pindesc;
+
+ pindesc = pin_desc_get(pctldev, number);
+ if (pindesc != NULL) {
+ pr_err("pin %d already registered on %s\n", number,
+ pctldev->desc->name);
+ return -EINVAL;
+ }
+
+ pindesc = kzalloc(sizeof(*pindesc), GFP_KERNEL);
+ if (pindesc == NULL)
+ return -ENOMEM;
+ spin_lock_init(&pindesc->lock);
+
+ /* Set owner */
+ pindesc->pctldev = pctldev;
+
+ /* Copy optional basic pin info */
+ if (name)
+ strlcpy(pindesc->name, name, sizeof(pindesc->name));
+
+ spin_lock(&pctldev->pin_desc_tree_lock);
+ radix_tree_insert(&pctldev->pin_desc_tree, number, pindesc);
+ spin_unlock(&pctldev->pin_desc_tree_lock);
+ pr_debug("registered pin %d (%s) on %s\n",
+ number, name ? name : "(unnamed)", pctldev->desc->name);
+ return 0;
+}
+
+static int pinctrl_register_pins(struct pinctrl_dev *pctldev,
+ struct pinctrl_pin_desc const *pins,
+ unsigned num_descs)
+{
+ unsigned i;
+ int ret = 0;
+
+ for (i = 0; i < num_descs; i++) {
+ ret = pinctrl_register_one_pin(pctldev,
+ pins[i].number, pins[i].name);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * pinctrl_match_gpio_range() - check if a certain GPIO pin is in range
+ * @pctldev: pin controller device to check
+ * @gpio: gpio pin to check taken from the global GPIO pin space
+ *
+ * Tries to match a GPIO pin number to the ranges handled by a certain pin
+ * controller, returning the range or NULL if there is no match.
+ */
+static struct pinctrl_gpio_range *
+pinctrl_match_gpio_range(struct pinctrl_dev *pctldev, unsigned gpio)
+{
+ struct pinctrl_gpio_range *range = NULL;
+
+ /* Loop over the ranges */
+ mutex_lock(&pctldev->gpio_ranges_lock);
+ list_for_each_entry(range, &pctldev->gpio_ranges, node) {
+ /* Check if we're in the valid range */
+ if (gpio >= range->base &&
+ gpio < range->base + range->npins) {
+ mutex_unlock(&pctldev->gpio_ranges_lock);
+ return range;
+ }
+ }
+ mutex_unlock(&pctldev->gpio_ranges_lock);
+
+ return NULL;
+}
+
+/**
+ * pinctrl_get_device_gpio_range() - find device for GPIO range
+ * @gpio: the pin to locate the pin controller for
+ * @outdev: the pin control device if found
+ * @outrange: the GPIO range if found
+ *
+ * Find the pin controller handling a certain GPIO pin from the pinspace of
+ * the GPIO subsystem, return the device and the matching GPIO range. Returns
+ * negative if the GPIO range could not be found in any device.
+ */
+int pinctrl_get_device_gpio_range(unsigned gpio,
+ struct pinctrl_dev **outdev,
+ struct pinctrl_gpio_range **outrange)
+{
+ struct pinctrl_dev *pctldev = NULL;
+
+ /* Loop over the pin controllers */
+ mutex_lock(&pinctrldev_list_mutex);
+ list_for_each_entry(pctldev, &pinctrldev_list, node) {
+ struct pinctrl_gpio_range *range;
+
+ range = pinctrl_match_gpio_range(pctldev, gpio);
+		if (range != NULL) {
+			mutex_unlock(&pinctrldev_list_mutex);
+			*outdev = pctldev;
+			*outrange = range;
+			return 0;
+		}
+ }
+ mutex_unlock(&pinctrldev_list_mutex);
+
+ return -EINVAL;
+}
+
+/**
+ * pinctrl_add_gpio_range() - register a GPIO range for a controller
+ * @pctldev: pin controller device to add the range to
+ * @range: the GPIO range to add
+ *
+ * This adds a range of GPIOs to be handled by a certain pin controller. Call
+ * this to register handled ranges after registering your pin controller.
+ */
+void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range)
+{
+ mutex_lock(&pctldev->gpio_ranges_lock);
+ list_add(&range->node, &pctldev->gpio_ranges);
+ mutex_unlock(&pctldev->gpio_ranges_lock);
+}
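+
+/*
+ * Example (illustrative only, all names assumed): a driver exposing 32
+ * pins that map to global GPIOs 160..191 could register its range right
+ * after pinctrl_register() succeeds:
+ *
+ *	static struct pinctrl_gpio_range foo_gpio_range = {
+ *		.name = "foo-gpio",
+ *		.id = 0,
+ *		.base = 160,
+ *		.npins = 32,
+ *	};
+ *
+ *	pinctrl_add_gpio_range(pctldev, &foo_gpio_range);
+ */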
+
+/**
+ * pinctrl_remove_gpio_range() - remove a range of GPIOs from a pin controller
+ * @pctldev: pin controller device to remove the range from
+ * @range: the GPIO range to remove
+ */
+void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range)
+{
+ mutex_lock(&pctldev->gpio_ranges_lock);
+ list_del(&range->node);
+ mutex_unlock(&pctldev->gpio_ranges_lock);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static int pinctrl_pins_show(struct seq_file *s, void *what)
+{
+ struct pinctrl_dev *pctldev = s->private;
+ const struct pinctrl_ops *ops = pctldev->desc->pctlops;
+ unsigned pin;
+
+ seq_printf(s, "registered pins: %d\n", pctldev->desc->npins);
+ seq_printf(s, "max pin number: %d\n", pctldev->desc->maxpin);
+
+	/* The highest pin number needs to be included in the loop, thus <= */
+ for (pin = 0; pin <= pctldev->desc->maxpin; pin++) {
+ struct pin_desc *desc;
+
+ desc = pin_desc_get(pctldev, pin);
+ /* Pin space may be sparse */
+ if (desc == NULL)
+ continue;
+
+ seq_printf(s, "pin %d (%s) ", pin,
+ desc->name ? desc->name : "unnamed");
+
+ /* Driver-specific info per pin */
+ if (ops->pin_dbg_show)
+ ops->pin_dbg_show(pctldev, s, pin);
+
+ seq_puts(s, "\n");
+ }
+
+ return 0;
+}
+
+static int pinctrl_groups_show(struct seq_file *s, void *what)
+{
+ struct pinctrl_dev *pctldev = s->private;
+ const struct pinctrl_ops *ops = pctldev->desc->pctlops;
+ unsigned selector = 0;
+
+ /* No grouping */
+ if (!ops)
+ return 0;
+
+ seq_puts(s, "registered pin groups:\n");
+ while (ops->list_groups(pctldev, selector) >= 0) {
+ unsigned *pins;
+ unsigned num_pins;
+ const char *gname = ops->get_group_name(pctldev, selector);
+ int ret;
+ int i;
+
+ ret = ops->get_group_pins(pctldev, selector,
+ &pins, &num_pins);
+ if (ret)
+ seq_printf(s, "%s [ERROR GETTING PINS]\n",
+ gname);
+ else {
+ seq_printf(s, "group: %s, pins = [ ", gname);
+ for (i = 0; i < num_pins; i++)
+ seq_printf(s, "%d ", pins[i]);
+ seq_puts(s, "]\n");
+ }
+ selector++;
+ }
+
+ return 0;
+}
+
+static int pinctrl_gpioranges_show(struct seq_file *s, void *what)
+{
+ struct pinctrl_dev *pctldev = s->private;
+ struct pinctrl_gpio_range *range = NULL;
+
+ seq_puts(s, "GPIO ranges handled:\n");
+
+ /* Loop over the ranges */
+ mutex_lock(&pctldev->gpio_ranges_lock);
+ list_for_each_entry(range, &pctldev->gpio_ranges, node) {
+ seq_printf(s, "%u: %s [%u - %u]\n", range->id, range->name,
+ range->base, (range->base + range->npins - 1));
+ }
+ mutex_unlock(&pctldev->gpio_ranges_lock);
+
+ return 0;
+}
+
+static int pinctrl_devices_show(struct seq_file *s, void *what)
+{
+ struct pinctrl_dev *pctldev;
+
+ seq_puts(s, "name [pinmux]\n");
+ mutex_lock(&pinctrldev_list_mutex);
+ list_for_each_entry(pctldev, &pinctrldev_list, node) {
+ seq_printf(s, "%s ", pctldev->desc->name);
+ if (pctldev->desc->pmxops)
+ seq_puts(s, "yes");
+ else
+ seq_puts(s, "no");
+ seq_puts(s, "\n");
+ }
+ mutex_unlock(&pinctrldev_list_mutex);
+
+ return 0;
+}
+
+static int pinctrl_pins_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinctrl_pins_show, inode->i_private);
+}
+
+static int pinctrl_groups_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinctrl_groups_show, inode->i_private);
+}
+
+static int pinctrl_gpioranges_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinctrl_gpioranges_show, inode->i_private);
+}
+
+static int pinctrl_devices_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinctrl_devices_show, NULL);
+}
+
+static const struct file_operations pinctrl_pins_ops = {
+ .open = pinctrl_pins_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations pinctrl_groups_ops = {
+ .open = pinctrl_groups_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations pinctrl_gpioranges_ops = {
+ .open = pinctrl_gpioranges_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations pinctrl_devices_ops = {
+ .open = pinctrl_devices_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *debugfs_root;
+
+static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
+{
+	struct dentry *device_root;
+
+ device_root = debugfs_create_dir(dev_name(&pctldev->dev),
+ debugfs_root);
+ if (IS_ERR(device_root) || !device_root) {
+ pr_warn("failed to create debugfs directory for %s\n",
+ dev_name(&pctldev->dev));
+ return;
+ }
+ debugfs_create_file("pins", S_IFREG | S_IRUGO,
+ device_root, pctldev, &pinctrl_pins_ops);
+ debugfs_create_file("pingroups", S_IFREG | S_IRUGO,
+ device_root, pctldev, &pinctrl_groups_ops);
+ debugfs_create_file("gpio-ranges", S_IFREG | S_IRUGO,
+ device_root, pctldev, &pinctrl_gpioranges_ops);
+ pinmux_init_device_debugfs(device_root, pctldev);
+}
+
+static void pinctrl_init_debugfs(void)
+{
+ debugfs_root = debugfs_create_dir("pinctrl", NULL);
+ if (IS_ERR(debugfs_root) || !debugfs_root) {
+ pr_warn("failed to create debugfs directory\n");
+ debugfs_root = NULL;
+ return;
+ }
+
+ debugfs_create_file("pinctrl-devices", S_IFREG | S_IRUGO,
+ debugfs_root, NULL, &pinctrl_devices_ops);
+ pinmux_init_debugfs(debugfs_root);
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
+{
+}
+
+static void pinctrl_init_debugfs(void)
+{
+}
+
+#endif
+
+/**
+ * pinctrl_register() - register a pin controller device
+ * @pctldesc: descriptor for this pin controller
+ * @dev: parent device for this pin controller
+ * @driver_data: private pin controller data for this pin controller
+ */
+struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
+ struct device *dev, void *driver_data)
+{
+ static atomic_t pinmux_no = ATOMIC_INIT(0);
+ struct pinctrl_dev *pctldev;
+ int ret;
+
+ if (pctldesc == NULL)
+ return ERR_PTR(-EINVAL);
+ if (pctldesc->name == NULL)
+ return ERR_PTR(-EINVAL);
+
+ /* If we're implementing pinmuxing, check the ops for sanity */
+ if (pctldesc->pmxops) {
+ ret = pinmux_check_ops(pctldesc->pmxops);
+ if (ret) {
+ pr_err("%s pinmux ops lacks necessary functions\n",
+ pctldesc->name);
+ return ERR_PTR(ret);
+ }
+ }
+
+ pctldev = kzalloc(sizeof(struct pinctrl_dev), GFP_KERNEL);
+ if (pctldev == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ /* Initialize pin control device struct */
+ pctldev->owner = pctldesc->owner;
+ pctldev->desc = pctldesc;
+ pctldev->driver_data = driver_data;
+ INIT_RADIX_TREE(&pctldev->pin_desc_tree, GFP_KERNEL);
+ spin_lock_init(&pctldev->pin_desc_tree_lock);
+ INIT_LIST_HEAD(&pctldev->gpio_ranges);
+ mutex_init(&pctldev->gpio_ranges_lock);
+
+ /* Register device */
+ pctldev->dev.parent = dev;
+ dev_set_name(&pctldev->dev, "pinctrl.%d",
+ atomic_inc_return(&pinmux_no) - 1);
+ pctldev->dev.release = pinctrl_dev_release;
+ ret = device_register(&pctldev->dev);
+ if (ret != 0) {
+ pr_err("error in device registration\n");
+ goto out_reg_dev_err;
+ }
+ dev_set_drvdata(&pctldev->dev, pctldev);
+
+ /* Register all the pins */
+ pr_debug("try to register %d pins on %s...\n",
+ pctldesc->npins, pctldesc->name);
+ ret = pinctrl_register_pins(pctldev, pctldesc->pins, pctldesc->npins);
+ if (ret) {
+ pr_err("error during pin registration\n");
+ pinctrl_free_pindescs(pctldev, pctldesc->pins,
+ pctldesc->npins);
+ goto out_reg_pins_err;
+ }
+
+ pinctrl_init_device_debugfs(pctldev);
+ mutex_lock(&pinctrldev_list_mutex);
+ list_add(&pctldev->node, &pinctrldev_list);
+ mutex_unlock(&pinctrldev_list_mutex);
+ pinmux_hog_maps(pctldev);
+ return pctldev;
+
+out_reg_pins_err:
+ device_del(&pctldev->dev);
+out_reg_dev_err:
+ put_device(&pctldev->dev);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(pinctrl_register);
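+
+/*
+ * Example (illustrative only, all names assumed): a minimal registration
+ * with three sparse pins. Pin numbers are local to the controller and
+ * need not be contiguous:
+ *
+ *	static const struct pinctrl_pin_desc foo_pins[] = {
+ *		PINCTRL_PIN(0, "A1"),
+ *		PINCTRL_PIN(1, "A2"),
+ *		PINCTRL_PIN(8, "B1"),
+ *	};
+ *
+ *	static struct pinctrl_desc foo_desc = {
+ *		.name = "foo",
+ *		.pins = foo_pins,
+ *		.npins = ARRAY_SIZE(foo_pins),
+ *		.maxpin = 8,
+ *		.pctlops = &foo_pctl_ops,
+ *		.owner = THIS_MODULE,
+ *	};
+ *
+ *	pctldev = pinctrl_register(&foo_desc, &pdev->dev, foo);
+ *	if (IS_ERR(pctldev))
+ *		return PTR_ERR(pctldev);
+ */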
+
+/**
+ * pinctrl_unregister() - unregister pinmux
+ * @pctldev: pin controller to unregister
+ *
+ * Called by pinmux drivers to unregister a pinmux.
+ */
+void pinctrl_unregister(struct pinctrl_dev *pctldev)
+{
+ if (pctldev == NULL)
+ return;
+
+ pinmux_unhog_maps(pctldev);
+ /* TODO: check that no pinmuxes are still active? */
+ mutex_lock(&pinctrldev_list_mutex);
+ list_del(&pctldev->node);
+ mutex_unlock(&pinctrldev_list_mutex);
+ /* Destroy descriptor tree */
+ pinctrl_free_pindescs(pctldev, pctldev->desc->pins,
+ pctldev->desc->npins);
+ device_unregister(&pctldev->dev);
+}
+EXPORT_SYMBOL_GPL(pinctrl_unregister);
+
+static int __init pinctrl_init(void)
+{
+ pr_info("initialized pinctrl subsystem\n");
+ pinctrl_init_debugfs();
+ return 0;
+}
+
+/* init early since many drivers really need to initialize pinmux early */
+core_initcall(pinctrl_init);
#define KMSG_COMPONENT "vmur"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/kernel_stat.h>
#include <linux/cdev.h>
#include <linux/slab.h>
+ #include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/cio.h>
*/
#include <linux/timer.h>
+#include <linux/delay.h>
+ #include <linux/module.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
++#include <linux/export.h>
#include "sas_internal.h"
#include <linux/sched.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/card.h>
#include <linux/semaphore.h>
#include <linux/firmware.h>
+ #include <linux/module.h>
#include <asm/unaligned.h>
#include <defs.h>
#include <brcmu_wifi.h>
+ #include <linux/export.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
#include <linux/spi/spi.h>
+ #include <linux/export.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "adis16201.h"
+ #include <linux/export.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/spi/spi.h>
+ #include <linux/export.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "adis16203.h"
+ #include <linux/export.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/spi/spi.h>
+ #include <linux/export.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "adis16204.h"
+ #include <linux/export.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/spi/spi.h>
+ #include <linux/export.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "adis16209.h"
+ #include <linux/export.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/spi/spi.h>
+ #include <linux/export.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "adis16240.h"
#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
-#include <linux/sysfs.h>
#include <linux/slab.h>
+ #include <linux/export.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../ring_sw.h"
#include "../kfifo_buf.h"
-#include "accel.h"
#include "../trigger.h"
+#include "../trigger_consumer.h"
#include "lis3l02dq.h"
/**
--- /dev/null
+/*
+ * AD7746 capacitive sensor driver supporting AD7745, AD7746 and AD7747
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
++#include <linux/module.h>
++#include <linux/stat.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+
+#include "ad7746.h"
+
+/*
+ * AD7746 Register Definition
+ */
+
+#define AD7746_REG_STATUS 0
+#define AD7746_REG_CAP_DATA_HIGH 1
+#define AD7746_REG_CAP_DATA_MID 2
+#define AD7746_REG_CAP_DATA_LOW 3
+#define AD7746_REG_VT_DATA_HIGH 4
+#define AD7746_REG_VT_DATA_MID 5
+#define AD7746_REG_VT_DATA_LOW 6
+#define AD7746_REG_CAP_SETUP 7
+#define AD7746_REG_VT_SETUP 8
+#define AD7746_REG_EXC_SETUP 9
+#define AD7746_REG_CFG 10
+#define AD7746_REG_CAPDACA 11
+#define AD7746_REG_CAPDACB 12
+#define AD7746_REG_CAP_OFFH 13
+#define AD7746_REG_CAP_OFFL 14
+#define AD7746_REG_CAP_GAINH 15
+#define AD7746_REG_CAP_GAINL 16
+#define AD7746_REG_VOLT_GAINH 17
+#define AD7746_REG_VOLT_GAINL 18
+
+/* Status Register Bit Designations (AD7746_REG_STATUS) */
+#define AD7746_STATUS_EXCERR (1 << 3)
+#define AD7746_STATUS_RDY (1 << 2)
+#define AD7746_STATUS_RDYVT (1 << 1)
+#define AD7746_STATUS_RDYCAP (1 << 0)
+
+/* Capacitive Channel Setup Register Bit Designations (AD7746_REG_CAP_SETUP) */
+#define AD7746_CAPSETUP_CAPEN (1 << 7)
+#define AD7746_CAPSETUP_CIN2 (1 << 6) /* AD7746 only */
+#define AD7746_CAPSETUP_CAPDIFF (1 << 5)
+#define AD7746_CAPSETUP_CACHOP (1 << 0)
+
+/* Voltage/Temperature Setup Register Bit Designations (AD7746_REG_VT_SETUP) */
+#define AD7746_VTSETUP_VTEN (1 << 7)
+#define AD7746_VTSETUP_VTMD_INT_TEMP (0 << 5)
+#define AD7746_VTSETUP_VTMD_EXT_TEMP (1 << 5)
+#define AD7746_VTSETUP_VTMD_VDD_MON (2 << 5)
+#define AD7746_VTSETUP_VTMD_EXT_VIN (3 << 5)
+#define AD7746_VTSETUP_EXTREF (1 << 4)
+#define AD7746_VTSETUP_VTSHORT (1 << 1)
+#define AD7746_VTSETUP_VTCHOP (1 << 0)
+
+/* Excitation Setup Register Bit Designations (AD7746_REG_EXC_SETUP) */
+#define AD7746_EXCSETUP_CLKCTRL (1 << 7)
+#define AD7746_EXCSETUP_EXCON (1 << 6)
+#define AD7746_EXCSETUP_EXCB (1 << 5)
+#define AD7746_EXCSETUP_NEXCB (1 << 4)
+#define AD7746_EXCSETUP_EXCA (1 << 3)
+#define AD7746_EXCSETUP_NEXCA (1 << 2)
+#define AD7746_EXCSETUP_EXCLVL(x) (((x) & 0x3) << 0)
+
+/* Config Register Bit Designations (AD7746_REG_CFG) */
+#define AD7746_CONF_VTFS(x) ((x) << 6)
+#define AD7746_CONF_CAPFS(x) ((x) << 3)
+#define AD7746_CONF_MODE_IDLE (0 << 0)
+#define AD7746_CONF_MODE_CONT_CONV (1 << 0)
+#define AD7746_CONF_MODE_SINGLE_CONV (2 << 0)
+#define AD7746_CONF_MODE_PWRDN (3 << 0)
+#define AD7746_CONF_MODE_OFFS_CAL (5 << 0)
+#define AD7746_CONF_MODE_GAIN_CAL (6 << 0)
+
+/* CAPDAC Register Bit Designations (AD7746_REG_CAPDACx) */
+#define AD7746_CAPDAC_DACEN (1 << 7)
+#define AD7746_CAPDAC_DACP(x) ((x) & 0x7F)
+
+/*
+ * struct ad7746_chip_info - chip specific information
+ */
+
+struct ad7746_chip_info {
+ struct i2c_client *client;
+ /*
+ * Capacitive channel digital filter setup;
+ * conversion time/update rate setup per channel
+ */
+ u8 config;
+ u8 cap_setup;
+ u8 vt_setup;
+ u8 capdac[2][2];
+ s8 capdac_set;
+};
+
+enum ad7746_chan {
+ VIN,
+ VIN_VDD,
+ TEMP_INT,
+ TEMP_EXT,
+ CIN1,
+ CIN1_DIFF,
+ CIN2,
+ CIN2_DIFF,
+};
+
+static const struct iio_chan_spec ad7746_channels[] = {
+ [VIN] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = AD7746_REG_VT_DATA_HIGH << 8 |
+ AD7746_VTSETUP_VTMD_EXT_VIN,
+ },
+ [VIN_VDD] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .extend_name = "supply",
+ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = AD7746_REG_VT_DATA_HIGH << 8 |
+ AD7746_VTSETUP_VTMD_VDD_MON,
+ },
+ [TEMP_INT] = {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .processed_val = IIO_PROCESSED,
+ .address = AD7746_REG_VT_DATA_HIGH << 8 |
+ AD7746_VTSETUP_VTMD_INT_TEMP,
+ },
+ [TEMP_EXT] = {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 1,
+ .processed_val = IIO_PROCESSED,
+ .address = AD7746_REG_VT_DATA_HIGH << 8 |
+ AD7746_VTSETUP_VTMD_EXT_TEMP,
+ },
+ [CIN1] = {
+ .type = IIO_CAPACITANCE,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
+ (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED) |
+ (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = AD7746_REG_CAP_DATA_HIGH << 8,
+ },
+ [CIN1_DIFF] = {
+ .type = IIO_CAPACITANCE,
+ .differential = 1,
+ .indexed = 1,
+ .channel = 0,
+ .channel2 = 2,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
+ (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED) |
+ (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = AD7746_REG_CAP_DATA_HIGH << 8 |
+ AD7746_CAPSETUP_CAPDIFF
+ },
+ [CIN2] = {
+ .type = IIO_CAPACITANCE,
+ .indexed = 1,
+ .channel = 1,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
+ (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED) |
+ (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = AD7746_REG_CAP_DATA_HIGH << 8 |
+ AD7746_CAPSETUP_CIN2,
+ },
+ [CIN2_DIFF] = {
+ .type = IIO_CAPACITANCE,
+ .differential = 1,
+ .indexed = 1,
+ .channel = 1,
+ .channel2 = 3,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
+ (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED) |
+ (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .address = AD7746_REG_CAP_DATA_HIGH << 8 |
+ AD7746_CAPSETUP_CAPDIFF | AD7746_CAPSETUP_CIN2,
+ }
+};
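+
+/*
+ * Note on the .address encoding above: the high byte carries the data
+ * register to read and the low byte carries the setup bits to apply,
+ * e.g. for CIN1_DIFF:
+ *
+ *	address = AD7746_REG_CAP_DATA_HIGH << 8 | AD7746_CAPSETUP_CAPDIFF
+ *	        = 1 << 8 | 1 << 5 = 0x0120
+ *
+ * ad7746_select_channel() recovers the setup bits with (address & 0xFF)
+ * and ad7746_read_raw() recovers the register with (address >> 8).
+ */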
+
+/* Values are Update Rate (Hz), Conversion Time (ms) + 1 */
+static const unsigned char ad7746_vt_filter_rate_table[][2] = {
+ {50, 20 + 1}, {31, 32 + 1}, {16, 62 + 1}, {8, 122 + 1},
+};
+
+static const unsigned char ad7746_cap_filter_rate_table[][2] = {
+ {91, 11 + 1}, {84, 12 + 1}, {50, 20 + 1}, {26, 38 + 1},
+ {16, 62 + 1}, {13, 77 + 1}, {11, 92 + 1}, {9, 110 + 1},
+};
+
+static int ad7746_select_channel(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan)
+{
+ struct ad7746_chip_info *chip = iio_priv(indio_dev);
+ int ret, delay;
+ u8 vt_setup, cap_setup;
+
+ switch (chan->type) {
+ case IIO_CAPACITANCE:
+ cap_setup = (chan->address & 0xFF) | AD7746_CAPSETUP_CAPEN;
+ vt_setup = chip->vt_setup & ~AD7746_VTSETUP_VTEN;
+ delay = ad7746_cap_filter_rate_table[(chip->config >> 3) &
+ 0x7][1];
+
+ if (chip->capdac_set != chan->channel) {
+ ret = i2c_smbus_write_byte_data(chip->client,
+ AD7746_REG_CAPDACA,
+ chip->capdac[chan->channel][0]);
+ if (ret < 0)
+ return ret;
+ ret = i2c_smbus_write_byte_data(chip->client,
+ AD7746_REG_CAPDACB,
+ chip->capdac[chan->channel][1]);
+ if (ret < 0)
+ return ret;
+
+ chip->capdac_set = chan->channel;
+ }
+ break;
+ case IIO_VOLTAGE:
+ case IIO_TEMP:
+ vt_setup = (chan->address & 0xFF) | AD7746_VTSETUP_VTEN;
+ cap_setup = chip->cap_setup & ~AD7746_CAPSETUP_CAPEN;
+		delay = ad7746_vt_filter_rate_table[(chip->config >> 6) &
+						0x3][1];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (chip->cap_setup != cap_setup) {
+ ret = i2c_smbus_write_byte_data(chip->client,
+ AD7746_REG_CAP_SETUP,
+ cap_setup);
+ if (ret < 0)
+ return ret;
+
+ chip->cap_setup = cap_setup;
+ }
+
+ if (chip->vt_setup != vt_setup) {
+ ret = i2c_smbus_write_byte_data(chip->client,
+ AD7746_REG_VT_SETUP,
+ vt_setup);
+ if (ret < 0)
+ return ret;
+
+ chip->vt_setup = vt_setup;
+ }
+
+ return delay;
+}
+
+static inline ssize_t ad7746_start_calib(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len,
+ u8 regval)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7746_chip_info *chip = iio_priv(indio_dev);
+ bool doit;
+ int ret, timeout = 10;
+
+ ret = strtobool(buf, &doit);
+ if (ret < 0)
+ return ret;
+
+ if (!doit)
+ return 0;
+
+ mutex_lock(&indio_dev->mlock);
+ regval |= chip->config;
+ ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CFG, regval);
+ if (ret < 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+
+ do {
+ msleep(20);
+ ret = i2c_smbus_read_byte_data(chip->client, AD7746_REG_CFG);
+ if (ret < 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+ } while ((ret == regval) && timeout--);
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return len;
+}
+
+static ssize_t ad7746_start_offset_calib(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ int ret = ad7746_select_channel(indio_dev,
+ &ad7746_channels[to_iio_dev_attr(attr)->address]);
+ if (ret < 0)
+ return ret;
+
+ return ad7746_start_calib(dev, attr, buf, len,
+ AD7746_CONF_MODE_OFFS_CAL);
+}
+
+static ssize_t ad7746_start_gain_calib(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ int ret = ad7746_select_channel(indio_dev,
+ &ad7746_channels[to_iio_dev_attr(attr)->address]);
+ if (ret < 0)
+ return ret;
+
+ return ad7746_start_calib(dev, attr, buf, len,
+ AD7746_CONF_MODE_GAIN_CAL);
+}
+
+static IIO_DEVICE_ATTR(in_capacitance0_calibbias_calibration,
+ S_IWUSR, NULL, ad7746_start_offset_calib, CIN1);
+static IIO_DEVICE_ATTR(in_capacitance1_calibbias_calibration,
+ S_IWUSR, NULL, ad7746_start_offset_calib, CIN2);
+static IIO_DEVICE_ATTR(in_capacitance0_calibscale_calibration,
+ S_IWUSR, NULL, ad7746_start_gain_calib, CIN1);
+static IIO_DEVICE_ATTR(in_capacitance1_calibscale_calibration,
+ S_IWUSR, NULL, ad7746_start_gain_calib, CIN2);
+static IIO_DEVICE_ATTR(in_voltage0_calibscale_calibration,
+ S_IWUSR, NULL, ad7746_start_gain_calib, VIN);
+
+static ssize_t ad7746_show_cap_filter_rate_setup(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7746_chip_info *chip = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", ad7746_cap_filter_rate_table[
+ (chip->config >> 3) & 0x7][0]);
+}
+
+static ssize_t ad7746_store_cap_filter_rate_setup(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7746_chip_info *chip = iio_priv(indio_dev);
+ u8 data;
+ int ret, i;
+
+ ret = kstrtou8(buf, 10, &data);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(ad7746_cap_filter_rate_table); i++)
+ if (data >= ad7746_cap_filter_rate_table[i][0])
+ break;
+
+ if (i >= ARRAY_SIZE(ad7746_cap_filter_rate_table))
+ i = ARRAY_SIZE(ad7746_cap_filter_rate_table) - 1;
+
+ mutex_lock(&indio_dev->mlock);
+ chip->config &= ~AD7746_CONF_CAPFS(0x7);
+ chip->config |= AD7746_CONF_CAPFS(i);
+ mutex_unlock(&indio_dev->mlock);
+
+ return len;
+}
+
+static ssize_t ad7746_show_vt_filter_rate_setup(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7746_chip_info *chip = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", ad7746_vt_filter_rate_table[
+ (chip->config >> 6) & 0x3][0]);
+}
+
+static ssize_t ad7746_store_vt_filter_rate_setup(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7746_chip_info *chip = iio_priv(indio_dev);
+ u8 data;
+ int ret, i;
+
+ ret = kstrtou8(buf, 10, &data);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(ad7746_vt_filter_rate_table); i++)
+ if (data >= ad7746_vt_filter_rate_table[i][0])
+ break;
+
+ if (i >= ARRAY_SIZE(ad7746_vt_filter_rate_table))
+ i = ARRAY_SIZE(ad7746_vt_filter_rate_table) - 1;
+
+ mutex_lock(&indio_dev->mlock);
+ chip->config &= ~AD7746_CONF_VTFS(0x3);
+ chip->config |= AD7746_CONF_VTFS(i);
+ mutex_unlock(&indio_dev->mlock);
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(in_capacitance_sampling_frequency,
+ S_IRUGO | S_IWUSR, ad7746_show_cap_filter_rate_setup,
+ ad7746_store_cap_filter_rate_setup, 0);
+
+static IIO_DEVICE_ATTR(in_voltage_sampling_frequency,
+ S_IRUGO | S_IWUSR, ad7746_show_vt_filter_rate_setup,
+ ad7746_store_vt_filter_rate_setup, 0);
+
+static IIO_CONST_ATTR(in_voltage_sampling_frequency_available, "50 31 16 8");
+static IIO_CONST_ATTR(in_capacitance_sampling_frequency_available,
+ "91 84 50 26 16 13 11 9");
+
+static struct attribute *ad7746_attributes[] = {
+ &iio_dev_attr_in_capacitance_sampling_frequency.dev_attr.attr,
+ &iio_dev_attr_in_voltage_sampling_frequency.dev_attr.attr,
+ &iio_dev_attr_in_capacitance0_calibbias_calibration.dev_attr.attr,
+ &iio_dev_attr_in_capacitance0_calibscale_calibration.dev_attr.attr,
+ &iio_dev_attr_in_capacitance1_calibscale_calibration.dev_attr.attr,
+ &iio_dev_attr_in_capacitance1_calibbias_calibration.dev_attr.attr,
+ &iio_dev_attr_in_voltage0_calibscale_calibration.dev_attr.attr,
+ &iio_const_attr_in_voltage_sampling_frequency_available.dev_attr.attr,
+ &iio_const_attr_in_capacitance_sampling_frequency_available.
+ dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad7746_attribute_group = {
+ .attrs = ad7746_attributes,
+};
+
+static int ad7746_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ad7746_chip_info *chip = iio_priv(indio_dev);
+ int ret, reg;
+
+ mutex_lock(&indio_dev->mlock);
+
+ switch (mask) {
+ case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ if (val != 1) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ val = (val2 * 1024) / 15625;
+
+ switch (chan->type) {
+ case IIO_CAPACITANCE:
+ reg = AD7746_REG_CAP_GAINH;
+ break;
+ case IIO_VOLTAGE:
+ reg = AD7746_REG_VOLT_GAINH;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = i2c_smbus_write_word_data(chip->client, reg, swab16(val));
+ if (ret < 0)
+ goto out;
+
+ ret = 0;
+ break;
+ case (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED):
+		if ((val < 0) || (val > 0xFFFF)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = i2c_smbus_write_word_data(chip->client,
+ AD7746_REG_CAP_OFFH, swab16(val));
+ if (ret < 0)
+ goto out;
+
+ ret = 0;
+ break;
+ case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+		if ((val < 0) || (val > 43008000)) { /* 21pF */
+ ret = -EINVAL;
+ goto out;
+ }
+
+		/*
+		 * CAPDAC Scale = 21pF_typ / 127
+		 * CIN Scale = 8.192pF / 2^24
+		 * Offset Scale = CAPDAC Scale / CIN Scale = 338646
+		 */
+
+ val /= 338646;
+
+ chip->capdac[chan->channel][chan->differential] = (val > 0 ?
+ AD7746_CAPDAC_DACP(val) | AD7746_CAPDAC_DACEN : 0);
+
+ ret = i2c_smbus_write_byte_data(chip->client,
+ AD7746_REG_CAPDACA,
+ chip->capdac[chan->channel][0]);
+ if (ret < 0)
+ goto out;
+ ret = i2c_smbus_write_byte_data(chip->client,
+ AD7746_REG_CAPDACB,
+ chip->capdac[chan->channel][1]);
+ if (ret < 0)
+ goto out;
+
+ chip->capdac_set = chan->channel;
+
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
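+
+/*
+ * Worked example for the offset scale used above: one CAPDAC step is
+ * 21 pF / 127 ~= 165.4 fF = 165354 aF, and one CIN LSB is
+ * 8.192 pF / 2^24 ~= 0.488 aF, so one DAC step corresponds to
+ * 165354 / 0.488 ~= 338646 raw units. Writing an offset of 10159380
+ * therefore programs 10159380 / 338646 = 30 into AD7746_CAPDAC_DACP().
+ */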
+
+static int ad7746_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct ad7746_chip_info *chip = iio_priv(indio_dev);
+ int ret, delay;
+ u8 regval, reg;
+
+ union {
+ u32 d32;
+ u8 d8[4];
+ } data;
+
+ mutex_lock(&indio_dev->mlock);
+
+ switch (mask) {
+ case 0:
+ ret = ad7746_select_channel(indio_dev, chan);
+ if (ret < 0)
+ goto out;
+ delay = ret;
+
+ regval = chip->config | AD7746_CONF_MODE_SINGLE_CONV;
+ ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CFG,
+ regval);
+ if (ret < 0)
+ goto out;
+
+ msleep(delay);
+ /* Now read the actual register */
+
+ ret = i2c_smbus_read_i2c_block_data(chip->client,
+ chan->address >> 8, 3, &data.d8[1]);
+
+ if (ret < 0)
+ goto out;
+
+ *val = (be32_to_cpu(data.d32) & 0xFFFFFF) - 0x800000;
+
+ switch (chan->type) {
+ case IIO_TEMP:
+			/*
+			 * Temperature in milli degrees Celsius:
+			 * T = ((raw / 2048) - 4096) * 1000;
+			 * the 0x800000 (= 4096 * 2048) offset was already
+			 * subtracted above, leaving only the scaling by
+			 * 1000 / 2048 = 125 / 256:
+			 */
+			*val = (*val * 125) / 256;
+ break;
+ case IIO_VOLTAGE:
+ if (chan->channel == 1) /* supply_raw*/
+ *val = *val * 6;
+ break;
+ default:
+ break;
+ }
+
+ ret = IIO_VAL_INT;
+ break;
+ case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ switch (chan->type) {
+ case IIO_CAPACITANCE:
+ reg = AD7746_REG_CAP_GAINH;
+ break;
+ case IIO_VOLTAGE:
+ reg = AD7746_REG_VOLT_GAINH;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = i2c_smbus_read_word_data(chip->client, reg);
+ if (ret < 0)
+ goto out;
+ /* 1 + gain_val / 2^16 */
+ *val = 1;
+ *val2 = (15625 * swab16(ret)) / 1024;
+
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ case (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED):
+ ret = i2c_smbus_read_word_data(chip->client,
+ AD7746_REG_CAP_OFFH);
+ if (ret < 0)
+ goto out;
+ *val = swab16(ret);
+
+ ret = IIO_VAL_INT;
+ break;
+ case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ *val = AD7746_CAPDAC_DACP(chip->capdac[chan->channel]
+ [chan->differential]) * 338646;
+
+ ret = IIO_VAL_INT;
+ break;
+ case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ switch (chan->type) {
+ case IIO_CAPACITANCE:
+ /* 8.192pf / 2^24 */
+ *val2 = 488;
+ *val = 0;
+ break;
+ case IIO_VOLTAGE:
+ /* 1170mV / 2^23 */
+ *val2 = 139475;
+ *val = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = IIO_VAL_INT_PLUS_NANO;
+ break;
+ default:
+ ret = -EINVAL;
+	}
+out:
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+
+static const struct iio_info ad7746_info = {
+ .attrs = &ad7746_attribute_group,
+ .read_raw = &ad7746_read_raw,
+ .write_raw = &ad7746_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+/*
+ * device probe and remove
+ */
+
+static int __devinit ad7746_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ad7746_platform_data *pdata = client->dev.platform_data;
+ struct ad7746_chip_info *chip;
+ struct iio_dev *indio_dev;
+ int ret = 0;
+ unsigned char regval = 0;
+
+ indio_dev = iio_allocate_device(sizeof(*chip));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ chip = iio_priv(indio_dev);
+ /* this is only used for device removal purposes */
+ i2c_set_clientdata(client, indio_dev);
+
+ chip->client = client;
+ chip->capdac_set = -1;
+
+ /* Establish that the iio_dev is a child of the i2c device */
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &ad7746_info;
+ indio_dev->channels = ad7746_channels;
+	if (id->driver_data == 7746)
+		indio_dev->num_channels = ARRAY_SIZE(ad7746_channels);
+	else
+		indio_dev->num_channels = ARRAY_SIZE(ad7746_channels) - 2;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ if (pdata) {
+ if (pdata->exca_en) {
+ if (pdata->exca_inv_en)
+ regval |= AD7746_EXCSETUP_NEXCA;
+ else
+ regval |= AD7746_EXCSETUP_EXCA;
+ }
+
+ if (pdata->excb_en) {
+ if (pdata->excb_inv_en)
+ regval |= AD7746_EXCSETUP_NEXCB;
+ else
+ regval |= AD7746_EXCSETUP_EXCB;
+ }
+
+ regval |= AD7746_EXCSETUP_EXCLVL(pdata->exclvl);
+ } else {
+		dev_warn(&client->dev, "no platform data, using defaults\n");
+ regval = AD7746_EXCSETUP_EXCA | AD7746_EXCSETUP_EXCB |
+ AD7746_EXCSETUP_EXCLVL(3);
+ }
+
+ ret = i2c_smbus_write_byte_data(chip->client,
+ AD7746_REG_EXC_SETUP, regval);
+ if (ret < 0)
+ goto error_free_dev;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_free_dev;
+
+ dev_info(&client->dev, "%s capacitive sensor registered\n", id->name);
+
+ return 0;
+
+error_free_dev:
+ iio_free_device(indio_dev);
+error_ret:
+ return ret;
+}
+
+static int __devexit ad7746_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id ad7746_id[] = {
+ { "ad7745", 7745 },
+ { "ad7746", 7746 },
+ { "ad7747", 7747 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ad7746_id);
+
+static struct i2c_driver ad7746_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+ .probe = ad7746_probe,
+ .remove = __devexit_p(ad7746_remove),
+ .id_table = ad7746_id,
+};
+
+static __init int ad7746_init(void)
+{
+ return i2c_add_driver(&ad7746_driver);
+}
+
+static __exit void ad7746_exit(void)
+{
+ i2c_del_driver(&ad7746_driver);
+}
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD7746/5/7 capacitive sensor driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(ad7746_init);
+module_exit(ad7746_exit);
+ #include <linux/export.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/spi/spi.h>
+ #include <linux/export.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "adis16260.h"
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/bitops.h>
+ #include <linux/export.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../ring_sw.h"
-#include "../accel/accel.h"
-#include "../trigger.h"
+#include "../trigger_consumer.h"
#include "adis16400.h"
/**
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/spi/spi.h>
+ #include <linux/export.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "adis16400.h"
--- /dev/null
+/* The industrial I/O core
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Handling of buffer allocation / resizing.
+ *
+ *
+ * Things to look at here.
+ * - Better memory allocation techniques?
+ * - Alternative access techniques?
+ */
+#include <linux/kernel.h>
++#include <linux/export.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+
+#include "iio.h"
+#include "iio_core.h"
+#include "sysfs.h"
+#include "buffer_generic.h"
+
+static const char * const iio_endian_prefix[] = {
+ [IIO_BE] = "be",
+ [IIO_LE] = "le",
+};
+
+/**
+ * iio_buffer_read_first_n_outer() - chrdev read for buffer access
+ *
+ * This function relies on all buffer implementations having an
+ * iio_buffer as their first element.
+ **/
+ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
+ size_t n, loff_t *f_ps)
+{
+ struct iio_dev *indio_dev = filp->private_data;
+ struct iio_buffer *rb = indio_dev->buffer;
+
+ if (!rb->access->read_first_n)
+ return -EINVAL;
+ return rb->access->read_first_n(rb, n, buf);
+}
+
+/**
+ * iio_buffer_poll() - poll the buffer to find out if it has data
+ */
+unsigned int iio_buffer_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct iio_dev *indio_dev = filp->private_data;
+ struct iio_buffer *rb = indio_dev->buffer;
+
+ poll_wait(filp, &rb->pollq, wait);
+ if (rb->stufftoread)
+ return POLLIN | POLLRDNORM;
+ /* need a way of knowing if there may be enough data... */
+ return 0;
+}
+
+int iio_chrdev_buffer_open(struct iio_dev *indio_dev)
+{
+ struct iio_buffer *rb = indio_dev->buffer;
+ if (!rb)
+ return -EINVAL;
+ if (rb->access->mark_in_use)
+ rb->access->mark_in_use(rb);
+ return 0;
+}
+
+void iio_chrdev_buffer_release(struct iio_dev *indio_dev)
+{
+ struct iio_buffer *rb = indio_dev->buffer;
+
+ clear_bit(IIO_BUSY_BIT_POS, &rb->flags);
+ if (rb->access->unmark_in_use)
+ rb->access->unmark_in_use(rb);
+}
+
+void iio_buffer_init(struct iio_buffer *buffer, struct iio_dev *dev_info)
+{
+ buffer->indio_dev = dev_info;
+ init_waitqueue_head(&buffer->pollq);
+}
+EXPORT_SYMBOL(iio_buffer_init);
+
+static ssize_t iio_show_scan_index(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
+}
+
+static ssize_t iio_show_fixed_type(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ u8 type = this_attr->c->scan_type.endianness;
+
+	if (type == IIO_CPU) {
+#ifdef __LITTLE_ENDIAN
+		type = IIO_LE;
+#else
+		type = IIO_BE;
+#endif
+	}
+ return sprintf(buf, "%s:%c%d/%d>>%u\n",
+ iio_endian_prefix[type],
+ this_attr->c->scan_type.sign,
+ this_attr->c->scan_type.realbits,
+ this_attr->c->scan_type.storagebits,
+ this_attr->c->scan_type.shift);
+}
+
+static ssize_t iio_scan_el_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+
+ ret = iio_scan_mask_query(dev_info->buffer,
+ to_iio_dev_attr(attr)->address);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", ret);
+}
+
+static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
+{
+ clear_bit(bit, buffer->scan_mask);
+ buffer->scan_count--;
+ return 0;
+}
+
+static ssize_t iio_scan_el_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int ret = 0;
+ bool state;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ state = !(buf[0] == '0');
+ mutex_lock(&indio_dev->mlock);
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ ret = -EBUSY;
+ goto error_ret;
+ }
+ ret = iio_scan_mask_query(buffer, this_attr->address);
+ if (ret < 0)
+ goto error_ret;
+ if (!state && ret) {
+ ret = iio_scan_mask_clear(buffer, this_attr->address);
+ if (ret)
+ goto error_ret;
+ } else if (state && !ret) {
+ ret = iio_scan_mask_set(buffer, this_attr->address);
+ if (ret)
+ goto error_ret;
+ }
+
+error_ret:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static ssize_t iio_scan_el_ts_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", dev_info->buffer->scan_timestamp);
+}
+
+static ssize_t iio_scan_el_ts_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int ret = 0;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ bool state;
+
+ state = !(buf[0] == '0');
+ mutex_lock(&indio_dev->mlock);
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ ret = -EBUSY;
+ goto error_ret;
+ }
+ indio_dev->buffer->scan_timestamp = state;
+error_ret:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ int ret, attrcount = 0;
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ ret = __iio_add_chan_devattr("index",
+ chan,
+ &iio_show_scan_index,
+ NULL,
+ 0,
+ 0,
+ &indio_dev->dev,
+ &buffer->scan_el_dev_attr_list);
+ if (ret)
+ goto error_ret;
+ attrcount++;
+ ret = __iio_add_chan_devattr("type",
+ chan,
+ &iio_show_fixed_type,
+ NULL,
+ 0,
+ 0,
+ &indio_dev->dev,
+ &buffer->scan_el_dev_attr_list);
+ if (ret)
+ goto error_ret;
+ attrcount++;
+ if (chan->type != IIO_TIMESTAMP)
+ ret = __iio_add_chan_devattr("en",
+ chan,
+ &iio_scan_el_show,
+ &iio_scan_el_store,
+ chan->scan_index,
+ 0,
+ &indio_dev->dev,
+ &buffer->scan_el_dev_attr_list);
+ else
+ ret = __iio_add_chan_devattr("en",
+ chan,
+ &iio_scan_el_ts_show,
+ &iio_scan_el_ts_store,
+ chan->scan_index,
+ 0,
+ &indio_dev->dev,
+ &buffer->scan_el_dev_attr_list);
+ attrcount++;
+ ret = attrcount;
+error_ret:
+ return ret;
+}
+
+static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
+ struct iio_dev_attr *p)
+{
+ kfree(p->dev_attr.attr.name);
+ kfree(p);
+}
+
+static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
+{
+ struct iio_dev_attr *p, *n;
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ list_for_each_entry_safe(p, n,
+ &buffer->scan_el_dev_attr_list, l)
+ iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
+}
+
+static const char * const iio_scan_elements_group_name = "scan_elements";
+
+int iio_buffer_register(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *channels,
+ int num_channels)
+{
+ struct iio_dev_attr *p;
+ struct attribute **attr;
+ struct iio_buffer *buffer = indio_dev->buffer;
+ int ret, i, attrn, attrcount, attrcount_orig = 0;
+
+ if (buffer->attrs)
+ indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;
+
+ if (buffer->scan_el_attrs != NULL) {
+ attr = buffer->scan_el_attrs->attrs;
+ while (*attr++ != NULL)
+ attrcount_orig++;
+ }
+ attrcount = attrcount_orig;
+ INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
+ if (channels) {
+ /* new magic */
+ for (i = 0; i < num_channels; i++) {
+ /* Establish necessary mask length */
+			if (channels[i].scan_index >
+			    (int)indio_dev->masklength - 1)
+				indio_dev->masklength
+					= channels[i].scan_index + 1;
+
+ ret = iio_buffer_add_channel_sysfs(indio_dev,
+ &channels[i]);
+ if (ret < 0)
+ goto error_cleanup_dynamic;
+ attrcount += ret;
+ }
+ if (indio_dev->masklength && buffer->scan_mask == NULL) {
+ buffer->scan_mask
+ = kzalloc(sizeof(*buffer->scan_mask)*
+ BITS_TO_LONGS(indio_dev->masklength),
+ GFP_KERNEL);
+ if (buffer->scan_mask == NULL) {
+ ret = -ENOMEM;
+ goto error_cleanup_dynamic;
+ }
+ }
+ }
+
+ buffer->scan_el_group.name = iio_scan_elements_group_name;
+
+ buffer->scan_el_group.attrs
+ = kzalloc(sizeof(buffer->scan_el_group.attrs[0])*
+ (attrcount + 1),
+ GFP_KERNEL);
+ if (buffer->scan_el_group.attrs == NULL) {
+ ret = -ENOMEM;
+ goto error_free_scan_mask;
+ }
+	if (buffer->scan_el_attrs)
+		memcpy(buffer->scan_el_group.attrs,
+		       buffer->scan_el_attrs->attrs,
+		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
+ attrn = attrcount_orig;
+
+ list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
+ buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
+ indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;
+
+ return 0;
+
+error_free_scan_mask:
+ kfree(buffer->scan_mask);
+error_cleanup_dynamic:
+ __iio_buffer_attr_cleanup(indio_dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(iio_buffer_register);
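+
+/*
+ * For example, a device whose channels carry scan_index 0, 1 and 3
+ * (index 2 unused) ends up with masklength = 4, and scan_mask gets
+ * BITS_TO_LONGS(4) = 1 long allocated for it.
+ */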
+
+void iio_buffer_unregister(struct iio_dev *indio_dev)
+{
+ kfree(indio_dev->buffer->scan_mask);
+ kfree(indio_dev->buffer->scan_el_group.attrs);
+ __iio_buffer_attr_cleanup(indio_dev);
+}
+EXPORT_SYMBOL(iio_buffer_unregister);
+
+ssize_t iio_buffer_read_length(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ if (buffer->access->get_length)
+ return sprintf(buf, "%d\n",
+ buffer->access->get_length(buffer));
+
+ return 0;
+}
+EXPORT_SYMBOL(iio_buffer_read_length);
+
+ssize_t iio_buffer_write_length(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int ret;
+ ulong val;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (buffer->access->get_length)
+ if (val == buffer->access->get_length(buffer))
+ return len;
+
+ if (buffer->access->set_length) {
+ buffer->access->set_length(buffer, val);
+ if (buffer->access->mark_param_change)
+ buffer->access->mark_param_change(buffer);
+ }
+
+ return len;
+}
+EXPORT_SYMBOL(iio_buffer_write_length);
+
+ssize_t iio_buffer_read_bytes_per_datum(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ if (buffer->access->get_bytes_per_datum)
+ return sprintf(buf, "%d\n",
+ buffer->access->get_bytes_per_datum(buffer));
+
+ return 0;
+}
+EXPORT_SYMBOL(iio_buffer_read_bytes_per_datum);
+
+ssize_t iio_buffer_store_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int ret;
+ bool requested_state, current_state;
+ int previous_mode;
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ struct iio_buffer *buffer = dev_info->buffer;
+
+ mutex_lock(&dev_info->mlock);
+ previous_mode = dev_info->currentmode;
+ requested_state = !(buf[0] == '0');
+ current_state = !!(previous_mode & INDIO_ALL_BUFFER_MODES);
+ if (current_state == requested_state) {
+ printk(KERN_INFO "iio-buffer, current state requested again\n");
+ goto done;
+ }
+ if (requested_state) {
+ if (buffer->setup_ops->preenable) {
+ ret = buffer->setup_ops->preenable(dev_info);
+ if (ret) {
+ printk(KERN_ERR
+				       "Buffer not started: "
+ "buffer preenable failed\n");
+ goto error_ret;
+ }
+ }
+ if (buffer->access->request_update) {
+ ret = buffer->access->request_update(buffer);
+ if (ret) {
+ printk(KERN_INFO
+				       "Buffer not started: "
+ "buffer parameter update failed\n");
+ goto error_ret;
+ }
+ }
+ if (buffer->access->mark_in_use)
+ buffer->access->mark_in_use(buffer);
+		/* Definitely possible for devices to support both of these. */
+ if (dev_info->modes & INDIO_BUFFER_TRIGGERED) {
+ if (!dev_info->trig) {
+ printk(KERN_INFO
+ "Buffer not started: no trigger\n");
+ ret = -EINVAL;
+ if (buffer->access->unmark_in_use)
+ buffer->access->unmark_in_use(buffer);
+ goto error_ret;
+ }
+ dev_info->currentmode = INDIO_BUFFER_TRIGGERED;
+ } else if (dev_info->modes & INDIO_BUFFER_HARDWARE)
+ dev_info->currentmode = INDIO_BUFFER_HARDWARE;
+ else { /* should never be reached */
+ ret = -EINVAL;
+ goto error_ret;
+ }
+
+ if (buffer->setup_ops->postenable) {
+ ret = buffer->setup_ops->postenable(dev_info);
+ if (ret) {
+ printk(KERN_INFO
+				       "Buffer not started: "
+ "postenable failed\n");
+ if (buffer->access->unmark_in_use)
+ buffer->access->unmark_in_use(buffer);
+ dev_info->currentmode = previous_mode;
+ if (buffer->setup_ops->postdisable)
+					buffer->setup_ops->postdisable(dev_info);
+ goto error_ret;
+ }
+ }
+ } else {
+ if (buffer->setup_ops->predisable) {
+ ret = buffer->setup_ops->predisable(dev_info);
+ if (ret)
+ goto error_ret;
+ }
+ if (buffer->access->unmark_in_use)
+ buffer->access->unmark_in_use(buffer);
+ dev_info->currentmode = INDIO_DIRECT_MODE;
+ if (buffer->setup_ops->postdisable) {
+ ret = buffer->setup_ops->postdisable(dev_info);
+ if (ret)
+ goto error_ret;
+ }
+ }
+done:
+ mutex_unlock(&dev_info->mlock);
+ return len;
+
+error_ret:
+ mutex_unlock(&dev_info->mlock);
+ return ret;
+}
+EXPORT_SYMBOL(iio_buffer_store_enable);
+
+ssize_t iio_buffer_show_enable(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *dev_info = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", !!(dev_info->currentmode
+ & INDIO_ALL_BUFFER_MODES));
+}
+EXPORT_SYMBOL(iio_buffer_show_enable);
+
+int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct iio_buffer *buffer = indio_dev->buffer;
+ size_t size;
+ dev_dbg(&indio_dev->dev, "%s\n", __func__);
+ /* Check if there are any scan elements enabled, if not fail*/
+ if (!(buffer->scan_count || buffer->scan_timestamp))
+ return -EINVAL;
+ if (buffer->scan_timestamp)
+ if (buffer->scan_count)
+ /* Timestamp (aligned to s64) and data */
+ size = (((buffer->scan_count * buffer->bpe)
+ + sizeof(s64) - 1)
+ & ~(sizeof(s64) - 1))
+ + sizeof(s64);
+ else /* Timestamp only */
+ size = sizeof(s64);
+ else /* Data only */
+ size = buffer->scan_count * buffer->bpe;
+ buffer->access->set_bytes_per_datum(buffer, size);
+
+ return 0;
+}
+EXPORT_SYMBOL(iio_sw_buffer_preenable);
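+
+/*
+ * Worked example of the size computation above: with three 2-byte scan
+ * elements (scan_count = 3, bpe = 2) and the timestamp enabled, the
+ * 6 data bytes are padded up to the next s64 boundary (8) and the
+ * 8-byte timestamp is appended, giving 16 bytes per datum.
+ */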
+
+
+/* Note: NULL is used as the error indicator since a NULL mask makes no
+ * sense as a valid result here. */
+static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
+ unsigned int masklength,
+ unsigned long *mask)
+{
+ if (bitmap_empty(mask, masklength))
+ return NULL;
+ while (*av_masks) {
+ if (bitmap_subset(mask, av_masks, masklength))
+ return av_masks;
+ av_masks += BITS_TO_LONGS(masklength);
+ }
+ return NULL;
+}
+
+/**
+ * iio_scan_mask_set() - set particular bit in the scan mask
+ * @buffer: the buffer whose scan mask we are interested in
+ * @bit: the bit to be set.
+ **/
+int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
+{
+ struct iio_dev *dev_info = buffer->indio_dev;
+ unsigned long *mask;
+ unsigned long *trialmask;
+
+	if (!dev_info->masklength) {
+		WARN(1, "trying to set scan mask prior to registering buffer\n");
+		return -EINVAL;
+	}
+
+	trialmask = kmalloc(sizeof(*trialmask) *
+			    BITS_TO_LONGS(dev_info->masklength),
+			    GFP_KERNEL);
+	if (trialmask == NULL)
+		return -ENOMEM;
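+	/* Work on a copy so a rejected bit leaves scan_mask untouched. */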
+ bitmap_copy(trialmask, buffer->scan_mask, dev_info->masklength);
+ set_bit(bit, trialmask);
+
+ if (dev_info->available_scan_masks) {
+ mask = iio_scan_mask_match(dev_info->available_scan_masks,
+ dev_info->masklength,
+ trialmask);
+ if (!mask) {
+ kfree(trialmask);
+ return -EINVAL;
+ }
+ }
+ bitmap_copy(buffer->scan_mask, trialmask, dev_info->masklength);
+ buffer->scan_count++;
+
+ kfree(trialmask);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iio_scan_mask_set);
+
+int iio_scan_mask_query(struct iio_buffer *buffer, int bit)
+{
+ struct iio_dev *dev_info = buffer->indio_dev;
+	unsigned long *mask;
+
+	if (bit >= dev_info->masklength)
+ return -EINVAL;
+
+ if (!buffer->scan_mask)
+ return 0;
+ if (dev_info->available_scan_masks)
+ mask = iio_scan_mask_match(dev_info->available_scan_masks,
+ dev_info->masklength,
+ buffer->scan_mask);
+ else
+ mask = buffer->scan_mask;
+ if (!mask)
+ return 0;
+
+ return test_bit(bit, mask);
+}
+EXPORT_SYMBOL_GPL(iio_scan_mask_query);
*
* Licensed under the GPL-2.
*/
+ #include <linux/export.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
*/
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/spi/spi.h>
+ #include <linux/export.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "ade7758.h"
#define _HAL_INIT_C_
+#include <linux/usb.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/firmware.h>
++#include <linux/module.h>
+
#include "osdep_service.h"
#include "drv_types.h"
#include "rtl871x_byteorder.h"
#define _HCI_INTF_C_
+#include <linux/usb.h>
++#include <linux/module.h>
+
#include "osdep_service.h"
#include "drv_types.h"
#include "recv_osdep.h"
--- /dev/null
+/* Driver for Realtek RTS51xx USB card reader
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
++#include <linux/export.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_device.h>
+
+#include "debug.h"
+#include "rts51x.h"
+#include "rts51x_chip.h"
+#include "rts51x_scsi.h"
+#include "rts51x_card.h"
+#include "rts51x_transport.h"
+#include "rts51x_sys.h"
+#include "sd_cprm.h"
+#include "ms_mg.h"
+#include "trace.h"
+
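+/* Decode a SCSI CDB opcode into a human-readable name for debug output. */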
+void scsi_show_command(struct scsi_cmnd *srb)
+{
+ char *what = NULL;
+ int i, unknown_cmd = 0;
+
+ switch (srb->cmnd[0]) {
+ case TEST_UNIT_READY:
+ what = (char *)"TEST_UNIT_READY";
+ break;
+ case REZERO_UNIT:
+ what = (char *)"REZERO_UNIT";
+ break;
+ case REQUEST_SENSE:
+ what = (char *)"REQUEST_SENSE";
+ break;
+ case FORMAT_UNIT:
+ what = (char *)"FORMAT_UNIT";
+ break;
+ case READ_BLOCK_LIMITS:
+ what = (char *)"READ_BLOCK_LIMITS";
+ break;
+ case 0x07:
+ what = (char *)"REASSIGN_BLOCKS";
+ break;
+ case READ_6:
+ what = (char *)"READ_6";
+ break;
+ case WRITE_6:
+ what = (char *)"WRITE_6";
+ break;
+ case SEEK_6:
+ what = (char *)"SEEK_6";
+ break;
+ case READ_REVERSE:
+ what = (char *)"READ_REVERSE";
+ break;
+ case WRITE_FILEMARKS:
+ what = (char *)"WRITE_FILEMARKS";
+ break;
+ case SPACE:
+ what = (char *)"SPACE";
+ break;
+ case INQUIRY:
+ what = (char *)"INQUIRY";
+ break;
+ case RECOVER_BUFFERED_DATA:
+ what = (char *)"RECOVER_BUFFERED_DATA";
+ break;
+ case MODE_SELECT:
+ what = (char *)"MODE_SELECT";
+ break;
+ case RESERVE:
+ what = (char *)"RESERVE";
+ break;
+ case RELEASE:
+ what = (char *)"RELEASE";
+ break;
+ case COPY:
+ what = (char *)"COPY";
+ break;
+ case ERASE:
+ what = (char *)"ERASE";
+ break;
+ case MODE_SENSE:
+ what = (char *)"MODE_SENSE";
+ break;
+ case START_STOP:
+ what = (char *)"START_STOP";
+ break;
+ case RECEIVE_DIAGNOSTIC:
+ what = (char *)"RECEIVE_DIAGNOSTIC";
+ break;
+ case SEND_DIAGNOSTIC:
+ what = (char *)"SEND_DIAGNOSTIC";
+ break;
+ case ALLOW_MEDIUM_REMOVAL:
+ what = (char *)"ALLOW_MEDIUM_REMOVAL";
+ break;
+ case SET_WINDOW:
+ what = (char *)"SET_WINDOW";
+ break;
+ case READ_CAPACITY:
+ what = (char *)"READ_CAPACITY";
+ break;
+ case READ_10:
+ what = (char *)"READ_10";
+ break;
+ case WRITE_10:
+ what = (char *)"WRITE_10";
+ break;
+ case SEEK_10:
+ what = (char *)"SEEK_10";
+ break;
+ case WRITE_VERIFY:
+ what = (char *)"WRITE_VERIFY";
+ break;
+ case VERIFY:
+ what = (char *)"VERIFY";
+ break;
+ case SEARCH_HIGH:
+ what = (char *)"SEARCH_HIGH";
+ break;
+ case SEARCH_EQUAL:
+ what = (char *)"SEARCH_EQUAL";
+ break;
+ case SEARCH_LOW:
+ what = (char *)"SEARCH_LOW";
+ break;
+ case SET_LIMITS:
+ what = (char *)"SET_LIMITS";
+ break;
+ case READ_POSITION:
+ what = (char *)"READ_POSITION";
+ break;
+ case SYNCHRONIZE_CACHE:
+ what = (char *)"SYNCHRONIZE_CACHE";
+ break;
+ case LOCK_UNLOCK_CACHE:
+ what = (char *)"LOCK_UNLOCK_CACHE";
+ break;
+ case READ_DEFECT_DATA:
+ what = (char *)"READ_DEFECT_DATA";
+ break;
+ case MEDIUM_SCAN:
+ what = (char *)"MEDIUM_SCAN";
+ break;
+ case COMPARE:
+ what = (char *)"COMPARE";
+ break;
+ case COPY_VERIFY:
+ what = (char *)"COPY_VERIFY";
+ break;
+ case WRITE_BUFFER:
+ what = (char *)"WRITE_BUFFER";
+ break;
+ case READ_BUFFER:
+ what = (char *)"READ_BUFFER";
+ break;
+ case UPDATE_BLOCK:
+ what = (char *)"UPDATE_BLOCK";
+ break;
+ case READ_LONG:
+ what = (char *)"READ_LONG";
+ break;
+ case WRITE_LONG:
+ what = (char *)"WRITE_LONG";
+ break;
+ case CHANGE_DEFINITION:
+ what = (char *)"CHANGE_DEFINITION";
+ break;
+ case WRITE_SAME:
+ what = (char *)"WRITE_SAME";
+ break;
+ case GPCMD_READ_SUBCHANNEL:
+ what = (char *)"READ SUBCHANNEL";
+ break;
+ case READ_TOC:
+ what = (char *)"READ_TOC";
+ break;
+ case GPCMD_READ_HEADER:
+ what = (char *)"READ HEADER";
+ break;
+ case GPCMD_PLAY_AUDIO_10:
+ what = (char *)"PLAY AUDIO (10)";
+ break;
+ case GPCMD_PLAY_AUDIO_MSF:
+ what = (char *)"PLAY AUDIO MSF";
+ break;
+ case GPCMD_GET_EVENT_STATUS_NOTIFICATION:
+ what = (char *)"GET EVENT/STATUS NOTIFICATION";
+ break;
+ case GPCMD_PAUSE_RESUME:
+ what = (char *)"PAUSE/RESUME";
+ break;
+ case LOG_SELECT:
+ what = (char *)"LOG_SELECT";
+ break;
+ case LOG_SENSE:
+ what = (char *)"LOG_SENSE";
+ break;
+ case GPCMD_STOP_PLAY_SCAN:
+ what = (char *)"STOP PLAY/SCAN";
+ break;
+ case GPCMD_READ_DISC_INFO:
+ what = (char *)"READ DISC INFORMATION";
+ break;
+ case GPCMD_READ_TRACK_RZONE_INFO:
+ what = (char *)"READ TRACK INFORMATION";
+ break;
+ case GPCMD_RESERVE_RZONE_TRACK:
+ what = (char *)"RESERVE TRACK";
+ break;
+ case GPCMD_SEND_OPC:
+ what = (char *)"SEND OPC";
+ break;
+ case MODE_SELECT_10:
+ what = (char *)"MODE_SELECT_10";
+ break;
+ case GPCMD_REPAIR_RZONE_TRACK:
+ what = (char *)"REPAIR TRACK";
+ break;
+ case 0x59:
+ what = (char *)"READ MASTER CUE";
+ break;
+ case MODE_SENSE_10:
+ what = (char *)"MODE_SENSE_10";
+ break;
+ case GPCMD_CLOSE_TRACK:
+ what = (char *)"CLOSE TRACK/SESSION";
+ break;
+ case 0x5C:
+ what = (char *)"READ BUFFER CAPACITY";
+ break;
+ case 0x5D:
+ what = (char *)"SEND CUE SHEET";
+ break;
+ case GPCMD_BLANK:
+ what = (char *)"BLANK";
+ break;
+ case REPORT_LUNS:
+ what = (char *)"REPORT LUNS";
+ break;
+ case MOVE_MEDIUM:
+ what = (char *)"MOVE_MEDIUM or PLAY AUDIO (12)";
+ break;
+ case READ_12:
+ what = (char *)"READ_12";
+ break;
+ case WRITE_12:
+ what = (char *)"WRITE_12";
+ break;
+ case WRITE_VERIFY_12:
+ what = (char *)"WRITE_VERIFY_12";
+ break;
+ case SEARCH_HIGH_12:
+ what = (char *)"SEARCH_HIGH_12";
+ break;
+ case SEARCH_EQUAL_12:
+ what = (char *)"SEARCH_EQUAL_12";
+ break;
+ case SEARCH_LOW_12:
+ what = (char *)"SEARCH_LOW_12";
+ break;
+ case SEND_VOLUME_TAG:
+ what = (char *)"SEND_VOLUME_TAG";
+ break;
+ case READ_ELEMENT_STATUS:
+ what = (char *)"READ_ELEMENT_STATUS";
+ break;
+ case GPCMD_READ_CD_MSF:
+ what = (char *)"READ CD MSF";
+ break;
+ case GPCMD_SCAN:
+ what = (char *)"SCAN";
+ break;
+ case GPCMD_SET_SPEED:
+ what = (char *)"SET CD SPEED";
+ break;
+ case GPCMD_MECHANISM_STATUS:
+ what = (char *)"MECHANISM STATUS";
+ break;
+ case GPCMD_READ_CD:
+ what = (char *)"READ CD";
+ break;
+ case 0xE1:
+ what = (char *)"WRITE CONTINUE";
+ break;
+ case WRITE_LONG_2:
+ what = (char *)"WRITE_LONG_2";
+ break;
+ case VENDOR_CMND:
+ what = (char *)"Realtek's vendor command";
+ break;
+ default:
+ what = (char *)"(unknown command)";
+ unknown_cmd = 1;
+ break;
+ }
+
+ if (srb->cmnd[0] != TEST_UNIT_READY)
+ RTS51X_DEBUGP("Command %s (%d bytes)\n", what, srb->cmd_len);
+ if (unknown_cmd) {
+ RTS51X_DEBUGP("");
+ for (i = 0; i < srb->cmd_len && i < 16; i++)
+ RTS51X_DEBUGPN(" %02x", srb->cmnd[i]);
+ RTS51X_DEBUGPN("\n");
+ }
+}
+
+void set_sense_type(struct rts51x_chip *chip, unsigned int lun, int sense_type)
+{
+ switch (sense_type) {
+ case SENSE_TYPE_MEDIA_CHANGE:
+ set_sense_data(chip, lun, CUR_ERR, 0x06, 0, 0x28, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_NOT_PRESENT:
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x3A, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_LBA_OVER_RANGE:
+ set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x21, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT:
+ set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x25, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_WRITE_PROTECT:
+ set_sense_data(chip, lun, CUR_ERR, 0x07, 0, 0x27, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR:
+ set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x11, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_WRITE_ERR:
+ set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x0C, 0x02, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_INVALID_CMD_FIELD:
+ set_sense_data(chip, lun, CUR_ERR, ILGAL_REQ, 0,
+ ASC_INVLD_CDB, ASCQ_INVLD_CDB, CDB_ILLEGAL, 1);
+ break;
+
+ case SENSE_TYPE_FORMAT_IN_PROGRESS:
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04, 0, 0);
+ break;
+
+ case SENSE_TYPE_FORMAT_CMD_FAILED:
+ set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x31, 0x01, 0, 0);
+ break;
+
+#ifdef SUPPORT_MAGIC_GATE
+ case SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB:
+ set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x6F, 0x02, 0, 0);
+ break;
+
+ case SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN:
+ set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x6F, 0x00, 0, 0);
+ break;
+
+ case SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM:
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x30, 0x00, 0, 0);
+ break;
+
+ case SENSE_TYPE_MG_WRITE_ERR:
+ set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x0C, 0x00, 0, 0);
+ break;
+#endif
+
+#ifdef SUPPORT_SD_LOCK
+ case SENSE_TYPE_MEDIA_READ_FORBIDDEN:
+ set_sense_data(chip, lun, CUR_ERR, 0x07, 0, 0x11, 0x13, 0, 0);
+ break;
+#endif
+
+ case SENSE_TYPE_NO_SENSE:
+ default:
+ set_sense_data(chip, lun, CUR_ERR, 0, 0, 0, 0, 0, 0);
+ break;
+ }
+}
+
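+/*
+ * Fill in fixed-format sense data for the given LUN.  A nonzero
+ * sns_key_info0 additionally populates the sense-key-specific bytes
+ * with the SKSV bit set.
+ */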
+void set_sense_data(struct rts51x_chip *chip, unsigned int lun, u8 err_code,
+ u8 sense_key, u32 info, u8 asc, u8 ascq, u8 sns_key_info0,
+ u16 sns_key_info1)
+{
+ struct sense_data_t *sense = &(chip->sense_buffer[lun]);
+
+ sense->err_code = err_code;
+ sense->sense_key = sense_key;
+ sense->info[0] = (u8) (info >> 24);
+ sense->info[1] = (u8) (info >> 16);
+ sense->info[2] = (u8) (info >> 8);
+ sense->info[3] = (u8) info;
+
+ sense->ad_sense_len = sizeof(struct sense_data_t) - 8;
+ sense->asc = asc;
+ sense->ascq = ascq;
+	if (sns_key_info0 != 0) {
+		sense->sns_key_info[0] = SKSV | sns_key_info0;
+		/* Upper and lower nibble of the sense-key-specific value */
+		sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 4;
+		sense->sns_key_info[2] = sns_key_info1 & 0x0f;
+	}
+}
+
+static int test_unit_ready(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+
+ rts51x_init_cards(chip);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ return TRANSPORT_FAILED;
+ }
+
+ if (!check_lun_mc(chip, lun)) {
+ set_lun_mc(chip, lun);
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ return TRANSPORT_FAILED;
+ }
+#ifdef SUPPORT_SD_LOCK
+ if (get_lun_card(chip, SCSI_LUN(srb)) == SD_CARD) {
+ struct sd_info *sd_card = &(chip->sd_card);
+ if (sd_card->sd_lock_notify) {
+ sd_card->sd_lock_notify = 0;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ return TRANSPORT_FAILED;
+ } else if (sd_card->sd_lock_status & SD_LOCKED) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_READ_FORBIDDEN);
+ return TRANSPORT_FAILED;
+ }
+ }
+#endif
+
+ return TRANSPORT_GOOD;
+}
+
+unsigned char formatter_inquiry_str[20] = {
+ 'M', 'E', 'M', 'O', 'R', 'Y', 'S', 'T', 'I', 'C', 'K',
+ '-', 'M', 'G', /* Byte[47:49] */
+ 0x0B, /* Byte[50]: MG, MS, MSPro, MSXC */
+ 0x00, /* Byte[51]: Category Specific Commands */
+ 0x00, /* Byte[52]: Access Control and feature */
+ 0x20, 0x20, 0x20, /* Byte[53:55] */
+};
+
+static int inquiry(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ char *inquiry_default = (char *)"Generic-xD/SD/M.S. 1.00 ";
+ char *inquiry_string;
+ unsigned char sendbytes;
+ unsigned char *buf;
+ u8 card = get_lun_card(chip, lun);
+ int pro_formatter_flag = 0;
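+	/* First eight bytes of the INQUIRY response: peripheral type,
+	 * removable-medium bit, version fields, response data format,
+	 * additional length (0x1f) and capability flags */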
+ unsigned char inquiry_buf[] = {
+ QULIFIRE | DRCT_ACCESS_DEV,
+ RMB_DISC | 0x0D,
+ 0x00,
+ 0x01,
+ 0x1f,
+ 0x02,
+ 0,
+ REL_ADR | WBUS_32 | WBUS_16 | SYNC | LINKED | CMD_QUE | SFT_RE,
+ };
+
+ inquiry_string = inquiry_default;
+
+ buf = vmalloc(scsi_bufflen(srb));
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ if (MS_FORMATTER_ENABLED(chip) && (get_lun2card(chip, lun) & MS_CARD)) {
+ if (!card || (card == MS_CARD))
+ pro_formatter_flag = 1;
+ }
+
+ if (pro_formatter_flag) {
+ if (scsi_bufflen(srb) < 56)
+ sendbytes = (unsigned char)(scsi_bufflen(srb));
+ else
+ sendbytes = 56;
+ } else {
+ if (scsi_bufflen(srb) < 36)
+ sendbytes = (unsigned char)(scsi_bufflen(srb));
+ else
+ sendbytes = 36;
+ }
+
+ if (sendbytes > 8) {
+ memcpy(buf, inquiry_buf, 8);
+ memcpy(buf + 8, inquiry_string, sendbytes - 8);
+ if (pro_formatter_flag)
+ buf[4] = 0x33; /* Additional Length */
+ } else {
+ memcpy(buf, inquiry_buf, sendbytes);
+ }
+
+ if (pro_formatter_flag) {
+ if (sendbytes > 36)
+ memcpy(buf + 36, formatter_inquiry_str, sendbytes - 36);
+ }
+
+ scsi_set_resid(srb, 0);
+
+ rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+static int start_stop_unit(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+
+ scsi_set_resid(srb, scsi_bufflen(srb));
+
+ if (srb->cmnd[1] == 1)
+ return TRANSPORT_GOOD;
+
+	switch (srb->cmnd[4]) {
+ case STOP_MEDIUM:
+ /* Media disabled */
+ return TRANSPORT_GOOD;
+
+ case UNLOAD_MEDIUM:
+		/* Media shall be unloaded */
+ if (check_card_ready(chip, lun))
+ eject_card(chip, lun);
+ return TRANSPORT_GOOD;
+
+ case MAKE_MEDIUM_READY:
+ case LOAD_MEDIUM:
+ if (check_card_ready(chip, lun)) {
+ return TRANSPORT_GOOD;
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ break;
+ }
+
+ TRACE_RET(chip, TRANSPORT_ERROR);
+}
+
+static int allow_medium_removal(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int prevent;
+
+ prevent = srb->cmnd[4] & 0x1;
+
+ scsi_set_resid(srb, 0);
+
+ if (prevent) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
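+/*
+ * Build a MODE SENSE(6)/MODE SENSE(10) response for a Memory Stick
+ * LUN: the mode parameter header, vendor page 0x20 describing format
+ * support, then up to 96 bytes of raw system information.
+ */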
+static void ms_mode_sense(struct rts51x_chip *chip, u8 cmd,
+ int lun, u8 *buf, int buf_len)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int sys_info_offset;
+ int data_size = buf_len;
+ int support_format = 0;
+ int i = 0;
+
+ if (cmd == MODE_SENSE) {
+ sys_info_offset = 8;
+ if (data_size > 0x68)
+ data_size = 0x68;
+ buf[i++] = 0x67; /* Mode Data Length */
+ } else {
+ sys_info_offset = 12;
+ if (data_size > 0x6C)
+ data_size = 0x6C;
+ buf[i++] = 0x00; /* Mode Data Length (MSB) */
+ buf[i++] = 0x6A; /* Mode Data Length (LSB) */
+ }
+
+ /* Medium Type Code */
+ if (check_card_ready(chip, lun)) {
+ if (CHK_MSXC(ms_card)) {
+ support_format = 1;
+ buf[i++] = 0x40;
+ } else if (CHK_MSPRO(ms_card)) {
+ support_format = 1;
+ buf[i++] = 0x20;
+ } else {
+ buf[i++] = 0x10;
+ }
+
+ /* WP */
+ if (check_card_wp(chip, lun))
+ buf[i++] = 0x80;
+ else
+ buf[i++] = 0x00;
+ } else {
+ buf[i++] = 0x00; /* MediaType */
+ buf[i++] = 0x00; /* WP */
+ }
+
+ buf[i++] = 0x00; /* Reserved */
+
+ if (cmd == MODE_SENSE_10) {
+ buf[i++] = 0x00; /* Reserved */
+ buf[i++] = 0x00; /* Block descriptor length(MSB) */
+ buf[i++] = 0x00; /* Block descriptor length(LSB) */
+
+ /* The Following Data is the content of "Page 0x20" */
+ if (data_size >= 9)
+ buf[i++] = 0x20; /* Page Code */
+ if (data_size >= 10)
+ buf[i++] = 0x62; /* Page Length */
+ if (data_size >= 11)
+ buf[i++] = 0x00; /* No Access Control */
+ if (data_size >= 12) {
+ if (support_format)
+ buf[i++] = 0xC0; /* SF, SGM */
+ else
+ buf[i++] = 0x00;
+ }
+ } else {
+ /* The Following Data is the content of "Page 0x20" */
+ if (data_size >= 5)
+ buf[i++] = 0x20; /* Page Code */
+ if (data_size >= 6)
+ buf[i++] = 0x62; /* Page Length */
+ if (data_size >= 7)
+ buf[i++] = 0x00; /* No Access Control */
+ if (data_size >= 8) {
+ if (support_format)
+ buf[i++] = 0xC0; /* SF, SGM */
+ else
+ buf[i++] = 0x00;
+ }
+ }
+
+ if (data_size > sys_info_offset) {
+ /* 96 Bytes Attribute Data */
+ int len = data_size - sys_info_offset;
+ len = (len < 96) ? len : 96;
+
+ memcpy(buf + sys_info_offset, ms_card->raw_sys_info, len);
+ }
+}
+
+static int mode_sense(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ unsigned int dataSize;
+ int status;
+ int pro_formatter_flag;
+ unsigned char pageCode, *buf;
+ u8 card = get_lun_card(chip, lun);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ scsi_set_resid(srb, scsi_bufflen(srb));
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ pro_formatter_flag = 0;
+ dataSize = 8;
+	/* In combo mode the device responds to MODE SENSE as an MS LUN
+	 * when no card is inserted */
+ if ((get_lun2card(chip, lun) & MS_CARD)) {
+ if (!card || (card == MS_CARD)) {
+ dataSize = 108;
+ if (chip->option.mspro_formatter_enable)
+ pro_formatter_flag = 1;
+ }
+ }
+
+ buf = kmalloc(dataSize, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ pageCode = srb->cmnd[2] & 0x3f;
+
+ if ((pageCode == 0x3F) || (pageCode == 0x1C) ||
+ (pageCode == 0x00) || (pro_formatter_flag && (pageCode == 0x20))) {
+ if (srb->cmnd[0] == MODE_SENSE) {
+ if ((pageCode == 0x3F) || (pageCode == 0x20)) {
+ ms_mode_sense(chip, srb->cmnd[0], lun, buf,
+ dataSize);
+			} else {
+				dataSize = 4;
+				buf[0] = 0x03;
+				buf[1] = 0x00;
+				if (check_card_wp(chip, lun))
+					buf[2] = 0x80;
+				else
+					buf[2] = 0x00;
+				buf[3] = 0x00;
+			}
+ } else {
+ if ((pageCode == 0x3F) || (pageCode == 0x20)) {
+ ms_mode_sense(chip, srb->cmnd[0], lun, buf,
+ dataSize);
+ } else {
+ dataSize = 8;
+ buf[0] = 0x00;
+ buf[1] = 0x06;
+ buf[2] = 0x00;
+ if (check_card_wp(chip, lun))
+ buf[3] = 0x80;
+ else
+ buf[3] = 0x00;
+ buf[4] = 0x00;
+ buf[5] = 0x00;
+ buf[6] = 0x00;
+ buf[7] = 0x00;
+ }
+ }
+ status = TRANSPORT_GOOD;
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ scsi_set_resid(srb, scsi_bufflen(srb));
+ status = TRANSPORT_FAILED;
+ }
+
+ if (status == TRANSPORT_GOOD) {
+ unsigned int len = min(scsi_bufflen(srb), dataSize);
+ rts51x_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+ }
+ kfree(buf);
+
+ return status;
+}
+
+static int request_sense(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct sense_data_t *sense;
+ unsigned int lun = SCSI_LUN(srb);
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned char *tmp, *buf;
+
+ sense = &(chip->sense_buffer[lun]);
+
+ if ((get_lun_card(chip, lun) == MS_CARD)
+ && PRO_UNDER_FORMATTING(ms_card)) {
+ mspro_format_sense(chip, lun);
+ }
+
+	buf = vmalloc(scsi_bufflen(srb));
+	if (buf == NULL)
+		TRACE_RET(chip, TRANSPORT_ERROR);
+	memset(buf, 0, scsi_bufflen(srb));
+
+	/* Copy no more than the sense structure actually holds */
+	tmp = (unsigned char *)sense;
+	memcpy(buf, tmp, min_t(unsigned int, scsi_bufflen(srb),
+			       sizeof(struct sense_data_t)));
+
+ rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+ vfree(buf);
+
+ scsi_set_resid(srb, 0);
+ /* Reset Sense Data */
+ set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
+ return TRANSPORT_GOOD;
+}
+
+static int read_write(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+#ifdef SUPPORT_SD_LOCK
+ struct sd_info *sd_card = &(chip->sd_card);
+#endif
+ unsigned int lun = SCSI_LUN(srb);
+ int retval;
+ u32 start_sec;
+ u16 sec_cnt;
+
+ if (!check_card_ready(chip, lun) || (chip->capacity[lun] == 0)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!check_lun_mc(chip, lun)) {
+ set_lun_mc(chip, lun);
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ return TRANSPORT_FAILED;
+ }
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_erase_status) {
+ /* Accessing to any card is forbidden
+ * until the erase procedure of SD is completed */
+ RTS51X_DEBUGP("SD card being erased!\n");
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_READ_FORBIDDEN);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (get_lun_card(chip, lun) == SD_CARD) {
+ if (sd_card->sd_lock_status & SD_LOCKED) {
+ RTS51X_DEBUGP("SD card locked!\n");
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_READ_FORBIDDEN);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+#endif
+
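+	/*
+	 * Pull the LBA and sector count out of the CDB.  READ(10)/
+	 * WRITE(10) carry a big-endian LBA in bytes 2-5 and the count in
+	 * bytes 7-8; READ(6)/WRITE(6) pack a 21-bit LBA into bytes 1-3;
+	 * the vendor PP_READ10/PP_WRITE10 variants shift both by two.
+	 */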
+ if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10)) {
+ start_sec =
+ ((u32) srb->cmnd[2] << 24) |
+ ((u32) srb->cmnd[3] << 16) |
+ ((u32) srb->cmnd[4] << 8) |
+ ((u32) srb->cmnd[5]);
+ sec_cnt = ((u16) (srb->cmnd[7]) << 8) | srb->cmnd[8];
+ } else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) {
+ start_sec = ((u32) (srb->cmnd[1] & 0x1F) << 16) |
+ ((u32) srb->cmnd[2] << 8) | ((u32) srb->cmnd[3]);
+ sec_cnt = srb->cmnd[4];
+ } else if ((srb->cmnd[0] == VENDOR_CMND) &&
+ (srb->cmnd[1] == SCSI_APP_CMD) &&
+ ((srb->cmnd[2] == PP_READ10) ||
+ (srb->cmnd[2] == PP_WRITE10))) {
+ start_sec = ((u32) srb->cmnd[4] << 24) |
+ ((u32) srb->cmnd[5] << 16) |
+ ((u32) srb->cmnd[6] << 8) |
+ ((u32) srb->cmnd[7]);
+ sec_cnt = ((u16) (srb->cmnd[9]) << 8) | srb->cmnd[10];
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if ((start_sec > chip->capacity[lun]) ||
+ ((start_sec + sec_cnt) > chip->capacity[lun])) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LBA_OVER_RANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (sec_cnt == 0) {
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+ }
+
+ if ((srb->sc_data_direction == DMA_TO_DEVICE)
+ && check_card_wp(chip, lun)) {
+ RTS51X_DEBUGP("Write protected card!\n");
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+	retval = card_rw(srb, chip, start_sec, sec_cnt);
+	if (retval != STATUS_SUCCESS) {
+		if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+			set_sense_type(chip, lun,
+				       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+		} else {
+			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+		}
+		TRACE_RET(chip, TRANSPORT_FAILED);
+	}
+
+ scsi_set_resid(srb, 0);
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_format_capacity(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned char *buf;
+ unsigned int lun = SCSI_LUN(srb);
+ unsigned int buf_len;
+ u8 card = get_lun_card(chip, lun);
+ int desc_cnt;
+ int i = 0;
+
+ if (!check_card_ready(chip, lun)) {
+ if (!chip->option.mspro_formatter_enable) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ buf_len = (scsi_bufflen(srb) > 12) ? 0x14 : 12;
+
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ buf[i++] = 0;
+ buf[i++] = 0;
+ buf[i++] = 0;
+
+ /* Capacity List Length */
+ if ((buf_len > 12) && chip->option.mspro_formatter_enable &&
+ (chip->lun2card[lun] & MS_CARD) && (!card || (card == MS_CARD))) {
+ buf[i++] = 0x10;
+ desc_cnt = 2;
+ } else {
+ buf[i++] = 0x08;
+ desc_cnt = 1;
+ }
+
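+	/*
+	 * Each capacity descriptor is eight bytes: a four-byte block
+	 * count, a descriptor-type byte and a three-byte block length
+	 * (0x000200 = 512 bytes per sector).
+	 */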
+ while (desc_cnt) {
+ if (check_card_ready(chip, lun)) {
+ buf[i++] = (unsigned char)((chip->capacity[lun]) >> 24);
+ buf[i++] = (unsigned char)((chip->capacity[lun]) >> 16);
+ buf[i++] = (unsigned char)((chip->capacity[lun]) >> 8);
+ buf[i++] = (unsigned char)(chip->capacity[lun]);
+
+ if (desc_cnt == 2)
+ /* Byte[8]: Descriptor Type: Formatted medium */
+ buf[i++] = 2;
+ else
+ buf[i++] = 0; /* Byte[16] */
+ } else {
+ buf[i++] = 0xFF;
+ buf[i++] = 0xFF;
+ buf[i++] = 0xFF;
+ buf[i++] = 0xFF;
+
+ if (desc_cnt == 2)
+ /* Byte[8]: Descriptor Type: No medium */
+ buf[i++] = 3;
+ else
+ buf[i++] = 0; /*Byte[16] */
+ }
+
+ buf[i++] = 0x00;
+ buf[i++] = 0x02;
+ buf[i++] = 0x00;
+
+ desc_cnt--;
+ }
+
+ buf_len = min(scsi_bufflen(srb), buf_len);
+ rts51x_set_xfer_buf(buf, buf_len, srb);
+ kfree(buf);
+
+ scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_capacity(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned char *buf;
+ unsigned int lun = SCSI_LUN(srb);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!check_lun_mc(chip, lun)) {
+ set_lun_mc(chip, lun);
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ return TRANSPORT_FAILED;
+ }
+
+ buf = kmalloc(8, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
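+	/* READ CAPACITY(10) reply: big-endian address of the last LBA,
+	 * followed by the block length (0x00000200 = 512 bytes) */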
+ buf[0] = (unsigned char)((chip->capacity[lun] - 1) >> 24);
+ buf[1] = (unsigned char)((chip->capacity[lun] - 1) >> 16);
+ buf[2] = (unsigned char)((chip->capacity[lun] - 1) >> 8);
+ buf[3] = (unsigned char)(chip->capacity[lun] - 1);
+
+ buf[4] = 0x00;
+ buf[5] = 0x00;
+ buf[6] = 0x02;
+ buf[7] = 0x00;
+
+ rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+ kfree(buf);
+
+ scsi_set_resid(srb, 0);
+
+ return TRANSPORT_GOOD;
+}
+
+static int get_dev_status(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ unsigned int buf_len;
+ u8 status[32] = { 0 };
+
+ rts51x_pp_status(chip, lun, status, 32);
+
+ buf_len = min(scsi_bufflen(srb), (unsigned int)sizeof(status));
+ rts51x_set_xfer_buf(status, buf_len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_status(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ u8 rts51x_status[16];
+ unsigned int buf_len;
+ unsigned int lun = SCSI_LUN(srb);
+
+ rts51x_read_status(chip, lun, rts51x_status, 16);
+
+ buf_len = min(scsi_bufflen(srb), (unsigned int)sizeof(rts51x_status));
+ rts51x_set_xfer_buf(rts51x_status, buf_len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_mem(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ unsigned short addr, len, i;
+ int retval;
+ u8 *buf;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ addr = ((u16) srb->cmnd[2] << 8) | srb->cmnd[3];
+ len = ((u16) srb->cmnd[4] << 8) | srb->cmnd[5];
+
+ if (addr < 0xe000) {
+ RTS51X_DEBUGP("filter!addr=0x%x\n", addr);
+ return TRANSPORT_GOOD;
+ }
+
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ for (i = 0; i < len; i++) {
+ retval = rts51x_ep0_read_register(chip, addr + i, buf + i);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ len = (unsigned short)min(scsi_bufflen(srb), (unsigned int)len);
+ rts51x_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+static int write_mem(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ unsigned short addr, len, i;
+ int retval;
+ u8 *buf;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ addr = ((u16) srb->cmnd[2] << 8) | srb->cmnd[3];
+ len = ((u16) srb->cmnd[4] << 8) | srb->cmnd[5];
+
+ if (addr < 0xe000) {
+ RTS51X_DEBUGP("filter!addr=0x%x\n", addr);
+ return TRANSPORT_GOOD;
+ }
+
+ len = (unsigned short)min(scsi_bufflen(srb), (unsigned int)len);
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rts51x_get_xfer_buf(buf, len, srb);
+
+ for (i = 0; i < len; i++) {
+ retval =
+ rts51x_ep0_write_register(chip, addr + i, 0xFF, buf[i]);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ vfree(buf);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ return TRANSPORT_GOOD;
+}
+
+static int get_sd_csd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ unsigned int lun = SCSI_LUN(srb);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (get_lun_card(chip, lun) != SD_CARD) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ rts51x_set_xfer_buf(sd_card->raw_csd, scsi_bufflen(srb), srb);
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_phy_register(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int retval;
+ u8 addr, len, i;
+ u8 *buf;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ addr = srb->cmnd[5];
+ len = srb->cmnd[7];
+
+ if (len) {
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ for (i = 0; i < len; i++) {
+ retval =
+ rts51x_read_phy_register(chip, addr + i, buf + i);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ len = min(scsi_bufflen(srb), (unsigned int)len);
+ rts51x_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ vfree(buf);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int write_phy_register(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int retval;
+ u8 addr, len, i;
+ u8 *buf;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ addr = srb->cmnd[5];
+ len = srb->cmnd[7];
+
+ if (len) {
+ len = min(scsi_bufflen(srb), (unsigned int)len);
+
+ buf = vmalloc(len);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rts51x_get_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ for (i = 0; i < len; i++) {
+ retval =
+ rts51x_write_phy_register(chip, addr + i, buf[i]);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ vfree(buf);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int get_card_bus_width(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ u8 card, bus_width;
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ card = get_lun_card(chip, lun);
+ if ((card == SD_CARD) || (card == MS_CARD)) {
+ bus_width = chip->card_bus_width[lun];
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ rts51x_set_xfer_buf(&bus_width, scsi_bufflen(srb), srb);
+
+ return TRANSPORT_GOOD;
+}
+
+#ifdef _MSG_TRACE
+static int trace_msg_cmd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned char *buf = NULL;
+ u8 clear;
+ unsigned int buf_len;
+
+ buf_len =
+ 4 +
+ ((2 + MSG_FUNC_LEN + MSG_FILE_LEN + TIME_VAL_LEN) * TRACE_ITEM_CNT);
+
+ if ((scsi_bufflen(srb) < buf_len) || (scsi_sglist(srb) == NULL)) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ clear = srb->cmnd[2];
+
+ buf = vmalloc(scsi_bufflen(srb));
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rts51x_trace_msg(chip, buf, clear);
+
+ rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+ vfree(buf);
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+#endif
+
+static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int retval = STATUS_SUCCESS;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 cmd_type, mask, value, idx, mode, len;
+ u16 addr;
+ u32 timeout;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
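+	/*
+	 * Vendor batch interface: INIT starts a command list, ADD queues
+	 * a register read/write/check, SEND fires the list, and
+	 * GET_BATCHRSP returns one byte of the response buffer.
+	 */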
+ switch (srb->cmnd[3]) {
+ case INIT_BATCHCMD:
+ rts51x_init_cmd(chip);
+ break;
+
+ case ADD_BATCHCMD:
+ cmd_type = srb->cmnd[4];
+ if (cmd_type > 2) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ addr = (srb->cmnd[5] << 8) | srb->cmnd[6];
+ mask = srb->cmnd[7];
+ value = srb->cmnd[8];
+ rts51x_add_cmd(chip, cmd_type, addr, mask, value);
+ break;
+
+ case SEND_BATCHCMD:
+ mode = srb->cmnd[4];
+ len = srb->cmnd[5];
+		timeout = ((u32) srb->cmnd[6] << 24) |
+			  ((u32) srb->cmnd[7] << 16) |
+			  ((u32) srb->cmnd[8] << 8) |
+			  ((u32) srb->cmnd[9]);
+ retval = rts51x_send_cmd(chip, mode, 1000);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if (mode & STAGE_R) {
+ retval = rts51x_get_rsp(chip, len, timeout);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+ break;
+
+ case GET_BATCHRSP:
+ idx = srb->cmnd[4];
+ value = chip->rsp_buf[idx];
+ if (scsi_bufflen(srb) < 1) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ rts51x_set_xfer_buf(&value, 1, srb);
+ scsi_set_resid(srb, 0);
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int suit_cmd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int result;
+
+ switch (srb->cmnd[3]) {
+ case INIT_BATCHCMD:
+ case ADD_BATCHCMD:
+ case SEND_BATCHCMD:
+ case GET_BATCHRSP:
+ result = rw_mem_cmd_buf(srb, chip);
+ break;
+ default:
+ result = TRANSPORT_ERROR;
+ }
+
+ return result;
+}
+
+static int app_cmd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int result;
+
+ switch (srb->cmnd[2]) {
+ case PP_READ10:
+ case PP_WRITE10:
+ result = read_write(srb, chip);
+ break;
+
+ case SUIT_CMD:
+ result = suit_cmd(srb, chip);
+ break;
+
+ case READ_PHY:
+ result = read_phy_register(srb, chip);
+ break;
+
+ case WRITE_PHY:
+ result = write_phy_register(srb, chip);
+ break;
+
+ case GET_DEV_STATUS:
+ result = get_dev_status(srb, chip);
+ break;
+
+ default:
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return result;
+}
+
+static int vendor_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int result = TRANSPORT_GOOD;
+
+ switch (srb->cmnd[1]) {
+ case READ_STATUS:
+ result = read_status(srb, chip);
+ break;
+
+ case READ_MEM:
+ result = read_mem(srb, chip);
+ break;
+
+ case WRITE_MEM:
+ result = write_mem(srb, chip);
+ break;
+
+ case GET_BUS_WIDTH:
+ result = get_card_bus_width(srb, chip);
+ break;
+
+ case GET_SD_CSD:
+ result = get_sd_csd(srb, chip);
+ break;
+
+#ifdef _MSG_TRACE
+ case TRACE_MSG:
+ result = trace_msg_cmd(srb, chip);
+ break;
+#endif
+
+ case SCSI_APP_CMD:
+ result = app_cmd(srb, chip);
+ break;
+
+ default:
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return result;
+}
+
+static int ms_format_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval, quick_format;
+
+ if (get_lun_card(chip, lun) != MS_CARD) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if ((srb->cmnd[3] != 0x4D) || (srb->cmnd[4] != 0x47)
+ || (srb->cmnd[5] != 0x66) || (srb->cmnd[6] != 0x6D)
+ || (srb->cmnd[7] != 0x74)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (srb->cmnd[8] & 0x01)
+ quick_format = 0;
+ else
+ quick_format = 1;
+
+ if (!(chip->card_ready & MS_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (chip->card_wp & MS_CARD) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!CHK_MSPRO(ms_card)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ retval = mspro_format(srb, chip, MS_SHORT_DATA_LEN, quick_format);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+
+#ifdef SUPPORT_PCGL_1P18
+int get_ms_information(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ u8 dev_info_id, data_len;
+ u8 *buf;
+ unsigned int buf_len;
+ int i;
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if ((get_lun_card(chip, lun) != MS_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if ((srb->cmnd[2] != 0xB0) || (srb->cmnd[4] != 0x4D) ||
+ (srb->cmnd[5] != 0x53) || (srb->cmnd[6] != 0x49) ||
+ (srb->cmnd[7] != 0x44)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ dev_info_id = srb->cmnd[3];
+ if ((CHK_MSXC(ms_card) && (dev_info_id == 0x10)) ||
+ (!CHK_MSXC(ms_card) && (dev_info_id == 0x13)) ||
+ !CHK_MSPRO(ms_card)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+	/* The full response is the two length bytes plus data_len bytes */
+	if (dev_info_id == 0x15)
+		data_len = 0x3A;
+	else
+		data_len = 0x6A;
+	buf_len = data_len + 2;
+
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ i = 0;
+ /* GET Memory Stick Media Information Response Header */
+ buf[i++] = 0x00; /* Data length MSB */
+ buf[i++] = data_len; /* Data length LSB */
+ /* Device Information Type Code */
+ if (CHK_MSXC(ms_card))
+ buf[i++] = 0x03;
+ else
+ buf[i++] = 0x02;
+ /* SGM bit */
+ buf[i++] = 0x01;
+ /* Reserved */
+ buf[i++] = 0x00;
+ buf[i++] = 0x00;
+ buf[i++] = 0x00;
+ /* Number of Device Information */
+ buf[i++] = 0x01;
+
+ /* Device Information Body
+ * Device Information ID Number */
+ buf[i++] = dev_info_id;
+ /* Device Information Length */
+ if (dev_info_id == 0x15)
+ data_len = 0x31;
+ else
+ data_len = 0x61;
+ buf[i++] = 0x00; /* Data length MSB */
+ buf[i++] = data_len; /* Data length LSB */
+ /* Valid Bit */
+ buf[i++] = 0x80;
+ if ((dev_info_id == 0x10) || (dev_info_id == 0x13)) {
+ /* System Information */
+ memcpy(buf + i, ms_card->raw_sys_info, 96);
+ } else {
+ /* Model Name */
+ memcpy(buf + i, ms_card->raw_model_name, 48);
+ }
+
+ rts51x_set_xfer_buf(buf, buf_len, srb);
+
+ if (dev_info_id == 0x15)
+ scsi_set_resid(srb, scsi_bufflen(srb) - 0x3C);
+ else
+ scsi_set_resid(srb, scsi_bufflen(srb) - 0x6C);
+
+ kfree(buf);
+ return STATUS_SUCCESS;
+}
+#endif
+
+static int ms_sp_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int retval = TRANSPORT_ERROR;
+
+ if (srb->cmnd[2] == MS_FORMAT)
+ retval = ms_format_cmnd(srb, chip);
+#ifdef SUPPORT_PCGL_1P18
+ else if (srb->cmnd[2] == GET_MS_INFORMATION)
+ retval = get_ms_information(srb, chip);
+#endif
+
+ return retval;
+}
+
+#ifdef SUPPORT_CPRM
+static int sd_extention_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ int result;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ sd_cleanup_work(chip);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if ((get_lun_card(chip, lun) != SD_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ switch (srb->cmnd[0]) {
+ case SD_PASS_THRU_MODE:
+ result = sd_pass_thru_mode(srb, chip);
+ break;
+
+ case SD_EXECUTE_NO_DATA:
+ result = sd_execute_no_data(srb, chip);
+ break;
+
+ case SD_EXECUTE_READ:
+ result = sd_execute_read_data(srb, chip);
+ break;
+
+ case SD_EXECUTE_WRITE:
+ result = sd_execute_write_data(srb, chip);
+ break;
+
+ case SD_GET_RSP:
+ result = sd_get_cmd_rsp(srb, chip);
+ break;
+
+ case SD_HW_RST:
+ result = sd_hw_rst(srb, chip);
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return result;
+}
+#endif
+
+#ifdef SUPPORT_MAGIC_GATE
+int mg_report_key(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval;
+ u8 key_format;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ ms_cleanup_work(chip);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if ((get_lun_card(chip, lun) != MS_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (srb->cmnd[7] != KC_MG_R_PRO) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!CHK_MSPRO(ms_card)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ key_format = srb->cmnd[10] & 0x3F;
+
+ switch (key_format) {
+ case KF_GET_LOC_EKB:
+ if ((scsi_bufflen(srb) == 0x41C) &&
+ (srb->cmnd[8] == 0x04) && (srb->cmnd[9] == 0x1C)) {
+ retval = mg_get_local_EKB(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_RSP_CHG:
+ if ((scsi_bufflen(srb) == 0x24) &&
+ (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x24)) {
+ retval = mg_get_rsp_chg(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_GET_ICV:
+ ms_card->mg_entry_num = srb->cmnd[5];
+ if ((scsi_bufflen(srb) == 0x404) &&
+ (srb->cmnd[8] == 0x04) &&
+ (srb->cmnd[9] == 0x04) &&
+ (srb->cmnd[2] == 0x00) &&
+ (srb->cmnd[3] == 0x00) &&
+ (srb->cmnd[4] == 0x00) && (srb->cmnd[5] < 32)) {
+ retval = mg_get_ICV(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+
+int mg_send_key(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval;
+ u8 key_format;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ ms_cleanup_work(chip);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if (check_card_wp(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if ((get_lun_card(chip, lun) != MS_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (srb->cmnd[7] != KC_MG_R_PRO) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!CHK_MSPRO(ms_card)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ key_format = srb->cmnd[10] & 0x3F;
+
+ switch (key_format) {
+ case KF_SET_LEAF_ID:
+ if ((scsi_bufflen(srb) == 0x0C) &&
+ (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x0C)) {
+ retval = mg_set_leaf_id(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_CHG_HOST:
+ if ((scsi_bufflen(srb) == 0x0C) &&
+ (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x0C)) {
+ retval = mg_chg(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_RSP_HOST:
+ if ((scsi_bufflen(srb) == 0x0C) &&
+ (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x0C)) {
+ retval = mg_rsp(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_SET_ICV:
+ ms_card->mg_entry_num = srb->cmnd[5];
+ if ((scsi_bufflen(srb) == 0x404) &&
+ (srb->cmnd[8] == 0x04) &&
+ (srb->cmnd[9] == 0x04) &&
+ (srb->cmnd[2] == 0x00) &&
+ (srb->cmnd[3] == 0x00) &&
+ (srb->cmnd[4] == 0x00) && (srb->cmnd[5] < 32)) {
+ retval = mg_set_ICV(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+#endif
+
+int rts51x_scsi_handler(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+#ifdef SUPPORT_SD_LOCK
+ struct sd_info *sd_card = &(chip->sd_card);
+#endif
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int result = TRANSPORT_GOOD;
+
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_erase_status) {
+		/* Block all SCSI commands except REQUEST_SENSE and the
+		 * vendor GET_DEV_STATUS query */
+		if (!((srb->cmnd[0] == VENDOR_CMND) &&
+		      (srb->cmnd[1] == SCSI_APP_CMD) &&
+		      (srb->cmnd[2] == GET_DEV_STATUS)) &&
+		    (srb->cmnd[0] != REQUEST_SENSE)) {
+ /* Logical Unit Not Ready Format in Progress */
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
+ 0, 0);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+#endif
+
+ if ((get_lun_card(chip, lun) == MS_CARD) &&
+ (ms_card->format_status == FORMAT_IN_PROGRESS)) {
+ if ((srb->cmnd[0] != REQUEST_SENSE)
+ && (srb->cmnd[0] != INQUIRY)) {
+ /* Logical Unit Not Ready Format in Progress */
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
+ 0, (u16) (ms_card->progress));
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ switch (srb->cmnd[0]) {
+ case READ_10:
+ case WRITE_10:
+ case READ_6:
+ case WRITE_6:
+ result = read_write(srb, chip);
+ break;
+
+ case TEST_UNIT_READY:
+ result = test_unit_ready(srb, chip);
+ break;
+
+ case INQUIRY:
+ result = inquiry(srb, chip);
+ break;
+
+ case READ_CAPACITY:
+ result = read_capacity(srb, chip);
+ break;
+
+ case START_STOP:
+ result = start_stop_unit(srb, chip);
+ break;
+
+ case ALLOW_MEDIUM_REMOVAL:
+ result = allow_medium_removal(srb, chip);
+ break;
+
+ case REQUEST_SENSE:
+ result = request_sense(srb, chip);
+ break;
+
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+ result = mode_sense(srb, chip);
+ break;
+
+ case 0x23:
+ result = read_format_capacity(srb, chip);
+ break;
+
+ case VENDOR_CMND:
+ result = vendor_cmnd(srb, chip);
+ break;
+
+ case MS_SP_CMND:
+ result = ms_sp_cmnd(srb, chip);
+ break;
+
+#ifdef SUPPORT_CPRM
+ case SD_PASS_THRU_MODE:
+ case SD_EXECUTE_NO_DATA:
+ case SD_EXECUTE_READ:
+ case SD_EXECUTE_WRITE:
+ case SD_GET_RSP:
+ case SD_HW_RST:
+ result = sd_extention_cmnd(srb, chip);
+ break;
+#endif
+
+#ifdef SUPPORT_MAGIC_GATE
+ case CMD_MSPRO_MG_RKEY:
+ result = mg_report_key(srb, chip);
+ break;
+
+ case CMD_MSPRO_MG_SKEY:
+ result = mg_send_key(srb, chip);
+ break;
+#endif
+
+ case FORMAT_UNIT:
+ case MODE_SELECT:
+ case VERIFY:
+ result = TRANSPORT_GOOD;
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ result = TRANSPORT_FAILED;
+ }
+
+ return result;
+}
+
+/***********************************************************************
+ * Host functions
+ ***********************************************************************/
+
+const char *host_info(struct Scsi_Host *host)
+{
+ return "SCSI emulation for RTS51xx USB driver-based card reader";
+}
+
+int slave_alloc(struct scsi_device *sdev)
+{
+ /*
+ * Set the INQUIRY transfer length to 36. We don't use any of
+ * the extra data and many devices choke if asked for more or
+ * less than 36 bytes.
+ */
+ sdev->inquiry_len = 36;
+ return 0;
+}
+
+int slave_configure(struct scsi_device *sdev)
+{
+ /* Scatter-gather buffers (all but the last) must have a length
+ * divisible by the bulk maxpacket size. Otherwise a data packet
+ * would end up being short, causing a premature end to the data
+ * transfer. Since high-speed bulk pipes have a maxpacket size
+ * of 512, we'll use that as the scsi device queue's DMA alignment
+ * mask. Guaranteeing proper alignment of the first buffer will
+ * have the desired effect because, except at the beginning and
+ * the end, scatter-gather buffers follow page boundaries. */
+ blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
+
+ /* Set the SCSI level to at least 2. We'll leave it at 3 if that's
+ * what is originally reported. We need this to avoid confusing
+ * the SCSI layer with devices that report 0 or 1, but need 10-byte
+ * commands (ala ATAPI devices behind certain bridges, or devices
+ * which simply have broken INQUIRY data).
+ *
+ * NOTE: This means /dev/sg programs (ala cdrecord) will get the
+ * actual information. This seems to be the preference for
+ * programs like that.
+ *
+ * NOTE: This also means that /proc/scsi/scsi and sysfs may report
+ * the actual value or the modified one, depending on where the
+ * data comes from.
+ */
+ if (sdev->scsi_level < SCSI_2)
+ sdev->scsi_level = sdev->sdev_target->scsi_level = SCSI_2;
+
+ return 0;
+}
+
+/***********************************************************************
+ * /proc/scsi/ functions
+ ***********************************************************************/
+
+/* we use this macro to help us write into the buffer */
+#undef SPRINTF
+#define SPRINTF(args...) \
+ do { if (pos < buffer+length) pos += sprintf(pos, ## args); } while (0)
+
+int proc_info(struct Scsi_Host *host, char *buffer,
+ char **start, off_t offset, int length, int inout)
+{
+ char *pos = buffer;
+
+ /* if someone is sending us data, just throw it away */
+ if (inout)
+ return length;
+
+ /* print the controller name */
+ SPRINTF(" Host scsi%d: %s\n", host->host_no, RTS51X_NAME);
+
+ /* print product, vendor, and driver version strings */
+ SPRINTF(" Vendor: Realtek Corp.\n");
+ SPRINTF(" Product: RTS51xx USB Card Reader\n");
+ SPRINTF(" Version: %s\n", DRIVER_VERSION);
+ SPRINTF(" Build: %s\n", __TIME__);
+
+ /*
+ * Calculate start of next buffer, and return value.
+ */
+ *start = buffer + offset;
+
+ if ((pos - buffer) < offset)
+ return 0;
+ else if ((pos - buffer - offset) < length)
+ return pos - buffer - offset;
+ else
+ return length;
+}
+
+/* queue a command */
+/* This is always called with scsi_lock(host) held */
+int queuecommand_lck(struct scsi_cmnd *srb, void (*done) (struct scsi_cmnd *))
+{
+ struct rts51x_chip *chip = host_to_rts51x(srb->device->host);
+
+ /* check for state-transition errors */
+ if (chip->srb != NULL) {
+ RTS51X_DEBUGP("Error in %s: chip->srb = %p\n",
+ __func__, chip->srb);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ /* fail the command if we are disconnecting */
+ if (test_bit(FLIDX_DISCONNECTING, &chip->usb->dflags)) {
+ RTS51X_DEBUGP("Fail command during disconnect\n");
+ srb->result = DID_NO_CONNECT << 16;
+ done(srb);
+ return 0;
+ }
+
+ /* enqueue the command and wake up the control thread */
+ srb->scsi_done = done;
+ chip->srb = srb;
+ complete(&chip->usb->cmnd_ready);
+
+ return 0;
+}
+
+DEF_SCSI_QCMD(queuecommand)
+
+/***********************************************************************
+ * Error handling functions
+ ***********************************************************************/
+/* Command timeout and abort */
+int command_abort(struct scsi_cmnd *srb)
+{
+ struct rts51x_chip *chip = host_to_rts51x(srb->device->host);
+
+ RTS51X_DEBUGP("%s called\n", __func__);
+
+ /* us->srb together with the TIMED_OUT, RESETTING, and ABORTING
+ * bits are protected by the host lock. */
+ scsi_lock(rts51x_to_host(chip));
+
+ /* Is this command still active? */
+ if (chip->srb != srb) {
+ scsi_unlock(rts51x_to_host(chip));
+ RTS51X_DEBUGP("-- nothing to abort\n");
+ return FAILED;
+ }
+
+ /* Set the TIMED_OUT bit. Also set the ABORTING bit, but only if
+ * a device reset isn't already in progress (to avoid interfering
+ * with the reset). Note that we must retain the host lock while
+ * calling usb_stor_stop_transport(); otherwise it might interfere
+ * with an auto-reset that begins as soon as we release the lock. */
+ set_bit(FLIDX_TIMED_OUT, &chip->usb->dflags);
+ if (!test_bit(FLIDX_RESETTING, &chip->usb->dflags)) {
+ set_bit(FLIDX_ABORTING, &chip->usb->dflags);
+ /* rts51x_stop_transport(us); */
+ }
+ scsi_unlock(rts51x_to_host(chip));
+
+ /* Wait for the aborted command to finish */
+ wait_for_completion(&chip->usb->notify);
+ return SUCCESS;
+}
+
+/* This invokes the transport reset mechanism to reset the state of the
+ * device */
+int device_reset(struct scsi_cmnd *srb)
+{
+ int result = 0;
+
+ RTS51X_DEBUGP("%s called\n", __func__);
+
+ return result < 0 ? FAILED : SUCCESS;
+}
+
+/* Simulate a SCSI bus reset by resetting the device's USB port. */
+int bus_reset(struct scsi_cmnd *srb)
+{
+ int result = 0;
+
+ RTS51X_DEBUGP("%s called\n", __func__);
+
+ return result < 0 ? FAILED : SUCCESS;
+}
+
+static const char *rts5139_info(struct Scsi_Host *host)
+{
+ return "SCSI emulation for RTS5139 USB card reader";
+}
+
+struct scsi_host_template rts51x_host_template = {
+ /* basic userland interface stuff */
+ .name = RTS51X_NAME,
+ .proc_name = RTS51X_NAME,
+ .proc_info = proc_info,
+ .info = rts5139_info,
+
+ /* command interface -- queued only */
+ .queuecommand = queuecommand,
+
+ /* error and abort handlers */
+ .eh_abort_handler = command_abort,
+ .eh_device_reset_handler = device_reset,
+ .eh_bus_reset_handler = bus_reset,
+
+ /* queue commands only, only one command per LUN */
+ .can_queue = 1,
+ .cmd_per_lun = 1,
+
+ /* unknown initiator id */
+ .this_id = -1,
+
+ .slave_alloc = slave_alloc,
+ .slave_configure = slave_configure,
+
+ /* lots of sg segments can be handled */
+ .sg_tablesize = SG_ALL,
+
+ /* limit the total size of a transfer to 120 KB */
+ .max_sectors = 240,
+
+	/* merge commands... this seems to help performance, but
+	 * periodically someone should test whether this is still
+	 * the optimal setting.
+	 */
+ .use_clustering = 1,
+
+ /* emulated HBA */
+ .emulated = 1,
+
+ /* we do our own delay after a device or bus reset */
+ .skip_settle_delay = 1,
+
+ /* sysfs device attributes */
+ /* .sdev_attrs = sysfs_device_attr_list, */
+
+ /* module management */
+ .module = THIS_MODULE
+};
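+
+/*
+ * Note: .max_sectors = 240 caps a single transfer at
+ * 240 * 512 bytes = 122880 bytes = 120 KB, which is where the
+ * "120 KB" in the comment above comes from.
+ */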
+
--- /dev/null
+/*
+ * opal driver interface to hvc_console.c
+ *
+ * Copyright 2011 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/console.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
++#include <linux/export.h>
+
+#include <asm/hvconsole.h>
+#include <asm/prom.h>
+#include <asm/firmware.h>
+#include <asm/hvsi.h>
+#include <asm/udbg.h>
+#include <asm/opal.h>
+
+#include "hvc_console.h"
+
+static const char hvc_opal_name[] = "hvc_opal";
+
+static struct of_device_id hvc_opal_match[] __devinitdata = {
+ { .name = "serial", .compatible = "ibm,opal-console-raw" },
+ { .name = "serial", .compatible = "ibm,opal-console-hvsi" },
+ { },
+};
+
+typedef enum hv_protocol {
+ HV_PROTOCOL_RAW,
+ HV_PROTOCOL_HVSI
+} hv_protocol_t;
+
+struct hvc_opal_priv {
+ hv_protocol_t proto; /* Raw data or HVSI packets */
+ struct hvsi_priv hvsi; /* HVSI specific data */
+};
+static struct hvc_opal_priv *hvc_opal_privs[MAX_NR_HVC_CONSOLES];
+
+/* For early boot console */
+static struct hvc_opal_priv hvc_opal_boot_priv;
+static u32 hvc_opal_boot_termno;
+
+static const struct hv_ops hvc_opal_raw_ops = {
+ .get_chars = opal_get_chars,
+ .put_chars = opal_put_chars,
+ .notifier_add = notifier_add_irq,
+ .notifier_del = notifier_del_irq,
+ .notifier_hangup = notifier_hangup_irq,
+};
+
+static int hvc_opal_hvsi_get_chars(uint32_t vtermno, char *buf, int count)
+{
+ struct hvc_opal_priv *pv = hvc_opal_privs[vtermno];
+
+ if (WARN_ON(!pv))
+ return -ENODEV;
+
+ return hvsilib_get_chars(&pv->hvsi, buf, count);
+}
+
+static int hvc_opal_hvsi_put_chars(uint32_t vtermno, const char *buf, int count)
+{
+ struct hvc_opal_priv *pv = hvc_opal_privs[vtermno];
+
+ if (WARN_ON(!pv))
+ return -ENODEV;
+
+ return hvsilib_put_chars(&pv->hvsi, buf, count);
+}
+
+static int hvc_opal_hvsi_open(struct hvc_struct *hp, int data)
+{
+ struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
+ int rc;
+
+ pr_devel("HVSI@%x: do open !\n", hp->vtermno);
+
+ rc = notifier_add_irq(hp, data);
+ if (rc)
+ return rc;
+
+ return hvsilib_open(&pv->hvsi, hp);
+}
+
+static void hvc_opal_hvsi_close(struct hvc_struct *hp, int data)
+{
+ struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
+
+ pr_devel("HVSI@%x: do close !\n", hp->vtermno);
+
+ hvsilib_close(&pv->hvsi, hp);
+
+ notifier_del_irq(hp, data);
+}
+
+static void hvc_opal_hvsi_hangup(struct hvc_struct *hp, int data)
+{
+ struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
+
+ pr_devel("HVSI@%x: do hangup !\n", hp->vtermno);
+
+ hvsilib_close(&pv->hvsi, hp);
+
+ notifier_hangup_irq(hp, data);
+}
+
+static int hvc_opal_hvsi_tiocmget(struct hvc_struct *hp)
+{
+ struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
+
+ if (!pv)
+ return -EINVAL;
+ return pv->hvsi.mctrl;
+}
+
+static int hvc_opal_hvsi_tiocmset(struct hvc_struct *hp, unsigned int set,
+ unsigned int clear)
+{
+ struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
+
+ pr_devel("HVSI@%x: Set modem control, set=%x,clr=%x\n",
+ hp->vtermno, set, clear);
+
+ if (set & TIOCM_DTR)
+ hvsilib_write_mctrl(&pv->hvsi, 1);
+ else if (clear & TIOCM_DTR)
+ hvsilib_write_mctrl(&pv->hvsi, 0);
+
+ return 0;
+}
+
+static const struct hv_ops hvc_opal_hvsi_ops = {
+ .get_chars = hvc_opal_hvsi_get_chars,
+ .put_chars = hvc_opal_hvsi_put_chars,
+ .notifier_add = hvc_opal_hvsi_open,
+ .notifier_del = hvc_opal_hvsi_close,
+ .notifier_hangup = hvc_opal_hvsi_hangup,
+ .tiocmget = hvc_opal_hvsi_tiocmget,
+ .tiocmset = hvc_opal_hvsi_tiocmset,
+};
+
+static int __devinit hvc_opal_probe(struct platform_device *dev)
+{
+ const struct hv_ops *ops;
+ struct hvc_struct *hp;
+ struct hvc_opal_priv *pv;
+ hv_protocol_t proto;
+ unsigned int termno, boot = 0;
+ const __be32 *reg;
+
+ if (of_device_is_compatible(dev->dev.of_node, "ibm,opal-console-raw")) {
+ proto = HV_PROTOCOL_RAW;
+ ops = &hvc_opal_raw_ops;
+ } else if (of_device_is_compatible(dev->dev.of_node,
+ "ibm,opal-console-hvsi")) {
+ proto = HV_PROTOCOL_HVSI;
+ ops = &hvc_opal_hvsi_ops;
+ } else {
+ pr_err("hvc_opal: Unkown protocol for %s\n",
+ dev->dev.of_node->full_name);
+ return -ENXIO;
+ }
+
+ reg = of_get_property(dev->dev.of_node, "reg", NULL);
+ termno = reg ? be32_to_cpup(reg) : 0;
+
+ /* Is it our boot one ? */
+ if (hvc_opal_privs[termno] == &hvc_opal_boot_priv) {
+ pv = hvc_opal_privs[termno];
+ boot = 1;
+ } else if (hvc_opal_privs[termno] == NULL) {
+ pv = kzalloc(sizeof(struct hvc_opal_priv), GFP_KERNEL);
+ if (!pv)
+ return -ENOMEM;
+ pv->proto = proto;
+ hvc_opal_privs[termno] = pv;
+ if (proto == HV_PROTOCOL_HVSI)
+ hvsilib_init(&pv->hvsi, opal_get_chars, opal_put_chars,
+ termno, 0);
+
+		/* Instantiate now to establish a mapping index==vtermno */
+ hvc_instantiate(termno, termno, ops);
+ } else {
+ pr_err("hvc_opal: Device %s has duplicate terminal number #%d\n",
+ dev->dev.of_node->full_name, termno);
+ return -ENXIO;
+ }
+
+ pr_info("hvc%d: %s protocol on %s%s\n", termno,
+ proto == HV_PROTOCOL_RAW ? "raw" : "hvsi",
+ dev->dev.of_node->full_name,
+ boot ? " (boot console)" : "");
+
+ /* We don't do IRQ yet */
+ hp = hvc_alloc(termno, 0, ops, MAX_VIO_PUT_CHARS);
+ if (IS_ERR(hp))
+ return PTR_ERR(hp);
+ dev_set_drvdata(&dev->dev, hp);
+
+ return 0;
+}
+
+static int __devexit hvc_opal_remove(struct platform_device *dev)
+{
+ struct hvc_struct *hp = dev_get_drvdata(&dev->dev);
+ int rc, termno;
+
+ termno = hp->vtermno;
+ rc = hvc_remove(hp);
+ if (rc == 0) {
+ if (hvc_opal_privs[termno] != &hvc_opal_boot_priv)
+ kfree(hvc_opal_privs[termno]);
+ hvc_opal_privs[termno] = NULL;
+ }
+ return rc;
+}
+
+static struct platform_driver hvc_opal_driver = {
+ .probe = hvc_opal_probe,
+ .remove = __devexit_p(hvc_opal_remove),
+ .driver = {
+ .name = hvc_opal_name,
+ .owner = THIS_MODULE,
+ .of_match_table = hvc_opal_match,
+ }
+};
+
+static int __init hvc_opal_init(void)
+{
+ if (!firmware_has_feature(FW_FEATURE_OPAL))
+ return -ENODEV;
+
+ /* Register as a vio device to receive callbacks */
+ return platform_driver_register(&hvc_opal_driver);
+}
+module_init(hvc_opal_init);
+
+static void __exit hvc_opal_exit(void)
+{
+ platform_driver_unregister(&hvc_opal_driver);
+}
+module_exit(hvc_opal_exit);
+
+static void udbg_opal_putc(char c)
+{
+ unsigned int termno = hvc_opal_boot_termno;
+ int count = -1;
+
+ if (c == '\n')
+ udbg_opal_putc('\r');
+
+	do {
+		switch (hvc_opal_boot_priv.proto) {
+		case HV_PROTOCOL_RAW:
+			count = opal_put_chars(termno, &c, 1);
+			break;
+		case HV_PROTOCOL_HVSI:
+			count = hvc_opal_hvsi_put_chars(termno, &c, 1);
+			break;
+		}
+	} while (count == 0 || count == -EAGAIN);
+}
+
+static int udbg_opal_getc_poll(void)
+{
+ unsigned int termno = hvc_opal_boot_termno;
+ int rc = 0;
+ char c;
+
+	switch (hvc_opal_boot_priv.proto) {
+ case HV_PROTOCOL_RAW:
+ rc = opal_get_chars(termno, &c, 1);
+ break;
+ case HV_PROTOCOL_HVSI:
+ rc = hvc_opal_hvsi_get_chars(termno, &c, 1);
+ break;
+ }
+ if (!rc)
+ return -1;
+ return c;
+}
+
+static int udbg_opal_getc(void)
+{
+ int ch;
+ for (;;) {
+ ch = udbg_opal_getc_poll();
+ if (ch == -1) {
+ /* This shouldn't be needed...but... */
+ volatile unsigned long delay;
+			for (delay = 0; delay < 2000000; delay++)
+ ;
+ } else {
+ return ch;
+ }
+ }
+}
+
+static void udbg_init_opal_common(void)
+{
+ udbg_putc = udbg_opal_putc;
+ udbg_getc = udbg_opal_getc;
+ udbg_getc_poll = udbg_opal_getc_poll;
+ tb_ticks_per_usec = 0x200; /* Make udelay not suck */
+}
+
+void __init hvc_opal_init_early(void)
+{
+ struct device_node *stdout_node = NULL;
+ const u32 *termno;
+ const char *name = NULL;
+ const struct hv_ops *ops;
+ u32 index;
+
+ /* find the boot console from /chosen/stdout */
+ if (of_chosen)
+ name = of_get_property(of_chosen, "linux,stdout-path", NULL);
+ if (name) {
+ stdout_node = of_find_node_by_path(name);
+ if (!stdout_node) {
+ pr_err("hvc_opal: Failed to locate default console!\n");
+ return;
+ }
+ } else {
+ struct device_node *opal, *np;
+
+ /* Current OPAL takeover doesn't provide the stdout
+ * path, so we hard wire it
+ */
+ opal = of_find_node_by_path("/ibm,opal/consoles");
+ if (opal)
+ pr_devel("hvc_opal: Found consoles in new location\n");
+ if (!opal) {
+ opal = of_find_node_by_path("/ibm,opal");
+ if (opal)
+ pr_devel("hvc_opal: "
+ "Found consoles in old location\n");
+ }
+ if (!opal)
+ return;
+ for_each_child_of_node(opal, np) {
+ if (!strcmp(np->name, "serial")) {
+ stdout_node = np;
+ break;
+ }
+ }
+ of_node_put(opal);
+ }
+ if (!stdout_node)
+ return;
+ termno = of_get_property(stdout_node, "reg", NULL);
+ index = termno ? *termno : 0;
+ if (index >= MAX_NR_HVC_CONSOLES)
+ return;
+ hvc_opal_privs[index] = &hvc_opal_boot_priv;
+
+ /* Check the protocol */
+ if (of_device_is_compatible(stdout_node, "ibm,opal-console-raw")) {
+ hvc_opal_boot_priv.proto = HV_PROTOCOL_RAW;
+ ops = &hvc_opal_raw_ops;
+ pr_devel("hvc_opal: Found RAW console\n");
+	} else if (of_device_is_compatible(stdout_node,
+					   "ibm,opal-console-hvsi")) {
+ hvc_opal_boot_priv.proto = HV_PROTOCOL_HVSI;
+ ops = &hvc_opal_hvsi_ops;
+ hvsilib_init(&hvc_opal_boot_priv.hvsi, opal_get_chars,
+ opal_put_chars, index, 1);
+ /* HVSI, perform the handshake now */
+ hvsilib_establish(&hvc_opal_boot_priv.hvsi);
+ pr_devel("hvc_opal: Found HVSI console\n");
+ } else
+ goto out;
+ hvc_opal_boot_termno = index;
+ udbg_init_opal_common();
+ add_preferred_console("hvc", index, NULL);
+ hvc_instantiate(index, index, ops);
+out:
+ of_node_put(stdout_node);
+}
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_RAW
+void __init udbg_init_debug_opal(void)
+{
+ u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO;
+ hvc_opal_privs[index] = &hvc_opal_boot_priv;
+ hvc_opal_boot_priv.proto = HV_PROTOCOL_RAW;
+ hvc_opal_boot_termno = index;
+ udbg_init_opal_common();
+}
+#endif /* CONFIG_PPC_EARLY_DEBUG_OPAL_RAW */
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_HVSI
+void __init udbg_init_debug_opal_hvsi(void)
+{
+ u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO;
+ hvc_opal_privs[index] = &hvc_opal_boot_priv;
+ hvc_opal_boot_termno = index;
+ udbg_init_opal_common();
+ hvsilib_init(&hvc_opal_boot_priv.hvsi, opal_get_chars, opal_put_chars,
+ index, 1);
+ hvsilib_establish(&hvc_opal_boot_priv.hvsi);
+}
+#endif /* CONFIG_PPC_EARLY_DEBUG_OPAL_HVSI */
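+
+/*
+ * The hvc backend contract used throughout this file reduces to two
+ * calls: hvc_instantiate() registers a vterm early enough to serve as
+ * the boot console, and hvc_alloc() binds a struct hvc_struct to it at
+ * probe time. A minimal raw backend, sketched with the OPAL entry points
+ * already used above (my_raw_ops is an illustrative name only):
+ *
+ * static const struct hv_ops my_raw_ops = {
+ * .get_chars = opal_get_chars,
+ * .put_chars = opal_put_chars,
+ * };
+ *
+ * hvc_instantiate(termno, termno, &my_raw_ops); (early: index == vtermno)
+ * hp = hvc_alloc(termno, 0, &my_raw_ops, MAX_VIO_PUT_CHARS); (probe, no IRQ)
+ */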
#include <linux/serial_reg.h>
#include <linux/serial_core.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/irqreturn.h>
#include <linux/mutex.h>
+ #include <linux/export.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/nwpserial.h>
--- /dev/null
+/**
+ * core.c - DesignWare USB3 DRD Controller Core file
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
+ * All rights reserved.
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "core.h"
+#include "gadget.h"
+#include "io.h"
+
+#include "debug.h"
+
+/**
+ * dwc3_core_soft_reset - Issues core soft reset and PHY reset
+ * @dwc: pointer to our context structure
+ */
+static void dwc3_core_soft_reset(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ /* Before Resetting PHY, put Core in Reset */
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ reg |= DWC3_GCTL_CORESOFTRESET;
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+
+ /* Assert USB3 PHY reset */
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+ reg |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
+ dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+
+ /* Assert USB2 PHY reset */
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ reg |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+
+ mdelay(100);
+
+ /* Clear USB3 PHY reset */
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+ reg &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
+ dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+
+ /* Clear USB2 PHY reset */
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ reg &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+
+ /* After PHYs are stable we can take Core out of reset state */
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ reg &= ~DWC3_GCTL_CORESOFTRESET;
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+}
+
+/**
+ * dwc3_free_one_event_buffer - Frees one event buffer
+ * @dwc: Pointer to our controller context structure
+ * @evt: Pointer to event buffer to be freed
+ */
+static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
+ struct dwc3_event_buffer *evt)
+{
+ dma_free_coherent(dwc->dev, evt->length, evt->buf, evt->dma);
+ kfree(evt);
+}
+
+/**
+ * dwc3_alloc_one_event_buffer - Allocates one event buffer structure
+ * @dwc: Pointer to our controller context structure
+ * @length: size of the event buffer
+ *
+ * Returns a pointer to the allocated event buffer structure on success,
+ * otherwise ERR_PTR(errno).
+ */
+static struct dwc3_event_buffer *__devinit
+dwc3_alloc_one_event_buffer(struct dwc3 *dwc, unsigned length)
+{
+ struct dwc3_event_buffer *evt;
+
+ evt = kzalloc(sizeof(*evt), GFP_KERNEL);
+ if (!evt)
+ return ERR_PTR(-ENOMEM);
+
+ evt->dwc = dwc;
+ evt->length = length;
+ evt->buf = dma_alloc_coherent(dwc->dev, length,
+ &evt->dma, GFP_KERNEL);
+ if (!evt->buf) {
+ kfree(evt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return evt;
+}
+
+/**
+ * dwc3_free_event_buffers - frees all allocated event buffers
+ * @dwc: Pointer to our controller context structure
+ */
+static void dwc3_free_event_buffers(struct dwc3 *dwc)
+{
+ struct dwc3_event_buffer *evt;
+ int i;
+
+ for (i = 0; i < DWC3_EVENT_BUFFERS_NUM; i++) {
+ evt = dwc->ev_buffs[i];
+ if (evt) {
+ dwc3_free_one_event_buffer(dwc, evt);
+ dwc->ev_buffs[i] = NULL;
+ }
+ }
+}
+
+/**
+ * dwc3_alloc_event_buffers - Allocates @num event buffers of size @length
+ * @dwc: Pointer to our controller context structure
+ * @num: number of event buffers to allocate
+ * @length: size of event buffer
+ *
+ * Returns 0 on success, otherwise a negative errno. In the error case, dwc
+ * may contain some of the requested buffers already allocated, but not all
+ * of them.
+ */
+static int __devinit dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned num,
+ unsigned length)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ struct dwc3_event_buffer *evt;
+
+ evt = dwc3_alloc_one_event_buffer(dwc, length);
+ if (IS_ERR(evt)) {
+ dev_err(dwc->dev, "can't allocate event buffer\n");
+ return PTR_ERR(evt);
+ }
+ dwc->ev_buffs[i] = evt;
+ }
+
+ return 0;
+}
+
+/**
+ * dwc3_event_buffers_setup - set up our allocated event buffers
+ * @dwc: Pointer to our controller context structure
+ *
+ * Returns 0 on success, otherwise a negative errno.
+ */
+static int __devinit dwc3_event_buffers_setup(struct dwc3 *dwc)
+{
+ struct dwc3_event_buffer *evt;
+ int n;
+
+ for (n = 0; n < DWC3_EVENT_BUFFERS_NUM; n++) {
+ evt = dwc->ev_buffs[n];
+ dev_dbg(dwc->dev, "Event buf %p dma %08llx length %d\n",
+ evt->buf, (unsigned long long) evt->dma,
+ evt->length);
+
+ dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(n),
+ lower_32_bits(evt->dma));
+ dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n),
+ upper_32_bits(evt->dma));
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n),
+ evt->length & 0xffff);
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(n), 0);
+ }
+
+ return 0;
+}
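+
+/*
+ * Note on the writes above: an event buffer may sit anywhere in a 64-bit
+ * DMA space, so the address is split across two 32-bit registers.
+ * lower_32_bits(x) is (u32)x and upper_32_bits(x) is (u32)(x >> 32), so
+ * GEVNTADRLO/GEVNTADRHI together hold the full dma_addr_t.
+ */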
+
+static void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
+{
+ struct dwc3_event_buffer *evt;
+ int n;
+
+ for (n = 0; n < DWC3_EVENT_BUFFERS_NUM; n++) {
+ evt = dwc->ev_buffs[n];
+ dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(n), 0);
+ dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n), 0);
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n), 0);
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(n), 0);
+ }
+}
+
+/**
+ * dwc3_core_init - Low-level initialization of DWC3 Core
+ * @dwc: Pointer to our controller context structure
+ *
+ * Returns 0 on success, otherwise a negative errno.
+ */
+static int __devinit dwc3_core_init(struct dwc3 *dwc)
+{
+ unsigned long timeout;
+ u32 reg;
+ int ret;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
+ /* This should read as U3 followed by revision number */
+ if ((reg & DWC3_GSNPSID_MASK) != 0x55330000) {
+ dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
+ ret = -ENODEV;
+ goto err0;
+ }
+ dwc->revision = reg & DWC3_GSNPSREV_MASK;
+
+ dwc3_core_soft_reset(dwc);
+
+ /* issue device SoftReset too */
+ timeout = jiffies + msecs_to_jiffies(500);
+ dwc3_writel(dwc->regs, DWC3_DCTL, DWC3_DCTL_CSFTRST);
+ do {
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ if (!(reg & DWC3_DCTL_CSFTRST))
+ break;
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(dwc->dev, "Reset Timed Out\n");
+ ret = -ETIMEDOUT;
+ goto err0;
+ }
+
+ cpu_relax();
+ } while (true);
+
+ ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_NUM,
+ DWC3_EVENT_BUFFERS_SIZE);
+ if (ret) {
+ dev_err(dwc->dev, "failed to allocate event buffers\n");
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ ret = dwc3_event_buffers_setup(dwc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to setup event buffers\n");
+ goto err1;
+ }
+
+ return 0;
+
+err1:
+ dwc3_free_event_buffers(dwc);
+
+err0:
+ return ret;
+}
+
+static void dwc3_core_exit(struct dwc3 *dwc)
+{
+ dwc3_event_buffers_cleanup(dwc);
+ dwc3_free_event_buffers(dwc);
+}
+
+#define DWC3_ALIGN_MASK (16 - 1)
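+
+/*
+ * dwc3_probe() below uses the usual over-allocate-and-align trick, since
+ * kzalloc() only guarantees ARCH_KMALLOC_MINALIGN alignment. In sketch
+ * form, with PTR_ALIGN(p, a) rounding p up to the next multiple of a:
+ *
+ * mem = kzalloc(sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL);
+ * dwc = PTR_ALIGN(mem, DWC3_ALIGN_MASK + 1); (16-byte aligned view)
+ * dwc->mem = mem; (original pointer, kept for kfree())
+ */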
+
+static int __devinit dwc3_probe(struct platform_device *pdev)
+{
+ const struct platform_device_id *id = platform_get_device_id(pdev);
+ struct resource *res;
+ struct dwc3 *dwc;
+ void __iomem *regs;
+ unsigned int features = id->driver_data;
+ int ret = -ENOMEM;
+ int irq;
+ void *mem;
+
+ mem = kzalloc(sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL);
+ if (!mem) {
+ dev_err(&pdev->dev, "not enough memory\n");
+ goto err0;
+ }
+ dwc = PTR_ALIGN(mem, DWC3_ALIGN_MASK + 1);
+ dwc->mem = mem;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "missing resource\n");
+ goto err1;
+ }
+
+ res = request_mem_region(res->start, resource_size(res),
+ dev_name(&pdev->dev));
+ if (!res) {
+ dev_err(&pdev->dev, "can't request mem region\n");
+ goto err1;
+ }
+
+ regs = ioremap(res->start, resource_size(res));
+ if (!regs) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ goto err2;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "missing IRQ\n");
+ goto err3;
+ }
+
+ spin_lock_init(&dwc->lock);
+ platform_set_drvdata(pdev, dwc);
+
+ dwc->regs = regs;
+ dwc->regs_size = resource_size(res);
+ dwc->dev = &pdev->dev;
+ dwc->irq = irq;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_forbid(&pdev->dev);
+
+ ret = dwc3_core_init(dwc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize core\n");
+ goto err3;
+ }
+
+ if (features & DWC3_HAS_PERIPHERAL) {
+ ret = dwc3_gadget_init(dwc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialized gadget\n");
+ goto err4;
+ }
+ }
+
+ ret = dwc3_debugfs_init(dwc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize debugfs\n");
+ goto err5;
+ }
+
+ pm_runtime_allow(&pdev->dev);
+
+ return 0;
+
+err5:
+ if (features & DWC3_HAS_PERIPHERAL)
+ dwc3_gadget_exit(dwc);
+
+err4:
+ dwc3_core_exit(dwc);
+
+err3:
+ iounmap(regs);
+
+err2:
+ release_mem_region(res->start, resource_size(res));
+
+err1:
+ kfree(dwc->mem);
+
+err0:
+ return ret;
+}
+
+static int __devexit dwc3_remove(struct platform_device *pdev)
+{
+ const struct platform_device_id *id = platform_get_device_id(pdev);
+ struct dwc3 *dwc = platform_get_drvdata(pdev);
+ struct resource *res;
+ unsigned int features = id->driver_data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ dwc3_debugfs_exit(dwc);
+
+ if (features & DWC3_HAS_PERIPHERAL)
+ dwc3_gadget_exit(dwc);
+
+ dwc3_core_exit(dwc);
+ release_mem_region(res->start, resource_size(res));
+ iounmap(dwc->regs);
+ kfree(dwc->mem);
+
+ return 0;
+}
+
+static const struct platform_device_id dwc3_id_table[] __devinitconst = {
+ {
+ .name = "dwc3-omap",
+ .driver_data = (DWC3_HAS_PERIPHERAL
+ | DWC3_HAS_XHCI
+ | DWC3_HAS_OTG),
+ },
+ {
+ .name = "dwc3-pci",
+ .driver_data = DWC3_HAS_PERIPHERAL,
+ },
+ { }, /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(platform, dwc3_id_table);
+
+static struct platform_driver dwc3_driver = {
+ .probe = dwc3_probe,
+ .remove = __devexit_p(dwc3_remove),
+ .driver = {
+ .name = "dwc3",
+ },
+ .id_table = dwc3_id_table,
+};
+
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");
+
+static int __devinit dwc3_init(void)
+{
+ return platform_driver_register(&dwc3_driver);
+}
+module_init(dwc3_init);
+
+static void __exit dwc3_exit(void)
+{
+ platform_driver_unregister(&dwc3_driver);
+}
+module_exit(dwc3_exit);
--- /dev/null
+/**
+ * dwc3-omap.c - OMAP Specific Glue layer
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
+ * All rights reserved.
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/dwc3-omap.h>
+#include <linux/dma-mapping.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+
+#include "io.h"
+
+/*
+ * All these registers belong to OMAP's Wrapper around the
+ * DesignWare USB3 Core.
+ */
+
+#define USBOTGSS_REVISION 0x0000
+#define USBOTGSS_SYSCONFIG 0x0010
+#define USBOTGSS_IRQ_EOI 0x0020
+#define USBOTGSS_IRQSTATUS_RAW_0 0x0024
+#define USBOTGSS_IRQSTATUS_0 0x0028
+#define USBOTGSS_IRQENABLE_SET_0 0x002c
+#define USBOTGSS_IRQENABLE_CLR_0 0x0030
+#define USBOTGSS_IRQSTATUS_RAW_1 0x0034
+#define USBOTGSS_IRQSTATUS_1 0x0038
+#define USBOTGSS_IRQENABLE_SET_1 0x003c
+#define USBOTGSS_IRQENABLE_CLR_1 0x0040
+#define USBOTGSS_UTMI_OTG_CTRL 0x0080
+#define USBOTGSS_UTMI_OTG_STATUS 0x0084
+#define USBOTGSS_MMRAM_OFFSET 0x0100
+#define USBOTGSS_FLADJ 0x0104
+#define USBOTGSS_DEBUG_CFG 0x0108
+#define USBOTGSS_DEBUG_DATA 0x010c
+
+/* SYSCONFIG REGISTER */
+#define USBOTGSS_SYSCONFIG_DMADISABLE (1 << 16)
+#define USBOTGSS_SYSCONFIG_STANDBYMODE(x) ((x) << 4)
+
+#define USBOTGSS_STANDBYMODE_FORCE_STANDBY 0
+#define USBOTGSS_STANDBYMODE_NO_STANDBY 1
+#define USBOTGSS_STANDBYMODE_SMART_STANDBY 2
+#define USBOTGSS_STANDBYMODE_SMART_WAKEUP 3
+
+#define USBOTGSS_STANDBYMODE_MASK (0x03 << 4)
+
+#define USBOTGSS_SYSCONFIG_IDLEMODE(x) ((x) << 2)
+
+#define USBOTGSS_IDLEMODE_FORCE_IDLE 0
+#define USBOTGSS_IDLEMODE_NO_IDLE 1
+#define USBOTGSS_IDLEMODE_SMART_IDLE 2
+#define USBOTGSS_IDLEMODE_SMART_WAKEUP 3
+
+#define USBOTGSS_IDLEMODE_MASK (0x03 << 2)
+
+/* IRQ_EOI REGISTER */
+#define USBOTGSS_IRQ_EOI_LINE_NUMBER (1 << 0)
+
+/* IRQS0 BITS */
+#define USBOTGSS_IRQO_COREIRQ_ST (1 << 0)
+
+/* IRQ1 BITS */
+#define USBOTGSS_IRQ1_DMADISABLECLR (1 << 17)
+#define USBOTGSS_IRQ1_OEVT (1 << 16)
+#define USBOTGSS_IRQ1_DRVVBUS_RISE (1 << 13)
+#define USBOTGSS_IRQ1_CHRGVBUS_RISE (1 << 12)
+#define USBOTGSS_IRQ1_DISCHRGVBUS_RISE (1 << 11)
+#define USBOTGSS_IRQ1_IDPULLUP_RISE (1 << 8)
+#define USBOTGSS_IRQ1_DRVVBUS_FALL (1 << 5)
+#define USBOTGSS_IRQ1_CHRGVBUS_FALL (1 << 4)
+#define USBOTGSS_IRQ1_DISCHRGVBUS_FALL (1 << 3)
+#define USBOTGSS_IRQ1_IDPULLUP_FALL (1 << 0)
+
+/* UTMI_OTG_CTRL REGISTER */
+#define USBOTGSS_UTMI_OTG_CTRL_DRVVBUS (1 << 5)
+#define USBOTGSS_UTMI_OTG_CTRL_CHRGVBUS (1 << 4)
+#define USBOTGSS_UTMI_OTG_CTRL_DISCHRGVBUS (1 << 3)
+#define USBOTGSS_UTMI_OTG_CTRL_IDPULLUP (1 << 0)
+
+/* UTMI_OTG_STATUS REGISTER */
+#define USBOTGSS_UTMI_OTG_STATUS_SW_MODE (1 << 31)
+#define USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT (1 << 9)
+#define USBOTGSS_UTMI_OTG_STATUS_TXBITSTUFFENABLE (1 << 8)
+#define USBOTGSS_UTMI_OTG_STATUS_IDDIG (1 << 4)
+#define USBOTGSS_UTMI_OTG_STATUS_SESSEND (1 << 3)
+#define USBOTGSS_UTMI_OTG_STATUS_SESSVALID (1 << 2)
+#define USBOTGSS_UTMI_OTG_STATUS_VBUSVALID (1 << 1)
+
+struct dwc3_omap {
+ /* device lock */
+ spinlock_t lock;
+
+ struct platform_device *dwc3;
+ struct device *dev;
+
+ int irq;
+ void __iomem *base;
+
+ void *context;
+ u32 resource_size;
+
+ u32 dma_status:1;
+};
+
+static irqreturn_t dwc3_omap_interrupt(int irq, void *_omap)
+{
+ struct dwc3_omap *omap = _omap;
+ u32 reg;
+
+ spin_lock(&omap->lock);
+
+ reg = dwc3_readl(omap->base, USBOTGSS_IRQSTATUS_1);
+
+ if (reg & USBOTGSS_IRQ1_DMADISABLECLR) {
+ dev_dbg(omap->dev, "DMA Disable was Cleared\n");
+ omap->dma_status = false;
+ }
+
+ if (reg & USBOTGSS_IRQ1_OEVT)
+ dev_dbg(omap->dev, "OTG Event\n");
+
+ if (reg & USBOTGSS_IRQ1_DRVVBUS_RISE)
+ dev_dbg(omap->dev, "DRVVBUS Rise\n");
+
+ if (reg & USBOTGSS_IRQ1_CHRGVBUS_RISE)
+ dev_dbg(omap->dev, "CHRGVBUS Rise\n");
+
+ if (reg & USBOTGSS_IRQ1_DISCHRGVBUS_RISE)
+ dev_dbg(omap->dev, "DISCHRGVBUS Rise\n");
+
+ if (reg & USBOTGSS_IRQ1_IDPULLUP_RISE)
+ dev_dbg(omap->dev, "IDPULLUP Rise\n");
+
+ if (reg & USBOTGSS_IRQ1_DRVVBUS_FALL)
+ dev_dbg(omap->dev, "DRVVBUS Fall\n");
+
+ if (reg & USBOTGSS_IRQ1_CHRGVBUS_FALL)
+ dev_dbg(omap->dev, "CHRGVBUS Fall\n");
+
+ if (reg & USBOTGSS_IRQ1_DISCHRGVBUS_FALL)
+ dev_dbg(omap->dev, "DISCHRGVBUS Fall\n");
+
+ if (reg & USBOTGSS_IRQ1_IDPULLUP_FALL)
+ dev_dbg(omap->dev, "IDPULLUP Fall\n");
+
+ dwc3_writel(omap->base, USBOTGSS_IRQSTATUS_1, reg);
+
+ reg = dwc3_readl(omap->base, USBOTGSS_IRQSTATUS_0);
+ dwc3_writel(omap->base, USBOTGSS_IRQSTATUS_0, reg);
+
+ spin_unlock(&omap->lock);
+
+ return IRQ_HANDLED;
+}
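+
+/*
+ * The handler above follows the usual OMAP write-1-to-clear idiom:
+ *
+ * reg = dwc3_readl(omap->base, USBOTGSS_IRQSTATUS_1); (latch pending bits)
+ * dwc3_writel(omap->base, USBOTGSS_IRQSTATUS_1, reg); (ack exactly those)
+ *
+ * Writing back only the value that was read means an interrupt raised
+ * between the read and the write is left pending rather than lost.
+ */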
+
+static int __devinit dwc3_omap_probe(struct platform_device *pdev)
+{
+ struct dwc3_omap_data *pdata = pdev->dev.platform_data;
+ struct platform_device *dwc3;
+ struct dwc3_omap *omap;
+ struct resource *res;
+
+ int ret = -ENOMEM;
+ int irq;
+
+ u32 reg;
+
+ void __iomem *base;
+ void *context;
+
+ omap = kzalloc(sizeof(*omap), GFP_KERNEL);
+ if (!omap) {
+ dev_err(&pdev->dev, "not enough memory\n");
+ goto err0;
+ }
+
+ platform_set_drvdata(pdev, omap);
+
+ irq = platform_get_irq(pdev, 1);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "missing IRQ resource\n");
+ ret = -EINVAL;
+ goto err1;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "missing memory base resource\n");
+ ret = -EINVAL;
+ goto err1;
+ }
+
+ base = ioremap_nocache(res->start, resource_size(res));
+ if (!base) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ goto err1;
+ }
+
+ dwc3 = platform_device_alloc("dwc3-omap", -1);
+ if (!dwc3) {
+ dev_err(&pdev->dev, "couldn't allocate dwc3 device\n");
+ goto err2;
+ }
+
+ context = kzalloc(resource_size(res), GFP_KERNEL);
+ if (!context) {
+ dev_err(&pdev->dev, "couldn't allocate dwc3 context memory\n");
+ goto err3;
+ }
+
+ spin_lock_init(&omap->lock);
+ dma_set_coherent_mask(&dwc3->dev, pdev->dev.coherent_dma_mask);
+
+ dwc3->dev.parent = &pdev->dev;
+ dwc3->dev.dma_mask = pdev->dev.dma_mask;
+ dwc3->dev.dma_parms = pdev->dev.dma_parms;
+ omap->resource_size = resource_size(res);
+ omap->context = context;
+ omap->dev = &pdev->dev;
+ omap->irq = irq;
+ omap->base = base;
+ omap->dwc3 = dwc3;
+
+ reg = dwc3_readl(omap->base, USBOTGSS_UTMI_OTG_STATUS);
+
+ if (!pdata) {
+ dev_dbg(&pdev->dev, "missing platform data\n");
+ } else {
+ switch (pdata->utmi_mode) {
+ case DWC3_OMAP_UTMI_MODE_SW:
+ reg |= USBOTGSS_UTMI_OTG_STATUS_SW_MODE;
+ break;
+ case DWC3_OMAP_UTMI_MODE_HW:
+ reg &= ~USBOTGSS_UTMI_OTG_STATUS_SW_MODE;
+ break;
+ default:
+ dev_dbg(&pdev->dev, "UNKNOWN utmi mode %d\n",
+ pdata->utmi_mode);
+ }
+ }
+
+ dwc3_writel(omap->base, USBOTGSS_UTMI_OTG_STATUS, reg);
+
+ /* check the DMA Status */
+ reg = dwc3_readl(omap->base, USBOTGSS_SYSCONFIG);
+ omap->dma_status = !!(reg & USBOTGSS_SYSCONFIG_DMADISABLE);
+
+ /* Set No-Idle and No-Standby */
+ reg &= ~(USBOTGSS_STANDBYMODE_MASK
+ | USBOTGSS_IDLEMODE_MASK);
+
+ reg |= (USBOTGSS_SYSCONFIG_STANDBYMODE(USBOTGSS_STANDBYMODE_NO_STANDBY)
+ | USBOTGSS_SYSCONFIG_IDLEMODE(USBOTGSS_IDLEMODE_NO_IDLE));
+
+ dwc3_writel(omap->base, USBOTGSS_SYSCONFIG, reg);
+
+ ret = request_irq(omap->irq, dwc3_omap_interrupt, 0,
+ "dwc3-omap", omap);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request IRQ #%d --> %d\n",
+ omap->irq, ret);
+ goto err4;
+ }
+
+ /* enable all IRQs */
+ reg = USBOTGSS_IRQO_COREIRQ_ST;
+ dwc3_writel(omap->base, USBOTGSS_IRQENABLE_SET_0, reg);
+
+ reg = (USBOTGSS_IRQ1_OEVT |
+ USBOTGSS_IRQ1_DRVVBUS_RISE |
+ USBOTGSS_IRQ1_CHRGVBUS_RISE |
+ USBOTGSS_IRQ1_DISCHRGVBUS_RISE |
+ USBOTGSS_IRQ1_IDPULLUP_RISE |
+ USBOTGSS_IRQ1_DRVVBUS_FALL |
+ USBOTGSS_IRQ1_CHRGVBUS_FALL |
+ USBOTGSS_IRQ1_DISCHRGVBUS_FALL |
+ USBOTGSS_IRQ1_IDPULLUP_FALL);
+
+ dwc3_writel(omap->base, USBOTGSS_IRQENABLE_SET_1, reg);
+
+ ret = platform_device_add_resources(dwc3, pdev->resource,
+ pdev->num_resources);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't add resources to dwc3 device\n");
+ goto err5;
+ }
+
+ ret = platform_device_add(dwc3);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register dwc3 device\n");
+ goto err5;
+ }
+
+ return 0;
+
+err5:
+ free_irq(omap->irq, omap);
+
+err4:
+ kfree(omap->context);
+
+err3:
+ platform_device_put(dwc3);
+
+err2:
+ iounmap(base);
+
+err1:
+ kfree(omap);
+
+err0:
+ return ret;
+}
+
+static int __devexit dwc3_omap_remove(struct platform_device *pdev)
+{
+ struct dwc3_omap *omap = platform_get_drvdata(pdev);
+
+ platform_device_unregister(omap->dwc3);
+
+ free_irq(omap->irq, omap);
+ iounmap(omap->base);
+
+ kfree(omap->context);
+ kfree(omap);
+
+ return 0;
+}
+
+static const struct of_device_id of_dwc3_match[] = {
+	{
+		.compatible = "ti,dwc3",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, of_dwc3_match);
+
+static struct platform_driver dwc3_omap_driver = {
+ .probe = dwc3_omap_probe,
+ .remove = __devexit_p(dwc3_omap_remove),
+ .driver = {
+ .name = "omap-dwc3",
+		.of_match_table = of_dwc3_match,
+ },
+};
+
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("DesignWare USB3 OMAP Glue Layer");
+
+static int __devinit dwc3_omap_init(void)
+{
+ return platform_driver_register(&dwc3_omap_driver);
+}
+module_init(dwc3_omap_init);
+
+static void __exit dwc3_omap_exit(void)
+{
+ platform_driver_unregister(&dwc3_omap_driver);
+}
+module_exit(dwc3_omap_exit);
#include <linux/console.h>
#include <linux/backlight.h>
#include <linux/gpio.h>
+ #include <linux/module.h>
#include <video/sh_mobile_lcdc.h>
+#include <video/sh_mobile_meram.h>
#include <linux/atomic.h>
#include "sh_mobile_lcdcfb.h"
#include <linux/device.h>
#include <linux/uio.h>
#include <linux/dma-direction.h>
+#include <linux/scatterlist.h>
+ #include <linux/bitmap.h>
+ #include <asm/page.h>
-struct scatterlist;
-
/**
* typedef dma_cookie_t - an opaque DMA cookie
*
#include <linux/kernel.h>
#include <linux/uaccess.h>
+ #include <linux/export.h>
/*
- * locking rule: all changes to requests or notifiers lists
+ * locking rule: all changes to constraints or notifiers lists
* or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
* held, taken with _irqsave. One lock to rule them all
*/
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
- #include <linux/module.h>
+ #include <linux/export.h>
#include <linux/hardirq.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/rcu.h>
+
+#include "rcu.h"
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
+ #include <linux/moduleparam.h>
#include <linux/rtnetlink.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/crc32.h>
#include <linux/slab.h>
+ #include <linux/export.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <net/sch_generic.h>
#include <linux/slab.h>
+ #include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
--- /dev/null
+/*
+ * The NFC Controller Interface is the communication protocol between an
+ * NFC Controller (NFCC) and a Device Host (DH).
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * Written by Ilan Elias <ilane@ti.com>
+ *
+ * Acknowledgements:
+ * This file is based on hci_core.c, which was written
+ * by Maxim Krasnyansky.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/skbuff.h>
+
+#include "../nfc.h"
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include <linux/nfc.h>
++#include <linux/export.h>
+
+static void nci_cmd_work(struct work_struct *work);
+static void nci_rx_work(struct work_struct *work);
+static void nci_tx_work(struct work_struct *work);
+
+/* ---- NCI requests ---- */
+
+void nci_req_complete(struct nci_dev *ndev, int result)
+{
+ if (ndev->req_status == NCI_REQ_PEND) {
+ ndev->req_result = result;
+ ndev->req_status = NCI_REQ_DONE;
+ complete(&ndev->req_completion);
+ }
+}
+
+static void nci_req_cancel(struct nci_dev *ndev, int err)
+{
+ if (ndev->req_status == NCI_REQ_PEND) {
+ ndev->req_result = err;
+ ndev->req_status = NCI_REQ_CANCELED;
+ complete(&ndev->req_completion);
+ }
+}
+
+/* Execute request and wait for completion. */
+static int __nci_request(struct nci_dev *ndev,
+ void (*req)(struct nci_dev *ndev, unsigned long opt),
+ unsigned long opt,
+ __u32 timeout)
+{
+ int rc = 0;
+ unsigned long completion_rc;
+
+ ndev->req_status = NCI_REQ_PEND;
+
+ init_completion(&ndev->req_completion);
+ req(ndev, opt);
+ completion_rc = wait_for_completion_interruptible_timeout(
+ &ndev->req_completion,
+ timeout);
+
+ nfc_dbg("wait_for_completion return %ld", completion_rc);
+
+ if (completion_rc > 0) {
+ switch (ndev->req_status) {
+ case NCI_REQ_DONE:
+ rc = nci_to_errno(ndev->req_result);
+ break;
+
+ case NCI_REQ_CANCELED:
+ rc = -ndev->req_result;
+ break;
+
+ default:
+ rc = -ETIMEDOUT;
+ break;
+ }
+ } else {
+ nfc_err("wait_for_completion_interruptible_timeout failed %ld",
+ completion_rc);
+
+ rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
+ }
+
+ ndev->req_status = ndev->req_result = 0;
+
+ return rc;
+}
+
+static inline int nci_request(struct nci_dev *ndev,
+ void (*req)(struct nci_dev *ndev, unsigned long opt),
+ unsigned long opt, __u32 timeout)
+{
+ int rc;
+
+ if (!test_bit(NCI_UP, &ndev->flags))
+ return -ENETDOWN;
+
+ /* Serialize all requests */
+ mutex_lock(&ndev->req_lock);
+ rc = __nci_request(ndev, req, opt, timeout);
+ mutex_unlock(&ndev->req_lock);
+
+ return rc;
+}
+
+static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
+{
+ nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 0, NULL);
+}
+
+static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
+{
+ nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
+}
+
+static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
+{
+ struct nci_core_conn_create_cmd conn_cmd;
+ struct nci_rf_disc_map_cmd cmd;
+ struct disc_map_config *cfg = cmd.mapping_configs;
+ __u8 *num = &cmd.num_mapping_configs;
+ int i;
+
+ /* create static rf connection */
+ conn_cmd.target_handle = 0;
+ conn_cmd.num_target_specific_params = 0;
+ nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, 2, &conn_cmd);
+
+ /* set rf mapping configurations */
+ *num = 0;
+
+ /* by default mapping is set to NCI_RF_INTERFACE_FRAME */
+ for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
+ if (ndev->supported_rf_interfaces[i] ==
+ NCI_RF_INTERFACE_ISO_DEP) {
+ cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
+ cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
+ cfg[*num].rf_interface_type = NCI_RF_INTERFACE_ISO_DEP;
+ (*num)++;
+ } else if (ndev->supported_rf_interfaces[i] ==
+ NCI_RF_INTERFACE_NFC_DEP) {
+ cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
+ cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
+ cfg[*num].rf_interface_type = NCI_RF_INTERFACE_NFC_DEP;
+ (*num)++;
+ }
+
+ if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
+ break;
+ }
+
+ nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
+ (1 + ((*num)*sizeof(struct disc_map_config))),
+ &cmd);
+}
+
+static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
+{
+ struct nci_rf_disc_cmd cmd;
+ __u32 protocols = opt;
+
+ cmd.num_disc_configs = 0;
+
+ if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
+ (protocols & NFC_PROTO_JEWEL_MASK
+ || protocols & NFC_PROTO_MIFARE_MASK
+ || protocols & NFC_PROTO_ISO14443_MASK
+ || protocols & NFC_PROTO_NFC_DEP_MASK)) {
+ cmd.disc_configs[cmd.num_disc_configs].type =
+ NCI_DISCOVERY_TYPE_POLL_A_PASSIVE;
+ cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
+ cmd.num_disc_configs++;
+ }
+
+ if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
+ (protocols & NFC_PROTO_ISO14443_MASK)) {
+ cmd.disc_configs[cmd.num_disc_configs].type =
+ NCI_DISCOVERY_TYPE_POLL_B_PASSIVE;
+ cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
+ cmd.num_disc_configs++;
+ }
+
+ if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
+ (protocols & NFC_PROTO_FELICA_MASK
+ || protocols & NFC_PROTO_NFC_DEP_MASK)) {
+ cmd.disc_configs[cmd.num_disc_configs].type =
+ NCI_DISCOVERY_TYPE_POLL_F_PASSIVE;
+ cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
+ cmd.num_disc_configs++;
+ }
+
+ nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
+ (1 + (cmd.num_disc_configs*sizeof(struct disc_config))),
+ &cmd);
+}
+
+static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
+{
+ struct nci_rf_deactivate_cmd cmd;
+
+ cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;
+
+ nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
+ sizeof(struct nci_rf_deactivate_cmd),
+ &cmd);
+}
+
+static int nci_open_device(struct nci_dev *ndev)
+{
+ int rc = 0;
+
+ mutex_lock(&ndev->req_lock);
+
+ if (test_bit(NCI_UP, &ndev->flags)) {
+ rc = -EALREADY;
+ goto done;
+ }
+
+ if (ndev->ops->open(ndev)) {
+ rc = -EIO;
+ goto done;
+ }
+
+ atomic_set(&ndev->cmd_cnt, 1);
+
+ set_bit(NCI_INIT, &ndev->flags);
+
+ rc = __nci_request(ndev, nci_reset_req, 0,
+ msecs_to_jiffies(NCI_RESET_TIMEOUT));
+
+ if (!rc) {
+ rc = __nci_request(ndev, nci_init_req, 0,
+ msecs_to_jiffies(NCI_INIT_TIMEOUT));
+ }
+
+ if (!rc) {
+ rc = __nci_request(ndev, nci_init_complete_req, 0,
+ msecs_to_jiffies(NCI_INIT_TIMEOUT));
+ }
+
+ clear_bit(NCI_INIT, &ndev->flags);
+
+ if (!rc) {
+ set_bit(NCI_UP, &ndev->flags);
+ } else {
+ /* Init failed, cleanup */
+ skb_queue_purge(&ndev->cmd_q);
+ skb_queue_purge(&ndev->rx_q);
+ skb_queue_purge(&ndev->tx_q);
+
+ ndev->ops->close(ndev);
+ ndev->flags = 0;
+ }
+
+done:
+ mutex_unlock(&ndev->req_lock);
+ return rc;
+}
+
+static int nci_close_device(struct nci_dev *ndev)
+{
+ nci_req_cancel(ndev, ENODEV);
+ mutex_lock(&ndev->req_lock);
+
+ if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
+ del_timer_sync(&ndev->cmd_timer);
+ mutex_unlock(&ndev->req_lock);
+ return 0;
+ }
+
+ /* Drop RX and TX queues */
+ skb_queue_purge(&ndev->rx_q);
+ skb_queue_purge(&ndev->tx_q);
+
+ /* Flush RX and TX wq */
+ flush_workqueue(ndev->rx_wq);
+ flush_workqueue(ndev->tx_wq);
+
+ /* Reset device */
+ skb_queue_purge(&ndev->cmd_q);
+ atomic_set(&ndev->cmd_cnt, 1);
+
+ set_bit(NCI_INIT, &ndev->flags);
+ __nci_request(ndev, nci_reset_req, 0,
+ msecs_to_jiffies(NCI_RESET_TIMEOUT));
+ clear_bit(NCI_INIT, &ndev->flags);
+
+ /* Flush cmd wq */
+ flush_workqueue(ndev->cmd_wq);
+
+	/* After this point our queues are empty
+	 * and no work is scheduled. */
+ ndev->ops->close(ndev);
+
+ /* Clear flags */
+ ndev->flags = 0;
+
+ mutex_unlock(&ndev->req_lock);
+
+ return 0;
+}
+
+/* NCI command timer function */
+static void nci_cmd_timer(unsigned long arg)
+{
+ struct nci_dev *ndev = (void *) arg;
+
+ nfc_dbg("entry");
+
+ atomic_set(&ndev->cmd_cnt, 1);
+ queue_work(ndev->cmd_wq, &ndev->cmd_work);
+}
+
+static int nci_dev_up(struct nfc_dev *nfc_dev)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ nfc_dbg("entry");
+
+ return nci_open_device(ndev);
+}
+
+static int nci_dev_down(struct nfc_dev *nfc_dev)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ nfc_dbg("entry");
+
+ return nci_close_device(ndev);
+}
+
+static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+ int rc;
+
+ nfc_dbg("entry");
+
+ if (test_bit(NCI_DISCOVERY, &ndev->flags)) {
+ nfc_err("unable to start poll, since poll is already active");
+ return -EBUSY;
+ }
+
+ if (ndev->target_active_prot) {
+ nfc_err("there is an active target");
+ return -EBUSY;
+ }
+
+ if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
+ nfc_dbg("target is active, implicitly deactivate...");
+
+ rc = nci_request(ndev, nci_rf_deactivate_req, 0,
+ msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
+ if (rc)
+ return -EBUSY;
+ }
+
+ rc = nci_request(ndev, nci_rf_discover_req, protocols,
+ msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));
+
+ if (!rc)
+ ndev->poll_prots = protocols;
+
+ return rc;
+}
+
+static void nci_stop_poll(struct nfc_dev *nfc_dev)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ nfc_dbg("entry");
+
+ if (!test_bit(NCI_DISCOVERY, &ndev->flags)) {
+ nfc_err("unable to stop poll, since poll is not active");
+ return;
+ }
+
+ nci_request(ndev, nci_rf_deactivate_req, 0,
+ msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
+}
+
+static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
+ __u32 protocol)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ nfc_dbg("entry, target_idx %d, protocol 0x%x", target_idx, protocol);
+
+ if (!test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
+ nfc_err("there is no available target to activate");
+ return -EINVAL;
+ }
+
+ if (ndev->target_active_prot) {
+ nfc_err("there is already an active target");
+ return -EBUSY;
+ }
+
+ if (!(ndev->target_available_prots & (1 << protocol))) {
+ nfc_err("target does not support the requested protocol 0x%x",
+ protocol);
+ return -EINVAL;
+ }
+
+ ndev->target_active_prot = protocol;
+ ndev->target_available_prots = 0;
+
+ return 0;
+}
+
+static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ nfc_dbg("entry, target_idx %d", target_idx);
+
+ if (!ndev->target_active_prot) {
+ nfc_err("unable to deactivate target, no active target");
+ return;
+ }
+
+ ndev->target_active_prot = 0;
+
+ if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
+ nci_request(ndev, nci_rf_deactivate_req, 0,
+ msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
+ }
+}
+
+static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
+ struct sk_buff *skb,
+ data_exchange_cb_t cb,
+ void *cb_context)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+ int rc;
+
+ nfc_dbg("entry, target_idx %d, len %d", target_idx, skb->len);
+
+ if (!ndev->target_active_prot) {
+ nfc_err("unable to exchange data, no active target");
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
+ return -EBUSY;
+
+ /* store cb and context to be used on receiving data */
+ ndev->data_exchange_cb = cb;
+ ndev->data_exchange_cb_context = cb_context;
+
+ rc = nci_send_data(ndev, ndev->conn_id, skb);
+ if (rc)
+ clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
+
+ return rc;
+}
+
+static struct nfc_ops nci_nfc_ops = {
+ .dev_up = nci_dev_up,
+ .dev_down = nci_dev_down,
+ .start_poll = nci_start_poll,
+ .stop_poll = nci_stop_poll,
+ .activate_target = nci_activate_target,
+ .deactivate_target = nci_deactivate_target,
+ .data_exchange = nci_data_exchange,
+};
+
+/* ---- Interface to NCI drivers ---- */
+
+/**
+ * nci_allocate_device - allocate a new nci device
+ *
+ * @ops: device operations
+ * @supported_protocols: NFC protocols supported by the device
+ * @tx_headroom: reserved driver headroom to leave on each tx skb
+ * @tx_tailroom: reserved driver tailroom to leave on each tx skb
+ */
+struct nci_dev *nci_allocate_device(struct nci_ops *ops,
+ __u32 supported_protocols,
+ int tx_headroom,
+ int tx_tailroom)
+{
+ struct nci_dev *ndev;
+
+ nfc_dbg("entry, supported_protocols 0x%x", supported_protocols);
+
+ if (!ops->open || !ops->close || !ops->send)
+ return NULL;
+
+ if (!supported_protocols)
+ return NULL;
+
+ ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
+ if (!ndev)
+ return NULL;
+
+ ndev->ops = ops;
+ ndev->tx_headroom = tx_headroom;
+ ndev->tx_tailroom = tx_tailroom;
+
+ ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
+ supported_protocols,
+ tx_headroom + NCI_DATA_HDR_SIZE,
+ tx_tailroom);
+ if (!ndev->nfc_dev)
+ goto free_exit;
+
+ nfc_set_drvdata(ndev->nfc_dev, ndev);
+
+ return ndev;
+
+free_exit:
+ kfree(ndev);
+ return NULL;
+}
+EXPORT_SYMBOL(nci_allocate_device);
+
+/**
+ * nci_free_device - deallocate nci device
+ *
+ * @ndev: The nci device to deallocate
+ */
+void nci_free_device(struct nci_dev *ndev)
+{
+ nfc_dbg("entry");
+
+ nfc_free_device(ndev->nfc_dev);
+ kfree(ndev);
+}
+EXPORT_SYMBOL(nci_free_device);
+
+/**
+ * nci_register_device - register an nci device in the nfc subsystem
+ *
+ * @ndev: The nci device to register
+ */
+int nci_register_device(struct nci_dev *ndev)
+{
+ int rc;
+ struct device *dev = &ndev->nfc_dev->dev;
+ char name[32];
+
+ nfc_dbg("entry");
+
+ rc = nfc_register_device(ndev->nfc_dev);
+ if (rc)
+ goto exit;
+
+ ndev->flags = 0;
+
+ INIT_WORK(&ndev->cmd_work, nci_cmd_work);
+ snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
+ ndev->cmd_wq = create_singlethread_workqueue(name);
+ if (!ndev->cmd_wq) {
+ rc = -ENOMEM;
+ goto unreg_exit;
+ }
+
+ INIT_WORK(&ndev->rx_work, nci_rx_work);
+ snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
+ ndev->rx_wq = create_singlethread_workqueue(name);
+ if (!ndev->rx_wq) {
+ rc = -ENOMEM;
+ goto destroy_cmd_wq_exit;
+ }
+
+ INIT_WORK(&ndev->tx_work, nci_tx_work);
+ snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
+ ndev->tx_wq = create_singlethread_workqueue(name);
+ if (!ndev->tx_wq) {
+ rc = -ENOMEM;
+ goto destroy_rx_wq_exit;
+ }
+
+ skb_queue_head_init(&ndev->cmd_q);
+ skb_queue_head_init(&ndev->rx_q);
+ skb_queue_head_init(&ndev->tx_q);
+
+ setup_timer(&ndev->cmd_timer, nci_cmd_timer,
+ (unsigned long) ndev);
+
+ mutex_init(&ndev->req_lock);
+
+ goto exit;
+
+destroy_rx_wq_exit:
+ destroy_workqueue(ndev->rx_wq);
+
+destroy_cmd_wq_exit:
+ destroy_workqueue(ndev->cmd_wq);
+
+unreg_exit:
+ nfc_unregister_device(ndev->nfc_dev);
+
+exit:
+ return rc;
+}
+EXPORT_SYMBOL(nci_register_device);
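+
+/*
+ * Driver-side view of the API exported above, as a rough sketch; my_open,
+ * my_close, my_send, MY_HEADROOM and MY_TAILROOM are hypothetical names,
+ * not part of this file:
+ *
+ * static struct nci_ops my_nci_ops = {
+ * .open = my_open,
+ * .close = my_close,
+ * .send = my_send, (hands one skb to the hardware)
+ * };
+ *
+ * ndev = nci_allocate_device(&my_nci_ops, NFC_PROTO_ISO14443_MASK,
+ * MY_HEADROOM, MY_TAILROOM);
+ * if (!ndev)
+ * return -ENOMEM;
+ * rc = nci_register_device(ndev);
+ *
+ * Received frames are handed back with skb->dev = (void *) ndev followed
+ * by nci_recv_frame(skb), which queues them to the rx worker below.
+ */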
+
+/**
+ * nci_unregister_device - unregister an nci device in the nfc subsystem
+ *
+ * @ndev: The nci device to unregister
+ */
+void nci_unregister_device(struct nci_dev *ndev)
+{
+ nfc_dbg("entry");
+
+ nci_close_device(ndev);
+
+ destroy_workqueue(ndev->cmd_wq);
+ destroy_workqueue(ndev->rx_wq);
+ destroy_workqueue(ndev->tx_wq);
+
+ nfc_unregister_device(ndev->nfc_dev);
+}
+EXPORT_SYMBOL(nci_unregister_device);
+
+/**
+ * nci_recv_frame - receive frame from NCI drivers
+ *
+ * @skb: The sk_buff to receive
+ */
+int nci_recv_frame(struct sk_buff *skb)
+{
+ struct nci_dev *ndev = (struct nci_dev *) skb->dev;
+
+ nfc_dbg("entry, len %d", skb->len);
+
+ if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
+ && !test_bit(NCI_INIT, &ndev->flags))) {
+ kfree_skb(skb);
+ return -ENXIO;
+ }
+
+ /* Queue frame for rx worker thread */
+ skb_queue_tail(&ndev->rx_q, skb);
+ queue_work(ndev->rx_wq, &ndev->rx_work);
+
+ return 0;
+}
+EXPORT_SYMBOL(nci_recv_frame);
+
+static int nci_send_frame(struct sk_buff *skb)
+{
+ struct nci_dev *ndev = (struct nci_dev *) skb->dev;
+
+ nfc_dbg("entry, len %d", skb->len);
+
+ if (!ndev) {
+ kfree_skb(skb);
+ return -ENODEV;
+ }
+
+ /* Get rid of skb owner, prior to sending to the driver. */
+ skb_orphan(skb);
+
+ return ndev->ops->send(skb);
+}
+
+/* Send NCI command */
+int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
+{
+ struct nci_ctrl_hdr *hdr;
+ struct sk_buff *skb;
+
+ nfc_dbg("entry, opcode 0x%x, plen %d", opcode, plen);
+
+ skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
+ if (!skb) {
+ nfc_err("no memory for command");
+ return -ENOMEM;
+ }
+
+ hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
+ hdr->gid = nci_opcode_gid(opcode);
+ hdr->oid = nci_opcode_oid(opcode);
+ hdr->plen = plen;
+
+ nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
+ nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);
+
+ if (plen)
+ memcpy(skb_put(skb, plen), payload, plen);
+
+ skb->dev = (void *) ndev;
+
+ skb_queue_tail(&ndev->cmd_q, skb);
+ queue_work(ndev->cmd_wq, &ndev->cmd_work);
+
+ return 0;
+}
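+
+/*
+ * Layout of the 3-byte control header built above (as defined by the NCI
+ * specification of this era; the spec is authoritative):
+ *
+ * octet 0: MT (3 bits) | PBF (1 bit) | GID (4 bits)
+ * octet 1: OID (6 bits, upper 2 bits reserved)
+ * octet 2: payload length
+ *
+ * hence the gid/oid/plen stores, with nci_mt_set()/nci_pbf_set() ORing
+ * the message type and packet boundary flag into octet 0.
+ */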
+
+/* ---- NCI TX Data worker thread ---- */
+
+static void nci_tx_work(struct work_struct *work)
+{
+ struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
+ struct sk_buff *skb;
+
+ nfc_dbg("entry, credits_cnt %d", atomic_read(&ndev->credits_cnt));
+
+ /* Send queued tx data */
+ while (atomic_read(&ndev->credits_cnt)) {
+ skb = skb_dequeue(&ndev->tx_q);
+ if (!skb)
+ return;
+
+ atomic_dec(&ndev->credits_cnt);
+
+ nfc_dbg("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d",
+ nci_pbf(skb->data),
+ nci_conn_id(skb->data),
+ nci_plen(skb->data));
+
+ nci_send_frame(skb);
+ }
+}
+
+/* ----- NCI RX worker thread (data & control) ----- */
+
+static void nci_rx_work(struct work_struct *work)
+{
+ struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&ndev->rx_q))) {
+ /* Process frame */
+ switch (nci_mt(skb->data)) {
+ case NCI_MT_RSP_PKT:
+ nci_rsp_packet(ndev, skb);
+ break;
+
+ case NCI_MT_NTF_PKT:
+ nci_ntf_packet(ndev, skb);
+ break;
+
+ case NCI_MT_DATA_PKT:
+ nci_rx_data_packet(ndev, skb);
+ break;
+
+ default:
+ nfc_err("unknown MT 0x%x", nci_mt(skb->data));
+ kfree_skb(skb);
+ break;
+ }
+ }
+}
+
+/* ----- NCI TX CMD worker thread ----- */
+
+static void nci_cmd_work(struct work_struct *work)
+{
+ struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
+ struct sk_buff *skb;
+
+ nfc_dbg("entry, cmd_cnt %d", atomic_read(&ndev->cmd_cnt));
+
+ /* Send queued command */
+ if (atomic_read(&ndev->cmd_cnt)) {
+ skb = skb_dequeue(&ndev->cmd_q);
+ if (!skb)
+ return;
+
+ atomic_dec(&ndev->cmd_cnt);
+
+ nfc_dbg("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
+ nci_pbf(skb->data),
+ nci_opcode_gid(nci_opcode(skb->data)),
+ nci_opcode_oid(nci_opcode(skb->data)),
+ nci_plen(skb->data));
+
+ nci_send_frame(skb);
+
+ mod_timer(&ndev->cmd_timer,
+ jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
+ }
+}
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
+ #include <linux/export.h>
#include <net/iw_handler.h>
#include <net/cfg80211.h>
+#include <net/cfg80211-wext.h>
#include "wext-compat.h"
#include "core.h"
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
+ #include <linux/export.h>
#include <net/cfg80211.h>
+#include <net/cfg80211-wext.h>
#include "wext-compat.h"
#include "nl80211.h"
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/time.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/uio.h>
#include <linux/dma-mapping.h>
+ #include <linux/module.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/info.h>
#include <linux/i2c.h>
#include <linux/spi/spi.h>
+#include <linux/regmap.h>
+ #include <linux/export.h>
#include <sound/soc.h>
#include <trace/events/asoc.h>
*/
#include <linux/firmware.h>
+ #include <linux/module.h>
#include <linux/bitrev.h>
+#include <linux/kernel.h>
#include "firmware.h"
#include "chip.h"