*
*/
+#include <linux/export.h>
#include <linux/device.h>
+#include <linux/gpio.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
*/
#define pr_fmt(fmt) "hw perfevents: " fmt
+#include <linux/bitmap.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
*****************************************************************************/
#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <asm/page.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
#include <linux/delay.h>
#include <linux/io.h>
-#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
+#include <linux/clk.h>
#include <plat/common.h>
--- /dev/null
+/* linux/arch/arm/plat-s5p/dev-tv.c
+ *
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Author: Tomasz Stanislawski <t.stanislaws@samsung.com>
+ *
+ * S5P series device definition for TV device
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+
+#include <mach/irqs.h>
+#include <mach/map.h>
+
+#include <plat/devs.h>
+
+/* HDMI interface */
+static struct resource s5p_hdmi_resources[] = {
+ [0] = {
+ .start = S5P_PA_HDMI,
+ .end = S5P_PA_HDMI + SZ_1M - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_HDMI,
+ .end = IRQ_HDMI,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device s5p_device_hdmi = {
+ .name = "s5p-hdmi",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(s5p_hdmi_resources),
+ .resource = s5p_hdmi_resources,
+};
+EXPORT_SYMBOL(s5p_device_hdmi);
+
+/* SDO interface */
+static struct resource s5p_sdo_resources[] = {
+ [0] = {
+ .start = S5P_PA_SDO,
+ .end = S5P_PA_SDO + SZ_64K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_SDO,
+ .end = IRQ_SDO,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+struct platform_device s5p_device_sdo = {
+ .name = "s5p-sdo",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(s5p_sdo_resources),
+ .resource = s5p_sdo_resources,
+};
+EXPORT_SYMBOL(s5p_device_sdo);
+
+/* MIXER */
+static struct resource s5p_mixer_resources[] = {
+ [0] = {
+ .start = S5P_PA_MIXER,
+ .end = S5P_PA_MIXER + SZ_64K - 1,
+ .flags = IORESOURCE_MEM,
+ .name = "mxr"
+ },
+ [1] = {
+ .start = S5P_PA_VP,
+ .end = S5P_PA_VP + SZ_64K - 1,
+ .flags = IORESOURCE_MEM,
+ .name = "vp"
+ },
+ [2] = {
+ .start = IRQ_MIXER,
+ .end = IRQ_MIXER,
+ .flags = IORESOURCE_IRQ,
+ .name = "irq"
+ }
+};
+
+static u64 s5p_tv_dmamask = DMA_BIT_MASK(32);
+
+struct platform_device s5p_device_mixer = {
+ .name = "s5p-mixer",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(s5p_mixer_resources),
+ .resource = s5p_mixer_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .dma_mask = &s5p_tv_dmamask,
+ }
+};
+EXPORT_SYMBOL(s5p_device_mixer);
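For context: these platform devices are meant to be registered from a machine file. A minimal, hypothetical sketch of such a board hook (the smdk_tv_* names are illustrative, not part of this patch):

	#include <linux/init.h>
	#include <linux/platform_device.h>
	#include <plat/devs.h>

	static struct platform_device *smdk_tv_devices[] __initdata = {
		&s5p_device_hdmi,
		&s5p_device_sdo,
		&s5p_device_mixer,
	};

	static void __init smdk_tv_init(void)
	{
		/* hand the TV devices to the driver core during board init */
		platform_add_devices(smdk_tv_devices,
				     ARRAY_SIZE(smdk_tv_devices));
	}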
--- /dev/null
+/* linux/arch/arm/plat-samsung/dma-ops.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Samsung DMA Operations
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/amba/pl330.h>
+#include <linux/scatterlist.h>
+#include <linux/export.h>
+
+#include <mach/dma.h>
+
+static inline bool pl330_filter(struct dma_chan *chan, void *param)
+{
+ struct dma_pl330_peri *peri = chan->private;
+ return peri->peri_id == (unsigned)param;
+}
+
+static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
+ struct samsung_dma_info *info)
+{
+ struct dma_chan *chan;
+ dma_cap_mask_t mask;
+ struct dma_slave_config slave_config;
+
+ dma_cap_zero(mask);
+ dma_cap_set(info->cap, mask);
+
+ chan = dma_request_channel(mask, pl330_filter, (void *)dma_ch);
+
+ if (info->direction == DMA_FROM_DEVICE) {
+ memset(&slave_config, 0, sizeof(struct dma_slave_config));
+ slave_config.direction = info->direction;
+ slave_config.src_addr = info->fifo;
+ slave_config.src_addr_width = info->width;
+ slave_config.src_maxburst = 1;
+ dmaengine_slave_config(chan, &slave_config);
+ } else if (info->direction == DMA_TO_DEVICE) {
+ memset(&slave_config, 0, sizeof(struct dma_slave_config));
+ slave_config.direction = info->direction;
+ slave_config.dst_addr = info->fifo;
+ slave_config.dst_addr_width = info->width;
+ slave_config.dst_maxburst = 1;
+ dmaengine_slave_config(chan, &slave_config);
+ }
+
+ return (unsigned)chan;
+}
+
+static int samsung_dmadev_release(unsigned ch,
+ struct s3c2410_dma_client *client)
+{
+ dma_release_channel((struct dma_chan *)ch);
+
+ return 0;
+}
+
+static int samsung_dmadev_prepare(unsigned ch,
+ struct samsung_dma_prep_info *info)
+{
+ struct scatterlist sg;
+ struct dma_chan *chan = (struct dma_chan *)ch;
+ struct dma_async_tx_descriptor *desc;
+
+ switch (info->cap) {
+ case DMA_SLAVE:
+ sg_init_table(&sg, 1);
+ sg_dma_len(&sg) = info->len;
+ sg_set_page(&sg, pfn_to_page(PFN_DOWN(info->buf)),
+ info->len, offset_in_page(info->buf));
+ sg_dma_address(&sg) = info->buf;
+
+ desc = chan->device->device_prep_slave_sg(chan,
+ &sg, 1, info->direction, DMA_PREP_INTERRUPT);
+ break;
+ case DMA_CYCLIC:
+ desc = chan->device->device_prep_dma_cyclic(chan,
+ info->buf, info->len, info->period, info->direction);
+ break;
+ default:
+ dev_err(&chan->dev->device, "unsupported format\n");
+ return -EFAULT;
+ }
+
+ if (!desc) {
+ dev_err(&chan->dev->device, "cannot prepare cyclic dma\n");
+ return -EFAULT;
+ }
+
+ desc->callback = info->fp;
+ desc->callback_param = info->fp_param;
+
+ dmaengine_submit((struct dma_async_tx_descriptor *)desc);
+
+ return 0;
+}
+
+static inline int samsung_dmadev_trigger(unsigned ch)
+{
+ dma_async_issue_pending((struct dma_chan *)ch);
+
+ return 0;
+}
+
+static inline int samsung_dmadev_flush(unsigned ch)
+{
+ return dmaengine_terminate_all((struct dma_chan *)ch);
+}
+
+struct samsung_dma_ops dmadev_ops = {
+ .request = samsung_dmadev_request,
+ .release = samsung_dmadev_release,
+ .prepare = samsung_dmadev_prepare,
+ .trigger = samsung_dmadev_trigger,
+ .started = NULL,
+ .flush = samsung_dmadev_flush,
+ .stop = samsung_dmadev_flush,
+};
+
+void *samsung_dmadev_get_ops(void)
+{
+ return &dmadev_ops;
+}
+EXPORT_SYMBOL(samsung_dmadev_get_ops);
--- /dev/null
+/* linux/arch/arm/plat-samsung/s3c-dma-ops.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Samsung S3C-DMA Operations
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/export.h>
+
+#include <mach/dma.h>
+
+struct cb_data {
+ void (*fp) (void *);
+ void *fp_param;
+ unsigned ch;
+ struct list_head node;
+};
+
+static LIST_HEAD(dma_list);
+
+static void s3c_dma_cb(struct s3c2410_dma_chan *channel, void *param,
+ int size, enum s3c2410_dma_buffresult res)
+{
+ struct cb_data *data = param;
+
+ data->fp(data->fp_param);
+}
+
+static unsigned s3c_dma_request(enum dma_ch dma_ch,
+ struct samsung_dma_info *info)
+{
+ struct cb_data *data;
+
+ if (s3c2410_dma_request(dma_ch, info->client, NULL) < 0) {
+ s3c2410_dma_free(dma_ch, info->client);
+ return 0;
+ }
+
+ data = kzalloc(sizeof(struct cb_data), GFP_KERNEL);
+ data->ch = dma_ch;
+ list_add_tail(&data->node, &dma_list);
+
+ s3c2410_dma_devconfig(dma_ch, info->direction, info->fifo);
+
+ if (info->cap == DMA_CYCLIC)
+ s3c2410_dma_setflags(dma_ch, S3C2410_DMAF_CIRCULAR);
+
+ s3c2410_dma_config(dma_ch, info->width);
+
+ return (unsigned)dma_ch;
+}
+
+static int s3c_dma_release(unsigned ch, struct s3c2410_dma_client *client)
+{
+ struct cb_data *data;
+
+ list_for_each_entry(data, &dma_list, node)
+ if (data->ch == ch)
+ break;
+ list_del(&data->node);
+
+ s3c2410_dma_free(ch, client);
+ kfree(data);
+
+ return 0;
+}
+
+static int s3c_dma_prepare(unsigned ch, struct samsung_dma_prep_info *info)
+{
+ struct cb_data *data;
+ int len = (info->cap == DMA_CYCLIC) ? info->period : info->len;
+
+ list_for_each_entry(data, &dma_list, node)
+ if (data->ch == ch)
+ break;
+
+ if (!data->fp) {
+ s3c2410_dma_set_buffdone_fn(ch, s3c_dma_cb);
+ data->fp = info->fp;
+ data->fp_param = info->fp_param;
+ }
+
+ s3c2410_dma_enqueue(ch, (void *)data, info->buf, len);
+
+ return 0;
+}
+
+static inline int s3c_dma_trigger(unsigned ch)
+{
+ return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_START);
+}
+
+static inline int s3c_dma_started(unsigned ch)
+{
+ return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STARTED);
+}
+
+static inline int s3c_dma_flush(unsigned ch)
+{
+ return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_FLUSH);
+}
+
+static inline int s3c_dma_stop(unsigned ch)
+{
+ return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STOP);
+}
+
+static struct samsung_dma_ops s3c_dma_ops = {
+ .request = s3c_dma_request,
+ .release = s3c_dma_release,
+ .prepare = s3c_dma_prepare,
+ .trigger = s3c_dma_trigger,
+ .started = s3c_dma_started,
+ .flush = s3c_dma_flush,
+ .stop = s3c_dma_stop,
+};
+
+void *s3c_dma_get_ops(void)
+{
+ return &s3c_dma_ops;
+}
+EXPORT_SYMBOL(s3c_dma_get_ops);
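Both files export the same samsung_dma_ops vtable, so a client driver can stay backend-agnostic and just swap which *_get_ops() it calls. A hedged sketch of a caller, with the samsung_dma_info/samsung_dma_prep_info field names inferred from this patch (example_start_tx and dma_done are hypothetical):

	#include <mach/dma.h>

	static void dma_done(void *param)
	{
		/* buffer-done callback invoked by the selected backend */
	}

	static int example_start_tx(enum dma_ch ch, dma_addr_t buf,
				    int len, dma_addr_t fifo)
	{
		struct samsung_dma_ops *ops = samsung_dmadev_get_ops();
						/* or s3c_dma_get_ops() */
		struct samsung_dma_info info = {
			.cap = DMA_SLAVE,
			.direction = DMA_TO_DEVICE,
			.fifo = fifo,
			.width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		};
		struct samsung_dma_prep_info prep = {
			.cap = DMA_SLAVE,
			.direction = DMA_TO_DEVICE,
			.buf = buf,
			.len = len,
			.fp = dma_done,
		};
		unsigned chan = ops->request(ch, &info);

		ops->prepare(chan, &prep);
		return ops->trigger(chan);
	}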
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
#include <linux/types.h>
#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/signal.h>
*/
#include <linux/types.h>
+#include <linux/export.h>
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_embedded.h>
+#include <linux/bcma/bcma_soc.h>
#include <asm/bootinfo.h>
#include <asm/reboot.h>
#include <asm/time.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
*
*/
+#include <linux/module.h>
#include <crypto/aes.h>
+#include <asm/aes.h>
asmlinkage void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
asmlinkage void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/bitops.h>
+#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
-#include <linux/edac_mce.h>
#include <linux/irq_work.h>
+#include <linux/export.h>
#include <asm/processor.h>
#include <asm/mce.h>
* Used to coordinate shared registers between HT threads or
* among events on a single PMU.
*/
-struct intel_shared_regs {
- struct er_account regs[EXTRA_REG_MAX];
- int refcnt; /* per-core: #HT threads */
- unsigned core_id; /* per-core: core id */
-};
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include <asm/hardirq.h>
+#include <asm/apic.h>
+
+#include "perf_event.h"
/*
* Intel PerfMon, used on Core and later.
--- /dev/null
+/*
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
+ * Copyright (C) 2011 Don Zickus Red Hat, Inc.
+ *
+ * Pentium III FXSR, SSE support
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ */
+
+/*
+ * Handle hardware traps and faults.
+ */
+#include <linux/spinlock.h>
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/nmi.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include <linux/mca.h>
+
+#if defined(CONFIG_EDAC)
+#include <linux/edac.h>
+#endif
+
+#include <linux/atomic.h>
+#include <asm/traps.h>
+#include <asm/mach_traps.h>
+#include <asm/nmi.h>
+
+#define NMI_MAX_NAMELEN 16
+struct nmiaction {
+ struct list_head list;
+ nmi_handler_t handler;
+ unsigned int flags;
+ char *name;
+};
+
+struct nmi_desc {
+ spinlock_t lock;
+ struct list_head head;
+};
+
+static struct nmi_desc nmi_desc[NMI_MAX] =
+{
+ {
+ .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
+ .head = LIST_HEAD_INIT(nmi_desc[0].head),
+ },
+ {
+ .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
+ .head = LIST_HEAD_INIT(nmi_desc[1].head),
+ },
+
+};
+
+struct nmi_stats {
+ unsigned int normal;
+ unsigned int unknown;
+ unsigned int external;
+ unsigned int swallow;
+};
+
+static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);
+
+static int ignore_nmis;
+
+int unknown_nmi_panic;
+/*
+ * Prevent the NMI reason port (0x61) from being accessed simultaneously;
+ * can only be used in an NMI handler.
+ */
+static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
+
+static int __init setup_unknown_nmi_panic(char *str)
+{
+ unknown_nmi_panic = 1;
+ return 1;
+}
+__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
+
+#define nmi_to_desc(type) (&nmi_desc[type])
+
+static int notrace __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
+{
+ struct nmi_desc *desc = nmi_to_desc(type);
+ struct nmiaction *a;
+ int handled = 0;
+
+ rcu_read_lock();
+
+ /*
+ * NMIs are edge-triggered, which means if you have enough
+ * of them concurrently, you can lose some because only one
+ * can be latched at any given time. Walk the whole list
+ * to handle those situations.
+ */
+ list_for_each_entry_rcu(a, &desc->head, list)
+ handled += a->handler(type, regs);
+
+ rcu_read_unlock();
+
+ /* return total number of NMI events handled */
+ return handled;
+}
+
+static int __setup_nmi(unsigned int type, struct nmiaction *action)
+{
+ struct nmi_desc *desc = nmi_to_desc(type);
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+
+ /*
+ * Most handlers of type NMI_UNKNOWN never return because
+ * they just assume the NMI is theirs. Just a sanity check
+ * to manage expectations.
+ */
+ WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
+
+ /*
+ * Some handlers need to be executed first, otherwise a fake
+ * event confuses some handlers (kdump uses this flag).
+ */
+ if (action->flags & NMI_FLAG_FIRST)
+ list_add_rcu(&action->list, &desc->head);
+ else
+ list_add_tail_rcu(&action->list, &desc->head);
+
+ spin_unlock_irqrestore(&desc->lock, flags);
+ return 0;
+}
+
+static struct nmiaction *__free_nmi(unsigned int type, const char *name)
+{
+ struct nmi_desc *desc = nmi_to_desc(type);
+ struct nmiaction *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+
+ list_for_each_entry_rcu(n, &desc->head, list) {
+ /*
+ * the name passed in to describe the nmi handler
+ * is used as the lookup key
+ */
+ if (!strcmp(n->name, name)) {
+ WARN(in_nmi(),
+ "Trying to free NMI (%s) from NMI context!\n", n->name);
+ list_del_rcu(&n->list);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&desc->lock, flags);
+ synchronize_rcu();
+ return (n);
+}
+
+int register_nmi_handler(unsigned int type, nmi_handler_t handler,
+ unsigned long nmiflags, const char *devname)
+{
+ struct nmiaction *action;
+ int retval = -ENOMEM;
+
+ if (!handler)
+ return -EINVAL;
+
+ action = kzalloc(sizeof(struct nmiaction), GFP_KERNEL);
+ if (!action)
+ goto fail_action;
+
+ action->handler = handler;
+ action->flags = nmiflags;
+ action->name = kstrndup(devname, NMI_MAX_NAMELEN, GFP_KERNEL);
+ if (!action->name)
+ goto fail_action_name;
+
+ retval = __setup_nmi(type, action);
+
+ if (retval)
+ goto fail_setup_nmi;
+
+ return retval;
+
+fail_setup_nmi:
+ kfree(action->name);
+fail_action_name:
+ kfree(action);
+fail_action:
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(register_nmi_handler);
+
+void unregister_nmi_handler(unsigned int type, const char *name)
+{
+ struct nmiaction *a;
+
+ a = __free_nmi(type, name);
+ if (a) {
+ kfree(a->name);
+ kfree(a);
+ }
+}
+
+EXPORT_SYMBOL_GPL(unregister_nmi_handler);
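A hedged usage sketch for the registration API above (the my_* names are hypothetical). The handler returns nonzero when it recognizes the NMI, which feeds the handled count in nmi_handle(), and devname doubles as the removal key used by __free_nmi():

	static int my_nmi_handler(unsigned int type, struct pt_regs *regs)
	{
		/* claim the NMI by returning nonzero, else pass it on */
		return 0;
	}

	static int __init my_nmi_init(void)
	{
		return register_nmi_handler(NMI_LOCAL, my_nmi_handler,
					    0, "my_nmi");
	}

	static void my_nmi_teardown(void)
	{
		unregister_nmi_handler(NMI_LOCAL, "my_nmi");
	}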
+
+static notrace __kprobes void
+pci_serr_error(unsigned char reason, struct pt_regs *regs)
+{
+ pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
+ reason, smp_processor_id());
+
+ /*
+ * On some machines, PCI SERR line is used to report memory
+ * errors. EDAC makes use of it.
+ */
+#if defined(CONFIG_EDAC)
+ if (edac_handler_set()) {
+ edac_atomic_assert_error();
+ return;
+ }
+#endif
+
+ if (panic_on_unrecovered_nmi)
+ panic("NMI: Not continuing");
+
+ pr_emerg("Dazed and confused, but trying to continue\n");
+
+ /* Clear and disable the PCI SERR error line. */
+ reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
+ outb(reason, NMI_REASON_PORT);
+}
+
+static notrace __kprobes void
+io_check_error(unsigned char reason, struct pt_regs *regs)
+{
+ unsigned long i;
+
+ pr_emerg(
+ "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
+ reason, smp_processor_id());
+ show_registers(regs);
+
+ if (panic_on_io_nmi)
+ panic("NMI IOCK error: Not continuing");
+
+ /* Re-enable the IOCK line, wait for a few seconds */
+ reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
+ outb(reason, NMI_REASON_PORT);
+
+ i = 20000;
+ while (--i) {
+ touch_nmi_watchdog();
+ udelay(100);
+ }
+
+ reason &= ~NMI_REASON_CLEAR_IOCHK;
+ outb(reason, NMI_REASON_PORT);
+}
+
+static notrace __kprobes void
+unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
+{
+ int handled;
+
+ /*
+ * Use 'false' as back-to-back NMIs are dealt with one level up.
+ * Of course this makes having multiple 'unknown' handlers useless
+ * as only the first one is ever run (unless it can actually determine
+ * if it caused the NMI)
+ */
+ handled = nmi_handle(NMI_UNKNOWN, regs, false);
+ if (handled) {
+ __this_cpu_add(nmi_stats.unknown, handled);
+ return;
+ }
+
+ __this_cpu_add(nmi_stats.unknown, 1);
+
+#ifdef CONFIG_MCA
+ /*
+ * Might actually be able to figure out what the guilty party
+ * is:
+ */
+ if (MCA_bus) {
+ mca_handle_nmi();
+ return;
+ }
+#endif
+ pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
+ reason, smp_processor_id());
+
+ pr_emerg("Do you have a strange power saving mode enabled?\n");
+ if (unknown_nmi_panic || panic_on_unrecovered_nmi)
+ panic("NMI: Not continuing");
+
+ pr_emerg("Dazed and confused, but trying to continue\n");
+}
+
+static DEFINE_PER_CPU(bool, swallow_nmi);
+static DEFINE_PER_CPU(unsigned long, last_nmi_rip);
+
+static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
+{
+ unsigned char reason = 0;
+ int handled;
+ bool b2b = false;
+
+ /*
+ * CPU-specific NMI must be processed before non-CPU-specific
+ * NMI, otherwise we may lose it, because the CPU-specific
+ * NMI can not be detected/processed on other CPUs.
+ */
+
+ /*
+ * Back-to-back NMIs are interesting because they can either
+ * be two NMIs or more than two NMIs (anything over two is dropped
+ * due to NMI being edge-triggered). If this is the second half
+ * of the back-to-back NMI, assume we dropped things and process
+ * more handlers. Otherwise reset the 'swallow' NMI behaviour.
+ */
+ if (regs->ip == __this_cpu_read(last_nmi_rip))
+ b2b = true;
+ else
+ __this_cpu_write(swallow_nmi, false);
+
+ __this_cpu_write(last_nmi_rip, regs->ip);
+
+ handled = nmi_handle(NMI_LOCAL, regs, b2b);
+ __this_cpu_add(nmi_stats.normal, handled);
+ if (handled) {
+ /*
+ * There are cases when an NMI handler handles multiple
+ * events in the current NMI. One of these events may
+ * be queued for the next NMI. Because the event is
+ * already handled, the next NMI will result in an unknown
+ * NMI. Instead let's flag this for a potential NMI to
+ * swallow.
+ */
+ if (handled > 1)
+ __this_cpu_write(swallow_nmi, true);
+ return;
+ }
+
+ /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
+ raw_spin_lock(&nmi_reason_lock);
+ reason = get_nmi_reason();
+
+ if (reason & NMI_REASON_MASK) {
+ if (reason & NMI_REASON_SERR)
+ pci_serr_error(reason, regs);
+ else if (reason & NMI_REASON_IOCHK)
+ io_check_error(reason, regs);
+#ifdef CONFIG_X86_32
+ /*
+ * Reassert NMI in case it became active
+ * meanwhile as it's edge-triggered:
+ */
+ reassert_nmi();
+#endif
+ __this_cpu_add(nmi_stats.external, 1);
+ raw_spin_unlock(&nmi_reason_lock);
+ return;
+ }
+ raw_spin_unlock(&nmi_reason_lock);
+
+ /*
+ * Only one NMI can be latched at a time. To handle
+ * this we may process multiple nmi handlers at once to
+ * cover the case where an NMI is dropped. The downside
+ * to this approach is we may process an NMI prematurely,
+ * while its real NMI is sitting latched. This will cause
+ * an unknown NMI on the next run of the NMI processing.
+ *
+ * We tried to flag that condition above, by setting the
+ * swallow_nmi flag when we process more than one event.
+ * This condition is also only present on the second half
+ * of a back-to-back NMI, so we flag that condition too.
+ *
+ * If both are true, we assume we already processed this
+ * NMI previously and we swallow it. Otherwise we reset
+ * the logic.
+ *
+ * There are scenarios where we may accidentally swallow
+ * a 'real' unknown NMI. For example, while processing
+ * a perf NMI another perf NMI comes in along with a
+ * 'real' unknown NMI. These two NMIs get combined into
+ * one (as described above). When the next NMI gets
+ * processed, it will be flagged by perf as handled, but
+ * no one will know that a 'real' unknown NMI was also
+ * sent. As a result it gets swallowed. Or if the first
+ * perf NMI returns two events handled then the second
+ * NMI will get eaten by the logic below, again losing a
+ * 'real' unknown NMI. But this is the best we can do
+ * for now.
+ */
+ if (b2b && __this_cpu_read(swallow_nmi))
+ __this_cpu_add(nmi_stats.swallow, 1);
+ else
+ unknown_nmi_error(reason, regs);
+}
+
+dotraplinkage notrace __kprobes void
+do_nmi(struct pt_regs *regs, long error_code)
+{
+ nmi_enter();
+
+ inc_irq_stat(__nmi_count);
+
+ if (!ignore_nmis)
+ default_do_nmi(regs);
+
+ nmi_exit();
+}
+
+void stop_nmi(void)
+{
+ ignore_nmis++;
+}
+
+void restart_nmi(void)
+{
+ ignore_nmis--;
+}
+
+/* reset the back-to-back NMI logic */
+void local_touch_nmi(void)
+{
+ __this_cpu_write(last_nmi_rip, 0);
+}
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/bsg-lib.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <scsi/scsi_cmnd.h>
/**
--- /dev/null
+/*
+ * drivers/base/power/common.c - Common device power management code.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/pm_clock.h>
+
+/**
+ * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
+ * @dev: Device to handle.
+ *
+ * If power.subsys_data is NULL, point it to a new object, otherwise increment
+ * its reference counter. Return 1 if a new object has been created, otherwise
+ * return 0 or error code.
+ */
+int dev_pm_get_subsys_data(struct device *dev)
+{
+ struct pm_subsys_data *psd;
+ int ret = 0;
+
+ psd = kzalloc(sizeof(*psd), GFP_KERNEL);
+ if (!psd)
+ return -ENOMEM;
+
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.subsys_data) {
+ dev->power.subsys_data->refcount++;
+ } else {
+ spin_lock_init(&psd->lock);
+ psd->refcount = 1;
+ dev->power.subsys_data = psd;
+ pm_clk_init(dev);
+ psd = NULL;
+ ret = 1;
+ }
+
+ spin_unlock_irq(&dev->power.lock);
+
+ /* kfree() verifies that its argument is nonzero. */
+ kfree(psd);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
+
+/**
+ * dev_pm_put_subsys_data - Drop reference to power.subsys_data.
+ * @dev: Device to handle.
+ *
+ * If the reference counter of power.subsys_data is zero after dropping the
+ * reference, power.subsys_data is removed. Return 1 if that happens or 0
+ * otherwise.
+ */
+int dev_pm_put_subsys_data(struct device *dev)
+{
+ struct pm_subsys_data *psd;
+ int ret = 0;
+
+ spin_lock_irq(&dev->power.lock);
+
+ psd = dev_to_psd(dev);
+ if (!psd) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (--psd->refcount == 0) {
+ dev->power.subsys_data = NULL;
+ kfree(psd);
+ ret = 1;
+ }
+
+ out:
+ spin_unlock_irq(&dev->power.lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
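A hedged sketch of how a subsystem would pair these calls (the example_* names are hypothetical); the first get allocates power.subsys_data and the final put frees it:

	static int example_attach(struct device *dev)
	{
		int ret = dev_pm_get_subsys_data(dev); /* 1 = newly created */

		if (ret < 0)
			return ret;
		/* ... use dev->power.subsys_data, e.g. the pm_clk list ... */
		return 0;
	}

	static void example_detach(struct device *dev)
	{
		dev_pm_put_subsys_data(dev); /* returns 1 on last reference */
	}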
--- /dev/null
+/*
+ * Devices PM QoS constraints management
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *
+ * This module exposes the interface to kernel space for specifying
+ * per-device PM QoS dependencies. It provides infrastructure for registration
+ * of:
+ *
+ * Dependents on a QoS value : register requests
+ * Watchers of QoS value : get notified when target QoS value changes
+ *
+ * This QoS design is best effort based. Dependents register their QoS needs.
+ * Watchers register to keep track of the current QoS needs of the system.
+ * Watchers can register different types of notification callbacks:
+ * . a per-device notification callback using the dev_pm_qos_*_notifier API.
+ * The notification chain data is stored in the per-device constraint
+ * data struct.
+ * . a system-wide notification callback using the dev_pm_qos_*_global_notifier
+ * API. The notification chain data is stored in a static variable.
+ *
+ * Note about the per-device constraint data struct allocation:
+ * . The per-device constraints data struct pointer is stored in the device's
+ *   dev_pm_info.
+ * . To minimize the data usage by the per-device constraints, the data struct
+ * is only allocated at the first call to dev_pm_qos_add_request.
+ * . The data is later free'd when the device is removed from the system.
+ * . A global mutex protects the constraints users from the data being
+ * allocated and free'd.
+ */
+
+#include <linux/pm_qos.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/export.h>
+
+
+static DEFINE_MUTEX(dev_pm_qos_mtx);
+
+static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
+
+/**
+ * dev_pm_qos_read_value - Get PM QoS constraint for a given device.
+ * @dev: Device to get the PM QoS constraint value for.
+ */
+s32 dev_pm_qos_read_value(struct device *dev)
+{
+ struct pm_qos_constraints *c;
+ unsigned long flags;
+ s32 ret = 0;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ c = dev->power.constraints;
+ if (c)
+ ret = pm_qos_read_value(c);
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return ret;
+}
+
+/*
+ * apply_constraint
+ * @req: constraint request to apply
+ * @action: action to perform add/update/remove, of type enum pm_qos_req_action
+ * @value: defines the qos request
+ *
+ * Internal function to update the constraints list using the PM QoS core
+ * code and if needed call the per-device and the global notification
+ * callbacks
+ */
+static int apply_constraint(struct dev_pm_qos_request *req,
+ enum pm_qos_req_action action, int value)
+{
+ int ret, curr_value;
+
+ ret = pm_qos_update_target(req->dev->power.constraints,
+ &req->node, action, value);
+
+ if (ret) {
+ /* Call the global callbacks if needed */
+ curr_value = pm_qos_read_value(req->dev->power.constraints);
+ blocking_notifier_call_chain(&dev_pm_notifiers,
+ (unsigned long)curr_value,
+ req);
+ }
+
+ return ret;
+}
+
+/*
+ * dev_pm_qos_constraints_allocate
+ * @dev: device to allocate data for
+ *
+ * Called at the first call to add_request, for constraint data allocation.
+ * Must be called with the dev_pm_qos_mtx mutex held.
+ */
+static int dev_pm_qos_constraints_allocate(struct device *dev)
+{
+ struct pm_qos_constraints *c;
+ struct blocking_notifier_head *n;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return -ENOMEM;
+
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
+ if (!n) {
+ kfree(c);
+ return -ENOMEM;
+ }
+ BLOCKING_INIT_NOTIFIER_HEAD(n);
+
+ plist_head_init(&c->list);
+ c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
+ c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
+ c->type = PM_QOS_MIN;
+ c->notifiers = n;
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.constraints = c;
+ spin_unlock_irq(&dev->power.lock);
+
+ return 0;
+}
+
+/**
+ * dev_pm_qos_constraints_init - Initialize device's PM QoS constraints pointer.
+ * @dev: target device
+ *
+ * Called from the device PM subsystem during device insertion under
+ * device_pm_lock().
+ */
+void dev_pm_qos_constraints_init(struct device *dev)
+{
+ mutex_lock(&dev_pm_qos_mtx);
+ dev->power.constraints = NULL;
+ dev->power.power_state = PMSG_ON;
+ mutex_unlock(&dev_pm_qos_mtx);
+}
+
+/**
+ * dev_pm_qos_constraints_destroy
+ * @dev: target device
+ *
+ * Called from the device PM subsystem on device removal under device_pm_lock().
+ */
+void dev_pm_qos_constraints_destroy(struct device *dev)
+{
+ struct dev_pm_qos_request *req, *tmp;
+ struct pm_qos_constraints *c;
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ dev->power.power_state = PMSG_INVALID;
+ c = dev->power.constraints;
+ if (!c)
+ goto out;
+
+ /* Flush the constraints list for the device */
+ plist_for_each_entry_safe(req, tmp, &c->list, node) {
+ /*
+ * Update constraints list and call the notification
+ * callbacks if needed
+ */
+ apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.constraints = NULL;
+ spin_unlock_irq(&dev->power.lock);
+
+ kfree(c->notifiers);
+ kfree(c);
+
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
+}
+
+/**
+ * dev_pm_qos_add_request - inserts new qos request into the list
+ * @dev: target device for the constraint
+ * @req: pointer to a preallocated handle
+ * @value: defines the qos request
+ *
+ * This function inserts a new entry in the device constraints list of
+ * requested qos performance characteristics. It recomputes the aggregate
+ * QoS expectations of parameters and initializes the dev_pm_qos_request
+ * handle. Caller needs to save this handle for later use in updates and
+ * removal.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
+ * to allocate for data structures, -ENODEV if the device has just been removed
+ * from the system.
+ */
+int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
+ s32 value)
+{
+ int ret = 0;
+
+ if (!dev || !req) /*guard against callers passing in null */
+ return -EINVAL;
+
+ if (dev_pm_qos_request_active(req)) {
+ WARN(1, KERN_ERR "dev_pm_qos_add_request() called for already "
+ "added request\n");
+ return -EINVAL;
+ }
+
+ req->dev = dev;
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (!dev->power.constraints) {
+ if (dev->power.power_state.event == PM_EVENT_INVALID) {
+ /* The device has been removed from the system. */
+ req->dev = NULL;
+ ret = -ENODEV;
+ goto out;
+ } else {
+ /*
+ * Allocate the constraints data on the first call to
+ * add_request, i.e. only if the data is not already
+ * allocated and if the device has not been removed.
+ */
+ ret = dev_pm_qos_constraints_allocate(dev);
+ }
+ }
+
+ if (!ret)
+ ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
+
+/**
+ * dev_pm_qos_update_request - modifies an existing qos request
+ * @req : handle to list element holding a dev_pm_qos request to use
+ * @new_value: defines the qos request
+ *
+ * Updates an existing dev PM qos request along with updating the
+ * target value.
+ *
+ * Attempts are made to make this code callable on hot code paths.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENODEV if the device has been
+ * removed from the system
+ */
+int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
+ s32 new_value)
+{
+ int ret = 0;
+
+ if (!req) /*guard against callers passing in null */
+ return -EINVAL;
+
+ if (!dev_pm_qos_request_active(req)) {
+ WARN(1, KERN_ERR "dev_pm_qos_update_request() called for "
+ "unknown object\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (req->dev->power.constraints) {
+ if (new_value != req->node.prio)
+ ret = apply_constraint(req, PM_QOS_UPDATE_REQ,
+ new_value);
+ } else {
+ /* Return if the device has been removed */
+ ret = -ENODEV;
+ }
+
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
+
+/**
+ * dev_pm_qos_remove_request - removes an existing qos request
+ * @req: handle to request list element
+ *
+ * Will remove pm qos request from the list of constraints and
+ * recompute the current target value. Call this on slow code paths.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENODEV if the device has been
+ * removed from the system
+ */
+int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+ int ret = 0;
+
+ if (!req) /*guard against callers passing in null */
+ return -EINVAL;
+
+ if (!dev_pm_qos_request_active(req)) {
+ WARN(1, KERN_ERR "dev_pm_qos_remove_request() called for "
+ "unknown object\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (req->dev->power.constraints) {
+ ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
+ PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ } else {
+ /* Return if the device has been removed */
+ ret = -ENODEV;
+ }
+
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
+
+/**
+ * dev_pm_qos_add_notifier - sets notification entry for changes to target value
+ * of per-device PM QoS constraints
+ *
+ * @dev: target device for the constraint
+ * @notifier: notifier block managed by caller.
+ *
+ * Will register the notifier into a notification chain that gets called
+ * upon changes to the target value for the device.
+ */
+int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
+{
+ int retval = 0;
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ /* Silently return if the constraints object is not present. */
+ if (dev->power.constraints)
+ retval = blocking_notifier_chain_register(
+ dev->power.constraints->notifiers,
+ notifier);
+
+ mutex_unlock(&dev_pm_qos_mtx);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
+
+/**
+ * dev_pm_qos_remove_notifier - deletes notification for changes to target value
+ * of per-device PM QoS constraints
+ *
+ * @dev: target device for the constraint
+ * @notifier: notifier block to be removed.
+ *
+ * Will remove the notifier from the notification chain that gets called
+ * upon changes to the target value.
+ */
+int dev_pm_qos_remove_notifier(struct device *dev,
+ struct notifier_block *notifier)
+{
+ int retval = 0;
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ /* Silently return if the constraints object is not present. */
+ if (dev->power.constraints)
+ retval = blocking_notifier_chain_unregister(
+ dev->power.constraints->notifiers,
+ notifier);
+
+ mutex_unlock(&dev_pm_qos_mtx);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
+
+/**
+ * dev_pm_qos_add_global_notifier - sets notification entry for changes to
+ * target value of the PM QoS constraints for any device
+ *
+ * @notifier: notifier block managed by caller.
+ *
+ * Will register the notifier into a notification chain that gets called
+ * upon changes to the target value for any device.
+ */
+int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
+{
+ return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);
+
+/**
+ * dev_pm_qos_remove_global_notifier - deletes notification for changes to
+ * target value of PM QoS constraints for any device
+ *
+ * @notifier: notifier block to be removed.
+ *
+ * Will remove the notifier from the notification chain that gets called
+ * upon changes to the target value for any device.
+ */
+int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
+{
+ return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
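A hedged sketch of the request lifecycle from a driver's point of view (the example_* names and the constraint values are hypothetical; the aggregate uses PM_QOS_MIN as set up in dev_pm_qos_constraints_allocate()):

	static struct dev_pm_qos_request example_req;

	static int example_probe(struct device *dev)
	{
		/* add a constraint; 1/0 = aggregate changed/unchanged */
		return dev_pm_qos_add_request(dev, &example_req, 100);
	}

	static void example_tighten(void)
	{
		dev_pm_qos_update_request(&example_req, 50);
	}

	static void example_remove(void)
	{
		dev_pm_qos_remove_request(&example_req);
	}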
*/
#include <linux/sched.h>
+#include <linux/export.h>
#include <linux/pm_runtime.h>
+#include <trace/events/rpm.h>
#include "power.h"
static int rpm_resume(struct device *dev, int rpmflags);
--- /dev/null
+/*
+ * Register cache access API
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <trace/events/regmap.h>
+#include <linux/bsearch.h>
+#include <linux/sort.h>
+
+#include "internal.h"
+
+static const struct regcache_ops *cache_types[] = {
+ &regcache_indexed_ops,
+ &regcache_rbtree_ops,
+ &regcache_lzo_ops,
+};
+
+static int regcache_hw_init(struct regmap *map)
+{
+ int i, j;
+ int ret;
+ int count;
+ unsigned int val;
+ void *tmp_buf;
+
+ if (!map->num_reg_defaults_raw)
+ return -EINVAL;
+
+ if (!map->reg_defaults_raw) {
+ dev_warn(map->dev, "No cache defaults, reading back from HW\n");
+ tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
+ if (!tmp_buf)
+ return -EINVAL;
+ ret = regmap_bulk_read(map, 0, tmp_buf,
+ map->num_reg_defaults_raw);
+ if (ret < 0) {
+ kfree(tmp_buf);
+ return ret;
+ }
+ map->reg_defaults_raw = tmp_buf;
+ map->cache_free = 1;
+ }
+
+ /* calculate the size of reg_defaults */
+ for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
+ val = regcache_get_val(map->reg_defaults_raw,
+ i, map->cache_word_size);
+ if (!val)
+ continue;
+ count++;
+ }
+
+ map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
+ GFP_KERNEL);
+ if (!map->reg_defaults)
+ return -ENOMEM;
+
+ /* fill the reg_defaults */
+ map->num_reg_defaults = count;
+ for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
+ val = regcache_get_val(map->reg_defaults_raw,
+ i, map->cache_word_size);
+ if (!val)
+ continue;
+ map->reg_defaults[j].reg = i;
+ map->reg_defaults[j].def = val;
+ j++;
+ }
+
+ return 0;
+}
+
+int regcache_init(struct regmap *map)
+{
+ int ret;
+ int i;
+ void *tmp_buf;
+
+ if (map->cache_type == REGCACHE_NONE) {
+ map->cache_bypass = true;
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cache_types); i++)
+ if (cache_types[i]->type == map->cache_type)
+ break;
+
+ if (i == ARRAY_SIZE(cache_types)) {
+ dev_err(map->dev, "Could not match compress type: %d\n",
+ map->cache_type);
+ return -EINVAL;
+ }
+
+ map->cache = NULL;
+ map->cache_ops = cache_types[i];
+
+ if (!map->cache_ops->read ||
+ !map->cache_ops->write ||
+ !map->cache_ops->name)
+ return -EINVAL;
+
+ /* We still need to ensure that the reg_defaults
+ * won't vanish from under us. We'll need to make
+ * a copy of it.
+ */
+ if (map->reg_defaults) {
+ if (!map->num_reg_defaults)
+ return -EINVAL;
+ tmp_buf = kmemdup(map->reg_defaults, map->num_reg_defaults *
+ sizeof(struct reg_default), GFP_KERNEL);
+ if (!tmp_buf)
+ return -ENOMEM;
+ map->reg_defaults = tmp_buf;
+ } else if (map->num_reg_defaults_raw) {
+ /* Some devices such as PMICs don't have cache defaults,
+ * we cope with this by reading back the HW registers and
+ * crafting the cache defaults by hand.
+ */
+ ret = regcache_hw_init(map);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!map->max_register)
+ map->max_register = map->num_reg_defaults_raw;
+
+ if (map->cache_ops->init) {
+ dev_dbg(map->dev, "Initializing %s cache\n",
+ map->cache_ops->name);
+ return map->cache_ops->init(map);
+ }
+ return 0;
+}
+
+void regcache_exit(struct regmap *map)
+{
+ if (map->cache_type == REGCACHE_NONE)
+ return;
+
+ BUG_ON(!map->cache_ops);
+
+ kfree(map->reg_defaults);
+ if (map->cache_free)
+ kfree(map->reg_defaults_raw);
+
+ if (map->cache_ops->exit) {
+ dev_dbg(map->dev, "Destroying %s cache\n",
+ map->cache_ops->name);
+ map->cache_ops->exit(map);
+ }
+}
+
+/**
+ * regcache_read: Fetch the value of a given register from the cache.
+ *
+ * @map: map to configure.
+ * @reg: The register index.
+ * @value: The value to be returned.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_read(struct regmap *map,
+ unsigned int reg, unsigned int *value)
+{
+ if (map->cache_type == REGCACHE_NONE)
+ return -ENOSYS;
+
+ BUG_ON(!map->cache_ops);
+
+ if (!regmap_readable(map, reg))
+ return -EIO;
+
+ if (!regmap_volatile(map, reg))
+ return map->cache_ops->read(map, reg, value);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(regcache_read);
+
+/**
+ * regcache_write: Set the value of a given register in the cache.
+ *
+ * @map: map to configure.
+ * @reg: The register index.
+ * @value: The new register value.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_write(struct regmap *map,
+ unsigned int reg, unsigned int value)
+{
+ if (map->cache_type == REGCACHE_NONE)
+ return 0;
+
+ BUG_ON(!map->cache_ops);
+
+ if (!regmap_writeable(map, reg))
+ return -EIO;
+
+ if (!regmap_volatile(map, reg))
+ return map->cache_ops->write(map, reg, value);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regcache_write);
+
+/**
+ * regcache_sync: Sync the register cache with the hardware.
+ *
+ * @map: map to configure.
+ *
+ * Any registers that should not be synced should be marked as
+ * volatile. In general drivers can choose not to use the provided
+ * syncing functionality if they so require.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_sync(struct regmap *map)
+{
+ int ret = 0;
+ unsigned int val;
+ unsigned int i;
+ const char *name;
+ unsigned int bypass;
+
+ BUG_ON(!map->cache_ops);
+
+ mutex_lock(&map->lock);
+ /* Remember the initial bypass state */
+ bypass = map->cache_bypass;
+ dev_dbg(map->dev, "Syncing %s cache\n",
+ map->cache_ops->name);
+ name = map->cache_ops->name;
+ trace_regcache_sync(map->dev, name, "start");
+ if (map->cache_ops->sync) {
+ ret = map->cache_ops->sync(map);
+ } else {
+ for (i = 0; i < map->num_reg_defaults; i++) {
+ ret = regcache_read(map, i, &val);
+ if (ret < 0)
+ goto out;
+ map->cache_bypass = 1;
+ ret = _regmap_write(map, i, val);
+ map->cache_bypass = 0;
+ if (ret < 0)
+ goto out;
+ dev_dbg(map->dev, "Synced register %#x, value %#x\n",
+ map->reg_defaults[i].reg,
+ map->reg_defaults[i].def);
+ }
+
+ }
+out:
+ trace_regcache_sync(map->dev, name, "stop");
+ /* Restore the bypass state */
+ map->cache_bypass = bypass;
+ mutex_unlock(&map->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regcache_sync);
+
+/**
+ * regcache_cache_only: Put a register map into cache only mode
+ *
+ * @map: map to configure
+ * @enable: flag if writes should only update the cache, not the hardware
+ *
+ * When a register map is marked as cache only writes to the register
+ * map API will only update the register cache, they will not cause
+ * any hardware changes. This is useful for allowing portions of
+ * drivers to act as though the device were functioning as normal when
+ * it is disabled for power saving reasons.
+ */
+void regcache_cache_only(struct regmap *map, bool enable)
+{
+ mutex_lock(&map->lock);
+ WARN_ON(map->cache_bypass && enable);
+ map->cache_only = enable;
+ mutex_unlock(&map->lock);
+}
+EXPORT_SYMBOL_GPL(regcache_cache_only);
+
+/**
+ * regcache_cache_bypass: Put a register map into cache bypass mode
+ *
+ * @map: map to configure
+ * @enable: flag if writes should bypass the cache and go straight to the hardware
+ *
+ * When a register map is marked with the cache bypass option, writes
+ * to the register map API will only update the hardware and not
+ * the cache directly. This is useful when syncing the cache back to
+ * the hardware.
+ */
+void regcache_cache_bypass(struct regmap *map, bool enable)
+{
+ mutex_lock(&map->lock);
+ WARN_ON(map->cache_only && enable);
+ map->cache_bypass = enable;
+ mutex_unlock(&map->lock);
+}
+EXPORT_SYMBOL_GPL(regcache_cache_bypass);
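A hedged sketch of the suspend/resume pattern these two modes are designed for (the example_* names are hypothetical); note regcache_sync() itself toggles cache_bypass internally while replaying, as shown above:

	static int example_suspend(struct regmap *map)
	{
		/* device is powering down: land writes in the cache only */
		regcache_cache_only(map, true);
		return 0;
	}

	static int example_resume(struct regmap *map)
	{
		regcache_cache_only(map, false);
		/* replay cached register values to the re-powered hardware */
		return regcache_sync(map);
	}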
+
+bool regcache_set_val(void *base, unsigned int idx,
+ unsigned int val, unsigned int word_size)
+{
+ switch (word_size) {
+ case 1: {
+ u8 *cache = base;
+ if (cache[idx] == val)
+ return true;
+ cache[idx] = val;
+ break;
+ }
+ case 2: {
+ u16 *cache = base;
+ if (cache[idx] == val)
+ return true;
+ cache[idx] = val;
+ break;
+ }
+ default:
+ BUG();
+ }
+ /* unreachable */
+ return false;
+}
+
+unsigned int regcache_get_val(const void *base, unsigned int idx,
+ unsigned int word_size)
+{
+ if (!base)
+ return -EINVAL;
+
+ switch (word_size) {
+ case 1: {
+ const u8 *cache = base;
+ return cache[idx];
+ }
+ case 2: {
+ const u16 *cache = base;
+ return cache[idx];
+ }
+ default:
+ BUG();
+ }
+ /* unreachable */
+ return -1;
+}
+
+static int regcache_default_cmp(const void *a, const void *b)
+{
+ const struct reg_default *_a = a;
+ const struct reg_default *_b = b;
+
+ return _a->reg - _b->reg;
+}
+
+int regcache_lookup_reg(struct regmap *map, unsigned int reg)
+{
+ struct reg_default key;
+ struct reg_default *r;
+
+ key.reg = reg;
+ key.def = 0;
+
+ r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
+ sizeof(struct reg_default), regcache_default_cmp);
+
+ if (r)
+ return r - map->reg_defaults;
+ else
+ return -ENOENT;
+}
+
+int regcache_insert_reg(struct regmap *map, unsigned int reg,
+ unsigned int val)
+{
+ void *tmp;
+
+ tmp = krealloc(map->reg_defaults,
+ (map->num_reg_defaults + 1) * sizeof(struct reg_default),
+ GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+ map->reg_defaults = tmp;
+ map->num_reg_defaults++;
+ map->reg_defaults[map->num_reg_defaults - 1].reg = reg;
+ map->reg_defaults[map->num_reg_defaults - 1].def = val;
+ sort(map->reg_defaults, map->num_reg_defaults,
+ sizeof(struct reg_default), regcache_default_cmp, NULL);
+ return 0;
+}
*/
#include "bcma_private.h"
+#include <linux/export.h>
#include <linux/bcma/bcma.h>
-static void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc,
- u32 offset, u32 mask, u32 set)
+static u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset)
{
- u32 value;
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
+ bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
+ return bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
+}
- bcma_cc_read32(cc, BCMA_CC_CHIPCTL_ADDR);
+void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value)
+{
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
+ bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value);
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_pll_write);
+
+void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
+ u32 set)
+{
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
+ bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
+ bcma_cc_maskset32(cc, BCMA_CC_PLLCTL_DATA, mask, set);
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_pll_maskset);
+
+void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc,
+ u32 offset, u32 mask, u32 set)
+{
bcma_cc_write32(cc, BCMA_CC_CHIPCTL_ADDR, offset);
bcma_cc_read32(cc, BCMA_CC_CHIPCTL_ADDR);
- value = bcma_cc_read32(cc, BCMA_CC_CHIPCTL_DATA);
- value &= mask;
- value |= set;
- bcma_cc_write32(cc, BCMA_CC_CHIPCTL_DATA, value);
- bcma_cc_read32(cc, BCMA_CC_CHIPCTL_DATA);
+ bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL_DATA, mask, set);
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_chipctl_maskset);
+
+void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
+ u32 set)
+{
+ bcma_cc_write32(cc, BCMA_CC_REGCTL_ADDR, offset);
+ bcma_cc_read32(cc, BCMA_CC_REGCTL_ADDR);
+ bcma_cc_maskset32(cc, BCMA_CC_REGCTL_DATA, mask, set);
}
+EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset);
static void bcma_pmu_pll_init(struct bcma_drv_cc *cc)
{
#include <linux/kernel.h>
#include <linux/cpuidle.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>
+#include <linux/module.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/basic_mmio_gpio.h>
+#include <linux/module.h>
#include <mach/hardware.h>
+#include <mach/gpio-ep93xx.h>
+
+#define irq_to_gpio(irq) ((irq) - gpio_to_irq(0))
struct ep93xx_gpio {
void __iomem *mmio_base;
#include <linux/basic_mmio_gpio.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/module.h>
#include <asm-generic/bug.h>
+#define irq_to_gpio(irq) ((irq) - MXC_GPIO_IRQ_START)
+
enum mxc_gpio_hwtype {
IMX1_GPIO, /* runs on i.mx1 */
IMX21_GPIO, /* runs on i.mx21 and i.mx27 */
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/moduleparam.h>
#include <linux/slab.h>
+#include <linux/stat.h>
#include <linux/vmalloc.h>
#include "ipath_kernel.h"
--- /dev/null
+/*
+ * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-btree.h"
+#include "dm-btree-internal.h"
+#include "dm-transaction-manager.h"
+
+#include <linux/export.h>
+
+/*
+ * Removing an entry from a btree
+ * ==============================
+ *
+ * A very important constraint for our btree is that no node, except the
+ * root, may have fewer than a certain number of entries.
+ * (MIN_ENTRIES <= nr_entries <= MAX_ENTRIES).
+ *
+ * Ensuring this is complicated by the way we want to only ever hold the
+ * locks on 2 nodes concurrently, and only change nodes in a top to bottom
+ * fashion.
+ *
+ * Each node may have a left or right sibling. When descending the spine,
+ * if a node contains only MIN_ENTRIES then we try to increase this to at
+ * least MIN_ENTRIES + 1. We do this in the following ways:
+ *
+ * [A] No siblings => this can only happen if the node is the root, in which
+ * case we copy the child's contents over the root.
+ *
+ * [B] No left sibling
+ * ==> rebalance(node, right sibling)
+ *
+ * [C] No right sibling
+ * ==> rebalance(left sibling, node)
+ *
+ * [D] Both siblings, total_entries(left, node, right) <= DEL_THRESHOLD
+ * ==> delete node, adding its contents to left and right
+ *
+ * [E] Both siblings, total_entries(left, node, right) > DEL_THRESHOLD
+ * ==> rebalance(left, node, right)
+ *
+ * After these operations it's possible that our original node no
+ * longer contains the desired subtree. For this reason this rebalancing
+ * is performed on the children of the current node. This also avoids
+ * having a special case for the root.
+ *
+ * Once this rebalancing has occurred we can step into the child node
+ * (for internal nodes) or delete the entry (for leaf nodes).
+ */
+
+/*
+ * Some little utilities for moving node data around.
+ */
+static void node_shift(struct node *n, int shift)
+{
+ uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
+
+ if (shift < 0) {
+ shift = -shift;
+ memmove(key_ptr(n, 0),
+ key_ptr(n, shift),
+ (nr_entries - shift) * sizeof(__le64));
+ memmove(value_ptr(n, 0, sizeof(__le64)),
+ value_ptr(n, shift, sizeof(__le64)),
+ (nr_entries - shift) * sizeof(__le64));
+ } else {
+ memmove(key_ptr(n, shift),
+ key_ptr(n, 0),
+ nr_entries * sizeof(__le64));
+ memmove(value_ptr(n, shift, sizeof(__le64)),
+ value_ptr(n, 0, sizeof(__le64)),
+ nr_entries * sizeof(__le64));
+ }
+}
+
+static void node_copy(struct node *left, struct node *right, int shift)
+{
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+
+ if (shift < 0) {
+ shift = -shift;
+ memcpy(key_ptr(left, nr_left),
+ key_ptr(right, 0),
+ shift * sizeof(__le64));
+ memcpy(value_ptr(left, nr_left, sizeof(__le64)),
+ value_ptr(right, 0, sizeof(__le64)),
+ shift * sizeof(__le64));
+ } else {
+ memcpy(key_ptr(right, 0),
+ key_ptr(left, nr_left - shift),
+ shift * sizeof(__le64));
+ memcpy(value_ptr(right, 0, sizeof(__le64)),
+ value_ptr(left, nr_left - shift, sizeof(__le64)),
+ shift * sizeof(__le64));
+ }
+}
+
+/*
+ * Delete a specific entry from a leaf node.
+ */
+static void delete_at(struct node *n, unsigned index, size_t value_size)
+{
+ unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
+ unsigned nr_to_copy = nr_entries - (index + 1);
+
+ if (nr_to_copy) {
+ memmove(key_ptr(n, index),
+ key_ptr(n, index + 1),
+ nr_to_copy * sizeof(__le64));
+
+ memmove(value_ptr(n, index, value_size),
+ value_ptr(n, index + 1, value_size),
+ nr_to_copy * value_size);
+ }
+
+ n->header.nr_entries = cpu_to_le32(nr_entries - 1);
+}
+
+static unsigned del_threshold(struct node *n)
+{
+ return le32_to_cpu(n->header.max_entries) / 3;
+}
+
+static unsigned merge_threshold(struct node *n)
+{
+ /*
+ * The extra one is because we know we're potentially going to
+ * delete an entry.
+ */
+ return 2 * (le32_to_cpu(n->header.max_entries) / 3) + 1;
+}
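To make the thresholds concrete with a hypothetical max_entries of 126: del_threshold() is 126 / 3 = 42, so a child is only rebalanced once it has dropped to 42 entries or fewer, while merge_threshold() is 2 * (126 / 3) + 1 = 85, so two siblings holding 42 and 43 entries (85 total) still collapse into a single node in __rebalance2().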
+
+struct child {
+ unsigned index;
+ struct dm_block *block;
+ struct node *n;
+};
+
+static struct dm_btree_value_type le64_type = {
+ .context = NULL,
+ .size = sizeof(__le64),
+ .inc = NULL,
+ .dec = NULL,
+ .equal = NULL
+};
+
+static int init_child(struct dm_btree_info *info, struct node *parent,
+ unsigned index, struct child *result)
+{
+ int r, inc;
+ dm_block_t root;
+
+ result->index = index;
+ root = value64(parent, index);
+
+ r = dm_tm_shadow_block(info->tm, root, &btree_node_validator,
+ &result->block, &inc);
+ if (r)
+ return r;
+
+ result->n = dm_block_data(result->block);
+
+ if (inc)
+ inc_children(info->tm, result->n, &le64_type);
+
+ return 0;
+}
+
+static int exit_child(struct dm_btree_info *info, struct child *c)
+{
+ return dm_tm_unlock(info->tm, c->block);
+}
+
+static void shift(struct node *left, struct node *right, int count)
+{
+ if (!count)
+ return;
+
+ if (count > 0) {
+ node_shift(right, count);
+ node_copy(left, right, count);
+ } else {
+ node_copy(left, right, count);
+ node_shift(right, count);
+ }
+
+ left->header.nr_entries =
+ cpu_to_le32(le32_to_cpu(left->header.nr_entries) - count);
+
+ right->header.nr_entries =
+ cpu_to_le32(le32_to_cpu(right->header.nr_entries) + count);
+}
+
+static void __rebalance2(struct dm_btree_info *info, struct node *parent,
+ struct child *l, struct child *r)
+{
+ struct node *left = l->n;
+ struct node *right = r->n;
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+
+ if (nr_left + nr_right <= merge_threshold(left)) {
+ /*
+ * Merge
+ */
+ node_copy(left, right, -nr_right);
+ left->header.nr_entries = cpu_to_le32(nr_left + nr_right);
+
+ *((__le64 *) value_ptr(parent, l->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(l->block));
+ delete_at(parent, r->index, sizeof(__le64));
+
+ /*
+ * We need to decrement the right block, but not its
+ * children, since they're still referenced by left.
+ */
+ dm_tm_dec(info->tm, dm_block_location(r->block));
+ } else {
+ /*
+ * Rebalance.
+ */
+ unsigned target_left = (nr_left + nr_right) / 2;
+
+ shift(left, right, nr_left - target_left);
+ *((__le64 *) value_ptr(parent, l->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(l->block));
+ *((__le64 *) value_ptr(parent, r->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(r->block));
+ *key_ptr(parent, r->index) = right->keys[0];
+ }
+}
+
+static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
+ unsigned left_index)
+{
+ int r;
+ struct node *parent;
+ struct child left, right;
+
+ parent = dm_block_data(shadow_current(s));
+
+ r = init_child(info, parent, left_index, &left);
+ if (r)
+ return r;
+
+ r = init_child(info, parent, left_index + 1, &right);
+ if (r) {
+ exit_child(info, &left);
+ return r;
+ }
+
+ __rebalance2(info, parent, &left, &right);
+
+ r = exit_child(info, &left);
+ if (r) {
+ exit_child(info, &right);
+ return r;
+ }
+
+ r = exit_child(info, &right);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void __rebalance3(struct dm_btree_info *info, struct node *parent,
+ struct child *l, struct child *c, struct child *r)
+{
+ struct node *left = l->n;
+ struct node *center = c->n;
+ struct node *right = r->n;
+
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
+ uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+ uint32_t max_entries = le32_to_cpu(left->header.max_entries);
+
+ unsigned target;
+
+ if (((nr_left + nr_center + nr_right) / 2) < merge_threshold(center)) {
+ /*
+ * Delete center node:
+ *
+ * We dump as many entries from center as possible into
+ * left, then the rest in right, then rebalance2. This
+ * wastes some cpu, but I want something simple atm.
+ */
+ unsigned shift = min(max_entries - nr_left, nr_center);
+
+ node_copy(left, center, -shift);
+ left->header.nr_entries = cpu_to_le32(nr_left + shift);
+
+ if (shift != nr_center) {
+ shift = nr_center - shift;
+ node_shift(right, shift);
+ node_copy(center, right, shift);
+ right->header.nr_entries = cpu_to_le32(nr_right + shift);
+ }
+
+ *((__le64 *) value_ptr(parent, l->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(l->block));
+ *((__le64 *) value_ptr(parent, r->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(r->block));
+ *key_ptr(parent, r->index) = right->keys[0];
+
+ delete_at(parent, c->index, sizeof(__le64));
+ r->index--;
+
+ dm_tm_dec(info->tm, dm_block_location(c->block));
+ __rebalance2(info, parent, l, r);
+
+ return;
+ }
+
+ /*
+ * Rebalance
+ */
+ target = (nr_left + nr_center + nr_right) / 3;
+ BUG_ON(target == nr_center);
+
+ /*
+ * Adjust the left node
+ */
+ shift(left, center, nr_left - target);
+
+ /*
+ * Adjust the right node
+ */
+ shift(center, right, target - nr_right);
+
+ *((__le64 *) value_ptr(parent, l->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(l->block));
+ *((__le64 *) value_ptr(parent, c->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(c->block));
+ *((__le64 *) value_ptr(parent, r->index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(r->block));
+
+ *key_ptr(parent, c->index) = center->keys[0];
+ *key_ptr(parent, r->index) = right->keys[0];
+}
+
+static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
+ unsigned left_index)
+{
+ int r;
+ struct node *parent = dm_block_data(shadow_current(s));
+ struct child left, center, right;
+
+ /*
+ * FIXME: fill out an array?
+ */
+ r = init_child(info, parent, left_index, &left);
+ if (r)
+ return r;
+
+ r = init_child(info, parent, left_index + 1, &center);
+ if (r) {
+ exit_child(info, &left);
+ return r;
+ }
+
+ r = init_child(info, parent, left_index + 2, &right);
+ if (r) {
+ exit_child(info, &left);
+ exit_child(info, &center);
+ return r;
+ }
+
+ __rebalance3(info, parent, &left, &center, &right);
+
+ r = exit_child(info, &left);
+ if (r) {
+ exit_child(info, &center);
+ exit_child(info, &right);
+ return r;
+ }
+
+ r = exit_child(info, &center);
+ if (r) {
+ exit_child(info, &right);
+ return r;
+ }
+
+ r = exit_child(info, &right);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int get_nr_entries(struct dm_transaction_manager *tm,
+ dm_block_t b, uint32_t *result)
+{
+ int r;
+ struct dm_block *block;
+ struct node *n;
+
+ r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
+ if (r)
+ return r;
+
+ n = dm_block_data(block);
+ *result = le32_to_cpu(n->header.nr_entries);
+
+ return dm_tm_unlock(tm, block);
+}
+
+static int rebalance_children(struct shadow_spine *s,
+ struct dm_btree_info *info, uint64_t key)
+{
+ int i, r, has_left_sibling, has_right_sibling;
+ uint32_t child_entries;
+ struct node *n;
+
+ n = dm_block_data(shadow_current(s));
+
+ if (le32_to_cpu(n->header.nr_entries) == 1) {
+ struct dm_block *child;
+ dm_block_t b = value64(n, 0);
+
+ r = dm_tm_read_lock(info->tm, b, &btree_node_validator, &child);
+ if (r)
+ return r;
+
+ memcpy(n, dm_block_data(child),
+ dm_bm_block_size(dm_tm_get_bm(info->tm)));
+ r = dm_tm_unlock(info->tm, child);
+ if (r)
+ return r;
+
+ /* b still holds the child's location; don't touch the block after unlock */
+ dm_tm_dec(info->tm, b);
+
+ return 0;
+ }
+
+ i = lower_bound(n, key);
+ if (i < 0)
+ return -ENODATA;
+
+ r = get_nr_entries(info->tm, value64(n, i), &child_entries);
+ if (r)
+ return r;
+
+ if (child_entries > del_threshold(n))
+ return 0;
+
+ has_left_sibling = i > 0 ? 1 : 0;
+ has_right_sibling =
+ (i >= (le32_to_cpu(n->header.nr_entries) - 1)) ? 0 : 1;
+
+ if (!has_left_sibling)
+ r = rebalance2(s, info, i);
+
+ else if (!has_right_sibling)
+ r = rebalance2(s, info, i - 1);
+
+ else
+ r = rebalance3(s, info, i - 1);
+
+ return r;
+}
+
+static int do_leaf(struct node *n, uint64_t key, unsigned *index)
+{
+ int i = lower_bound(n, key);
+
+ if ((i < 0) ||
+ (i >= le32_to_cpu(n->header.nr_entries)) ||
+ (le64_to_cpu(n->keys[i]) != key))
+ return -ENODATA;
+
+ *index = i;
+
+ return 0;
+}
+
+/*
+ * Prepares for removal from one level of the hierarchy. The caller must
+ * actually call delete_at() to remove the entry at index.
+ */
+static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
+ struct dm_btree_value_type *vt, dm_block_t root,
+ uint64_t key, unsigned *index)
+{
+ int i = *index, inc, r;
+ struct node *n;
+
+ for (;;) {
+ r = shadow_step(s, root, vt, &inc);
+ if (r < 0)
+ break;
+
+ /*
+ * We have to patch up the parent node. Ugly, but I don't
+ * see a way to do this automatically as part of the spine
+ * op.
+ */
+ if (shadow_has_parent(s)) {
+ __le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
+ memcpy(value_ptr(dm_block_data(shadow_parent(s)), i, sizeof(uint64_t)),
+ &location, sizeof(__le64));
+ }
+
+ n = dm_block_data(shadow_current(s));
+ if (inc)
+ inc_children(info->tm, n, vt);
+
+ if (le32_to_cpu(n->header.flags) & LEAF_NODE)
+ return do_leaf(n, key, index);
+
+ r = rebalance_children(s, info, key);
+ if (r)
+ break;
+
+ n = dm_block_data(shadow_current(s));
+ if (le32_to_cpu(n->header.flags) & LEAF_NODE)
+ return do_leaf(n, key, index);
+
+ i = lower_bound(n, key);
+
+ /*
+ * We know the key is present, or else
+ * rebalance_children would have returned
+ * -ENODATA
+ */
+ root = value64(n, i);
+ }
+
+ return r;
+}
+
+int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, dm_block_t *new_root)
+{
+ unsigned level, last_level = info->levels - 1;
+ int index = 0, r = 0;
+ struct shadow_spine spine;
+ struct node *n;
+
+ init_shadow_spine(&spine, info);
+ for (level = 0; level < info->levels; level++) {
+ r = remove_raw(&spine, info,
+ (level == last_level ?
+ &info->value_type : &le64_type),
+ root, keys[level], (unsigned *)&index);
+ if (r < 0)
+ break;
+
+ n = dm_block_data(shadow_current(&spine));
+ if (level != last_level) {
+ root = value64(n, index);
+ continue;
+ }
+
+ BUG_ON(index < 0 || index >= le32_to_cpu(n->header.nr_entries));
+
+ if (info->value_type.dec)
+ info->value_type.dec(info->value_type.context,
+ value_ptr(n, index, info->value_type.size));
+
+ delete_at(n, index, info->value_type.size);
+
+ r = 0;
+ *new_root = shadow_root(&spine);
+ }
+
+ exit_shadow_spine(&spine);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_btree_remove);
--- /dev/null
+/*
+ * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-btree-internal.h"
+#include "dm-space-map.h"
+#include "dm-transaction-manager.h"
+
+#include <linux/export.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "btree"
+
+/*----------------------------------------------------------------
+ * Array manipulation
+ *--------------------------------------------------------------*/
+static void memcpy_disk(void *dest, const void *src, size_t len)
+ __dm_written_to_disk(src)
+{
+ memcpy(dest, src, len);
+ __dm_unbless_for_disk(src);
+}
+
+static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
+ unsigned index, void *elt)
+ __dm_written_to_disk(elt)
+{
+ if (index < nr_elts)
+ memmove(base + (elt_size * (index + 1)),
+ base + (elt_size * index),
+ (nr_elts - index) * elt_size);
+
+ memcpy_disk(base + (elt_size * index), elt, elt_size);
+}
+
+/*----------------------------------------------------------------*/
+
+/* makes the assumption that no two keys are the same. */
+static int bsearch(struct node *n, uint64_t key, int want_hi)
+{
+ int lo = -1, hi = le32_to_cpu(n->header.nr_entries);
+
+ while (hi - lo > 1) {
+ int mid = lo + ((hi - lo) / 2);
+ uint64_t mid_key = le64_to_cpu(n->keys[mid]);
+
+ if (mid_key == key)
+ return mid;
+
+ if (mid_key < key)
+ lo = mid;
+ else
+ hi = mid;
+ }
+
+ return want_hi ? hi : lo;
+}
+
+int lower_bound(struct node *n, uint64_t key)
+{
+ return bsearch(n, key, 0);
+}
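For callers, the return convention of lower_bound() is: the index of the greatest key less than or equal to the search key, or -1 when every key is larger. A worked instance (illustrative keys, not from the code):

	/* keys[] = { 10, 20, 30 }, nr_entries = 3 */
	lower_bound(n, 25);	/* returns 1 (keys[1] == 20) */
	lower_bound(n, 20);	/* returns 1 (exact match) */
	lower_bound(n, 5);	/* returns -1 (no key <= 5) */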
+
+void inc_children(struct dm_transaction_manager *tm, struct node *n,
+ struct dm_btree_value_type *vt)
+{
+ unsigned i;
+ uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
+
+ if (le32_to_cpu(n->header.flags) & INTERNAL_NODE)
+ for (i = 0; i < nr_entries; i++)
+ dm_tm_inc(tm, value64(n, i));
+ else if (vt->inc)
+ for (i = 0; i < nr_entries; i++)
+ vt->inc(vt->context,
+ value_ptr(n, i, vt->size));
+}
+
+static int insert_at(size_t value_size, struct node *node, unsigned index,
+ uint64_t key, void *value)
+ __dm_written_to_disk(value)
+{
+ uint32_t nr_entries = le32_to_cpu(node->header.nr_entries);
+ __le64 key_le = cpu_to_le64(key);
+
+ if (index > nr_entries ||
+ index >= le32_to_cpu(node->header.max_entries)) {
+ DMERR("too many entries in btree node for insert");
+ __dm_unbless_for_disk(value);
+ return -ENOMEM;
+ }
+
+ __dm_bless_for_disk(&key_le);
+
+ array_insert(node->keys, sizeof(*node->keys), nr_entries, index, &key_le);
+ array_insert(value_base(node), value_size, nr_entries, index, value);
+ node->header.nr_entries = cpu_to_le32(nr_entries + 1);
+
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * We want 3n entries (for some n). This works more nicely for repeated
+ * insert/remove loops than (2n + 1).
+ */
+static uint32_t calc_max_entries(size_t value_size, size_t block_size)
+{
+ uint32_t total, n;
+ size_t elt_size = sizeof(uint64_t) + value_size; /* key + value */
+
+ block_size -= sizeof(struct node_header);
+ total = block_size / elt_size;
+ n = total / 3; /* rounds down */
+
+ return 3 * n;
+}
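A worked instance of calc_max_entries(), assuming a 4096 byte block and a 32 byte struct node_header (both illustrative, neither size is fixed by the code above): with value_size = 8, elt_size = 16, total = (4096 - 32) / 16 = 254, n = 84, and the function returns 252. Up to two slots per node are given up to keep the entry count a multiple of 3.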
+
+int dm_btree_create(struct dm_btree_info *info, dm_block_t *root)
+{
+ int r;
+ struct dm_block *b;
+ struct node *n;
+ size_t block_size;
+ uint32_t max_entries;
+
+ r = new_block(info, &b);
+ if (r < 0)
+ return r;
+
+ block_size = dm_bm_block_size(dm_tm_get_bm(info->tm));
+ max_entries = calc_max_entries(info->value_type.size, block_size);
+
+ n = dm_block_data(b);
+ memset(n, 0, block_size);
+ n->header.flags = cpu_to_le32(LEAF_NODE);
+ n->header.nr_entries = cpu_to_le32(0);
+ n->header.max_entries = cpu_to_le32(max_entries);
+ n->header.value_size = cpu_to_le32(info->value_type.size);
+
+ *root = dm_block_location(b);
+
+ return unlock_block(info, b);
+}
+EXPORT_SYMBOL_GPL(dm_btree_create);
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Deletion uses a recursive algorithm, since we have limited stack space
+ * we explicitly manage our own stack on the heap.
+ */
+#define MAX_SPINE_DEPTH 64
+struct frame {
+ struct dm_block *b;
+ struct node *n;
+ unsigned level;
+ unsigned nr_children;
+ unsigned current_child;
+};
+
+struct del_stack {
+ struct dm_transaction_manager *tm;
+ int top;
+ struct frame spine[MAX_SPINE_DEPTH];
+};
+
+static int top_frame(struct del_stack *s, struct frame **f)
+{
+ if (s->top < 0) {
+ DMERR("btree deletion stack empty");
+ return -EINVAL;
+ }
+
+ *f = s->spine + s->top;
+
+ return 0;
+}
+
+static int unprocessed_frames(struct del_stack *s)
+{
+ return s->top >= 0;
+}
+
+static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
+{
+ int r;
+ uint32_t ref_count;
+
+ if (s->top >= MAX_SPINE_DEPTH - 1) {
+ DMERR("btree deletion stack out of memory");
+ return -ENOMEM;
+ }
+
+ r = dm_tm_ref(s->tm, b, &ref_count);
+ if (r)
+ return r;
+
+ if (ref_count > 1)
+ /*
+ * This is a shared node, so we can just decrement its
+ * reference counter and leave the children.
+ */
+ dm_tm_dec(s->tm, b);
+
+ else {
+ struct frame *f = s->spine + ++s->top;
+
+ r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b);
+ if (r) {
+ s->top--;
+ return r;
+ }
+
+ f->n = dm_block_data(f->b);
+ f->level = level;
+ f->nr_children = le32_to_cpu(f->n->header.nr_entries);
+ f->current_child = 0;
+ }
+
+ return 0;
+}
+
+static void pop_frame(struct del_stack *s)
+{
+ struct frame *f = s->spine + s->top--;
+
+ dm_tm_dec(s->tm, dm_block_location(f->b));
+ dm_tm_unlock(s->tm, f->b);
+}
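How the explicit stack replaces recursion (a summary of the surrounding code, not new behaviour): push_frame() either read-locks a node onto the spine or, when dm_tm_ref() reports the block is shared, simply decrements its reference count and prunes the whole subtree; the loop in dm_btree_destroy() below then visits each frame's children in order and calls pop_frame() once current_child reaches nr_children.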
+
+int dm_btree_destroy(struct dm_btree_info *info, dm_block_t root)
+{
+ int r;
+ struct del_stack *s;
+
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ s->tm = info->tm;
+ s->top = -1;
+
+ r = push_frame(s, root, 1);
+ if (r)
+ goto out;
+
+ while (unprocessed_frames(s)) {
+ uint32_t flags;
+ struct frame *f;
+ dm_block_t b;
+
+ r = top_frame(s, &f);
+ if (r)
+ goto out;
+
+ if (f->current_child >= f->nr_children) {
+ pop_frame(s);
+ continue;
+ }
+
+ flags = le32_to_cpu(f->n->header.flags);
+ if (flags & INTERNAL_NODE) {
+ b = value64(f->n, f->current_child);
+ f->current_child++;
+ r = push_frame(s, b, f->level);
+ if (r)
+ goto out;
+
+ } else if (f->level != (info->levels - 1)) {
+ b = value64(f->n, f->current_child);
+ f->current_child++;
+ r = push_frame(s, b, f->level + 1);
+ if (r)
+ goto out;
+
+ } else {
+ if (info->value_type.dec) {
+ unsigned i;
+
+ for (i = 0; i < f->nr_children; i++)
+ info->value_type.dec(info->value_type.context,
+ value_ptr(f->n, i, info->value_type.size));
+ }
+ f->current_child = f->nr_children;
+ }
+ }
+
+out:
+ kfree(s);
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_btree_destroy);
+
+/* FIXME: implement or remove this fn before final submission. */
+int dm_btree_delete_gt(struct dm_btree_info *info, dm_block_t root, uint64_t *key,
+ dm_block_t *new_root)
+{
+ /* FIXME: implement */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_btree_delete_gt);
+
+/*----------------------------------------------------------------*/
+
+static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
+ int (*search_fn)(struct node *, uint64_t),
+ uint64_t *result_key, void *v, size_t value_size)
+{
+ int i, r;
+ uint32_t flags, nr_entries;
+
+ do {
+ r = ro_step(s, block);
+ if (r < 0)
+ return r;
+
+ i = search_fn(ro_node(s), key);
+
+ flags = le32_to_cpu(ro_node(s)->header.flags);
+ nr_entries = le32_to_cpu(ro_node(s)->header.nr_entries);
+ if (i < 0 || i >= nr_entries)
+ return -ENODATA;
+
+ if (flags & INTERNAL_NODE)
+ block = value64(ro_node(s), i);
+
+ } while (!(flags & LEAF_NODE));
+
+ *result_key = le64_to_cpu(ro_node(s)->keys[i]);
+ memcpy(v, value_ptr(ro_node(s), i, value_size), value_size);
+
+ return 0;
+}
+
+int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, void *value_le)
+{
+ unsigned level, last_level = info->levels - 1;
+ int r = -ENODATA;
+ uint64_t rkey;
+ __le64 internal_value_le;
+ struct ro_spine spine;
+
+ init_ro_spine(&spine, info);
+ for (level = 0; level < info->levels; level++) {
+ size_t size;
+ void *value_p;
+
+ if (level == last_level) {
+ value_p = value_le;
+ size = info->value_type.size;
+
+ } else {
+ value_p = &internal_value_le;
+ size = sizeof(uint64_t);
+ }
+
+ r = btree_lookup_raw(&spine, root, keys[level],
+ lower_bound, &rkey,
+ value_p, size);
+
+ if (!r) {
+ if (rkey != keys[level]) {
+ exit_ro_spine(&spine);
+ return -ENODATA;
+ }
+ } else {
+ exit_ro_spine(&spine);
+ return r;
+ }
+
+ root = le64_to_cpu(internal_value_le);
+ }
+ exit_ro_spine(&spine);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_btree_lookup);
+
+/*
+ * Splits a node by creating a sibling node and shifting half the nodes
+ * contents across. Assumes there is a parent node, and it has room for
+ * another child.
+ *
+ * Before:
+ * +--------+
+ * | Parent |
+ * +--------+
+ * |
+ * v
+ * +----------+
+ * | A ++++++ |
+ * +----------+
+ *
+ *
+ * After:
+ * +--------+
+ * | Parent |
+ * +--------+
+ * | |
+ * v +------+
+ * +---------+ |
+ * | A* +++ | v
+ * +---------+ +-------+
+ * | B +++ |
+ * +-------+
+ *
+ * Where A* is a shadow of A.
+ */
+static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
+ unsigned parent_index, uint64_t key)
+{
+ int r;
+ size_t size;
+ unsigned nr_left, nr_right;
+ struct dm_block *left, *right, *parent;
+ struct node *ln, *rn, *pn;
+ __le64 location;
+
+ left = shadow_current(s);
+
+ r = new_block(s->info, &right);
+ if (r < 0)
+ return r;
+
+ ln = dm_block_data(left);
+ rn = dm_block_data(right);
+
+ nr_left = le32_to_cpu(ln->header.nr_entries) / 2;
+ nr_right = le32_to_cpu(ln->header.nr_entries) - nr_left;
+
+ ln->header.nr_entries = cpu_to_le32(nr_left);
+
+ rn->header.flags = ln->header.flags;
+ rn->header.nr_entries = cpu_to_le32(nr_right);
+ rn->header.max_entries = ln->header.max_entries;
+ rn->header.value_size = ln->header.value_size;
+ memcpy(rn->keys, ln->keys + nr_left, nr_right * sizeof(rn->keys[0]));
+
+ size = le32_to_cpu(ln->header.flags) & INTERNAL_NODE ?
+ sizeof(uint64_t) : s->info->value_type.size;
+ memcpy(value_ptr(rn, 0, size), value_ptr(ln, nr_left, size),
+ size * nr_right);
+
+ /*
+ * Patch up the parent
+ */
+ parent = shadow_parent(s);
+
+ pn = dm_block_data(parent);
+ location = cpu_to_le64(dm_block_location(left));
+ __dm_bless_for_disk(&location);
+ memcpy_disk(value_ptr(pn, parent_index, sizeof(__le64)),
+ &location, sizeof(__le64));
+
+ location = cpu_to_le64(dm_block_location(right));
+ __dm_bless_for_disk(&location);
+
+ r = insert_at(sizeof(__le64), pn, parent_index + 1,
+ le64_to_cpu(rn->keys[0]), &location);
+ if (r)
+ return r;
+
+ if (key < le64_to_cpu(rn->keys[0])) {
+ unlock_block(s->info, right);
+ s->nodes[1] = left;
+ } else {
+ unlock_block(s->info, left);
+ s->nodes[1] = right;
+ }
+
+ return 0;
+}
+
+/*
+ * Splits a node by creating two new children beneath the given node.
+ *
+ * Before:
+ * +----------+
+ * | A ++++++ |
+ * +----------+
+ *
+ *
+ * After:
+ * +------------+
+ * | A (shadow) |
+ * +------------+
+ * | |
+ * +------+ +----+
+ * | |
+ * v v
+ * +-------+ +-------+
+ * | B +++ | | C +++ |
+ * +-------+ +-------+
+ */
+static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
+{
+ int r;
+ size_t size;
+ unsigned nr_left, nr_right;
+ struct dm_block *left, *right, *new_parent;
+ struct node *pn, *ln, *rn;
+ __le64 val;
+
+ new_parent = shadow_current(s);
+
+ r = new_block(s->info, &left);
+ if (r < 0)
+ return r;
+
+ r = new_block(s->info, &right);
+ if (r < 0) {
+ /* FIXME: put left */
+ return r;
+ }
+
+ pn = dm_block_data(new_parent);
+ ln = dm_block_data(left);
+ rn = dm_block_data(right);
+
+ nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
+ nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
+
+ ln->header.flags = pn->header.flags;
+ ln->header.nr_entries = cpu_to_le32(nr_left);
+ ln->header.max_entries = pn->header.max_entries;
+ ln->header.value_size = pn->header.value_size;
+
+ rn->header.flags = pn->header.flags;
+ rn->header.nr_entries = cpu_to_le32(nr_right);
+ rn->header.max_entries = pn->header.max_entries;
+ rn->header.value_size = pn->header.value_size;
+
+ memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
+ memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
+
+ size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
+ sizeof(__le64) : s->info->value_type.size;
+ memcpy(value_ptr(ln, 0, size), value_ptr(pn, 0, size), nr_left * size);
+ memcpy(value_ptr(rn, 0, size), value_ptr(pn, nr_left, size),
+ nr_right * size);
+
+ /* new_parent should just point to l and r now */
+ pn->header.flags = cpu_to_le32(INTERNAL_NODE);
+ pn->header.nr_entries = cpu_to_le32(2);
+ pn->header.max_entries = cpu_to_le32(
+ calc_max_entries(sizeof(__le64),
+ dm_bm_block_size(
+ dm_tm_get_bm(s->info->tm))));
+ pn->header.value_size = cpu_to_le32(sizeof(__le64));
+
+ val = cpu_to_le64(dm_block_location(left));
+ __dm_bless_for_disk(&val);
+ pn->keys[0] = ln->keys[0];
+ memcpy_disk(value_ptr(pn, 0, sizeof(__le64)), &val, sizeof(__le64));
+
+ val = cpu_to_le64(dm_block_location(right));
+ __dm_bless_for_disk(&val);
+ pn->keys[1] = rn->keys[0];
+ memcpy_disk(value_ptr(pn, 1, sizeof(__le64)), &val, sizeof(__le64));
+
+ /*
+ * rejig the spine. This is ugly, since it knows too
+ * much about the spine
+ */
+ if (s->nodes[0] != new_parent) {
+ unlock_block(s->info, s->nodes[0]);
+ s->nodes[0] = new_parent;
+ }
+ if (key < le64_to_cpu(rn->keys[0])) {
+ unlock_block(s->info, right);
+ s->nodes[1] = left;
+ } else {
+ unlock_block(s->info, left);
+ s->nodes[1] = right;
+ }
+ s->count = 2;
+
+ return 0;
+}
+
+static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
+ struct dm_btree_value_type *vt,
+ uint64_t key, unsigned *index)
+{
+ int r, i = *index, inc, top = 1;
+ struct node *node;
+
+ for (;;) {
+ r = shadow_step(s, root, vt, &inc);
+ if (r < 0)
+ return r;
+
+ node = dm_block_data(shadow_current(s));
+ if (inc)
+ inc_children(s->info->tm, node, vt);
+
+ /*
+ * We have to patch up the parent node. Ugly, but I don't
+ * see a way to do this automatically as part of the spine
+ * op.
+ */
+ if (shadow_has_parent(s) && i >= 0) { /* FIXME: second clause unnecessary. */
+ __le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
+
+ __dm_bless_for_disk(&location);
+ memcpy_disk(value_ptr(dm_block_data(shadow_parent(s)), i, sizeof(uint64_t)),
+ &location, sizeof(__le64));
+ }
+
+ node = dm_block_data(shadow_current(s));
+
+ if (node->header.nr_entries == node->header.max_entries) {
+ if (top)
+ r = btree_split_beneath(s, key);
+ else
+ r = btree_split_sibling(s, root, i, key);
+
+ if (r < 0)
+ return r;
+ }
+
+ node = dm_block_data(shadow_current(s));
+
+ i = lower_bound(node, key);
+
+ if (le32_to_cpu(node->header.flags) & LEAF_NODE)
+ break;
+
+ if (i < 0) {
+ /* change the bounds on the lowest key */
+ node->keys[0] = cpu_to_le64(key);
+ i = 0;
+ }
+
+ root = value64(node, i);
+ top = 0;
+ }
+
+ if (i < 0 || le64_to_cpu(node->keys[i]) != key)
+ i++;
+
+ /* we're about to overwrite this value, so undo the increment for it */
+ /* FIXME: shame that inc information is leaking outside the spine.
+ * Plus inc is just plain wrong in the event of a split */
+ if (le64_to_cpu(node->keys[i]) == key && inc)
+ if (vt->dec)
+ vt->dec(vt->context, value_ptr(node, i, vt->size));
+
+ *index = i;
+ return 0;
+}
+
+static int insert(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, void *value, dm_block_t *new_root,
+ int *inserted)
+ __dm_written_to_disk(value)
+{
+ int r, need_insert;
+ unsigned level, index = -1, last_level = info->levels - 1;
+ dm_block_t block = root;
+ struct shadow_spine spine;
+ struct node *n;
+ struct dm_btree_value_type le64_type;
+
+ le64_type.context = NULL;
+ le64_type.size = sizeof(__le64);
+ le64_type.inc = NULL;
+ le64_type.dec = NULL;
+ le64_type.equal = NULL;
+
+ init_shadow_spine(&spine, info);
+
+ for (level = 0; level < (info->levels - 1); level++) {
+ r = btree_insert_raw(&spine, block, &le64_type, keys[level], &index);
+ if (r < 0)
+ goto bad;
+
+ n = dm_block_data(shadow_current(&spine));
+ need_insert = ((index >= le32_to_cpu(n->header.nr_entries)) ||
+ (le64_to_cpu(n->keys[index]) != keys[level]));
+
+ if (need_insert) {
+ dm_block_t new_tree;
+ __le64 new_le;
+
+ r = dm_btree_create(info, &new_tree);
+ if (r < 0)
+ goto bad;
+
+ new_le = cpu_to_le64(new_tree);
+ __dm_bless_for_disk(&new_le);
+
+ r = insert_at(sizeof(uint64_t), n, index,
+ keys[level], &new_le);
+ if (r)
+ goto bad;
+ }
+
+ if (level < last_level)
+ block = value64(n, index);
+ }
+
+ r = btree_insert_raw(&spine, block, &info->value_type,
+ keys[level], &index);
+ if (r < 0)
+ goto bad;
+
+ n = dm_block_data(shadow_current(&spine));
+ need_insert = ((index >= le32_to_cpu(n->header.nr_entries)) ||
+ (le64_to_cpu(n->keys[index]) != keys[level]));
+
+ if (need_insert) {
+ if (inserted)
+ *inserted = 1;
+
+ r = insert_at(info->value_type.size, n, index,
+ keys[level], value);
+ if (r)
+ goto bad_unblessed;
+ } else {
+ if (inserted)
+ *inserted = 0;
+
+ if (info->value_type.dec &&
+ (!info->value_type.equal ||
+ !info->value_type.equal(
+ info->value_type.context,
+ value_ptr(n, index, info->value_type.size),
+ value))) {
+ info->value_type.dec(info->value_type.context,
+ value_ptr(n, index, info->value_type.size));
+ }
+ memcpy_disk(value_ptr(n, index, info->value_type.size),
+ value, info->value_type.size);
+ }
+
+ *new_root = shadow_root(&spine);
+ exit_shadow_spine(&spine);
+
+ return 0;
+
+bad:
+ __dm_unbless_for_disk(value);
+bad_unblessed:
+ exit_shadow_spine(&spine);
+ return r;
+}
+
+int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, void *value, dm_block_t *new_root)
+ __dm_written_to_disk(value)
+{
+ return insert(info, root, keys, value, new_root, NULL);
+}
+EXPORT_SYMBOL_GPL(dm_btree_insert);
+
+int dm_btree_insert_notify(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, void *value, dm_block_t *new_root,
+ int *inserted)
+ __dm_written_to_disk(value)
+{
+ return insert(info, root, keys, value, new_root, inserted);
+}
+EXPORT_SYMBOL_GPL(dm_btree_insert_notify);
+
+/*----------------------------------------------------------------*/
+
+int dm_btree_clone(struct dm_btree_info *info, dm_block_t root,
+ dm_block_t *clone)
+{
+ int r;
+ struct dm_block *b, *orig_b;
+ struct node *b_node, *orig_node;
+
+ /* Copy the root node */
+ r = new_block(info, &b);
+ if (r < 0)
+ return r;
+
+ r = dm_tm_read_lock(info->tm, root, &btree_node_validator, &orig_b);
+ if (r < 0) {
+ dm_block_t location = dm_block_location(b);
+
+ unlock_block(info, b);
+ dm_tm_dec(info->tm, location);
+
+ return r;
+ }
+
+ *clone = dm_block_location(b);
+ b_node = dm_block_data(b);
+ orig_node = dm_block_data(orig_b);
+
+ memcpy(b_node, orig_node,
+ dm_bm_block_size(dm_tm_get_bm(info->tm)));
+ dm_tm_unlock(info->tm, orig_b);
+ inc_children(info->tm, b_node, &info->value_type);
+ dm_tm_unlock(info->tm, b);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_btree_clone);
+
+/*----------------------------------------------------------------*/
+
+static int find_highest_key(struct ro_spine *s, dm_block_t block,
+ uint64_t *result_key, dm_block_t *next_block)
+{
+ int i, r;
+ uint32_t flags;
+
+ do {
+ r = ro_step(s, block);
+ if (r < 0)
+ return r;
+
+ flags = le32_to_cpu(ro_node(s)->header.flags);
+ i = le32_to_cpu(ro_node(s)->header.nr_entries);
+ if (!i)
+ return -ENODATA;
+ else
+ i--;
+
+ *result_key = le64_to_cpu(ro_node(s)->keys[i]);
+ if (next_block || flags & INTERNAL_NODE)
+ block = value64(ro_node(s), i);
+
+ } while (flags & INTERNAL_NODE);
+
+ if (next_block)
+ *next_block = block;
+ return 0;
+}
+
+int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *result_keys)
+{
+ int r = 0, count = 0, level;
+ struct ro_spine spine;
+
+ init_ro_spine(&spine, info);
+ for (level = 0; level < info->levels; level++) {
+ r = find_highest_key(&spine, root, result_keys + level,
+ level == info->levels - 1 ? NULL : &root);
+ if (r == -ENODATA) {
+ r = 0;
+ break;
+
+ } else if (r)
+ break;
+
+ count++;
+ }
+ exit_ro_spine(&spine);
+
+ return r ? r : count;
+}
+EXPORT_SYMBOL_GPL(dm_btree_find_highest_key);
--- /dev/null
+/*
+ * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-space-map-common.h"
+#include "dm-space-map-disk.h"
+#include "dm-space-map.h"
+#include "dm-transaction-manager.h"
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/export.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "space map disk"
+
+/*
+ * Bitmap validator
+ */
+static void bitmap_prepare_for_write(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size)
+{
+ struct disk_bitmap_header *disk_header = dm_block_data(b);
+
+ disk_header->blocknr = cpu_to_le64(dm_block_location(b));
+ disk_header->csum = cpu_to_le32(dm_block_csum_data(&disk_header->not_used, block_size - sizeof(__le32)));
+}
+
+static int bitmap_check(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size)
+{
+ struct disk_bitmap_header *disk_header = dm_block_data(b);
+ __le32 csum_disk;
+
+ if (dm_block_location(b) != le64_to_cpu(disk_header->blocknr)) {
+ DMERR("bitmap check failed blocknr %llu wanted %llu",
+ le64_to_cpu(disk_header->blocknr), dm_block_location(b));
+ return -ENOTBLK;
+ }
+
+ csum_disk = cpu_to_le32(dm_block_csum_data(&disk_header->not_used, block_size - sizeof(__le32)));
+ if (csum_disk != disk_header->csum) {
+ DMERR("bitmap check failed csum %u wanted %u",
+ le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum));
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+struct dm_block_validator dm_sm_bitmap_validator = {
+ .name = "sm_bitmap",
+ .prepare_for_write = bitmap_prepare_for_write,
+ .check = bitmap_check
+};
+
+/*----------------------------------------------------------------*/
+
+#define ENTRIES_PER_WORD 32
+#define ENTRIES_SHIFT 5
+
+void *dm_bitmap_data(struct dm_block *b)
+{
+ return dm_block_data(b) + sizeof(struct disk_bitmap_header);
+}
+
+#define WORD_MASK_LOW 0x5555555555555555ULL
+#define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL
+#define WORD_MASK_ALL 0xFFFFFFFFFFFFFFFFULL
+
+static unsigned bitmap_word_used(void *addr, unsigned b)
+{
+ __le64 *words_le = addr;
+ __le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
+
+ uint64_t bits = le64_to_cpu(*w_le);
+
+ return ((bits & WORD_MASK_LOW) == WORD_MASK_LOW ||
+ (bits & WORD_MASK_HIGH) == WORD_MASK_HIGH ||
+ (bits & WORD_MASK_ALL) == WORD_MASK_ALL);
+}
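A note on the masks above: each of the 32 entries in a word occupies two bits, so WORD_MASK_LOW covers every entry's "2" bit and WORD_MASK_HIGH every entry's "1" bit. The word is reported used only when one of the masks is fully set, i.e. when every entry is provably non-zero; a word mixing counts 1 and 2 falls through to the per-entry scan in sm_find_free(), which stays correct, just slower.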
+
+unsigned sm_lookup_bitmap(void *addr, unsigned b)
+{
+ __le64 *words_le = addr;
+ __le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
+
+ b = (b & (ENTRIES_PER_WORD - 1)) << 1;
+
+ return (!!test_bit_le(b, (void *) w_le) << 1) |
+ (!!test_bit_le(b + 1, (void *) w_le));
+}
+
+void sm_set_bitmap(void *addr, unsigned b, unsigned val)
+{
+ __le64 *words_le = addr;
+ __le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
+
+ b = (b & (ENTRIES_PER_WORD - 1)) << 1;
+
+ if (val & 2)
+ __set_bit_le(b, (void *) w_le);
+ else
+ __clear_bit_le(b, (void *) w_le);
+
+ if (val & 1)
+ __set_bit_le(b + 1, (void *) w_le);
+ else
+ __clear_bit_le(b + 1, (void *) w_le);
+}
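Usage sketch for the two-bit encoding (illustrative, mirroring disk_ll_insert() and disk_ll_lookup() below): counts 0-2 live in the bitmap itself, while the value 3 means "overflowed, consult the ref_count btree".

	sm_set_bitmap(bm_le, bit, 2);
	sm_lookup_bitmap(bm_le, bit);	/* returns 2 */

	sm_set_bitmap(bm_le, bit, 3);	/* real count lives in the btree */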
+
+int sm_find_free(void *addr, unsigned begin, unsigned end,
+ unsigned *result)
+{
+ while (begin < end) {
+ if (!(begin & (ENTRIES_PER_WORD - 1)) &&
+ bitmap_word_used(addr, begin)) {
+ begin += ENTRIES_PER_WORD;
+ continue;
+ }
+
+ if (!sm_lookup_bitmap(addr, begin)) {
+ *result = begin;
+ return 0;
+ }
+
+ begin++;
+ }
+
+ return -ENOSPC;
+}
+
+static int disk_ll_init(struct ll_disk *io, struct dm_transaction_manager *tm)
+{
+ io->tm = tm;
+ io->bitmap_info.tm = tm;
+ io->bitmap_info.levels = 1;
+
+ /*
+ * Because the new bitmap blocks are created via a shadow
+ * operation, the old entry has already had its reference count
+ * decremented and we don't need the btree to do any bookkeeping.
+ */
+ io->bitmap_info.value_type.size = sizeof(struct disk_index_entry);
+ io->bitmap_info.value_type.inc = NULL;
+ io->bitmap_info.value_type.dec = NULL;
+ io->bitmap_info.value_type.equal = NULL;
+
+ io->ref_count_info.tm = tm;
+ io->ref_count_info.levels = 1;
+ io->ref_count_info.value_type.size = sizeof(uint32_t);
+ io->ref_count_info.value_type.inc = NULL;
+ io->ref_count_info.value_type.dec = NULL;
+ io->ref_count_info.value_type.equal = NULL;
+
+ io->block_size = dm_bm_block_size(dm_tm_get_bm(tm));
+
+ if (io->block_size > (1 << 30)) {
+ DMERR("block size too big to hold bitmaps");
+ return -EINVAL;
+ }
+
+ io->entries_per_block = (io->block_size - sizeof(struct disk_bitmap_header)) *
+ ENTRIES_PER_BYTE;
+ io->nr_blocks = 0;
+ io->bitmap_root = 0;
+ io->ref_count_root = 0;
+
+ return 0;
+}
+
+static int disk_ll_new(struct ll_disk *io, struct dm_transaction_manager *tm)
+{
+ int r;
+
+ r = disk_ll_init(io, tm);
+ if (r < 0)
+ return r;
+
+ io->nr_blocks = 0;
+ io->nr_allocated = 0;
+ r = dm_btree_create(&io->bitmap_info, &io->bitmap_root);
+ if (r < 0)
+ return r;
+
+ r = dm_btree_create(&io->ref_count_info, &io->ref_count_root);
+ if (r < 0) {
+ dm_btree_destroy(&io->bitmap_info, io->bitmap_root);
+ return r;
+ }
+
+ return 0;
+}
+
+static int disk_ll_extend(struct ll_disk *io, dm_block_t extra_blocks)
+{
+ int r;
+ dm_block_t i, nr_blocks;
+ unsigned old_blocks, blocks;
+
+ nr_blocks = io->nr_blocks + extra_blocks;
+ old_blocks = dm_sector_div_up(io->nr_blocks, io->entries_per_block);
+ blocks = dm_sector_div_up(nr_blocks, io->entries_per_block);
+
+ for (i = old_blocks; i < blocks; i++) {
+ struct dm_block *b;
+ struct disk_index_entry idx;
+
+ r = dm_tm_new_block(io->tm, &dm_sm_bitmap_validator, &b);
+ if (r < 0)
+ return r;
+ idx.blocknr = cpu_to_le64(dm_block_location(b));
+
+ r = dm_tm_unlock(io->tm, b);
+ if (r < 0)
+ return r;
+
+ idx.nr_free = cpu_to_le32(io->entries_per_block);
+ idx.none_free_before = 0;
+ __dm_bless_for_disk(&idx);
+
+ r = dm_btree_insert(&io->bitmap_info, io->bitmap_root,
+ &i, &idx, &io->bitmap_root);
+ if (r < 0)
+ return r;
+ }
+
+ io->nr_blocks = nr_blocks;
+ return 0;
+}
+
+static int disk_ll_open(struct ll_disk *ll, struct dm_transaction_manager *tm,
+ void *root_le, size_t len)
+{
+ int r;
+ struct disk_sm_root *smr = root_le;
+
+ if (len < sizeof(struct disk_sm_root)) {
+ DMERR("sm_disk root too small");
+ return -ENOMEM;
+ }
+
+ r = disk_ll_init(ll, tm);
+ if (r < 0)
+ return r;
+
+ ll->nr_blocks = le64_to_cpu(smr->nr_blocks);
+ ll->nr_allocated = le64_to_cpu(smr->nr_allocated);
+ ll->bitmap_root = le64_to_cpu(smr->bitmap_root);
+ ll->ref_count_root = le64_to_cpu(smr->ref_count_root);
+
+ return 0;
+}
+
+static int disk_ll_lookup_bitmap(struct ll_disk *io, dm_block_t b, uint32_t *result)
+{
+ int r;
+ dm_block_t index = b;
+ struct disk_index_entry ie_disk;
+ struct dm_block *blk;
+
+ do_div(index, io->entries_per_block);
+ r = dm_btree_lookup(&io->bitmap_info, io->bitmap_root, &index, &ie_disk);
+ if (r < 0)
+ return r;
+
+ r = dm_tm_read_lock(io->tm, le64_to_cpu(ie_disk.blocknr), &dm_sm_bitmap_validator, &blk);
+ if (r < 0)
+ return r;
+
+ *result = sm_lookup_bitmap(dm_bitmap_data(blk), do_div(b, io->entries_per_block));
+
+ return dm_tm_unlock(io->tm, blk);
+}
+
+static int disk_ll_lookup(struct ll_disk *io, dm_block_t b, uint32_t *result)
+{
+ __le32 rc_le;
+ int r = disk_ll_lookup_bitmap(io, b, result);
+
+ if (r)
+ return r;
+
+ if (*result != 3)
+ return r;
+
+ r = dm_btree_lookup(&io->ref_count_info, io->ref_count_root, &b, &rc_le);
+ if (r < 0)
+ return r;
+
+ *result = le32_to_cpu(rc_le);
+
+ return r;
+}
+
+static int disk_ll_find_free_block(struct ll_disk *io, dm_block_t begin,
+ dm_block_t end, dm_block_t *result)
+{
+ int r;
+ struct disk_index_entry ie_disk;
+ dm_block_t i, index_begin = begin;
+ dm_block_t index_end = dm_sector_div_up(end, io->entries_per_block);
+
+ begin = do_div(index_begin, io->entries_per_block);
+
+ for (i = index_begin; i < index_end; i++, begin = 0) {
+ struct dm_block *blk;
+ unsigned position;
+ uint32_t bit_end;
+
+ r = dm_btree_lookup(&io->bitmap_info, io->bitmap_root, &i, &ie_disk);
+ if (r < 0)
+ return r;
+
+ if (le32_to_cpu(ie_disk.nr_free) <= 0)
+ continue;
+
+ r = dm_tm_read_lock(io->tm, le64_to_cpu(ie_disk.blocknr),
+ &dm_sm_bitmap_validator, &blk);
+ if (r < 0)
+ return r;
+
+ bit_end = (i == index_end - 1) ?
+ do_div(end, io->entries_per_block) : io->entries_per_block;
+
+ r = sm_find_free(dm_bitmap_data(blk),
+ max((unsigned)begin, (unsigned)le32_to_cpu(ie_disk.none_free_before)),
+ bit_end, &position);
+ if (r < 0) {
+ dm_tm_unlock(io->tm, blk);
+ continue;
+ }
+
+ r = dm_tm_unlock(io->tm, blk);
+ if (r < 0)
+ return r;
+
+ *result = i * io->entries_per_block + (dm_block_t) position;
+
+ return 0;
+ }
+
+ return -ENOSPC;
+}
+
+static int disk_ll_insert(struct ll_disk *io, dm_block_t b, uint32_t ref_count)
+{
+ int r;
+ uint32_t bit, old;
+ struct dm_block *nb;
+ dm_block_t index = b;
+ struct disk_index_entry ie_disk;
+ void *bm_le;
+ int inc;
+
+ do_div(index, io->entries_per_block);
+ r = dm_btree_lookup(&io->bitmap_info, io->bitmap_root, &index, &ie_disk);
+ if (r < 0)
+ return r;
+
+ r = dm_tm_shadow_block(io->tm, le64_to_cpu(ie_disk.blocknr),
+ &dm_sm_bitmap_validator, &nb, &inc);
+ if (r < 0) {
+ DMERR("dm_tm_shadow_block() failed");
+ return r;
+ }
+ ie_disk.blocknr = cpu_to_le64(dm_block_location(nb));
+
+ bm_le = dm_bitmap_data(nb);
+ bit = do_div(b, io->entries_per_block);
+ old = sm_lookup_bitmap(bm_le, bit);
+
+ if (ref_count <= 2) {
+ sm_set_bitmap(bm_le, bit, ref_count);
+
+ if (old > 2) {
+ r = dm_btree_remove(&io->ref_count_info, io->ref_count_root,
+ &b, &io->ref_count_root);
+ if (r) {
+ dm_tm_unlock(io->tm, nb);
+ return r;
+ }
+ }
+ } else {
+ __le32 rc_le = cpu_to_le32(ref_count);
+
+ __dm_bless_for_disk(&rc_le);
+
+ sm_set_bitmap(bm_le, bit, 3);
+ r = dm_btree_insert(&io->ref_count_info, io->ref_count_root,
+ &b, &rc_le, &io->ref_count_root);
+ if (r < 0) {
+ dm_tm_unlock(io->tm, nb);
+ DMERR("ref count insert failed");
+ return r;
+ }
+ }
+
+ r = dm_tm_unlock(io->tm, nb);
+ if (r < 0)
+ return r;
+
+ if (ref_count && !old) {
+ io->nr_allocated++;
+ ie_disk.nr_free = cpu_to_le32(le32_to_cpu(ie_disk.nr_free) - 1);
+ if (le32_to_cpu(ie_disk.none_free_before) == b)
+ ie_disk.none_free_before = cpu_to_le32(b + 1);
+
+ } else if (old && !ref_count) {
+ io->nr_allocated--;
+ ie_disk.nr_free = cpu_to_le32(le32_to_cpu(ie_disk.nr_free) + 1);
+ ie_disk.none_free_before = cpu_to_le32(min((dm_block_t) le32_to_cpu(ie_disk.none_free_before), b));
+ }
+
+ __dm_bless_for_disk(&ie_disk);
+
+ r = dm_btree_insert(&io->bitmap_info, io->bitmap_root, &index, &ie_disk, &io->bitmap_root);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+static int disk_ll_inc(struct ll_disk *ll, dm_block_t b)
+{
+ int r;
+ uint32_t rc;
+
+ r = disk_ll_lookup(ll, b, &rc);
+ if (r)
+ return r;
+
+ return disk_ll_insert(ll, b, rc + 1);
+}
+
+static int disk_ll_dec(struct ll_disk *ll, dm_block_t b)
+{
+ int r;
+ uint32_t rc;
+
+ r = disk_ll_lookup(ll, b, &rc);
+ if (r)
+ return r;
+
+ if (!rc)
+ return -EINVAL;
+
+ return disk_ll_insert(ll, b, rc - 1);
+}
+
+/*--------------------------------------------------------------*/
+
+/*
+ * Space map interface.
+ */
+struct sm_disk {
+ struct dm_space_map sm;
+
+ struct ll_disk ll;
+};
+
+static void sm_disk_destroy(struct dm_space_map *sm)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ kfree(smd);
+}
+
+static int sm_disk_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ return disk_ll_extend(&smd->ll, extra_blocks);
+}
+
+static int sm_disk_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ *count = smd->ll.nr_blocks;
+
+ return 0;
+}
+
+static int sm_disk_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ *count = smd->ll.nr_blocks - smd->ll.nr_allocated;
+
+ return 0;
+}
+
+static int sm_disk_get_count(struct dm_space_map *sm, dm_block_t b,
+ uint32_t *result)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ return disk_ll_lookup(&smd->ll, b, result);
+}
+
+static int sm_disk_count_is_more_than_one(struct dm_space_map *sm, dm_block_t b,
+ int *result)
+{
+ int r;
+ uint32_t count;
+
+ r = sm_disk_get_count(sm, b, &count);
+ if (r)
+ return r;
+
+ *result = count > 1;
+
+ return 0;
+}
+
+static int sm_disk_set_count(struct dm_space_map *sm, dm_block_t b,
+ uint32_t count)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ return disk_ll_insert(&smd->ll, b, count);
+}
+
+static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ return disk_ll_inc(&smd->ll, b);
+}
+
+static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ return disk_ll_dec(&smd->ll, b);
+}
+
+static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
+{
+ int r;
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+ /*
+ * FIXME: We should start the search where we left off.
+ */
+ r = disk_ll_find_free_block(&smd->ll, 0, smd->ll.nr_blocks, b);
+ if (r)
+ return r;
+
+ return disk_ll_inc(&smd->ll, *b);
+}
+
+static int sm_disk_commit(struct dm_space_map *sm)
+{
+ return 0;
+}
+
+static int sm_disk_root_size(struct dm_space_map *sm, size_t *result)
+{
+ *result = sizeof(struct disk_sm_root);
+
+ return 0;
+}
+
+static int sm_disk_copy_root(struct dm_space_map *sm, void *where_le, size_t max)
+{
+ struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+ struct disk_sm_root root_le;
+
+ root_le.nr_blocks = cpu_to_le64(smd->ll.nr_blocks);
+ root_le.nr_allocated = cpu_to_le64(smd->ll.nr_allocated);
+ root_le.bitmap_root = cpu_to_le64(smd->ll.bitmap_root);
+ root_le.ref_count_root = cpu_to_le64(smd->ll.ref_count_root);
+
+ if (max < sizeof(root_le))
+ return -ENOSPC;
+
+ memcpy(where_le, &root_le, sizeof(root_le));
+
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+
+static struct dm_space_map ops = {
+ .destroy = sm_disk_destroy,
+ .extend = sm_disk_extend,
+ .get_nr_blocks = sm_disk_get_nr_blocks,
+ .get_nr_free = sm_disk_get_nr_free,
+ .get_count = sm_disk_get_count,
+ .count_is_more_than_one = sm_disk_count_is_more_than_one,
+ .set_count = sm_disk_set_count,
+ .inc_block = sm_disk_inc_block,
+ .dec_block = sm_disk_dec_block,
+ .new_block = sm_disk_new_block,
+ .commit = sm_disk_commit,
+ .root_size = sm_disk_root_size,
+ .copy_root = sm_disk_copy_root
+};
+
+struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
+ dm_block_t nr_blocks)
+{
+ int r;
+ struct sm_disk *smd;
+
+ smd = kmalloc(sizeof(*smd), GFP_KERNEL);
+ if (!smd)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&smd->sm, &ops, sizeof(smd->sm));
+
+ r = disk_ll_new(&smd->ll, tm);
+ if (r)
+ goto bad;
+
+ r = disk_ll_extend(&smd->ll, nr_blocks);
+ if (r)
+ goto bad;
+
+ r = sm_disk_commit(&smd->sm);
+ if (r)
+ goto bad;
+
+ return &smd->sm;
+
+bad:
+ kfree(smd);
+ return ERR_PTR(r);
+}
+EXPORT_SYMBOL_GPL(dm_sm_disk_create);
+
+struct dm_space_map *dm_sm_disk_open(struct dm_transaction_manager *tm,
+ void *root_le, size_t len)
+{
+ int r;
+ struct sm_disk *smd;
+
+ smd = kmalloc(sizeof(*smd), GFP_KERNEL);
+ if (!smd)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&smd->sm, &ops, sizeof(smd->sm));
+
+ r = disk_ll_open(&smd->ll, tm, root_le, len);
+ if (r)
+ goto bad;
+
+ r = sm_disk_commit(&smd->sm);
+ if (r)
+ goto bad;
+
+ return &smd->sm;
+
+bad:
+ kfree(smd);
+ return ERR_PTR(r);
+}
+EXPORT_SYMBOL_GPL(dm_sm_disk_open);
--- /dev/null
+/*
+ * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+#include "dm-transaction-manager.h"
+#include "dm-space-map.h"
+#include "dm-space-map-disk.h"
+#include "dm-space-map-metadata.h"
+#include "dm-persistent-data-internal.h"
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "transaction manager"
+
+/*----------------------------------------------------------------*/
+
+struct shadow_info {
+ struct hlist_node hlist;
+ dm_block_t where;
+};
+
+/*
+ * It would be nice if this scaled with the size of the transaction.
+ */
+#define HASH_SIZE 256
+#define HASH_MASK (HASH_SIZE - 1)
+
+struct dm_transaction_manager {
+ int is_clone;
+ struct dm_transaction_manager *real;
+
+ struct dm_block_manager *bm;
+ struct dm_space_map *sm;
+
+ spinlock_t lock;
+ struct hlist_head buckets[HASH_SIZE];
+};
+
+/*----------------------------------------------------------------*/
+
+static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
+{
+ int r = 0;
+ unsigned bucket = dm_hash_block(b, HASH_MASK);
+ struct shadow_info *si;
+ struct hlist_node *n;
+
+ spin_lock(&tm->lock);
+
+ hlist_for_each_entry(si, n, tm->buckets + bucket, hlist)
+ if (si->where == b) {
+ r = 1;
+ break;
+ }
+
+ spin_unlock(&tm->lock);
+
+ return r;
+}
+
+/*
+ * This can silently fail if there's no memory. We're ok with this since
+ * creating redundant shadows causes no harm.
+ */
+static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
+{
+ unsigned bucket;
+ struct shadow_info *si;
+
+ si = kmalloc(sizeof(*si), GFP_NOIO);
+ if (si) {
+ si->where = b;
+ bucket = dm_hash_block(b, HASH_MASK);
+
+ spin_lock(&tm->lock);
+ hlist_add_head(&si->hlist, tm->buckets + bucket);
+ spin_unlock(&tm->lock);
+ }
+}
+
+static void wipe_shadow_table(struct dm_transaction_manager *tm)
+{
+ struct shadow_info *si;
+ struct hlist_node *n, *tmp;
+ struct hlist_head *bucket;
+ int i;
+
+ spin_lock(&tm->lock);
+ for (i = 0; i < HASH_SIZE; i++) {
+ bucket = tm->buckets + i;
+ hlist_for_each_entry_safe(si, n, tmp, bucket, hlist)
+ kfree(si);
+
+ INIT_HLIST_HEAD(bucket);
+ }
+ spin_unlock(&tm->lock);
+}
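A note tying the pieces together: the shadow table only records which blocks have already been copied within the current transaction, which is why dm_tm_commit() below wipes it; after a commit, every block must be shadowed again before it may be modified.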
+
+/*----------------------------------------------------------------*/
+
+static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
+ struct dm_space_map *sm)
+{
+ int i;
+ struct dm_transaction_manager *tm;
+
+ tm = kmalloc(sizeof(*tm), GFP_KERNEL);
+ if (!tm)
+ return ERR_PTR(-ENOMEM);
+
+ tm->is_clone = 0;
+ tm->real = NULL;
+ tm->bm = bm;
+ tm->sm = sm;
+
+ spin_lock_init(&tm->lock);
+ for (i = 0; i < HASH_SIZE; i++)
+ INIT_HLIST_HEAD(tm->buckets + i);
+
+ return tm;
+}
+
+struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transaction_manager *real)
+{
+ struct dm_transaction_manager *tm;
+
+ tm = kmalloc(sizeof(*tm), GFP_KERNEL);
+ if (tm) {
+ tm->is_clone = 1;
+ tm->real = real;
+ }
+
+ return tm;
+}
+EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
+
+void dm_tm_destroy(struct dm_transaction_manager *tm)
+{
+ kfree(tm);
+}
+EXPORT_SYMBOL_GPL(dm_tm_destroy);
+
+int dm_tm_pre_commit(struct dm_transaction_manager *tm)
+{
+ int r;
+
+ if (tm->is_clone)
+ return -EWOULDBLOCK;
+
+ r = dm_sm_commit(tm->sm);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_tm_pre_commit);
+
+int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root)
+{
+ if (tm->is_clone)
+ return -EWOULDBLOCK;
+
+ wipe_shadow_table(tm);
+
+ return dm_bm_flush_and_unlock(tm->bm, root);
+}
+EXPORT_SYMBOL_GPL(dm_tm_commit);
+
+int dm_tm_new_block(struct dm_transaction_manager *tm,
+ struct dm_block_validator *v,
+ struct dm_block **result)
+{
+ int r;
+ dm_block_t new_block;
+
+ if (tm->is_clone)
+ return -EWOULDBLOCK;
+
+ r = dm_sm_new_block(tm->sm, &new_block);
+ if (r < 0)
+ return r;
+
+ r = dm_bm_write_lock_zero(tm->bm, new_block, v, result);
+ if (r < 0) {
+ dm_sm_dec_block(tm->sm, new_block);
+ return r;
+ }
+
+ /*
+ * New blocks count as shadows in that they don't need to be
+ * shadowed again.
+ */
+ insert_shadow(tm, new_block);
+
+ return 0;
+}
+
+static int __shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
+ struct dm_block_validator *v,
+ struct dm_block **result, int *inc_children)
+{
+ int r;
+ dm_block_t new;
+ uint32_t count;
+ struct dm_block *orig_block;
+
+ r = dm_sm_new_block(tm->sm, &new);
+ if (r < 0)
+ return r;
+
+ r = dm_bm_write_lock_zero(tm->bm, new, v, result);
+ if (r < 0)
+ goto bad_dec_block;
+
+ r = dm_bm_read_lock(tm->bm, orig, v, &orig_block);
+ if (r < 0)
+ goto bad_dec_block;
+
+ memcpy(dm_block_data(*result), dm_block_data(orig_block),
+ dm_bm_block_size(tm->bm));
+
+ r = dm_bm_unlock(orig_block);
+ if (r < 0)
+ goto bad_dec_block;
+
+ r = dm_sm_get_count(tm->sm, orig, &count);
+ if (r < 0)
+ goto bad;
+
+ r = dm_sm_dec_block(tm->sm, orig);
+ if (r < 0)
+ goto bad;
+
+ *inc_children = count > 1;
+
+ return 0;
+
+bad:
+ dm_bm_unlock(*result);
+bad_dec_block:
+ dm_sm_dec_block(tm->sm, new);
+
+ return r;
+}
+
+int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
+ struct dm_block_validator *v, struct dm_block **result,
+ int *inc_children)
+{
+ int r, more_than_one;
+
+ if (tm->is_clone)
+ return -EWOULDBLOCK;
+
+ if (is_shadow(tm, orig)) {
+ r = dm_sm_count_is_more_than_one(tm->sm, orig, &more_than_one);
+ if (r < 0)
+ return r;
+
+ if (!more_than_one) {
+ *inc_children = 0;
+ return dm_bm_write_lock(tm->bm, orig, v, result);
+ }
+ /* fall through */
+ }
+
+ r = __shadow_block(tm, orig, v, result, inc_children);
+ if (r < 0)
+ return r;
+
+ insert_shadow(tm, dm_block_location(*result));
+
+ return r;
+}
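A sketch of the caller pattern, mirroring init_child() and btree_insert_raw() earlier in the series (validator and vt stand in for whatever block validator and value type the caller uses):

	int inc;
	struct dm_block *copy;

	r = dm_tm_shadow_block(tm, orig, &validator, &copy, &inc);
	if (!r && inc)
		inc_children(tm, dm_block_data(copy), &vt);

inc comes back non-zero when the original block was shared, in which case the children the copy now points at need their reference counts bumped.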
+
+int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
+ struct dm_block_validator *v,
+ struct dm_block **blk)
+{
+ if (tm->is_clone)
+ return dm_bm_read_try_lock(tm->real->bm, b, v, blk);
+
+ return dm_bm_read_lock(tm->bm, b, v, blk);
+}
+
+int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
+{
+ return dm_bm_unlock(b);
+}
+EXPORT_SYMBOL_GPL(dm_tm_unlock);
+
+void dm_tm_inc(struct dm_transaction_manager *tm, dm_block_t b)
+{
+ /*
+ * The non-blocking clone doesn't support this.
+ */
+ BUG_ON(tm->is_clone);
+
+ dm_sm_inc_block(tm->sm, b);
+}
+EXPORT_SYMBOL_GPL(dm_tm_inc);
+
+void dm_tm_dec(struct dm_transaction_manager *tm, dm_block_t b)
+{
+ /*
+ * The non-blocking clone doesn't support this.
+ */
+ BUG_ON(tm->is_clone);
+
+ dm_sm_dec_block(tm->sm, b);
+}
+
+int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b,
+ uint32_t *result)
+{
+ if (tm->is_clone)
+ return -EWOULDBLOCK;
+
+ return dm_sm_get_count(tm->sm, b, result);
+}
+
+struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm)
+{
+ return tm->bm;
+}
+
+/*----------------------------------------------------------------*/
+
+static int dm_tm_create_internal(struct dm_block_manager *bm,
+ dm_block_t sb_location,
+ struct dm_block_validator *sb_validator,
+ size_t root_offset, size_t root_max_len,
+ struct dm_transaction_manager **tm,
+ struct dm_space_map **sm,
+ struct dm_block **sblock,
+ int create)
+{
+ int r;
+
+ *sm = dm_sm_metadata_init();
+ if (IS_ERR(*sm))
+ return PTR_ERR(*sm);
+
+ *tm = dm_tm_create(bm, *sm);
+ if (IS_ERR(*tm)) {
+ dm_sm_destroy(*sm);
+ return PTR_ERR(*tm);
+ }
+
+ if (create) {
+ r = dm_bm_write_lock_zero(dm_tm_get_bm(*tm), sb_location,
+ sb_validator, sblock);
+ if (r < 0) {
+ DMERR("couldn't lock superblock");
+ goto bad1;
+ }
+
+ r = dm_sm_metadata_create(*sm, *tm, dm_bm_nr_blocks(bm),
+ sb_location);
+ if (r) {
+ DMERR("couldn't create metadata space map");
+ goto bad2;
+ }
+
+ } else {
+ r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
+ sb_validator, sblock);
+ if (r < 0) {
+ DMERR("couldn't lock superblock");
+ goto bad1;
+ }
+
+ r = dm_sm_metadata_open(*sm, *tm,
+ dm_block_data(*sblock) + root_offset,
+ root_max_len);
+ if (r) {
+ DMERR("couldn't open metadata space map");
+ goto bad2;
+ }
+ }
+
+ return 0;
+
+bad2:
+ dm_tm_unlock(*tm, *sblock);
+bad1:
+ dm_tm_destroy(*tm);
+ dm_sm_destroy(*sm);
+ return r;
+}
+
+int dm_tm_create_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
+ struct dm_block_validator *sb_validator,
+ struct dm_transaction_manager **tm,
+ struct dm_space_map **sm, struct dm_block **sblock)
+{
+ return dm_tm_create_internal(bm, sb_location, sb_validator,
+ 0, 0, tm, sm, sblock, 1);
+}
+EXPORT_SYMBOL_GPL(dm_tm_create_with_sm);
+
+int dm_tm_open_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
+ struct dm_block_validator *sb_validator,
+ size_t root_offset, size_t root_max_len,
+ struct dm_transaction_manager **tm,
+ struct dm_space_map **sm, struct dm_block **sblock)
+{
+ return dm_tm_create_internal(bm, sb_location, sb_validator, root_offset,
+ root_max_len, tm, sm, sblock, 0);
+}
+EXPORT_SYMBOL_GPL(dm_tm_open_with_sm);
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <media/saa7146.h>
+ #include <linux/module.h>
LIST_HEAD(saa7146_devices);
DEFINE_MUTEX(saa7146_devices_lock);
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <media/saa7146_vv.h>
+ #include <linux/module.h>
/****************************************************************************/
/* resource management functions, shamelessly stolen from saa7134 driver */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
+ #include <linux/export.h>
#include <media/saa7146_vv.h>
static void calculate_output_format_register(struct saa7146_dev* saa, u32 palette, u32* clip_format)
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <media/saa7146_vv.h>
#include <media/v4l2-chip-ident.h>
+ #include <linux/module.h>
static int max_memory = 32;
#include <linux/i2c.h>
+#include <linux/mutex.h>
+ #include <linux/module.h>
#include "dibx000_common.h"
*/
#include <linux/delay.h>
+ #include <linux/module.h>
#include <linux/i2c.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <media/adp1653.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
+ #include <linux/module.h>
#include <linux/mutex.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max8997.h>
#include <linux/mfd/core.h>
#include <linux/mfd/wm8400-private.h>
#include <linux/mfd/wm8400-audio.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
+ #include <linux/module.h>
static struct {
u16 readable; /* Mask of readable bits */
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
+ #include <linux/export.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include "pch_gbe.h"
#include "pch_gbe_api.h"
-#include <linux/prefetch.h>
+ #include <linux/module.h>
#define DRV_VERSION "1.00"
const char pch_driver_version[] = DRV_VERSION;
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
-
- #include <linux/module.h>
+ #include <linux/export.h>
+ #include <linux/moduleparam.h>
-
-#include "base.h"
+#include <linux/seq_file.h>
+#include <linux/list.h>
#include "debug.h"
+#include "ath5k.h"
+#include "reg.h"
+#include "base.h"
static unsigned int ath5k_debug;
module_param_named(debug, ath5k_debug, uint, 0);
--- /dev/null
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/moduleparam.h>
+
+#include "core.h"
+#include "cfg80211.h"
+#include "debug.h"
+#include "hif-ops.h"
+#include "testmode.h"
+
+static unsigned int ath6kl_p2p;
+
+module_param(ath6kl_p2p, uint, 0644);
+
+#define RATETAB_ENT(_rate, _rateid, _flags) { \
+ .bitrate = (_rate), \
+ .flags = (_flags), \
+ .hw_value = (_rateid), \
+}
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _flags) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = 5000 + (5 * (_channel)), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
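For instance (arithmetic only, no new data): CHAN5G(36, 0) expands to a channel with hw_value 36 and center_freq 5000 + 5 * 36 = 5180 MHz, matching the entries of ath6kl_5ghz_a_channels[] below.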
+
+static struct ieee80211_rate ath6kl_rates[] = {
+ RATETAB_ENT(10, 0x1, 0),
+ RATETAB_ENT(20, 0x2, 0),
+ RATETAB_ENT(55, 0x4, 0),
+ RATETAB_ENT(110, 0x8, 0),
+ RATETAB_ENT(60, 0x10, 0),
+ RATETAB_ENT(90, 0x20, 0),
+ RATETAB_ENT(120, 0x40, 0),
+ RATETAB_ENT(180, 0x80, 0),
+ RATETAB_ENT(240, 0x100, 0),
+ RATETAB_ENT(360, 0x200, 0),
+ RATETAB_ENT(480, 0x400, 0),
+ RATETAB_ENT(540, 0x800, 0),
+};
+
+#define ath6kl_a_rates (ath6kl_rates + 4)
+#define ath6kl_a_rates_size 8
+#define ath6kl_g_rates (ath6kl_rates + 0)
+#define ath6kl_g_rates_size 12
+
+static struct ieee80211_channel ath6kl_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static struct ieee80211_channel ath6kl_5ghz_a_channels[] = {
+ CHAN5G(34, 0), CHAN5G(36, 0),
+ CHAN5G(38, 0), CHAN5G(40, 0),
+ CHAN5G(42, 0), CHAN5G(44, 0),
+ CHAN5G(46, 0), CHAN5G(48, 0),
+ CHAN5G(52, 0), CHAN5G(56, 0),
+ CHAN5G(60, 0), CHAN5G(64, 0),
+ CHAN5G(100, 0), CHAN5G(104, 0),
+ CHAN5G(108, 0), CHAN5G(112, 0),
+ CHAN5G(116, 0), CHAN5G(120, 0),
+ CHAN5G(124, 0), CHAN5G(128, 0),
+ CHAN5G(132, 0), CHAN5G(136, 0),
+ CHAN5G(140, 0), CHAN5G(149, 0),
+ CHAN5G(153, 0), CHAN5G(157, 0),
+ CHAN5G(161, 0), CHAN5G(165, 0),
+ CHAN5G(184, 0), CHAN5G(188, 0),
+ CHAN5G(192, 0), CHAN5G(196, 0),
+ CHAN5G(200, 0), CHAN5G(204, 0),
+ CHAN5G(208, 0), CHAN5G(212, 0),
+ CHAN5G(216, 0),
+};
+
+static struct ieee80211_supported_band ath6kl_band_2ghz = {
+ .n_channels = ARRAY_SIZE(ath6kl_2ghz_channels),
+ .channels = ath6kl_2ghz_channels,
+ .n_bitrates = ath6kl_g_rates_size,
+ .bitrates = ath6kl_g_rates,
+};
+
+static struct ieee80211_supported_band ath6kl_band_5ghz = {
+ .n_channels = ARRAY_SIZE(ath6kl_5ghz_a_channels),
+ .channels = ath6kl_5ghz_a_channels,
+ .n_bitrates = ath6kl_a_rates_size,
+ .bitrates = ath6kl_a_rates,
+};
+
+static int ath6kl_set_wpa_version(struct ath6kl *ar,
+ enum nl80211_wpa_versions wpa_version)
+{
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: %u\n", __func__, wpa_version);
+
+ if (!wpa_version) {
+ ar->auth_mode = NONE_AUTH;
+ } else if (wpa_version & NL80211_WPA_VERSION_2) {
+ ar->auth_mode = WPA2_AUTH;
+ } else if (wpa_version & NL80211_WPA_VERSION_1) {
+ ar->auth_mode = WPA_AUTH;
+ } else {
+ ath6kl_err("%s: %u not supported\n", __func__, wpa_version);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ath6kl_set_auth_type(struct ath6kl *ar,
+ enum nl80211_auth_type auth_type)
+{
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, auth_type);
+
+ switch (auth_type) {
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+ ar->dot11_auth_mode = OPEN_AUTH;
+ break;
+ case NL80211_AUTHTYPE_SHARED_KEY:
+ ar->dot11_auth_mode = SHARED_AUTH;
+ break;
+ case NL80211_AUTHTYPE_NETWORK_EAP:
+ ar->dot11_auth_mode = LEAP_AUTH;
+ break;
+
+ case NL80211_AUTHTYPE_AUTOMATIC:
+ ar->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH;
+ break;
+
+ default:
+ ath6kl_err("%s: 0x%x not spported\n", __func__, auth_type);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast)
+{
+ u8 *ar_cipher = ucast ? &ar->prwise_crypto : &ar->grp_crypto;
+ u8 *ar_cipher_len = ucast ? &ar->prwise_crypto_len :
+ &ar->grp_crypto_len;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: cipher 0x%x, ucast %u\n",
+ __func__, cipher, ucast);
+
+ switch (cipher) {
+ case 0:
+ /* our own hack to use value 0 as no crypto used */
+ *ar_cipher = NONE_CRYPT;
+ *ar_cipher_len = 0;
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ *ar_cipher = WEP_CRYPT;
+ *ar_cipher_len = 5;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ *ar_cipher = WEP_CRYPT;
+ *ar_cipher_len = 13;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ *ar_cipher = TKIP_CRYPT;
+ *ar_cipher_len = 0;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ *ar_cipher = AES_CRYPT;
+ *ar_cipher_len = 0;
+ break;
+ default:
+ ath6kl_err("cipher 0x%x not supported\n", cipher);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static void ath6kl_set_key_mgmt(struct ath6kl *ar, u32 key_mgmt)
+{
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, key_mgmt);
+
+ if (key_mgmt == WLAN_AKM_SUITE_PSK) {
+ if (ar->auth_mode == WPA_AUTH)
+ ar->auth_mode = WPA_PSK_AUTH;
+ else if (ar->auth_mode == WPA2_AUTH)
+ ar->auth_mode = WPA2_PSK_AUTH;
+ } else if (key_mgmt != WLAN_AKM_SUITE_8021X) {
+ ar->auth_mode = NONE_AUTH;
+ }
+}
+
+static bool ath6kl_cfg80211_ready(struct ath6kl *ar)
+{
+ if (!test_bit(WMI_READY, &ar->flag)) {
+ ath6kl_err("wmi is not ready\n");
+ return false;
+ }
+
+ if (!test_bit(WLAN_ENABLED, &ar->flag)) {
+ ath6kl_err("wlan disabled\n");
+ return false;
+ }
+
+ return true;
+}
+
+static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ int status;
+
+ ar->sme_state = SME_CONNECTING;
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
+ ath6kl_err("destroy in progress\n");
+ return -EBUSY;
+ }
+
+ if (test_bit(SKIP_SCAN, &ar->flag) &&
+ ((sme->channel && sme->channel->center_freq == 0) ||
+ (sme->bssid && is_zero_ether_addr(sme->bssid)))) {
+ ath6kl_err("SkipScan: channel or bssid invalid\n");
+ return -EINVAL;
+ }
+
+ if (down_interruptible(&ar->sem)) {
+ ath6kl_err("busy, couldn't get access\n");
+ return -ERESTARTSYS;
+ }
+
+ if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
+ ath6kl_err("busy, destroy in progress\n");
+ up(&ar->sem);
+ return -EBUSY;
+ }
+
+ if (ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)]) {
+ /*
+ * sleep until the command queue drains
+ */
+ wait_event_interruptible_timeout(ar->event_wq,
+ ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)] == 0,
+ WMI_TIMEOUT);
+ if (signal_pending(current)) {
+ ath6kl_err("cmd queue drain timeout\n");
+ up(&ar->sem);
+ return -EINTR;
+ }
+ }
+
+ if (test_bit(CONNECTED, &ar->flag) &&
+ ar->ssid_len == sme->ssid_len &&
+ !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
+ ar->reconnect_flag = true;
+ status = ath6kl_wmi_reconnect_cmd(ar->wmi, ar->req_bssid,
+ ar->ch_hint);
+
+ up(&ar->sem);
+ if (status) {
+ ath6kl_err("wmi_reconnect_cmd failed\n");
+ return -EIO;
+ }
+ return 0;
+ } else if (ar->ssid_len == sme->ssid_len &&
+ !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
+ ath6kl_disconnect(ar);
+ }
+
+ memset(ar->ssid, 0, sizeof(ar->ssid));
+ ar->ssid_len = sme->ssid_len;
+ memcpy(ar->ssid, sme->ssid, sme->ssid_len);
+
+ if (sme->channel)
+ ar->ch_hint = sme->channel->center_freq;
+
+ memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+ if (sme->bssid && !is_broadcast_ether_addr(sme->bssid))
+ memcpy(ar->req_bssid, sme->bssid, sizeof(ar->req_bssid));
+
+ ath6kl_set_wpa_version(ar, sme->crypto.wpa_versions);
+
+ status = ath6kl_set_auth_type(ar, sme->auth_type);
+ if (status) {
+ up(&ar->sem);
+ return status;
+ }
+
+ if (sme->crypto.n_ciphers_pairwise)
+ ath6kl_set_cipher(ar, sme->crypto.ciphers_pairwise[0], true);
+ else
+ ath6kl_set_cipher(ar, 0, true);
+
+ ath6kl_set_cipher(ar, sme->crypto.cipher_group, false);
+
+ if (sme->crypto.n_akm_suites)
+ ath6kl_set_key_mgmt(ar, sme->crypto.akm_suites[0]);
+
+ if ((sme->key_len) &&
+ (ar->auth_mode == NONE_AUTH) && (ar->prwise_crypto == WEP_CRYPT)) {
+ struct ath6kl_key *key = NULL;
+
+ if (sme->key_idx < WMI_MIN_KEY_INDEX ||
+ sme->key_idx > WMI_MAX_KEY_INDEX) {
+ ath6kl_err("key index %d out of bounds\n",
+ sme->key_idx);
+ up(&ar->sem);
+ return -ENOENT;
+ }
+
+ key = &ar->keys[sme->key_idx];
+ key->key_len = sme->key_len;
+ memcpy(key->key, sme->key, key->key_len);
+ key->cipher = ar->prwise_crypto;
+ ar->def_txkey_index = sme->key_idx;
+
+ ath6kl_wmi_addkey_cmd(ar->wmi, sme->key_idx,
+ ar->prwise_crypto,
+ GROUP_USAGE | TX_USAGE,
+ key->key_len,
+ NULL,
+ key->key, KEY_OP_INIT_VAL, NULL,
+ NO_SYNC_WMIFLAG);
+ }
+
+ if (!ar->usr_bss_filter) {
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
+ if (ath6kl_wmi_bssfilter_cmd(ar->wmi, ALL_BSS_FILTER, 0) != 0) {
+ ath6kl_err("couldn't set bss filtering\n");
+ up(&ar->sem);
+ return -EIO;
+ }
+ }
+
+ ar->nw_type = ar->next_mode;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: connect called with authmode %d dot11 auth %d"
+ " PW crypto %d PW crypto len %d GRP crypto %d"
+ " GRP crypto len %d channel hint %u\n",
+ __func__,
+ ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
+ ar->prwise_crypto_len, ar->grp_crypto,
+ ar->grp_crypto_len, ar->ch_hint);
+
+ ar->reconnect_flag = 0;
+ status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
+ ar->dot11_auth_mode, ar->auth_mode,
+ ar->prwise_crypto,
+ ar->prwise_crypto_len,
+ ar->grp_crypto, ar->grp_crypto_len,
+ ar->ssid_len, ar->ssid,
+ ar->req_bssid, ar->ch_hint,
+ ar->connect_ctrl_flags);
+
+ up(&ar->sem);
+
+ if (status == -EINVAL) {
+ memset(ar->ssid, 0, sizeof(ar->ssid));
+ ar->ssid_len = 0;
+ ath6kl_err("invalid request\n");
+ return -ENOENT;
+ } else if (status) {
+ ath6kl_err("ath6kl_wmi_connect_cmd failed\n");
+ return -EIO;
+ }
+
+ if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) &&
+ ((ar->auth_mode == WPA_PSK_AUTH)
+ || (ar->auth_mode == WPA2_PSK_AUTH))) {
+ mod_timer(&ar->disconnect_timer,
+ jiffies + msecs_to_jiffies(DISCON_TIMER_INTVAL));
+ }
+
+ ar->connect_ctrl_flags &= ~CONNECT_DO_WPA_OFFLOAD;
+ set_bit(CONNECT_PEND, &ar->flag);
+
+ return 0;
+}
+
+static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid,
+ struct ieee80211_channel *chan,
+ const u8 *beacon_ie, size_t beacon_ie_len)
+{
+ struct cfg80211_bss *bss;
+ u8 *ie;
+
+ bss = cfg80211_get_bss(ar->wdev->wiphy, chan, bssid,
+ ar->ssid, ar->ssid_len, WLAN_CAPABILITY_ESS,
+ WLAN_CAPABILITY_ESS);
+ if (bss == NULL) {
+ /*
+ * Since cfg80211 may not yet know about the BSS,
+ * generate a partial entry until the first BSS info
+ * event becomes available.
+ *
+ * Prepend SSID element since it is not included in the Beacon
+ * IEs from the target.
+ */
+ ie = kmalloc(2 + ar->ssid_len + beacon_ie_len, GFP_KERNEL);
+ if (ie == NULL)
+ return -ENOMEM;
+ ie[0] = WLAN_EID_SSID;
+ ie[1] = ar->ssid_len;
+ memcpy(ie + 2, ar->ssid, ar->ssid_len);
+ memcpy(ie + 2 + ar->ssid_len, beacon_ie, beacon_ie_len);
+ bss = cfg80211_inform_bss(ar->wdev->wiphy, chan,
+ bssid, 0, WLAN_CAPABILITY_ESS, 100,
+ ie, 2 + ar->ssid_len + beacon_ie_len,
+ 0, GFP_KERNEL);
+ if (bss)
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "added dummy bss for "
+ "%pM prior to indicating connect/roamed "
+ "event\n", bssid);
+ kfree(ie);
+ } else
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss "
+ "entry\n");
+
+ if (bss == NULL)
+ return -ENOMEM;
+
+ cfg80211_put_bss(bss);
+
+ return 0;
+}
+
+void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
+ u8 *bssid, u16 listen_intvl,
+ u16 beacon_intvl,
+ enum network_type nw_type,
+ u8 beacon_ie_len, u8 assoc_req_len,
+ u8 assoc_resp_len, u8 *assoc_info)
+{
+ struct ieee80211_channel *chan;
+
+ /* capinfo + listen interval */
+ u8 assoc_req_ie_offset = sizeof(u16) + sizeof(u16);
+
+ /* capinfo + status code + associd */
+ u8 assoc_resp_ie_offset = sizeof(u16) + sizeof(u16) + sizeof(u16);
+
+ u8 *assoc_req_ie = assoc_info + beacon_ie_len + assoc_req_ie_offset;
+ u8 *assoc_resp_ie = assoc_info + beacon_ie_len + assoc_req_len +
+ assoc_resp_ie_offset;
+
+ assoc_req_len -= assoc_req_ie_offset;
+ assoc_resp_len -= assoc_resp_ie_offset;
+
+ /*
+ * Store Beacon interval here; DTIM period will be available only once
+ * a Beacon frame from the AP is seen.
+ */
+ ar->assoc_bss_beacon_int = beacon_intvl;
+ clear_bit(DTIM_PERIOD_AVAIL, &ar->flag);
+
+ if (nw_type & ADHOC_NETWORK) {
+ if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: ath6k not in ibss mode\n", __func__);
+ return;
+ }
+ }
+
+ if (nw_type & INFRA_NETWORK) {
+ if (ar->wdev->iftype != NL80211_IFTYPE_STATION &&
+ ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: ath6k not in station mode\n", __func__);
+ return;
+ }
+ }
+
+ chan = ieee80211_get_channel(ar->wdev->wiphy, (int) channel);
+
+ if (nw_type & ADHOC_NETWORK) {
+ cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL);
+ return;
+ }
+
+ if (ath6kl_add_bss_if_needed(ar, bssid, chan, assoc_info,
+ beacon_ie_len) < 0) {
+ ath6kl_err("could not add cfg80211 bss entry for "
+ "connect/roamed notification\n");
+ return;
+ }
+
+ if (ar->sme_state == SME_CONNECTING) {
+ /* inform connect result to cfg80211 */
+ ar->sme_state = SME_CONNECTED;
+ cfg80211_connect_result(ar->net_dev, bssid,
+ assoc_req_ie, assoc_req_len,
+ assoc_resp_ie, assoc_resp_len,
+ WLAN_STATUS_SUCCESS, GFP_KERNEL);
+ } else if (ar->sme_state == SME_CONNECTED) {
+ /* inform roam event to cfg80211 */
+ cfg80211_roamed(ar->net_dev, chan, bssid,
+ assoc_req_ie, assoc_req_len,
+ assoc_resp_ie, assoc_resp_len, GFP_KERNEL);
+ }
+}
+
+static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy,
+ struct net_device *dev, u16 reason_code)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: reason=%u\n", __func__,
+ reason_code);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
+ ath6kl_err("busy, destroy in progress\n");
+ return -EBUSY;
+ }
+
+ if (down_interruptible(&ar->sem)) {
+ ath6kl_err("busy, couldn't get access\n");
+ return -ERESTARTSYS;
+ }
+
+ ar->reconnect_flag = 0;
+ ath6kl_disconnect(ar);
+ memset(ar->ssid, 0, sizeof(ar->ssid));
+ ar->ssid_len = 0;
+
+ if (!test_bit(SKIP_SCAN, &ar->flag))
+ memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+
+ up(&ar->sem);
+
+ ar->sme_state = SME_DISCONNECTED;
+
+ return 0;
+}
+
+void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
+ u8 *bssid, u8 assoc_resp_len,
+ u8 *assoc_info, u16 proto_reason)
+{
+ if (ar->scan_req) {
+ cfg80211_scan_done(ar->scan_req, true);
+ ar->scan_req = NULL;
+ }
+
+ if (ar->nw_type & ADHOC_NETWORK) {
+ if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: ath6k not in ibss mode\n", __func__);
+ return;
+ }
+ memset(bssid, 0, ETH_ALEN);
+ cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL);
+ return;
+ }
+
+ if (ar->nw_type & INFRA_NETWORK) {
+ if (ar->wdev->iftype != NL80211_IFTYPE_STATION &&
+ ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: ath6k not in station mode\n", __func__);
+ return;
+ }
+ }
+
+ /*
+ * Send a disconnect command to target when a disconnect event is
+ * received with reason code other than 3 (DISCONNECT_CMD - disconnect
+ * request from host) to make the firmware stop trying to connect even
+ * after giving disconnect event. There will be one more disconnect
+ * event for this disconnect command with reason code DISCONNECT_CMD
+ * which will be notified to cfg80211.
+ */
+
+ if (reason != DISCONNECT_CMD) {
+ ath6kl_wmi_disconnect_cmd(ar->wmi);
+ return;
+ }
+
+ clear_bit(CONNECT_PEND, &ar->flag);
+
+ if (ar->sme_state == SME_CONNECTING) {
+ cfg80211_connect_result(ar->net_dev,
+ bssid, NULL, 0,
+ NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ } else if (ar->sme_state == SME_CONNECTED) {
+ cfg80211_disconnected(ar->net_dev, reason,
+ NULL, 0, GFP_KERNEL);
+ }
+
+ ar->sme_state = SME_DISCONNECTED;
+}
+
+static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ s8 n_channels = 0;
+ u16 *channels = NULL;
+ int ret = 0;
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (!ar->usr_bss_filter) {
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
+ ret = ath6kl_wmi_bssfilter_cmd(
+ ar->wmi,
+ (test_bit(CONNECTED, &ar->flag) ?
+ ALL_BUT_BSS_FILTER : ALL_BSS_FILTER), 0);
+ if (ret) {
+ ath6kl_err("couldn't set bss filtering\n");
+ return ret;
+ }
+ }
+
+ if (request->n_ssids && request->ssids[0].ssid_len) {
+ u8 i;
+
+ if (request->n_ssids > (MAX_PROBED_SSID_INDEX - 1))
+ request->n_ssids = MAX_PROBED_SSID_INDEX - 1;
+
+ for (i = 0; i < request->n_ssids; i++)
+ ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1,
+ SPECIFIC_SSID_FLAG,
+ request->ssids[i].ssid_len,
+ request->ssids[i].ssid);
+ }
+
+ if (request->ie) {
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_REQ,
+ request->ie, request->ie_len);
+ if (ret) {
+ ath6kl_err("failed to set Probe Request appie for "
+ "scan");
+ return ret;
+ }
+ }
+
+ /*
+ * Scan only the requested channels if the request specifies a set of
+ * channels. If the list is longer than the target supports, do not
+ * configure the list and instead, scan all available channels.
+ */
+ if (request->n_channels > 0 &&
+ request->n_channels <= WMI_MAX_CHANNELS) {
+ u8 i;
+
+ n_channels = request->n_channels;
+
+ channels = kzalloc(n_channels * sizeof(u16), GFP_KERNEL);
+ if (channels == NULL) {
+ ath6kl_warn("failed to set scan channels, "
+ "scan all channels");
+ n_channels = 0;
+ }
+
+ for (i = 0; i < n_channels; i++)
+ channels[i] = request->channels[i]->center_freq;
+ }
+
+ ret = ath6kl_wmi_startscan_cmd(ar->wmi, WMI_LONG_SCAN, 0,
+ false, 0, 0, n_channels, channels);
+ if (ret)
+ ath6kl_err("wmi_startscan_cmd failed\n");
+ else
+ ar->scan_req = request;
+
+ kfree(channels);
+
+ return ret;
+}
+
+void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status)
+{
+ int i;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status %d\n", __func__, status);
+
+ if (!ar->scan_req)
+ return;
+
+ if ((status == -ECANCELED) || (status == -EBUSY)) {
+ cfg80211_scan_done(ar->scan_req, true);
+ goto out;
+ }
+
+ cfg80211_scan_done(ar->scan_req, false);
+
+ if (ar->scan_req->n_ssids && ar->scan_req->ssids[0].ssid_len) {
+ for (i = 0; i < ar->scan_req->n_ssids; i++) {
+ ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1,
+ DISABLE_SSID_FLAG,
+ 0, NULL);
+ }
+ }
+
+out:
+ ar->scan_req = NULL;
+}
+
+static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr,
+ struct key_params *params)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl_key *key = NULL;
+ u8 key_usage;
+ u8 key_type;
+ int status = 0;
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: key index %d out of bounds\n", __func__,
+ key_index);
+ return -ENOENT;
+ }
+
+ key = &ar->keys[key_index];
+ memset(key, 0, sizeof(struct ath6kl_key));
+
+ if (pairwise)
+ key_usage = PAIRWISE_USAGE;
+ else
+ key_usage = GROUP_USAGE;
+
+ if (params) {
+ if (params->key_len > WLAN_MAX_KEY_LEN ||
+ params->seq_len > sizeof(key->seq))
+ return -EINVAL;
+
+ key->key_len = params->key_len;
+ memcpy(key->key, params->key, key->key_len);
+ key->seq_len = params->seq_len;
+ memcpy(key->seq, params->seq, key->seq_len);
+ key->cipher = params->cipher;
+ }
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ key_type = WEP_CRYPT;
+ break;
+
+ case WLAN_CIPHER_SUITE_TKIP:
+ key_type = TKIP_CRYPT;
+ break;
+
+ case WLAN_CIPHER_SUITE_CCMP:
+ key_type = AES_CRYPT;
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ if (((ar->auth_mode == WPA_PSK_AUTH)
+ || (ar->auth_mode == WPA2_PSK_AUTH))
+ && (key_usage & GROUP_USAGE))
+ del_timer(&ar->disconnect_timer);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: index %d, key_len %d, key_type 0x%x, key_usage 0x%x, seq_len %d\n",
+ __func__, key_index, key->key_len, key_type,
+ key_usage, key->seq_len);
+
+ ar->def_txkey_index = key_index;
+
+ if (ar->nw_type == AP_NETWORK && !pairwise &&
+ (key_type == TKIP_CRYPT || key_type == AES_CRYPT) && params) {
+ ar->ap_mode_bkey.valid = true;
+ ar->ap_mode_bkey.key_index = key_index;
+ ar->ap_mode_bkey.key_type = key_type;
+ ar->ap_mode_bkey.key_len = key->key_len;
+ memcpy(ar->ap_mode_bkey.key, key->key, key->key_len);
+ if (!test_bit(CONNECTED, &ar->flag)) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay initial group "
+ "key configuration until AP mode has been "
+ "started\n");
+ /*
+ * The key will be set in ath6kl_connect_ap_mode() once
+ * the connected event is received from the target.
+ */
+ return 0;
+ }
+ }
+
+ if (ar->next_mode == AP_NETWORK && key_type == WEP_CRYPT &&
+ !test_bit(CONNECTED, &ar->flag)) {
+ /*
+ * Store the key locally so that it can be re-configured after
+ * the AP mode has properly started
+ * (ath6kl_install_static_wep_keys).
+ */
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay WEP key configuration "
+ "until AP mode has been started\n");
+ ar->wep_key_list[key_index].key_len = key->key_len;
+ memcpy(ar->wep_key_list[key_index].key, key->key, key->key_len);
+ return 0;
+ }
+
+ status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index,
+ key_type, key_usage, key->key_len,
+ key->seq, key->key, KEY_OP_INIT_VAL,
+ (u8 *) mac_addr, SYNC_BOTH_WMIFLAG);
+
+ if (status)
+ return -EIO;
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: key index %d out of bounds\n", __func__,
+ key_index);
+ return -ENOENT;
+ }
+
+ if (!ar->keys[key_index].key_len) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: index %d is empty\n", __func__, key_index);
+ return 0;
+ }
+
+ ar->keys[key_index].key_len = 0;
+
+ return ath6kl_wmi_deletekey_cmd(ar->wmi, key_index);
+}
+
+static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_index, bool pairwise,
+ const u8 *mac_addr, void *cookie,
+ void (*callback) (void *cookie,
+ struct key_params *))
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl_key *key = NULL;
+ struct key_params params;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: key index %d out of bounds\n", __func__,
+ key_index);
+ return -ENOENT;
+ }
+
+ key = &ar->keys[key_index];
+ memset(¶ms, 0, sizeof(params));
+ params.cipher = key->cipher;
+ params.key_len = key->key_len;
+ params.seq_len = key->seq_len;
+ params.seq = key->seq;
+ params.key = key->key;
+
+ callback(cookie, &params);
+
+ return key->key_len ? 0 : -ENOENT;
+}
+
+static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
+ struct net_device *ndev,
+ u8 key_index, bool unicast,
+ bool multicast)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl_key *key = NULL;
+ int status = 0;
+ u8 key_usage;
+ enum crypto_type key_type = NONE_CRYPT;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: key index %d out of bounds\n",
+ __func__, key_index);
+ return -ENOENT;
+ }
+
+ if (!ar->keys[key_index].key_len) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: invalid key index %d\n",
+ __func__, key_index);
+ return -EINVAL;
+ }
+
+ ar->def_txkey_index = key_index;
+ key = &ar->keys[ar->def_txkey_index];
+ key_usage = GROUP_USAGE;
+ if (ar->prwise_crypto == WEP_CRYPT)
+ key_usage |= TX_USAGE;
+ if (unicast)
+ key_type = ar->prwise_crypto;
+ if (multicast)
+ key_type = ar->grp_crypto;
+
+ if (ar->next_mode == AP_NETWORK && !test_bit(CONNECTED, &ar->flag))
+ return 0; /* Delay until AP mode has been started */
+
+ status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index,
+ key_type, key_usage,
+ key->key_len, key->seq, key->key,
+ KEY_OP_INIT_VAL, NULL,
+ SYNC_BOTH_WMIFLAG);
+ if (status)
+ return -EIO;
+
+ return 0;
+}
+
+void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
+ bool ismcast)
+{
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: keyid %d, ismcast %d\n", __func__, keyid, ismcast);
+
+ cfg80211_michael_mic_failure(ar->net_dev, ar->bssid,
+ (ismcast ? NL80211_KEYTYPE_GROUP :
+ NL80211_KEYTYPE_PAIRWISE), keyid, NULL,
+ GFP_KERNEL);
+}
+
+static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+ struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+ int ret;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: changed 0x%x\n", __func__,
+ changed);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
+ ret = ath6kl_wmi_set_rts_cmd(ar->wmi, wiphy->rts_threshold);
+ if (ret != 0) {
+ ath6kl_err("ath6kl_wmi_set_rts_cmd failed\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * The type nl80211_tx_power_setting replaces the following
+ * data type from 2.6.36 onwards
+ */
+static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
+ enum nl80211_tx_power_setting type,
+ int dbm)
+{
+ struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+ u8 ath6kl_dbm;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x, dbm %d\n", __func__,
+ type, dbm);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ switch (type) {
+ case NL80211_TX_POWER_AUTOMATIC:
+ return 0;
+ case NL80211_TX_POWER_LIMITED:
+ ar->tx_pwr = ath6kl_dbm = dbm;
+ break;
+ default:
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x not supported\n",
+ __func__, type);
+ return -EOPNOTSUPP;
+ }
+
+ ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, ath6kl_dbm);
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
+{
+ struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (test_bit(CONNECTED, &ar->flag)) {
+ ar->tx_pwr = 0;
+
+ if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi) != 0) {
+ ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n");
+ return -EIO;
+ }
+
+ wait_event_interruptible_timeout(ar->event_wq, ar->tx_pwr != 0,
+ 5 * HZ);
+
+ if (signal_pending(current)) {
+ ath6kl_err("target did not respond\n");
+ return -EINTR;
+ }
+ }
+
+ *dbm = ar->tx_pwr;
+ return 0;
+}
+
+static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+ struct net_device *dev,
+ bool pmgmt, int timeout)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct wmi_power_mode_cmd mode;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: pmgmt %d, timeout %d\n",
+ __func__, pmgmt, timeout);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (pmgmt) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: max perf\n", __func__);
+ mode.pwr_mode = REC_POWER;
+ } else {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: rec power\n", __func__);
+ mode.pwr_mode = MAX_PERF_POWER;
+ }
+
+ if (ath6kl_wmi_powermode_cmd(ar->wmi, mode.pwr_mode) != 0) {
+ ath6kl_err("wmi_powermode_cmd failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
+ struct net_device *ndev,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params)
+{
+ struct ath6kl *ar = ath6kl_priv(ndev);
+ struct wireless_dev *wdev = ar->wdev;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ ar->next_mode = INFRA_NETWORK;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ar->next_mode = ADHOC_NETWORK;
+ break;
+ case NL80211_IFTYPE_AP:
+ ar->next_mode = AP_NETWORK;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ ar->next_mode = INFRA_NETWORK;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ ar->next_mode = AP_NETWORK;
+ break;
+ default:
+ ath6kl_err("invalid interface type %u\n", type);
+ return -EOPNOTSUPP;
+ }
+
+ wdev->iftype = type;
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_ibss_params *ibss_param)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ int status;
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ ar->ssid_len = ibss_param->ssid_len;
+ memcpy(ar->ssid, ibss_param->ssid, ar->ssid_len);
+
+ if (ibss_param->channel)
+ ar->ch_hint = ibss_param->channel->center_freq;
+
+ if (ibss_param->channel_fixed) {
+ /*
+ * TODO: channel_fixed: The channel should be fixed, do not
+ * search for IBSSs to join on other channels. Target
+ * firmware does not support this feature, needs to be
+ * updated.
+ */
+ return -EOPNOTSUPP;
+ }
+
+ memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+ if (ibss_param->bssid && !is_broadcast_ether_addr(ibss_param->bssid))
+ memcpy(ar->req_bssid, ibss_param->bssid, sizeof(ar->req_bssid));
+
+ ath6kl_set_wpa_version(ar, 0);
+
+ status = ath6kl_set_auth_type(ar, NL80211_AUTHTYPE_OPEN_SYSTEM);
+ if (status)
+ return status;
+
+ if (ibss_param->privacy) {
+ ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, true);
+ ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, false);
+ } else {
+ ath6kl_set_cipher(ar, 0, true);
+ ath6kl_set_cipher(ar, 0, false);
+ }
+
+ ar->nw_type = ar->next_mode;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "%s: connect called with authmode %d dot11 auth %d"
+ " PW crypto %d PW crypto len %d GRP crypto %d"
+ " GRP crypto len %d channel hint %u\n",
+ __func__,
+ ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
+ ar->prwise_crypto_len, ar->grp_crypto,
+ ar->grp_crypto_len, ar->ch_hint);
+
+ status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
+ ar->dot11_auth_mode, ar->auth_mode,
+ ar->prwise_crypto,
+ ar->prwise_crypto_len,
+ ar->grp_crypto, ar->grp_crypto_len,
+ ar->ssid_len, ar->ssid,
+ ar->req_bssid, ar->ch_hint,
+ ar->connect_ctrl_flags);
+ set_bit(CONNECT_PEND, &ar->flag);
+
+ return 0;
+}
+
+static int ath6kl_cfg80211_leave_ibss(struct wiphy *wiphy,
+ struct net_device *dev)
+{
+ struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ ath6kl_disconnect(ar);
+ memset(ar->ssid, 0, sizeof(ar->ssid));
+ ar->ssid_len = 0;
+
+ return 0;
+}
+
+static const u32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+};
+
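+/* The target reports TX rates in kb/s; the tables below map them to cfg80211 legacy rates or HT MCS indices. */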
+static bool is_rate_legacy(s32 rate)
+{
+ static const s32 legacy[] = { 1000, 2000, 5500, 11000,
+ 6000, 9000, 12000, 18000, 24000,
+ 36000, 48000, 54000
+ };
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(legacy); i++)
+ if (rate == legacy[i])
+ return true;
+
+ return false;
+}
+
+static bool is_rate_ht20(s32 rate, u8 *mcs, bool *sgi)
+{
+ static const s32 ht20[] = { 6500, 13000, 19500, 26000, 39000,
+ 52000, 58500, 65000, 72200
+ };
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(ht20); i++) {
+ if (rate == ht20[i]) {
+ if (i == ARRAY_SIZE(ht20) - 1)
+ /* last rate uses sgi */
+ *sgi = true;
+ else
+ *sgi = false;
+
+ *mcs = i;
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool is_rate_ht40(s32 rate, u8 *mcs, bool *sgi)
+{
+ static const s32 ht40[] = { 13500, 27000, 40500, 54000,
+ 81000, 108000, 121500, 135000,
+ 150000
+ };
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(ht40); i++) {
+ if (rate == ht40[i]) {
+ if (i == ARRAY_SIZE(ht40) - 1)
+ /* last rate uses sgi */
+ *sgi = true;
+ else
+ *sgi = false;
+
+ *mcs = i;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
+ u8 *mac, struct station_info *sinfo)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ long left;
+ bool sgi;
+ s32 rate;
+ int ret;
+ u8 mcs;
+
+ if (memcmp(mac, ar->bssid, ETH_ALEN) != 0)
+ return -ENOENT;
+
+ if (down_interruptible(&ar->sem))
+ return -EBUSY;
+
+ set_bit(STATS_UPDATE_PEND, &ar->flag);
+
+ ret = ath6kl_wmi_get_stats_cmd(ar->wmi);
+
+ if (ret != 0) {
+ up(&ar->sem);
+ return -EIO;
+ }
+
+ left = wait_event_interruptible_timeout(ar->event_wq,
+ !test_bit(STATS_UPDATE_PEND,
+ &ar->flag),
+ WMI_TIMEOUT);
+
+ up(&ar->sem);
+
+ if (left == 0)
+ return -ETIMEDOUT;
+ else if (left < 0)
+ return left;
+
+ if (ar->target_stats.rx_byte) {
+ sinfo->rx_bytes = ar->target_stats.rx_byte;
+ sinfo->filled |= STATION_INFO_RX_BYTES;
+ sinfo->rx_packets = ar->target_stats.rx_pkt;
+ sinfo->filled |= STATION_INFO_RX_PACKETS;
+ }
+
+ if (ar->target_stats.tx_byte) {
+ sinfo->tx_bytes = ar->target_stats.tx_byte;
+ sinfo->filled |= STATION_INFO_TX_BYTES;
+ sinfo->tx_packets = ar->target_stats.tx_pkt;
+ sinfo->filled |= STATION_INFO_TX_PACKETS;
+ }
+
+ sinfo->signal = ar->target_stats.cs_rssi;
+ sinfo->filled |= STATION_INFO_SIGNAL;
+
+ rate = ar->target_stats.tx_ucast_rate;
+
+ if (is_rate_legacy(rate)) {
+ sinfo->txrate.legacy = rate / 100;
+ } else if (is_rate_ht20(rate, &mcs, &sgi)) {
+ if (sgi) {
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ sinfo->txrate.mcs = mcs - 1;
+ } else {
+ sinfo->txrate.mcs = mcs;
+ }
+
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
+ } else if (is_rate_ht40(rate, &mcs, &sgi)) {
+ if (sgi) {
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ sinfo->txrate.mcs = mcs - 1;
+ } else {
+ sinfo->txrate.mcs = mcs;
+ }
+
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
+ } else {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+ "invalid rate from stats: %d\n", rate);
+ ath6kl_debug_war(ar, ATH6KL_WAR_INVALID_RATE);
+ return 0;
+ }
+
+ sinfo->filled |= STATION_INFO_TX_BITRATE;
+
+ if (test_bit(CONNECTED, &ar->flag) &&
+ test_bit(DTIM_PERIOD_AVAIL, &ar->flag) &&
+ ar->nw_type == INFRA_NETWORK) {
+ sinfo->filled |= STATION_INFO_BSS_PARAM;
+ sinfo->bss_param.flags = 0;
+ sinfo->bss_param.dtim_period = ar->assoc_bss_dtim_period;
+ sinfo->bss_param.beacon_interval = ar->assoc_bss_beacon_int;
+ }
+
+ return 0;
+}
+
+static int ath6kl_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct ath6kl *ar = ath6kl_priv(netdev);
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid,
+ pmksa->pmkid, true);
+}
+
+static int ath6kl_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct ath6kl *ar = ath6kl_priv(netdev);
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid,
+ pmksa->pmkid, false);
+}
+
+static int ath6kl_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
+{
+ struct ath6kl *ar = ath6kl_priv(netdev);
+ if (test_bit(CONNECTED, &ar->flag))
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, ar->bssid, NULL, false);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ar6k_cfg80211_suspend(struct wiphy *wiphy,
+ struct cfg80211_wowlan *wow)
+{
+ struct ath6kl *ar = wiphy_priv(wiphy);
+
+ return ath6kl_hif_suspend(ar);
+}
+#endif
+
+static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n",
+ __func__, chan->center_freq, chan->hw_value);
+ ar->next_chan = chan->center_freq;
+
+ return 0;
+}
+
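+/* Matches the Wi-Fi Alliance OUI 50:6F:9A with OUI type 9 (P2P). */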
+static bool ath6kl_is_p2p_ie(const u8 *pos)
+{
+ return pos[0] == WLAN_EID_VENDOR_SPECIFIC && pos[1] >= 4 &&
+ pos[2] == 0x50 && pos[3] == 0x6f &&
+ pos[4] == 0x9a && pos[5] == 0x09;
+}
+
+static int ath6kl_set_ap_probe_resp_ies(struct ath6kl *ar, const u8 *ies,
+ size_t ies_len)
+{
+ const u8 *pos;
+ u8 *buf = NULL;
+ size_t len = 0;
+ int ret;
+
+ /*
+ * Filter out P2P IE(s) since they will be included depending on
+ * the Probe Request frame in ath6kl_send_go_probe_resp().
+ */
+
+ if (ies && ies_len) {
+ buf = kmalloc(ies_len, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ pos = ies;
+ while (pos + 1 < ies + ies_len) {
+ if (pos + 2 + pos[1] > ies + ies_len)
+ break;
+ if (!ath6kl_is_p2p_ie(pos)) {
+ memcpy(buf + len, pos, 2 + pos[1]);
+ len += 2 + pos[1];
+ }
+ pos += 2 + pos[1];
+ }
+ }
+
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_RESP,
+ buf, len);
+ kfree(buf);
+ return ret;
+}
+
+static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
+ struct beacon_parameters *info, bool add)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ieee80211_mgmt *mgmt;
+ u8 *ies;
+ int ies_len;
+ struct wmi_connect_cmd p;
+ int res;
+ int i;
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: add=%d\n", __func__, add);
+
+ if (!ath6kl_cfg80211_ready(ar))
+ return -EIO;
+
+ if (ar->next_mode != AP_NETWORK)
+ return -EOPNOTSUPP;
+
+ if (info->beacon_ies) {
+ res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_BEACON,
+ info->beacon_ies,
+ info->beacon_ies_len);
+ if (res)
+ return res;
+ }
+ if (info->proberesp_ies) {
+ res = ath6kl_set_ap_probe_resp_ies(ar, info->proberesp_ies,
+ info->proberesp_ies_len);
+ if (res)
+ return res;
+ }
+ if (info->assocresp_ies) {
+ res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_ASSOC_RESP,
+ info->assocresp_ies,
+ info->assocresp_ies_len);
+ if (res)
+ return res;
+ }
+
+ if (!add)
+ return 0;
+
+ ar->ap_mode_bkey.valid = false;
+
+ /* TODO:
+ * info->interval
+ * info->dtim_period
+ */
+
+ if (info->head == NULL)
+ return -EINVAL;
+ mgmt = (struct ieee80211_mgmt *) info->head;
+ ies = mgmt->u.beacon.variable;
+ if (ies > info->head + info->head_len)
+ return -EINVAL;
+ ies_len = info->head + info->head_len - ies;
+
+ if (info->ssid == NULL)
+ return -EINVAL;
+ memcpy(ar->ssid, info->ssid, info->ssid_len);
+ ar->ssid_len = info->ssid_len;
+ if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE)
+ return -EOPNOTSUPP; /* TODO */
+
+ ar->dot11_auth_mode = OPEN_AUTH;
+
+ memset(&p, 0, sizeof(p));
+
+ for (i = 0; i < info->crypto.n_akm_suites; i++) {
+ switch (info->crypto.akm_suites[i]) {
+ case WLAN_AKM_SUITE_8021X:
+ if (info->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+ p.auth_mode |= WPA_AUTH;
+ if (info->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+ p.auth_mode |= WPA2_AUTH;
+ break;
+ case WLAN_AKM_SUITE_PSK:
+ if (info->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+ p.auth_mode |= WPA_PSK_AUTH;
+ if (info->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+ p.auth_mode |= WPA2_PSK_AUTH;
+ break;
+ }
+ }
+ if (p.auth_mode == 0)
+ p.auth_mode = NONE_AUTH;
+ ar->auth_mode = p.auth_mode;
+
+ for (i = 0; i < info->crypto.n_ciphers_pairwise; i++) {
+ switch (info->crypto.ciphers_pairwise[i]) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ p.prwise_crypto_type |= WEP_CRYPT;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ p.prwise_crypto_type |= TKIP_CRYPT;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ p.prwise_crypto_type |= AES_CRYPT;
+ break;
+ }
+ }
+ if (p.prwise_crypto_type == 0) {
+ p.prwise_crypto_type = NONE_CRYPT;
+ ath6kl_set_cipher(ar, 0, true);
+ } else if (info->crypto.n_ciphers_pairwise == 1)
+ ath6kl_set_cipher(ar, info->crypto.ciphers_pairwise[0], true);
+
+ switch (info->crypto.cipher_group) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ p.grp_crypto_type = WEP_CRYPT;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ p.grp_crypto_type = TKIP_CRYPT;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ p.grp_crypto_type = AES_CRYPT;
+ break;
+ default:
+ p.grp_crypto_type = NONE_CRYPT;
+ break;
+ }
+ ath6kl_set_cipher(ar, info->crypto.cipher_group, false);
+
+ p.nw_type = AP_NETWORK;
+ ar->nw_type = ar->next_mode;
+
+ p.ssid_len = ar->ssid_len;
+ memcpy(p.ssid, ar->ssid, ar->ssid_len);
+ p.dot11_auth_mode = ar->dot11_auth_mode;
+ p.ch = cpu_to_le16(ar->next_chan);
+
+ res = ath6kl_wmi_ap_profile_commit(ar->wmi, &p);
+ if (res < 0)
+ return res;
+
+ return 0;
+}
+
+static int ath6kl_add_beacon(struct wiphy *wiphy, struct net_device *dev,
+ struct beacon_parameters *info)
+{
+ return ath6kl_ap_beacon(wiphy, dev, info, true);
+}
+
+static int ath6kl_set_beacon(struct wiphy *wiphy, struct net_device *dev,
+ struct beacon_parameters *info)
+{
+ return ath6kl_ap_beacon(wiphy, dev, info, false);
+}
+
+static int ath6kl_del_beacon(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ if (ar->nw_type != AP_NETWORK)
+ return -EOPNOTSUPP;
+ if (!test_bit(CONNECTED, &ar->flag))
+ return -ENOTCONN;
+
+ ath6kl_wmi_disconnect_cmd(ar->wmi);
+ clear_bit(CONNECTED, &ar->flag);
+
+ return 0;
+}
+
+static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
+ u8 *mac, struct station_parameters *params)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ if (ar->nw_type != AP_NETWORK)
+ return -EOPNOTSUPP;
+
+ /* Use this only for authorizing/unauthorizing a station */
+ if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)))
+ return -EOPNOTSUPP;
+
+ if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))
+ return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_AUTHORIZE,
+ mac, 0);
+ return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_UNAUTHORIZE, mac,
+ 0);
+}
+
+static int ath6kl_remain_on_channel(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration,
+ u64 *cookie)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ /* TODO: if already pending or ongoing remain-on-channel,
+ * return -EBUSY */
+ *cookie = 1; /* only a single pending request is supported */
+
+ return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, chan->center_freq,
+ duration);
+}
+
+static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy,
+ struct net_device *dev,
+ u64 cookie)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ if (cookie != 1)
+ return -ENOENT;
+
+ return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi);
+}
+
+static int ath6kl_send_go_probe_resp(struct ath6kl *ar, const u8 *buf,
+ size_t len, unsigned int freq)
+{
+ const u8 *pos;
+ u8 *p2p;
+ int p2p_len;
+ int ret;
+ const struct ieee80211_mgmt *mgmt;
+
+ mgmt = (const struct ieee80211_mgmt *) buf;
+
+ /* Include P2P IE(s) from the frame generated in user space. */
+
+ p2p = kmalloc(len, GFP_KERNEL);
+ if (p2p == NULL)
+ return -ENOMEM;
+ p2p_len = 0;
+
+ pos = mgmt->u.probe_resp.variable;
+ while (pos + 1 < buf + len) {
+ if (pos + 2 + pos[1] > buf + len)
+ break;
+ if (ath6kl_is_p2p_ie(pos)) {
+ memcpy(p2p + p2p_len, pos, 2 + pos[1]);
+ p2p_len += 2 + pos[1];
+ }
+ pos += 2 + pos[1];
+ }
+
+ ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, freq, mgmt->da,
+ p2p, p2p_len);
+ kfree(p2p);
+ return ret;
+}
+
+static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
+ struct ieee80211_channel *chan, bool offchan,
+ enum nl80211_channel_type channel_type,
+ bool channel_type_valid, unsigned int wait,
+ const u8 *buf, size_t len, bool no_cck, u64 *cookie)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ u32 id;
+ const struct ieee80211_mgmt *mgmt;
+
+ mgmt = (const struct ieee80211_mgmt *) buf;
+ if (buf + len >= mgmt->u.probe_resp.variable &&
+ ar->nw_type == AP_NETWORK && test_bit(CONNECTED, &ar->flag) &&
+ ieee80211_is_probe_resp(mgmt->frame_control)) {
+ /*
+ * Send Probe Response frame in AP mode using a separate WMI
+ * command to allow the target to fill in the generic IEs.
+ */
+ *cookie = 0; /* TX status not supported */
+ return ath6kl_send_go_probe_resp(ar, buf, len,
+ chan->center_freq);
+ }
+
+ id = ar->send_action_id++;
+ if (id == 0) {
+ /*
+ * 0 is a reserved value in the WMI command and shall not be
+ * used for the command.
+ */
+ id = ar->send_action_id++;
+ }
+
+ *cookie = id;
+ return ath6kl_wmi_send_action_cmd(ar->wmi, id, chan->center_freq, wait,
+ buf, len);
+}
+
+static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
+ struct net_device *dev,
+ u16 frame_type, bool reg)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: frame_type=0x%x reg=%d\n",
+ __func__, frame_type, reg);
+ if (frame_type == IEEE80211_STYPE_PROBE_REQ) {
+ /*
+ * Note: This notification callback is not allowed to sleep, so
+ * we cannot send WMI_PROBE_REQ_REPORT_CMD here. Instead, we
+ * hardcode target to report Probe Request frames all the time.
+ */
+ ar->probe_req_report = reg;
+ }
+}
+
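+/* The frame subtype occupies bits 4-7 of frame_control, hence the ">> 4" below when building RX/TX bitmaps. */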
+static const struct ieee80211_txrx_stypes
+ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+ [NL80211_IFTYPE_STATION] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_CLIENT] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_P2P_GO] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+};
+
+static struct cfg80211_ops ath6kl_cfg80211_ops = {
+ .change_virtual_intf = ath6kl_cfg80211_change_iface,
+ .scan = ath6kl_cfg80211_scan,
+ .connect = ath6kl_cfg80211_connect,
+ .disconnect = ath6kl_cfg80211_disconnect,
+ .add_key = ath6kl_cfg80211_add_key,
+ .get_key = ath6kl_cfg80211_get_key,
+ .del_key = ath6kl_cfg80211_del_key,
+ .set_default_key = ath6kl_cfg80211_set_default_key,
+ .set_wiphy_params = ath6kl_cfg80211_set_wiphy_params,
+ .set_tx_power = ath6kl_cfg80211_set_txpower,
+ .get_tx_power = ath6kl_cfg80211_get_txpower,
+ .set_power_mgmt = ath6kl_cfg80211_set_power_mgmt,
+ .join_ibss = ath6kl_cfg80211_join_ibss,
+ .leave_ibss = ath6kl_cfg80211_leave_ibss,
+ .get_station = ath6kl_get_station,
+ .set_pmksa = ath6kl_set_pmksa,
+ .del_pmksa = ath6kl_del_pmksa,
+ .flush_pmksa = ath6kl_flush_pmksa,
+ CFG80211_TESTMODE_CMD(ath6kl_tm_cmd)
+#ifdef CONFIG_PM
+ .suspend = ar6k_cfg80211_suspend,
+#endif
+ .set_channel = ath6kl_set_channel,
+ .add_beacon = ath6kl_add_beacon,
+ .set_beacon = ath6kl_set_beacon,
+ .del_beacon = ath6kl_del_beacon,
+ .change_station = ath6kl_change_station,
+ .remain_on_channel = ath6kl_remain_on_channel,
+ .cancel_remain_on_channel = ath6kl_cancel_remain_on_channel,
+ .mgmt_tx = ath6kl_mgmt_tx,
+ .mgmt_frame_register = ath6kl_mgmt_frame_register,
+};
+
+struct wireless_dev *ath6kl_cfg80211_init(struct device *dev)
+{
+ int ret = 0;
+ struct wireless_dev *wdev;
+ struct ath6kl *ar;
+
+ wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+ if (!wdev) {
+ ath6kl_err("couldn't allocate wireless device\n");
+ return NULL;
+ }
+
+ /* create a new wiphy for use with cfg80211 */
+ wdev->wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
+ if (!wdev->wiphy) {
+ ath6kl_err("couldn't allocate wiphy device\n");
+ kfree(wdev);
+ return NULL;
+ }
+
+ ar = wiphy_priv(wdev->wiphy);
+ ar->p2p = !!ath6kl_p2p;
+
+ wdev->wiphy->mgmt_stypes = ath6kl_mgmt_stypes;
+
+ wdev->wiphy->max_remain_on_channel_duration = 5000;
+
+ /* set device pointer for wiphy */
+ set_wiphy_dev(wdev->wiphy, dev);
+
+ wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
+ if (ar->p2p) {
+ wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT);
+ }
+ /* max num of ssids that can be probed during scanning */
+ wdev->wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
+ wdev->wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
+ wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
+ wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
+ wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+ wdev->wiphy->cipher_suites = cipher_suites;
+ wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ ret = wiphy_register(wdev->wiphy);
+ if (ret < 0) {
+ ath6kl_err("couldn't register wiphy device\n");
+ wiphy_free(wdev->wiphy);
+ kfree(wdev);
+ return NULL;
+ }
+
+ return wdev;
+}
+
+void ath6kl_cfg80211_deinit(struct ath6kl *ar)
+{
+ struct wireless_dev *wdev = ar->wdev;
+
+ if (ar->scan_req) {
+ cfg80211_scan_done(ar->scan_req, true);
+ ar->scan_req = NULL;
+ }
+
+ if (!wdev)
+ return;
+
+ wiphy_unregister(wdev->wiphy);
+ wiphy_free(wdev->wiphy);
+ kfree(wdev);
+}
--- /dev/null
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+
+#include <linux/circ_buf.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+#include <linux/export.h>
+
+#include "debug.h"
+#include "target.h"
+
+struct ath6kl_fwlog_slot {
+ __le32 timestamp;
+ __le32 length;
+
+ /* max ATH6KL_FWLOG_PAYLOAD_SIZE bytes */
+ u8 payload[0];
+};
+
+#define ATH6KL_FWLOG_SIZE 32768
+#define ATH6KL_FWLOG_SLOT_SIZE (sizeof(struct ath6kl_fwlog_slot) + \
+ ATH6KL_FWLOG_PAYLOAD_SIZE)
+#define ATH6KL_FWLOG_VALID_MASK 0x1ffff
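+/* ATH6KL_FWLOG_SIZE must stay a power of two so head/tail indices can wrap with a simple mask. */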
+
+int ath6kl_printk(const char *level, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int rtn;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ rtn = printk("%sath6kl: %pV", level, &vaf);
+
+ va_end(args);
+
+ return rtn;
+}
+
+#ifdef CONFIG_ATH6KL_DEBUG
+
+#define REG_OUTPUT_LEN_PER_LINE 25
+#define REGTYPE_STR_LEN 100
+
+struct ath6kl_diag_reg_info {
+ u32 reg_start;
+ u32 reg_end;
+ const char *reg_info;
+};
+
+static const struct ath6kl_diag_reg_info diag_reg[] = {
+ { 0x20000, 0x200fc, "General DMA and Rx registers" },
+ { 0x28000, 0x28900, "MAC PCU register & keycache" },
+ { 0x20800, 0x20a40, "QCU" },
+ { 0x21000, 0x212f0, "DCU" },
+ { 0x4000, 0x42e4, "RTC" },
+ { 0x540000, 0x540000 + (256 * 1024), "RAM" },
+ { 0x29800, 0x2B210, "Base Band" },
+ { 0x1C000, 0x1C748, "Analog" },
+};
+
+void ath6kl_dump_registers(struct ath6kl_device *dev,
+ struct ath6kl_irq_proc_registers *irq_proc_reg,
+ struct ath6kl_irq_enable_reg *irq_enable_reg)
+{
+ ath6kl_dbg(ATH6KL_DBG_ANY, "<------- Register Table -------->\n");
+
+ if (irq_proc_reg != NULL) {
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Host Int status: 0x%x\n",
+ irq_proc_reg->host_int_status);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "CPU Int status: 0x%x\n",
+ irq_proc_reg->cpu_int_status);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Error Int status: 0x%x\n",
+ irq_proc_reg->error_int_status);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Counter Int status: 0x%x\n",
+ irq_proc_reg->counter_int_status);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Mbox Frame: 0x%x\n",
+ irq_proc_reg->mbox_frame);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Rx Lookahead Valid: 0x%x\n",
+ irq_proc_reg->rx_lkahd_valid);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Rx Lookahead 0: 0x%x\n",
+ irq_proc_reg->rx_lkahd[0]);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Rx Lookahead 1: 0x%x\n",
+ irq_proc_reg->rx_lkahd[1]);
+
+ if (dev->ar->mbox_info.gmbox_addr != 0) {
+ /*
+ * If the target supports GMBOX hardware, dump some
+ * additional state.
+ */
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "GMBOX Host Int status 2: 0x%x\n",
+ irq_proc_reg->host_int_status2);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "GMBOX RX Avail: 0x%x\n",
+ irq_proc_reg->gmbox_rx_avail);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "GMBOX lookahead alias 0: 0x%x\n",
+ irq_proc_reg->rx_gmbox_lkahd_alias[0]);
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "GMBOX lookahead alias 1: 0x%x\n",
+ irq_proc_reg->rx_gmbox_lkahd_alias[1]);
+ }
+
+ }
+
+ if (irq_enable_reg != NULL) {
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "Int status Enable: 0x%x\n",
+ irq_enable_reg->int_status_en);
+ ath6kl_dbg(ATH6KL_DBG_ANY, "Counter Int status Enable: 0x%x\n",
+ irq_enable_reg->cntr_int_status_en);
+ }
+ ath6kl_dbg(ATH6KL_DBG_ANY, "<------------------------------->\n");
+}
+
+static void dump_cred_dist(struct htc_endpoint_credit_dist *ep_dist)
+{
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "--- endpoint: %d svc_id: 0x%X ---\n",
+ ep_dist->endpoint, ep_dist->svc_id);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " dist_flags : 0x%X\n",
+ ep_dist->dist_flags);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_norm : %d\n",
+ ep_dist->cred_norm);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_min : %d\n",
+ ep_dist->cred_min);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " credits : %d\n",
+ ep_dist->credits);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_assngd : %d\n",
+ ep_dist->cred_assngd);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " seek_cred : %d\n",
+ ep_dist->seek_cred);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_sz : %d\n",
+ ep_dist->cred_sz);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_per_msg : %d\n",
+ ep_dist->cred_per_msg);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " cred_to_dist : %d\n",
+ ep_dist->cred_to_dist);
+ ath6kl_dbg(ATH6KL_DBG_ANY, " txq_depth : %d\n",
+ get_queue_depth(&((struct htc_endpoint *)
+ ep_dist->htc_rsvd)->txq));
+ ath6kl_dbg(ATH6KL_DBG_ANY,
+ "----------------------------------\n");
+}
+
+void dump_cred_dist_stats(struct htc_target *target)
+{
+ struct htc_endpoint_credit_dist *ep_list;
+
+ if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_TRC))
+ return;
+
+ list_for_each_entry(ep_list, &target->cred_dist_list, list)
+ dump_cred_dist(ep_list);
+
+ ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:%p dist:%p\n",
+ target->cred_dist_cntxt, NULL);
+ ath6kl_dbg(ATH6KL_DBG_TRC, "credit distribution, total : %d, free : %d\n",
+ target->cred_dist_cntxt->total_avail_credits,
+ target->cred_dist_cntxt->cur_free_credits);
+}
+
+static int ath6kl_debugfs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war)
+{
+ switch (war) {
+ case ATH6KL_WAR_INVALID_RATE:
+ ar->debug.war_stats.invalid_rate++;
+ break;
+ }
+}
+
+static ssize_t read_file_war_stats(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char *buf;
+ unsigned int len = 0, buf_len = 1500;
+ ssize_t ret_cnt;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "Workaround stats");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n\n",
+ "=================");
+ len += scnprintf(buf + len, buf_len - len, "%20s %10u\n",
+ "Invalid rates", ar->debug.war_stats.invalid_rate);
+
+ if (WARN_ON(len > buf_len))
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_war_stats = {
+ .read = read_file_war_stats,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath6kl_debug_fwlog_add(struct ath6kl *ar, const void *buf,
+ size_t buf_len)
+{
+ struct circ_buf *fwlog = &ar->debug.fwlog_buf;
+ size_t space;
+ int i;
+
+ /* entries must all be equal size */
+ if (WARN_ON(buf_len != ATH6KL_FWLOG_SLOT_SIZE))
+ return;
+
+ space = CIRC_SPACE(fwlog->head, fwlog->tail, ATH6KL_FWLOG_SIZE);
+ if (space < buf_len)
+ /* discard oldest slot */
+ fwlog->tail = (fwlog->tail + ATH6KL_FWLOG_SLOT_SIZE) &
+ (ATH6KL_FWLOG_SIZE - 1);
+
+ for (i = 0; i < buf_len; i += space) {
+ space = CIRC_SPACE_TO_END(fwlog->head, fwlog->tail,
+ ATH6KL_FWLOG_SIZE);
+
+ if ((size_t) space > buf_len - i)
+ space = buf_len - i;
+
+ memcpy(&fwlog->buf[fwlog->head], buf, space);
+ fwlog->head = (fwlog->head + space) & (ATH6KL_FWLOG_SIZE - 1);
+ }
+}
+
+void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len)
+{
+ struct ath6kl_fwlog_slot *slot = ar->debug.fwlog_tmp;
+ size_t slot_len;
+
+ if (WARN_ON(len > ATH6KL_FWLOG_PAYLOAD_SIZE))
+ return;
+
+ spin_lock_bh(&ar->debug.fwlog_lock);
+
+ slot->timestamp = cpu_to_le32(jiffies);
+ slot->length = cpu_to_le32(len);
+ memcpy(slot->payload, buf, len);
+
+ slot_len = sizeof(*slot) + len;
+
+ if (slot_len < ATH6KL_FWLOG_SLOT_SIZE)
+ memset(slot->payload + len, 0,
+ ATH6KL_FWLOG_SLOT_SIZE - slot_len);
+
+ ath6kl_debug_fwlog_add(ar, slot, ATH6KL_FWLOG_SLOT_SIZE);
+
+ spin_unlock_bh(&ar->debug.fwlog_lock);
+}
+
+static bool ath6kl_debug_fwlog_empty(struct ath6kl *ar)
+{
+ return CIRC_CNT(ar->debug.fwlog_buf.head,
+ ar->debug.fwlog_buf.tail,
+ ATH6KL_FWLOG_SIZE) == 0;
+}
+
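+/* Note: reading advances the ring tail, so each log entry is delivered to user space only once. */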
+static ssize_t ath6kl_fwlog_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct circ_buf *fwlog = &ar->debug.fwlog_buf;
+ size_t len = 0, buf_len = count;
+ ssize_t ret_cnt;
+ char *buf;
+ int ccnt;
+
+ buf = vmalloc(buf_len);
+ if (!buf)
+ return -ENOMEM;
+
+ /* read undelivered logs from firmware */
+ ath6kl_read_fwlogs(ar);
+
+ spin_lock_bh(&ar->debug.fwlog_lock);
+
+ while (len < buf_len && !ath6kl_debug_fwlog_empty(ar)) {
+ ccnt = CIRC_CNT_TO_END(fwlog->head, fwlog->tail,
+ ATH6KL_FWLOG_SIZE);
+
+ if ((size_t) ccnt > buf_len - len)
+ ccnt = buf_len - len;
+
+ memcpy(buf + len, &fwlog->buf[fwlog->tail], ccnt);
+ len += ccnt;
+
+ fwlog->tail = (fwlog->tail + ccnt) &
+ (ATH6KL_FWLOG_SIZE - 1);
+ }
+
+ spin_unlock_bh(&ar->debug.fwlog_lock);
+
+ if (WARN_ON(len > buf_len))
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ vfree(buf);
+
+ return ret_cnt;
+}
+
+static const struct file_operations fops_fwlog = {
+ .open = ath6kl_debugfs_open,
+ .read = ath6kl_fwlog_read,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_fwlog_mask_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[16];
+ int len;
+
+ len = snprintf(buf, sizeof(buf), "0x%x\n", ar->debug.fwlog_mask);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_fwlog_mask_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &ar->debug.fwlog_mask);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_wmi_config_debug_module_cmd(ar->wmi,
+ ATH6KL_FWLOG_VALID_MASK,
+ ar->debug.fwlog_mask);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_fwlog_mask = {
+ .open = ath6kl_debugfs_open,
+ .read = ath6kl_fwlog_mask_read,
+ .write = ath6kl_fwlog_mask_write,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct target_stats *tgt_stats = &ar->target_stats;
+ char *buf;
+ unsigned int len = 0, buf_len = 1500;
+ int i;
+ long left;
+ ssize_t ret_cnt;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (down_interruptible(&ar->sem)) {
+ kfree(buf);
+ return -EBUSY;
+ }
+
+ set_bit(STATS_UPDATE_PEND, &ar->flag);
+
+ if (ath6kl_wmi_get_stats_cmd(ar->wmi)) {
+ up(&ar->sem);
+ kfree(buf);
+ return -EIO;
+ }
+
+ left = wait_event_interruptible_timeout(ar->event_wq,
+ !test_bit(STATS_UPDATE_PEND,
+ &ar->flag), WMI_TIMEOUT);
+
+ up(&ar->sem);
+
+ if (left <= 0) {
+ kfree(buf);
+ return -ETIMEDOUT;
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "Target Tx stats");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n\n",
+ "=================");
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Ucast packets", tgt_stats->tx_ucast_pkt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Bcast packets", tgt_stats->tx_bcast_pkt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Ucast byte", tgt_stats->tx_ucast_byte);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Bcast byte", tgt_stats->tx_bcast_byte);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Rts success cnt", tgt_stats->tx_rts_success_cnt);
+ for (i = 0; i < 4; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%18s %d %10llu\n", "PER on ac",
+ i, tgt_stats->tx_pkt_per_ac[i]);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Error", tgt_stats->tx_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Fail count", tgt_stats->tx_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Retry count", tgt_stats->tx_retry_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Multi retry cnt", tgt_stats->tx_mult_retry_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Rts fail cnt", tgt_stats->tx_rts_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%25s %10llu\n\n",
+ "TKIP counter measure used",
+ tgt_stats->tkip_cnter_measures_invoked);
+
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "Target Rx stats");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Ucast packets", tgt_stats->rx_ucast_pkt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
+ "Ucast Rate", tgt_stats->rx_ucast_rate);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Bcast packets", tgt_stats->rx_bcast_pkt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Ucast byte", tgt_stats->rx_ucast_byte);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Bcast byte", tgt_stats->rx_bcast_byte);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Fragmented pkt", tgt_stats->rx_frgment_pkt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Error", tgt_stats->rx_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "CRC Err", tgt_stats->rx_crc_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Key cache miss", tgt_stats->rx_key_cache_miss);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Decrypt Err", tgt_stats->rx_decrypt_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Duplicate frame", tgt_stats->rx_dupl_frame);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Tkip Mic failure", tgt_stats->tkip_local_mic_fail);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "TKIP format err", tgt_stats->tkip_fmt_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "CCMP format Err", tgt_stats->ccmp_fmt_err);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n\n",
+ "CCMP Replay Err", tgt_stats->ccmp_replays);
+
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "Misc Target stats");
+ len += scnprintf(buf + len, buf_len - len, "%25s\n",
+ "=================");
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Beacon Miss count", tgt_stats->cs_bmiss_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Num Connects", tgt_stats->cs_connect_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n",
+ "Num disconnects", tgt_stats->cs_discon_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
+ "Beacon avg rssi", tgt_stats->cs_ave_beacon_rssi);
+
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_tgt_stats = {
+ .read = read_file_tgt_stats,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+#define print_credit_info(fmt_str, ep_list_field) \
+ (len += scnprintf(buf + len, buf_len - len, fmt_str, \
+ ep_list->ep_list_field))
+#define CREDIT_INFO_DISPLAY_STRING_LEN 200
+#define CREDIT_INFO_LEN 128
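+
+/*
+ * print_credit_info() is deliberately unhygienic: it expands in place
+ * and uses the local buf, buf_len, len and ep_list variables of its
+ * caller, so it is only meaningful inside
+ * read_file_credit_dist_stats() below.
+ */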
+
+static ssize_t read_file_credit_dist_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct htc_target *target = ar->htc_target;
+ struct htc_endpoint_credit_dist *ep_list;
+ char *buf;
+ unsigned int buf_len, len = 0;
+ ssize_t ret_cnt;
+
+ buf_len = CREDIT_INFO_DISPLAY_STRING_LEN +
+ get_queue_depth(&target->cred_dist_list) * CREDIT_INFO_LEN;
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, buf_len - len, "%25s%5d\n",
+ "Total Avail Credits: ",
+ target->cred_dist_cntxt->total_avail_credits);
+ len += scnprintf(buf + len, buf_len - len, "%25s%5d\n",
+ "Free credits: ",
+ target->cred_dist_cntxt->cur_free_credits);
+
+ len += scnprintf(buf + len, buf_len - len,
+ " Epid Flags Cred_norm Cred_min Credits Cred_assngd"
+ " Seek_cred Cred_sz Cred_per_msg Cred_to_dist"
+ " qdepth\n");
+
+ list_for_each_entry(ep_list, &target->cred_dist_list, list) {
+ print_credit_info(" %2d", endpoint);
+ print_credit_info("%10x", dist_flags);
+ print_credit_info("%8d", cred_norm);
+ print_credit_info("%9d", cred_min);
+ print_credit_info("%9d", credits);
+ print_credit_info("%10d", cred_assngd);
+ print_credit_info("%13d", seek_cred);
+ print_credit_info("%12d", cred_sz);
+ print_credit_info("%9d", cred_per_msg);
+ print_credit_info("%14d", cred_to_dist);
+ len += scnprintf(buf + len, buf_len - len, "%12d\n",
+ get_queue_depth(&((struct htc_endpoint *)
+ ep_list->htc_rsvd)->txq));
+ }
+
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_credit_dist_stats = {
+ .read = read_file_credit_dist_stats,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static unsigned long ath6kl_get_num_reg(void)
+{
+ int i;
+ unsigned long n_reg = 0;
+
+ for (i = 0; i < ARRAY_SIZE(diag_reg); i++)
+ n_reg += (diag_reg[i].reg_end - diag_reg[i].reg_start) / 4 + 1;
+
+ return n_reg;
+}
+
+static bool ath6kl_dbg_is_diag_reg_valid(u32 reg_addr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(diag_reg); i++) {
+ if (reg_addr >= diag_reg[i].reg_start &&
+ reg_addr <= diag_reg[i].reg_end)
+ return true;
+ }
+
+ return false;
+}
+
+static ssize_t ath6kl_regread_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ u8 buf[50];
+ unsigned int len = 0;
+
+ if (ar->debug.dbgfs_diag_reg)
+ len += scnprintf(buf + len, sizeof(buf) - len, "0x%x\n",
+ ar->debug.dbgfs_diag_reg);
+ else
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ "All diag registers\n");
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_regread_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ u8 buf[50];
+ unsigned int len;
+ unsigned long reg_addr;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+
+ if (strict_strtoul(buf, 0, &reg_addr))
+ return -EINVAL;
+
+ if ((reg_addr % 4) != 0)
+ return -EINVAL;
+
+ if (reg_addr && !ath6kl_dbg_is_diag_reg_valid(reg_addr))
+ return -EINVAL;
+
+ ar->debug.dbgfs_diag_reg = reg_addr;
+
+ return count;
+}
+
+static const struct file_operations fops_diag_reg_read = {
+ .read = ath6kl_regread_read,
+ .write = ath6kl_regread_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
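+
+/*
+ * reg_addr selects what reg_dump below reads: a 4-byte-aligned address
+ * inside one of the diag_reg ranges limits the dump to that single
+ * register, while 0 dumps every range. For example (debugfs path as
+ * above, setup dependent):
+ *
+ *   echo 0 > .../ath6kl/reg_addr
+ *   cat .../ath6kl/reg_dump
+ */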
+
+static int ath6kl_regdump_open(struct inode *inode, struct file *file)
+{
+ struct ath6kl *ar = inode->i_private;
+ u8 *buf;
+ unsigned long reg_len;
+ unsigned int len = 0, n_reg;
+ u32 addr;
+ __le32 reg_val;
+ int i, status;
+
+ /* Dump all the registers if no register is specified */
+ if (!ar->debug.dbgfs_diag_reg)
+ n_reg = ath6kl_get_num_reg();
+ else
+ n_reg = 1;
+
+ reg_len = n_reg * REG_OUTPUT_LEN_PER_LINE;
+ if (n_reg > 1)
+ reg_len += REGTYPE_STR_LEN;
+
+ buf = vmalloc(reg_len);
+ if (!buf)
+ return -ENOMEM;
+
+ if (n_reg == 1) {
+ addr = ar->debug.dbgfs_diag_reg;
+
+ status = ath6kl_diag_read32(ar,
+ TARG_VTOP(ar->target_type, addr),
+ (u32 *)&reg_val);
+ if (status)
+ goto fail_reg_read;
+
+ len += scnprintf(buf + len, reg_len - len,
+ "0x%06x 0x%08x\n", addr, le32_to_cpu(reg_val));
+ goto done;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(diag_reg); i++) {
+ len += scnprintf(buf + len, reg_len - len,
+ "%s\n", diag_reg[i].reg_info);
+ for (addr = diag_reg[i].reg_start;
+ addr <= diag_reg[i].reg_end; addr += 4) {
+ status = ath6kl_diag_read32(ar,
+ TARG_VTOP(ar->target_type, addr),
+ (u32 *)&reg_val);
+ if (status)
+ goto fail_reg_read;
+
+ len += scnprintf(buf + len, reg_len - len,
+ "0x%06x 0x%08x\n",
+ addr, le32_to_cpu(reg_val));
+ }
+ }
+
+done:
+ file->private_data = buf;
+ return 0;
+
+fail_reg_read:
+ ath6kl_warn("Unable to read register 0x%x\n", addr);
+ vfree(buf);
+ return -EIO;
+}
+
+static ssize_t ath6kl_regdump_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ u8 *buf = file->private_data;
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+static int ath6kl_regdump_release(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+ return 0;
+}
+
+static const struct file_operations fops_reg_dump = {
+ .open = ath6kl_regdump_open,
+ .read = ath6kl_regdump_read,
+ .release = ath6kl_regdump_release,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_lrssi_roam_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ unsigned long lrssi_roam_threshold;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (strict_strtoul(buf, 0, &lrssi_roam_threshold))
+ return -EINVAL;
+
+ ar->lrssi_roam_threshold = lrssi_roam_threshold;
+
+ ath6kl_wmi_set_roam_lrssi_cmd(ar->wmi, ar->lrssi_roam_threshold);
+
+ return count;
+}
+
+static ssize_t ath6kl_lrssi_roam_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[32];
+ unsigned int len;
+
+ len = snprintf(buf, sizeof(buf), "%u\n", ar->lrssi_roam_threshold);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_lrssi_roam_threshold = {
+ .read = ath6kl_lrssi_roam_read,
+ .write = ath6kl_lrssi_roam_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_regwrite_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ u8 buf[32];
+ unsigned int len = 0;
+
+ len = scnprintf(buf, sizeof(buf), "Addr: 0x%x Val: 0x%x\n",
+ ar->debug.diag_reg_addr_wr, ar->debug.diag_reg_val_wr);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_regwrite_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[32];
+ char *sptr, *token;
+ unsigned int len = 0;
+ u32 reg_addr, reg_val;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ sptr = buf;
+
+ token = strsep(&sptr, "=");
+ if (!token)
+ return -EINVAL;
+
+ if (kstrtou32(token, 0, &reg_addr))
+ return -EINVAL;
+
+ if (!ath6kl_dbg_is_diag_reg_valid(reg_addr))
+ return -EINVAL;
+
+ if (kstrtou32(sptr, 0, &reg_val))
+ return -EINVAL;
+
+ ar->debug.diag_reg_addr_wr = reg_addr;
+ ar->debug.diag_reg_val_wr = reg_val;
+
+ if (ath6kl_diag_write32(ar, ar->debug.diag_reg_addr_wr,
+ cpu_to_le32(ar->debug.diag_reg_val_wr)))
+ return -EIO;
+
+ return count;
+}
+
+static const struct file_operations fops_diag_reg_write = {
+ .read = ath6kl_regwrite_read,
+ .write = ath6kl_regwrite_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+int ath6kl_debug_init(struct ath6kl *ar)
+{
+ ar->debug.fwlog_buf.buf = vmalloc(ATH6KL_FWLOG_SIZE);
+ if (ar->debug.fwlog_buf.buf == NULL)
+ return -ENOMEM;
+
+ ar->debug.fwlog_tmp = kmalloc(ATH6KL_FWLOG_SLOT_SIZE, GFP_KERNEL);
+ if (ar->debug.fwlog_tmp == NULL) {
+ vfree(ar->debug.fwlog_buf.buf);
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&ar->debug.fwlog_lock);
+
+ /*
+ * We don't know how to read the current mask value from the
+ * firmware, so this initial value is a guess rather than the
+ * firmware's real setting.
+ */
+ ar->debug.fwlog_mask = 0;
+
+ ar->debugfs_phy = debugfs_create_dir("ath6kl",
+ ar->wdev->wiphy->debugfsdir);
+ if (!ar->debugfs_phy) {
+ vfree(ar->debug.fwlog_buf.buf);
+ kfree(ar->debug.fwlog_tmp);
+ return -ENOMEM;
+ }
+
+ debugfs_create_file("tgt_stats", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_tgt_stats);
+
+ debugfs_create_file("credit_dist_stats", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_credit_dist_stats);
+
+ debugfs_create_file("fwlog", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_fwlog);
+
+ debugfs_create_file("fwlog_mask", S_IRUSR | S_IWUSR, ar->debugfs_phy,
+ ar, &fops_fwlog_mask);
+
+ debugfs_create_file("reg_addr", S_IRUSR | S_IWUSR, ar->debugfs_phy, ar,
+ &fops_diag_reg_read);
+
+ debugfs_create_file("reg_dump", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_reg_dump);
+
+ debugfs_create_file("lrssi_roam_threshold", S_IRUSR | S_IWUSR,
+ ar->debugfs_phy, ar, &fops_lrssi_roam_threshold);
+
+ debugfs_create_file("reg_write", S_IRUSR | S_IWUSR,
+ ar->debugfs_phy, ar, &fops_diag_reg_write);
+
+ debugfs_create_file("war_stats", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_war_stats);
+
+ return 0;
+}
+
+void ath6kl_debug_cleanup(struct ath6kl *ar)
+{
+ vfree(ar->debug.fwlog_buf.buf);
+ kfree(ar->debug.fwlog_tmp);
+}
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sd.h>
+#include "htc_hif.h"
+#include "hif-ops.h"
+#include "target.h"
+#include "debug.h"
+#include "cfg80211.h"
+
+struct ath6kl_sdio {
+ struct sdio_func *func;
+
+ spinlock_t lock;
+
+ /* free list */
+ struct list_head bus_req_freeq;
+
+ /* available bus requests */
+ struct bus_request bus_req[BUS_REQUEST_MAX_NUM];
+
+ struct ath6kl *ar;
+ u8 *dma_buffer;
+
+ /* scatter request list head */
+ struct list_head scat_req;
+
+ spinlock_t scat_lock;
+ bool is_disabled;
+ atomic_t irq_handling;
+ const struct sdio_device_id *id;
+ struct work_struct wr_async_work;
+ struct list_head wr_asyncq;
+ spinlock_t wr_async_lock;
+};
+
+#define CMD53_ARG_READ 0
+#define CMD53_ARG_WRITE 1
+#define CMD53_ARG_BLOCK_BASIS 1
+#define CMD53_ARG_FIXED_ADDRESS 0
+#define CMD53_ARG_INCR_ADDRESS 1
+
+static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
+{
+ return ar->hif_priv;
+}
+
+/*
+ * Macro to check if DMA buffer is WORD-aligned and DMA-able.
+ * Most host controllers assume the buffer is DMA'able and will
+ * bug-check otherwise (i.e. buffers on the stack). virt_addr_valid
+ * check fails on stack memory.
+ */
+static inline bool buf_needs_bounce(u8 *buf)
+{
+ return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
+}
+
+static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
+{
+ struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;
+
+ /* EP1 has an extended range */
+ mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
+ mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
+ mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
+ mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
+ mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
+ mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
+}
+
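+/*
+ * CMD53 (IO_RW_EXTENDED) argument layout from the SDIO spec, matching
+ * the shifts below: bit 31 R/W flag, bits 30:28 function number,
+ * bit 27 block/byte mode, bit 26 op code (fixed vs incrementing
+ * address), bits 25:9 register address, bits 8:0 byte/block count.
+ */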
+static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
+ u8 mode, u8 opcode, u32 addr,
+ u16 blksz)
+{
+ *arg = (((rw & 1) << 31) |
+ ((func & 0x7) << 28) |
+ ((mode & 1) << 27) |
+ ((opcode & 1) << 26) |
+ ((addr & 0x1FFFF) << 9) |
+ (blksz & 0x1FF));
+}
+
+static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
+ unsigned int address,
+ unsigned char val)
+{
+ const u8 func = 0;
+
+ *arg = ((write & 1) << 31) |
+ ((func & 0x7) << 28) |
+ ((raw & 1) << 27) |
+ (1 << 26) |
+ ((address & 0x1FFFF) << 9) |
+ (1 << 8) |
+ (val & 0xFF);
+}
+
+static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
+ unsigned int address,
+ unsigned char byte)
+{
+ struct mmc_command io_cmd;
+
+ memset(&io_cmd, 0, sizeof(io_cmd));
+ ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
+ io_cmd.opcode = SD_IO_RW_DIRECT;
+ io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+
+ return mmc_wait_for_cmd(card->host, &io_cmd, 0);
+}
+
+static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
+ u8 *buf, u32 len)
+{
+ int ret = 0;
+
+ if (request & HIF_WRITE) {
+ /*
+ * Position the write so that its last byte lands on the
+ * mailbox end-of-message address, mirroring the fixup in
+ * ath6kl_sdio_scat_rw().
+ */
+ if (addr >= HIF_MBOX_BASE_ADDR &&
+ addr <= HIF_MBOX_END_ADDR)
+ addr += (HIF_MBOX_WIDTH - len);
+
+ /* the extended mailbox needs the same end alignment */
+ if (addr == HIF_MBOX0_EXT_BASE_ADDR)
+ addr += HIF_MBOX0_EXT_WIDTH - len;
+
+ if (request & HIF_FIXED_ADDRESS)
+ ret = sdio_writesb(func, addr, buf, len);
+ else
+ ret = sdio_memcpy_toio(func, addr, buf, len);
+ } else {
+ if (request & HIF_FIXED_ADDRESS)
+ ret = sdio_readsb(func, buf, addr, len);
+ else
+ ret = sdio_memcpy_fromio(func, buf, addr, len);
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
+ request & HIF_WRITE ? "wr" : "rd", addr,
+ request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
+ ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len);
+
+ return ret;
+}
+
+static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
+{
+ struct bus_request *bus_req;
+ unsigned long flag;
+
+ spin_lock_irqsave(&ar_sdio->lock, flag);
+
+ if (list_empty(&ar_sdio->bus_req_freeq)) {
+ spin_unlock_irqrestore(&ar_sdio->lock, flag);
+ return NULL;
+ }
+
+ bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
+ struct bus_request, list);
+ list_del(&bus_req->list);
+
+ spin_unlock_irqrestore(&ar_sdio->lock, flag);
+ ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
+ __func__, bus_req);
+
+ return bus_req;
+}
+
+static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
+ struct bus_request *bus_req)
+{
+ unsigned long flag;
+
+ ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
+ __func__, bus_req);
+
+ spin_lock_irqsave(&ar_sdio->lock, flag);
+ list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
+ spin_unlock_irqrestore(&ar_sdio->lock, flag);
+}
+
+static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
+ struct mmc_data *data)
+{
+ struct scatterlist *sg;
+ int i;
+
+ data->blksz = HIF_MBOX_BLOCK_SIZE;
+ data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;
+
+ ath6kl_dbg(ATH6KL_DBG_SCATTER,
+ "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d) , (tot:%d,sg:%d)\n",
+ (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
+ data->blksz, data->blocks, scat_req->len,
+ scat_req->scat_entries);
+
+ data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
+ MMC_DATA_READ;
+
+ /* fill SG entries */
+ sg = scat_req->sgentries;
+ sg_init_table(sg, scat_req->scat_entries);
+
+ /* assemble SG list */
+ for (i = 0; i < scat_req->scat_entries; i++, sg++) {
+ ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
+ i, scat_req->scat_list[i].buf,
+ scat_req->scat_list[i].len);
+
+ sg_set_buf(sg, scat_req->scat_list[i].buf,
+ scat_req->scat_list[i].len);
+ }
+
+ /* set scatter-gather table for request */
+ data->sg = scat_req->sgentries;
+ data->sg_len = scat_req->scat_entries;
+}
+
+static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
+ struct bus_request *req)
+{
+ struct mmc_request mmc_req;
+ struct mmc_command cmd;
+ struct mmc_data data;
+ struct hif_scatter_req *scat_req;
+ u8 opcode, rw;
+ int status, len;
+
+ scat_req = req->scat_req;
+
+ if (scat_req->virt_scat) {
+ len = scat_req->len;
+ if (scat_req->req & HIF_BLOCK_BASIS)
+ len = round_down(len, HIF_MBOX_BLOCK_SIZE);
+
+ status = ath6kl_sdio_io(ar_sdio->func, scat_req->req,
+ scat_req->addr, scat_req->virt_dma_buf,
+ len);
+ goto scat_complete;
+ }
+
+ memset(&mmc_req, 0, sizeof(struct mmc_request));
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ memset(&data, 0, sizeof(struct mmc_data));
+
+ ath6kl_sdio_setup_scat_data(scat_req, &data);
+
+ opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
+ CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;
+
+ rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;
+
+ /* Fixup the address so that the last byte will fall on MBOX EOM */
+ if (scat_req->req & HIF_WRITE) {
+ if (scat_req->addr == HIF_MBOX_BASE_ADDR)
+ scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
+ else
+ /* Uses extended address range */
+ scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
+ }
+
+ /* set command argument */
+ ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
+ CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr,
+ data.blocks);
+
+ cmd.opcode = SD_IO_RW_EXTENDED;
+ cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+
+ mmc_req.cmd = &cmd;
+ mmc_req.data = &data;
+
+ mmc_set_data_timeout(&data, ar_sdio->func->card);
+ /* synchronous call to process request */
+ mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);
+
+ status = cmd.error ? cmd.error : data.error;
+
+scat_complete:
+ scat_req->status = status;
+
+ if (scat_req->status)
+ ath6kl_err("Scatter request failed: %d\n",
+ scat_req->status);
+
+ if (scat_req->req & HIF_ASYNCHRONOUS)
+ scat_req->complete(ar_sdio->ar->htc_target, scat_req);
+
+ return status;
+}
+
+static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
+ int n_scat_entry, int n_scat_req,
+ bool virt_scat)
+{
+ struct hif_scatter_req *s_req;
+ struct bus_request *bus_req;
+ int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz;
+ u8 *virt_buf;
+
+ scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
+ scat_req_sz = sizeof(*s_req) + scat_list_sz;
+
+ if (!virt_scat)
+ sg_sz = sizeof(struct scatterlist) * n_scat_entry;
+ else
+ buf_sz = 2 * L1_CACHE_BYTES +
+ ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
+
+ for (i = 0; i < n_scat_req; i++) {
+ /* allocate the scatter request */
+ s_req = kzalloc(scat_req_sz, GFP_KERNEL);
+ if (!s_req)
+ return -ENOMEM;
+
+ if (virt_scat) {
+ virt_buf = kzalloc(buf_sz, GFP_KERNEL);
+ if (!virt_buf) {
+ kfree(s_req);
+ return -ENOMEM;
+ }
+
+ s_req->virt_dma_buf =
+ (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
+ } else {
+ /* allocate sglist */
+ s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL);
+
+ if (!s_req->sgentries) {
+ kfree(s_req);
+ return -ENOMEM;
+ }
+ }
+
+ /* allocate a bus request for this scatter request */
+ bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
+ if (!bus_req) {
+ kfree(s_req->sgentries);
+ kfree(s_req->virt_dma_buf);
+ kfree(s_req);
+ return -ENOMEM;
+ }
+
+ /* assign the scatter request to this bus request */
+ bus_req->scat_req = s_req;
+ s_req->busrequest = bus_req;
+
+ s_req->virt_scat = virt_scat;
+
+ /* add it to the scatter pool */
+ hif_scatter_req_add(ar_sdio->ar, s_req);
+ }
+
+ return 0;
+}
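+
+/*
+ * For the virtual scatter case the 2 * L1_CACHE_BYTES of slack
+ * allocated above guarantees that virt_dma_buf can be rounded up to a
+ * cache line with L1_CACHE_ALIGN() and still provide
+ * ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER usable bytes.
+ */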
+
+static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
+ u32 len, u32 request)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ u8 *tbuf = NULL;
+ int ret;
+ bool bounced = false;
+
+ if (request & HIF_BLOCK_BASIS)
+ len = round_down(len, HIF_MBOX_BLOCK_SIZE);
+
+ if (buf_needs_bounce(buf)) {
+ if (!ar_sdio->dma_buffer)
+ return -ENOMEM;
+ tbuf = ar_sdio->dma_buffer;
+ memcpy(tbuf, buf, len);
+ bounced = true;
+ } else {
+ tbuf = buf;
+ }
+
+ sdio_claim_host(ar_sdio->func);
+ ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
+ if ((request & HIF_READ) && bounced)
+ memcpy(buf, tbuf, len);
+ sdio_release_host(ar_sdio->func);
+
+ return ret;
+}
+
+static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
+ struct bus_request *req)
+{
+ if (req->scat_req) {
+ ath6kl_sdio_scat_rw(ar_sdio, req);
+ } else {
+ void *context;
+ int status;
+
+ status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
+ req->buffer, req->length,
+ req->request);
+ context = req->packet;
+ ath6kl_sdio_free_bus_req(ar_sdio, req);
+ ath6kldev_rw_comp_handler(context, status);
+ }
+}
+
+static void ath6kl_sdio_write_async_work(struct work_struct *work)
+{
+ struct ath6kl_sdio *ar_sdio;
+ unsigned long flags;
+ struct bus_request *req, *tmp_req;
+
+ ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
+ sdio_claim_host(ar_sdio->func);
+
+ spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
+ list_del(&req->list);
+ spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ __ath6kl_sdio_write_async(ar_sdio, req);
+ spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ }
+ spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+
+ sdio_release_host(ar_sdio->func);
+}
+
+static void ath6kl_sdio_irq_handler(struct sdio_func *func)
+{
+ int status;
+ struct ath6kl_sdio *ar_sdio;
+
+ ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n");
+
+ ar_sdio = sdio_get_drvdata(func);
+ atomic_set(&ar_sdio->irq_handling, 1);
+
+ /*
+ * Release the host during interrupts so we can pick it back up when
+ * we process commands.
+ */
+ sdio_release_host(ar_sdio->func);
+
+ status = ath6kldev_intr_bh_handler(ar_sdio->ar);
+ sdio_claim_host(ar_sdio->func);
+ atomic_set(&ar_sdio->irq_handling, 0);
+ WARN_ON(status && status != -ECANCELED);
+}
+
+static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
+{
+ struct sdio_func *func = ar_sdio->func;
+ int ret = 0;
+
+ if (!ar_sdio->is_disabled)
+ return 0;
+
+ sdio_claim_host(func);
+
+ ret = sdio_enable_func(func);
+ if (ret) {
+ ath6kl_err("Unable to enable sdio func: %d\n", ret);
+ sdio_release_host(func);
+ return ret;
+ }
+
+ sdio_release_host(func);
+
+ /*
+ * Wait for hardware to initialise. It should take a lot less than
+ * 10 ms but let's be conservative here.
+ */
+ msleep(10);
+
+ ar_sdio->is_disabled = false;
+
+ return ret;
+}
+
+static int ath6kl_sdio_power_off(struct ath6kl_sdio *ar_sdio)
+{
+ int ret;
+
+ if (ar_sdio->is_disabled)
+ return 0;
+
+ /* Disable the card */
+ sdio_claim_host(ar_sdio->func);
+ ret = sdio_disable_func(ar_sdio->func);
+ sdio_release_host(ar_sdio->func);
+
+ if (ret)
+ return ret;
+
+ ar_sdio->is_disabled = true;
+
+ return ret;
+}
+
+static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
+ u32 length, u32 request,
+ struct htc_packet *packet)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct bus_request *bus_req;
+ unsigned long flags;
+
+ bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
+
+ if (!bus_req)
+ return -ENOMEM;
+
+ bus_req->address = address;
+ bus_req->buffer = buffer;
+ bus_req->length = length;
+ bus_req->request = request;
+ bus_req->packet = packet;
+
+ spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
+ spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
+
+ return 0;
+}
+
+static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ int ret;
+
+ sdio_claim_host(ar_sdio->func);
+
+ /* Register the isr */
+ ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
+ if (ret)
+ ath6kl_err("Failed to claim sdio irq: %d\n", ret);
+
+ sdio_release_host(ar_sdio->func);
+}
+
+static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ int ret;
+
+ sdio_claim_host(ar_sdio->func);
+
+ /* Mask our function IRQ */
+ while (atomic_read(&ar_sdio->irq_handling)) {
+ sdio_release_host(ar_sdio->func);
+ /* sleep rather than busy-wait while the isr finishes */
+ schedule_timeout_interruptible(HZ / 10);
+ sdio_claim_host(ar_sdio->func);
+ }
+
+ ret = sdio_release_irq(ar_sdio->func);
+ if (ret)
+ ath6kl_err("Failed to release sdio irq: %d\n", ret);
+
+ sdio_release_host(ar_sdio->func);
+}
+
+static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct hif_scatter_req *node = NULL;
+ unsigned long flag;
+
+ spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+
+ if (!list_empty(&ar_sdio->scat_req)) {
+ node = list_first_entry(&ar_sdio->scat_req,
+ struct hif_scatter_req, list);
+ list_del(&node->list);
+ }
+
+ spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+
+ return node;
+}
+
+static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
+ struct hif_scatter_req *s_req)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ unsigned long flag;
+
+ spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+
+ list_add_tail(&s_req->list, &ar_sdio->scat_req);
+
+ spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+
+}
+
+/* scatter gather read write request */
+static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
+ struct hif_scatter_req *scat_req)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ u32 request = scat_req->req;
+ int status = 0;
+ unsigned long flags;
+
+ if (!scat_req->len)
+ return -EINVAL;
+
+ ath6kl_dbg(ATH6KL_DBG_SCATTER,
+ "hif-scatter: total len: %d scatter entries: %d\n",
+ scat_req->len, scat_req->scat_entries);
+
+ if (request & HIF_SYNCHRONOUS) {
+ sdio_claim_host(ar_sdio->func);
+ status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
+ sdio_release_host(ar_sdio->func);
+ } else {
+ spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
+ spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
+ }
+
+ return status;
+}
+
+/* clean up scatter support */
+static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct hif_scatter_req *s_req, *tmp_req;
+ unsigned long flag;
+
+ /* empty the free list */
+ spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
+ list_del(&s_req->list);
+ spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+
+ if (s_req->busrequest)
+ ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
+ kfree(s_req->virt_dma_buf);
+ kfree(s_req->sgentries);
+ kfree(s_req);
+
+ spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ }
+ spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+}
+
+/* setup of HIF scatter resources */
+static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct htc_target *target = ar->htc_target;
+ int ret;
+ bool virt_scat = false;
+
+ /* check if host supports scatter and it meets our requirements */
+ if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
+ ath6kl_err("host only supports scatter of %d entries, need %d\n",
+ ar_sdio->func->card->host->max_segs,
+ MAX_SCATTER_ENTRIES_PER_REQ);
+ virt_scat = true;
+ }
+
+ if (!virt_scat) {
+ ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
+ MAX_SCATTER_ENTRIES_PER_REQ,
+ MAX_SCATTER_REQUESTS, virt_scat);
+
+ if (!ret) {
+ ath6kl_dbg(ATH6KL_DBG_SCATTER,
+ "hif-scatter enabled: max scatter req : %d entries: %d\n",
+ MAX_SCATTER_REQUESTS,
+ MAX_SCATTER_ENTRIES_PER_REQ);
+
+ target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
+ target->max_xfer_szper_scatreq =
+ MAX_SCATTER_REQ_TRANSFER_SIZE;
+ } else {
+ ath6kl_sdio_cleanup_scatter(ar);
+ ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n");
+ }
+ }
+
+ if (virt_scat || ret) {
+ ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
+ ATH6KL_SCATTER_ENTRIES_PER_REQ,
+ ATH6KL_SCATTER_REQS, virt_scat);
+
+ if (ret) {
+ ath6kl_err("failed to alloc virtual scatter resources !\n");
+ ath6kl_sdio_cleanup_scatter(ar);
+ return ret;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_SCATTER,
+ "Virtual scatter enabled, max_scat_req:%d, entries:%d\n",
+ ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);
+
+ target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
+ target->max_xfer_szper_scatreq =
+ ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
+ }
+
+ return 0;
+}
+
+static int ath6kl_sdio_suspend(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ mmc_pm_flag_t flags;
+ int ret;
+
+ flags = sdio_get_host_pm_caps(func);
+
+ if (!(flags & MMC_PM_KEEP_POWER)) {
+ /* the host can't keep power, so we have to bail out */
+ ath6kl_dbg(ATH6KL_DBG_SDIO,
+ "func %d doesn't support MMC_PM_KEEP_POWER\n",
+ func->num);
+ return -EINVAL;
+ }
+
+ ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+ if (ret) {
+ ath6kl_err("set sdio pm flags failed: %d\n", ret);
+ return ret;
+ }
+
+ ath6kl_deep_sleep_enable(ar);
+
+ return 0;
+}
+
+static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
+ .read_write_sync = ath6kl_sdio_read_write_sync,
+ .write_async = ath6kl_sdio_write_async,
+ .irq_enable = ath6kl_sdio_irq_enable,
+ .irq_disable = ath6kl_sdio_irq_disable,
+ .scatter_req_get = ath6kl_sdio_scatter_req_get,
+ .scatter_req_add = ath6kl_sdio_scatter_req_add,
+ .enable_scatter = ath6kl_sdio_enable_scatter,
+ .scat_req_rw = ath6kl_sdio_async_rw_scatter,
+ .cleanup_scatter = ath6kl_sdio_cleanup_scatter,
+ .suspend = ath6kl_sdio_suspend,
+};
+
+static int ath6kl_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ int ret;
+ struct ath6kl_sdio *ar_sdio;
+ struct ath6kl *ar;
+ int count;
+
+ ath6kl_dbg(ATH6KL_DBG_SDIO,
+ "new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
+ func->num, func->vendor, func->device,
+ func->max_blksize, func->cur_blksize);
+
+ ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
+ if (!ar_sdio)
+ return -ENOMEM;
+
+ ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
+ if (!ar_sdio->dma_buffer) {
+ ret = -ENOMEM;
+ goto err_hif;
+ }
+
+ ar_sdio->func = func;
+ sdio_set_drvdata(func, ar_sdio);
+
+ ar_sdio->id = id;
+ ar_sdio->is_disabled = true;
+
+ spin_lock_init(&ar_sdio->lock);
+ spin_lock_init(&ar_sdio->scat_lock);
+ spin_lock_init(&ar_sdio->wr_async_lock);
+
+ INIT_LIST_HEAD(&ar_sdio->scat_req);
+ INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
+ INIT_LIST_HEAD(&ar_sdio->wr_asyncq);
+
+ INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);
+
+ for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
+ ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);
+
+ ar = ath6kl_core_alloc(&ar_sdio->func->dev);
+ if (!ar) {
+ ath6kl_err("Failed to alloc ath6kl core\n");
+ ret = -ENOMEM;
+ goto err_dma;
+ }
+
+ ar_sdio->ar = ar;
+ ar->hif_priv = ar_sdio;
+ ar->hif_ops = &ath6kl_sdio_ops;
+
+ ath6kl_sdio_set_mbox_info(ar);
+
+ sdio_claim_host(func);
+
+ if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
+ MANUFACTURER_ID_AR6003_BASE) {
+ /* enable 4-bit ASYNC interrupt on AR6003 or later */
+ ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
+ CCCR_SDIO_IRQ_MODE_REG,
+ SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
+ if (ret) {
+ ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
+ ret);
+ sdio_release_host(func);
+ goto err_cfg80211;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_SDIO, "4-bit async irq mode enabled\n");
+ }
+
+ /* give us some time to enable, in ms */
+ func->enable_timeout = 100;
+
+ sdio_release_host(func);
+
+ ret = ath6kl_sdio_power_on(ar_sdio);
+ if (ret)
+ goto err_cfg80211;
+
+ sdio_claim_host(func);
+
+ ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
+ if (ret) {
+ ath6kl_err("Set sdio block size %d failed: %d\n",
+ HIF_MBOX_BLOCK_SIZE, ret);
+ sdio_release_host(func);
+ goto err_off;
+ }
+
+ sdio_release_host(func);
+
+ ret = ath6kl_core_init(ar);
+ if (ret) {
+ ath6kl_err("Failed to init ath6kl core\n");
+ goto err_off;
+ }
+
+ return ret;
+
+err_off:
+ ath6kl_sdio_power_off(ar_sdio);
+err_cfg80211:
+ ath6kl_cfg80211_deinit(ar_sdio->ar);
+err_dma:
+ kfree(ar_sdio->dma_buffer);
+err_hif:
+ kfree(ar_sdio);
+
+ return ret;
+}
+
+static void ath6kl_sdio_remove(struct sdio_func *func)
+{
+ struct ath6kl_sdio *ar_sdio;
+
+ ath6kl_dbg(ATH6KL_DBG_SDIO,
+ "removed func %d vendor 0x%x device 0x%x\n",
+ func->num, func->vendor, func->device);
+
+ ar_sdio = sdio_get_drvdata(func);
+
+ ath6kl_stop_txrx(ar_sdio->ar);
+ cancel_work_sync(&ar_sdio->wr_async_work);
+
+ ath6kl_unavail_ev(ar_sdio->ar);
+
+ ath6kl_sdio_power_off(ar_sdio);
+
+ kfree(ar_sdio->dma_buffer);
+ kfree(ar_sdio);
+}
+
+static const struct sdio_device_id ath6kl_sdio_devices[] = {
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
+ {},
+};
+
+MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);
+
+static struct sdio_driver ath6kl_sdio_driver = {
+ .name = "ath6kl_sdio",
+ .id_table = ath6kl_sdio_devices,
+ .probe = ath6kl_sdio_probe,
+ .remove = ath6kl_sdio_remove,
+};
+
+static int __init ath6kl_sdio_init(void)
+{
+ int ret;
+
+ ret = sdio_register_driver(&ath6kl_sdio_driver);
+ if (ret)
+ ath6kl_err("sdio driver registration failed: %d\n", ret);
+
+ return ret;
+}
+
+static void __exit ath6kl_sdio_exit(void)
+{
+ sdio_unregister_driver(&ath6kl_sdio_driver);
+}
+
+module_init(ath6kl_sdio_init);
+module_exit(ath6kl_sdio_exit);
+
+MODULE_AUTHOR("Atheros Communications, Inc.");
+MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
+MODULE_LICENSE("Dual BSD/GPL");
+
+MODULE_FIRMWARE(AR6003_REV2_OTP_FILE);
+MODULE_FIRMWARE(AR6003_REV2_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6003_REV2_PATCH_FILE);
+MODULE_FIRMWARE(AR6003_REV2_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_REV2_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_REV3_OTP_FILE);
+MODULE_FIRMWARE(AR6003_REV3_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6003_REV3_PATCH_FILE);
+MODULE_FIRMWARE(AR6003_REV3_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_REV3_DEFAULT_BOARD_DATA_FILE);
--- /dev/null
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/* ****************** SDIO CARD Interface Functions **************************/
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/export.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/card.h>
+
+#include <defs.h>
+#include <brcm_hw_ids.h>
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+#include <soc.h>
+#include "dhd.h"
+#include "dhd_bus.h"
+#include "dhd_dbg.h"
+#include "sdio_host.h"
+
+#define SDIOH_API_ACCESS_RETRY_LIMIT 2
+
+static void brcmf_sdioh_irqhandler(struct sdio_func *func)
+{
+ struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
+
+ brcmf_dbg(TRACE, "***IRQHandler\n");
+
+ sdio_release_host(func);
+
+ brcmf_sdbrcm_isr(sdiodev->bus);
+
+ sdio_claim_host(func);
+}
+
+int brcmf_sdcard_intr_reg(struct brcmf_sdio_dev *sdiodev)
+{
+ brcmf_dbg(TRACE, "Entering\n");
+
+ sdio_claim_host(sdiodev->func[1]);
+ sdio_claim_irq(sdiodev->func[1], brcmf_sdioh_irqhandler);
+ sdio_release_host(sdiodev->func[1]);
+
+ return 0;
+}
+
+int brcmf_sdcard_intr_dereg(struct brcmf_sdio_dev *sdiodev)
+{
+ brcmf_dbg(TRACE, "Entering\n");
+
+ sdio_claim_host(sdiodev->func[1]);
+ sdio_release_irq(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func[1]);
+
+ return 0;
+}
+
+u8 brcmf_sdcard_cfg_read(struct brcmf_sdio_dev *sdiodev, uint fnc_num, u32 addr,
+ int *err)
+{
+ int status;
+ s32 retry = 0;
+ u8 data = 0;
+
+ do {
+ if (retry) /* wait 1 ms for the bus to settle down */
+ udelay(1000);
+ status = brcmf_sdioh_request_byte(sdiodev, SDIOH_READ, fnc_num,
+ addr, (u8 *) &data);
+ } while (status != 0
+ && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+ if (err)
+ *err = status;
+
+ brcmf_dbg(INFO, "fun = %d, addr = 0x%x, u8data = 0x%x\n",
+ fnc_num, addr, data);
+
+ return data;
+}
+
+void
+brcmf_sdcard_cfg_write(struct brcmf_sdio_dev *sdiodev, uint fnc_num, u32 addr,
+ u8 data, int *err)
+{
+ int status;
+ s32 retry = 0;
+
+ do {
+ if (retry) /* wait 1 ms for the bus to settle down */
+ udelay(1000);
+ status = brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, fnc_num,
+ addr, (u8 *) &data);
+ } while (status != 0
+ && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
+ if (err)
+ *err = status;
+
+ brcmf_dbg(INFO, "fun = %d, addr = 0x%x, u8data = 0x%x\n",
+ fnc_num, addr, data);
+}
+
+int
+brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
+{
+ int err = 0;
+ brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+ (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
+ if (!err)
+ brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_SBADDRMID,
+ (address >> 16) & SBSDIO_SBADDRMID_MASK,
+ &err);
+ if (!err)
+ brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_SBADDRHIGH,
+ (address >> 24) & SBSDIO_SBADDRHIGH_MASK,
+ &err);
+
+ return err;
+}
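+
+/*
+ * Function 1 only exposes a limited window into the chip backplane:
+ * SBSDIO_SB_OFT_ADDR_MASK covers the in-window offset, and the window
+ * base (bits 8..31 of the backplane address) is programmed through the
+ * three SBADDR registers above. The cached sdiodev->sbwad lets the
+ * accessors below skip reprogramming the window when it already
+ * matches.
+ */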
+
+u32 brcmf_sdcard_reg_read(struct brcmf_sdio_dev *sdiodev, u32 addr, uint size)
+{
+ int status;
+ u32 word = 0;
+ uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+
+ brcmf_dbg(INFO, "fun = 1, addr = 0x%x\n", addr);
+
+ if (bar0 != sdiodev->sbwad) {
+ if (brcmf_sdcard_set_sbaddr_window(sdiodev, bar0))
+ return 0xFFFFFFFF;
+
+ sdiodev->sbwad = bar0;
+ }
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ if (size == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ status = brcmf_sdioh_request_word(sdiodev, SDIOH_READ, SDIO_FUNC_1,
+ addr, &word, size);
+
+ sdiodev->regfail = (status != 0);
+
+ brcmf_dbg(INFO, "u32data = 0x%x\n", word);
+
+ /* if ok, return appropriately masked word */
+ if (status == 0) {
+ switch (size) {
+ case sizeof(u8):
+ return word & 0xff;
+ case sizeof(u16):
+ return word & 0xffff;
+ case sizeof(u32):
+ return word;
+ default:
+ sdiodev->regfail = true;
+ break;
+ }
+ }
+
+ /* otherwise, bad sdio access or invalid size */
+ brcmf_dbg(ERROR, "error reading addr 0x%04x size %d\n", addr, size);
+ return 0xFFFFFFFF;
+}
+
+u32 brcmf_sdcard_reg_write(struct brcmf_sdio_dev *sdiodev, u32 addr, uint size,
+ u32 data)
+{
+ int status;
+ uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+ int err = 0;
+
+ brcmf_dbg(INFO, "fun = 1, addr = 0x%x, uint%ddata = 0x%x\n",
+ addr, size * 8, data);
+
+ if (bar0 != sdiodev->sbwad) {
+ err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
+ if (err)
+ return err;
+
+ sdiodev->sbwad = bar0;
+ }
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ if (size == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+ status =
+ brcmf_sdioh_request_word(sdiodev, SDIOH_WRITE, SDIO_FUNC_1,
+ addr, &data, size);
+ sdiodev->regfail = (status != 0);
+
+ if (status == 0)
+ return 0;
+
+ brcmf_dbg(ERROR, "error writing 0x%08x to addr 0x%04x size %d\n",
+ data, addr, size);
+ return 0xFFFFFFFF;
+}
+
+bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev)
+{
+ return sdiodev->regfail;
+}
+
+int
+brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags,
+ u8 *buf, uint nbytes, struct sk_buff *pkt)
+{
+ int status;
+ uint incr_fix;
+ uint width;
+ uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+ int err = 0;
+
+ brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes);
+
+ /* Async not implemented yet */
+ if (flags & SDIO_REQ_ASYNC)
+ return -ENOTSUPP;
+
+ if (bar0 != sdiodev->sbwad) {
+ err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
+ if (err)
+ return err;
+
+ sdiodev->sbwad = bar0;
+ }
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+ incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+ width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+ if (width == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ status = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
+ fn, addr, width, nbytes, buf, pkt);
+
+ return status;
+}
+
+int
+brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt)
+{
+ uint incr_fix;
+ uint width;
+ uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+ int err = 0;
+
+ brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes);
+
+ /* Async not implemented yet */
+ if (flags & SDIO_REQ_ASYNC)
+ return -ENOTSUPP;
+
+ if (bar0 != sdiodev->sbwad) {
+ err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
+ if (err)
+ return err;
+
+ sdiodev->sbwad = bar0;
+ }
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+ incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+ width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+ if (width == 4)
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ return brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn,
+ addr, width, nbytes, buf, pkt);
+}
+
+int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
+ u8 *buf, uint nbytes)
+{
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ return brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC,
+ (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
+ addr, 4, nbytes, buf, NULL);
+}
+
+int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
+{
+ char t_func = (char)fn;
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* issue abort cmd52 command through F0 */
+ brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0,
+ SDIO_CCCR_ABORT, &t_func);
+
+ brcmf_dbg(TRACE, "Exit\n");
+ return 0;
+}
+
+int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
+{
+ u32 regs = 0;
+ int ret = 0;
+
+ ret = brcmf_sdioh_attach(sdiodev);
+ if (ret)
+ goto out;
+
+ regs = SI_ENUM_BASE;
+
+ /* Report the BAR, to fix if needed */
+ sdiodev->sbwad = SI_ENUM_BASE;
+
+ /* try to attach to the target device */
+ sdiodev->bus = brcmf_sdbrcm_probe(0, 0, 0, 0, regs, sdiodev);
+ if (!sdiodev->bus) {
+ brcmf_dbg(ERROR, "device attach failed\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+out:
+ if (ret)
+ brcmf_sdio_remove(sdiodev);
+
+ return ret;
+}
+EXPORT_SYMBOL(brcmf_sdio_probe);
+
+int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev)
+{
+ if (sdiodev->bus) {
+ brcmf_sdbrcm_disconnect(sdiodev->bus);
+ sdiodev->bus = NULL;
+ }
+
+ brcmf_sdioh_detach(sdiodev);
+
+ sdiodev->sbwad = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(brcmf_sdio_remove);
+
+void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable)
+{
+ if (enable)
+ brcmf_sdbrcm_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
+ else
+ brcmf_sdbrcm_wd_timer(sdiodev->bus, 0);
+}
--- /dev/null
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/card.h>
+#include <linux/suspend.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <net/cfg80211.h>
+
+#include <defs.h>
+#include <brcm_hw_ids.h>
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+#include "sdio_host.h"
+#include "dhd.h"
+#include "dhd_dbg.h"
+#include "wl_cfg80211.h"
+
+#define SDIO_VENDOR_ID_BROADCOM 0x02d0
+
+#define DMA_ALIGN_MASK 0x03
+
+#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
+
+#define SDIO_FUNC1_BLOCKSIZE 64
+#define SDIO_FUNC2_BLOCKSIZE 512
+
+/* devices we support, null terminated */
+static const struct sdio_device_id brcmf_sdmmc_ids[] = {
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
+ { /* end: all zeroes */ },
+};
+MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
+
+static bool
+brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
+{
+ bool is_err = false;
+#ifdef CONFIG_PM_SLEEP
+ is_err = atomic_read(&sdiodev->suspend);
+#endif
+ return is_err;
+}
+
+static void
+brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev, wait_queue_head_t *wq)
+{
+#ifdef CONFIG_PM_SLEEP
+ int retry = 0;
+ while (atomic_read(&sdiodev->suspend) && retry++ != 30)
+ wait_event_timeout(*wq, false, HZ/100);
+#endif
+}
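+
+/*
+ * Note: the wait_event_timeout() condition above is constant false, so
+ * each call is simply a HZ/100 jiffy sleep; with the retry limit of 30
+ * the resume path gets roughly 300 ms to clear the suspend flag.
+ * Callers then check brcmf_pm_resume_error() and give up with -EIO if
+ * the device is still suspended.
+ */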
+
+static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
+ uint regaddr, u8 *byte)
+{
+ struct sdio_func *sdfunc = sdiodev->func[0];
+ int err_ret;
+
+ /*
+ * Can only directly write to some F0 registers.
+ * Handle F2 enable/disable and Abort command
+ * as a special case.
+ */
+ if (regaddr == SDIO_CCCR_IOEx) {
+ sdfunc = sdiodev->func[2];
+ if (sdfunc) {
+ sdio_claim_host(sdfunc);
+ if (*byte & SDIO_FUNC_ENABLE_2) {
+ /* Enable Function 2 */
+ err_ret = sdio_enable_func(sdfunc);
+ if (err_ret)
+ brcmf_dbg(ERROR,
+ "enable F2 failed:%d\n",
+ err_ret);
+ } else {
+ /* Disable Function 2 */
+ err_ret = sdio_disable_func(sdfunc);
+ if (err_ret)
+ brcmf_dbg(ERROR,
+ "Disable F2 failed:%d\n",
+ err_ret);
+ }
+ sdio_release_host(sdfunc);
+ }
+ } else if (regaddr == SDIO_CCCR_ABORT) {
+ sdio_claim_host(sdfunc);
+ sdio_writeb(sdfunc, *byte, regaddr, &err_ret);
+ sdio_release_host(sdfunc);
+ } else if (regaddr < 0xF0) {
+ brcmf_dbg(ERROR, "F0 Wr:0x%02x: write disallowed\n", regaddr);
+ err_ret = -EPERM;
+ } else {
+ sdio_claim_host(sdfunc);
+ sdio_f0_writeb(sdfunc, *byte, regaddr, &err_ret);
+ sdio_release_host(sdfunc);
+ }
+
+ return err_ret;
+}
+
+int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint func,
+ uint regaddr, u8 *byte)
+{
+ int err_ret;
+
+ brcmf_dbg(INFO, "rw=%d, func=%d, addr=0x%05x\n", rw, func, regaddr);
+
+ brcmf_pm_resume_wait(sdiodev, &sdiodev->request_byte_wait);
+ if (brcmf_pm_resume_error(sdiodev))
+ return -EIO;
+
+ if (rw && func == 0) {
+ /* handle F0 separately */
+ err_ret = brcmf_sdioh_f0_write_byte(sdiodev, regaddr, byte);
+ } else {
+ sdio_claim_host(sdiodev->func[func]);
+ if (rw) /* CMD52 Write */
+ sdio_writeb(sdiodev->func[func], *byte, regaddr,
+ &err_ret);
+ else if (func == 0) {
+ *byte = sdio_f0_readb(sdiodev->func[func], regaddr,
+ &err_ret);
+ } else {
+ *byte = sdio_readb(sdiodev->func[func], regaddr,
+ &err_ret);
+ }
+ sdio_release_host(sdiodev->func[func]);
+ }
+
+ if (err_ret)
+ brcmf_dbg(ERROR, "Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
+ rw ? "write" : "read", func, regaddr, *byte, err_ret);
+
+ return err_ret;
+}
+
+int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
+ uint rw, uint func, uint addr, u32 *word,
+ uint nbytes)
+{
+ int err_ret = -EIO;
+
+ if (func == 0) {
+ brcmf_dbg(ERROR, "Only CMD52 allowed to F0\n");
+ return -EINVAL;
+ }
+
+ brcmf_dbg(INFO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
+ rw, func, addr, nbytes);
+
+ brcmf_pm_resume_wait(sdiodev, &sdiodev->request_word_wait);
+ if (brcmf_pm_resume_error(sdiodev))
+ return -EIO;
+ /* Claim host controller */
+ sdio_claim_host(sdiodev->func[func]);
+
+ if (rw) { /* CMD53 write */
+ if (nbytes == 4)
+ sdio_writel(sdiodev->func[func], *word, addr,
+ &err_ret);
+ else if (nbytes == 2)
+ sdio_writew(sdiodev->func[func], (*word & 0xFFFF),
+ addr, &err_ret);
+ else
+ brcmf_dbg(ERROR, "Invalid nbytes: %d\n", nbytes);
+ } else { /* CMD53 read */
+ if (nbytes == 4)
+ *word = sdio_readl(sdiodev->func[func], addr, &err_ret);
+ else if (nbytes == 2)
+ *word = sdio_readw(sdiodev->func[func], addr,
+ &err_ret) & 0xFFFF;
+ else
+ brcmf_dbg(ERROR, "Invalid nbytes: %d\n", nbytes);
+ }
+
+ /* Release host controller */
+ sdio_release_host(sdiodev->func[func]);
+
+ if (err_ret)
+ brcmf_dbg(ERROR, "Failed to %s word, Err: 0x%08x\n",
+ rw ? "write" : "read", err_ret);
+
+ return err_ret;
+}
+
+static int
+brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
+ uint write, uint func, uint addr,
+ struct sk_buff *pkt)
+{
+ bool fifo = (fix_inc == SDIOH_DATA_FIX);
+ u32 SGCount = 0;
+ int err_ret = 0;
+
+ struct sk_buff *pnext;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ brcmf_pm_resume_wait(sdiodev, &sdiodev->request_packet_wait);
+ if (brcmf_pm_resume_error(sdiodev))
+ return -EIO;
+
+ /* Claim host controller */
+ sdio_claim_host(sdiodev->func[func]);
+ for (pnext = pkt; pnext; pnext = pnext->next) {
+ /* round each transfer up to a 4-byte boundary */
+ uint pkt_len = ALIGN(pnext->len, 4);
+
+ if (write) {
+ /* fixed and incrementing address writes take the same path */
+ err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
+ ((u8 *) (pnext->data)),
+ pkt_len);
+ } else if (fifo) {
+ err_ret = sdio_readsb(sdiodev->func[func],
+ ((u8 *) (pnext->data)),
+ addr, pkt_len);
+ } else {
+ err_ret = sdio_memcpy_fromio(sdiodev->func[func],
+ ((u8 *) (pnext->data)),
+ addr, pkt_len);
+ }
+
+ if (err_ret) {
+ brcmf_dbg(ERROR, "%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
+ write ? "TX" : "RX", pnext, SGCount, addr,
+ pkt_len, err_ret);
+ } else {
+ brcmf_dbg(TRACE, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n",
+ write ? "TX" : "RX", pnext, SGCount, addr,
+ pkt_len);
+ }
+
+ if (!fifo)
+ addr += pkt_len;
+ SGCount++;
+
+ }
+
+ /* Release host controller */
+ sdio_release_host(sdiodev->func[func]);
+
+ brcmf_dbg(TRACE, "Exit\n");
+ return err_ret;
+}
+
+/*
+ * This function takes a buffer or packet, and fixes everything up
+ * so that in the end, a DMA-able packet is created.
+ *
+ * A buffer does not have an associated packet pointer,
+ * and may or may not be aligned.
+ * A packet may consist of a single packet, or a packet chain.
+ * If it is a packet chain, then all the packets in the chain
+ * must be properly aligned.
+ *
+ * If the packet data is not aligned, then there may only be
+ * one packet, and in this case, it is copied to a new
+ * aligned packet.
+ *
+ */
+int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
+ uint fix_inc, uint write, uint func, uint addr,
+ uint reg_width, uint buflen_u, u8 *buffer,
+ struct sk_buff *pkt)
+{
+ int Status;
+ struct sk_buff *mypkt = NULL;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
+ if (brcmf_pm_resume_error(sdiodev))
+ return -EIO;
+ /* Case 1: we don't have a packet. */
+ if (pkt == NULL) {
+ brcmf_dbg(DATA, "Creating new %s Packet, len=%d\n",
+ write ? "TX" : "RX", buflen_u);
+ mypkt = brcmu_pkt_buf_get_skb(buflen_u);
+ if (!mypkt) {
+ brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+ buflen_u);
+ return -EIO;
+ }
+
+ /* For a write, copy the buffer data into the packet. */
+ if (write)
+ memcpy(mypkt->data, buffer, buflen_u);
+
+ Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
+ func, addr, mypkt);
+
+ /* For a read, copy the packet data back to the buffer. */
+ if (!write)
+ memcpy(buffer, mypkt->data, buflen_u);
+
+ brcmu_pkt_buf_free_skb(mypkt);
+ } else if (((ulong) (pkt->data) & DMA_ALIGN_MASK) != 0) {
+ /*
+ * Case 2: We have a packet, but it is unaligned.
+ * In this case, we cannot have a chain (pkt->next == NULL)
+ */
+ brcmf_dbg(DATA, "Creating aligned %s Packet, len=%d\n",
+ write ? "TX" : "RX", pkt->len);
+ mypkt = brcmu_pkt_buf_get_skb(pkt->len);
+ if (!mypkt) {
+ brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+ pkt->len);
+ return -EIO;
+ }
+
+ /* For a write, copy the buffer data into the packet. */
+ if (write)
+ memcpy(mypkt->data, pkt->data, pkt->len);
+
+ Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
+ func, addr, mypkt);
+
+ /* For a read, copy the packet data back to the buffer. */
+ if (!write)
+ memcpy(pkt->data, mypkt->data, mypkt->len);
+
+ brcmu_pkt_buf_free_skb(mypkt);
+	} else { /* Case 3: We have a packet and it is aligned. */
+ brcmf_dbg(DATA, "Aligned %s Packet, direct DMA\n",
+ write ? "Tx" : "Rx");
+ Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
+ func, addr, pkt);
+ }
+
+ return Status;
+}
+
+/* Read client card reg */
+static int
+brcmf_sdioh_card_regread(struct brcmf_sdio_dev *sdiodev, int func, u32 regaddr,
+ int regsize, u32 *data)
+{
+ if ((func == 0) || (regsize == 1)) {
+ u8 temp = 0;
+
+ brcmf_sdioh_request_byte(sdiodev, SDIOH_READ, func, regaddr,
+ &temp);
+ *data = temp;
+ *data &= 0xff;
+ brcmf_dbg(DATA, "byte read data=0x%02x\n", *data);
+ } else {
+ brcmf_sdioh_request_word(sdiodev, SDIOH_READ, func, regaddr,
+ data, regsize);
+ if (regsize == 2)
+ *data &= 0xffff;
+
+ brcmf_dbg(DATA, "word read data=0x%08x\n", *data);
+ }
+
+ return SUCCESS;
+}
+
+static int brcmf_sdioh_get_cisaddr(struct brcmf_sdio_dev *sdiodev, u32 regaddr)
+{
+ /* read 24 bits and return valid 17 bit addr */
+ int i;
+ u32 scratch, regdata;
+ __le32 scratch_le;
+ u8 *ptr = (u8 *)&scratch_le;
+
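+	/* assemble the 24-bit pointer one byte at a time, LSB first */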
+ for (i = 0; i < 3; i++) {
+ if ((brcmf_sdioh_card_regread(sdiodev, 0, regaddr, 1,
+					      &regdata)) != SUCCESS)
+ brcmf_dbg(ERROR, "Can't read!\n");
+
+ *ptr++ = (u8) regdata;
+ regaddr++;
+ }
+
+ /* Only the lower 17-bits are valid */
+ scratch = le32_to_cpu(scratch_le);
+ scratch &= 0x0001FFFF;
+ return scratch;
+}
+
+static int brcmf_sdioh_enablefuncs(struct brcmf_sdio_dev *sdiodev)
+{
+ int err_ret;
+ u32 fbraddr;
+ u8 func;
+
+ brcmf_dbg(TRACE, "\n");
+
+ /* Get the Card's common CIS address */
+ sdiodev->func_cis_ptr[0] = brcmf_sdioh_get_cisaddr(sdiodev,
+ SDIO_CCCR_CIS);
+ brcmf_dbg(INFO, "Card's Common CIS Ptr = 0x%x\n",
+ sdiodev->func_cis_ptr[0]);
+
+ /* Get the Card's function CIS (for each function) */
+ for (fbraddr = SDIO_FBR_BASE(1), func = 1;
+ func <= sdiodev->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
+ sdiodev->func_cis_ptr[func] =
+ brcmf_sdioh_get_cisaddr(sdiodev, SDIO_FBR_CIS + fbraddr);
+ brcmf_dbg(INFO, "Function %d CIS Ptr = 0x%x\n",
+ func, sdiodev->func_cis_ptr[func]);
+ }
+
+ /* Enable Function 1 */
+ sdio_claim_host(sdiodev->func[1]);
+ err_ret = sdio_enable_func(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func[1]);
+ if (err_ret)
+ brcmf_dbg(ERROR, "Failed to enable F1 Err: 0x%08x\n", err_ret);
+
+	return 0;
+}
+
+/*
+ * Public entry points & extern's
+ */
+int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev)
+{
+ int err_ret = 0;
+
+ brcmf_dbg(TRACE, "\n");
+
+ sdiodev->num_funcs = 2;
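+	/* only F1 and F2 are used: F1 for backplane register access,
+	   F2 for data frames */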
+
+ sdio_claim_host(sdiodev->func[1]);
+ err_ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
+ sdio_release_host(sdiodev->func[1]);
+ if (err_ret) {
+ brcmf_dbg(ERROR, "Failed to set F1 blocksize\n");
+ goto out;
+ }
+
+ sdio_claim_host(sdiodev->func[2]);
+ err_ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
+ sdio_release_host(sdiodev->func[2]);
+ if (err_ret) {
+ brcmf_dbg(ERROR, "Failed to set F2 blocksize\n");
+ goto out;
+ }
+
+ brcmf_sdioh_enablefuncs(sdiodev);
+
+out:
+ brcmf_dbg(TRACE, "Done\n");
+ return err_ret;
+}
+
+void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev)
+{
+ brcmf_dbg(TRACE, "\n");
+
+ /* Disable Function 2 */
+ sdio_claim_host(sdiodev->func[2]);
+ sdio_disable_func(sdiodev->func[2]);
+ sdio_release_host(sdiodev->func[2]);
+
+ /* Disable Function 1 */
+ sdio_claim_host(sdiodev->func[1]);
+ sdio_disable_func(sdiodev->func[1]);
+ sdio_release_host(sdiodev->func[1]);
+
+}
+
+static int brcmf_ops_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ int ret = 0;
+ struct brcmf_sdio_dev *sdiodev;
+ brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(TRACE, "func->class=%x\n", func->class);
+ brcmf_dbg(TRACE, "sdio_vendor: 0x%04x\n", func->vendor);
+ brcmf_dbg(TRACE, "sdio_device: 0x%04x\n", func->device);
+ brcmf_dbg(TRACE, "Function#: 0x%04x\n", func->num);
+
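+	/* F1 probes first and carries the driver state; the probe
+	   completes when F2 appears */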
+ if (func->num == 1) {
+ if (dev_get_drvdata(&func->card->dev)) {
+ brcmf_dbg(ERROR, "card private drvdata occupied\n");
+ return -ENXIO;
+ }
+ sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
+ if (!sdiodev)
+ return -ENOMEM;
+ sdiodev->func[0] = func->card->sdio_func[0];
+ sdiodev->func[1] = func;
+ dev_set_drvdata(&func->card->dev, sdiodev);
+
+ atomic_set(&sdiodev->suspend, false);
+ init_waitqueue_head(&sdiodev->request_byte_wait);
+ init_waitqueue_head(&sdiodev->request_word_wait);
+ init_waitqueue_head(&sdiodev->request_packet_wait);
+ init_waitqueue_head(&sdiodev->request_buffer_wait);
+ }
+
+ if (func->num == 2) {
+ sdiodev = dev_get_drvdata(&func->card->dev);
+ if ((!sdiodev) || (sdiodev->func[1]->card != func->card))
+ return -ENODEV;
+ sdiodev->func[2] = func;
+
+ brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_probe...\n");
+ ret = brcmf_sdio_probe(sdiodev);
+ }
+
+ return ret;
+}
+
+static void brcmf_ops_sdio_remove(struct sdio_func *func)
+{
+ struct brcmf_sdio_dev *sdiodev;
+ brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(INFO, "func->class=%x\n", func->class);
+ brcmf_dbg(INFO, "sdio_vendor: 0x%04x\n", func->vendor);
+ brcmf_dbg(INFO, "sdio_device: 0x%04x\n", func->device);
+ brcmf_dbg(INFO, "Function#: 0x%04x\n", func->num);
+
+ if (func->num == 2) {
+ sdiodev = dev_get_drvdata(&func->card->dev);
+ brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_remove...\n");
+ brcmf_sdio_remove(sdiodev);
+ dev_set_drvdata(&func->card->dev, NULL);
+ kfree(sdiodev);
+ }
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int brcmf_sdio_suspend(struct device *dev)
+{
+ mmc_pm_flag_t sdio_flags;
+ struct brcmf_sdio_dev *sdiodev;
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ int ret = 0;
+
+ brcmf_dbg(TRACE, "\n");
+
+ sdiodev = dev_get_drvdata(&func->card->dev);
+
+ atomic_set(&sdiodev->suspend, true);
+
+ sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
+ if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
+ brcmf_dbg(ERROR, "Host can't keep power while suspended\n");
+ return -EINVAL;
+ }
+
+ ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
+ if (ret) {
+ brcmf_dbg(ERROR, "Failed to set pm_flags\n");
+ return ret;
+ }
+
+ brcmf_sdio_wdtmr_enable(sdiodev, false);
+
+ return ret;
+}
+
+static int brcmf_sdio_resume(struct device *dev)
+{
+ struct brcmf_sdio_dev *sdiodev;
+ struct sdio_func *func = dev_to_sdio_func(dev);
+
+ sdiodev = dev_get_drvdata(&func->card->dev);
+ brcmf_sdio_wdtmr_enable(sdiodev, true);
+ atomic_set(&sdiodev->suspend, false);
+ return 0;
+}
+
+static const struct dev_pm_ops brcmf_sdio_pm_ops = {
+ .suspend = brcmf_sdio_suspend,
+ .resume = brcmf_sdio_resume,
+};
+#endif /* CONFIG_PM_SLEEP */
+
+static struct sdio_driver brcmf_sdmmc_driver = {
+ .probe = brcmf_ops_sdio_probe,
+ .remove = brcmf_ops_sdio_remove,
+ .name = "brcmfmac",
+ .id_table = brcmf_sdmmc_ids,
+#ifdef CONFIG_PM_SLEEP
+ .drv = {
+ .pm = &brcmf_sdio_pm_ops,
+ },
+#endif /* CONFIG_PM_SLEEP */
+};
+
+/* bus register interface */
+int brcmf_bus_register(void)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ return sdio_register_driver(&brcmf_sdmmc_driver);
+}
+
+void brcmf_bus_unregister(void)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ sdio_unregister_driver(&brcmf_sdmmc_driver);
+}
--- /dev/null
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/hardirq.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/module.h>
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+#include <defs.h>
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+
+#include "dhd.h"
+#include "dhd_bus.h"
+#include "dhd_proto.h"
+#include "dhd_dbg.h"
+#include "wl_cfg80211.h"
+#include "bcmchip.h"
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac driver.");
+MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN fullmac cards");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/* Interface control information */
+struct brcmf_if {
+ struct brcmf_info *info; /* back pointer to brcmf_info */
+ /* OS/stack specifics */
+ struct net_device *ndev;
+ struct net_device_stats stats;
+ int idx; /* iface idx in dongle */
+ int state; /* interface state */
+ u8 mac_addr[ETH_ALEN]; /* assigned MAC address */
+};
+
+/* Local private structure (extension of pub) */
+struct brcmf_info {
+ struct brcmf_pub pub;
+
+ /* OS/stack specifics */
+ struct brcmf_if *iflist[BRCMF_MAX_IFS];
+
+ struct mutex proto_block;
+
+ struct work_struct setmacaddr_work;
+ struct work_struct multicast_work;
+ u8 macvalue[ETH_ALEN];
+ atomic_t pend_8021x_cnt;
+};
+
+/* Error bits */
+module_param(brcmf_msg_level, int, 0);
+
+static int brcmf_net2idx(struct brcmf_info *drvr_priv, struct net_device *ndev)
+{
+ int i = 0;
+
+ while (i < BRCMF_MAX_IFS) {
+ if (drvr_priv->iflist[i] && drvr_priv->iflist[i]->ndev == ndev)
+ return i;
+ i++;
+ }
+
+ return BRCMF_BAD_IF;
+}
+
+int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name)
+{
+ int i = BRCMF_MAX_IFS;
+ struct brcmf_if *ifp;
+
+ if (name == NULL || *name == '\0')
+ return 0;
+
+ while (--i > 0) {
+ ifp = drvr_priv->iflist[i];
+ if (ifp && !strncmp(ifp->ndev->name, name, IFNAMSIZ))
+ break;
+ }
+
+ brcmf_dbg(TRACE, "return idx %d for \"%s\"\n", i, name);
+
+ return i; /* default - the primary interface */
+}
+
+char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
+{
+ struct brcmf_info *drvr_priv = drvr->info;
+
+ if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
+ brcmf_dbg(ERROR, "ifidx %d out of range\n", ifidx);
+ return "<if_bad>";
+ }
+
+ if (drvr_priv->iflist[ifidx] == NULL) {
+ brcmf_dbg(ERROR, "null i/f %d\n", ifidx);
+ return "<if_null>";
+ }
+
+ if (drvr_priv->iflist[ifidx]->ndev)
+ return drvr_priv->iflist[ifidx]->ndev->name;
+
+ return "<if_none>";
+}
+
+static void _brcmf_set_multicast_list(struct work_struct *work)
+{
+ struct net_device *ndev;
+ struct netdev_hw_addr *ha;
+ u32 allmulti, cnt;
+ __le32 cnt_le;
+ __le32 allmulti_le;
+
+ struct brcmf_dcmd dcmd;
+ char *buf, *bufp;
+ uint buflen;
+ int ret;
+
+ struct brcmf_info *drvr_priv = container_of(work, struct brcmf_info,
+ multicast_work);
+
+ ndev = drvr_priv->iflist[0]->ndev;
+ cnt = netdev_mc_count(ndev);
+
+ /* Determine initial value of allmulti flag */
+ allmulti = (ndev->flags & IFF_ALLMULTI) ? true : false;
+
+ /* Send down the multicast list first. */
+
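+	/* iovar buffer layout: NUL-terminated name, LE32 address count,
+	   then the addresses themselves */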
+ buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETH_ALEN);
+ bufp = buf = kmalloc(buflen, GFP_ATOMIC);
+ if (!bufp)
+ return;
+
+ strcpy(bufp, "mcast_list");
+ bufp += strlen("mcast_list") + 1;
+
+ cnt_le = cpu_to_le32(cnt);
+ memcpy(bufp, &cnt_le, sizeof(cnt));
+ bufp += sizeof(cnt_le);
+
+ netdev_for_each_mc_addr(ha, ndev) {
+ if (!cnt)
+ break;
+ memcpy(bufp, ha->addr, ETH_ALEN);
+ bufp += ETH_ALEN;
+ cnt--;
+ }
+
+ memset(&dcmd, 0, sizeof(dcmd));
+ dcmd.cmd = BRCMF_C_SET_VAR;
+ dcmd.buf = buf;
+ dcmd.len = buflen;
+ dcmd.set = true;
+
+ ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+ if (ret < 0) {
+ brcmf_dbg(ERROR, "%s: set mcast_list failed, cnt %d\n",
+ brcmf_ifname(&drvr_priv->pub, 0), cnt);
+ allmulti = cnt ? true : allmulti;
+ }
+
+ kfree(buf);
+
+ /* Now send the allmulti setting. This is based on the setting in the
+ * net_device flags, but might be modified above to be turned on if we
+ * were trying to set some addresses and dongle rejected it...
+ */
+
+ buflen = sizeof("allmulti") + sizeof(allmulti);
+ buf = kmalloc(buflen, GFP_ATOMIC);
+ if (!buf)
+ return;
+
+ allmulti_le = cpu_to_le32(allmulti);
+
+	if (!brcmu_mkiovar("allmulti", (void *)&allmulti_le,
+			   sizeof(allmulti_le), buf, buflen)) {
+ brcmf_dbg(ERROR, "%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
+ brcmf_ifname(&drvr_priv->pub, 0),
+ (int)sizeof(allmulti), buflen);
+ kfree(buf);
+ return;
+ }
+
+ memset(&dcmd, 0, sizeof(dcmd));
+ dcmd.cmd = BRCMF_C_SET_VAR;
+ dcmd.buf = buf;
+ dcmd.len = buflen;
+ dcmd.set = true;
+
+ ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+ if (ret < 0) {
+ brcmf_dbg(ERROR, "%s: set allmulti %d failed\n",
+ brcmf_ifname(&drvr_priv->pub, 0),
+ le32_to_cpu(allmulti_le));
+ }
+
+ kfree(buf);
+
+ /* Finally, pick up the PROMISC flag as well, like the NIC
+ driver does */
+
+ allmulti = (ndev->flags & IFF_PROMISC) ? true : false;
+ allmulti_le = cpu_to_le32(allmulti);
+
+ memset(&dcmd, 0, sizeof(dcmd));
+ dcmd.cmd = BRCMF_C_SET_PROMISC;
+ dcmd.buf = &allmulti_le;
+ dcmd.len = sizeof(allmulti_le);
+ dcmd.set = true;
+
+ ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+ if (ret < 0) {
+ brcmf_dbg(ERROR, "%s: set promisc %d failed\n",
+ brcmf_ifname(&drvr_priv->pub, 0),
+ le32_to_cpu(allmulti_le));
+ }
+}
+
+static void
+_brcmf_set_mac_address(struct work_struct *work)
+{
+ char buf[32];
+ struct brcmf_dcmd dcmd;
+ int ret;
+
+ struct brcmf_info *drvr_priv = container_of(work, struct brcmf_info,
+ setmacaddr_work);
+
+ brcmf_dbg(TRACE, "enter\n");
+ if (!brcmu_mkiovar("cur_etheraddr", (char *)drvr_priv->macvalue,
+ ETH_ALEN, buf, 32)) {
+ brcmf_dbg(ERROR, "%s: mkiovar failed for cur_etheraddr\n",
+ brcmf_ifname(&drvr_priv->pub, 0));
+ return;
+ }
+ memset(&dcmd, 0, sizeof(dcmd));
+ dcmd.cmd = BRCMF_C_SET_VAR;
+ dcmd.buf = buf;
+ dcmd.len = 32;
+ dcmd.set = true;
+
+ ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+ if (ret < 0)
+ brcmf_dbg(ERROR, "%s: set cur_etheraddr failed\n",
+ brcmf_ifname(&drvr_priv->pub, 0));
+ else
+ memcpy(drvr_priv->iflist[0]->ndev->dev_addr,
+ drvr_priv->macvalue, ETH_ALEN);
+}
+
+static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **)
+ netdev_priv(ndev);
+ struct sockaddr *sa = (struct sockaddr *)addr;
+ int ifidx;
+
+ ifidx = brcmf_net2idx(drvr_priv, ndev);
+ if (ifidx == BRCMF_BAD_IF)
+ return -1;
+
+ memcpy(&drvr_priv->macvalue, sa->sa_data, ETH_ALEN);
+ schedule_work(&drvr_priv->setmacaddr_work);
+ return 0;
+}
+
+static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
+{
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **)
+ netdev_priv(ndev);
+ int ifidx;
+
+ ifidx = brcmf_net2idx(drvr_priv, ndev);
+ if (ifidx == BRCMF_BAD_IF)
+ return;
+
+ schedule_work(&drvr_priv->multicast_work);
+}
+
+int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
+{
+ struct brcmf_info *drvr_priv = drvr->info;
+
+ /* Reject if down */
+ if (!drvr->up || (drvr->busstate == BRCMF_BUS_DOWN))
+ return -ENODEV;
+
+ /* Update multicast statistic */
+ if (pktbuf->len >= ETH_ALEN) {
+ u8 *pktdata = (u8 *) (pktbuf->data);
+ struct ethhdr *eh = (struct ethhdr *)pktdata;
+
+ if (is_multicast_ether_addr(eh->h_dest))
+ drvr->tx_multicast++;
+ if (ntohs(eh->h_proto) == ETH_P_PAE)
+ atomic_inc(&drvr_priv->pend_8021x_cnt);
+ }
+
+ /* If the protocol uses a data header, apply it */
+ brcmf_proto_hdrpush(drvr, ifidx, pktbuf);
+
+ /* Use bus module to send data frame */
+ return brcmf_sdbrcm_bus_txdata(drvr->bus, pktbuf);
+}
+
+static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ int ret;
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **)
+ netdev_priv(ndev);
+ int ifidx;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Reject if down */
+ if (!drvr_priv->pub.up || (drvr_priv->pub.busstate == BRCMF_BUS_DOWN)) {
+ brcmf_dbg(ERROR, "xmit rejected pub.up=%d busstate=%d\n",
+ drvr_priv->pub.up, drvr_priv->pub.busstate);
+ netif_stop_queue(ndev);
+ return -ENODEV;
+ }
+
+ ifidx = brcmf_net2idx(drvr_priv, ndev);
+ if (ifidx == BRCMF_BAD_IF) {
+ brcmf_dbg(ERROR, "bad ifidx %d\n", ifidx);
+ netif_stop_queue(ndev);
+ return -ENODEV;
+ }
+
+ /* Make sure there's enough room for any header */
+ if (skb_headroom(skb) < drvr_priv->pub.hdrlen) {
+ struct sk_buff *skb2;
+
+ brcmf_dbg(INFO, "%s: insufficient headroom\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx));
+ drvr_priv->pub.tx_realloc++;
+ skb2 = skb_realloc_headroom(skb, drvr_priv->pub.hdrlen);
+ dev_kfree_skb(skb);
+ skb = skb2;
+ if (skb == NULL) {
+ brcmf_dbg(ERROR, "%s: skb_realloc_headroom failed\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx));
+ ret = -ENOMEM;
+ goto done;
+ }
+ }
+
+ ret = brcmf_sendpkt(&drvr_priv->pub, ifidx, skb);
+
+done:
+ if (ret)
+ drvr_priv->pub.dstats.tx_dropped++;
+ else
+ drvr_priv->pub.tx_packets++;
+
+ /* Return ok: we always eat the packet */
+ return 0;
+}
+
+void brcmf_txflowcontrol(struct brcmf_pub *drvr, int ifidx, bool state)
+{
+ struct net_device *ndev;
+ struct brcmf_info *drvr_priv = drvr->info;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ drvr->txoff = state;
+ ndev = drvr_priv->iflist[ifidx]->ndev;
+ if (state == ON)
+ netif_stop_queue(ndev);
+ else
+ netif_wake_queue(ndev);
+}
+
+static int brcmf_host_event(struct brcmf_info *drvr_priv, int *ifidx,
+ void *pktdata, struct brcmf_event_msg *event,
+ void **data)
+{
+ int bcmerror = 0;
+
+ bcmerror = brcmf_c_host_event(drvr_priv, ifidx, pktdata, event, data);
+ if (bcmerror != 0)
+ return bcmerror;
+
+ if (drvr_priv->iflist[*ifidx]->ndev)
+ brcmf_cfg80211_event(drvr_priv->iflist[*ifidx]->ndev,
+ event, *data);
+
+ return bcmerror;
+}
+
+void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
+ int numpkt)
+{
+ struct brcmf_info *drvr_priv = drvr->info;
+ unsigned char *eth;
+ uint len;
+ void *data;
+ struct sk_buff *pnext, *save_pktbuf;
+ int i;
+ struct brcmf_if *ifp;
+ struct brcmf_event_msg event;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ save_pktbuf = skb;
+
+ for (i = 0; skb && i < numpkt; i++, skb = pnext) {
+
+ pnext = skb->next;
+ skb->next = NULL;
+
+		/* Get the protocol, keeping the skb intact across
+		 * eth_type_trans(). This works around a limitation dating
+		 * back to Linux 2.4, where eth_type_trans() pulls
+		 * 'net->hard_header_len' bytes instead of ETH_HLEN. To avoid
+		 * copying packets coming from the network stack when the
+		 * BDC and hardware headers are added, the interface is
+		 * registered with 'net->hard_header_len' set to ETH_HLEN
+		 * plus the extra space those headers require, so the data
+		 * pointer and length are restored below.
+		 */
+ eth = skb->data;
+ len = skb->len;
+
+ ifp = drvr_priv->iflist[ifidx];
+ if (ifp == NULL)
+ ifp = drvr_priv->iflist[0];
+
+ skb->dev = ifp->ndev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (skb->pkt_type == PACKET_MULTICAST)
+ drvr_priv->pub.rx_multicast++;
+
+ skb->data = eth;
+ skb->len = len;
+
+ /* Strip header, count, deliver upward */
+ skb_pull(skb, ETH_HLEN);
+
+ /* Process special event packets and then discard them */
+ if (ntohs(skb->protocol) == ETH_P_LINK_CTL)
+ brcmf_host_event(drvr_priv, &ifidx,
+ skb_mac_header(skb),
+ &event, &data);
+
+ if (drvr_priv->iflist[ifidx] &&
+ !drvr_priv->iflist[ifidx]->state)
+ ifp = drvr_priv->iflist[ifidx];
+
+ if (ifp->ndev)
+ ifp->ndev->last_rx = jiffies;
+
+ drvr->dstats.rx_bytes += skb->len;
+ drvr->rx_packets++; /* Local count */
+
+ if (in_interrupt())
+ netif_rx(skb);
+ else
+ /* If the receive is not processed inside an ISR,
+ * the softirqd must be woken explicitly to service
+ * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
+ * by netif_rx_ni(), but in earlier kernels, we need
+ * to do it manually.
+ */
+ netif_rx_ni(skb);
+ }
+}
+
+void brcmf_txcomplete(struct brcmf_pub *drvr, struct sk_buff *txp, bool success)
+{
+ uint ifidx;
+ struct brcmf_info *drvr_priv = drvr->info;
+ struct ethhdr *eh;
+ u16 type;
+
+ brcmf_proto_hdrpull(drvr, &ifidx, txp);
+
+ eh = (struct ethhdr *)(txp->data);
+ type = ntohs(eh->h_proto);
+
+ if (type == ETH_P_PAE)
+ atomic_dec(&drvr_priv->pend_8021x_cnt);
+
+}
+
+static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
+{
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **)
+ netdev_priv(ndev);
+ struct brcmf_if *ifp;
+ int ifidx;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ ifidx = brcmf_net2idx(drvr_priv, ndev);
+ if (ifidx == BRCMF_BAD_IF)
+ return NULL;
+
+ ifp = drvr_priv->iflist[ifidx];
+
+ if (drvr_priv->pub.up)
+ /* Use the protocol to get dongle stats */
+ brcmf_proto_dstats(&drvr_priv->pub);
+
+ /* Copy dongle stats to net device stats */
+ ifp->stats.rx_packets = drvr_priv->pub.dstats.rx_packets;
+ ifp->stats.tx_packets = drvr_priv->pub.dstats.tx_packets;
+ ifp->stats.rx_bytes = drvr_priv->pub.dstats.rx_bytes;
+ ifp->stats.tx_bytes = drvr_priv->pub.dstats.tx_bytes;
+ ifp->stats.rx_errors = drvr_priv->pub.dstats.rx_errors;
+ ifp->stats.tx_errors = drvr_priv->pub.dstats.tx_errors;
+ ifp->stats.rx_dropped = drvr_priv->pub.dstats.rx_dropped;
+ ifp->stats.tx_dropped = drvr_priv->pub.dstats.tx_dropped;
+ ifp->stats.multicast = drvr_priv->pub.dstats.multicast;
+
+ return &ifp->stats;
+}
+
+/* Retrieve current toe component enables, which are kept
+ as a bitmap in toe_ol iovar */
+static int brcmf_toe_get(struct brcmf_info *drvr_priv, int ifidx, u32 *toe_ol)
+{
+ struct brcmf_dcmd dcmd;
+ char buf[32];
+ int ret;
+
+ memset(&dcmd, 0, sizeof(dcmd));
+
+ dcmd.cmd = BRCMF_C_GET_VAR;
+ dcmd.buf = buf;
+ dcmd.len = (uint) sizeof(buf);
+ dcmd.set = false;
+
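+	/* for a GET iovar only the name is sent down; the result comes
+	   back in the same buffer */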
+ strcpy(buf, "toe_ol");
+ ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len);
+ if (ret < 0) {
+ /* Check for older dongle image that doesn't support toe_ol */
+ if (ret == -EIO) {
+ brcmf_dbg(ERROR, "%s: toe not supported by device\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx));
+ return -EOPNOTSUPP;
+ }
+
+ brcmf_dbg(INFO, "%s: could not get toe_ol: ret=%d\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx), ret);
+ return ret;
+ }
+
+ memcpy(toe_ol, buf, sizeof(u32));
+ return 0;
+}
+
+/* Set current toe component enables in toe_ol iovar,
+ and set toe global enable iovar */
+static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol)
+{
+ struct brcmf_dcmd dcmd;
+ char buf[32];
+ int toe, ret;
+
+ memset(&dcmd, 0, sizeof(dcmd));
+
+ dcmd.cmd = BRCMF_C_SET_VAR;
+ dcmd.buf = buf;
+ dcmd.len = (uint) sizeof(buf);
+ dcmd.set = true;
+
+ /* Set toe_ol as requested */
+
+ strcpy(buf, "toe_ol");
+ memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(u32));
+
+ ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len);
+ if (ret < 0) {
+ brcmf_dbg(ERROR, "%s: could not set toe_ol: ret=%d\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx), ret);
+ return ret;
+ }
+
+ /* Enable toe globally only if any components are enabled. */
+
+ toe = (toe_ol != 0);
+
+ strcpy(buf, "toe");
+ memcpy(&buf[sizeof("toe")], &toe, sizeof(u32));
+
+ ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len);
+ if (ret < 0) {
+ brcmf_dbg(ERROR, "%s: could not set toe: ret=%d\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx), ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **)
+ netdev_priv(ndev);
+
+ sprintf(info->driver, KBUILD_MODNAME);
+ sprintf(info->version, "%lu", drvr_priv->pub.drv_version);
+ sprintf(info->fw_version, "%s", BCM4329_FW_NAME);
+ sprintf(info->bus_info, "%s",
+ dev_name(brcmf_bus_get_device(drvr_priv->pub.bus)));
+}
+
+static struct ethtool_ops brcmf_ethtool_ops = {
+ .get_drvinfo = brcmf_ethtool_get_drvinfo
+};
+
+static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr)
+{
+ struct ethtool_drvinfo info;
+ char drvname[sizeof(info.driver)];
+ u32 cmd;
+ struct ethtool_value edata;
+ u32 toe_cmpnt, csum_dir;
+ int ret;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* all ethtool calls start with a cmd word */
+ if (copy_from_user(&cmd, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case ETHTOOL_GDRVINFO:
+ /* Copy out any request driver name */
+ if (copy_from_user(&info, uaddr, sizeof(info)))
+ return -EFAULT;
+ strncpy(drvname, info.driver, sizeof(info.driver));
+ drvname[sizeof(info.driver) - 1] = '\0';
+
+ /* clear struct for return */
+ memset(&info, 0, sizeof(info));
+ info.cmd = cmd;
+
+ /* if requested, identify ourselves */
+ if (strcmp(drvname, "?dhd") == 0) {
+ sprintf(info.driver, "dhd");
+ strcpy(info.version, BRCMF_VERSION_STR);
+ }
+
+ /* otherwise, require dongle to be up */
+ else if (!drvr_priv->pub.up) {
+ brcmf_dbg(ERROR, "dongle is not up\n");
+ return -ENODEV;
+ }
+
+ /* finally, report dongle driver type */
+ else if (drvr_priv->pub.iswl)
+ sprintf(info.driver, "wl");
+ else
+ sprintf(info.driver, "xx");
+
+ sprintf(info.version, "%lu", drvr_priv->pub.drv_version);
+ if (copy_to_user(uaddr, &info, sizeof(info)))
+ return -EFAULT;
+ brcmf_dbg(CTL, "given %*s, returning %s\n",
+ (int)sizeof(drvname), drvname, info.driver);
+ break;
+
+ /* Get toe offload components from dongle */
+ case ETHTOOL_GRXCSUM:
+ case ETHTOOL_GTXCSUM:
+ ret = brcmf_toe_get(drvr_priv, 0, &toe_cmpnt);
+ if (ret < 0)
+ return ret;
+
+ csum_dir =
+ (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+ edata.cmd = cmd;
+ edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
+
+ if (copy_to_user(uaddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ break;
+
+ /* Set toe offload components in dongle */
+ case ETHTOOL_SRXCSUM:
+ case ETHTOOL_STXCSUM:
+ if (copy_from_user(&edata, uaddr, sizeof(edata)))
+ return -EFAULT;
+
+ /* Read the current settings, update and write back */
+ ret = brcmf_toe_get(drvr_priv, 0, &toe_cmpnt);
+ if (ret < 0)
+ return ret;
+
+ csum_dir =
+ (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+ if (edata.data != 0)
+ toe_cmpnt |= csum_dir;
+ else
+ toe_cmpnt &= ~csum_dir;
+
+ ret = brcmf_toe_set(drvr_priv, 0, toe_cmpnt);
+ if (ret < 0)
+ return ret;
+
+ /* If setting TX checksum mode, tell Linux the new mode */
+ if (cmd == ETHTOOL_STXCSUM) {
+ if (edata.data)
+ drvr_priv->iflist[0]->ndev->features |=
+ NETIF_F_IP_CSUM;
+ else
+ drvr_priv->iflist[0]->ndev->features &=
+ ~NETIF_F_IP_CSUM;
+ }
+
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
+ int cmd)
+{
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **)
+ netdev_priv(ndev);
+ int ifidx;
+
+ ifidx = brcmf_net2idx(drvr_priv, ndev);
+ brcmf_dbg(TRACE, "ifidx %d, cmd 0x%04x\n", ifidx, cmd);
+
+ if (ifidx == BRCMF_BAD_IF)
+ return -1;
+
+ if (cmd == SIOCETHTOOL)
+ return brcmf_ethtool(drvr_priv, ifr->ifr_data);
+
+ return -EOPNOTSUPP;
+}
+
+/* called only from within this driver. Sends a command to the dongle. */
+s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len)
+{
+ struct brcmf_dcmd dcmd;
+ s32 err = 0;
+ int buflen = 0;
+ bool is_set_key_cmd;
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **)
+ netdev_priv(ndev);
+ int ifidx;
+
+ memset(&dcmd, 0, sizeof(dcmd));
+ dcmd.cmd = cmd;
+ dcmd.buf = arg;
+ dcmd.len = len;
+
+ ifidx = brcmf_net2idx(drvr_priv, ndev);
+
+ if (dcmd.buf != NULL)
+ buflen = min_t(uint, dcmd.len, BRCMF_DCMD_MAXLEN);
+
+ /* send to dongle (must be up, and wl) */
+	if (drvr_priv->pub.busstate != BRCMF_BUS_DATA) {
+ brcmf_dbg(ERROR, "DONGLE_DOWN\n");
+ err = -EIO;
+ goto done;
+ }
+
+ if (!drvr_priv->pub.iswl) {
+ err = -EIO;
+ goto done;
+ }
+
+ /*
+ * Intercept BRCMF_C_SET_KEY CMD - serialize M4 send and
+ * set key CMD to prevent M4 encryption.
+ */
+ is_set_key_cmd = ((dcmd.cmd == BRCMF_C_SET_KEY) ||
+ ((dcmd.cmd == BRCMF_C_SET_VAR) &&
+ !(strncmp("wsec_key", dcmd.buf, 9))) ||
+ ((dcmd.cmd == BRCMF_C_SET_VAR) &&
+ !(strncmp("bsscfg:wsec_key", dcmd.buf, 15))));
+ if (is_set_key_cmd)
+ brcmf_netdev_wait_pend8021x(ndev);
+
+ err = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, buflen);
+
+done:
+ if (err > 0)
+ err = 0;
+
+ return err;
+}
+
+static int brcmf_netdev_stop(struct net_device *ndev)
+{
+ struct brcmf_pub *drvr = *(struct brcmf_pub **) netdev_priv(ndev);
+
+ brcmf_dbg(TRACE, "Enter\n");
+ brcmf_cfg80211_down(drvr->config);
+ if (drvr->up == 0)
+ return 0;
+
+ /* Set state and stop OS transmissions */
+ drvr->up = 0;
+ netif_stop_queue(ndev);
+
+ return 0;
+}
+
+static int brcmf_netdev_open(struct net_device *ndev)
+{
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **)
+ netdev_priv(ndev);
+ u32 toe_ol;
+ int ifidx = brcmf_net2idx(drvr_priv, ndev);
+ s32 ret = 0;
+
+ brcmf_dbg(TRACE, "ifidx %d\n", ifidx);
+
+	if (ifidx == 0) {	/* do this only for the primary interface */
+
+ /* try to bring up bus */
+ ret = brcmf_bus_start(&drvr_priv->pub);
+ if (ret != 0) {
+ brcmf_dbg(ERROR, "failed with code %d\n", ret);
+ return -1;
+ }
+ atomic_set(&drvr_priv->pend_8021x_cnt, 0);
+
+ memcpy(ndev->dev_addr, drvr_priv->pub.mac, ETH_ALEN);
+
+ /* Get current TOE mode from dongle */
+ if (brcmf_toe_get(drvr_priv, ifidx, &toe_ol) >= 0
+ && (toe_ol & TOE_TX_CSUM_OL) != 0)
+ drvr_priv->iflist[ifidx]->ndev->features |=
+ NETIF_F_IP_CSUM;
+ else
+ drvr_priv->iflist[ifidx]->ndev->features &=
+ ~NETIF_F_IP_CSUM;
+ }
+ /* Allow transmit calls */
+ netif_start_queue(ndev);
+ drvr_priv->pub.up = 1;
+ if (brcmf_cfg80211_up(drvr_priv->pub.config)) {
+ brcmf_dbg(ERROR, "failed to bring up cfg80211\n");
+ return -1;
+ }
+
+ return ret;
+}
+
+int
+brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx, struct net_device *ndev,
+ char *name, u8 *mac_addr, u32 flags, u8 bssidx)
+{
+ struct brcmf_if *ifp;
+ int ret = 0, err = 0;
+
+ brcmf_dbg(TRACE, "idx %d, handle->%p\n", ifidx, ndev);
+
+ ifp = drvr_priv->iflist[ifidx];
+ if (!ifp) {
+ ifp = kmalloc(sizeof(struct brcmf_if), GFP_ATOMIC);
+ if (!ifp)
+ return -ENOMEM;
+ }
+
+ memset(ifp, 0, sizeof(struct brcmf_if));
+ ifp->info = drvr_priv;
+ drvr_priv->iflist[ifidx] = ifp;
+ if (mac_addr != NULL)
+ memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN);
+
+ if (ndev == NULL) {
+ ifp->state = BRCMF_E_IF_ADD;
+ ifp->idx = ifidx;
+ /*
+ * Delete the existing interface before overwriting it
+ * in case we missed the BRCMF_E_IF_DEL event.
+ */
+ if (ifp->ndev != NULL) {
+ brcmf_dbg(ERROR, "ERROR: netdev:%s already exists, try free & unregister\n",
+ ifp->ndev->name);
+ netif_stop_queue(ifp->ndev);
+ unregister_netdev(ifp->ndev);
+ free_netdev(ifp->ndev);
+ }
+
+ /* Allocate netdev, including space for private structure */
+ ifp->ndev = alloc_netdev(sizeof(drvr_priv), "wlan%d",
+ ether_setup);
+ if (!ifp->ndev) {
+ brcmf_dbg(ERROR, "OOM - alloc_netdev\n");
+ ret = -ENOMEM;
+ }
+
+ if (ret == 0) {
+ memcpy(netdev_priv(ifp->ndev), &drvr_priv,
+ sizeof(drvr_priv));
+ err = brcmf_net_attach(&drvr_priv->pub, ifp->idx);
+ if (err != 0) {
+ brcmf_dbg(ERROR, "brcmf_net_attach failed, err %d\n",
+ err);
+ ret = -EOPNOTSUPP;
+ } else {
+ brcmf_dbg(TRACE, " ==== pid:%x, net_device for if:%s created ===\n",
+ current->pid, ifp->ndev->name);
+ ifp->state = 0;
+ }
+ }
+
+ if (ret < 0) {
+ if (ifp->ndev)
+ free_netdev(ifp->ndev);
+
+ drvr_priv->iflist[ifp->idx] = NULL;
+ kfree(ifp);
+ }
+ } else
+ ifp->ndev = ndev;
+
+	return ret;
+}
+
+void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx)
+{
+ struct brcmf_if *ifp;
+
+ brcmf_dbg(TRACE, "idx %d\n", ifidx);
+
+ ifp = drvr_priv->iflist[ifidx];
+ if (!ifp) {
+ brcmf_dbg(ERROR, "Null interface\n");
+ return;
+ }
+
+ ifp->state = BRCMF_E_IF_DEL;
+ ifp->idx = ifidx;
+ if (ifp->ndev != NULL) {
+ netif_stop_queue(ifp->ndev);
+ unregister_netdev(ifp->ndev);
+ free_netdev(ifp->ndev);
+ drvr_priv->iflist[ifidx] = NULL;
+ kfree(ifp);
+ }
+}
+
+struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
+{
+ struct brcmf_info *drvr_priv = NULL;
+ struct net_device *ndev;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Allocate netdev, including space for private structure */
+ ndev = alloc_netdev(sizeof(drvr_priv), "wlan%d", ether_setup);
+ if (!ndev) {
+ brcmf_dbg(ERROR, "OOM - alloc_netdev\n");
+ goto fail;
+ }
+
+ /* Allocate primary brcmf_info */
+ drvr_priv = kzalloc(sizeof(struct brcmf_info), GFP_ATOMIC);
+ if (!drvr_priv)
+ goto fail;
+
+ /*
+ * Save the brcmf_info into the priv
+ */
+ memcpy(netdev_priv(ndev), &drvr_priv, sizeof(drvr_priv));
+
+	if (brcmf_add_if(drvr_priv, 0, ndev, ndev->name, NULL, 0, 0) < 0)
+		goto fail;
+
+ ndev->netdev_ops = NULL;
+ mutex_init(&drvr_priv->proto_block);
+
+ /* Link to info module */
+ drvr_priv->pub.info = drvr_priv;
+
+ /* Link to bus module */
+ drvr_priv->pub.bus = bus;
+ drvr_priv->pub.hdrlen = bus_hdrlen;
+
+ /* Attach and link in the protocol */
+ if (brcmf_proto_attach(&drvr_priv->pub) != 0) {
+ brcmf_dbg(ERROR, "brcmf_prot_attach failed\n");
+ goto fail;
+ }
+
+ /* Attach and link in the cfg80211 */
+ drvr_priv->pub.config =
+ brcmf_cfg80211_attach(ndev,
+ brcmf_bus_get_device(bus),
+ &drvr_priv->pub);
+ if (drvr_priv->pub.config == NULL) {
+ brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
+ goto fail;
+ }
+
+ INIT_WORK(&drvr_priv->setmacaddr_work, _brcmf_set_mac_address);
+ INIT_WORK(&drvr_priv->multicast_work, _brcmf_set_multicast_list);
+
+ /*
+ * Save the brcmf_info into the priv
+ */
+ memcpy(netdev_priv(ndev), &drvr_priv, sizeof(drvr_priv));
+
+ return &drvr_priv->pub;
+
+fail:
+ if (ndev)
+ free_netdev(ndev);
+ if (drvr_priv)
+ brcmf_detach(&drvr_priv->pub);
+
+ return NULL;
+}
+
+int brcmf_bus_start(struct brcmf_pub *drvr)
+{
+ int ret = -1;
+ struct brcmf_info *drvr_priv = drvr->info;
+ /* Room for "event_msgs" + '\0' + bitvec */
+ char iovbuf[BRCMF_EVENTING_MASK_LEN + 12];
+
+ brcmf_dbg(TRACE, "\n");
+
+ /* Bring up the bus */
+ ret = brcmf_sdbrcm_bus_init(&drvr_priv->pub);
+ if (ret != 0) {
+ brcmf_dbg(ERROR, "brcmf_sdbrcm_bus_init failed %d\n", ret);
+ return ret;
+ }
+
+ /* If bus is not ready, can't come up */
+ if (drvr_priv->pub.busstate != BRCMF_BUS_DATA) {
+ brcmf_dbg(ERROR, "failed bus is not ready\n");
+ return -ENODEV;
+ }
+
+ brcmu_mkiovar("event_msgs", drvr->eventmask, BRCMF_EVENTING_MASK_LEN,
+ iovbuf, sizeof(iovbuf));
+ brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR, iovbuf,
+ sizeof(iovbuf));
+ memcpy(drvr->eventmask, iovbuf, BRCMF_EVENTING_MASK_LEN);
+
+ setbit(drvr->eventmask, BRCMF_E_SET_SSID);
+ setbit(drvr->eventmask, BRCMF_E_PRUNE);
+ setbit(drvr->eventmask, BRCMF_E_AUTH);
+ setbit(drvr->eventmask, BRCMF_E_REASSOC);
+ setbit(drvr->eventmask, BRCMF_E_REASSOC_IND);
+ setbit(drvr->eventmask, BRCMF_E_DEAUTH_IND);
+ setbit(drvr->eventmask, BRCMF_E_DISASSOC_IND);
+ setbit(drvr->eventmask, BRCMF_E_DISASSOC);
+ setbit(drvr->eventmask, BRCMF_E_JOIN);
+ setbit(drvr->eventmask, BRCMF_E_ASSOC_IND);
+ setbit(drvr->eventmask, BRCMF_E_PSK_SUP);
+ setbit(drvr->eventmask, BRCMF_E_LINK);
+ setbit(drvr->eventmask, BRCMF_E_NDIS_LINK);
+ setbit(drvr->eventmask, BRCMF_E_MIC_ERROR);
+ setbit(drvr->eventmask, BRCMF_E_PMKID_CACHE);
+ setbit(drvr->eventmask, BRCMF_E_TXFAIL);
+ setbit(drvr->eventmask, BRCMF_E_JOIN_START);
+ setbit(drvr->eventmask, BRCMF_E_SCAN_COMPLETE);
+
+ drvr->pktfilter_count = 1;
+ /* Setup filter to allow only unicast */
+ drvr->pktfilter[0] = "100 0 0 0 0x01 0x00";
+
+ /* Bus is ready, do any protocol initialization */
+ ret = brcmf_proto_init(&drvr_priv->pub);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static struct net_device_ops brcmf_netdev_ops_pri = {
+ .ndo_open = brcmf_netdev_open,
+ .ndo_stop = brcmf_netdev_stop,
+ .ndo_get_stats = brcmf_netdev_get_stats,
+ .ndo_do_ioctl = brcmf_netdev_ioctl_entry,
+ .ndo_start_xmit = brcmf_netdev_start_xmit,
+ .ndo_set_mac_address = brcmf_netdev_set_mac_address,
+ .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
+};
+
+int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
+{
+ struct brcmf_info *drvr_priv = drvr->info;
+ struct net_device *ndev;
+ u8 temp_addr[ETH_ALEN] = {
+ 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33};
+
+ brcmf_dbg(TRACE, "ifidx %d\n", ifidx);
+
+ ndev = drvr_priv->iflist[ifidx]->ndev;
+ ndev->netdev_ops = &brcmf_netdev_ops_pri;
+
+	/* virtual interfaces have to use the primary MAC */
+	if (ifidx != 0)
+		memcpy(temp_addr, drvr_priv->pub.mac, ETH_ALEN);
+
+	if (ifidx == 1) {
+		brcmf_dbg(TRACE, "ACCESS POINT MAC:\n");
+		/* access point interface: mark the address as
+		 * locally administered */
+		temp_addr[0] |= 0x02;
+	}
+ ndev->hard_header_len = ETH_HLEN + drvr_priv->pub.hdrlen;
+ ndev->ethtool_ops = &brcmf_ethtool_ops;
+
+ drvr_priv->pub.rxsz = ndev->mtu + ndev->hard_header_len +
+ drvr_priv->pub.hdrlen;
+
+ memcpy(ndev->dev_addr, temp_addr, ETH_ALEN);
+
+ if (register_netdev(ndev) != 0) {
+ brcmf_dbg(ERROR, "couldn't register the net device\n");
+ goto fail;
+ }
+
+ brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
+
+ return 0;
+
+fail:
+ ndev->netdev_ops = NULL;
+ return -EBADE;
+}
+
+static void brcmf_bus_detach(struct brcmf_pub *drvr)
+{
+ struct brcmf_info *drvr_priv;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (drvr) {
+ drvr_priv = drvr->info;
+ if (drvr_priv) {
+ /* Stop the protocol module */
+ brcmf_proto_stop(&drvr_priv->pub);
+
+ /* Stop the bus module */
+ brcmf_sdbrcm_bus_stop(drvr_priv->pub.bus);
+ }
+ }
+}
+
+void brcmf_detach(struct brcmf_pub *drvr)
+{
+ struct brcmf_info *drvr_priv;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (drvr) {
+ drvr_priv = drvr->info;
+ if (drvr_priv) {
+ struct brcmf_if *ifp;
+ int i;
+
+ for (i = 1; i < BRCMF_MAX_IFS; i++)
+ if (drvr_priv->iflist[i])
+ brcmf_del_if(drvr_priv, i);
+
+ ifp = drvr_priv->iflist[0];
+ if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
+ rtnl_lock();
+ brcmf_netdev_stop(ifp->ndev);
+ rtnl_unlock();
+ unregister_netdev(ifp->ndev);
+ }
+
+ cancel_work_sync(&drvr_priv->setmacaddr_work);
+ cancel_work_sync(&drvr_priv->multicast_work);
+
+ brcmf_bus_detach(drvr);
+
+ if (drvr->prot)
+ brcmf_proto_detach(drvr);
+
+ brcmf_cfg80211_detach(drvr->config);
+
+ free_netdev(ifp->ndev);
+ kfree(ifp);
+ kfree(drvr_priv);
+ }
+ }
+}
+
+static void __exit brcmf_module_cleanup(void)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ brcmf_bus_unregister();
+}
+
+static int __init brcmf_module_init(void)
+{
+ int error;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ error = brcmf_bus_register();
+
+	if (error) {
+		brcmf_dbg(ERROR, "brcmf_bus_register failed\n");
+		return error;
+	}
+
+	return 0;
+}
+
+module_init(brcmf_module_init);
+module_exit(brcmf_module_cleanup);
+
+int brcmf_os_proto_block(struct brcmf_pub *drvr)
+{
+ struct brcmf_info *drvr_priv = drvr->info;
+
+ if (drvr_priv) {
+ mutex_lock(&drvr_priv->proto_block);
+ return 1;
+ }
+ return 0;
+}
+
+int brcmf_os_proto_unblock(struct brcmf_pub *drvr)
+{
+ struct brcmf_info *drvr_priv = drvr->info;
+
+ if (drvr_priv) {
+ mutex_unlock(&drvr_priv->proto_block);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int brcmf_get_pend_8021x_cnt(struct brcmf_info *drvr_priv)
+{
+ return atomic_read(&drvr_priv->pend_8021x_cnt);
+}
+
+#define MAX_WAIT_FOR_8021X_TX 10
+
+int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
+{
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **)netdev_priv(ndev);
+ int timeout = 10 * HZ / 1000;
+ int ntimes = MAX_WAIT_FOR_8021X_TX;
+ int pend = brcmf_get_pend_8021x_cnt(drvr_priv);
+
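+	/* poll in ~10 ms slices until the pending EAPOL count drains or
+	   the retries run out */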
+	while (ntimes && pend) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(timeout);
+		set_current_state(TASK_RUNNING);
+		ntimes--;
+		pend = brcmf_get_pend_8021x_cnt(drvr_priv);
+	}
+ return pend;
+}
+
+#ifdef BCMDBG
+int brcmf_write_to_file(struct brcmf_pub *drvr, u8 *buf, int size)
+{
+ int ret = 0;
+ struct file *fp;
+ mm_segment_t old_fs;
+ loff_t pos = 0;
+
+ /* change to KERNEL_DS address limit */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ /* open file to write */
+ fp = filp_open("/tmp/mem_dump", O_WRONLY | O_CREAT, 0640);
+	if (IS_ERR(fp)) {	/* filp_open() returns an ERR_PTR on failure */
+		brcmf_dbg(ERROR, "open file error\n");
+		ret = -1;
+		fp = NULL;
+		goto exit;
+	}
+
+ /* Write buf to file */
+ fp->f_op->write(fp, buf, size, &pos);
+
+exit:
+ /* free buf before return */
+ kfree(buf);
+ /* close file before return */
+ if (fp)
+ filp_close(fp, current->files);
+ /* restore previous address limit */
+ set_fs(old_fs);
+
+ return ret;
+}
+#endif /* BCMDBG */
--- /dev/null
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/printk.h>
+#include <linux/pci_ids.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/card.h>
+#include <linux/semaphore.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+#include <defs.h>
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include <brcm_hw_ids.h>
+#include <soc.h>
+#include "sdio_host.h"
+
+#define DCMD_RESP_TIMEOUT  2000	/* in milliseconds */
+
+#ifdef BCMDBG
+
+#define BRCMF_TRAP_INFO_SIZE 80
+
+#define CBUF_LEN (128)
+
+struct rte_log_le {
+ __le32 buf; /* Can't be pointer on (64-bit) hosts */
+ __le32 buf_size;
+ __le32 idx;
+ char *_buf_compat; /* Redundant pointer for backward compat. */
+};
+
+struct rte_console {
+ /* Virtual UART
+ * When there is no UART (e.g. Quickturn),
+ * the host should write a complete
+ * input line directly into cbuf and then write
+ * the length into vcons_in.
+ * This may also be used when there is a real UART
+ * (at risk of conflicting with
+ * the real UART). vcons_out is currently unused.
+ */
+ uint vcons_in;
+ uint vcons_out;
+
+ /* Output (logging) buffer
+ * Console output is written to a ring buffer log_buf at index log_idx.
+ * The host may read the output when it sees log_idx advance.
+ * Output will be lost if the output wraps around faster than the host
+ * polls.
+ */
+ struct rte_log_le log_le;
+
+ /* Console input line buffer
+ * Characters are read one at a time into cbuf
+ * until <CR> is received, then
+ * the buffer is processed as a command line.
+ * Also used for virtual UART.
+ */
+ uint cbuf_idx;
+ char cbuf[CBUF_LEN];
+};
+
+#endif /* BCMDBG */
+#include <chipcommon.h>
+
+#include "dhd.h"
+#include "dhd_bus.h"
+#include "dhd_proto.h"
+#include "dhd_dbg.h"
+#include <bcmchip.h>
+
+#define TXQLEN 2048 /* bulk tx queue length */
+#define TXHI (TXQLEN - 256) /* turn on flow control above TXHI */
+#define TXLOW (TXHI - 256) /* turn off flow control below TXLOW */
+#define PRIOMASK 7
+
+#define TXRETRIES 2 /* # of retries for tx frames */
+
+#define BRCMF_RXBOUND	50	/* Default for max rx frames in
+				 one scheduling pass */
+
+#define BRCMF_TXBOUND	20	/* Default for max tx frames in
+				 one scheduling pass */
+
+#define BRCMF_TXMINMAX 1 /* Max tx frames if rx still pending */
+
+#define MEMBLOCK 2048 /* Block size used for downloading
+ of dongle image */
+#define MAX_DATA_BUF (32 * 1024) /* Must be large enough to hold
+ biggest possible glom */
+
+#define BRCMF_FIRSTREAD (1 << 6)
+
+/* SBSDIO_DEVICE_CTL */
+
+/* 1: device will assert busy signal when receiving CMD53 */
+#define SBSDIO_DEVCTL_SETBUSY 0x01
+/* 1: assertion of sdio interrupt is synchronous to the sdio clock */
+#define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02
+/* 1: mask all interrupts to host except the chipActive (rev 8) */
+#define SBSDIO_DEVCTL_CA_INT_ONLY 0x04
+/* 1: isolate internal sdio signals, put external pads in tri-state; requires
+ * sdio bus power cycle to clear (rev 9) */
+#define SBSDIO_DEVCTL_PADS_ISO 0x08
+/* Force SD->SB reset mapping (rev 11) */
+#define SBSDIO_DEVCTL_SB_RST_CTL 0x30
+/* Determined by CoreControl bit */
+#define SBSDIO_DEVCTL_RST_CORECTL 0x00
+/* Force backplane reset */
+#define SBSDIO_DEVCTL_RST_BPRESET 0x10
+/* Force no backplane reset */
+#define SBSDIO_DEVCTL_RST_NOBPRESET 0x20
+
+/* SBSDIO_FUNC1_CHIPCLKCSR */
+
+/* Force ALP request to backplane */
+#define SBSDIO_FORCE_ALP 0x01
+/* Force HT request to backplane */
+#define SBSDIO_FORCE_HT 0x02
+/* Force ILP request to backplane */
+#define SBSDIO_FORCE_ILP 0x04
+/* Make ALP ready (power up xtal) */
+#define SBSDIO_ALP_AVAIL_REQ 0x08
+/* Make HT ready (power up PLL) */
+#define SBSDIO_HT_AVAIL_REQ 0x10
+/* Squelch clock requests from HW */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
+/* Status: ALP is ready */
+#define SBSDIO_ALP_AVAIL 0x40
+/* Status: HT is ready */
+#define SBSDIO_HT_AVAIL 0x80
+
+#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+
+#define SBSDIO_CLKAV(regval, alponly) \
+ (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
+
+/* direct(mapped) cis space */
+
+/* MAPPED common CIS address */
+#define SBSDIO_CIS_BASE_COMMON 0x1000
+/* maximum bytes in one CIS */
+#define SBSDIO_CIS_SIZE_LIMIT 0x200
+/* cis offset addr is < 17 bits */
+#define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF
+
+/* manfid tuple length, include tuple, link bytes */
+#define SBSDIO_CIS_MANFID_TUPLE_LEN 6
+
+/* intstatus */
+#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
+#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
+#define I_SMB_SW2 (1 << 2) /* To SB Mail S/W interrupt 2 */
+#define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */
+#define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */
+#define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */
+#define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */
+#define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */
+#define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */
+#define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */
+#define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */
+#define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */
+#define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */
+#define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */
+#define I_PC (1 << 10) /* descriptor error */
+#define I_PD (1 << 11) /* data error */
+#define I_DE (1 << 12) /* Descriptor protocol Error */
+#define I_RU (1 << 13) /* Receive descriptor Underflow */
+#define I_RO (1 << 14) /* Receive fifo Overflow */
+#define I_XU (1 << 15) /* Transmit fifo Underflow */
+#define I_RI (1 << 16) /* Receive Interrupt */
+#define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */
+#define I_XMTDATA_AVAIL (1 << 23) /* bits in fifo */
+#define I_XI (1 << 24) /* Transmit Interrupt */
+#define I_RF_TERM (1 << 25) /* Read Frame Terminate */
+#define I_WF_TERM (1 << 26) /* Write Frame Terminate */
+#define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */
+#define I_SBINT (1 << 28) /* sbintstatus Interrupt */
+#define I_CHIPACTIVE (1 << 29) /* chip from doze to active state */
+#define I_SRESET (1 << 30) /* CCCR RES interrupt */
+#define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */
+#define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)
+#define I_DMA (I_RI | I_XI | I_ERRORS)
+
+/* corecontrol */
+#define CC_CISRDY (1 << 0) /* CIS Ready */
+#define CC_BPRESEN (1 << 1) /* CCCR RES signal */
+#define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */
+#define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation */
+#define CC_XMTDATAAVAIL_MODE (1 << 4)
+#define CC_XMTDATAAVAIL_CTRL (1 << 5)
+
+/* SDA_FRAMECTRL */
+#define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */
+#define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */
+#define SFC_CRC4WOOS (1 << 2) /* CRC error for write out of sync */
+#define SFC_ABORTALL (1 << 3) /* Abort all in-progress frames */
+
+/* HW frame tag */
+#define SDPCM_FRAMETAG_LEN 4 /* 2 bytes len, 2 bytes check val */
+
+/* Total length of frame header for dongle protocol */
+#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
+#define SDPCM_RESERVE (SDPCM_HDRLEN + BRCMF_SDALIGN)
+
+/*
+ * Software allocation of To SB Mailbox resources
+ */
+
+/* tosbmailbox bits corresponding to intstatus bits */
+#define SMB_NAK (1 << 0) /* Frame NAK */
+#define SMB_INT_ACK (1 << 1) /* Host Interrupt ACK */
+#define SMB_USE_OOB (1 << 2) /* Use OOB Wakeup */
+#define SMB_DEV_INT (1 << 3) /* Miscellaneous Interrupt */
+
+/* tosbmailboxdata */
+#define SMB_DATA_VERSION_SHIFT 16 /* host protocol version */
+
+/*
+ * Software allocation of To Host Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_HMB_FC_STATE I_HMB_SW0 /* Flow Control State */
+#define I_HMB_FC_CHANGE I_HMB_SW1 /* Flow Control State Changed */
+#define I_HMB_FRAME_IND I_HMB_SW2 /* Frame Indication */
+#define I_HMB_HOST_INT I_HMB_SW3 /* Miscellaneous Interrupt */
+
+/* tohostmailboxdata */
+#define HMB_DATA_NAKHANDLED 1 /* retransmit NAK'd frame */
+#define HMB_DATA_DEVREADY 2 /* talk to host after enable */
+#define HMB_DATA_FC 4 /* per prio flowcontrol update flag */
+#define HMB_DATA_FWREADY 8 /* fw ready for protocol activity */
+
+#define HMB_DATA_FCDATA_MASK 0xff000000
+#define HMB_DATA_FCDATA_SHIFT 24
+
+#define HMB_DATA_VERSION_MASK 0x00ff0000
+#define HMB_DATA_VERSION_SHIFT 16
+
+/*
+ * Software-defined protocol header
+ */
+
+/* Current protocol version */
+#define SDPCM_PROT_VERSION 4
+
+/* SW frame header */
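+/* byte 0: sequence number, byte 1 (low nibble): channel, byte 2: length
+ * of the next frame, byte 3: data offset, byte 4: flow-control mask,
+ * byte 5: credit window
+ */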
+#define SDPCM_PACKET_SEQUENCE(p) (((u8 *)p)[0] & 0xff)
+
+#define SDPCM_CHANNEL_MASK 0x00000f00
+#define SDPCM_CHANNEL_SHIFT 8
+#define SDPCM_PACKET_CHANNEL(p) (((u8 *)p)[1] & 0x0f)
+
+#define SDPCM_NEXTLEN_OFFSET 2
+
+/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
+#define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */
+#define SDPCM_DOFFSET_VALUE(p) (((u8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
+#define SDPCM_DOFFSET_MASK 0xff000000
+#define SDPCM_DOFFSET_SHIFT 24
+#define SDPCM_FCMASK_OFFSET 4 /* Flow control */
+#define SDPCM_FCMASK_VALUE(p) (((u8 *)p)[SDPCM_FCMASK_OFFSET] & 0xff)
+#define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */
+#define SDPCM_WINDOW_VALUE(p) (((u8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
+
+#define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */
+
+/* logical channel numbers */
+#define SDPCM_CONTROL_CHANNEL 0 /* Control channel Id */
+#define SDPCM_EVENT_CHANNEL		1	/* Async Event Indication Channel Id */
+#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */
+#define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets */
+#define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */
+
+#define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for 8bit frame seq */
+
+#define SDPCM_GLOMDESC(p) (((u8 *)p)[1] & 0x80)
+
+/*
+ * Shared structure between dongle and the host.
+ * The structure contains pointers to trap or assert information.
+ */
+#define SDPCM_SHARED_VERSION 0x0002
+#define SDPCM_SHARED_VERSION_MASK 0x00FF
+#define SDPCM_SHARED_ASSERT_BUILT 0x0100
+#define SDPCM_SHARED_ASSERT 0x0200
+#define SDPCM_SHARED_TRAP 0x0400
+
+/* Space for header read, limit for data packets */
+#define MAX_HDR_READ (1 << 6)
+#define MAX_RX_DATASZ 2048
+
+/* Maximum milliseconds to wait for F2 to come up */
+#define BRCMF_WAIT_F2RDY 3000
+
+/* Bump up limit on waiting for HT to account for first startup;
+ * if the image is doing a CRC calculation before programming the PMU
+ * for HT availability, it could take a couple hundred ms more, so
+ * max out at 1 second (1000000 us).
+ */
+#undef PMU_MAX_TRANSITION_DLY
+#define PMU_MAX_TRANSITION_DLY 1000000
+
+/* Value for ChipClockCSR during initial setup */
+#define BRCMF_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | \
+ SBSDIO_ALP_AVAIL_REQ)
+
+/* Flags for SDH calls */
+#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
+
+/* sbimstate */
+#define SBIM_IBE 0x20000 /* inbanderror */
+#define SBIM_TO 0x40000 /* timeout */
+#define SBIM_BY 0x01800000 /* busy (sonics >= 2.3) */
+#define SBIM_RJ 0x02000000 /* reject (sonics >= 2.3) */
+
+/* sbtmstatelow */
+
+/* reset */
+#define SBTML_RESET 0x0001
+/* reject field */
+#define SBTML_REJ_MASK 0x0006
+/* reject */
+#define SBTML_REJ 0x0002
+/* temporary reject, for error recovery */
+#define SBTML_TMPREJ 0x0004
+
+/* Shift to locate the SI control flags in sbtml */
+#define SBTML_SICF_SHIFT 16
+
+/* sbtmstatehigh */
+#define SBTMH_SERR 0x0001 /* serror */
+#define SBTMH_INT 0x0002 /* interrupt */
+#define SBTMH_BUSY 0x0004 /* busy */
+#define SBTMH_TO 0x0020 /* timeout (sonics >= 2.3) */
+
+/* Shift to locate the SI status flags in sbtmh */
+#define SBTMH_SISF_SHIFT 16
+
+/* sbidlow */
+#define SBIDL_INIT 0x80 /* initiator */
+
+/* sbidhigh */
+#define SBIDH_RC_MASK 0x000f /* revision code */
+#define SBIDH_RCE_MASK 0x7000 /* revision code extension field */
+#define SBIDH_RCE_SHIFT 8
+#define SBCOREREV(sbidh) \
+ ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | \
+ ((sbidh) & SBIDH_RC_MASK))
+#define SBIDH_CC_MASK 0x8ff0 /* core code */
+#define SBIDH_CC_SHIFT 4
+#define SBIDH_VC_MASK 0xffff0000 /* vendor code */
+#define SBIDH_VC_SHIFT 16
+
+/*
+ * Conversion of 802.1D priority to precedence level
+ */
+static uint prio2prec(u32 prio)
+{
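+	/* the XOR remaps BE and NONE so their precedence matches the
+	   802.1D ordering */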
+	return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ?
+	       (prio ^ 2) : prio;
+}
+
+/*
+ * Core reg address translation.
+ * Each macro returns a 32-bit byte address on the backplane bus.
+ */
+#define CORE_CC_REG(base, field) \
+ (base + offsetof(struct chipcregs, field))
+#define CORE_BUS_REG(base, field) \
+ (base + offsetof(struct sdpcmd_regs, field))
+#define CORE_SB(base, field) \
+ (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
+
+/* core registers */
+struct sdpcmd_regs {
+ u32 corecontrol; /* 0x00, rev8 */
+ u32 corestatus; /* rev8 */
+ u32 PAD[1];
+ u32 biststatus; /* rev8 */
+
+ /* PCMCIA access */
+ u16 pcmciamesportaladdr; /* 0x010, rev8 */
+ u16 PAD[1];
+ u16 pcmciamesportalmask; /* rev8 */
+ u16 PAD[1];
+ u16 pcmciawrframebc; /* rev8 */
+ u16 PAD[1];
+ u16 pcmciaunderflowtimer; /* rev8 */
+ u16 PAD[1];
+
+ /* interrupt */
+ u32 intstatus; /* 0x020, rev8 */
+ u32 hostintmask; /* rev8 */
+ u32 intmask; /* rev8 */
+ u32 sbintstatus; /* rev8 */
+ u32 sbintmask; /* rev8 */
+ u32 funcintmask; /* rev4 */
+ u32 PAD[2];
+ u32 tosbmailbox; /* 0x040, rev8 */
+ u32 tohostmailbox; /* rev8 */
+ u32 tosbmailboxdata; /* rev8 */
+ u32 tohostmailboxdata; /* rev8 */
+
+ /* synchronized access to registers in SDIO clock domain */
+ u32 sdioaccess; /* 0x050, rev8 */
+ u32 PAD[3];
+
+ /* PCMCIA frame control */
+ u8 pcmciaframectrl; /* 0x060, rev8 */
+ u8 PAD[3];
+ u8 pcmciawatermark; /* rev8 */
+ u8 PAD[155];
+
+ /* interrupt batching control */
+ u32 intrcvlazy; /* 0x100, rev8 */
+ u32 PAD[3];
+
+ /* counters */
+ u32 cmd52rd; /* 0x110, rev8 */
+ u32 cmd52wr; /* rev8 */
+ u32 cmd53rd; /* rev8 */
+ u32 cmd53wr; /* rev8 */
+ u32 abort; /* rev8 */
+ u32 datacrcerror; /* rev8 */
+ u32 rdoutofsync; /* rev8 */
+ u32 wroutofsync; /* rev8 */
+ u32 writebusy; /* rev8 */
+ u32 readwait; /* rev8 */
+ u32 readterm; /* rev8 */
+ u32 writeterm; /* rev8 */
+ u32 PAD[40];
+ u32 clockctlstatus; /* rev8 */
+ u32 PAD[7];
+
+ u32 PAD[128]; /* DMA engines */
+
+ /* SDIO/PCMCIA CIS region */
+ char cis[512]; /* 0x400-0x5ff, rev6 */
+
+ /* PCMCIA function control registers */
+ char pcmciafcr[256]; /* 0x600-6ff, rev6 */
+ u16 PAD[55];
+
+ /* PCMCIA backplane access */
+ u16 backplanecsr; /* 0x76E, rev6 */
+ u16 backplaneaddr0; /* rev6 */
+ u16 backplaneaddr1; /* rev6 */
+ u16 backplaneaddr2; /* rev6 */
+ u16 backplaneaddr3; /* rev6 */
+ u16 backplanedata0; /* rev6 */
+ u16 backplanedata1; /* rev6 */
+ u16 backplanedata2; /* rev6 */
+ u16 backplanedata3; /* rev6 */
+ u16 PAD[31];
+
+ /* sprom "size" & "blank" info */
+ u16 spromstatus; /* 0x7BE, rev2 */
+ u32 PAD[464];
+
+ u16 PAD[0x80];
+};
+
+#ifdef BCMDBG
+/* Device console log buffer state */
+struct brcmf_console {
+ uint count; /* Poll interval msec counter */
+ uint log_addr; /* Log struct address (fixed) */
+ struct rte_log_le log_le; /* Log struct (host copy) */
+ uint bufsize; /* Size of log buffer */
+ u8 *buf; /* Log buffer (host copy) */
+ uint last; /* Last buffer read index */
+};
+#endif /* BCMDBG */
+
+struct sdpcm_shared {
+ u32 flags;
+ u32 trap_addr;
+ u32 assert_exp_addr;
+ u32 assert_file_addr;
+ u32 assert_line;
+ u32 console_addr; /* Address of struct rte_console */
+ u32 msgtrace_addr;
+ u8 tag[32];
+};
+
+struct sdpcm_shared_le {
+ __le32 flags;
+ __le32 trap_addr;
+ __le32 assert_exp_addr;
+ __le32 assert_file_addr;
+ __le32 assert_line;
+ __le32 console_addr; /* Address of struct rte_console */
+ __le32 msgtrace_addr;
+ u8 tag[32];
+};
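+
+/*
+ * Minimal sketch (helper name illustrative, not defined in this file)
+ * of pulling the little-endian image read from the dongle into the
+ * host-order struct above; the remaining __le32 fields convert the
+ * same way:
+ *
+ *	static void sdpcm_shared_from_le(struct sdpcm_shared *sh,
+ *		const struct sdpcm_shared_le *le)
+ *	{
+ *		sh->flags = le32_to_cpu(le->flags);
+ *		sh->trap_addr = le32_to_cpu(le->trap_addr);
+ *		sh->console_addr = le32_to_cpu(le->console_addr);
+ *		memcpy(sh->tag, le->tag, sizeof(sh->tag));
+ *	}
+ */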
+
+/* misc chip info needed by some of the routines */
+struct chip_info {
+ u32 chip;
+ u32 chiprev;
+ u32 cccorebase;
+ u32 ccrev;
+ u32 cccaps;
+ u32 buscorebase; /* 32 bits backplane bus address */
+ u32 buscorerev;
+ u32 buscoretype;
+ u32 ramcorebase;
+ u32 armcorebase;
+ u32 pmurev;
+ u32 ramsize;
+};
+
+/* Private data for SDIO bus interaction */
+struct brcmf_bus {
+ struct brcmf_pub *drvr;
+
+ struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
+ struct chip_info *ci; /* Chip info struct */
+ char *vars; /* Variables (from CIS and/or other) */
+ uint varsz; /* Size of variables buffer */
+
+ u32 ramsize; /* Size of RAM in SOCRAM (bytes) */
+
+ u32 hostintmask; /* Copy of Host Interrupt Mask */
+ u32 intstatus; /* Intstatus bits (events) pending */
+ bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */
+ bool fcstate; /* State of dongle flow-control */
+
+ uint blocksize; /* Block size of SDIO transfers */
+ uint roundup; /* Max roundup limit */
+
+ struct pktq txq; /* Transmit packet queue (length drives flow control) */
+ u8 flowcontrol; /* per prio flow control bitmask */
+ u8 tx_seq; /* Transmit sequence number (next) */
+ u8 tx_max; /* Maximum transmit sequence allowed */
+
+ u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN];
+ u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
+ u16 nextlen; /* Next Read Len from last header */
+ u8 rx_seq; /* Receive sequence number (expected) */
+ bool rxskip; /* Skip receive (awaiting NAK ACK) */
+
+ uint rxbound; /* Rx frames to read before resched */
+ uint txbound; /* Tx frames to send before resched */
+ uint txminmax;
+
+ struct sk_buff *glomd; /* Packet containing glomming descriptor */
+ struct sk_buff *glom; /* Packet chain for glommed superframe */
+ uint glomerr; /* Glom packet read errors */
+
+ u8 *rxbuf; /* Buffer for receiving control packets */
+ uint rxblen; /* Allocated length of rxbuf */
+ u8 *rxctl; /* Aligned pointer into rxbuf */
+ u8 *databuf; /* Buffer for receiving big glom packet */
+ u8 *dataptr; /* Aligned pointer into databuf */
+ uint rxlen; /* Length of valid data in buffer */
+
+ u8 sdpcm_ver; /* Bus protocol reported by dongle */
+
+ bool intr; /* Use interrupts */
+ bool poll; /* Use polling */
+ bool ipend; /* Device interrupt is pending */
+ uint intrcount; /* Count of device interrupt callbacks */
+ uint lastintrs; /* Count as of last watchdog timer */
+ uint spurious; /* Count of spurious interrupts */
+ uint pollrate; /* Ticks between device polls */
+ uint polltick; /* Tick counter */
+ uint pollcnt; /* Count of active polls */
+
+#ifdef BCMDBG
+ uint console_interval;
+ struct brcmf_console console; /* Console output polling support */
+ uint console_addr; /* Console address from shared struct */
+#endif /* BCMDBG */
+
+ uint regfails; /* Count of R_REG failures */
+
+ uint clkstate; /* State of sd and backplane clock(s) */
+ bool activity; /* Activity flag for clock down */
+ s32 idletime; /* Control for activity timeout */
+ s32 idlecount; /* Activity timeout counter */
+ s32 idleclock; /* How to set bus driver when idle */
+ s32 sd_rxchain;
+ bool use_rxchain; /* If brcmf should use PKT chains */
+ bool sleeping; /* Is SDIO bus sleeping? */
+ bool rxflow_mode; /* Rx flow control mode */
+ bool rxflow; /* Is rx flow control on */
+ bool alp_only; /* Don't use HT clock (ALP only) */
+/* Field to decide if rx of control frames happen in rxbuf or lb-pool */
+ bool usebufpool;
+
+ /* Some additional counters */
+ uint tx_sderrs; /* Count of tx attempts with sd errors */
+ uint fcqueued; /* Tx packets that got queued */
+ uint rxrtx; /* Count of rtx requests (NAK to dongle) */
+ uint rx_toolong; /* Receive frames too long to receive */
+ uint rxc_errors; /* SDIO errors when reading control frames */
+ uint rx_hdrfail; /* SDIO errors on header reads */
+ uint rx_badhdr; /* Bad received headers (out of sync?) */
+ uint rx_badseq; /* Mismatched rx sequence number */
+ uint fc_rcvd; /* Number of flow-control events received */
+ uint fc_xoff; /* Number which turned on flow-control */
+ uint fc_xon; /* Number which turned off flow-control */
+ uint rxglomfail; /* Failed deglom attempts */
+ uint rxglomframes; /* Number of glom frames (superframes) */
+ uint rxglompkts; /* Number of packets from glom frames */
+ uint f2rxhdrs; /* Number of header reads */
+ uint f2rxdata; /* Number of frame data reads */
+ uint f2txdata; /* Number of f2 frame writes */
+ uint f1regdata; /* Number of f1 register accesses */
+
+ u8 *ctrl_frame_buf;
+ u32 ctrl_frame_len;
+ bool ctrl_frame_stat;
+
+ spinlock_t txqlock;
+ wait_queue_head_t ctrl_wait;
+ wait_queue_head_t dcmd_resp_wait;
+
+ struct timer_list timer;
+ struct completion watchdog_wait;
+ struct task_struct *watchdog_tsk;
+ bool wd_timer_valid;
+ uint save_ms;
+
+ struct task_struct *dpc_tsk;
+ struct completion dpc_wait;
+
+ struct semaphore sdsem;
+
+ const char *fw_name;
+ const struct firmware *firmware;
+ const char *nv_name;
+ u32 fw_ptr;
+};
+
+struct sbconfig {
+ u32 PAD[2];
+ u32 sbipsflag; /* initiator port ocp slave flag */
+ u32 PAD[3];
+ u32 sbtpsflag; /* target port ocp slave flag */
+ u32 PAD[11];
+ u32 sbtmerrloga; /* (sonics >= 2.3) */
+ u32 PAD;
+ u32 sbtmerrlog; /* (sonics >= 2.3) */
+ u32 PAD[3];
+ u32 sbadmatch3; /* address match3 */
+ u32 PAD;
+ u32 sbadmatch2; /* address match2 */
+ u32 PAD;
+ u32 sbadmatch1; /* address match1 */
+ u32 PAD[7];
+ u32 sbimstate; /* initiator agent state */
+ u32 sbintvec; /* interrupt mask */
+ u32 sbtmstatelow; /* target state */
+ u32 sbtmstatehigh; /* target state */
+ u32 sbbwa0; /* bandwidth allocation table0 */
+ u32 PAD;
+ u32 sbimconfiglow; /* initiator configuration */
+ u32 sbimconfighigh; /* initiator configuration */
+ u32 sbadmatch0; /* address match0 */
+ u32 PAD;
+ u32 sbtmconfiglow; /* target configuration */
+ u32 sbtmconfighigh; /* target configuration */
+ u32 sbbconfig; /* broadcast configuration */
+ u32 PAD;
+ u32 sbbstate; /* broadcast state */
+ u32 PAD[3];
+ u32 sbactcnfg; /* activate configuration */
+ u32 PAD[3];
+ u32 sbflagst; /* current sbflags */
+ u32 PAD[3];
+ u32 sbidlow; /* identification */
+ u32 sbidhigh; /* identification */
+};
+
+/* clkstate */
+#define CLK_NONE 0
+#define CLK_SDONLY 1
+#define CLK_PENDING 2 /* Awaiting clock-available interrupt */
+#define CLK_AVAIL 3
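+
+/*
+ * Clock state machine, driven by brcmf_sdbrcm_clkctl() below:
+ *
+ *	CLK_NONE <-> CLK_SDONLY <-> CLK_AVAIL
+ *	                  \-> CLK_PENDING -> CLK_AVAIL
+ *
+ * CLK_PENDING is entered only when the caller passes pendok and HT is
+ * not yet available, in which case the clock-available interrupt
+ * completes the transition.
+ */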
+
+#ifdef BCMDBG
+static int qcount[NUMPRIO];
+static int tx_packets[NUMPRIO];
+#endif /* BCMDBG */
+
+#define SDIO_DRIVE_STRENGTH 6 /* in milliamps */
+
+#define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL)
+
+/* Retry count for register access failures */
+static const uint retry_limit = 2;
+
+/* Limit on rounding up frames */
+static const uint max_roundup = 512;
+
+#define ALIGNMENT 4
+
+static void pkt_align(struct sk_buff *p, int len, int align)
+{
+ uint datalign;
+ datalign = (unsigned long)(p->data);
+ datalign = roundup(datalign, (align)) - datalign;
+ if (datalign)
+ skb_pull(p, datalign);
+ __skb_trim(p, len);
+}
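+
+/*
+ * Example: a p->data address ending in ...02 with align == 4 gives
+ * datalign == 2, so skb_pull() advances the data pointer two bytes
+ * before the buffer is trimmed to len.
+ */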
+
+/* Check whether the dongle has offered any tx window credit */
+static bool data_ok(struct brcmf_bus *bus)
+{
+ return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
+ ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
+}
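+
+/*
+ * The window arithmetic is modulo 256: tx_seq == 250 with tx_max == 2
+ * yields (u8)(2 - 250) == 8 frames of credit. A result with bit 7 set
+ * (a window of more than 127) is treated as bogus, i.e. no window.
+ */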
+
+/*
+ * Reads a register in the SDIO hardware block. This block occupies a
+ * series of addresses on the 32-bit backplane bus.
+ */
+static void
+r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
+{
+ *retryvar = 0;
+ do {
+ *regvar = brcmf_sdcard_reg_read(bus->sdiodev,
+ bus->ci->buscorebase + reg_offset, sizeof(u32));
+ } while (brcmf_sdcard_regfail(bus->sdiodev) &&
+ (++(*retryvar) <= retry_limit));
+ if (*retryvar) {
+ bus->regfails += (*retryvar-1);
+ if (*retryvar > retry_limit) {
+ brcmf_dbg(ERROR, "FAILED READ %Xh\n", reg_offset);
+ *regvar = 0;
+ }
+ }
+}
+
+static void
+w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar)
+{
+ *retryvar = 0;
+ do {
+ brcmf_sdcard_reg_write(bus->sdiodev,
+ bus->ci->buscorebase + reg_offset,
+ sizeof(u32), regval);
+ } while (brcmf_sdcard_regfail(bus->sdiodev) &&
+ (++(*retryvar) <= retry_limit));
+ if (*retryvar) {
+ bus->regfails += (*retryvar-1);
+ if (*retryvar > retry_limit)
+ brcmf_dbg(ERROR, "FAILED REGISTER WRITE %Xh\n",
+ reg_offset);
+ }
+}
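+
+/*
+ * Typical use of the retrying helpers (the pattern recurs throughout
+ * this file):
+ *
+ *	uint retries = 0;
+ *	u32 status;
+ *
+ *	r_sdreg32(bus, &status,
+ *		offsetof(struct sdpcmd_regs, intstatus), &retries);
+ *
+ * On persistent failure (more than retry_limit attempts) the read
+ * helper logs an error and forces *regvar to 0.
+ */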
+
+#define PKT_AVAILABLE() (intstatus & I_HMB_FRAME_IND)
+
+#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
+
+/* Packet free: applies unconditionally for sdio and sdspi;
+ * for the gspi bus it is conditional on a bufpool being present.
+ */
+static void brcmf_sdbrcm_pktfree2(struct brcmf_bus *bus, struct sk_buff *pkt)
+{
+ if (bus->usebufpool)
+ brcmu_pkt_buf_free_skb(pkt);
+}
+
+/* Turn backplane clock on or off */
+static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
+{
+ int err;
+ u8 clkctl, clkreq, devctl;
+ unsigned long timeout;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ clkctl = 0;
+
+ if (on) {
+ /* Request HT Avail */
+ clkreq =
+ bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
+
+ if ((bus->ci->chip == BCM4329_CHIP_ID)
+ && (bus->ci->chiprev == 0))
+ clkreq |= SBSDIO_FORCE_ALP;
+
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+ if (err) {
+ brcmf_dbg(ERROR, "HT Avail request error: %d\n", err);
+ return -EBADE;
+ }
+
+ if (pendok && ((bus->ci->buscoretype == PCMCIA_CORE_ID)
+ && (bus->ci->buscorerev == 9))) {
+ u32 dummy, retries;
+ r_sdreg32(bus, &dummy,
+ offsetof(struct sdpcmd_regs, clockctlstatus),
+ &retries);
+ }
+
+ /* Check current status */
+ clkctl = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (err) {
+ brcmf_dbg(ERROR, "HT Avail read error: %d\n", err);
+ return -EBADE;
+ }
+
+ /* Go to pending and await interrupt if appropriate */
+ if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
+ /* Allow only clock-available interrupt */
+ devctl = brcmf_sdcard_cfg_read(bus->sdiodev,
+ SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, &err);
+ if (err) {
+ brcmf_dbg(ERROR, "Devctl error setting CA: %d\n",
+ err);
+ return -EBADE;
+ }
+
+ devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, devctl, &err);
+ brcmf_dbg(INFO, "CLKCTL: set PENDING\n");
+ bus->clkstate = CLK_PENDING;
+
+ return 0;
+ } else if (bus->clkstate == CLK_PENDING) {
+ /* Cancel CA-only interrupt filter */
+ devctl =
+ brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, &err);
+ devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, devctl, &err);
+ }
+
+ /* Otherwise, wait here (polling) for HT Avail */
+ timeout = jiffies +
+ msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
+ while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+ clkctl = brcmf_sdcard_cfg_read(bus->sdiodev,
+ SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ &err);
+ if (time_after(jiffies, timeout))
+ break;
+ else
+ usleep_range(5000, 10000);
+ }
+ if (err) {
+ brcmf_dbg(ERROR, "HT Avail request error: %d\n", err);
+ return -EBADE;
+ }
+ if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
+ brcmf_dbg(ERROR, "HT Avail timeout (%d): clkctl 0x%02x\n",
+ PMU_MAX_TRANSITION_DLY, clkctl);
+ return -EBADE;
+ }
+
+ /* Mark clock available */
+ bus->clkstate = CLK_AVAIL;
+ brcmf_dbg(INFO, "CLKCTL: turned ON\n");
+
+#if defined(BCMDBG)
+ if (!bus->alp_only) {
+ if (SBSDIO_ALPONLY(clkctl))
+ brcmf_dbg(ERROR, "HT Clock should be on\n");
+ }
+#endif /* defined (BCMDBG) */
+
+ bus->activity = true;
+ } else {
+ clkreq = 0;
+
+ if (bus->clkstate == CLK_PENDING) {
+ /* Cancel CA-only interrupt filter */
+ devctl = brcmf_sdcard_cfg_read(bus->sdiodev,
+ SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, &err);
+ devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, devctl, &err);
+ }
+
+ bus->clkstate = CLK_SDONLY;
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+ brcmf_dbg(INFO, "CLKCTL: turned OFF\n");
+ if (err) {
+ brcmf_dbg(ERROR, "Failed access turning clock off: %d\n",
+ err);
+ return -EBADE;
+ }
+ }
+ return 0;
+}
+
+/* Change idle/active SD state */
+static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (on)
+ bus->clkstate = CLK_SDONLY;
+ else
+ bus->clkstate = CLK_NONE;
+
+ return 0;
+}
+
+/* Transition SD and backplane clock readiness */
+static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok)
+{
+#ifdef BCMDBG
+ uint oldstate = bus->clkstate;
+#endif /* BCMDBG */
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Early exit if we're already there */
+ if (bus->clkstate == target) {
+ if (target == CLK_AVAIL) {
+ brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+ bus->activity = true;
+ }
+ return 0;
+ }
+
+ switch (target) {
+ case CLK_AVAIL:
+ /* Make sure SD clock is available */
+ if (bus->clkstate == CLK_NONE)
+ brcmf_sdbrcm_sdclk(bus, true);
+ /* Now request HT Avail on the backplane */
+ brcmf_sdbrcm_htclk(bus, true, pendok);
+ brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+ bus->activity = true;
+ break;
+
+ case CLK_SDONLY:
+ /* Remove HT request, or bring up SD clock */
+ if (bus->clkstate == CLK_NONE)
+ brcmf_sdbrcm_sdclk(bus, true);
+ else if (bus->clkstate == CLK_AVAIL)
+ brcmf_sdbrcm_htclk(bus, false, false);
+ else
+ brcmf_dbg(ERROR, "request for %d -> %d\n",
+ bus->clkstate, target);
+ brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+ break;
+
+ case CLK_NONE:
+ /* Make sure to remove HT request */
+ if (bus->clkstate == CLK_AVAIL)
+ brcmf_sdbrcm_htclk(bus, false, false);
+ /* Now remove the SD clock */
+ brcmf_sdbrcm_sdclk(bus, false);
+ brcmf_sdbrcm_wd_timer(bus, 0);
+ break;
+ }
+#ifdef BCMDBG
+ brcmf_dbg(INFO, "%d -> %d\n", oldstate, bus->clkstate);
+#endif /* BCMDBG */
+
+ return 0;
+}
+
+static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
+{
+ uint retries = 0;
+
+ brcmf_dbg(INFO, "request %s (currently %s)\n",
+ sleep ? "SLEEP" : "WAKE",
+ bus->sleeping ? "SLEEP" : "WAKE");
+
+ /* Done if we're already in the requested state */
+ if (sleep == bus->sleeping)
+ return 0;
+
+ /* Going to sleep: set the alarm and turn off the lights... */
+ if (sleep) {
+ /* Don't sleep if something is pending */
+ if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq))
+ return -EBUSY;
+
+ /* Make sure the controller has the bus up */
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+
+ /* Tell device to start using OOB wakeup */
+ w_sdreg32(bus, SMB_USE_OOB,
+ offsetof(struct sdpcmd_regs, tosbmailbox), &retries);
+ if (retries > retry_limit)
+ brcmf_dbg(ERROR, "CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n");
+
+ /* Turn off our contribution to the HT clock request */
+ brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
+
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
+
+ /* Isolate the bus */
+ if (bus->ci->chip != BCM4329_CHIP_ID) {
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL,
+ SBSDIO_DEVCTL_PADS_ISO, NULL);
+ }
+
+ /* Change state */
+ bus->sleeping = true;
+
+ } else {
+ /* Waking up: bus power up is ok, set local state */
+
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+
+ /* Force pad isolation off if possible
+ (in case power never toggled) */
+ if ((bus->ci->buscoretype == PCMCIA_CORE_ID)
+ && (bus->ci->buscorerev >= 10))
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, 0, NULL);
+
+ /* Make sure the controller has the bus up */
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+
+ /* Send misc interrupt to indicate OOB not needed */
+ w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, tosbmailboxdata),
+ &retries);
+ if (retries <= retry_limit)
+ w_sdreg32(bus, SMB_DEV_INT,
+ offsetof(struct sdpcmd_regs, tosbmailbox),
+ &retries);
+
+ if (retries > retry_limit)
+ brcmf_dbg(ERROR, "CANNOT SIGNAL CHIP TO CLEAR OOB!!\n");
+
+ /* Make sure we have SD bus access */
+ brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
+
+ /* Change state */
+ bus->sleeping = false;
+ }
+
+ return 0;
+}
+
+static void bus_wake(struct brcmf_bus *bus)
+{
+ if (bus->sleeping)
+ brcmf_sdbrcm_bussleep(bus, false);
+}
+
+static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus)
+{
+ u32 intstatus = 0;
+ u32 hmb_data;
+ u8 fcbits;
+ uint retries = 0;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Read mailbox data and ack that we did so */
+ r_sdreg32(bus, &hmb_data,
+ offsetof(struct sdpcmd_regs, tohostmailboxdata), &retries);
+
+ if (retries <= retry_limit)
+ w_sdreg32(bus, SMB_INT_ACK,
+ offsetof(struct sdpcmd_regs, tosbmailbox), &retries);
+ bus->f1regdata += 2;
+
+ /* Dongle recomposed rx frames, accept them again */
+ if (hmb_data & HMB_DATA_NAKHANDLED) {
+ brcmf_dbg(INFO, "Dongle reports NAK handled, expect rtx of %d\n",
+ bus->rx_seq);
+ if (!bus->rxskip)
+ brcmf_dbg(ERROR, "unexpected NAKHANDLED!\n");
+
+ bus->rxskip = false;
+ intstatus |= I_HMB_FRAME_IND;
+ }
+
+ /*
+ * DEVREADY does not occur with gSPI.
+ */
+ if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
+ bus->sdpcm_ver =
+ (hmb_data & HMB_DATA_VERSION_MASK) >>
+ HMB_DATA_VERSION_SHIFT;
+ if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
+ brcmf_dbg(ERROR, "Version mismatch, dongle reports %d, "
+ "expecting %d\n",
+ bus->sdpcm_ver, SDPCM_PROT_VERSION);
+ else
+ brcmf_dbg(INFO, "Dongle ready, protocol version %d\n",
+ bus->sdpcm_ver);
+ }
+
+ /*
+ * Flow control has been moved into the RX headers, so this
+ * out-of-band method is no longer used; it is handled here only to
+ * remain backward compatible with older dongles.
+ */
+ if (hmb_data & HMB_DATA_FC) {
+ fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >>
+ HMB_DATA_FCDATA_SHIFT;
+
+ if (fcbits & ~bus->flowcontrol)
+ bus->fc_xoff++;
+
+ if (bus->flowcontrol & ~fcbits)
+ bus->fc_xon++;
+
+ bus->fc_rcvd++;
+ bus->flowcontrol = fcbits;
+ }
+
+ /* Shouldn't be any others */
+ if (hmb_data & ~(HMB_DATA_DEVREADY |
+ HMB_DATA_NAKHANDLED |
+ HMB_DATA_FC |
+ HMB_DATA_FWREADY |
+ HMB_DATA_FCDATA_MASK | HMB_DATA_VERSION_MASK))
+ brcmf_dbg(ERROR, "Unknown mailbox data content: 0x%02x\n",
+ hmb_data);
+
+ return intstatus;
+}
+
+static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx)
+{
+ uint retries = 0;
+ u16 lastrbc;
+ u8 hi, lo;
+ int err;
+
+ brcmf_dbg(ERROR, "%sterminate frame%s\n",
+ abort ? "abort command, " : "",
+ rtx ? ", send NAK" : "");
+
+ if (abort)
+ brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
+
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_FRAMECTRL,
+ SFC_RF_TERM, &err);
+ bus->f1regdata++;
+
+ /* Wait until the packet has been flushed (device/FIFO stable) */
+ for (lastrbc = retries = 0xffff; retries > 0; retries--) {
+ hi = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_RFRAMEBCHI, NULL);
+ lo = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_RFRAMEBCLO, NULL);
+ bus->f1regdata += 2;
+
+ if ((hi == 0) && (lo == 0))
+ break;
+
+ if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
+ brcmf_dbg(ERROR, "count growing: last 0x%04x now 0x%04x\n",
+ lastrbc, (hi << 8) + lo);
+ }
+ lastrbc = (hi << 8) + lo;
+ }
+
+ if (!retries)
+ brcmf_dbg(ERROR, "count never zeroed: last 0x%04x\n", lastrbc);
+ else
+ brcmf_dbg(INFO, "flush took %d iterations\n", 0xffff - retries);
+
+ if (rtx) {
+ bus->rxrtx++;
+ w_sdreg32(bus, SMB_NAK,
+ offsetof(struct sdpcmd_regs, tosbmailbox), &retries);
+
+ bus->f1regdata++;
+ if (retries <= retry_limit)
+ bus->rxskip = true;
+ }
+
+ /* Clear partial in any case */
+ bus->nextlen = 0;
+
+ /* If we can't reach the device, signal failure */
+ if (err || brcmf_sdcard_regfail(bus->sdiodev))
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
+}
+
+static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
+{
+ u16 dlen, totlen;
+ u8 *dptr, num = 0;
+
+ u16 sublen, check;
+ struct sk_buff *pfirst, *plast, *pnext, *save_pfirst;
+
+ int errcode;
+ u8 chan, seq, doff, sfdoff;
+ u8 txmax;
+
+ int ifidx = 0;
+ bool usechain = bus->use_rxchain;
+
+ /* If packets, issue read(s) and send up packet chain */
+ /* Return sequence numbers consumed? */
+
+ brcmf_dbg(TRACE, "start: glomd %p glom %p\n", bus->glomd, bus->glom);
+
+ /* If there's a descriptor, generate the packet chain */
+ if (bus->glomd) {
+ pfirst = plast = pnext = NULL;
+ dlen = (u16) (bus->glomd->len);
+ dptr = bus->glomd->data;
+ if (!dlen || (dlen & 1)) {
+ brcmf_dbg(ERROR, "bad glomd len(%d), ignore descriptor\n",
+ dlen);
+ dlen = 0;
+ }
+
+ for (totlen = num = 0; dlen; num++) {
+ /* Get (and move past) next length */
+ sublen = get_unaligned_le16(dptr);
+ dlen -= sizeof(u16);
+ dptr += sizeof(u16);
+ if ((sublen < SDPCM_HDRLEN) ||
+ ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
+ brcmf_dbg(ERROR, "descriptor len %d bad: %d\n",
+ num, sublen);
+ pnext = NULL;
+ break;
+ }
+ if (sublen % BRCMF_SDALIGN) {
+ brcmf_dbg(ERROR, "sublen %d not multiple of %d\n",
+ sublen, BRCMF_SDALIGN);
+ usechain = false;
+ }
+ totlen += sublen;
+
+ /* For last frame, adjust read len so total
+ is a block multiple */
+ if (!dlen) {
+ sublen +=
+ (roundup(totlen, bus->blocksize) - totlen);
+ totlen = roundup(totlen, bus->blocksize);
+ }
+
+ /* Allocate/chain packet for next subframe */
+ pnext = brcmu_pkt_buf_get_skb(sublen + BRCMF_SDALIGN);
+ if (pnext == NULL) {
+ brcmf_dbg(ERROR, "bcm_pkt_buf_get_skb failed, num %d len %d\n",
+ num, sublen);
+ break;
+ }
+ if (!pfirst) {
+ pfirst = plast = pnext;
+ } else {
+ plast->next = pnext;
+ plast = pnext;
+ }
+
+ /* Adhere to start alignment requirements */
+ pkt_align(pnext, sublen, BRCMF_SDALIGN);
+ }
+
+ /* If all allocations succeeded, save packet chain
+ in bus structure */
+ if (pnext) {
+ brcmf_dbg(GLOM, "allocated %d-byte packet chain for %d subframes\n",
+ totlen, num);
+ if (BRCMF_GLOM_ON() && bus->nextlen &&
+ totlen != bus->nextlen) {
+ brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
+ bus->nextlen, totlen, rxseq);
+ }
+ bus->glom = pfirst;
+ pfirst = pnext = NULL;
+ } else {
+ if (pfirst)
+ brcmu_pkt_buf_free_skb(pfirst);
+ bus->glom = NULL;
+ num = 0;
+ }
+
+ /* Done with descriptor packet */
+ brcmu_pkt_buf_free_skb(bus->glomd);
+ bus->glomd = NULL;
+ bus->nextlen = 0;
+ }
+
+ /* Ok -- either we just generated a packet chain,
+ or had one from before */
+ if (bus->glom) {
+ if (BRCMF_GLOM_ON()) {
+ brcmf_dbg(GLOM, "try superframe read, packet chain:\n");
+ for (pnext = bus->glom; pnext; pnext = pnext->next) {
+ brcmf_dbg(GLOM, " %p: %p len 0x%04x (%d)\n",
+ pnext, (u8 *) (pnext->data),
+ pnext->len, pnext->len);
+ }
+ }
+
+ pfirst = bus->glom;
+ dlen = (u16) brcmu_pkttotlen(pfirst);
+
+ /* Do an SDIO read for the superframe. Configurable iovar to
+ * read directly into the chained packet, or allocate a large
+ * packet and copy into the chain.
+ */
+ if (usechain) {
+ errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
+ bus->sdiodev->sbwad,
+ SDIO_FUNC_2,
+ F2SYNC, (u8 *) pfirst->data, dlen,
+ pfirst);
+ } else if (bus->dataptr) {
+ errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
+ bus->sdiodev->sbwad,
+ SDIO_FUNC_2,
+ F2SYNC, bus->dataptr, dlen,
+ NULL);
+ sublen = (u16) brcmu_pktfrombuf(pfirst, 0, dlen,
+ bus->dataptr);
+ if (sublen != dlen) {
+ brcmf_dbg(ERROR, "FAILED TO COPY, dlen %d sublen %d\n",
+ dlen, sublen);
+ errcode = -1;
+ }
+ pnext = NULL;
+ } else {
+ brcmf_dbg(ERROR, "COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n",
+ dlen);
+ errcode = -1;
+ }
+ bus->f2rxdata++;
+
+ /* On failure, kill the superframe, allow a couple retries */
+ if (errcode < 0) {
+ brcmf_dbg(ERROR, "glom read of %d bytes failed: %d\n",
+ dlen, errcode);
+ bus->drvr->rx_errors++;
+
+ if (bus->glomerr++ < 3) {
+ brcmf_sdbrcm_rxfail(bus, true, true);
+ } else {
+ bus->glomerr = 0;
+ brcmf_sdbrcm_rxfail(bus, true, false);
+ brcmu_pkt_buf_free_skb(bus->glom);
+ bus->rxglomfail++;
+ bus->glom = NULL;
+ }
+ return 0;
+ }
+#ifdef BCMDBG
+ if (BRCMF_GLOM_ON()) {
+ printk(KERN_DEBUG "SUPERFRAME:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ pfirst->data, min_t(int, pfirst->len, 48));
+ }
+#endif
+
+ /* Validate the superframe header */
+ dptr = (u8 *) (pfirst->data);
+ sublen = get_unaligned_le16(dptr);
+ check = get_unaligned_le16(dptr + sizeof(u16));
+
+ chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
+ bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+ if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+ brcmf_dbg(INFO, "nextlen too large (%d) seq %d\n",
+ bus->nextlen, seq);
+ bus->nextlen = 0;
+ }
+ doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+ txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+ errcode = 0;
+ if ((u16)~(sublen ^ check)) {
+ brcmf_dbg(ERROR, "(superframe): HW hdr error: len/check 0x%04x/0x%04x\n",
+ sublen, check);
+ errcode = -1;
+ } else if (roundup(sublen, bus->blocksize) != dlen) {
+ brcmf_dbg(ERROR, "(superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n",
+ sublen, roundup(sublen, bus->blocksize),
+ dlen);
+ errcode = -1;
+ } else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) !=
+ SDPCM_GLOM_CHANNEL) {
+ brcmf_dbg(ERROR, "(superframe): bad channel %d\n",
+ SDPCM_PACKET_CHANNEL(
+ &dptr[SDPCM_FRAMETAG_LEN]));
+ errcode = -1;
+ } else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) {
+ brcmf_dbg(ERROR, "(superframe): got 2nd descriptor?\n");
+ errcode = -1;
+ } else if ((doff < SDPCM_HDRLEN) ||
+ (doff > (pfirst->len - SDPCM_HDRLEN))) {
+ brcmf_dbg(ERROR, "(superframe): Bad data offset %d: HW %d pkt %d min %d\n",
+ doff, sublen, pfirst->len, SDPCM_HDRLEN);
+ errcode = -1;
+ }
+
+ /* Check sequence number of superframe SW header */
+ if (rxseq != seq) {
+ brcmf_dbg(INFO, "(superframe) rx_seq %d, expected %d\n",
+ seq, rxseq);
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+ /* Check window for sanity */
+ if ((u8) (txmax - bus->tx_seq) > 0x40) {
+ brcmf_dbg(ERROR, "unlikely tx max %d with tx_seq %d\n",
+ txmax, bus->tx_seq);
+ txmax = bus->tx_seq + 2;
+ }
+ bus->tx_max = txmax;
+
+ /* Remove superframe header, remember offset */
+ skb_pull(pfirst, doff);
+ sfdoff = doff;
+
+ /* Validate all the subframe headers */
+ for (num = 0, pnext = pfirst; pnext && !errcode;
+ num++, pnext = pnext->next) {
+ dptr = (u8 *) (pnext->data);
+ dlen = (u16) (pnext->len);
+ sublen = get_unaligned_le16(dptr);
+ check = get_unaligned_le16(dptr + sizeof(u16));
+ chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+#ifdef BCMDBG
+ if (BRCMF_GLOM_ON()) {
+ printk(KERN_DEBUG "subframe:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ dptr, 32);
+ }
+#endif
+
+ if ((u16)~(sublen ^ check)) {
+ brcmf_dbg(ERROR, "(subframe %d): HW hdr error: len/check 0x%04x/0x%04x\n",
+ num, sublen, check);
+ errcode = -1;
+ } else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) {
+ brcmf_dbg(ERROR, "(subframe %d): length mismatch: len 0x%04x, expect 0x%04x\n",
+ num, sublen, dlen);
+ errcode = -1;
+ } else if ((chan != SDPCM_DATA_CHANNEL) &&
+ (chan != SDPCM_EVENT_CHANNEL)) {
+ brcmf_dbg(ERROR, "(subframe %d): bad channel %d\n",
+ num, chan);
+ errcode = -1;
+ } else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) {
+ brcmf_dbg(ERROR, "(subframe %d): Bad data offset %d: HW %d min %d\n",
+ num, doff, sublen, SDPCM_HDRLEN);
+ errcode = -1;
+ }
+ }
+
+ if (errcode) {
+ /* Terminate frame on error, request
+ a couple retries */
+ if (bus->glomerr++ < 3) {
+ /* Restore superframe header space */
+ skb_push(pfirst, sfdoff);
+ brcmf_sdbrcm_rxfail(bus, true, true);
+ } else {
+ bus->glomerr = 0;
+ brcmf_sdbrcm_rxfail(bus, true, false);
+ brcmu_pkt_buf_free_skb(bus->glom);
+ bus->rxglomfail++;
+ bus->glom = NULL;
+ }
+ bus->nextlen = 0;
+ return 0;
+ }
+
+ /* Basic SD framing looks ok - process each packet (header) */
+ save_pfirst = pfirst;
+ bus->glom = NULL;
+ plast = NULL;
+
+ for (num = 0; pfirst; rxseq++, pfirst = pnext) {
+ pnext = pfirst->next;
+ pfirst->next = NULL;
+
+ dptr = (u8 *) (pfirst->data);
+ sublen = get_unaligned_le16(dptr);
+ chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+
+ brcmf_dbg(GLOM, "Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n",
+ num, pfirst, pfirst->data,
+ pfirst->len, sublen, chan, seq);
+
+ /* precondition: chan == SDPCM_DATA_CHANNEL ||
+ chan == SDPCM_EVENT_CHANNEL */
+
+ if (rxseq != seq) {
+ brcmf_dbg(GLOM, "rx_seq %d, expected %d\n",
+ seq, rxseq);
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+#ifdef BCMDBG
+ if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
+ printk(KERN_DEBUG "Rx Subframe Data:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ dptr, dlen);
+ }
+#endif
+
+ __skb_trim(pfirst, sublen);
+ skb_pull(pfirst, doff);
+
+ if (pfirst->len == 0) {
+ brcmu_pkt_buf_free_skb(pfirst);
+ if (plast)
+ plast->next = pnext;
+ else
+ save_pfirst = pnext;
+
+ continue;
+ } else if (brcmf_proto_hdrpull(bus->drvr, &ifidx,
+ pfirst) != 0) {
+ brcmf_dbg(ERROR, "rx protocol error\n");
+ bus->drvr->rx_errors++;
+ brcmu_pkt_buf_free_skb(pfirst);
+ if (plast)
+ plast->next = pnext;
+ else
+ save_pfirst = pnext;
+
+ continue;
+ }
+
+ /* this packet will go up, link back into
+ chain and count it */
+ pfirst->next = pnext;
+ plast = pfirst;
+ num++;
+
+#ifdef BCMDBG
+ if (BRCMF_GLOM_ON()) {
+ brcmf_dbg(GLOM, "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
+ num, pfirst, pfirst->data,
+ pfirst->len, pfirst->next,
+ pfirst->prev);
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ pfirst->data,
+ min_t(int, pfirst->len, 32));
+ }
+#endif /* BCMDBG */
+ }
+ if (num) {
+ up(&bus->sdsem);
+ brcmf_rx_frame(bus->drvr, ifidx, save_pfirst, num);
+ down(&bus->sdsem);
+ }
+
+ bus->rxglomframes++;
+ bus->rxglompkts += num;
+ }
+ return num;
+}
+
+static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_bus *bus, uint *condition,
+ bool *pending)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int timeout = msecs_to_jiffies(DCMD_RESP_TIMEOUT);
+
+ /* Wait until control frame is available */
+ add_wait_queue(&bus->dcmd_resp_wait, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ while (!(*condition) && (!signal_pending(current) && timeout))
+ timeout = schedule_timeout(timeout);
+
+ if (signal_pending(current))
+ *pending = true;
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&bus->dcmd_resp_wait, &wait);
+
+ return timeout;
+}
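+
+/*
+ * brcmf_sdbrcm_read_control() wakes this waiter once a control frame
+ * has been placed in bus->rxctl/rxlen; a return value of 0 means
+ * DCMD_RESP_TIMEOUT expired first.
+ */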
+
+static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_bus *bus)
+{
+ if (waitqueue_active(&bus->dcmd_resp_wait))
+ wake_up_interruptible(&bus->dcmd_resp_wait);
+
+ return 0;
+}
+
+static void
+brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
+{
+ uint rdlen, pad;
+
+ int sdret;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Set rxctl for frame (w/optional alignment) */
+ bus->rxctl = bus->rxbuf;
+ bus->rxctl += BRCMF_FIRSTREAD;
+ pad = ((unsigned long)bus->rxctl % BRCMF_SDALIGN);
+ if (pad)
+ bus->rxctl += (BRCMF_SDALIGN - pad);
+ bus->rxctl -= BRCMF_FIRSTREAD;
+
+ /* Copy the already-read portion over */
+ memcpy(bus->rxctl, hdr, BRCMF_FIRSTREAD);
+ if (len <= BRCMF_FIRSTREAD)
+ goto gotpkt;
+
+ /* Raise rdlen to next SDIO block to avoid tail command */
+ rdlen = len - BRCMF_FIRSTREAD;
+ if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
+ pad = bus->blocksize - (rdlen % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+ ((len + pad) < bus->drvr->maxctl))
+ rdlen += pad;
+ } else if (rdlen % BRCMF_SDALIGN) {
+ rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
+ }
+
+ /* Satisfy length-alignment requirements */
+ if (rdlen & (ALIGNMENT - 1))
+ rdlen = roundup(rdlen, ALIGNMENT);
+
+ /* Drop if the read is too big or it exceeds our maximum */
+ if ((rdlen + BRCMF_FIRSTREAD) > bus->drvr->maxctl) {
+ brcmf_dbg(ERROR, "%d-byte control read exceeds %d-byte buffer\n",
+ rdlen, bus->drvr->maxctl);
+ bus->drvr->rx_errors++;
+ brcmf_sdbrcm_rxfail(bus, false, false);
+ goto done;
+ }
+
+ if ((len - doff) > bus->drvr->maxctl) {
+ brcmf_dbg(ERROR, "%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
+ len, len - doff, bus->drvr->maxctl);
+ bus->drvr->rx_errors++;
+ bus->rx_toolong++;
+ brcmf_sdbrcm_rxfail(bus, false, false);
+ goto done;
+ }
+
+ /* Read remainder of frame body into the rxctl buffer */
+ sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
+ bus->sdiodev->sbwad,
+ SDIO_FUNC_2,
+ F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen,
+ NULL);
+ bus->f2rxdata++;
+
+ /* Control frame failures need retransmission */
+ if (sdret < 0) {
+ brcmf_dbg(ERROR, "read %d control bytes failed: %d\n",
+ rdlen, sdret);
+ bus->rxc_errors++;
+ brcmf_sdbrcm_rxfail(bus, true, true);
+ goto done;
+ }
+
+gotpkt:
+
+#ifdef BCMDBG
+ if (BRCMF_BYTES_ON() && BRCMF_CTL_ON()) {
+ printk(KERN_DEBUG "RxCtrl:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, bus->rxctl, len);
+ }
+#endif
+
+ /* Point to valid data and indicate its length */
+ bus->rxctl += doff;
+ bus->rxlen = len - doff;
+
+done:
+ /* Awake any waiters */
+ brcmf_sdbrcm_dcmd_resp_wake(bus);
+}
+
+/* Pad read to blocksize for efficiency */
+static void brcmf_pad(struct brcmf_bus *bus, u16 *pad, u16 *rdlen)
+{
+ if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
+ *pad = bus->blocksize - (*rdlen % bus->blocksize);
+ if (*pad <= bus->roundup && *pad < bus->blocksize &&
+ *rdlen + *pad + BRCMF_FIRSTREAD < MAX_RX_DATASZ)
+ *rdlen += *pad;
+ } else if (*rdlen % BRCMF_SDALIGN) {
+ *rdlen += BRCMF_SDALIGN - (*rdlen % BRCMF_SDALIGN);
+ }
+}
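+
+/*
+ * Example: with blocksize == 512 and *rdlen == 600, *pad becomes
+ * 512 - (600 % 512) == 424 and *rdlen is raised to 1024, so the read
+ * ends on a block boundary (provided roundup and MAX_RX_DATASZ allow
+ * the increase).
+ */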
+
+static void
+brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
+ struct sk_buff **pkt, u8 **rxbuf)
+{
+ int sdret; /* Return code from calls */
+
+ *pkt = brcmu_pkt_buf_get_skb(rdlen + BRCMF_SDALIGN);
+ if (*pkt == NULL)
+ return;
+
+ pkt_align(*pkt, rdlen, BRCMF_SDALIGN);
+ *rxbuf = (u8 *) ((*pkt)->data);
+ /* Read the entire frame */
+ sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC,
+ *rxbuf, rdlen, *pkt);
+ bus->f2rxdata++;
+
+ if (sdret < 0) {
+ brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n",
+ rdlen, sdret);
+ brcmu_pkt_buf_free_skb(*pkt);
+ bus->drvr->rx_errors++;
+ /* Force retry w/normal header read.
+ * Don't attempt NAK for gSPI.
+ */
+ brcmf_sdbrcm_rxfail(bus, true, true);
+ *pkt = NULL;
+ }
+}
+
+/* Checks the header */
+static int
+brcmf_check_rxbuf(struct brcmf_bus *bus, struct sk_buff *pkt, u8 *rxbuf,
+ u8 rxseq, u16 nextlen, u16 *len)
+{
+ u16 check;
+ bool len_mismatch; /* Readahead len disagrees with the
+ len from the hw-hdr */
+
+ memcpy(bus->rxhdr, rxbuf, SDPCM_HDRLEN);
+
+ /* Extract hardware header fields */
+ *len = get_unaligned_le16(bus->rxhdr);
+ check = get_unaligned_le16(bus->rxhdr + sizeof(u16));
+
+ /* All zeros means readahead info was bad */
+ if (!(*len | check)) {
+ brcmf_dbg(INFO, "(nextlen): read zeros in HW header???\n");
+ goto fail;
+ }
+
+ /* Validate check bytes */
+ if ((u16)~(*len ^ check)) {
+ brcmf_dbg(ERROR, "(nextlen): HW hdr error: nextlen/len/check 0x%04x/0x%04x/0x%04x\n",
+ nextlen, *len, check);
+ bus->rx_badhdr++;
+ brcmf_sdbrcm_rxfail(bus, false, false);
+ goto fail;
+ }
+
+ /* Validate frame length */
+ if (*len < SDPCM_HDRLEN) {
+ brcmf_dbg(ERROR, "(nextlen): HW hdr length invalid: %d\n",
+ *len);
+ goto fail;
+ }
+
+ /* Check for consistency with readahead info */
+ len_mismatch = (nextlen != (roundup(*len, 16) >> 4));
+ if (len_mismatch) {
+ /* Mismatch, force retry w/normal
+ header (may be >4K) */
+ brcmf_dbg(ERROR, "(nextlen): mismatch, nextlen %d len %d rnd %d; expected rxseq %d\n",
+ nextlen, *len, roundup(*len, 16),
+ rxseq);
+ brcmf_sdbrcm_rxfail(bus, true, true);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ brcmf_sdbrcm_pktfree2(bus, pkt);
+ return -EINVAL;
+}
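+
+/*
+ * The hardware tag is a 16-bit length followed by its ones' complement:
+ * len == 0x0040 pairs with check == 0xffbf, so (u16)~(len ^ check) is
+ * zero exactly for a valid header and non-zero otherwise.
+ */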
+
+/* Return true if there may be more frames to read */
+static uint
+brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
+{
+ u16 len, check; /* Extracted hardware header fields */
+ u8 chan, seq, doff; /* Extracted software header fields */
+ u8 fcbits; /* Extracted fcbits from software header */
+
+ struct sk_buff *pkt; /* Packet for event or data frames */
+ u16 pad; /* Number of pad bytes to read */
+ u16 rdlen; /* Total number of bytes to read */
+ u8 rxseq; /* Next sequence number to expect */
+ uint rxleft = 0; /* Remaining number of frames allowed */
+ int sdret; /* Return code from calls */
+ u8 txmax; /* Maximum tx sequence offered */
+ u8 *rxbuf;
+ int ifidx = 0;
+ uint rxcount = 0; /* Total frames read */
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Not finished until we see the no-more-frames indication */
+ *finished = false;
+
+ for (rxseq = bus->rx_seq, rxleft = maxframes;
+ !bus->rxskip && rxleft && bus->drvr->busstate != BRCMF_BUS_DOWN;
+ rxseq++, rxleft--) {
+
+ /* Handle glomming separately */
+ if (bus->glom || bus->glomd) {
+ u8 cnt;
+ brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
+ bus->glomd, bus->glom);
+ cnt = brcmf_sdbrcm_rxglom(bus, rxseq);
+ brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
+ rxseq += cnt - 1;
+ rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
+ continue;
+ }
+
+ /* Try doing single read if we can */
+ if (bus->nextlen) {
+ u16 nextlen = bus->nextlen;
+ bus->nextlen = 0;
+
+ rdlen = len = nextlen << 4;
+ brcmf_pad(bus, &pad, &rdlen);
+
+ /*
+ * After the frame is received we have to
+ * distinguish whether it is data
+ * or non-data frame.
+ */
+ brcmf_alloc_pkt_and_read(bus, rdlen, &pkt, &rxbuf);
+ if (pkt == NULL) {
+ /* Give up on data, request rtx of events */
+ brcmf_dbg(ERROR, "(nextlen): brcmf_alloc_pkt_and_read failed: len %d rdlen %d expected rxseq %d\n",
+ len, rdlen, rxseq);
+ continue;
+ }
+
+ if (brcmf_check_rxbuf(bus, pkt, rxbuf, rxseq, nextlen,
+ &len) < 0)
+ continue;
+
+ /* Extract software header fields */
+ chan = SDPCM_PACKET_CHANNEL(
+ &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(
+ &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(
+ &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ txmax = SDPCM_WINDOW_VALUE(
+ &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ bus->nextlen =
+ bus->rxhdr[SDPCM_FRAMETAG_LEN +
+ SDPCM_NEXTLEN_OFFSET];
+ if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+ brcmf_dbg(INFO, "(nextlen): got frame w/nextlen too large (%d), seq %d\n",
+ bus->nextlen, seq);
+ bus->nextlen = 0;
+ }
+
+ bus->drvr->rx_readahead_cnt++;
+
+ /* Handle Flow Control */
+ fcbits = SDPCM_FCMASK_VALUE(
+ &bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ if (bus->flowcontrol != fcbits) {
+ if (~bus->flowcontrol & fcbits)
+ bus->fc_xoff++;
+
+ if (bus->flowcontrol & ~fcbits)
+ bus->fc_xon++;
+
+ bus->fc_rcvd++;
+ bus->flowcontrol = fcbits;
+ }
+
+ /* Check and update sequence number */
+ if (rxseq != seq) {
+ brcmf_dbg(INFO, "(nextlen): rx_seq %d, expected %d\n",
+ seq, rxseq);
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+ /* Check window for sanity */
+ if ((u8) (txmax - bus->tx_seq) > 0x40) {
+ brcmf_dbg(ERROR, "got unlikely tx max %d with tx_seq %d\n",
+ txmax, bus->tx_seq);
+ txmax = bus->tx_seq + 2;
+ }
+ bus->tx_max = txmax;
+
+#ifdef BCMDBG
+ if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
+ printk(KERN_DEBUG "Rx Data:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ rxbuf, len);
+ } else if (BRCMF_HDRS_ON()) {
+ printk(KERN_DEBUG "RxHdr:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ bus->rxhdr, SDPCM_HDRLEN);
+ }
+#endif
+
+ if (chan == SDPCM_CONTROL_CHANNEL) {
+ brcmf_dbg(ERROR, "(nextlen): readahead on control packet %d?\n",
+ seq);
+ /* Force retry w/normal header read */
+ bus->nextlen = 0;
+ brcmf_sdbrcm_rxfail(bus, false, true);
+ brcmf_sdbrcm_pktfree2(bus, pkt);
+ continue;
+ }
+
+ /* Validate data offset */
+ if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+ brcmf_dbg(ERROR, "(nextlen): bad data offset %d: HW len %d min %d\n",
+ doff, len, SDPCM_HDRLEN);
+ brcmf_sdbrcm_rxfail(bus, false, false);
+ brcmf_sdbrcm_pktfree2(bus, pkt);
+ continue;
+ }
+
+ /* All done with this one -- now deliver the packet */
+ goto deliver;
+ }
+
+ /* Read frame header (hardware and software) */
+ sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, bus->rxhdr,
+ BRCMF_FIRSTREAD, NULL);
+ bus->f2rxhdrs++;
+
+ if (sdret < 0) {
+ brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n", sdret);
+ bus->rx_hdrfail++;
+ brcmf_sdbrcm_rxfail(bus, true, true);
+ continue;
+ }
+#ifdef BCMDBG
+ if (BRCMF_BYTES_ON() || BRCMF_HDRS_ON()) {
+ printk(KERN_DEBUG "RxHdr:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ bus->rxhdr, SDPCM_HDRLEN);
+ }
+#endif
+
+ /* Extract hardware header fields */
+ len = get_unaligned_le16(bus->rxhdr);
+ check = get_unaligned_le16(bus->rxhdr + sizeof(u16));
+
+ /* All zeros means no more frames */
+ if (!(len | check)) {
+ *finished = true;
+ break;
+ }
+
+ /* Validate check bytes */
+ if ((u16) ~(len ^ check)) {
+ brcmf_dbg(ERROR, "HW hdr err: len/check 0x%04x/0x%04x\n",
+ len, check);
+ bus->rx_badhdr++;
+ brcmf_sdbrcm_rxfail(bus, false, false);
+ continue;
+ }
+
+ /* Validate frame length */
+ if (len < SDPCM_HDRLEN) {
+ brcmf_dbg(ERROR, "HW hdr length invalid: %d\n", len);
+ continue;
+ }
+
+ /* Extract software header fields */
+ chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+ txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ /* Validate data offset */
+ if ((doff < SDPCM_HDRLEN) || (doff > len)) {
+ brcmf_dbg(ERROR, "Bad data offset %d: HW len %d, min %d seq %d\n",
+ doff, len, SDPCM_HDRLEN, seq);
+ bus->rx_badhdr++;
+ brcmf_sdbrcm_rxfail(bus, false, false);
+ continue;
+ }
+
+ /* Save the readahead length if there is one */
+ bus->nextlen =
+ bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+ if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
+ brcmf_dbg(INFO, "(nextlen): got frame w/nextlen too large (%d), seq %d\n",
+ bus->nextlen, seq);
+ bus->nextlen = 0;
+ }
+
+ /* Handle Flow Control */
+ fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
+
+ if (bus->flowcontrol != fcbits) {
+ if (~bus->flowcontrol & fcbits)
+ bus->fc_xoff++;
+
+ if (bus->flowcontrol & ~fcbits)
+ bus->fc_xon++;
+
+ bus->fc_rcvd++;
+ bus->flowcontrol = fcbits;
+ }
+
+ /* Check and update sequence number */
+ if (rxseq != seq) {
+ brcmf_dbg(INFO, "rx_seq %d, expected %d\n", seq, rxseq);
+ bus->rx_badseq++;
+ rxseq = seq;
+ }
+
+ /* Check window for sanity */
+ if ((u8) (txmax - bus->tx_seq) > 0x40) {
+ brcmf_dbg(ERROR, "unlikely tx max %d with tx_seq %d\n",
+ txmax, bus->tx_seq);
+ txmax = bus->tx_seq + 2;
+ }
+ bus->tx_max = txmax;
+
+ /* Call a separate function for control frames */
+ if (chan == SDPCM_CONTROL_CHANNEL) {
+ brcmf_sdbrcm_read_control(bus, bus->rxhdr, len, doff);
+ continue;
+ }
+
+ /* precondition: chan is either SDPCM_DATA_CHANNEL,
+ SDPCM_EVENT_CHANNEL, SDPCM_TEST_CHANNEL or
+ SDPCM_GLOM_CHANNEL */
+
+ /* Length to read */
+ rdlen = (len > BRCMF_FIRSTREAD) ? (len - BRCMF_FIRSTREAD) : 0;
+
+ /* May pad read to blocksize for efficiency */
+ if (bus->roundup && bus->blocksize &&
+ (rdlen > bus->blocksize)) {
+ pad = bus->blocksize - (rdlen % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
+ ((rdlen + pad + BRCMF_FIRSTREAD) < MAX_RX_DATASZ))
+ rdlen += pad;
+ } else if (rdlen % BRCMF_SDALIGN) {
+ rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
+ }
+
+ /* Satisfy length-alignment requirements */
+ if (rdlen & (ALIGNMENT - 1))
+ rdlen = roundup(rdlen, ALIGNMENT);
+
+ if ((rdlen + BRCMF_FIRSTREAD) > MAX_RX_DATASZ) {
+ /* Too long -- skip this frame */
+ brcmf_dbg(ERROR, "too long: len %d rdlen %d\n",
+ len, rdlen);
+ bus->drvr->rx_errors++;
+ bus->rx_toolong++;
+ brcmf_sdbrcm_rxfail(bus, false, false);
+ continue;
+ }
+
+ pkt = brcmu_pkt_buf_get_skb(rdlen +
+ BRCMF_FIRSTREAD + BRCMF_SDALIGN);
+ if (!pkt) {
+ /* Give up on data, request rtx of events */
+ brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: rdlen %d chan %d\n",
+ rdlen, chan);
+ bus->drvr->rx_dropped++;
+ brcmf_sdbrcm_rxfail(bus, false, RETRYCHAN(chan));
+ continue;
+ }
+
+ /* Leave room for what we already read, and align remainder */
+ skb_pull(pkt, BRCMF_FIRSTREAD);
+ pkt_align(pkt, rdlen, BRCMF_SDALIGN);
+
+ /* Read the remaining frame data */
+ sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, ((u8 *) (pkt->data)),
+ rdlen, pkt);
+ bus->f2rxdata++;
+
+ if (sdret < 0) {
+ brcmf_dbg(ERROR, "read %d %s bytes failed: %d\n", rdlen,
+ ((chan == SDPCM_EVENT_CHANNEL) ? "event"
+ : ((chan == SDPCM_DATA_CHANNEL) ? "data"
+ : "test")), sdret);
+ brcmu_pkt_buf_free_skb(pkt);
+ bus->drvr->rx_errors++;
+ brcmf_sdbrcm_rxfail(bus, true, RETRYCHAN(chan));
+ continue;
+ }
+
+ /* Copy the already-read portion */
+ skb_push(pkt, BRCMF_FIRSTREAD);
+ memcpy(pkt->data, bus->rxhdr, BRCMF_FIRSTREAD);
+
+#ifdef BCMDBG
+ if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
+ printk(KERN_DEBUG "Rx Data:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ pkt->data, len);
+ }
+#endif
+
+deliver:
+ /* Save superframe descriptor and allocate packet frame */
+ if (chan == SDPCM_GLOM_CHANNEL) {
+ if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
+ brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
+ len);
+#ifdef BCMDBG
+ if (BRCMF_GLOM_ON()) {
+ printk(KERN_DEBUG "Glom Data:\n");
+ print_hex_dump_bytes("",
+ DUMP_PREFIX_OFFSET,
+ pkt->data, len);
+ }
+#endif
+ __skb_trim(pkt, len);
+ skb_pull(pkt, SDPCM_HDRLEN);
+ bus->glomd = pkt;
+ } else {
+ brcmf_dbg(ERROR, "%s: glom superframe w/o "
+ "descriptor!\n", __func__);
+ brcmf_sdbrcm_rxfail(bus, false, false);
+ }
+ continue;
+ }
+
+ /* Fill in packet len and prio, deliver upward */
+ __skb_trim(pkt, len);
+ skb_pull(pkt, doff);
+
+ if (pkt->len == 0) {
+ brcmu_pkt_buf_free_skb(pkt);
+ continue;
+ } else if (brcmf_proto_hdrpull(bus->drvr, &ifidx, pkt) != 0) {
+ brcmf_dbg(ERROR, "rx protocol error\n");
+ brcmu_pkt_buf_free_skb(pkt);
+ bus->drvr->rx_errors++;
+ continue;
+ }
+
+ /* Unlock during rx call */
+ up(&bus->sdsem);
+ brcmf_rx_frame(bus->drvr, ifidx, pkt, 1);
+ down(&bus->sdsem);
+ }
+ rxcount = maxframes - rxleft;
+#ifdef BCMDBG
+ /* Message if we hit the limit */
+ if (!rxleft)
+ brcmf_dbg(DATA, "hit rx limit of %d frames\n",
+ maxframes);
+ else
+#endif /* BCMDBG */
+ brcmf_dbg(DATA, "processed %d frames\n", rxcount);
+ /* Back off rxseq if awaiting rtx, update rx_seq */
+ if (bus->rxskip)
+ rxseq--;
+ bus->rx_seq = rxseq;
+
+ return rxcount;
+}
+
+static int
+brcmf_sdbrcm_send_buf(struct brcmf_bus *bus, u32 addr, uint fn, uint flags,
+ u8 *buf, uint nbytes, struct sk_buff *pkt)
+{
+ return brcmf_sdcard_send_buf
+ (bus->sdiodev, addr, fn, flags, buf, nbytes, pkt);
+}
+
+static void
+brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar)
+{
+ up(&bus->sdsem);
+ wait_event_interruptible_timeout(bus->ctrl_wait, !*lockvar, HZ * 2);
+ down(&bus->sdsem);
+}
+
+static void
+brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus)
+{
+ if (waitqueue_active(&bus->ctrl_wait))
+ wake_up_interruptible(&bus->ctrl_wait);
+}
+
+/* Writes a HW/SW header into the packet and sends it. */
+/* Assumes: (a) header space already there, (b) caller holds lock */
+static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt,
+ uint chan, bool free_pkt)
+{
+ int ret;
+ u8 *frame;
+ u16 len, pad = 0;
+ u32 swheader;
+ struct sk_buff *new;
+ int i;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ frame = (u8 *) (pkt->data);
+
+ /* Add alignment padding, allocate new packet if needed */
+ pad = ((unsigned long)frame % BRCMF_SDALIGN);
+ if (pad) {
+ if (skb_headroom(pkt) < pad) {
+ brcmf_dbg(INFO, "insufficient headroom %d for %d pad\n",
+ skb_headroom(pkt), pad);
+ bus->drvr->tx_realloc++;
+ new = brcmu_pkt_buf_get_skb(pkt->len + BRCMF_SDALIGN);
+ if (!new) {
+ brcmf_dbg(ERROR, "couldn't allocate new %d-byte packet\n",
+ pkt->len + BRCMF_SDALIGN);
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ pkt_align(new, pkt->len, BRCMF_SDALIGN);
+ memcpy(new->data, pkt->data, pkt->len);
+ if (free_pkt)
+ brcmu_pkt_buf_free_skb(pkt);
+ /* free the pkt if canned one is not used */
+ free_pkt = true;
+ pkt = new;
+ frame = (u8 *) (pkt->data);
+ /* precondition: (frame % BRCMF_SDALIGN) == 0) */
+ pad = 0;
+ } else {
+ skb_push(pkt, pad);
+ frame = (u8 *) (pkt->data);
+ /* precondition: pad + SDPCM_HDRLEN <= pkt->len */
+ memset(frame, 0, pad + SDPCM_HDRLEN);
+ }
+ }
+ /* precondition: pad < BRCMF_SDALIGN */
+
+ /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+ len = (u16) (pkt->len);
+ *(__le16 *) frame = cpu_to_le16(len);
+ *(((__le16 *) frame) + 1) = cpu_to_le16(~len);
+
+ /* Software tag: channel, sequence number, data offset */
+ swheader =
+ ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq |
+ (((pad +
+ SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+
+ put_unaligned_le32(swheader, frame + SDPCM_FRAMETAG_LEN);
+ put_unaligned_le32(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
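+
+ /*
+ * The frame now leads with the full SDPCM header (12 bytes when
+ * SDPCM_FRAMETAG_LEN == 4): bytes 0-1 len (LE), bytes 2-3 ~len,
+ * bytes 4-7 swheader (LE), bytes 8-11 reserved zero.
+ */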
+
+#ifdef BCMDBG
+ tx_packets[pkt->priority]++;
+ if (BRCMF_BYTES_ON() &&
+ (((BRCMF_CTL_ON() && (chan == SDPCM_CONTROL_CHANNEL)) ||
+ (BRCMF_DATA_ON() && (chan != SDPCM_CONTROL_CHANNEL))))) {
+ printk(KERN_DEBUG "Tx Frame:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, frame, len);
+ } else if (BRCMF_HDRS_ON()) {
+ printk(KERN_DEBUG "TxHdr:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ frame, min_t(u16, len, 16));
+ }
+#endif
+
+ /* Raise len to next SDIO block to eliminate tail command */
+ if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+ u16 pad = bus->blocksize - (len % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize))
+ len += pad;
+ } else if (len % BRCMF_SDALIGN) {
+ len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
+ }
+
+ /* Some controllers have trouble with unaligned lengths -- round up to ALIGNMENT */
+ if (len & (ALIGNMENT - 1))
+ len = roundup(len, ALIGNMENT);
+
+ ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, frame,
+ len, pkt);
+ bus->f2txdata++;
+
+ if (ret < 0) {
+ /* On failure, abort the command and terminate the frame */
+ brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
+ ret);
+ bus->tx_sderrs++;
+
+ brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM,
+ NULL);
+ bus->f1regdata++;
+
+ for (i = 0; i < 3; i++) {
+ u8 hi, lo;
+ hi = brcmf_sdcard_cfg_read(bus->sdiodev,
+ SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCHI,
+ NULL);
+ lo = brcmf_sdcard_cfg_read(bus->sdiodev,
+ SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCLO,
+ NULL);
+ bus->f1regdata += 2;
+ if ((hi == 0) && (lo == 0))
+ break;
+ }
+
+ }
+ if (ret == 0)
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+
+done:
+ /* restore pkt buffer pointer before calling tx complete routine */
+ skb_pull(pkt, SDPCM_HDRLEN + pad);
+ up(&bus->sdsem);
+ brcmf_txcomplete(bus->drvr, pkt, ret != 0);
+ down(&bus->sdsem);
+
+ if (free_pkt)
+ brcmu_pkt_buf_free_skb(pkt);
+
+ return ret;
+}
+
+static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
+{
+ struct sk_buff *pkt;
+ u32 intstatus = 0;
+ uint retries = 0;
+ int ret = 0, prec_out;
+ uint cnt = 0;
+ uint datalen;
+ u8 tx_prec_map;
+
+ struct brcmf_pub *drvr = bus->drvr;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ tx_prec_map = ~bus->flowcontrol;
+
+ /* Send frames until the limit or some other event */
+ for (cnt = 0; (cnt < maxframes) && data_ok(bus); cnt++) {
+ spin_lock_bh(&bus->txqlock);
+ pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map, &prec_out);
+ if (pkt == NULL) {
+ spin_unlock_bh(&bus->txqlock);
+ break;
+ }
+ spin_unlock_bh(&bus->txqlock);
+ datalen = pkt->len - SDPCM_HDRLEN;
+
+ ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true);
+ if (ret)
+ bus->drvr->tx_errors++;
+ else
+ bus->drvr->dstats.tx_bytes += datalen;
+
+ /* In poll mode, need to check for other events */
+ if (!bus->intr && cnt) {
+ /* Check device status, signal pending interrupt */
+ r_sdreg32(bus, &intstatus,
+ offsetof(struct sdpcmd_regs, intstatus),
+ &retries);
+ bus->f2txdata++;
+ if (brcmf_sdcard_regfail(bus->sdiodev))
+ break;
+ if (intstatus & bus->hostintmask)
+ bus->ipend = true;
+ }
+ }
+
+ /* Deflow-control stack if needed */
+ if (drvr->up && (drvr->busstate == BRCMF_BUS_DATA) &&
+ drvr->txoff && (pktq_len(&bus->txq) < TXLOW))
+ brcmf_txflowcontrol(drvr, 0, OFF);
+
+ return cnt;
+}
+
+static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
+{
+ u32 intstatus, newstatus = 0;
+ uint retries = 0;
+ uint rxlimit = bus->rxbound; /* Rx frames to read before resched */
+ uint txlimit = bus->txbound; /* Tx frames to send before resched */
+ uint framecnt = 0; /* Temporary counter of tx/rx frames */
+ bool rxdone = true; /* Flag for no more read data */
+ bool resched = false; /* Flag indicating resched wanted */
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Start with leftover status bits */
+ intstatus = bus->intstatus;
+
+ down(&bus->sdsem);
+
+ /* If waiting for HTAVAIL, check status */
+ if (bus->clkstate == CLK_PENDING) {
+ int err;
+ u8 clkctl, devctl = 0;
+
+#ifdef BCMDBG
+ /* Check for inconsistent device control */
+ devctl = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, &err);
+ if (err) {
+ brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", err);
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
+ }
+#endif /* BCMDBG */
+
+ /* Read CSR, if clock on switch to AVAIL, else ignore */
+ clkctl = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (err) {
+ brcmf_dbg(ERROR, "error reading CSR: %d\n",
+ err);
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
+ }
+
+ brcmf_dbg(INFO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
+ devctl, clkctl);
+
+ if (SBSDIO_HTAV(clkctl)) {
+ devctl = brcmf_sdcard_cfg_read(bus->sdiodev,
+ SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, &err);
+ if (err) {
+ brcmf_dbg(ERROR, "error reading DEVCTL: %d\n",
+ err);
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
+ }
+ devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, devctl, &err);
+ if (err) {
+ brcmf_dbg(ERROR, "error writing DEVCTL: %d\n",
+ err);
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
+ }
+ bus->clkstate = CLK_AVAIL;
+ } else {
+ goto clkwait;
+ }
+ }
+
+ bus_wake(bus);
+
+ /* Make sure backplane clock is on */
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, true);
+ if (bus->clkstate == CLK_PENDING)
+ goto clkwait;
+
+ /* Pending interrupt indicates new device status */
+ if (bus->ipend) {
+ bus->ipend = false;
+ r_sdreg32(bus, &newstatus,
+ offsetof(struct sdpcmd_regs, intstatus), &retries);
+ bus->f1regdata++;
+ if (brcmf_sdcard_regfail(bus->sdiodev))
+ newstatus = 0;
+ newstatus &= bus->hostintmask;
+ bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
+ if (newstatus) {
+ w_sdreg32(bus, newstatus,
+ offsetof(struct sdpcmd_regs, intstatus),
+ &retries);
+ bus->f1regdata++;
+ }
+ }
+
+ /* Merge new bits with previous */
+ intstatus |= newstatus;
+ bus->intstatus = 0;
+
+ /* Handle flow-control change: read new state in case our ack
+ * crossed another change interrupt. If change still set, assume
+ * FC ON for safety, let next loop through do the debounce.
+ */
+ if (intstatus & I_HMB_FC_CHANGE) {
+ intstatus &= ~I_HMB_FC_CHANGE;
+ w_sdreg32(bus, I_HMB_FC_CHANGE,
+ offsetof(struct sdpcmd_regs, intstatus), &retries);
+
+ r_sdreg32(bus, &newstatus,
+ offsetof(struct sdpcmd_regs, intstatus), &retries);
+ bus->f1regdata += 2;
+ bus->fcstate =
+ !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
+ intstatus |= (newstatus & bus->hostintmask);
+ }
+
+ /* Handle host mailbox indication */
+ if (intstatus & I_HMB_HOST_INT) {
+ intstatus &= ~I_HMB_HOST_INT;
+ intstatus |= brcmf_sdbrcm_hostmail(bus);
+ }
+
+ /* Generally don't ask for these, can get CRC errors... */
+ if (intstatus & I_WR_OOSYNC) {
+ brcmf_dbg(ERROR, "Dongle reports WR_OOSYNC\n");
+ intstatus &= ~I_WR_OOSYNC;
+ }
+
+ if (intstatus & I_RD_OOSYNC) {
+ brcmf_dbg(ERROR, "Dongle reports RD_OOSYNC\n");
+ intstatus &= ~I_RD_OOSYNC;
+ }
+
+ if (intstatus & I_SBINT) {
+ brcmf_dbg(ERROR, "Dongle reports SBINT\n");
+ intstatus &= ~I_SBINT;
+ }
+
+ /* Would be active due to wake-wlan in gSPI */
+ if (intstatus & I_CHIPACTIVE) {
+ brcmf_dbg(INFO, "Dongle reports CHIPACTIVE\n");
+ intstatus &= ~I_CHIPACTIVE;
+ }
+
+ /* Ignore frame indications if rxskip is set */
+ if (bus->rxskip)
+ intstatus &= ~I_HMB_FRAME_IND;
+
+ /* On frame indication, read available frames */
+ if (PKT_AVAILABLE()) {
+ framecnt = brcmf_sdbrcm_readframes(bus, rxlimit, &rxdone);
+ if (rxdone || bus->rxskip)
+ intstatus &= ~I_HMB_FRAME_IND;
+ rxlimit -= min(framecnt, rxlimit);
+ }
+
+ /* Keep still-pending events for next scheduling */
+ bus->intstatus = intstatus;
+
+clkwait:
+ if (data_ok(bus) && bus->ctrl_frame_stat &&
+ (bus->clkstate == CLK_AVAIL)) {
+ int ret, i;
+
+ ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, (u8 *) bus->ctrl_frame_buf,
+ (u32) bus->ctrl_frame_len, NULL);
+
+ if (ret < 0) {
+ /* On failure, abort the command and
+ terminate the frame */
+ brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
+ ret);
+ bus->tx_sderrs++;
+
+ brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
+
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM,
+ NULL);
+ bus->f1regdata++;
+
+ for (i = 0; i < 3; i++) {
+ u8 hi, lo;
+ hi = brcmf_sdcard_cfg_read(bus->sdiodev,
+ SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCHI,
+ NULL);
+ lo = brcmf_sdcard_cfg_read(bus->sdiodev,
+ SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCLO,
+ NULL);
+ bus->f1regdata += 2;
+ if ((hi == 0) && (lo == 0))
+ break;
+ }
+
+ }
+ if (ret == 0)
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+
+ brcmf_dbg(INFO, "Return_dpc value is : %d\n", ret);
+ bus->ctrl_frame_stat = false;
+ brcmf_sdbrcm_wait_event_wakeup(bus);
+ }
+ /* Send queued frames (limit 1 if rx may still be pending) */
+ else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
+ brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit
+ && data_ok(bus)) {
+ framecnt = rxdone ? txlimit : min(txlimit, bus->txminmax);
+ framecnt = brcmf_sdbrcm_sendfromq(bus, framecnt);
+ txlimit -= framecnt;
+ }
+
+ /* Resched if events or tx frames are pending,
+ else await next interrupt */
+ /* On failed register access, all bets are off:
+ no resched or interrupts */
+ if ((bus->drvr->busstate == BRCMF_BUS_DOWN) ||
+ brcmf_sdcard_regfail(bus->sdiodev)) {
+ brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation %d\n",
+ brcmf_sdcard_regfail(bus->sdiodev));
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->intstatus = 0;
+ } else if (bus->clkstate == CLK_PENDING) {
+ brcmf_dbg(INFO, "rescheduled due to CLK_PENDING awaiting I_CHIPACTIVE interrupt\n");
+ resched = true;
+ } else if (bus->intstatus || bus->ipend ||
+ (!bus->fcstate && brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol)
+ && data_ok(bus)) || PKT_AVAILABLE()) {
+ resched = true;
+ }
+
+ bus->dpc_sched = resched;
+
+ /* If we're done for now, turn off clock request. */
+ if ((bus->clkstate != CLK_PENDING)
+ && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
+ bus->activity = false;
+ brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
+ }
+
+ up(&bus->sdsem);
+
+ return resched;
+}
+
+static int brcmf_sdbrcm_dpc_thread(void *data)
+{
+ struct brcmf_bus *bus = (struct brcmf_bus *) data;
+
+ allow_signal(SIGTERM);
+ /* Run until signal received */
+ while (1) {
+ if (kthread_should_stop())
+ break;
+ if (!wait_for_completion_interruptible(&bus->dpc_wait)) {
+ /* Call bus dpc unless it indicated down
+ (then clean stop) */
+ if (bus->drvr->busstate != BRCMF_BUS_DOWN) {
+ if (brcmf_sdbrcm_dpc(bus))
+ complete(&bus->dpc_wait);
+ } else {
+ /* after stopping the bus, exit thread */
+ brcmf_sdbrcm_bus_stop(bus);
+ bus->dpc_tsk = NULL;
+ break;
+ }
+ } else
+ break;
+ }
+ return 0;
+}
+
+int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
+{
+ int ret = -EBADE;
+ uint datalen, prec;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ datalen = pkt->len;
+
+ /* Add space for the header */
+ skb_push(pkt, SDPCM_HDRLEN);
+ /* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */
+
+ prec = prio2prec((pkt->priority & PRIOMASK));
+
+ /* Check for existing queue, current flow-control,
+ pending event, or pending clock */
+ brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq));
+ bus->fcqueued++;
+
+ /* Priority based enq */
+ spin_lock_bh(&bus->txqlock);
+ if (brcmf_c_prec_enq(bus->drvr, &bus->txq, pkt, prec) == false) {
+ skb_pull(pkt, SDPCM_HDRLEN);
+ brcmf_txcomplete(bus->drvr, pkt, false);
+ brcmu_pkt_buf_free_skb(pkt);
+ brcmf_dbg(ERROR, "out of bus->txq !!!\n");
+ ret = -ENOSR;
+ } else {
+ ret = 0;
+ }
+ spin_unlock_bh(&bus->txqlock);
+
+ if (pktq_len(&bus->txq) >= TXHI)
+ brcmf_txflowcontrol(bus->drvr, 0, ON);
+
+#ifdef BCMDBG
+ if (pktq_plen(&bus->txq, prec) > qcount[prec])
+ qcount[prec] = pktq_plen(&bus->txq, prec);
+#endif
+ /* Schedule DPC if needed to send queued packet(s) */
+ if (!bus->dpc_sched) {
+ bus->dpc_sched = true;
+ if (bus->dpc_tsk)
+ complete(&bus->dpc_wait);
+ }
+
+ return ret;
+}
+
+static int
+brcmf_sdbrcm_membytes(struct brcmf_bus *bus, bool write, u32 address, u8 *data,
+ uint size)
+{
+ int bcmerror = 0;
+ u32 sdaddr;
+ uint dsize;
+
+ /* Determine initial transfer parameters */
+ sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
+ if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
+ dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
+ else
+ dsize = size;
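+
+ /* Illustrative sketch of the split (assuming the usual 32 KiB
+ * window, i.e. SBSDIO_SB_OFT_ADDR_LIMIT == 0x8000): a 0x200-byte
+ * access at address 0x7f00 transfers 0x100 bytes at window offset
+ * 0x7f00, then the window advances and the remaining 0x100 bytes
+ * start at offset 0.
+ */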
+
+ /* Set the backplane window to include the start address */
+ bcmerror = brcmf_sdcard_set_sbaddr_window(bus->sdiodev, address);
+ if (bcmerror) {
+ brcmf_dbg(ERROR, "window change failed\n");
+ goto xfer_done;
+ }
+
+ /* Do the transfer(s) */
+ while (size) {
+ brcmf_dbg(INFO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
+ write ? "write" : "read", dsize,
+ sdaddr, address & SBSDIO_SBWINDOW_MASK);
+ bcmerror = brcmf_sdcard_rwdata(bus->sdiodev, write,
+ sdaddr, data, dsize);
+ if (bcmerror) {
+ brcmf_dbg(ERROR, "membytes transfer failed\n");
+ break;
+ }
+
+ /* Adjust for next transfer (if any) */
+ size -= dsize;
+ if (size) {
+ data += dsize;
+ address += dsize;
+ bcmerror = brcmf_sdcard_set_sbaddr_window(bus->sdiodev,
+ address);
+ if (bcmerror) {
+ brcmf_dbg(ERROR, "window change failed\n");
+ break;
+ }
+ sdaddr = 0;
+ dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
+ }
+ }
+
+xfer_done:
+ /* Return the window to backplane enumeration space for core access */
+ if (brcmf_sdcard_set_sbaddr_window(bus->sdiodev, bus->sdiodev->sbwad))
+ brcmf_dbg(ERROR, "FAILED to set window back to 0x%x\n",
+ bus->sdiodev->sbwad);
+
+ return bcmerror;
+}
+
+#ifdef BCMDBG
+#define CONSOLE_LINE_MAX 192
+
+static int brcmf_sdbrcm_readconsole(struct brcmf_bus *bus)
+{
+ struct brcmf_console *c = &bus->console;
+ u8 line[CONSOLE_LINE_MAX], ch;
+ u32 n, idx, addr;
+ int rv;
+
+ /* Don't do anything until FWREADY updates console address */
+ if (bus->console_addr == 0)
+ return 0;
+
+ /* Read console log struct */
+ addr = bus->console_addr + offsetof(struct rte_console, log_le);
+ rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *)&c->log_le,
+ sizeof(c->log_le));
+ if (rv < 0)
+ return rv;
+
+ /* Allocate console buffer (one time only) */
+ if (c->buf == NULL) {
+ c->bufsize = le32_to_cpu(c->log_le.buf_size);
+ c->buf = kmalloc(c->bufsize, GFP_ATOMIC);
+ if (c->buf == NULL)
+ return -ENOMEM;
+ }
+
+ idx = le32_to_cpu(c->log_le.idx);
+
+ /* Protect against corrupt value */
+ if (idx > c->bufsize)
+ return -EBADE;
+
+ /* Skip reading the console buffer if the index pointer
+ has not moved */
+ if (idx == c->last)
+ return 0;
+
+ /* Read the console buffer */
+ addr = le32_to_cpu(c->log_le.buf);
+ rv = brcmf_sdbrcm_membytes(bus, false, addr, c->buf, c->bufsize);
+ if (rv < 0)
+ return rv;
+
+ while (c->last != idx) {
+ for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+ if (c->last == idx) {
+ /* This would output a partial line.
+ * Instead, back up the buffer pointer
+ * and output this line next time around.
+ */
+ if (c->last >= n)
+ c->last -= n;
+ else
+ c->last = c->bufsize - n;
+ goto break2;
+ }
+ ch = c->buf[c->last];
+ c->last = (c->last + 1) % c->bufsize;
+ if (ch == '\n')
+ break;
+ line[n] = ch;
+ }
+
+ if (n > 0) {
+ if (line[n - 1] == '\r')
+ n--;
+ line[n] = 0;
+ printk(KERN_DEBUG "CONSOLE: %s\n", line);
+ }
+ }
+break2:
+
+ return 0;
+}
+#endif /* BCMDBG */
+
+static int brcmf_tx_frame(struct brcmf_bus *bus, u8 *frame, u16 len)
+{
+ int i;
+ int ret;
+
+ bus->ctrl_frame_stat = false;
+ ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, frame, len, NULL);
+
+ if (ret < 0) {
+ /* On failure, abort the command and terminate the frame */
+ brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
+ ret);
+ bus->tx_sderrs++;
+
+ brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
+
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
+ bus->f1regdata++;
+
+ for (i = 0; i < 3; i++) {
+ u8 hi, lo;
+ hi = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCHI,
+ NULL);
+ lo = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_WFRAMEBCLO,
+ NULL);
+ bus->f1regdata += 2;
+ if (hi == 0 && lo == 0)
+ break;
+ }
+ return ret;
+ }
+
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+
+ return ret;
+}
+
+int
+brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
+{
+ u8 *frame;
+ u16 len;
+ u32 swheader;
+ uint retries = 0;
+ u8 doff = 0;
+ int ret = -1;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Back up the pointer to make room for the bus header */
+ frame = msg - SDPCM_HDRLEN;
+ len = (msglen += SDPCM_HDRLEN);
+
+ /* Add alignment padding (optional for ctl frames) */
+ doff = ((unsigned long)frame % BRCMF_SDALIGN);
+ if (doff) {
+ frame -= doff;
+ len += doff;
+ msglen += doff;
+ memset(frame, 0, doff + SDPCM_HDRLEN);
+ }
+ /* precondition: doff < BRCMF_SDALIGN */
+ doff += SDPCM_HDRLEN;
+
+ /* Round send length to next SDIO block */
+ if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+ u16 pad = bus->blocksize - (len % bus->blocksize);
+ if ((pad <= bus->roundup) && (pad < bus->blocksize))
+ len += pad;
+ } else if (len % BRCMF_SDALIGN) {
+ len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
+ }
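+
+ /* Example (illustrative): with blocksize 64 and len 130, pad is
+ * 64 - (130 % 64) = 62, so len is rounded up to 192 provided
+ * bus->roundup >= 62.
+ */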
+
+ /* Satisfy length-alignment requirements */
+ if (len & (ALIGNMENT - 1))
+ len = roundup(len, ALIGNMENT);
+
+ /* precondition: IS_ALIGNED((unsigned long)frame, 2) */
+
+ /* Need to lock here to protect txseq and SDIO tx calls */
+ down(&bus->sdsem);
+
+ bus_wake(bus);
+
+ /* Make sure backplane clock is on */
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+
+ /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
+ *(__le16 *) frame = cpu_to_le16((u16) msglen);
+ *(((__le16 *) frame) + 1) = cpu_to_le16(~msglen);
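+ /* e.g. msglen 0x0040 produces the tag bytes 40 00 bf ff at the
+ * start of the frame (little-endian len, then its complement) */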
+
+ /* Software tag: channel, sequence number, data offset */
+ swheader =
+ ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) &
+ SDPCM_CHANNEL_MASK)
+ | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) &
+ SDPCM_DOFFSET_MASK);
+ put_unaligned_le32(swheader, frame + SDPCM_FRAMETAG_LEN);
+ put_unaligned_le32(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+
+ if (!data_ok(bus)) {
+ brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
+ bus->tx_max, bus->tx_seq);
+ bus->ctrl_frame_stat = true;
+ /* Send from dpc */
+ bus->ctrl_frame_buf = frame;
+ bus->ctrl_frame_len = len;
+
+ brcmf_sdbrcm_wait_for_event(bus, &bus->ctrl_frame_stat);
+
+ if (bus->ctrl_frame_stat == false) {
+ brcmf_dbg(INFO, "ctrl_frame_stat == false\n");
+ ret = 0;
+ } else {
+ brcmf_dbg(INFO, "ctrl_frame_stat == true\n");
+ ret = -1;
+ }
+ }
+
+ if (ret == -1) {
+#ifdef BCMDBG
+ if (BRCMF_BYTES_ON() && BRCMF_CTL_ON()) {
+ printk(KERN_DEBUG "Tx Frame:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ frame, len);
+ } else if (BRCMF_HDRS_ON()) {
+ printk(KERN_DEBUG "TxHdr:\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
+ frame, min_t(u16, len, 16));
+ }
+#endif
+
+ do {
+ ret = brcmf_tx_frame(bus, frame, len);
+ } while (ret < 0 && retries++ < TXRETRIES);
+ }
+
+ if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = false;
+ brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
+ }
+
+ up(&bus->sdsem);
+
+ if (ret)
+ bus->drvr->tx_ctlerrs++;
+ else
+ bus->drvr->tx_ctlpkts++;
+
+ return ret ? -EIO : 0;
+}
+
+int
+brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
+{
+ int timeleft;
+ uint rxlen = 0;
+ bool pending;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Wait until control frame is available */
+ timeleft = brcmf_sdbrcm_dcmd_resp_wait(bus, &bus->rxlen, &pending);
+
+ down(&bus->sdsem);
+ rxlen = bus->rxlen;
+ memcpy(msg, bus->rxctl, min(msglen, rxlen));
+ bus->rxlen = 0;
+ up(&bus->sdsem);
+
+ if (rxlen) {
+ brcmf_dbg(CTL, "resumed on rxctl frame, got %d expected %d\n",
+ rxlen, msglen);
+ } else if (timeleft == 0) {
+ brcmf_dbg(ERROR, "resumed on timeout\n");
+ } else if (pending == true) {
+ brcmf_dbg(CTL, "cancelled\n");
+ return -ERESTARTSYS;
+ } else {
+ brcmf_dbg(CTL, "resumed for unknown reason?\n");
+ }
+
+ if (rxlen)
+ bus->drvr->rx_ctlpkts++;
+ else
+ bus->drvr->rx_ctlerrs++;
+
+ return rxlen ? (int)rxlen : -ETIMEDOUT;
+}
+
+static int brcmf_sdbrcm_downloadvars(struct brcmf_bus *bus, void *arg, int len)
+{
+ int bcmerror = 0;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Basic sanity checks */
+ if (bus->drvr->up) {
+ bcmerror = -EISCONN;
+ goto err;
+ }
+ if (!len) {
+ bcmerror = -EOVERFLOW;
+ goto err;
+ }
+
+ /* Free the old ones and replace with passed variables */
+ kfree(bus->vars);
+
+ bus->vars = kmalloc(len, GFP_ATOMIC);
+ bus->varsz = bus->vars ? len : 0;
+ if (bus->vars == NULL) {
+ bcmerror = -ENOMEM;
+ goto err;
+ }
+
+ /* Copy the passed variables, which should include the
+ terminating double-null */
+ memcpy(bus->vars, arg, bus->varsz);
+err:
+ return bcmerror;
+}
+
+static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus)
+{
+ int bcmerror = 0;
+ u32 varsize;
+ u32 varaddr;
+ u8 *vbuffer;
+ u32 varsizew;
+ __le32 varsizew_le;
+#ifdef BCMDBG
+ char *nvram_ularray;
+#endif /* BCMDBG */
+
+ /* Even if there are no vars to be written, we still
+ need to set the ramsize. */
+ varsize = bus->varsz ? roundup(bus->varsz, 4) : 0;
+ varaddr = (bus->ramsize - 4) - varsize;
+
+ if (bus->vars) {
+ vbuffer = kzalloc(varsize, GFP_ATOMIC);
+ if (!vbuffer)
+ return -ENOMEM;
+
+ memcpy(vbuffer, bus->vars, bus->varsz);
+
+ /* Write the vars list */
+ bcmerror =
+ brcmf_sdbrcm_membytes(bus, true, varaddr, vbuffer, varsize);
+#ifdef BCMDBG
+ /* Verify NVRAM bytes */
+ brcmf_dbg(INFO, "Compare NVRAM dl & ul; varsize=%d\n", varsize);
+ nvram_ularray = kmalloc(varsize, GFP_ATOMIC);
+ if (!nvram_ularray)
+ return -ENOMEM;
+
+ /* Upload image to verify downloaded contents. */
+ memset(nvram_ularray, 0xaa, varsize);
+
+ /* Read the vars list to temp buffer for comparison */
+ bcmerror =
+ brcmf_sdbrcm_membytes(bus, false, varaddr, nvram_ularray,
+ varsize);
+ if (bcmerror) {
+ brcmf_dbg(ERROR, "error %d on reading %d nvram bytes at 0x%08x\n",
+ bcmerror, varsize, varaddr);
+ }
+ /* Compare the org NVRAM with the one read from RAM */
+ if (memcmp(vbuffer, nvram_ularray, varsize))
+ brcmf_dbg(ERROR, "Downloaded NVRAM image is corrupted\n");
+ else
+ brcmf_dbg(ERROR, "Download/Upload/Compare of NVRAM ok\n");
+
+ kfree(nvram_ularray);
+#endif /* BCMDBG */
+
+ kfree(vbuffer);
+ }
+
+ /* adjust to the user specified RAM */
+ brcmf_dbg(INFO, "Physical memory size: %d\n", bus->ramsize);
+ brcmf_dbg(INFO, "Vars are at %d, orig varsize is %d\n",
+ varaddr, varsize);
+ varsize = ((bus->ramsize - 4) - varaddr);
+
+ /*
+ * Determine the length token:
+ * Varsize, converted to words, in lower 16-bits, checksum
+ * in upper 16-bits.
+ */
+ if (bcmerror) {
+ varsizew = 0;
+ varsizew_le = cpu_to_le32(0);
+ } else {
+ varsizew = varsize / 4;
+ varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
+ varsizew_le = cpu_to_le32(varsizew);
+ }
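+
+ /* Worked example: varsize 0x100 gives varsizew 0x40 and a length
+ * token of 0xffbf0040 (word count in the low half, its one's
+ * complement in the high half) */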
+
+ brcmf_dbg(INFO, "New varsize is %d, length token=0x%08x\n",
+ varsize, varsizew);
+
+ /* Write the length token to the last word */
+ bcmerror = brcmf_sdbrcm_membytes(bus, true, (bus->ramsize - 4),
+ (u8 *)&varsizew_le, 4);
+
+ return bcmerror;
+}
+
+static void
+brcmf_sdbrcm_chip_disablecore(struct brcmf_sdio_dev *sdiodev, u32 corebase)
+{
+ u32 regdata;
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbtmstatelow), 4);
+ if (regdata & SBTML_RESET)
+ return;
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbtmstatelow), 4);
+ if ((regdata & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) != 0) {
+ /*
+ * set target reject and spin until busy is clear
+ * (preserve core-specific bits)
+ */
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbtmstatelow), 4);
+ brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow),
+ 4, regdata | SBTML_REJ);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbtmstatelow), 4);
+ udelay(1);
+ SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbtmstatehigh), 4) &
+ SBTMH_BUSY), 100000);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbtmstatehigh), 4);
+ if (regdata & SBTMH_BUSY)
+ brcmf_dbg(ERROR, "ARM core still busy\n");
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbidlow), 4);
+ if (regdata & SBIDL_INIT) {
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbimstate), 4) |
+ SBIM_RJ;
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(corebase, sbimstate), 4,
+ regdata);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbimstate), 4);
+ udelay(1);
+ SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbimstate), 4) &
+ SBIM_BY), 100000);
+ }
+
+ /* set reset and reject while enabling the clocks */
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(corebase, sbtmstatelow), 4,
+ (((SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+ SBTML_REJ | SBTML_RESET));
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbtmstatelow), 4);
+ udelay(10);
+
+ /* clear the initiator reject bit */
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbidlow), 4);
+ if (regdata & SBIDL_INIT) {
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbimstate), 4) &
+ ~SBIM_RJ;
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(corebase, sbimstate), 4,
+ regdata);
+ }
+ }
+
+ /* leave reset and reject asserted */
+ brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
+ (SBTML_REJ | SBTML_RESET));
+ udelay(1);
+}
+
+static void
+brcmf_sdbrcm_chip_resetcore(struct brcmf_sdio_dev *sdiodev, u32 corebase)
+{
+ u32 regdata;
+
+ /*
+ * Must do the disable sequence first to work for
+ * arbitrary current core state.
+ */
+ brcmf_sdbrcm_chip_disablecore(sdiodev, corebase);
+
+ /*
+ * Now do the initialization sequence.
+ * set reset while enabling the clock and
+ * forcing them on throughout the core
+ */
+ brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
+ ((SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
+ SBTML_RESET);
+ udelay(1);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbtmstatehigh), 4);
+ if (regdata & SBTMH_SERR)
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(corebase, sbtmstatehigh), 4, 0);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(corebase, sbimstate), 4);
+ if (regdata & (SBIM_IBE | SBIM_TO))
+ brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbimstate), 4,
+ regdata & ~(SBIM_IBE | SBIM_TO));
+
+ /* clear reset and allow it to propagate throughout the core */
+ brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
+ (SICF_FGC << SBTML_SICF_SHIFT) |
+ (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
+ udelay(1);
+
+ /* leave clock enabled */
+ brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
+ (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
+ udelay(1);
+}
+
+static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
+{
+ uint retries;
+ u32 regdata;
+ int bcmerror = 0;
+
+ /* To enter download state, disable ARM and reset SOCRAM.
+ * To exit download state, simply reset ARM (default is RAM boot).
+ */
+ if (enter) {
+ bus->alp_only = true;
+
+ brcmf_sdbrcm_chip_disablecore(bus->sdiodev,
+ bus->ci->armcorebase);
+
+ brcmf_sdbrcm_chip_resetcore(bus->sdiodev, bus->ci->ramcorebase);
+
+ /* Clear the top bit of memory */
+ if (bus->ramsize) {
+ u32 zeros = 0;
+ brcmf_sdbrcm_membytes(bus, true, bus->ramsize - 4,
+ (u8 *)&zeros, 4);
+ }
+ } else {
+ regdata = brcmf_sdcard_reg_read(bus->sdiodev,
+ CORE_SB(bus->ci->ramcorebase, sbtmstatelow), 4);
+ regdata &= (SBTML_RESET | SBTML_REJ_MASK |
+ (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
+ if ((SICF_CLOCK_EN << SBTML_SICF_SHIFT) != regdata) {
+ brcmf_dbg(ERROR, "SOCRAM core is down after reset?\n");
+ bcmerror = -EBADE;
+ goto fail;
+ }
+
+ bcmerror = brcmf_sdbrcm_write_vars(bus);
+ if (bcmerror) {
+ brcmf_dbg(ERROR, "no vars written to RAM\n");
+ bcmerror = 0;
+ }
+
+ w_sdreg32(bus, 0xFFFFFFFF,
+ offsetof(struct sdpcmd_regs, intstatus), &retries);
+
+ brcmf_sdbrcm_chip_resetcore(bus->sdiodev, bus->ci->armcorebase);
+
+ /* Allow HT Clock now that the ARM is running. */
+ bus->alp_only = false;
+
+ bus->drvr->busstate = BRCMF_BUS_LOAD;
+ }
+fail:
+ return bcmerror;
+}
+
+static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus)
+{
+ if (bus->firmware->size < bus->fw_ptr + len)
+ len = bus->firmware->size - bus->fw_ptr;
+
+ memcpy(buf, &bus->firmware->data[bus->fw_ptr], len);
+ bus->fw_ptr += len;
+ return len;
+}
+
+MODULE_FIRMWARE(BCM4329_FW_NAME);
+MODULE_FIRMWARE(BCM4329_NV_NAME);
+
+static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus)
+{
+ int offset = 0;
+ uint len;
+ u8 *memblock = NULL, *memptr;
+ int ret;
+
+ brcmf_dbg(INFO, "Enter\n");
+
+ bus->fw_name = BCM4329_FW_NAME;
+ ret = request_firmware(&bus->firmware, bus->fw_name,
+ &bus->sdiodev->func[2]->dev);
+ if (ret) {
+ brcmf_dbg(ERROR, "Fail to request firmware %d\n", ret);
+ return ret;
+ }
+ bus->fw_ptr = 0;
+
+ memptr = memblock = kmalloc(MEMBLOCK + BRCMF_SDALIGN, GFP_ATOMIC);
+ if (memblock == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ if ((u32)(unsigned long)memblock % BRCMF_SDALIGN)
+ memptr += (BRCMF_SDALIGN -
+ ((u32)(unsigned long)memblock % BRCMF_SDALIGN));
+
+ /* Download image */
+ while ((len =
+ brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus))) {
+ ret = brcmf_sdbrcm_membytes(bus, true, offset, memptr, len);
+ if (ret) {
+ brcmf_dbg(ERROR, "error %d on writing %d membytes at 0x%08x\n",
+ ret, MEMBLOCK, offset);
+ goto err;
+ }
+
+ offset += MEMBLOCK;
+ }
+
+err:
+ kfree(memblock);
+
+ release_firmware(bus->firmware);
+ bus->fw_ptr = 0;
+
+ return ret;
+}
+
+/*
+ * brcmf_process_nvram_vars: takes a buffer of "<var>=<value>\n" lines
+ * read from a file and ending in a NUL.
+ * Removes carriage returns, empty lines, and comment lines, and
+ * converts newlines to NULs.
+ * Shortens the buffer as needed and pads with NULs. End of buffer is
+ * marked by two NULs.
+ */
+
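+/*
+ * Illustrative trace of the function below: the buffer
+ * "# note\r\nssid=test\r\n\nkey=42\n" is rewritten in place as
+ * "ssid=test\0key=42\0" followed by NUL padding; the returned
+ * length (17 here) includes the NULs that replaced the newlines
+ * but not the padding.
+ */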
+static uint brcmf_process_nvram_vars(char *varbuf, uint len)
+{
+ char *dp;
+ bool findNewline;
+ int column;
+ uint buf_len, n;
+
+ dp = varbuf;
+
+ findNewline = false;
+ column = 0;
+
+ for (n = 0; n < len; n++) {
+ if (varbuf[n] == 0)
+ break;
+ if (varbuf[n] == '\r')
+ continue;
+ if (findNewline && varbuf[n] != '\n')
+ continue;
+ findNewline = false;
+ if (varbuf[n] == '#') {
+ findNewline = true;
+ continue;
+ }
+ if (varbuf[n] == '\n') {
+ if (column == 0)
+ continue;
+ *dp++ = 0;
+ column = 0;
+ continue;
+ }
+ *dp++ = varbuf[n];
+ column++;
+ }
+ buf_len = dp - varbuf;
+
+ while (dp < varbuf + n)
+ *dp++ = 0;
+
+ return buf_len;
+}
+
+static int brcmf_sdbrcm_download_nvram(struct brcmf_bus *bus)
+{
+ uint len;
+ char *memblock = NULL;
+ char *bufp;
+ int ret;
+
+ bus->nv_name = BCM4329_NV_NAME;
+ ret = request_firmware(&bus->firmware, bus->nv_name,
+ &bus->sdiodev->func[2]->dev);
+ if (ret) {
+ brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret);
+ return ret;
+ }
+ bus->fw_ptr = 0;
+
+ memblock = kmalloc(MEMBLOCK, GFP_ATOMIC);
+ if (memblock == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ len = brcmf_sdbrcm_get_image(memblock, MEMBLOCK, bus);
+
+ if (len > 0 && len < MEMBLOCK) {
+ bufp = (char *)memblock;
+ bufp[len] = 0;
+ len = brcmf_process_nvram_vars(bufp, len);
+ bufp += len;
+ *bufp++ = 0;
+ if (len)
+ ret = brcmf_sdbrcm_downloadvars(bus, memblock, len + 1);
+ if (ret)
+ brcmf_dbg(ERROR, "error downloading vars: %d\n", ret);
+ } else {
+ brcmf_dbg(ERROR, "error reading nvram file: %d\n", len);
+ ret = -EIO;
+ }
+
+err:
+ kfree(memblock);
+
+ release_firmware(bus->firmware);
+ bus->fw_ptr = 0;
+
+ return ret;
+}
+
+static int _brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
+{
+ int bcmerror = -1;
+
+ /* Keep arm in reset */
+ if (brcmf_sdbrcm_download_state(bus, true)) {
+ brcmf_dbg(ERROR, "error placing ARM core in reset\n");
+ goto err;
+ }
+
+ /* External image takes precedence if specified */
+ if (brcmf_sdbrcm_download_code_file(bus)) {
+ brcmf_dbg(ERROR, "dongle image file download failed\n");
+ goto err;
+ }
+
+ /* External nvram takes precedence if specified */
+ if (brcmf_sdbrcm_download_nvram(bus))
+ brcmf_dbg(ERROR, "dongle nvram file download failed\n");
+
+ /* Take arm out of reset */
+ if (brcmf_sdbrcm_download_state(bus, false)) {
+ brcmf_dbg(ERROR, "error getting out of ARM core reset\n");
+ goto err;
+ }
+
+ bcmerror = 0;
+
+err:
+ return bcmerror;
+}
+
+static bool
+brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
+{
+ bool ret;
+
+ /* Download the firmware */
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+
+ ret = _brcmf_sdbrcm_download_firmware(bus) == 0;
+
+ brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
+
+ return ret;
+}
+
+void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
+{
+ u32 local_hostintmask;
+ u8 saveclk;
+ uint retries;
+ int err;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (bus->watchdog_tsk) {
+ send_sig(SIGTERM, bus->watchdog_tsk, 1);
+ kthread_stop(bus->watchdog_tsk);
+ bus->watchdog_tsk = NULL;
+ }
+
+ if (bus->dpc_tsk && bus->dpc_tsk != current) {
+ send_sig(SIGTERM, bus->dpc_tsk, 1);
+ kthread_stop(bus->dpc_tsk);
+ bus->dpc_tsk = NULL;
+ }
+
+ down(&bus->sdsem);
+
+ bus_wake(bus);
+
+ /* Enable clock for device interrupts */
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+
+ /* Disable and clear interrupts at the chip level also */
+ w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask), &retries);
+ local_hostintmask = bus->hostintmask;
+ bus->hostintmask = 0;
+
+ /* Change our idea of bus state */
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
+
+ /* Force clocks on backplane to be sure F2 interrupt propagates */
+ saveclk = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (!err) {
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
+ }
+ if (err)
+ brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err);
+
+ /* Turn off the bus (F2), free any pending packets */
+ brcmf_dbg(INTR, "disable SDIO interrupts\n");
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
+ SDIO_FUNC_ENABLE_1, NULL);
+
+ /* Clear any pending interrupts now that F2 is disabled */
+ w_sdreg32(bus, local_hostintmask,
+ offsetof(struct sdpcmd_regs, intstatus), &retries);
+
+ /* Turn off the backplane clock (only) */
+ brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
+
+ /* Clear the data packet queues */
+ brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
+
+ /* Clear any held glomming stuff */
+ if (bus->glomd)
+ brcmu_pkt_buf_free_skb(bus->glomd);
+
+ if (bus->glom)
+ brcmu_pkt_buf_free_skb(bus->glom);
+
+ bus->glom = bus->glomd = NULL;
+
+ /* Clear rx control and wake any waiters */
+ bus->rxlen = 0;
+ brcmf_sdbrcm_dcmd_resp_wake(bus);
+
+ /* Reset some F2 state stuff */
+ bus->rxskip = false;
+ bus->tx_seq = bus->rx_seq = 0;
+
+ up(&bus->sdsem);
+}
+
+int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
+{
+ struct brcmf_bus *bus = drvr->bus;
+ unsigned long timeout;
+ uint retries = 0;
+ u8 ready, enable;
+ int err, ret = 0;
+ u8 saveclk;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* try to download image and nvram to the dongle */
+ if (drvr->busstate == BRCMF_BUS_DOWN) {
+ if (!(brcmf_sdbrcm_download_firmware(bus)))
+ return -1;
+ }
+
+ if (!bus->drvr)
+ return 0;
+
+ /* Start the watchdog timer */
+ bus->drvr->tickcnt = 0;
+ brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+
+ down(&bus->sdsem);
+
+ /* Make sure backplane clock is on, needed to generate F2 interrupt */
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+ if (bus->clkstate != CLK_AVAIL)
+ goto exit;
+
+ /* Force clocks on backplane to be sure F2 interrupt propagates */
+ saveclk =
+ brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (!err) {
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
+ }
+ if (err) {
+ brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err);
+ goto exit;
+ }
+
+ /* Enable function 2 (frame transfers) */
+ w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
+ offsetof(struct sdpcmd_regs, tosbmailboxdata), &retries);
+ enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
+
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
+ enable, NULL);
+
+ timeout = jiffies + msecs_to_jiffies(BRCMF_WAIT_F2RDY);
+ ready = 0;
+ while (enable != ready) {
+ ready = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_0,
+ SDIO_CCCR_IORx, NULL);
+ if (time_after(jiffies, timeout))
+ break;
+ else if (time_after(jiffies, timeout -
+ msecs_to_jiffies(BRCMF_WAIT_F2RDY) + msecs_to_jiffies(50)))
+ /* prevent busy waiting if it takes too long */
+ msleep_interruptible(20);
+ }
+
+ brcmf_dbg(INFO, "enable 0x%02x, ready 0x%02x\n", enable, ready);
+
+ /* If F2 successfully enabled, set core and enable interrupts */
+ if (ready == enable) {
+ /* Set up the interrupt mask and enable interrupts */
+ bus->hostintmask = HOSTINTMASK;
+ w_sdreg32(bus, bus->hostintmask,
+ offsetof(struct sdpcmd_regs, hostintmask), &retries);
+
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_WATERMARK, 8, &err);
+
+ /* Set bus state according to enable result */
+ drvr->busstate = BRCMF_BUS_DATA;
+ } else {
+ /* Disable F2 again */
+ enable = SDIO_FUNC_ENABLE_1;
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0,
+ SDIO_CCCR_IOEx, enable, NULL);
+ }
+
+ /* Restore previous clock setting */
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
+
+ /* If we didn't come up, turn off backplane clock */
+ if (drvr->busstate != BRCMF_BUS_DATA)
+ brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
+
+exit:
+ up(&bus->sdsem);
+
+ return ret;
+}
+
+void brcmf_sdbrcm_isr(void *arg)
+{
+ struct brcmf_bus *bus = (struct brcmf_bus *) arg;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (!bus) {
+ brcmf_dbg(ERROR, "bus is null pointer, exiting\n");
+ return;
+ }
+
+ if (bus->drvr->busstate == BRCMF_BUS_DOWN) {
+ brcmf_dbg(ERROR, "bus is down. we have nothing to do\n");
+ return;
+ }
+ /* Count the interrupt call */
+ bus->intrcount++;
+ bus->ipend = true;
+
+ /* Shouldn't get this interrupt if we're sleeping? */
+ if (bus->sleeping) {
+ brcmf_dbg(ERROR, "INTERRUPT WHILE SLEEPING??\n");
+ return;
+ }
+
+ /* Disable additional interrupts (is this needed now)? */
+ if (!bus->intr)
+ brcmf_dbg(ERROR, "isr w/o interrupt configured!\n");
+
+ bus->dpc_sched = true;
+ if (bus->dpc_tsk)
+ complete(&bus->dpc_wait);
+}
+
+static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr)
+{
+ struct brcmf_bus *bus;
+
+ brcmf_dbg(TIMER, "Enter\n");
+
+ bus = drvr->bus;
+
+ /* Ignore the timer if simulating bus down */
+ if (bus->sleeping)
+ return false;
+
+ down(&bus->sdsem);
+
+ /* Poll period: check device if appropriate. */
+ if (bus->poll && (++bus->polltick >= bus->pollrate)) {
+ u32 intstatus = 0;
+
+ /* Reset poll tick */
+ bus->polltick = 0;
+
+ /* Check device if no interrupts */
+ if (!bus->intr || (bus->intrcount == bus->lastintrs)) {
+
+ if (!bus->dpc_sched) {
+ u8 devpend;
+ devpend = brcmf_sdcard_cfg_read(bus->sdiodev,
+ SDIO_FUNC_0, SDIO_CCCR_INTx,
+ NULL);
+ intstatus =
+ devpend & (INTR_STATUS_FUNC1 |
+ INTR_STATUS_FUNC2);
+ }
+
+ /* If there is something, make like the ISR and
+ schedule the DPC */
+ if (intstatus) {
+ bus->pollcnt++;
+ bus->ipend = true;
+
+ bus->dpc_sched = true;
+ if (bus->dpc_tsk)
+ complete(&bus->dpc_wait);
+ }
+ }
+
+ /* Update interrupt tracking */
+ bus->lastintrs = bus->intrcount;
+ }
+#ifdef BCMDBG
+ /* Poll for console output periodically */
+ if (drvr->busstate == BRCMF_BUS_DATA && bus->console_interval != 0) {
+ bus->console.count += BRCMF_WD_POLL_MS;
+ if (bus->console.count >= bus->console_interval) {
+ bus->console.count -= bus->console_interval;
+ /* Make sure backplane clock is on */
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+ if (brcmf_sdbrcm_readconsole(bus) < 0)
+ /* stop on error */
+ bus->console_interval = 0;
+ }
+ }
+#endif /* BCMDBG */
+
+ /* On idle timeout clear activity flag and/or turn off clock */
+ if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
+ if (++bus->idlecount >= bus->idletime) {
+ bus->idlecount = 0;
+ if (bus->activity) {
+ bus->activity = false;
+ brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
+ } else {
+ brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
+ }
+ }
+ }
+
+ up(&bus->sdsem);
+
+ return bus->ipend;
+}
+
+static bool brcmf_sdbrcm_chipmatch(u16 chipid)
+{
+ if (chipid == BCM4329_CHIP_ID)
+ return true;
+ return false;
+}
+
+static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ kfree(bus->rxbuf);
+ bus->rxctl = bus->rxbuf = NULL;
+ bus->rxlen = 0;
+
+ kfree(bus->databuf);
+ bus->databuf = NULL;
+}
+
+static bool brcmf_sdbrcm_probe_malloc(struct brcmf_bus *bus)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (bus->drvr->maxctl) {
+ bus->rxblen =
+ roundup((bus->drvr->maxctl + SDPCM_HDRLEN),
+ ALIGNMENT) + BRCMF_SDALIGN;
+ bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
+ if (!(bus->rxbuf))
+ goto fail;
+ }
+
+ /* Allocate buffer to receive glomed packet */
+ bus->databuf = kmalloc(MAX_DATA_BUF, GFP_ATOMIC);
+ if (!(bus->databuf)) {
+ /* release rxbuf which was allocated above; clear the
+ pointer so it is not freed again on release */
+ kfree(bus->rxbuf);
+ bus->rxbuf = NULL;
+ goto fail;
+ }
+
+ /* Align the buffer */
+ if ((unsigned long)bus->databuf % BRCMF_SDALIGN)
+ bus->dataptr = bus->databuf + (BRCMF_SDALIGN -
+ ((unsigned long)bus->databuf % BRCMF_SDALIGN));
+ else
+ bus->dataptr = bus->databuf;
+
+ return true;
+
+fail:
+ return false;
+}
+
+/* SDIO Pad drive strength to select value mappings */
+struct sdiod_drive_str {
+ u8 strength; /* Pad Drive Strength in mA */
+ u8 sel; /* Chip-specific select value */
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 1 */
+static const struct sdiod_drive_str sdiod_drive_strength_tab1[] = {
+ {4, 0x2},
+ {2, 0x3},
+ {1, 0x0},
+ {0, 0x0}
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
+static const struct sdiod_drive_str sdiod_drive_strength_tab2[] = {
+ {12, 0x7},
+ {10, 0x6},
+ {8, 0x5},
+ {6, 0x4},
+ {4, 0x2},
+ {2, 0x1},
+ {0, 0x0}
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
+static const struct sdiod_drive_str sdiod_drive_strength_tab3[] = {
+ {32, 0x7},
+ {26, 0x6},
+ {22, 0x5},
+ {16, 0x4},
+ {12, 0x3},
+ {8, 0x2},
+ {4, 0x1},
+ {0, 0x0}
+};
+
+#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
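+/* e.g. SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1) packs the chip id into the
+ * upper half-word: 0x43250001, assuming BCM4325_CHIP_ID == 0x4325 */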
+
+static void brcmf_sdbrcm_sdiod_drive_strength_init(struct brcmf_bus *bus,
+ u32 drivestrength)
+{
+ const struct sdiod_drive_str *str_tab = NULL;
+ u32 str_mask = 0;
+ u32 str_shift = 0;
+ char chn[8];
+
+ if (!(bus->ci->cccaps & CC_CAP_PMU))
+ return;
+
+ switch (SDIOD_DRVSTR_KEY(bus->ci->chip, bus->ci->pmurev)) {
+ case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1):
+ str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab1;
+ str_mask = 0x30000000;
+ str_shift = 28;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2):
+ case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3):
+ str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab2;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
+ str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab3;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ default:
+ brcmf_dbg(ERROR, "No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+ brcmu_chipname(bus->ci->chip, chn, 8),
+ bus->ci->chiprev, bus->ci->pmurev);
+ break;
+ }
+
+ if (str_tab != NULL) {
+ u32 drivestrength_sel = 0;
+ u32 cc_data_temp;
+ int i;
+
+ for (i = 0; str_tab[i].strength != 0; i++) {
+ if (drivestrength >= str_tab[i].strength) {
+ drivestrength_sel = str_tab[i].sel;
+ break;
+ }
+ }
+
+ brcmf_sdcard_reg_write(bus->sdiodev,
+ CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr),
+ 4, 1);
+ cc_data_temp = brcmf_sdcard_reg_read(bus->sdiodev,
+ CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr), 4);
+ cc_data_temp &= ~str_mask;
+ drivestrength_sel <<= str_shift;
+ cc_data_temp |= drivestrength_sel;
+ brcmf_sdcard_reg_write(bus->sdiodev,
+ CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr),
+ 4, cc_data_temp);
+
+ brcmf_dbg(INFO, "SDIO: %dmA drive strength selected, set to 0x%08x\n",
+ drivestrength, cc_data_temp);
+ }
+}
+
+static int
+brcmf_sdbrcm_chip_recognition(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u32 regs)
+{
+ u32 regdata;
+
+ /*
+ * Get CC core rev
+ * Chipid is assumed to be at offset 0 from the regs arg
+ * For different chiptypes or old sdio hosts w/o chipcommon,
+ * other ways of recognition should be added here.
+ */
+ ci->cccorebase = regs;
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_CC_REG(ci->cccorebase, chipid), 4);
+ ci->chip = regdata & CID_ID_MASK;
+ ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
+
+ brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
+
+ /* Address of cores for new chips should be added here */
+ switch (ci->chip) {
+ case BCM4329_CHIP_ID:
+ ci->buscorebase = BCM4329_CORE_BUS_BASE;
+ ci->ramcorebase = BCM4329_CORE_SOCRAM_BASE;
+ ci->armcorebase = BCM4329_CORE_ARM_BASE;
+ ci->ramsize = BCM4329_RAMSIZE;
+ break;
+ default:
+ brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip);
+ return -ENODEV;
+ }
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->cccorebase, sbidhigh), 4);
+ ci->ccrev = SBCOREREV(regdata);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_CC_REG(ci->cccorebase, pmucapabilities), 4);
+ ci->pmurev = regdata & PCAP_REV_MASK;
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->buscorebase, sbidhigh), 4);
+ ci->buscorerev = SBCOREREV(regdata);
+ ci->buscoretype = (regdata & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT;
+
+ brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
+ ci->ccrev, ci->pmurev, ci->buscorerev, ci->buscoretype);
+
+ /* get chipcommon capabilities */
+ ci->cccaps = brcmf_sdcard_reg_read(sdiodev,
+ CORE_CC_REG(ci->cccorebase, capabilities), 4);
+
+ return 0;
+}
+
+static int
+brcmf_sdbrcm_chip_attach(struct brcmf_bus *bus, u32 regs)
+{
+ struct chip_info *ci;
+ int err;
+ u8 clkval, clkset;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* alloc chip_info_t */
+ ci = kzalloc(sizeof(struct chip_info), GFP_ATOMIC);
+ if (NULL == ci)
+ return -ENOMEM;
+
+ /* bus/core/clk setup for register access */
+ /* Try forcing SDIO core to do ALPAvail request only */
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ if (err) {
+ brcmf_dbg(ERROR, "error writing for HT off\n");
+ goto fail;
+ }
+
+ /* If register supported, wait for ALPAvail and then force ALP */
+ /* This may take up to 15 milliseconds */
+ clkval = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+ if ((clkval & ~SBSDIO_AVBITS) == clkset) {
+ SPINWAIT(((clkval =
+ brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ NULL)),
+ !SBSDIO_ALPAV(clkval)),
+ PMU_MAX_TRANSITION_DLY);
+ if (!SBSDIO_ALPAV(clkval)) {
+ brcmf_dbg(ERROR, "timeout on ALPAV wait, clkval 0x%02x\n",
+ clkval);
+ err = -EBUSY;
+ goto fail;
+ }
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF |
+ SBSDIO_FORCE_ALP;
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ clkset, &err);
+ udelay(65);
+ } else {
+ brcmf_dbg(ERROR, "ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
+ clkset, clkval);
+ err = -EACCES;
+ goto fail;
+ }
+
+ /* Also, disable the extra SDIO pull-ups */
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+
+ err = brcmf_sdbrcm_chip_recognition(bus->sdiodev, ci, regs);
+ if (err)
+ goto fail;
+
+ /*
+ * Make sure any on-chip ARM is off (in case strapping is wrong),
+ * or downloaded code was already running.
+ */
+ brcmf_sdbrcm_chip_disablecore(bus->sdiodev, ci->armcorebase);
+
+ brcmf_sdcard_reg_write(bus->sdiodev,
+ CORE_CC_REG(ci->cccorebase, gpiopullup), 4, 0);
+ brcmf_sdcard_reg_write(bus->sdiodev,
+ CORE_CC_REG(ci->cccorebase, gpiopulldown), 4, 0);
+
+ /* Disable F2 to clear any intermediate frame state on the dongle */
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
+ SDIO_FUNC_ENABLE_1, NULL);
+
+ /* WAR: cmd52 backplane read so core HW will drop ALPReq */
+ clkval = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ 0, NULL);
+
+ /* Done with backplane-dependent accesses, can drop clock... */
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+
+ bus->ci = ci;
+ return 0;
+fail:
+ bus->ci = NULL;
+ kfree(ci);
+ return err;
+}
+
+static bool
+brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
+{
+ u8 clkctl = 0;
+ int err = 0;
+ int reg_addr;
+ u32 reg_val;
+
+ bus->alp_only = true;
+
+ /* Return the window to backplane enumeration space for core access */
+ if (brcmf_sdcard_set_sbaddr_window(bus->sdiodev, SI_ENUM_BASE))
+ brcmf_dbg(ERROR, "FAILED to return to SI_ENUM_BASE\n");
+
+#ifdef BCMDBG
+ printk(KERN_DEBUG "F1 signature read @0x18000000=0x%4x\n",
+ brcmf_sdcard_reg_read(bus->sdiodev, SI_ENUM_BASE, 4));
+
+#endif /* BCMDBG */
+
+ /*
+ * Force PLL off until brcmf_sdbrcm_chip_attach()
+ * programs PLL control regs
+ */
+
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ BRCMF_INIT_CLKCTL1, &err);
+ if (!err)
+ clkctl =
+ brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
+
+ if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
+ brcmf_dbg(ERROR, "ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
+ err, BRCMF_INIT_CLKCTL1, clkctl);
+ goto fail;
+ }
+
+ if (brcmf_sdbrcm_chip_attach(bus, regsva)) {
+ brcmf_dbg(ERROR, "brcmf_sdbrcm_chip_attach failed!\n");
+ goto fail;
+ }
+
+ if (!brcmf_sdbrcm_chipmatch((u16) bus->ci->chip)) {
+ brcmf_dbg(ERROR, "unsupported chip: 0x%04x\n", bus->ci->chip);
+ goto fail;
+ }
+
+ brcmf_sdbrcm_sdiod_drive_strength_init(bus, SDIO_DRIVE_STRENGTH);
+
+ /* Get info on the ARM and SOCRAM cores... */
+ brcmf_sdcard_reg_read(bus->sdiodev,
+ CORE_SB(bus->ci->armcorebase, sbidhigh), 4);
+ bus->ramsize = bus->ci->ramsize;
+ if (!(bus->ramsize)) {
+ brcmf_dbg(ERROR, "failed to find SOCRAM memory!\n");
+ goto fail;
+ }
+
+ /* Set core control so an SDIO reset does a backplane reset */
+ reg_addr = bus->ci->buscorebase +
+ offsetof(struct sdpcmd_regs, corecontrol);
+ reg_val = brcmf_sdcard_reg_read(bus->sdiodev, reg_addr, sizeof(u32));
+ brcmf_sdcard_reg_write(bus->sdiodev, reg_addr, sizeof(u32),
+ reg_val | CC_BPRESEN);
+
+ brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
+
+ /* Locate an appropriately-aligned portion of hdrbuf */
+ bus->rxhdr = (u8 *) roundup((unsigned long)&bus->hdrbuf[0],
+ BRCMF_SDALIGN);
+
+ /* Set the poll and/or interrupt flags */
+ bus->intr = true;
+ bus->poll = false;
+ if (bus->poll)
+ bus->pollrate = 1;
+
+ return true;
+
+fail:
+ return false;
+}
+
+static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* Disable F2 to clear any intermediate frame state on the dongle */
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
+ SDIO_FUNC_ENABLE_1, NULL);
+
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sleeping = false;
+ bus->rxflow = false;
+
+ /* Done with backplane-dependent accesses, can drop clock... */
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+
+ /* ...and initialize clock/power states */
+ bus->clkstate = CLK_SDONLY;
+ bus->idletime = BRCMF_IDLE_INTERVAL;
+ bus->idleclock = BRCMF_IDLE_ACTIVE;
+
+ /* Query the F2 block size, set roundup accordingly */
+ bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
+ bus->roundup = min(max_roundup, bus->blocksize);
+
+ /* bus module does not support packet chaining */
+ bus->use_rxchain = false;
+ bus->sd_rxchain = false;
+
+ return true;
+}
+
+static int
+brcmf_sdbrcm_watchdog_thread(void *data)
+{
+ struct brcmf_bus *bus = (struct brcmf_bus *)data;
+
+ allow_signal(SIGTERM);
+ /* Run until signal received */
+ while (1) {
+ if (kthread_should_stop())
+ break;
+ if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
+ brcmf_sdbrcm_bus_watchdog(bus->drvr);
+ /* Count the tick for reference */
+ bus->drvr->tickcnt++;
+ } else
+ break;
+ }
+ return 0;
+}
+
+static void
+brcmf_sdbrcm_watchdog(unsigned long data)
+{
+ struct brcmf_bus *bus = (struct brcmf_bus *)data;
+
+ if (bus->watchdog_tsk) {
+ complete(&bus->watchdog_wait);
+ /* Reschedule the watchdog */
+ if (bus->wd_timer_valid)
+ mod_timer(&bus->timer,
+ jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
+ }
+}
+
+static void
+brcmf_sdbrcm_chip_detach(struct brcmf_bus *bus)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ kfree(bus->ci);
+ bus->ci = NULL;
+}
+
+static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (bus->ci) {
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+ brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
+ brcmf_sdbrcm_chip_detach(bus);
+ if (bus->vars && bus->varsz)
+ kfree(bus->vars);
+ bus->vars = NULL;
+ }
+
+ brcmf_dbg(TRACE, "Disconnected\n");
+}
+
+/* Detach and free everything */
+static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (bus) {
+ /* De-register interrupt handler */
+ brcmf_sdcard_intr_dereg(bus->sdiodev);
+
+ if (bus->drvr) {
+ brcmf_detach(bus->drvr);
+ brcmf_sdbrcm_release_dongle(bus);
+ bus->drvr = NULL;
+ }
+
+ brcmf_sdbrcm_release_malloc(bus);
+
+ kfree(bus);
+ }
+
+ brcmf_dbg(TRACE, "Disconnected\n");
+}
+
+void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
+ u32 regsva, struct brcmf_sdio_dev *sdiodev)
+{
+ int ret;
+ struct brcmf_bus *bus;
+
+ /* Init global variables at run-time, not as part of the declaration.
+ * This is required to support init/de-init of the driver.
+ * Initialization of globals as part of the declaration results in
+ * non-deterministic behavior since the value of the globals may be
+ * different on the first time that the driver is initialized vs
+ * subsequent initializations.
+ */
+ brcmf_c_init();
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* We make an assumption about address window mappings:
+ * regsva == SI_ENUM_BASE */
+
+ /* Allocate private bus interface state */
+ bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
+ if (!bus)
+ goto fail;
+
+ bus->sdiodev = sdiodev;
+ sdiodev->bus = bus;
+ bus->txbound = BRCMF_TXBOUND;
+ bus->rxbound = BRCMF_RXBOUND;
+ bus->txminmax = BRCMF_TXMINMAX;
+ bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
+ bus->usebufpool = false; /* Use bufpool if allocated,
+ else use locally malloced rxbuf */
+
+ /* attempt to attach to the dongle */
+ if (!(brcmf_sdbrcm_probe_attach(bus, regsva))) {
+ brcmf_dbg(ERROR, "brcmf_sdbrcm_probe_attach failed\n");
+ goto fail;
+ }
+
+ spin_lock_init(&bus->txqlock);
+ init_waitqueue_head(&bus->ctrl_wait);
+ init_waitqueue_head(&bus->dcmd_resp_wait);
+
+ /* Set up the watchdog timer */
+ init_timer(&bus->timer);
+ bus->timer.data = (unsigned long)bus;
+ bus->timer.function = brcmf_sdbrcm_watchdog;
+
+ /* Initialize thread based operation and lock */
+ sema_init(&bus->sdsem, 1);
+
+ /* Initialize watchdog thread */
+ init_completion(&bus->watchdog_wait);
+ bus->watchdog_tsk = kthread_run(brcmf_sdbrcm_watchdog_thread,
+ bus, "brcmf_watchdog");
+ if (IS_ERR(bus->watchdog_tsk)) {
+ printk(KERN_WARNING
+ "brcmf_watchdog thread failed to start\n");
+ bus->watchdog_tsk = NULL;
+ }
+ /* Initialize DPC thread */
+ init_completion(&bus->dpc_wait);
+ bus->dpc_tsk = kthread_run(brcmf_sdbrcm_dpc_thread,
+ bus, "brcmf_dpc");
+ if (IS_ERR(bus->dpc_tsk)) {
+ printk(KERN_WARNING
+ "brcmf_dpc thread failed to start\n");
+ bus->dpc_tsk = NULL;
+ }
+
+ /* Attach to the brcmf/OS/network interface */
+ bus->drvr = brcmf_attach(bus, SDPCM_RESERVE);
+ if (!bus->drvr) {
+ brcmf_dbg(ERROR, "brcmf_attach failed\n");
+ goto fail;
+ }
+
+ /* Allocate buffers */
+ if (!(brcmf_sdbrcm_probe_malloc(bus))) {
+ brcmf_dbg(ERROR, "brcmf_sdbrcm_probe_malloc failed\n");
+ goto fail;
+ }
+
+ if (!(brcmf_sdbrcm_probe_init(bus))) {
+ brcmf_dbg(ERROR, "brcmf_sdbrcm_probe_init failed\n");
+ goto fail;
+ }
+
+ /* Register interrupt callback, but mask it (not operational yet). */
+ brcmf_dbg(INTR, "disable SDIO interrupts (not interested yet)\n");
+ ret = brcmf_sdcard_intr_reg(bus->sdiodev);
+ if (ret != 0) {
+ brcmf_dbg(ERROR, "FAILED: sdcard_intr_reg returned %d\n", ret);
+ goto fail;
+ }
+ brcmf_dbg(INTR, "registered SDIO interrupt function ok\n");
+
+ brcmf_dbg(INFO, "completed!!\n");
+
+ /* if firmware path present try to download and bring up bus */
+ ret = brcmf_bus_start(bus->drvr);
+ if (ret != 0) {
+ if (ret == -ENOLINK) {
+ brcmf_dbg(ERROR, "dongle is not responding\n");
+ goto fail;
+ }
+ }
+ /* Ok, have the per-port tell the stack we're open for business */
+ if (brcmf_net_attach(bus->drvr, 0) != 0) {
+ brcmf_dbg(ERROR, "Net attach failed!!\n");
+ goto fail;
+ }
+
+ return bus;
+
+fail:
+ brcmf_sdbrcm_release(bus);
+ return NULL;
+}
+
+void brcmf_sdbrcm_disconnect(void *ptr)
+{
+ struct brcmf_bus *bus = (struct brcmf_bus *)ptr;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (bus)
+ brcmf_sdbrcm_release(bus);
+
+ brcmf_dbg(TRACE, "Disconnected\n");
+}
+
+struct device *brcmf_bus_get_device(struct brcmf_bus *bus)
+{
+ return &bus->sdiodev->func[2]->dev;
+}
+
+void
+brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick)
+{
+ /* don't start the wd until fw is loaded */
+ if (bus->drvr->busstate == BRCMF_BUS_DOWN)
+ return;
+
+ /* Totally stop the timer */
+ if (!wdtick && bus->wd_timer_valid == true) {
+ del_timer_sync(&bus->timer);
+ bus->wd_timer_valid = false;
+ bus->save_ms = wdtick;
+ return;
+ }
+
+ if (wdtick) {
+ if (bus->save_ms != BRCMF_WD_POLL_MS) {
+ if (bus->wd_timer_valid == true)
+ /* Stop timer and restart at new value */
+ del_timer_sync(&bus->timer);
+
+ /* Create timer again when watchdog period is
+ dynamically changed or in the first instance
+ */
+ bus->timer.expires =
+ jiffies + BRCMF_WD_POLL_MS * HZ / 1000;
+ add_timer(&bus->timer);
+
+ } else {
+ /* Re arm the timer, at last watchdog period */
+ mod_timer(&bus->timer,
+ jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
+ }
+
+ bus->wd_timer_valid = true;
+ bus->save_ms = wdtick;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <brcmu_utils.h>
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver utilities.");
+MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
+MODULE_LICENSE("Dual BSD/GPL");
+
+struct sk_buff *brcmu_pkt_buf_get_skb(uint len)
+{
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(len);
+ if (skb) {
+ skb_put(skb, len);
+ skb->priority = 0;
+ }
+
+ return skb;
+}
+EXPORT_SYMBOL(brcmu_pkt_buf_get_skb);
+
+/* Free the driver packet. Free the tag if present */
+void brcmu_pkt_buf_free_skb(struct sk_buff *skb)
+{
+ struct sk_buff *nskb;
+ int nest = 0;
+
+ /* perversion: we use skb->next to chain multi-skb packets */
+ while (skb) {
+ nskb = skb->next;
+ skb->next = NULL;
+
+ if (skb->destructor)
+ /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
+ * destructor exists
+ */
+ dev_kfree_skb_any(skb);
+ else
+ /* can free immediately (even in_irq()) if destructor
+ * does not exist
+ */
+ dev_kfree_skb(skb);
+
+ nest++;
+ skb = nskb;
+ }
+}
+EXPORT_SYMBOL(brcmu_pkt_buf_free_skb);
+
+
+/* copy a buffer into a pkt buffer chain */
+uint brcmu_pktfrombuf(struct sk_buff *p, uint offset, int len,
+ unsigned char *buf)
+{
+ uint n, ret = 0;
+
+ /* skip 'offset' bytes */
+ for (; p && offset; p = p->next) {
+ if (offset < (uint) (p->len))
+ break;
+ offset -= p->len;
+ }
+
+ if (!p)
+ return 0;
+
+ /* copy the data */
+ for (; p && len; p = p->next) {
+ n = min((uint) (p->len) - offset, (uint) len);
+ memcpy(p->data + offset, buf, n);
+ buf += n;
+ len -= n;
+ ret += n;
+ offset = 0;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(brcmu_pktfrombuf);
+
+/* return total length of buffer chain */
+uint brcmu_pkttotlen(struct sk_buff *p)
+{
+ uint total;
+
+ total = 0;
+ for (; p; p = p->next)
+ total += p->len;
+ return total;
+}
+EXPORT_SYMBOL(brcmu_pkttotlen);
+
+/*
+ * osl multiple-precedence packet queue
+ * hi_prec is always >= the number of the highest non-empty precedence
+ */
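+/*
+ * Note: within each precedence queue, skb->prev is (ab)used as the
+ * forward link, so the list runs q->head -> ... -> q->tail by
+ * following ->prev pointers (see brcmu_pktq_penq/brcmu_pktq_pdeq).
+ */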
+struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
+ struct sk_buff *p)
+{
+ struct pktq_prec *q;
+
+ if (pktq_full(pq) || pktq_pfull(pq, prec))
+ return NULL;
+
+ q = &pq->q[prec];
+
+ if (q->head)
+ q->tail->prev = p;
+ else
+ q->head = p;
+
+ q->tail = p;
+ q->len++;
+
+ pq->len++;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (u8) prec;
+
+ return p;
+}
+EXPORT_SYMBOL(brcmu_pktq_penq);
+
+struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
+ struct sk_buff *p)
+{
+ struct pktq_prec *q;
+
+ if (pktq_full(pq) || pktq_pfull(pq, prec))
+ return NULL;
+
+ q = &pq->q[prec];
+
+ if (q->head == NULL)
+ q->tail = p;
+
+ p->prev = q->head;
+ q->head = p;
+ q->len++;
+
+ pq->len++;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (u8) prec;
+
+ return p;
+}
+EXPORT_SYMBOL(brcmu_pktq_penq_head);
+
+struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec)
+{
+ struct pktq_prec *q;
+ struct sk_buff *p;
+
+ q = &pq->q[prec];
+
+ p = q->head;
+ if (p == NULL)
+ return NULL;
+
+ q->head = p->prev;
+ if (q->head == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ pq->len--;
+
+ p->prev = NULL;
+
+ return p;
+}
+EXPORT_SYMBOL(brcmu_pktq_pdeq);
+
+struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec)
+{
+ struct pktq_prec *q;
+ struct sk_buff *p, *prev;
+
+ q = &pq->q[prec];
+
+ p = q->head;
+ if (p == NULL)
+ return NULL;
+
+ for (prev = NULL; p != q->tail; p = p->prev)
+ prev = p;
+
+ if (prev)
+ prev->prev = NULL;
+ else
+ q->head = NULL;
+
+ q->tail = prev;
+ q->len--;
+
+ pq->len--;
+
+ return p;
+}
+EXPORT_SYMBOL(brcmu_pktq_pdeq_tail);
+
+void
+brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir,
+ bool (*fn)(struct sk_buff *, void *), void *arg)
+{
+ struct pktq_prec *q;
+ struct sk_buff *p, *prev = NULL;
+
+ q = &pq->q[prec];
+ p = q->head;
+ while (p) {
+ if (fn == NULL || (*fn) (p, arg)) {
+ bool head = (p == q->head);
+ if (head)
+ q->head = p->prev;
+ else
+ prev->prev = p->prev;
+ p->prev = NULL;
+ brcmu_pkt_buf_free_skb(p);
+ q->len--;
+ pq->len--;
+ p = (head ? q->head : prev->prev);
+ } else {
+ prev = p;
+ p = p->prev;
+ }
+ }
+
+ if (q->head == NULL)
+ q->tail = NULL;
+}
+EXPORT_SYMBOL(brcmu_pktq_pflush);
+
+void brcmu_pktq_flush(struct pktq *pq, bool dir,
+ bool (*fn)(struct sk_buff *, void *), void *arg)
+{
+ int prec;
+ for (prec = 0; prec < pq->num_prec; prec++)
+ brcmu_pktq_pflush(pq, prec, dir, fn, arg);
+}
+EXPORT_SYMBOL(brcmu_pktq_flush);
+
+void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len)
+{
+ int prec;
+
+ /* pq is variable size; only zero out what's requested */
+ memset(pq, 0,
+ offsetof(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
+
+ pq->num_prec = (u16) num_prec;
+
+ pq->max = (u16) max_len;
+
+ for (prec = 0; prec < num_prec; prec++)
+ pq->q[prec].max = pq->max;
+}
+EXPORT_SYMBOL(brcmu_pktq_init);
+
+struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out)
+{
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ for (prec = 0; prec < pq->hi_prec; prec++)
+ if (pq->q[prec].head)
+ break;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ return pq->q[prec].tail;
+}
+EXPORT_SYMBOL(brcmu_pktq_peek_tail);
+
+/* Return sum of lengths of a specific set of precedences */
+int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp)
+{
+ int prec, len;
+
+ len = 0;
+
+ for (prec = 0; prec <= pq->hi_prec; prec++)
+ if (prec_bmp & (1 << prec))
+ len += pq->q[prec].len;
+
+ return len;
+}
+EXPORT_SYMBOL(brcmu_pktq_mlen);
+
+/* Priority dequeue from a specific set of precedences */
+struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
+ int *prec_out)
+{
+ struct pktq_prec *q;
+ struct sk_buff *p;
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
+ if (prec-- == 0)
+ return NULL;
+
+ q = &pq->q[prec];
+
+ p = q->head;
+ if (p == NULL)
+ return NULL;
+
+ q->head = p->prev;
+ if (q->head == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ pq->len--;
+
+ p->prev = NULL;
+
+ return p;
+}
+EXPORT_SYMBOL(brcmu_pktq_mdeq);
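+
+/*
+ * Illustrative usage sketch (not part of the original driver): queue two
+ * packets at different precedences, then drain with a bitmap that allows
+ * both. brcmu_pktq_mdeq() always hands back the highest allowed non-empty
+ * precedence first, so pkt_hi comes out before pkt_lo. The names pkt_hi,
+ * pkt_lo and the precedence values are hypothetical.
+ *
+ *	struct sk_buff *skb;
+ *	int prec;
+ *
+ *	brcmu_pktq_penq(&pq, 3, pkt_hi);
+ *	brcmu_pktq_penq(&pq, 0, pkt_lo);
+ *	skb = brcmu_pktq_mdeq(&pq, (1 << 3) | (1 << 0), &prec);
+ *	now skb == pkt_hi and prec == 3
+ */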
+
+#if defined(BCMDBG)
+/* pretty hex print a pkt buffer chain */
+void brcmu_prpkt(const char *msg, struct sk_buff *p0)
+{
+ struct sk_buff *p;
+
+ if (msg && (msg[0] != '\0'))
+ printk(KERN_DEBUG "%s:\n", msg);
+
+ for (p = p0; p; p = p->next)
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, p->data, p->len);
+}
+EXPORT_SYMBOL(brcmu_prpkt);
+#endif /* defined(BCMDBG) */
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+struct brcmu_tlv *brcmu_parse_tlvs(void *buf, int buflen, uint key)
+{
+ struct brcmu_tlv *elt;
+ int totlen;
+
+ elt = (struct brcmu_tlv *) buf;
+ totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= 2) {
+ int len = elt->len;
+
+ /* validate remaining totlen */
+ if ((elt->id == key) && (totlen >= (len + 2)))
+ return elt;
+
+ elt = (struct brcmu_tlv *) ((u8 *) elt + (len + 2));
+ totlen -= (len + 2);
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(brcmu_parse_tlvs);
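+
+/*
+ * Worked example (illustrative, byte values hypothetical): for the buffer
+ * { 0x01, 0x02, 0xaa, 0xbb, 0x03, 0x01, 0xcc } and key 3, the first
+ * element has id 1 and len 2, so the walk advances by len + 2 = 4 bytes;
+ * the next element has id 3 and totlen (3) >= len + 2 (3), so
+ * brcmu_parse_tlvs(buf, 7, 3) returns a pointer to offset 4 of the buffer.
+ */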
+
+
+#if defined(BCMDBG)
+int
+brcmu_format_flags(const struct brcmu_bit_desc *bd, u32 flags, char *buf,
+ int len)
+{
+ int i;
+ char *p = buf;
+ char hexstr[16];
+ int slen = 0, nlen = 0;
+ u32 bit;
+ const char *name;
+
+ if (len < 2 || !buf)
+ return 0;
+
+ buf[0] = '\0';
+
+ for (i = 0; flags != 0; i++) {
+ bit = bd[i].bit;
+ name = bd[i].name;
+ if (bit == 0 && flags != 0) {
+ /* print any unnamed bits */
+ snprintf(hexstr, 16, "0x%X", flags);
+ name = hexstr;
+ flags = 0; /* exit loop */
+ } else if ((flags & bit) == 0)
+ continue;
+ flags &= ~bit;
+ nlen = strlen(name);
+ slen += nlen;
+ /* count btwn flag space */
+ if (flags != 0)
+ slen += 1;
+ /* need NULL char as well */
+ if (len <= slen)
+ break;
+ /* copy NULL char but don't count it */
+ strncpy(p, name, nlen + 1);
+ p += nlen;
+ /* copy btwn flag space and NULL char */
+ if (flags != 0)
+ p += snprintf(p, 2, " ");
+ len -= slen;
+ }
+
+ /* indicate the str was too short */
+ if (flags != 0) {
+ if (len < 2)
+ p -= 2 - len; /* overwrite last char */
+ p += snprintf(p, 2, ">");
+ }
+
+ return (int)(p - buf);
+}
+EXPORT_SYMBOL(brcmu_format_flags);
+
+/*
+ * print bytes formatted as hex to a string. return the resulting
+ * string length
+ */
+int brcmu_format_hex(char *str, const void *bytes, int len)
+{
+ int i;
+ char *p = str;
+ const u8 *src = (const u8 *)bytes;
+
+ for (i = 0; i < len; i++) {
+ p += snprintf(p, 3, "%02X", *src);
+ src++;
+ }
+ return (int)(p - str);
+}
+EXPORT_SYMBOL(brcmu_format_hex);
+#endif /* defined(BCMDBG) */
+
+char *brcmu_chipname(uint chipid, char *buf, uint len)
+{
+ const char *fmt;
+
+ fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+ snprintf(buf, len, fmt, chipid);
+ return buf;
+}
+EXPORT_SYMBOL(brcmu_chipname);
+
+uint brcmu_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
+{
+ uint len;
+
+ len = strlen(name) + 1;
+
+ if ((len + datalen) > buflen)
+ return 0;
+
+ strncpy(buf, name, buflen);
+
+ /* append data onto the end of the name string */
+ memcpy(&buf[len], data, datalen);
+ len += datalen;
+
+ return len;
+}
+EXPORT_SYMBOL(brcmu_mkiovar);
+
+/* Quarter dBm units to mW
+ * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
+ * Table is offset so the last entry is largest mW value that fits in
+ * a u16.
+ */
+
+#define QDBM_OFFSET 153 /* Offset for first entry */
+#define QDBM_TABLE_LEN 40 /* Table size */
+
+/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET.
+ * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2
+ */
+#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */
+
+/* Largest mW value that will round down to the last table entry,
+ * QDBM_OFFSET + QDBM_TABLE_LEN-1.
+ * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) +
+ * mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2.
+ */
+#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */
+
+static const u16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
+/* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */
+/* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000,
+/* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849,
+/* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119,
+/* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811,
+/* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096
+};
+
+u16 brcmu_qdbm_to_mw(u8 qdbm)
+{
+ uint factor = 1;
+ int idx = qdbm - QDBM_OFFSET;
+
+ if (idx >= QDBM_TABLE_LEN)
+ /* clamp to max u16 mW value */
+ return 0xFFFF;
+
+ /* scale the qdBm index up into the table range (0-39);
+ * each 40 qdBm step corresponds to a factor of 10 in mW.
+ */
+ while (idx < 0) {
+ idx += 40;
+ factor *= 10;
+ }
+
+ /* return the mW value scaled down to the correct factor of 10,
+ * adding in factor/2 to get proper rounding.
+ */
+ return (nqdBm_to_mW_map[idx] + factor / 2) / factor;
+}
+EXPORT_SYMBOL(brcmu_qdbm_to_mw);
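+
+/*
+ * Worked example (illustrative): brcmu_qdbm_to_mw(64), i.e. 16 dBm.
+ * idx = 64 - 153 = -89; three iterations add 40 each, giving idx = 31
+ * and factor = 1000. nqdBm_to_mW_map[31] = 39811, so the result is
+ * (39811 + 500) / 1000 = 40 mW, matching 16 dBm ~= 39.8 mW.
+ */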
+
+u8 brcmu_mw_to_qdbm(u16 mw)
+{
+ u8 qdbm;
+ int offset;
+ uint mw_uint = mw;
+ uint boundary;
+
+ /* handle boundary case */
+ if (mw_uint <= 1)
+ return 0;
+
+ offset = QDBM_OFFSET;
+
+ /* move mw into the range of the table */
+ while (mw_uint < QDBM_TABLE_LOW_BOUND) {
+ mw_uint *= 10;
+ offset -= 40;
+ }
+
+ for (qdbm = 0; qdbm < QDBM_TABLE_LEN - 1; qdbm++) {
+ boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm + 1] -
+ nqdBm_to_mW_map[qdbm]) / 2;
+ if (mw_uint < boundary)
+ break;
+ }
+
+ qdbm += (u8) offset;
+
+ return qdbm;
+}
+EXPORT_SYMBOL(brcmu_mw_to_qdbm);
+
+uint brcmu_bitcount(u8 *bitmap, uint length)
+{
+ uint bitcount = 0, i;
+ u8 tmp;
+ for (i = 0; i < length; i++) {
+ tmp = bitmap[i];
+ while (tmp) {
+ bitcount++;
+ tmp &= (tmp - 1);
+ }
+ }
+ return bitcount;
+}
+EXPORT_SYMBOL(brcmu_bitcount);
--- /dev/null
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
++#include <linux/export.h>
++
+#include <brcmu_wifi.h>
+
+/*
+ * Verify the chanspec is using a legal set of parameters, i.e. that the
+ * chanspec specified a band, bw, ctl_sb and channel and that the
+ * combination could be legal given any set of circumstances.
+ * RETURNS: true if the chanspec is malformed, false if it looks good.
+ */
+bool brcmu_chspec_malformed(u16 chanspec)
+{
+ /* must be 2G or 5G band */
+ if (!CHSPEC_IS5G(chanspec) && !CHSPEC_IS2G(chanspec))
+ return true;
+ /* must be 20 or 40 bandwidth */
+ if (!CHSPEC_IS40(chanspec) && !CHSPEC_IS20(chanspec))
+ return true;
+
+ /* 20 MHz b/w must have no ctl sb; 40 MHz must have a ctl sb */
+ if (CHSPEC_IS20(chanspec)) {
+ if (!CHSPEC_SB_NONE(chanspec))
+ return true;
+ } else if (!CHSPEC_SB_UPPER(chanspec) && !CHSPEC_SB_LOWER(chanspec)) {
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(brcmu_chspec_malformed);
+
+/*
+ * This function returns the channel number that control traffic is being sent
+ * on, for legacy channels this is just the channel number, for 40MHZ channels
+ * it is the upper or lower 20MHZ sideband depending on the chanspec selected.
+ */
+u8 brcmu_chspec_ctlchan(u16 chspec)
+{
+ u8 ctl_chan;
+
+ /* Is there a sideband? */
+ if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_NONE) {
+ return CHSPEC_CHANNEL(chspec);
+ } else {
+ /*
+ * we only support 40MHZ with sidebands. chanspec channel holds
+ * the centre frequency, use that and the side band information
+ * to reconstruct the control channel number
+ */
+ if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_UPPER)
+ /*
+ * control chan is the upper 20 MHZ SB of the
+ * 40MHZ channel
+ */
+ ctl_chan = upper_20_sb(CHSPEC_CHANNEL(chspec));
+ else
+ /*
+ * control chan is the lower 20 MHZ SB of the
+ * 40MHZ channel
+ */
+ ctl_chan = lower_20_sb(CHSPEC_CHANNEL(chspec));
+ }
+
+ return ctl_chan;
+}
+EXPORT_SYMBOL(brcmu_chspec_ctlchan);
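+
+/*
+ * Worked example (illustrative, assuming upper_20_sb()/lower_20_sb()
+ * step by 2 channel numbers, i.e. 10 MHz): for a 40 MHz chanspec whose
+ * channel field holds the centre channel 38, the upper sideband gives
+ * control channel 40 and the lower sideband gives control channel 36.
+ */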
+
+/*
+ * Return the channel number for a given frequency and base frequency.
+ * The returned channel number is relative to the given base frequency.
+ * If the given base frequency is zero, a base frequency of 5 GHz is assumed for
+ * frequencies from 5 - 6 GHz, and 2.407 GHz is assumed for 2.4 - 2.5 GHz.
+ *
+ * Frequency is specified in MHz.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
+ * 2.4 GHz and 5 GHz bands.
+ *
+ * The returned channel will be in the range [1, 14] in the 2.4 GHz band
+ * and [0, 200] otherwise.
+ * -1 is returned if the start_factor is WF_CHAN_FACTOR_2_4_G and the
+ * frequency is not a 2.4 GHz channel, or if the frequency is not an even
+ * multiple of 5 MHz from the base frequency to the base plus 1 GHz.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ */
+int brcmu_mhz2channel(uint freq, uint start_factor)
+{
+ int ch = -1;
+ uint base;
+ int offset;
+
+ /* take the default channel start frequency */
+ if (start_factor == 0) {
+ if (freq >= 2400 && freq <= 2500)
+ start_factor = WF_CHAN_FACTOR_2_4_G;
+ else if (freq >= 5000 && freq <= 6000)
+ start_factor = WF_CHAN_FACTOR_5_G;
+ }
+
+ if (freq == 2484 && start_factor == WF_CHAN_FACTOR_2_4_G)
+ return 14;
+
+ base = start_factor / 2;
+
+ /* check that the frequency is in 1GHz range of the base */
+ if ((freq < base) || (freq > base + 1000))
+ return -1;
+
+ offset = freq - base;
+ ch = offset / 5;
+
+ /* check that frequency is a 5MHz multiple from the base */
+ if (offset != (ch * 5))
+ return -1;
+
+ /* restricted channel range check for 2.4G */
+ if (start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 13))
+ return -1;
+
+ return ch;
+}
+EXPORT_SYMBOL(brcmu_mhz2channel);
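+
+/*
+ * Worked examples (illustrative, assuming WF_CHAN_FACTOR_2_4_G = 4814
+ * and WF_CHAN_FACTOR_5_G = 10000, i.e. bases of 2407 and 5000 MHz):
+ * brcmu_mhz2channel(2412, 0) picks the 2.4 GHz factor, offset = 5,
+ * so channel 1; brcmu_mhz2channel(5745, 0) picks the 5 GHz factor,
+ * offset = 745, so channel 149.
+ */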
*****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
+ #include <linux/module.h>
#include "iwl-bus.h"
-#include "iwl-agn.h"
-#include "iwl-core.h"
#include "iwl-io.h"
+#include "iwl-shared.h"
+#include "iwl-trans.h"
+#include "iwl-csr.h"
+#include "iwl-cfg.h"
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
* Larry Finger <Larry.Finger@lwfinger.net>
*****************************************************************************/
++#include <linux/moduleparam.h>
++
#include "wifi.h"
void rtl_dbgp_flag_init(struct ieee80211_hw *hw)
--- /dev/null
+/*
+ * Texas Instrument's NFC Driver For Shared Transport.
+ *
+ * NFC Driver acts as interface between NCI core and
+ * TI Shared Transport Layer.
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * Written by Ilan Elias <ilane@ti.com>
+ *
+ * Acknowledgements:
+ * This file is based on btwilink.c, which was written
+ * by Raja Mani and Pavan Savoy.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/platform_device.h>
++#include <linux/module.h>
+#include <linux/nfc.h>
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include <linux/ti_wilink_st.h>
+
+#define NFCWILINK_CHNL 12
+#define NFCWILINK_OPCODE 7
+#define NFCWILINK_MAX_FRAME_SIZE 300
+#define NFCWILINK_HDR_LEN 4
+#define NFCWILINK_OFFSET_LEN_IN_HDR 1
+#define NFCWILINK_LEN_SIZE 2
+#define NFCWILINK_REGISTER_TIMEOUT 8000 /* 8 sec */
+
+struct nfcwilink_hdr {
+ u8 chnl;
+ u8 opcode;
+ u16 len;
+} __packed;
+
+struct nfcwilink {
+ struct platform_device *pdev;
+ struct nci_dev *ndev;
+ unsigned long flags;
+
+ char st_register_cb_status;
+ long (*st_write) (struct sk_buff *);
+ struct completion st_register_completed;
+};
+
+/* NFCWILINK driver flags */
+enum {
+ NFCWILINK_RUNNING,
+};
+
+/* Called by ST when registration is complete */
+static void nfcwilink_register_complete(void *priv_data, char data)
+{
+ struct nfcwilink *drv = priv_data;
+
+ nfc_dev_dbg(&drv->pdev->dev, "register_complete entry");
+
+ /* store ST registration status */
+ drv->st_register_cb_status = data;
+
+ /* complete the wait in nfc_st_open() */
+ complete(&drv->st_register_completed);
+}
+
+/* Called by ST when receive data is available */
+static long nfcwilink_receive(void *priv_data, struct sk_buff *skb)
+{
+ struct nfcwilink *drv = priv_data;
+ int rc;
+
+ /* validate arguments before dereferencing them */
+ if (!skb)
+ return -EFAULT;
+
+ if (!drv) {
+ kfree_skb(skb);
+ return -EFAULT;
+ }
+
+ nfc_dev_dbg(&drv->pdev->dev, "receive entry, len %d", skb->len);
+
+ /* strip the ST header
+ (apart from the chnl byte, which is not received in the hdr) */
+ skb_pull(skb, (NFCWILINK_HDR_LEN-1));
+
+ skb->dev = (void *) drv->ndev;
+
+ /* Forward skb to NCI core layer */
+ rc = nci_recv_frame(skb);
+ if (rc < 0) {
+ nfc_dev_err(&drv->pdev->dev, "nci_recv_frame failed %d", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/* protocol structure registered with ST */
+static struct st_proto_s nfcwilink_proto = {
+ .chnl_id = NFCWILINK_CHNL,
+ .max_frame_size = NFCWILINK_MAX_FRAME_SIZE,
+ .hdr_len = (NFCWILINK_HDR_LEN-1), /* not including chnl byte */
+ .offset_len_in_hdr = NFCWILINK_OFFSET_LEN_IN_HDR,
+ .len_size = NFCWILINK_LEN_SIZE,
+ .reserve = 0,
+ .recv = nfcwilink_receive,
+ .reg_complete_cb = nfcwilink_register_complete,
+ .write = NULL,
+};
+
+static int nfcwilink_open(struct nci_dev *ndev)
+{
+ struct nfcwilink *drv = nci_get_drvdata(ndev);
+ unsigned long comp_ret;
+ int rc;
+
+ nfc_dev_dbg(&drv->pdev->dev, "open entry");
+
+ if (test_and_set_bit(NFCWILINK_RUNNING, &drv->flags)) {
+ rc = -EBUSY;
+ goto exit;
+ }
+
+ nfcwilink_proto.priv_data = drv;
+
+ init_completion(&drv->st_register_completed);
+ drv->st_register_cb_status = -EINPROGRESS;
+
+ rc = st_register(&nfcwilink_proto);
+ if (rc < 0) {
+ if (rc == -EINPROGRESS) {
+ comp_ret = wait_for_completion_timeout(
+ &drv->st_register_completed,
+ msecs_to_jiffies(NFCWILINK_REGISTER_TIMEOUT));
+
+ nfc_dev_dbg(&drv->pdev->dev,
+ "wait_for_completion_timeout returned %ld",
+ comp_ret);
+
+ if (comp_ret == 0) {
+ /* timeout */
+ rc = -ETIMEDOUT;
+ goto clear_exit;
+ } else if (drv->st_register_cb_status != 0) {
+ rc = drv->st_register_cb_status;
+ nfc_dev_err(&drv->pdev->dev,
+ "st_register_cb failed %d", rc);
+ goto clear_exit;
+ }
+ } else {
+ nfc_dev_err(&drv->pdev->dev,
+ "st_register failed %d", rc);
+ goto clear_exit;
+ }
+ }
+
+ /* st_register MUST fill the write callback */
+ BUG_ON(nfcwilink_proto.write == NULL);
+ drv->st_write = nfcwilink_proto.write;
+
+ goto exit;
+
+clear_exit:
+ clear_bit(NFCWILINK_RUNNING, &drv->flags);
+
+exit:
+ return rc;
+}
+
+static int nfcwilink_close(struct nci_dev *ndev)
+{
+ struct nfcwilink *drv = nci_get_drvdata(ndev);
+ int rc;
+
+ nfc_dev_dbg(&drv->pdev->dev, "close entry");
+
+ if (!test_and_clear_bit(NFCWILINK_RUNNING, &drv->flags))
+ return 0;
+
+ rc = st_unregister(&nfcwilink_proto);
+ if (rc)
+ nfc_dev_err(&drv->pdev->dev, "st_unregister failed %d", rc);
+
+ drv->st_write = NULL;
+
+ return rc;
+}
+
+static int nfcwilink_send(struct sk_buff *skb)
+{
+ struct nci_dev *ndev = (struct nci_dev *)skb->dev;
+ struct nfcwilink *drv = nci_get_drvdata(ndev);
+ struct nfcwilink_hdr hdr = {NFCWILINK_CHNL, NFCWILINK_OPCODE, 0x0000};
+ long len;
+
+ nfc_dev_dbg(&drv->pdev->dev, "send entry, len %d", skb->len);
+
+ if (!test_bit(NFCWILINK_RUNNING, &drv->flags))
+ return -EBUSY;
+
+ /* add the ST hdr to the start of the buffer */
+ hdr.len = skb->len;
+ memcpy(skb_push(skb, NFCWILINK_HDR_LEN), &hdr, NFCWILINK_HDR_LEN);
+
+ /* Insert skb to shared transport layer's transmit queue.
+ * Freeing skb memory is taken care in shared transport layer,
+ * so don't free skb memory here.
+ */
+ len = drv->st_write(skb);
+ if (len < 0) {
+ kfree_skb(skb);
+ nfc_dev_err(&drv->pdev->dev, "st_write failed %ld", len);
+ return -EFAULT;
+ }
+
+ return 0;
+}
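+
+/*
+ * Illustrative framing sketch (not from the original driver): on a
+ * little-endian CPU, a 3-byte NCI frame sent through nfcwilink_send()
+ * goes on the wire as 0x0c 0x07 0x03 0x00 followed by the NCI bytes,
+ * i.e. channel 12, opcode 7, then the 16-bit payload length.
+ */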
+
+static struct nci_ops nfcwilink_ops = {
+ .open = nfcwilink_open,
+ .close = nfcwilink_close,
+ .send = nfcwilink_send,
+};
+
+static int nfcwilink_probe(struct platform_device *pdev)
+{
+ struct nfcwilink *drv;
+ int rc;
+ u32 protocols;
+
+ nfc_dev_dbg(&pdev->dev, "probe entry");
+
+ drv = kzalloc(sizeof(struct nfcwilink), GFP_KERNEL);
+ if (!drv) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ drv->pdev = pdev;
+
+ protocols = NFC_PROTO_JEWEL_MASK
+ | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK
+ | NFC_PROTO_ISO14443_MASK
+ | NFC_PROTO_NFC_DEP_MASK;
+
+ drv->ndev = nci_allocate_device(&nfcwilink_ops,
+ protocols,
+ NFCWILINK_HDR_LEN,
+ 0);
+ if (!drv->ndev) {
+ nfc_dev_err(&pdev->dev, "nci_allocate_device failed");
+ rc = -ENOMEM;
+ goto free_exit;
+ }
+
+ nci_set_parent_dev(drv->ndev, &pdev->dev);
+ nci_set_drvdata(drv->ndev, drv);
+
+ rc = nci_register_device(drv->ndev);
+ if (rc < 0) {
+ nfc_dev_err(&pdev->dev, "nci_register_device failed %d", rc);
+ goto free_dev_exit;
+ }
+
+ dev_set_drvdata(&pdev->dev, drv);
+
+ goto exit;
+
+free_dev_exit:
+ nci_free_device(drv->ndev);
+
+free_exit:
+ kfree(drv);
+
+exit:
+ return rc;
+}
+
+static int nfcwilink_remove(struct platform_device *pdev)
+{
+ struct nfcwilink *drv = dev_get_drvdata(&pdev->dev);
+ struct nci_dev *ndev;
+
+ nfc_dev_dbg(&pdev->dev, "remove entry");
+
+ if (!drv)
+ return -EFAULT;
+
+ ndev = drv->ndev;
+
+ nci_unregister_device(ndev);
+ nci_free_device(ndev);
+
+ kfree(drv);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver nfcwilink_driver = {
+ .probe = nfcwilink_probe,
+ .remove = nfcwilink_remove,
+ .driver = {
+ .name = "nfcwilink",
+ .owner = THIS_MODULE,
+ },
+};
+
+/* ------- Module Init/Exit interfaces ------ */
+static int __init nfcwilink_init(void)
+{
+ printk(KERN_INFO "NFC Driver for TI WiLink");
+
+ return platform_driver_register(&nfcwilink_driver);
+}
+
+static void __exit nfcwilink_exit(void)
+{
+ platform_driver_unregister(&nfcwilink_driver);
+}
+
+module_init(nfcwilink_init);
+module_exit(nfcwilink_exit);
+
+/* ------ Module Info ------ */
+
+MODULE_AUTHOR("Ilan Elias <ilane@ti.com>");
+MODULE_DESCRIPTION("NFC Driver for TI Shared Transport");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*
+ * Core driver for the pin control subsystem
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#define pr_fmt(fmt) "pinctrl core: " fmt
+
+#include <linux/kernel.h>
++#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/radix-tree.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/machine.h>
+#include "core.h"
+#include "pinmux.h"
+
+/* Global list of pin control devices */
+static DEFINE_MUTEX(pinctrldev_list_mutex);
+static LIST_HEAD(pinctrldev_list);
+
+static void pinctrl_dev_release(struct device *dev)
+{
+ struct pinctrl_dev *pctldev = dev_get_drvdata(dev);
+ kfree(pctldev);
+}
+
+const char *pctldev_get_name(struct pinctrl_dev *pctldev)
+{
+ /* We're not allowed to register devices without a name */
+ return pctldev->desc->name;
+}
+EXPORT_SYMBOL_GPL(pctldev_get_name);
+
+void *pctldev_get_drvdata(struct pinctrl_dev *pctldev)
+{
+ return pctldev->driver_data;
+}
+EXPORT_SYMBOL_GPL(pctldev_get_drvdata);
+
+/**
+ * get_pctldev_from_dev() - look up pin controller device
+ * @dev: a device pointer, this may be NULL but then devname needs to be
+ * defined instead
+ * @devname: the name of a device instance, as returned by dev_name(), this
+ * may be NULL but then dev needs to be defined instead
+ *
+ * Looks up a pin control device matching a certain device name or pure device
+ * pointer; the pure device pointer takes precedence.
+ */
+struct pinctrl_dev *get_pctldev_from_dev(struct device *dev,
+ const char *devname)
+{
+ struct pinctrl_dev *pctldev = NULL;
+ bool found = false;
+
+ mutex_lock(&pinctrldev_list_mutex);
+ list_for_each_entry(pctldev, &pinctrldev_list, node) {
+ if (dev && &pctldev->dev == dev) {
+ /* Matched on device pointer */
+ found = true;
+ break;
+ }
+
+ if (devname &&
+ !strcmp(dev_name(&pctldev->dev), devname)) {
+ /* Matched on device name */
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&pinctrldev_list_mutex);
+
+ if (found)
+ return pctldev;
+
+ return NULL;
+}
+
+struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, int pin)
+{
+ struct pin_desc *pindesc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pctldev->pin_desc_tree_lock, flags);
+ pindesc = radix_tree_lookup(&pctldev->pin_desc_tree, pin);
+ spin_unlock_irqrestore(&pctldev->pin_desc_tree_lock, flags);
+
+ return pindesc;
+}
+
+/**
+ * pin_is_valid() - check if pin exists on controller
+ * @pctldev: the pin control device to check the pin on
+ * @pin: pin to check, use the local pin controller index number
+ *
+ * This tells us whether a certain pin exists on a certain pin controller or
+ * not. Pin lists may be sparse, so some pins may not exist.
+ */
+bool pin_is_valid(struct pinctrl_dev *pctldev, int pin)
+{
+ struct pin_desc *pindesc;
+
+ if (pin < 0)
+ return false;
+
+ pindesc = pin_desc_get(pctldev, pin);
+ if (pindesc == NULL)
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(pin_is_valid);
+
+/* Deletes a range of pin descriptors */
+static void pinctrl_free_pindescs(struct pinctrl_dev *pctldev,
+ const struct pinctrl_pin_desc *pins,
+ unsigned num_pins)
+{
+ int i;
+
+ spin_lock(&pctldev->pin_desc_tree_lock);
+ for (i = 0; i < num_pins; i++) {
+ struct pin_desc *pindesc;
+
+ pindesc = radix_tree_lookup(&pctldev->pin_desc_tree,
+ pins[i].number);
+ if (pindesc != NULL) {
+ radix_tree_delete(&pctldev->pin_desc_tree,
+ pins[i].number);
+ }
+ kfree(pindesc);
+ }
+ spin_unlock(&pctldev->pin_desc_tree_lock);
+}
+
+static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
+ unsigned number, const char *name)
+{
+ struct pin_desc *pindesc;
+
+ pindesc = pin_desc_get(pctldev, number);
+ if (pindesc != NULL) {
+ pr_err("pin %d already registered on %s\n", number,
+ pctldev->desc->name);
+ return -EINVAL;
+ }
+
+ pindesc = kzalloc(sizeof(*pindesc), GFP_KERNEL);
+ if (pindesc == NULL)
+ return -ENOMEM;
+ spin_lock_init(&pindesc->lock);
+
+ /* Set owner */
+ pindesc->pctldev = pctldev;
+
+ /* Copy optional basic pin info */
+ if (name)
+ strlcpy(pindesc->name, name, sizeof(pindesc->name));
+
+ spin_lock(&pctldev->pin_desc_tree_lock);
+ radix_tree_insert(&pctldev->pin_desc_tree, number, pindesc);
+ spin_unlock(&pctldev->pin_desc_tree_lock);
+ pr_debug("registered pin %d (%s) on %s\n",
+ number, name ? name : "(unnamed)", pctldev->desc->name);
+ return 0;
+}
+
+static int pinctrl_register_pins(struct pinctrl_dev *pctldev,
+ struct pinctrl_pin_desc const *pins,
+ unsigned num_descs)
+{
+ unsigned i;
+ int ret = 0;
+
+ for (i = 0; i < num_descs; i++) {
+ ret = pinctrl_register_one_pin(pctldev,
+ pins[i].number, pins[i].name);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * pinctrl_match_gpio_range() - check if a certain GPIO pin is in range
+ * @pctldev: pin controller device to check
+ * @gpio: gpio pin to check taken from the global GPIO pin space
+ *
+ * Tries to match a GPIO pin number to the ranges handled by a certain pin
+ * controller, return the range or NULL
+ */
+static struct pinctrl_gpio_range *
+pinctrl_match_gpio_range(struct pinctrl_dev *pctldev, unsigned gpio)
+{
+ struct pinctrl_gpio_range *range = NULL;
+
+ /* Loop over the ranges */
+ mutex_lock(&pctldev->gpio_ranges_lock);
+ list_for_each_entry(range, &pctldev->gpio_ranges, node) {
+ /* Check if we're in the valid range */
+ if (gpio >= range->base &&
+ gpio < range->base + range->npins) {
+ mutex_unlock(&pctldev->gpio_ranges_lock);
+ return range;
+ }
+ }
+ mutex_unlock(&pctldev->gpio_ranges_lock);
+
+ return NULL;
+}
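+
+/*
+ * Illustrative example: a range registered with .base = 32 and
+ * .npins = 16 claims global GPIOs 32..47, so a lookup for GPIO 40
+ * returns that range and a lookup for GPIO 48 falls through to NULL.
+ */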
+
+/**
+ * pinctrl_get_device_gpio_range() - find device for GPIO range
+ * @gpio: the pin to locate the pin controller for
+ * @outdev: the pin control device if found
+ * @outrange: the GPIO range if found
+ *
+ * Find the pin controller handling a certain GPIO pin from the pinspace of
+ * the GPIO subsystem, return the device and the matching GPIO range. Returns
+ * negative if the GPIO range could not be found in any device.
+ */
+int pinctrl_get_device_gpio_range(unsigned gpio,
+ struct pinctrl_dev **outdev,
+ struct pinctrl_gpio_range **outrange)
+{
+ struct pinctrl_dev *pctldev = NULL;
+
+ /* Loop over the pin controllers */
+ mutex_lock(&pinctrldev_list_mutex);
+ list_for_each_entry(pctldev, &pinctrldev_list, node) {
+ struct pinctrl_gpio_range *range;
+
+ range = pinctrl_match_gpio_range(pctldev, gpio);
+ if (range != NULL) {
+ *outdev = pctldev;
+ *outrange = range;
+ mutex_unlock(&pinctrldev_list_mutex);
+ return 0;
+ }
+ }
+ mutex_unlock(&pinctrldev_list_mutex);
+
+ return -EINVAL;
+}
+
+/**
+ * pinctrl_add_gpio_range() - register a GPIO range for a controller
+ * @pctldev: pin controller device to add the range to
+ * @range: the GPIO range to add
+ *
+ * This adds a range of GPIOs to be handled by a certain pin controller. Call
+ * this to register handled ranges after registering your pin controller.
+ */
+void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range)
+{
+ mutex_lock(&pctldev->gpio_ranges_lock);
+ list_add(&range->node, &pctldev->gpio_ranges);
+ mutex_unlock(&pctldev->gpio_ranges_lock);
+}
+
+/**
+ * pinctrl_remove_gpio_range() - remove a range of GPIOs from a pin controller
+ * @pctldev: pin controller device to remove the range from
+ * @range: the GPIO range to remove
+ */
+void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range)
+{
+ mutex_lock(&pctldev->gpio_ranges_lock);
+ list_del(&range->node);
+ mutex_unlock(&pctldev->gpio_ranges_lock);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static int pinctrl_pins_show(struct seq_file *s, void *what)
+{
+ struct pinctrl_dev *pctldev = s->private;
+ const struct pinctrl_ops *ops = pctldev->desc->pctlops;
+ unsigned pin;
+
+ seq_printf(s, "registered pins: %d\n", pctldev->desc->npins);
+ seq_printf(s, "max pin number: %d\n", pctldev->desc->maxpin);
+
+ /* The highest pin number needs to be included in the loop, thus <= */
+ for (pin = 0; pin <= pctldev->desc->maxpin; pin++) {
+ struct pin_desc *desc;
+
+ desc = pin_desc_get(pctldev, pin);
+ /* Pin space may be sparse */
+ if (desc == NULL)
+ continue;
+
+ seq_printf(s, "pin %d (%s) ", pin,
+ desc->name ? desc->name : "unnamed");
+
+ /* Driver-specific info per pin */
+ if (ops->pin_dbg_show)
+ ops->pin_dbg_show(pctldev, s, pin);
+
+ seq_puts(s, "\n");
+ }
+
+ return 0;
+}
+
+static int pinctrl_groups_show(struct seq_file *s, void *what)
+{
+ struct pinctrl_dev *pctldev = s->private;
+ const struct pinctrl_ops *ops = pctldev->desc->pctlops;
+ unsigned selector = 0;
+
+ /* No grouping */
+ if (!ops)
+ return 0;
+
+ seq_puts(s, "registered pin groups:\n");
+ while (ops->list_groups(pctldev, selector) >= 0) {
+ unsigned *pins;
+ unsigned num_pins;
+ const char *gname = ops->get_group_name(pctldev, selector);
+ int ret;
+ int i;
+
+ ret = ops->get_group_pins(pctldev, selector,
+ &pins, &num_pins);
+ if (ret)
+ seq_printf(s, "%s [ERROR GETTING PINS]\n",
+ gname);
+ else {
+ seq_printf(s, "group: %s, pins = [ ", gname);
+ for (i = 0; i < num_pins; i++)
+ seq_printf(s, "%d ", pins[i]);
+ seq_puts(s, "]\n");
+ }
+ selector++;
+ }
+
+ return 0;
+}
+
+static int pinctrl_gpioranges_show(struct seq_file *s, void *what)
+{
+ struct pinctrl_dev *pctldev = s->private;
+ struct pinctrl_gpio_range *range = NULL;
+
+ seq_puts(s, "GPIO ranges handled:\n");
+
+ /* Loop over the ranges */
+ mutex_lock(&pctldev->gpio_ranges_lock);
+ list_for_each_entry(range, &pctldev->gpio_ranges, node) {
+ seq_printf(s, "%u: %s [%u - %u]\n", range->id, range->name,
+ range->base, (range->base + range->npins - 1));
+ }
+ mutex_unlock(&pctldev->gpio_ranges_lock);
+
+ return 0;
+}
+
+static int pinctrl_devices_show(struct seq_file *s, void *what)
+{
+ struct pinctrl_dev *pctldev;
+
+ seq_puts(s, "name [pinmux]\n");
+ mutex_lock(&pinctrldev_list_mutex);
+ list_for_each_entry(pctldev, &pinctrldev_list, node) {
+ seq_printf(s, "%s ", pctldev->desc->name);
+ if (pctldev->desc->pmxops)
+ seq_puts(s, "yes");
+ else
+ seq_puts(s, "no");
+ seq_puts(s, "\n");
+ }
+ mutex_unlock(&pinctrldev_list_mutex);
+
+ return 0;
+}
+
+static int pinctrl_pins_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinctrl_pins_show, inode->i_private);
+}
+
+static int pinctrl_groups_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinctrl_groups_show, inode->i_private);
+}
+
+static int pinctrl_gpioranges_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinctrl_gpioranges_show, inode->i_private);
+}
+
+static int pinctrl_devices_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinctrl_devices_show, NULL);
+}
+
+static const struct file_operations pinctrl_pins_ops = {
+ .open = pinctrl_pins_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations pinctrl_groups_ops = {
+ .open = pinctrl_groups_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations pinctrl_gpioranges_ops = {
+ .open = pinctrl_gpioranges_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations pinctrl_devices_ops = {
+ .open = pinctrl_devices_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *debugfs_root;
+
+static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
+{
+ struct dentry *device_root;
+
+ device_root = debugfs_create_dir(dev_name(&pctldev->dev),
+ debugfs_root);
+ if (IS_ERR(device_root) || !device_root) {
+ pr_warn("failed to create debugfs directory for %s\n",
+ dev_name(&pctldev->dev));
+ return;
+ }
+ debugfs_create_file("pins", S_IFREG | S_IRUGO,
+ device_root, pctldev, &pinctrl_pins_ops);
+ debugfs_create_file("pingroups", S_IFREG | S_IRUGO,
+ device_root, pctldev, &pinctrl_groups_ops);
+ debugfs_create_file("gpio-ranges", S_IFREG | S_IRUGO,
+ device_root, pctldev, &pinctrl_gpioranges_ops);
+ pinmux_init_device_debugfs(device_root, pctldev);
+}
+
+static void pinctrl_init_debugfs(void)
+{
+ debugfs_root = debugfs_create_dir("pinctrl", NULL);
+ if (IS_ERR(debugfs_root) || !debugfs_root) {
+ pr_warn("failed to create debugfs directory\n");
+ debugfs_root = NULL;
+ return;
+ }
+
+ debugfs_create_file("pinctrl-devices", S_IFREG | S_IRUGO,
+ debugfs_root, NULL, &pinctrl_devices_ops);
+ pinmux_init_debugfs(debugfs_root);
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
+{
+}
+
+static void pinctrl_init_debugfs(void)
+{
+}
+
+#endif
+
+/**
+ * pinctrl_register() - register a pin controller device
+ * @pctldesc: descriptor for this pin controller
+ * @dev: parent device for this pin controller
+ * @driver_data: private pin controller data for this pin controller
+ */
+struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
+ struct device *dev, void *driver_data)
+{
+ static atomic_t pinmux_no = ATOMIC_INIT(0);
+ struct pinctrl_dev *pctldev;
+ int ret;
+
+ if (pctldesc == NULL)
+ return ERR_PTR(-EINVAL);
+ if (pctldesc->name == NULL)
+ return ERR_PTR(-EINVAL);
+
+ /* If we're implementing pinmuxing, check the ops for sanity */
+ if (pctldesc->pmxops) {
+ ret = pinmux_check_ops(pctldesc->pmxops);
+ if (ret) {
+ pr_err("%s pinmux ops lacks necessary functions\n",
+ pctldesc->name);
+ return ERR_PTR(ret);
+ }
+ }
+
+ pctldev = kzalloc(sizeof(struct pinctrl_dev), GFP_KERNEL);
+ if (pctldev == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ /* Initialize pin control device struct */
+ pctldev->owner = pctldesc->owner;
+ pctldev->desc = pctldesc;
+ pctldev->driver_data = driver_data;
+ INIT_RADIX_TREE(&pctldev->pin_desc_tree, GFP_KERNEL);
+ spin_lock_init(&pctldev->pin_desc_tree_lock);
+ INIT_LIST_HEAD(&pctldev->gpio_ranges);
+ mutex_init(&pctldev->gpio_ranges_lock);
+
+ /* Register device */
+ pctldev->dev.parent = dev;
+ dev_set_name(&pctldev->dev, "pinctrl.%d",
+ atomic_inc_return(&pinmux_no) - 1);
+ pctldev->dev.release = pinctrl_dev_release;
+ ret = device_register(&pctldev->dev);
+ if (ret != 0) {
+ pr_err("error in device registration\n");
+ goto out_reg_dev_err;
+ }
+ dev_set_drvdata(&pctldev->dev, pctldev);
+
+ /* Register all the pins */
+ pr_debug("try to register %d pins on %s...\n",
+ pctldesc->npins, pctldesc->name);
+ ret = pinctrl_register_pins(pctldev, pctldesc->pins, pctldesc->npins);
+ if (ret) {
+ pr_err("error during pin registration\n");
+ pinctrl_free_pindescs(pctldev, pctldesc->pins,
+ pctldesc->npins);
+ goto out_reg_pins_err;
+ }
+
+ pinctrl_init_device_debugfs(pctldev);
+ mutex_lock(&pinctrldev_list_mutex);
+ list_add(&pctldev->node, &pinctrldev_list);
+ mutex_unlock(&pinctrldev_list_mutex);
+ pinmux_hog_maps(pctldev);
+ return pctldev;
+
+out_reg_pins_err:
+ device_del(&pctldev->dev);
+out_reg_dev_err:
+ put_device(&pctldev->dev);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(pinctrl_register);
+
+/**
+ * pinctrl_unregister() - unregister a pin controller
+ * @pctldev: pin controller to unregister
+ *
+ * Called by pin controller drivers to unregister a pin controller.
+ */
+void pinctrl_unregister(struct pinctrl_dev *pctldev)
+{
+ if (pctldev == NULL)
+ return;
+
+ pinmux_unhog_maps(pctldev);
+ /* TODO: check that no pinmuxes are still active? */
+ mutex_lock(&pinctrldev_list_mutex);
+ list_del(&pctldev->node);
+ mutex_unlock(&pinctrldev_list_mutex);
+ /* Destroy descriptor tree */
+ pinctrl_free_pindescs(pctldev, pctldev->desc->pins,
+ pctldev->desc->npins);
+ device_unregister(&pctldev->dev);
+}
+EXPORT_SYMBOL_GPL(pinctrl_unregister);
+
+static int __init pinctrl_init(void)
+{
+ pr_info("initialized pinctrl subsystem\n");
+ pinctrl_init_debugfs();
+ return 0;
+}
+
+/* init early since many drivers really need to initialize pinmux early */
+core_initcall(pinctrl_init);
#define KMSG_COMPONENT "vmur"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/kernel_stat.h>
#include <linux/cdev.h>
#include <linux/slab.h>
+ #include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/cio.h>
*/
#include <linux/timer.h>
+#include <linux/delay.h>
+ #include <linux/module.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
++#include <linux/export.h>
#include "sas_internal.h"
#include <linux/sched.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/card.h>
#include <linux/semaphore.h>
#include <linux/firmware.h>
+ #include <linux/module.h>
#include <asm/unaligned.h>
#include <defs.h>
#include <brcmu_wifi.h>
+ #include <linux/export.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
#include <linux/spi/spi.h>
+ #include <linux/export.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "adis16201.h"
+ #include <linux/export.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/spi/spi.h>
+ #include <linux/export.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "adis16203.h"
+ #include <linux/export.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/spi/spi.h>
+ #include <linux/export.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "adis16204.h"
+ #include <linux/export.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/spi/spi.h>
+ #include <linux/export.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "adis16209.h"
+ #include <linux/export.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/spi/spi.h>
+ #include <linux/export.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "adis16240.h"
#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
-#include <linux/sysfs.h>
#include <linux/slab.h>
+ #include <linux/export.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../ring_sw.h"
#include "../kfifo_buf.h"
-#include "accel.h"
#include "../trigger.h"
+#include "../trigger_consumer.h"
#include "lis3l02dq.h"
/**
+ #include <linux/export.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/spi/spi.h>
+ #include <linux/export.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "adis16260.h"
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/bitops.h>
+ #include <linux/export.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../ring_sw.h"
-#include "../accel/accel.h"
-#include "../trigger.h"
+#include "../trigger_consumer.h"
#include "adis16400.h"
/**
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/spi/spi.h>
+ #include <linux/export.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "adis16400.h"
--- /dev/null
+/* The industrial I/O core
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Handling of buffer allocation / resizing.
+ *
+ *
+ * Things to look at here.
+ * - Better memory allocation techniques?
+ * - Alternative access techniques?
+ */
+#include <linux/kernel.h>
++#include <linux/export.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+
+#include "iio.h"
+#include "iio_core.h"
+#include "sysfs.h"
+#include "buffer_generic.h"
+
+static const char * const iio_endian_prefix[] = {
+ [IIO_BE] = "be",
+ [IIO_LE] = "le",
+};
+
+/**
+ * iio_buffer_read_first_n_outer() - chrdev read for buffer access
+ *
+ * This function relies on all buffer implementations having an
+ * iio_buffer as their first element.
+ **/
+ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
+ size_t n, loff_t *f_ps)
+{
+ struct iio_dev *indio_dev = filp->private_data;
+ struct iio_buffer *rb = indio_dev->buffer;
+
+ if (!rb->access->read_first_n)
+ return -EINVAL;
+ return rb->access->read_first_n(rb, n, buf);
+}
+
+/**
+ * iio_buffer_poll() - poll the buffer to find out if it has data
+ */
+unsigned int iio_buffer_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct iio_dev *indio_dev = filp->private_data;
+ struct iio_buffer *rb = indio_dev->buffer;
+
+ poll_wait(filp, &rb->pollq, wait);
+ if (rb->stufftoread)
+ return POLLIN | POLLRDNORM;
+ /* need a way of knowing if there may be enough data... */
+ return 0;
+}
+
+int iio_chrdev_buffer_open(struct iio_dev *indio_dev)
+{
+ struct iio_buffer *rb = indio_dev->buffer;
+ if (!rb)
+ return -EINVAL;
+ if (rb->access->mark_in_use)
+ rb->access->mark_in_use(rb);
+ return 0;
+}
+
+void iio_chrdev_buffer_release(struct iio_dev *indio_dev)
+{
+ struct iio_buffer *rb = indio_dev->buffer;
+
+ clear_bit(IIO_BUSY_BIT_POS, &rb->flags);
+ if (rb->access->unmark_in_use)
+ rb->access->unmark_in_use(rb);
+}
+
+void iio_buffer_init(struct iio_buffer *buffer, struct iio_dev *indio_dev)
+{
+ buffer->indio_dev = indio_dev;
+ init_waitqueue_head(&buffer->pollq);
+}
+EXPORT_SYMBOL(iio_buffer_init);
+
+static ssize_t iio_show_scan_index(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
+}
+
+static ssize_t iio_show_fixed_type(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ u8 type = this_attr->c->scan_type.endianness;
+
+ if (type == IIO_CPU) {
+#ifdef __LITTLE_ENDIAN
+ type = IIO_LE;
+#else
+ type = IIO_BE;
+#endif
+ }
+ return sprintf(buf, "%s:%c%d/%d>>%u\n",
+ iio_endian_prefix[type],
+ this_attr->c->scan_type.sign,
+ this_attr->c->scan_type.realbits,
+ this_attr->c->scan_type.storagebits,
+ this_attr->c->scan_type.shift);
+}
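+
+/*
+ * Illustrative output (values hypothetical): a signed 12-bit,
+ * little-endian sample stored in 16 bits and right-shifted by 4
+ * reads back from sysfs as "le:s12/16>>4".
+ */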
+
+static ssize_t iio_scan_el_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+
+ ret = iio_scan_mask_query(indio_dev->buffer,
+ to_iio_dev_attr(attr)->address);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", ret);
+}
+
+static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
+{
+ clear_bit(bit, buffer->scan_mask);
+ buffer->scan_count--;
+ return 0;
+}
+
+static ssize_t iio_scan_el_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int ret = 0;
+ bool state;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ state = !(buf[0] == '0');
+ mutex_lock(&indio_dev->mlock);
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ ret = -EBUSY;
+ goto error_ret;
+ }
+ ret = iio_scan_mask_query(buffer, this_attr->address);
+ if (ret < 0)
+ goto error_ret;
+ if (!state && ret) {
+ ret = iio_scan_mask_clear(buffer, this_attr->address);
+ if (ret)
+ goto error_ret;
+ } else if (state && !ret) {
+ ret = iio_scan_mask_set(buffer, this_attr->address);
+ if (ret)
+ goto error_ret;
+ }
+
+error_ret:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static ssize_t iio_scan_el_ts_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
+}
+
+static ssize_t iio_scan_el_ts_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int ret = 0;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ bool state;
+
+ state = !(buf[0] == '0');
+ mutex_lock(&indio_dev->mlock);
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ ret = -EBUSY;
+ goto error_ret;
+ }
+ indio_dev->buffer->scan_timestamp = state;
+error_ret:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ int ret, attrcount = 0;
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ ret = __iio_add_chan_devattr("index",
+ chan,
+ &iio_show_scan_index,
+ NULL,
+ 0,
+ 0,
+ &indio_dev->dev,
+ &buffer->scan_el_dev_attr_list);
+ if (ret)
+ goto error_ret;
+ attrcount++;
+ ret = __iio_add_chan_devattr("type",
+ chan,
+ &iio_show_fixed_type,
+ NULL,
+ 0,
+ 0,
+ &indio_dev->dev,
+ &buffer->scan_el_dev_attr_list);
+ if (ret)
+ goto error_ret;
+ attrcount++;
+ if (chan->type != IIO_TIMESTAMP)
+ ret = __iio_add_chan_devattr("en",
+ chan,
+ &iio_scan_el_show,
+ &iio_scan_el_store,
+ chan->scan_index,
+ 0,
+ &indio_dev->dev,
+ &buffer->scan_el_dev_attr_list);
+ else
+ ret = __iio_add_chan_devattr("en",
+ chan,
+ &iio_scan_el_ts_show,
+ &iio_scan_el_ts_store,
+ chan->scan_index,
+ 0,
+ &indio_dev->dev,
+ &buffer->scan_el_dev_attr_list);
+ attrcount++;
+ ret = attrcount;
+error_ret:
+ return ret;
+}
+
+static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
+ struct iio_dev_attr *p)
+{
+ kfree(p->dev_attr.attr.name);
+ kfree(p);
+}
+
+static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
+{
+ struct iio_dev_attr *p, *n;
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ list_for_each_entry_safe(p, n,
+ &buffer->scan_el_dev_attr_list, l)
+ iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
+}
+
+static const char * const iio_scan_elements_group_name = "scan_elements";
+
+int iio_buffer_register(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *channels,
+ int num_channels)
+{
+ struct iio_dev_attr *p;
+ struct attribute **attr;
+ struct iio_buffer *buffer = indio_dev->buffer;
+ int ret, i, attrn, attrcount, attrcount_orig = 0;
+
+ if (buffer->attrs)
+ indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;
+
+ if (buffer->scan_el_attrs != NULL) {
+ attr = buffer->scan_el_attrs->attrs;
+ while (*attr++ != NULL)
+ attrcount_orig++;
+ }
+ attrcount = attrcount_orig;
+ INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
+ if (channels) {
+ /* new magic */
+ for (i = 0; i < num_channels; i++) {
+ /* Establish necessary mask length */
+ if (channels[i].scan_index >
+ (int)indio_dev->masklength - 1)
+ indio_dev->masklength
+ = channels[i].scan_index + 1;
+
+ ret = iio_buffer_add_channel_sysfs(indio_dev,
+ &channels[i]);
+ if (ret < 0)
+ goto error_cleanup_dynamic;
+ attrcount += ret;
+ }
+ if (indio_dev->masklength && buffer->scan_mask == NULL) {
+ buffer->scan_mask
+ = kzalloc(sizeof(*buffer->scan_mask)*
+ BITS_TO_LONGS(indio_dev->masklength),
+ GFP_KERNEL);
+ if (buffer->scan_mask == NULL) {
+ ret = -ENOMEM;
+ goto error_cleanup_dynamic;
+ }
+ }
+ }
+
+ buffer->scan_el_group.name = iio_scan_elements_group_name;
+
+ buffer->scan_el_group.attrs
+ = kzalloc(sizeof(buffer->scan_el_group.attrs[0])*
+ (attrcount + 1),
+ GFP_KERNEL);
+ if (buffer->scan_el_group.attrs == NULL) {
+ ret = -ENOMEM;
+ goto error_free_scan_mask;
+ }
+ if (buffer->scan_el_attrs)
+ memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs->attrs,
+ sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
+ attrn = attrcount_orig;
+
+ list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
+ buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
+ indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;
+
+ return 0;
+
+error_free_scan_mask:
+ kfree(buffer->scan_mask);
+error_cleanup_dynamic:
+ __iio_buffer_attr_cleanup(indio_dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(iio_buffer_register);
+
+void iio_buffer_unregister(struct iio_dev *indio_dev)
+{
+ kfree(indio_dev->buffer->scan_mask);
+ kfree(indio_dev->buffer->scan_el_group.attrs);
+ __iio_buffer_attr_cleanup(indio_dev);
+}
+EXPORT_SYMBOL(iio_buffer_unregister);
+
+ssize_t iio_buffer_read_length(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ if (buffer->access->get_length)
+ return sprintf(buf, "%d\n",
+ buffer->access->get_length(buffer));
+
+ return 0;
+}
+EXPORT_SYMBOL(iio_buffer_read_length);
+
+ssize_t iio_buffer_write_length(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int ret;
+ ulong val;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (buffer->access->get_length)
+ if (val == buffer->access->get_length(buffer))
+ return len;
+
+ if (buffer->access->set_length) {
+ buffer->access->set_length(buffer, val);
+ if (buffer->access->mark_param_change)
+ buffer->access->mark_param_change(buffer);
+ }
+
+ return len;
+}
+EXPORT_SYMBOL(iio_buffer_write_length);
+
+ssize_t iio_buffer_read_bytes_per_datum(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ if (buffer->access->get_bytes_per_datum)
+ return sprintf(buf, "%d\n",
+ buffer->access->get_bytes_per_datum(buffer));
+
+ return 0;
+}
+EXPORT_SYMBOL(iio_buffer_read_bytes_per_datum);
+
+ssize_t iio_buffer_store_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ int ret;
+ bool requested_state, current_state;
+ int previous_mode;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ mutex_lock(&indio_dev->mlock);
+ previous_mode = indio_dev->currentmode;
+ requested_state = !(buf[0] == '0');
+ current_state = !!(previous_mode & INDIO_ALL_BUFFER_MODES);
+ if (current_state == requested_state) {
+ printk(KERN_INFO "iio-buffer, current state requested again\n");
+ goto done;
+ }
+ if (requested_state) {
+ if (buffer->setup_ops->preenable) {
+ ret = buffer->setup_ops->preenable(indio_dev);
+ if (ret) {
+ printk(KERN_ERR
+ "Buffer not started:"
+ "buffer preenable failed\n");
+ goto error_ret;
+ }
+ }
+ if (buffer->access->request_update) {
+ ret = buffer->access->request_update(buffer);
+ if (ret) {
+ printk(KERN_INFO
+ "Buffer not started:"
+ "buffer parameter update failed\n");
+ goto error_ret;
+ }
+ }
+ if (buffer->access->mark_in_use)
+ buffer->access->mark_in_use(buffer);
+ /* Definitely possible for devices to support both of these. */
+ if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
+ if (!indio_dev->trig) {
+ printk(KERN_INFO
+ "Buffer not started: no trigger\n");
+ ret = -EINVAL;
+ if (buffer->access->unmark_in_use)
+ buffer->access->unmark_in_use(buffer);
+ goto error_ret;
+ }
+ indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
+ } else if (indio_dev->modes & INDIO_BUFFER_HARDWARE)
+ indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
+ else { /* should never be reached */
+ ret = -EINVAL;
+ goto error_ret;
+ }
+
+ if (buffer->setup_ops->postenable) {
+ ret = buffer->setup_ops->postenable(indio_dev);
+ if (ret) {
+ printk(KERN_INFO
+ "Buffer not started:"
+ "postenable failed\n");
+ if (buffer->access->unmark_in_use)
+ buffer->access->unmark_in_use(buffer);
+ indio_dev->currentmode = previous_mode;
+ if (buffer->setup_ops->postdisable)
+ buffer->setup_ops->
+ postdisable(indio_dev);
+ goto error_ret;
+ }
+ }
+ } else {
+ if (buffer->setup_ops->predisable) {
+ ret = buffer->setup_ops->predisable(indio_dev);
+ if (ret)
+ goto error_ret;
+ }
+ if (buffer->access->unmark_in_use)
+ buffer->access->unmark_in_use(buffer);
+ indio_dev->currentmode = INDIO_DIRECT_MODE;
+ if (buffer->setup_ops->postdisable) {
+ ret = buffer->setup_ops->postdisable(indio_dev);
+ if (ret)
+ goto error_ret;
+ }
+ }
+done:
+ mutex_unlock(&indio_dev->mlock);
+ return len;
+
+error_ret:
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+EXPORT_SYMBOL(iio_buffer_store_enable);
+
+ssize_t iio_buffer_show_enable(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", !!(indio_dev->currentmode
+ & INDIO_ALL_BUFFER_MODES));
+}
+EXPORT_SYMBOL(iio_buffer_show_enable);
+
+int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct iio_buffer *buffer = indio_dev->buffer;
+ size_t size;
+ dev_dbg(&indio_dev->dev, "%s\n", __func__);
+ /* Check if there are any scan elements enabled, if not fail*/
+ if (!(buffer->scan_count || buffer->scan_timestamp))
+ return -EINVAL;
+ if (buffer->scan_timestamp)
+ if (buffer->scan_count)
+ /* Timestamp (aligned to s64) and data */
+ size = (((buffer->scan_count * buffer->bpe)
+ + sizeof(s64) - 1)
+ & ~(sizeof(s64) - 1))
+ + sizeof(s64);
+ else /* Timestamp only */
+ size = sizeof(s64);
+ else /* Data only */
+ size = buffer->scan_count * buffer->bpe;
+ buffer->access->set_bytes_per_datum(buffer, size);
+
+ return 0;
+}
+EXPORT_SYMBOL(iio_sw_buffer_preenable);
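+
+/*
+ * Worked example (illustrative): with three enabled channels of 2 bytes
+ * each (scan_count = 3, bpe = 2) plus a timestamp, the data occupies
+ * 6 bytes, is rounded up to the 8-byte alignment of s64, and the
+ * timestamp adds 8 more, so set_bytes_per_datum() is called with 16.
+ */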
+
+/* note: NULL is used as the error indicator since a NULL mask makes no sense */
+static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
+ unsigned int masklength,
+ unsigned long *mask)
+{
+ if (bitmap_empty(mask, masklength))
+ return NULL;
+ while (*av_masks) {
+ if (bitmap_subset(mask, av_masks, masklength))
+ return av_masks;
+ av_masks += BITS_TO_LONGS(masklength);
+ }
+ return NULL;
+}
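+
+/*
+ * Illustrative example: with available masks { 0x3, 0xf } (as two
+ * single-long bitmaps) and a trial mask of 0x5, 0x5 is not a subset
+ * of 0x3 but is a subset of 0xf, so the second mask is returned.
+ */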
+
+/**
+ * iio_scan_mask_set() - set particular bit in the scan mask
+ * @buffer: the buffer whose scan mask we are interested in
+ * @bit: the bit to be set.
+ **/
+int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
+{
+ struct iio_dev *indio_dev = buffer->indio_dev;
+ unsigned long *mask;
+ unsigned long *trialmask;
+
+ if (!indio_dev->masklength) {
+ WARN(1, "trying to set scan mask prior to registering buffer\n");
+ return -EINVAL;
+ }
+ trialmask = kmalloc(sizeof(*trialmask) *
+ BITS_TO_LONGS(indio_dev->masklength),
+ GFP_KERNEL);
+ if (trialmask == NULL)
+ return -ENOMEM;
+ bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
+ set_bit(bit, trialmask);
+
+ if (indio_dev->available_scan_masks) {
+ mask = iio_scan_mask_match(indio_dev->available_scan_masks,
+ indio_dev->masklength,
+ trialmask);
+ if (!mask) {
+ kfree(trialmask);
+ return -EINVAL;
+ }
+ }
+ bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
+ buffer->scan_count++;
+
+ kfree(trialmask);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iio_scan_mask_set);
+
+int iio_scan_mask_query(struct iio_buffer *buffer, int bit)
+{
+ struct iio_dev *indio_dev = buffer->indio_dev;
+ unsigned long *mask;
+
+ if (bit >= indio_dev->masklength)
+ return -EINVAL;
+
+ if (!buffer->scan_mask)
+ return 0;
+ if (indio_dev->available_scan_masks)
+ mask = iio_scan_mask_match(indio_dev->available_scan_masks,
+ indio_dev->masklength,
+ buffer->scan_mask);
+ else
+ mask = buffer->scan_mask;
+ if (!mask)
+ return 0;
+
+ return test_bit(bit, mask);
+}
+EXPORT_SYMBOL_GPL(iio_scan_mask_query);
*
* Licensed under the GPL-2.
*/
+ #include <linux/export.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
*/
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/spi/spi.h>
+ #include <linux/export.h>
#include "../iio.h"
-#include "../sysfs.h"
#include "../trigger.h"
#include "ade7758.h"
--- /dev/null
+/* Driver for Realtek RTS51xx USB card reader
+ *
+ * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * wwang (wei_wang@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ * Maintainer:
+ * Edwin Rong (edwin_rong@realsil.com.cn)
+ * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
+ */
+
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
++#include <linux/export.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_device.h>
+
+#include "debug.h"
+#include "rts51x.h"
+#include "rts51x_chip.h"
+#include "rts51x_scsi.h"
+#include "rts51x_card.h"
+#include "rts51x_transport.h"
+#include "rts51x_sys.h"
+#include "sd_cprm.h"
+#include "ms_mg.h"
+#include "trace.h"
+
+void scsi_show_command(struct scsi_cmnd *srb)
+{
+ char *what = NULL;
+ int i, unknown_cmd = 0;
+
+ switch (srb->cmnd[0]) {
+ case TEST_UNIT_READY:
+ what = (char *)"TEST_UNIT_READY";
+ break;
+ case REZERO_UNIT:
+ what = (char *)"REZERO_UNIT";
+ break;
+ case REQUEST_SENSE:
+ what = (char *)"REQUEST_SENSE";
+ break;
+ case FORMAT_UNIT:
+ what = (char *)"FORMAT_UNIT";
+ break;
+ case READ_BLOCK_LIMITS:
+ what = (char *)"READ_BLOCK_LIMITS";
+ break;
+ case 0x07:
+ what = (char *)"REASSIGN_BLOCKS";
+ break;
+ case READ_6:
+ what = (char *)"READ_6";
+ break;
+ case WRITE_6:
+ what = (char *)"WRITE_6";
+ break;
+ case SEEK_6:
+ what = (char *)"SEEK_6";
+ break;
+ case READ_REVERSE:
+ what = (char *)"READ_REVERSE";
+ break;
+ case WRITE_FILEMARKS:
+ what = (char *)"WRITE_FILEMARKS";
+ break;
+ case SPACE:
+ what = (char *)"SPACE";
+ break;
+ case INQUIRY:
+ what = (char *)"INQUIRY";
+ break;
+ case RECOVER_BUFFERED_DATA:
+ what = (char *)"RECOVER_BUFFERED_DATA";
+ break;
+ case MODE_SELECT:
+ what = (char *)"MODE_SELECT";
+ break;
+ case RESERVE:
+ what = (char *)"RESERVE";
+ break;
+ case RELEASE:
+ what = (char *)"RELEASE";
+ break;
+ case COPY:
+ what = (char *)"COPY";
+ break;
+ case ERASE:
+ what = (char *)"ERASE";
+ break;
+ case MODE_SENSE:
+ what = (char *)"MODE_SENSE";
+ break;
+ case START_STOP:
+ what = (char *)"START_STOP";
+ break;
+ case RECEIVE_DIAGNOSTIC:
+ what = (char *)"RECEIVE_DIAGNOSTIC";
+ break;
+ case SEND_DIAGNOSTIC:
+ what = (char *)"SEND_DIAGNOSTIC";
+ break;
+ case ALLOW_MEDIUM_REMOVAL:
+ what = (char *)"ALLOW_MEDIUM_REMOVAL";
+ break;
+ case SET_WINDOW:
+ what = (char *)"SET_WINDOW";
+ break;
+ case READ_CAPACITY:
+ what = (char *)"READ_CAPACITY";
+ break;
+ case READ_10:
+ what = (char *)"READ_10";
+ break;
+ case WRITE_10:
+ what = (char *)"WRITE_10";
+ break;
+ case SEEK_10:
+ what = (char *)"SEEK_10";
+ break;
+ case WRITE_VERIFY:
+ what = (char *)"WRITE_VERIFY";
+ break;
+ case VERIFY:
+ what = (char *)"VERIFY";
+ break;
+ case SEARCH_HIGH:
+ what = (char *)"SEARCH_HIGH";
+ break;
+ case SEARCH_EQUAL:
+ what = (char *)"SEARCH_EQUAL";
+ break;
+ case SEARCH_LOW:
+ what = (char *)"SEARCH_LOW";
+ break;
+ case SET_LIMITS:
+ what = (char *)"SET_LIMITS";
+ break;
+ case READ_POSITION:
+ what = (char *)"READ_POSITION";
+ break;
+ case SYNCHRONIZE_CACHE:
+ what = (char *)"SYNCHRONIZE_CACHE";
+ break;
+ case LOCK_UNLOCK_CACHE:
+ what = (char *)"LOCK_UNLOCK_CACHE";
+ break;
+ case READ_DEFECT_DATA:
+ what = (char *)"READ_DEFECT_DATA";
+ break;
+ case MEDIUM_SCAN:
+ what = (char *)"MEDIUM_SCAN";
+ break;
+ case COMPARE:
+ what = (char *)"COMPARE";
+ break;
+ case COPY_VERIFY:
+ what = (char *)"COPY_VERIFY";
+ break;
+ case WRITE_BUFFER:
+ what = (char *)"WRITE_BUFFER";
+ break;
+ case READ_BUFFER:
+ what = (char *)"READ_BUFFER";
+ break;
+ case UPDATE_BLOCK:
+ what = (char *)"UPDATE_BLOCK";
+ break;
+ case READ_LONG:
+ what = (char *)"READ_LONG";
+ break;
+ case WRITE_LONG:
+ what = (char *)"WRITE_LONG";
+ break;
+ case CHANGE_DEFINITION:
+ what = (char *)"CHANGE_DEFINITION";
+ break;
+ case WRITE_SAME:
+ what = (char *)"WRITE_SAME";
+ break;
+ case GPCMD_READ_SUBCHANNEL:
+ what = (char *)"READ SUBCHANNEL";
+ break;
+ case READ_TOC:
+ what = (char *)"READ_TOC";
+ break;
+ case GPCMD_READ_HEADER:
+ what = (char *)"READ HEADER";
+ break;
+ case GPCMD_PLAY_AUDIO_10:
+ what = (char *)"PLAY AUDIO (10)";
+ break;
+ case GPCMD_PLAY_AUDIO_MSF:
+ what = (char *)"PLAY AUDIO MSF";
+ break;
+ case GPCMD_GET_EVENT_STATUS_NOTIFICATION:
+ what = (char *)"GET EVENT/STATUS NOTIFICATION";
+ break;
+ case GPCMD_PAUSE_RESUME:
+ what = (char *)"PAUSE/RESUME";
+ break;
+ case LOG_SELECT:
+ what = (char *)"LOG_SELECT";
+ break;
+ case LOG_SENSE:
+ what = (char *)"LOG_SENSE";
+ break;
+ case GPCMD_STOP_PLAY_SCAN:
+ what = (char *)"STOP PLAY/SCAN";
+ break;
+ case GPCMD_READ_DISC_INFO:
+ what = (char *)"READ DISC INFORMATION";
+ break;
+ case GPCMD_READ_TRACK_RZONE_INFO:
+ what = (char *)"READ TRACK INFORMATION";
+ break;
+ case GPCMD_RESERVE_RZONE_TRACK:
+ what = (char *)"RESERVE TRACK";
+ break;
+ case GPCMD_SEND_OPC:
+ what = (char *)"SEND OPC";
+ break;
+ case MODE_SELECT_10:
+ what = (char *)"MODE_SELECT_10";
+ break;
+ case GPCMD_REPAIR_RZONE_TRACK:
+ what = (char *)"REPAIR TRACK";
+ break;
+ case 0x59:
+ what = (char *)"READ MASTER CUE";
+ break;
+ case MODE_SENSE_10:
+ what = (char *)"MODE_SENSE_10";
+ break;
+ case GPCMD_CLOSE_TRACK:
+ what = (char *)"CLOSE TRACK/SESSION";
+ break;
+ case 0x5C:
+ what = (char *)"READ BUFFER CAPACITY";
+ break;
+ case 0x5D:
+ what = (char *)"SEND CUE SHEET";
+ break;
+ case GPCMD_BLANK:
+ what = (char *)"BLANK";
+ break;
+ case REPORT_LUNS:
+ what = (char *)"REPORT LUNS";
+ break;
+ case MOVE_MEDIUM:
+ what = (char *)"MOVE_MEDIUM or PLAY AUDIO (12)";
+ break;
+ case READ_12:
+ what = (char *)"READ_12";
+ break;
+ case WRITE_12:
+ what = (char *)"WRITE_12";
+ break;
+ case WRITE_VERIFY_12:
+ what = (char *)"WRITE_VERIFY_12";
+ break;
+ case SEARCH_HIGH_12:
+ what = (char *)"SEARCH_HIGH_12";
+ break;
+ case SEARCH_EQUAL_12:
+ what = (char *)"SEARCH_EQUAL_12";
+ break;
+ case SEARCH_LOW_12:
+ what = (char *)"SEARCH_LOW_12";
+ break;
+ case SEND_VOLUME_TAG:
+ what = (char *)"SEND_VOLUME_TAG";
+ break;
+ case READ_ELEMENT_STATUS:
+ what = (char *)"READ_ELEMENT_STATUS";
+ break;
+ case GPCMD_READ_CD_MSF:
+ what = (char *)"READ CD MSF";
+ break;
+ case GPCMD_SCAN:
+ what = (char *)"SCAN";
+ break;
+ case GPCMD_SET_SPEED:
+ what = (char *)"SET CD SPEED";
+ break;
+ case GPCMD_MECHANISM_STATUS:
+ what = (char *)"MECHANISM STATUS";
+ break;
+ case GPCMD_READ_CD:
+ what = (char *)"READ CD";
+ break;
+ case 0xE1:
+ what = (char *)"WRITE CONTINUE";
+ break;
+ case WRITE_LONG_2:
+ what = (char *)"WRITE_LONG_2";
+ break;
+ case VENDOR_CMND:
+ what = (char *)"Realtek's vendor command";
+ break;
+ default:
+ what = (char *)"(unknown command)";
+ unknown_cmd = 1;
+ break;
+ }
+
+ if (srb->cmnd[0] != TEST_UNIT_READY)
+ RTS51X_DEBUGP("Command %s (%d bytes)\n", what, srb->cmd_len);
+ if (unknown_cmd) {
+ RTS51X_DEBUGP("");
+ for (i = 0; i < srb->cmd_len && i < 16; i++)
+ RTS51X_DEBUGPN(" %02x", srb->cmnd[i]);
+ RTS51X_DEBUGPN("\n");
+ }
+}
+
+void set_sense_type(struct rts51x_chip *chip, unsigned int lun, int sense_type)
+{
+ switch (sense_type) {
+ case SENSE_TYPE_MEDIA_CHANGE:
+ set_sense_data(chip, lun, CUR_ERR, 0x06, 0, 0x28, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_NOT_PRESENT:
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x3A, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_LBA_OVER_RANGE:
+ set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x21, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT:
+ set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x25, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_WRITE_PROTECT:
+ set_sense_data(chip, lun, CUR_ERR, 0x07, 0, 0x27, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR:
+ set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x11, 0, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_WRITE_ERR:
+ set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x0C, 0x02, 0, 0);
+ break;
+
+ case SENSE_TYPE_MEDIA_INVALID_CMD_FIELD:
+ set_sense_data(chip, lun, CUR_ERR, ILGAL_REQ, 0,
+ ASC_INVLD_CDB, ASCQ_INVLD_CDB, CDB_ILLEGAL, 1);
+ break;
+
+ case SENSE_TYPE_FORMAT_IN_PROGRESS:
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04, 0, 0);
+ break;
+
+ case SENSE_TYPE_FORMAT_CMD_FAILED:
+ set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x31, 0x01, 0, 0);
+ break;
+
+#ifdef SUPPORT_MAGIC_GATE
+ case SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB:
+ set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x6F, 0x02, 0, 0);
+ break;
+
+ case SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN:
+ set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x6F, 0x00, 0, 0);
+ break;
+
+ case SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM:
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x30, 0x00, 0, 0);
+ break;
+
+ case SENSE_TYPE_MG_WRITE_ERR:
+ set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x0C, 0x00, 0, 0);
+ break;
+#endif
+
+#ifdef SUPPORT_SD_LOCK
+ case SENSE_TYPE_MEDIA_READ_FORBIDDEN:
+ set_sense_data(chip, lun, CUR_ERR, 0x07, 0, 0x11, 0x13, 0, 0);
+ break;
+#endif
+
+ case SENSE_TYPE_NO_SENSE:
+ default:
+ set_sense_data(chip, lun, CUR_ERR, 0, 0, 0, 0, 0, 0);
+ break;
+ }
+}
+
+void set_sense_data(struct rts51x_chip *chip, unsigned int lun, u8 err_code,
+ u8 sense_key, u32 info, u8 asc, u8 ascq, u8 sns_key_info0,
+ u16 sns_key_info1)
+{
+ struct sense_data_t *sense = &(chip->sense_buffer[lun]);
+
+ sense->err_code = err_code;
+ sense->sense_key = sense_key;
+ sense->info[0] = (u8) (info >> 24);
+ sense->info[1] = (u8) (info >> 16);
+ sense->info[2] = (u8) (info >> 8);
+ sense->info[3] = (u8) info;
+
+ sense->ad_sense_len = sizeof(struct sense_data_t) - 8;
+ sense->asc = asc;
+ sense->ascq = ascq;
+ if (sns_key_info0 != 0) {
+ sense->sns_key_info[0] = SKSV | sns_key_info0;
+ /* the sense-key-specific field pointer is a big-endian 16-bit value */
+ sense->sns_key_info[1] = (sns_key_info1 & 0xff00) >> 8;
+ sense->sns_key_info[2] = sns_key_info1 & 0x00ff;
+ }
+}
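The sense_key/asc pairs passed to set_sense_data() above are standard SCSI
additional-sense codes (the struct layout itself is driver-specific). A few of
the encodings used here and how a host reports them, for illustration:

	static const struct {
		unsigned char key, asc;
		const char *meaning;
	} sense_examples[] = {
		{ 0x02, 0x3A, "NOT READY, MEDIUM NOT PRESENT" },
		{ 0x05, 0x21, "ILLEGAL REQUEST, LBA OUT OF RANGE" },
		{ 0x07, 0x27, "DATA PROTECT, WRITE PROTECTED" },
		{ 0x03, 0x11, "MEDIUM ERROR, UNRECOVERED READ ERROR" },
	};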
+
+static int test_unit_ready(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+
+ rts51x_init_cards(chip);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ return TRANSPORT_FAILED;
+ }
+
+ if (!check_lun_mc(chip, lun)) {
+ set_lun_mc(chip, lun);
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ return TRANSPORT_FAILED;
+ }
+#ifdef SUPPORT_SD_LOCK
+ if (get_lun_card(chip, SCSI_LUN(srb)) == SD_CARD) {
+ struct sd_info *sd_card = &(chip->sd_card);
+ if (sd_card->sd_lock_notify) {
+ sd_card->sd_lock_notify = 0;
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ return TRANSPORT_FAILED;
+ } else if (sd_card->sd_lock_status & SD_LOCKED) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_READ_FORBIDDEN);
+ return TRANSPORT_FAILED;
+ }
+ }
+#endif
+
+ return TRANSPORT_GOOD;
+}
+
+unsigned char formatter_inquiry_str[20] = {
+ 'M', 'E', 'M', 'O', 'R', 'Y', 'S', 'T', 'I', 'C', 'K',
+ '-', 'M', 'G', /* Byte[47:49] */
+ 0x0B, /* Byte[50]: MG, MS, MSPro, MSXC */
+ 0x00, /* Byte[51]: Category Specific Commands */
+ 0x00, /* Byte[52]: Access Control and feature */
+ 0x20, 0x20, 0x20, /* Byte[53:55] */
+};
+
+static int inquiry(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ char *inquiry_default = (char *)"Generic-xD/SD/M.S. 1.00 ";
+ char *inquiry_string;
+ unsigned char sendbytes;
+ unsigned char *buf;
+ u8 card = get_lun_card(chip, lun);
+ int pro_formatter_flag = 0;
+ unsigned char inquiry_buf[] = {
+ QULIFIRE | DRCT_ACCESS_DEV,
+ RMB_DISC | 0x0D,
+ 0x00,
+ 0x01,
+ 0x1f,
+ 0x02,
+ 0,
+ REL_ADR | WBUS_32 | WBUS_16 | SYNC | LINKED | CMD_QUE | SFT_RE,
+ };
+
+ inquiry_string = inquiry_default;
+
+ buf = vmalloc(scsi_bufflen(srb));
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ if (MS_FORMATTER_ENABLED(chip) && (get_lun2card(chip, lun) & MS_CARD)) {
+ if (!card || (card == MS_CARD))
+ pro_formatter_flag = 1;
+ }
+
+ if (pro_formatter_flag) {
+ if (scsi_bufflen(srb) < 56)
+ sendbytes = (unsigned char)(scsi_bufflen(srb));
+ else
+ sendbytes = 56;
+ } else {
+ if (scsi_bufflen(srb) < 36)
+ sendbytes = (unsigned char)(scsi_bufflen(srb));
+ else
+ sendbytes = 36;
+ }
+
+ if (sendbytes > 8) {
+ memcpy(buf, inquiry_buf, 8);
+ /* strncpy zero-pads instead of reading past the end of the short
+ * inquiry_string when the host asks for the full 48 bytes */
+ strncpy((char *)(buf + 8), inquiry_string, sendbytes - 8);
+ if (pro_formatter_flag)
+ buf[4] = 0x33; /* Additional Length */
+ } else {
+ memcpy(buf, inquiry_buf, sendbytes);
+ }
+
+ if (pro_formatter_flag) {
+ if (sendbytes > 36)
+ memcpy(buf + 36, formatter_inquiry_str, sendbytes - 36);
+ }
+
+ scsi_set_resid(srb, 0);
+
+ rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+static int start_stop_unit(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+
+ scsi_set_resid(srb, scsi_bufflen(srb));
+
+ if (srb->cmnd[1] == 1)
+ return TRANSPORT_GOOD;
+
+ switch (srb->cmnd[4]) {
+ case STOP_MEDIUM:
+ /* Media disabled */
+ return TRANSPORT_GOOD;
+
+ case UNLOAD_MEDIUM:
+ /* Unload the medium */
+ if (check_card_ready(chip, lun))
+ eject_card(chip, lun);
+ return TRANSPORT_GOOD;
+
+ case MAKE_MEDIUM_READY:
+ case LOAD_MEDIUM:
+ if (check_card_ready(chip, lun)) {
+ return TRANSPORT_GOOD;
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ break;
+ }
+
+ TRACE_RET(chip, TRANSPORT_ERROR);
+}
+
+static int allow_medium_removal(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int prevent;
+
+ prevent = srb->cmnd[4] & 0x1;
+
+ scsi_set_resid(srb, 0);
+
+ if (prevent) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static void ms_mode_sense(struct rts51x_chip *chip, u8 cmd,
+ int lun, u8 *buf, int buf_len)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ int sys_info_offset;
+ int data_size = buf_len;
+ int support_format = 0;
+ int i = 0;
+
+ if (cmd == MODE_SENSE) {
+ sys_info_offset = 8;
+ if (data_size > 0x68)
+ data_size = 0x68;
+ buf[i++] = 0x67; /* Mode Data Length */
+ } else {
+ sys_info_offset = 12;
+ if (data_size > 0x6C)
+ data_size = 0x6C;
+ buf[i++] = 0x00; /* Mode Data Length (MSB) */
+ buf[i++] = 0x6A; /* Mode Data Length (LSB) */
+ }
+
+ /* Medium Type Code */
+ if (check_card_ready(chip, lun)) {
+ if (CHK_MSXC(ms_card)) {
+ support_format = 1;
+ buf[i++] = 0x40;
+ } else if (CHK_MSPRO(ms_card)) {
+ support_format = 1;
+ buf[i++] = 0x20;
+ } else {
+ buf[i++] = 0x10;
+ }
+
+ /* WP */
+ if (check_card_wp(chip, lun))
+ buf[i++] = 0x80;
+ else
+ buf[i++] = 0x00;
+ } else {
+ buf[i++] = 0x00; /* MediaType */
+ buf[i++] = 0x00; /* WP */
+ }
+
+ buf[i++] = 0x00; /* Reserved */
+
+ if (cmd == MODE_SENSE_10) {
+ buf[i++] = 0x00; /* Reserved */
+ buf[i++] = 0x00; /* Block descriptor length(MSB) */
+ buf[i++] = 0x00; /* Block descriptor length(LSB) */
+
+ /* The Following Data is the content of "Page 0x20" */
+ if (data_size >= 9)
+ buf[i++] = 0x20; /* Page Code */
+ if (data_size >= 10)
+ buf[i++] = 0x62; /* Page Length */
+ if (data_size >= 11)
+ buf[i++] = 0x00; /* No Access Control */
+ if (data_size >= 12) {
+ if (support_format)
+ buf[i++] = 0xC0; /* SF, SGM */
+ else
+ buf[i++] = 0x00;
+ }
+ } else {
+ /* The Following Data is the content of "Page 0x20" */
+ if (data_size >= 5)
+ buf[i++] = 0x20; /* Page Code */
+ if (data_size >= 6)
+ buf[i++] = 0x62; /* Page Length */
+ if (data_size >= 7)
+ buf[i++] = 0x00; /* No Access Control */
+ if (data_size >= 8) {
+ if (support_format)
+ buf[i++] = 0xC0; /* SF, SGM */
+ else
+ buf[i++] = 0x00;
+ }
+ }
+
+ if (data_size > sys_info_offset) {
+ /* 96 Bytes Attribute Data */
+ int len = data_size - sys_info_offset;
+ len = (len < 96) ? len : 96;
+
+ memcpy(buf + sys_info_offset, ms_card->raw_sys_info, len);
+ }
+}
+
+static int mode_sense(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ unsigned int dataSize;
+ int status;
+ int pro_formatter_flag;
+ unsigned char pageCode, *buf;
+ u8 card = get_lun_card(chip, lun);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ scsi_set_resid(srb, scsi_bufflen(srb));
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ pro_formatter_flag = 0;
+ dataSize = 8;
+ /* In combo mode the device answers MODE SENSE as an MS LUN
+ * when no card is inserted */
+ if ((get_lun2card(chip, lun) & MS_CARD)) {
+ if (!card || (card == MS_CARD)) {
+ dataSize = 108;
+ if (chip->option.mspro_formatter_enable)
+ pro_formatter_flag = 1;
+ }
+ }
+
+ buf = kmalloc(dataSize, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ pageCode = srb->cmnd[2] & 0x3f;
+
+ if ((pageCode == 0x3F) || (pageCode == 0x1C) ||
+ (pageCode == 0x00) || (pro_formatter_flag && (pageCode == 0x20))) {
+ if (srb->cmnd[0] == MODE_SENSE) {
+ if ((pageCode == 0x3F) || (pageCode == 0x20)) {
+ ms_mode_sense(chip, srb->cmnd[0], lun, buf,
+ dataSize);
+ } else {
+ dataSize = 4;
+ buf[0] = 0x03;
+ buf[1] = 0x00;
+ if (check_card_wp(chip, lun))
+ buf[2] = 0x80; /* WP bit lives in byte 2 */
+ else
+ buf[2] = 0x00;
+ buf[3] = 0x00;
+ }
+ } else {
+ if ((pageCode == 0x3F) || (pageCode == 0x20)) {
+ ms_mode_sense(chip, srb->cmnd[0], lun, buf,
+ dataSize);
+ } else {
+ dataSize = 8;
+ buf[0] = 0x00;
+ buf[1] = 0x06;
+ buf[2] = 0x00;
+ if (check_card_wp(chip, lun))
+ buf[3] = 0x80;
+ else
+ buf[3] = 0x00;
+ buf[4] = 0x00;
+ buf[5] = 0x00;
+ buf[6] = 0x00;
+ buf[7] = 0x00;
+ }
+ }
+ status = TRANSPORT_GOOD;
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ scsi_set_resid(srb, scsi_bufflen(srb));
+ status = TRANSPORT_FAILED;
+ }
+
+ if (status == TRANSPORT_GOOD) {
+ unsigned int len = min(scsi_bufflen(srb), dataSize);
+ rts51x_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+ }
+ kfree(buf);
+
+ return status;
+}
+
+static int request_sense(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct sense_data_t *sense;
+ unsigned int lun = SCSI_LUN(srb);
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned char *tmp, *buf;
+
+ sense = &(chip->sense_buffer[lun]);
+
+ if ((get_lun_card(chip, lun) == MS_CARD)
+ && PRO_UNDER_FORMATTING(ms_card)) {
+ mspro_format_sense(chip, lun);
+ }
+
+ buf = vmalloc(scsi_bufflen(srb));
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ tmp = (unsigned char *)sense;
+ /* don't read past the sense structure when the host asks for more */
+ memset(buf, 0, scsi_bufflen(srb));
+ memcpy(buf, tmp, min_t(size_t, scsi_bufflen(srb), sizeof(*sense)));
+
+ rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+ vfree(buf);
+
+ scsi_set_resid(srb, 0);
+ /* Reset Sense Data */
+ set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
+ return TRANSPORT_GOOD;
+}
+
+static int read_write(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+#ifdef SUPPORT_SD_LOCK
+ struct sd_info *sd_card = &(chip->sd_card);
+#endif
+ unsigned int lun = SCSI_LUN(srb);
+ int retval;
+ u32 start_sec;
+ u16 sec_cnt;
+
+ if (!check_card_ready(chip, lun) || (chip->capacity[lun] == 0)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!check_lun_mc(chip, lun)) {
+ set_lun_mc(chip, lun);
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ return TRANSPORT_FAILED;
+ }
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_erase_status) {
+ /* Access to any card is forbidden until the SD erase
+ * procedure has completed */
+ RTS51X_DEBUGP("SD card being erased!\n");
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_READ_FORBIDDEN);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (get_lun_card(chip, lun) == SD_CARD) {
+ if (sd_card->sd_lock_status & SD_LOCKED) {
+ RTS51X_DEBUGP("SD card locked!\n");
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_READ_FORBIDDEN);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+#endif
+
+ if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10)) {
+ start_sec =
+ ((u32) srb->cmnd[2] << 24) |
+ ((u32) srb->cmnd[3] << 16) |
+ ((u32) srb->cmnd[4] << 8) |
+ ((u32) srb->cmnd[5]);
+ sec_cnt = ((u16) (srb->cmnd[7]) << 8) | srb->cmnd[8];
+ } else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) {
+ start_sec = ((u32) (srb->cmnd[1] & 0x1F) << 16) |
+ ((u32) srb->cmnd[2] << 8) | ((u32) srb->cmnd[3]);
+ sec_cnt = srb->cmnd[4];
+ } else if ((srb->cmnd[0] == VENDOR_CMND) &&
+ (srb->cmnd[1] == SCSI_APP_CMD) &&
+ ((srb->cmnd[2] == PP_READ10) ||
+ (srb->cmnd[2] == PP_WRITE10))) {
+ start_sec = ((u32) srb->cmnd[4] << 24) |
+ ((u32) srb->cmnd[5] << 16) |
+ ((u32) srb->cmnd[6] << 8) |
+ ((u32) srb->cmnd[7]);
+ sec_cnt = ((u16) (srb->cmnd[9]) << 8) | srb->cmnd[10];
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if ((start_sec > chip->capacity[lun]) ||
+ ((start_sec + sec_cnt) > chip->capacity[lun])) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LBA_OVER_RANGE);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (sec_cnt == 0) {
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+ }
+
+ if ((srb->sc_data_direction == DMA_TO_DEVICE)
+ && check_card_wp(chip, lun)) {
+ RTS51X_DEBUGP("Write protected card!\n");
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ retval = card_rw(srb, chip, start_sec, sec_cnt);
+ if (retval != STATUS_SUCCESS) {
+ if (srb->sc_data_direction == DMA_FROM_DEVICE) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+ }
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_format_capacity(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned char *buf;
+ unsigned int lun = SCSI_LUN(srb);
+ unsigned int buf_len;
+ u8 card = get_lun_card(chip, lun);
+ int desc_cnt;
+ int i = 0;
+
+ if (!check_card_ready(chip, lun)) {
+ if (!chip->option.mspro_formatter_enable) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ buf_len = (scsi_bufflen(srb) > 12) ? 0x14 : 12;
+
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ buf[i++] = 0;
+ buf[i++] = 0;
+ buf[i++] = 0;
+
+ /* Capacity List Length */
+ if ((buf_len > 12) && chip->option.mspro_formatter_enable &&
+ (chip->lun2card[lun] & MS_CARD) && (!card || (card == MS_CARD))) {
+ buf[i++] = 0x10;
+ desc_cnt = 2;
+ } else {
+ buf[i++] = 0x08;
+ desc_cnt = 1;
+ }
+
+ while (desc_cnt) {
+ if (check_card_ready(chip, lun)) {
+ buf[i++] = (unsigned char)((chip->capacity[lun]) >> 24);
+ buf[i++] = (unsigned char)((chip->capacity[lun]) >> 16);
+ buf[i++] = (unsigned char)((chip->capacity[lun]) >> 8);
+ buf[i++] = (unsigned char)(chip->capacity[lun]);
+
+ if (desc_cnt == 2)
+ /* Byte[8]: Descriptor Type: Formatted medium */
+ buf[i++] = 2;
+ else
+ buf[i++] = 0; /* Byte[16] */
+ } else {
+ buf[i++] = 0xFF;
+ buf[i++] = 0xFF;
+ buf[i++] = 0xFF;
+ buf[i++] = 0xFF;
+
+ if (desc_cnt == 2)
+ /* Byte[8]: Descriptor Type: No medium */
+ buf[i++] = 3;
+ else
+ buf[i++] = 0; /*Byte[16] */
+ }
+
+ buf[i++] = 0x00;
+ buf[i++] = 0x02;
+ buf[i++] = 0x00;
+
+ desc_cnt--;
+ }
+
+ buf_len = min(scsi_bufflen(srb), buf_len);
+ rts51x_set_xfer_buf(buf, buf_len, srb);
+ kfree(buf);
+
+ scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_capacity(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned char *buf;
+ unsigned int lun = SCSI_LUN(srb);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!check_lun_mc(chip, lun)) {
+ set_lun_mc(chip, lun);
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
+ return TRANSPORT_FAILED;
+ }
+
+ buf = kmalloc(8, GFP_KERNEL);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ buf[0] = (unsigned char)((chip->capacity[lun] - 1) >> 24);
+ buf[1] = (unsigned char)((chip->capacity[lun] - 1) >> 16);
+ buf[2] = (unsigned char)((chip->capacity[lun] - 1) >> 8);
+ buf[3] = (unsigned char)(chip->capacity[lun] - 1);
+
+ buf[4] = 0x00;
+ buf[5] = 0x00;
+ buf[6] = 0x02;
+ buf[7] = 0x00;
+
+ rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+ kfree(buf);
+
+ scsi_set_resid(srb, 0);
+
+ return TRANSPORT_GOOD;
+}
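READ CAPACITY(10) returns the address of the last block (capacity - 1) and the
block length, both big-endian; bytes 4-7 above encode the fixed 512-byte
sector size. A worked example with a hypothetical capacity value:

	/* chip->capacity[lun] = 128000 (0x1F400) sectors, ~62.5 MiB:
	 *   last LBA  = 0x0001F3FF -> buf[0..3] = 00 01 F3 FF
	 *   block len = 512        -> buf[4..7] = 00 00 02 00
	 */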
+
+static int get_dev_status(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ unsigned int buf_len;
+ u8 status[32] = { 0 };
+
+ rts51x_pp_status(chip, lun, status, 32);
+
+ buf_len = min(scsi_bufflen(srb), (unsigned int)sizeof(status));
+ rts51x_set_xfer_buf(status, buf_len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_status(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ u8 rts51x_status[16];
+ unsigned int buf_len;
+ unsigned int lun = SCSI_LUN(srb);
+
+ rts51x_read_status(chip, lun, rts51x_status, 16);
+
+ buf_len = min(scsi_bufflen(srb), (unsigned int)sizeof(rts51x_status));
+ rts51x_set_xfer_buf(rts51x_status, buf_len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_mem(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ unsigned short addr, len, i;
+ int retval;
+ u8 *buf;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ addr = ((u16) srb->cmnd[2] << 8) | srb->cmnd[3];
+ len = ((u16) srb->cmnd[4] << 8) | srb->cmnd[5];
+
+ if (addr < 0xe000) {
+ RTS51X_DEBUGP("filter!addr=0x%x\n", addr);
+ return TRANSPORT_GOOD;
+ }
+
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ for (i = 0; i < len; i++) {
+ retval = rts51x_ep0_read_register(chip, addr + i, buf + i);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ len = (unsigned short)min(scsi_bufflen(srb), (unsigned int)len);
+ rts51x_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ vfree(buf);
+
+ return TRANSPORT_GOOD;
+}
+
+static int write_mem(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ unsigned short addr, len, i;
+ int retval;
+ u8 *buf;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ addr = ((u16) srb->cmnd[2] << 8) | srb->cmnd[3];
+ len = ((u16) srb->cmnd[4] << 8) | srb->cmnd[5];
+
+ if (addr < 0xe000) {
+ RTS51X_DEBUGP("filter!addr=0x%x\n", addr);
+ return TRANSPORT_GOOD;
+ }
+
+ len = (unsigned short)min(scsi_bufflen(srb), (unsigned int)len);
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rts51x_get_xfer_buf(buf, len, srb);
+
+ for (i = 0; i < len; i++) {
+ retval =
+ rts51x_ep0_write_register(chip, addr + i, 0xFF, buf[i]);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ vfree(buf);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ return TRANSPORT_GOOD;
+}
+
+static int get_sd_csd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct sd_info *sd_card = &(chip->sd_card);
+ unsigned int lun = SCSI_LUN(srb);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (get_lun_card(chip, lun) != SD_CARD) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ rts51x_set_xfer_buf(sd_card->raw_csd, scsi_bufflen(srb), srb);
+
+ return TRANSPORT_GOOD;
+}
+
+static int read_phy_register(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int retval;
+ u8 addr, len, i;
+ u8 *buf;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ addr = srb->cmnd[5];
+ len = srb->cmnd[7];
+
+ if (len) {
+ buf = vmalloc(len);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ for (i = 0; i < len; i++) {
+ retval =
+ rts51x_read_phy_register(chip, addr + i, buf + i);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ len = min(scsi_bufflen(srb), (unsigned int)len);
+ rts51x_set_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ vfree(buf);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int write_phy_register(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int retval;
+ u8 addr, len, i;
+ u8 *buf;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ addr = srb->cmnd[5];
+ len = srb->cmnd[7];
+
+ if (len) {
+ len = min(scsi_bufflen(srb), (unsigned int)len);
+
+ buf = vmalloc(len);
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rts51x_get_xfer_buf(buf, len, srb);
+ scsi_set_resid(srb, scsi_bufflen(srb) - len);
+
+ for (i = 0; i < len; i++) {
+ retval =
+ rts51x_write_phy_register(chip, addr + i, buf[i]);
+ if (retval != STATUS_SUCCESS) {
+ vfree(buf);
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ vfree(buf);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int get_card_bus_width(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ u8 card, bus_width;
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ card = get_lun_card(chip, lun);
+ if ((card == SD_CARD) || (card == MS_CARD)) {
+ bus_width = chip->card_bus_width[lun];
+ } else {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ rts51x_set_xfer_buf(&bus_width, scsi_bufflen(srb), srb);
+
+ return TRANSPORT_GOOD;
+}
+
+#ifdef _MSG_TRACE
+static int trace_msg_cmd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned char *buf = NULL;
+ u8 clear;
+ unsigned int buf_len;
+
+ buf_len =
+ 4 +
+ ((2 + MSG_FUNC_LEN + MSG_FILE_LEN + TIME_VAL_LEN) * TRACE_ITEM_CNT);
+
+ if ((scsi_bufflen(srb) < buf_len) || (scsi_sglist(srb) == NULL)) {
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ clear = srb->cmnd[2];
+
+ buf = vmalloc(scsi_bufflen(srb));
+ if (buf == NULL)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ rts51x_trace_msg(chip, buf, clear);
+
+ rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+ vfree(buf);
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+#endif
+
+static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int retval = STATUS_SUCCESS;
+ unsigned int lun = SCSI_LUN(srb);
+ u8 cmd_type, mask, value, idx, mode, len;
+ u16 addr;
+ u32 timeout;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ switch (srb->cmnd[3]) {
+ case INIT_BATCHCMD:
+ rts51x_init_cmd(chip);
+ break;
+
+ case ADD_BATCHCMD:
+ cmd_type = srb->cmnd[4];
+ if (cmd_type > 2) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ addr = (srb->cmnd[5] << 8) | srb->cmnd[6];
+ mask = srb->cmnd[7];
+ value = srb->cmnd[8];
+ rts51x_add_cmd(chip, cmd_type, addr, mask, value);
+ break;
+
+ case SEND_BATCHCMD:
+ mode = srb->cmnd[4];
+ len = srb->cmnd[5];
+ timeout = ((u32) srb->cmnd[6] << 24) |
+ ((u32) srb->cmnd[7] << 16) |
+ ((u32) srb->cmnd[8] << 8) |
+ ((u32) srb->cmnd[9]);
+ retval = rts51x_send_cmd(chip, mode, 1000);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if (mode & STAGE_R) {
+ retval = rts51x_get_rsp(chip, len, timeout);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+ break;
+
+ case GET_BATCHRSP:
+ idx = srb->cmnd[4];
+ value = chip->rsp_buf[idx];
+ if (scsi_bufflen(srb) < 1) {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ rts51x_set_xfer_buf(&value, 1, srb);
+ scsi_set_resid(srb, 0);
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return TRANSPORT_GOOD;
+}
+
+static int suit_cmd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int result;
+
+ switch (srb->cmnd[3]) {
+ case INIT_BATCHCMD:
+ case ADD_BATCHCMD:
+ case SEND_BATCHCMD:
+ case GET_BATCHRSP:
+ result = rw_mem_cmd_buf(srb, chip);
+ break;
+ default:
+ result = TRANSPORT_ERROR;
+ }
+
+ return result;
+}
+
+static int app_cmd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int result;
+
+ switch (srb->cmnd[2]) {
+ case PP_READ10:
+ case PP_WRITE10:
+ result = read_write(srb, chip);
+ break;
+
+ case SUIT_CMD:
+ result = suit_cmd(srb, chip);
+ break;
+
+ case READ_PHY:
+ result = read_phy_register(srb, chip);
+ break;
+
+ case WRITE_PHY:
+ result = write_phy_register(srb, chip);
+ break;
+
+ case GET_DEV_STATUS:
+ result = get_dev_status(srb, chip);
+ break;
+
+ default:
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return result;
+}
+
+static int vendor_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int result = TRANSPORT_GOOD;
+
+ switch (srb->cmnd[1]) {
+ case READ_STATUS:
+ result = read_status(srb, chip);
+ break;
+
+ case READ_MEM:
+ result = read_mem(srb, chip);
+ break;
+
+ case WRITE_MEM:
+ result = write_mem(srb, chip);
+ break;
+
+ case GET_BUS_WIDTH:
+ result = get_card_bus_width(srb, chip);
+ break;
+
+ case GET_SD_CSD:
+ result = get_sd_csd(srb, chip);
+ break;
+
+#ifdef _MSG_TRACE
+ case TRACE_MSG:
+ result = trace_msg_cmd(srb, chip);
+ break;
+#endif
+
+ case SCSI_APP_CMD:
+ result = app_cmd(srb, chip);
+ break;
+
+ default:
+ set_sense_type(chip, SCSI_LUN(srb),
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return result;
+}
+
+static int ms_format_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval, quick_format;
+
+ if (get_lun_card(chip, lun) != MS_CARD) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if ((srb->cmnd[3] != 0x4D) || (srb->cmnd[4] != 0x47)
+ || (srb->cmnd[5] != 0x66) || (srb->cmnd[6] != 0x6D)
+ || (srb->cmnd[7] != 0x74)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (srb->cmnd[8] & 0x01)
+ quick_format = 0;
+ else
+ quick_format = 1;
+
+ if (!(chip->card_ready & MS_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (chip->card_wp & MS_CARD) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!CHK_MSPRO(ms_card)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ retval = mspro_format(srb, chip, MS_SHORT_DATA_LEN, quick_format);
+ if (retval != STATUS_SUCCESS) {
+ set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+
+#ifdef SUPPORT_PCGL_1P18
+int get_ms_information(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ u8 dev_info_id, data_len;
+ u8 *buf;
+ unsigned int buf_len;
+ int i;
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if ((get_lun_card(chip, lun) != MS_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if ((srb->cmnd[2] != 0xB0) || (srb->cmnd[4] != 0x4D) ||
+ (srb->cmnd[5] != 0x53) || (srb->cmnd[6] != 0x49) ||
+ (srb->cmnd[7] != 0x44)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ dev_info_id = srb->cmnd[3];
+ if ((CHK_MSXC(ms_card) && (dev_info_id == 0x10)) ||
+ (!CHK_MSXC(ms_card) && (dev_info_id == 0x13)) ||
+ !CHK_MSPRO(ms_card)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (dev_info_id == 0x15)
+ data_len = 0x3A;
+ else
+ data_len = 0x6A;
+ /* the response is the payload plus the 2-byte length header; using
+ * data_len alone undersizes the buffer for the 96-byte memcpy below
+ * and disagrees with the 0x3C/0x6C resid values at the end */
+ buf_len = data_len + 2;
+
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ TRACE_RET(chip, TRANSPORT_ERROR);
+
+ i = 0;
+ /* GET Memory Stick Media Information Response Header */
+ buf[i++] = 0x00; /* Data length MSB */
+ buf[i++] = data_len; /* Data length LSB */
+ /* Device Information Type Code */
+ if (CHK_MSXC(ms_card))
+ buf[i++] = 0x03;
+ else
+ buf[i++] = 0x02;
+ /* SGM bit */
+ buf[i++] = 0x01;
+ /* Reserved */
+ buf[i++] = 0x00;
+ buf[i++] = 0x00;
+ buf[i++] = 0x00;
+ /* Number of Device Information */
+ buf[i++] = 0x01;
+
+ /* Device Information Body
+ * Device Information ID Number */
+ buf[i++] = dev_info_id;
+ /* Device Information Length */
+ if (dev_info_id == 0x15)
+ data_len = 0x31;
+ else
+ data_len = 0x61;
+ buf[i++] = 0x00; /* Data length MSB */
+ buf[i++] = data_len; /* Data length LSB */
+ /* Valid Bit */
+ buf[i++] = 0x80;
+ if ((dev_info_id == 0x10) || (dev_info_id == 0x13)) {
+ /* System Information */
+ memcpy(buf + i, ms_card->raw_sys_info, 96);
+ } else {
+ /* Model Name */
+ memcpy(buf + i, ms_card->raw_model_name, 48);
+ }
+
+ rts51x_set_xfer_buf(buf, buf_len, srb);
+
+ if (dev_info_id == 0x15)
+ scsi_set_resid(srb, scsi_bufflen(srb) - 0x3C);
+ else
+ scsi_set_resid(srb, scsi_bufflen(srb) - 0x6C);
+
+ kfree(buf);
+ return STATUS_SUCCESS;
+}
+#endif
+
+static int ms_sp_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ int retval = TRANSPORT_ERROR;
+
+ if (srb->cmnd[2] == MS_FORMAT)
+ retval = ms_format_cmnd(srb, chip);
+#ifdef SUPPORT_PCGL_1P18
+ else if (srb->cmnd[2] == GET_MS_INFORMATION)
+ retval = get_ms_information(srb, chip);
+#endif
+
+ return retval;
+}
+
+#ifdef SUPPORT_CPRM
+static int sd_extention_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ unsigned int lun = SCSI_LUN(srb);
+ int result;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ sd_cleanup_work(chip);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if ((get_lun_card(chip, lun) != SD_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ switch (srb->cmnd[0]) {
+ case SD_PASS_THRU_MODE:
+ result = sd_pass_thru_mode(srb, chip);
+ break;
+
+ case SD_EXECUTE_NO_DATA:
+ result = sd_execute_no_data(srb, chip);
+ break;
+
+ case SD_EXECUTE_READ:
+ result = sd_execute_read_data(srb, chip);
+ break;
+
+ case SD_EXECUTE_WRITE:
+ result = sd_execute_write_data(srb, chip);
+ break;
+
+ case SD_GET_RSP:
+ result = sd_get_cmd_rsp(srb, chip);
+ break;
+
+ case SD_HW_RST:
+ result = sd_hw_rst(srb, chip);
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ return result;
+}
+#endif
+
+#ifdef SUPPORT_MAGIC_GATE
+int mg_report_key(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval;
+ u8 key_format;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ ms_cleanup_work(chip);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if ((get_lun_card(chip, lun) != MS_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (srb->cmnd[7] != KC_MG_R_PRO) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!CHK_MSPRO(ms_card)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ key_format = srb->cmnd[10] & 0x3F;
+
+ switch (key_format) {
+ case KF_GET_LOC_EKB:
+ if ((scsi_bufflen(srb) == 0x41C) &&
+ (srb->cmnd[8] == 0x04) && (srb->cmnd[9] == 0x1C)) {
+ retval = mg_get_local_EKB(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_RSP_CHG:
+ if ((scsi_bufflen(srb) == 0x24) &&
+ (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x24)) {
+ retval = mg_get_rsp_chg(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_GET_ICV:
+ ms_card->mg_entry_num = srb->cmnd[5];
+ if ((scsi_bufflen(srb) == 0x404) &&
+ (srb->cmnd[8] == 0x04) &&
+ (srb->cmnd[9] == 0x04) &&
+ (srb->cmnd[2] == 0x00) &&
+ (srb->cmnd[3] == 0x00) &&
+ (srb->cmnd[4] == 0x00) && (srb->cmnd[5] < 32)) {
+ retval = mg_get_ICV(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+
+int mg_send_key(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int retval;
+ u8 key_format;
+
+ rts51x_prepare_run(chip);
+ RTS51X_SET_STAT(chip, STAT_RUN);
+
+ ms_cleanup_work(chip);
+
+ if (!check_card_ready(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if (check_card_wp(chip, lun)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ if ((get_lun_card(chip, lun) != MS_CARD)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (srb->cmnd[7] != KC_MG_R_PRO) {
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ if (!CHK_MSPRO(ms_card)) {
+ set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ key_format = srb->cmnd[10] & 0x3F;
+
+ switch (key_format) {
+ case KF_SET_LEAF_ID:
+ if ((scsi_bufflen(srb) == 0x0C) &&
+ (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x0C)) {
+ retval = mg_set_leaf_id(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_CHG_HOST:
+ if ((scsi_bufflen(srb) == 0x0C) &&
+ (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x0C)) {
+ retval = mg_chg(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_RSP_HOST:
+ if ((scsi_bufflen(srb) == 0x0C) &&
+ (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x0C)) {
+ retval = mg_rsp(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ case KF_SET_ICV:
+ ms_card->mg_entry_num = srb->cmnd[5];
+ if ((scsi_bufflen(srb) == 0x404) &&
+ (srb->cmnd[8] == 0x04) &&
+ (srb->cmnd[9] == 0x04) &&
+ (srb->cmnd[2] == 0x00) &&
+ (srb->cmnd[3] == 0x00) &&
+ (srb->cmnd[4] == 0x00) && (srb->cmnd[5] < 32)) {
+ retval = mg_set_ICV(srb, chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ } else {
+ set_sense_type(chip, lun,
+ SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+
+ scsi_set_resid(srb, 0);
+ return TRANSPORT_GOOD;
+}
+#endif
+
+int rts51x_scsi_handler(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+#ifdef SUPPORT_SD_LOCK
+ struct sd_info *sd_card = &(chip->sd_card);
+#endif
+ struct ms_info *ms_card = &(chip->ms_card);
+ unsigned int lun = SCSI_LUN(srb);
+ int result = TRANSPORT_GOOD;
+
+#ifdef SUPPORT_SD_LOCK
+ if (sd_card->sd_erase_status) {
+ /* Block every SCSI command except REQUEST SENSE and the
+ * GET_DEV_STATUS vendor command while the SD erase runs */
+ if (!((srb->cmnd[0] == VENDOR_CMND) &&
+ (srb->cmnd[1] == SCSI_APP_CMD) &&
+ (srb->cmnd[2] == GET_DEV_STATUS)) &&
+ (srb->cmnd[0] != REQUEST_SENSE)) {
+ /* Logical Unit Not Ready Format in Progress */
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
+ 0, 0);
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+#endif
+
+ if ((get_lun_card(chip, lun) == MS_CARD) &&
+ (ms_card->format_status == FORMAT_IN_PROGRESS)) {
+ if ((srb->cmnd[0] != REQUEST_SENSE)
+ && (srb->cmnd[0] != INQUIRY)) {
+ /* Logical Unit Not Ready Format in Progress */
+ set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
+ 0, (u16) (ms_card->progress));
+ TRACE_RET(chip, TRANSPORT_FAILED);
+ }
+ }
+
+ switch (srb->cmnd[0]) {
+ case READ_10:
+ case WRITE_10:
+ case READ_6:
+ case WRITE_6:
+ result = read_write(srb, chip);
+ break;
+
+ case TEST_UNIT_READY:
+ result = test_unit_ready(srb, chip);
+ break;
+
+ case INQUIRY:
+ result = inquiry(srb, chip);
+ break;
+
+ case READ_CAPACITY:
+ result = read_capacity(srb, chip);
+ break;
+
+ case START_STOP:
+ result = start_stop_unit(srb, chip);
+ break;
+
+ case ALLOW_MEDIUM_REMOVAL:
+ result = allow_medium_removal(srb, chip);
+ break;
+
+ case REQUEST_SENSE:
+ result = request_sense(srb, chip);
+ break;
+
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+ result = mode_sense(srb, chip);
+ break;
+
+ case 0x23:
+ result = read_format_capacity(srb, chip);
+ break;
+
+ case VENDOR_CMND:
+ result = vendor_cmnd(srb, chip);
+ break;
+
+ case MS_SP_CMND:
+ result = ms_sp_cmnd(srb, chip);
+ break;
+
+#ifdef SUPPORT_CPRM
+ case SD_PASS_THRU_MODE:
+ case SD_EXECUTE_NO_DATA:
+ case SD_EXECUTE_READ:
+ case SD_EXECUTE_WRITE:
+ case SD_GET_RSP:
+ case SD_HW_RST:
+ result = sd_extention_cmnd(srb, chip);
+ break;
+#endif
+
+#ifdef SUPPORT_MAGIC_GATE
+ case CMD_MSPRO_MG_RKEY:
+ result = mg_report_key(srb, chip);
+ break;
+
+ case CMD_MSPRO_MG_SKEY:
+ result = mg_send_key(srb, chip);
+ break;
+#endif
+
+ case FORMAT_UNIT:
+ case MODE_SELECT:
+ case VERIFY:
+ result = TRANSPORT_GOOD;
+ break;
+
+ default:
+ set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+ result = TRANSPORT_FAILED;
+ }
+
+ return result;
+}
+
+/***********************************************************************
+ * Host functions
+ ***********************************************************************/
+
+const char *host_info(struct Scsi_Host *host)
+{
+ return "SCSI emulation for RTS51xx USB driver-based card reader";
+}
+
+int slave_alloc(struct scsi_device *sdev)
+{
+ /*
+ * Set the INQUIRY transfer length to 36. We don't use any of
+ * the extra data and many devices choke if asked for more or
+ * less than 36 bytes.
+ */
+ sdev->inquiry_len = 36;
+ return 0;
+}
+
+int slave_configure(struct scsi_device *sdev)
+{
+ /* Scatter-gather buffers (all but the last) must have a length
+ * divisible by the bulk maxpacket size. Otherwise a data packet
+ * would end up being short, causing a premature end to the data
+ * transfer. Since high-speed bulk pipes have a maxpacket size
+ * of 512, we'll use that as the scsi device queue's DMA alignment
+ * mask. Guaranteeing proper alignment of the first buffer will
+ * have the desired effect because, except at the beginning and
+ * the end, scatter-gather buffers follow page boundaries. */
+ blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
+
+ /* Set the SCSI level to at least 2. We'll leave it at 3 if that's
+ * what is originally reported. We need this to avoid confusing
+ * the SCSI layer with devices that report 0 or 1, but need 10-byte
+ * commands (ala ATAPI devices behind certain bridges, or devices
+ * which simply have broken INQUIRY data).
+ *
+ * NOTE: This means /dev/sg programs (ala cdrecord) will get the
+ * actual information. This seems to be the preference for
+ * programs like that.
+ *
+ * NOTE: This also means that /proc/scsi/scsi and sysfs may report
+ * the actual value or the modified one, depending on where the
+ * data comes from.
+ */
+ if (sdev->scsi_level < SCSI_2)
+ sdev->scsi_level = sdev->sdev_target->scsi_level = SCSI_2;
+
+ return 0;
+}
+
+/***********************************************************************
+ * /proc/scsi/ functions
+ ***********************************************************************/
+
+/* we use this macro to help us write into the buffer */
+#undef SPRINTF
+#define SPRINTF(args...) \
+ do { if (pos < buffer+length) pos += sprintf(pos, ## args); } while (0)
+
+int proc_info(struct Scsi_Host *host, char *buffer,
+ char **start, off_t offset, int length, int inout)
+{
+ char *pos = buffer;
+
+ /* if someone is sending us data, just throw it away */
+ if (inout)
+ return length;
+
+ /* print the controller name */
+ SPRINTF(" Host scsi%d: %s\n", host->host_no, RTS51X_NAME);
+
+ /* print product, vendor, and driver version strings */
+ SPRINTF(" Vendor: Realtek Corp.\n");
+ SPRINTF(" Product: RTS51xx USB Card Reader\n");
+ SPRINTF(" Version: %s\n", DRIVER_VERSION);
+ SPRINTF(" Build: %s\n", __TIME__);
+
+ /*
+ * Calculate start of next buffer, and return value.
+ */
+ *start = buffer + offset;
+
+ if ((pos - buffer) < offset)
+ return 0;
+ else if ((pos - buffer - offset) < length)
+ return pos - buffer - offset;
+ else
+ return length;
+}
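The tail of proc_info() hand-implements the old /proc read contract: return 0
once the caller's offset is past the generated text, a short count when less
than a full buffer remains, and the requested length otherwise. A worked
example with illustrative numbers:

	/* Suppose 120 bytes of text were generated (pos - buffer == 120):
	 *   offset 0,   length 64 -> remainder 120 >= 64, return 64
	 *   offset 64,  length 64 -> remainder 56 < 64,   return 56
	 *   offset 128, length 64 -> 120 < 128,           return 0 (EOF)
	 */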
+
+/* queue a command */
+/* This is always called with scsi_lock(host) held */
+int queuecommand_lck(struct scsi_cmnd *srb, void (*done) (struct scsi_cmnd *))
+{
+ struct rts51x_chip *chip = host_to_rts51x(srb->device->host);
+
+ /* check for state-transition errors */
+ if (chip->srb != NULL) {
+ RTS51X_DEBUGP("Error in %s: chip->srb = %p\n",
+ __func__, chip->srb);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ /* fail the command if we are disconnecting */
+ if (test_bit(FLIDX_DISCONNECTING, &chip->usb->dflags)) {
+ RTS51X_DEBUGP("Fail command during disconnect\n");
+ srb->result = DID_NO_CONNECT << 16;
+ done(srb);
+ return 0;
+ }
+
+ /* enqueue the command and wake up the control thread */
+ srb->scsi_done = done;
+ chip->srb = srb;
+ complete(&chip->usb->cmnd_ready);
+
+ return 0;
+}
+
+DEF_SCSI_QCMD(queuecommand)
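On kernels from 2.6.37 onward, DEF_SCSI_QCMD() generates the queuecommand()
entry point as a locked wrapper around the _lck variant; roughly paraphrased
(simplified sketch, not the verbatim scsi_host.h definition):

	/* int queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
	 * {
	 *	unsigned long flags;
	 *	int rc;
	 *
	 *	spin_lock_irqsave(shost->host_lock, flags);
	 *	rc = queuecommand_lck(cmd, cmd->scsi_done);
	 *	spin_unlock_irqrestore(shost->host_lock, flags);
	 *	return rc;
	 * }
	 */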
+/***********************************************************************
+ * Error handling functions
+ ***********************************************************************/
+/* Command timeout and abort */
+int command_abort(struct scsi_cmnd *srb)
+{
+ struct rts51x_chip *chip = host_to_rts51x(srb->device->host);
+
+ RTS51X_DEBUGP("%s called\n", __func__);
+
+ /* us->srb together with the TIMED_OUT, RESETTING, and ABORTING
+ * bits are protected by the host lock. */
+ scsi_lock(rts51x_to_host(chip));
+
+ /* Is this command still active? */
+ if (chip->srb != srb) {
+ scsi_unlock(rts51x_to_host(chip));
+ RTS51X_DEBUGP("-- nothing to abort\n");
+ return FAILED;
+ }
+
+ /* Set the TIMED_OUT bit. Also set the ABORTING bit, but only if
+ * a device reset isn't already in progress (to avoid interfering
+ * with the reset). Note that we must retain the host lock while
+ * calling usb_stor_stop_transport(); otherwise it might interfere
+ * with an auto-reset that begins as soon as we release the lock. */
+ set_bit(FLIDX_TIMED_OUT, &chip->usb->dflags);
+ if (!test_bit(FLIDX_RESETTING, &chip->usb->dflags)) {
+ set_bit(FLIDX_ABORTING, &chip->usb->dflags);
+ /* rts51x_stop_transport(us); */
+ }
+ scsi_unlock(rts51x_to_host(chip));
+
+ /* Wait for the aborted command to finish */
+ wait_for_completion(&chip->usb->notify);
+ return SUCCESS;
+}
+
+/* This invokes the transport reset mechanism to reset the state of the
+ * device */
+int device_reset(struct scsi_cmnd *srb)
+{
+ int result = 0;
+
+ RTS51X_DEBUGP("%s called\n", __func__);
+
+ return result < 0 ? FAILED : SUCCESS;
+}
+
+/* Simulate a SCSI bus reset by resetting the device's USB port. */
+int bus_reset(struct scsi_cmnd *srb)
+{
+ int result = 0;
+
+ RTS51X_DEBUGP("%s called\n", __func__);
+
+ return result < 0 ? FAILED : SUCCESS;
+}
+
+static const char *rts5139_info(struct Scsi_Host *host)
+{
+ return "SCSI emulation for RTS5139 USB card reader";
+}
+
+struct scsi_host_template rts51x_host_template = {
+ /* basic userland interface stuff */
+ .name = RTS51X_NAME,
+ .proc_name = RTS51X_NAME,
+ .proc_info = proc_info,
+ .info = rts5139_info,
+
+ /* command interface -- queued only */
+ .queuecommand = queuecommand,
+
+ /* error and abort handlers */
+ .eh_abort_handler = command_abort,
+ .eh_device_reset_handler = device_reset,
+ .eh_bus_reset_handler = bus_reset,
+
+ /* queue commands only, only one command per LUN */
+ .can_queue = 1,
+ .cmd_per_lun = 1,
+
+ /* unknown initiator id */
+ .this_id = -1,
+
+ .slave_alloc = slave_alloc,
+ .slave_configure = slave_configure,
+
+ /* lots of sg segments can be handled */
+ .sg_tablesize = SG_ALL,
+
+ /* limit the total size of a transfer to 120 KB */
+ .max_sectors = 240,
+
+ /* merge commands... this seems to help performance, but
+ * periodically someone should test to see which setting is more
+ * optimal.
+ */
+ .use_clustering = 1,
+
+ /* emulated HBA */
+ .emulated = 1,
+
+ /* we do our own delay after a device or bus reset */
+ .skip_settle_delay = 1,
+
+ /* sysfs device attributes */
+ /* .sdev_attrs = sysfs_device_attr_list, */
+
+ /* module management */
+ .module = THIS_MODULE
+};
+
--- /dev/null
+/*
+ * opal driver interface to hvc_console.c
+ *
+ * Copyright 2011 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/console.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
++#include <linux/export.h>
+
+#include <asm/hvconsole.h>
+#include <asm/prom.h>
+#include <asm/firmware.h>
+#include <asm/hvsi.h>
+#include <asm/udbg.h>
+#include <asm/opal.h>
+
+#include "hvc_console.h"
+
+static const char hvc_opal_name[] = "hvc_opal";
+
+static struct of_device_id hvc_opal_match[] __devinitdata = {
+ { .name = "serial", .compatible = "ibm,opal-console-raw" },
+ { .name = "serial", .compatible = "ibm,opal-console-hvsi" },
+ { },
+};
+
+typedef enum hv_protocol {
+ HV_PROTOCOL_RAW,
+ HV_PROTOCOL_HVSI
+} hv_protocol_t;
+
+struct hvc_opal_priv {
+ hv_protocol_t proto; /* Raw data or HVSI packets */
+ struct hvsi_priv hvsi; /* HVSI specific data */
+};
+static struct hvc_opal_priv *hvc_opal_privs[MAX_NR_HVC_CONSOLES];
+
+/* For early boot console */
+static struct hvc_opal_priv hvc_opal_boot_priv;
+static u32 hvc_opal_boot_termno;
+
+static const struct hv_ops hvc_opal_raw_ops = {
+ .get_chars = opal_get_chars,
+ .put_chars = opal_put_chars,
+ .notifier_add = notifier_add_irq,
+ .notifier_del = notifier_del_irq,
+ .notifier_hangup = notifier_hangup_irq,
+};
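(For orientation: the get_chars/put_chars contract assumed by the raw ops
above; the OPAL prototypes below are as assumed from asm/opal.h, each
taking a vtermno plus buffer/length and returning the count moved.)

	int opal_get_chars(uint32_t vtermno, char *buf, int count);
	int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);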
+
+static int hvc_opal_hvsi_get_chars(uint32_t vtermno, char *buf, int count)
+{
+ struct hvc_opal_priv *pv = hvc_opal_privs[vtermno];
+
+ if (WARN_ON(!pv))
+ return -ENODEV;
+
+ return hvsilib_get_chars(&pv->hvsi, buf, count);
+}
+
+static int hvc_opal_hvsi_put_chars(uint32_t vtermno, const char *buf, int count)
+{
+ struct hvc_opal_priv *pv = hvc_opal_privs[vtermno];
+
+ if (WARN_ON(!pv))
+ return -ENODEV;
+
+ return hvsilib_put_chars(&pv->hvsi, buf, count);
+}
+
+static int hvc_opal_hvsi_open(struct hvc_struct *hp, int data)
+{
+ struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
+ int rc;
+
+ pr_devel("HVSI@%x: do open !\n", hp->vtermno);
+
+ rc = notifier_add_irq(hp, data);
+ if (rc)
+ return rc;
+
+ return hvsilib_open(&pv->hvsi, hp);
+}
+
+static void hvc_opal_hvsi_close(struct hvc_struct *hp, int data)
+{
+ struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
+
+ pr_devel("HVSI@%x: do close !\n", hp->vtermno);
+
+ hvsilib_close(&pv->hvsi, hp);
+
+ notifier_del_irq(hp, data);
+}
+
+static void hvc_opal_hvsi_hangup(struct hvc_struct *hp, int data)
+{
+ struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
+
+ pr_devel("HVSI@%x: do hangup !\n", hp->vtermno);
+
+ hvsilib_close(&pv->hvsi, hp);
+
+ notifier_hangup_irq(hp, data);
+}
+
+static int hvc_opal_hvsi_tiocmget(struct hvc_struct *hp)
+{
+ struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
+
+ if (!pv)
+ return -EINVAL;
+ return pv->hvsi.mctrl;
+}
+
+static int hvc_opal_hvsi_tiocmset(struct hvc_struct *hp, unsigned int set,
+ unsigned int clear)
+{
+ struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno];
+
+ pr_devel("HVSI@%x: Set modem control, set=%x,clr=%x\n",
+ hp->vtermno, set, clear);
+
+ if (set & TIOCM_DTR)
+ hvsilib_write_mctrl(&pv->hvsi, 1);
+ else if (clear & TIOCM_DTR)
+ hvsilib_write_mctrl(&pv->hvsi, 0);
+
+ return 0;
+}
+
+static const struct hv_ops hvc_opal_hvsi_ops = {
+ .get_chars = hvc_opal_hvsi_get_chars,
+ .put_chars = hvc_opal_hvsi_put_chars,
+ .notifier_add = hvc_opal_hvsi_open,
+ .notifier_del = hvc_opal_hvsi_close,
+ .notifier_hangup = hvc_opal_hvsi_hangup,
+ .tiocmget = hvc_opal_hvsi_tiocmget,
+ .tiocmset = hvc_opal_hvsi_tiocmset,
+};
+
+static int __devinit hvc_opal_probe(struct platform_device *dev)
+{
+ const struct hv_ops *ops;
+ struct hvc_struct *hp;
+ struct hvc_opal_priv *pv;
+ hv_protocol_t proto;
+ unsigned int termno, boot = 0;
+ const __be32 *reg;
+
+ if (of_device_is_compatible(dev->dev.of_node, "ibm,opal-console-raw")) {
+ proto = HV_PROTOCOL_RAW;
+ ops = &hvc_opal_raw_ops;
+ } else if (of_device_is_compatible(dev->dev.of_node,
+ "ibm,opal-console-hvsi")) {
+ proto = HV_PROTOCOL_HVSI;
+ ops = &hvc_opal_hvsi_ops;
+ } else {
+ pr_err("hvc_opal: Unkown protocol for %s\n",
+ dev->dev.of_node->full_name);
+ return -ENXIO;
+ }
+
+ reg = of_get_property(dev->dev.of_node, "reg", NULL);
+ termno = reg ? be32_to_cpup(reg) : 0;
+
+ /* Is it our boot console? */
+ if (hvc_opal_privs[termno] == &hvc_opal_boot_priv) {
+ pv = hvc_opal_privs[termno];
+ boot = 1;
+ } else if (hvc_opal_privs[termno] == NULL) {
+ pv = kzalloc(sizeof(struct hvc_opal_priv), GFP_KERNEL);
+ if (!pv)
+ return -ENOMEM;
+ pv->proto = proto;
+ hvc_opal_privs[termno] = pv;
+ if (proto == HV_PROTOCOL_HVSI)
+ hvsilib_init(&pv->hvsi, opal_get_chars, opal_put_chars,
+ termno, 0);
+
+ /* Instantiate now to establish a mapping index==vtermno */
+ hvc_instantiate(termno, termno, ops);
+ } else {
+ pr_err("hvc_opal: Device %s has duplicate terminal number #%d\n",
+ dev->dev.of_node->full_name, termno);
+ return -ENXIO;
+ }
+
+ pr_info("hvc%d: %s protocol on %s%s\n", termno,
+ proto == HV_PROTOCOL_RAW ? "raw" : "hvsi",
+ dev->dev.of_node->full_name,
+ boot ? " (boot console)" : "");
+
+ /* We don't do IRQ yet */
+ hp = hvc_alloc(termno, 0, ops, MAX_VIO_PUT_CHARS);
+ if (IS_ERR(hp))
+ return PTR_ERR(hp);
+ dev_set_drvdata(&dev->dev, hp);
+
+ return 0;
+}
+
+static int __devexit hvc_opal_remove(struct platform_device *dev)
+{
+ struct hvc_struct *hp = dev_get_drvdata(&dev->dev);
+ int rc, termno;
+
+ termno = hp->vtermno;
+ rc = hvc_remove(hp);
+ if (rc == 0) {
+ if (hvc_opal_privs[termno] != &hvc_opal_boot_priv)
+ kfree(hvc_opal_privs[termno]);
+ hvc_opal_privs[termno] = NULL;
+ }
+ return rc;
+}
+
+static struct platform_driver hvc_opal_driver = {
+ .probe = hvc_opal_probe,
+ .remove = __devexit_p(hvc_opal_remove),
+ .driver = {
+ .name = hvc_opal_name,
+ .owner = THIS_MODULE,
+ .of_match_table = hvc_opal_match,
+ }
+};
+
+static int __init hvc_opal_init(void)
+{
+ if (!firmware_has_feature(FW_FEATURE_OPAL))
+ return -ENODEV;
+
+ /* Register as a vio device to receive callbacks */
+ return platform_driver_register(&hvc_opal_driver);
+}
+module_init(hvc_opal_init);
+
+static void __exit hvc_opal_exit(void)
+{
+ platform_driver_unregister(&hvc_opal_driver);
+}
+module_exit(hvc_opal_exit);
+
+static void udbg_opal_putc(char c)
+{
+ unsigned int termno = hvc_opal_boot_termno;
+ int count = -1;
+
+ if (c == '\n')
+ udbg_opal_putc('\r');
+
+ /* busy-wait until the firmware accepts the character */
+ do {
+ switch (hvc_opal_boot_priv.proto) {
+ case HV_PROTOCOL_RAW:
+ count = opal_put_chars(termno, &c, 1);
+ break;
+ case HV_PROTOCOL_HVSI:
+ count = hvc_opal_hvsi_put_chars(termno, &c, 1);
+ break;
+ }
+ } while (count == 0 || count == -EAGAIN);
+}
+
+static int udbg_opal_getc_poll(void)
+{
+ unsigned int termno = hvc_opal_boot_termno;
+ int rc = 0;
+ char c;
+
+ switch (hvc_opal_boot_priv.proto) {
+ case HV_PROTOCOL_RAW:
+ rc = opal_get_chars(termno, &c, 1);
+ break;
+ case HV_PROTOCOL_HVSI:
+ rc = hvc_opal_hvsi_get_chars(termno, &c, 1);
+ break;
+ }
+ if (!rc)
+ return -1;
+ return c;
+}
+
+static int udbg_opal_getc(void)
+{
+ int ch;
+ for (;;) {
+ ch = udbg_opal_getc_poll();
+ if (ch == -1) {
+ /* This shouldn't be needed...but... */
+ volatile unsigned long delay;
+ for (delay = 0; delay < 2000000; delay++)
+ ;
+ } else {
+ return ch;
+ }
+ }
+}
+
+static void udbg_init_opal_common(void)
+{
+ udbg_putc = udbg_opal_putc;
+ udbg_getc = udbg_opal_getc;
+ udbg_getc_poll = udbg_opal_getc_poll;
+ tb_ticks_per_usec = 0x200; /* Make udelay not suck */
+}
+
+void __init hvc_opal_init_early(void)
+{
+ struct device_node *stdout_node = NULL;
+ const u32 *termno;
+ const char *name = NULL;
+ const struct hv_ops *ops;
+ u32 index;
+
+ /* find the boot console from /chosen/stdout */
+ if (of_chosen)
+ name = of_get_property(of_chosen, "linux,stdout-path", NULL);
+ if (name) {
+ stdout_node = of_find_node_by_path(name);
+ if (!stdout_node) {
+ pr_err("hvc_opal: Failed to locate default console!\n");
+ return;
+ }
+ } else {
+ struct device_node *opal, *np;
+
+ /* Current OPAL takeover doesn't provide the stdout
+ * path, so we hard wire it
+ */
+ opal = of_find_node_by_path("/ibm,opal/consoles");
+ if (opal)
+ pr_devel("hvc_opal: Found consoles in new location\n");
+ if (!opal) {
+ opal = of_find_node_by_path("/ibm,opal");
+ if (opal)
+ pr_devel("hvc_opal: "
+ "Found consoles in old location\n");
+ }
+ if (!opal)
+ return;
+ for_each_child_of_node(opal, np) {
+ if (!strcmp(np->name, "serial")) {
+ stdout_node = np;
+ break;
+ }
+ }
+ of_node_put(opal);
+ }
+ if (!stdout_node)
+ return;
+ termno = of_get_property(stdout_node, "reg", NULL);
+ index = termno ? *termno : 0;
+ if (index >= MAX_NR_HVC_CONSOLES)
+ return;
+ hvc_opal_privs[index] = &hvc_opal_boot_priv;
+
+ /* Check the protocol */
+ if (of_device_is_compatible(stdout_node, "ibm,opal-console-raw")) {
+ hvc_opal_boot_priv.proto = HV_PROTOCOL_RAW;
+ ops = &hvc_opal_raw_ops;
+ pr_devel("hvc_opal: Found RAW console\n");
+ } else if (of_device_is_compatible(stdout_node, "ibm,opal-console-hvsi")) {
+ hvc_opal_boot_priv.proto = HV_PROTOCOL_HVSI;
+ ops = &hvc_opal_hvsi_ops;
+ hvsilib_init(&hvc_opal_boot_priv.hvsi, opal_get_chars,
+ opal_put_chars, index, 1);
+ /* HVSI, perform the handshake now */
+ hvsilib_establish(&hvc_opal_boot_priv.hvsi);
+ pr_devel("hvc_opal: Found HVSI console\n");
+ } else
+ goto out;
+ hvc_opal_boot_termno = index;
+ udbg_init_opal_common();
+ add_preferred_console("hvc", index, NULL);
+ hvc_instantiate(index, index, ops);
+out:
+ of_node_put(stdout_node);
+}
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_RAW
+void __init udbg_init_debug_opal(void)
+{
+ u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO;
+ hvc_opal_privs[index] = &hvc_opal_boot_priv;
+ hvc_opal_boot_priv.proto = HV_PROTOCOL_RAW;
+ hvc_opal_boot_termno = index;
+ udbg_init_opal_common();
+}
+#endif /* CONFIG_PPC_EARLY_DEBUG_OPAL_RAW */
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_HVSI
+void __init udbg_init_debug_opal_hvsi(void)
+{
+ u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO;
+ hvc_opal_privs[index] = &hvc_opal_boot_priv;
+ hvc_opal_boot_termno = index;
+ udbg_init_opal_common();
+ hvsilib_init(&hvc_opal_boot_priv.hvsi, opal_get_chars, opal_put_chars,
+ index, 1);
+ hvsilib_establish(&hvc_opal_boot_priv.hvsi);
+}
+#endif /* CONFIG_PPC_EARLY_DEBUG_OPAL_HVSI */
#include <linux/serial_reg.h>
#include <linux/serial_core.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/irqreturn.h>
#include <linux/mutex.h>
+ #include <linux/export.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/nwpserial.h>
#include <linux/console.h>
#include <linux/backlight.h>
#include <linux/gpio.h>
+ #include <linux/module.h>
#include <video/sh_mobile_lcdc.h>
+#include <video/sh_mobile_meram.h>
#include <linux/atomic.h>
#include "sh_mobile_lcdcfb.h"
#include <linux/device.h>
#include <linux/uio.h>
#include <linux/dma-direction.h>
+#include <linux/scatterlist.h>
+ #include <linux/bitmap.h>
+ #include <asm/page.h>
-struct scatterlist;
-
/**
* typedef dma_cookie_t - an opaque DMA cookie
*
#include <linux/kernel.h>
#include <linux/uaccess.h>
+ #include <linux/export.h>
/*
- * locking rule: all changes to requests or notifiers lists
+ * locking rule: all changes to constraints or notifiers lists
* or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
* held, taken with _irqsave. One lock to rule them all
*/
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
- #include <linux/module.h>
+ #include <linux/export.h>
#include <linux/hardirq.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/rcu.h>
+
+#include "rcu.h"
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
+ #include <linux/moduleparam.h>
#include <linux/rtnetlink.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/crc32.h>
#include <linux/slab.h>
+ #include <linux/export.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <net/sch_generic.h>
#include <linux/slab.h>
+ #include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
--- /dev/null
+/*
+ * The NFC Controller Interface is the communication protocol between an
+ * NFC Controller (NFCC) and a Device Host (DH).
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * Written by Ilan Elias <ilane@ti.com>
+ *
+ * Acknowledgements:
+ * This file is based on hci_core.c, which was written
+ * by Maxim Krasnyansky.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/skbuff.h>
+
+#include "../nfc.h"
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include <linux/nfc.h>
++#include <linux/export.h>
+
+static void nci_cmd_work(struct work_struct *work);
+static void nci_rx_work(struct work_struct *work);
+static void nci_tx_work(struct work_struct *work);
+
+/* ---- NCI requests ---- */
+
+void nci_req_complete(struct nci_dev *ndev, int result)
+{
+ if (ndev->req_status == NCI_REQ_PEND) {
+ ndev->req_result = result;
+ ndev->req_status = NCI_REQ_DONE;
+ complete(&ndev->req_completion);
+ }
+}
+
+static void nci_req_cancel(struct nci_dev *ndev, int err)
+{
+ if (ndev->req_status == NCI_REQ_PEND) {
+ ndev->req_result = err;
+ ndev->req_status = NCI_REQ_CANCELED;
+ complete(&ndev->req_completion);
+ }
+}
+
+/* Execute request and wait for completion. */
+static int __nci_request(struct nci_dev *ndev,
+ void (*req)(struct nci_dev *ndev, unsigned long opt),
+ unsigned long opt,
+ __u32 timeout)
+{
+ int rc = 0;
+ unsigned long completion_rc;
+
+ ndev->req_status = NCI_REQ_PEND;
+
+ init_completion(&ndev->req_completion);
+ req(ndev, opt);
+ completion_rc = wait_for_completion_interruptible_timeout(
+ &ndev->req_completion,
+ timeout);
+
+ nfc_dbg("wait_for_completion return %ld", completion_rc);
+
+ if (completion_rc > 0) {
+ switch (ndev->req_status) {
+ case NCI_REQ_DONE:
+ rc = nci_to_errno(ndev->req_result);
+ break;
+
+ case NCI_REQ_CANCELED:
+ rc = -ndev->req_result;
+ break;
+
+ default:
+ rc = -ETIMEDOUT;
+ break;
+ }
+ } else {
+ nfc_err("wait_for_completion_interruptible_timeout failed %ld",
+ completion_rc);
+
+ rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
+ }
+
+ ndev->req_status = ndev->req_result = 0;
+
+ return rc;
+}
+
+static inline int nci_request(struct nci_dev *ndev,
+ void (*req)(struct nci_dev *ndev, unsigned long opt),
+ unsigned long opt, __u32 timeout)
+{
+ int rc;
+
+ if (!test_bit(NCI_UP, &ndev->flags))
+ return -ENETDOWN;
+
+ /* Serialize all requests */
+ mutex_lock(&ndev->req_lock);
+ rc = __nci_request(ndev, req, opt, timeout);
+ mutex_unlock(&ndev->req_lock);
+
+ return rc;
+}
+
+static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
+{
+ nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 0, NULL);
+}
+
+static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
+{
+ nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
+}
+
+static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
+{
+ struct nci_core_conn_create_cmd conn_cmd;
+ struct nci_rf_disc_map_cmd cmd;
+ struct disc_map_config *cfg = cmd.mapping_configs;
+ __u8 *num = &cmd.num_mapping_configs;
+ int i;
+
+ /* create static rf connection */
+ conn_cmd.target_handle = 0;
+ conn_cmd.num_target_specific_params = 0;
+ nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, 2, &conn_cmd);
+
+ /* set rf mapping configurations */
+ *num = 0;
+
+ /* by default mapping is set to NCI_RF_INTERFACE_FRAME */
+ for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
+ if (ndev->supported_rf_interfaces[i] ==
+ NCI_RF_INTERFACE_ISO_DEP) {
+ cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
+ cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
+ cfg[*num].rf_interface_type = NCI_RF_INTERFACE_ISO_DEP;
+ (*num)++;
+ } else if (ndev->supported_rf_interfaces[i] ==
+ NCI_RF_INTERFACE_NFC_DEP) {
+ cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
+ cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
+ cfg[*num].rf_interface_type = NCI_RF_INTERFACE_NFC_DEP;
+ (*num)++;
+ }
+
+ if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
+ break;
+ }
+
+ /* payload: 1 byte for num_mapping_configs + the configs themselves */
+ nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
+ (1 + ((*num)*sizeof(struct disc_map_config))),
+ &cmd);
+}
+
+static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
+{
+ struct nci_rf_disc_cmd cmd;
+ __u32 protocols = opt;
+
+ cmd.num_disc_configs = 0;
+
+ if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
+ (protocols & NFC_PROTO_JEWEL_MASK
+ || protocols & NFC_PROTO_MIFARE_MASK
+ || protocols & NFC_PROTO_ISO14443_MASK
+ || protocols & NFC_PROTO_NFC_DEP_MASK)) {
+ cmd.disc_configs[cmd.num_disc_configs].type =
+ NCI_DISCOVERY_TYPE_POLL_A_PASSIVE;
+ cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
+ cmd.num_disc_configs++;
+ }
+
+ if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
+ (protocols & NFC_PROTO_ISO14443_MASK)) {
+ cmd.disc_configs[cmd.num_disc_configs].type =
+ NCI_DISCOVERY_TYPE_POLL_B_PASSIVE;
+ cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
+ cmd.num_disc_configs++;
+ }
+
+ if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
+ (protocols & NFC_PROTO_FELICA_MASK
+ || protocols & NFC_PROTO_NFC_DEP_MASK)) {
+ cmd.disc_configs[cmd.num_disc_configs].type =
+ NCI_DISCOVERY_TYPE_POLL_F_PASSIVE;
+ cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
+ cmd.num_disc_configs++;
+ }
+
+ /* payload: 1 byte for num_disc_configs + the configs themselves */
+ nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
+ (1 + (cmd.num_disc_configs*sizeof(struct disc_config))),
+ &cmd);
+}
+
+static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
+{
+ struct nci_rf_deactivate_cmd cmd;
+
+ cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;
+
+ nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
+ sizeof(struct nci_rf_deactivate_cmd),
+ &cmd);
+}
+
+static int nci_open_device(struct nci_dev *ndev)
+{
+ int rc = 0;
+
+ mutex_lock(&ndev->req_lock);
+
+ if (test_bit(NCI_UP, &ndev->flags)) {
+ rc = -EALREADY;
+ goto done;
+ }
+
+ if (ndev->ops->open(ndev)) {
+ rc = -EIO;
+ goto done;
+ }
+
+ atomic_set(&ndev->cmd_cnt, 1);
+
+ set_bit(NCI_INIT, &ndev->flags);
+
+ rc = __nci_request(ndev, nci_reset_req, 0,
+ msecs_to_jiffies(NCI_RESET_TIMEOUT));
+
+ if (!rc) {
+ rc = __nci_request(ndev, nci_init_req, 0,
+ msecs_to_jiffies(NCI_INIT_TIMEOUT));
+ }
+
+ if (!rc) {
+ rc = __nci_request(ndev, nci_init_complete_req, 0,
+ msecs_to_jiffies(NCI_INIT_TIMEOUT));
+ }
+
+ clear_bit(NCI_INIT, &ndev->flags);
+
+ if (!rc) {
+ set_bit(NCI_UP, &ndev->flags);
+ } else {
+ /* Init failed, cleanup */
+ skb_queue_purge(&ndev->cmd_q);
+ skb_queue_purge(&ndev->rx_q);
+ skb_queue_purge(&ndev->tx_q);
+
+ ndev->ops->close(ndev);
+ ndev->flags = 0;
+ }
+
+done:
+ mutex_unlock(&ndev->req_lock);
+ return rc;
+}
+
+static int nci_close_device(struct nci_dev *ndev)
+{
+ nci_req_cancel(ndev, ENODEV);
+ mutex_lock(&ndev->req_lock);
+
+ if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
+ del_timer_sync(&ndev->cmd_timer);
+ mutex_unlock(&ndev->req_lock);
+ return 0;
+ }
+
+ /* Drop RX and TX queues */
+ skb_queue_purge(&ndev->rx_q);
+ skb_queue_purge(&ndev->tx_q);
+
+ /* Flush RX and TX wq */
+ flush_workqueue(ndev->rx_wq);
+ flush_workqueue(ndev->tx_wq);
+
+ /* Reset device */
+ skb_queue_purge(&ndev->cmd_q);
+ atomic_set(&ndev->cmd_cnt, 1);
+
+ set_bit(NCI_INIT, &ndev->flags);
+ __nci_request(ndev, nci_reset_req, 0,
+ msecs_to_jiffies(NCI_RESET_TIMEOUT));
+ clear_bit(NCI_INIT, &ndev->flags);
+
+ /* Flush cmd wq */
+ flush_workqueue(ndev->cmd_wq);
+
+ /* After this point our queues are empty
+ * and no work is scheduled. */
+ ndev->ops->close(ndev);
+
+ /* Clear flags */
+ ndev->flags = 0;
+
+ mutex_unlock(&ndev->req_lock);
+
+ return 0;
+}
+
+/* NCI command timer function */
+static void nci_cmd_timer(unsigned long arg)
+{
+ struct nci_dev *ndev = (void *) arg;
+
+ nfc_dbg("entry");
+
+ atomic_set(&ndev->cmd_cnt, 1);
+ queue_work(ndev->cmd_wq, &ndev->cmd_work);
+}
+
+static int nci_dev_up(struct nfc_dev *nfc_dev)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ nfc_dbg("entry");
+
+ return nci_open_device(ndev);
+}
+
+static int nci_dev_down(struct nfc_dev *nfc_dev)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ nfc_dbg("entry");
+
+ return nci_close_device(ndev);
+}
+
+static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+ int rc;
+
+ nfc_dbg("entry");
+
+ if (test_bit(NCI_DISCOVERY, &ndev->flags)) {
+ nfc_err("unable to start poll, since poll is already active");
+ return -EBUSY;
+ }
+
+ if (ndev->target_active_prot) {
+ nfc_err("there is an active target");
+ return -EBUSY;
+ }
+
+ if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
+ nfc_dbg("target is active, implicitly deactivate...");
+
+ rc = nci_request(ndev, nci_rf_deactivate_req, 0,
+ msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
+ if (rc)
+ return -EBUSY;
+ }
+
+ rc = nci_request(ndev, nci_rf_discover_req, protocols,
+ msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));
+
+ if (!rc)
+ ndev->poll_prots = protocols;
+
+ return rc;
+}
+
+static void nci_stop_poll(struct nfc_dev *nfc_dev)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ nfc_dbg("entry");
+
+ if (!test_bit(NCI_DISCOVERY, &ndev->flags)) {
+ nfc_err("unable to stop poll, since poll is not active");
+ return;
+ }
+
+ nci_request(ndev, nci_rf_deactivate_req, 0,
+ msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
+}
+
+static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
+ __u32 protocol)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ nfc_dbg("entry, target_idx %d, protocol 0x%x", target_idx, protocol);
+
+ if (!test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
+ nfc_err("there is no available target to activate");
+ return -EINVAL;
+ }
+
+ if (ndev->target_active_prot) {
+ nfc_err("there is already an active target");
+ return -EBUSY;
+ }
+
+ if (!(ndev->target_available_prots & (1 << protocol))) {
+ nfc_err("target does not support the requested protocol 0x%x",
+ protocol);
+ return -EINVAL;
+ }
+
+ ndev->target_active_prot = protocol;
+ ndev->target_available_prots = 0;
+
+ return 0;
+}
+
+static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ nfc_dbg("entry, target_idx %d", target_idx);
+
+ if (!ndev->target_active_prot) {
+ nfc_err("unable to deactivate target, no active target");
+ return;
+ }
+
+ ndev->target_active_prot = 0;
+
+ if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
+ nci_request(ndev, nci_rf_deactivate_req, 0,
+ msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
+ }
+}
+
+static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
+ struct sk_buff *skb,
+ data_exchange_cb_t cb,
+ void *cb_context)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+ int rc;
+
+ nfc_dbg("entry, target_idx %d, len %d", target_idx, skb->len);
+
+ if (!ndev->target_active_prot) {
+ nfc_err("unable to exchange data, no active target");
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
+ return -EBUSY;
+
+ /* store cb and context to be used on receiving data */
+ ndev->data_exchange_cb = cb;
+ ndev->data_exchange_cb_context = cb_context;
+
+ rc = nci_send_data(ndev, ndev->conn_id, skb);
+ if (rc)
+ clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
+
+ return rc;
+}
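(Hedged sketch of a data_exchange_cb_t consumer; the (context, skb, err)
signature is assumed from the nfc core, and the my_* names are
illustrative.)

	/* invoked from the rx path once the peer's response (or an error)
	 * arrives; the callback takes ownership of the skb */
	static void my_data_exchange_cb(void *context, struct sk_buff *skb,
					int err)
	{
		if (err) {
			pr_err("data exchange failed: %d\n", err);
			return;
		}

		my_consume_response(context, skb); /* illustrative consumer */
		kfree_skb(skb);
	}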
+
+static struct nfc_ops nci_nfc_ops = {
+ .dev_up = nci_dev_up,
+ .dev_down = nci_dev_down,
+ .start_poll = nci_start_poll,
+ .stop_poll = nci_stop_poll,
+ .activate_target = nci_activate_target,
+ .deactivate_target = nci_deactivate_target,
+ .data_exchange = nci_data_exchange,
+};
+
+/* ---- Interface to NCI drivers ---- */
+
+/**
+ * nci_allocate_device - allocate a new nci device
+ *
+ * @ops: device operations
+ * @supported_protocols: NFC protocols supported by the device
+ * @tx_headroom: reserved driver tx headroom
+ * @tx_tailroom: reserved driver tx tailroom
+ */
+struct nci_dev *nci_allocate_device(struct nci_ops *ops,
+ __u32 supported_protocols,
+ int tx_headroom,
+ int tx_tailroom)
+{
+ struct nci_dev *ndev;
+
+ nfc_dbg("entry, supported_protocols 0x%x", supported_protocols);
+
+ if (!ops->open || !ops->close || !ops->send)
+ return NULL;
+
+ if (!supported_protocols)
+ return NULL;
+
+ ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
+ if (!ndev)
+ return NULL;
+
+ ndev->ops = ops;
+ ndev->tx_headroom = tx_headroom;
+ ndev->tx_tailroom = tx_tailroom;
+
+ ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
+ supported_protocols,
+ tx_headroom + NCI_DATA_HDR_SIZE,
+ tx_tailroom);
+ if (!ndev->nfc_dev)
+ goto free_exit;
+
+ nfc_set_drvdata(ndev->nfc_dev, ndev);
+
+ return ndev;
+
+free_exit:
+ kfree(ndev);
+ return NULL;
+}
+EXPORT_SYMBOL(nci_allocate_device);
+
+/**
+ * nci_free_device - deallocate nci device
+ *
+ * @ndev: The nci device to deallocate
+ */
+void nci_free_device(struct nci_dev *ndev)
+{
+ nfc_dbg("entry");
+
+ nfc_free_device(ndev->nfc_dev);
+ kfree(ndev);
+}
+EXPORT_SYMBOL(nci_free_device);
+
+/**
+ * nci_register_device - register an nci device in the nfc subsystem
+ *
+ * @ndev: The nci device to register
+ */
+int nci_register_device(struct nci_dev *ndev)
+{
+ int rc;
+ struct device *dev = &ndev->nfc_dev->dev;
+ char name[32];
+
+ nfc_dbg("entry");
+
+ rc = nfc_register_device(ndev->nfc_dev);
+ if (rc)
+ goto exit;
+
+ ndev->flags = 0;
+
+ INIT_WORK(&ndev->cmd_work, nci_cmd_work);
+ snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
+ ndev->cmd_wq = create_singlethread_workqueue(name);
+ if (!ndev->cmd_wq) {
+ rc = -ENOMEM;
+ goto unreg_exit;
+ }
+
+ INIT_WORK(&ndev->rx_work, nci_rx_work);
+ snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
+ ndev->rx_wq = create_singlethread_workqueue(name);
+ if (!ndev->rx_wq) {
+ rc = -ENOMEM;
+ goto destroy_cmd_wq_exit;
+ }
+
+ INIT_WORK(&ndev->tx_work, nci_tx_work);
+ snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
+ ndev->tx_wq = create_singlethread_workqueue(name);
+ if (!ndev->tx_wq) {
+ rc = -ENOMEM;
+ goto destroy_rx_wq_exit;
+ }
+
+ skb_queue_head_init(&ndev->cmd_q);
+ skb_queue_head_init(&ndev->rx_q);
+ skb_queue_head_init(&ndev->tx_q);
+
+ setup_timer(&ndev->cmd_timer, nci_cmd_timer,
+ (unsigned long) ndev);
+
+ mutex_init(&ndev->req_lock);
+
+ goto exit;
+
+destroy_rx_wq_exit:
+ destroy_workqueue(ndev->rx_wq);
+
+destroy_cmd_wq_exit:
+ destroy_workqueue(ndev->cmd_wq);
+
+unreg_exit:
+ nfc_unregister_device(ndev->nfc_dev);
+
+exit:
+ return rc;
+}
+EXPORT_SYMBOL(nci_register_device);
+
+/**
+ * nci_unregister_device - unregister an nci device in the nfc subsystem
+ *
+ * @ndev: The nci device to unregister
+ */
+void nci_unregister_device(struct nci_dev *ndev)
+{
+ nfc_dbg("entry");
+
+ nci_close_device(ndev);
+
+ destroy_workqueue(ndev->cmd_wq);
+ destroy_workqueue(ndev->rx_wq);
+ destroy_workqueue(ndev->tx_wq);
+
+ nfc_unregister_device(ndev->nfc_dev);
+}
+EXPORT_SYMBOL(nci_unregister_device);
+
+/**
+ * nci_recv_frame - receive frame from NCI drivers
+ *
+ * @skb: The sk_buff to receive
+ */
+int nci_recv_frame(struct sk_buff *skb)
+{
+ struct nci_dev *ndev = (struct nci_dev *) skb->dev;
+
+ nfc_dbg("entry, len %d", skb->len);
+
+ if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
+ && !test_bit(NCI_INIT, &ndev->flags))) {
+ kfree_skb(skb);
+ return -ENXIO;
+ }
+
+ /* Queue frame for rx worker thread */
+ skb_queue_tail(&ndev->rx_q, skb);
+ queue_work(ndev->rx_wq, &ndev->rx_work);
+
+ return 0;
+}
+EXPORT_SYMBOL(nci_recv_frame);
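(A minimal driver-side sketch of the exported API above; the my_nci_* and
MY_* names are illustrative and error handling is trimmed.)

	static int my_nci_open(struct nci_dev *ndev);
	static int my_nci_close(struct nci_dev *ndev);
	static int my_nci_send(struct sk_buff *skb); /* hand skb to transport */

	static struct nci_ops my_nci_ops = {
		.open = my_nci_open,
		.close = my_nci_close,
		.send = my_nci_send,
	};

	/* probe path: allocate, then register with the nfc subsystem */
	ndev = nci_allocate_device(&my_nci_ops, NFC_PROTO_ISO14443_MASK,
				MY_HEADROOM, MY_TAILROOM);
	if (!ndev)
		return -ENOMEM;
	rc = nci_register_device(ndev);

	/* rx path: tag the skb with the device and feed the NCI core,
	 * matching the skb->dev convention read by nci_recv_frame() */
	skb->dev = (void *) ndev;
	nci_recv_frame(skb);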
+
+static int nci_send_frame(struct sk_buff *skb)
+{
+ struct nci_dev *ndev = (struct nci_dev *) skb->dev;
+
+ nfc_dbg("entry, len %d", skb->len);
+
+ if (!ndev) {
+ kfree_skb(skb);
+ return -ENODEV;
+ }
+
+ /* Get rid of skb owner, prior to sending to the driver. */
+ skb_orphan(skb);
+
+ return ndev->ops->send(skb);
+}
+
+/* Send NCI command */
+int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
+{
+ struct nci_ctrl_hdr *hdr;
+ struct sk_buff *skb;
+
+ nfc_dbg("entry, opcode 0x%x, plen %d", opcode, plen);
+
+ skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
+ if (!skb) {
+ nfc_err("no memory for command");
+ return -ENOMEM;
+ }
+
+ hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
+ hdr->gid = nci_opcode_gid(opcode);
+ hdr->oid = nci_opcode_oid(opcode);
+ hdr->plen = plen;
+
+ nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
+ nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);
+
+ if (plen)
+ memcpy(skb_put(skb, plen), payload, plen);
+
+ skb->dev = (void *) ndev;
+
+ skb_queue_tail(&ndev->cmd_q, skb);
+ queue_work(ndev->cmd_wq, &ndev->cmd_work);
+
+ return 0;
+}
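(The other half of the request cycle, sketched: a response dispatcher is
assumed to decode the reply and wake the waiter blocked in
__nci_request(); the handler name and struct layout here are
illustrative.)

	/* e.g. on CORE_RESET_RSP: record the status byte and complete */
	static void my_core_reset_rsp_packet(struct nci_dev *ndev,
						struct sk_buff *skb)
	{
		struct nci_core_reset_rsp *rsp = (void *) skb->data;

		nci_req_complete(ndev, rsp->status);
	}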
+
+/* ---- NCI TX Data worker thread ---- */
+
+static void nci_tx_work(struct work_struct *work)
+{
+ struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
+ struct sk_buff *skb;
+
+ nfc_dbg("entry, credits_cnt %d", atomic_read(&ndev->credits_cnt));
+
+ /* Send queued tx data */
+ while (atomic_read(&ndev->credits_cnt)) {
+ skb = skb_dequeue(&ndev->tx_q);
+ if (!skb)
+ return;
+
+ atomic_dec(&ndev->credits_cnt);
+
+ nfc_dbg("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d",
+ nci_pbf(skb->data),
+ nci_conn_id(skb->data),
+ nci_plen(skb->data));
+
+ nci_send_frame(skb);
+ }
+}
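(How the credit counter above is assumed to be refilled: a sketch, with
granted_credits standing in for the value parsed from the firmware's
credits notification.)

	/* in the credits notification handler: top up and re-kick tx */
	atomic_add(granted_credits, &ndev->credits_cnt);
	queue_work(ndev->tx_wq, &ndev->tx_work);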
+
+/* ----- NCI RX worker thread (data & control) ----- */
+
+static void nci_rx_work(struct work_struct *work)
+{
+ struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&ndev->rx_q))) {
+ /* Process frame */
+ switch (nci_mt(skb->data)) {
+ case NCI_MT_RSP_PKT:
+ nci_rsp_packet(ndev, skb);
+ break;
+
+ case NCI_MT_NTF_PKT:
+ nci_ntf_packet(ndev, skb);
+ break;
+
+ case NCI_MT_DATA_PKT:
+ nci_rx_data_packet(ndev, skb);
+ break;
+
+ default:
+ nfc_err("unknown MT 0x%x", nci_mt(skb->data));
+ kfree_skb(skb);
+ break;
+ }
+ }
+}
+
+/* ----- NCI TX CMD worker thread ----- */
+
+static void nci_cmd_work(struct work_struct *work)
+{
+ struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
+ struct sk_buff *skb;
+
+ nfc_dbg("entry, cmd_cnt %d", atomic_read(&ndev->cmd_cnt));
+
+ /* Send queued command */
+ if (atomic_read(&ndev->cmd_cnt)) {
+ skb = skb_dequeue(&ndev->cmd_q);
+ if (!skb)
+ return;
+
+ atomic_dec(&ndev->cmd_cnt);
+
+ nfc_dbg("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
+ nci_pbf(skb->data),
+ nci_opcode_gid(nci_opcode(skb->data)),
+ nci_opcode_oid(nci_opcode(skb->data)),
+ nci_plen(skb->data));
+
+ nci_send_frame(skb);
+
+ mod_timer(&ndev->cmd_timer,
+ jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
+ }
+}
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
+ #include <linux/export.h>
#include <net/iw_handler.h>
#include <net/cfg80211.h>
+#include <net/cfg80211-wext.h>
#include "wext-compat.h"
#include "core.h"
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
+ #include <linux/export.h>
#include <net/cfg80211.h>
+#include <net/cfg80211-wext.h>
#include "wext-compat.h"
#include "nl80211.h"
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/time.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/uio.h>
#include <linux/dma-mapping.h>
+ #include <linux/module.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/info.h>
#include <linux/i2c.h>
#include <linux/spi/spi.h>
+#include <linux/regmap.h>
+ #include <linux/export.h>
#include <sound/soc.h>
#include <trace/events/asoc.h>
*/
#include <linux/firmware.h>
+ #include <linux/module.h>
#include <linux/bitrev.h>
+#include <linux/kernel.h>
#include "firmware.h"
#include "chip.h"