From: Stephen Rothwell Date: Thu, 13 Oct 2011 06:25:08 +0000 (+1100) Subject: Merge remote-tracking branch 'moduleh/for-sfr' X-Git-Tag: next-20111013~3 X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=34202fa6fa6d15d4efb6e935822f3ee30d79e312;p=karo-tx-linux.git Merge remote-tracking branch 'moduleh/for-sfr' Conflicts: arch/arm/mach-bcmring/mm.c arch/arm/plat-s3c24xx/devs.c arch/arm/plat-s5p/dev-ehci.c arch/arm/plat-s5p/dev-onenand.c arch/arm/plat-samsung/dev-adc.c arch/arm/plat-samsung/dev-asocdma.c arch/arm/plat-samsung/dev-ide.c arch/arm/plat-samsung/dev-nand.c arch/arm/plat-samsung/dev-pwm.c arch/arm/plat-samsung/dev-rtc.c arch/arm/plat-samsung/dev-ts.c arch/arm/plat-samsung/dev-usb.c arch/arm/plat-samsung/dev-wdt.c drivers/media/dvb/frontends/dibx000_common.c drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c drivers/scsi/libfc/fc_lport.c drivers/staging/brcm80211/brcmfmac/bcmsdh.c drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.c drivers/staging/brcm80211/brcmfmac/dhd_linux.c drivers/staging/brcm80211/brcmfmac/dhd_sdio.c drivers/staging/iio/industrialio-ring.c include/linux/dmaengine.h sound/soc/soc-io.c --- 34202fa6fa6d15d4efb6e935822f3ee30d79e312 diff --cc arch/arm/common/scoop.c index 1cde34a080d7,f749e60639ee..3229323eeb57 --- a/arch/arm/common/scoop.c +++ b/arch/arm/common/scoop.c @@@ -11,8 -11,8 +11,9 @@@ * */ + #include #include +#include #include #include #include diff --cc arch/arm/kernel/perf_event.c index e6e5d7c84f1a,0d6679cb95d9..24e2347be6b1 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@@ -12,10 -12,9 +12,10 @@@ */ #define pr_fmt(fmt) "hw perfevents: " fmt +#include #include #include - #include + #include #include #include #include diff --cc arch/arm/mach-bcmring/mm.c index 8616876abb9f,293b6d3e0d1d..1adec78ec940 --- a/arch/arm/mach-bcmring/mm.c +++ b/arch/arm/mach-bcmring/mm.c @@@ -13,7 -13,7 +13,8 @@@ *****************************************************************************/ #include +#include + #include #include #include diff --cc arch/arm/mach-omap2/voltage.c index 64070ac1e761,e964cfd3a3d0..1f8fdf736e63 --- a/arch/arm/mach-omap2/voltage.c +++ b/arch/arm/mach-omap2/voltage.c @@@ -21,10 -21,11 +21,11 @@@ #include #include -#include #include + #include #include #include +#include #include diff --cc arch/arm/plat-samsung/dma-ops.c index 6e3d9abc9e2e,000000000000..93a994a5dd8f mode 100644,000000..100644 --- a/arch/arm/plat-samsung/dma-ops.c +++ b/arch/arm/plat-samsung/dma-ops.c @@@ -1,131 -1,0 +1,132 @@@ +/* linux/arch/arm/plat-samsung/dma-ops.c + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Samsung DMA Operations + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include ++#include + +#include + +static inline bool pl330_filter(struct dma_chan *chan, void *param) +{ + struct dma_pl330_peri *peri = chan->private; + return peri->peri_id == (unsigned)param; +} + +static unsigned samsung_dmadev_request(enum dma_ch dma_ch, + struct samsung_dma_info *info) +{ + struct dma_chan *chan; + dma_cap_mask_t mask; + struct dma_slave_config slave_config; + + dma_cap_zero(mask); + dma_cap_set(info->cap, mask); + + chan = dma_request_channel(mask, pl330_filter, (void *)dma_ch); + + if (info->direction == DMA_FROM_DEVICE) { + memset(&slave_config, 0, sizeof(struct dma_slave_config)); + slave_config.direction = info->direction; + slave_config.src_addr = info->fifo; + slave_config.src_addr_width = info->width; + slave_config.src_maxburst = 1; + dmaengine_slave_config(chan, &slave_config); + } else if (info->direction == DMA_TO_DEVICE) { + memset(&slave_config, 0, sizeof(struct dma_slave_config)); + slave_config.direction = info->direction; + slave_config.dst_addr = info->fifo; + slave_config.dst_addr_width = info->width; + slave_config.dst_maxburst = 1; + dmaengine_slave_config(chan, &slave_config); + } + + return (unsigned)chan; +} + +static int samsung_dmadev_release(unsigned ch, + struct s3c2410_dma_client *client) +{ + dma_release_channel((struct dma_chan *)ch); + + return 0; +} + +static int samsung_dmadev_prepare(unsigned ch, + struct samsung_dma_prep_info *info) +{ + struct scatterlist sg; + struct dma_chan *chan = (struct dma_chan *)ch; + struct dma_async_tx_descriptor *desc; + + switch (info->cap) { + case DMA_SLAVE: + sg_init_table(&sg, 1); + sg_dma_len(&sg) = info->len; + sg_set_page(&sg, pfn_to_page(PFN_DOWN(info->buf)), + info->len, offset_in_page(info->buf)); + sg_dma_address(&sg) = info->buf; + + desc = chan->device->device_prep_slave_sg(chan, + &sg, 1, info->direction, DMA_PREP_INTERRUPT); + break; + case DMA_CYCLIC: + desc = chan->device->device_prep_dma_cyclic(chan, + info->buf, info->len, info->period, info->direction); + break; + default: + dev_err(&chan->dev->device, "unsupported format\n"); + return -EFAULT; + } + + if (!desc) { + dev_err(&chan->dev->device, "cannot prepare cyclic dma\n"); + return -EFAULT; + } + + desc->callback = info->fp; + desc->callback_param = info->fp_param; + + dmaengine_submit((struct dma_async_tx_descriptor *)desc); + + return 0; +} + +static inline int samsung_dmadev_trigger(unsigned ch) +{ + dma_async_issue_pending((struct dma_chan *)ch); + + return 0; +} + +static inline int samsung_dmadev_flush(unsigned ch) +{ + return dmaengine_terminate_all((struct dma_chan *)ch); +} + +struct samsung_dma_ops dmadev_ops = { + .request = samsung_dmadev_request, + .release = samsung_dmadev_release, + .prepare = samsung_dmadev_prepare, + .trigger = samsung_dmadev_trigger, + .started = NULL, + .flush = samsung_dmadev_flush, + .stop = samsung_dmadev_flush, +}; + +void *samsung_dmadev_get_ops(void) +{ + return &dmadev_ops; +} +EXPORT_SYMBOL(samsung_dmadev_get_ops); diff --cc arch/arm/plat-samsung/s3c-dma-ops.c index 582333c70585,000000000000..781494912827 mode 100644,000000..100644 --- a/arch/arm/plat-samsung/s3c-dma-ops.c +++ b/arch/arm/plat-samsung/s3c-dma-ops.c @@@ -1,130 -1,0 +1,131 @@@ +/* linux/arch/arm/plat-samsung/s3c-dma-ops.c + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. 
+ * http://www.samsung.com + * + * Samsung S3C-DMA Operations + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include ++#include + +#include + +struct cb_data { + void (*fp) (void *); + void *fp_param; + unsigned ch; + struct list_head node; +}; + +static LIST_HEAD(dma_list); + +static void s3c_dma_cb(struct s3c2410_dma_chan *channel, void *param, + int size, enum s3c2410_dma_buffresult res) +{ + struct cb_data *data = param; + + data->fp(data->fp_param); +} + +static unsigned s3c_dma_request(enum dma_ch dma_ch, + struct samsung_dma_info *info) +{ + struct cb_data *data; + + if (s3c2410_dma_request(dma_ch, info->client, NULL) < 0) { + s3c2410_dma_free(dma_ch, info->client); + return 0; + } + + data = kzalloc(sizeof(struct cb_data), GFP_KERNEL); + data->ch = dma_ch; + list_add_tail(&data->node, &dma_list); + + s3c2410_dma_devconfig(dma_ch, info->direction, info->fifo); + + if (info->cap == DMA_CYCLIC) + s3c2410_dma_setflags(dma_ch, S3C2410_DMAF_CIRCULAR); + + s3c2410_dma_config(dma_ch, info->width); + + return (unsigned)dma_ch; +} + +static int s3c_dma_release(unsigned ch, struct s3c2410_dma_client *client) +{ + struct cb_data *data; + + list_for_each_entry(data, &dma_list, node) + if (data->ch == ch) + break; + list_del(&data->node); + + s3c2410_dma_free(ch, client); + kfree(data); + + return 0; +} + +static int s3c_dma_prepare(unsigned ch, struct samsung_dma_prep_info *info) +{ + struct cb_data *data; + int len = (info->cap == DMA_CYCLIC) ? info->period : info->len; + + list_for_each_entry(data, &dma_list, node) + if (data->ch == ch) + break; + + if (!data->fp) { + s3c2410_dma_set_buffdone_fn(ch, s3c_dma_cb); + data->fp = info->fp; + data->fp_param = info->fp_param; + } + + s3c2410_dma_enqueue(ch, (void *)data, info->buf, len); + + return 0; +} + +static inline int s3c_dma_trigger(unsigned ch) +{ + return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_START); +} + +static inline int s3c_dma_started(unsigned ch) +{ + return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STARTED); +} + +static inline int s3c_dma_flush(unsigned ch) +{ + return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_FLUSH); +} + +static inline int s3c_dma_stop(unsigned ch) +{ + return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STOP); +} + +static struct samsung_dma_ops s3c_dma_ops = { + .request = s3c_dma_request, + .release = s3c_dma_release, + .prepare = s3c_dma_prepare, + .trigger = s3c_dma_trigger, + .started = s3c_dma_started, + .flush = s3c_dma_flush, + .stop = s3c_dma_stop, +}; + +void *s3c_dma_get_ops(void) +{ + return &s3c_dma_ops; +} +EXPORT_SYMBOL(s3c_dma_get_ops); diff --cc arch/arm/vfp/vfpmodule.c index 0cbd5a0a9332,2d17f264847c..8f3ccddbdafd --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@@ -8,10 -8,8 +8,9 @@@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ - #include #include #include +#include #include #include #include diff --cc arch/mips/bcm47xx/setup.c index 17c3d14d7c49,0eda3808ee9d..6084dc24b18c --- a/arch/mips/bcm47xx/setup.c +++ b/arch/mips/bcm47xx/setup.c @@@ -27,9 -27,9 +27,10 @@@ */ #include + #include #include #include +#include #include #include #include diff --cc arch/mips/kernel/traps.c index cbea618af0b4,74c3cb6cee77..261ccbc07740 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@@ -14,9 -14,7 +14,8 @@@ #include #include #include +#include #include - #include #include #include #include diff --cc arch/x86/crypto/aes_glue.c index b0b6950cc8c8,bdce3eeeaa37..8efcf42a9d7e --- a/arch/x86/crypto/aes_glue.c +++ b/arch/x86/crypto/aes_glue.c @@@ -3,8 -3,8 +3,9 @@@ * */ + #include #include +#include asmlinkage void aes_enc_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); asmlinkage void aes_dec_blk(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in); diff --cc arch/x86/kernel/cpu/amd.c index 13c6ec812545,b070bde05825..ef2e3462702d --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@@ -1,6 -1,6 +1,7 @@@ + #include #include #include +#include #include #include diff --cc arch/x86/kernel/cpu/mcheck/mce.c index f2b5bb34b282,0dc80178b16c..4e585c741fda --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@@ -36,7 -36,9 +36,8 @@@ #include #include #include -#include #include + #include #include #include diff --cc arch/x86/kernel/cpu/perf_event_intel.c index e09ca20e86ee,f88af2c2a561..2be5ebe99872 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@@ -4,16 -6,11 +4,17 @@@ * Used to coordinate shared registers between HT threads or * among events on a single PMU. */ -struct intel_shared_regs { - struct er_account regs[EXTRA_REG_MAX]; - int refcnt; /* per-core: #HT threads */ - unsigned core_id; /* per-core: core id */ -}; + +#include +#include +#include +#include ++#include + +#include +#include + +#include "perf_event.h" /* * Intel PerfMon, used on Core and later. diff --cc arch/x86/kernel/nmi.c index 7ec5bd140b87,000000000000..b9c8628974af mode 100644,000000..100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@@ -1,433 -1,0 +1,434 @@@ +/* + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs + * Copyright (C) 2011 Don Zickus Red Hat, Inc. + * + * Pentium III FXSR, SSE support + * Gareth Hughes , May 2000 + */ + +/* + * Handle hardware traps and faults. + */ +#include +#include +#include +#include +#include +#include +#include ++#include + +#include + +#if defined(CONFIG_EDAC) +#include +#endif + +#include +#include +#include +#include + +#define NMI_MAX_NAMELEN 16 +struct nmiaction { + struct list_head list; + nmi_handler_t handler; + unsigned int flags; + char *name; +}; + +struct nmi_desc { + spinlock_t lock; + struct list_head head; +}; + +static struct nmi_desc nmi_desc[NMI_MAX] = +{ + { + .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock), + .head = LIST_HEAD_INIT(nmi_desc[0].head), + }, + { + .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock), + .head = LIST_HEAD_INIT(nmi_desc[1].head), + }, + +}; + +struct nmi_stats { + unsigned int normal; + unsigned int unknown; + unsigned int external; + unsigned int swallow; +}; + +static DEFINE_PER_CPU(struct nmi_stats, nmi_stats); + +static int ignore_nmis; + +int unknown_nmi_panic; +/* + * Prevent NMI reason port (0x61) being accessed simultaneously, can + * only be used in NMI handler. 
+ */ +static DEFINE_RAW_SPINLOCK(nmi_reason_lock); + +static int __init setup_unknown_nmi_panic(char *str) +{ + unknown_nmi_panic = 1; + return 1; +} +__setup("unknown_nmi_panic", setup_unknown_nmi_panic); + +#define nmi_to_desc(type) (&nmi_desc[type]) + +static int notrace __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b) +{ + struct nmi_desc *desc = nmi_to_desc(type); + struct nmiaction *a; + int handled=0; + + rcu_read_lock(); + + /* + * NMIs are edge-triggered, which means if you have enough + * of them concurrently, you can lose some because only one + * can be latched at any given time. Walk the whole list + * to handle those situations. + */ + list_for_each_entry_rcu(a, &desc->head, list) + handled += a->handler(type, regs); + + rcu_read_unlock(); + + /* return total number of NMI events handled */ + return handled; +} + +static int __setup_nmi(unsigned int type, struct nmiaction *action) +{ + struct nmi_desc *desc = nmi_to_desc(type); + unsigned long flags; + + spin_lock_irqsave(&desc->lock, flags); + + /* + * most handlers of type NMI_UNKNOWN never return because + * they just assume the NMI is theirs. Just a sanity check + * to manage expectations + */ + WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head)); + + /* + * some handlers need to be executed first otherwise a fake + * event confuses some handlers (kdump uses this flag) + */ + if (action->flags & NMI_FLAG_FIRST) + list_add_rcu(&action->list, &desc->head); + else + list_add_tail_rcu(&action->list, &desc->head); + + spin_unlock_irqrestore(&desc->lock, flags); + return 0; +} + +static struct nmiaction *__free_nmi(unsigned int type, const char *name) +{ + struct nmi_desc *desc = nmi_to_desc(type); + struct nmiaction *n; + unsigned long flags; + + spin_lock_irqsave(&desc->lock, flags); + + list_for_each_entry_rcu(n, &desc->head, list) { + /* + * the name passed in to describe the nmi handler + * is used as the lookup key + */ + if (!strcmp(n->name, name)) { + WARN(in_nmi(), + "Trying to free NMI (%s) from NMI context!\n", n->name); + list_del_rcu(&n->list); + break; + } + } + + spin_unlock_irqrestore(&desc->lock, flags); + synchronize_rcu(); + return (n); +} + +int register_nmi_handler(unsigned int type, nmi_handler_t handler, + unsigned long nmiflags, const char *devname) +{ + struct nmiaction *action; + int retval = -ENOMEM; + + if (!handler) + return -EINVAL; + + action = kzalloc(sizeof(struct nmiaction), GFP_KERNEL); + if (!action) + goto fail_action; + + action->handler = handler; + action->flags = nmiflags; + action->name = kstrndup(devname, NMI_MAX_NAMELEN, GFP_KERNEL); + if (!action->name) + goto fail_action_name; + + retval = __setup_nmi(type, action); + + if (retval) + goto fail_setup_nmi; + + return retval; + +fail_setup_nmi: + kfree(action->name); +fail_action_name: + kfree(action); +fail_action: + + return retval; +} +EXPORT_SYMBOL_GPL(register_nmi_handler); + +void unregister_nmi_handler(unsigned int type, const char *name) +{ + struct nmiaction *a; + + a = __free_nmi(type, name); + if (a) { + kfree(a->name); + kfree(a); + } +} + +EXPORT_SYMBOL_GPL(unregister_nmi_handler); + +static notrace __kprobes void +pci_serr_error(unsigned char reason, struct pt_regs *regs) +{ + pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n", + reason, smp_processor_id()); + + /* + * On some machines, PCI SERR line is used to report memory + * errors. EDAC makes use of it. 
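+ * (On such machines an uncorrectable memory error asserts SERR#,
+ * which shows up as NMI_REASON_SERR in the reason byte passed in;
+ * if an EDAC handler is registered it gets to decode the error
+ * before we fall through to the panic/continue logic below.)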
+ */ +#if defined(CONFIG_EDAC) + if (edac_handler_set()) { + edac_atomic_assert_error(); + return; + } +#endif + + if (panic_on_unrecovered_nmi) + panic("NMI: Not continuing"); + + pr_emerg("Dazed and confused, but trying to continue\n"); + + /* Clear and disable the PCI SERR error line. */ + reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR; + outb(reason, NMI_REASON_PORT); +} + +static notrace __kprobes void +io_check_error(unsigned char reason, struct pt_regs *regs) +{ + unsigned long i; + + pr_emerg( + "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n", + reason, smp_processor_id()); + show_registers(regs); + + if (panic_on_io_nmi) + panic("NMI IOCK error: Not continuing"); + + /* Re-enable the IOCK line, wait for a few seconds */ + reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK; + outb(reason, NMI_REASON_PORT); + + i = 20000; + while (--i) { + touch_nmi_watchdog(); + udelay(100); + } + + reason &= ~NMI_REASON_CLEAR_IOCHK; + outb(reason, NMI_REASON_PORT); +} + +static notrace __kprobes void +unknown_nmi_error(unsigned char reason, struct pt_regs *regs) +{ + int handled; + + /* + * Use 'false' as back-to-back NMIs are dealt with one level up. + * Of course this makes having multiple 'unknown' handlers useless + * as only the first one is ever run (unless it can actually determine + * if it caused the NMI) + */ + handled = nmi_handle(NMI_UNKNOWN, regs, false); + if (handled) { + __this_cpu_add(nmi_stats.unknown, handled); + return; + } + + __this_cpu_add(nmi_stats.unknown, 1); + +#ifdef CONFIG_MCA + /* + * Might actually be able to figure out what the guilty party + * is: + */ + if (MCA_bus) { + mca_handle_nmi(); + return; + } +#endif + pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n", + reason, smp_processor_id()); + + pr_emerg("Do you have a strange power saving mode enabled?\n"); + if (unknown_nmi_panic || panic_on_unrecovered_nmi) + panic("NMI: Not continuing"); + + pr_emerg("Dazed and confused, but trying to continue\n"); +} + +static DEFINE_PER_CPU(bool, swallow_nmi); +static DEFINE_PER_CPU(unsigned long, last_nmi_rip); + +static notrace __kprobes void default_do_nmi(struct pt_regs *regs) +{ + unsigned char reason = 0; + int handled; + bool b2b = false; + + /* + * CPU-specific NMI must be processed before non-CPU-specific + * NMI, otherwise we may lose it, because the CPU-specific + * NMI can not be detected/processed on other CPUs. + */ + + /* + * Back-to-back NMIs are interesting because they can either + * be two NMI or more than two NMIs (any thing over two is dropped + * due to NMI being edge-triggered). If this is the second half + * of the back-to-back NMI, assume we dropped things and process + * more handlers. Otherwise reset the 'swallow' NMI behaviour + */ + if (regs->ip == __this_cpu_read(last_nmi_rip)) + b2b = true; + else + __this_cpu_write(swallow_nmi, false); + + __this_cpu_write(last_nmi_rip, regs->ip); + + handled = nmi_handle(NMI_LOCAL, regs, b2b); + __this_cpu_add(nmi_stats.normal, handled); + if (handled) { + /* + * There are cases when a NMI handler handles multiple + * events in the current NMI. One of these events may + * be queued for in the next NMI. Because the event is + * already handled, the next NMI will result in an unknown + * NMI. Instead lets flag this for a potential NMI to + * swallow. 
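+ * Concretely (a hypothetical sequence): two perf counters overflow
+ * almost together; the first NMI's perf handler sees and handles both
+ * events (handled == 2), but a second NMI is already latched and will
+ * fire with nothing left to do. Setting swallow_nmi here lets that
+ * follow-up NMI be dropped instead of being reported as unknown.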
+ */ + if (handled > 1) + __this_cpu_write(swallow_nmi, true); + return; + } + + /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */ + raw_spin_lock(&nmi_reason_lock); + reason = get_nmi_reason(); + + if (reason & NMI_REASON_MASK) { + if (reason & NMI_REASON_SERR) + pci_serr_error(reason, regs); + else if (reason & NMI_REASON_IOCHK) + io_check_error(reason, regs); +#ifdef CONFIG_X86_32 + /* + * Reassert NMI in case it became active + * meanwhile as it's edge-triggered: + */ + reassert_nmi(); +#endif + __this_cpu_add(nmi_stats.external, 1); + raw_spin_unlock(&nmi_reason_lock); + return; + } + raw_spin_unlock(&nmi_reason_lock); + + /* + * Only one NMI can be latched at a time. To handle + * this we may process multiple nmi handlers at once to + * cover the case where an NMI is dropped. The downside + * to this approach is we may process an NMI prematurely, + * while its real NMI is sitting latched. This will cause + * an unknown NMI on the next run of the NMI processing. + * + * We tried to flag that condition above, by setting the + * swallow_nmi flag when we process more than one event. + * This condition is also only present on the second half + * of a back-to-back NMI, so we flag that condition too. + * + * If both are true, we assume we already processed this + * NMI previously and we swallow it. Otherwise we reset + * the logic. + * + * There are scenarios where we may accidentally swallow + * a 'real' unknown NMI. For example, while processing + * a perf NMI another perf NMI comes in along with a + * 'real' unknown NMI. These two NMIs get combined into + * one (as descibed above). When the next NMI gets + * processed, it will be flagged by perf as handled, but + * noone will know that there was a 'real' unknown NMI sent + * also. As a result it gets swallowed. Or if the first + * perf NMI returns two events handled then the second + * NMI will get eaten by the logic below, again losing a + * 'real' unknown NMI. But this is the best we can do + * for now. + */ + if (b2b && __this_cpu_read(swallow_nmi)) + __this_cpu_add(nmi_stats.swallow, 1); + else + unknown_nmi_error(reason, regs); +} + +dotraplinkage notrace __kprobes void +do_nmi(struct pt_regs *regs, long error_code) +{ + nmi_enter(); + + inc_irq_stat(__nmi_count); + + if (!ignore_nmis) + default_do_nmi(regs); + + nmi_exit(); +} + +void stop_nmi(void) +{ + ignore_nmis++; +} + +void restart_nmi(void) +{ + ignore_nmis--; +} + +/* reset the back-to-back NMI logic */ +void local_touch_nmi(void) +{ + __this_cpu_write(last_nmi_rip, 0); +} diff --cc block/bsg-lib.c index 6690e6e41037,6690e6e41037..7ad49c88f6b1 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@@ -25,7 -25,7 +25,7 @@@ #include #include #include --#include ++#include #include /** diff --cc drivers/base/power/common.c index 29820c396182,000000000000..4af7c1cbf909 mode 100644,000000..100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@@ -1,86 -1,0 +1,86 @@@ +/* + * drivers/base/power/common.c - Common device power management code. + * + * Copyright (C) 2011 Rafael J. Wysocki , Renesas Electronics Corp. + * + * This file is released under the GPLv2. + */ + +#include +#include - #include ++#include +#include +#include + +/** + * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device. + * @dev: Device to handle. + * + * If power.subsys_data is NULL, point it to a new object, otherwise increment + * its reference counter. Return 1 if a new object has been created, otherwise + * return 0 or error code. 
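+ *
+ * A minimal usage sketch (hypothetical caller, not part of this file):
+ *
+ *	if (dev_pm_get_subsys_data(dev) < 0)
+ *		return -ENOMEM;
+ *	... use dev->power.subsys_data ...
+ *	dev_pm_put_subsys_data(dev);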
+ */ +int dev_pm_get_subsys_data(struct device *dev) +{ + struct pm_subsys_data *psd; + int ret = 0; + + psd = kzalloc(sizeof(*psd), GFP_KERNEL); + if (!psd) + return -ENOMEM; + + spin_lock_irq(&dev->power.lock); + + if (dev->power.subsys_data) { + dev->power.subsys_data->refcount++; + } else { + spin_lock_init(&psd->lock); + psd->refcount = 1; + dev->power.subsys_data = psd; + pm_clk_init(dev); + psd = NULL; + ret = 1; + } + + spin_unlock_irq(&dev->power.lock); + + /* kfree() verifies that its argument is nonzero. */ + kfree(psd); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data); + +/** + * dev_pm_put_subsys_data - Drop reference to power.subsys_data. + * @dev: Device to handle. + * + * If the reference counter of power.subsys_data is zero after dropping the + * reference, power.subsys_data is removed. Return 1 if that happens or 0 + * otherwise. + */ +int dev_pm_put_subsys_data(struct device *dev) +{ + struct pm_subsys_data *psd; + int ret = 0; + + spin_lock_irq(&dev->power.lock); + + psd = dev_to_psd(dev); + if (!psd) { + ret = -EINVAL; + goto out; + } + + if (--psd->refcount == 0) { + dev->power.subsys_data = NULL; + kfree(psd); + ret = 1; + } + + out: + spin_unlock_irq(&dev->power.lock); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data); diff --cc drivers/base/power/qos.c index 91e061417382,000000000000..30a94eadc200 mode 100644,000000..100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@@ -1,419 -1,0 +1,420 @@@ +/* + * Devices PM QoS constraints management + * + * Copyright (C) 2011 Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * + * This module exposes the interface to kernel space for specifying + * per-device PM QoS dependencies. It provides infrastructure for registration + * of: + * + * Dependents on a QoS value : register requests + * Watchers of QoS value : get notified when target QoS value changes + * + * This QoS design is best effort based. Dependents register their QoS needs. + * Watchers register to keep track of the current QoS needs of the system. + * Watchers can register different types of notification callbacks: + * . a per-device notification callback using the dev_pm_qos_*_notifier API. + * The notification chain data is stored in the per-device constraint + * data struct. + * . a system-wide notification callback using the dev_pm_qos_*_global_notifier + * API. The notification chain data is stored in a static variable. + * + * Note about the per-device constraint data struct allocation: + * . The per-device constraints data struct ptr is tored into the device + * dev_pm_info. + * . To minimize the data usage by the per-device constraints, the data struct + * is only allocated at the first call to dev_pm_qos_add_request. + * . The data is later free'd when the device is removed from the system. + * . A global mutex protects the constraints users from the data being + * allocated and free'd. + */ + +#include +#include +#include +#include +#include ++#include + + +static DEFINE_MUTEX(dev_pm_qos_mtx); + +static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); + +/** + * dev_pm_qos_read_value - Get PM QoS constraint for a given device. + * @dev: Device to get the PM QoS constraint value for. 
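+ *
+ * Returns the aggregated constraint value, or 0 when no constraints
+ * object has been allocated for the device. A power domain might use
+ * it like this (hypothetical names):
+ *
+ *	s32 latency = dev_pm_qos_read_value(dev);
+ *	if (latency && latency < MY_DOMAIN_OFF_LATENCY)
+ *		use_shallow_state = true;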
+ */ +s32 dev_pm_qos_read_value(struct device *dev) +{ + struct pm_qos_constraints *c; + unsigned long flags; + s32 ret = 0; + + spin_lock_irqsave(&dev->power.lock, flags); + + c = dev->power.constraints; + if (c) + ret = pm_qos_read_value(c); + + spin_unlock_irqrestore(&dev->power.lock, flags); + + return ret; +} + +/* + * apply_constraint + * @req: constraint request to apply + * @action: action to perform add/update/remove, of type enum pm_qos_req_action + * @value: defines the qos request + * + * Internal function to update the constraints list using the PM QoS core + * code and if needed call the per-device and the global notification + * callbacks + */ +static int apply_constraint(struct dev_pm_qos_request *req, + enum pm_qos_req_action action, int value) +{ + int ret, curr_value; + + ret = pm_qos_update_target(req->dev->power.constraints, + &req->node, action, value); + + if (ret) { + /* Call the global callbacks if needed */ + curr_value = pm_qos_read_value(req->dev->power.constraints); + blocking_notifier_call_chain(&dev_pm_notifiers, + (unsigned long)curr_value, + req); + } + + return ret; +} + +/* + * dev_pm_qos_constraints_allocate + * @dev: device to allocate data for + * + * Called at the first call to add_request, for constraint data allocation + * Must be called with the dev_pm_qos_mtx mutex held + */ +static int dev_pm_qos_constraints_allocate(struct device *dev) +{ + struct pm_qos_constraints *c; + struct blocking_notifier_head *n; + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return -ENOMEM; + + n = kzalloc(sizeof(*n), GFP_KERNEL); + if (!n) { + kfree(c); + return -ENOMEM; + } + BLOCKING_INIT_NOTIFIER_HEAD(n); + + plist_head_init(&c->list); + c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; + c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; + c->type = PM_QOS_MIN; + c->notifiers = n; + + spin_lock_irq(&dev->power.lock); + dev->power.constraints = c; + spin_unlock_irq(&dev->power.lock); + + return 0; +} + +/** + * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer. + * @dev: target device + * + * Called from the device PM subsystem during device insertion under + * device_pm_lock(). + */ +void dev_pm_qos_constraints_init(struct device *dev) +{ + mutex_lock(&dev_pm_qos_mtx); + dev->power.constraints = NULL; + dev->power.power_state = PMSG_ON; + mutex_unlock(&dev_pm_qos_mtx); +} + +/** + * dev_pm_qos_constraints_destroy + * @dev: target device + * + * Called from the device PM subsystem on device removal under device_pm_lock(). 
+ */ +void dev_pm_qos_constraints_destroy(struct device *dev) +{ + struct dev_pm_qos_request *req, *tmp; + struct pm_qos_constraints *c; + + mutex_lock(&dev_pm_qos_mtx); + + dev->power.power_state = PMSG_INVALID; + c = dev->power.constraints; + if (!c) + goto out; + + /* Flush the constraints list for the device */ + plist_for_each_entry_safe(req, tmp, &c->list, node) { + /* + * Update constraints list and call the notification + * callbacks if needed + */ + apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); + memset(req, 0, sizeof(*req)); + } + + spin_lock_irq(&dev->power.lock); + dev->power.constraints = NULL; + spin_unlock_irq(&dev->power.lock); + + kfree(c->notifiers); + kfree(c); + + out: + mutex_unlock(&dev_pm_qos_mtx); +} + +/** + * dev_pm_qos_add_request - inserts new qos request into the list + * @dev: target device for the constraint + * @req: pointer to a preallocated handle + * @value: defines the qos request + * + * This function inserts a new entry in the device constraints list of + * requested qos performance characteristics. It recomputes the aggregate + * QoS expectations of parameters and initializes the dev_pm_qos_request + * handle. Caller needs to save this handle for later use in updates and + * removal. + * + * Returns 1 if the aggregated constraint value has changed, + * 0 if the aggregated constraint value has not changed, + * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory + * to allocate for data structures, -ENODEV if the device has just been removed + * from the system. + */ +int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, + s32 value) +{ + int ret = 0; + + if (!dev || !req) /*guard against callers passing in null */ + return -EINVAL; + + if (dev_pm_qos_request_active(req)) { + WARN(1, KERN_ERR "dev_pm_qos_add_request() called for already " + "added request\n"); + return -EINVAL; + } + + req->dev = dev; + + mutex_lock(&dev_pm_qos_mtx); + + if (!dev->power.constraints) { + if (dev->power.power_state.event == PM_EVENT_INVALID) { + /* The device has been removed from the system. */ + req->dev = NULL; + ret = -ENODEV; + goto out; + } else { + /* + * Allocate the constraints data on the first call to + * add_request, i.e. only if the data is not already + * allocated and if the device has not been removed. + */ + ret = dev_pm_qos_constraints_allocate(dev); + } + } + + if (!ret) + ret = apply_constraint(req, PM_QOS_ADD_REQ, value); + + out: + mutex_unlock(&dev_pm_qos_mtx); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_add_request); + +/** + * dev_pm_qos_update_request - modifies an existing qos request + * @req : handle to list element holding a dev_pm_qos request to use + * @new_value: defines the qos request + * + * Updates an existing dev PM qos request along with updating the + * target value. + * + * Attempts are made to make this code callable on hot code paths. 
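+ *
+ * For example (hypothetical driver fields and value):
+ *
+ *	ret = dev_pm_qos_update_request(&drv->qos_req, 100);
+ *	if (ret < 0)
+ *		dev_warn(dev, "PM QoS update failed: %d\n", ret);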
+ * + * Returns 1 if the aggregated constraint value has changed, + * 0 if the aggregated constraint value has not changed, + * -EINVAL in case of wrong parameters, -ENODEV if the device has been + * removed from the system + */ +int dev_pm_qos_update_request(struct dev_pm_qos_request *req, + s32 new_value) +{ + int ret = 0; + + if (!req) /*guard against callers passing in null */ + return -EINVAL; + + if (!dev_pm_qos_request_active(req)) { + WARN(1, KERN_ERR "dev_pm_qos_update_request() called for " + "unknown object\n"); + return -EINVAL; + } + + mutex_lock(&dev_pm_qos_mtx); + + if (req->dev->power.constraints) { + if (new_value != req->node.prio) + ret = apply_constraint(req, PM_QOS_UPDATE_REQ, + new_value); + } else { + /* Return if the device has been removed */ + ret = -ENODEV; + } + + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); + +/** + * dev_pm_qos_remove_request - modifies an existing qos request + * @req: handle to request list element + * + * Will remove pm qos request from the list of constraints and + * recompute the current target value. Call this on slow code paths. + * + * Returns 1 if the aggregated constraint value has changed, + * 0 if the aggregated constraint value has not changed, + * -EINVAL in case of wrong parameters, -ENODEV if the device has been + * removed from the system + */ +int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) +{ + int ret = 0; + + if (!req) /*guard against callers passing in null */ + return -EINVAL; + + if (!dev_pm_qos_request_active(req)) { + WARN(1, KERN_ERR "dev_pm_qos_remove_request() called for " + "unknown object\n"); + return -EINVAL; + } + + mutex_lock(&dev_pm_qos_mtx); + + if (req->dev->power.constraints) { + ret = apply_constraint(req, PM_QOS_REMOVE_REQ, + PM_QOS_DEFAULT_VALUE); + memset(req, 0, sizeof(*req)); + } else { + /* Return if the device has been removed */ + ret = -ENODEV; + } + + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request); + +/** + * dev_pm_qos_add_notifier - sets notification entry for changes to target value + * of per-device PM QoS constraints + * + * @dev: target device for the constraint + * @notifier: notifier block managed by caller. + * + * Will register the notifier into a notification chain that gets called + * upon changes to the target value for the device. + */ +int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier) +{ + int retval = 0; + + mutex_lock(&dev_pm_qos_mtx); + + /* Silently return if the constraints object is not present. */ + if (dev->power.constraints) + retval = blocking_notifier_chain_register( + dev->power.constraints->notifiers, + notifier); + + mutex_unlock(&dev_pm_qos_mtx); + return retval; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier); + +/** + * dev_pm_qos_remove_notifier - deletes notification for changes to target value + * of per-device PM QoS constraints + * + * @dev: target device for the constraint + * @notifier: notifier block to be removed. + * + * Will remove the notifier from the notification chain that gets called + * upon changes to the target value. + */ +int dev_pm_qos_remove_notifier(struct device *dev, + struct notifier_block *notifier) +{ + int retval = 0; + + mutex_lock(&dev_pm_qos_mtx); + + /* Silently return if the constraints object is not present. 
*/ + if (dev->power.constraints) + retval = blocking_notifier_chain_unregister( + dev->power.constraints->notifiers, + notifier); + + mutex_unlock(&dev_pm_qos_mtx); + return retval; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier); + +/** + * dev_pm_qos_add_global_notifier - sets notification entry for changes to + * target value of the PM QoS constraints for any device + * + * @notifier: notifier block managed by caller. + * + * Will register the notifier into a notification chain that gets called + * upon changes to the target value for any device. + */ +int dev_pm_qos_add_global_notifier(struct notifier_block *notifier) +{ + return blocking_notifier_chain_register(&dev_pm_notifiers, notifier); +} +EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier); + +/** + * dev_pm_qos_remove_global_notifier - deletes notification for changes to + * target value of PM QoS constraints for any device + * + * @notifier: notifier block to be removed. + * + * Will remove the notifier from the notification chain that gets called + * upon changes to the target value for any device. + */ +int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier) +{ + return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier); +} +EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier); diff --cc drivers/base/power/runtime.c index 6bb3aafa85ed,5b084de0fbe9..1079e030fd9e --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@@ -8,8 -8,8 +8,9 @@@ */ #include + #include #include +#include #include "power.h" static int rpm_resume(struct device *dev, int rpmflags); diff --cc drivers/base/regmap/regcache.c index afcfef838263,000000000000..666f6f5011dc mode 100644,000000..100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@@ -1,401 -1,0 +1,402 @@@ +/* + * Register cache access API + * + * Copyright 2011 Wolfson Microelectronics plc + * + * Author: Dimitris Papastamos + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include ++#include +#include +#include +#include + +#include "internal.h" + +static const struct regcache_ops *cache_types[] = { + ®cache_indexed_ops, + ®cache_rbtree_ops, + ®cache_lzo_ops, +}; + +static int regcache_hw_init(struct regmap *map) +{ + int i, j; + int ret; + int count; + unsigned int val; + void *tmp_buf; + + if (!map->num_reg_defaults_raw) + return -EINVAL; + + if (!map->reg_defaults_raw) { + dev_warn(map->dev, "No cache defaults, reading back from HW\n"); + tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL); + if (!tmp_buf) + return -EINVAL; + ret = regmap_bulk_read(map, 0, tmp_buf, + map->num_reg_defaults_raw); + if (ret < 0) { + kfree(tmp_buf); + return ret; + } + map->reg_defaults_raw = tmp_buf; + map->cache_free = 1; + } + + /* calculate the size of reg_defaults */ + for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) { + val = regcache_get_val(map->reg_defaults_raw, + i, map->cache_word_size); + if (!val) + continue; + count++; + } + + map->reg_defaults = kmalloc(count * sizeof(struct reg_default), + GFP_KERNEL); + if (!map->reg_defaults) + return -ENOMEM; + + /* fill the reg_defaults */ + map->num_reg_defaults = count; + for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) { + val = regcache_get_val(map->reg_defaults_raw, + i, map->cache_word_size); + if (!val) + continue; + map->reg_defaults[j].reg = i; + map->reg_defaults[j].def = val; + j++; + } + + return 0; +} + +int regcache_init(struct regmap *map) +{ + int ret; + int i; + void *tmp_buf; + + if (map->cache_type == REGCACHE_NONE) { + map->cache_bypass = true; + return 0; + } + + for (i = 0; i < ARRAY_SIZE(cache_types); i++) + if (cache_types[i]->type == map->cache_type) + break; + + if (i == ARRAY_SIZE(cache_types)) { + dev_err(map->dev, "Could not match compress type: %d\n", + map->cache_type); + return -EINVAL; + } + + map->cache = NULL; + map->cache_ops = cache_types[i]; + + if (!map->cache_ops->read || + !map->cache_ops->write || + !map->cache_ops->name) + return -EINVAL; + + /* We still need to ensure that the reg_defaults + * won't vanish from under us. We'll need to make + * a copy of it. + */ + if (map->reg_defaults) { + if (!map->num_reg_defaults) + return -EINVAL; + tmp_buf = kmemdup(map->reg_defaults, map->num_reg_defaults * + sizeof(struct reg_default), GFP_KERNEL); + if (!tmp_buf) + return -ENOMEM; + map->reg_defaults = tmp_buf; + } else if (map->num_reg_defaults_raw) { + /* Some devices such as PMICs don't have cache defaults, + * we cope with this by reading back the HW registers and + * crafting the cache defaults by hand. + */ + ret = regcache_hw_init(map); + if (ret < 0) + return ret; + } + + if (!map->max_register) + map->max_register = map->num_reg_defaults_raw; + + if (map->cache_ops->init) { + dev_dbg(map->dev, "Initializing %s cache\n", + map->cache_ops->name); + return map->cache_ops->init(map); + } + return 0; +} + +void regcache_exit(struct regmap *map) +{ + if (map->cache_type == REGCACHE_NONE) + return; + + BUG_ON(!map->cache_ops); + + kfree(map->reg_defaults); + if (map->cache_free) + kfree(map->reg_defaults_raw); + + if (map->cache_ops->exit) { + dev_dbg(map->dev, "Destroying %s cache\n", + map->cache_ops->name); + map->cache_ops->exit(map); + } +} + +/** + * regcache_read: Fetch the value of a given register from the cache. + * + * @map: map to configure. + * @reg: The register index. + * @value: The value to be returned. + * + * Return a negative value on failure, 0 on success. 
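+ *
+ * For example (hypothetical register and map):
+ *
+ *	unsigned int val;
+ *	if (regcache_read(map, 0x10, &val) == 0)
+ *		dev_dbg(map->dev, "reg 0x10 cached as %#x\n", val);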
+ */ +int regcache_read(struct regmap *map, + unsigned int reg, unsigned int *value) +{ + if (map->cache_type == REGCACHE_NONE) + return -ENOSYS; + + BUG_ON(!map->cache_ops); + + if (!regmap_readable(map, reg)) + return -EIO; + + if (!regmap_volatile(map, reg)) + return map->cache_ops->read(map, reg, value); + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(regcache_read); + +/** + * regcache_write: Set the value of a given register in the cache. + * + * @map: map to configure. + * @reg: The register index. + * @value: The new register value. + * + * Return a negative value on failure, 0 on success. + */ +int regcache_write(struct regmap *map, + unsigned int reg, unsigned int value) +{ + if (map->cache_type == REGCACHE_NONE) + return 0; + + BUG_ON(!map->cache_ops); + + if (!regmap_writeable(map, reg)) + return -EIO; + + if (!regmap_volatile(map, reg)) + return map->cache_ops->write(map, reg, value); + + return 0; +} +EXPORT_SYMBOL_GPL(regcache_write); + +/** + * regcache_sync: Sync the register cache with the hardware. + * + * @map: map to configure. + * + * Any registers that should not be synced should be marked as + * volatile. In general drivers can choose not to use the provided + * syncing functionality if they so require. + * + * Return a negative value on failure, 0 on success. + */ +int regcache_sync(struct regmap *map) +{ + int ret = 0; + unsigned int val; + unsigned int i; + const char *name; + unsigned int bypass; + + BUG_ON(!map->cache_ops); + + mutex_lock(&map->lock); + /* Remember the initial bypass state */ + bypass = map->cache_bypass; + dev_dbg(map->dev, "Syncing %s cache\n", + map->cache_ops->name); + name = map->cache_ops->name; + trace_regcache_sync(map->dev, name, "start"); + if (map->cache_ops->sync) { + ret = map->cache_ops->sync(map); + } else { + for (i = 0; i < map->num_reg_defaults; i++) { + ret = regcache_read(map, i, &val); + if (ret < 0) + goto out; + map->cache_bypass = 1; + ret = _regmap_write(map, i, val); + map->cache_bypass = 0; + if (ret < 0) + goto out; + dev_dbg(map->dev, "Synced register %#x, value %#x\n", + map->reg_defaults[i].reg, + map->reg_defaults[i].def); + } + + } +out: + trace_regcache_sync(map->dev, name, "stop"); + /* Restore the bypass state */ + map->cache_bypass = bypass; + mutex_unlock(&map->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(regcache_sync); + +/** + * regcache_cache_only: Put a register map into cache only mode + * + * @map: map to configure + * @cache_only: flag if changes should be written to the hardware + * + * When a register map is marked as cache only writes to the register + * map API will only update the register cache, they will not cause + * any hardware changes. This is useful for allowing portions of + * drivers to act as though the device were functioning as normal when + * it is disabled for power saving reasons. + */ +void regcache_cache_only(struct regmap *map, bool enable) +{ + mutex_lock(&map->lock); + WARN_ON(map->cache_bypass && enable); + map->cache_only = enable; + mutex_unlock(&map->lock); +} +EXPORT_SYMBOL_GPL(regcache_cache_only); + +/** + * regcache_cache_bypass: Put a register map into cache bypass mode + * + * @map: map to configure + * @cache_bypass: flag if changes should not be written to the hardware + * + * When a register map is marked with the cache bypass option, writes + * to the register map API will only update the hardware and not the + * the cache directly. This is useful when syncing the cache back to + * the hardware. 
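+ *
+ * A typical sequence (sketch; regcache_sync() above already toggles
+ * map->cache_bypass internally to the same effect):
+ *
+ *	regcache_cache_bypass(map, true);
+ *	... write registers straight to the hardware ...
+ *	regcache_cache_bypass(map, false);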
+ */ +void regcache_cache_bypass(struct regmap *map, bool enable) +{ + mutex_lock(&map->lock); + WARN_ON(map->cache_only && enable); + map->cache_bypass = enable; + mutex_unlock(&map->lock); +} +EXPORT_SYMBOL_GPL(regcache_cache_bypass); + +bool regcache_set_val(void *base, unsigned int idx, + unsigned int val, unsigned int word_size) +{ + switch (word_size) { + case 1: { + u8 *cache = base; + if (cache[idx] == val) + return true; + cache[idx] = val; + break; + } + case 2: { + u16 *cache = base; + if (cache[idx] == val) + return true; + cache[idx] = val; + break; + } + default: + BUG(); + } + /* unreachable */ + return false; +} + +unsigned int regcache_get_val(const void *base, unsigned int idx, + unsigned int word_size) +{ + if (!base) + return -EINVAL; + + switch (word_size) { + case 1: { + const u8 *cache = base; + return cache[idx]; + } + case 2: { + const u16 *cache = base; + return cache[idx]; + } + default: + BUG(); + } + /* unreachable */ + return -1; +} + +static int regcache_default_cmp(const void *a, const void *b) +{ + const struct reg_default *_a = a; + const struct reg_default *_b = b; + + return _a->reg - _b->reg; +} + +int regcache_lookup_reg(struct regmap *map, unsigned int reg) +{ + struct reg_default key; + struct reg_default *r; + + key.reg = reg; + key.def = 0; + + r = bsearch(&key, map->reg_defaults, map->num_reg_defaults, + sizeof(struct reg_default), regcache_default_cmp); + + if (r) + return r - map->reg_defaults; + else + return -ENOENT; +} + +int regcache_insert_reg(struct regmap *map, unsigned int reg, + unsigned int val) +{ + void *tmp; + + tmp = krealloc(map->reg_defaults, + (map->num_reg_defaults + 1) * sizeof(struct reg_default), + GFP_KERNEL); + if (!tmp) + return -ENOMEM; + map->reg_defaults = tmp; + map->num_reg_defaults++; + map->reg_defaults[map->num_reg_defaults - 1].reg = reg; + map->reg_defaults[map->num_reg_defaults - 1].def = val; + sort(map->reg_defaults, map->num_reg_defaults, + sizeof(struct reg_default), regcache_default_cmp, NULL); + return 0; +} diff --cc drivers/bcma/driver_chipcommon_pmu.c index 2968d809d49f,fcc63db0ce75..800163c8c2e7 --- a/drivers/bcma/driver_chipcommon_pmu.c +++ b/drivers/bcma/driver_chipcommon_pmu.c @@@ -9,49 -9,22 +9,50 @@@ */ #include "bcma_private.h" ++#include #include -static void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc, - u32 offset, u32 mask, u32 set) +static u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset) { - u32 value; + bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset); + bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR); + return bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA); +} - bcma_cc_read32(cc, BCMA_CC_CHIPCTL_ADDR); +void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value) +{ + bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset); + bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR); + bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value); +} +EXPORT_SYMBOL_GPL(bcma_chipco_pll_write); + +void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask, + u32 set) +{ + bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset); + bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR); + bcma_cc_maskset32(cc, BCMA_CC_PLLCTL_DATA, mask, set); +} +EXPORT_SYMBOL_GPL(bcma_chipco_pll_maskset); + +void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc, + u32 offset, u32 mask, u32 set) +{ bcma_cc_write32(cc, BCMA_CC_CHIPCTL_ADDR, offset); bcma_cc_read32(cc, BCMA_CC_CHIPCTL_ADDR); - value = bcma_cc_read32(cc, BCMA_CC_CHIPCTL_DATA); - value &= mask; - value |= set; - bcma_cc_write32(cc, BCMA_CC_CHIPCTL_DATA, value); - 
bcma_cc_read32(cc, BCMA_CC_CHIPCTL_DATA); + bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL_DATA, mask, set); +} +EXPORT_SYMBOL_GPL(bcma_chipco_chipctl_maskset); + +void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask, + u32 set) +{ + bcma_cc_write32(cc, BCMA_CC_REGCTL_ADDR, offset); + bcma_cc_read32(cc, BCMA_CC_REGCTL_ADDR); + bcma_cc_maskset32(cc, BCMA_CC_REGCTL_DATA, mask, set); } +EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset); static void bcma_pmu_pll_init(struct bcma_drv_cc *cc) { diff --cc drivers/cpuidle/governors/ladder.c index f62fde21e962,2f138b5ded36..7f0739bab8d0 --- a/drivers/cpuidle/governors/ladder.c +++ b/drivers/cpuidle/governors/ladder.c @@@ -14,9 -14,10 +14,10 @@@ #include #include -#include +#include #include #include + #include #include #include diff --cc drivers/gpio/gpio-ep93xx.c index 7aafbb437339,468b27dfd541..386d9ffe01d1 --- a/drivers/gpio/gpio-ep93xx.c +++ b/drivers/gpio/gpio-ep93xx.c @@@ -21,11 -21,9 +21,12 @@@ #include #include #include + #include #include +#include + +#define irq_to_gpio(irq) ((irq) - gpio_to_irq(0)) struct ep93xx_gpio { void __iomem *mmio_base; diff --cc drivers/gpio/gpio-mxc.c index b588f8a41e60,73a177b92e84..0f189b34bda3 --- a/drivers/gpio/gpio-mxc.c +++ b/drivers/gpio/gpio-mxc.c @@@ -29,10 -29,9 +29,11 @@@ #include #include #include + #include #include +#define irq_to_gpio(irq) ((irq) - MXC_GPIO_IRQ_START) + enum mxc_gpio_hwtype { IMX1_GPIO, /* runs on i.mx1 */ IMX21_GPIO, /* runs on i.mx21 and i.mx27 */ diff --cc drivers/infiniband/hw/ipath/ipath_init_chip.c index 824a4d508836,692630a3eafb..49b09c697c7c --- a/drivers/infiniband/hw/ipath/ipath_init_chip.c +++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c @@@ -33,8 -33,8 +33,9 @@@ #include #include + #include #include +#include #include #include "ipath_kernel.h" diff --cc drivers/md/persistent-data/dm-btree-remove.c index cdd71d67def9,000000000000..e7071f66dc39 mode 100644,000000..100644 --- a/drivers/md/persistent-data/dm-btree-remove.c +++ b/drivers/md/persistent-data/dm-btree-remove.c @@@ -1,571 -1,0 +1,571 @@@ +/* + * Copyright (C) 2011 Red Hat, Inc. All rights reserved. + * + * This file is released under the GPL. + */ + +#include "dm-btree.h" +#include "dm-btree-internal.h" +#include "dm-transaction-manager.h" + - #include ++#include + +/* + * Removing an entry from a btree + * ============================== + * + * A very important constraint for our btree is that no node, except the + * root, may have fewer than a certain number of entries. + * (MIN_ENTRIES <= nr_entries <= MAX_ENTRIES). + * + * Ensuring this is complicated by the way we want to only ever hold the + * locks on 2 nodes concurrently, and only change nodes in a top to bottom + * fashion. + * + * Each node may have a left or right sibling. When decending the spine, + * if a node contains only MIN_ENTRIES then we try and increase this to at + * least MIN_ENTRIES + 1. We do this in the following ways: + * + * [A] No siblings => this can only happen if the node is the root, in which + * case we copy the childs contents over the root. 
+ * + * [B] No left sibling + * ==> rebalance(node, right sibling) + * + * [C] No right sibling + * ==> rebalance(left sibling, node) + * + * [D] Both siblings, total_entries(left, node, right) <= DEL_THRESHOLD + * ==> delete node adding it's contents to left and right + * + * [E] Both siblings, total_entries(left, node, right) > DEL_THRESHOLD + * ==> rebalance(left, node, right) + * + * After these operations it's possible that the our original node no + * longer contains the desired sub tree. For this reason this rebalancing + * is performed on the children of the current node. This also avoids + * having a special case for the root. + * + * Once this rebalancing has occurred we can then step into the child node + * for internal nodes. Or delete the entry for leaf nodes. + */ + +/* + * Some little utilities for moving node data around. + */ +static void node_shift(struct node *n, int shift) +{ + uint32_t nr_entries = le32_to_cpu(n->header.nr_entries); + + if (shift < 0) { + shift = -shift; + memmove(key_ptr(n, 0), + key_ptr(n, shift), + (nr_entries - shift) * sizeof(__le64)); + memmove(value_ptr(n, 0, sizeof(__le64)), + value_ptr(n, shift, sizeof(__le64)), + (nr_entries - shift) * sizeof(__le64)); + } else { + memmove(key_ptr(n, shift), + key_ptr(n, 0), + nr_entries * sizeof(__le64)); + memmove(value_ptr(n, shift, sizeof(__le64)), + value_ptr(n, 0, sizeof(__le64)), + nr_entries * sizeof(__le64)); + } +} + +static void node_copy(struct node *left, struct node *right, int shift) +{ + uint32_t nr_left = le32_to_cpu(left->header.nr_entries); + + if (shift < 0) { + shift = -shift; + memcpy(key_ptr(left, nr_left), + key_ptr(right, 0), + shift * sizeof(__le64)); + memcpy(value_ptr(left, nr_left, sizeof(__le64)), + value_ptr(right, 0, sizeof(__le64)), + shift * sizeof(__le64)); + } else { + memcpy(key_ptr(right, 0), + key_ptr(left, nr_left - shift), + shift * sizeof(__le64)); + memcpy(value_ptr(right, 0, sizeof(__le64)), + value_ptr(left, nr_left - shift, sizeof(__le64)), + shift * sizeof(__le64)); + } +} + +/* + * Delete a specific entry from a leaf node. + */ +static void delete_at(struct node *n, unsigned index, size_t value_size) +{ + unsigned nr_entries = le32_to_cpu(n->header.nr_entries); + unsigned nr_to_copy = nr_entries - (index + 1); + + if (nr_to_copy) { + memmove(key_ptr(n, index), + key_ptr(n, index + 1), + nr_to_copy * sizeof(__le64)); + + memmove(value_ptr(n, index, value_size), + value_ptr(n, index + 1, value_size), + nr_to_copy * value_size); + } + + n->header.nr_entries = cpu_to_le32(nr_entries - 1); +} + +static unsigned del_threshold(struct node *n) +{ + return le32_to_cpu(n->header.max_entries) / 3; +} + +static unsigned merge_threshold(struct node *n) +{ + /* + * The extra one is because we know we're potentially going to + * delete an entry. 
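+ *
+ * E.g. with max_entries == 9 this evaluates to 2 * (9 / 3) + 1 == 7,
+ * so two siblings holding up to 7 entries between them are merged;
+ * after the pending delete the combined node holds at most 6 entries,
+ * well within max_entries.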
+ */ + return 2 * (le32_to_cpu(n->header.max_entries) / 3) + 1; +} + +struct child { + unsigned index; + struct dm_block *block; + struct node *n; +}; + +static struct dm_btree_value_type le64_type = { + .context = NULL, + .size = sizeof(__le64), + .inc = NULL, + .dec = NULL, + .equal = NULL +}; + +static int init_child(struct dm_btree_info *info, struct node *parent, + unsigned index, struct child *result) +{ + int r, inc; + dm_block_t root; + + result->index = index; + root = value64(parent, index); + + r = dm_tm_shadow_block(info->tm, root, &btree_node_validator, + &result->block, &inc); + if (r) + return r; + + result->n = dm_block_data(result->block); + + if (inc) + inc_children(info->tm, result->n, &le64_type); + + return 0; +} + +static int exit_child(struct dm_btree_info *info, struct child *c) +{ + return dm_tm_unlock(info->tm, c->block); +} + +static void shift(struct node *left, struct node *right, int count) +{ + if (!count) + return; + + if (count > 0) { + node_shift(right, count); + node_copy(left, right, count); + } else { + node_copy(left, right, count); + node_shift(right, count); + } + + left->header.nr_entries = + cpu_to_le32(le32_to_cpu(left->header.nr_entries) - count); + + right->header.nr_entries = + cpu_to_le32(le32_to_cpu(right->header.nr_entries) + count); +} + +static void __rebalance2(struct dm_btree_info *info, struct node *parent, + struct child *l, struct child *r) +{ + struct node *left = l->n; + struct node *right = r->n; + uint32_t nr_left = le32_to_cpu(left->header.nr_entries); + uint32_t nr_right = le32_to_cpu(right->header.nr_entries); + + if (nr_left + nr_right <= merge_threshold(left)) { + /* + * Merge + */ + node_copy(left, right, -nr_right); + left->header.nr_entries = cpu_to_le32(nr_left + nr_right); + + *((__le64 *) value_ptr(parent, l->index, sizeof(__le64))) = + cpu_to_le64(dm_block_location(l->block)); + delete_at(parent, r->index, sizeof(__le64)); + + /* + * We need to decrement the right block, but not it's + * children, since they're still referenced by left. + */ + dm_tm_dec(info->tm, dm_block_location(r->block)); + } else { + /* + * Rebalance. 
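+ * e.g. nr_left == 7, nr_right == 2: target_left == 4, so
+ * shift(left, right, 3) moves the last three entries of left into
+ * right, leaving the siblings with 4 and 5 entries.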
+ */ + unsigned target_left = (nr_left + nr_right) / 2; + + shift(left, right, nr_left - target_left); + *((__le64 *) value_ptr(parent, l->index, sizeof(__le64))) = + cpu_to_le64(dm_block_location(l->block)); + *((__le64 *) value_ptr(parent, r->index, sizeof(__le64))) = + cpu_to_le64(dm_block_location(r->block)); + *key_ptr(parent, r->index) = right->keys[0]; + } +} + +static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info, + unsigned left_index) +{ + int r; + struct node *parent; + struct child left, right; + + parent = dm_block_data(shadow_current(s)); + + r = init_child(info, parent, left_index, &left); + if (r) + return r; + + r = init_child(info, parent, left_index + 1, &right); + if (r) { + exit_child(info, &left); + return r; + } + + __rebalance2(info, parent, &left, &right); + + r = exit_child(info, &left); + if (r) { + exit_child(info, &right); + return r; + } + + r = exit_child(info, &right); + if (r) + return r; + + return 0; +} + +static void __rebalance3(struct dm_btree_info *info, struct node *parent, + struct child *l, struct child *c, struct child *r) +{ + struct node *left = l->n; + struct node *center = c->n; + struct node *right = r->n; + + uint32_t nr_left = le32_to_cpu(left->header.nr_entries); + uint32_t nr_center = le32_to_cpu(center->header.nr_entries); + uint32_t nr_right = le32_to_cpu(right->header.nr_entries); + uint32_t max_entries = le32_to_cpu(left->header.max_entries); + + unsigned target; + + if (((nr_left + nr_center + nr_right) / 2) < merge_threshold(center)) { + /* + * Delete center node: + * + * We dump as many entries from center as possible into + * left, then the rest in right, then rebalance2. This + * wastes some cpu, but I want something simple atm. + */ + unsigned shift = min(max_entries - nr_left, nr_center); + + node_copy(left, center, -shift); + left->header.nr_entries = cpu_to_le32(nr_left + shift); + + if (shift != nr_center) { + shift = nr_center - shift; + node_shift(right, shift); + node_copy(center, right, shift); + right->header.nr_entries = cpu_to_le32(nr_right + shift); + } + + *((__le64 *) value_ptr(parent, l->index, sizeof(__le64))) = + cpu_to_le64(dm_block_location(l->block)); + *((__le64 *) value_ptr(parent, r->index, sizeof(__le64))) = + cpu_to_le64(dm_block_location(r->block)); + *key_ptr(parent, r->index) = right->keys[0]; + + delete_at(parent, c->index, sizeof(__le64)); + r->index--; + + dm_tm_dec(info->tm, dm_block_location(c->block)); + __rebalance2(info, parent, l, r); + + return; + } + + /* + * Rebalance + */ + target = (nr_left + nr_center + nr_right) / 3; + BUG_ON(target == nr_center); + + /* + * Adjust the left node + */ + shift(left, center, nr_left - target); + + /* + * Adjust the right node + */ + shift(center, right, target - nr_right); + + *((__le64 *) value_ptr(parent, l->index, sizeof(__le64))) = + cpu_to_le64(dm_block_location(l->block)); + *((__le64 *) value_ptr(parent, c->index, sizeof(__le64))) = + cpu_to_le64(dm_block_location(c->block)); + *((__le64 *) value_ptr(parent, r->index, sizeof(__le64))) = + cpu_to_le64(dm_block_location(r->block)); + + *key_ptr(parent, c->index) = center->keys[0]; + *key_ptr(parent, r->index) = right->keys[0]; +} + +static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info, + unsigned left_index) +{ + int r; + struct node *parent = dm_block_data(shadow_current(s)); + struct child left, center, right; + + /* + * FIXME: fill out an array? 
+	 */
+	r = init_child(info, parent, left_index, &left);
+	if (r)
+		return r;
+
+	r = init_child(info, parent, left_index + 1, &center);
+	if (r) {
+		exit_child(info, &left);
+		return r;
+	}
+
+	r = init_child(info, parent, left_index + 2, &right);
+	if (r) {
+		exit_child(info, &left);
+		exit_child(info, &center);
+		return r;
+	}
+
+	__rebalance3(info, parent, &left, &center, &right);
+
+	r = exit_child(info, &left);
+	if (r) {
+		exit_child(info, &center);
+		exit_child(info, &right);
+		return r;
+	}
+
+	r = exit_child(info, &center);
+	if (r) {
+		exit_child(info, &right);
+		return r;
+	}
+
+	r = exit_child(info, &right);
+	if (r)
+		return r;
+
+	return 0;
+}
+
+static int get_nr_entries(struct dm_transaction_manager *tm,
+			  dm_block_t b, uint32_t *result)
+{
+	int r;
+	struct dm_block *block;
+	struct node *n;
+
+	r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
+	if (r)
+		return r;
+
+	n = dm_block_data(block);
+	*result = le32_to_cpu(n->header.nr_entries);
+
+	return dm_tm_unlock(tm, block);
+}
+
+static int rebalance_children(struct shadow_spine *s,
+			      struct dm_btree_info *info, uint64_t key)
+{
+	int i, r, has_left_sibling, has_right_sibling;
+	uint32_t child_entries;
+	struct node *n;
+
+	n = dm_block_data(shadow_current(s));
+
+	if (le32_to_cpu(n->header.nr_entries) == 1) {
+		struct dm_block *child;
+		dm_block_t b = value64(n, 0);
+
+		r = dm_tm_read_lock(info->tm, b, &btree_node_validator, &child);
+		if (r)
+			return r;
+
+		memcpy(n, dm_block_data(child),
+		       dm_bm_block_size(dm_tm_get_bm(info->tm)));
+		r = dm_tm_unlock(info->tm, child);
+		dm_tm_dec(info->tm, dm_block_location(child));
+
+		return r;
+	}
+
+	i = lower_bound(n, key);
+	if (i < 0)
+		return -ENODATA;
+
+	r = get_nr_entries(info->tm, value64(n, i), &child_entries);
+	if (r)
+		return r;
+
+	if (child_entries > del_threshold(n))
+		return 0;
+
+	has_left_sibling = i > 0 ? 1 : 0;
+	has_right_sibling =
+		(i >= (le32_to_cpu(n->header.nr_entries) - 1)) ? 0 : 1;
+
+	if (!has_left_sibling)
+		r = rebalance2(s, info, i);
+
+	else if (!has_right_sibling)
+		r = rebalance2(s, info, i - 1);
+
+	else
+		r = rebalance3(s, info, i - 1);
+
+	return r;
+}
+
+static int do_leaf(struct node *n, uint64_t key, unsigned *index)
+{
+	int i = lower_bound(n, key);
+
+	if ((i < 0) ||
+	    (i >= le32_to_cpu(n->header.nr_entries)) ||
+	    (le64_to_cpu(n->keys[i]) != key))
+		return -ENODATA;
+
+	*index = i;
+
+	return 0;
+}
+
+/*
+ * Prepares for removal from one level of the hierarchy. The caller must
+ * actually call delete_at() to remove the entry at index.
+ */
+static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
+		      struct dm_btree_value_type *vt, dm_block_t root,
+		      uint64_t key, unsigned *index)
+{
+	int i = *index, inc, r;
+	struct node *n;
+
+	for (;;) {
+		r = shadow_step(s, root, vt, &inc);
+		if (r < 0)
+			break;
+
+		/*
+		 * We have to patch up the parent node; it's ugly, but I
+		 * don't see a way to do this automatically as part of
+		 * the spine op.
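+		 * The shadow may live at a new block location, so the
+		 * parent's value entry is rewritten below to point at
+		 * the new block.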
+ */ + if (shadow_has_parent(s)) { + __le64 location = cpu_to_le64(dm_block_location(shadow_current(s))); + memcpy(value_ptr(dm_block_data(shadow_parent(s)), i, sizeof(uint64_t)), + &location, sizeof(__le64)); + } + + n = dm_block_data(shadow_current(s)); + if (inc) + inc_children(info->tm, n, vt); + + if (le32_to_cpu(n->header.flags) & LEAF_NODE) + return do_leaf(n, key, index); + + r = rebalance_children(s, info, key); + if (r) + break; + + n = dm_block_data(shadow_current(s)); + if (le32_to_cpu(n->header.flags) & LEAF_NODE) + return do_leaf(n, key, index); + + i = lower_bound(n, key); + + /* + * We know the key is present, or else + * rebalance_children would have returned + * -ENODATA + */ + root = value64(n, i); + } + + return r; +} + +int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, + uint64_t *keys, dm_block_t *new_root) +{ + unsigned level, last_level = info->levels - 1; + int index = 0, r = 0; + struct shadow_spine spine; + struct node *n; + + init_shadow_spine(&spine, info); + for (level = 0; level < info->levels; level++) { + r = remove_raw(&spine, info, + (level == last_level ? + &info->value_type : &le64_type), + root, keys[level], (unsigned *)&index); + if (r < 0) + break; + + n = dm_block_data(shadow_current(&spine)); + if (level != last_level) { + root = value64(n, index); + continue; + } + + BUG_ON(index < 0 || index >= le32_to_cpu(n->header.nr_entries)); + + if (info->value_type.dec) + info->value_type.dec(info->value_type.context, + value_ptr(n, index, info->value_type.size)); + + delete_at(n, index, info->value_type.size); + + r = 0; + *new_root = shadow_root(&spine); + } + + exit_shadow_spine(&spine); + + return r; +} +EXPORT_SYMBOL_GPL(dm_btree_remove); diff --cc drivers/md/persistent-data/dm-btree.c index ca16d5b4799a,000000000000..408b762532a7 mode 100644,000000..100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@@ -1,861 -1,0 +1,861 @@@ +/* + * Copyright (C) 2011 Red Hat, Inc. All rights reserved. + * + * This file is released under the GPL. + */ + +#include "dm-btree-internal.h" +#include "dm-space-map.h" +#include "dm-transaction-manager.h" + - #include ++#include +#include + +#define DM_MSG_PREFIX "btree" + +/*---------------------------------------------------------------- + * Array manipulation + *--------------------------------------------------------------*/ +static void memcpy_disk(void *dest, const void *src, size_t len) + __dm_written_to_disk(src) +{ + memcpy(dest, src, len); + __dm_unbless_for_disk(src); +} + +static void array_insert(void *base, size_t elt_size, unsigned nr_elts, + unsigned index, void *elt) + __dm_written_to_disk(elt) +{ + if (index < nr_elts) + memmove(base + (elt_size * (index + 1)), + base + (elt_size * index), + (nr_elts - index) * elt_size); + + memcpy_disk(base + (elt_size * index), elt, elt_size); +} + +/*----------------------------------------------------------------*/ + +/* makes the assumption that no two keys are the same. */ +static int bsearch(struct node *n, uint64_t key, int want_hi) +{ + int lo = -1, hi = le32_to_cpu(n->header.nr_entries); + + while (hi - lo > 1) { + int mid = lo + ((hi - lo) / 2); + uint64_t mid_key = le64_to_cpu(n->keys[mid]); + + if (mid_key == key) + return mid; + + if (mid_key < key) + lo = mid; + else + hi = mid; + } + + return want_hi ? 
hi : lo;
+}
+
+int lower_bound(struct node *n, uint64_t key)
+{
+	return bsearch(n, key, 0);
+}
+
+void inc_children(struct dm_transaction_manager *tm, struct node *n,
+		  struct dm_btree_value_type *vt)
+{
+	unsigned i;
+	uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
+
+	if (le32_to_cpu(n->header.flags) & INTERNAL_NODE)
+		for (i = 0; i < nr_entries; i++)
+			dm_tm_inc(tm, value64(n, i));
+	else if (vt->inc)
+		for (i = 0; i < nr_entries; i++)
+			vt->inc(vt->context,
+				value_ptr(n, i, vt->size));
+}
+
+static int insert_at(size_t value_size, struct node *node, unsigned index,
+		     uint64_t key, void *value)
+	__dm_written_to_disk(value)
+{
+	uint32_t nr_entries = le32_to_cpu(node->header.nr_entries);
+	__le64 key_le = cpu_to_le64(key);
+
+	if (index > nr_entries ||
+	    index >= le32_to_cpu(node->header.max_entries)) {
+		DMERR("too many entries in btree node for insert");
+		__dm_unbless_for_disk(value);
+		return -ENOMEM;
+	}
+
+	__dm_bless_for_disk(&key_le);
+
+	array_insert(node->keys, sizeof(*node->keys), nr_entries, index, &key_le);
+	array_insert(value_base(node), value_size, nr_entries, index, value);
+	node->header.nr_entries = cpu_to_le32(nr_entries + 1);
+
+	return 0;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * We want 3n entries (for some n). This works more nicely for repeated
+ * insert/remove loops than (2n + 1).
+ */
+static uint32_t calc_max_entries(size_t value_size, size_t block_size)
+{
+	uint32_t total, n;
+	size_t elt_size = sizeof(uint64_t) + value_size; /* key + value */
+
+	block_size -= sizeof(struct node_header);
+	total = block_size / elt_size;
+	n = total / 3;		/* rounds down */
+
+	return 3 * n;
+}
+
+int dm_btree_create(struct dm_btree_info *info, dm_block_t *root)
+{
+	int r;
+	struct dm_block *b;
+	struct node *n;
+	size_t block_size;
+	uint32_t max_entries;
+
+	r = new_block(info, &b);
+	if (r < 0)
+		return r;
+
+	block_size = dm_bm_block_size(dm_tm_get_bm(info->tm));
+	max_entries = calc_max_entries(info->value_type.size, block_size);
+
+	n = dm_block_data(b);
+	memset(n, 0, block_size);
+	n->header.flags = cpu_to_le32(LEAF_NODE);
+	n->header.nr_entries = cpu_to_le32(0);
+	n->header.max_entries = cpu_to_le32(max_entries);
+	n->header.value_size = cpu_to_le32(info->value_type.size);
+
+	*root = dm_block_location(b);
+
+	return unlock_block(info, b);
+}
+EXPORT_SYMBOL_GPL(dm_btree_create);
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Deletion uses a recursive algorithm; since we have limited stack
+ * space, we explicitly manage our own stack on the heap.
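+ * Each frame holds a read-locked block and a cursor over its
+ * children; a frame is popped once every child has been visited.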
+ */ +#define MAX_SPINE_DEPTH 64 +struct frame { + struct dm_block *b; + struct node *n; + unsigned level; + unsigned nr_children; + unsigned current_child; +}; + +struct del_stack { + struct dm_transaction_manager *tm; + int top; + struct frame spine[MAX_SPINE_DEPTH]; +}; + +static int top_frame(struct del_stack *s, struct frame **f) +{ + if (s->top < 0) { + DMERR("btree deletion stack empty"); + return -EINVAL; + } + + *f = s->spine + s->top; + + return 0; +} + +static int unprocessed_frames(struct del_stack *s) +{ + return s->top >= 0; +} + +static int push_frame(struct del_stack *s, dm_block_t b, unsigned level) +{ + int r; + uint32_t ref_count; + + if (s->top >= MAX_SPINE_DEPTH - 1) { + DMERR("btree deletion stack out of memory"); + return -ENOMEM; + } + + r = dm_tm_ref(s->tm, b, &ref_count); + if (r) + return r; + + if (ref_count > 1) + /* + * This is a shared node, so we can just decrement its + * reference counter and leave the children. + */ + dm_tm_dec(s->tm, b); + + else { + struct frame *f = s->spine + ++s->top; + + r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b); + if (r) { + s->top--; + return r; + } + + f->n = dm_block_data(f->b); + f->level = level; + f->nr_children = le32_to_cpu(f->n->header.nr_entries); + f->current_child = 0; + } + + return 0; +} + +static void pop_frame(struct del_stack *s) +{ + struct frame *f = s->spine + s->top--; + + dm_tm_dec(s->tm, dm_block_location(f->b)); + dm_tm_unlock(s->tm, f->b); +} + +int dm_btree_destroy(struct dm_btree_info *info, dm_block_t root) +{ + int r; + struct del_stack *s; + + s = kmalloc(sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + s->tm = info->tm; + s->top = -1; + + r = push_frame(s, root, 1); + if (r) + goto out; + + while (unprocessed_frames(s)) { + uint32_t flags; + struct frame *f; + dm_block_t b; + + r = top_frame(s, &f); + if (r) + goto out; + + if (f->current_child >= f->nr_children) { + pop_frame(s); + continue; + } + + flags = le32_to_cpu(f->n->header.flags); + if (flags & INTERNAL_NODE) { + b = value64(f->n, f->current_child); + f->current_child++; + r = push_frame(s, b, f->level); + if (r) + goto out; + + } else if (f->level != (info->levels - 1)) { + b = value64(f->n, f->current_child); + f->current_child++; + r = push_frame(s, b, f->level + 1); + if (r) + goto out; + + } else { + if (info->value_type.dec) { + unsigned i; + + for (i = 0; i < f->nr_children; i++) + info->value_type.dec(info->value_type.context, + value_ptr(f->n, i, info->value_type.size)); + } + f->current_child = f->nr_children; + } + } + +out: + kfree(s); + return r; +} +EXPORT_SYMBOL_GPL(dm_btree_destroy); + +// FIXME Implement or remove this fn before final submission. 
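+// (Presumably meant to delete every key greater than *key; the body
+// below is only a stub that reports success.)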
+int dm_btree_delete_gt(struct dm_btree_info *info, dm_block_t root, uint64_t *key,
+		       dm_block_t *new_root)
+{
+	/* FIXME: implement */
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dm_btree_delete_gt);
+
+/*----------------------------------------------------------------*/
+
+static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
+			    int (*search_fn)(struct node *, uint64_t),
+			    uint64_t *result_key, void *v, size_t value_size)
+{
+	int i, r;
+	uint32_t flags, nr_entries;
+
+	do {
+		r = ro_step(s, block);
+		if (r < 0)
+			return r;
+
+		i = search_fn(ro_node(s), key);
+
+		flags = le32_to_cpu(ro_node(s)->header.flags);
+		nr_entries = le32_to_cpu(ro_node(s)->header.nr_entries);
+		if (i < 0 || i >= nr_entries)
+			return -ENODATA;
+
+		if (flags & INTERNAL_NODE)
+			block = value64(ro_node(s), i);
+
+	} while (!(flags & LEAF_NODE));
+
+	*result_key = le64_to_cpu(ro_node(s)->keys[i]);
+	memcpy(v, value_ptr(ro_node(s), i, value_size), value_size);
+
+	return 0;
+}
+
+int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
+		    uint64_t *keys, void *value_le)
+{
+	unsigned level, last_level = info->levels - 1;
+	int r = -ENODATA;
+	uint64_t rkey;
+	__le64 internal_value_le;
+	struct ro_spine spine;
+
+	init_ro_spine(&spine, info);
+	for (level = 0; level < info->levels; level++) {
+		size_t size;
+		void *value_p;
+
+		if (level == last_level) {
+			value_p = value_le;
+			size = info->value_type.size;
+
+		} else {
+			value_p = &internal_value_le;
+			size = sizeof(uint64_t);
+		}
+
+		r = btree_lookup_raw(&spine, root, keys[level],
+				     lower_bound, &rkey,
+				     value_p, size);
+
+		if (!r) {
+			if (rkey != keys[level]) {
+				exit_ro_spine(&spine);
+				return -ENODATA;
+			}
+		} else {
+			exit_ro_spine(&spine);
+			return r;
+		}
+
+		root = le64_to_cpu(internal_value_le);
+	}
+	exit_ro_spine(&spine);
+
+	return r;
+}
+EXPORT_SYMBOL_GPL(dm_btree_lookup);
+
+/*
+ * Splits a node by creating a sibling node and shifting half the node's
+ * contents across. Assumes there is a parent node, and it has room for
+ * another child.
+ *
+ * Before:
+ *	  +--------+
+ *	  | Parent |
+ *	  +--------+
+ *	     |
+ *	     v
+ *	+----------+
+ *	| A ++++++ |
+ *	+----------+
+ *
+ *
+ * After:
+ *		+--------+
+ *		| Parent |
+ *		+--------+
+ *		  |    |
+ *		  v    +------+
+ *	    +---------+       |
+ *	    | A* +++  |       v
+ *	    +---------+  +-------+
+ *			 | B +++ |
+ *			 +-------+
+ *
+ * Where A* is a shadow of A.
+ */
+static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
+			       unsigned parent_index, uint64_t key)
+{
+	int r;
+	size_t size;
+	unsigned nr_left, nr_right;
+	struct dm_block *left, *right, *parent;
+	struct node *ln, *rn, *pn;
+	__le64 location;
+
+	left = shadow_current(s);
+
+	r = new_block(s->info, &right);
+	if (r < 0)
+		return r;
+
+	ln = dm_block_data(left);
+	rn = dm_block_data(right);
+
+	nr_left = le32_to_cpu(ln->header.nr_entries) / 2;
+	nr_right = le32_to_cpu(ln->header.nr_entries) - nr_left;
+
+	ln->header.nr_entries = cpu_to_le32(nr_left);
+
+	rn->header.flags = ln->header.flags;
+	rn->header.nr_entries = cpu_to_le32(nr_right);
+	rn->header.max_entries = ln->header.max_entries;
+	rn->header.value_size = ln->header.value_size;
+	memcpy(rn->keys, ln->keys + nr_left, nr_right * sizeof(rn->keys[0]));
+
+	size = le32_to_cpu(ln->header.flags) & INTERNAL_NODE ?
+ sizeof(uint64_t) : s->info->value_type.size; + memcpy(value_ptr(rn, 0, size), value_ptr(ln, nr_left, size), + size * nr_right); + + /* + * Patch up the parent + */ + parent = shadow_parent(s); + + pn = dm_block_data(parent); + location = cpu_to_le64(dm_block_location(left)); + __dm_bless_for_disk(&location); + memcpy_disk(value_ptr(pn, parent_index, sizeof(__le64)), + &location, sizeof(__le64)); + + location = cpu_to_le64(dm_block_location(right)); + __dm_bless_for_disk(&location); + + r = insert_at(sizeof(__le64), pn, parent_index + 1, + le64_to_cpu(rn->keys[0]), &location); + if (r) + return r; + + if (key < le64_to_cpu(rn->keys[0])) { + unlock_block(s->info, right); + s->nodes[1] = left; + } else { + unlock_block(s->info, left); + s->nodes[1] = right; + } + + return 0; +} + +/* + * Splits a node by creating two new children beneath the given node. + * + * Before: + * +----------+ + * | A ++++++ | + * +----------+ + * + * + * After: + * +------------+ + * | A (shadow) | + * +------------+ + * | | + * +------+ +----+ + * | | + * v v + * +-------+ +-------+ + * | B +++ | | C +++ | + * +-------+ +-------+ + */ +static int btree_split_beneath(struct shadow_spine *s, uint64_t key) +{ + int r; + size_t size; + unsigned nr_left, nr_right; + struct dm_block *left, *right, *new_parent; + struct node *pn, *ln, *rn; + __le64 val; + + new_parent = shadow_current(s); + + r = new_block(s->info, &left); + if (r < 0) + return r; + + r = new_block(s->info, &right); + if (r < 0) { + /* FIXME: put left */ + return r; + } + + pn = dm_block_data(new_parent); + ln = dm_block_data(left); + rn = dm_block_data(right); + + nr_left = le32_to_cpu(pn->header.nr_entries) / 2; + nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left; + + ln->header.flags = pn->header.flags; + ln->header.nr_entries = cpu_to_le32(nr_left); + ln->header.max_entries = pn->header.max_entries; + ln->header.value_size = pn->header.value_size; + + rn->header.flags = pn->header.flags; + rn->header.nr_entries = cpu_to_le32(nr_right); + rn->header.max_entries = pn->header.max_entries; + rn->header.value_size = pn->header.value_size; + + memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0])); + memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0])); + + size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ? + sizeof(__le64) : s->info->value_type.size; + memcpy(value_ptr(ln, 0, size), value_ptr(pn, 0, size), nr_left * size); + memcpy(value_ptr(rn, 0, size), value_ptr(pn, nr_left, size), + nr_right * size); + + /* new_parent should just point to l and r now */ + pn->header.flags = cpu_to_le32(INTERNAL_NODE); + pn->header.nr_entries = cpu_to_le32(2); + pn->header.max_entries = cpu_to_le32( + calc_max_entries(sizeof(__le64), + dm_bm_block_size( + dm_tm_get_bm(s->info->tm)))); + pn->header.value_size = cpu_to_le32(sizeof(__le64)); + + val = cpu_to_le64(dm_block_location(left)); + __dm_bless_for_disk(&val); + pn->keys[0] = ln->keys[0]; + memcpy_disk(value_ptr(pn, 0, sizeof(__le64)), &val, sizeof(__le64)); + + val = cpu_to_le64(dm_block_location(right)); + __dm_bless_for_disk(&val); + pn->keys[1] = rn->keys[0]; + memcpy_disk(value_ptr(pn, 1, sizeof(__le64)), &val, sizeof(__le64)); + + /* + * rejig the spine. 
This is ugly, since it knows too
+	 * much about the spine
+	 */
+	if (s->nodes[0] != new_parent) {
+		unlock_block(s->info, s->nodes[0]);
+		s->nodes[0] = new_parent;
+	}
+	if (key < le64_to_cpu(rn->keys[0])) {
+		unlock_block(s->info, right);
+		s->nodes[1] = left;
+	} else {
+		unlock_block(s->info, left);
+		s->nodes[1] = right;
+	}
+	s->count = 2;
+
+	return 0;
+}
+
+static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
+			    struct dm_btree_value_type *vt,
+			    uint64_t key, unsigned *index)
+{
+	int r, i = *index, inc, top = 1;
+	struct node *node;
+
+	for (;;) {
+		r = shadow_step(s, root, vt, &inc);
+		if (r < 0)
+			return r;
+
+		node = dm_block_data(shadow_current(s));
+		if (inc)
+			inc_children(s->info->tm, node, vt);
+
+		/*
+		 * We have to patch up the parent node; it's ugly, but I
+		 * don't see a way to do this automatically as part of
+		 * the spine op.
+		 */
+		if (shadow_has_parent(s) && i >= 0) { /* FIXME: second clause is unnecessary. */
+			__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
+
+			__dm_bless_for_disk(&location);
+			memcpy_disk(value_ptr(dm_block_data(shadow_parent(s)), i, sizeof(uint64_t)),
+				    &location, sizeof(__le64));
+		}
+
+		node = dm_block_data(shadow_current(s));
+
+		if (node->header.nr_entries == node->header.max_entries) {
+			if (top)
+				r = btree_split_beneath(s, key);
+			else
+				r = btree_split_sibling(s, root, i, key);
+
+			if (r < 0)
+				return r;
+		}
+
+		node = dm_block_data(shadow_current(s));
+
+		i = lower_bound(node, key);
+
+		if (le32_to_cpu(node->header.flags) & LEAF_NODE)
+			break;
+
+		if (i < 0) {
+			/* change the bounds on the lowest key */
+			node->keys[0] = cpu_to_le64(key);
+			i = 0;
+		}
+
+		root = value64(node, i);
+		top = 0;
+	}
+
+	if (i < 0 || le64_to_cpu(node->keys[i]) != key)
+		i++;
+
+	/* we're about to overwrite this value, so undo the increment for it */
+	/* FIXME: shame that inc information is leaking outside the spine.
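+	 * (vt->dec is only needed when the node was freshly shadowed,
+	 * i.e. when inc_children() above bumped the reference on the
+	 * value we are about to replace.)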
+	 * Plus inc is just plain wrong in the event of a split */
+	if (le64_to_cpu(node->keys[i]) == key && inc)
+		if (vt->dec)
+			vt->dec(vt->context, value_ptr(node, i, vt->size));
+
+	*index = i;
+	return 0;
+}
+
+static int insert(struct dm_btree_info *info, dm_block_t root,
+		  uint64_t *keys, void *value, dm_block_t *new_root,
+		  int *inserted)
+	__dm_written_to_disk(value)
+{
+	int r, need_insert;
+	unsigned level, index = -1, last_level = info->levels - 1;
+	dm_block_t block = root;
+	struct shadow_spine spine;
+	struct node *n;
+	struct dm_btree_value_type le64_type;
+
+	le64_type.context = NULL;
+	le64_type.size = sizeof(__le64);
+	le64_type.inc = NULL;
+	le64_type.dec = NULL;
+	le64_type.equal = NULL;
+
+	init_shadow_spine(&spine, info);
+
+	for (level = 0; level < (info->levels - 1); level++) {
+		r = btree_insert_raw(&spine, block, &le64_type, keys[level], &index);
+		if (r < 0)
+			goto bad;
+
+		n = dm_block_data(shadow_current(&spine));
+		need_insert = ((index >= le32_to_cpu(n->header.nr_entries)) ||
+			       (le64_to_cpu(n->keys[index]) != keys[level]));
+
+		if (need_insert) {
+			dm_block_t new_tree;
+			__le64 new_le;
+
+			r = dm_btree_create(info, &new_tree);
+			if (r < 0)
+				goto bad;
+
+			new_le = cpu_to_le64(new_tree);
+			__dm_bless_for_disk(&new_le);
+
+			r = insert_at(sizeof(uint64_t), n, index,
+				      keys[level], &new_le);
+			if (r)
+				goto bad;
+		}
+
+		if (level < last_level)
+			block = value64(n, index);
+	}
+
+	r = btree_insert_raw(&spine, block, &info->value_type,
+			     keys[level], &index);
+	if (r < 0)
+		goto bad;
+
+	n = dm_block_data(shadow_current(&spine));
+	need_insert = ((index >= le32_to_cpu(n->header.nr_entries)) ||
+		       (le64_to_cpu(n->keys[index]) != keys[level]));
+
+	if (need_insert) {
+		if (inserted)
+			*inserted = 1;
+
+		r = insert_at(info->value_type.size, n, index,
+			      keys[level], value);
+		if (r)
+			goto bad_unblessed;
+	} else {
+		if (inserted)
+			*inserted = 0;
+
+		if (info->value_type.dec &&
+		    (!info->value_type.equal ||
+		     !info->value_type.equal(
+			     info->value_type.context,
+			     value_ptr(n, index, info->value_type.size),
+			     value))) {
+			info->value_type.dec(info->value_type.context,
+					     value_ptr(n, index, info->value_type.size));
+		}
+		memcpy_disk(value_ptr(n, index, info->value_type.size),
+			    value, info->value_type.size);
+	}
+
+	*new_root = shadow_root(&spine);
+	exit_shadow_spine(&spine);
+
+	return 0;
+
+bad:
+	__dm_unbless_for_disk(value);
+bad_unblessed:
+	exit_shadow_spine(&spine);
+	return r;
+}
+
+int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
+		    uint64_t *keys, void *value, dm_block_t *new_root)
+	__dm_written_to_disk(value)
+{
+	return insert(info, root, keys, value, new_root, NULL);
+}
+EXPORT_SYMBOL_GPL(dm_btree_insert);
+
+int dm_btree_insert_notify(struct dm_btree_info *info, dm_block_t root,
+			   uint64_t *keys, void *value, dm_block_t *new_root,
+			   int *inserted)
+	__dm_written_to_disk(value)
+{
+	return insert(info, root, keys, value, new_root, inserted);
+}
+EXPORT_SYMBOL_GPL(dm_btree_insert_notify);
+
+/*----------------------------------------------------------------*/
+
+int dm_btree_clone(struct dm_btree_info *info, dm_block_t root,
+		   dm_block_t *clone)
+{
+	int r;
+	struct dm_block *b, *orig_b;
+	struct node *b_node, *orig_node;
+
+	/* Copy the root node */
+	r = new_block(info, &b);
+	if (r < 0)
+		return r;
+
+	r = dm_tm_read_lock(info->tm, root, &btree_node_validator, &orig_b);
+	if (r < 0) {
+		dm_block_t location = dm_block_location(b);
+
+		unlock_block(info, b);
+		dm_tm_dec(info->tm, location);
+		return r;
+	}
+
+	*clone = dm_block_location(b);
+	b_node =
dm_block_data(b); + orig_node = dm_block_data(orig_b); + + memcpy(b_node, orig_node, + dm_bm_block_size(dm_tm_get_bm(info->tm))); + dm_tm_unlock(info->tm, orig_b); + inc_children(info->tm, b_node, &info->value_type); + dm_tm_unlock(info->tm, b); + + return 0; +} +EXPORT_SYMBOL_GPL(dm_btree_clone); + +/*----------------------------------------------------------------*/ + +static int find_highest_key(struct ro_spine *s, dm_block_t block, + uint64_t *result_key, dm_block_t *next_block) +{ + int i, r; + uint32_t flags; + + do { + r = ro_step(s, block); + if (r < 0) + return r; + + flags = le32_to_cpu(ro_node(s)->header.flags); + i = le32_to_cpu(ro_node(s)->header.nr_entries); + if (!i) + return -ENODATA; + else + i--; + + *result_key = le64_to_cpu(ro_node(s)->keys[i]); + if (next_block || flags & INTERNAL_NODE) + block = value64(ro_node(s), i); + + } while (flags & INTERNAL_NODE); + + if (next_block) + *next_block = block; + return 0; +} + +int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root, + uint64_t *result_keys) +{ + int r = 0, count = 0, level; + struct ro_spine spine; + + init_ro_spine(&spine, info); + for (level = 0; level < info->levels; level++) { + r = find_highest_key(&spine, root, result_keys + level, + level == info->levels - 1 ? NULL : &root); + if (r == -ENODATA) { + r = 0; + break; + + } else if (r) + break; + + count++; + } + exit_ro_spine(&spine); + + return r ? r : count; +} +EXPORT_SYMBOL_GPL(dm_btree_find_highest_key); diff --cc drivers/md/persistent-data/dm-space-map-disk.c index 6229a4e68f88,000000000000..e6b9d67270ee mode 100644,000000..100644 --- a/drivers/md/persistent-data/dm-space-map-disk.c +++ b/drivers/md/persistent-data/dm-space-map-disk.c @@@ -1,663 -1,0 +1,663 @@@ +/* + * Copyright (C) 2011 Red Hat, Inc. All rights reserved. + * + * This file is released under the GPL. 
+ */ + +#include "dm-space-map-common.h" +#include "dm-space-map-disk.h" +#include "dm-space-map.h" +#include "dm-transaction-manager.h" + +#include +#include +#include - #include ++#include +#include + +#define DM_MSG_PREFIX "space map disk" + +/* + * Bitmap validator + */ +static void bitmap_prepare_for_write(struct dm_block_validator *v, + struct dm_block *b, + size_t block_size) +{ + struct disk_bitmap_header *disk_header = dm_block_data(b); + + disk_header->blocknr = cpu_to_le64(dm_block_location(b)); + disk_header->csum = cpu_to_le32(dm_block_csum_data(&disk_header->not_used, block_size - sizeof(__le32))); +} + +static int bitmap_check(struct dm_block_validator *v, + struct dm_block *b, + size_t block_size) +{ + struct disk_bitmap_header *disk_header = dm_block_data(b); + __le32 csum_disk; + + if (dm_block_location(b) != le64_to_cpu(disk_header->blocknr)) { + DMERR("bitmap check failed blocknr %llu wanted %llu", + le64_to_cpu(disk_header->blocknr), dm_block_location(b)); + return -ENOTBLK; + } + + csum_disk = cpu_to_le32(dm_block_csum_data(&disk_header->not_used, block_size - sizeof(__le32))); + if (csum_disk != disk_header->csum) { + DMERR("bitmap check failed csum %u wanted %u", + le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum)); + return -EILSEQ; + } + + return 0; +} + +struct dm_block_validator dm_sm_bitmap_validator = { + .name = "sm_bitmap", + .prepare_for_write = bitmap_prepare_for_write, + .check = bitmap_check +}; + +/*----------------------------------------------------------------*/ + +#define ENTRIES_PER_WORD 32 +#define ENTRIES_SHIFT 5 + +void *dm_bitmap_data(struct dm_block *b) +{ + return dm_block_data(b) + sizeof(struct disk_bitmap_header); +} + +#define WORD_MASK_LOW 0x5555555555555555ULL +#define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL +#define WORD_MASK_ALL 0xFFFFFFFFFFFFFFFFULL + +static unsigned bitmap_word_used(void *addr, unsigned b) +{ + __le64 *words_le = addr; + __le64 *w_le = words_le + (b >> ENTRIES_SHIFT); + + uint64_t bits = le64_to_cpu(*w_le); + + return ((bits & WORD_MASK_LOW) == WORD_MASK_LOW || + (bits & WORD_MASK_HIGH) == WORD_MASK_HIGH || + (bits & WORD_MASK_ALL) == WORD_MASK_ALL); +} + +unsigned sm_lookup_bitmap(void *addr, unsigned b) +{ + __le64 *words_le = addr; + __le64 *w_le = words_le + (b >> ENTRIES_SHIFT); + + b = (b & (ENTRIES_PER_WORD - 1)) << 1; + + return (!!test_bit_le(b, (void *) w_le) << 1) | + (!!test_bit_le(b + 1, (void *) w_le)); +} + +void sm_set_bitmap(void *addr, unsigned b, unsigned val) +{ + __le64 *words_le = addr; + __le64 *w_le = words_le + (b >> ENTRIES_SHIFT); + + b = (b & (ENTRIES_PER_WORD - 1)) << 1; + + if (val & 2) + __set_bit_le(b, (void *) w_le); + else + __clear_bit_le(b, (void *) w_le); + + if (val & 1) + __set_bit_le(b + 1, (void *) w_le); + else + __clear_bit_le(b + 1, (void *) w_le); +} + +int sm_find_free(void *addr, unsigned begin, unsigned end, + unsigned *result) +{ + while (begin < end) { + if (!(begin & (ENTRIES_PER_WORD - 1)) && + bitmap_word_used(addr, begin)) { + begin += ENTRIES_PER_WORD; + continue; + } + + if (!sm_lookup_bitmap(addr, begin)) { + *result = begin; + return 0; + } + + begin++; + } + + return -ENOSPC; +} + +static int disk_ll_init(struct ll_disk *io, struct dm_transaction_manager *tm) +{ + io->tm = tm; + io->bitmap_info.tm = tm; + io->bitmap_info.levels = 1; + + /* + * Because the new bitmap blocks are created via a shadow + * operation, the old entry has already had its reference count + * decremented and we don't need the btree to do any bookkeeping. 
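+ * That is why the inc, dec and equal hooks below are left NULL.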
+ */ + io->bitmap_info.value_type.size = sizeof(struct disk_index_entry); + io->bitmap_info.value_type.inc = NULL; + io->bitmap_info.value_type.dec = NULL; + io->bitmap_info.value_type.equal = NULL; + + io->ref_count_info.tm = tm; + io->ref_count_info.levels = 1; + io->ref_count_info.value_type.size = sizeof(uint32_t); + io->ref_count_info.value_type.inc = NULL; + io->ref_count_info.value_type.dec = NULL; + io->ref_count_info.value_type.equal = NULL; + + io->block_size = dm_bm_block_size(dm_tm_get_bm(tm)); + + if (io->block_size > (1 << 30)) { + DMERR("block size too big to hold bitmaps"); + return -EINVAL; + } + + io->entries_per_block = (io->block_size - sizeof(struct disk_bitmap_header)) * + ENTRIES_PER_BYTE; + io->nr_blocks = 0; + io->bitmap_root = 0; + io->ref_count_root = 0; + + return 0; +} + +static int disk_ll_new(struct ll_disk *io, struct dm_transaction_manager *tm) +{ + int r; + + r = disk_ll_init(io, tm); + if (r < 0) + return r; + + io->nr_blocks = 0; + io->nr_allocated = 0; + r = dm_btree_create(&io->bitmap_info, &io->bitmap_root); + if (r < 0) + return r; + + r = dm_btree_create(&io->ref_count_info, &io->ref_count_root); + if (r < 0) { + dm_btree_destroy(&io->bitmap_info, io->bitmap_root); + return r; + } + + return 0; +} + +static int disk_ll_extend(struct ll_disk *io, dm_block_t extra_blocks) +{ + int r; + dm_block_t i, nr_blocks; + unsigned old_blocks, blocks; + + nr_blocks = io->nr_blocks + extra_blocks; + old_blocks = dm_sector_div_up(io->nr_blocks, io->entries_per_block); + blocks = dm_sector_div_up(nr_blocks, io->entries_per_block); + + for (i = old_blocks; i < blocks; i++) { + struct dm_block *b; + struct disk_index_entry idx; + + r = dm_tm_new_block(io->tm, &dm_sm_bitmap_validator, &b); + if (r < 0) + return r; + idx.blocknr = cpu_to_le64(dm_block_location(b)); + + r = dm_tm_unlock(io->tm, b); + if (r < 0) + return r; + + idx.nr_free = cpu_to_le32(io->entries_per_block); + idx.none_free_before = 0; + __dm_bless_for_disk(&idx); + + r = dm_btree_insert(&io->bitmap_info, io->bitmap_root, + &i, &idx, &io->bitmap_root); + if (r < 0) + return r; + } + + io->nr_blocks = nr_blocks; + return 0; +} + +static int disk_ll_open(struct ll_disk *ll, struct dm_transaction_manager *tm, + void *root_le, size_t len) +{ + int r; + struct disk_sm_root *smr = root_le; + + if (len < sizeof(struct disk_sm_root)) { + DMERR("sm_disk root too small"); + return -ENOMEM; + } + + r = disk_ll_init(ll, tm); + if (r < 0) + return r; + + ll->nr_blocks = le64_to_cpu(smr->nr_blocks); + ll->nr_allocated = le64_to_cpu(smr->nr_allocated); + ll->bitmap_root = le64_to_cpu(smr->bitmap_root); + ll->ref_count_root = le64_to_cpu(smr->ref_count_root); + + return 0; +} + +static int disk_ll_lookup_bitmap(struct ll_disk *io, dm_block_t b, uint32_t *result) +{ + int r; + dm_block_t index = b; + struct disk_index_entry ie_disk; + struct dm_block *blk; + + do_div(index, io->entries_per_block); + r = dm_btree_lookup(&io->bitmap_info, io->bitmap_root, &index, &ie_disk); + if (r < 0) + return r; + + r = dm_tm_read_lock(io->tm, le64_to_cpu(ie_disk.blocknr), &dm_sm_bitmap_validator, &blk); + if (r < 0) + return r; + + *result = sm_lookup_bitmap(dm_bitmap_data(blk), do_div(b, io->entries_per_block)); + + return dm_tm_unlock(io->tm, blk); +} + +static int disk_ll_lookup(struct ll_disk *io, dm_block_t b, uint32_t *result) +{ + __le32 rc_le; + int r = disk_ll_lookup_bitmap(io, b, result); + + if (r) + return r; + + if (*result != 3) + return r; + + r = dm_btree_lookup(&io->ref_count_info, io->ref_count_root, &b, &rc_le); + 
if (r < 0) + return r; + + *result = le32_to_cpu(rc_le); + + return r; +} + +static int disk_ll_find_free_block(struct ll_disk *io, dm_block_t begin, + dm_block_t end, dm_block_t *result) +{ + int r; + struct disk_index_entry ie_disk; + dm_block_t i, index_begin = begin; + dm_block_t index_end = dm_sector_div_up(end, io->entries_per_block); + + begin = do_div(index_begin, io->entries_per_block); + + for (i = index_begin; i < index_end; i++, begin = 0) { + struct dm_block *blk; + unsigned position; + uint32_t bit_end; + + r = dm_btree_lookup(&io->bitmap_info, io->bitmap_root, &i, &ie_disk); + if (r < 0) + return r; + + if (le32_to_cpu(ie_disk.nr_free) <= 0) + continue; + + r = dm_tm_read_lock(io->tm, le64_to_cpu(ie_disk.blocknr), + &dm_sm_bitmap_validator, &blk); + if (r < 0) + return r; + + bit_end = (i == index_end - 1) ? + do_div(end, io->entries_per_block) : io->entries_per_block; + + r = sm_find_free(dm_bitmap_data(blk), + max((unsigned)begin, (unsigned)le32_to_cpu(ie_disk.none_free_before)), + bit_end, &position); + if (r < 0) { + dm_tm_unlock(io->tm, blk); + continue; + } + + r = dm_tm_unlock(io->tm, blk); + if (r < 0) + return r; + + *result = i * io->entries_per_block + (dm_block_t) position; + + return 0; + } + + return -ENOSPC; +} + +static int disk_ll_insert(struct ll_disk *io, dm_block_t b, uint32_t ref_count) +{ + int r; + uint32_t bit, old; + struct dm_block *nb; + dm_block_t index = b; + struct disk_index_entry ie_disk; + void *bm_le; + int inc; + + do_div(index, io->entries_per_block); + r = dm_btree_lookup(&io->bitmap_info, io->bitmap_root, &index, &ie_disk); + if (r < 0) + return r; + + r = dm_tm_shadow_block(io->tm, le64_to_cpu(ie_disk.blocknr), + &dm_sm_bitmap_validator, &nb, &inc); + if (r < 0) { + DMERR("dm_tm_shadow_block() failed"); + return r; + } + ie_disk.blocknr = cpu_to_le64(dm_block_location(nb)); + + bm_le = dm_bitmap_data(nb); + bit = do_div(b, io->entries_per_block); + old = sm_lookup_bitmap(bm_le, bit); + + if (ref_count <= 2) { + sm_set_bitmap(bm_le, bit, ref_count); + + if (old > 2) { + r = dm_btree_remove(&io->ref_count_info, io->ref_count_root, + &b, &io->ref_count_root); + if (r) { + dm_tm_unlock(io->tm, nb); + return r; + } + } + } else { + __le32 rc_le = cpu_to_le32(ref_count); + + __dm_bless_for_disk(&rc_le); + + sm_set_bitmap(bm_le, bit, 3); + r = dm_btree_insert(&io->ref_count_info, io->ref_count_root, + &b, &rc_le, &io->ref_count_root); + if (r < 0) { + dm_tm_unlock(io->tm, nb); + DMERR("ref count insert failed"); + return r; + } + } + + r = dm_tm_unlock(io->tm, nb); + if (r < 0) + return r; + + if (ref_count && !old) { + io->nr_allocated++; + ie_disk.nr_free = cpu_to_le32(le32_to_cpu(ie_disk.nr_free) - 1); + if (le32_to_cpu(ie_disk.none_free_before) == b) + ie_disk.none_free_before = cpu_to_le32(b + 1); + + } else if (old && !ref_count) { + io->nr_allocated--; + ie_disk.nr_free = cpu_to_le32(le32_to_cpu(ie_disk.nr_free) + 1); + ie_disk.none_free_before = cpu_to_le32(min((dm_block_t) le32_to_cpu(ie_disk.none_free_before), b)); + } + + __dm_bless_for_disk(&ie_disk); + + r = dm_btree_insert(&io->bitmap_info, io->bitmap_root, &index, &ie_disk, &io->bitmap_root); + if (r < 0) + return r; + + return 0; +} + +static int disk_ll_inc(struct ll_disk *ll, dm_block_t b) +{ + int r; + uint32_t rc; + + r = disk_ll_lookup(ll, b, &rc); + if (r) + return r; + + return disk_ll_insert(ll, b, rc + 1); +} + +static int disk_ll_dec(struct ll_disk *ll, dm_block_t b) +{ + int r; + uint32_t rc; + + r = disk_ll_lookup(ll, b, &rc); + if (r) + return r; + + if (!rc) + 
return -EINVAL;
+
+	return disk_ll_insert(ll, b, rc - 1);
+}
+
+/*--------------------------------------------------------------*/
+
+/*
+ * Space map interface.
+ */
+struct sm_disk {
+	struct dm_space_map sm;
+
+	struct ll_disk ll;
+};
+
+static void sm_disk_destroy(struct dm_space_map *sm)
+{
+	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+	kfree(smd);
+}
+
+static int sm_disk_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+{
+	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+	return disk_ll_extend(&smd->ll, extra_blocks);
+}
+
+static int sm_disk_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
+{
+	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+	*count = smd->ll.nr_blocks;
+
+	return 0;
+}
+
+static int sm_disk_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
+{
+	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+	*count = smd->ll.nr_blocks - smd->ll.nr_allocated;
+
+	return 0;
+}
+
+static int sm_disk_get_count(struct dm_space_map *sm, dm_block_t b,
+			     uint32_t *result)
+{
+	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+	return disk_ll_lookup(&smd->ll, b, result);
+}
+
+static int sm_disk_count_is_more_than_one(struct dm_space_map *sm, dm_block_t b,
+					  int *result)
+{
+	int r;
+	uint32_t count;
+
+	r = sm_disk_get_count(sm, b, &count);
+	if (r)
+		return r;
+
+	*result = count > 1;
+
+	return 0;
+}
+
+static int sm_disk_set_count(struct dm_space_map *sm, dm_block_t b,
+			     uint32_t count)
+{
+	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+	return disk_ll_insert(&smd->ll, b, count);
+}
+
+static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b)
+{
+	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+	return disk_ll_inc(&smd->ll, b);
+}
+
+static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
+{
+	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+	return disk_ll_dec(&smd->ll, b);
+}
+
+static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
+{
+	int r;
+	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
+	/*
+	 * FIXME: We should start the search where we left off.
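+	 * Scanning from block 0 on every allocation makes new_block
+	 * O(n) once the front of the device fills up; caching the last
+	 * allocated position would keep sequential allocation cheap.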
+ */ + r = disk_ll_find_free_block(&smd->ll, 0, smd->ll.nr_blocks, b); + if (r) + return r; + + return disk_ll_inc(&smd->ll, *b); +} + +static int sm_disk_commit(struct dm_space_map *sm) +{ + return 0; +} + +static int sm_disk_root_size(struct dm_space_map *sm, size_t *result) +{ + *result = sizeof(struct disk_sm_root); + + return 0; +} + +static int sm_disk_copy_root(struct dm_space_map *sm, void *where_le, size_t max) +{ + struct sm_disk *smd = container_of(sm, struct sm_disk, sm); + struct disk_sm_root root_le; + + root_le.nr_blocks = cpu_to_le64(smd->ll.nr_blocks); + root_le.nr_allocated = cpu_to_le64(smd->ll.nr_allocated); + root_le.bitmap_root = cpu_to_le64(smd->ll.bitmap_root); + root_le.ref_count_root = cpu_to_le64(smd->ll.ref_count_root); + + if (max < sizeof(root_le)) + return -ENOSPC; + + memcpy(where_le, &root_le, sizeof(root_le)); + + return 0; +} + +/*----------------------------------------------------------------*/ + +static struct dm_space_map ops = { + .destroy = sm_disk_destroy, + .extend = sm_disk_extend, + .get_nr_blocks = sm_disk_get_nr_blocks, + .get_nr_free = sm_disk_get_nr_free, + .get_count = sm_disk_get_count, + .count_is_more_than_one = sm_disk_count_is_more_than_one, + .set_count = sm_disk_set_count, + .inc_block = sm_disk_inc_block, + .dec_block = sm_disk_dec_block, + .new_block = sm_disk_new_block, + .commit = sm_disk_commit, + .root_size = sm_disk_root_size, + .copy_root = sm_disk_copy_root +}; + +struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm, + dm_block_t nr_blocks) +{ + int r; + struct sm_disk *smd; + + smd = kmalloc(sizeof(*smd), GFP_KERNEL); + if (!smd) + return ERR_PTR(-ENOMEM); + + memcpy(&smd->sm, &ops, sizeof(smd->sm)); + + r = disk_ll_new(&smd->ll, tm); + if (r) + goto bad; + + r = disk_ll_extend(&smd->ll, nr_blocks); + if (r) + goto bad; + + r = sm_disk_commit(&smd->sm); + if (r) + goto bad; + + return &smd->sm; + +bad: + kfree(smd); + return ERR_PTR(r); +} +EXPORT_SYMBOL_GPL(dm_sm_disk_create); + +struct dm_space_map *dm_sm_disk_open(struct dm_transaction_manager *tm, + void *root_le, size_t len) +{ + int r; + struct sm_disk *smd; + + smd = kmalloc(sizeof(*smd), GFP_KERNEL); + if (!smd) + return ERR_PTR(-ENOMEM); + + memcpy(&smd->sm, &ops, sizeof(smd->sm)); + + r = disk_ll_open(&smd->ll, tm, root_le, len); + if (r) + goto bad; + + r = sm_disk_commit(&smd->sm); + if (r) + goto bad; + + return &smd->sm; + +bad: + kfree(smd); + return ERR_PTR(r); +} +EXPORT_SYMBOL_GPL(dm_sm_disk_open); diff --cc drivers/md/persistent-data/dm-transaction-manager.c index be594dd0a12d,000000000000..e58e89ecfd67 mode 100644,000000..100644 --- a/drivers/md/persistent-data/dm-transaction-manager.c +++ b/drivers/md/persistent-data/dm-transaction-manager.c @@@ -1,414 -1,0 +1,414 @@@ +/* + * Copyright (C) 2011 Red Hat, Inc. All rights reserved. + * + * This file is released under the GPL. + */ +#include "dm-transaction-manager.h" +#include "dm-space-map.h" +#include "dm-space-map-disk.h" +#include "dm-space-map-metadata.h" +#include "dm-persistent-data-internal.h" + - #include ++#include +#include +#include + +#define DM_MSG_PREFIX "transaction manager" + +/*----------------------------------------------------------------*/ + +struct shadow_info { + struct hlist_node hlist; + dm_block_t where; +}; + +/* + * It would be nice if we scaled with the size of transaction. 
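+ * For now the shadow table is a fixed hash of HASH_SIZE (256)
+ * buckets, defined immediately below.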
+ */ +#define HASH_SIZE 256 +#define HASH_MASK (HASH_SIZE - 1) + +struct dm_transaction_manager { + int is_clone; + struct dm_transaction_manager *real; + + struct dm_block_manager *bm; + struct dm_space_map *sm; + + spinlock_t lock; + struct hlist_head buckets[HASH_SIZE]; +}; + +/*----------------------------------------------------------------*/ + +static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b) +{ + int r = 0; + unsigned bucket = dm_hash_block(b, HASH_MASK); + struct shadow_info *si; + struct hlist_node *n; + + spin_lock(&tm->lock); + + hlist_for_each_entry(si, n, tm->buckets + bucket, hlist) + if (si->where == b) { + r = 1; + break; + } + + spin_unlock(&tm->lock); + + return r; +} + +/* + * This can silently fail if there's no memory. We're ok with this since + * creating redundant shadows causes no harm. + */ +static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b) +{ + unsigned bucket; + struct shadow_info *si; + + si = kmalloc(sizeof(*si), GFP_NOIO); + if (si) { + si->where = b; + bucket = dm_hash_block(b, HASH_MASK); + + spin_lock(&tm->lock); + hlist_add_head(&si->hlist, tm->buckets + bucket); + spin_unlock(&tm->lock); + } +} + +static void wipe_shadow_table(struct dm_transaction_manager *tm) +{ + struct shadow_info *si; + struct hlist_node *n, *tmp; + struct hlist_head *bucket; + int i; + + spin_lock(&tm->lock); + for (i = 0; i < HASH_SIZE; i++) { + bucket = tm->buckets + i; + hlist_for_each_entry_safe(si, n, tmp, bucket, hlist) + kfree(si); + + INIT_HLIST_HEAD(bucket); + } + spin_unlock(&tm->lock); +} + +/*----------------------------------------------------------------*/ + +static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm, + struct dm_space_map *sm) +{ + int i; + struct dm_transaction_manager *tm; + + tm = kmalloc(sizeof(*tm), GFP_KERNEL); + if (!tm) + return ERR_PTR(-ENOMEM); + + tm->is_clone = 0; + tm->real = NULL; + tm->bm = bm; + tm->sm = sm; + + spin_lock_init(&tm->lock); + for (i = 0; i < HASH_SIZE; i++) + INIT_HLIST_HEAD(tm->buckets + i); + + return tm; +} + +struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transaction_manager *real) +{ + struct dm_transaction_manager *tm; + + tm = kmalloc(sizeof(*tm), GFP_KERNEL); + if (tm) { + tm->is_clone = 1; + tm->real = real; + } + + return tm; +} +EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone); + +void dm_tm_destroy(struct dm_transaction_manager *tm) +{ + kfree(tm); +} +EXPORT_SYMBOL_GPL(dm_tm_destroy); + +int dm_tm_pre_commit(struct dm_transaction_manager *tm) +{ + int r; + + if (tm->is_clone) + return -EWOULDBLOCK; + + r = dm_sm_commit(tm->sm); + if (r < 0) + return r; + + return 0; +} +EXPORT_SYMBOL_GPL(dm_tm_pre_commit); + +int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root) +{ + if (tm->is_clone) + return -EWOULDBLOCK; + + wipe_shadow_table(tm); + + return dm_bm_flush_and_unlock(tm->bm, root); +} +EXPORT_SYMBOL_GPL(dm_tm_commit); + +int dm_tm_new_block(struct dm_transaction_manager *tm, + struct dm_block_validator *v, + struct dm_block **result) +{ + int r; + dm_block_t new_block; + + if (tm->is_clone) + return -EWOULDBLOCK; + + r = dm_sm_new_block(tm->sm, &new_block); + if (r < 0) + return r; + + r = dm_bm_write_lock_zero(tm->bm, new_block, v, result); + if (r < 0) { + dm_sm_dec_block(tm->sm, new_block); + return r; + } + + /* + * New blocks count as shadows in that they don't need to be + * shadowed again. 
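+ * Recording the location here lets a later dm_tm_shadow_block()
+ * in the same transaction hand the block straight back.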
+ */ + insert_shadow(tm, new_block); + + return 0; +} + +static int __shadow_block(struct dm_transaction_manager *tm, dm_block_t orig, + struct dm_block_validator *v, + struct dm_block **result, int *inc_children) +{ + int r; + dm_block_t new; + uint32_t count; + struct dm_block *orig_block; + + r = dm_sm_new_block(tm->sm, &new); + if (r < 0) + return r; + + r = dm_bm_write_lock_zero(tm->bm, new, v, result); + if (r < 0) + goto bad_dec_block; + + r = dm_bm_read_lock(tm->bm, orig, v, &orig_block); + if (r < 0) + goto bad_dec_block; + + memcpy(dm_block_data(*result), dm_block_data(orig_block), + dm_bm_block_size(tm->bm)); + + r = dm_bm_unlock(orig_block); + if (r < 0) + goto bad_dec_block; + + r = dm_sm_get_count(tm->sm, orig, &count); + if (r < 0) + goto bad; + + r = dm_sm_dec_block(tm->sm, orig); + if (r < 0) + goto bad; + + *inc_children = count > 1; + + return 0; + +bad: + dm_bm_unlock(*result); +bad_dec_block: + dm_sm_dec_block(tm->sm, new); + + return r; +} + +int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig, + struct dm_block_validator *v, struct dm_block **result, + int *inc_children) +{ + int r, more_than_one; + + if (tm->is_clone) + return -EWOULDBLOCK; + + if (is_shadow(tm, orig)) { + r = dm_sm_count_is_more_than_one(tm->sm, orig, &more_than_one); + if (r < 0) + return r; + + if (!more_than_one) { + *inc_children = 0; + return dm_bm_write_lock(tm->bm, orig, v, result); + } + /* fall through */ + } + + r = __shadow_block(tm, orig, v, result, inc_children); + if (r < 0) + return r; + + insert_shadow(tm, dm_block_location(*result)); + + return r; +} + +int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b, + struct dm_block_validator *v, + struct dm_block **blk) +{ + if (tm->is_clone) + return dm_bm_read_try_lock(tm->real->bm, b, v, blk); + + return dm_bm_read_lock(tm->bm, b, v, blk); +} + +int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b) +{ + return dm_bm_unlock(b); +} +EXPORT_SYMBOL_GPL(dm_tm_unlock); + +void dm_tm_inc(struct dm_transaction_manager *tm, dm_block_t b) +{ + /* + * The non-blocking clone doesn't support this. + */ + BUG_ON(tm->is_clone); + + dm_sm_inc_block(tm->sm, b); +} +EXPORT_SYMBOL_GPL(dm_tm_inc); + +void dm_tm_dec(struct dm_transaction_manager *tm, dm_block_t b) +{ + /* + * The non-blocking clone doesn't support this. 
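+ * Reference counts live in the space map, which the read-only
+ * clone does not carry.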
+ */
+	BUG_ON(tm->is_clone);
+
+	dm_sm_dec_block(tm->sm, b);
+}
+
+int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b,
+	      uint32_t *result)
+{
+	if (tm->is_clone)
+		return -EWOULDBLOCK;
+
+	return dm_sm_get_count(tm->sm, b, result);
+}
+
+struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm)
+{
+	return tm->bm;
+}
+
+/*----------------------------------------------------------------*/
+
+static int dm_tm_create_internal(struct dm_block_manager *bm,
+				 dm_block_t sb_location,
+				 struct dm_block_validator *sb_validator,
+				 size_t root_offset, size_t root_max_len,
+				 struct dm_transaction_manager **tm,
+				 struct dm_space_map **sm,
+				 struct dm_block **sblock,
+				 int create)
+{
+	int r;
+
+	*sm = dm_sm_metadata_init();
+	if (IS_ERR(*sm))
+		return PTR_ERR(*sm);
+
+	*tm = dm_tm_create(bm, *sm);
+	if (IS_ERR(*tm)) {
+		dm_sm_destroy(*sm);
+		return PTR_ERR(*tm);
+	}
+
+	if (create) {
+		r = dm_bm_write_lock_zero(dm_tm_get_bm(*tm), sb_location,
+					  sb_validator, sblock);
+		if (r < 0) {
+			DMERR("couldn't lock superblock");
+			goto bad1;
+		}
+
+		r = dm_sm_metadata_create(*sm, *tm, dm_bm_nr_blocks(bm),
+					  sb_location);
+		if (r) {
+			DMERR("couldn't create metadata space map");
+			goto bad2;
+		}
+
+	} else {
+		r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
+				     sb_validator, sblock);
+		if (r < 0) {
+			DMERR("couldn't lock superblock");
+			goto bad1;
+		}
+
+		r = dm_sm_metadata_open(*sm, *tm,
+					dm_block_data(*sblock) + root_offset,
+					root_max_len);
+		if (r) {
+			DMERR("couldn't open metadata space map");
+			goto bad2;
+		}
+	}
+
+	return 0;
+
+bad2:
+	dm_tm_unlock(*tm, *sblock);
+bad1:
+	dm_tm_destroy(*tm);
+	dm_sm_destroy(*sm);
+	return r;
+}
+
+int dm_tm_create_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
+			 struct dm_block_validator *sb_validator,
+			 struct dm_transaction_manager **tm,
+			 struct dm_space_map **sm, struct dm_block **sblock)
+{
+	return dm_tm_create_internal(bm, sb_location, sb_validator,
+				     0, 0, tm, sm, sblock, 1);
+}
+EXPORT_SYMBOL_GPL(dm_tm_create_with_sm);
+
+int dm_tm_open_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
+		       struct dm_block_validator *sb_validator,
+		       size_t root_offset, size_t root_max_len,
+		       struct dm_transaction_manager **tm,
+		       struct dm_space_map **sm, struct dm_block **sblock)
+{
+	return dm_tm_create_internal(bm, sb_location, sb_validator, root_offset,
+				     root_max_len, tm, sm, sblock, 0);
+}
+EXPORT_SYMBOL_GPL(dm_tm_open_with_sm);
diff --cc drivers/media/common/saa7146_core.c
index f5d53a202344,31e53b6a881a..d6b1cf66042d
--- a/drivers/media/common/saa7146_core.c
+++ b/drivers/media/common/saa7146_core.c
@@@ -18,9 -18,8 +18,10 @@@
     Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include + #include LIST_HEAD(saa7146_devices); DEFINE_MUTEX(saa7146_devices_lock); diff --cc drivers/media/common/saa7146_fops.c index a92546144eaa,e4547afcfa88..71f8e018e564 --- a/drivers/media/common/saa7146_fops.c +++ b/drivers/media/common/saa7146_fops.c @@@ -1,6 -1,5 +1,7 @@@ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include + #include /****************************************************************************/ /* resource management functions, shamelessly stolen from saa7134 driver */ diff --cc drivers/media/common/saa7146_hlp.c index 79ad73accb27,c9c6e9a6c31d..bc1f545c95cb --- a/drivers/media/common/saa7146_hlp.c +++ b/drivers/media/common/saa7146_hlp.c @@@ -1,6 -1,5 +1,7 @@@ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include + #include #include static void calculate_output_format_register(struct saa7146_dev* saa, u32 palette, u32* clip_format) diff --cc drivers/media/common/saa7146_video.c index 384b358d3037,3a00253fe1ee..ce30533fd972 --- a/drivers/media/common/saa7146_video.c +++ b/drivers/media/common/saa7146_video.c @@@ -1,7 -1,6 +1,8 @@@ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include + #include static int max_memory = 32; diff --cc drivers/media/dvb/frontends/dibx000_common.c index 774d507b66cc,977211fec137..43be7238311e --- a/drivers/media/dvb/frontends/dibx000_common.c +++ b/drivers/media/dvb/frontends/dibx000_common.c @@@ -1,5 -1,5 +1,6 @@@ #include +#include + #include #include "dibx000_common.h" diff --cc drivers/media/video/adp1653.c index 5914390211ff,c2594948ca3f..12eedf4d515a --- a/drivers/media/video/adp1653.c +++ b/drivers/media/video/adp1653.c @@@ -31,8 -31,8 +31,9 @@@ */ #include + #include #include +#include #include #include #include diff --cc drivers/mfd/max8997.c index dc58750bb71b,77032e43af4f..5be53ae9b61c --- a/drivers/mfd/max8997.c +++ b/drivers/mfd/max8997.c @@@ -23,8 -23,8 +23,9 @@@ #include #include +#include #include + #include #include #include #include diff --cc drivers/mfd/wm8400-core.c index e06ba9440cdb,b49b91eb44ec..36de271ba0e3 --- a/drivers/mfd/wm8400-core.c +++ b/drivers/mfd/wm8400-core.c @@@ -19,8 -18,8 +19,9 @@@ #include #include #include +#include #include + #include static struct { u16 readable; /* Mask of readable bits */ diff --cc drivers/mmc/core/debugfs.c index 6045ea469362,f57375393127..65138e05d188 --- a/drivers/mmc/core/debugfs.c +++ b/drivers/mmc/core/debugfs.c @@@ -7,9 -7,9 +7,10 @@@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ +#include #include #include + #include #include #include #include diff --cc drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index b89f3a684aec,848dce16073c..48406ca382f1 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@@ -20,6 -20,8 +20,7 @@@ #include "pch_gbe.h" #include "pch_gbe_api.h" -#include + #include #define DRV_VERSION "1.00" const char pch_driver_version[] = DRV_VERSION; diff --cc drivers/net/wireless/ath/ath5k/debug.c index fce8c904eea9,b2a523ed5498..f542b418beda --- a/drivers/net/wireless/ath/ath5k/debug.c +++ b/drivers/net/wireless/ath/ath5k/debug.c @@@ -57,14 -57,11 +57,14 @@@ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. 
*/ - - #include + #include + #include - -#include "base.h" +#include +#include #include "debug.h" +#include "ath5k.h" +#include "reg.h" +#include "base.h" static unsigned int ath5k_debug; module_param_named(debug, ath5k_debug, uint, 0); diff --cc drivers/net/wireless/ath/ath6kl/cfg80211.c index 3aff36bad5d3,000000000000..f517eb8f7b44 mode 100644,000000..100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@@ -1,1914 -1,0 +1,1916 @@@ +/* + * Copyright (c) 2004-2011 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + ++#include ++ +#include "core.h" +#include "cfg80211.h" +#include "debug.h" +#include "hif-ops.h" +#include "testmode.h" + +static unsigned int ath6kl_p2p; + +module_param(ath6kl_p2p, uint, 0644); + +#define RATETAB_ENT(_rate, _rateid, _flags) { \ + .bitrate = (_rate), \ + .flags = (_flags), \ + .hw_value = (_rateid), \ +} + +#define CHAN2G(_channel, _freq, _flags) { \ + .band = IEEE80211_BAND_2GHZ, \ + .hw_value = (_channel), \ + .center_freq = (_freq), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +#define CHAN5G(_channel, _flags) { \ + .band = IEEE80211_BAND_5GHZ, \ + .hw_value = (_channel), \ + .center_freq = 5000 + (5 * (_channel)), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +static struct ieee80211_rate ath6kl_rates[] = { + RATETAB_ENT(10, 0x1, 0), + RATETAB_ENT(20, 0x2, 0), + RATETAB_ENT(55, 0x4, 0), + RATETAB_ENT(110, 0x8, 0), + RATETAB_ENT(60, 0x10, 0), + RATETAB_ENT(90, 0x20, 0), + RATETAB_ENT(120, 0x40, 0), + RATETAB_ENT(180, 0x80, 0), + RATETAB_ENT(240, 0x100, 0), + RATETAB_ENT(360, 0x200, 0), + RATETAB_ENT(480, 0x400, 0), + RATETAB_ENT(540, 0x800, 0), +}; + +#define ath6kl_a_rates (ath6kl_rates + 4) +#define ath6kl_a_rates_size 8 +#define ath6kl_g_rates (ath6kl_rates + 0) +#define ath6kl_g_rates_size 12 + +static struct ieee80211_channel ath6kl_2ghz_channels[] = { + CHAN2G(1, 2412, 0), + CHAN2G(2, 2417, 0), + CHAN2G(3, 2422, 0), + CHAN2G(4, 2427, 0), + CHAN2G(5, 2432, 0), + CHAN2G(6, 2437, 0), + CHAN2G(7, 2442, 0), + CHAN2G(8, 2447, 0), + CHAN2G(9, 2452, 0), + CHAN2G(10, 2457, 0), + CHAN2G(11, 2462, 0), + CHAN2G(12, 2467, 0), + CHAN2G(13, 2472, 0), + CHAN2G(14, 2484, 0), +}; + +static struct ieee80211_channel ath6kl_5ghz_a_channels[] = { + CHAN5G(34, 0), CHAN5G(36, 0), + CHAN5G(38, 0), CHAN5G(40, 0), + CHAN5G(42, 0), CHAN5G(44, 0), + CHAN5G(46, 0), CHAN5G(48, 0), + CHAN5G(52, 0), CHAN5G(56, 0), + CHAN5G(60, 0), CHAN5G(64, 0), + CHAN5G(100, 0), CHAN5G(104, 0), + CHAN5G(108, 0), CHAN5G(112, 0), + CHAN5G(116, 0), CHAN5G(120, 0), + CHAN5G(124, 0), CHAN5G(128, 0), + CHAN5G(132, 0), CHAN5G(136, 0), + CHAN5G(140, 0), CHAN5G(149, 0), + CHAN5G(153, 0), CHAN5G(157, 0), + CHAN5G(161, 0), CHAN5G(165, 0), + CHAN5G(184, 0), CHAN5G(188, 0), + 
CHAN5G(192, 0), CHAN5G(196, 0), + CHAN5G(200, 0), CHAN5G(204, 0), + CHAN5G(208, 0), CHAN5G(212, 0), + CHAN5G(216, 0), +}; + +static struct ieee80211_supported_band ath6kl_band_2ghz = { + .n_channels = ARRAY_SIZE(ath6kl_2ghz_channels), + .channels = ath6kl_2ghz_channels, + .n_bitrates = ath6kl_g_rates_size, + .bitrates = ath6kl_g_rates, +}; + +static struct ieee80211_supported_band ath6kl_band_5ghz = { + .n_channels = ARRAY_SIZE(ath6kl_5ghz_a_channels), + .channels = ath6kl_5ghz_a_channels, + .n_bitrates = ath6kl_a_rates_size, + .bitrates = ath6kl_a_rates, +}; + +static int ath6kl_set_wpa_version(struct ath6kl *ar, + enum nl80211_wpa_versions wpa_version) +{ + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: %u\n", __func__, wpa_version); + + if (!wpa_version) { + ar->auth_mode = NONE_AUTH; + } else if (wpa_version & NL80211_WPA_VERSION_2) { + ar->auth_mode = WPA2_AUTH; + } else if (wpa_version & NL80211_WPA_VERSION_1) { + ar->auth_mode = WPA_AUTH; + } else { + ath6kl_err("%s: %u not supported\n", __func__, wpa_version); + return -ENOTSUPP; + } + + return 0; +} + +static int ath6kl_set_auth_type(struct ath6kl *ar, + enum nl80211_auth_type auth_type) +{ + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, auth_type); + + switch (auth_type) { + case NL80211_AUTHTYPE_OPEN_SYSTEM: + ar->dot11_auth_mode = OPEN_AUTH; + break; + case NL80211_AUTHTYPE_SHARED_KEY: + ar->dot11_auth_mode = SHARED_AUTH; + break; + case NL80211_AUTHTYPE_NETWORK_EAP: + ar->dot11_auth_mode = LEAP_AUTH; + break; + + case NL80211_AUTHTYPE_AUTOMATIC: + ar->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH; + break; + + default: + ath6kl_err("%s: 0x%x not supported\n", __func__, auth_type); + return -ENOTSUPP; + } + + return 0; +} + +static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast) +{ + u8 *ar_cipher = ucast ? &ar->prwise_crypto : &ar->grp_crypto; + u8 *ar_cipher_len = ucast ?
&ar->prwise_crypto_len : + &ar->grp_crypto_len; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: cipher 0x%x, ucast %u\n", + __func__, cipher, ucast); + + switch (cipher) { + case 0: + /* our own hack to use value 0 as no crypto used */ + *ar_cipher = NONE_CRYPT; + *ar_cipher_len = 0; + break; + case WLAN_CIPHER_SUITE_WEP40: + *ar_cipher = WEP_CRYPT; + *ar_cipher_len = 5; + break; + case WLAN_CIPHER_SUITE_WEP104: + *ar_cipher = WEP_CRYPT; + *ar_cipher_len = 13; + break; + case WLAN_CIPHER_SUITE_TKIP: + *ar_cipher = TKIP_CRYPT; + *ar_cipher_len = 0; + break; + case WLAN_CIPHER_SUITE_CCMP: + *ar_cipher = AES_CRYPT; + *ar_cipher_len = 0; + break; + default: + ath6kl_err("cipher 0x%x not supported\n", cipher); + return -ENOTSUPP; + } + + return 0; +} + +static void ath6kl_set_key_mgmt(struct ath6kl *ar, u32 key_mgmt) +{ + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, key_mgmt); + + if (key_mgmt == WLAN_AKM_SUITE_PSK) { + if (ar->auth_mode == WPA_AUTH) + ar->auth_mode = WPA_PSK_AUTH; + else if (ar->auth_mode == WPA2_AUTH) + ar->auth_mode = WPA2_PSK_AUTH; + } else if (key_mgmt != WLAN_AKM_SUITE_8021X) { + ar->auth_mode = NONE_AUTH; + } +} + +static bool ath6kl_cfg80211_ready(struct ath6kl *ar) +{ + if (!test_bit(WMI_READY, &ar->flag)) { + ath6kl_err("wmi is not ready\n"); + return false; + } + + if (!test_bit(WLAN_ENABLED, &ar->flag)) { + ath6kl_err("wlan disabled\n"); + return false; + } + + return true; +} + +static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_connect_params *sme) +{ + struct ath6kl *ar = ath6kl_priv(dev); + int status; + + ar->sme_state = SME_CONNECTING; + + if (!ath6kl_cfg80211_ready(ar)) + return -EIO; + + if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) { + ath6kl_err("destroy in progress\n"); + return -EBUSY; + } + + if (test_bit(SKIP_SCAN, &ar->flag) && + ((sme->channel && sme->channel->center_freq == 0) || + (sme->bssid && is_zero_ether_addr(sme->bssid)))) { + ath6kl_err("SkipScan: channel or bssid invalid\n"); + return -EINVAL; + } + + if (down_interruptible(&ar->sem)) { + ath6kl_err("busy, couldn't get access\n"); + return -ERESTARTSYS; + } + + if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) { + ath6kl_err("busy, destroy in progress\n"); + up(&ar->sem); + return -EBUSY; + } + + if (ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)]) { + /* + * sleep until the command queue drains + */ + wait_event_interruptible_timeout(ar->event_wq, + ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)] == 0, + WMI_TIMEOUT); + if (signal_pending(current)) { + ath6kl_err("cmd queue drain timeout\n"); + up(&ar->sem); + return -EINTR; + } + } + + if (test_bit(CONNECTED, &ar->flag) && + ar->ssid_len == sme->ssid_len && + !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) { + ar->reconnect_flag = true; + status = ath6kl_wmi_reconnect_cmd(ar->wmi, ar->req_bssid, + ar->ch_hint); + + up(&ar->sem); + if (status) { + ath6kl_err("wmi_reconnect_cmd failed\n"); + return -EIO; + } + return 0; + } else if (ar->ssid_len == sme->ssid_len && + !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) { + ath6kl_disconnect(ar); + } + + memset(ar->ssid, 0, sizeof(ar->ssid)); + ar->ssid_len = sme->ssid_len; + memcpy(ar->ssid, sme->ssid, sme->ssid_len); + + if (sme->channel) + ar->ch_hint = sme->channel->center_freq; + + memset(ar->req_bssid, 0, sizeof(ar->req_bssid)); + if (sme->bssid && !is_broadcast_ether_addr(sme->bssid)) + memcpy(ar->req_bssid, sme->bssid, sizeof(ar->req_bssid)); + + ath6kl_set_wpa_version(ar, sme->crypto.wpa_versions); + + status = ath6kl_set_auth_type(ar, 
sme->auth_type); + if (status) { + up(&ar->sem); + return status; + } + + if (sme->crypto.n_ciphers_pairwise) + ath6kl_set_cipher(ar, sme->crypto.ciphers_pairwise[0], true); + else + ath6kl_set_cipher(ar, 0, true); + + ath6kl_set_cipher(ar, sme->crypto.cipher_group, false); + + if (sme->crypto.n_akm_suites) + ath6kl_set_key_mgmt(ar, sme->crypto.akm_suites[0]); + + if ((sme->key_len) && + (ar->auth_mode == NONE_AUTH) && (ar->prwise_crypto == WEP_CRYPT)) { + struct ath6kl_key *key = NULL; + + if (sme->key_idx < WMI_MIN_KEY_INDEX || + sme->key_idx > WMI_MAX_KEY_INDEX) { + ath6kl_err("key index %d out of bounds\n", + sme->key_idx); + up(&ar->sem); + return -ENOENT; + } + + key = &ar->keys[sme->key_idx]; + key->key_len = sme->key_len; + memcpy(key->key, sme->key, key->key_len); + key->cipher = ar->prwise_crypto; + ar->def_txkey_index = sme->key_idx; + + ath6kl_wmi_addkey_cmd(ar->wmi, sme->key_idx, + ar->prwise_crypto, + GROUP_USAGE | TX_USAGE, + key->key_len, + NULL, + key->key, KEY_OP_INIT_VAL, NULL, + NO_SYNC_WMIFLAG); + } + + if (!ar->usr_bss_filter) { + clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag); + if (ath6kl_wmi_bssfilter_cmd(ar->wmi, ALL_BSS_FILTER, 0) != 0) { + ath6kl_err("couldn't set bss filtering\n"); + up(&ar->sem); + return -EIO; + } + } + + ar->nw_type = ar->next_mode; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: connect called with authmode %d dot11 auth %d" + " PW crypto %d PW crypto len %d GRP crypto %d" + " GRP crypto len %d channel hint %u\n", + __func__, + ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto, + ar->prwise_crypto_len, ar->grp_crypto, + ar->grp_crypto_len, ar->ch_hint); + + ar->reconnect_flag = 0; + status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type, + ar->dot11_auth_mode, ar->auth_mode, + ar->prwise_crypto, + ar->prwise_crypto_len, + ar->grp_crypto, ar->grp_crypto_len, + ar->ssid_len, ar->ssid, + ar->req_bssid, ar->ch_hint, + ar->connect_ctrl_flags); + + up(&ar->sem); + + if (status == -EINVAL) { + memset(ar->ssid, 0, sizeof(ar->ssid)); + ar->ssid_len = 0; + ath6kl_err("invalid request\n"); + return -ENOENT; + } else if (status) { + ath6kl_err("ath6kl_wmi_connect_cmd failed\n"); + return -EIO; + } + + if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) && + ((ar->auth_mode == WPA_PSK_AUTH) + || (ar->auth_mode == WPA2_PSK_AUTH))) { + mod_timer(&ar->disconnect_timer, + jiffies + msecs_to_jiffies(DISCON_TIMER_INTVAL)); + } + + ar->connect_ctrl_flags &= ~CONNECT_DO_WPA_OFFLOAD; + set_bit(CONNECT_PEND, &ar->flag); + + return 0; +} + +static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid, + struct ieee80211_channel *chan, + const u8 *beacon_ie, size_t beacon_ie_len) +{ + struct cfg80211_bss *bss; + u8 *ie; + + bss = cfg80211_get_bss(ar->wdev->wiphy, chan, bssid, + ar->ssid, ar->ssid_len, WLAN_CAPABILITY_ESS, + WLAN_CAPABILITY_ESS); + if (bss == NULL) { + /* + * Since cfg80211 may not yet know about the BSS, + * generate a partial entry until the first BSS info + * event becomes available. + * + * Prepend SSID element since it is not included in the Beacon + * IEs from the target. 
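+ * The buffer assembled below is laid out as + * [WLAN_EID_SSID][length][SSID bytes][Beacon IEs from the target].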
+ */ + ie = kmalloc(2 + ar->ssid_len + beacon_ie_len, GFP_KERNEL); + if (ie == NULL) + return -ENOMEM; + ie[0] = WLAN_EID_SSID; + ie[1] = ar->ssid_len; + memcpy(ie + 2, ar->ssid, ar->ssid_len); + memcpy(ie + 2 + ar->ssid_len, beacon_ie, beacon_ie_len); + bss = cfg80211_inform_bss(ar->wdev->wiphy, chan, + bssid, 0, WLAN_CAPABILITY_ESS, 100, + ie, 2 + ar->ssid_len + beacon_ie_len, + 0, GFP_KERNEL); + if (bss) + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "added dummy bss for " + "%pM prior to indicating connect/roamed " + "event\n", bssid); + kfree(ie); + } else + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss " + "entry\n"); + + if (bss == NULL) + return -ENOMEM; + + cfg80211_put_bss(bss); + + return 0; +} + +void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel, + u8 *bssid, u16 listen_intvl, + u16 beacon_intvl, + enum network_type nw_type, + u8 beacon_ie_len, u8 assoc_req_len, + u8 assoc_resp_len, u8 *assoc_info) +{ + struct ieee80211_channel *chan; + + /* capinfo + listen interval */ + u8 assoc_req_ie_offset = sizeof(u16) + sizeof(u16); + + /* capinfo + status code + associd */ + u8 assoc_resp_ie_offset = sizeof(u16) + sizeof(u16) + sizeof(u16); + + u8 *assoc_req_ie = assoc_info + beacon_ie_len + assoc_req_ie_offset; + u8 *assoc_resp_ie = assoc_info + beacon_ie_len + assoc_req_len + + assoc_resp_ie_offset; + + assoc_req_len -= assoc_req_ie_offset; + assoc_resp_len -= assoc_resp_ie_offset; + + /* + * Store Beacon interval here; DTIM period will be available only once + * a Beacon frame from the AP is seen. + */ + ar->assoc_bss_beacon_int = beacon_intvl; + clear_bit(DTIM_PERIOD_AVAIL, &ar->flag); + + if (nw_type & ADHOC_NETWORK) { + if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: ath6k not in ibss mode\n", __func__); + return; + } + } + + if (nw_type & INFRA_NETWORK) { + if (ar->wdev->iftype != NL80211_IFTYPE_STATION && + ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: ath6k not in station mode\n", __func__); + return; + } + } + + chan = ieee80211_get_channel(ar->wdev->wiphy, (int) channel); + + + if (nw_type & ADHOC_NETWORK) { + cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL); + return; + } + + if (ath6kl_add_bss_if_needed(ar, bssid, chan, assoc_info, + beacon_ie_len) < 0) { + ath6kl_err("could not add cfg80211 bss entry for " + "connect/roamed notification\n"); + return; + } + + if (ar->sme_state == SME_CONNECTING) { + /* inform connect result to cfg80211 */ + ar->sme_state = SME_CONNECTED; + cfg80211_connect_result(ar->net_dev, bssid, + assoc_req_ie, assoc_req_len, + assoc_resp_ie, assoc_resp_len, + WLAN_STATUS_SUCCESS, GFP_KERNEL); + } else if (ar->sme_state == SME_CONNECTED) { + /* inform roam event to cfg80211 */ + cfg80211_roamed(ar->net_dev, chan, bssid, + assoc_req_ie, assoc_req_len, + assoc_resp_ie, assoc_resp_len, GFP_KERNEL); + } +} + +static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy, + struct net_device *dev, u16 reason_code) +{ + struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev); + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: reason=%u\n", __func__, + reason_code); + + if (!ath6kl_cfg80211_ready(ar)) + return -EIO; + + if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) { + ath6kl_err("busy, destroy in progress\n"); + return -EBUSY; + } + + if (down_interruptible(&ar->sem)) { + ath6kl_err("busy, couldn't get access\n"); + return -ERESTARTSYS; + } + + ar->reconnect_flag = 0; + ath6kl_disconnect(ar); + memset(ar->ssid, 0, sizeof(ar->ssid)); + ar->ssid_len = 0; + + if 
(!test_bit(SKIP_SCAN, &ar->flag)) + memset(ar->req_bssid, 0, sizeof(ar->req_bssid)); + + up(&ar->sem); + + ar->sme_state = SME_DISCONNECTED; + + return 0; +} + +void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason, + u8 *bssid, u8 assoc_resp_len, + u8 *assoc_info, u16 proto_reason) +{ + if (ar->scan_req) { + cfg80211_scan_done(ar->scan_req, true); + ar->scan_req = NULL; + } + + if (ar->nw_type & ADHOC_NETWORK) { + if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: ath6k not in ibss mode\n", __func__); + return; + } + memset(bssid, 0, ETH_ALEN); + cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL); + return; + } + + if (ar->nw_type & INFRA_NETWORK) { + if (ar->wdev->iftype != NL80211_IFTYPE_STATION && + ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: ath6k not in station mode\n", __func__); + return; + } + } + + /* + * Send a disconnect command to target when a disconnect event is + * received with reason code other than 3 (DISCONNECT_CMD - disconnect + * request from host) to make the firmware stop trying to connect even + * after giving disconnect event. There will be one more disconnect + * event for this disconnect command with reason code DISCONNECT_CMD + * which will be notified to cfg80211. + */ + + if (reason != DISCONNECT_CMD) { + ath6kl_wmi_disconnect_cmd(ar->wmi); + return; + } + + clear_bit(CONNECT_PEND, &ar->flag); + + if (ar->sme_state == SME_CONNECTING) { + cfg80211_connect_result(ar->net_dev, + bssid, NULL, 0, + NULL, 0, + WLAN_STATUS_UNSPECIFIED_FAILURE, + GFP_KERNEL); + } else if (ar->sme_state == SME_CONNECTED) { + cfg80211_disconnected(ar->net_dev, reason, + NULL, 0, GFP_KERNEL); + } + + ar->sme_state = SME_DISCONNECTED; +} + +static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_scan_request *request) +{ + struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev); + s8 n_channels = 0; + u16 *channels = NULL; + int ret = 0; + + if (!ath6kl_cfg80211_ready(ar)) + return -EIO; + + if (!ar->usr_bss_filter) { + clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag); + ret = ath6kl_wmi_bssfilter_cmd( + ar->wmi, + (test_bit(CONNECTED, &ar->flag) ? + ALL_BUT_BSS_FILTER : ALL_BSS_FILTER), 0); + if (ret) { + ath6kl_err("couldn't set bss filtering\n"); + return ret; + } + } + + if (request->n_ssids && request->ssids[0].ssid_len) { + u8 i; + + if (request->n_ssids > (MAX_PROBED_SSID_INDEX - 1)) + request->n_ssids = MAX_PROBED_SSID_INDEX - 1; + + for (i = 0; i < request->n_ssids; i++) + ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1, + SPECIFIC_SSID_FLAG, + request->ssids[i].ssid_len, + request->ssids[i].ssid); + } + + if (request->ie) { + ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_REQ, + request->ie, request->ie_len); + if (ret) { + ath6kl_err("failed to set Probe Request appie for " + "scan"); + return ret; + } + } + + /* + * Scan only the requested channels if the request specifies a set of + * channels. If the list is longer than the target supports, do not + * configure the list and instead, scan all available channels. 
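+ * (Leaving n_channels at 0 in the WMI start-scan command below is + * how an all-channel scan is requested.)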
+ */ + if (request->n_channels > 0 && + request->n_channels <= WMI_MAX_CHANNELS) { + u8 i; + + n_channels = request->n_channels; + + channels = kzalloc(n_channels * sizeof(u16), GFP_KERNEL); + if (channels == NULL) { + ath6kl_warn("failed to set scan channels, " + "scan all channels"); + n_channels = 0; + } + + for (i = 0; i < n_channels; i++) + channels[i] = request->channels[i]->center_freq; + } + + ret = ath6kl_wmi_startscan_cmd(ar->wmi, WMI_LONG_SCAN, 0, + false, 0, 0, n_channels, channels); + if (ret) + ath6kl_err("wmi_startscan_cmd failed\n"); + else + ar->scan_req = request; + + kfree(channels); + + return ret; +} + +void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status) +{ + int i; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status %d\n", __func__, status); + + if (!ar->scan_req) + return; + + if ((status == -ECANCELED) || (status == -EBUSY)) { + cfg80211_scan_done(ar->scan_req, true); + goto out; + } + + cfg80211_scan_done(ar->scan_req, false); + + if (ar->scan_req->n_ssids && ar->scan_req->ssids[0].ssid_len) { + for (i = 0; i < ar->scan_req->n_ssids; i++) { + ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1, + DISABLE_SSID_FLAG, + 0, NULL); + } + } + +out: + ar->scan_req = NULL; +} + +static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, + u8 key_index, bool pairwise, + const u8 *mac_addr, + struct key_params *params) +{ + struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev); + struct ath6kl_key *key = NULL; + u8 key_usage; + u8 key_type; + int status = 0; + + if (!ath6kl_cfg80211_ready(ar)) + return -EIO; + + if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: key index %d out of bounds\n", __func__, + key_index); + return -ENOENT; + } + + key = &ar->keys[key_index]; + memset(key, 0, sizeof(struct ath6kl_key)); + + if (pairwise) + key_usage = PAIRWISE_USAGE; + else + key_usage = GROUP_USAGE; + + if (params) { + if (params->key_len > WLAN_MAX_KEY_LEN || + params->seq_len > sizeof(key->seq)) + return -EINVAL; + + key->key_len = params->key_len; + memcpy(key->key, params->key, key->key_len); + key->seq_len = params->seq_len; + memcpy(key->seq, params->seq, key->seq_len); + key->cipher = params->cipher; + } + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + key_type = WEP_CRYPT; + break; + + case WLAN_CIPHER_SUITE_TKIP: + key_type = TKIP_CRYPT; + break; + + case WLAN_CIPHER_SUITE_CCMP: + key_type = AES_CRYPT; + break; + + default: + return -ENOTSUPP; + } + + if (((ar->auth_mode == WPA_PSK_AUTH) + || (ar->auth_mode == WPA2_PSK_AUTH)) + && (key_usage & GROUP_USAGE)) + del_timer(&ar->disconnect_timer); + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: index %d, key_len %d, key_type 0x%x, key_usage 0x%x, seq_len %d\n", + __func__, key_index, key->key_len, key_type, + key_usage, key->seq_len); + + ar->def_txkey_index = key_index; + + if (ar->nw_type == AP_NETWORK && !pairwise && + (key_type == TKIP_CRYPT || key_type == AES_CRYPT) && params) { + ar->ap_mode_bkey.valid = true; + ar->ap_mode_bkey.key_index = key_index; + ar->ap_mode_bkey.key_type = key_type; + ar->ap_mode_bkey.key_len = key->key_len; + memcpy(ar->ap_mode_bkey.key, key->key, key->key_len); + if (!test_bit(CONNECTED, &ar->flag)) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay initial group " + "key configuration until AP mode has been " + "started\n"); + /* + * The key will be set in ath6kl_connect_ap_mode() once + * the connected event is received from the target. 
+ */ + return 0; + } + } + + if (ar->next_mode == AP_NETWORK && key_type == WEP_CRYPT && + !test_bit(CONNECTED, &ar->flag)) { + /* + * Store the key locally so that it can be re-configured after + * the AP mode has properly started + * (ath6kl_install_static_wep_keys). + */ + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay WEP key configuration " + "until AP mode has been started\n"); + ar->wep_key_list[key_index].key_len = key->key_len; + memcpy(ar->wep_key_list[key_index].key, key->key, key->key_len); + return 0; + } + + status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index, + key_type, key_usage, key->key_len, + key->seq, key->key, KEY_OP_INIT_VAL, + (u8 *) mac_addr, SYNC_BOTH_WMIFLAG); + + if (status) + return -EIO; + + return 0; +} + +static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, + u8 key_index, bool pairwise, + const u8 *mac_addr) +{ + struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev); + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index); + + if (!ath6kl_cfg80211_ready(ar)) + return -EIO; + + if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: key index %d out of bounds\n", __func__, + key_index); + return -ENOENT; + } + + if (!ar->keys[key_index].key_len) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: index %d is empty\n", __func__, key_index); + return 0; + } + + ar->keys[key_index].key_len = 0; + + return ath6kl_wmi_deletekey_cmd(ar->wmi, key_index); +} + +static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, + u8 key_index, bool pairwise, + const u8 *mac_addr, void *cookie, + void (*callback) (void *cookie, + struct key_params *)) +{ + struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev); + struct ath6kl_key *key = NULL; + struct key_params params; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index); + + if (!ath6kl_cfg80211_ready(ar)) + return -EIO; + + if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: key index %d out of bounds\n", __func__, + key_index); + return -ENOENT; + } + + key = &ar->keys[key_index]; + memset(&params, 0, sizeof(params)); + params.cipher = key->cipher; + params.key_len = key->key_len; + params.seq_len = key->seq_len; + params.seq = key->seq; + params.key = key->key; + + callback(cookie, &params); + + return key->key_len ?
0 : -ENOENT; +} + +static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy, + struct net_device *ndev, + u8 key_index, bool unicast, + bool multicast) +{ + struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev); + struct ath6kl_key *key = NULL; + int status = 0; + u8 key_usage; + enum crypto_type key_type = NONE_CRYPT; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index); + + if (!ath6kl_cfg80211_ready(ar)) + return -EIO; + + if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: key index %d out of bounds\n", + __func__, key_index); + return -ENOENT; + } + + if (!ar->keys[key_index].key_len) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: invalid key index %d\n", + __func__, key_index); + return -EINVAL; + } + + ar->def_txkey_index = key_index; + key = &ar->keys[ar->def_txkey_index]; + key_usage = GROUP_USAGE; + if (ar->prwise_crypto == WEP_CRYPT) + key_usage |= TX_USAGE; + if (unicast) + key_type = ar->prwise_crypto; + if (multicast) + key_type = ar->grp_crypto; + + if (ar->next_mode == AP_NETWORK && !test_bit(CONNECTED, &ar->flag)) + return 0; /* Delay until AP mode has been started */ + + status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index, + key_type, key_usage, + key->key_len, key->seq, key->key, + KEY_OP_INIT_VAL, NULL, + SYNC_BOTH_WMIFLAG); + if (status) + return -EIO; + + return 0; +} + +void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid, + bool ismcast) +{ + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: keyid %d, ismcast %d\n", __func__, keyid, ismcast); + + cfg80211_michael_mic_failure(ar->net_dev, ar->bssid, + (ismcast ? NL80211_KEYTYPE_GROUP : + NL80211_KEYTYPE_PAIRWISE), keyid, NULL, + GFP_KERNEL); +} + +static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) +{ + struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy); + int ret; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: changed 0x%x\n", __func__, + changed); + + if (!ath6kl_cfg80211_ready(ar)) + return -EIO; + + if (changed & WIPHY_PARAM_RTS_THRESHOLD) { + ret = ath6kl_wmi_set_rts_cmd(ar->wmi, wiphy->rts_threshold); + if (ret != 0) { + ath6kl_err("ath6kl_wmi_set_rts_cmd failed\n"); + return -EIO; + } + } + + return 0; +} + +/* + * The type nl80211_tx_power_setting replaces the following + * data type from 2.6.36 onwards +*/ +static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy, + enum nl80211_tx_power_setting type, + int dbm) +{ + struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy); + u8 ath6kl_dbm; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x, dbm %d\n", __func__, + type, dbm); + + if (!ath6kl_cfg80211_ready(ar)) + return -EIO; + + switch (type) { + case NL80211_TX_POWER_AUTOMATIC: + return 0; + case NL80211_TX_POWER_LIMITED: + ar->tx_pwr = ath6kl_dbm = dbm; + break; + default: + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x not supported\n", + __func__, type); + return -EOPNOTSUPP; + } + + ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, ath6kl_dbm); + + return 0; +} + +static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm) +{ + struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy); + + if (!ath6kl_cfg80211_ready(ar)) + return -EIO; + + if (test_bit(CONNECTED, &ar->flag)) { + ar->tx_pwr = 0; + + if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi) != 0) { + ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n"); + return -EIO; + } + + wait_event_interruptible_timeout(ar->event_wq, ar->tx_pwr != 0, + 5 * HZ); + + if (signal_pending(current)) { + ath6kl_err("target did not respond\n"); + return -EINTR; + } + } + 
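+ /* the WMI reply handler fills in ar->tx_pwr while we wait above */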
+ *dbm = ar->tx_pwr; + return 0; +} + +static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy, + struct net_device *dev, + bool pmgmt, int timeout) +{ + struct ath6kl *ar = ath6kl_priv(dev); + struct wmi_power_mode_cmd mode; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: pmgmt %d, timeout %d\n", + __func__, pmgmt, timeout); + + if (!ath6kl_cfg80211_ready(ar)) + return -EIO; + + if (pmgmt) { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: rec power\n", __func__); + mode.pwr_mode = REC_POWER; + } else { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: max perf\n", __func__); + mode.pwr_mode = MAX_PERF_POWER; + } + + if (ath6kl_wmi_powermode_cmd(ar->wmi, mode.pwr_mode) != 0) { + ath6kl_err("wmi_powermode_cmd failed\n"); + return -EIO; + } + + return 0; +} + +static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy, + struct net_device *ndev, + enum nl80211_iftype type, u32 *flags, + struct vif_params *params) +{ + struct ath6kl *ar = ath6kl_priv(ndev); + struct wireless_dev *wdev = ar->wdev; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type); + + if (!ath6kl_cfg80211_ready(ar)) + return -EIO; + + switch (type) { + case NL80211_IFTYPE_STATION: + ar->next_mode = INFRA_NETWORK; + break; + case NL80211_IFTYPE_ADHOC: + ar->next_mode = ADHOC_NETWORK; + break; + case NL80211_IFTYPE_AP: + ar->next_mode = AP_NETWORK; + break; + case NL80211_IFTYPE_P2P_CLIENT: + ar->next_mode = INFRA_NETWORK; + break; + case NL80211_IFTYPE_P2P_GO: + ar->next_mode = AP_NETWORK; + break; + default: + ath6kl_err("invalid interface type %u\n", type); + return -EOPNOTSUPP; + } + + wdev->iftype = type; + + return 0; +} + +static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_ibss_params *ibss_param) +{ + struct ath6kl *ar = ath6kl_priv(dev); + int status; + + if (!ath6kl_cfg80211_ready(ar)) + return -EIO; + + ar->ssid_len = ibss_param->ssid_len; + memcpy(ar->ssid, ibss_param->ssid, ar->ssid_len); + + if (ibss_param->channel) + ar->ch_hint = ibss_param->channel->center_freq; + + if (ibss_param->channel_fixed) { + /* + * TODO: channel_fixed: The channel should be fixed, do not + * search for IBSSs to join on other channels. Target + * firmware does not support this feature, needs to be + * updated.
+ */ + return -EOPNOTSUPP; + } + + memset(ar->req_bssid, 0, sizeof(ar->req_bssid)); + if (ibss_param->bssid && !is_broadcast_ether_addr(ibss_param->bssid)) + memcpy(ar->req_bssid, ibss_param->bssid, sizeof(ar->req_bssid)); + + ath6kl_set_wpa_version(ar, 0); + + status = ath6kl_set_auth_type(ar, NL80211_AUTHTYPE_OPEN_SYSTEM); + if (status) + return status; + + if (ibss_param->privacy) { + ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, true); + ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, false); + } else { + ath6kl_set_cipher(ar, 0, true); + ath6kl_set_cipher(ar, 0, false); + } + + ar->nw_type = ar->next_mode; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "%s: connect called with authmode %d dot11 auth %d" + " PW crypto %d PW crypto len %d GRP crypto %d" + " GRP crypto len %d channel hint %u\n", + __func__, + ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto, + ar->prwise_crypto_len, ar->grp_crypto, + ar->grp_crypto_len, ar->ch_hint); + + status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type, + ar->dot11_auth_mode, ar->auth_mode, + ar->prwise_crypto, + ar->prwise_crypto_len, + ar->grp_crypto, ar->grp_crypto_len, + ar->ssid_len, ar->ssid, + ar->req_bssid, ar->ch_hint, + ar->connect_ctrl_flags); + set_bit(CONNECT_PEND, &ar->flag); + + return 0; +} + +static int ath6kl_cfg80211_leave_ibss(struct wiphy *wiphy, + struct net_device *dev) +{ + struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev); + + if (!ath6kl_cfg80211_ready(ar)) + return -EIO; + + ath6kl_disconnect(ar); + memset(ar->ssid, 0, sizeof(ar->ssid)); + ar->ssid_len = 0; + + return 0; +} + +static const u32 cipher_suites[] = { + WLAN_CIPHER_SUITE_WEP40, + WLAN_CIPHER_SUITE_WEP104, + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, +}; + +static bool is_rate_legacy(s32 rate) +{ + static const s32 legacy[] = { 1000, 2000, 5500, 11000, + 6000, 9000, 12000, 18000, 24000, + 36000, 48000, 54000 + }; + u8 i; + + for (i = 0; i < ARRAY_SIZE(legacy); i++) + if (rate == legacy[i]) + return true; + + return false; +} + +static bool is_rate_ht20(s32 rate, u8 *mcs, bool *sgi) +{ + static const s32 ht20[] = { 6500, 13000, 19500, 26000, 39000, + 52000, 58500, 65000, 72200 + }; + u8 i; + + for (i = 0; i < ARRAY_SIZE(ht20); i++) { + if (rate == ht20[i]) { + if (i == ARRAY_SIZE(ht20) - 1) + /* last rate uses sgi */ + *sgi = true; + else + *sgi = false; + + *mcs = i; + return true; + } + } + return false; +} + +static bool is_rate_ht40(s32 rate, u8 *mcs, bool *sgi) +{ + static const s32 ht40[] = { 13500, 27000, 40500, 54000, + 81000, 108000, 121500, 135000, + 150000 + }; + u8 i; + + for (i = 0; i < ARRAY_SIZE(ht40); i++) { + if (rate == ht40[i]) { + if (i == ARRAY_SIZE(ht40) - 1) + /* last rate uses sgi */ + *sgi = true; + else + *sgi = false; + + *mcs = i; + return true; + } + } + + return false; +} + +static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev, + u8 *mac, struct station_info *sinfo) +{ + struct ath6kl *ar = ath6kl_priv(dev); + long left; + bool sgi; + s32 rate; + int ret; + u8 mcs; + + if (memcmp(mac, ar->bssid, ETH_ALEN) != 0) + return -ENOENT; + + if (down_interruptible(&ar->sem)) + return -EBUSY; + + set_bit(STATS_UPDATE_PEND, &ar->flag); + + ret = ath6kl_wmi_get_stats_cmd(ar->wmi); + + if (ret != 0) { + up(&ar->sem); + return -EIO; + } + + left = wait_event_interruptible_timeout(ar->event_wq, + !test_bit(STATS_UPDATE_PEND, + &ar->flag), + WMI_TIMEOUT); + + up(&ar->sem); + + if (left == 0) + return -ETIMEDOUT; + else if (left < 0) + return left; + + if (ar->target_stats.rx_byte) { + sinfo->rx_bytes = 
ar->target_stats.rx_byte; + sinfo->filled |= STATION_INFO_RX_BYTES; + sinfo->rx_packets = ar->target_stats.rx_pkt; + sinfo->filled |= STATION_INFO_RX_PACKETS; + } + + if (ar->target_stats.tx_byte) { + sinfo->tx_bytes = ar->target_stats.tx_byte; + sinfo->filled |= STATION_INFO_TX_BYTES; + sinfo->tx_packets = ar->target_stats.tx_pkt; + sinfo->filled |= STATION_INFO_TX_PACKETS; + } + + sinfo->signal = ar->target_stats.cs_rssi; + sinfo->filled |= STATION_INFO_SIGNAL; + + rate = ar->target_stats.tx_ucast_rate; + + if (is_rate_legacy(rate)) { + sinfo->txrate.legacy = rate / 100; + } else if (is_rate_ht20(rate, &mcs, &sgi)) { + if (sgi) { + sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; + sinfo->txrate.mcs = mcs - 1; + } else { + sinfo->txrate.mcs = mcs; + } + + sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS; + } else if (is_rate_ht40(rate, &mcs, &sgi)) { + if (sgi) { + sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; + sinfo->txrate.mcs = mcs - 1; + } else { + sinfo->txrate.mcs = mcs; + } + + sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; + sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS; + } else { + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, + "invalid rate from stats: %d\n", rate); + ath6kl_debug_war(ar, ATH6KL_WAR_INVALID_RATE); + return 0; + } + + sinfo->filled |= STATION_INFO_TX_BITRATE; + + if (test_bit(CONNECTED, &ar->flag) && + test_bit(DTIM_PERIOD_AVAIL, &ar->flag) && + ar->nw_type == INFRA_NETWORK) { + sinfo->filled |= STATION_INFO_BSS_PARAM; + sinfo->bss_param.flags = 0; + sinfo->bss_param.dtim_period = ar->assoc_bss_dtim_period; + sinfo->bss_param.beacon_interval = ar->assoc_bss_beacon_int; + } + + return 0; +} + +static int ath6kl_set_pmksa(struct wiphy *wiphy, struct net_device *netdev, + struct cfg80211_pmksa *pmksa) +{ + struct ath6kl *ar = ath6kl_priv(netdev); + return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid, + pmksa->pmkid, true); +} + +static int ath6kl_del_pmksa(struct wiphy *wiphy, struct net_device *netdev, + struct cfg80211_pmksa *pmksa) +{ + struct ath6kl *ar = ath6kl_priv(netdev); + return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid, + pmksa->pmkid, false); +} + +static int ath6kl_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev) +{ + struct ath6kl *ar = ath6kl_priv(netdev); + if (test_bit(CONNECTED, &ar->flag)) + return ath6kl_wmi_setpmkid_cmd(ar->wmi, ar->bssid, NULL, false); + return 0; +} + +#ifdef CONFIG_PM +static int ar6k_cfg80211_suspend(struct wiphy *wiphy, + struct cfg80211_wowlan *wow) +{ + struct ath6kl *ar = wiphy_priv(wiphy); + + return ath6kl_hif_suspend(ar); +} +#endif + +static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type) +{ + struct ath6kl *ar = ath6kl_priv(dev); + + if (!ath6kl_cfg80211_ready(ar)) + return -EIO; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n", + __func__, chan->center_freq, chan->hw_value); + ar->next_chan = chan->center_freq; + + return 0; +} + +static bool ath6kl_is_p2p_ie(const u8 *pos) +{ + return pos[0] == WLAN_EID_VENDOR_SPECIFIC && pos[1] >= 4 && + pos[2] == 0x50 && pos[3] == 0x6f && + pos[4] == 0x9a && pos[5] == 0x09; +} + +static int ath6kl_set_ap_probe_resp_ies(struct ath6kl *ar, const u8 *ies, + size_t ies_len) +{ + const u8 *pos; + u8 *buf = NULL; + size_t len = 0; + int ret; + + /* + * Filter out P2P IE(s) since they will be included depending on + * the Probe Request frame in ath6kl_send_go_probe_resp(). 
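+ * ath6kl_is_p2p_ie() above matches vendor-specific IEs carrying the + * Wi-Fi Alliance OUI 50:6f:9a with OUI type 9 (P2P).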
+ */ + + if (ies && ies_len) { + buf = kmalloc(ies_len, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + pos = ies; + while (pos + 1 < ies + ies_len) { + if (pos + 2 + pos[1] > ies + ies_len) + break; + if (!ath6kl_is_p2p_ie(pos)) { + memcpy(buf + len, pos, 2 + pos[1]); + len += 2 + pos[1]; + } + pos += 2 + pos[1]; + } + } + + ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_RESP, + buf, len); + kfree(buf); + return ret; +} + +static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev, + struct beacon_parameters *info, bool add) +{ + struct ath6kl *ar = ath6kl_priv(dev); + struct ieee80211_mgmt *mgmt; + u8 *ies; + int ies_len; + struct wmi_connect_cmd p; + int res; + int i; + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: add=%d\n", __func__, add); + + if (!ath6kl_cfg80211_ready(ar)) + return -EIO; + + if (ar->next_mode != AP_NETWORK) + return -EOPNOTSUPP; + + if (info->beacon_ies) { + res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_BEACON, + info->beacon_ies, + info->beacon_ies_len); + if (res) + return res; + } + if (info->proberesp_ies) { + res = ath6kl_set_ap_probe_resp_ies(ar, info->proberesp_ies, + info->proberesp_ies_len); + if (res) + return res; + } + if (info->assocresp_ies) { + res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_ASSOC_RESP, + info->assocresp_ies, + info->assocresp_ies_len); + if (res) + return res; + } + + if (!add) + return 0; + + ar->ap_mode_bkey.valid = false; + + /* TODO: + * info->interval + * info->dtim_period + */ + + if (info->head == NULL) + return -EINVAL; + mgmt = (struct ieee80211_mgmt *) info->head; + ies = mgmt->u.beacon.variable; + if (ies > info->head + info->head_len) + return -EINVAL; + ies_len = info->head + info->head_len - ies; + + if (info->ssid == NULL) + return -EINVAL; + memcpy(ar->ssid, info->ssid, info->ssid_len); + ar->ssid_len = info->ssid_len; + if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE) + return -EOPNOTSUPP; /* TODO */ + + ar->dot11_auth_mode = OPEN_AUTH; + + memset(&p, 0, sizeof(p)); + + for (i = 0; i < info->crypto.n_akm_suites; i++) { + switch (info->crypto.akm_suites[i]) { + case WLAN_AKM_SUITE_8021X: + if (info->crypto.wpa_versions & NL80211_WPA_VERSION_1) + p.auth_mode |= WPA_AUTH; + if (info->crypto.wpa_versions & NL80211_WPA_VERSION_2) + p.auth_mode |= WPA2_AUTH; + break; + case WLAN_AKM_SUITE_PSK: + if (info->crypto.wpa_versions & NL80211_WPA_VERSION_1) + p.auth_mode |= WPA_PSK_AUTH; + if (info->crypto.wpa_versions & NL80211_WPA_VERSION_2) + p.auth_mode |= WPA2_PSK_AUTH; + break; + } + } + if (p.auth_mode == 0) + p.auth_mode = NONE_AUTH; + ar->auth_mode = p.auth_mode; + + for (i = 0; i < info->crypto.n_ciphers_pairwise; i++) { + switch (info->crypto.ciphers_pairwise[i]) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + p.prwise_crypto_type |= WEP_CRYPT; + break; + case WLAN_CIPHER_SUITE_TKIP: + p.prwise_crypto_type |= TKIP_CRYPT; + break; + case WLAN_CIPHER_SUITE_CCMP: + p.prwise_crypto_type |= AES_CRYPT; + break; + } + } + if (p.prwise_crypto_type == 0) { + p.prwise_crypto_type = NONE_CRYPT; + ath6kl_set_cipher(ar, 0, true); + } else if (info->crypto.n_ciphers_pairwise == 1) + ath6kl_set_cipher(ar, info->crypto.ciphers_pairwise[0], true); + + switch (info->crypto.cipher_group) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + p.grp_crypto_type = WEP_CRYPT; + break; + case WLAN_CIPHER_SUITE_TKIP: + p.grp_crypto_type = TKIP_CRYPT; + break; + case WLAN_CIPHER_SUITE_CCMP: + p.grp_crypto_type = AES_CRYPT; + break; + default: + p.grp_crypto_type = NONE_CRYPT; + 
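/* unknown or absent group cipher: fall back to no encryption */ +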
break; + } + ath6kl_set_cipher(ar, info->crypto.cipher_group, false); + + p.nw_type = AP_NETWORK; + ar->nw_type = ar->next_mode; + + p.ssid_len = ar->ssid_len; + memcpy(p.ssid, ar->ssid, ar->ssid_len); + p.dot11_auth_mode = ar->dot11_auth_mode; + p.ch = cpu_to_le16(ar->next_chan); + + res = ath6kl_wmi_ap_profile_commit(ar->wmi, &p); + if (res < 0) + return res; + + return 0; +} + +static int ath6kl_add_beacon(struct wiphy *wiphy, struct net_device *dev, + struct beacon_parameters *info) +{ + return ath6kl_ap_beacon(wiphy, dev, info, true); +} + +static int ath6kl_set_beacon(struct wiphy *wiphy, struct net_device *dev, + struct beacon_parameters *info) +{ + return ath6kl_ap_beacon(wiphy, dev, info, false); +} + +static int ath6kl_del_beacon(struct wiphy *wiphy, struct net_device *dev) +{ + struct ath6kl *ar = ath6kl_priv(dev); + + if (ar->nw_type != AP_NETWORK) + return -EOPNOTSUPP; + if (!test_bit(CONNECTED, &ar->flag)) + return -ENOTCONN; + + ath6kl_wmi_disconnect_cmd(ar->wmi); + clear_bit(CONNECTED, &ar->flag); + + return 0; +} + +static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev, + u8 *mac, struct station_parameters *params) +{ + struct ath6kl *ar = ath6kl_priv(dev); + + if (ar->nw_type != AP_NETWORK) + return -EOPNOTSUPP; + + /* Use this only for authorizing/unauthorizing a station */ + if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))) + return -EOPNOTSUPP; + + if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED)) + return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_AUTHORIZE, + mac, 0); + return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_UNAUTHORIZE, mac, + 0); +} + +static int ath6kl_remain_on_channel(struct wiphy *wiphy, + struct net_device *dev, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type, + unsigned int duration, + u64 *cookie) +{ + struct ath6kl *ar = ath6kl_priv(dev); + + /* TODO: if already pending or ongoing remain-on-channel, + * return -EBUSY */ + *cookie = 1; /* only a single pending request is supported */ + + return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, chan->center_freq, + duration); +} + +static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy, + struct net_device *dev, + u64 cookie) +{ + struct ath6kl *ar = ath6kl_priv(dev); + + if (cookie != 1) + return -ENOENT; + + return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi); +} + +static int ath6kl_send_go_probe_resp(struct ath6kl *ar, const u8 *buf, + size_t len, unsigned int freq) +{ + const u8 *pos; + u8 *p2p; + int p2p_len; + int ret; + const struct ieee80211_mgmt *mgmt; + + mgmt = (const struct ieee80211_mgmt *) buf; + + /* Include P2P IE(s) from the frame generated in user space. 
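+ * Each IE is an (id, length, payload) TLV; the loop below copies only + * the P2P vendor IEs into a scratch buffer for the WMI command.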
*/ + + p2p = kmalloc(len, GFP_KERNEL); + if (p2p == NULL) + return -ENOMEM; + p2p_len = 0; + + pos = mgmt->u.probe_resp.variable; + while (pos + 1 < buf + len) { + if (pos + 2 + pos[1] > buf + len) + break; + if (ath6kl_is_p2p_ie(pos)) { + memcpy(p2p + p2p_len, pos, 2 + pos[1]); + p2p_len += 2 + pos[1]; + } + pos += 2 + pos[1]; + } + + ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, freq, mgmt->da, + p2p, p2p_len); + kfree(p2p); + return ret; +} + +static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev, + struct ieee80211_channel *chan, bool offchan, + enum nl80211_channel_type channel_type, + bool channel_type_valid, unsigned int wait, + const u8 *buf, size_t len, bool no_cck, u64 *cookie) +{ + struct ath6kl *ar = ath6kl_priv(dev); + u32 id; + const struct ieee80211_mgmt *mgmt; + + mgmt = (const struct ieee80211_mgmt *) buf; + if (buf + len >= mgmt->u.probe_resp.variable && + ar->nw_type == AP_NETWORK && test_bit(CONNECTED, &ar->flag) && + ieee80211_is_probe_resp(mgmt->frame_control)) { + /* + * Send Probe Response frame in AP mode using a separate WMI + * command to allow the target to fill in the generic IEs. + */ + *cookie = 0; /* TX status not supported */ + return ath6kl_send_go_probe_resp(ar, buf, len, + chan->center_freq); + } + + id = ar->send_action_id++; + if (id == 0) { + /* + * 0 is a reserved value in the WMI command and shall not be + * used for the command. + */ + id = ar->send_action_id++; + } + + *cookie = id; + return ath6kl_wmi_send_action_cmd(ar->wmi, id, chan->center_freq, wait, + buf, len); +} + +static void ath6kl_mgmt_frame_register(struct wiphy *wiphy, + struct net_device *dev, + u16 frame_type, bool reg) +{ + struct ath6kl *ar = ath6kl_priv(dev); + + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: frame_type=0x%x reg=%d\n", + __func__, frame_type, reg); + if (frame_type == IEEE80211_STYPE_PROBE_REQ) { + /* + * Note: This notification callback is not allowed to sleep, so + * we cannot send WMI_PROBE_REQ_REPORT_CMD here. Instead, we + * hardcode target to report Probe Request frames all the time. 
+ */ + ar->probe_req_report = reg; + } +} + +static const struct ieee80211_txrx_stypes +ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = { + [NL80211_IFTYPE_STATION] = { + .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_RESP >> 4), + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, + [NL80211_IFTYPE_P2P_CLIENT] = { + .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_RESP >> 4), + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, + [NL80211_IFTYPE_P2P_GO] = { + .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_RESP >> 4), + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, +}; + +static struct cfg80211_ops ath6kl_cfg80211_ops = { + .change_virtual_intf = ath6kl_cfg80211_change_iface, + .scan = ath6kl_cfg80211_scan, + .connect = ath6kl_cfg80211_connect, + .disconnect = ath6kl_cfg80211_disconnect, + .add_key = ath6kl_cfg80211_add_key, + .get_key = ath6kl_cfg80211_get_key, + .del_key = ath6kl_cfg80211_del_key, + .set_default_key = ath6kl_cfg80211_set_default_key, + .set_wiphy_params = ath6kl_cfg80211_set_wiphy_params, + .set_tx_power = ath6kl_cfg80211_set_txpower, + .get_tx_power = ath6kl_cfg80211_get_txpower, + .set_power_mgmt = ath6kl_cfg80211_set_power_mgmt, + .join_ibss = ath6kl_cfg80211_join_ibss, + .leave_ibss = ath6kl_cfg80211_leave_ibss, + .get_station = ath6kl_get_station, + .set_pmksa = ath6kl_set_pmksa, + .del_pmksa = ath6kl_del_pmksa, + .flush_pmksa = ath6kl_flush_pmksa, + CFG80211_TESTMODE_CMD(ath6kl_tm_cmd) +#ifdef CONFIG_PM + .suspend = ar6k_cfg80211_suspend, +#endif + .set_channel = ath6kl_set_channel, + .add_beacon = ath6kl_add_beacon, + .set_beacon = ath6kl_set_beacon, + .del_beacon = ath6kl_del_beacon, + .change_station = ath6kl_change_station, + .remain_on_channel = ath6kl_remain_on_channel, + .cancel_remain_on_channel = ath6kl_cancel_remain_on_channel, + .mgmt_tx = ath6kl_mgmt_tx, + .mgmt_frame_register = ath6kl_mgmt_frame_register, +}; + +struct wireless_dev *ath6kl_cfg80211_init(struct device *dev) +{ + int ret = 0; + struct wireless_dev *wdev; + struct ath6kl *ar; + + wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); + if (!wdev) { + ath6kl_err("couldn't allocate wireless device\n"); + return NULL; + } + + /* create a new wiphy for use with cfg80211 */ + wdev->wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl)); + if (!wdev->wiphy) { + ath6kl_err("couldn't allocate wiphy device\n"); + kfree(wdev); + return NULL; + } + + ar = wiphy_priv(wdev->wiphy); + ar->p2p = !!ath6kl_p2p; + + wdev->wiphy->mgmt_stypes = ath6kl_mgmt_stypes; + + wdev->wiphy->max_remain_on_channel_duration = 5000; + + /* set device pointer for wiphy */ + set_wiphy_dev(wdev->wiphy, dev); + + wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP); + if (ar->p2p) { + wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) | + BIT(NL80211_IFTYPE_P2P_CLIENT); + } + /* max num of ssids that can be probed during scanning */ + wdev->wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX; + wdev->wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? 
*/ + wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz; + wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz; + wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; + + wdev->wiphy->cipher_suites = cipher_suites; + wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); + + ret = wiphy_register(wdev->wiphy); + if (ret < 0) { + ath6kl_err("couldn't register wiphy device\n"); + wiphy_free(wdev->wiphy); + kfree(wdev); + return NULL; + } + + return wdev; +} + +void ath6kl_cfg80211_deinit(struct ath6kl *ar) +{ + struct wireless_dev *wdev = ar->wdev; + + if (ar->scan_req) { + cfg80211_scan_done(ar->scan_req, true); + ar->scan_req = NULL; + } + + if (!wdev) + return; + + wiphy_unregister(wdev->wiphy); + wiphy_free(wdev->wiphy); + kfree(wdev); +} diff --cc drivers/net/wireless/ath/ath6kl/debug.c index ba3f23d71150,000000000000..7879b5314285 mode 100644,000000..100644 --- a/drivers/net/wireless/ath/ath6kl/debug.c +++ b/drivers/net/wireless/ath/ath6kl/debug.c @@@ -1,934 -1,0 +1,935 @@@ +/* + * Copyright (c) 2004-2011 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "core.h" + +#include +#include +#include ++#include + +#include "debug.h" +#include "target.h" + +struct ath6kl_fwlog_slot { + __le32 timestamp; + __le32 length; + + /* max ATH6KL_FWLOG_PAYLOAD_SIZE bytes */ + u8 payload[0]; +}; + +#define ATH6KL_FWLOG_SIZE 32768 +#define ATH6KL_FWLOG_SLOT_SIZE (sizeof(struct ath6kl_fwlog_slot) + \ + ATH6KL_FWLOG_PAYLOAD_SIZE) +#define ATH6KL_FWLOG_VALID_MASK 0x1ffff + +int ath6kl_printk(const char *level, const char *fmt, ...) 
+{ + struct va_format vaf; + va_list args; + int rtn; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + rtn = printk("%sath6kl: %pV", level, &vaf); + + va_end(args); + + return rtn; +} + +#ifdef CONFIG_ATH6KL_DEBUG + +#define REG_OUTPUT_LEN_PER_LINE 25 +#define REGTYPE_STR_LEN 100 + +struct ath6kl_diag_reg_info { + u32 reg_start; + u32 reg_end; + const char *reg_info; +}; + +static const struct ath6kl_diag_reg_info diag_reg[] = { + { 0x20000, 0x200fc, "General DMA and Rx registers" }, + { 0x28000, 0x28900, "MAC PCU register & keycache" }, + { 0x20800, 0x20a40, "QCU" }, + { 0x21000, 0x212f0, "DCU" }, + { 0x4000, 0x42e4, "RTC" }, + { 0x540000, 0x540000 + (256 * 1024), "RAM" }, + { 0x29800, 0x2B210, "Base Band" }, + { 0x1C000, 0x1C748, "Analog" }, +}; + +void ath6kl_dump_registers(struct ath6kl_device *dev, + struct ath6kl_irq_proc_registers *irq_proc_reg, + struct ath6kl_irq_enable_reg *irq_enable_reg) +{ + + ath6kl_dbg(ATH6KL_DBG_ANY, ("<------- Register Table -------->\n")); + + if (irq_proc_reg != NULL) { + ath6kl_dbg(ATH6KL_DBG_ANY, + "Host Int status: 0x%x\n", + irq_proc_reg->host_int_status); + ath6kl_dbg(ATH6KL_DBG_ANY, + "CPU Int status: 0x%x\n", + irq_proc_reg->cpu_int_status); + ath6kl_dbg(ATH6KL_DBG_ANY, + "Error Int status: 0x%x\n", + irq_proc_reg->error_int_status); + ath6kl_dbg(ATH6KL_DBG_ANY, + "Counter Int status: 0x%x\n", + irq_proc_reg->counter_int_status); + ath6kl_dbg(ATH6KL_DBG_ANY, + "Mbox Frame: 0x%x\n", + irq_proc_reg->mbox_frame); + ath6kl_dbg(ATH6KL_DBG_ANY, + "Rx Lookahead Valid: 0x%x\n", + irq_proc_reg->rx_lkahd_valid); + ath6kl_dbg(ATH6KL_DBG_ANY, + "Rx Lookahead 0: 0x%x\n", + irq_proc_reg->rx_lkahd[0]); + ath6kl_dbg(ATH6KL_DBG_ANY, + "Rx Lookahead 1: 0x%x\n", + irq_proc_reg->rx_lkahd[1]); + + if (dev->ar->mbox_info.gmbox_addr != 0) { + /* + * If the target supports GMBOX hardware, dump some + * additional state. 
+ */ + ath6kl_dbg(ATH6KL_DBG_ANY, + "GMBOX Host Int status 2: 0x%x\n", + irq_proc_reg->host_int_status2); + ath6kl_dbg(ATH6KL_DBG_ANY, + "GMBOX RX Avail: 0x%x\n", + irq_proc_reg->gmbox_rx_avail); + ath6kl_dbg(ATH6KL_DBG_ANY, + "GMBOX lookahead alias 0: 0x%x\n", + irq_proc_reg->rx_gmbox_lkahd_alias[0]); + ath6kl_dbg(ATH6KL_DBG_ANY, + "GMBOX lookahead alias 1: 0x%x\n", + irq_proc_reg->rx_gmbox_lkahd_alias[1]); + } + + } + + if (irq_enable_reg != NULL) { + ath6kl_dbg(ATH6KL_DBG_ANY, + "Int status Enable: 0x%x\n", + irq_enable_reg->int_status_en); + ath6kl_dbg(ATH6KL_DBG_ANY, "Counter Int status Enable: 0x%x\n", + irq_enable_reg->cntr_int_status_en); + } + ath6kl_dbg(ATH6KL_DBG_ANY, "<------------------------------->\n"); +} + +static void dump_cred_dist(struct htc_endpoint_credit_dist *ep_dist) +{ + ath6kl_dbg(ATH6KL_DBG_ANY, + "--- endpoint: %d svc_id: 0x%X ---\n", + ep_dist->endpoint, ep_dist->svc_id); + ath6kl_dbg(ATH6KL_DBG_ANY, " dist_flags : 0x%X\n", + ep_dist->dist_flags); + ath6kl_dbg(ATH6KL_DBG_ANY, " cred_norm : %d\n", + ep_dist->cred_norm); + ath6kl_dbg(ATH6KL_DBG_ANY, " cred_min : %d\n", + ep_dist->cred_min); + ath6kl_dbg(ATH6KL_DBG_ANY, " credits : %d\n", + ep_dist->credits); + ath6kl_dbg(ATH6KL_DBG_ANY, " cred_assngd : %d\n", + ep_dist->cred_assngd); + ath6kl_dbg(ATH6KL_DBG_ANY, " seek_cred : %d\n", + ep_dist->seek_cred); + ath6kl_dbg(ATH6KL_DBG_ANY, " cred_sz : %d\n", + ep_dist->cred_sz); + ath6kl_dbg(ATH6KL_DBG_ANY, " cred_per_msg : %d\n", + ep_dist->cred_per_msg); + ath6kl_dbg(ATH6KL_DBG_ANY, " cred_to_dist : %d\n", + ep_dist->cred_to_dist); + ath6kl_dbg(ATH6KL_DBG_ANY, " txq_depth : %d\n", + get_queue_depth(&((struct htc_endpoint *) + ep_dist->htc_rsvd)->txq)); + ath6kl_dbg(ATH6KL_DBG_ANY, + "----------------------------------\n"); +} + +void dump_cred_dist_stats(struct htc_target *target) +{ + struct htc_endpoint_credit_dist *ep_list; + + if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_TRC)) + return; + + list_for_each_entry(ep_list, &target->cred_dist_list, list) + dump_cred_dist(ep_list); + + ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:%p dist:%p\n", + target->cred_dist_cntxt, NULL); + ath6kl_dbg(ATH6KL_DBG_TRC, "credit distribution, total : %d, free : %d\n", + target->cred_dist_cntxt->total_avail_credits, + target->cred_dist_cntxt->cur_free_credits); +} + +static int ath6kl_debugfs_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war) +{ + switch (war) { + case ATH6KL_WAR_INVALID_RATE: + ar->debug.war_stats.invalid_rate++; + break; + } +} + +static ssize_t read_file_war_stats(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + char *buf; + unsigned int len = 0, buf_len = 1500; + ssize_t ret_cnt; + + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + len += scnprintf(buf + len, buf_len - len, "\n"); + len += scnprintf(buf + len, buf_len - len, "%25s\n", + "Workaround stats"); + len += scnprintf(buf + len, buf_len - len, "%25s\n\n", + "================="); + len += scnprintf(buf + len, buf_len - len, "%20s %10u\n", + "Invalid rates", ar->debug.war_stats.invalid_rate); + + if (WARN_ON(len > buf_len)) + len = buf_len; + + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + kfree(buf); + return ret_cnt; +} + +static const struct file_operations fops_war_stats = { + .read = read_file_war_stats, + .open = ath6kl_debugfs_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; 
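+
+/*
+ * The debugfs read handlers in this file (read_file_war_stats above, the
+ * fwlog and stats readers below) all follow the same pattern: format a
+ * snapshot into a heap buffer, then let simple_read_from_buffer() do the
+ * offset/count bookkeeping. A minimal sketch of the pattern, using a
+ * hypothetical handler name and value that are not part of this driver:
+ *
+ *	static ssize_t read_file_example(struct file *file,
+ *					 char __user *user_buf,
+ *					 size_t count, loff_t *ppos)
+ *	{
+ *		char buf[32];
+ *		int len = scnprintf(buf, sizeof(buf), "%u\n", 42);
+ *
+ *		return simple_read_from_buffer(user_buf, count, ppos,
+ *					       buf, len);
+ *	}
+ */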
+ +static void ath6kl_debug_fwlog_add(struct ath6kl *ar, const void *buf, + size_t buf_len) +{ + struct circ_buf *fwlog = &ar->debug.fwlog_buf; + size_t space; + int i; + + /* entries must all be equal size */ + if (WARN_ON(buf_len != ATH6KL_FWLOG_SLOT_SIZE)) + return; + + space = CIRC_SPACE(fwlog->head, fwlog->tail, ATH6KL_FWLOG_SIZE); + if (space < buf_len) + /* discard oldest slot */ + fwlog->tail = (fwlog->tail + ATH6KL_FWLOG_SLOT_SIZE) & + (ATH6KL_FWLOG_SIZE - 1); + + for (i = 0; i < buf_len; i += space) { + space = CIRC_SPACE_TO_END(fwlog->head, fwlog->tail, + ATH6KL_FWLOG_SIZE); + + if ((size_t) space > buf_len - i) + space = buf_len - i; + + memcpy(&fwlog->buf[fwlog->head], buf, space); + fwlog->head = (fwlog->head + space) & (ATH6KL_FWLOG_SIZE - 1); + } + +} + +void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len) +{ + struct ath6kl_fwlog_slot *slot = ar->debug.fwlog_tmp; + size_t slot_len; + + if (WARN_ON(len > ATH6KL_FWLOG_PAYLOAD_SIZE)) + return; + + spin_lock_bh(&ar->debug.fwlog_lock); + + slot->timestamp = cpu_to_le32(jiffies); + slot->length = cpu_to_le32(len); + memcpy(slot->payload, buf, len); + + slot_len = sizeof(*slot) + len; + + if (slot_len < ATH6KL_FWLOG_SLOT_SIZE) + memset(slot->payload + len, 0, + ATH6KL_FWLOG_SLOT_SIZE - slot_len); + + ath6kl_debug_fwlog_add(ar, slot, ATH6KL_FWLOG_SLOT_SIZE); + + spin_unlock_bh(&ar->debug.fwlog_lock); +} + +static bool ath6kl_debug_fwlog_empty(struct ath6kl *ar) +{ + return CIRC_CNT(ar->debug.fwlog_buf.head, + ar->debug.fwlog_buf.tail, + ATH6KL_FWLOG_SLOT_SIZE) == 0; +} + +static ssize_t ath6kl_fwlog_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + struct circ_buf *fwlog = &ar->debug.fwlog_buf; + size_t len = 0, buf_len = count; + ssize_t ret_cnt; + char *buf; + int ccnt; + + buf = vmalloc(buf_len); + if (!buf) + return -ENOMEM; + + /* read undelivered logs from firmware */ + ath6kl_read_fwlogs(ar); + + spin_lock_bh(&ar->debug.fwlog_lock); + + while (len < buf_len && !ath6kl_debug_fwlog_empty(ar)) { + ccnt = CIRC_CNT_TO_END(fwlog->head, fwlog->tail, + ATH6KL_FWLOG_SIZE); + + if ((size_t) ccnt > buf_len - len) + ccnt = buf_len - len; + + memcpy(buf + len, &fwlog->buf[fwlog->tail], ccnt); + len += ccnt; + + fwlog->tail = (fwlog->tail + ccnt) & + (ATH6KL_FWLOG_SIZE - 1); + } + + spin_unlock_bh(&ar->debug.fwlog_lock); + + if (WARN_ON(len > buf_len)) + len = buf_len; + + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + vfree(buf); + + return ret_cnt; +} + +static const struct file_operations fops_fwlog = { + .open = ath6kl_debugfs_open, + .read = ath6kl_fwlog_read, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath6kl_fwlog_mask_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + char buf[16]; + int len; + + len = snprintf(buf, sizeof(buf), "0x%x\n", ar->debug.fwlog_mask); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath6kl_fwlog_mask_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + int ret; + + ret = kstrtou32_from_user(user_buf, count, 0, &ar->debug.fwlog_mask); + if (ret) + return ret; + + ret = ath6kl_wmi_config_debug_module_cmd(ar->wmi, + ATH6KL_FWLOG_VALID_MASK, + ar->debug.fwlog_mask); + if (ret) + return ret; + + return count; +} + +static const struct file_operations fops_fwlog_mask 
= { + .open = ath6kl_debugfs_open, + .read = ath6kl_fwlog_mask_read, + .write = ath6kl_fwlog_mask_write, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + struct target_stats *tgt_stats = &ar->target_stats; + char *buf; + unsigned int len = 0, buf_len = 1500; + int i; + long left; + ssize_t ret_cnt; + + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (down_interruptible(&ar->sem)) { + kfree(buf); + return -EBUSY; + } + + set_bit(STATS_UPDATE_PEND, &ar->flag); + + if (ath6kl_wmi_get_stats_cmd(ar->wmi)) { + up(&ar->sem); + kfree(buf); + return -EIO; + } + + left = wait_event_interruptible_timeout(ar->event_wq, + !test_bit(STATS_UPDATE_PEND, + &ar->flag), WMI_TIMEOUT); + + up(&ar->sem); + + if (left <= 0) { + kfree(buf); + return -ETIMEDOUT; + } + + len += scnprintf(buf + len, buf_len - len, "\n"); + len += scnprintf(buf + len, buf_len - len, "%25s\n", + "Target Tx stats"); + len += scnprintf(buf + len, buf_len - len, "%25s\n\n", + "================="); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Ucast packets", tgt_stats->tx_ucast_pkt); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Bcast packets", tgt_stats->tx_bcast_pkt); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Ucast byte", tgt_stats->tx_ucast_byte); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Bcast byte", tgt_stats->tx_bcast_byte); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Rts success cnt", tgt_stats->tx_rts_success_cnt); + for (i = 0; i < 4; i++) + len += scnprintf(buf + len, buf_len - len, + "%18s %d %10llu\n", "PER on ac", + i, tgt_stats->tx_pkt_per_ac[i]); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Error", tgt_stats->tx_err); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Fail count", tgt_stats->tx_fail_cnt); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Retry count", tgt_stats->tx_retry_cnt); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Multi retry cnt", tgt_stats->tx_mult_retry_cnt); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Rts fail cnt", tgt_stats->tx_rts_fail_cnt); + len += scnprintf(buf + len, buf_len - len, "%25s %10llu\n\n", + "TKIP counter measure used", + tgt_stats->tkip_cnter_measures_invoked); + + len += scnprintf(buf + len, buf_len - len, "%25s\n", + "Target Rx stats"); + len += scnprintf(buf + len, buf_len - len, "%25s\n", + "================="); + + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Ucast packets", tgt_stats->rx_ucast_pkt); + len += scnprintf(buf + len, buf_len - len, "%20s %10d\n", + "Ucast Rate", tgt_stats->rx_ucast_rate); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Bcast packets", tgt_stats->rx_bcast_pkt); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Ucast byte", tgt_stats->rx_ucast_byte); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Bcast byte", tgt_stats->rx_bcast_byte); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Fragmented pkt", tgt_stats->rx_frgment_pkt); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Error", tgt_stats->rx_err); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "CRC Err", tgt_stats->rx_crc_err); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Key 
chache miss", tgt_stats->rx_key_cache_miss); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Decrypt Err", tgt_stats->rx_decrypt_err); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Duplicate frame", tgt_stats->rx_dupl_frame); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Tkip Mic failure", tgt_stats->tkip_local_mic_fail); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "TKIP format err", tgt_stats->tkip_fmt_err); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "CCMP format Err", tgt_stats->ccmp_fmt_err); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n\n", + "CCMP Replay Err", tgt_stats->ccmp_replays); + + len += scnprintf(buf + len, buf_len - len, "%25s\n", + "Misc Target stats"); + len += scnprintf(buf + len, buf_len - len, "%25s\n", + "================="); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Beacon Miss count", tgt_stats->cs_bmiss_cnt); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Num Connects", tgt_stats->cs_connect_cnt); + len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", + "Num disconnects", tgt_stats->cs_discon_cnt); + len += scnprintf(buf + len, buf_len - len, "%20s %10d\n", + "Beacon avg rssi", tgt_stats->cs_ave_beacon_rssi); + + if (len > buf_len) + len = buf_len; + + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); + + kfree(buf); + return ret_cnt; +} + +static const struct file_operations fops_tgt_stats = { + .read = read_file_tgt_stats, + .open = ath6kl_debugfs_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +#define print_credit_info(fmt_str, ep_list_field) \ + (len += scnprintf(buf + len, buf_len - len, fmt_str, \ + ep_list->ep_list_field)) +#define CREDIT_INFO_DISPLAY_STRING_LEN 200 +#define CREDIT_INFO_LEN 128 + +static ssize_t read_file_credit_dist_stats(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + struct htc_target *target = ar->htc_target; + struct htc_endpoint_credit_dist *ep_list; + char *buf; + unsigned int buf_len, len = 0; + ssize_t ret_cnt; + + buf_len = CREDIT_INFO_DISPLAY_STRING_LEN + + get_queue_depth(&target->cred_dist_list) * CREDIT_INFO_LEN; + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + len += scnprintf(buf + len, buf_len - len, "%25s%5d\n", + "Total Avail Credits: ", + target->cred_dist_cntxt->total_avail_credits); + len += scnprintf(buf + len, buf_len - len, "%25s%5d\n", + "Free credits :", + target->cred_dist_cntxt->cur_free_credits); + + len += scnprintf(buf + len, buf_len - len, + " Epid Flags Cred_norm Cred_min Credits Cred_assngd" + " Seek_cred Cred_sz Cred_per_msg Cred_to_dist" + " qdepth\n"); + + list_for_each_entry(ep_list, &target->cred_dist_list, list) { + print_credit_info(" %2d", endpoint); + print_credit_info("%10x", dist_flags); + print_credit_info("%8d", cred_norm); + print_credit_info("%9d", cred_min); + print_credit_info("%9d", credits); + print_credit_info("%10d", cred_assngd); + print_credit_info("%13d", seek_cred); + print_credit_info("%12d", cred_sz); + print_credit_info("%9d", cred_per_msg); + print_credit_info("%14d", cred_to_dist); + len += scnprintf(buf + len, buf_len - len, "%12d\n", + get_queue_depth(&((struct htc_endpoint *) + ep_list->htc_rsvd)->txq)); + } + + if (len > buf_len) + len = buf_len; + + ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + return ret_cnt; +} + +static const struct file_operations 
fops_credit_dist_stats = { + .read = read_file_credit_dist_stats, + .open = ath6kl_debugfs_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static unsigned long ath6kl_get_num_reg(void) +{ + int i; + unsigned long n_reg = 0; + + for (i = 0; i < ARRAY_SIZE(diag_reg); i++) + n_reg = n_reg + + (diag_reg[i].reg_end - diag_reg[i].reg_start) / 4 + 1; + + return n_reg; +} + +static bool ath6kl_dbg_is_diag_reg_valid(u32 reg_addr) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(diag_reg); i++) { + if (reg_addr >= diag_reg[i].reg_start && + reg_addr <= diag_reg[i].reg_end) + return true; + } + + return false; +} + +static ssize_t ath6kl_regread_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + u8 buf[50]; + unsigned int len = 0; + + if (ar->debug.dbgfs_diag_reg) + len += scnprintf(buf + len, sizeof(buf) - len, "0x%x\n", + ar->debug.dbgfs_diag_reg); + else + len += scnprintf(buf + len, sizeof(buf) - len, + "All diag registers\n"); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath6kl_regread_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + u8 buf[50]; + unsigned int len; + unsigned long reg_addr; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + + if (strict_strtoul(buf, 0, ®_addr)) + return -EINVAL; + + if ((reg_addr % 4) != 0) + return -EINVAL; + + if (reg_addr && !ath6kl_dbg_is_diag_reg_valid(reg_addr)) + return -EINVAL; + + ar->debug.dbgfs_diag_reg = reg_addr; + + return count; +} + +static const struct file_operations fops_diag_reg_read = { + .read = ath6kl_regread_read, + .write = ath6kl_regread_write, + .open = ath6kl_debugfs_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static int ath6kl_regdump_open(struct inode *inode, struct file *file) +{ + struct ath6kl *ar = inode->i_private; + u8 *buf; + unsigned long int reg_len; + unsigned int len = 0, n_reg; + u32 addr; + __le32 reg_val; + int i, status; + + /* Dump all the registers if no register is specified */ + if (!ar->debug.dbgfs_diag_reg) + n_reg = ath6kl_get_num_reg(); + else + n_reg = 1; + + reg_len = n_reg * REG_OUTPUT_LEN_PER_LINE; + if (n_reg > 1) + reg_len += REGTYPE_STR_LEN; + + buf = vmalloc(reg_len); + if (!buf) + return -ENOMEM; + + if (n_reg == 1) { + addr = ar->debug.dbgfs_diag_reg; + + status = ath6kl_diag_read32(ar, + TARG_VTOP(ar->target_type, addr), + (u32 *)®_val); + if (status) + goto fail_reg_read; + + len += scnprintf(buf + len, reg_len - len, + "0x%06x 0x%08x\n", addr, le32_to_cpu(reg_val)); + goto done; + } + + for (i = 0; i < ARRAY_SIZE(diag_reg); i++) { + len += scnprintf(buf + len, reg_len - len, + "%s\n", diag_reg[i].reg_info); + for (addr = diag_reg[i].reg_start; + addr <= diag_reg[i].reg_end; addr += 4) { + status = ath6kl_diag_read32(ar, + TARG_VTOP(ar->target_type, addr), + (u32 *)®_val); + if (status) + goto fail_reg_read; + + len += scnprintf(buf + len, reg_len - len, + "0x%06x 0x%08x\n", + addr, le32_to_cpu(reg_val)); + } + } + +done: + file->private_data = buf; + return 0; + +fail_reg_read: + ath6kl_warn("Unable to read memory:%u\n", addr); + vfree(buf); + return -EIO; +} + +static ssize_t ath6kl_regdump_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + u8 *buf = file->private_data; + return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); +} + +static int 
ath6kl_regdump_release(struct inode *inode, struct file *file) +{ + vfree(file->private_data); + return 0; +} + +static const struct file_operations fops_reg_dump = { + .open = ath6kl_regdump_open, + .read = ath6kl_regdump_read, + .release = ath6kl_regdump_release, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath6kl_lrssi_roam_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + unsigned long lrssi_roam_threshold; + char buf[32]; + ssize_t len; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + if (strict_strtoul(buf, 0, &lrssi_roam_threshold)) + return -EINVAL; + + ar->lrssi_roam_threshold = lrssi_roam_threshold; + + ath6kl_wmi_set_roam_lrssi_cmd(ar->wmi, ar->lrssi_roam_threshold); + + return count; +} + +static ssize_t ath6kl_lrssi_roam_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + char buf[32]; + unsigned int len; + + len = snprintf(buf, sizeof(buf), "%u\n", ar->lrssi_roam_threshold); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_lrssi_roam_threshold = { + .read = ath6kl_lrssi_roam_read, + .write = ath6kl_lrssi_roam_write, + .open = ath6kl_debugfs_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath6kl_regwrite_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + u8 buf[32]; + unsigned int len = 0; + + len = scnprintf(buf, sizeof(buf), "Addr: 0x%x Val: 0x%x\n", + ar->debug.diag_reg_addr_wr, ar->debug.diag_reg_val_wr); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath6kl_regwrite_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath6kl *ar = file->private_data; + char buf[32]; + char *sptr, *token; + unsigned int len = 0; + u32 reg_addr, reg_val; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + sptr = buf; + + token = strsep(&sptr, "="); + if (!token) + return -EINVAL; + + if (kstrtou32(token, 0, ®_addr)) + return -EINVAL; + + if (!ath6kl_dbg_is_diag_reg_valid(reg_addr)) + return -EINVAL; + + if (kstrtou32(sptr, 0, ®_val)) + return -EINVAL; + + ar->debug.diag_reg_addr_wr = reg_addr; + ar->debug.diag_reg_val_wr = reg_val; + + if (ath6kl_diag_write32(ar, ar->debug.diag_reg_addr_wr, + cpu_to_le32(ar->debug.diag_reg_val_wr))) + return -EIO; + + return count; +} + +static const struct file_operations fops_diag_reg_write = { + .read = ath6kl_regwrite_read, + .write = ath6kl_regwrite_write, + .open = ath6kl_debugfs_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +int ath6kl_debug_init(struct ath6kl *ar) +{ + ar->debug.fwlog_buf.buf = vmalloc(ATH6KL_FWLOG_SIZE); + if (ar->debug.fwlog_buf.buf == NULL) + return -ENOMEM; + + ar->debug.fwlog_tmp = kmalloc(ATH6KL_FWLOG_SLOT_SIZE, GFP_KERNEL); + if (ar->debug.fwlog_tmp == NULL) { + vfree(ar->debug.fwlog_buf.buf); + return -ENOMEM; + } + + spin_lock_init(&ar->debug.fwlog_lock); + + /* + * Actually we are lying here but don't know how to read the mask + * value from the firmware. 
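+	 * Zero is assumed to match the firmware's default. Once user space
+	 * writes fwlog_mask, ath6kl_fwlog_mask_write() pushes the new value
+	 * to the target via ath6kl_wmi_config_debug_module_cmd(), after
+	 * which the cached copy and the firmware agree again.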
+ */ + ar->debug.fwlog_mask = 0; + + ar->debugfs_phy = debugfs_create_dir("ath6kl", + ar->wdev->wiphy->debugfsdir); + if (!ar->debugfs_phy) { + vfree(ar->debug.fwlog_buf.buf); + kfree(ar->debug.fwlog_tmp); + return -ENOMEM; + } + + debugfs_create_file("tgt_stats", S_IRUSR, ar->debugfs_phy, ar, + &fops_tgt_stats); + + debugfs_create_file("credit_dist_stats", S_IRUSR, ar->debugfs_phy, ar, + &fops_credit_dist_stats); + + debugfs_create_file("fwlog", S_IRUSR, ar->debugfs_phy, ar, + &fops_fwlog); + + debugfs_create_file("fwlog_mask", S_IRUSR | S_IWUSR, ar->debugfs_phy, + ar, &fops_fwlog_mask); + + debugfs_create_file("reg_addr", S_IRUSR | S_IWUSR, ar->debugfs_phy, ar, + &fops_diag_reg_read); + + debugfs_create_file("reg_dump", S_IRUSR, ar->debugfs_phy, ar, + &fops_reg_dump); + + debugfs_create_file("lrssi_roam_threshold", S_IRUSR | S_IWUSR, + ar->debugfs_phy, ar, &fops_lrssi_roam_threshold); + + debugfs_create_file("reg_write", S_IRUSR | S_IWUSR, + ar->debugfs_phy, ar, &fops_diag_reg_write); + + debugfs_create_file("war_stats", S_IRUSR, ar->debugfs_phy, ar, + &fops_war_stats); + + return 0; +} + +void ath6kl_debug_cleanup(struct ath6kl *ar) +{ + vfree(ar->debug.fwlog_buf.buf); + kfree(ar->debug.fwlog_tmp); +} + +#endif diff --cc drivers/net/wireless/ath/ath6kl/sdio.c index f1dc311ee0c7,000000000000..066d4f88807f mode 100644,000000..100644 --- a/drivers/net/wireless/ath/ath6kl/sdio.c +++ b/drivers/net/wireless/ath/ath6kl/sdio.c @@@ -1,949 -1,0 +1,950 @@@ +/* + * Copyright (c) 2004-2011 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + ++#include +#include +#include +#include +#include +#include +#include +#include +#include "htc_hif.h" +#include "hif-ops.h" +#include "target.h" +#include "debug.h" +#include "cfg80211.h" + +struct ath6kl_sdio { + struct sdio_func *func; + + spinlock_t lock; + + /* free list */ + struct list_head bus_req_freeq; + + /* available bus requests */ + struct bus_request bus_req[BUS_REQUEST_MAX_NUM]; + + struct ath6kl *ar; + u8 *dma_buffer; + + /* scatter request list head */ + struct list_head scat_req; + + spinlock_t scat_lock; + bool is_disabled; + atomic_t irq_handling; + const struct sdio_device_id *id; + struct work_struct wr_async_work; + struct list_head wr_asyncq; + spinlock_t wr_async_lock; +}; + +#define CMD53_ARG_READ 0 +#define CMD53_ARG_WRITE 1 +#define CMD53_ARG_BLOCK_BASIS 1 +#define CMD53_ARG_FIXED_ADDRESS 0 +#define CMD53_ARG_INCR_ADDRESS 1 + +static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar) +{ + return ar->hif_priv; +} + +/* + * Macro to check if DMA buffer is WORD-aligned and DMA-able. + * Most host controllers assume the buffer is DMA'able and will + * bug-check otherwise (i.e. buffers on the stack). virt_addr_valid + * check fails on stack memory. 
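+ *
+ * A buffer is therefore used directly only when it is word-aligned and
+ * passes virt_addr_valid(); anything else (stack buffers, vmalloc or
+ * unaligned memory) is bounced through the preallocated dma_buffer in
+ * ath6kl_sdio_read_write_sync() before it reaches the controller.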
+ */ +static inline bool buf_needs_bounce(u8 *buf) +{ + return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf); +} + +static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar) +{ + struct ath6kl_mbox_info *mbox_info = &ar->mbox_info; + + /* EP1 has an extended range */ + mbox_info->htc_addr = HIF_MBOX_BASE_ADDR; + mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR; + mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH; + mbox_info->block_size = HIF_MBOX_BLOCK_SIZE; + mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR; + mbox_info->gmbox_sz = HIF_GMBOX_WIDTH; +} + +static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func, + u8 mode, u8 opcode, u32 addr, + u16 blksz) +{ + *arg = (((rw & 1) << 31) | + ((func & 0x7) << 28) | + ((mode & 1) << 27) | + ((opcode & 1) << 26) | + ((addr & 0x1FFFF) << 9) | + (blksz & 0x1FF)); +} + +static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw, + unsigned int address, + unsigned char val) +{ + const u8 func = 0; + + *arg = ((write & 1) << 31) | + ((func & 0x7) << 28) | + ((raw & 1) << 27) | + (1 << 26) | + ((address & 0x1FFFF) << 9) | + (1 << 8) | + (val & 0xFF); +} + +static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card, + unsigned int address, + unsigned char byte) +{ + struct mmc_command io_cmd; + + memset(&io_cmd, 0, sizeof(io_cmd)); + ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte); + io_cmd.opcode = SD_IO_RW_DIRECT; + io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC; + + return mmc_wait_for_cmd(card->host, &io_cmd, 0); +} + +static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr, + u8 *buf, u32 len) +{ + int ret = 0; + + if (request & HIF_WRITE) { + /* FIXME: looks like ugly workaround for something */ + if (addr >= HIF_MBOX_BASE_ADDR && + addr <= HIF_MBOX_END_ADDR) + addr += (HIF_MBOX_WIDTH - len); + + /* FIXME: this also looks like ugly workaround */ + if (addr == HIF_MBOX0_EXT_BASE_ADDR) + addr += HIF_MBOX0_EXT_WIDTH - len; + + if (request & HIF_FIXED_ADDRESS) + ret = sdio_writesb(func, addr, buf, len); + else + ret = sdio_memcpy_toio(func, addr, buf, len); + } else { + if (request & HIF_FIXED_ADDRESS) + ret = sdio_readsb(func, buf, addr, len); + else + ret = sdio_memcpy_fromio(func, buf, addr, len); + } + + ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n", + request & HIF_WRITE ? "wr" : "rd", addr, + request & HIF_FIXED_ADDRESS ? 
" (fixed)" : "", buf, len); + ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len); + + return ret; +} + +static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio) +{ + struct bus_request *bus_req; + unsigned long flag; + + spin_lock_irqsave(&ar_sdio->lock, flag); + + if (list_empty(&ar_sdio->bus_req_freeq)) { + spin_unlock_irqrestore(&ar_sdio->lock, flag); + return NULL; + } + + bus_req = list_first_entry(&ar_sdio->bus_req_freeq, + struct bus_request, list); + list_del(&bus_req->list); + + spin_unlock_irqrestore(&ar_sdio->lock, flag); + ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n", + __func__, bus_req); + + return bus_req; +} + +static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio, + struct bus_request *bus_req) +{ + unsigned long flag; + + ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n", + __func__, bus_req); + + spin_lock_irqsave(&ar_sdio->lock, flag); + list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq); + spin_unlock_irqrestore(&ar_sdio->lock, flag); +} + +static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req, + struct mmc_data *data) +{ + struct scatterlist *sg; + int i; + + data->blksz = HIF_MBOX_BLOCK_SIZE; + data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE; + + ath6kl_dbg(ATH6KL_DBG_SCATTER, + "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d) , (tot:%d,sg:%d)\n", + (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr, + data->blksz, data->blocks, scat_req->len, + scat_req->scat_entries); + + data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE : + MMC_DATA_READ; + + /* fill SG entries */ + sg = scat_req->sgentries; + sg_init_table(sg, scat_req->scat_entries); + + /* assemble SG list */ + for (i = 0; i < scat_req->scat_entries; i++, sg++) { + ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n", + i, scat_req->scat_list[i].buf, + scat_req->scat_list[i].len); + + sg_set_buf(sg, scat_req->scat_list[i].buf, + scat_req->scat_list[i].len); + } + + /* set scatter-gather table for request */ + data->sg = scat_req->sgentries; + data->sg_len = scat_req->scat_entries; +} + +static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio, + struct bus_request *req) +{ + struct mmc_request mmc_req; + struct mmc_command cmd; + struct mmc_data data; + struct hif_scatter_req *scat_req; + u8 opcode, rw; + int status, len; + + scat_req = req->scat_req; + + if (scat_req->virt_scat) { + len = scat_req->len; + if (scat_req->req & HIF_BLOCK_BASIS) + len = round_down(len, HIF_MBOX_BLOCK_SIZE); + + status = ath6kl_sdio_io(ar_sdio->func, scat_req->req, + scat_req->addr, scat_req->virt_dma_buf, + len); + goto scat_complete; + } + + memset(&mmc_req, 0, sizeof(struct mmc_request)); + memset(&cmd, 0, sizeof(struct mmc_command)); + memset(&data, 0, sizeof(struct mmc_data)); + + ath6kl_sdio_setup_scat_data(scat_req, &data); + + opcode = (scat_req->req & HIF_FIXED_ADDRESS) ? + CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS; + + rw = (scat_req->req & HIF_WRITE) ? 
CMD53_ARG_WRITE : CMD53_ARG_READ; + + /* Fixup the address so that the last byte will fall on MBOX EOM */ + if (scat_req->req & HIF_WRITE) { + if (scat_req->addr == HIF_MBOX_BASE_ADDR) + scat_req->addr += HIF_MBOX_WIDTH - scat_req->len; + else + /* Uses extended address range */ + scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len; + } + + /* set command argument */ + ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num, + CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr, + data.blocks); + + cmd.opcode = SD_IO_RW_EXTENDED; + cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC; + + mmc_req.cmd = &cmd; + mmc_req.data = &data; + + mmc_set_data_timeout(&data, ar_sdio->func->card); + /* synchronous call to process request */ + mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req); + + status = cmd.error ? cmd.error : data.error; + +scat_complete: + scat_req->status = status; + + if (scat_req->status) + ath6kl_err("Scatter write request failed:%d\n", + scat_req->status); + + if (scat_req->req & HIF_ASYNCHRONOUS) + scat_req->complete(ar_sdio->ar->htc_target, scat_req); + + return status; +} + +static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio, + int n_scat_entry, int n_scat_req, + bool virt_scat) +{ + struct hif_scatter_req *s_req; + struct bus_request *bus_req; + int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz; + u8 *virt_buf; + + scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item); + scat_req_sz = sizeof(*s_req) + scat_list_sz; + + if (!virt_scat) + sg_sz = sizeof(struct scatterlist) * n_scat_entry; + else + buf_sz = 2 * L1_CACHE_BYTES + + ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER; + + for (i = 0; i < n_scat_req; i++) { + /* allocate the scatter request */ + s_req = kzalloc(scat_req_sz, GFP_KERNEL); + if (!s_req) + return -ENOMEM; + + if (virt_scat) { + virt_buf = kzalloc(buf_sz, GFP_KERNEL); + if (!virt_buf) { + kfree(s_req); + return -ENOMEM; + } + + s_req->virt_dma_buf = + (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf); + } else { + /* allocate sglist */ + s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL); + + if (!s_req->sgentries) { + kfree(s_req); + return -ENOMEM; + } + } + + /* allocate a bus request for this scatter request */ + bus_req = ath6kl_sdio_alloc_busreq(ar_sdio); + if (!bus_req) { + kfree(s_req->sgentries); + kfree(s_req->virt_dma_buf); + kfree(s_req); + return -ENOMEM; + } + + /* assign the scatter request to this bus request */ + bus_req->scat_req = s_req; + s_req->busrequest = bus_req; + + s_req->virt_scat = virt_scat; + + /* add it to the scatter pool */ + hif_scatter_req_add(ar_sdio->ar, s_req); + } + + return 0; +} + +static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf, + u32 len, u32 request) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + u8 *tbuf = NULL; + int ret; + bool bounced = false; + + if (request & HIF_BLOCK_BASIS) + len = round_down(len, HIF_MBOX_BLOCK_SIZE); + + if (buf_needs_bounce(buf)) { + if (!ar_sdio->dma_buffer) + return -ENOMEM; + tbuf = ar_sdio->dma_buffer; + memcpy(tbuf, buf, len); + bounced = true; + } else + tbuf = buf; + + sdio_claim_host(ar_sdio->func); + ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len); + if ((request & HIF_READ) && bounced) + memcpy(buf, tbuf, len); + sdio_release_host(ar_sdio->func); + + return ret; +} + +static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio, + struct bus_request *req) +{ + if (req->scat_req) + ath6kl_sdio_scat_rw(ar_sdio, req); + else { + void *context; + int status; + + status = 
ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
+						    req->buffer, req->length,
+						    req->request);
+		context = req->packet;
+		ath6kl_sdio_free_bus_req(ar_sdio, req);
+		ath6kldev_rw_comp_handler(context, status);
+	}
+}
+
+static void ath6kl_sdio_write_async_work(struct work_struct *work)
+{
+	struct ath6kl_sdio *ar_sdio;
+	unsigned long flags;
+	struct bus_request *req, *tmp_req;
+
+	ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
+	sdio_claim_host(ar_sdio->func);
+
+	spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
+		list_del(&req->list);
+		spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+		__ath6kl_sdio_write_async(ar_sdio, req);
+		spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+	}
+	spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+
+	sdio_release_host(ar_sdio->func);
+}
+
+static void ath6kl_sdio_irq_handler(struct sdio_func *func)
+{
+	int status;
+	struct ath6kl_sdio *ar_sdio;
+
+	ath6kl_dbg(ATH6KL_DBG_SDIO, "irq\n");
+
+	ar_sdio = sdio_get_drvdata(func);
+	atomic_set(&ar_sdio->irq_handling, 1);
+
+	/*
+	 * Release the host during interrupts so we can pick it back up when
+	 * we process commands.
+	 */
+	sdio_release_host(ar_sdio->func);
+
+	status = ath6kldev_intr_bh_handler(ar_sdio->ar);
+	sdio_claim_host(ar_sdio->func);
+	atomic_set(&ar_sdio->irq_handling, 0);
+	WARN_ON(status && status != -ECANCELED);
+}
+
+static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
+{
+	struct sdio_func *func = ar_sdio->func;
+	int ret = 0;
+
+	if (!ar_sdio->is_disabled)
+		return 0;
+
+	sdio_claim_host(func);
+
+	ret = sdio_enable_func(func);
+	if (ret) {
+		ath6kl_err("Unable to enable sdio func: %d\n", ret);
+		sdio_release_host(func);
+		return ret;
+	}
+
+	sdio_release_host(func);
+
+	/*
+	 * Wait for hardware to initialise. It should take a lot less than
+	 * 10 ms but let's be conservative here.
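+	 * Nothing observable signals readiness at this point, so an
+	 * open-coded delay is assumed to be the simplest safe option.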
+ */ + msleep(10); + + ar_sdio->is_disabled = false; + + return ret; +} + +static int ath6kl_sdio_power_off(struct ath6kl_sdio *ar_sdio) +{ + int ret; + + if (ar_sdio->is_disabled) + return 0; + + /* Disable the card */ + sdio_claim_host(ar_sdio->func); + ret = sdio_disable_func(ar_sdio->func); + sdio_release_host(ar_sdio->func); + + if (ret) + return ret; + + ar_sdio->is_disabled = true; + + return ret; +} + +static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer, + u32 length, u32 request, + struct htc_packet *packet) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + struct bus_request *bus_req; + unsigned long flags; + + bus_req = ath6kl_sdio_alloc_busreq(ar_sdio); + + if (!bus_req) + return -ENOMEM; + + bus_req->address = address; + bus_req->buffer = buffer; + bus_req->length = length; + bus_req->request = request; + bus_req->packet = packet; + + spin_lock_irqsave(&ar_sdio->wr_async_lock, flags); + list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq); + spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags); + queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work); + + return 0; +} + +static void ath6kl_sdio_irq_enable(struct ath6kl *ar) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + int ret; + + sdio_claim_host(ar_sdio->func); + + /* Register the isr */ + ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler); + if (ret) + ath6kl_err("Failed to claim sdio irq: %d\n", ret); + + sdio_release_host(ar_sdio->func); +} + +static void ath6kl_sdio_irq_disable(struct ath6kl *ar) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + int ret; + + sdio_claim_host(ar_sdio->func); + + /* Mask our function IRQ */ + while (atomic_read(&ar_sdio->irq_handling)) { + sdio_release_host(ar_sdio->func); + schedule_timeout(HZ / 10); + sdio_claim_host(ar_sdio->func); + } + + ret = sdio_release_irq(ar_sdio->func); + if (ret) + ath6kl_err("Failed to release sdio irq: %d\n", ret); + + sdio_release_host(ar_sdio->func); +} + +static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + struct hif_scatter_req *node = NULL; + unsigned long flag; + + spin_lock_irqsave(&ar_sdio->scat_lock, flag); + + if (!list_empty(&ar_sdio->scat_req)) { + node = list_first_entry(&ar_sdio->scat_req, + struct hif_scatter_req, list); + list_del(&node->list); + } + + spin_unlock_irqrestore(&ar_sdio->scat_lock, flag); + + return node; +} + +static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar, + struct hif_scatter_req *s_req) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + unsigned long flag; + + spin_lock_irqsave(&ar_sdio->scat_lock, flag); + + list_add_tail(&s_req->list, &ar_sdio->scat_req); + + spin_unlock_irqrestore(&ar_sdio->scat_lock, flag); + +} + +/* scatter gather read write request */ +static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar, + struct hif_scatter_req *scat_req) +{ + struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); + u32 request = scat_req->req; + int status = 0; + unsigned long flags; + + if (!scat_req->len) + return -EINVAL; + + ath6kl_dbg(ATH6KL_DBG_SCATTER, + "hif-scatter: total len: %d scatter entries: %d\n", + scat_req->len, scat_req->scat_entries); + + if (request & HIF_SYNCHRONOUS) { + sdio_claim_host(ar_sdio->func); + status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest); + sdio_release_host(ar_sdio->func); + } else { + spin_lock_irqsave(&ar_sdio->wr_async_lock, flags); + list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq); + 
spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+		queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
+	}
+
+	return status;
+}
+
+/* clean up scatter support */
+static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
+{
+	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+	struct hif_scatter_req *s_req, *tmp_req;
+	unsigned long flag;
+
+	/* empty the free list */
+	spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+	list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
+		list_del(&s_req->list);
+		spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+
+		if (s_req->busrequest)
+			ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
+		kfree(s_req->virt_dma_buf);
+		kfree(s_req->sgentries);
+		kfree(s_req);
+
+		spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+	}
+	spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+}
+
+/* setup of HIF scatter resources */
+static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
+{
+	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+	struct htc_target *target = ar->htc_target;
+	int ret;
+	bool virt_scat = false;
+
+	/* check if host supports scatter and it meets our requirements */
+	if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
+		ath6kl_err("host only supports scatter of %d entries, need: %d\n",
+			   ar_sdio->func->card->host->max_segs,
+			   MAX_SCATTER_ENTRIES_PER_REQ);
+		virt_scat = true;
+	}
+
+	if (!virt_scat) {
+		ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
+				MAX_SCATTER_ENTRIES_PER_REQ,
+				MAX_SCATTER_REQUESTS, virt_scat);
+
+		if (!ret) {
+			ath6kl_dbg(ATH6KL_DBG_SCATTER,
+				   "hif-scatter enabled: max scatter req: %d entries: %d\n",
+				   MAX_SCATTER_REQUESTS,
+				   MAX_SCATTER_ENTRIES_PER_REQ);
+
+			target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
+			target->max_xfer_szper_scatreq =
+				MAX_SCATTER_REQ_TRANSFER_SIZE;
+		} else {
+			ath6kl_sdio_cleanup_scatter(ar);
+			ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n");
+		}
+	}
+
+	if (virt_scat || ret) {
+		ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
+				ATH6KL_SCATTER_ENTRIES_PER_REQ,
+				ATH6KL_SCATTER_REQS, virt_scat);
+
+		if (ret) {
+			ath6kl_err("failed to alloc virtual scatter resources!\n");
+			ath6kl_sdio_cleanup_scatter(ar);
+			return ret;
+		}
+
+		ath6kl_dbg(ATH6KL_DBG_SCATTER,
+			   "virtual scatter enabled, max_scat_req:%d, entries:%d\n",
+			   ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);
+
+		target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
+		target->max_xfer_szper_scatreq =
+			ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
+	}
+
+	return 0;
+}
+
+static int ath6kl_sdio_suspend(struct ath6kl *ar)
+{
+	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+	struct sdio_func *func = ar_sdio->func;
+	mmc_pm_flag_t flags;
+	int ret;
+
+	flags = sdio_get_host_pm_caps(func);
+
+	if (!(flags & MMC_PM_KEEP_POWER)) {
+		/* as host doesn't support keep power we need to bail out */
+		ath6kl_dbg(ATH6KL_DBG_SDIO,
+			   "func %d doesn't support MMC_PM_KEEP_POWER\n",
+			   func->num);
+		return -EINVAL;
+	}
+
+	ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+	if (ret) {
+		printk(KERN_ERR "ath6kl: set sdio pm flags failed: %d\n",
+		       ret);
+		return ret;
+	}
+
+	ath6kl_deep_sleep_enable(ar);
+
+	return 0;
+}
+
+static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
+	.read_write_sync = ath6kl_sdio_read_write_sync,
+	.write_async = ath6kl_sdio_write_async,
+	.irq_enable = ath6kl_sdio_irq_enable,
+	.irq_disable = ath6kl_sdio_irq_disable,
+	.scatter_req_get = ath6kl_sdio_scatter_req_get,
+	.scatter_req_add = ath6kl_sdio_scatter_req_add,
+	.enable_scatter =
ath6kl_sdio_enable_scatter, + .scat_req_rw = ath6kl_sdio_async_rw_scatter, + .cleanup_scatter = ath6kl_sdio_cleanup_scatter, + .suspend = ath6kl_sdio_suspend, +}; + +static int ath6kl_sdio_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + int ret; + struct ath6kl_sdio *ar_sdio; + struct ath6kl *ar; + int count; + + ath6kl_dbg(ATH6KL_DBG_SDIO, + "new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n", + func->num, func->vendor, func->device, + func->max_blksize, func->cur_blksize); + + ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL); + if (!ar_sdio) + return -ENOMEM; + + ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL); + if (!ar_sdio->dma_buffer) { + ret = -ENOMEM; + goto err_hif; + } + + ar_sdio->func = func; + sdio_set_drvdata(func, ar_sdio); + + ar_sdio->id = id; + ar_sdio->is_disabled = true; + + spin_lock_init(&ar_sdio->lock); + spin_lock_init(&ar_sdio->scat_lock); + spin_lock_init(&ar_sdio->wr_async_lock); + + INIT_LIST_HEAD(&ar_sdio->scat_req); + INIT_LIST_HEAD(&ar_sdio->bus_req_freeq); + INIT_LIST_HEAD(&ar_sdio->wr_asyncq); + + INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work); + + for (count = 0; count < BUS_REQUEST_MAX_NUM; count++) + ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]); + + ar = ath6kl_core_alloc(&ar_sdio->func->dev); + if (!ar) { + ath6kl_err("Failed to alloc ath6kl core\n"); + ret = -ENOMEM; + goto err_dma; + } + + ar_sdio->ar = ar; + ar->hif_priv = ar_sdio; + ar->hif_ops = &ath6kl_sdio_ops; + + ath6kl_sdio_set_mbox_info(ar); + + sdio_claim_host(func); + + if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >= + MANUFACTURER_ID_AR6003_BASE) { + /* enable 4-bit ASYNC interrupt on AR6003 or later */ + ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card, + CCCR_SDIO_IRQ_MODE_REG, + SDIO_IRQ_MODE_ASYNC_4BIT_IRQ); + if (ret) { + ath6kl_err("Failed to enable 4-bit async irq mode %d\n", + ret); + sdio_release_host(func); + goto err_cfg80211; + } + + ath6kl_dbg(ATH6KL_DBG_SDIO, "4-bit async irq mode enabled\n"); + } + + /* give us some time to enable, in ms */ + func->enable_timeout = 100; + + sdio_release_host(func); + + ret = ath6kl_sdio_power_on(ar_sdio); + if (ret) + goto err_cfg80211; + + sdio_claim_host(func); + + ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE); + if (ret) { + ath6kl_err("Set sdio block size %d failed: %d)\n", + HIF_MBOX_BLOCK_SIZE, ret); + sdio_release_host(func); + goto err_off; + } + + sdio_release_host(func); + + ret = ath6kl_core_init(ar); + if (ret) { + ath6kl_err("Failed to init ath6kl core\n"); + goto err_off; + } + + return ret; + +err_off: + ath6kl_sdio_power_off(ar_sdio); +err_cfg80211: + ath6kl_cfg80211_deinit(ar_sdio->ar); +err_dma: + kfree(ar_sdio->dma_buffer); +err_hif: + kfree(ar_sdio); + + return ret; +} + +static void ath6kl_sdio_remove(struct sdio_func *func) +{ + struct ath6kl_sdio *ar_sdio; + + ath6kl_dbg(ATH6KL_DBG_SDIO, + "removed func %d vendor 0x%x device 0x%x\n", + func->num, func->vendor, func->device); + + ar_sdio = sdio_get_drvdata(func); + + ath6kl_stop_txrx(ar_sdio->ar); + cancel_work_sync(&ar_sdio->wr_async_work); + + ath6kl_unavail_ev(ar_sdio->ar); + + ath6kl_sdio_power_off(ar_sdio); + + kfree(ar_sdio->dma_buffer); + kfree(ar_sdio); +} + +static const struct sdio_device_id ath6kl_sdio_devices[] = { + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))}, + {}, +}; + +MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices); + +static struct sdio_driver 
ath6kl_sdio_driver = { + .name = "ath6kl_sdio", + .id_table = ath6kl_sdio_devices, + .probe = ath6kl_sdio_probe, + .remove = ath6kl_sdio_remove, +}; + +static int __init ath6kl_sdio_init(void) +{ + int ret; + + ret = sdio_register_driver(&ath6kl_sdio_driver); + if (ret) + ath6kl_err("sdio driver registration failed: %d\n", ret); + + return ret; +} + +static void __exit ath6kl_sdio_exit(void) +{ + sdio_unregister_driver(&ath6kl_sdio_driver); +} + +module_init(ath6kl_sdio_init); +module_exit(ath6kl_sdio_exit); + +MODULE_AUTHOR("Atheros Communications, Inc."); +MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices"); +MODULE_LICENSE("Dual BSD/GPL"); + +MODULE_FIRMWARE(AR6003_REV2_OTP_FILE); +MODULE_FIRMWARE(AR6003_REV2_FIRMWARE_FILE); +MODULE_FIRMWARE(AR6003_REV2_PATCH_FILE); +MODULE_FIRMWARE(AR6003_REV2_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6003_REV2_DEFAULT_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6003_REV3_OTP_FILE); +MODULE_FIRMWARE(AR6003_REV3_FIRMWARE_FILE); +MODULE_FIRMWARE(AR6003_REV3_PATCH_FILE); +MODULE_FIRMWARE(AR6003_REV3_BOARD_DATA_FILE); +MODULE_FIRMWARE(AR6003_REV3_DEFAULT_BOARD_DATA_FILE); diff --cc drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c index bff9dcd6fadc,000000000000..76c78fd07611 mode 100644,000000..100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c @@@ -1,371 -1,0 +1,372 @@@ +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ +/* ****************** SDIO CARD Interface Functions **************************/ + +#include +#include +#include +#include +#include +#include ++#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include "dhd.h" +#include "dhd_bus.h" +#include "dhd_dbg.h" +#include "sdio_host.h" + +#define SDIOH_API_ACCESS_RETRY_LIMIT 2 + +static void brcmf_sdioh_irqhandler(struct sdio_func *func) +{ + struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev); + + brcmf_dbg(TRACE, "***IRQHandler\n"); + + sdio_release_host(func); + + brcmf_sdbrcm_isr(sdiodev->bus); + + sdio_claim_host(func); +} + +int brcmf_sdcard_intr_reg(struct brcmf_sdio_dev *sdiodev) +{ + brcmf_dbg(TRACE, "Entering\n"); + + sdio_claim_host(sdiodev->func[1]); + sdio_claim_irq(sdiodev->func[1], brcmf_sdioh_irqhandler); + sdio_release_host(sdiodev->func[1]); + + return 0; +} + +int brcmf_sdcard_intr_dereg(struct brcmf_sdio_dev *sdiodev) +{ + brcmf_dbg(TRACE, "Entering\n"); + + sdio_claim_host(sdiodev->func[1]); + sdio_release_irq(sdiodev->func[1]); + sdio_release_host(sdiodev->func[1]); + + return 0; +} + +u8 brcmf_sdcard_cfg_read(struct brcmf_sdio_dev *sdiodev, uint fnc_num, u32 addr, + int *err) +{ + int status; + s32 retry = 0; + u8 data = 0; + + do { + if (retry) /* wait for 1 ms till bus get settled down */ + udelay(1000); + status = brcmf_sdioh_request_byte(sdiodev, SDIOH_READ, fnc_num, + addr, (u8 *) &data); + } while (status != 0 + && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT)); + if (err) + *err = status; + + brcmf_dbg(INFO, "fun = %d, addr = 0x%x, u8data = 0x%x\n", + fnc_num, addr, data); + + return data; +} + +void +brcmf_sdcard_cfg_write(struct brcmf_sdio_dev *sdiodev, uint fnc_num, u32 addr, + u8 data, int *err) +{ + int status; + s32 retry = 0; + + do { + if (retry) /* wait for 1 ms till bus get settled down */ + udelay(1000); + status = brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, fnc_num, + addr, (u8 *) &data); + } while (status != 0 + && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT)); + if (err) + *err = status; + + brcmf_dbg(INFO, "fun = %d, addr = 0x%x, u8data = 0x%x\n", + fnc_num, addr, data); +} + +int +brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address) +{ + int err = 0; + brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW, + (address >> 8) & SBSDIO_SBADDRLOW_MASK, &err); + if (!err) + brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1, + SBSDIO_FUNC1_SBADDRMID, + (address >> 16) & SBSDIO_SBADDRMID_MASK, + &err); + if (!err) + brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1, + SBSDIO_FUNC1_SBADDRHIGH, + (address >> 24) & SBSDIO_SBADDRHIGH_MASK, + &err); + + return err; +} + +u32 brcmf_sdcard_reg_read(struct brcmf_sdio_dev *sdiodev, u32 addr, uint size) +{ + int status; + u32 word = 0; + uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK; + + brcmf_dbg(INFO, "fun = 1, addr = 0x%x\n", addr); + + if (bar0 != sdiodev->sbwad) { + if (brcmf_sdcard_set_sbaddr_window(sdiodev, bar0)) + return 0xFFFFFFFF; + + sdiodev->sbwad = bar0; + } + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + if (size == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = brcmf_sdioh_request_word(sdiodev, SDIOH_READ, SDIO_FUNC_1, + addr, &word, size); + + sdiodev->regfail = (status != 0); + + brcmf_dbg(INFO, "u32data = 0x%x\n", word); + + /* if ok, return appropriately masked word */ + if (status == 0) { + switch (size) { + case sizeof(u8): + return word & 0xff; + case sizeof(u16): + return word & 0xffff; + case sizeof(u32): + return word; + default: + sdiodev->regfail = true; + + } + } + 
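+	/*
+	 * Falling through here means either the transfer itself failed or
+	 * size was not 1, 2 or 4 bytes; regfail has been latched so that
+	 * brcmf_sdcard_regfail() can report the broken access afterwards.
+	 */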
+ /* otherwise, bad sdio access or invalid size */ + brcmf_dbg(ERROR, "error reading addr 0x%04x size %d\n", addr, size); + return 0xFFFFFFFF; +} + +u32 brcmf_sdcard_reg_write(struct brcmf_sdio_dev *sdiodev, u32 addr, uint size, + u32 data) +{ + int status; + uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK; + int err = 0; + + brcmf_dbg(INFO, "fun = 1, addr = 0x%x, uint%ddata = 0x%x\n", + addr, size * 8, data); + + if (bar0 != sdiodev->sbwad) { + err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0); + if (err) + return err; + + sdiodev->sbwad = bar0; + } + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + if (size == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + status = + brcmf_sdioh_request_word(sdiodev, SDIOH_WRITE, SDIO_FUNC_1, + addr, &data, size); + sdiodev->regfail = (status != 0); + + if (status == 0) + return 0; + + brcmf_dbg(ERROR, "error writing 0x%08x to addr 0x%04x size %d\n", + data, addr, size); + return 0xFFFFFFFF; +} + +bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev) +{ + return sdiodev->regfail; +} + +int +brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, + uint flags, + u8 *buf, uint nbytes, struct sk_buff *pkt) +{ + int status; + uint incr_fix; + uint width; + uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK; + int err = 0; + + brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes); + + /* Async not implemented yet */ + if (flags & SDIO_REQ_ASYNC) + return -ENOTSUPP; + + if (bar0 != sdiodev->sbwad) { + err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0); + if (err) + return err; + + sdiodev->sbwad = bar0; + } + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + + incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; + width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; + if (width == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + status = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ, + fn, addr, width, nbytes, buf, pkt); + + return status; +} + +int +brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn, + uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt) +{ + uint incr_fix; + uint width; + uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK; + int err = 0; + + brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes); + + /* Async not implemented yet */ + if (flags & SDIO_REQ_ASYNC) + return -ENOTSUPP; + + if (bar0 != sdiodev->sbwad) { + err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0); + if (err) + return err; + + sdiodev->sbwad = bar0; + } + + addr &= SBSDIO_SB_OFT_ADDR_MASK; + + incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC; + width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; + if (width == 4) + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + return brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn, + addr, width, nbytes, buf, pkt); +} + +int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr, + u8 *buf, uint nbytes) +{ + addr &= SBSDIO_SB_OFT_ADDR_MASK; + addr |= SBSDIO_SB_ACCESS_2_4B_FLAG; + + return brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC, + (rw ? 
SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1, + addr, 4, nbytes, buf, NULL); +} + +int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn) +{ + char t_func = (char)fn; + brcmf_dbg(TRACE, "Enter\n"); + + /* issue abort cmd52 command through F0 */ + brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0, + SDIO_CCCR_ABORT, &t_func); + + brcmf_dbg(TRACE, "Exit\n"); + return 0; +} + +int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) +{ + u32 regs = 0; + int ret = 0; + + ret = brcmf_sdioh_attach(sdiodev); + if (ret) + goto out; + + regs = SI_ENUM_BASE; + + /* Report the BAR, to fix if needed */ + sdiodev->sbwad = SI_ENUM_BASE; + + /* try to attach to the target device */ + sdiodev->bus = brcmf_sdbrcm_probe(0, 0, 0, 0, regs, sdiodev); + if (!sdiodev->bus) { + brcmf_dbg(ERROR, "device attach failed\n"); + ret = -ENODEV; + goto out; + } + +out: + if (ret) + brcmf_sdio_remove(sdiodev); + + return ret; +} +EXPORT_SYMBOL(brcmf_sdio_probe); + +int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev) +{ + if (sdiodev->bus) { + brcmf_sdbrcm_disconnect(sdiodev->bus); + sdiodev->bus = NULL; + } + + brcmf_sdioh_detach(sdiodev); + + sdiodev->sbwad = 0; + + return 0; +} +EXPORT_SYMBOL(brcmf_sdio_remove); + +void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable) +{ + if (enable) + brcmf_sdbrcm_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS); + else + brcmf_sdbrcm_wd_timer(sdiodev->bus, 0); +} diff --cc drivers/net/wireless/iwlwifi/iwl-pci.c index 1d7bb7423f94,95172aad82c5..5d37172acc88 --- a/drivers/net/wireless/iwlwifi/iwl-pci.c +++ b/drivers/net/wireless/iwlwifi/iwl-pci.c @@@ -62,13 -62,12 +62,14 @@@ *****************************************************************************/ #include #include + #include #include "iwl-bus.h" -#include "iwl-agn.h" -#include "iwl-core.h" #include "iwl-io.h" +#include "iwl-shared.h" +#include "iwl-trans.h" +#include "iwl-csr.h" +#include "iwl-cfg.h" /* PCI registers */ #define PCI_CFG_RETRY_TIMEOUT 0x041 diff --cc drivers/net/wireless/rtlwifi/debug.c index 1b5cb7153a52,5fa73852cb66..579a05eb2e4b --- a/drivers/net/wireless/rtlwifi/debug.c +++ b/drivers/net/wireless/rtlwifi/debug.c @@@ -26,6 -26,6 +26,8 @@@ * Larry Finger *****************************************************************************/ ++#include ++ #include "wifi.h" void rtl_dbgp_flag_init(struct ieee80211_hw *hw) diff --cc drivers/nfc/nfcwilink.c index 5b0f1ff80361,000000000000..06c3642e5bdb mode 100644,000000..100644 --- a/drivers/nfc/nfcwilink.c +++ b/drivers/nfc/nfcwilink.c @@@ -1,342 -1,0 +1,343 @@@ +/* + * Texas Instrument's NFC Driver For Shared Transport. + * + * NFC Driver acts as interface between NCI core and + * TI Shared Transport Layer. + * + * Copyright (C) 2011 Texas Instruments, Inc. + * + * Written by Ilan Elias + * + * Acknowledgements: + * This file is based on btwilink.c, which was written + * by Raja Mani and Pavan Savoy. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ +#include ++#include +#include +#include +#include +#include + +#define NFCWILINK_CHNL 12 +#define NFCWILINK_OPCODE 7 +#define NFCWILINK_MAX_FRAME_SIZE 300 +#define NFCWILINK_HDR_LEN 4 +#define NFCWILINK_OFFSET_LEN_IN_HDR 1 +#define NFCWILINK_LEN_SIZE 2 +#define NFCWILINK_REGISTER_TIMEOUT 8000 /* 8 sec */ + +struct nfcwilink_hdr { + u8 chnl; + u8 opcode; + u16 len; +} __packed; + +struct nfcwilink { + struct platform_device *pdev; + struct nci_dev *ndev; + unsigned long flags; + + char st_register_cb_status; + long (*st_write) (struct sk_buff *); + struct completion st_register_completed; +}; + +/* NFCWILINK driver flags */ +enum { + NFCWILINK_RUNNING, +}; + +/* Called by ST when registration is complete */ +static void nfcwilink_register_complete(void *priv_data, char data) +{ + struct nfcwilink *drv = priv_data; + + nfc_dev_dbg(&drv->pdev->dev, "register_complete entry"); + + /* store ST registration status */ + drv->st_register_cb_status = data; + + /* complete the wait in nfc_st_open() */ + complete(&drv->st_register_completed); +} + +/* Called by ST when receive data is available */ +static long nfcwilink_receive(void *priv_data, struct sk_buff *skb) +{ + struct nfcwilink *drv = priv_data; + int rc; + + nfc_dev_dbg(&drv->pdev->dev, "receive entry, len %d", skb->len); + + if (!skb) + return -EFAULT; + + if (!drv) { + kfree_skb(skb); + return -EFAULT; + } + + /* strip the ST header + (apart for the chnl byte, which is not received in the hdr) */ + skb_pull(skb, (NFCWILINK_HDR_LEN-1)); + + skb->dev = (void *) drv->ndev; + + /* Forward skb to NCI core layer */ + rc = nci_recv_frame(skb); + if (rc < 0) { + nfc_dev_err(&drv->pdev->dev, "nci_recv_frame failed %d", rc); + return rc; + } + + return 0; +} + +/* protocol structure registered with ST */ +static struct st_proto_s nfcwilink_proto = { + .chnl_id = NFCWILINK_CHNL, + .max_frame_size = NFCWILINK_MAX_FRAME_SIZE, + .hdr_len = (NFCWILINK_HDR_LEN-1), /* not including chnl byte */ + .offset_len_in_hdr = NFCWILINK_OFFSET_LEN_IN_HDR, + .len_size = NFCWILINK_LEN_SIZE, + .reserve = 0, + .recv = nfcwilink_receive, + .reg_complete_cb = nfcwilink_register_complete, + .write = NULL, +}; + +static int nfcwilink_open(struct nci_dev *ndev) +{ + struct nfcwilink *drv = nci_get_drvdata(ndev); + unsigned long comp_ret; + int rc; + + nfc_dev_dbg(&drv->pdev->dev, "open entry"); + + if (test_and_set_bit(NFCWILINK_RUNNING, &drv->flags)) { + rc = -EBUSY; + goto exit; + } + + nfcwilink_proto.priv_data = drv; + + init_completion(&drv->st_register_completed); + drv->st_register_cb_status = -EINPROGRESS; + + rc = st_register(&nfcwilink_proto); + if (rc < 0) { + if (rc == -EINPROGRESS) { + comp_ret = wait_for_completion_timeout( + &drv->st_register_completed, + msecs_to_jiffies(NFCWILINK_REGISTER_TIMEOUT)); + + nfc_dev_dbg(&drv->pdev->dev, + "wait_for_completion_timeout returned %ld", + comp_ret); + + if (comp_ret == 0) { + /* timeout */ + rc = -ETIMEDOUT; + goto clear_exit; + } else if (drv->st_register_cb_status != 0) { + rc = drv->st_register_cb_status; + nfc_dev_err(&drv->pdev->dev, + "st_register_cb failed %d", rc); + goto clear_exit; + } + } else { + nfc_dev_err(&drv->pdev->dev, + "st_register failed %d", rc); + goto clear_exit; + } + } + + /* st_register MUST fill the write callback */ + BUG_ON(nfcwilink_proto.write == NULL); + 
drv->st_write = nfcwilink_proto.write; + + goto exit; + +clear_exit: + clear_bit(NFCWILINK_RUNNING, &drv->flags); + +exit: + return rc; +} + +static int nfcwilink_close(struct nci_dev *ndev) +{ + struct nfcwilink *drv = nci_get_drvdata(ndev); + int rc; + + nfc_dev_dbg(&drv->pdev->dev, "close entry"); + + if (!test_and_clear_bit(NFCWILINK_RUNNING, &drv->flags)) + return 0; + + rc = st_unregister(&nfcwilink_proto); + if (rc) + nfc_dev_err(&drv->pdev->dev, "st_unregister failed %d", rc); + + drv->st_write = NULL; + + return rc; +} + +static int nfcwilink_send(struct sk_buff *skb) +{ + struct nci_dev *ndev = (struct nci_dev *)skb->dev; + struct nfcwilink *drv = nci_get_drvdata(ndev); + struct nfcwilink_hdr hdr = {NFCWILINK_CHNL, NFCWILINK_OPCODE, 0x0000}; + long len; + + nfc_dev_dbg(&drv->pdev->dev, "send entry, len %d", skb->len); + + if (!test_bit(NFCWILINK_RUNNING, &drv->flags)) + return -EBUSY; + + /* add the ST hdr to the start of the buffer */ + hdr.len = skb->len; + memcpy(skb_push(skb, NFCWILINK_HDR_LEN), &hdr, NFCWILINK_HDR_LEN); + + /* Insert skb to shared transport layer's transmit queue. + * Freeing skb memory is taken care in shared transport layer, + * so don't free skb memory here. + */ + len = drv->st_write(skb); + if (len < 0) { + kfree_skb(skb); + nfc_dev_err(&drv->pdev->dev, "st_write failed %ld", len); + return -EFAULT; + } + + return 0; +} + +static struct nci_ops nfcwilink_ops = { + .open = nfcwilink_open, + .close = nfcwilink_close, + .send = nfcwilink_send, +}; + +static int nfcwilink_probe(struct platform_device *pdev) +{ + static struct nfcwilink *drv; + int rc; + u32 protocols; + + nfc_dev_dbg(&pdev->dev, "probe entry"); + + drv = kzalloc(sizeof(struct nfcwilink), GFP_KERNEL); + if (!drv) { + rc = -ENOMEM; + goto exit; + } + + drv->pdev = pdev; + + protocols = NFC_PROTO_JEWEL_MASK + | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK + | NFC_PROTO_ISO14443_MASK + | NFC_PROTO_NFC_DEP_MASK; + + drv->ndev = nci_allocate_device(&nfcwilink_ops, + protocols, + NFCWILINK_HDR_LEN, + 0); + if (!drv->ndev) { + nfc_dev_err(&pdev->dev, "nci_allocate_device failed"); + rc = -ENOMEM; + goto free_exit; + } + + nci_set_parent_dev(drv->ndev, &pdev->dev); + nci_set_drvdata(drv->ndev, drv); + + rc = nci_register_device(drv->ndev); + if (rc < 0) { + nfc_dev_err(&pdev->dev, "nci_register_device failed %d", rc); + goto free_dev_exit; + } + + dev_set_drvdata(&pdev->dev, drv); + + goto exit; + +free_dev_exit: + nci_free_device(drv->ndev); + +free_exit: + kfree(drv); + +exit: + return rc; +} + +static int nfcwilink_remove(struct platform_device *pdev) +{ + struct nfcwilink *drv = dev_get_drvdata(&pdev->dev); + struct nci_dev *ndev; + + nfc_dev_dbg(&pdev->dev, "remove entry"); + + if (!drv) + return -EFAULT; + + ndev = drv->ndev; + + nci_unregister_device(ndev); + nci_free_device(ndev); + + kfree(drv); + + dev_set_drvdata(&pdev->dev, NULL); + + return 0; +} + +static struct platform_driver nfcwilink_driver = { + .probe = nfcwilink_probe, + .remove = nfcwilink_remove, + .driver = { + .name = "nfcwilink", + .owner = THIS_MODULE, + }, +}; + +/* ------- Module Init/Exit interfaces ------ */ +static int __init nfcwilink_init(void) +{ + printk(KERN_INFO "NFC Driver for TI WiLink"); + + return platform_driver_register(&nfcwilink_driver); +} + +static void __exit nfcwilink_exit(void) +{ + platform_driver_unregister(&nfcwilink_driver); +} + +module_init(nfcwilink_init); +module_exit(nfcwilink_exit); + +/* ------ Module Info ------ */ + +MODULE_AUTHOR("Ilan Elias "); +MODULE_DESCRIPTION("NFC Driver for TI 
Shared Transport"); +MODULE_LICENSE("GPL"); diff --cc drivers/pinctrl/core.c index d269327bd04b,000000000000..623afbd98a2e mode 100644,000000..100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@@ -1,601 -1,0 +1,602 @@@ +/* + * Core driver for the pin control subsystem + * + * Copyright (C) 2011 ST-Ericsson SA + * Written on behalf of Linaro for ST-Ericsson + * Based on bits of regulator core, gpio core and clk core + * + * Author: Linus Walleij + * + * License terms: GNU General Public License (GPL) version 2 + */ +#define pr_fmt(fmt) "pinctrl core: " fmt + +#include ++#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "core.h" +#include "pinmux.h" + +/* Global list of pin control devices */ +static DEFINE_MUTEX(pinctrldev_list_mutex); +static LIST_HEAD(pinctrldev_list); + +static void pinctrl_dev_release(struct device *dev) +{ + struct pinctrl_dev *pctldev = dev_get_drvdata(dev); + kfree(pctldev); +} + +const char *pctldev_get_name(struct pinctrl_dev *pctldev) +{ + /* We're not allowed to register devices without name */ + return pctldev->desc->name; +} +EXPORT_SYMBOL_GPL(pctldev_get_name); + +void *pctldev_get_drvdata(struct pinctrl_dev *pctldev) +{ + return pctldev->driver_data; +} +EXPORT_SYMBOL_GPL(pctldev_get_drvdata); + +/** + * get_pctldev_from_dev() - look up pin controller device + * @dev: a device pointer, this may be NULL but then devname needs to be + * defined instead + * @devname: the name of a device instance, as returned by dev_name(), this + * may be NULL but then dev needs to be defined instead + * + * Looks up a pin control device matching a certain device name or pure device + * pointer, the pure device pointer will take precedence. + */ +struct pinctrl_dev *get_pctldev_from_dev(struct device *dev, + const char *devname) +{ + struct pinctrl_dev *pctldev = NULL; + bool found = false; + + mutex_lock(&pinctrldev_list_mutex); + list_for_each_entry(pctldev, &pinctrldev_list, node) { + if (dev && &pctldev->dev == dev) { + /* Matched on device pointer */ + found = true; + break; + } + + if (devname && + !strcmp(dev_name(&pctldev->dev), devname)) { + /* Matched on device name */ + found = true; + break; + } + } + mutex_unlock(&pinctrldev_list_mutex); + + if (found) + return pctldev; + + return NULL; +} + +struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, int pin) +{ + struct pin_desc *pindesc; + unsigned long flags; + + spin_lock_irqsave(&pctldev->pin_desc_tree_lock, flags); + pindesc = radix_tree_lookup(&pctldev->pin_desc_tree, pin); + spin_unlock_irqrestore(&pctldev->pin_desc_tree_lock, flags); + + return pindesc; +} + +/** + * pin_is_valid() - check if pin exists on controller + * @pctldev: the pin control device to check the pin on + * @pin: pin to check, use the local pin controller index number + * + * This tells us whether a certain pin exist on a certain pin controller or + * not. Pin lists may be sparse, so some pins may not exist. 
+ */ +bool pin_is_valid(struct pinctrl_dev *pctldev, int pin) +{ + struct pin_desc *pindesc; + + if (pin < 0) + return false; + + pindesc = pin_desc_get(pctldev, pin); + if (pindesc == NULL) + return false; + + return true; +} +EXPORT_SYMBOL_GPL(pin_is_valid); + +/* Deletes a range of pin descriptors */ +static void pinctrl_free_pindescs(struct pinctrl_dev *pctldev, + const struct pinctrl_pin_desc *pins, + unsigned num_pins) +{ + int i; + + spin_lock(&pctldev->pin_desc_tree_lock); + for (i = 0; i < num_pins; i++) { + struct pin_desc *pindesc; + + pindesc = radix_tree_lookup(&pctldev->pin_desc_tree, + pins[i].number); + if (pindesc != NULL) { + radix_tree_delete(&pctldev->pin_desc_tree, + pins[i].number); + } + kfree(pindesc); + } + spin_unlock(&pctldev->pin_desc_tree_lock); +} + +static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev, + unsigned number, const char *name) +{ + struct pin_desc *pindesc; + + pindesc = pin_desc_get(pctldev, number); + if (pindesc != NULL) { + pr_err("pin %d already registered on %s\n", number, + pctldev->desc->name); + return -EINVAL; + } + + pindesc = kzalloc(sizeof(*pindesc), GFP_KERNEL); + if (pindesc == NULL) + return -ENOMEM; + spin_lock_init(&pindesc->lock); + + /* Set owner */ + pindesc->pctldev = pctldev; + + /* Copy optional basic pin info */ + if (name) + strlcpy(pindesc->name, name, sizeof(pindesc->name)); + + spin_lock(&pctldev->pin_desc_tree_lock); + radix_tree_insert(&pctldev->pin_desc_tree, number, pindesc); + spin_unlock(&pctldev->pin_desc_tree_lock); + pr_debug("registered pin %d (%s) on %s\n", + number, name ? name : "(unnamed)", pctldev->desc->name); + return 0; +} + +static int pinctrl_register_pins(struct pinctrl_dev *pctldev, + struct pinctrl_pin_desc const *pins, + unsigned num_descs) +{ + unsigned i; + int ret = 0; + + for (i = 0; i < num_descs; i++) { + ret = pinctrl_register_one_pin(pctldev, + pins[i].number, pins[i].name); + if (ret) + return ret; + } + + return 0; +} + +/** + * pinctrl_match_gpio_range() - check if a certain GPIO pin is in range + * @pctldev: pin controller device to check + * @gpio: gpio pin to check taken from the global GPIO pin space + * + * Tries to match a GPIO pin number to the ranges handled by a certain pin + * controller, return the range or NULL + */ +static struct pinctrl_gpio_range * +pinctrl_match_gpio_range(struct pinctrl_dev *pctldev, unsigned gpio) +{ + struct pinctrl_gpio_range *range = NULL; + + /* Loop over the ranges */ + mutex_lock(&pctldev->gpio_ranges_lock); + list_for_each_entry(range, &pctldev->gpio_ranges, node) { + /* Check if we're in the valid range */ + if (gpio >= range->base && + gpio < range->base + range->npins) { + mutex_unlock(&pctldev->gpio_ranges_lock); + return range; + } + } + mutex_unlock(&pctldev->gpio_ranges_lock); + + return NULL; +} + +/** + * pinctrl_get_device_gpio_range() - find device for GPIO range + * @gpio: the pin to locate the pin controller for + * @outdev: the pin control device if found + * @outrange: the GPIO range if found + * + * Find the pin controller handling a certain GPIO pin from the pinspace of + * the GPIO subsystem, return the device and the matching GPIO range. Returns + * negative if the GPIO range could not be found in any device. 
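+ * + * Editorial usage sketch (assumed caller code, not part of this commit): + * + *	struct pinctrl_dev *pctldev; + *	struct pinctrl_gpio_range *range; + * + *	if (pinctrl_get_device_gpio_range(gpio, &pctldev, &range) < 0) + *		return -ENODEV;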
+ */ +int pinctrl_get_device_gpio_range(unsigned gpio, + struct pinctrl_dev **outdev, + struct pinctrl_gpio_range **outrange) +{ + struct pinctrl_dev *pctldev = NULL; + + /* Loop over the pin controllers */ + mutex_lock(&pinctrldev_list_mutex); + list_for_each_entry(pctldev, &pinctrldev_list, node) { + struct pinctrl_gpio_range *range; + + range = pinctrl_match_gpio_range(pctldev, gpio); + if (range != NULL) { + *outdev = pctldev; + *outrange = range; + mutex_unlock(&pinctrldev_list_mutex); + return 0; + } + } + mutex_unlock(&pinctrldev_list_mutex); + + return -EINVAL; +} + +/** + * pinctrl_add_gpio_range() - register a GPIO range for a controller + * @pctldev: pin controller device to add the range to + * @range: the GPIO range to add + * + * This adds a range of GPIOs to be handled by a certain pin controller. Call + * this to register handled ranges after registering your pin controller. + */ +void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range) +{ + mutex_lock(&pctldev->gpio_ranges_lock); + list_add(&range->node, &pctldev->gpio_ranges); + mutex_unlock(&pctldev->gpio_ranges_lock); +} + +/** + * pinctrl_remove_gpio_range() - remove a range of GPIOs from a pin controller + * @pctldev: pin controller device to remove the range from + * @range: the GPIO range to remove + */ +void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range) +{ + mutex_lock(&pctldev->gpio_ranges_lock); + list_del(&range->node); + mutex_unlock(&pctldev->gpio_ranges_lock); +} + +#ifdef CONFIG_DEBUG_FS + +static int pinctrl_pins_show(struct seq_file *s, void *what) +{ + struct pinctrl_dev *pctldev = s->private; + const struct pinctrl_ops *ops = pctldev->desc->pctlops; + unsigned pin; + + seq_printf(s, "registered pins: %d\n", pctldev->desc->npins); + seq_printf(s, "max pin number: %d\n", pctldev->desc->maxpin); + + /* The highest pin number needs to be included in the loop, thus <= */ + for (pin = 0; pin <= pctldev->desc->maxpin; pin++) { + struct pin_desc *desc; + + desc = pin_desc_get(pctldev, pin); + /* Pin space may be sparse */ + if (desc == NULL) + continue; + + seq_printf(s, "pin %d (%s) ", pin, + desc->name ?
desc->name : "unnamed"); + + /* Driver-specific info per pin */ + if (ops->pin_dbg_show) + ops->pin_dbg_show(pctldev, s, pin); + + seq_puts(s, "\n"); + } + + return 0; +} + +static int pinctrl_groups_show(struct seq_file *s, void *what) +{ + struct pinctrl_dev *pctldev = s->private; + const struct pinctrl_ops *ops = pctldev->desc->pctlops; + unsigned selector = 0; + + /* No grouping */ + if (!ops) + return 0; + + seq_puts(s, "registered pin groups:\n"); + while (ops->list_groups(pctldev, selector) >= 0) { + unsigned *pins; + unsigned num_pins; + const char *gname = ops->get_group_name(pctldev, selector); + int ret; + int i; + + ret = ops->get_group_pins(pctldev, selector, + &pins, &num_pins); + if (ret) + seq_printf(s, "%s [ERROR GETTING PINS]\n", + gname); + else { + seq_printf(s, "group: %s, pins = [ ", gname); + for (i = 0; i < num_pins; i++) + seq_printf(s, "%d ", pins[i]); + seq_puts(s, "]\n"); + } + selector++; + } + + + return 0; +} + +static int pinctrl_gpioranges_show(struct seq_file *s, void *what) +{ + struct pinctrl_dev *pctldev = s->private; + struct pinctrl_gpio_range *range = NULL; + + seq_puts(s, "GPIO ranges handled:\n"); + + /* Loop over the ranges */ + mutex_lock(&pctldev->gpio_ranges_lock); + list_for_each_entry(range, &pctldev->gpio_ranges, node) { + seq_printf(s, "%u: %s [%u - %u]\n", range->id, range->name, + range->base, (range->base + range->npins - 1)); + } + mutex_unlock(&pctldev->gpio_ranges_lock); + + return 0; +} + +static int pinctrl_devices_show(struct seq_file *s, void *what) +{ + struct pinctrl_dev *pctldev; + + seq_puts(s, "name [pinmux]\n"); + mutex_lock(&pinctrldev_list_mutex); + list_for_each_entry(pctldev, &pinctrldev_list, node) { + seq_printf(s, "%s ", pctldev->desc->name); + if (pctldev->desc->pmxops) + seq_puts(s, "yes"); + else + seq_puts(s, "no"); + seq_puts(s, "\n"); + } + mutex_unlock(&pinctrldev_list_mutex); + + return 0; +} + +static int pinctrl_pins_open(struct inode *inode, struct file *file) +{ + return single_open(file, pinctrl_pins_show, inode->i_private); +} + +static int pinctrl_groups_open(struct inode *inode, struct file *file) +{ + return single_open(file, pinctrl_groups_show, inode->i_private); +} + +static int pinctrl_gpioranges_open(struct inode *inode, struct file *file) +{ + return single_open(file, pinctrl_gpioranges_show, inode->i_private); +} + +static int pinctrl_devices_open(struct inode *inode, struct file *file) +{ + return single_open(file, pinctrl_devices_show, NULL); +} + +static const struct file_operations pinctrl_pins_ops = { + .open = pinctrl_pins_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations pinctrl_groups_ops = { + .open = pinctrl_groups_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations pinctrl_gpioranges_ops = { + .open = pinctrl_gpioranges_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations pinctrl_devices_ops = { + .open = pinctrl_devices_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct dentry *debugfs_root; + +static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev) +{ + static struct dentry *device_root; + + device_root = debugfs_create_dir(dev_name(&pctldev->dev), + debugfs_root); + if (IS_ERR(device_root) || !device_root) { + pr_warn("failed to create debugfs directory for %s\n", + dev_name(&pctldev->dev)); + return; + } + 
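/* + * One read-only seq_file per view: "pins", "pingroups" and "gpio-ranges", + * each created with this pinctrl_dev as its seq_file private data. + */ + 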
debugfs_create_file("pins", S_IFREG | S_IRUGO, + device_root, pctldev, &pinctrl_pins_ops); + debugfs_create_file("pingroups", S_IFREG | S_IRUGO, + device_root, pctldev, &pinctrl_groups_ops); + debugfs_create_file("gpio-ranges", S_IFREG | S_IRUGO, + device_root, pctldev, &pinctrl_gpioranges_ops); + pinmux_init_device_debugfs(device_root, pctldev); +} + +static void pinctrl_init_debugfs(void) +{ + debugfs_root = debugfs_create_dir("pinctrl", NULL); + if (IS_ERR(debugfs_root) || !debugfs_root) { + pr_warn("failed to create debugfs directory\n"); + debugfs_root = NULL; + return; + } + + debugfs_create_file("pinctrl-devices", S_IFREG | S_IRUGO, + debugfs_root, NULL, &pinctrl_devices_ops); + pinmux_init_debugfs(debugfs_root); +} + +#else /* CONFIG_DEBUG_FS */ + +static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev) +{ +} + +static void pinctrl_init_debugfs(void) +{ +} + +#endif + +/** + * pinctrl_register() - register a pin controller device + * @pctldesc: descriptor for this pin controller + * @dev: parent device for this pin controller + * @driver_data: private pin controller data for this pin controller + */ +struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc, + struct device *dev, void *driver_data) +{ + static atomic_t pinmux_no = ATOMIC_INIT(0); + struct pinctrl_dev *pctldev; + int ret; + + if (pctldesc == NULL) + return ERR_PTR(-EINVAL); + if (pctldesc->name == NULL) + return ERR_PTR(-EINVAL); + + /* If we're implementing pinmuxing, check the ops for sanity */ + if (pctldesc->pmxops) { + ret = pinmux_check_ops(pctldesc->pmxops); + if (ret) { + pr_err("%s pinmux ops lacks necessary functions\n", + pctldesc->name); + return ERR_PTR(ret); + } + } + + pctldev = kzalloc(sizeof(struct pinctrl_dev), GFP_KERNEL); + if (pctldev == NULL) + return ERR_PTR(-ENOMEM); + + /* Initialize pin control device struct */ + pctldev->owner = pctldesc->owner; + pctldev->desc = pctldesc; + pctldev->driver_data = driver_data; + INIT_RADIX_TREE(&pctldev->pin_desc_tree, GFP_KERNEL); + spin_lock_init(&pctldev->pin_desc_tree_lock); + INIT_LIST_HEAD(&pctldev->gpio_ranges); + mutex_init(&pctldev->gpio_ranges_lock); + + /* Register device */ + pctldev->dev.parent = dev; + dev_set_name(&pctldev->dev, "pinctrl.%d", + atomic_inc_return(&pinmux_no) - 1); + pctldev->dev.release = pinctrl_dev_release; + ret = device_register(&pctldev->dev); + if (ret != 0) { + pr_err("error in device registration\n"); + goto out_reg_dev_err; + } + dev_set_drvdata(&pctldev->dev, pctldev); + + /* Register all the pins */ + pr_debug("try to register %d pins on %s...\n", + pctldesc->npins, pctldesc->name); + ret = pinctrl_register_pins(pctldev, pctldesc->pins, pctldesc->npins); + if (ret) { + pr_err("error during pin registration\n"); + pinctrl_free_pindescs(pctldev, pctldesc->pins, + pctldesc->npins); + goto out_reg_pins_err; + } + + pinctrl_init_device_debugfs(pctldev); + mutex_lock(&pinctrldev_list_mutex); + list_add(&pctldev->node, &pinctrldev_list); + mutex_unlock(&pinctrldev_list_mutex); + pinmux_hog_maps(pctldev); + return pctldev; + +out_reg_pins_err: + device_del(&pctldev->dev); +out_reg_dev_err: + put_device(&pctldev->dev); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(pinctrl_register); + +/** + * pinctrl_unregister() - unregister pinmux + * @pctldev: pin controller to unregister + * + * Called by pinmux drivers to unregister a pinmux. 
+ */ +void pinctrl_unregister(struct pinctrl_dev *pctldev) +{ + if (pctldev == NULL) + return; + + pinmux_unhog_maps(pctldev); + /* TODO: check that no pinmuxes are still active? */ + mutex_lock(&pinctrldev_list_mutex); + list_del(&pctldev->node); + mutex_unlock(&pinctrldev_list_mutex); + /* Destroy descriptor tree */ + pinctrl_free_pindescs(pctldev, pctldev->desc->pins, + pctldev->desc->npins); + device_unregister(&pctldev->dev); +} +EXPORT_SYMBOL_GPL(pinctrl_unregister); + +static int __init pinctrl_init(void) +{ + pr_info("initialized pinctrl subsystem\n"); + pinctrl_init_debugfs(); + return 0; +} + +/* init early since many drivers really need to initialize pinmux early */ +core_initcall(pinctrl_init); diff --cc drivers/s390/char/vmur.c index d291a54acfad,b95cbdccc11a..85f4a9a5d12e --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c @@@ -11,8 -11,10 +11,9 @@@ #define KMSG_COMPONENT "vmur" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt -#include #include #include + #include #include #include diff --cc drivers/scsi/libfc/fc_lport.c index 628f347404f9,f4e4885f71b7..31018a8465ab --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@@ -88,7 -88,7 +88,8 @@@ */ #include +#include + #include #include #include diff --cc drivers/scsi/libsas/sas_host_smp.c index e1aa17840c5b,04ad8dd1a74c..bb8f49269a68 --- a/drivers/scsi/libsas/sas_host_smp.c +++ b/drivers/scsi/libsas/sas_host_smp.c @@@ -11,6 -11,6 +11,7 @@@ #include #include #include ++#include #include "sas_internal.h" diff --cc drivers/staging/iio/accel/adis16201_ring.c index dbd883294d6c,a84083caa7c4..0016ed378e3a --- a/drivers/staging/iio/accel/adis16201_ring.c +++ b/drivers/staging/iio/accel/adis16201_ring.c @@@ -1,5 -1,8 +1,6 @@@ + #include #include -#include #include -#include #include #include #include diff --cc drivers/staging/iio/accel/adis16201_trigger.c index f448258884c5,5a4123707cd5..bce505e716d0 --- a/drivers/staging/iio/accel/adis16201_trigger.c +++ b/drivers/staging/iio/accel/adis16201_trigger.c @@@ -1,8 -1,14 +1,9 @@@ #include -#include -#include -#include #include -#include #include + #include #include "../iio.h" -#include "../sysfs.h" #include "../trigger.h" #include "adis16201.h" diff --cc drivers/staging/iio/accel/adis16203_ring.c index 838d3012c87b,ac6460bbe48c..1fdfe6f6ac6e --- a/drivers/staging/iio/accel/adis16203_ring.c +++ b/drivers/staging/iio/accel/adis16203_ring.c @@@ -1,5 -1,10 +1,6 @@@ + #include #include -#include -#include -#include #include -#include #include #include #include diff --cc drivers/staging/iio/accel/adis16203_trigger.c index 50165f9ddc52,4d5d4a1b29ce..24bcb8e15c55 --- a/drivers/staging/iio/accel/adis16203_trigger.c +++ b/drivers/staging/iio/accel/adis16203_trigger.c @@@ -1,8 -1,15 +1,9 @@@ #include -#include -#include -#include #include -#include -#include #include + #include #include "../iio.h" -#include "../sysfs.h" #include "../trigger.h" #include "adis16203.h" diff --cc drivers/staging/iio/accel/adis16204_ring.c index 08551bb48f18,bf4da926a727..6fd3d8f51f2c --- a/drivers/staging/iio/accel/adis16204_ring.c +++ b/drivers/staging/iio/accel/adis16204_ring.c @@@ -1,5 -1,10 +1,6 @@@ + #include #include -#include -#include -#include #include -#include #include #include #include diff --cc drivers/staging/iio/accel/adis16204_trigger.c index 55b661c98d2d,daf5186592d0..6e542af02c09 --- a/drivers/staging/iio/accel/adis16204_trigger.c +++ b/drivers/staging/iio/accel/adis16204_trigger.c @@@ -1,8 -1,15 +1,9 @@@ #include -#include -#include -#include #include -#include -#include
#include + #include #include "../iio.h" -#include "../sysfs.h" #include "../trigger.h" #include "adis16204.h" diff --cc drivers/staging/iio/accel/adis16209_ring.c index bb66364bef04,131a7b6dd60a..d17e39d95459 --- a/drivers/staging/iio/accel/adis16209_ring.c +++ b/drivers/staging/iio/accel/adis16209_ring.c @@@ -1,5 -1,10 +1,6 @@@ + #include #include -#include -#include -#include #include -#include #include #include #include diff --cc drivers/staging/iio/accel/adis16209_trigger.c index 8df8a9791d5e,03e7b65efecc..c5d82c1a55d9 --- a/drivers/staging/iio/accel/adis16209_trigger.c +++ b/drivers/staging/iio/accel/adis16209_trigger.c @@@ -1,8 -1,15 +1,9 @@@ #include -#include -#include -#include #include -#include -#include #include + #include #include "../iio.h" -#include "../sysfs.h" #include "../trigger.h" #include "adis16209.h" diff --cc drivers/staging/iio/accel/adis16240_ring.c index 34f1e7e6a56f,d261647d9050..b907ca3f4fdf --- a/drivers/staging/iio/accel/adis16240_ring.c +++ b/drivers/staging/iio/accel/adis16240_ring.c @@@ -1,5 -1,10 +1,6 @@@ + #include #include -#include -#include -#include #include -#include #include #include #include diff --cc drivers/staging/iio/accel/adis16240_trigger.c index 13f1d142eea3,1be0de3308ce..8e0ce568e64c --- a/drivers/staging/iio/accel/adis16240_trigger.c +++ b/drivers/staging/iio/accel/adis16240_trigger.c @@@ -1,8 -1,15 +1,9 @@@ #include -#include -#include -#include #include -#include -#include #include + #include #include "../iio.h" -#include "../sysfs.h" #include "../trigger.h" #include "adis16240.h" diff --cc drivers/staging/iio/accel/lis3l02dq_ring.c index 5c542dd04616,ce905bb663e1..89527af8f4c5 --- a/drivers/staging/iio/accel/lis3l02dq_ring.c +++ b/drivers/staging/iio/accel/lis3l02dq_ring.c @@@ -1,15 -1,20 +1,16 @@@ #include -#include #include #include -#include #include #include -#include #include + #include #include "../iio.h" -#include "../sysfs.h" #include "../ring_sw.h" #include "../kfifo_buf.h" -#include "accel.h" #include "../trigger.h" +#include "../trigger_consumer.h" #include "lis3l02dq.h" /** diff --cc drivers/staging/iio/gyro/adis16260_ring.c index 679c15155716,469d6b208929..52a9e784e7c8 --- a/drivers/staging/iio/gyro/adis16260_ring.c +++ b/drivers/staging/iio/gyro/adis16260_ring.c @@@ -1,5 -1,10 +1,6 @@@ + #include #include -#include -#include -#include #include -#include #include #include #include diff --cc drivers/staging/iio/gyro/adis16260_trigger.c index 2f2b2160f44d,8f57a6b823f7..8299cd18d705 --- a/drivers/staging/iio/gyro/adis16260_trigger.c +++ b/drivers/staging/iio/gyro/adis16260_trigger.c @@@ -1,8 -1,15 +1,9 @@@ #include -#include -#include -#include #include -#include -#include #include + #include #include "../iio.h" -#include "../sysfs.h" #include "../trigger.h" #include "adis16260.h" diff --cc drivers/staging/iio/imu/adis16400_ring.c index c3682458d78d,9fcad468b18c..fd886bf51a6d --- a/drivers/staging/iio/imu/adis16400_ring.c +++ b/drivers/staging/iio/imu/adis16400_ring.c @@@ -3,11 -6,17 +3,12 @@@ #include #include #include -#include -#include #include + #include #include "../iio.h" -#include "../sysfs.h" #include "../ring_sw.h" -#include "../accel/accel.h" -#include "../trigger.h" +#include "../trigger_consumer.h" #include "adis16400.h" /** diff --cc drivers/staging/iio/imu/adis16400_trigger.c index bf991531e0d6,e5d7f962fd42..5bf000757522 --- a/drivers/staging/iio/imu/adis16400_trigger.c +++ b/drivers/staging/iio/imu/adis16400_trigger.c @@@ -1,8 -1,15 +1,9 @@@ #include -#include -#include -#include #include -#include -#include 
#include + #include #include "../iio.h" -#include "../sysfs.h" #include "../trigger.h" #include "adis16400.h" diff --cc drivers/staging/iio/industrialio-buffer.c index 6dd5d7d629a1,000000000000..9df0ce81dade mode 100644,000000..100644 --- a/drivers/staging/iio/industrialio-buffer.c +++ b/drivers/staging/iio/industrialio-buffer.c @@@ -1,635 -1,0 +1,636 @@@ +/* The industrial I/O core + * + * Copyright (c) 2008 Jonathan Cameron + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * Handling of buffer allocation / resizing. + * + * + * Things to look at here. + * - Better memory allocation techniques? + * - Alternative access techniques? + */ +#include ++#include +#include +#include +#include +#include +#include + +#include "iio.h" +#include "iio_core.h" +#include "sysfs.h" +#include "buffer_generic.h" + +static const char * const iio_endian_prefix[] = { + [IIO_BE] = "be", + [IIO_LE] = "le", +}; + +/** + * iio_buffer_read_first_n_outer() - chrdev read for buffer access + * + * This function relies on all buffer implementations having an + * iio_buffer as their first element. + **/ +ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf, + size_t n, loff_t *f_ps) +{ + struct iio_dev *indio_dev = filp->private_data; + struct iio_buffer *rb = indio_dev->buffer; + + if (!rb->access->read_first_n) + return -EINVAL; + return rb->access->read_first_n(rb, n, buf); +} + +/** + * iio_buffer_poll() - poll the buffer to find out if it has data + */ +unsigned int iio_buffer_poll(struct file *filp, + struct poll_table_struct *wait) +{ + struct iio_dev *indio_dev = filp->private_data; + struct iio_buffer *rb = indio_dev->buffer; + + poll_wait(filp, &rb->pollq, wait); + if (rb->stufftoread) + return POLLIN | POLLRDNORM; + /* need a way of knowing if there may be enough data... 
*/ + return 0; +} + +int iio_chrdev_buffer_open(struct iio_dev *indio_dev) +{ + struct iio_buffer *rb = indio_dev->buffer; + if (!rb) + return -EINVAL; + if (rb->access->mark_in_use) + rb->access->mark_in_use(rb); + return 0; +} + +void iio_chrdev_buffer_release(struct iio_dev *indio_dev) +{ + struct iio_buffer *rb = indio_dev->buffer; + + clear_bit(IIO_BUSY_BIT_POS, &rb->flags); + if (rb->access->unmark_in_use) + rb->access->unmark_in_use(rb); +} + +void iio_buffer_init(struct iio_buffer *buffer, struct iio_dev *indio_dev) +{ + buffer->indio_dev = indio_dev; + init_waitqueue_head(&buffer->pollq); +} +EXPORT_SYMBOL(iio_buffer_init); + +static ssize_t iio_show_scan_index(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index); +} + +static ssize_t iio_show_fixed_type(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); + u8 type = this_attr->c->scan_type.endianness; + + if (type == IIO_CPU) { +#ifdef __LITTLE_ENDIAN + type = IIO_LE; +#else + type = IIO_BE; +#endif + } + return sprintf(buf, "%s:%c%d/%d>>%u\n", + iio_endian_prefix[type], + this_attr->c->scan_type.sign, + this_attr->c->scan_type.realbits, + this_attr->c->scan_type.storagebits, + this_attr->c->scan_type.shift); +} + +static ssize_t iio_scan_el_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int ret; + struct iio_dev *indio_dev = dev_get_drvdata(dev); + + ret = iio_scan_mask_query(indio_dev->buffer, + to_iio_dev_attr(attr)->address); + if (ret < 0) + return ret; + return sprintf(buf, "%d\n", ret); +} + +static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit) +{ + clear_bit(bit, buffer->scan_mask); + buffer->scan_count--; + return 0; +} + +static ssize_t iio_scan_el_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t len) +{ + int ret = 0; + bool state; + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct iio_buffer *buffer = indio_dev->buffer; + struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); + + state = !(buf[0] == '0'); + mutex_lock(&indio_dev->mlock); + if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) { + ret = -EBUSY; + goto error_ret; + } + ret = iio_scan_mask_query(buffer, this_attr->address); + if (ret < 0) + goto error_ret; + if (!state && ret) { + ret = iio_scan_mask_clear(buffer, this_attr->address); + if (ret) + goto error_ret; + } else if (state && !ret) { + ret = iio_scan_mask_set(buffer, this_attr->address); + if (ret) + goto error_ret; + } + +error_ret: + mutex_unlock(&indio_dev->mlock); + + return ret ? ret : len; + +} + +static ssize_t iio_scan_el_ts_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp); +} + +static ssize_t iio_scan_el_ts_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t len) +{ + int ret = 0; + struct iio_dev *indio_dev = dev_get_drvdata(dev); + bool state; + + state = !(buf[0] == '0'); + mutex_lock(&indio_dev->mlock); + if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) { + ret = -EBUSY; + goto error_ret; + } + indio_dev->buffer->scan_timestamp = state; +error_ret: + mutex_unlock(&indio_dev->mlock); + + return ret ? 
ret : len; +} + +static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan) +{ + int ret, attrcount = 0; + struct iio_buffer *buffer = indio_dev->buffer; + + ret = __iio_add_chan_devattr("index", + chan, + &iio_show_scan_index, + NULL, + 0, + 0, + &indio_dev->dev, + &buffer->scan_el_dev_attr_list); + if (ret) + goto error_ret; + attrcount++; + ret = __iio_add_chan_devattr("type", + chan, + &iio_show_fixed_type, + NULL, + 0, + 0, + &indio_dev->dev, + &buffer->scan_el_dev_attr_list); + if (ret) + goto error_ret; + attrcount++; + if (chan->type != IIO_TIMESTAMP) + ret = __iio_add_chan_devattr("en", + chan, + &iio_scan_el_show, + &iio_scan_el_store, + chan->scan_index, + 0, + &indio_dev->dev, + &buffer->scan_el_dev_attr_list); + else + ret = __iio_add_chan_devattr("en", + chan, + &iio_scan_el_ts_show, + &iio_scan_el_ts_store, + chan->scan_index, + 0, + &indio_dev->dev, + &buffer->scan_el_dev_attr_list); + attrcount++; + ret = attrcount; +error_ret: + return ret; +} + +static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev, + struct iio_dev_attr *p) +{ + kfree(p->dev_attr.attr.name); + kfree(p); +} + +static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev) +{ + struct iio_dev_attr *p, *n; + struct iio_buffer *buffer = indio_dev->buffer; + + list_for_each_entry_safe(p, n, + &buffer->scan_el_dev_attr_list, l) + iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p); +} + +static const char * const iio_scan_elements_group_name = "scan_elements"; + +int iio_buffer_register(struct iio_dev *indio_dev, + const struct iio_chan_spec *channels, + int num_channels) +{ + struct iio_dev_attr *p; + struct attribute **attr; + struct iio_buffer *buffer = indio_dev->buffer; + int ret, i, attrn, attrcount, attrcount_orig = 0; + + if (buffer->attrs) + indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs; + + if (buffer->scan_el_attrs != NULL) { + attr = buffer->scan_el_attrs->attrs; + while (*attr++ != NULL) + attrcount_orig++; + } + attrcount = attrcount_orig; + INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list); + if (channels) { + /* new magic */ + for (i = 0; i < num_channels; i++) { + /* Establish necessary mask length */ + if (channels[i].scan_index > + (int)indio_dev->masklength - 1) + indio_dev->masklength + = indio_dev->channels[i].scan_index + 1; + + ret = iio_buffer_add_channel_sysfs(indio_dev, + &channels[i]); + if (ret < 0) + goto error_cleanup_dynamic; + attrcount += ret; + } + if (indio_dev->masklength && buffer->scan_mask == NULL) { + buffer->scan_mask + = kzalloc(sizeof(*buffer->scan_mask)* + BITS_TO_LONGS(indio_dev->masklength), + GFP_KERNEL); + if (buffer->scan_mask == NULL) { + ret = -ENOMEM; + goto error_cleanup_dynamic; + } + } + } + + buffer->scan_el_group.name = iio_scan_elements_group_name; + + buffer->scan_el_group.attrs + = kzalloc(sizeof(buffer->scan_el_group.attrs[0])* + (attrcount + 1), + GFP_KERNEL); + if (buffer->scan_el_group.attrs == NULL) { + ret = -ENOMEM; + goto error_free_scan_mask; + } + if (buffer->scan_el_attrs) + memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs, + sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig); + attrn = attrcount_orig; + + list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l) + buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr; + indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group; + + return 0; + +error_free_scan_mask: + kfree(buffer->scan_mask); +error_cleanup_dynamic: + __iio_buffer_attr_cleanup(indio_dev); + + return ret; +} 
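+ +/* + * Editorial usage sketch (assumed caller, not from this commit): a driver + * with a buffer typically registers its channel table once at probe time: + * + *	ret = iio_buffer_register(indio_dev, indio_dev->channels, + *				  indio_dev->num_channels); + *	if (ret) + *		goto error_unwind; + */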
+EXPORT_SYMBOL(iio_buffer_register); + +void iio_buffer_unregister(struct iio_dev *indio_dev) +{ + kfree(indio_dev->buffer->scan_mask); + kfree(indio_dev->buffer->scan_el_group.attrs); + __iio_buffer_attr_cleanup(indio_dev); +} +EXPORT_SYMBOL(iio_buffer_unregister); + +ssize_t iio_buffer_read_length(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct iio_buffer *buffer = indio_dev->buffer; + + if (buffer->access->get_length) + return sprintf(buf, "%d\n", + buffer->access->get_length(buffer)); + + return 0; +} +EXPORT_SYMBOL(iio_buffer_read_length); + +ssize_t iio_buffer_write_length(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t len) +{ + int ret; + ulong val; + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct iio_buffer *buffer = indio_dev->buffer; + + ret = strict_strtoul(buf, 10, &val); + if (ret) + return ret; + + if (buffer->access->get_length) + if (val == buffer->access->get_length(buffer)) + return len; + + if (buffer->access->set_length) { + buffer->access->set_length(buffer, val); + if (buffer->access->mark_param_change) + buffer->access->mark_param_change(buffer); + } + + return len; +} +EXPORT_SYMBOL(iio_buffer_write_length); + +ssize_t iio_buffer_read_bytes_per_datum(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct iio_buffer *buffer = indio_dev->buffer; + + if (buffer->access->get_bytes_per_datum) + return sprintf(buf, "%d\n", + buffer->access->get_bytes_per_datum(buffer)); + + return 0; +} +EXPORT_SYMBOL(iio_buffer_read_bytes_per_datum); + +ssize_t iio_buffer_store_enable(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t len) +{ + int ret; + bool requested_state, current_state; + int previous_mode; + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct iio_buffer *buffer = indio_dev->buffer; + + mutex_lock(&indio_dev->mlock); + previous_mode = indio_dev->currentmode; + requested_state = !(buf[0] == '0'); + current_state = !!(previous_mode & INDIO_ALL_BUFFER_MODES); + if (current_state == requested_state) { + printk(KERN_INFO "iio-buffer, current state requested again\n"); + goto done; + } + if (requested_state) { + if (buffer->setup_ops->preenable) { + ret = buffer->setup_ops->preenable(indio_dev); + if (ret) { + printk(KERN_ERR + "Buffer not started: " + "buffer preenable failed\n"); + goto error_ret; + } + } + if (buffer->access->request_update) { + ret = buffer->access->request_update(buffer); + if (ret) { + printk(KERN_INFO + "Buffer not started: " + "buffer parameter update failed\n"); + goto error_ret; + } + } + if (buffer->access->mark_in_use) + buffer->access->mark_in_use(buffer); + /* Definitely possible for devices to support both of these. */ + if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) { + if (!indio_dev->trig) { + printk(KERN_INFO + "Buffer not started: no trigger\n"); + ret = -EINVAL; + if (buffer->access->unmark_in_use) + buffer->access->unmark_in_use(buffer); + goto error_ret; + } + indio_dev->currentmode = INDIO_BUFFER_TRIGGERED; + } else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) + indio_dev->currentmode = INDIO_BUFFER_HARDWARE; + else { /* should never be reached */ + ret = -EINVAL; + goto error_ret; + } + + if (buffer->setup_ops->postenable) { + ret = buffer->setup_ops->postenable(indio_dev); + if (ret) { + printk(KERN_INFO + "Buffer not started: " + "postenable failed\n"); + if (buffer->access->unmark_in_use) +
buffer->access->unmark_in_use(buffer); + indio_dev->currentmode = previous_mode; + if (buffer->setup_ops->postdisable) + buffer->setup_ops-> + postdisable(indio_dev); + goto error_ret; + } + } + } else { + if (buffer->setup_ops->predisable) { + ret = buffer->setup_ops->predisable(indio_dev); + if (ret) + goto error_ret; + } + if (buffer->access->unmark_in_use) + buffer->access->unmark_in_use(buffer); + indio_dev->currentmode = INDIO_DIRECT_MODE; + if (buffer->setup_ops->postdisable) { + ret = buffer->setup_ops->postdisable(indio_dev); + if (ret) + goto error_ret; + } + } +done: + mutex_unlock(&indio_dev->mlock); + return len; + +error_ret: + mutex_unlock(&indio_dev->mlock); + return ret; +} +EXPORT_SYMBOL(iio_buffer_store_enable); + +ssize_t iio_buffer_show_enable(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + return sprintf(buf, "%d\n", !!(indio_dev->currentmode + & INDIO_ALL_BUFFER_MODES)); +} +EXPORT_SYMBOL(iio_buffer_show_enable); + +int iio_sw_buffer_preenable(struct iio_dev *indio_dev) +{ + struct iio_buffer *buffer = indio_dev->buffer; + size_t size; + dev_dbg(&indio_dev->dev, "%s\n", __func__); + /* Check if there are any scan elements enabled, if not fail*/ + if (!(buffer->scan_count || buffer->scan_timestamp)) + return -EINVAL; + if (buffer->scan_timestamp) + if (buffer->scan_count) + /* Timestamp (aligned to s64) and data */ + size = (((buffer->scan_count * buffer->bpe) + + sizeof(s64) - 1) + & ~(sizeof(s64) - 1)) + + sizeof(s64); + else /* Timestamp only */ + size = sizeof(s64); + else /* Data only */ + size = buffer->scan_count * buffer->bpe; + buffer->access->set_bytes_per_datum(buffer, size); + + return 0; +} +EXPORT_SYMBOL(iio_sw_buffer_preenable); + + +/* note NULL used as error indicator as it doesn't make sense. */ +static unsigned long *iio_scan_mask_match(unsigned long *av_masks, + unsigned int masklength, + unsigned long *mask) +{ + if (bitmap_empty(mask, masklength)) + return NULL; + while (*av_masks) { + if (bitmap_subset(mask, av_masks, masklength)) + return av_masks; + av_masks += BITS_TO_LONGS(masklength); + } + return NULL; +} + +/** + * iio_scan_mask_set() - set particular bit in the scan mask + * @buffer: the buffer whose scan mask we are interested in + * @bit: the bit to be set. 
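+ * + * The bit is first set in a trial copy of the mask; when the driver supplies + * available_scan_masks, the trial mask must be a subset of one of the + * advertised masks or the request is rejected with -EINVAL.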
+ **/ +int iio_scan_mask_set(struct iio_buffer *buffer, int bit) +{ + struct iio_dev *indio_dev = buffer->indio_dev; + unsigned long *mask; + unsigned long *trialmask; + + trialmask = kmalloc(sizeof(*trialmask)* + BITS_TO_LONGS(indio_dev->masklength), + GFP_KERNEL); + + if (trialmask == NULL) + return -ENOMEM; + if (!indio_dev->masklength) { + WARN(1, "trying to set scanmask prior to registering buffer\n"); + kfree(trialmask); + return -EINVAL; + } + bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength); + set_bit(bit, trialmask); + + if (indio_dev->available_scan_masks) { + mask = iio_scan_mask_match(indio_dev->available_scan_masks, + indio_dev->masklength, + trialmask); + if (!mask) { + kfree(trialmask); + return -EINVAL; + } + } + bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength); + buffer->scan_count++; + + kfree(trialmask); + + return 0; +} +EXPORT_SYMBOL_GPL(iio_scan_mask_set); + +int iio_scan_mask_query(struct iio_buffer *buffer, int bit) +{ + struct iio_dev *indio_dev = buffer->indio_dev; + long *mask; + + if (bit > indio_dev->masklength) + return -EINVAL; + + if (!buffer->scan_mask) + return 0; + if (indio_dev->available_scan_masks) + mask = iio_scan_mask_match(indio_dev->available_scan_masks, + indio_dev->masklength, + buffer->scan_mask); + else + mask = buffer->scan_mask; + if (!mask) + return 0; + + return test_bit(bit, mask); +} +EXPORT_SYMBOL_GPL(iio_scan_mask_query); diff --cc drivers/staging/iio/meter/ade7758_ring.c index 99ade658a2dc,1649f5bd1737..00fa2ac5c459 --- a/drivers/staging/iio/meter/ade7758_ring.c +++ b/drivers/staging/iio/meter/ade7758_ring.c @@@ -5,7 -5,13 +5,8 @@@ * * Licensed under the GPL-2. */ + #include #include -#include -#include -#include -#include -#include #include #include #include diff --cc drivers/staging/iio/meter/ade7758_trigger.c index 392dfe302443,851b7458cade..b6569c706651 --- a/drivers/staging/iio/meter/ade7758_trigger.c +++ b/drivers/staging/iio/meter/ade7758_trigger.c @@@ -7,10 -7,17 +7,11 @@@ */ #include -#include -#include -#include #include -#include -#include #include + #include #include "../iio.h" -#include "../sysfs.h" #include "../trigger.h" #include "ade7758.h" diff --cc drivers/staging/rts5139/rts51x_scsi.c index 3b32f9e6e4f0,000000000000..87c9cdc8bd29 mode 100644,000000..100644 --- a/drivers/staging/rts5139/rts51x_scsi.c +++ b/drivers/staging/rts5139/rts51x_scsi.c @@@ -1,2234 -1,0 +1,2235 @@@ +/* Driver for Realtek RTS51xx USB card reader + * + * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + * + * Author: + * wwang (wei_wang@realsil.com.cn) + * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China + * Maintainer: + * Edwin Rong (edwin_rong@realsil.com.cn) + * No.
450, Shenhu Road, Suzhou Industry Park, Suzhou, China + */ + +#include +#include +#include +#include +#include ++#include + +#include +#include +#include + +#include "debug.h" +#include "rts51x.h" +#include "rts51x_chip.h" +#include "rts51x_scsi.h" +#include "rts51x_card.h" +#include "rts51x_transport.h" +#include "rts51x_sys.h" +#include "sd_cprm.h" +#include "ms_mg.h" +#include "trace.h" + +void scsi_show_command(struct scsi_cmnd *srb) +{ + char *what = NULL; + int i, unknown_cmd = 0; + + switch (srb->cmnd[0]) { + case TEST_UNIT_READY: + what = (char *)"TEST_UNIT_READY"; + break; + case REZERO_UNIT: + what = (char *)"REZERO_UNIT"; + break; + case REQUEST_SENSE: + what = (char *)"REQUEST_SENSE"; + break; + case FORMAT_UNIT: + what = (char *)"FORMAT_UNIT"; + break; + case READ_BLOCK_LIMITS: + what = (char *)"READ_BLOCK_LIMITS"; + break; + case 0x07: + what = (char *)"REASSIGN_BLOCKS"; + break; + case READ_6: + what = (char *)"READ_6"; + break; + case WRITE_6: + what = (char *)"WRITE_6"; + break; + case SEEK_6: + what = (char *)"SEEK_6"; + break; + case READ_REVERSE: + what = (char *)"READ_REVERSE"; + break; + case WRITE_FILEMARKS: + what = (char *)"WRITE_FILEMARKS"; + break; + case SPACE: + what = (char *)"SPACE"; + break; + case INQUIRY: + what = (char *)"INQUIRY"; + break; + case RECOVER_BUFFERED_DATA: + what = (char *)"RECOVER_BUFFERED_DATA"; + break; + case MODE_SELECT: + what = (char *)"MODE_SELECT"; + break; + case RESERVE: + what = (char *)"RESERVE"; + break; + case RELEASE: + what = (char *)"RELEASE"; + break; + case COPY: + what = (char *)"COPY"; + break; + case ERASE: + what = (char *)"ERASE"; + break; + case MODE_SENSE: + what = (char *)"MODE_SENSE"; + break; + case START_STOP: + what = (char *)"START_STOP"; + break; + case RECEIVE_DIAGNOSTIC: + what = (char *)"RECEIVE_DIAGNOSTIC"; + break; + case SEND_DIAGNOSTIC: + what = (char *)"SEND_DIAGNOSTIC"; + break; + case ALLOW_MEDIUM_REMOVAL: + what = (char *)"ALLOW_MEDIUM_REMOVAL"; + break; + case SET_WINDOW: + what = (char *)"SET_WINDOW"; + break; + case READ_CAPACITY: + what = (char *)"READ_CAPACITY"; + break; + case READ_10: + what = (char *)"READ_10"; + break; + case WRITE_10: + what = (char *)"WRITE_10"; + break; + case SEEK_10: + what = (char *)"SEEK_10"; + break; + case WRITE_VERIFY: + what = (char *)"WRITE_VERIFY"; + break; + case VERIFY: + what = (char *)"VERIFY"; + break; + case SEARCH_HIGH: + what = (char *)"SEARCH_HIGH"; + break; + case SEARCH_EQUAL: + what = (char *)"SEARCH_EQUAL"; + break; + case SEARCH_LOW: + what = (char *)"SEARCH_LOW"; + break; + case SET_LIMITS: + what = (char *)"SET_LIMITS"; + break; + case READ_POSITION: + what = (char *)"READ_POSITION"; + break; + case SYNCHRONIZE_CACHE: + what = (char *)"SYNCHRONIZE_CACHE"; + break; + case LOCK_UNLOCK_CACHE: + what = (char *)"LOCK_UNLOCK_CACHE"; + break; + case READ_DEFECT_DATA: + what = (char *)"READ_DEFECT_DATA"; + break; + case MEDIUM_SCAN: + what = (char *)"MEDIUM_SCAN"; + break; + case COMPARE: + what = (char *)"COMPARE"; + break; + case COPY_VERIFY: + what = (char *)"COPY_VERIFY"; + break; + case WRITE_BUFFER: + what = (char *)"WRITE_BUFFER"; + break; + case READ_BUFFER: + what = (char *)"READ_BUFFER"; + break; + case UPDATE_BLOCK: + what = (char *)"UPDATE_BLOCK"; + break; + case READ_LONG: + what = (char *)"READ_LONG"; + break; + case WRITE_LONG: + what = (char *)"WRITE_LONG"; + break; + case CHANGE_DEFINITION: + what = (char *)"CHANGE_DEFINITION"; + break; + case WRITE_SAME: + what = (char *)"WRITE_SAME"; + break; + case GPCMD_READ_SUBCHANNEL: + what = 
(char *)"READ SUBCHANNEL"; + break; + case READ_TOC: + what = (char *)"READ_TOC"; + break; + case GPCMD_READ_HEADER: + what = (char *)"READ HEADER"; + break; + case GPCMD_PLAY_AUDIO_10: + what = (char *)"PLAY AUDIO (10)"; + break; + case GPCMD_PLAY_AUDIO_MSF: + what = (char *)"PLAY AUDIO MSF"; + break; + case GPCMD_GET_EVENT_STATUS_NOTIFICATION: + what = (char *)"GET EVENT/STATUS NOTIFICATION"; + break; + case GPCMD_PAUSE_RESUME: + what = (char *)"PAUSE/RESUME"; + break; + case LOG_SELECT: + what = (char *)"LOG_SELECT"; + break; + case LOG_SENSE: + what = (char *)"LOG_SENSE"; + break; + case GPCMD_STOP_PLAY_SCAN: + what = (char *)"STOP PLAY/SCAN"; + break; + case GPCMD_READ_DISC_INFO: + what = (char *)"READ DISC INFORMATION"; + break; + case GPCMD_READ_TRACK_RZONE_INFO: + what = (char *)"READ TRACK INFORMATION"; + break; + case GPCMD_RESERVE_RZONE_TRACK: + what = (char *)"RESERVE TRACK"; + break; + case GPCMD_SEND_OPC: + what = (char *)"SEND OPC"; + break; + case MODE_SELECT_10: + what = (char *)"MODE_SELECT_10"; + break; + case GPCMD_REPAIR_RZONE_TRACK: + what = (char *)"REPAIR TRACK"; + break; + case 0x59: + what = (char *)"READ MASTER CUE"; + break; + case MODE_SENSE_10: + what = (char *)"MODE_SENSE_10"; + break; + case GPCMD_CLOSE_TRACK: + what = (char *)"CLOSE TRACK/SESSION"; + break; + case 0x5C: + what = (char *)"READ BUFFER CAPACITY"; + break; + case 0x5D: + what = (char *)"SEND CUE SHEET"; + break; + case GPCMD_BLANK: + what = (char *)"BLANK"; + break; + case REPORT_LUNS: + what = (char *)"REPORT LUNS"; + break; + case MOVE_MEDIUM: + what = (char *)"MOVE_MEDIUM or PLAY AUDIO (12)"; + break; + case READ_12: + what = (char *)"READ_12"; + break; + case WRITE_12: + what = (char *)"WRITE_12"; + break; + case WRITE_VERIFY_12: + what = (char *)"WRITE_VERIFY_12"; + break; + case SEARCH_HIGH_12: + what = (char *)"SEARCH_HIGH_12"; + break; + case SEARCH_EQUAL_12: + what = (char *)"SEARCH_EQUAL_12"; + break; + case SEARCH_LOW_12: + what = (char *)"SEARCH_LOW_12"; + break; + case SEND_VOLUME_TAG: + what = (char *)"SEND_VOLUME_TAG"; + break; + case READ_ELEMENT_STATUS: + what = (char *)"READ_ELEMENT_STATUS"; + break; + case GPCMD_READ_CD_MSF: + what = (char *)"READ CD MSF"; + break; + case GPCMD_SCAN: + what = (char *)"SCAN"; + break; + case GPCMD_SET_SPEED: + what = (char *)"SET CD SPEED"; + break; + case GPCMD_MECHANISM_STATUS: + what = (char *)"MECHANISM STATUS"; + break; + case GPCMD_READ_CD: + what = (char *)"READ CD"; + break; + case 0xE1: + what = (char *)"WRITE CONTINUE"; + break; + case WRITE_LONG_2: + what = (char *)"WRITE_LONG_2"; + break; + case VENDOR_CMND: + what = (char *)"Realtek's vendor command"; + break; + default: + what = (char *)"(unknown command)"; + unknown_cmd = 1; + break; + } + + if (srb->cmnd[0] != TEST_UNIT_READY) + RTS51X_DEBUGP("Command %s (%d bytes)\n", what, srb->cmd_len); + if (unknown_cmd) { + RTS51X_DEBUGP(""); + for (i = 0; i < srb->cmd_len && i < 16; i++) + RTS51X_DEBUGPN(" %02x", srb->cmnd[i]); + RTS51X_DEBUGPN("\n"); + } +} + +void set_sense_type(struct rts51x_chip *chip, unsigned int lun, int sense_type) +{ + switch (sense_type) { + case SENSE_TYPE_MEDIA_CHANGE: + set_sense_data(chip, lun, CUR_ERR, 0x06, 0, 0x28, 0, 0, 0); + break; + + case SENSE_TYPE_MEDIA_NOT_PRESENT: + set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x3A, 0, 0, 0); + break; + + case SENSE_TYPE_MEDIA_LBA_OVER_RANGE: + set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x21, 0, 0, 0); + break; + + case SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT: + set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x25, 0, 0, 0); 
+ break; + + case SENSE_TYPE_MEDIA_WRITE_PROTECT: + set_sense_data(chip, lun, CUR_ERR, 0x07, 0, 0x27, 0, 0, 0); + break; + + case SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR: + set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x11, 0, 0, 0); + break; + + case SENSE_TYPE_MEDIA_WRITE_ERR: + set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x0C, 0x02, 0, 0); + break; + + case SENSE_TYPE_MEDIA_INVALID_CMD_FIELD: + set_sense_data(chip, lun, CUR_ERR, ILGAL_REQ, 0, + ASC_INVLD_CDB, ASCQ_INVLD_CDB, CDB_ILLEGAL, 1); + break; + + case SENSE_TYPE_FORMAT_IN_PROGRESS: + set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04, 0, 0); + break; + + case SENSE_TYPE_FORMAT_CMD_FAILED: + set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x31, 0x01, 0, 0); + break; + +#ifdef SUPPORT_MAGIC_GATE + case SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB: + set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x6F, 0x02, 0, 0); + break; + + case SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN: + set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x6F, 0x00, 0, 0); + break; + + case SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM: + set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x30, 0x00, 0, 0); + break; + + case SENSE_TYPE_MG_WRITE_ERR: + set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x0C, 0x00, 0, 0); + break; +#endif + +#ifdef SUPPORT_SD_LOCK + case SENSE_TYPE_MEDIA_READ_FORBIDDEN: + set_sense_data(chip, lun, CUR_ERR, 0x07, 0, 0x11, 0x13, 0, 0); + break; +#endif + + case SENSE_TYPE_NO_SENSE: + default: + set_sense_data(chip, lun, CUR_ERR, 0, 0, 0, 0, 0, 0); + break; + } +} + +void set_sense_data(struct rts51x_chip *chip, unsigned int lun, u8 err_code, + u8 sense_key, u32 info, u8 asc, u8 ascq, u8 sns_key_info0, + u16 sns_key_info1) +{ + struct sense_data_t *sense = &(chip->sense_buffer[lun]); + + sense->err_code = err_code; + sense->sense_key = sense_key; + sense->info[0] = (u8) (info >> 24); + sense->info[1] = (u8) (info >> 16); + sense->info[2] = (u8) (info >> 8); + sense->info[3] = (u8) info; + + sense->ad_sense_len = sizeof(struct sense_data_t) - 8; + sense->asc = asc; + sense->ascq = ascq; + if (sns_key_info0 != 0) { + sense->sns_key_info[0] = SKSV | sns_key_info0; + sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 8; + sense->sns_key_info[2] = sns_key_info1 & 0x0f; + } +} + +static int test_unit_ready(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + unsigned int lun = SCSI_LUN(srb); + + rts51x_init_cards(chip); + + if (!check_card_ready(chip, lun)) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); + return TRANSPORT_FAILED; + } + + if (!check_lun_mc(chip, lun)) { + set_lun_mc(chip, lun); + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE); + return TRANSPORT_FAILED; + } +#ifdef SUPPORT_SD_LOCK + if (get_lun_card(chip, SCSI_LUN(srb)) == SD_CARD) { + struct sd_info *sd_card = &(chip->sd_card); + if (sd_card->sd_lock_notify) { + sd_card->sd_lock_notify = 0; + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE); + return TRANSPORT_FAILED; + } else if (sd_card->sd_lock_status & SD_LOCKED) { + set_sense_type(chip, lun, + SENSE_TYPE_MEDIA_READ_FORBIDDEN); + return TRANSPORT_FAILED; + } + } +#endif + + return TRANSPORT_GOOD; +} + +unsigned char formatter_inquiry_str[20] = { + 'M', 'E', 'M', 'O', 'R', 'Y', 'S', 'T', 'I', 'C', 'K', + '-', 'M', 'G', /* Byte[47:49] */ + 0x0B, /* Byte[50]: MG, MS, MSPro, MSXC */ + 0x00, /* Byte[51]: Category Specific Commands */ + 0x00, /* Byte[52]: Access Control and feature */ + 0x20, 0x20, 0x20, /* Byte[53:55] */ +}; + +static int inquiry(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + unsigned int lun = SCSI_LUN(srb); + char *inquiry_default = 
(char *)"Generic-xD/SD/M.S. 1.00 "; + char *inquiry_string; + unsigned char sendbytes; + unsigned char *buf; + u8 card = get_lun_card(chip, lun); + int pro_formatter_flag = 0; + unsigned char inquiry_buf[] = { + QULIFIRE | DRCT_ACCESS_DEV, + RMB_DISC | 0x0D, + 0x00, + 0x01, + 0x1f, + 0x02, + 0, + REL_ADR | WBUS_32 | WBUS_16 | SYNC | LINKED | CMD_QUE | SFT_RE, + }; + + inquiry_string = inquiry_default; + + buf = vmalloc(scsi_bufflen(srb)); + if (buf == NULL) + TRACE_RET(chip, TRANSPORT_ERROR); + + if (MS_FORMATTER_ENABLED(chip) && (get_lun2card(chip, lun) & MS_CARD)) { + if (!card || (card == MS_CARD)) + pro_formatter_flag = 1; + } + + if (pro_formatter_flag) { + if (scsi_bufflen(srb) < 56) + sendbytes = (unsigned char)(scsi_bufflen(srb)); + else + sendbytes = 56; + } else { + if (scsi_bufflen(srb) < 36) + sendbytes = (unsigned char)(scsi_bufflen(srb)); + else + sendbytes = 36; + } + + if (sendbytes > 8) { + memcpy(buf, inquiry_buf, 8); + memcpy(buf + 8, inquiry_string, sendbytes - 8); + if (pro_formatter_flag) + buf[4] = 0x33; /* Additional Length */ + } else { + memcpy(buf, inquiry_buf, sendbytes); + } + + if (pro_formatter_flag) { + if (sendbytes > 36) + memcpy(buf + 36, formatter_inquiry_str, sendbytes - 36); + } + + scsi_set_resid(srb, 0); + + rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb); + vfree(buf); + + return TRANSPORT_GOOD; +} + +static int start_stop_unit(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + unsigned int lun = SCSI_LUN(srb); + + scsi_set_resid(srb, scsi_bufflen(srb)); + + if (srb->cmnd[1] == 1) + return TRANSPORT_GOOD; + + switch (srb->cmnd[0x4]) { + case STOP_MEDIUM: + /* Media disabled */ + return TRANSPORT_GOOD; + + case UNLOAD_MEDIUM: + /* Media shall be unload */ + if (check_card_ready(chip, lun)) + eject_card(chip, lun); + return TRANSPORT_GOOD; + + case MAKE_MEDIUM_READY: + case LOAD_MEDIUM: + if (check_card_ready(chip, lun)) { + return TRANSPORT_GOOD; + } else { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + break; + } + + TRACE_RET(chip, TRANSPORT_ERROR); +} + +static int allow_medium_removal(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + int prevent; + + prevent = srb->cmnd[4] & 0x1; + + scsi_set_resid(srb, 0); + + if (prevent) { + set_sense_type(chip, SCSI_LUN(srb), + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + return TRANSPORT_GOOD; +} + +static void ms_mode_sense(struct rts51x_chip *chip, u8 cmd, + int lun, u8 *buf, int buf_len) +{ + struct ms_info *ms_card = &(chip->ms_card); + int sys_info_offset; + int data_size = buf_len; + int support_format = 0; + int i = 0; + + if (cmd == MODE_SENSE) { + sys_info_offset = 8; + if (data_size > 0x68) + data_size = 0x68; + buf[i++] = 0x67; /* Mode Data Length */ + } else { + sys_info_offset = 12; + if (data_size > 0x6C) + data_size = 0x6C; + buf[i++] = 0x00; /* Mode Data Length (MSB) */ + buf[i++] = 0x6A; /* Mode Data Length (LSB) */ + } + + /* Medium Type Code */ + if (check_card_ready(chip, lun)) { + if (CHK_MSXC(ms_card)) { + support_format = 1; + buf[i++] = 0x40; + } else if (CHK_MSPRO(ms_card)) { + support_format = 1; + buf[i++] = 0x20; + } else { + buf[i++] = 0x10; + } + + /* WP */ + if (check_card_wp(chip, lun)) + buf[i++] = 0x80; + else + buf[i++] = 0x00; + } else { + buf[i++] = 0x00; /* MediaType */ + buf[i++] = 0x00; /* WP */ + } + + buf[i++] = 0x00; /* Reserved */ + + if (cmd == MODE_SENSE_10) { + buf[i++] = 0x00; /* Reserved */ + buf[i++] = 0x00; /* Block descriptor length(MSB) */ + buf[i++] = 
0x00; /* Block descriptor length(LSB) */ + + /* The Following Data is the content of "Page 0x20" */ + if (data_size >= 9) + buf[i++] = 0x20; /* Page Code */ + if (data_size >= 10) + buf[i++] = 0x62; /* Page Length */ + if (data_size >= 11) + buf[i++] = 0x00; /* No Access Control */ + if (data_size >= 12) { + if (support_format) + buf[i++] = 0xC0; /* SF, SGM */ + else + buf[i++] = 0x00; + } + } else { + /* The Following Data is the content of "Page 0x20" */ + if (data_size >= 5) + buf[i++] = 0x20; /* Page Code */ + if (data_size >= 6) + buf[i++] = 0x62; /* Page Length */ + if (data_size >= 7) + buf[i++] = 0x00; /* No Access Control */ + if (data_size >= 8) { + if (support_format) + buf[i++] = 0xC0; /* SF, SGM */ + else + buf[i++] = 0x00; + } + } + + if (data_size > sys_info_offset) { + /* 96 Bytes Attribute Data */ + int len = data_size - sys_info_offset; + len = (len < 96) ? len : 96; + + memcpy(buf + sys_info_offset, ms_card->raw_sys_info, len); + } +} + +static int mode_sense(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + unsigned int lun = SCSI_LUN(srb); + unsigned int dataSize; + int status; + int pro_formatter_flag; + unsigned char pageCode, *buf; + u8 card = get_lun_card(chip, lun); + + if (!check_card_ready(chip, lun)) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); + scsi_set_resid(srb, scsi_bufflen(srb)); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + pro_formatter_flag = 0; + dataSize = 8; + /* In combo mode the device responds to MODE SENSE as an MS LUN + * when no card is inserted */ + if ((get_lun2card(chip, lun) & MS_CARD)) { + if (!card || (card == MS_CARD)) { + dataSize = 108; + if (chip->option.mspro_formatter_enable) + pro_formatter_flag = 1; + } + } + + buf = kmalloc(dataSize, GFP_KERNEL); + if (buf == NULL) + TRACE_RET(chip, TRANSPORT_ERROR); + + pageCode = srb->cmnd[2] & 0x3f; + + if ((pageCode == 0x3F) || (pageCode == 0x1C) || + (pageCode == 0x00) || (pro_formatter_flag && (pageCode == 0x20))) { + if (srb->cmnd[0] == MODE_SENSE) { + if ((pageCode == 0x3F) || (pageCode == 0x20)) { + ms_mode_sense(chip, srb->cmnd[0], lun, buf, + dataSize); + } else { + dataSize = 4; + buf[0] = 0x03; + buf[1] = 0x00; + if (check_card_wp(chip, lun)) + buf[2] = 0x80; + else + buf[2] = 0x00; + } + } else { + if ((pageCode == 0x3F) || (pageCode == 0x20)) { + ms_mode_sense(chip, srb->cmnd[0], lun, buf, + dataSize); + } else { + dataSize = 8; + buf[0] = 0x00; + buf[1] = 0x06; + buf[2] = 0x00; + if (check_card_wp(chip, lun)) + buf[3] = 0x80; + else + buf[3] = 0x00; + buf[4] = 0x00; + buf[5] = 0x00; + buf[6] = 0x00; + buf[7] = 0x00; + } + } + status = TRANSPORT_GOOD; + } else { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + scsi_set_resid(srb, scsi_bufflen(srb)); + status = TRANSPORT_FAILED; + } + + if (status == TRANSPORT_GOOD) { + unsigned int len = min(scsi_bufflen(srb), dataSize); + rts51x_set_xfer_buf(buf, len, srb); + scsi_set_resid(srb, scsi_bufflen(srb) - len); + } + kfree(buf); + + return status; +} + +static int request_sense(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + struct sense_data_t *sense; + unsigned int lun = SCSI_LUN(srb); + struct ms_info *ms_card = &(chip->ms_card); + unsigned char *tmp, *buf; + + sense = &(chip->sense_buffer[lun]); + + if ((get_lun_card(chip, lun) == MS_CARD) + && PRO_UNDER_FORMATTING(ms_card)) { + mspro_format_sense(chip, lun); + } + + buf = vmalloc(scsi_bufflen(srb)); + if (buf == NULL) + TRACE_RET(chip, TRANSPORT_ERROR); + + tmp = (unsigned char *)sense; + memcpy(buf, tmp, scsi_bufflen(srb)); + +
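/* Hand the raw sense_data_t back through the command's transfer buffer. */ + 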
rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb); + vfree(buf); + + scsi_set_resid(srb, 0); + /* Reset Sense Data */ + set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE); + return TRANSPORT_GOOD; +} + +static int read_write(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ +#ifdef SUPPORT_SD_LOCK + struct sd_info *sd_card = &(chip->sd_card); +#endif + unsigned int lun = SCSI_LUN(srb); + int retval; + u32 start_sec; + u16 sec_cnt; + + if (!check_card_ready(chip, lun) || (chip->capacity[lun] == 0)) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + if (!check_lun_mc(chip, lun)) { + set_lun_mc(chip, lun); + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE); + return TRANSPORT_FAILED; + } + + rts51x_prepare_run(chip); + RTS51X_SET_STAT(chip, STAT_RUN); + +#ifdef SUPPORT_SD_LOCK + if (sd_card->sd_erase_status) { + /* Accessing to any card is forbidden + * until the erase procedure of SD is completed */ + RTS51X_DEBUGP("SD card being erased!\n"); + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_READ_FORBIDDEN); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + if (get_lun_card(chip, lun) == SD_CARD) { + if (sd_card->sd_lock_status & SD_LOCKED) { + RTS51X_DEBUGP("SD card locked!\n"); + set_sense_type(chip, lun, + SENSE_TYPE_MEDIA_READ_FORBIDDEN); + TRACE_RET(chip, TRANSPORT_FAILED); + } + } +#endif + + if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10)) { + start_sec = + ((u32) srb->cmnd[2] << 24) | + ((u32) srb->cmnd[3] << 16) | + ((u32) srb->cmnd[4] << 8) | + ((u32) srb->cmnd[5]); + sec_cnt = ((u16) (srb->cmnd[7]) << 8) | srb->cmnd[8]; + } else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) { + start_sec = ((u32) (srb->cmnd[1] & 0x1F) << 16) | + ((u32) srb->cmnd[2] << 8) | ((u32) srb->cmnd[3]); + sec_cnt = srb->cmnd[4]; + } else if ((srb->cmnd[0] == VENDOR_CMND) && + (srb->cmnd[1] == SCSI_APP_CMD) && + ((srb->cmnd[2] == PP_READ10) || + (srb->cmnd[2] == PP_WRITE10))) { + start_sec = ((u32) srb->cmnd[4] << 24) | + ((u32) srb->cmnd[5] << 16) | + ((u32) srb->cmnd[6] << 8) | + ((u32) srb->cmnd[7]); + sec_cnt = ((u16) (srb->cmnd[9]) << 8) | srb->cmnd[10]; + } else { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + if ((start_sec > chip->capacity[lun]) || + ((start_sec + sec_cnt) > chip->capacity[lun])) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LBA_OVER_RANGE); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + if (sec_cnt == 0) { + scsi_set_resid(srb, 0); + return TRANSPORT_GOOD; + } + + if ((srb->sc_data_direction == DMA_TO_DEVICE) + && check_card_wp(chip, lun)) { + RTS51X_DEBUGP("Write protected card!\n"); + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + retval = card_rw(srb, chip, start_sec, sec_cnt); + if (retval != STATUS_SUCCESS) { +#if 0 + if (chip->need_release & chip->lun2card[lun]) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); + } else { +#endif + if (srb->sc_data_direction == DMA_FROM_DEVICE) { + set_sense_type(chip, lun, + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + } else { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR); + } +#if 0 + } +#endif + TRACE_RET(chip, TRANSPORT_FAILED); + } + + scsi_set_resid(srb, 0); + + return TRANSPORT_GOOD; +} + +static int read_format_capacity(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + unsigned char *buf; + unsigned int lun = SCSI_LUN(srb); + unsigned int buf_len; + u8 card = get_lun_card(chip, lun); + int desc_cnt; + int i = 0; + + if 
(!check_card_ready(chip, lun)) { + if (!chip->option.mspro_formatter_enable) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); + TRACE_RET(chip, TRANSPORT_FAILED); + } + } + + buf_len = (scsi_bufflen(srb) > 12) ? 0x14 : 12; + + buf = kmalloc(buf_len, GFP_KERNEL); + if (buf == NULL) + TRACE_RET(chip, TRANSPORT_ERROR); + + buf[i++] = 0; + buf[i++] = 0; + buf[i++] = 0; + + /* Capacity List Length */ + if ((buf_len > 12) && chip->option.mspro_formatter_enable && + (chip->lun2card[lun] & MS_CARD) && (!card || (card == MS_CARD))) { + buf[i++] = 0x10; + desc_cnt = 2; + } else { + buf[i++] = 0x08; + desc_cnt = 1; + } + + while (desc_cnt) { + if (check_card_ready(chip, lun)) { + buf[i++] = (unsigned char)((chip->capacity[lun]) >> 24); + buf[i++] = (unsigned char)((chip->capacity[lun]) >> 16); + buf[i++] = (unsigned char)((chip->capacity[lun]) >> 8); + buf[i++] = (unsigned char)(chip->capacity[lun]); + + if (desc_cnt == 2) + /* Byte[8]: Descriptor Type: Formatted medium */ + buf[i++] = 2; + else + buf[i++] = 0; /* Byte[16] */ + } else { + buf[i++] = 0xFF; + buf[i++] = 0xFF; + buf[i++] = 0xFF; + buf[i++] = 0xFF; + + if (desc_cnt == 2) + /* Byte[8]: Descriptor Type: No medium */ + buf[i++] = 3; + else + buf[i++] = 0; /*Byte[16] */ + } + + buf[i++] = 0x00; + buf[i++] = 0x02; + buf[i++] = 0x00; + + desc_cnt--; + } + + buf_len = min(scsi_bufflen(srb), buf_len); + rts51x_set_xfer_buf(buf, buf_len, srb); + kfree(buf); + + scsi_set_resid(srb, scsi_bufflen(srb) - buf_len); + + return TRANSPORT_GOOD; +} + +static int read_capacity(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + unsigned char *buf; + unsigned int lun = SCSI_LUN(srb); + + if (!check_card_ready(chip, lun)) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + if (!check_lun_mc(chip, lun)) { + set_lun_mc(chip, lun); + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE); + return TRANSPORT_FAILED; + } + + buf = kmalloc(8, GFP_KERNEL); + if (buf == NULL) + TRACE_RET(chip, TRANSPORT_ERROR); + + buf[0] = (unsigned char)((chip->capacity[lun] - 1) >> 24); + buf[1] = (unsigned char)((chip->capacity[lun] - 1) >> 16); + buf[2] = (unsigned char)((chip->capacity[lun] - 1) >> 8); + buf[3] = (unsigned char)(chip->capacity[lun] - 1); + + buf[4] = 0x00; + buf[5] = 0x00; + buf[6] = 0x02; + buf[7] = 0x00; + + rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb); + kfree(buf); + + scsi_set_resid(srb, 0); + + return TRANSPORT_GOOD; +} + +static int get_dev_status(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + unsigned int lun = SCSI_LUN(srb); + unsigned int buf_len; + u8 status[32] = { 0 }; + + rts51x_pp_status(chip, lun, status, 32); + + buf_len = min(scsi_bufflen(srb), (unsigned int)sizeof(status)); + rts51x_set_xfer_buf(status, buf_len, srb); + scsi_set_resid(srb, scsi_bufflen(srb) - buf_len); + + return TRANSPORT_GOOD; +} + +static int read_status(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + u8 rts51x_status[16]; + unsigned int buf_len; + unsigned int lun = SCSI_LUN(srb); + + rts51x_read_status(chip, lun, rts51x_status, 16); + + buf_len = min(scsi_bufflen(srb), (unsigned int)sizeof(rts51x_status)); + rts51x_set_xfer_buf(rts51x_status, buf_len, srb); + scsi_set_resid(srb, scsi_bufflen(srb) - buf_len); + + return TRANSPORT_GOOD; +} + +static int read_mem(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + unsigned int lun = SCSI_LUN(srb); + unsigned short addr, len, i; + int retval; + u8 *buf; + + rts51x_prepare_run(chip); + RTS51X_SET_STAT(chip, STAT_RUN); + + addr = ((u16) 
srb->cmnd[2] << 8) | srb->cmnd[3]; + len = ((u16) srb->cmnd[4] << 8) | srb->cmnd[5]; + + if (addr < 0xe000) { + RTS51X_DEBUGP("filter!addr=0x%x\n", addr); + return TRANSPORT_GOOD; + } + + buf = vmalloc(len); + if (!buf) + TRACE_RET(chip, TRANSPORT_ERROR); + + for (i = 0; i < len; i++) { + retval = rts51x_ep0_read_register(chip, addr + i, buf + i); + if (retval != STATUS_SUCCESS) { + vfree(buf); + set_sense_type(chip, lun, + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + TRACE_RET(chip, TRANSPORT_FAILED); + } + } + + len = (unsigned short)min(scsi_bufflen(srb), (unsigned int)len); + rts51x_set_xfer_buf(buf, len, srb); + scsi_set_resid(srb, scsi_bufflen(srb) - len); + + vfree(buf); + + return TRANSPORT_GOOD; +} + +static int write_mem(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + unsigned int lun = SCSI_LUN(srb); + unsigned short addr, len, i; + int retval; + u8 *buf; + + rts51x_prepare_run(chip); + RTS51X_SET_STAT(chip, STAT_RUN); + + addr = ((u16) srb->cmnd[2] << 8) | srb->cmnd[3]; + len = ((u16) srb->cmnd[4] << 8) | srb->cmnd[5]; + + if (addr < 0xe000) { + RTS51X_DEBUGP("filter!addr=0x%x\n", addr); + return TRANSPORT_GOOD; + } + + len = (unsigned short)min(scsi_bufflen(srb), (unsigned int)len); + buf = vmalloc(len); + if (!buf) + TRACE_RET(chip, TRANSPORT_ERROR); + + rts51x_get_xfer_buf(buf, len, srb); + + for (i = 0; i < len; i++) { + retval = + rts51x_ep0_write_register(chip, addr + i, 0xFF, buf[i]); + if (retval != STATUS_SUCCESS) { + vfree(buf); + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR); + TRACE_RET(chip, TRANSPORT_FAILED); + } + } + + vfree(buf); + scsi_set_resid(srb, scsi_bufflen(srb) - len); + + return TRANSPORT_GOOD; +} + +static int get_sd_csd(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + struct sd_info *sd_card = &(chip->sd_card); + unsigned int lun = SCSI_LUN(srb); + + if (!check_card_ready(chip, lun)) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + if (get_lun_card(chip, lun) != SD_CARD) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + scsi_set_resid(srb, 0); + rts51x_set_xfer_buf(sd_card->raw_csd, scsi_bufflen(srb), srb); + + return TRANSPORT_GOOD; +} + +static int read_phy_register(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + int retval; + u8 addr, len, i; + u8 *buf; + + rts51x_prepare_run(chip); + RTS51X_SET_STAT(chip, STAT_RUN); + + addr = srb->cmnd[5]; + len = srb->cmnd[7]; + + if (len) { + buf = vmalloc(len); + if (!buf) + TRACE_RET(chip, TRANSPORT_ERROR); + + for (i = 0; i < len; i++) { + retval = + rts51x_read_phy_register(chip, addr + i, buf + i); + if (retval != STATUS_SUCCESS) { + vfree(buf); + set_sense_type(chip, SCSI_LUN(srb), + SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); + TRACE_RET(chip, TRANSPORT_FAILED); + } + } + + len = min(scsi_bufflen(srb), (unsigned int)len); + rts51x_set_xfer_buf(buf, len, srb); + scsi_set_resid(srb, scsi_bufflen(srb) - len); + + vfree(buf); + } + + return TRANSPORT_GOOD; +} + +static int write_phy_register(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + int retval; + u8 addr, len, i; + u8 *buf; + + rts51x_prepare_run(chip); + RTS51X_SET_STAT(chip, STAT_RUN); + + addr = srb->cmnd[5]; + len = srb->cmnd[7]; + + if (len) { + len = min(scsi_bufflen(srb), (unsigned int)len); + + buf = vmalloc(len); + if (buf == NULL) + TRACE_RET(chip, TRANSPORT_ERROR); + + rts51x_get_xfer_buf(buf, len, srb); + scsi_set_resid(srb, scsi_bufflen(srb) - len); + + for (i = 0; i < len; i++) { + retval = + 
rts51x_write_phy_register(chip, addr + i, buf[i]);
+			if (retval != STATUS_SUCCESS) {
+				vfree(buf);
+				set_sense_type(chip, SCSI_LUN(srb),
+					       SENSE_TYPE_MEDIA_WRITE_ERR);
+				TRACE_RET(chip, TRANSPORT_FAILED);
+			}
+		}
+
+		vfree(buf);
+	}
+
+	return TRANSPORT_GOOD;
+}
+
+static int get_card_bus_width(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+	unsigned int lun = SCSI_LUN(srb);
+	u8 card, bus_width;
+
+	if (!check_card_ready(chip, lun)) {
+		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
+		TRACE_RET(chip, TRANSPORT_FAILED);
+	}
+
+	card = get_lun_card(chip, lun);
+	if ((card == SD_CARD) || (card == MS_CARD)) {
+		bus_width = chip->card_bus_width[lun];
+	} else {
+		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+		TRACE_RET(chip, TRANSPORT_FAILED);
+	}
+
+	scsi_set_resid(srb, 0);
+	rts51x_set_xfer_buf(&bus_width, scsi_bufflen(srb), srb);
+
+	return TRANSPORT_GOOD;
+}
+
+#ifdef _MSG_TRACE
+static int trace_msg_cmd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+	unsigned char *buf = NULL;
+	u8 clear;
+	unsigned int buf_len;
+
+	buf_len = 4 +
+	    ((2 + MSG_FUNC_LEN + MSG_FILE_LEN + TIME_VAL_LEN) * TRACE_ITEM_CNT);
+
+	if ((scsi_bufflen(srb) < buf_len) || (scsi_sglist(srb) == NULL)) {
+		set_sense_type(chip, SCSI_LUN(srb),
+			       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+		TRACE_RET(chip, TRANSPORT_FAILED);
+	}
+
+	clear = srb->cmnd[2];
+
+	buf = vmalloc(scsi_bufflen(srb));
+	if (buf == NULL)
+		TRACE_RET(chip, TRANSPORT_ERROR);
+
+	rts51x_trace_msg(chip, buf, clear);
+
+	rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb);
+	vfree(buf);
+
+	scsi_set_resid(srb, 0);
+	return TRANSPORT_GOOD;
+}
+#endif
+
+static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rts51x_chip *chip)
+{
+	int retval = STATUS_SUCCESS;
+	unsigned int lun = SCSI_LUN(srb);
+	u8 cmd_type, mask, value, idx, mode, len;
+	u16 addr;
+	u32 timeout;
+
+	rts51x_prepare_run(chip);
+	RTS51X_SET_STAT(chip, STAT_RUN);
+
+	switch (srb->cmnd[3]) {
+	case INIT_BATCHCMD:
+		rts51x_init_cmd(chip);
+		break;
+
+	case ADD_BATCHCMD:
+		cmd_type = srb->cmnd[4];
+		if (cmd_type > 2) {
+			set_sense_type(chip, lun,
+				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+			TRACE_RET(chip, TRANSPORT_FAILED);
+		}
+		addr = (srb->cmnd[5] << 8) | srb->cmnd[6];
+		mask = srb->cmnd[7];
+		value = srb->cmnd[8];
+		rts51x_add_cmd(chip, cmd_type, addr, mask, value);
+		break;
+
+	case SEND_BATCHCMD:
+		mode = srb->cmnd[4];
+		len = srb->cmnd[5];
+		timeout = ((u32) srb->cmnd[6] << 24) |
+			  ((u32) srb->cmnd[7] << 16) |
+			  ((u32) srb->cmnd[8] << 8) |
+			  ((u32) srb->cmnd[9]);
+		retval = rts51x_send_cmd(chip, mode, 1000);
+		if (retval != STATUS_SUCCESS) {
+			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+			TRACE_RET(chip, TRANSPORT_FAILED);
+		}
+		if (mode & STAGE_R) {
+			retval = rts51x_get_rsp(chip, len, timeout);
+			if (retval != STATUS_SUCCESS) {
+				set_sense_type(chip, lun,
+					       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+				TRACE_RET(chip, TRANSPORT_FAILED);
+			}
+		}
+		break;
+
+	case GET_BATCHRSP:
+		idx = srb->cmnd[4];
+		value = chip->rsp_buf[idx];
+		if (scsi_bufflen(srb) < 1) {
+			set_sense_type(chip, lun,
+				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+			TRACE_RET(chip, TRANSPORT_FAILED);
+		}
+		rts51x_set_xfer_buf(&value, 1, srb);
+		scsi_set_resid(srb, 0);
+		break;
+
+	default:
+		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+		TRACE_RET(chip, TRANSPORT_FAILED);
+	}
+
+	if (retval != STATUS_SUCCESS) {
+		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
+		TRACE_RET(chip, TRANSPORT_FAILED);
+	}
+
+	return TRANSPORT_GOOD;
+}
+
+static int
suit_cmd(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + int result; + + switch (srb->cmnd[3]) { + case INIT_BATCHCMD: + case ADD_BATCHCMD: + case SEND_BATCHCMD: + case GET_BATCHRSP: + result = rw_mem_cmd_buf(srb, chip); + break; + default: + result = TRANSPORT_ERROR; + } + + return result; +} + +static int app_cmd(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + int result; + + switch (srb->cmnd[2]) { + case PP_READ10: + case PP_WRITE10: + result = read_write(srb, chip); + break; + + case SUIT_CMD: + result = suit_cmd(srb, chip); + break; + + case READ_PHY: + result = read_phy_register(srb, chip); + break; + + case WRITE_PHY: + result = write_phy_register(srb, chip); + break; + + case GET_DEV_STATUS: + result = get_dev_status(srb, chip); + break; + + default: + set_sense_type(chip, SCSI_LUN(srb), + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + return result; +} + +static int vendor_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + int result = TRANSPORT_GOOD; + + switch (srb->cmnd[1]) { + case READ_STATUS: + result = read_status(srb, chip); + break; + + case READ_MEM: + result = read_mem(srb, chip); + break; + + case WRITE_MEM: + result = write_mem(srb, chip); + break; + + case GET_BUS_WIDTH: + result = get_card_bus_width(srb, chip); + break; + + case GET_SD_CSD: + result = get_sd_csd(srb, chip); + break; + +#ifdef _MSG_TRACE + case TRACE_MSG: + result = trace_msg_cmd(srb, chip); + break; +#endif + + case SCSI_APP_CMD: + result = app_cmd(srb, chip); + break; + + default: + set_sense_type(chip, SCSI_LUN(srb), + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + return result; +} + +static int ms_format_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + struct ms_info *ms_card = &(chip->ms_card); + unsigned int lun = SCSI_LUN(srb); + int retval, quick_format; + + if (get_lun_card(chip, lun) != MS_CARD) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + if ((srb->cmnd[3] != 0x4D) || (srb->cmnd[4] != 0x47) + || (srb->cmnd[5] != 0x66) || (srb->cmnd[6] != 0x6D) + || (srb->cmnd[7] != 0x74)) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + if (srb->cmnd[8] & 0x01) + quick_format = 0; + else + quick_format = 1; + + if (!(chip->card_ready & MS_CARD)) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + if (chip->card_wp & MS_CARD) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + if (!CHK_MSPRO(ms_card)) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + rts51x_prepare_run(chip); + RTS51X_SET_STAT(chip, STAT_RUN); + + retval = mspro_format(srb, chip, MS_SHORT_DATA_LEN, quick_format); + if (retval != STATUS_SUCCESS) { + set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + scsi_set_resid(srb, 0); + return TRANSPORT_GOOD; +} + +#ifdef SUPPORT_PCGL_1P18 +int get_ms_information(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + struct ms_info *ms_card = &(chip->ms_card); + unsigned int lun = SCSI_LUN(srb); + u8 dev_info_id, data_len; + u8 *buf; + unsigned int buf_len; + int i; + + if (!check_card_ready(chip, lun)) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); + TRACE_RET(chip, TRANSPORT_FAILED); + } + if ((get_lun_card(chip, lun) != MS_CARD)) { + 
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + if ((srb->cmnd[2] != 0xB0) || (srb->cmnd[4] != 0x4D) || + (srb->cmnd[5] != 0x53) || (srb->cmnd[6] != 0x49) || + (srb->cmnd[7] != 0x44)) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + dev_info_id = srb->cmnd[3]; + if ((CHK_MSXC(ms_card) && (dev_info_id == 0x10)) || + (!CHK_MSXC(ms_card) && (dev_info_id == 0x13)) || + !CHK_MSPRO(ms_card)) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + if (dev_info_id == 0x15) + buf_len = data_len = 0x3A; + else + buf_len = data_len = 0x6A; + + buf = kmalloc(buf_len, GFP_KERNEL); + if (!buf) + TRACE_RET(chip, TRANSPORT_ERROR); + + i = 0; + /* GET Memory Stick Media Information Response Header */ + buf[i++] = 0x00; /* Data length MSB */ + buf[i++] = data_len; /* Data length LSB */ + /* Device Information Type Code */ + if (CHK_MSXC(ms_card)) + buf[i++] = 0x03; + else + buf[i++] = 0x02; + /* SGM bit */ + buf[i++] = 0x01; + /* Reserved */ + buf[i++] = 0x00; + buf[i++] = 0x00; + buf[i++] = 0x00; + /* Number of Device Information */ + buf[i++] = 0x01; + + /* Device Information Body + * Device Information ID Number */ + buf[i++] = dev_info_id; + /* Device Information Length */ + if (dev_info_id == 0x15) + data_len = 0x31; + else + data_len = 0x61; + buf[i++] = 0x00; /* Data length MSB */ + buf[i++] = data_len; /* Data length LSB */ + /* Valid Bit */ + buf[i++] = 0x80; + if ((dev_info_id == 0x10) || (dev_info_id == 0x13)) { + /* System Information */ + memcpy(buf + i, ms_card->raw_sys_info, 96); + } else { + /* Model Name */ + memcpy(buf + i, ms_card->raw_model_name, 48); + } + + rts51x_set_xfer_buf(buf, buf_len, srb); + + if (dev_info_id == 0x15) + scsi_set_resid(srb, scsi_bufflen(srb) - 0x3C); + else + scsi_set_resid(srb, scsi_bufflen(srb) - 0x6C); + + kfree(buf); + return STATUS_SUCCESS; +} +#endif + +static int ms_sp_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + int retval = TRANSPORT_ERROR; + + if (srb->cmnd[2] == MS_FORMAT) + retval = ms_format_cmnd(srb, chip); +#ifdef SUPPORT_PCGL_1P18 + else if (srb->cmnd[2] == GET_MS_INFORMATION) + retval = get_ms_information(srb, chip); +#endif + + return retval; +} + +#ifdef SUPPORT_CPRM +static int sd_extention_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + unsigned int lun = SCSI_LUN(srb); + int result; + + rts51x_prepare_run(chip); + RTS51X_SET_STAT(chip, STAT_RUN); + + sd_cleanup_work(chip); + + if (!check_card_ready(chip, lun)) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); + TRACE_RET(chip, TRANSPORT_FAILED); + } + if ((get_lun_card(chip, lun) != SD_CARD)) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + switch (srb->cmnd[0]) { + case SD_PASS_THRU_MODE: + result = sd_pass_thru_mode(srb, chip); + break; + + case SD_EXECUTE_NO_DATA: + result = sd_execute_no_data(srb, chip); + break; + + case SD_EXECUTE_READ: + result = sd_execute_read_data(srb, chip); + break; + + case SD_EXECUTE_WRITE: + result = sd_execute_write_data(srb, chip); + break; + + case SD_GET_RSP: + result = sd_get_cmd_rsp(srb, chip); + break; + + case SD_HW_RST: + result = sd_hw_rst(srb, chip); + break; + + default: + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + return result; +} +#endif + +#ifdef SUPPORT_MAGIC_GATE +int mg_report_key(struct scsi_cmnd *srb, 
struct rts51x_chip *chip) +{ + struct ms_info *ms_card = &(chip->ms_card); + unsigned int lun = SCSI_LUN(srb); + int retval; + u8 key_format; + + rts51x_prepare_run(chip); + RTS51X_SET_STAT(chip, STAT_RUN); + + ms_cleanup_work(chip); + + if (!check_card_ready(chip, lun)) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); + TRACE_RET(chip, TRANSPORT_FAILED); + } + if ((get_lun_card(chip, lun) != MS_CARD)) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + if (srb->cmnd[7] != KC_MG_R_PRO) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + if (!CHK_MSPRO(ms_card)) { + set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + key_format = srb->cmnd[10] & 0x3F; + + switch (key_format) { + case KF_GET_LOC_EKB: + if ((scsi_bufflen(srb) == 0x41C) && + (srb->cmnd[8] == 0x04) && (srb->cmnd[9] == 0x1C)) { + retval = mg_get_local_EKB(srb, chip); + if (retval != STATUS_SUCCESS) + TRACE_RET(chip, TRANSPORT_FAILED); + } else { + set_sense_type(chip, lun, + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + TRACE_RET(chip, TRANSPORT_FAILED); + } + break; + + case KF_RSP_CHG: + if ((scsi_bufflen(srb) == 0x24) && + (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x24)) { + retval = mg_get_rsp_chg(srb, chip); + if (retval != STATUS_SUCCESS) + TRACE_RET(chip, TRANSPORT_FAILED); + } else { + set_sense_type(chip, lun, + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + TRACE_RET(chip, TRANSPORT_FAILED); + } + break; + + case KF_GET_ICV: + ms_card->mg_entry_num = srb->cmnd[5]; + if ((scsi_bufflen(srb) == 0x404) && + (srb->cmnd[8] == 0x04) && + (srb->cmnd[9] == 0x04) && + (srb->cmnd[2] == 0x00) && + (srb->cmnd[3] == 0x00) && + (srb->cmnd[4] == 0x00) && (srb->cmnd[5] < 32)) { + retval = mg_get_ICV(srb, chip); + if (retval != STATUS_SUCCESS) + TRACE_RET(chip, TRANSPORT_FAILED); + } else { + set_sense_type(chip, lun, + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + TRACE_RET(chip, TRANSPORT_FAILED); + } + break; + + default: + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + scsi_set_resid(srb, 0); + return TRANSPORT_GOOD; +} + +int mg_send_key(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ + struct ms_info *ms_card = &(chip->ms_card); + unsigned int lun = SCSI_LUN(srb); + int retval; + u8 key_format; + + rts51x_prepare_run(chip); + RTS51X_SET_STAT(chip, STAT_RUN); + + ms_cleanup_work(chip); + + if (!check_card_ready(chip, lun)) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); + TRACE_RET(chip, TRANSPORT_FAILED); + } + if (check_card_wp(chip, lun)) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT); + TRACE_RET(chip, TRANSPORT_FAILED); + } + if ((get_lun_card(chip, lun) != MS_CARD)) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + if (srb->cmnd[7] != KC_MG_R_PRO) { + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + if (!CHK_MSPRO(ms_card)) { + set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + key_format = srb->cmnd[10] & 0x3F; + + switch (key_format) { + case KF_SET_LEAF_ID: + if ((scsi_bufflen(srb) == 0x0C) && + (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x0C)) { + retval = mg_set_leaf_id(srb, chip); + if (retval != STATUS_SUCCESS) + TRACE_RET(chip, TRANSPORT_FAILED); + } else { + set_sense_type(chip, lun, + 
SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + TRACE_RET(chip, TRANSPORT_FAILED); + } + break; + + case KF_CHG_HOST: + if ((scsi_bufflen(srb) == 0x0C) && + (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x0C)) { + retval = mg_chg(srb, chip); + if (retval != STATUS_SUCCESS) + TRACE_RET(chip, TRANSPORT_FAILED); + } else { + set_sense_type(chip, lun, + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + TRACE_RET(chip, TRANSPORT_FAILED); + } + break; + + case KF_RSP_HOST: + if ((scsi_bufflen(srb) == 0x0C) && + (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x0C)) { + retval = mg_rsp(srb, chip); + if (retval != STATUS_SUCCESS) + TRACE_RET(chip, TRANSPORT_FAILED); + } else { + set_sense_type(chip, lun, + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + TRACE_RET(chip, TRANSPORT_FAILED); + } + break; + + case KF_SET_ICV: + ms_card->mg_entry_num = srb->cmnd[5]; + if ((scsi_bufflen(srb) == 0x404) && + (srb->cmnd[8] == 0x04) && + (srb->cmnd[9] == 0x04) && + (srb->cmnd[2] == 0x00) && + (srb->cmnd[3] == 0x00) && + (srb->cmnd[4] == 0x00) && (srb->cmnd[5] < 32)) { + retval = mg_set_ICV(srb, chip); + if (retval != STATUS_SUCCESS) + TRACE_RET(chip, TRANSPORT_FAILED); + } else { + set_sense_type(chip, lun, + SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + TRACE_RET(chip, TRANSPORT_FAILED); + } + break; + + default: + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + TRACE_RET(chip, TRANSPORT_FAILED); + } + + scsi_set_resid(srb, 0); + return TRANSPORT_GOOD; +} +#endif + +int rts51x_scsi_handler(struct scsi_cmnd *srb, struct rts51x_chip *chip) +{ +#ifdef SUPPORT_SD_LOCK + struct sd_info *sd_card = &(chip->sd_card); +#endif + struct ms_info *ms_card = &(chip->ms_card); + unsigned int lun = SCSI_LUN(srb); + int result = TRANSPORT_GOOD; + +#ifdef SUPPORT_SD_LOCK + if (sd_card->sd_erase_status) { + /* Block all SCSI command except for REQUEST_SENSE + * and rs_ppstatus */ + if (! 
+ ((srb->cmnd[0] == VENDOR_CMND) + && (srb->cmnd[1] == SCSI_APP_CMD) + && (srb->cmnd[2] == GET_DEV_STATUS)) + && (srb->cmnd[0] != REQUEST_SENSE)) { + /* Logical Unit Not Ready Format in Progress */ + set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04, + 0, 0); + TRACE_RET(chip, TRANSPORT_FAILED); + } + } +#endif + + if ((get_lun_card(chip, lun) == MS_CARD) && + (ms_card->format_status == FORMAT_IN_PROGRESS)) { + if ((srb->cmnd[0] != REQUEST_SENSE) + && (srb->cmnd[0] != INQUIRY)) { + /* Logical Unit Not Ready Format in Progress */ + set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04, + 0, (u16) (ms_card->progress)); + TRACE_RET(chip, TRANSPORT_FAILED); + } + } + + switch (srb->cmnd[0]) { + case READ_10: + case WRITE_10: + case READ_6: + case WRITE_6: + result = read_write(srb, chip); + break; + + case TEST_UNIT_READY: + result = test_unit_ready(srb, chip); + break; + + case INQUIRY: + result = inquiry(srb, chip); + break; + + case READ_CAPACITY: + result = read_capacity(srb, chip); + break; + + case START_STOP: + result = start_stop_unit(srb, chip); + break; + + case ALLOW_MEDIUM_REMOVAL: + result = allow_medium_removal(srb, chip); + break; + + case REQUEST_SENSE: + result = request_sense(srb, chip); + break; + + case MODE_SENSE: + case MODE_SENSE_10: + result = mode_sense(srb, chip); + break; + + case 0x23: + result = read_format_capacity(srb, chip); + break; + + case VENDOR_CMND: + result = vendor_cmnd(srb, chip); + break; + + case MS_SP_CMND: + result = ms_sp_cmnd(srb, chip); + break; + +#ifdef SUPPORT_CPRM + case SD_PASS_THRU_MODE: + case SD_EXECUTE_NO_DATA: + case SD_EXECUTE_READ: + case SD_EXECUTE_WRITE: + case SD_GET_RSP: + case SD_HW_RST: + result = sd_extention_cmnd(srb, chip); + break; +#endif + +#ifdef SUPPORT_MAGIC_GATE + case CMD_MSPRO_MG_RKEY: + result = mg_report_key(srb, chip); + break; + + case CMD_MSPRO_MG_SKEY: + result = mg_send_key(srb, chip); + break; +#endif + + case FORMAT_UNIT: + case MODE_SELECT: + case VERIFY: + result = TRANSPORT_GOOD; + break; + + default: + set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); + result = TRANSPORT_FAILED; + } + + return result; +} + +/*********************************************************************** + * Host functions + ***********************************************************************/ + +const char *host_info(struct Scsi_Host *host) +{ + return "SCSI emulation for RTS51xx USB driver-based card reader"; +} + +int slave_alloc(struct scsi_device *sdev) +{ + /* + * Set the INQUIRY transfer length to 36. We don't use any of + * the extra data and many devices choke if asked for more or + * less than 36 bytes. + */ + sdev->inquiry_len = 36; + return 0; +} + +int slave_configure(struct scsi_device *sdev) +{ + /* Scatter-gather buffers (all but the last) must have a length + * divisible by the bulk maxpacket size. Otherwise a data packet + * would end up being short, causing a premature end to the data + * transfer. Since high-speed bulk pipes have a maxpacket size + * of 512, we'll use that as the scsi device queue's DMA alignment + * mask. Guaranteeing proper alignment of the first buffer will + * have the desired effect because, except at the beginning and + * the end, scatter-gather buffers follow page boundaries. */ + blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); + + /* Set the SCSI level to at least 2. We'll leave it at 3 if that's + * what is originally reported. 
We need this to avoid confusing + * the SCSI layer with devices that report 0 or 1, but need 10-byte + * commands (ala ATAPI devices behind certain bridges, or devices + * which simply have broken INQUIRY data). + * + * NOTE: This means /dev/sg programs (ala cdrecord) will get the + * actual information. This seems to be the preference for + * programs like that. + * + * NOTE: This also means that /proc/scsi/scsi and sysfs may report + * the actual value or the modified one, depending on where the + * data comes from. + */ + if (sdev->scsi_level < SCSI_2) + sdev->scsi_level = sdev->sdev_target->scsi_level = SCSI_2; + + return 0; +} + +/*********************************************************************** + * /proc/scsi/ functions + ***********************************************************************/ + +/* we use this macro to help us write into the buffer */ +#undef SPRINTF +#define SPRINTF(args...) \ + do { if (pos < buffer+length) pos += sprintf(pos, ## args); } while (0) + +int proc_info(struct Scsi_Host *host, char *buffer, + char **start, off_t offset, int length, int inout) +{ + char *pos = buffer; + + /* if someone is sending us data, just throw it away */ + if (inout) + return length; + + /* print the controller name */ + SPRINTF(" Host scsi%d: %s\n", host->host_no, RTS51X_NAME); + + /* print product, vendor, and driver version strings */ + SPRINTF(" Vendor: Realtek Corp.\n"); + SPRINTF(" Product: RTS51xx USB Card Reader\n"); + SPRINTF(" Version: %s\n", DRIVER_VERSION); + SPRINTF(" Build: %s\n", __TIME__); + + /* + * Calculate start of next buffer, and return value. + */ + *start = buffer + offset; + + if ((pos - buffer) < offset) + return 0; + else if ((pos - buffer - offset) < length) + return pos - buffer - offset; + else + return length; +} + +/* queue a command */ +/* This is always called with scsi_lock(host) held */ +int queuecommand_lck(struct scsi_cmnd *srb, void (*done) (struct scsi_cmnd *)) +{ + struct rts51x_chip *chip = host_to_rts51x(srb->device->host); + + /* check for state-transition errors */ + if (chip->srb != NULL) { + RTS51X_DEBUGP("Error in %s: chip->srb = %p\n", + __func__, chip->srb); + return SCSI_MLQUEUE_HOST_BUSY; + } + + /* fail the command if we are disconnecting */ + if (test_bit(FLIDX_DISCONNECTING, &chip->usb->dflags)) { + RTS51X_DEBUGP("Fail command during disconnect\n"); + srb->result = DID_NO_CONNECT << 16; + done(srb); + return 0; + } + + /* enqueue the command and wake up the control thread */ + srb->scsi_done = done; + chip->srb = srb; + complete(&chip->usb->cmnd_ready); + + return 0; +} + +#if 0 /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37) */ +int queuecommand(struct scsi_cmnd *srb, void (*done) (struct scsi_cmnd *)) +{ + return queuecommand_lck(srb, done); +} +#else +DEF_SCSI_QCMD(queuecommand) +#endif +/*********************************************************************** + * Error handling functions + ***********************************************************************/ +/* Command timeout and abort */ +int command_abort(struct scsi_cmnd *srb) +{ + struct rts51x_chip *chip = host_to_rts51x(srb->device->host); + + RTS51X_DEBUGP("%s called\n", __func__); + + /* us->srb together with the TIMED_OUT, RESETTING, and ABORTING + * bits are protected by the host lock. */ + scsi_lock(rts51x_to_host(chip)); + + /* Is this command still active? */ + if (chip->srb != srb) { + scsi_unlock(rts51x_to_host(chip)); + RTS51X_DEBUGP("-- nothing to abort\n"); + return FAILED; + } + + /* Set the TIMED_OUT bit. 
Also set the ABORTING bit, but only if + * a device reset isn't already in progress (to avoid interfering + * with the reset). Note that we must retain the host lock while + * calling usb_stor_stop_transport(); otherwise it might interfere + * with an auto-reset that begins as soon as we release the lock. */ + set_bit(FLIDX_TIMED_OUT, &chip->usb->dflags); + if (!test_bit(FLIDX_RESETTING, &chip->usb->dflags)) { + set_bit(FLIDX_ABORTING, &chip->usb->dflags); + /* rts51x_stop_transport(us); */ + } + scsi_unlock(rts51x_to_host(chip)); + + /* Wait for the aborted command to finish */ + wait_for_completion(&chip->usb->notify); + return SUCCESS; +} + +/* This invokes the transport reset mechanism to reset the state of the + * device */ +int device_reset(struct scsi_cmnd *srb) +{ + int result = 0; + + RTS51X_DEBUGP("%s called\n", __func__); + + return result < 0 ? FAILED : SUCCESS; +} + +/* Simulate a SCSI bus reset by resetting the device's USB port. */ +int bus_reset(struct scsi_cmnd *srb) +{ + int result = 0; + + RTS51X_DEBUGP("%s called\n", __func__); + + return result < 0 ? FAILED : SUCCESS; +} + +static const char *rts5139_info(struct Scsi_Host *host) +{ + return "SCSI emulation for RTS5139 USB card reader"; +} + +struct scsi_host_template rts51x_host_template = { + /* basic userland interface stuff */ + .name = RTS51X_NAME, + .proc_name = RTS51X_NAME, + .proc_info = proc_info, + .info = rts5139_info, + + /* command interface -- queued only */ + .queuecommand = queuecommand, + + /* error and abort handlers */ + .eh_abort_handler = command_abort, + .eh_device_reset_handler = device_reset, + .eh_bus_reset_handler = bus_reset, + + /* queue commands only, only one command per LUN */ + .can_queue = 1, + .cmd_per_lun = 1, + + /* unknown initiator id */ + .this_id = -1, + + .slave_alloc = slave_alloc, + .slave_configure = slave_configure, + + /* lots of sg segments can be handled */ + .sg_tablesize = SG_ALL, + + /* limit the total size of a transfer to 120 KB */ + .max_sectors = 240, + + /* merge commands... this seems to help performance, but + * periodically someone should test to see which setting is more + * optimal. + */ + .use_clustering = 1, + + /* emulated HBA */ + .emulated = 1, + + /* we do our own delay after a device or bus reset */ + .skip_settle_delay = 1, + + /* sysfs device attributes */ + /* .sdev_attrs = sysfs_device_attr_list, */ + + /* module management */ + .module = THIS_MODULE +}; + diff --cc drivers/tty/hvc/hvc_opal.c index 7b38512d6c41,000000000000..ced26c8ccd57 mode 100644,000000..100644 --- a/drivers/tty/hvc/hvc_opal.c +++ b/drivers/tty/hvc/hvc_opal.c @@@ -1,424 -1,0 +1,425 @@@ +/* + * opal driver interface to hvc_console.c + * + * Copyright 2011 Benjamin Herrenschmidt , IBM Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#undef DEBUG + +#include +#include +#include +#include +#include +#include +#include ++#include + +#include +#include +#include +#include +#include +#include + +#include "hvc_console.h" + +static const char hvc_opal_name[] = "hvc_opal"; + +static struct of_device_id hvc_opal_match[] __devinitdata = { + { .name = "serial", .compatible = "ibm,opal-console-raw" }, + { .name = "serial", .compatible = "ibm,opal-console-hvsi" }, + { }, +}; + +typedef enum hv_protocol { + HV_PROTOCOL_RAW, + HV_PROTOCOL_HVSI +} hv_protocol_t; + +struct hvc_opal_priv { + hv_protocol_t proto; /* Raw data or HVSI packets */ + struct hvsi_priv hvsi; /* HVSI specific data */ +}; +static struct hvc_opal_priv *hvc_opal_privs[MAX_NR_HVC_CONSOLES]; + +/* For early boot console */ +static struct hvc_opal_priv hvc_opal_boot_priv; +static u32 hvc_opal_boot_termno; + +static const struct hv_ops hvc_opal_raw_ops = { + .get_chars = opal_get_chars, + .put_chars = opal_put_chars, + .notifier_add = notifier_add_irq, + .notifier_del = notifier_del_irq, + .notifier_hangup = notifier_hangup_irq, +}; + +static int hvc_opal_hvsi_get_chars(uint32_t vtermno, char *buf, int count) +{ + struct hvc_opal_priv *pv = hvc_opal_privs[vtermno]; + + if (WARN_ON(!pv)) + return -ENODEV; + + return hvsilib_get_chars(&pv->hvsi, buf, count); +} + +static int hvc_opal_hvsi_put_chars(uint32_t vtermno, const char *buf, int count) +{ + struct hvc_opal_priv *pv = hvc_opal_privs[vtermno]; + + if (WARN_ON(!pv)) + return -ENODEV; + + return hvsilib_put_chars(&pv->hvsi, buf, count); +} + +static int hvc_opal_hvsi_open(struct hvc_struct *hp, int data) +{ + struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno]; + int rc; + + pr_devel("HVSI@%x: do open !\n", hp->vtermno); + + rc = notifier_add_irq(hp, data); + if (rc) + return rc; + + return hvsilib_open(&pv->hvsi, hp); +} + +static void hvc_opal_hvsi_close(struct hvc_struct *hp, int data) +{ + struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno]; + + pr_devel("HVSI@%x: do close !\n", hp->vtermno); + + hvsilib_close(&pv->hvsi, hp); + + notifier_del_irq(hp, data); +} + +void hvc_opal_hvsi_hangup(struct hvc_struct *hp, int data) +{ + struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno]; + + pr_devel("HVSI@%x: do hangup !\n", hp->vtermno); + + hvsilib_close(&pv->hvsi, hp); + + notifier_hangup_irq(hp, data); +} + +static int hvc_opal_hvsi_tiocmget(struct hvc_struct *hp) +{ + struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno]; + + if (!pv) + return -EINVAL; + return pv->hvsi.mctrl; +} + +static int hvc_opal_hvsi_tiocmset(struct hvc_struct *hp, unsigned int set, + unsigned int clear) +{ + struct hvc_opal_priv *pv = hvc_opal_privs[hp->vtermno]; + + pr_devel("HVSI@%x: Set modem control, set=%x,clr=%x\n", + hp->vtermno, set, clear); + + if (set & TIOCM_DTR) + hvsilib_write_mctrl(&pv->hvsi, 1); + else if (clear & TIOCM_DTR) + hvsilib_write_mctrl(&pv->hvsi, 0); + + return 0; +} + +static const struct hv_ops hvc_opal_hvsi_ops = { + .get_chars = hvc_opal_hvsi_get_chars, + .put_chars = hvc_opal_hvsi_put_chars, + .notifier_add = hvc_opal_hvsi_open, + .notifier_del = hvc_opal_hvsi_close, + .notifier_hangup = hvc_opal_hvsi_hangup, + .tiocmget = hvc_opal_hvsi_tiocmget, + .tiocmset = hvc_opal_hvsi_tiocmset, +}; + +static int __devinit hvc_opal_probe(struct platform_device *dev) +{ + const struct hv_ops 
*ops;
+	struct hvc_struct *hp;
+	struct hvc_opal_priv *pv;
+	hv_protocol_t proto;
+	unsigned int termno, boot = 0;
+	const __be32 *reg;
+
+	if (of_device_is_compatible(dev->dev.of_node, "ibm,opal-console-raw")) {
+		proto = HV_PROTOCOL_RAW;
+		ops = &hvc_opal_raw_ops;
+	} else if (of_device_is_compatible(dev->dev.of_node,
+					   "ibm,opal-console-hvsi")) {
+		proto = HV_PROTOCOL_HVSI;
+		ops = &hvc_opal_hvsi_ops;
+	} else {
+		pr_err("hvc_opal: Unknown protocol for %s\n",
+		       dev->dev.of_node->full_name);
+		return -ENXIO;
+	}
+
+	reg = of_get_property(dev->dev.of_node, "reg", NULL);
+	termno = reg ? be32_to_cpup(reg) : 0;
+
+	/* Is it our boot one ? */
+	if (hvc_opal_privs[termno] == &hvc_opal_boot_priv) {
+		pv = hvc_opal_privs[termno];
+		boot = 1;
+	} else if (hvc_opal_privs[termno] == NULL) {
+		pv = kzalloc(sizeof(struct hvc_opal_priv), GFP_KERNEL);
+		if (!pv)
+			return -ENOMEM;
+		pv->proto = proto;
+		hvc_opal_privs[termno] = pv;
+		if (proto == HV_PROTOCOL_HVSI)
+			hvsilib_init(&pv->hvsi, opal_get_chars, opal_put_chars,
+				     termno, 0);
+
+		/* Instantiate now to establish a mapping index==vtermno */
+		hvc_instantiate(termno, termno, ops);
+	} else {
+		pr_err("hvc_opal: Device %s has duplicate terminal number #%d\n",
+		       dev->dev.of_node->full_name, termno);
+		return -ENXIO;
+	}
+
+	pr_info("hvc%d: %s protocol on %s%s\n", termno,
+		proto == HV_PROTOCOL_RAW ? "raw" : "hvsi",
+		dev->dev.of_node->full_name,
+		boot ? " (boot console)" : "");
+
+	/* We don't do IRQ yet */
+	hp = hvc_alloc(termno, 0, ops, MAX_VIO_PUT_CHARS);
+	if (IS_ERR(hp))
+		return PTR_ERR(hp);
+	dev_set_drvdata(&dev->dev, hp);
+
+	return 0;
+}
+
+static int __devexit hvc_opal_remove(struct platform_device *dev)
+{
+	struct hvc_struct *hp = dev_get_drvdata(&dev->dev);
+	int rc, termno;
+
+	termno = hp->vtermno;
+	rc = hvc_remove(hp);
+	if (rc == 0) {
+		if (hvc_opal_privs[termno] != &hvc_opal_boot_priv)
+			kfree(hvc_opal_privs[termno]);
+		hvc_opal_privs[termno] = NULL;
+	}
+	return rc;
+}
+
+static struct platform_driver hvc_opal_driver = {
+	.probe = hvc_opal_probe,
+	.remove = __devexit_p(hvc_opal_remove),
+	.driver = {
+		.name = hvc_opal_name,
+		.owner = THIS_MODULE,
+		.of_match_table = hvc_opal_match,
+	}
+};
+
+static int __init hvc_opal_init(void)
+{
+	if (!firmware_has_feature(FW_FEATURE_OPAL))
+		return -ENODEV;
+
+	/* Register as a platform driver to receive callbacks */
+	return platform_driver_register(&hvc_opal_driver);
+}
+module_init(hvc_opal_init);
+
+static void __exit hvc_opal_exit(void)
+{
+	platform_driver_unregister(&hvc_opal_driver);
+}
+module_exit(hvc_opal_exit);
+
+static void udbg_opal_putc(char c)
+{
+	unsigned int termno = hvc_opal_boot_termno;
+	int count = -1;
+
+	if (c == '\n')
+		udbg_opal_putc('\r');
+
+	do {
+		switch (hvc_opal_boot_priv.proto) {
+		case HV_PROTOCOL_RAW:
+			count = opal_put_chars(termno, &c, 1);
+			break;
+		case HV_PROTOCOL_HVSI:
+			count = hvc_opal_hvsi_put_chars(termno, &c, 1);
+			break;
+		}
+	} while (count == 0 || count == -EAGAIN);
+}
+
+static int udbg_opal_getc_poll(void)
+{
+	unsigned int termno = hvc_opal_boot_termno;
+	int rc = 0;
+	char c;
+
+	switch (hvc_opal_boot_priv.proto) {
+	case HV_PROTOCOL_RAW:
+		rc = opal_get_chars(termno, &c, 1);
+		break;
+	case HV_PROTOCOL_HVSI:
+		rc = hvc_opal_hvsi_get_chars(termno, &c, 1);
+		break;
+	}
+	if (!rc)
+		return -1;
+	return c;
+}
+
+static int udbg_opal_getc(void)
+{
+	int ch;
+
+	for (;;) {
+		ch = udbg_opal_getc_poll();
+		if (ch == -1) {
+			/* This shouldn't be needed...but...
*/ + volatile unsigned long delay; + for (delay=0; delay < 2000000; delay++) + ; + } else { + return ch; + } + } +} + +static void udbg_init_opal_common(void) +{ + udbg_putc = udbg_opal_putc; + udbg_getc = udbg_opal_getc; + udbg_getc_poll = udbg_opal_getc_poll; + tb_ticks_per_usec = 0x200; /* Make udelay not suck */ +} + +void __init hvc_opal_init_early(void) +{ + struct device_node *stdout_node = NULL; + const u32 *termno; + const char *name = NULL; + const struct hv_ops *ops; + u32 index; + + /* find the boot console from /chosen/stdout */ + if (of_chosen) + name = of_get_property(of_chosen, "linux,stdout-path", NULL); + if (name) { + stdout_node = of_find_node_by_path(name); + if (!stdout_node) { + pr_err("hvc_opal: Failed to locate default console!\n"); + return; + } + } else { + struct device_node *opal, *np; + + /* Current OPAL takeover doesn't provide the stdout + * path, so we hard wire it + */ + opal = of_find_node_by_path("/ibm,opal/consoles"); + if (opal) + pr_devel("hvc_opal: Found consoles in new location\n"); + if (!opal) { + opal = of_find_node_by_path("/ibm,opal"); + if (opal) + pr_devel("hvc_opal: " + "Found consoles in old location\n"); + } + if (!opal) + return; + for_each_child_of_node(opal, np) { + if (!strcmp(np->name, "serial")) { + stdout_node = np; + break; + } + } + of_node_put(opal); + } + if (!stdout_node) + return; + termno = of_get_property(stdout_node, "reg", NULL); + index = termno ? *termno : 0; + if (index >= MAX_NR_HVC_CONSOLES) + return; + hvc_opal_privs[index] = &hvc_opal_boot_priv; + + /* Check the protocol */ + if (of_device_is_compatible(stdout_node, "ibm,opal-console-raw")) { + hvc_opal_boot_priv.proto = HV_PROTOCOL_RAW; + ops = &hvc_opal_raw_ops; + pr_devel("hvc_opal: Found RAW console\n"); + } + else if (of_device_is_compatible(stdout_node,"ibm,opal-console-hvsi")) { + hvc_opal_boot_priv.proto = HV_PROTOCOL_HVSI; + ops = &hvc_opal_hvsi_ops; + hvsilib_init(&hvc_opal_boot_priv.hvsi, opal_get_chars, + opal_put_chars, index, 1); + /* HVSI, perform the handshake now */ + hvsilib_establish(&hvc_opal_boot_priv.hvsi); + pr_devel("hvc_opal: Found HVSI console\n"); + } else + goto out; + hvc_opal_boot_termno = index; + udbg_init_opal_common(); + add_preferred_console("hvc", index, NULL); + hvc_instantiate(index, index, ops); +out: + of_node_put(stdout_node); +} + +#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_RAW +void __init udbg_init_debug_opal(void) +{ + u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO; + hvc_opal_privs[index] = &hvc_opal_boot_priv; + hvc_opal_boot_priv.proto = HV_PROTOCOL_RAW; + hvc_opal_boot_termno = index; + udbg_init_opal_common(); +} +#endif /* CONFIG_PPC_EARLY_DEBUG_OPAL_RAW */ + +#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_HVSI +void __init udbg_init_debug_opal_hvsi(void) +{ + u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO; + hvc_opal_privs[index] = &hvc_opal_boot_priv; + hvc_opal_boot_termno = index; + udbg_init_opal_common(); + hvsilib_init(&hvc_opal_boot_priv.hvsi, opal_get_chars, opal_put_chars, + index, 1); + hvsilib_establish(&hvc_opal_boot_priv.hvsi); +} +#endif /* CONFIG_PPC_EARLY_DEBUG_OPAL_HVSI */ diff --cc drivers/tty/serial/nwpserial.c index 9beaff1cec24,4fcea030ff36..a7fb4188ad74 --- a/drivers/tty/serial/nwpserial.c +++ b/drivers/tty/serial/nwpserial.c @@@ -15,9 -15,9 +15,10 @@@ #include #include #include +#include #include #include + #include #include #include #include diff --cc drivers/video/sh_mobile_lcdcfb.c index 3a41c013d031,499d54892017..facffc254976 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c @@@ 
-23,8 -23,8 +23,9 @@@ #include #include #include + #include #include
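
As background for the read_mem()/write_mem() vendor commands in the rts51x
hunk above: the CDB carries a big-endian register address in bytes 2-3 and a
big-endian byte count in bytes 4-5, and addresses below 0xe000 are silently
filtered. The following is a minimal user-space sketch of exercising that
path through the SG_IO ioctl. The two opcode bytes (VENDOR_CMND, READ_MEM)
are defined in the driver's private header; the numeric values below are
assumptions, as is the /dev/sg0 device node.

/* Hypothetical sketch, not part of the patch: issue the driver's vendor
 * READ_MEM command from user space via SG_IO. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

#define VENDOR_CMND_GUESS	0xF0	/* assumption: driver's VENDOR_CMND */
#define READ_MEM_GUESS		0x0D	/* assumption: driver's READ_MEM */

int main(void)
{
	unsigned char cdb[12] = { 0 };
	unsigned char data[16], sense[32];
	unsigned short addr = 0xe000;	/* addresses below 0xe000 are filtered */
	unsigned short len = sizeof(data);
	sg_io_hdr_t io;
	int fd = open("/dev/sg0", O_RDWR);	/* adjust to the reader's sg node */

	if (fd < 0)
		return 1;

	cdb[0] = VENDOR_CMND_GUESS;
	cdb[1] = READ_MEM_GUESS;
	cdb[2] = addr >> 8;	/* big-endian register address */
	cdb[3] = addr & 0xff;
	cdb[4] = len >> 8;	/* big-endian transfer length */
	cdb[5] = len & 0xff;

	memset(&io, 0, sizeof(io));
	io.interface_id = 'S';
	io.cmdp = cdb;
	io.cmd_len = sizeof(cdb);
	io.dxfer_direction = SG_DXFER_FROM_DEV;
	io.dxferp = data;
	io.dxfer_len = len;
	io.sbp = sense;
	io.mx_sb_len = sizeof(sense);
	io.timeout = 5000;	/* milliseconds */

	if (ioctl(fd, SG_IO, &io) == 0 && io.status == 0)
		printf("read %u bytes from 0x%04x\n", len, addr);

	close(fd);
	return 0;
}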