static DEFINE_SPINLOCK(mmc_host_lock);
#ifdef CONFIG_MMC_CLKGATE
+static ssize_t clkgate_delay_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay);
+}
+
+static ssize_t clkgate_delay_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	unsigned long flags, value;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
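+	/* clk_lock keeps this update coherent with readers in the gating path */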
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clkgate_delay = value;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+
+	pr_info("%s: clock gate delay set to %lu ms\n",
+		mmc_hostname(host), value);
+	return count;
+}
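For reference, a minimal userspace sketch (not part of the patch) of how the new attribute could be read and tuned once the change is applied; the /sys/class/mmc_host/mmc0 path and the 100 ms value are illustrative, since the actual host name depends on probe order:

/* Illustrative only -- not part of the patch above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Host name (mmc0) is an assumption; substitute the host to tune. */
	const char *path = "/sys/class/mmc_host/mmc0/clkgate_delay";
	char buf[32];
	ssize_t n;
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("current gating delay (ms): %s", buf);
	}

	/* Request a 100 ms delay before the clock is gated. */
	lseek(fd, 0, SEEK_SET);
	if (write(fd, "100", strlen("100")) < 0)
		perror("write");

	close(fd);
	return 0;
}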
/*
 * Enabling clock gating will make the core call out to the host
static void mmc_host_clk_gate_work(struct work_struct *work)
{
	struct mmc_host *host = container_of(work, struct mmc_host,
-						clk_gate_work);
+						clk_gate_work.work);
	mmc_host_clk_gate_delayed(host);
}
{
	unsigned long flags;

+	/* cancel any clock gating work scheduled by mmc_host_clk_release() */
+	cancel_delayed_work_sync(&host->clk_gate_work);
	mutex_lock(&host->clk_gate_mutex);
	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->clk_gated) {
	host->clk_requests--;
	if (mmc_host_may_gate_card(host->card) &&
	    !host->clk_requests)
-		queue_work(system_nrt_wq, &host->clk_gate_work);
+		queue_delayed_work(system_nrt_wq, &host->clk_gate_work,
+				msecs_to_jiffies(host->clkgate_delay));
	spin_unlock_irqrestore(&host->clk_lock, flags);
}
	host->clk_requests = 0;
	/* Hold MCI clock for 8 cycles by default */
	host->clk_delay = 8;
+	/*
+	 * Default clock gating delay is 200 ms.
+	 * This value can be tuned by writing to the clkgate_delay sysfs entry.
+	 */
+	host->clkgate_delay = 200;
	host->clk_gated = false;
-	INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
+	INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
	spin_lock_init(&host->clk_lock);
	mutex_init(&host->clk_gate_mutex);
}
 * Wait for any outstanding gate and then make sure we're
 * ungated before exiting.
 */
-	if (cancel_work_sync(&host->clk_gate_work))
+	if (cancel_delayed_work_sync(&host->clk_gate_work))
		mmc_host_clk_gate_delayed(host);
	if (host->clk_gated)
		mmc_host_clk_hold(host);
	WARN_ON(host->clk_requests > 1);
}
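+
+/*
+ * Expose clkgate_delay (in milliseconds) on the host's class device
+ * so the gating timeout can be tuned at runtime.
+ */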
+static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+{
+	host->clkgate_delay_attr.show = clkgate_delay_show;
+	host->clkgate_delay_attr.store = clkgate_delay_store;
+	sysfs_attr_init(&host->clkgate_delay_attr.attr);
+	host->clkgate_delay_attr.attr.name = "clkgate_delay";
+	host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
+		pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
+			mmc_hostname(host));
+}
#else
static inline void mmc_host_clk_init(struct mmc_host *host)
{
}
+static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+{
+}
+
#endif
/**
#ifdef CONFIG_DEBUG_FS
	mmc_add_host_debugfs(host);
#endif
+	mmc_host_clk_sysfs_init(host);
	mmc_start_host(host);
	register_pm_notifier(&host->pm_notify);