git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge branch 'pm-opp-next' into linux-next
author Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Fri, 2 Nov 2012 10:11:54 +0000 (11:11 +0100)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Fri, 2 Nov 2012 10:11:54 +0000 (11:11 +0100)
* pm-opp-next:
  PM / OPP: using kfree_rcu() to simplify the code
  PM / OPP: predictable fail results for opp_find* functions, v2
  PM / OPP: Export symbols for module usage.
  PM / OPP: RCU reclaim
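
The OPP-side patches themselves are not part of the merge diff below, which only touches drivers/devfreq/devfreq.c. As a rough illustration of the first and fourth entries, the RCU-reclaim plus kfree_rcu() pattern generally looks like the sketch here; the struct layout, field names and call sites are assumptions for illustration, not the actual drivers/base/power/opp.c code.

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Assumed shape of an RCU-protected OPP entry; field names are illustrative. */
struct opp {
	struct list_head node;
	unsigned long rate;
	struct rcu_head rcu_head;	/* other fields omitted */
};

/* Before kfree_rcu(): an explicit callback exists only to kfree() the entry. */
static void opp_free_rcu(struct rcu_head *head)
{
	struct opp *opp = container_of(head, struct opp, rcu_head);

	kfree(opp);
}
/* ... list_del_rcu(&opp->node); call_rcu(&opp->rcu_head, opp_free_rcu); */

/* After the cleanup, kfree_rcu() removes the need for the helper: */
/* ... list_del_rcu(&opp->node); kfree_rcu(opp, rcu_head); */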

drivers/devfreq/devfreq.c

index 789af4ff5c9ccdb38edbe89dc1e03c9b8152cfaf,4fa1a22c55eabe578cb8e285d0630290c957a891..ab45dee7015045168165645cb23c6ddfe90ecd38
  #include <linux/hrtimer.h>
  #include "governor.h"
  
 -struct class *devfreq_class;
 +static struct class *devfreq_class;
  
  /*
 - * devfreq_work periodically monitors every registered device.
 - * The minimum polling interval is one jiffy. The polling interval is
 - * determined by the minimum polling period among all polling devfreq
 - * devices. The resolution of polling interval is one jiffy.
 + * devfreq core provides delayed work based load monitoring helper
 + * functions. Governors can use these or can implement their own
 + * monitoring mechanism.
   */
 -static bool polling;
  static struct workqueue_struct *devfreq_wq;
 -static struct delayed_work devfreq_work;
 -
 -/* wait removing if this is to be removed */
 -static struct devfreq *wait_remove_device;
  
  /* The list of all device-devfreq */
  static LIST_HEAD(devfreq_list);
@@@ -66,8 -72,6 +66,8 @@@ static struct devfreq *find_device_devf
        return ERR_PTR(-ENODEV);
  }
  
 +/* Load monitoring helper functions for governors use */
 +
  /**
   * update_devfreq() - Reevaluate the device and configure frequency.
   * @devfreq:  the devfreq instance.
@@@ -116,158 -120,12 +116,158 @@@ int update_devfreq(struct devfreq *devf
        return err;
  }
  
 +/**
 + * devfreq_monitor() - Periodically poll devfreq objects.
 + * @work:     the work struct used to run devfreq_monitor periodically.
 + *
 + */
 +static void devfreq_monitor(struct work_struct *work)
 +{
 +      int err;
 +      struct devfreq *devfreq = container_of(work,
 +                                      struct devfreq, work.work);
 +
 +      mutex_lock(&devfreq->lock);
 +      err = update_devfreq(devfreq);
 +      if (err)
 +              dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
 +
 +      queue_delayed_work(devfreq_wq, &devfreq->work,
 +                              msecs_to_jiffies(devfreq->profile->polling_ms));
 +      mutex_unlock(&devfreq->lock);
 +}
 +
 +/**
 + * devfreq_monitor_start() - Start load monitoring of devfreq instance
 + * @devfreq:  the devfreq instance.
 + *
 + * Helper function for starting devfreq device load monitoring. By
 + * default delayed work based monitoring is supported. Function
 + * to be called from governor in response to DEVFREQ_GOV_START
 + * event when device is added to devfreq framework.
 + */
 +void devfreq_monitor_start(struct devfreq *devfreq)
 +{
 +      INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
 +      if (devfreq->profile->polling_ms)
 +              queue_delayed_work(devfreq_wq, &devfreq->work,
 +                      msecs_to_jiffies(devfreq->profile->polling_ms));
 +}
 +
 +/**
 + * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
 + * @devfreq:  the devfreq instance.
 + *
 + * Helper function to stop devfreq device load monitoring. Function
 + * to be called from governor in response to DEVFREQ_GOV_STOP
 + * event when device is removed from devfreq framework.
 + */
 +void devfreq_monitor_stop(struct devfreq *devfreq)
 +{
 +      cancel_delayed_work_sync(&devfreq->work);
 +}
 +
 +/**
 + * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
 + * @devfreq:  the devfreq instance.
 + *
 + * Helper function to suspend devfreq device load monitoring. Function
 + * to be called from governor in response to DEVFREQ_GOV_SUSPEND
 + * event or when polling interval is set to zero.
 + *
 + * Note: Though this function is the same as devfreq_monitor_stop(),
 + * intentionally kept separate to provide hooks for collecting
 + * transition statistics.
 + */
 +void devfreq_monitor_suspend(struct devfreq *devfreq)
 +{
 +      mutex_lock(&devfreq->lock);
 +      if (devfreq->stop_polling) {
 +              mutex_unlock(&devfreq->lock);
 +              return;
 +      }
 +
 +      devfreq->stop_polling = true;
 +      mutex_unlock(&devfreq->lock);
 +      cancel_delayed_work_sync(&devfreq->work);
 +}
 +
 +/**
 + * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
 + * @devfreq:    the devfreq instance.
 + *
 + * Helper function to resume devfreq device load monitoring. Function
 + * to be called from governor in response to DEVFREQ_GOV_RESUME
 + * event or when polling interval is set to non-zero.
 + */
 +void devfreq_monitor_resume(struct devfreq *devfreq)
 +{
 +      mutex_lock(&devfreq->lock);
 +      if (!devfreq->stop_polling)
 +              goto out;
 +
 +      if (!delayed_work_pending(&devfreq->work) &&
 +                      devfreq->profile->polling_ms)
 +              queue_delayed_work(devfreq_wq, &devfreq->work,
 +                      msecs_to_jiffies(devfreq->profile->polling_ms));
 +      devfreq->stop_polling = false;
 +
 +out:
 +      mutex_unlock(&devfreq->lock);
 +}
 +
 +/**
 + * devfreq_interval_update() - Update device devfreq monitoring interval
 + * @devfreq:    the devfreq instance.
 + * @delay:      new polling interval to be set.
 + *
 + * Helper function to set new load monitoring polling interval. Function
 + * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
 + */
 +void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
 +{
 +      unsigned int cur_delay = devfreq->profile->polling_ms;
 +      unsigned int new_delay = *delay;
 +
 +      mutex_lock(&devfreq->lock);
 +      devfreq->profile->polling_ms = new_delay;
 +
 +      if (devfreq->stop_polling)
 +              goto out;
 +
 +      /* if new delay is zero, stop polling */
 +      if (!new_delay) {
 +              mutex_unlock(&devfreq->lock);
 +              cancel_delayed_work_sync(&devfreq->work);
 +              return;
 +      }
 +
 +      /* if current delay is zero, start polling with new delay */
 +      if (!cur_delay) {
 +              queue_delayed_work(devfreq_wq, &devfreq->work,
 +                      msecs_to_jiffies(devfreq->profile->polling_ms));
 +              goto out;
 +      }
 +
 +      /* if current delay is greater than new delay, restart polling */
 +      if (cur_delay > new_delay) {
 +              mutex_unlock(&devfreq->lock);
 +              cancel_delayed_work_sync(&devfreq->work);
 +              mutex_lock(&devfreq->lock);
 +              if (!devfreq->stop_polling)
 +                      queue_delayed_work(devfreq_wq, &devfreq->work,
 +                            msecs_to_jiffies(devfreq->profile->polling_ms));
 +      }
 +out:
 +      mutex_unlock(&devfreq->lock);
 +}
 +
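
For context on how governors are expected to consume these helpers, here is a minimal sketch. It assumes the DEVFREQ_GOV_* events and the event_handler hook introduced by this series; the governor itself (sample_governor and its trivial get_target_freq) is hypothetical and not part of this commit.

#include <linux/devfreq.h>
#include "governor.h"

/* Hypothetical frequency decision; a real governor derives this from load. */
static int sample_get_target_freq(struct devfreq *devfreq, unsigned long *freq)
{
	*freq = devfreq->profile->initial_freq;
	return 0;
}

/* Forward the framework events to the new load-monitoring helpers. */
static int sample_event_handler(struct devfreq *devfreq,
				unsigned int event, void *data)
{
	switch (event) {
	case DEVFREQ_GOV_START:
		devfreq_monitor_start(devfreq);		/* arm the delayed work */
		break;
	case DEVFREQ_GOV_STOP:
		devfreq_monitor_stop(devfreq);		/* cancel it synchronously */
		break;
	case DEVFREQ_GOV_INTERVAL:
		devfreq_interval_update(devfreq, (unsigned int *)data);
		break;
	case DEVFREQ_GOV_SUSPEND:
		devfreq_monitor_suspend(devfreq);
		break;
	case DEVFREQ_GOV_RESUME:
		devfreq_monitor_resume(devfreq);
		break;
	}
	return 0;
}

static const struct devfreq_governor sample_governor = {
	.name		 = "sample",
	.get_target_freq = sample_get_target_freq,
	.event_handler	 = sample_event_handler,
};

The helpers keep devfreq->lock handling internal, so the governor only dispatches events and does not have to manage the workqueue itself.
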
  /**
   * devfreq_notifier_call() - Notify that the device frequency requirements
   *                       has been changed out of devfreq framework.
 - * @nb                the notifier_block (supposed to be devfreq->nb)
 - * @type      not used
 - * @devp      not used
 + * @nb:               the notifier_block (supposed to be devfreq->nb)
 + * @type:     not used
 + * @devp:     not used
   *
   * Called by a notifier that uses devfreq->nb.
   */
@@@ -285,32 -143,59 +285,32 @@@ static int devfreq_notifier_call(struc
  }
  
  /**
 - * _remove_devfreq() - Remove devfreq from the device.
 + * _remove_devfreq() - Remove devfreq from the list and release its resources.
   * @devfreq:  the devfreq struct
   * @skip:     skip calling device_unregister().
 - *
 - * Note that the caller should lock devfreq->lock before calling
 - * this. _remove_devfreq() will unlock it and free devfreq
 - * internally. devfreq_list_lock should be locked by the caller
 - * as well (not relased at return)
 - *
 - * Lock usage:
 - * devfreq->lock: locked before call.
 - *              unlocked at return (and freed)
 - * devfreq_list_lock: locked before call.
 - *                  kept locked at return.
 - *                  if devfreq is centrally polled.
 - *
 - * Freed memory:
 - * devfreq
   */
  static void _remove_devfreq(struct devfreq *devfreq, bool skip)
  {
 -      if (!mutex_is_locked(&devfreq->lock)) {
 -              WARN(true, "devfreq->lock must be locked by the caller.\n");
 -              return;
 -      }
 -      if (!devfreq->governor->no_central_polling &&
 -          !mutex_is_locked(&devfreq_list_lock)) {
 -              WARN(true, "devfreq_list_lock must be locked by the caller.\n");
 +      mutex_lock(&devfreq_list_lock);
 +      if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
 +              mutex_unlock(&devfreq_list_lock);
 +              dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
                return;
        }
 +      list_del(&devfreq->node);
 +      mutex_unlock(&devfreq_list_lock);
  
 -      if (devfreq->being_removed)
 -              return;
 -
 -      devfreq->being_removed = true;
 +      devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_STOP, NULL);
  
        if (devfreq->profile->exit)
                devfreq->profile->exit(devfreq->dev.parent);
  
 -      if (devfreq->governor->exit)
 -              devfreq->governor->exit(devfreq);
 -
        if (!skip && get_device(&devfreq->dev)) {
                device_unregister(&devfreq->dev);
                put_device(&devfreq->dev);
        }
  
 -      if (!devfreq->governor->no_central_polling)
 -              list_del(&devfreq->node);
 -
 -      mutex_unlock(&devfreq->lock);
        mutex_destroy(&devfreq->lock);
 -
        kfree(devfreq);
  }
  
  static void devfreq_dev_release(struct device *dev)
  {
        struct devfreq *devfreq = to_devfreq(dev);
 -      bool central_polling = !devfreq->governor->no_central_polling;
  
 -      /*
 -       * If devfreq_dev_release() was called by device_unregister() of
 -       * _remove_devfreq(), we cannot mutex_lock(&devfreq->lock) and
 -       * being_removed is already set. This also partially checks the case
 -       * where devfreq_dev_release() is called from a thread other than
 -       * the one called _remove_devfreq(); however, this case is
 -       * dealt completely with another following being_removed check.
 -       *
 -       * Because being_removed is never being
 -       * unset, we do not need to worry about race conditions on
 -       * being_removed.
 -       */
 -      if (devfreq->being_removed)
 -              return;
 -
 -      if (central_polling)
 -              mutex_lock(&devfreq_list_lock);
 -
 -      mutex_lock(&devfreq->lock);
 -
 -      /*
 -       * Check being_removed flag again for the case where
 -       * devfreq_dev_release() was called in a thread other than the one
 -       * possibly called _remove_devfreq().
 -       */
 -      if (devfreq->being_removed) {
 -              mutex_unlock(&devfreq->lock);
 -              goto out;
 -      }
 -
 -      /* devfreq->lock is unlocked and removed in _removed_devfreq() */
        _remove_devfreq(devfreq, true);
 -
 -out:
 -      if (central_polling)
 -              mutex_unlock(&devfreq_list_lock);
 -}
 -
 -/**
 - * devfreq_monitor() - Periodically poll devfreq objects.
 - * @work: the work struct used to run devfreq_monitor periodically.
 - *
 - */
 -static void devfreq_monitor(struct work_struct *work)
 -{
 -      static unsigned long last_polled_at;
 -      struct devfreq *devfreq, *tmp;
 -      int error;
 -      unsigned long jiffies_passed;
 -      unsigned long next_jiffies = ULONG_MAX, now = jiffies;
 -      struct device *dev;
 -
 -      /* Initially last_polled_at = 0, polling every device at bootup */
 -      jiffies_passed = now - last_polled_at;
 -      last_polled_at = now;
 -      if (jiffies_passed == 0)
 -              jiffies_passed = 1;
 -
 -      mutex_lock(&devfreq_list_lock);
 -      list_for_each_entry_safe(devfreq, tmp, &devfreq_list, node) {
 -              mutex_lock(&devfreq->lock);
 -              dev = devfreq->dev.parent;
 -
 -              /* Do not remove tmp for a while */
 -              wait_remove_device = tmp;
 -
 -              if (devfreq->governor->no_central_polling ||
 -                  devfreq->next_polling == 0) {
 -                      mutex_unlock(&devfreq->lock);
 -                      continue;
 -              }
 -              mutex_unlock(&devfreq_list_lock);
 -
 -              /*
 -               * Reduce more next_polling if devfreq_wq took an extra
 -               * delay. (i.e., CPU has been idled.)
 -               */
 -              if (devfreq->next_polling <= jiffies_passed) {
 -                      error = update_devfreq(devfreq);
 -
 -                      /* Remove a devfreq with an error. */
 -                      if (error && error != -EAGAIN) {
 -
 -                              dev_err(dev, "Due to update_devfreq error(%d), devfreq(%s) is removed from the device\n",
 -                                      error, devfreq->governor->name);
 -
 -                              /*
 -                               * Unlock devfreq before locking the list
 -                               * in order to avoid deadlock with
 -                               * find_device_devfreq or others
 -                               */
 -                              mutex_unlock(&devfreq->lock);
 -                              mutex_lock(&devfreq_list_lock);
 -                              /* Check if devfreq is already removed */
 -                              if (IS_ERR(find_device_devfreq(dev)))
 -                                      continue;
 -                              mutex_lock(&devfreq->lock);
 -                              /* This unlocks devfreq->lock and free it */
 -                              _remove_devfreq(devfreq, false);
 -                              continue;
 -                      }
 -                      devfreq->next_polling = devfreq->polling_jiffies;
 -              } else {
 -                      devfreq->next_polling -= jiffies_passed;
 -              }
 -
 -              if (devfreq->next_polling)
 -                      next_jiffies = (next_jiffies > devfreq->next_polling) ?
 -                                      devfreq->next_polling : next_jiffies;
 -
 -              mutex_unlock(&devfreq->lock);
 -              mutex_lock(&devfreq_list_lock);
 -      }
 -      wait_remove_device = NULL;
 -      mutex_unlock(&devfreq_list_lock);
 -
 -      if (next_jiffies > 0 && next_jiffies < ULONG_MAX) {
 -              polling = true;
 -              queue_delayed_work(devfreq_wq, &devfreq_work, next_jiffies);
 -      } else {
 -              polling = false;
 -      }
  }
  
  /**
@@@ -350,13 -357,16 +350,13 @@@ struct devfreq *devfreq_add_device(stru
                return ERR_PTR(-EINVAL);
        }
  
 -
 -      if (!governor->no_central_polling) {
 -              mutex_lock(&devfreq_list_lock);
 -              devfreq = find_device_devfreq(dev);
 -              mutex_unlock(&devfreq_list_lock);
 -              if (!IS_ERR(devfreq)) {
 -                      dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
 -                      err = -EINVAL;
 -                      goto err_out;
 -              }
 +      mutex_lock(&devfreq_list_lock);
 +      devfreq = find_device_devfreq(dev);
 +      mutex_unlock(&devfreq_list_lock);
 +      if (!IS_ERR(devfreq)) {
 +              dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
 +              err = -EINVAL;
 +              goto err_out;
        }
  
        devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
        devfreq->governor = governor;
        devfreq->previous_freq = profile->initial_freq;
        devfreq->data = data;
 -      devfreq->next_polling = devfreq->polling_jiffies
 -                            = msecs_to_jiffies(devfreq->profile->polling_ms);
        devfreq->nb.notifier_call = devfreq_notifier_call;
  
        dev_set_name(&devfreq->dev, dev_name(dev));
        err = device_register(&devfreq->dev);
        if (err) {
                put_device(&devfreq->dev);
 +              mutex_unlock(&devfreq->lock);
                goto err_dev;
        }
  
 -      if (governor->init)
 -              err = governor->init(devfreq);
 -      if (err)
 -              goto err_init;
 -
        mutex_unlock(&devfreq->lock);
  
 -      if (governor->no_central_polling)
 -              goto out;
 -
        mutex_lock(&devfreq_list_lock);
 -
        list_add(&devfreq->node, &devfreq_list);
 +      mutex_unlock(&devfreq_list_lock);
  
 -      if (devfreq_wq && devfreq->next_polling && !polling) {
 -              polling = true;
 -              queue_delayed_work(devfreq_wq, &devfreq_work,
 -                                 devfreq->next_polling);
 +      err = devfreq->governor->event_handler(devfreq,
 +                              DEVFREQ_GOV_START, NULL);
 +      if (err) {
 +              dev_err(dev, "%s: Unable to start governor for the device\n",
 +                      __func__);
 +              goto err_init;
        }
 -      mutex_unlock(&devfreq_list_lock);
 -out:
 +
        return devfreq;
  
  err_init:
 +      list_del(&devfreq->node);
        device_unregister(&devfreq->dev);
  err_dev:
 -      mutex_unlock(&devfreq->lock);
        kfree(devfreq);
  err_out:
        return ERR_PTR(err);
  }
 +EXPORT_SYMBOL(devfreq_add_device);
  
  /**
   * devfreq_remove_device() - Remove devfreq feature from a device.
 - * @devfreq   the devfreq instance to be removed
 + * @devfreq:  the devfreq instance to be removed
   */
  int devfreq_remove_device(struct devfreq *devfreq)
  {
 -      bool central_polling;
 -
        if (!devfreq)
                return -EINVAL;
  
 -      central_polling = !devfreq->governor->no_central_polling;
 +      _remove_devfreq(devfreq, false);
  
 -      if (central_polling) {
 -              mutex_lock(&devfreq_list_lock);
 -              while (wait_remove_device == devfreq) {
 -                      mutex_unlock(&devfreq_list_lock);
 -                      schedule();
 -                      mutex_lock(&devfreq_list_lock);
 -              }
 -      }
 +      return 0;
 +}
 +EXPORT_SYMBOL(devfreq_remove_device);
  
 -      mutex_lock(&devfreq->lock);
 -      _remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */
 +/**
 + * devfreq_suspend_device() - Suspend devfreq of a device.
 + * @devfreq: the devfreq instance to be suspended
 + */
 +int devfreq_suspend_device(struct devfreq *devfreq)
 +{
 +      if (!devfreq)
 +              return -EINVAL;
  
 -      if (central_polling)
 -              mutex_unlock(&devfreq_list_lock);
 +      return devfreq->governor->event_handler(devfreq,
 +                              DEVFREQ_GOV_SUSPEND, NULL);
 +}
 +EXPORT_SYMBOL(devfreq_suspend_device);
  
 -      return 0;
 +/**
 + * devfreq_resume_device() - Resume devfreq of a device.
 + * @devfreq: the devfreq instance to be resumed
 + */
 +int devfreq_resume_device(struct devfreq *devfreq)
 +{
 +      if (!devfreq)
 +              return -EINVAL;
 +
 +      return devfreq->governor->event_handler(devfreq,
 +                              DEVFREQ_GOV_RESUME, NULL);
  }
 +EXPORT_SYMBOL(devfreq_resume_device);
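
A hedged sketch of the driver side: registering a devfreq device and forwarding system PM transitions through the newly exported helpers. All sample_* names are illustrative, and passing &devfreq_simple_ondemand relies on the devfreq_add_device() signature as it stands at this point in the series.

#include <linux/devfreq.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static struct devfreq *sample_devfreq;

/* Hypothetical ->target() callback; hardware programming omitted. */
static int sample_target(struct device *dev, unsigned long *freq, u32 flags)
{
	return 0;
}

static struct devfreq_dev_profile sample_profile = {
	.initial_freq	= 200000000,
	.polling_ms	= 100,	/* picked up by devfreq_monitor_start() */
	.target		= sample_target,
};

static int sample_probe(struct platform_device *pdev)
{
	sample_devfreq = devfreq_add_device(&pdev->dev, &sample_profile,
					    &devfreq_simple_ondemand, NULL);
	return IS_ERR(sample_devfreq) ? PTR_ERR(sample_devfreq) : 0;
}

static int sample_suspend(struct device *dev)
{
	/* Reaches the governor as DEVFREQ_GOV_SUSPEND. */
	return devfreq_suspend_device(sample_devfreq);
}

static int sample_resume(struct device *dev)
{
	/* Reaches the governor as DEVFREQ_GOV_RESUME. */
	return devfreq_resume_device(sample_devfreq);
}
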
  
  static ssize_t show_governor(struct device *dev,
                             struct device_attribute *attr, char *buf)
  
  static ssize_t show_freq(struct device *dev,
                         struct device_attribute *attr, char *buf)
 +{
 +      unsigned long freq;
 +      struct devfreq *devfreq = to_devfreq(dev);
 +
 +      if (devfreq->profile->get_cur_freq &&
 +              !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
 +                      return sprintf(buf, "%lu\n", freq);
 +
 +      return sprintf(buf, "%lu\n", devfreq->previous_freq);
 +}
 +
 +static ssize_t show_target_freq(struct device *dev,
 +                      struct device_attribute *attr, char *buf)
  {
        return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
  }
@@@ -496,14 -488,37 +496,14 @@@ static ssize_t store_polling_interval(s
  
        ret = sscanf(buf, "%u", &value);
        if (ret != 1)
 -              goto out;
 -
 -      mutex_lock(&df->lock);
 -      df->profile->polling_ms = value;
 -      df->next_polling = df->polling_jiffies
 -                       = msecs_to_jiffies(value);
 -      mutex_unlock(&df->lock);
 +              return -EINVAL;
  
 +      df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
        ret = count;
  
 -      if (df->governor->no_central_polling)
 -              goto out;
 -
 -      mutex_lock(&devfreq_list_lock);
 -      if (df->next_polling > 0 && !polling) {
 -              polling = true;
 -              queue_delayed_work(devfreq_wq, &devfreq_work,
 -                                 df->next_polling);
 -      }
 -      mutex_unlock(&devfreq_list_lock);
 -out:
        return ret;
  }
  
 -static ssize_t show_central_polling(struct device *dev,
 -                                  struct device_attribute *attr, char *buf)
 -{
 -      return sprintf(buf, "%d\n",
 -                     !to_devfreq(dev)->governor->no_central_polling);
 -}
 -
  static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
  {
  
        ret = sscanf(buf, "%lu", &value);
        if (ret != 1)
 -              goto out;
 +              return -EINVAL;
  
        mutex_lock(&df->lock);
        max = df->max_freq;
        ret = count;
  unlock:
        mutex_unlock(&df->lock);
 -out:
        return ret;
  }
  
@@@ -547,7 -563,7 +547,7 @@@ static ssize_t store_max_freq(struct de
  
        ret = sscanf(buf, "%lu", &value);
        if (ret != 1)
 -              goto out;
 +              return -EINVAL;
  
        mutex_lock(&df->lock);
        min = df->min_freq;
        ret = count;
  unlock:
        mutex_unlock(&df->lock);
 -out:
        return ret;
  }
  
@@@ -573,7 -590,7 +573,7 @@@ static ssize_t show_max_freq(struct dev
  static struct device_attribute devfreq_attrs[] = {
        __ATTR(governor, S_IRUGO, show_governor, NULL),
        __ATTR(cur_freq, S_IRUGO, show_freq, NULL),
 -      __ATTR(central_polling, S_IRUGO, show_central_polling, NULL),
 +      __ATTR(target_freq, S_IRUGO, show_target_freq, NULL),
        __ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
               store_polling_interval),
        __ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
        { },
  };
  
 -/**
 - * devfreq_start_polling() - Initialize data structure for devfreq framework and
 - *                       start polling registered devfreq devices.
 - */
 -static int __init devfreq_start_polling(void)
 -{
 -      mutex_lock(&devfreq_list_lock);
 -      polling = false;
 -      devfreq_wq = create_freezable_workqueue("devfreq_wq");
 -      INIT_DEFERRABLE_WORK(&devfreq_work, devfreq_monitor);
 -      mutex_unlock(&devfreq_list_lock);
 -
 -      devfreq_monitor(&devfreq_work.work);
 -      return 0;
 -}
 -late_initcall(devfreq_start_polling);
 -
  static int __init devfreq_init(void)
  {
        devfreq_class = class_create(THIS_MODULE, "devfreq");
                pr_err("%s: couldn't create class\n", __FILE__);
                return PTR_ERR(devfreq_class);
        }
 +
 +      devfreq_wq = create_freezable_workqueue("devfreq_wq");
 +      if (IS_ERR(devfreq_wq)) {
 +              class_destroy(devfreq_class);
 +              pr_err("%s: couldn't create workqueue\n", __FILE__);
 +              return PTR_ERR(devfreq_wq);
 +      }
        devfreq_class->dev_attrs = devfreq_attrs;
 +
        return 0;
  }
  subsys_initcall(devfreq_init);
  static void __exit devfreq_exit(void)
  {
        class_destroy(devfreq_class);
 +      destroy_workqueue(devfreq_wq);
  }
  module_exit(devfreq_exit);
  
  /**
   * devfreq_recommended_opp() - Helper function to get proper OPP for the
   *                         freq value given to target callback.
 - * @dev               The devfreq user device. (parent of devfreq)
 - * @freq      The frequency given to target function
 - * @flags     Flags handed from devfreq framework.
 + * @dev:      The devfreq user device. (parent of devfreq)
 + * @freq:     The frequency given to target function
 + * @flags:    Flags handed from devfreq framework.
   *
   */
  struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
                opp = opp_find_freq_floor(dev, freq);
  
                /* If not available, use the closest opp */
-               if (opp == ERR_PTR(-ENODEV))
+               if (opp == ERR_PTR(-ERANGE))
                        opp = opp_find_freq_ceil(dev, freq);
        } else {
                /* The freq is an lower bound. opp should be higher */
                opp = opp_find_freq_ceil(dev, freq);
  
                /* If not available, use the closest opp */
-               if (opp == ERR_PTR(-ENODEV))
+               if (opp == ERR_PTR(-ERANGE))
                        opp = opp_find_freq_floor(dev, freq);
        }
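
As a usage sketch (not from this commit), a driver's ->target() callback would let devfreq_recommended_opp() do the floor/ceil fallback shown above, now keyed on -ERANGE per the "predictable fail results" patch, and only check for an error pointer itself. With the RCU-based OPP code merged here, the lookup is assumed to run under rcu_read_lock(); sample_target() is hypothetical.

#include <linux/devfreq.h>
#include <linux/err.h>
#include <linux/opp.h>
#include <linux/rcupdate.h>

static int sample_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct opp *opp;

	rcu_read_lock();
	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	*freq = opp_get_freq(opp);	/* the frequency actually chosen */
	rcu_read_unlock();

	/* Program clocks/regulators for *freq here (hardware specific). */
	return 0;
}
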
  
   * devfreq_register_opp_notifier() - Helper function to get devfreq notified
   *                               for any changes in the OPP availability
   *                               changes
 - * @dev               The devfreq user device. (parent of devfreq)
 - * @devfreq   The devfreq object.
 + * @dev:      The devfreq user device. (parent of devfreq)
 + * @devfreq:  The devfreq object.
   */
  int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
  {
   * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
   *                                 notified for any changes in the OPP
   *                                 availability changes anymore.
 - * @dev               The devfreq user device. (parent of devfreq)
 - * @devfreq   The devfreq object.
 + * @dev:      The devfreq user device. (parent of devfreq)
 + * @devfreq:  The devfreq object.
   *
   * At exit() callback of devfreq_dev_profile, this must be included if
   * devfreq_recommended_opp is used.