#include <linux/freezer.h>
#include "tpm.h"
-
-enum tpm_const {
- TPM_MINOR = 224, /* officially assigned */
- TPM_BUFSIZE = 4096,
- TPM_NUM_DEVICES = 256,
-};
+#include "tpm_eventlog.h"
enum tpm_duration {
TPM_SHORT = 0,
#define TPM_INTERNAL_RESULT_SIZE 200
#define TPM_TAG_RQU_COMMAND cpu_to_be16(193)
#define TPM_ORD_GET_CAP cpu_to_be32(101)
+#define TPM_ORD_GET_RANDOM cpu_to_be32(70)
static const struct tpm_input_header tpm_getcap_header = {
.tag = TPM_TAG_RQU_COMMAND,
struct tpm_chip *chip = file->private_data;
del_singleshot_timer_sync(&chip->user_read_timer);
- flush_work_sync(&chip->work);
+ flush_work(&chip->work);
file->private_data = NULL;
atomic_set(&chip->data_pending, 0);
- kfree(chip->data_buffer);
+ kzfree(chip->data_buffer);
clear_bit(0, &chip->is_open);
put_device(chip->dev);
return 0;
int rc;
del_singleshot_timer_sync(&chip->user_read_timer);
- flush_work_sync(&chip->work);
+ flush_work(&chip->work);
ret_size = atomic_read(&chip->data_pending);
- atomic_set(&chip->data_pending, 0);
if (ret_size > 0) { /* relay data */
ssize_t orig_ret_size = ret_size;
if (size < ret_size)
mutex_unlock(&chip->buffer_mutex);
}
+ atomic_set(&chip->data_pending, 0);
+
return ret_size;
}
EXPORT_SYMBOL_GPL(tpm_read);
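Throughout this series flush_work_sync() becomes flush_work(): with
workqueues now non-reentrant by default, flush_work() already waits for
the work item to finish regardless of which CPU it ran on, so the _sync
variant is redundant. The tpm pattern above is typical (a sketch):

	del_singleshot_timer_sync(&chip->user_read_timer);
	flush_work(&chip->work);	/* same guarantee flush_work_sync() gave */

The same reasoning removes system_nrt_wq further down: the regular
system_wq is non-reentrant too, so plain schedule_work() and
schedule_delayed_work() suffice.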
}
EXPORT_SYMBOL_GPL(tpm_pm_resume);
+#define TPM_GETRANDOM_RESULT_SIZE 18
+static struct tpm_input_header tpm_getrandom_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(14),
+ .ordinal = TPM_ORD_GET_RANDOM
+};
+
+/**
+ * tpm_get_random() - Get random bytes from the tpm's RNG
+ * @chip_num: A specific chip number for the request or TPM_ANY_NUM
+ * @out: destination buffer for the random bytes
+ * @max: the max number of bytes to write to @out
+ *
+ * Returns < 0 on error and the number of bytes read on success
+ */
+int tpm_get_random(u32 chip_num, u8 *out, size_t max)
+{
+ struct tpm_chip *chip;
+ struct tpm_cmd_t tpm_cmd;
+ u32 recd, num_bytes = min_t(u32, max, TPM_MAX_RNG_DATA);
+ int err, total = 0, retries = 5;
+ u8 *dest = out;
+
+	if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
+		return -EINVAL;
+
+	chip = tpm_chip_find_get(chip_num);
+	if (chip == NULL)
+		return -ENODEV;
+
+ do {
+ tpm_cmd.header.in = tpm_getrandom_header;
+ tpm_cmd.params.getrandom_in.num_bytes = cpu_to_be32(num_bytes);
+
+ err = transmit_cmd(chip, &tpm_cmd,
+ TPM_GETRANDOM_RESULT_SIZE + num_bytes,
+ "attempting get random");
+ if (err)
+ break;
+
+		recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len);
+		if (recd > num_bytes) {
+			total = -EFAULT;
+			break;
+		}
+		memcpy(dest, tpm_cmd.params.getrandom_out.rng_data, recd);
+
+ dest += recd;
+ total += recd;
+ num_bytes -= recd;
+ } while (retries-- && total < max);
+
+ return total ? total : -EIO;
+}
+EXPORT_SYMBOL_GPL(tpm_get_random);
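A minimal caller sketch (names are illustrative, not part of the patch):
a driver wanting TPM-sourced entropy might do

	u8 seed[32];
	int n;

	n = tpm_get_random(TPM_ANY_NUM, seed, sizeof(seed));
	if (n < (int)sizeof(seed)) {
		/* error or short read: fall back to the kernel PRNG */
		get_random_bytes(seed, sizeof(seed));
	}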
+
/* In case vendor provided release function, call it too.*/
void tpm_dev_vendor_release(struct tpm_chip *chip)
goto put_device;
}
+ if (sys_add_ppi(&dev->kobj)) {
+ misc_deregister(&chip->vendor.miscdev);
+ goto put_device;
+ }
+
chip->bios_dir = tpm_bios_log_setup(devname);
/* Make chip available */
delay -= jiffies % delay;
dbs_info->enable = 1;
- INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
+ INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer);
schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
}
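INIT_DELAYED_WORK_DEFERRABLE() was renamed INIT_DEFERRABLE_WORK(); the
conversions in this series are mechanical. "Deferrable" means the
underlying timer may be deferred while the CPU idles. A static-declaration
sketch with a hypothetical handler:

	static void poll_fn(struct work_struct *work);
	static DECLARE_DEFERRABLE_WORK(poll_work, poll_fn);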
j_dbs_info->prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
+ this_dbs_info->cpu = cpu;
this_dbs_info->down_skip = 0;
this_dbs_info->requested_freq = policy->cur;
return -EBUSY;
dvb_net_stop(net);
- flush_work_sync(&priv->set_multicast_list_wq);
- flush_work_sync(&priv->restart_net_feed_wq);
+ flush_work(&priv->set_multicast_list_wq);
+ flush_work(&priv->restart_net_feed_wq);
printk("dvb_net: removed network interface %s\n", net->name);
unregister_netdev(net);
dvbnet->state[num]=0;
static void flush_request_modules(struct bttv *dev)
{
- flush_work_sync(&dev->request_module_wk);
+ flush_work(&dev->request_module_wk);
}
#else
#define request_modules(dev)
static void flush_request_modules(struct cx18 *dev)
{
- flush_work_sync(&dev->request_module_wk);
+ flush_work(&dev->request_module_wk);
}
#else
#define request_modules(dev)
module_init(module_start);
module_exit(module_cleanup);
+MODULE_FIRMWARE(XC2028_DEFAULT_FIRMWARE);
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
+ case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
case CX23885_BOARD_TEVII_S470:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
/*
*/
params.invert_level = true;
break;
+ case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
case CX23885_BOARD_TEVII_S470:
/*
* The IR controller on this board only returns pulse widths.
v4l2_subdev_call(dev->sd_ir, ir, rx_s_parameters, &params);
v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params);
}
- flush_work_sync(&dev->cx25840_work);
- flush_work_sync(&dev->ir_rx_work);
- flush_work_sync(&dev->ir_tx_work);
+ flush_work(&dev->cx25840_work);
+ flush_work(&dev->ir_rx_work);
+ flush_work(&dev->ir_tx_work);
}
static void cx23885_input_ir_close(struct rc_dev *rc)
/* The grey Hauppauge RC-5 remote */
rc_map = RC_MAP_HAUPPAUGE;
break;
+ case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
+ /* Integrated CX23885 IR controller */
+ driver_type = RC_DRIVER_IR_RAW;
+ allowed_protos = RC_TYPE_NEC;
+ /* The grey Terratec remote with orange buttons */
+ rc_map = RC_MAP_NEC_TERRATEC_CINERGY_XS;
+ break;
case CX23885_BOARD_TEVII_S470:
/* Integrated CX23885 IR controller */
driver_type = RC_DRIVER_IR_RAW;
static void flush_request_modules(struct cx8802_dev *dev)
{
- flush_work_sync(&dev->request_module_wk);
+ flush_work(&dev->request_module_wk);
}
#else
#define request_modules(dev)
struct mantis_pci *mantis = ca->ca_priv;
dprintk(MANTIS_DEBUG, 1, "Mantis Host I/F Event manager exiting");
- flush_work_sync(&ca->hif_evm_work);
+ flush_work(&ca->hif_evm_work);
mantis_hif_exit(ca);
mantis_pcmcia_exit(ca);
}
{
/* disable interrupt */
mmwrite(mmread(MANTIS_UART_CTL) & 0xffef, MANTIS_UART_CTL);
- flush_work_sync(&mantis->uart_work);
+ flush_work(&mantis->uart_work);
}
EXPORT_SYMBOL_GPL(mantis_uart_exit);
static void flush_request_submodules(struct saa7134_dev *dev)
{
- flush_work_sync(&dev->request_module_wk);
+ flush_work(&dev->request_module_wk);
}
#else
if (NULL == dev->empress_dev)
return 0;
- flush_work_sync(&dev->empress_workqueue);
+ flush_work(&dev->empress_workqueue);
video_unregister_device(dev->empress_dev);
dev->empress_dev = NULL;
return 0;
/*
- * drivers/media/video/omap24xxcam.c
+ * drivers/media/platform/omap24xxcam.c
*
* OMAP 2 camera block driver.
*
atomic_inc(&cam->reset_disable);
- flush_work_sync(&cam->sensor_reset_work);
+ flush_work(&cam->sensor_reset_work);
rval = videobuf_streamoff(q);
if (!rval) {
atomic_inc(&cam->reset_disable);
- flush_work_sync(&cam->sensor_reset_work);
+ flush_work(&cam->sensor_reset_work);
/* stop streaming capture */
videobuf_streamoff(&fh->vbq);
* not be scheduled anymore since streaming is already
* disabled.)
*/
- flush_work_sync(&cam->sensor_reset_work);
+ flush_work(&cam->sensor_reset_work);
mutex_lock(&cam->mutex);
if (atomic_dec_return(&cam->users) == 0) {
static void flush_request_modules(struct cx231xx *dev)
{
- flush_work_sync(&dev->request_module_wk);
+ flush_work(&dev->request_module_wk);
}
#else
#define request_modules(dev)
static void flush_request_modules(struct em28xx *dev)
{
- flush_work_sync(&dev->request_module_wk);
+ flush_work(&dev->request_module_wk);
}
#else
#define request_modules(dev)
static void flush_request_modules(struct tm6000_core *dev)
{
- flush_work_sync(&dev->request_module_wk);
+ flush_work(&dev->request_module_wk);
}
#else
#define request_modules(dev)
cxt->nextpage = 0;
}
- while (1) {
- ret = mtd_block_isbad(mtd, cxt->nextpage * record_size);
- if (!ret)
- break;
- if (ret < 0) {
- printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n");
- return;
- }
+ while ((ret = mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) {
badblock:
printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
cxt->nextpage * record_size);
}
}
+ if (ret < 0) {
+ printk(KERN_ERR "mtdoops: mtd_block_isbad failed, aborting\n");
+ return;
+ }
+
for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);
printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
cxt->mtd = NULL;
- flush_work_sync(&cxt->work_erase);
- flush_work_sync(&cxt->work_write);
+ flush_work(&cxt->work_erase);
+ flush_work(&cxt->work_write);
}
union niu_parent_id parent_id;
struct net_device *dev;
struct niu *np;
- int err, pos;
+ int err;
u64 dma_mask;
- u16 val16;
niu_driver_version();
goto err_out_disable_pdev;
}
- pos = pci_pcie_cap(pdev);
- if (pos <= 0) {
+ if (!pci_is_pcie(pdev)) {
dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
goto err_out_free_res;
}
goto err_out_free_dev;
}
- pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
- val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
- val16 |= (PCI_EXP_DEVCTL_CERE |
- PCI_EXP_DEVCTL_NFERE |
- PCI_EXP_DEVCTL_FERE |
- PCI_EXP_DEVCTL_URRE |
- PCI_EXP_DEVCTL_RELAX_EN);
- pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
+ pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_NOSNOOP_EN,
+ PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE |
+ PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE |
+ PCI_EXP_DEVCTL_RELAX_EN);
dma_mask = DMA_BIT_MASK(44);
err = pci_set_dma_mask(pdev, dma_mask);
if (!netif_running(dev))
return 0;
- flush_work_sync(&np->reset_task);
+ flush_work(&np->reset_task);
niu_netif_stop(np);
del_timer_sync(&np->timer);
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again. */
if (still_empty)
- queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
+ schedule_delayed_work(&vi->refill, HZ/2);
}
static int virtnet_poll(struct napi_struct *napi, int budget)
if (vi->num < vi->max / 2) {
if (!try_fill_recv(vi, GFP_ATOMIC))
- queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+ schedule_delayed_work(&vi->refill, 0);
}
/* Out of packets? */
/* Make sure we have some buffers: if oom use wq. */
if (!try_fill_recv(vi, GFP_KERNEL))
- queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+ schedule_delayed_work(&vi->refill, 0);
virtnet_napi_enable(vi);
return 0;
goto done;
if (v & VIRTIO_NET_S_ANNOUNCE) {
- netif_notify_peers(vi->dev);
+ netdev_notify_peers(vi->dev);
virtnet_ack_link_announce(vi);
}
{
struct virtnet_info *vi = vdev->priv;
- queue_work(system_nrt_wq, &vi->config_work);
+ schedule_work(&vi->config_work);
}
static int init_vqs(struct virtnet_info *vi)
otherwise get link status from config. */
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
netif_carrier_off(dev);
- queue_work(system_nrt_wq, &vi->config_work);
+ schedule_work(&vi->config_work);
} else {
vi->status = VIRTIO_NET_S_LINK_UP;
netif_carrier_on(dev);
netif_device_attach(vi->dev);
if (!try_fill_recv(vi, GFP_KERNEL))
- queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+ schedule_delayed_work(&vi->refill, 0);
mutex_lock(&vi->config_lock);
vi->config_enable = true;
create_singlethread_workqueue("ab8500_btemp_wq");
if (di->btemp_wq == NULL) {
dev_err(di->dev, "failed to create work queue\n");
+ ret = -ENOMEM;
goto free_device_info;
}
/* Init work for measuring temperature periodically */
- INIT_DELAYED_WORK_DEFERRABLE(&di->btemp_periodic_work,
+ INIT_DEFERRABLE_WORK(&di->btemp_periodic_work,
ab8500_btemp_periodic_work);
/* Identify the battery */
create_singlethread_workqueue("ab8500_charger_wq");
if (di->charger_wq == NULL) {
dev_err(di->dev, "failed to create work queue\n");
+ ret = -ENOMEM;
goto free_device_info;
}
/* Init work for HW failure check */
- INIT_DELAYED_WORK_DEFERRABLE(&di->check_hw_failure_work,
+ INIT_DEFERRABLE_WORK(&di->check_hw_failure_work,
ab8500_charger_check_hw_failure_work);
- INIT_DELAYED_WORK_DEFERRABLE(&di->check_usbchgnotok_work,
+ INIT_DEFERRABLE_WORK(&di->check_usbchgnotok_work,
ab8500_charger_check_usbchargernotok_work);
/*
* watchdog have to be kicked by the charger driver
* when the AC charger is disabled
*/
- INIT_DELAYED_WORK_DEFERRABLE(&di->kick_wd_work,
+ INIT_DEFERRABLE_WORK(&di->kick_wd_work,
ab8500_charger_kick_watchdog_work);
- INIT_DELAYED_WORK_DEFERRABLE(&di->check_vbat_work,
+ INIT_DEFERRABLE_WORK(&di->check_vbat_work,
ab8500_charger_check_vbat_work);
/* Init work for charger detection */
di->fg_wq = create_singlethread_workqueue("ab8500_fg_wq");
if (di->fg_wq == NULL) {
dev_err(di->dev, "failed to create work queue\n");
+ ret = -ENOMEM;
goto free_device_info;
}
INIT_WORK(&di->fg_acc_cur_work, ab8500_fg_acc_cur_work);
/* Init work for reinitialising the fg algorithm */
- INIT_DELAYED_WORK_DEFERRABLE(&di->fg_reinit_work,
+ INIT_DEFERRABLE_WORK(&di->fg_reinit_work,
ab8500_fg_reinit_work);
/* Work delayed Queue to run the state machine */
- INIT_DELAYED_WORK_DEFERRABLE(&di->fg_periodic_work,
+ INIT_DEFERRABLE_WORK(&di->fg_periodic_work,
ab8500_fg_periodic_work);
/* Work to check low battery condition */
- INIT_DELAYED_WORK_DEFERRABLE(&di->fg_low_bat_work,
+ INIT_DEFERRABLE_WORK(&di->fg_low_bat_work,
ab8500_fg_low_bat_work);
/* Init work for HW failure check */
- INIT_DELAYED_WORK_DEFERRABLE(&di->fg_check_hw_failure_work,
+ INIT_DEFERRABLE_WORK(&di->fg_check_hw_failure_work,
ab8500_fg_check_hw_failure_work);
/* Initialize OVV, and other registers */
if (enable) {
if (cm->emergency_stop)
return -EAGAIN;
- for (i = 0 ; i < desc->num_charger_regulators ; i++)
- regulator_enable(desc->charger_regulators[i].consumer);
+ for (i = 0 ; i < desc->num_charger_regulators ; i++) {
+ err = regulator_enable(desc->charger_regulators[i].consumer);
+ if (err < 0) {
+ dev_warn(cm->dev,
+ "Cannot enable %s regulator\n",
+ desc->charger_regulators[i].regulator_name);
+ }
+ }
} else {
+ for (i = 0 ; i < desc->num_charger_regulators ; i++) {
+ err = regulator_disable(desc->charger_regulators[i].consumer);
+ if (err < 0) {
+ dev_warn(cm->dev,
+ "Cannot disable %s regulator\n",
+ desc->charger_regulators[i].regulator_name);
+ }
+ }
+
/*
* Abnormal battery state - Stop charging forcibly,
* even if charger was enabled at the other places
return;
}
- diff = cm->fullbatt_vchk_uV;
+ diff = desc->fullbatt_uV;
diff -= batt_uV;
dev_dbg(cm->dev, "VBATT dropped %duV after full-batt.\n", diff);
if (!delayed_work_pending(&cm_monitor_work) ||
(delayed_work_pending(&cm_monitor_work) &&
time_after(next_polling, _next_polling))) {
- cancel_delayed_work_sync(&cm_monitor_work);
next_polling = jiffies + polling_jiffy;
- queue_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
+ mod_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
}
out:
if (cm_suspended)
device_set_wakeup_capable(cm->dev, true);
- if (delayed_work_pending(&cm->fullbatt_vchk_work))
- cancel_delayed_work(&cm->fullbatt_vchk_work);
- queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
- msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
+ mod_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
+ msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
cm->fullbatt_vchk_jiffies_at = jiffies + msecs_to_jiffies(
desc->fullbatt_vchkdrop_ms);
#ifdef CONFIG_PM
static int wm97xx_bat_suspend(struct device *dev)
{
- flush_work_sync(&bat_work);
+ flush_work(&bat_work);
return 0;
}
props++; /* POWER_SUPPLY_PROP_VOLTAGE_MIN */
prop = kzalloc(props * sizeof(*prop), GFP_KERNEL);
- if (!prop)
+ if (!prop) {
+ ret = -ENOMEM;
goto err3;
+ }
prop[i++] = POWER_SUPPLY_PROP_PRESENT;
if (pdata->charge_gpio >= 0)
struct device *dev;
struct list_head list;
unsigned int always_on:1;
+ unsigned int bypass:1;
int uA_load;
int min_uV;
int max_uV;
case REGULATOR_STATUS_STANDBY:
label = "standby";
break;
+ case REGULATOR_STATUS_BYPASS:
+ label = "bypass";
+ break;
case REGULATOR_STATUS_UNDEFINED:
label = "undefined";
break;
static DEVICE_ATTR(suspend_standby_state, 0444,
regulator_suspend_standby_state_show, NULL);
+static ssize_t regulator_bypass_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct regulator_dev *rdev = dev_get_drvdata(dev);
+ const char *report;
+ bool bypass;
+ int ret;
+
+ ret = rdev->desc->ops->get_bypass(rdev, &bypass);
+
+ if (ret != 0)
+ report = "unknown";
+ else if (bypass)
+ report = "enabled";
+ else
+ report = "disabled";
+
+ return sprintf(buf, "%s\n", report);
+}
+static DEVICE_ATTR(bypass, 0444,
+ regulator_bypass_show, NULL);
/*
* These are the only attributes are present for all regulators.
if (constraints->valid_modes_mask & REGULATOR_MODE_STANDBY)
count += sprintf(buf + count, "standby");
+ if (!count)
+ sprintf(buf, "no parameters");
+
rdev_info(rdev, "%s\n", buf);
if ((constraints->min_uV != constraints->max_uV) &&
err = -ENOMEM;
return err;
}
+ supply_rdev->open_count++;
return 0;
}
}
}
- if (ret == 0 && best_val >= 0)
+ if (ret == 0 && best_val >= 0) {
+ unsigned long data = best_val;
+
_notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE,
- (void *)best_val);
+ (void *)data);
+ }
trace_regulator_set_voltage_complete(rdev_get_name(rdev), best_val);
EXPORT_SYMBOL_GPL(regulator_set_voltage_time);
/**
- *regulator_set_voltage_time_sel - get raise/fall time
- * @regulator: regulator source
+ * regulator_set_voltage_time_sel - get raise/fall time
+ * @rdev: regulator source device
* @old_selector: selector for starting voltage
* @new_selector: selector for target voltage
*
ret = rdev->desc->ops->list_voltage(rdev, sel);
} else if (rdev->desc->ops->get_voltage) {
ret = rdev->desc->ops->get_voltage(rdev);
+ } else if (rdev->desc->ops->list_voltage) {
+ ret = rdev->desc->ops->list_voltage(rdev, 0);
} else {
return -EINVAL;
}
}
EXPORT_SYMBOL_GPL(regulator_set_optimum_mode);
+/**
+ * regulator_set_bypass_regmap - Default set_bypass() using regmap
+ *
+ * @rdev: device to operate on.
+ * @enable: state to set.
+ */
+int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable)
+{
+ unsigned int val;
+
+ if (enable)
+ val = rdev->desc->bypass_mask;
+ else
+ val = 0;
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->bypass_reg,
+ rdev->desc->bypass_mask, val);
+}
+EXPORT_SYMBOL_GPL(regulator_set_bypass_regmap);
+
+/**
+ * regulator_get_bypass_regmap - Default get_bypass() using regmap
+ *
+ * @rdev: device to operate on.
+ * @enable: current state.
+ */
+int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, rdev->desc->bypass_reg, &val);
+ if (ret != 0)
+ return ret;
+
+ *enable = val & rdev->desc->bypass_mask;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regulator_get_bypass_regmap);
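For illustration only (register layout and names are hypothetical), a
regmap-based driver wires these helpers up through its descriptor:

	static const struct regulator_ops ldo_ops = {
		/* ... voltage ops elided ... */
		.set_bypass = regulator_set_bypass_regmap,
		.get_bypass = regulator_get_bypass_regmap,
	};

	static const struct regulator_desc ldo_desc = {
		.name		= "LDO1",
		.ops		= &ldo_ops,
		.bypass_reg	= 0x23,		/* hypothetical register */
		.bypass_mask	= BIT(5),	/* hypothetical enable bit */
	};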
+
+/**
+ * regulator_allow_bypass - allow the regulator to go into bypass mode
+ *
+ * @regulator: Regulator to configure
+ * @enable: enable or disable bypass mode
+ *
+ * Allow the regulator to go into bypass mode if all other consumers
+ * for the regulator also enable bypass mode and the machine
+ * constraints allow this. Bypass mode means that the regulator is
+ * simply passing the input directly to the output with no regulation.
+ */
+int regulator_allow_bypass(struct regulator *regulator, bool enable)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+ int ret = 0;
+
+ if (!rdev->desc->ops->set_bypass)
+ return 0;
+
+ if (rdev->constraints &&
+ !(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_BYPASS))
+ return 0;
+
+ mutex_lock(&rdev->mutex);
+
+ if (enable && !regulator->bypass) {
+ rdev->bypass_count++;
+
+ if (rdev->bypass_count == rdev->open_count) {
+ ret = rdev->desc->ops->set_bypass(rdev, enable);
+ if (ret != 0)
+ rdev->bypass_count--;
+ }
+
+ } else if (!enable && regulator->bypass) {
+ rdev->bypass_count--;
+
+ if (rdev->bypass_count != rdev->open_count) {
+ ret = rdev->desc->ops->set_bypass(rdev, enable);
+ if (ret != 0)
+ rdev->bypass_count++;
+ }
+ }
+
+ if (ret == 0)
+ regulator->bypass = enable;
+
+ mutex_unlock(&rdev->mutex);
+
+ return ret;
+}
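Note that set_bypass() only fires once every open consumer has opted in
(bypass_count == open_count). A consumer-side sketch, e.g. in a
hypothetical driver's low-load path:

	ret = regulator_allow_bypass(reg, true);
	if (ret)
		dev_warn(dev, "failed to allow bypass: %d\n", ret);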
+
/**
* regulator_register_notifier - register regulator event notifier
* @regulator: regulator source
if (status < 0)
return status;
}
+ if (ops->get_bypass) {
+ status = device_create_file(dev, &dev_attr_bypass);
+ if (status < 0)
+ return status;
+ }
/* some attributes are type-specific */
if (rdev->desc->type == REGULATOR_CURRENT) {
&rdev->use_count);
debugfs_create_u32("open_count", 0444, rdev->debugfs,
&rdev->open_count);
+ debugfs_create_u32("bypass_count", 0444, rdev->debugfs,
+ &rdev->bypass_count);
}
/**
regulator_put(rdev->supply);
mutex_lock(&regulator_list_mutex);
debugfs_remove_recursive(rdev->debugfs);
- flush_work_sync(&rdev->disable_work.work);
+ flush_work(&rdev->disable_work.work);
WARN_ON(rdev->open_count);
unset_regulator_supplies(rdev);
list_del(&rdev->list);
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
-static int ipr_max_bus_speeds [] = {
+static int ipr_max_bus_speeds[] = {
IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
trace_entry->u.add_data = add_data;
}
#else
-#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
+#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
+/**
+ * ipr_lock_and_done - Acquire lock and complete command
+ * @ipr_cmd: ipr command struct
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
+{
+ unsigned long lock_flags;
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ ipr_cmd->done(ipr_cmd);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+}
+
/**
* ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
* @ipr_cmd: ipr command struct
* Return value:
* none
**/
-static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
+static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
+ void (*fast_done) (struct ipr_cmnd *))
{
ipr_reinit_ipr_cmnd(ipr_cmd);
ipr_cmd->u.scratch = 0;
ipr_cmd->sibling = NULL;
+ ipr_cmd->fast_done = fast_done;
init_timer(&ipr_cmd->timer);
}
/**
- * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
+ * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
* @ioa_cfg: ioa config struct
*
* Return value:
* pointer to ipr command struct
**/
static
-struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
struct ipr_cmnd *ipr_cmd;
ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
list_del(&ipr_cmd->queue);
- ipr_init_ipr_cmnd(ipr_cmd);
return ipr_cmd;
}
+/**
+ * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
+ * @ioa_cfg: ioa config struct
+ *
+ * Return value:
+ * pointer to ipr command struct
+ **/
+static
+struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+{
+ struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+ ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
+ return ipr_cmd;
+}
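The split yields two allocation styles: ipr_get_free_ipr_cmnd() keeps the
old behaviour (completion re-acquires the host lock via
ipr_lock_and_done()), while the queuecommand hot path below takes the raw
block and installs a lock-free completion. Roughly:

	/* internal/reset paths: completion runs under the host lock */
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);

	/* I/O fast path: ipr_scsi_done may run without the host lock */
	ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);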
+
/**
* ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
* @ioa_cfg: ioa config struct
**/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
- switch(proto) {
+ switch (proto) {
case IPR_PROTO_SATA:
case IPR_PROTO_SAS_STP:
res->ata_class = ATA_DEV_ATA;
}
#else
-#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
+#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
#endif
/**
**/
static void ipr_release_dump(struct kref *kref)
{
- struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
+ struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
unsigned long lock_flags = 0;
int i;
break;
}
}
- } while(did_work);
+ } while (did_work);
list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
if (res->add_to_ml) {
* number of bytes printed to buffer
**/
static ssize_t ipr_store_log_level(struct device *dev,
- struct device_attribute *attr,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
return -EACCES;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- while(ioa_cfg->in_reset_reload) {
+ while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
unsigned long lock_flags;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- while(ioa_cfg->in_reset_reload) {
+ while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
len = snprintf(fname, 99, "%s", buf);
fname[len-1] = '\0';
- if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
+ if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
return -EIO;
}
* Return value:
* SUCCESS / FAILED
**/
-static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
+static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
{
struct ipr_ioa_cfg *ioa_cfg;
int rc;
return rc;
}
-static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
+static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
int rc;
}
LEAVE;
- return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
+ return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
}
/**
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- while(ioa_cfg->in_reset_reload) {
+ while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
* Return value:
* SUCCESS / FAILED
**/
-static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
+static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
struct ipr_cmnd *ipr_cmd;
struct ipr_ioa_cfg *ioa_cfg;
res->resetting_device = 0;
LEAVE;
- return (rc ? FAILED : SUCCESS);
+ return rc ? FAILED : SUCCESS;
}
-static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
+static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
int rc;
* Return value:
* SUCCESS / FAILED
**/
-static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
+static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
{
struct ipr_cmnd *ipr_cmd;
struct ipr_ioa_cfg *ioa_cfg;
res->needs_sync_complete = 1;
LEAVE;
- return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
+ return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
}
/**
* Return value:
* SUCCESS / FAILED
**/
-static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
+static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
unsigned long flags;
int rc;
u16 cmd_index;
int num_hrrq = 0;
int irq_none = 0;
- struct ipr_cmnd *ipr_cmd;
+ struct ipr_cmnd *ipr_cmd, *temp;
irqreturn_t rc = IRQ_NONE;
+ LIST_HEAD(doneq);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return IRQ_HANDLED;
+ rc = IRQ_HANDLED;
+ goto unlock_out;
}
ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
- list_del(&ipr_cmd->queue);
- del_timer(&ipr_cmd->timer);
- ipr_cmd->done(ipr_cmd);
+ list_move_tail(&ipr_cmd->queue, &doneq);
rc = IRQ_HANDLED;
} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
int_reg & IPR_PCII_HRRQ_UPDATED) {
ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return IRQ_HANDLED;
+ rc = IRQ_HANDLED;
+ goto unlock_out;
} else
break;
}
if (unlikely(rc == IRQ_NONE))
rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
+unlock_out:
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+ list_del(&ipr_cmd->queue);
+ del_timer(&ipr_cmd->timer);
+ ipr_cmd->fast_done(ipr_cmd);
+ }
+
return rc;
}
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+ unsigned long lock_flags;
scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
- scsi_dma_unmap(ipr_cmd->scsi_cmd);
+ scsi_dma_unmap(scsi_cmd);
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
scsi_cmd->scsi_done(scsi_cmd);
- } else
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ } else {
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ipr_erp_start(ioa_cfg, ipr_cmd);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ }
}
/**
* ipr_queuecommand - Queue a mid-layer request
+ * @shost: scsi host struct
* @scsi_cmd: scsi command struct
- * @done: done function
*
* This function queues a request generated by the mid-layer.
*
* SCSI_MLQUEUE_DEVICE_BUSY if device is busy
* SCSI_MLQUEUE_HOST_BUSY if host is busy
**/
-static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
- void (*done) (struct scsi_cmnd *))
+static int ipr_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *scsi_cmd)
{
struct ipr_ioa_cfg *ioa_cfg;
struct ipr_resource_entry *res;
struct ipr_ioarcb *ioarcb;
struct ipr_cmnd *ipr_cmd;
+ unsigned long lock_flags;
int rc = 0;
- scsi_cmd->scsi_done = done;
- ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
- res = scsi_cmd->device->hostdata;
+ ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+
+ spin_lock_irqsave(shost->host_lock, lock_flags);
scsi_cmd->result = (DID_OK << 16);
+ res = scsi_cmd->device->hostdata;
/*
* We are currently blocking all devices due to a host reset
* We have told the host to stop giving us new requests, but
* ERP ops don't count. FIXME
*/
- if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
+ if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) {
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
return SCSI_MLQUEUE_HOST_BUSY;
+ }
/*
* FIXME - Create scsi_set_host_offline interface
* and the ioa_is_dead check can be removed
*/
if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
- memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
- scsi_cmd->result = (DID_NO_CONNECT << 16);
- scsi_cmd->scsi_done(scsi_cmd);
- return 0;
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ goto err_nodev;
}
if (ipr_is_gata(res) && res->sata_port)
return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
- ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+ ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+
+ ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
ioarcb = &ipr_cmd->ioarcb;
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
ipr_cmd->scsi_cmd = scsi_cmd;
- ioarcb->res_handle = res->res_handle;
- ipr_cmd->done = ipr_scsi_done;
- ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
+ ipr_cmd->done = ipr_scsi_eh_done;
if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
if (scsi_cmd->underflow == 0)
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
- if (res->needs_sync_complete) {
- ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
- res->needs_sync_complete = 0;
- }
-
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
if (ipr_is_gscsi(res))
ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
}
- if (unlikely(rc != 0)) {
- list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ spin_lock_irqsave(shost->host_lock, lock_flags);
+ if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) {
+ list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ if (!rc)
+ scsi_dma_unmap(scsi_cmd);
return SCSI_MLQUEUE_HOST_BUSY;
}
+ if (unlikely(ioa_cfg->ioa_is_dead)) {
+ list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ scsi_dma_unmap(scsi_cmd);
+ goto err_nodev;
+ }
+
+ ioarcb->res_handle = res->res_handle;
+ if (res->needs_sync_complete) {
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
+ res->needs_sync_complete = 0;
+ }
+ list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
ipr_send_command(ipr_cmd);
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
return 0;
-}
-static DEF_SCSI_QCMD(ipr_queuecommand)
+err_nodev:
+ spin_lock_irqsave(shost->host_lock, lock_flags);
+ memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ scsi_cmd->result = (DID_NO_CONNECT << 16);
+ scsi_cmd->scsi_done(scsi_cmd);
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ return 0;
+}
/**
* ipr_ioctl - IOCTL handler
* Return value:
* pointer to buffer with description string
**/
-static const char * ipr_ioa_info(struct Scsi_Host *host)
+static const char *ipr_ioa_info(struct Scsi_Host *host)
{
static char buffer[512];
struct ipr_ioa_cfg *ioa_cfg;
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- while(ioa_cfg->in_reset_reload) {
+ while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
unsigned long flags;
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- while(ioa_cfg->in_reset_reload) {
+ while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
int i;
if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
- for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
+ for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
if (__is_processor(ipr_blocked_processors[i]))
return 1;
}
* none
**/
static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
- struct ipr_mode_pages *mode_pages)
+ struct ipr_mode_pages *mode_pages)
{
int i, entry_length;
struct ipr_dev_bus_entry *bus;
ipr_reinit_ipr_cmnd(ipr_cmd);
ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
rc = ipr_cmd->job_step(ipr_cmd);
- } while(rc == IPR_RC_JOB_CONTINUE);
+ } while (rc == IPR_RC_JOB_CONTINUE);
}
/**
}
if (ioa_cfg->ipr_cmd_pool)
- pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
+ pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
kfree(ioa_cfg->ipr_cmnd_list);
kfree(ioa_cfg->ipr_cmnd_list_dma);
dma_addr_t dma_addr;
int i;
- ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
- sizeof(struct ipr_cmnd), 512, 0);
+ ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
+ sizeof(struct ipr_cmnd), 512, 0);
if (!ioa_cfg->ipr_cmd_pool)
return -ENOMEM;
}
for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
- ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
+ ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
if (!ipr_cmd) {
ipr_free_cmd_blks(ioa_cfg);
ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
- ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
- sata_port_info.flags, &ipr_sata_ops);
+ ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
int target, lun;
for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
- for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
+ for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
}
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
- while(ioa_cfg->in_reset_reload) {
+ while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
- flush_work_sync(&ioa_cfg->work_q);
+ flush_work(&ioa_cfg->work_q);
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
spin_lock(&ipr_driver_lock);
unsigned long lock_flags = 0;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- while(ioa_cfg->in_reset_reload) {
+ while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
MODULE_DESCRIPTION("Generic thermal management sysfs support");
MODULE_LICENSE("GPL");
-struct thermal_cooling_device_instance {
+#define THERMAL_NO_TARGET -1UL
+/*
+ * This structure describes how a given cooling device behaves
+ * at a given trip point in a given thermal zone.
+ */
+struct thermal_instance {
int id;
char name[THERMAL_NAME_LENGTH];
struct thermal_zone_device *tz;
struct thermal_cooling_device *cdev;
int trip;
+ unsigned long upper; /* Highest cooling state for this trip point */
+ unsigned long lower; /* Lowest cooling state for this trip point */
+ unsigned long target; /* expected cooling state */
char attr_name[THERMAL_NAME_LENGTH];
struct device_attribute attr;
- struct list_head node;
+ struct list_head tz_node; /* node in tz->thermal_instances */
+ struct list_head cdev_node; /* node in cdev->thermal_instances */
};
static DEFINE_IDR(thermal_tz_idr);
if (!strncmp("Processor", cdev->type,
sizeof("Processor")))
thermal_zone_bind_cooling_device(tz,
- THERMAL_TRIPS_NONE,
- cdev);
+ THERMAL_TRIPS_NONE, cdev,
+ THERMAL_NO_LIMIT,
+ THERMAL_NO_LIMIT);
}
mutex_unlock(&thermal_list_lock);
if (!tz->passive_delay)
tz->passive_delay = 0;
}
- tz->tc1 = 1;
- tz->tc2 = 1;
-
tz->forced_passive = state;
thermal_zone_device_update(tz);
thermal_cooling_device_trip_point_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct thermal_cooling_device_instance *instance;
+ struct thermal_instance *instance;
instance =
- container_of(attr, struct thermal_cooling_device_instance, attr);
+ container_of(attr, struct thermal_instance, attr);
if (instance->trip == THERMAL_TRIPS_NONE)
return sprintf(buf, "-1\n");
temp->tz = tz;
hwmon->count++;
- snprintf(temp->temp_input.name, THERMAL_NAME_LENGTH,
+ snprintf(temp->temp_input.name, sizeof(temp->temp_input.name),
"temp%d_input", hwmon->count);
temp->temp_input.attr.attr.name = temp->temp_input.name;
temp->temp_input.attr.attr.mode = 0444;
if (tz->ops->get_crit_temp) {
unsigned long temperature;
if (!tz->ops->get_crit_temp(tz, &temperature)) {
- snprintf(temp->temp_crit.name, THERMAL_NAME_LENGTH,
+ snprintf(temp->temp_crit.name,
+ sizeof(temp->temp_crit.name),
"temp%d_crit", hwmon->count);
temp->temp_crit.attr.attr.name = temp->temp_crit.name;
temp->temp_crit.attr.attr.mode = 0444;
static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
int delay)
{
- cancel_delayed_work(&(tz->poll_queue));
-
- if (!delay)
- return;
-
if (delay > 1000)
- queue_delayed_work(system_freezable_wq, &(tz->poll_queue),
- round_jiffies(msecs_to_jiffies(delay)));
+ mod_delayed_work(system_freezable_wq, &tz->poll_queue,
+ round_jiffies(msecs_to_jiffies(delay)));
+ else if (delay)
+ mod_delayed_work(system_freezable_wq, &tz->poll_queue,
+ msecs_to_jiffies(delay));
else
- queue_delayed_work(system_freezable_wq, &(tz->poll_queue),
- msecs_to_jiffies(delay));
+ cancel_delayed_work(&tz->poll_queue);
}
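mod_delayed_work() is the new timer-like primitive used throughout this
series: it atomically adjusts a pending work's expiry (or queues the work
if idle), replacing the racy cancel-then-queue idiom. Roughly:

	/* before: the work can slip through between cancel and queue */
	cancel_delayed_work(&tz->poll_queue);
	queue_delayed_work(system_freezable_wq, &tz->poll_queue, delay);

	/* after */
	mod_delayed_work(system_freezable_wq, &tz->poll_queue, delay);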
-static void thermal_zone_device_passive(struct thermal_zone_device *tz,
- int temp, int trip_temp, int trip)
-{
- int trend = 0;
- struct thermal_cooling_device_instance *instance;
- struct thermal_cooling_device *cdev;
- long state, max_state;
-
- /*
- * Above Trip?
- * -----------
- * Calculate the thermal trend (using the passive cooling equation)
- * and modify the performance limit for all passive cooling devices
- * accordingly. Note that we assume symmetry.
- */
- if (temp >= trip_temp) {
- tz->passive = true;
-
- trend = (tz->tc1 * (temp - tz->last_temperature)) +
- (tz->tc2 * (temp - trip_temp));
-
- /* Heating up? */
- if (trend > 0) {
- list_for_each_entry(instance, &tz->cooling_devices,
- node) {
- if (instance->trip != trip)
- continue;
- cdev = instance->cdev;
- cdev->ops->get_cur_state(cdev, &state);
- cdev->ops->get_max_state(cdev, &max_state);
- if (state++ < max_state)
- cdev->ops->set_cur_state(cdev, state);
- }
- } else if (trend < 0) { /* Cooling off? */
- list_for_each_entry(instance, &tz->cooling_devices,
- node) {
- if (instance->trip != trip)
- continue;
- cdev = instance->cdev;
- cdev->ops->get_cur_state(cdev, &state);
- cdev->ops->get_max_state(cdev, &max_state);
- if (state > 0)
- cdev->ops->set_cur_state(cdev, --state);
- }
- }
- return;
- }
-
- /*
- * Below Trip?
- * -----------
- * Implement passive cooling hysteresis to slowly increase performance
- * and avoid thrashing around the passive trip point. Note that we
- * assume symmetry.
- */
- list_for_each_entry(instance, &tz->cooling_devices, node) {
- if (instance->trip != trip)
- continue;
- cdev = instance->cdev;
- cdev->ops->get_cur_state(cdev, &state);
- cdev->ops->get_max_state(cdev, &max_state);
- if (state > 0)
- cdev->ops->set_cur_state(cdev, --state);
- if (state == 0)
- tz->passive = false;
- }
-}
-
static void thermal_zone_device_check(struct work_struct *work)
{
struct thermal_zone_device *tz = container_of(work, struct
*/
int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
int trip,
- struct thermal_cooling_device *cdev)
+ struct thermal_cooling_device *cdev,
+ unsigned long upper, unsigned long lower)
{
- struct thermal_cooling_device_instance *dev;
- struct thermal_cooling_device_instance *pos;
+ struct thermal_instance *dev;
+ struct thermal_instance *pos;
struct thermal_zone_device *pos1;
struct thermal_cooling_device *pos2;
+ unsigned long max_state;
int result;
if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE))
if (tz != pos1 || cdev != pos2)
return -EINVAL;
+ cdev->ops->get_max_state(cdev, &max_state);
+
+ /* lower default 0, upper default max_state */
+ lower = lower == THERMAL_NO_LIMIT ? 0 : lower;
+ upper = upper == THERMAL_NO_LIMIT ? max_state : upper;
+
+ if (lower > upper || upper > max_state)
+ return -EINVAL;
+
dev =
- kzalloc(sizeof(struct thermal_cooling_device_instance), GFP_KERNEL);
+ kzalloc(sizeof(struct thermal_instance), GFP_KERNEL);
if (!dev)
return -ENOMEM;
dev->tz = tz;
dev->cdev = cdev;
dev->trip = trip;
+ dev->upper = upper;
+ dev->lower = lower;
+ dev->target = THERMAL_NO_TARGET;
+
result = get_idr(&tz->idr, &tz->lock, &dev->id);
if (result)
goto free_mem;
goto remove_symbol_link;
mutex_lock(&tz->lock);
- list_for_each_entry(pos, &tz->cooling_devices, node)
+ mutex_lock(&cdev->lock);
+ list_for_each_entry(pos, &tz->thermal_instances, tz_node)
if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
result = -EEXIST;
break;
}
- if (!result)
- list_add_tail(&dev->node, &tz->cooling_devices);
+ if (!result) {
+ list_add_tail(&dev->tz_node, &tz->thermal_instances);
+ list_add_tail(&dev->cdev_node, &cdev->thermal_instances);
+ }
+ mutex_unlock(&cdev->lock);
mutex_unlock(&tz->lock);
if (!result)
int trip,
struct thermal_cooling_device *cdev)
{
- struct thermal_cooling_device_instance *pos, *next;
+ struct thermal_instance *pos, *next;
mutex_lock(&tz->lock);
- list_for_each_entry_safe(pos, next, &tz->cooling_devices, node) {
+ mutex_lock(&cdev->lock);
+ list_for_each_entry_safe(pos, next, &tz->thermal_instances, tz_node) {
if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
- list_del(&pos->node);
+ list_del(&pos->tz_node);
+ list_del(&pos->cdev_node);
+ mutex_unlock(&cdev->lock);
mutex_unlock(&tz->lock);
goto unbind;
}
}
+ mutex_unlock(&cdev->lock);
mutex_unlock(&tz->lock);
return -ENODEV;
struct thermal_zone_device *pos;
int result;
- if (strlen(type) >= THERMAL_NAME_LENGTH)
+ if (type && strlen(type) >= THERMAL_NAME_LENGTH)
return ERR_PTR(-EINVAL);
if (!ops || !ops->get_max_state || !ops->get_cur_state ||
return ERR_PTR(result);
}
- strcpy(cdev->type, type);
+ strcpy(cdev->type, type ? : "");
+ mutex_init(&cdev->lock);
+ INIT_LIST_HEAD(&cdev->thermal_instances);
cdev->ops = ops;
+ cdev->updated = true;
cdev->device.class = &thermal_class;
cdev->devdata = devdata;
dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
}
EXPORT_SYMBOL(thermal_cooling_device_unregister);
+static void thermal_cdev_do_update(struct thermal_cooling_device *cdev)
+{
+ struct thermal_instance *instance;
+ unsigned long target = 0;
+
+	/* cooling device is already updated */
+ if (cdev->updated)
+ return;
+
+ mutex_lock(&cdev->lock);
+ /* Make sure cdev enters the deepest cooling state */
+ list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) {
+ if (instance->target == THERMAL_NO_TARGET)
+ continue;
+ if (instance->target > target)
+ target = instance->target;
+ }
+ mutex_unlock(&cdev->lock);
+ cdev->ops->set_cur_state(cdev, target);
+ cdev->updated = true;
+}
+
+static void thermal_zone_do_update(struct thermal_zone_device *tz)
+{
+ struct thermal_instance *instance;
+
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node)
+ thermal_cdev_do_update(instance->cdev);
+}
+
+/*
+ * Cooling algorithm for both active and passive cooling
+ *
+ * 1. if the temperature is higher than a trip point,
+ * a. if the trend is THERMAL_TREND_RAISING, use higher cooling
+ * state for this trip point
+ * b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
+ * state for this trip point
+ *
+ * 2. if the temperature is lower than a trip point, use lower
+ * cooling state for this trip point
+ *
+ * Note that this behaves the same as the previous passive cooling
+ * algorithm.
+ */
+
+static void thermal_zone_trip_update(struct thermal_zone_device *tz,
+ int trip, long temp)
+{
+ struct thermal_instance *instance;
+ struct thermal_cooling_device *cdev = NULL;
+ unsigned long cur_state, max_state;
+ long trip_temp;
+ enum thermal_trip_type trip_type;
+ enum thermal_trend trend;
+
+ if (trip == THERMAL_TRIPS_NONE) {
+ trip_temp = tz->forced_passive;
+ trip_type = THERMAL_TRIPS_NONE;
+ } else {
+ tz->ops->get_trip_temp(tz, trip, &trip_temp);
+ tz->ops->get_trip_type(tz, trip, &trip_type);
+ }
+
+ if (!tz->ops->get_trend || tz->ops->get_trend(tz, trip, &trend)) {
+ /*
+ * compare the current temperature and previous temperature
+ * to get the thermal trend, if no special requirement
+ */
+ if (tz->temperature > tz->last_temperature)
+ trend = THERMAL_TREND_RAISING;
+ else if (tz->temperature < tz->last_temperature)
+ trend = THERMAL_TREND_DROPPING;
+ else
+ trend = THERMAL_TREND_STABLE;
+ }
+
+ if (temp >= trip_temp) {
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ if (instance->trip != trip)
+ continue;
+
+ cdev = instance->cdev;
+
+ cdev->ops->get_cur_state(cdev, &cur_state);
+ cdev->ops->get_max_state(cdev, &max_state);
+
+ if (trend == THERMAL_TREND_RAISING) {
+ cur_state = cur_state < instance->upper ?
+ (cur_state + 1) : instance->upper;
+ } else if (trend == THERMAL_TREND_DROPPING) {
+ cur_state = cur_state > instance->lower ?
+ (cur_state - 1) : instance->lower;
+ }
+
+ /* activate a passive thermal instance */
+ if ((trip_type == THERMAL_TRIP_PASSIVE ||
+ trip_type == THERMAL_TRIPS_NONE) &&
+ instance->target == THERMAL_NO_TARGET)
+ tz->passive++;
+
+ instance->target = cur_state;
+ cdev->updated = false; /* cooling device needs update */
+ }
+ } else { /* below trip */
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ if (instance->trip != trip)
+ continue;
+
+ /* Do not use the inactive thermal instance */
+ if (instance->target == THERMAL_NO_TARGET)
+ continue;
+ cdev = instance->cdev;
+ cdev->ops->get_cur_state(cdev, &cur_state);
+
+ cur_state = cur_state > instance->lower ?
+ (cur_state - 1) : THERMAL_NO_TARGET;
+
+ /* deactivate a passive thermal instance */
+ if ((trip_type == THERMAL_TRIP_PASSIVE ||
+ trip_type == THERMAL_TRIPS_NONE) &&
+ cur_state == THERMAL_NO_TARGET)
+ tz->passive--;
+ instance->target = cur_state;
+ cdev->updated = false; /* cooling device needs update */
+ }
+ }
+
+ return;
+}
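With the new upper/lower clamps, a worked example (values invented): a fan
with max_state 3 bound as

	ret = thermal_zone_bind_cooling_device(tz, trip, cdev,
					       3 /* upper */, 1 /* lower */);

steps its instance target up toward 3 while the zone is above the trip and
the trend is RAISING, steps back down toward 1 while DROPPING, and goes to
THERMAL_NO_TARGET once the zone falls below the trip, deactivating the
instance. Passing THERMAL_NO_LIMIT for both keeps the old full-range
behaviour.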
/**
* thermal_zone_device_update - force an update of a thermal zone's state
* @ttz: the thermal zone to update
int count, ret = 0;
long temp, trip_temp;
enum thermal_trip_type trip_type;
- struct thermal_cooling_device_instance *instance;
- struct thermal_cooling_device *cdev;
mutex_lock(&tz->lock);
goto leave;
}
+ tz->last_temperature = tz->temperature;
+ tz->temperature = temp;
+
for (count = 0; count < tz->trips; count++) {
tz->ops->get_trip_type(tz, count, &trip_type);
tz->ops->get_trip_temp(tz, count, &trip_temp);
tz->ops->notify(tz, count, trip_type);
break;
case THERMAL_TRIP_ACTIVE:
- list_for_each_entry(instance, &tz->cooling_devices,
- node) {
- if (instance->trip != count)
- continue;
-
- cdev = instance->cdev;
-
- if (temp >= trip_temp)
- cdev->ops->set_cur_state(cdev, 1);
- else
- cdev->ops->set_cur_state(cdev, 0);
- }
+ thermal_zone_trip_update(tz, count, temp);
break;
case THERMAL_TRIP_PASSIVE:
if (temp >= trip_temp || tz->passive)
- thermal_zone_device_passive(tz, temp,
- trip_temp, count);
+ thermal_zone_trip_update(tz, count, temp);
break;
}
}
if (tz->forced_passive)
- thermal_zone_device_passive(tz, temp, tz->forced_passive,
- THERMAL_TRIPS_NONE);
-
- tz->last_temperature = temp;
+ thermal_zone_trip_update(tz, THERMAL_TRIPS_NONE, temp);
+ thermal_zone_do_update(tz);
leave:
if (tz->passive)
* @mask: a bit string indicating the writeablility of trip points
* @devdata: private device data
* @ops: standard thermal zone device callbacks
- * @tc1: thermal coefficient 1 for passive calculations
- * @tc2: thermal coefficient 2 for passive calculations
* @passive_delay: number of milliseconds to wait between polls when
* performing passive cooling
* @polling_delay: number of milliseconds to wait between polls when checking
* driven systems)
*
* thermal_zone_device_unregister() must be called when the device is no
- * longer needed. The passive cooling formula uses tc1 and tc2 as described in
- * section 11.1.5.1 of the ACPI specification 3.0.
+ * longer needed. The passive cooling depends on the .get_trend() return value.
*/
struct thermal_zone_device *thermal_zone_device_register(const char *type,
int trips, int mask, void *devdata,
const struct thermal_zone_device_ops *ops,
- int tc1, int tc2, int passive_delay, int polling_delay)
+ int passive_delay, int polling_delay)
{
struct thermal_zone_device *tz;
struct thermal_cooling_device *pos;
int count;
int passive = 0;
- if (strlen(type) >= THERMAL_NAME_LENGTH)
+ if (type && strlen(type) >= THERMAL_NAME_LENGTH)
return ERR_PTR(-EINVAL);
if (trips > THERMAL_MAX_TRIPS || trips < 0 || mask >> trips)
if (!tz)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&tz->cooling_devices);
+ INIT_LIST_HEAD(&tz->thermal_instances);
idr_init(&tz->idr);
mutex_init(&tz->lock);
result = get_idr(&thermal_tz_idr, &thermal_idr_lock, &tz->id);
return ERR_PTR(result);
}
- strcpy(tz->type, type);
+ strcpy(tz->type, type ? : "");
tz->ops = ops;
tz->device.class = &thermal_class;
tz->devdata = devdata;
tz->trips = trips;
- tz->tc1 = tc1;
- tz->tc2 = tc2;
tz->passive_delay = passive_delay;
tz->polling_delay = polling_delay;
static int kgdboc_register_kbd(char **cptr)
{
- if (strncmp(*cptr, "kbd", 3) == 0) {
+ if (strncmp(*cptr, "kbd", 3) == 0 ||
+ strncmp(*cptr, "kdb", 3) == 0) {
if (kdb_poll_idx < KDB_POLL_FUNC_MAX) {
kdb_poll_funcs[kdb_poll_idx] = kdb_get_kbd_char;
kdb_poll_idx++;
i--;
}
}
- flush_work_sync(&kgdboc_restore_input_work);
+ flush_work(&kgdboc_restore_input_work);
}
#else /* ! CONFIG_KDB_KEYBOARD */
#define kgdboc_register_kbd(x) 0
val = sdp->sd_tune.gt_statfs_quantum;
if (val != 30)
seq_printf(s, ",statfs_quantum=%d", val);
+ else if (sdp->sd_tune.gt_statfs_slow)
+ seq_puts(s, ",statfs_quantum=0");
val = sdp->sd_tune.gt_quota_quantum;
if (val != 60)
seq_printf(s, ",quota_quantum=%d", val);
out_unlock:
/* Error path for case 1 */
if (gfs2_rs_active(ip->i_res))
- gfs2_rs_deltree(ip->i_res);
+ gfs2_rs_deltree(ip, ip->i_res);
if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
gfs2_glock_dq(&ip->i_iopen_gh);
clear_inode(inode);
gfs2_dir_hash_inval(ip);
ip->i_gl->gl_object = NULL;
- flush_delayed_work_sync(&ip->i_gl->gl_work);
+ flush_delayed_work(&ip->i_gl->gl_work);
gfs2_glock_add_to_lru(ip->i_gl);
gfs2_glock_put(ip->i_gl);
ip->i_gl = NULL;
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;
-static inline struct kmem_cache *page_get_cache(struct page *page)
-{
- page = compound_head(page);
- BUG_ON(!PageSlab(page));
- return page->slab_cache;
-}
-
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
struct page *page = virt_to_head_page(obj);
*/
if (keventd_up() && reap_work->work.func == NULL) {
init_reap_node(cpu);
- INIT_DELAYED_WORK_DEFERRABLE(reap_work, cache_reap);
+ INIT_DEFERRABLE_WORK(reap_work, cache_reap);
schedule_delayed_work_on(cpu, reap_work,
__round_jiffies_relative(HZ, cpu));
}
static int p9_socket_open(struct p9_client *client, struct socket *csocket)
{
struct p9_trans_fd *p;
- int ret, fd;
+ struct file *file;
+ int ret;
p = kmalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
if (!p)
return -ENOMEM;
csocket->sk->sk_allocation = GFP_NOIO;
- fd = sock_map_fd(csocket, 0);
- if (fd < 0) {
+ file = sock_alloc_file(csocket, 0, NULL);
+ if (IS_ERR(file)) {
pr_err("%s (%d): failed to map fd\n",
__func__, task_pid_nr(current));
sock_release(csocket);
kfree(p);
- return fd;
+ return PTR_ERR(file);
}
- get_file(csocket->file);
- get_file(csocket->file);
- p->wr = p->rd = csocket->file;
+ get_file(file);
+ p->wr = p->rd = file;
client->trans = p;
client->status = Connected;
- sys_close(fd); /* still racy */
-
p->rd->f_flags |= O_NONBLOCK;
p->conn = p9_conn_create(client);
void p9_trans_fd_exit(void)
{
- flush_work_sync(&p9_poll_work);
+ flush_work(&p9_poll_work);
v9fs_unregister_trans(&p9_tcp_trans);
v9fs_unregister_trans(&p9_unix_trans);
v9fs_unregister_trans(&p9_fd_trans);
if (dst_garbage.timer_inc > DST_GC_INC) {
dst_garbage.timer_inc = DST_GC_INC;
dst_garbage.timer_expires = DST_GC_MIN;
- cancel_delayed_work(&dst_gc_work);
- schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires);
+ mod_delayed_work(system_wq, &dst_gc_work,
+ dst_garbage.timer_expires);
}
spin_unlock_bh(&dst_garbage.lock);
}
struct dst_entry *dst, *last = NULL;
switch (event) {
- case NETDEV_UNREGISTER:
+ case NETDEV_UNREGISTER_FINAL:
case NETDEV_DOWN:
mutex_lock(&dst_gc_mutex);
for (dst = dst_busy_list; dst; dst = dst->next) {
}
+void linkwatch_init_dev(struct net_device *dev)
+{
+ /* Handle pre-registration link state changes */
+ if (!netif_carrier_ok(dev) || netif_dormant(dev))
+ rfc2863_policy(dev);
+}
+
+
static bool linkwatch_urgent_event(struct net_device *dev)
{
if (!netif_running(dev))
delay = 0;
/*
- * This is true if we've scheduled it immeditately or if we don't
- * need an immediate execution and it's already pending.
+ * If urgent, schedule immediate execution; otherwise, don't
+ * override the existing timer.
*/
- if (schedule_delayed_work(&linkwatch_work, delay) == !delay)
- return;
-
- /* Don't bother if there is nothing urgent. */
- if (!test_bit(LW_URGENT, &linkwatch_flags))
- return;
-
- /* It's already running which is good enough. */
- if (!__cancel_delayed_work(&linkwatch_work))
- return;
-
- /* Otherwise we reschedule it again for immediate execution. */
- schedule_delayed_work(&linkwatch_work, 0);
+ if (test_bit(LW_URGENT, &linkwatch_flags))
+ mod_delayed_work(system_wq, &linkwatch_work, 0);
+ else
+ schedule_delayed_work(&linkwatch_work, delay);
}
oxygen_shutdown(chip);
if (chip->irq >= 0)
free_irq(chip->irq, chip);
- flush_work_sync(&chip->spdif_input_bits_work);
- flush_work_sync(&chip->gpio_work);
+ flush_work(&chip->spdif_input_bits_work);
+ flush_work(&chip->gpio_work);
chip->model.cleanup(chip);
kfree(chip->model_data);
mutex_destroy(&chip->mutex);
}
EXPORT_SYMBOL(oxygen_pci_remove);
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int oxygen_pci_suspend(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
spin_unlock_irq(&chip->reg_lock);
synchronize_irq(chip->irq);
- flush_work_sync(&chip->spdif_input_bits_work);
- flush_work_sync(&chip->gpio_work);
+ flush_work(&chip->spdif_input_bits_work);
+ flush_work(&chip->gpio_work);
chip->interrupt_mask = saved_interrupt_mask;
pci_disable_device(pci);
SIMPLE_DEV_PM_OPS(oxygen_pci_pm, oxygen_pci_suspend, oxygen_pci_resume);
EXPORT_SYMBOL(oxygen_pci_pm);
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
void oxygen_pci_shutdown(struct pci_dev *pci)
{
/* close any waiting streams and save state */
for (i = 0; i < card->num_rtd; i++) {
- flush_delayed_work_sync(&card->rtd[i].delayed_work);
+ flush_delayed_work(&card->rtd[i].delayed_work);
card->rtd[i].codec->dapm.suspend_bias_level = card->rtd[i].codec->dapm.bias_level;
}
if (ret < 0)
pr_warn("asoc: failed to add pmdown_time sysfs:%d\n", ret);
- if (!dai_link->params) {
- /* create the pcm */
- ret = soc_new_pcm(rtd, num);
+ if (cpu_dai->driver->compress_dai) {
+		/* create the compress device */
+ ret = soc_new_compress(rtd, num);
if (ret < 0) {
- pr_err("asoc: can't create pcm %s :%d\n",
- dai_link->stream_name, ret);
+ pr_err("asoc: can't create compress %s\n",
+ dai_link->stream_name);
return ret;
}
} else {
- /* link the DAI widgets */
- play_w = codec_dai->playback_widget;
- capture_w = cpu_dai->capture_widget;
- if (play_w && capture_w) {
- ret = snd_soc_dapm_new_pcm(card, dai_link->params,
- capture_w, play_w);
- if (ret != 0) {
- dev_err(card->dev, "Can't link %s to %s: %d\n",
- play_w->name, capture_w->name, ret);
+
+ if (!dai_link->params) {
+ /* create the pcm */
+ ret = soc_new_pcm(rtd, num);
+ if (ret < 0) {
+ pr_err("asoc: can't create pcm %s :%d\n",
+ dai_link->stream_name, ret);
return ret;
}
- }
+ } else {
+ /* link the DAI widgets */
+ play_w = codec_dai->playback_widget;
+ capture_w = cpu_dai->capture_widget;
+ if (play_w && capture_w) {
+ ret = snd_soc_dapm_new_pcm(card, dai_link->params,
+ capture_w, play_w);
+ if (ret != 0) {
+ dev_err(card->dev, "Can't link %s to %s: %d\n",
+ play_w->name, capture_w->name, ret);
+ return ret;
+ }
+ }
- play_w = cpu_dai->playback_widget;
- capture_w = codec_dai->capture_widget;
- if (play_w && capture_w) {
- ret = snd_soc_dapm_new_pcm(card, dai_link->params,
+ play_w = cpu_dai->playback_widget;
+ capture_w = codec_dai->capture_widget;
+ if (play_w && capture_w) {
+ ret = snd_soc_dapm_new_pcm(card, dai_link->params,
capture_w, play_w);
- if (ret != 0) {
- dev_err(card->dev, "Can't link %s to %s: %d\n",
- play_w->name, capture_w->name, ret);
- return ret;
+ if (ret != 0) {
+ dev_err(card->dev, "Can't link %s to %s: %d\n",
+ play_w->name, capture_w->name, ret);
+ return ret;
+ }
}
}
}
static int soc_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
- int ret = 0;
/*
* no card, so machine driver should be registering card
/* Bodge while we unpick instantiation */
card->dev = &pdev->dev;
- ret = snd_soc_register_card(card);
- if (ret != 0) {
- dev_err(&pdev->dev, "Failed to register card\n");
- return ret;
- }
-
- return 0;
+ return snd_soc_register_card(card);
}
static int soc_cleanup_card_resources(struct snd_soc_card *card)
/* make sure any delayed work runs */
for (i = 0; i < card->num_rtd; i++) {
struct snd_soc_pcm_runtime *rtd = &card->rtd[i];
- flush_delayed_work_sync(&rtd->delayed_work);
+ flush_delayed_work(&rtd->delayed_work);
}
/* remove auxiliary devices */
* now, we're shutting down so no imminent restart. */
for (i = 0; i < card->num_rtd; i++) {
struct snd_soc_pcm_runtime *rtd = &card->rtd[i];
- flush_delayed_work_sync(&rtd->delayed_work);
+ flush_delayed_work(&rtd->delayed_work);
}
snd_soc_dapm_shutdown(card);
}
}
+ if (!dai->codec)
+ dai->dapm.idle_bias_off = 1;
+
list_add(&dai->list, &dai_list);
mutex_unlock(&client_mutex);
}
}
+ if (!dai->codec)
+ dai->dapm.idle_bias_off = 1;
+
list_add(&dai->list, &dai_list);
mutex_unlock(&client_mutex);