 *  Implementation of primary alsa driver code base for Intel HD Audio.
 *
 *  Copyright(c) 2004 Intel Corporation. All rights reserved.
 *
 *  Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
 *                     PeiSen Hou <pshou@realtek.com.tw>
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the Free
 *  Software Foundation; either version 2 of the License, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
23 #include <linux/clocksource.h>
24 #include <linux/delay.h>
25 #include <linux/interrupt.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/slab.h>
30 #include <sound/core.h>
31 #include <sound/initval.h>
33 #include "hda_controller.h"
35 #define CREATE_TRACE_POINTS
36 #include "hda_intel_trace.h"
/* DSP lock helpers */
#ifdef CONFIG_SND_HDA_DSP_LOADER
#define dsp_lock_init(dev)	mutex_init(&(dev)->dsp_mutex)
#define dsp_lock(dev)		mutex_lock(&(dev)->dsp_mutex)
#define dsp_unlock(dev)		mutex_unlock(&(dev)->dsp_mutex)
#define dsp_is_locked(dev)	((dev)->locked)
#else
/* NOP lock helpers when the DSP loader is not configured */
#define dsp_lock_init(dev)	do {} while (0)
#define dsp_lock(dev)		do {} while (0)
#define dsp_unlock(dev)		do {} while (0)
#define dsp_is_locked(dev)	0
#endif
52 * AZX stream operations.
56 static void azx_stream_start(struct azx *chip, struct azx_dev *azx_dev)
59 * Before stream start, initialize parameter
61 azx_dev->insufficient = 1;
64 azx_writel(chip, INTCTL,
65 azx_readl(chip, INTCTL) | (1 << azx_dev->index));
66 /* set DMA start and interrupt mask */
67 azx_sd_writeb(chip, azx_dev, SD_CTL,
68 azx_sd_readb(chip, azx_dev, SD_CTL) |
69 SD_CTL_DMA_START | SD_INT_MASK);
73 static void azx_stream_clear(struct azx *chip, struct azx_dev *azx_dev)
75 azx_sd_writeb(chip, azx_dev, SD_CTL,
76 azx_sd_readb(chip, azx_dev, SD_CTL) &
77 ~(SD_CTL_DMA_START | SD_INT_MASK));
78 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
82 void azx_stream_stop(struct azx *chip, struct azx_dev *azx_dev)
84 azx_stream_clear(chip, azx_dev);
86 azx_writel(chip, INTCTL,
87 azx_readl(chip, INTCTL) & ~(1 << azx_dev->index));
89 EXPORT_SYMBOL_GPL(azx_stream_stop);
92 static void azx_stream_reset(struct azx *chip, struct azx_dev *azx_dev)
97 azx_stream_clear(chip, azx_dev);
99 azx_sd_writeb(chip, azx_dev, SD_CTL,
100 azx_sd_readb(chip, azx_dev, SD_CTL) |
101 SD_CTL_STREAM_RESET);
104 while (!((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
105 SD_CTL_STREAM_RESET) && --timeout)
107 val &= ~SD_CTL_STREAM_RESET;
108 azx_sd_writeb(chip, azx_dev, SD_CTL, val);
112 /* waiting for hardware to report that the stream is out of reset */
113 while (((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
114 SD_CTL_STREAM_RESET) && --timeout)
117 /* reset first position - may not be synced with hw at this time */
118 *azx_dev->posbuf = 0;
122 * set up the SD for streaming
124 static int azx_setup_controller(struct azx *chip, struct azx_dev *azx_dev)
127 /* make sure the run bit is zero for SD */
128 azx_stream_clear(chip, azx_dev);
129 /* program the stream_tag */
130 val = azx_sd_readl(chip, azx_dev, SD_CTL);
131 val = (val & ~SD_CTL_STREAM_TAG_MASK) |
132 (azx_dev->stream_tag << SD_CTL_STREAM_TAG_SHIFT);
133 if (!azx_snoop(chip))
134 val |= SD_CTL_TRAFFIC_PRIO;
135 azx_sd_writel(chip, azx_dev, SD_CTL, val);
137 /* program the length of samples in cyclic buffer */
138 azx_sd_writel(chip, azx_dev, SD_CBL, azx_dev->bufsize);
140 /* program the stream format */
141 /* this value needs to be the same as the one programmed */
142 azx_sd_writew(chip, azx_dev, SD_FORMAT, azx_dev->format_val);
144 /* program the stream LVI (last valid index) of the BDL */
145 azx_sd_writew(chip, azx_dev, SD_LVI, azx_dev->frags - 1);
147 /* program the BDL address */
148 /* lower BDL address */
149 azx_sd_writel(chip, azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr);
150 /* upper BDL address */
151 azx_sd_writel(chip, azx_dev, SD_BDLPU,
152 upper_32_bits(azx_dev->bdl.addr));
154 /* enable the position buffer */
155 if (chip->position_fix[0] != POS_FIX_LPIB ||
156 chip->position_fix[1] != POS_FIX_LPIB) {
157 if (!(azx_readl(chip, DPLBASE) & ICH6_DPLBASE_ENABLE))
158 azx_writel(chip, DPLBASE,
159 (u32)chip->posbuf.addr | ICH6_DPLBASE_ENABLE);
162 /* set the interrupt enable bits in the descriptor control register */
163 azx_sd_writel(chip, azx_dev, SD_CTL,
164 azx_sd_readl(chip, azx_dev, SD_CTL) | SD_INT_MASK);
169 /* assign a stream for the PCM */
170 static inline struct azx_dev *
171 azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
174 struct azx_dev *res = NULL;
175 /* make a non-zero unique key for the substream */
176 int key = (substream->pcm->device << 16) | (substream->number << 2) |
177 (substream->stream + 1);
179 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
180 dev = chip->playback_index_offset;
181 nums = chip->playback_streams;
183 dev = chip->capture_index_offset;
184 nums = chip->capture_streams;
186 for (i = 0; i < nums; i++, dev++) {
187 struct azx_dev *azx_dev = &chip->azx_dev[dev];
189 if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
190 if (azx_dev->assigned_key == key) {
192 azx_dev->assigned_key = key;
197 (chip->driver_caps & AZX_DCAPS_REVERSE_ASSIGN))
205 res->assigned_key = key;
211 /* release the assigned stream */
212 static inline void azx_release_device(struct azx_dev *azx_dev)
217 static cycle_t azx_cc_read(const struct cyclecounter *cc)
219 struct azx_dev *azx_dev = container_of(cc, struct azx_dev, azx_cc);
220 struct snd_pcm_substream *substream = azx_dev->substream;
221 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
222 struct azx *chip = apcm->chip;
224 return azx_readl(chip, WALLCLK);
227 static void azx_timecounter_init(struct snd_pcm_substream *substream,
228 bool force, cycle_t last)
230 struct azx_dev *azx_dev = get_azx_dev(substream);
231 struct timecounter *tc = &azx_dev->azx_tc;
232 struct cyclecounter *cc = &azx_dev->azx_cc;
235 cc->read = azx_cc_read;
236 cc->mask = CLOCKSOURCE_MASK(32);
239 * Converting from 24 MHz to ns means applying a 125/3 factor.
240 * To avoid any saturation issues in intermediate operations,
241 * the 125 factor is applied first. The division is applied
242 * last after reading the timecounter value.
243 * Applying the 1/3 factor as part of the multiplication
244 * requires at least 20 bits for a decent precision, however
245 * overflows occur after about 4 hours or less, not a option.
248 cc->mult = 125; /* saturation after 195 years */
251 nsec = 0; /* audio time is elapsed time since trigger */
252 timecounter_init(tc, cc, nsec);
255 * force timecounter to use predefined value,
256 * used for synchronized starts
258 tc->cycle_last = last;
261 static u64 azx_adjust_codec_delay(struct snd_pcm_substream *substream,
264 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
265 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
266 u64 codec_frames, codec_nsecs;
268 if (!hinfo->ops.get_delay)
271 codec_frames = hinfo->ops.get_delay(hinfo, apcm->codec, substream);
272 codec_nsecs = div_u64(codec_frames * 1000000000LL,
273 substream->runtime->rate);
275 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
276 return nsec + codec_nsecs;
278 return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
284 static int setup_bdle(struct azx *chip,
285 struct snd_dma_buffer *dmab,
286 struct azx_dev *azx_dev, u32 **bdlp,
287 int ofs, int size, int with_ioc)
295 if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES)
298 addr = snd_sgbuf_get_addr(dmab, ofs);
299 /* program the address field of the BDL entry */
300 bdl[0] = cpu_to_le32((u32)addr);
301 bdl[1] = cpu_to_le32(upper_32_bits(addr));
302 /* program the size field of the BDL entry */
303 chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size);
304 /* one BDLE cannot cross 4K boundary on CTHDA chips */
305 if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY) {
306 u32 remain = 0x1000 - (ofs & 0xfff);
310 bdl[2] = cpu_to_le32(chunk);
311 /* program the IOC to enable interrupt
312 * only when the whole fragment is processed
315 bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
327 static int azx_setup_periods(struct azx *chip,
328 struct snd_pcm_substream *substream,
329 struct azx_dev *azx_dev)
332 int i, ofs, periods, period_bytes;
335 /* reset BDL address */
336 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
337 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
339 period_bytes = azx_dev->period_bytes;
340 periods = azx_dev->bufsize / period_bytes;
342 /* program the initial BDL entries */
343 bdl = (u32 *)azx_dev->bdl.area;
347 if (chip->bdl_pos_adj)
348 pos_adj = chip->bdl_pos_adj[chip->dev_index];
349 if (!azx_dev->no_period_wakeup && pos_adj > 0) {
350 struct snd_pcm_runtime *runtime = substream->runtime;
351 int pos_align = pos_adj;
352 pos_adj = (pos_adj * runtime->rate + 47999) / 48000;
356 pos_adj = ((pos_adj + pos_align - 1) / pos_align) *
358 pos_adj = frames_to_bytes(runtime, pos_adj);
359 if (pos_adj >= period_bytes) {
360 dev_warn(chip->card->dev,"Too big adjustment %d\n",
364 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
366 &bdl, ofs, pos_adj, true);
373 for (i = 0; i < periods; i++) {
374 if (i == periods - 1 && pos_adj)
375 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
377 period_bytes - pos_adj, 0);
379 ofs = setup_bdle(chip, snd_pcm_get_dma_buf(substream),
382 !azx_dev->no_period_wakeup);
389 dev_err(chip->card->dev, "Too many BDL entries: buffer=%d, period=%d\n",
390 azx_dev->bufsize, period_bytes);
398 static int azx_pcm_close(struct snd_pcm_substream *substream)
400 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
401 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
402 struct azx *chip = apcm->chip;
403 struct azx_dev *azx_dev = get_azx_dev(substream);
406 mutex_lock(&chip->open_mutex);
407 spin_lock_irqsave(&chip->reg_lock, flags);
408 azx_dev->substream = NULL;
409 azx_dev->running = 0;
410 spin_unlock_irqrestore(&chip->reg_lock, flags);
411 azx_release_device(azx_dev);
412 hinfo->ops.close(hinfo, apcm->codec, substream);
413 snd_hda_power_down(apcm->codec);
414 mutex_unlock(&chip->open_mutex);
418 static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
419 struct snd_pcm_hw_params *hw_params)
421 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
422 struct azx *chip = apcm->chip;
425 dsp_lock(get_azx_dev(substream));
426 if (dsp_is_locked(get_azx_dev(substream))) {
431 ret = chip->ops->substream_alloc_pages(chip, substream,
432 params_buffer_bytes(hw_params));
434 dsp_unlock(get_azx_dev(substream));
438 static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
440 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
441 struct azx_dev *azx_dev = get_azx_dev(substream);
442 struct azx *chip = apcm->chip;
443 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
446 /* reset BDL address */
448 if (!dsp_is_locked(azx_dev)) {
449 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
450 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
451 azx_sd_writel(chip, azx_dev, SD_CTL, 0);
452 azx_dev->bufsize = 0;
453 azx_dev->period_bytes = 0;
454 azx_dev->format_val = 0;
457 snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
459 err = chip->ops->substream_free_pages(chip, substream);
460 azx_dev->prepared = 0;
465 static int azx_pcm_prepare(struct snd_pcm_substream *substream)
467 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
468 struct azx *chip = apcm->chip;
469 struct azx_dev *azx_dev = get_azx_dev(substream);
470 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
471 struct snd_pcm_runtime *runtime = substream->runtime;
472 unsigned int bufsize, period_bytes, format_val, stream_tag;
474 struct hda_spdif_out *spdif =
475 snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
476 unsigned short ctls = spdif ? spdif->ctls : 0;
479 if (dsp_is_locked(azx_dev)) {
484 azx_stream_reset(chip, azx_dev);
485 format_val = snd_hda_calc_stream_format(runtime->rate,
491 dev_err(chip->card->dev,
492 "invalid format_val, rate=%d, ch=%d, format=%d\n",
493 runtime->rate, runtime->channels, runtime->format);
498 bufsize = snd_pcm_lib_buffer_bytes(substream);
499 period_bytes = snd_pcm_lib_period_bytes(substream);
501 dev_dbg(chip->card->dev, "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
502 bufsize, format_val);
504 if (bufsize != azx_dev->bufsize ||
505 period_bytes != azx_dev->period_bytes ||
506 format_val != azx_dev->format_val ||
507 runtime->no_period_wakeup != azx_dev->no_period_wakeup) {
508 azx_dev->bufsize = bufsize;
509 azx_dev->period_bytes = period_bytes;
510 azx_dev->format_val = format_val;
511 azx_dev->no_period_wakeup = runtime->no_period_wakeup;
512 err = azx_setup_periods(chip, substream, azx_dev);
517 /* when LPIB delay correction gives a small negative value,
518 * we ignore it; currently set the threshold statically to
521 if (runtime->period_size > 64)
522 azx_dev->delay_negative_threshold = -frames_to_bytes(runtime, 64);
524 azx_dev->delay_negative_threshold = 0;
526 /* wallclk has 24Mhz clock source */
527 azx_dev->period_wallclk = (((runtime->period_size * 24000) /
528 runtime->rate) * 1000);
529 azx_setup_controller(chip, azx_dev);
530 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
532 azx_sd_readw(chip, azx_dev, SD_FIFOSIZE) + 1;
534 azx_dev->fifo_size = 0;
536 stream_tag = azx_dev->stream_tag;
537 /* CA-IBG chips need the playback stream starting from 1 */
538 if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
539 stream_tag > chip->capture_streams)
540 stream_tag -= chip->capture_streams;
541 err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
542 azx_dev->format_val, substream);
546 azx_dev->prepared = 1;
551 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
553 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
554 struct azx *chip = apcm->chip;
555 struct azx_dev *azx_dev;
556 struct snd_pcm_substream *s;
557 int rstart = 0, start, nsync = 0, sbits = 0;
560 azx_dev = get_azx_dev(substream);
561 trace_azx_pcm_trigger(chip, azx_dev, cmd);
563 if (dsp_is_locked(azx_dev) || !azx_dev->prepared)
567 case SNDRV_PCM_TRIGGER_START:
569 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
570 case SNDRV_PCM_TRIGGER_RESUME:
573 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
574 case SNDRV_PCM_TRIGGER_SUSPEND:
575 case SNDRV_PCM_TRIGGER_STOP:
582 snd_pcm_group_for_each_entry(s, substream) {
583 if (s->pcm->card != substream->pcm->card)
585 azx_dev = get_azx_dev(s);
586 sbits |= 1 << azx_dev->index;
588 snd_pcm_trigger_done(s, substream);
591 spin_lock(&chip->reg_lock);
593 /* first, set SYNC bits of corresponding streams */
594 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
595 azx_writel(chip, OLD_SSYNC,
596 azx_readl(chip, OLD_SSYNC) | sbits);
598 azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) | sbits);
600 snd_pcm_group_for_each_entry(s, substream) {
601 if (s->pcm->card != substream->pcm->card)
603 azx_dev = get_azx_dev(s);
605 azx_dev->start_wallclk = azx_readl(chip, WALLCLK);
607 azx_dev->start_wallclk -=
608 azx_dev->period_wallclk;
609 azx_stream_start(chip, azx_dev);
611 azx_stream_stop(chip, azx_dev);
613 azx_dev->running = start;
615 spin_unlock(&chip->reg_lock);
617 /* wait until all FIFOs get ready */
618 for (timeout = 5000; timeout; timeout--) {
620 snd_pcm_group_for_each_entry(s, substream) {
621 if (s->pcm->card != substream->pcm->card)
623 azx_dev = get_azx_dev(s);
624 if (!(azx_sd_readb(chip, azx_dev, SD_STS) &
633 /* wait until all RUN bits are cleared */
634 for (timeout = 5000; timeout; timeout--) {
636 snd_pcm_group_for_each_entry(s, substream) {
637 if (s->pcm->card != substream->pcm->card)
639 azx_dev = get_azx_dev(s);
640 if (azx_sd_readb(chip, azx_dev, SD_CTL) &
649 spin_lock(&chip->reg_lock);
650 /* reset SYNC bits */
651 if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
652 azx_writel(chip, OLD_SSYNC,
653 azx_readl(chip, OLD_SSYNC) & ~sbits);
655 azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) & ~sbits);
657 azx_timecounter_init(substream, 0, 0);
661 /* same start cycle for master and group */
662 azx_dev = get_azx_dev(substream);
663 cycle_last = azx_dev->azx_tc.cycle_last;
665 snd_pcm_group_for_each_entry(s, substream) {
666 if (s->pcm->card != substream->pcm->card)
668 azx_timecounter_init(s, 1, cycle_last);
672 spin_unlock(&chip->reg_lock);
676 /* get the current DMA position with correction on VIA chips */
677 static unsigned int azx_via_get_position(struct azx *chip,
678 struct azx_dev *azx_dev)
680 unsigned int link_pos, mini_pos, bound_pos;
681 unsigned int mod_link_pos, mod_dma_pos, mod_mini_pos;
682 unsigned int fifo_size;
684 link_pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
685 if (azx_dev->substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
686 /* Playback, no problem using link position */
692 * use mod to get the DMA position just like old chipset
694 mod_dma_pos = le32_to_cpu(*azx_dev->posbuf);
695 mod_dma_pos %= azx_dev->period_bytes;
697 /* azx_dev->fifo_size can't get FIFO size of in stream.
698 * Get from base address + offset.
700 fifo_size = readw(chip->remap_addr + VIA_IN_STREAM0_FIFO_SIZE_OFFSET);
702 if (azx_dev->insufficient) {
703 /* Link position never gather than FIFO size */
704 if (link_pos <= fifo_size)
707 azx_dev->insufficient = 0;
710 if (link_pos <= fifo_size)
711 mini_pos = azx_dev->bufsize + link_pos - fifo_size;
713 mini_pos = link_pos - fifo_size;
715 /* Find nearest previous boudary */
716 mod_mini_pos = mini_pos % azx_dev->period_bytes;
717 mod_link_pos = link_pos % azx_dev->period_bytes;
718 if (mod_link_pos >= fifo_size)
719 bound_pos = link_pos - mod_link_pos;
720 else if (mod_dma_pos >= mod_mini_pos)
721 bound_pos = mini_pos - mod_mini_pos;
723 bound_pos = mini_pos - mod_mini_pos + azx_dev->period_bytes;
724 if (bound_pos >= azx_dev->bufsize)
728 /* Calculate real DMA position we want */
729 return bound_pos + mod_dma_pos;
732 unsigned int azx_get_position(struct azx *chip,
733 struct azx_dev *azx_dev,
736 struct snd_pcm_substream *substream = azx_dev->substream;
737 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
739 int stream = substream->stream;
740 struct hda_pcm_stream *hinfo = apcm->hinfo[stream];
743 switch (chip->position_fix[stream]) {
746 pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
748 case POS_FIX_VIACOMBO:
749 pos = azx_via_get_position(chip, azx_dev);
752 /* use the position buffer */
753 pos = le32_to_cpu(*azx_dev->posbuf);
754 if (with_check && chip->position_fix[stream] == POS_FIX_AUTO) {
755 if (!pos || pos == (u32)-1) {
756 dev_info(chip->card->dev,
757 "Invalid position buffer, using LPIB read method instead.\n");
758 chip->position_fix[stream] = POS_FIX_LPIB;
759 pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
761 chip->position_fix[stream] = POS_FIX_POSBUF;
766 if (pos >= azx_dev->bufsize)
769 /* calculate runtime delay from LPIB */
770 if (substream->runtime &&
771 chip->position_fix[stream] == POS_FIX_POSBUF &&
772 (chip->driver_caps & AZX_DCAPS_COUNT_LPIB_DELAY)) {
773 unsigned int lpib_pos = azx_sd_readl(chip, azx_dev, SD_LPIB);
774 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
775 delay = pos - lpib_pos;
777 delay = lpib_pos - pos;
779 if (delay >= azx_dev->delay_negative_threshold)
782 delay += azx_dev->bufsize;
784 if (delay >= azx_dev->period_bytes) {
785 dev_info(chip->card->dev,
786 "Unstable LPIB (%d >= %d); disabling LPIB delay counting\n",
787 delay, azx_dev->period_bytes);
789 chip->driver_caps &= ~AZX_DCAPS_COUNT_LPIB_DELAY;
791 delay = bytes_to_frames(substream->runtime, delay);
794 if (substream->runtime) {
795 if (hinfo->ops.get_delay)
796 delay += hinfo->ops.get_delay(hinfo, apcm->codec,
798 substream->runtime->delay = delay;
801 trace_azx_get_position(chip, azx_dev, pos, delay);
804 EXPORT_SYMBOL_GPL(azx_get_position);
806 static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
808 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
809 struct azx *chip = apcm->chip;
810 struct azx_dev *azx_dev = get_azx_dev(substream);
811 return bytes_to_frames(substream->runtime,
812 azx_get_position(chip, azx_dev, false));
815 static int azx_get_wallclock_tstamp(struct snd_pcm_substream *substream,
818 struct azx_dev *azx_dev = get_azx_dev(substream);
821 nsec = timecounter_read(&azx_dev->azx_tc);
822 nsec = div_u64(nsec, 3); /* can be optimized */
823 nsec = azx_adjust_codec_delay(substream, nsec);
825 *ts = ns_to_timespec(nsec);
830 static struct snd_pcm_hardware azx_pcm_hw = {
831 .info = (SNDRV_PCM_INFO_MMAP |
832 SNDRV_PCM_INFO_INTERLEAVED |
833 SNDRV_PCM_INFO_BLOCK_TRANSFER |
834 SNDRV_PCM_INFO_MMAP_VALID |
835 /* No full-resume yet implemented */
836 /* SNDRV_PCM_INFO_RESUME |*/
837 SNDRV_PCM_INFO_PAUSE |
838 SNDRV_PCM_INFO_SYNC_START |
839 SNDRV_PCM_INFO_HAS_WALL_CLOCK |
840 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
841 .formats = SNDRV_PCM_FMTBIT_S16_LE,
842 .rates = SNDRV_PCM_RATE_48000,
847 .buffer_bytes_max = AZX_MAX_BUF_SIZE,
848 .period_bytes_min = 128,
849 .period_bytes_max = AZX_MAX_BUF_SIZE / 2,
851 .periods_max = AZX_MAX_FRAG,
855 static int azx_pcm_open(struct snd_pcm_substream *substream)
857 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
858 struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
859 struct azx *chip = apcm->chip;
860 struct azx_dev *azx_dev;
861 struct snd_pcm_runtime *runtime = substream->runtime;
866 mutex_lock(&chip->open_mutex);
867 azx_dev = azx_assign_device(chip, substream);
868 if (azx_dev == NULL) {
869 mutex_unlock(&chip->open_mutex);
872 runtime->hw = azx_pcm_hw;
873 runtime->hw.channels_min = hinfo->channels_min;
874 runtime->hw.channels_max = hinfo->channels_max;
875 runtime->hw.formats = hinfo->formats;
876 runtime->hw.rates = hinfo->rates;
877 snd_pcm_limit_hw_rates(runtime);
878 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
880 /* avoid wrap-around with wall-clock */
881 snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
885 if (chip->align_buffer_size)
886 /* constrain buffer sizes to be multiple of 128
887 bytes. This is more efficient in terms of memory
888 access but isn't required by the HDA spec and
889 prevents users from specifying exact period/buffer
890 sizes. For example for 44.1kHz, a period size set
891 to 20ms will be rounded to 19.59ms. */
894 /* Don't enforce steps on buffer sizes, still need to
895 be multiple of 4 bytes (HDA spec). Tested on Intel
896 HDA controllers, may not work on all devices where
897 option needs to be disabled */
900 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
902 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
904 snd_hda_power_up_d3wait(apcm->codec);
905 err = hinfo->ops.open(hinfo, apcm->codec, substream);
907 azx_release_device(azx_dev);
908 snd_hda_power_down(apcm->codec);
909 mutex_unlock(&chip->open_mutex);
912 snd_pcm_limit_hw_rates(runtime);
914 if (snd_BUG_ON(!runtime->hw.channels_min) ||
915 snd_BUG_ON(!runtime->hw.channels_max) ||
916 snd_BUG_ON(!runtime->hw.formats) ||
917 snd_BUG_ON(!runtime->hw.rates)) {
918 azx_release_device(azx_dev);
919 hinfo->ops.close(hinfo, apcm->codec, substream);
920 snd_hda_power_down(apcm->codec);
921 mutex_unlock(&chip->open_mutex);
925 /* disable WALLCLOCK timestamps for capture streams
926 until we figure out how to handle digital inputs */
927 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
928 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK;
930 spin_lock_irqsave(&chip->reg_lock, flags);
931 azx_dev->substream = substream;
932 azx_dev->running = 0;
933 spin_unlock_irqrestore(&chip->reg_lock, flags);
935 runtime->private_data = azx_dev;
936 snd_pcm_set_sync(substream);
937 mutex_unlock(&chip->open_mutex);
941 static int azx_pcm_mmap(struct snd_pcm_substream *substream,
942 struct vm_area_struct *area)
944 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
945 struct azx *chip = apcm->chip;
946 if (chip->ops->pcm_mmap_prepare)
947 chip->ops->pcm_mmap_prepare(substream, area);
948 return snd_pcm_lib_default_mmap(substream, area);
951 static struct snd_pcm_ops azx_pcm_ops = {
952 .open = azx_pcm_open,
953 .close = azx_pcm_close,
954 .ioctl = snd_pcm_lib_ioctl,
955 .hw_params = azx_pcm_hw_params,
956 .hw_free = azx_pcm_hw_free,
957 .prepare = azx_pcm_prepare,
958 .trigger = azx_pcm_trigger,
959 .pointer = azx_pcm_pointer,
960 .wall_clock = azx_get_wallclock_tstamp,
961 .mmap = azx_pcm_mmap,
962 .page = snd_pcm_sgbuf_ops_page,
965 static void azx_pcm_free(struct snd_pcm *pcm)
967 struct azx_pcm *apcm = pcm->private_data;
969 list_del(&apcm->list);
974 #define MAX_PREALLOC_SIZE (32 * 1024 * 1024)
976 static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
977 struct hda_pcm *cpcm)
979 struct azx *chip = bus->private_data;
981 struct azx_pcm *apcm;
982 int pcm_dev = cpcm->device;
986 list_for_each_entry(apcm, &chip->pcm_list, list) {
987 if (apcm->pcm->device == pcm_dev) {
988 dev_err(chip->card->dev, "PCM %d already exists\n",
993 err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
994 cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
995 cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
999 strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
1000 apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
1005 apcm->codec = codec;
1006 pcm->private_data = apcm;
1007 pcm->private_free = azx_pcm_free;
1008 if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
1009 pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
1010 list_add_tail(&apcm->list, &chip->pcm_list);
1012 for (s = 0; s < 2; s++) {
1013 apcm->hinfo[s] = &cpcm->stream[s];
1014 if (cpcm->stream[s].substreams)
1015 snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
1017 /* buffer pre-allocation */
1018 size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
1019 if (size > MAX_PREALLOC_SIZE)
1020 size = MAX_PREALLOC_SIZE;
1021 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
1023 size, MAX_PREALLOC_SIZE);
1025 pcm->dev = &codec->dev;
1030 * CORB / RIRB interface
1032 static int azx_alloc_cmd_io(struct azx *chip)
1036 /* single page (at least 4096 bytes) must suffice for both ringbuffes */
1037 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1038 PAGE_SIZE, &chip->rb);
1040 dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n");
1043 EXPORT_SYMBOL_GPL(azx_alloc_cmd_io);
1045 static void azx_init_cmd_io(struct azx *chip)
1049 spin_lock_irq(&chip->reg_lock);
1051 chip->corb.addr = chip->rb.addr;
1052 chip->corb.buf = (u32 *)chip->rb.area;
1053 azx_writel(chip, CORBLBASE, (u32)chip->corb.addr);
1054 azx_writel(chip, CORBUBASE, upper_32_bits(chip->corb.addr));
1056 /* set the corb size to 256 entries (ULI requires explicitly) */
1057 azx_writeb(chip, CORBSIZE, 0x02);
1058 /* set the corb write pointer to 0 */
1059 azx_writew(chip, CORBWP, 0);
1061 /* reset the corb hw read pointer */
1062 azx_writew(chip, CORBRP, ICH6_CORBRP_RST);
1063 if (!(chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)) {
1064 for (timeout = 1000; timeout > 0; timeout--) {
1065 if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST)
1070 dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
1071 azx_readw(chip, CORBRP));
1073 azx_writew(chip, CORBRP, 0);
1074 for (timeout = 1000; timeout > 0; timeout--) {
1075 if (azx_readw(chip, CORBRP) == 0)
1080 dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
1081 azx_readw(chip, CORBRP));
1084 /* enable corb dma */
1085 azx_writeb(chip, CORBCTL, ICH6_CORBCTL_RUN);
1088 chip->rirb.addr = chip->rb.addr + 2048;
1089 chip->rirb.buf = (u32 *)(chip->rb.area + 2048);
1090 chip->rirb.wp = chip->rirb.rp = 0;
1091 memset(chip->rirb.cmds, 0, sizeof(chip->rirb.cmds));
1092 azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
1093 azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr));
1095 /* set the rirb size to 256 entries (ULI requires explicitly) */
1096 azx_writeb(chip, RIRBSIZE, 0x02);
1097 /* reset the rirb hw write pointer */
1098 azx_writew(chip, RIRBWP, ICH6_RIRBWP_RST);
1099 /* set N=1, get RIRB response interrupt for new entry */
1100 if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
1101 azx_writew(chip, RINTCNT, 0xc0);
1103 azx_writew(chip, RINTCNT, 1);
1104 /* enable rirb dma and response irq */
1105 azx_writeb(chip, RIRBCTL, ICH6_RBCTL_DMA_EN | ICH6_RBCTL_IRQ_EN);
1106 spin_unlock_irq(&chip->reg_lock);
1108 EXPORT_SYMBOL_GPL(azx_init_cmd_io);
1110 static void azx_free_cmd_io(struct azx *chip)
1112 spin_lock_irq(&chip->reg_lock);
1113 /* disable ringbuffer DMAs */
1114 azx_writeb(chip, RIRBCTL, 0);
1115 azx_writeb(chip, CORBCTL, 0);
1116 spin_unlock_irq(&chip->reg_lock);
1118 EXPORT_SYMBOL_GPL(azx_free_cmd_io);
1120 static unsigned int azx_command_addr(u32 cmd)
1122 unsigned int addr = cmd >> 28;
1124 if (addr >= AZX_MAX_CODECS) {
1132 /* send a command */
1133 static int azx_corb_send_cmd(struct hda_bus *bus, u32 val)
1135 struct azx *chip = bus->private_data;
1136 unsigned int addr = azx_command_addr(val);
1137 unsigned int wp, rp;
1139 spin_lock_irq(&chip->reg_lock);
1141 /* add command to corb */
1142 wp = azx_readw(chip, CORBWP);
1144 /* something wrong, controller likely turned to D3 */
1145 spin_unlock_irq(&chip->reg_lock);
1149 wp %= ICH6_MAX_CORB_ENTRIES;
1151 rp = azx_readw(chip, CORBRP);
1153 /* oops, it's full */
1154 spin_unlock_irq(&chip->reg_lock);
1158 chip->rirb.cmds[addr]++;
1159 chip->corb.buf[wp] = cpu_to_le32(val);
1160 azx_writew(chip, CORBWP, wp);
1162 spin_unlock_irq(&chip->reg_lock);
1167 #define ICH6_RIRB_EX_UNSOL_EV (1<<4)
1169 /* retrieve RIRB entry - called from interrupt handler */
1170 static void azx_update_rirb(struct azx *chip)
1172 unsigned int rp, wp;
1176 wp = azx_readw(chip, RIRBWP);
1178 /* something wrong, controller likely turned to D3 */
1182 if (wp == chip->rirb.wp)
1186 while (chip->rirb.rp != wp) {
1188 chip->rirb.rp %= ICH6_MAX_RIRB_ENTRIES;
1190 rp = chip->rirb.rp << 1; /* an RIRB entry is 8-bytes */
1191 res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]);
1192 res = le32_to_cpu(chip->rirb.buf[rp]);
1193 addr = res_ex & 0xf;
1194 if ((addr >= AZX_MAX_CODECS) || !(chip->codec_mask & (1 << addr))) {
1195 dev_err(chip->card->dev, "spurious response %#x:%#x, rp = %d, wp = %d",
1200 else if (res_ex & ICH6_RIRB_EX_UNSOL_EV)
1201 snd_hda_queue_unsol_event(chip->bus, res, res_ex);
1202 else if (chip->rirb.cmds[addr]) {
1203 chip->rirb.res[addr] = res;
1205 chip->rirb.cmds[addr]--;
1206 } else if (printk_ratelimit()) {
1207 dev_err(chip->card->dev, "spurious response %#x:%#x, last cmd=%#08x\n",
1209 chip->last_cmd[addr]);
1214 /* receive a response */
/*
 * Wait for (and return) the RIRB response for the codec at @addr.
 * Polls azx_update_rirb() when in polling mode, then escalates through
 * recovery steps on timeout: one-shot poll, permanent polling mode,
 * disabling MSI, and finally single_cmd mode with CORB/RIRB released.
 * NOTE(review): gaps in the embedded line numbering show that many source
 * lines (the 'addr' parameter line, braces, 'break'/'goto' statements) are
 * missing from this excerpt; do not reconstruct control flow by guesswork.
 */
1215 static unsigned int azx_rirb_get_response(struct hda_bus *bus,
1218 struct azx *chip = bus->private_data;
1219 unsigned long timeout;
1220 unsigned long loopcounter;
/* overall deadline: give the codec up to 1 second to respond */
1224 timeout = jiffies + msecs_to_jiffies(1000);
1226 for (loopcounter = 0;; loopcounter++) {
1227 if (chip->polling_mode || do_poll) {
1228 spin_lock_irq(&chip->reg_lock);
1229 azx_update_rirb(chip);
1230 spin_unlock_irq(&chip->reg_lock);
/* command completed: clear the error flag and hand back the result */
1232 if (!chip->rirb.cmds[addr]) {
1234 bus->rirb_error = 0;
1237 chip->poll_count = 0;
1238 return chip->rirb.res[addr]; /* the last value */
1240 if (time_after(jiffies, timeout))
/* back off once the busy-wait has clearly gone on too long */
1242 if (bus->needs_damn_long_delay || loopcounter > 3000)
1243 msleep(2); /* temporary workaround */
1250 if (!bus->no_response_fallback)
/* first fallback: retry once with explicit polling of the RIRB */
1253 if (!chip->polling_mode && chip->poll_count < 2) {
1254 dev_dbg(chip->card->dev,
1255 "azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
1256 chip->last_cmd[addr]);
/* second fallback: stay in polling mode from now on */
1263 if (!chip->polling_mode) {
1264 dev_warn(chip->card->dev,
1265 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
1266 chip->last_cmd[addr]);
1267 chip->polling_mode = 1;
1272 dev_warn(chip->card->dev,
1273 "No response from codec, disabling MSI: last cmd=0x%08x\n",
1274 chip->last_cmd[addr]);
/* NOTE(review): this calls disable_msi_reset_irq() twice; the first
 * operand should almost certainly be a NULL check on the function
 * pointer (chip->ops->disable_msi_reset_irq &&), not a call — confirm
 * against the upstream hda_controller.c before relying on this path.
 */
1275 if (chip->ops->disable_msi_reset_irq(chip) &&
1276 chip->ops->disable_msi_reset_irq(chip) < 0) {
1277 bus->rirb_error = 1;
1283 if (chip->probing) {
1284 /* If this critical timeout happens during the codec probing
1285 * phase, this is likely an access to a non-existing codec
1286 * slot. Better to return an error and reset the system.
1291 /* a fatal communication error; need either to reset or to fallback
1292 * to the single_cmd mode
1294 bus->rirb_error = 1;
1295 if (bus->allow_bus_reset && !bus->response_reset && !bus->in_reset) {
1296 bus->response_reset = 1;
1297 return -1; /* give a chance to retry */
/* last resort: abandon CORB/RIRB entirely and use immediate commands */
1300 dev_err(chip->card->dev,
1301 "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
1302 chip->last_cmd[addr]);
1303 chip->single_cmd = 1;
1304 bus->response_reset = 0;
1305 /* release CORB/RIRB */
1306 azx_free_cmd_io(chip);
1307 /* disable unsolicited responses */
1308 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~ICH6_GCTL_UNSOL);
1313 * Use the single immediate command instead of CORB/RIRB for simplicity
1315 * Note: according to Intel, this is not preferred use. The command was
1316 * intended for the BIOS only, and may get confused with unsolicited
1317 * responses. So, we shouldn't use it for normal operation from the
1319 * I left the codes, however, for debugging/testing purposes.
1322 /* receive a response */
/*
 * Single-command mode: wait for the immediate-response-valid (IRV) bit and
 * stash the response value into chip->rirb.res[addr].
 * NOTE(review): the embedded line numbers jump (1331 -> 1336), so the
 * original polling loop and success return appear to be missing here.
 */
1323 static int azx_single_wait_for_response(struct azx *chip, unsigned int addr)
1328 /* check IRV busy bit */
1329 if (azx_readw(chip, IRS) & ICH6_IRS_VALID) {
1330 /* reuse rirb.res as the response return value */
1331 chip->rirb.res[addr] = azx_readl(chip, IR);
/* timeout path: log (rate-limited) and record -1 as the response */
1336 if (printk_ratelimit())
1337 dev_dbg(chip->card->dev, "get_response timeout: IRS=0x%x\n",
1338 azx_readw(chip, IRS));
1339 chip->rirb.res[addr] = -1;
1343 /* send a command */
/*
 * Single-command mode: send a verb through the Immediate Command registers
 * (IC/IRS) instead of the CORB, then wait for the response.
 * NOTE(review): the OR-ed IRS bit constants after lines 1355/1358 were
 * dropped from this excerpt (line numbers 1356 and 1359 are missing),
 * as is the retry loop the checks below originally lived in.
 */
1344 static int azx_single_send_cmd(struct hda_bus *bus, u32 val)
1346 struct azx *chip = bus->private_data;
1347 unsigned int addr = azx_command_addr(val);
1350 bus->rirb_error = 0;
1352 /* check ICB busy bit */
1353 if (!((azx_readw(chip, IRS) & ICH6_IRS_BUSY))) {
1354 /* Clear IRV valid bit */
1355 azx_writew(chip, IRS, azx_readw(chip, IRS) |
1357 azx_writel(chip, IC, val);
1358 azx_writew(chip, IRS, azx_readw(chip, IRS) |
1360 return azx_single_wait_for_response(chip, addr);
/* busy timeout: log (rate-limited) which command could not be sent */
1364 if (printk_ratelimit())
1365 dev_dbg(chip->card->dev,
1366 "send_cmd timeout: IRS=0x%x, val=0x%x\n",
1367 azx_readw(chip, IRS), val);
1371 /* receive a response */
/*
 * Single-command mode: return the response cached by
 * azx_single_wait_for_response() for the codec at @addr.
 * NOTE(review): the second parameter line (addr) is missing from this
 * excerpt (line numbers 1373-1374 were dropped).
 */
1372 static unsigned int azx_single_get_response(struct hda_bus *bus,
1375 struct azx *chip = bus->private_data;
1376 return chip->rirb.res[addr];
1380 * The below are the main callbacks from hda_codec.
1382 * They are just the skeleton to call sub-callbacks according to the
1383 * current setting of chip->single_cmd.
1386 /* send a command */
/*
 * Bus 'command' callback: record the verb for diagnostics, then dispatch
 * to the single-command path or the CORB path per chip->single_cmd.
 * NOTE(review): 'static' combined with EXPORT_SYMBOL_GPL is contradictory;
 * upstream declares this function non-static — confirm before building.
 */
1387 static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
1389 struct azx *chip = bus->private_data;
/* remember the last verb per codec address for timeout diagnostics */
1393 chip->last_cmd[azx_command_addr(val)] = val;
1394 if (chip->single_cmd)
1395 return azx_single_send_cmd(bus, val);
1397 return azx_corb_send_cmd(bus, val);
1399 EXPORT_SYMBOL_GPL(azx_send_cmd);
1401 /* get a response */
/*
 * Bus 'get_response' callback: dispatch to the single-command or RIRB
 * reader per chip->single_cmd.
 * NOTE(review): the 'addr' parameter line is missing from this excerpt,
 * and 'static' with EXPORT_SYMBOL_GPL is contradictory (upstream is
 * non-static) — confirm against the original file.
 */
1402 static unsigned int azx_get_response(struct hda_bus *bus,
1405 struct azx *chip = bus->private_data;
1408 if (chip->single_cmd)
1409 return azx_single_get_response(bus, addr);
1411 return azx_rirb_get_response(bus, addr);
1413 EXPORT_SYMBOL_GPL(azx_get_response);
1415 #ifdef CONFIG_SND_HDA_DSP_LOADER
1417 * DSP loading code (e.g. for CA0132)
1420 /* use the first stream for loading DSP */
/*
 * Return the stream device used for DSP firmware loading: the first
 * playback stream (index chip->playback_index_offset).
 */
1421 static struct azx_dev *
1422 azx_get_dsp_loader_dev(struct azx *chip)
1424 return &chip->azx_dev[chip->playback_index_offset];
/*
 * Prepare the DSP-loader stream: reserve it (saving its state in
 * chip->saved_azx_dev), allocate the SG transfer buffer, program BDL and
 * controller registers, and return the stream tag on success.
 * On the error path the buffer is freed and the saved state restored.
 * NOTE(review): gaps in the embedded numbering show dropped lines here
 * (dsp_lock(), error 'goto' targets, dma_alloc_pages() size arguments);
 * the dsp_unlock() calls below imply a matching dsp_lock() that is not
 * visible in this excerpt.
 */
1427 static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
1428 unsigned int byte_size,
1429 struct snd_dma_buffer *bufp)
1432 struct azx *chip = bus->private_data;
1433 struct azx_dev *azx_dev;
1436 azx_dev = azx_get_dsp_loader_dev(chip);
/* refuse to hijack a stream that is running or already locked */
1439 spin_lock_irq(&chip->reg_lock);
1440 if (azx_dev->running || azx_dev->locked) {
1441 spin_unlock_irq(&chip->reg_lock);
1445 azx_dev->prepared = 0;
/* snapshot the stream so it can be restored after DSP loading */
1446 chip->saved_azx_dev = *azx_dev;
1447 azx_dev->locked = 1;
1448 spin_unlock_irq(&chip->reg_lock);
1450 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV_SG,
1455 azx_dev->bufsize = byte_size;
1456 azx_dev->period_bytes = byte_size;
1457 azx_dev->format_val = format;
1459 azx_stream_reset(chip, azx_dev);
1461 /* reset BDL address */
1462 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
1463 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
/* build a single BDL entry covering the whole firmware buffer */
1466 bdl = (u32 *)azx_dev->bdl.area;
1467 err = setup_bdle(chip, bufp, azx_dev, &bdl, 0, byte_size, 0);
1471 azx_setup_controller(chip, azx_dev);
1472 dsp_unlock(azx_dev);
1473 return azx_dev->stream_tag;
/* error unwind: free the buffer and restore the saved stream state */
1476 chip->ops->dma_free_pages(chip, bufp);
1478 spin_lock_irq(&chip->reg_lock);
1479 if (azx_dev->opened)
1480 *azx_dev = chip->saved_azx_dev;
1481 azx_dev->locked = 0;
1482 spin_unlock_irq(&chip->reg_lock);
1484 dsp_unlock(azx_dev);
/*
 * Start or stop the DSP-loader stream and record its running state.
 * NOTE(review): the 'if (start)' / 'else' lines between 1491 and 1494/1496
 * were dropped from this excerpt.
 */
1488 static void azx_load_dsp_trigger(struct hda_bus *bus, bool start)
1490 struct azx *chip = bus->private_data;
1491 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1494 azx_stream_start(chip, azx_dev);
1496 azx_stream_stop(chip, azx_dev);
1497 azx_dev->running = start;
/*
 * Undo azx_load_dsp_prepare(): clear the stream registers, free the
 * firmware DMA buffer, restore the saved stream state, and unlock.
 * No-op if the buffer was never allocated or the stream is not locked.
 * NOTE(review): a dsp_lock() presumably pairs with the dsp_unlock() below
 * but is not visible in this excerpt (numbering gap at 1507-1509).
 */
1500 static void azx_load_dsp_cleanup(struct hda_bus *bus,
1501 struct snd_dma_buffer *dmab)
1503 struct azx *chip = bus->private_data;
1504 struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
1506 if (!dmab->area || !azx_dev->locked)
1510 /* reset BDL address */
1511 azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
1512 azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
1513 azx_sd_writel(chip, azx_dev, SD_CTL, 0);
1514 azx_dev->bufsize = 0;
1515 azx_dev->period_bytes = 0;
1516 azx_dev->format_val = 0;
1518 chip->ops->dma_free_pages(chip, dmab);
/* restore the stream snapshot taken in azx_load_dsp_prepare() */
1521 spin_lock_irq(&chip->reg_lock);
1522 if (azx_dev->opened)
1523 *azx_dev = chip->saved_azx_dev;
1524 azx_dev->locked = 0;
1525 spin_unlock_irq(&chip->reg_lock);
1526 dsp_unlock(azx_dev);
1528 #endif /* CONFIG_SND_HDA_DSP_LOADER */
/*
 * Allocate the per-stream BDL buffers, the shared DMA position buffer
 * (8 bytes per stream), and the CORB/RIRB command buffers.
 * NOTE(review): the error-check/return lines after each allocation are
 * missing from this excerpt (gaps in the embedded numbering), as are the
 * BDL size argument and the final return.
 */
1530 int azx_alloc_stream_pages(struct azx *chip)
1533 struct snd_card *card = chip->card;
1535 for (i = 0; i < chip->num_streams; i++) {
1536 dsp_lock_init(&chip->azx_dev[i]);
1537 /* allocate memory for the BDL for each stream */
1538 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1540 &chip->azx_dev[i].bdl);
1542 dev_err(card->dev, "cannot allocate BDL\n");
1546 /* allocate memory for the position buffer */
1547 err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
1548 chip->num_streams * 8, &chip->posbuf);
1550 dev_err(card->dev, "cannot allocate posbuf\n");
1554 /* allocate CORB/RIRB */
1555 err = azx_alloc_cmd_io(chip);
1560 EXPORT_SYMBOL_GPL(azx_alloc_stream_pages);
/*
 * Free everything azx_alloc_stream_pages() allocated: each stream's BDL,
 * the CORB/RIRB buffer (chip->rb), and the position buffer.
 * NOTE(review): the guard line before freeing chip->rb (presumably
 * 'if (chip->rb.area)') is missing from this excerpt.
 */
1562 void azx_free_stream_pages(struct azx *chip)
1565 if (chip->azx_dev) {
1566 for (i = 0; i < chip->num_streams; i++)
1567 if (chip->azx_dev[i].bdl.area)
1568 chip->ops->dma_free_pages(
1569 chip, &chip->azx_dev[i].bdl);
1572 chip->ops->dma_free_pages(chip, &chip->rb);
1573 if (chip->posbuf.area)
1574 chip->ops->dma_free_pages(chip, &chip->posbuf);
1576 EXPORT_SYMBOL_GPL(azx_free_stream_pages);
1579 * Lowlevel interface
1582 /* enter link reset */
/*
 * Put the controller into link reset by clearing GCTL.CRST, then wait up
 * to 100 ms for the hardware to report the bit cleared.
 */
1583 void azx_enter_link_reset(struct azx *chip)
1585 unsigned long timeout;
1587 /* reset controller */
1588 azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~ICH6_GCTL_RESET);
/* poll until the RESET bit reads back as 0 or the 100 ms deadline hits */
1590 timeout = jiffies + msecs_to_jiffies(100);
1591 while ((azx_readb(chip, GCTL) & ICH6_GCTL_RESET) &&
1592 time_before(jiffies, timeout))
1593 usleep_range(500, 1000);
1595 EXPORT_SYMBOL_GPL(azx_enter_link_reset);
1597 /* exit link reset */
/*
 * Take the controller out of link reset by setting GCTL.CRST, then wait
 * up to 100 ms for GCTL to read back non-zero.
 */
1598 static void azx_exit_link_reset(struct azx *chip)
1600 unsigned long timeout;
1602 azx_writeb(chip, GCTL, azx_readb(chip, GCTL) | ICH6_GCTL_RESET);
1604 timeout = jiffies + msecs_to_jiffies(100);
1605 while (!azx_readb(chip, GCTL) &&
1606 time_before(jiffies, timeout))
1607 usleep_range(500, 1000);
1610 /* reset codec link */
/*
 * Full controller reset sequence: clear STATESTS, cycle the link reset,
 * verify the controller comes back, re-enable unsolicited responses
 * (unless in single_cmd mode), and detect the codec mask from STATESTS.
 * NOTE(review): gaps in the embedded numbering hide the 'full_reset'
 * early-exit, the return statements, and the OR-ed GCTL constant after
 * line 1642; do not infer them from this excerpt alone.
 */
1611 static int azx_reset(struct azx *chip, bool full_reset)
1616 /* clear STATESTS */
1617 azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1619 /* reset controller */
1620 azx_enter_link_reset(chip);
1622 /* delay for >= 100us for codec PLL to settle per spec
1623 * Rev 0.9 section 5.5.1
1625 usleep_range(500, 1000);
1627 /* Bring controller out of reset */
1628 azx_exit_link_reset(chip);
1630 /* Brent Chartrand said to wait >= 540us for codecs to initialize */
1631 usleep_range(1000, 1200);
1634 /* check to see if controller is ready */
1635 if (!azx_readb(chip, GCTL)) {
1636 dev_dbg(chip->card->dev, "azx_reset: controller not ready!\n");
1640 /* Accept unsolicited responses */
1641 if (!chip->single_cmd)
1642 azx_writel(chip, GCTL, azx_readl(chip, GCTL) |
/* first reset only: latch which codec slots signalled presence */
1646 if (!chip->codec_mask) {
1647 chip->codec_mask = azx_readw(chip, STATESTS);
1648 dev_dbg(chip->card->dev, "codec_mask = 0x%x\n",
1655 /* enable interrupts */
/* Enable controller-level interrupts (CIE and GIE bits in INTCTL). */
1656 static void azx_int_enable(struct azx *chip)
1658 /* enable controller CIE and GIE */
1659 azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) |
1660 ICH6_INT_CTRL_EN | ICH6_INT_GLOBAL_EN);
1663 /* disable interrupts */
/*
 * Disable all interrupt sources: per-stream interrupt bits in each SD_CTL,
 * the per-stream SIE bits in INTCTL, then the controller CIE/GIE bits.
 * NOTE(review): the mask constant AND-ed into SD_CTL after line 1672 was
 * dropped from this excerpt (numbering gap at 1673-1675).
 */
1664 static void azx_int_disable(struct azx *chip)
1668 /* disable interrupts in stream descriptor */
1669 for (i = 0; i < chip->num_streams; i++) {
1670 struct azx_dev *azx_dev = &chip->azx_dev[i];
1671 azx_sd_writeb(chip, azx_dev, SD_CTL,
1672 azx_sd_readb(chip, azx_dev, SD_CTL) &
1676 /* disable SIE for all streams */
1677 azx_writeb(chip, INTCTL, 0);
1679 /* disable controller CIE and GIE */
1680 azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) &
1681 ~(ICH6_INT_CTRL_EN | ICH6_INT_GLOBAL_EN));
1684 /* clear interrupts */
/*
 * Acknowledge any pending interrupt status: per-stream SD_STS, the codec
 * state-change bits (STATESTS), the RIRB status, and the global INTSTS.
 */
1685 static void azx_int_clear(struct azx *chip)
1689 /* clear stream status */
1690 for (i = 0; i < chip->num_streams; i++) {
1691 struct azx_dev *azx_dev = &chip->azx_dev[i];
1692 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1695 /* clear STATESTS */
1696 azx_writew(chip, STATESTS, STATESTS_INT_MASK);
1698 /* clear rirb status */
1699 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1701 /* clear int status */
1702 azx_writel(chip, INTSTS, ICH6_INT_CTRL_EN | ICH6_INT_ALL_STREAM);
1706 * reset and start the controller registers
/*
 * Bring the controller to an operational state: reset it, clear and
 * enable interrupts, set up CORB/RIRB (unless in single_cmd mode), and
 * program the DMA position buffer base registers. Idempotent: returns
 * early when chip->initialized is already set.
 */
1708 void azx_init_chip(struct azx *chip, bool full_reset)
1710 if (chip->initialized)
1713 /* reset controller */
1714 azx_reset(chip, full_reset);
1716 /* initialize interrupts */
1717 azx_int_clear(chip);
1718 azx_int_enable(chip);
1720 /* initialize the codec command I/O */
1721 if (!chip->single_cmd)
1722 azx_init_cmd_io(chip);
1724 /* program the position buffer */
1725 azx_writel(chip, DPLBASE, (u32)chip->posbuf.addr);
1726 azx_writel(chip, DPUBASE, upper_32_bits(chip->posbuf.addr));
1728 chip->initialized = 1;
1730 EXPORT_SYMBOL_GPL(azx_init_chip);
/*
 * Reverse of azx_init_chip(): disable and clear interrupts, shut down
 * CORB/RIRB, and zero the position-buffer base registers. No-op when the
 * chip was never initialized.
 */
1732 void azx_stop_chip(struct azx *chip)
1734 if (!chip->initialized)
1737 /* disable interrupts */
1738 azx_int_disable(chip);
1739 azx_int_clear(chip);
1741 /* disable CORB/RIRB */
1742 azx_free_cmd_io(chip);
1744 /* disable position buffer */
1745 azx_writel(chip, DPLBASE, 0);
1746 azx_writel(chip, DPUBASE, 0);
1748 chip->initialized = 0;
1750 EXPORT_SYMBOL_GPL(azx_stop_chip);
/*
 * Top-half interrupt handler: under chip->reg_lock, read INTSTS, service
 * each stream whose interrupt bit is set (ack SD_STS, call
 * snd_pcm_period_elapsed() with the lock dropped), then ack and drain the
 * RIRB. Bails out early when runtime-suspended, disabled, or when INTSTS
 * reads 0/0xffffffff (shared IRQ not ours / device gone).
 * NOTE(review): return statements and local declarations (status, i,
 * sd_status) are missing from this excerpt (embedded numbering gaps).
 */
1755 irqreturn_t azx_interrupt(int irq, void *dev_id)
1757 struct azx *chip = dev_id;
1758 struct azx_dev *azx_dev;
/* ignore IRQs that arrive while the device is runtime-suspended */
1763 #ifdef CONFIG_PM_RUNTIME
1764 if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
1765 if (!pm_runtime_active(chip->card->dev))
1769 spin_lock(&chip->reg_lock);
1771 if (chip->disabled) {
1772 spin_unlock(&chip->reg_lock);
/* 0xffffffff reads back when the device has dropped off the bus */
1776 status = azx_readl(chip, INTSTS);
1777 if (status == 0 || status == 0xffffffff) {
1778 spin_unlock(&chip->reg_lock);
1782 for (i = 0; i < chip->num_streams; i++) {
1783 azx_dev = &chip->azx_dev[i];
1784 if (status & azx_dev->sd_int_sta_mask) {
1785 sd_status = azx_sd_readb(chip, azx_dev, SD_STS);
1786 azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
1787 if (!azx_dev->substream || !azx_dev->running ||
1788 !(sd_status & SD_INT_COMPLETE))
1790 /* check whether this IRQ is really acceptable */
1791 if (!chip->ops->position_check ||
1792 chip->ops->position_check(chip, azx_dev)) {
/* drop the lock: snd_pcm_period_elapsed() may re-enter the driver */
1793 spin_unlock(&chip->reg_lock);
1794 snd_pcm_period_elapsed(azx_dev->substream);
1795 spin_lock(&chip->reg_lock);
1800 /* clear rirb int */
1801 status = azx_readb(chip, RIRBSTS);
1802 if (status & RIRB_INT_MASK) {
1803 if (status & RIRB_INT_RESPONSE) {
1804 if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
1806 azx_update_rirb(chip);
1808 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
1811 spin_unlock(&chip->reg_lock);
1815 EXPORT_SYMBOL_GPL(azx_interrupt);
1822 * Probe the given codec address
/*
 * Probe the codec slot @addr by sending a GET_PARAMETERS(VENDOR_ID) verb
 * to the root node and waiting for a response, serialized by the bus
 * command mutex. NOTE(review): the result check and return lines are
 * missing from this excerpt (embedded numbering gaps at 1834, 1836-1837).
 */
1824 static int probe_codec(struct azx *chip, int addr)
1826 unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
1827 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
1830 mutex_lock(&chip->bus->cmd_mutex);
1832 azx_send_cmd(chip->bus, cmd);
1833 res = azx_get_response(chip->bus, addr);
1835 mutex_unlock(&chip->bus->cmd_mutex);
1838 dev_dbg(chip->card->dev, "codec #%d probed OK\n", addr);
/*
 * Bus 'bus_reset' callback: restart the controller, then (if it came back
 * initialized) suspend all PCM substreams and run a codec suspend/resume
 * cycle to re-sync codec state.
 * NOTE(review): the bus->in_reset bracketing lines visible in upstream
 * are not in this excerpt (numbering gaps around 1845 and 1856+).
 */
1842 static void azx_bus_reset(struct hda_bus *bus)
1844 struct azx *chip = bus->private_data;
1847 azx_stop_chip(chip);
1848 azx_init_chip(chip, true);
1850 if (chip->initialized) {
1852 list_for_each_entry(p, &chip->pcm_list, list)
1853 snd_pcm_suspend_all(p->pcm);
1854 snd_hda_suspend(chip->bus);
1855 snd_hda_resume(chip->bus);
1862 /* power-up/down the controller */
/*
 * Bus power notification: translate codec power-up/down requests into
 * runtime-PM get/put on the card device. Only active when the driver
 * advertises AZX_DCAPS_PM_RUNTIME.
 * NOTE(review): the 'if (power_up) ... else' lines between 1867 and
 * 1871/1873 are missing from this excerpt.
 */
1863 static void azx_power_notify(struct hda_bus *bus, bool power_up)
1865 struct azx *chip = bus->private_data;
1867 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
1871 pm_runtime_get_sync(chip->card->dev);
1873 pm_runtime_put_sync(chip->card->dev);
/*
 * Convert the per-device jackpoll_ms module parameter into jiffies,
 * accepting only values in [50, 60000] ms.
 * NOTE(review): the visible statement order (range check, conversion,
 * then warning) only makes sense with the dropped lines restored —
 * upstream returns 0 for out-of-range values after warning; confirm
 * against the original before editing.
 */
1877 static int get_jackpoll_interval(struct azx *chip)
1882 if (!chip->jackpoll_ms)
1885 i = chip->jackpoll_ms[chip->dev_index];
1888 if (i < 50 || i > 60000)
1891 j = msecs_to_jiffies(i);
1893 dev_warn(chip->card->dev,
1894 "jackpoll_ms value out of range: %d\n", i);
1898 /* Codec initialization */
/*
 * Create the HDA bus and codec instances:
 *  1. build a hda_bus_template wired to this file's command/response/
 *     PCM-attach/reset/PM callbacks (plus DSP-loader ops when enabled),
 *  2. probe every slot allowed by codec_mask & codec_probe_mask, dropping
 *     slots that fail and resetting the controller afterwards,
 *  3. instantiate a snd_hda_codec for each surviving slot.
 * NOTE(review): several lines are missing from this excerpt (the
 * power_save #ifdef around 1915, error returns, the codecs counter and
 * final return) — the embedded numbering has gaps throughout.
 */
1899 int azx_codec_create(struct azx *chip, const char *model,
1900 unsigned int max_slots,
1903 struct hda_bus_template bus_temp;
1906 memset(&bus_temp, 0, sizeof(bus_temp));
1907 bus_temp.private_data = chip;
1908 bus_temp.modelname = model;
1909 bus_temp.pci = chip->pci;
1910 bus_temp.ops.command = azx_send_cmd;
1911 bus_temp.ops.get_response = azx_get_response;
1912 bus_temp.ops.attach_pcm = azx_attach_pcm_stream;
1913 bus_temp.ops.bus_reset = azx_bus_reset;
1915 bus_temp.power_save = power_save_to;
1916 bus_temp.ops.pm_notify = azx_power_notify;
1918 #ifdef CONFIG_SND_HDA_DSP_LOADER
1919 bus_temp.ops.load_dsp_prepare = azx_load_dsp_prepare;
1920 bus_temp.ops.load_dsp_trigger = azx_load_dsp_trigger;
1921 bus_temp.ops.load_dsp_cleanup = azx_load_dsp_cleanup;
1924 err = snd_hda_bus_new(chip->card, &bus_temp, &chip->bus);
/* some controllers need extra delay in RIRB handling to be reliable */
1928 if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
1929 dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
1930 chip->bus->needs_damn_long_delay = 1;
1935 max_slots = AZX_DEFAULT_CODECS;
1937 /* First try to probe all given codec slots */
1938 for (c = 0; c < max_slots; c++) {
1939 if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1940 if (probe_codec(chip, c) < 0) {
1941 /* Some BIOSen give you wrong codec addresses
1944 dev_warn(chip->card->dev,
1945 "Codec #%d probe error; disabling it...\n", c);
1946 chip->codec_mask &= ~(1 << c);
1947 /* More badly, accessing to a non-existing
1948 * codec often screws up the controller chip,
1949 * and disturbs the further communications.
1950 * Thus if an error occurs during probing,
1951 * better to reset the controller chip to
1952 * get back to the sanity state.
1954 azx_stop_chip(chip);
1955 azx_init_chip(chip, true);
1960 /* AMD chipsets often cause the communication stalls upon certain
1961 * sequence like the pin-detection. It seems that forcing the synced
1962 * access works around the stall. Grrr...
1964 if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
1965 dev_dbg(chip->card->dev, "Enable sync_write for stable communication\n");
1966 chip->bus->sync_write = 1;
1967 chip->bus->allow_bus_reset = 1;
1970 /* Then create codec instances */
1971 for (c = 0; c < max_slots; c++) {
1972 if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1973 struct hda_codec *codec;
1974 err = snd_hda_codec_new(chip->bus, c, &codec);
1977 codec->jackpoll_interval = get_jackpoll_interval(chip);
1978 codec->beep_mode = chip->beep_mode;
/* fail the whole creation when not a single codec came up */
1983 dev_err(chip->card->dev, "no codecs initialized\n");
1988 EXPORT_SYMBOL_GPL(azx_codec_create);
1990 /* configure each codec instance */
/*
 * Run snd_hda_codec_configure() on every codec attached to the bus.
 * NOTE(review): the return statement is missing from this excerpt
 * (numbering gap at 1996-1998).
 */
1991 int azx_codec_configure(struct azx *chip)
1993 struct hda_codec *codec;
1994 list_for_each_entry(codec, &chip->bus->codec_list, list) {
1995 snd_hda_codec_configure(codec);
1999 EXPORT_SYMBOL_GPL(azx_codec_configure);
2001 /* mixer creation - all stuff is implemented in hda module */
/* Build the mixer controls; thin wrapper over snd_hda_build_controls(). */
2002 int azx_mixer_create(struct azx *chip)
2004 return snd_hda_build_controls(chip->bus);
2006 EXPORT_SYMBOL_GPL(azx_mixer_create);
2009 /* initialize SD streams */
/*
 * Initialize each stream descriptor: point posbuf at its 8-byte slot in
 * the shared DMA position buffer, compute the MMIO register base
 * (0x80 + 0x20 * index), the interrupt-status bit, and a unique non-zero
 * stream tag (index + 1).
 */
2010 int azx_init_stream(struct azx *chip)
2014 /* initialize each stream (aka device)
2015 * assign the starting bdl address to each stream (device)
2018 for (i = 0; i < chip->num_streams; i++) {
2019 struct azx_dev *azx_dev = &chip->azx_dev[i];
2020 azx_dev->posbuf = (u32 __iomem *)(chip->posbuf.area + i * 8);
2021 /* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
2022 azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
2023 /* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
2024 azx_dev->sd_int_sta_mask = 1 << i;
2025 /* stream tag: must be non-zero and unique */
2027 azx_dev->stream_tag = i + 1;
2032 EXPORT_SYMBOL_GPL(azx_init_stream);
/* Module metadata for the shared HDA controller helper code. */
2034 MODULE_LICENSE("GPL");
2035 MODULE_DESCRIPTION("Common HDA driver functions");