python_code
stringlengths
0
1.8M
repo_name
stringclasses
7 values
file_path
stringlengths
5
99
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Routines for Gravis UltraSound soundcards
 * Copyright (c) by Jaroslav Kysela <[email protected]>
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/gus.h>
#include <sound/control.h>

#include <asm/dma.h>

MODULE_AUTHOR("Jaroslav Kysela <[email protected]>");
MODULE_DESCRIPTION("Routines for Gravis UltraSound soundcards");
MODULE_LICENSE("GPL");

/* Forward declaration: programs the card's mixer/IRQ/DMA latch registers. */
static int snd_gus_init_dma_irq(struct snd_gus_card * gus, int latches);

/*
 * Take a reference on the card's owning module.
 * Returns 1 on success, 0 if the module is going away.
 */
int snd_gus_use_inc(struct snd_gus_card * gus)
{
	if (!try_module_get(gus->card->module))
		return 0;
	return 1;
}

/* Drop the module reference taken by snd_gus_use_inc(). */
void snd_gus_use_dec(struct snd_gus_card * gus)
{
	module_put(gus->card->module);
}

/* "Joystick Speed" mixer control: a single integer in the range 0..31. */
static int snd_gus_joystick_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 31;
	return 0;
}

/* Read back the cached joystick DAC level (low 5 bits only). */
static int snd_gus_joystick_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_gus_card *gus = snd_kcontrol_chip(kcontrol);

	ucontrol->value.integer.value[0] = gus->joystick_dac & 31;
	return 0;
}

/*
 * Store a new joystick DAC level and write it to the GF1 global
 * register under reg_lock.  Returns 1 if the value changed, 0 otherwise
 * (standard ALSA control put() convention).
 */
static int snd_gus_joystick_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_gus_card *gus = snd_kcontrol_chip(kcontrol);
	unsigned long flags;
	int change;
	unsigned char nval;

	nval = ucontrol->value.integer.value[0] & 31;
	spin_lock_irqsave(&gus->reg_lock, flags);
	change = gus->joystick_dac != nval;
	gus->joystick_dac = nval;
	snd_gf1_write8(gus, SNDRV_GF1_GB_JOYSTICK_DAC_LEVEL, gus->joystick_dac);
	spin_unlock_irqrestore(&gus->reg_lock, flags);
	return change;
}

static const struct snd_kcontrol_new snd_gus_joystick_control = {
	.iface = SNDRV_CTL_ELEM_IFACE_CARD,
	.name = "Joystick Speed",
	.info = snd_gus_joystick_info,
	.get = snd_gus_joystick_get,
	.put = snd_gus_joystick_put
};

/* Register card-level controls; the ACE variant gets no joystick control. */
static void snd_gus_init_control(struct snd_gus_card *gus)
{
	if (!gus->ace_flag)
		snd_ctl_add(gus->card, snd_ctl_new1(&snd_gus_joystick_control, gus));
}

/*
 *
 */

/*
 * Tear down a snd_gus_card: stop the GF1 (only if the synth port was
 * actually claimed), then release ports, IRQ and DMA channels and free
 * the structure.  Safe to call on a partially constructed card, which
 * is why snd_gus_create() uses it on every error path.
 */
static int snd_gus_free(struct snd_gus_card *gus)
{
	if (gus->gf1.res_port2 == NULL)
		goto __hw_end;	/* hardware never initialized; skip stop */
	snd_gf1_stop(gus);
	snd_gus_init_dma_irq(gus, 0);
      __hw_end:
	release_and_free_resource(gus->gf1.res_port1);
	release_and_free_resource(gus->gf1.res_port2);
	if (gus->gf1.irq >= 0)
		free_irq(gus->gf1.irq, (void *) gus);
	if (gus->gf1.dma1 >= 0) {
		disable_dma(gus->gf1.dma1);
		free_dma(gus->gf1.dma1);
	}
	/* dma2 aliases dma1 when equal_dma is set; free it only once */
	if (!gus->equal_dma && gus->gf1.dma2 >= 0) {
		disable_dma(gus->gf1.dma2);
		free_dma(gus->gf1.dma2);
	}
	kfree(gus);
	return 0;
}

/* snd_device_ops.dev_free hook: forwards to snd_gus_free(). */
static int snd_gus_dev_free(struct snd_device *device)
{
	struct snd_gus_card *gus = device->device_data;
	return snd_gus_free(gus);
}

/*
 * Allocate and set up a snd_gus_card instance.
 *
 * @card:         owning ALSA card
 * @port:         base I/O port of the card
 * @irq:          IRQ line, or negative to skip IRQ allocation
 * @dma1, @dma2:  ISA DMA channels; if dma2 < 0 or equals dma1,
 *                a single shared channel is used (equal_dma mode)
 * @timer_dev:    timer device selector, stored verbatim
 * @voices:       requested GF1 voices, clamped to 14..32
 * @pcm_channels: requested PCM channels, clamped to 0..8 and then
 *                rounded to an even value
 * @effect:       non-zero enables the effects engine flag
 * @rgus:         on success receives the new card object
 *
 * Returns 0 on success or a negative errno; all resources acquired so
 * far are released via snd_gus_free() on failure.
 */
int snd_gus_create(struct snd_card *card,
		   unsigned long port,
		   int irq, int dma1, int dma2,
		   int timer_dev,
		   int voices,
		   int pcm_channels,
		   int effect,
		   struct snd_gus_card **rgus)
{
	struct snd_gus_card *gus;
	int err;
	static const struct snd_device_ops ops = {
		.dev_free = snd_gus_dev_free,
	};

	*rgus = NULL;
	gus = kzalloc(sizeof(*gus), GFP_KERNEL);
	if (gus == NULL)
		return -ENOMEM;
	spin_lock_init(&gus->reg_lock);
	spin_lock_init(&gus->voice_alloc);
	spin_lock_init(&gus->active_voice_lock);
	spin_lock_init(&gus->event_lock);
	spin_lock_init(&gus->dma_lock);
	spin_lock_init(&gus->pcm_volume_level_lock);
	spin_lock_init(&gus->uart_cmd_lock);
	mutex_init(&gus->dma_mutex);
	/* -1 marks "not allocated" so snd_gus_free() can skip them */
	gus->gf1.irq = -1;
	gus->gf1.dma1 = -1;
	gus->gf1.dma2 = -1;
	gus->card = card;
	gus->gf1.port = port;
	/* fill register variables for speedup */
	gus->gf1.reg_page = GUSP(gus, GF1PAGE);
	gus->gf1.reg_regsel = GUSP(gus, GF1REGSEL);
	gus->gf1.reg_data8 = GUSP(gus, GF1DATAHIGH);
	gus->gf1.reg_data16 = GUSP(gus, GF1DATALOW);
	gus->gf1.reg_irqstat = GUSP(gus, IRQSTAT);
	gus->gf1.reg_dram = GUSP(gus, DRAM);
	gus->gf1.reg_timerctrl = GUSP(gus, TIMERCNTRL);
	gus->gf1.reg_timerdata = GUSP(gus, TIMERDATA);
	/* allocate resources */
	gus->gf1.res_port1 = request_region(port, 16, "GUS GF1 (Adlib/SB)");
	if (!gus->gf1.res_port1) {
		snd_printk(KERN_ERR "gus: can't grab SB port 0x%lx\n", port);
		snd_gus_free(gus);
		return -EBUSY;
	}
	gus->gf1.res_port2 = request_region(port + 0x100, 12, "GUS GF1 (Synth)");
	if (!gus->gf1.res_port2) {
		snd_printk(KERN_ERR "gus: can't grab synth port 0x%lx\n", port + 0x100);
		snd_gus_free(gus);
		return -EBUSY;
	}
	if (irq >= 0 && request_irq(irq, snd_gus_interrupt, 0, "GUS GF1", (void *) gus)) {
		snd_printk(KERN_ERR "gus: can't grab irq %d\n", irq);
		snd_gus_free(gus);
		return -EBUSY;
	}
	gus->gf1.irq = irq;
	card->sync_irq = irq;
	if (request_dma(dma1, "GUS - 1")) {
		snd_printk(KERN_ERR "gus: can't grab DMA1 %d\n", dma1);
		snd_gus_free(gus);
		return -EBUSY;
	}
	gus->gf1.dma1 = dma1;
	if (dma2 >= 0 && dma1 != dma2) {
		if (request_dma(dma2, "GUS - 2")) {
			snd_printk(KERN_ERR "gus: can't grab DMA2 %d\n", dma2);
			snd_gus_free(gus);
			return -EBUSY;
		}
		gus->gf1.dma2 = dma2;
	} else {
		/* single DMA channel shared for playback and record */
		gus->gf1.dma2 = gus->gf1.dma1;
		gus->equal_dma = 1;
	}
	gus->timer_dev = timer_dev;
	if (voices < 14)
		voices = 14;
	if (voices > 32)
		voices = 32;
	if (pcm_channels < 0)
		pcm_channels = 0;
	if (pcm_channels > 8)
		pcm_channels = 8;
	/* round channel count up to the next even value */
	pcm_channels++;
	pcm_channels &= ~1;
	gus->gf1.effect = effect ? 1 : 0;
	gus->gf1.active_voices = voices;
	gus->gf1.pcm_channels = pcm_channels;
	gus->gf1.volume_ramp = 25;
	gus->gf1.smooth_pan = 1;
	err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, gus, &ops);
	if (err < 0) {
		snd_gus_free(gus);
		return err;
	}
	*rgus = gus;
	return 0;
}

/*
 * Memory detection routine for plain GF1 soundcards
 *
 * Probes on-board DRAM by writing marker bytes at 256 KB boundaries
 * and reading them back; fills in gus->gf1.memory and the 8-/16-bit
 * bank allocation tables.  Returns -ENOMEM if no DRAM responds at all.
 */
static int snd_gus_detect_memory(struct snd_gus_card * gus)
{
	int l, idx, local;
	unsigned char d;

	snd_gf1_poke(gus, 0L, 0xaa);
	snd_gf1_poke(gus, 1L, 0x55);
	if (snd_gf1_peek(gus, 0L) != 0xaa || snd_gf1_peek(gus, 1L) != 0x55) {
		snd_printk(KERN_ERR "plain GF1 card at 0x%lx without onboard DRAM?\n", gus->gf1.port);
		return -ENOMEM;
	}
	/* probe each further 256 KB bank; also re-check offset 0 to catch
	 * address wrap-around on smaller configurations */
	for (idx = 1, d = 0xab; idx < 4; idx++, d++) {
		local = idx << 18;
		snd_gf1_poke(gus, local, d);
		snd_gf1_poke(gus, local + 1, d + 1);
		if (snd_gf1_peek(gus, local) != d ||
		    snd_gf1_peek(gus, local + 1) != d + 1 ||
		    snd_gf1_peek(gus, 0L) != 0xaa)
			break;
	}
#if 1
	gus->gf1.memory = idx << 18;
#else
	gus->gf1.memory = 256 * 1024;
#endif
	for (l = 0, local = gus->gf1.memory; l < 4; l++, local -= 256 * 1024) {
		gus->gf1.mem_alloc.banks_8[l].address =
		    gus->gf1.mem_alloc.banks_8[l].size = 0;
		gus->gf1.mem_alloc.banks_16[l].address = l << 18;
		gus->gf1.mem_alloc.banks_16[l].size = local > 0 ? 256 * 1024 : 0;
	}
	gus->gf1.mem_alloc.banks_8[0].size = gus->gf1.memory;
	return 0;		/* some memory were detected */
}

/*
 * Program the card's mixer control register and the IRQ/DMA latch
 * registers.  @latches selects whether the IRQ latch is written and
 * the latch-enable bit set (1 during initialization, 0 on shutdown).
 * The double write sequence separated by udelay(100) mirrors the
 * original GUS programming procedure.
 */
static int snd_gus_init_dma_irq(struct snd_gus_card * gus, int latches)
{
	struct snd_card *card;
	unsigned long flags;
	int irq, dma1, dma2;
	/* map raw IRQ/DMA numbers to the encodings the latch expects;
	 * zero entries mean "not usable on this hardware" */
	static const unsigned char irqs[16] =
		{0, 0, 1, 3, 0, 2, 0, 4, 0, 1, 0, 5, 6, 0, 0, 7};
	static const unsigned char dmas[8] =
		{6, 1, 0, 2, 0, 3, 4, 5};

	if (snd_BUG_ON(!gus))
		return -EINVAL;
	card = gus->card;
	if (snd_BUG_ON(!card))
		return -EINVAL;

	gus->mix_cntrl_reg &= 0xf8;
	gus->mix_cntrl_reg |= 0x01;	/* disable MIC, LINE IN, enable LINE OUT */
	if (gus->codec_flag || gus->ess_flag) {
		gus->mix_cntrl_reg &= ~1;	/* enable LINE IN */
		gus->mix_cntrl_reg |= 4;	/* enable MIC */
	}
	dma1 = gus->gf1.dma1;
	dma1 = abs(dma1);
	dma1 = dmas[dma1 & 7];
	dma2 = gus->gf1.dma2;
	dma2 = abs(dma2);
	dma2 = dmas[dma2 & 7];
	/* 0x40 selects "both channels share one DMA" in the latch */
	dma1 |= gus->equal_dma ? 0x40 : (dma2 << 3);

	if ((dma1 & 7) == 0 || (dma2 & 7) == 0) {
		snd_printk(KERN_ERR "Error! DMA isn't defined.\n");
		return -EINVAL;
	}
	irq = gus->gf1.irq;
	irq = abs(irq);
	irq = irqs[irq & 0x0f];
	if (irq == 0) {
		snd_printk(KERN_ERR "Error! IRQ isn't defined.\n");
		return -EINVAL;
	}
	irq |= 0x40;
#if 0
	card->mixer.mix_ctrl_reg |= 0x10;
#endif

	spin_lock_irqsave(&gus->reg_lock, flags);
	outb(5, GUSP(gus, REGCNTRLS));
	outb(gus->mix_cntrl_reg, GUSP(gus, MIXCNTRLREG));
	outb(0x00, GUSP(gus, IRQDMACNTRLREG));
	outb(0, GUSP(gus, REGCNTRLS));
	spin_unlock_irqrestore(&gus->reg_lock, flags);

	udelay(100);

	/* write the DMA latch (and optionally the IRQ latch) twice,
	 * as required by the hardware programming sequence */
	spin_lock_irqsave(&gus->reg_lock, flags);
	outb(0x00 | gus->mix_cntrl_reg, GUSP(gus, MIXCNTRLREG));
	outb(dma1, GUSP(gus, IRQDMACNTRLREG));
	if (latches) {
		outb(0x40 | gus->mix_cntrl_reg, GUSP(gus, MIXCNTRLREG));
		outb(irq, GUSP(gus, IRQDMACNTRLREG));
	}
	spin_unlock_irqrestore(&gus->reg_lock, flags);

	udelay(100);

	spin_lock_irqsave(&gus->reg_lock, flags);
	outb(0x00 | gus->mix_cntrl_reg, GUSP(gus, MIXCNTRLREG));
	outb(dma1, GUSP(gus, IRQDMACNTRLREG));
	if (latches) {
		outb(0x40 | gus->mix_cntrl_reg, GUSP(gus, MIXCNTRLREG));
		outb(irq, GUSP(gus, IRQDMACNTRLREG));
	}
	spin_unlock_irqrestore(&gus->reg_lock, flags);

	snd_gf1_delay(gus);

	if (latches)
		gus->mix_cntrl_reg |= 0x08;	/* enable latches */
	else
		gus->mix_cntrl_reg &= ~0x08;	/* disable latches */
	spin_lock_irqsave(&gus->reg_lock, flags);
	outb(gus->mix_cntrl_reg, GUSP(gus, MIXCNTRLREG));
	outb(0, GUSP(gus, GF1PAGE));
	spin_unlock_irqrestore(&gus->reg_lock, flags);

	return 0;
}

/*
 * Read the board revision register and classify the card variant
 * (Classic / 3.x / MAX / ACE / Extreme), setting the matching flag
 * in *gus and filling card->driver / card->longname accordingly.
 */
static int snd_gus_check_version(struct snd_gus_card * gus)
{
	unsigned long flags;
	unsigned char val, rev;
	struct snd_card *card;

	card = gus->card;
	spin_lock_irqsave(&gus->reg_lock, flags);
	outb(0x20, GUSP(gus, REGCNTRLS));
	val = inb(GUSP(gus, REGCNTRLS));
	rev = inb(GUSP(gus, BOARDVERSION));
	spin_unlock_irqrestore(&gus->reg_lock, flags);
	snd_printdd("GF1 [0x%lx] init - val = 0x%x, rev = 0x%x\n", gus->gf1.port, val, rev);
	strcpy(card->driver, "GUS");
	strcpy(card->longname, "Gravis UltraSound Classic (2.4)");
	if ((val != 255 && (val & 0x06)) || (rev >= 5 && rev != 255)) {
		if (rev >= 5 && rev <= 9) {
			gus->ics_flag = 1;
			if (rev == 5)
				gus->ics_flipped = 1;
			/* patch version digits in "... Classic (2.4)" in place
			 * to read (3.5) or (3.7) */
			card->longname[27] = '3';
			card->longname[29] = rev == 5 ? '5' : '7';
		}
		if (rev >= 10 && rev != 255) {
			if (rev >= 10 && rev <= 11) {
				strcpy(card->driver, "GUS MAX");
				strcpy(card->longname, "Gravis UltraSound MAX");
				gus->max_flag = 1;
			} else if (rev == 0x30) {
				strcpy(card->driver, "GUS ACE");
				strcpy(card->longname, "Gravis UltraSound Ace");
				gus->ace_flag = 1;
			} else if (rev == 0x50) {
				strcpy(card->driver, "GUS Extreme");
				strcpy(card->longname, "Gravis UltraSound Extreme");
				gus->ess_flag = 1;
			} else {
				snd_printk(KERN_ERR "unknown GF1 revision number at 0x%lx - 0x%x (0x%x)\n", gus->gf1.port, rev, val);
				snd_printk(KERN_ERR " please - report to <[email protected]>\n");
			}
		}
	}
	strscpy(card->shortname, card->longname, sizeof(card->shortname));
	gus->uart_enable = 1;	/* standard GUSes doesn't have midi uart trouble */
	snd_gus_init_control(gus);
	return 0;
}

/*
 * Full card bring-up: version check and memory detection (skipped for
 * InterWave cards, which handle this elsewhere), IRQ/DMA latch setup,
 * then GF1 start.  Returns 0 or a negative errno.
 */
int snd_gus_initialize(struct snd_gus_card *gus)
{
	int err;

	if (!gus->interwave) {
		err = snd_gus_check_version(gus);
		if (err < 0) {
			snd_printk(KERN_ERR "version check failed\n");
			return err;
		}
		err = snd_gus_detect_memory(gus);
		if (err < 0)
			return err;
	}
	err = snd_gus_init_dma_irq(gus, 1);
	if (err < 0)
		return err;
	snd_gf1_start(gus);
	gus->initialized = 1;
	return 0;
}

/* Symbols consumed by the other gus_* modules of this driver family. */

  /* gus_io.c */
EXPORT_SYMBOL(snd_gf1_delay);
EXPORT_SYMBOL(snd_gf1_write8);
EXPORT_SYMBOL(snd_gf1_look8);
EXPORT_SYMBOL(snd_gf1_write16);
EXPORT_SYMBOL(snd_gf1_look16);
EXPORT_SYMBOL(snd_gf1_i_write8);
EXPORT_SYMBOL(snd_gf1_i_look8);
EXPORT_SYMBOL(snd_gf1_i_look16);
EXPORT_SYMBOL(snd_gf1_dram_addr);
EXPORT_SYMBOL(snd_gf1_write_addr);
EXPORT_SYMBOL(snd_gf1_poke);
EXPORT_SYMBOL(snd_gf1_peek);
  /* gus_reset.c */
EXPORT_SYMBOL(snd_gf1_alloc_voice);
EXPORT_SYMBOL(snd_gf1_free_voice);
EXPORT_SYMBOL(snd_gf1_ctrl_stop);
EXPORT_SYMBOL(snd_gf1_stop_voice);
  /* gus_mixer.c */
EXPORT_SYMBOL(snd_gf1_new_mixer);
  /* gus_pcm.c */
EXPORT_SYMBOL(snd_gf1_pcm_new);
  /* gus.c */
EXPORT_SYMBOL(snd_gus_use_inc);
EXPORT_SYMBOL(snd_gus_use_dec);
EXPORT_SYMBOL(snd_gus_create);
EXPORT_SYMBOL(snd_gus_initialize);
  /* gus_irq.c */
EXPORT_SYMBOL(snd_gus_interrupt);
  /* gus_uart.c */
EXPORT_SYMBOL(snd_gf1_rawmidi_new);
  /* gus_dram.c */
EXPORT_SYMBOL(snd_gus_dram_write);
EXPORT_SYMBOL(snd_gus_dram_read);
  /* gus_volume.c */
EXPORT_SYMBOL(snd_gf1_lvol_to_gvol_raw);
EXPORT_SYMBOL(snd_gf1_translate_freq);
  /* gus_mem.c */
EXPORT_SYMBOL(snd_gf1_mem_alloc);
EXPORT_SYMBOL(snd_gf1_mem_xfree);
EXPORT_SYMBOL(snd_gf1_mem_free);
EXPORT_SYMBOL(snd_gf1_mem_lock);
linux-master
sound/isa/gus/gus_main.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) by Jaroslav Kysela <[email protected]>
 */

#include <linux/time.h>
#include <linux/export.h>
#include <sound/core.h>
#include <sound/gus.h>
#define __GUS_TABLES_ALLOC__
#include "gus_tables.h"

EXPORT_SYMBOL(snd_gf1_atten_table); /* for snd-gus-synth module */

/*
 * Convert a 16-bit linear volume (0..65535) to the GF1's raw
 * exponent/mantissa volume format: (e << 8) | m, with a 4-bit-ish
 * exponent in the high byte and an 8-bit mantissa in the low byte.
 * Values above 65535 are clamped.
 */
unsigned short snd_gf1_lvol_to_gvol_raw(unsigned int vol)
{
	unsigned short e, m, tmp;

	if (vol > 65535)
		vol = 65535;
	tmp = vol;
	e = 7;
	/* find the position of the most significant set bit */
	if (tmp < 128) {
		while (e > 0 && tmp < (1 << e))
			e--;
	} else {
		while (tmp > 255) {
			tmp >>= 1;
			e++;
		}
	}
	/* mantissa = remainder below the leading bit, scaled to 8 bits */
	m = vol - (1 << e);
	if (m > 0) {
		if (e > 8)
			m >>= e - 8;
		else if (e < 8)
			m <<= 8 - e;
		m &= 255;
	}
	return (e << 8) | m;
}

#if 0

/* Inverse of snd_gf1_lvol_to_gvol_raw(); currently compiled out. */
unsigned int snd_gf1_gvol_to_lvol_raw(unsigned short gf1_vol)
{
	unsigned int rvol;
	unsigned short e, m;

	if (!gf1_vol)
		return 0;
	e = gf1_vol >> 8;
	m = (unsigned char) gf1_vol;
	rvol = 1 << e;
	if (e > 8)
		return rvol | (m << (e - 8));
	return rvol | (m >> (8 - e));
}

/*
 * Compute a GF1 volume-ramp rate register value (range << 6 | increment)
 * for moving from @start to @end in @us microseconds; compiled out.
 * NOTE(review): indexes vol_rates[] by active_voices - 14; presumably
 * active_voices is always in 14..32 here — confirm against callers.
 */
unsigned int snd_gf1_calc_ramp_rate(struct snd_gus_card * gus,
				    unsigned short start,
				    unsigned short end,
				    unsigned int us)
{
	static const unsigned char vol_rates[19] = {
		23, 24, 26, 28, 29, 31, 32, 34,
		36, 37, 39, 40, 42, 44, 45, 47,
		49, 50, 52
	};
	unsigned short range, increment, value, i;

	start >>= 4;
	end >>= 4;
	if (start < end)
		us /= end - start;
	else
		us /= start - end;
	range = 4;
	value = gus->gf1.enh_mode ?
	    vol_rates[0] : vol_rates[gus->gf1.active_voices - 14];
	for (i = 0; i < 3; i++) {
		if (us < value) {
			range = i;
			break;
		} else
			value <<= 3;
	}
	if (range == 4) {
		range = 3;
		increment = 1;
	} else
		increment = (value + (value >> 1)) / us;
	return (range << 6) | (increment & 0x3f);
}

#endif  /*  0  */

/*
 * Translate a frequency given in 16.16-ish fixed point (freq16) into
 * the GF1 frequency register value, scaled by the current playback
 * frequency with round-to-nearest.  Out-of-range input is clamped and
 * logged.
 */
unsigned short snd_gf1_translate_freq(struct snd_gus_card * gus, unsigned int freq16)
{
	freq16 >>= 3;
	if (freq16 < 50)
		freq16 = 50;
	if (freq16 & 0xf8000000) {
		/* clamp to the largest representable value (0x07ffffff) */
		freq16 = ~0xf8000000;
		snd_printk(KERN_ERR "snd_gf1_translate_freq: overflow - freq = 0x%x\n", freq16);
	}
	return ((freq16 << 9) + (gus->gf1.playback_freq >> 1)) / gus->gf1.playback_freq;
}

#if 0

/*
 * Compute the vibrato depth register value for a vibrato of @cents
 * (signed) at the given FC register value; compiled out.
 */
short snd_gf1_compute_vibrato(short cents, unsigned short fc_register)
{
	static const short vibrato_table[] = {
		0, 0, 32, 592, 61, 1175, 93, 1808, 124, 2433,
		152, 3007, 182, 3632, 213, 4290, 241, 4834, 255, 5200
	};

	long depth;
	const short *vi1, *vi2;
	short pcents, v1;

	pcents = cents < 0 ? -cents : cents;
	/* find the table pair bracketing pcents */
	for (vi1 = vibrato_table, vi2 = vi1 + 2; pcents > *vi2; vi1 = vi2, vi2 += 2);
	v1 = *(vi1 + 1);

	/* The FC table above is a list of pairs.  The first number in the
	   pair is the cents index from 0-255 cents, and the second number
	   in the pair is the FC adjustment needed to change the pitch by
	   the indexed number of cents.  The table was created for an FC of
	   32768.

	   The following expression does a linear interpolation against the
	   approximated log curve in the table above, and then scales the
	   number by the FC before the LFO.  This calculation also adjusts
	   the output value to produce the appropriate depth for the
	   hardware.  The depth is 2 * desired FC + 1. */

	depth = (((int) (*(vi2 + 1) - *vi1) * (pcents - *vi1) / (*vi2 - *vi1)) + v1) * fc_register >> 14;
	if (depth)
		depth++;
	if (depth > 255)
		depth = 255;
	return cents < 0 ? -(short) depth : (short) depth;
}

/*
 * Map a 14-bit MIDI pitch-wheel value and sensitivity to a frequency
 * multiplier scaled so that 1024 means "no bend"; compiled out.
 */
unsigned short snd_gf1_compute_pitchbend(unsigned short pitchbend, unsigned short sens)
{
	static const long log_table[] = {1024, 1085, 1149, 1218, 1290, 1367, 1448, 1534, 1625, 1722, 1825, 1933};
	int wheel, sensitivity;
	unsigned int mantissa, f1, f2;
	unsigned short semitones, f1_index, f2_index, f1_power, f2_power;
	char bend_down = 0;
	int bend;

	if (!sens)
		return 1024;	/* unity: no bend */
	wheel = (int) pitchbend - 8192;
	sensitivity = ((int) sens * wheel) / 128;
	if (sensitivity < 0) {
		bend_down = 1;
		sensitivity = -sensitivity;
	}
	semitones = (unsigned int) (sensitivity >> 13);
	mantissa = sensitivity % 8192;
	f1_index = semitones % 12;
	f2_index = (semitones + 1) % 12;
	f1_power = semitones / 12;
	f2_power = (semitones + 1) / 12;
	f1 = log_table[f1_index] << f1_power;
	f2 = log_table[f2_index] << f2_power;
	/* linear interpolation between the two semitone entries */
	bend = (int) ((((f2 - f1) * mantissa) >> 13) + f1);
	if (bend_down)
		bend = 1048576L / bend;
	return bend;
}

/*
 * Convert a sample playback frequency to the GF1 FC register value for
 * the given sample @rate and card @mix_rate; compiled out.  Overflows
 * are clamped and logged at both scaling stages.
 */
unsigned short snd_gf1_compute_freq(unsigned int freq,
				    unsigned int rate,
				    unsigned short mix_rate)
{
	unsigned int fc;
	int scale = 0;

	while (freq >= 4194304L) {
		scale++;
		freq >>= 1;
	}
	fc = (freq << 10) / rate;
	if (fc > 97391L) {
		fc = 97391;
		snd_printk(KERN_ERR "patch: (1) fc frequency overflow - %u\n", fc);
	}
	fc = (fc * 44100UL) / mix_rate;
	while (scale--)
		fc <<= 1;
	if (fc > 65535L) {
		fc = 65535;
		snd_printk(KERN_ERR "patch: (2) fc frequency overflow - %u\n", fc);
	}
	return (unsigned short) fc;
}

#endif  /*  0  */
linux-master
sound/isa/gus/gus_volume.c
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) by Paul Barton-Davis 1998-1999 * * Some portions of this file are taken from work that is * copyright (C) by Hannu Savolainen 1993-1996 */ /* * An ALSA lowlevel driver for Turtle Beach ICS2115 wavetable synth * (Maui, Tropez, Tropez Plus) * * This driver supports the onboard wavetable synthesizer (an ICS2115), * including patch, sample and program loading and unloading, conversion * of GUS patches during loading, and full user-level access to all * WaveFront commands. It tries to provide semi-intelligent patch and * sample management as well. * */ #include <linux/io.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/sched/signal.h> #include <linux/firmware.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/module.h> #include <sound/core.h> #include <sound/snd_wavefront.h> #include <sound/initval.h> static int wf_raw = 0; /* we normally check for "raw state" to firmware loading. if non-zero, then during driver loading, the state of the board is ignored, and we reset the board and load the firmware anyway. */ static int fx_raw = 1; /* if this is zero, we'll leave the FX processor in whatever state it is when the driver is loaded. The default is to download the microprogram and associated coefficients to set it up for "default" operation, whatever that means. */ static int debug_default = 0; /* you can set this to control debugging during driver loading. it takes any combination of the WF_DEBUG_* flags defined in wavefront.h */ /* XXX this needs to be made firmware and hardware version dependent */ #define DEFAULT_OSPATH "wavefront.os" static char *ospath = DEFAULT_OSPATH; /* the firmware file name */ static int wait_usecs = 150; /* This magic number seems to give pretty optimal throughput based on my limited experimentation. 
If you want to play around with it and find a better value, be my guest. Remember, the idea is to get a number that causes us to just busy wait for as many WaveFront commands as possible, without coming up with a number so large that we hog the whole CPU. Specifically, with this number, out of about 134,000 status waits, only about 250 result in a sleep. */ static int sleep_interval = 100; /* HZ/sleep_interval seconds per sleep */ static int sleep_tries = 50; /* number of times we'll try to sleep */ static int reset_time = 2; /* hundreths of a second we wait after a HW reset for the expected interrupt. */ static int ramcheck_time = 20; /* time in seconds to wait while ROM code checks on-board RAM. */ static int osrun_time = 10; /* time in seconds we wait for the OS to start running. */ module_param(wf_raw, int, 0444); MODULE_PARM_DESC(wf_raw, "if non-zero, assume that we need to boot the OS"); module_param(fx_raw, int, 0444); MODULE_PARM_DESC(fx_raw, "if non-zero, assume that the FX process needs help"); module_param(debug_default, int, 0444); MODULE_PARM_DESC(debug_default, "debug parameters for card initialization"); module_param(wait_usecs, int, 0444); MODULE_PARM_DESC(wait_usecs, "how long to wait without sleeping, usecs"); module_param(sleep_interval, int, 0444); MODULE_PARM_DESC(sleep_interval, "how long to sleep when waiting for reply"); module_param(sleep_tries, int, 0444); MODULE_PARM_DESC(sleep_tries, "how many times to try sleeping during a wait"); module_param(ospath, charp, 0444); MODULE_PARM_DESC(ospath, "pathname to processed ICS2115 OS firmware"); module_param(reset_time, int, 0444); MODULE_PARM_DESC(reset_time, "how long to wait for a reset to take effect"); module_param(ramcheck_time, int, 0444); MODULE_PARM_DESC(ramcheck_time, "how many seconds to wait for the RAM test"); module_param(osrun_time, int, 0444); MODULE_PARM_DESC(osrun_time, "how many seconds to wait for the ICS2115 OS"); /* if WF_DEBUG not defined, no run-time debugging messages will 
be available via the debug flag setting. Given the current beta state of the driver, this will remain set until a future version. */ #define WF_DEBUG 1 #ifdef WF_DEBUG #define DPRINT(cond, ...) \ if ((dev->debug & (cond)) == (cond)) { \ snd_printk (__VA_ARGS__); \ } #else #define DPRINT(cond, args...) #endif /* WF_DEBUG */ #define LOGNAME "WaveFront: " /* bitmasks for WaveFront status port value */ #define STAT_RINTR_ENABLED 0x01 #define STAT_CAN_READ 0x02 #define STAT_INTR_READ 0x04 #define STAT_WINTR_ENABLED 0x10 #define STAT_CAN_WRITE 0x20 #define STAT_INTR_WRITE 0x40 static int wavefront_delete_sample (snd_wavefront_t *, int sampnum); static int wavefront_find_free_sample (snd_wavefront_t *); struct wavefront_command { int cmd; char *action; unsigned int read_cnt; unsigned int write_cnt; int need_ack; }; static struct { int errno; const char *errstr; } wavefront_errors[] = { { 0x01, "Bad sample number" }, { 0x02, "Out of sample memory" }, { 0x03, "Bad patch number" }, { 0x04, "Error in number of voices" }, { 0x06, "Sample load already in progress" }, { 0x0B, "No sample load request pending" }, { 0x0E, "Bad MIDI channel number" }, { 0x10, "Download Record Error" }, { 0x80, "Success" }, { 0x0 } }; #define NEEDS_ACK 1 static struct wavefront_command wavefront_commands[] = { { WFC_SET_SYNTHVOL, "set synthesizer volume", 0, 1, NEEDS_ACK }, { WFC_GET_SYNTHVOL, "get synthesizer volume", 1, 0, 0}, { WFC_SET_NVOICES, "set number of voices", 0, 1, NEEDS_ACK }, { WFC_GET_NVOICES, "get number of voices", 1, 0, 0 }, { WFC_SET_TUNING, "set synthesizer tuning", 0, 2, NEEDS_ACK }, { WFC_GET_TUNING, "get synthesizer tuning", 2, 0, 0 }, { WFC_DISABLE_CHANNEL, "disable synth channel", 0, 1, NEEDS_ACK }, { WFC_ENABLE_CHANNEL, "enable synth channel", 0, 1, NEEDS_ACK }, { WFC_GET_CHANNEL_STATUS, "get synth channel status", 3, 0, 0 }, { WFC_MISYNTH_OFF, "disable midi-in to synth", 0, 0, NEEDS_ACK }, { WFC_MISYNTH_ON, "enable midi-in to synth", 0, 0, NEEDS_ACK }, { WFC_VMIDI_ON, 
"enable virtual midi mode", 0, 0, NEEDS_ACK }, { WFC_VMIDI_OFF, "disable virtual midi mode", 0, 0, NEEDS_ACK }, { WFC_MIDI_STATUS, "report midi status", 1, 0, 0 }, { WFC_FIRMWARE_VERSION, "report firmware version", 2, 0, 0 }, { WFC_HARDWARE_VERSION, "report hardware version", 2, 0, 0 }, { WFC_GET_NSAMPLES, "report number of samples", 2, 0, 0 }, { WFC_INSTOUT_LEVELS, "report instantaneous output levels", 7, 0, 0 }, { WFC_PEAKOUT_LEVELS, "report peak output levels", 7, 0, 0 }, { WFC_DOWNLOAD_SAMPLE, "download sample", 0, WF_SAMPLE_BYTES, NEEDS_ACK }, { WFC_DOWNLOAD_BLOCK, "download block", 0, 0, NEEDS_ACK}, { WFC_DOWNLOAD_SAMPLE_HEADER, "download sample header", 0, WF_SAMPLE_HDR_BYTES, NEEDS_ACK }, { WFC_UPLOAD_SAMPLE_HEADER, "upload sample header", 13, 2, 0 }, /* This command requires a variable number of bytes to be written. There is a hack in snd_wavefront_cmd() to support this. The actual count is passed in as the read buffer ptr, cast appropriately. Ugh. */ { WFC_DOWNLOAD_MULTISAMPLE, "download multisample", 0, 0, NEEDS_ACK }, /* This one is a hack as well. We just read the first byte of the response, don't fetch an ACK, and leave the rest to the calling function. Ugly, ugly, ugly. 
*/ { WFC_UPLOAD_MULTISAMPLE, "upload multisample", 2, 1, 0 }, { WFC_DOWNLOAD_SAMPLE_ALIAS, "download sample alias", 0, WF_ALIAS_BYTES, NEEDS_ACK }, { WFC_UPLOAD_SAMPLE_ALIAS, "upload sample alias", WF_ALIAS_BYTES, 2, 0}, { WFC_DELETE_SAMPLE, "delete sample", 0, 2, NEEDS_ACK }, { WFC_IDENTIFY_SAMPLE_TYPE, "identify sample type", 5, 2, 0 }, { WFC_UPLOAD_SAMPLE_PARAMS, "upload sample parameters" }, { WFC_REPORT_FREE_MEMORY, "report free memory", 4, 0, 0 }, { WFC_DOWNLOAD_PATCH, "download patch", 0, 134, NEEDS_ACK }, { WFC_UPLOAD_PATCH, "upload patch", 132, 2, 0 }, { WFC_DOWNLOAD_PROGRAM, "download program", 0, 33, NEEDS_ACK }, { WFC_UPLOAD_PROGRAM, "upload program", 32, 1, 0 }, { WFC_DOWNLOAD_EDRUM_PROGRAM, "download enhanced drum program", 0, 9, NEEDS_ACK}, { WFC_UPLOAD_EDRUM_PROGRAM, "upload enhanced drum program", 8, 1, 0}, { WFC_SET_EDRUM_CHANNEL, "set enhanced drum program channel", 0, 1, NEEDS_ACK }, { WFC_DISABLE_DRUM_PROGRAM, "disable drum program", 0, 1, NEEDS_ACK }, { WFC_REPORT_CHANNEL_PROGRAMS, "report channel program numbers", 32, 0, 0 }, { WFC_NOOP, "the no-op command", 0, 0, NEEDS_ACK }, { 0x00 } }; static const char * wavefront_errorstr (int errnum) { int i; for (i = 0; wavefront_errors[i].errstr; i++) { if (wavefront_errors[i].errno == errnum) { return wavefront_errors[i].errstr; } } return "Unknown WaveFront error"; } static struct wavefront_command * wavefront_get_command (int cmd) { int i; for (i = 0; wavefront_commands[i].cmd != 0; i++) { if (cmd == wavefront_commands[i].cmd) { return &wavefront_commands[i]; } } return NULL; } static inline int wavefront_status (snd_wavefront_t *dev) { return inb (dev->status_port); } static int wavefront_sleep (int limit) { schedule_timeout_interruptible(limit); return signal_pending(current); } static int wavefront_wait (snd_wavefront_t *dev, int mask) { int i; /* Spin for a short period of time, because >99% of all requests to the WaveFront can be serviced inline like this. 
*/ for (i = 0; i < wait_usecs; i += 5) { if (wavefront_status (dev) & mask) { return 1; } udelay(5); } for (i = 0; i < sleep_tries; i++) { if (wavefront_status (dev) & mask) { return 1; } if (wavefront_sleep (HZ/sleep_interval)) { return (0); } } return (0); } static int wavefront_read (snd_wavefront_t *dev) { if (wavefront_wait (dev, STAT_CAN_READ)) return inb (dev->data_port); DPRINT (WF_DEBUG_DATA, "read timeout.\n"); return -1; } static int wavefront_write (snd_wavefront_t *dev, unsigned char data) { if (wavefront_wait (dev, STAT_CAN_WRITE)) { outb (data, dev->data_port); return 0; } DPRINT (WF_DEBUG_DATA, "write timeout.\n"); return -1; } int snd_wavefront_cmd (snd_wavefront_t *dev, int cmd, unsigned char *rbuf, unsigned char *wbuf) { int ack; unsigned int i; int c; struct wavefront_command *wfcmd; wfcmd = wavefront_get_command(cmd); if (!wfcmd) { snd_printk ("command 0x%x not supported.\n", cmd); return 1; } /* Hack to handle the one variable-size write command. See wavefront_send_multisample() for the other half of this gross and ugly strategy. 
*/ if (cmd == WFC_DOWNLOAD_MULTISAMPLE) { wfcmd->write_cnt = (unsigned long) rbuf; rbuf = NULL; } DPRINT (WF_DEBUG_CMD, "0x%x [%s] (%d,%d,%d)\n", cmd, wfcmd->action, wfcmd->read_cnt, wfcmd->write_cnt, wfcmd->need_ack); if (wavefront_write (dev, cmd)) { DPRINT ((WF_DEBUG_IO|WF_DEBUG_CMD), "cannot request " "0x%x [%s].\n", cmd, wfcmd->action); return 1; } if (wfcmd->write_cnt > 0) { DPRINT (WF_DEBUG_DATA, "writing %d bytes " "for 0x%x\n", wfcmd->write_cnt, cmd); for (i = 0; i < wfcmd->write_cnt; i++) { if (wavefront_write (dev, wbuf[i])) { DPRINT (WF_DEBUG_IO, "bad write for byte " "%d of 0x%x [%s].\n", i, cmd, wfcmd->action); return 1; } DPRINT (WF_DEBUG_DATA, "write[%d] = 0x%x\n", i, wbuf[i]); } } if (wfcmd->read_cnt > 0) { DPRINT (WF_DEBUG_DATA, "reading %d ints " "for 0x%x\n", wfcmd->read_cnt, cmd); for (i = 0; i < wfcmd->read_cnt; i++) { c = wavefront_read(dev); if (c == -1) { DPRINT (WF_DEBUG_IO, "bad read for byte " "%d of 0x%x [%s].\n", i, cmd, wfcmd->action); return 1; } /* Now handle errors. Lots of special cases here */ if (c == 0xff) { c = wavefront_read(dev); if (c == -1) { DPRINT (WF_DEBUG_IO, "bad read for " "error byte at " "read byte %d " "of 0x%x [%s].\n", i, cmd, wfcmd->action); return 1; } /* Can you believe this madness ? */ if (c == 1 && wfcmd->cmd == WFC_IDENTIFY_SAMPLE_TYPE) { rbuf[0] = WF_ST_EMPTY; return (0); } else if (c == 3 && wfcmd->cmd == WFC_UPLOAD_PATCH) { return 3; } else if (c == 1 && wfcmd->cmd == WFC_UPLOAD_PROGRAM) { return 1; } else { DPRINT (WF_DEBUG_IO, "error %d (%s) " "during " "read for byte " "%d of 0x%x " "[%s].\n", c, wavefront_errorstr (c), i, cmd, wfcmd->action); return 1; } } else { rbuf[i] = c; } DPRINT (WF_DEBUG_DATA, "read[%d] = 0x%x\n",i, rbuf[i]); } } if ((wfcmd->read_cnt == 0 && wfcmd->write_cnt == 0) || wfcmd->need_ack) { DPRINT (WF_DEBUG_CMD, "reading ACK for 0x%x\n", cmd); /* Some commands need an ACK, but return zero instead of the standard value. 
*/ ack = wavefront_read(dev); if (ack == 0) ack = WF_ACK; if (ack != WF_ACK) { if (ack == -1) { DPRINT (WF_DEBUG_IO, "cannot read ack for " "0x%x [%s].\n", cmd, wfcmd->action); return 1; } else { int err = -1; /* something unknown */ if (ack == 0xff) { /* explicit error */ err = wavefront_read(dev); if (err == -1) { DPRINT (WF_DEBUG_DATA, "cannot read err " "for 0x%x [%s].\n", cmd, wfcmd->action); } } DPRINT (WF_DEBUG_IO, "0x%x [%s] " "failed (0x%x, 0x%x, %s)\n", cmd, wfcmd->action, ack, err, wavefront_errorstr (err)); return -err; } } DPRINT (WF_DEBUG_DATA, "ack received " "for 0x%x [%s]\n", cmd, wfcmd->action); } else { DPRINT (WF_DEBUG_CMD, "0x%x [%s] does not need " "ACK (%d,%d,%d)\n", cmd, wfcmd->action, wfcmd->read_cnt, wfcmd->write_cnt, wfcmd->need_ack); } return 0; } /*********************************************************************** WaveFront data munging Things here are weird. All data written to the board cannot have its most significant bit set. Any data item with values potentially > 0x7F (127) must be split across multiple bytes. Sometimes, we need to munge numeric values that are represented on the x86 side as 8-32 bit values. Sometimes, we need to munge data that is represented on the x86 side as an array of bytes. The most efficient approach to handling both cases seems to be to use 2 different functions for munging and 2 for de-munging. This avoids weird casting and worrying about bit-level offsets. 
**********************************************************************/ static unsigned char * munge_int32 (unsigned int src, unsigned char *dst, unsigned int dst_size) { unsigned int i; for (i = 0; i < dst_size; i++) { *dst = src & 0x7F; /* Mask high bit of LSB */ src = src >> 7; /* Rotate Right 7 bits */ /* Note: we leave the upper bits in place */ dst++; } return dst; }; static int demunge_int32 (unsigned char* src, int src_size) { int i; int outval = 0; for (i = src_size - 1; i >= 0; i--) { outval=(outval<<7)+src[i]; } return outval; }; static unsigned char * munge_buf (unsigned char *src, unsigned char *dst, unsigned int dst_size) { unsigned int i; unsigned int last = dst_size / 2; for (i = 0; i < last; i++) { *dst++ = src[i] & 0x7f; *dst++ = src[i] >> 7; } return dst; } static unsigned char * demunge_buf (unsigned char *src, unsigned char *dst, unsigned int src_bytes) { int i; unsigned char *end = src + src_bytes; /* NOTE: src and dst *CAN* point to the same address */ for (i = 0; src != end; i++) { dst[i] = *src++; dst[i] |= (*src++)<<7; } return dst; } /*********************************************************************** WaveFront: sample, patch and program management. 
***********************************************************************/ static int wavefront_delete_sample (snd_wavefront_t *dev, int sample_num) { unsigned char wbuf[2]; int x; wbuf[0] = sample_num & 0x7f; wbuf[1] = sample_num >> 7; x = snd_wavefront_cmd(dev, WFC_DELETE_SAMPLE, NULL, wbuf); if (!x) dev->sample_status[sample_num] = WF_ST_EMPTY; return x; } static int wavefront_get_sample_status (snd_wavefront_t *dev, int assume_rom) { int i; unsigned char rbuf[32], wbuf[32]; unsigned int sc_real, sc_alias, sc_multi; /* check sample status */ if (snd_wavefront_cmd (dev, WFC_GET_NSAMPLES, rbuf, wbuf)) { snd_printk ("cannot request sample count.\n"); return -1; } sc_real = sc_alias = sc_multi = dev->samples_used = 0; for (i = 0; i < WF_MAX_SAMPLE; i++) { wbuf[0] = i & 0x7f; wbuf[1] = i >> 7; if (snd_wavefront_cmd (dev, WFC_IDENTIFY_SAMPLE_TYPE, rbuf, wbuf)) { snd_printk(KERN_WARNING "cannot identify sample " "type of slot %d\n", i); dev->sample_status[i] = WF_ST_EMPTY; continue; } dev->sample_status[i] = (WF_SLOT_FILLED|rbuf[0]); if (assume_rom) { dev->sample_status[i] |= WF_SLOT_ROM; } switch (rbuf[0] & WF_ST_MASK) { case WF_ST_SAMPLE: sc_real++; break; case WF_ST_MULTISAMPLE: sc_multi++; break; case WF_ST_ALIAS: sc_alias++; break; case WF_ST_EMPTY: break; default: snd_printk ("unknown sample type for " "slot %d (0x%x)\n", i, rbuf[0]); } if (rbuf[0] != WF_ST_EMPTY) { dev->samples_used++; } } snd_printk ("%d samples used (%d real, %d aliases, %d multi), " "%d empty\n", dev->samples_used, sc_real, sc_alias, sc_multi, WF_MAX_SAMPLE - dev->samples_used); return (0); } static int wavefront_get_patch_status (snd_wavefront_t *dev) { unsigned char patchbuf[WF_PATCH_BYTES]; unsigned char patchnum[2]; wavefront_patch *p; int i, x, cnt, cnt2; for (i = 0; i < WF_MAX_PATCH; i++) { patchnum[0] = i & 0x7f; patchnum[1] = i >> 7; x = snd_wavefront_cmd(dev, WFC_UPLOAD_PATCH, patchbuf, patchnum); if (x == 0) { dev->patch_status[i] |= WF_SLOT_FILLED; p = (wavefront_patch *) patchbuf; 
dev->sample_status [p->sample_number|(p->sample_msb<<7)] |= WF_SLOT_USED; } else if (x == 3) { /* Bad patch number */ dev->patch_status[i] = 0; } else { snd_printk ("upload patch " "error 0x%x\n", x); dev->patch_status[i] = 0; return 1; } } /* program status has already filled in slot_used bits */ for (i = 0, cnt = 0, cnt2 = 0; i < WF_MAX_PATCH; i++) { if (dev->patch_status[i] & WF_SLOT_FILLED) { cnt++; } if (dev->patch_status[i] & WF_SLOT_USED) { cnt2++; } } snd_printk ("%d patch slots filled, %d in use\n", cnt, cnt2); return (0); } static int wavefront_get_program_status (snd_wavefront_t *dev) { unsigned char progbuf[WF_PROGRAM_BYTES]; wavefront_program prog; unsigned char prognum; int i, x, l, cnt; for (i = 0; i < WF_MAX_PROGRAM; i++) { prognum = i; x = snd_wavefront_cmd(dev, WFC_UPLOAD_PROGRAM, progbuf, &prognum); if (x == 0) { dev->prog_status[i] |= WF_SLOT_USED; demunge_buf (progbuf, (unsigned char *) &prog, WF_PROGRAM_BYTES); for (l = 0; l < WF_NUM_LAYERS; l++) { if (prog.layer[l].mute) { dev->patch_status [prog.layer[l].patch_number] |= WF_SLOT_USED; } } } else if (x == 1) { /* Bad program number */ dev->prog_status[i] = 0; } else { snd_printk ("upload program " "error 0x%x\n", x); dev->prog_status[i] = 0; } } for (i = 0, cnt = 0; i < WF_MAX_PROGRAM; i++) { if (dev->prog_status[i]) { cnt++; } } snd_printk ("%d programs slots in use\n", cnt); return (0); } static int wavefront_send_patch (snd_wavefront_t *dev, wavefront_patch_info *header) { unsigned char buf[WF_PATCH_BYTES+2]; unsigned char *bptr; DPRINT (WF_DEBUG_LOAD_PATCH, "downloading patch %d\n", header->number); if (header->number >= ARRAY_SIZE(dev->patch_status)) return -EINVAL; dev->patch_status[header->number] |= WF_SLOT_FILLED; bptr = munge_int32 (header->number, buf, 2); munge_buf ((unsigned char *)&header->hdr.p, bptr, WF_PATCH_BYTES); if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_PATCH, NULL, buf)) { snd_printk ("download patch failed\n"); return -EIO; } return (0); } static int 
wavefront_send_program (snd_wavefront_t *dev, wavefront_patch_info *header) { unsigned char buf[WF_PROGRAM_BYTES+1]; int i; DPRINT (WF_DEBUG_LOAD_PATCH, "downloading program %d\n", header->number); if (header->number >= ARRAY_SIZE(dev->prog_status)) return -EINVAL; dev->prog_status[header->number] = WF_SLOT_USED; /* XXX need to zero existing SLOT_USED bit for program_status[i] where `i' is the program that's being (potentially) overwritten. */ for (i = 0; i < WF_NUM_LAYERS; i++) { if (header->hdr.pr.layer[i].mute) { dev->patch_status[header->hdr.pr.layer[i].patch_number] |= WF_SLOT_USED; /* XXX need to mark SLOT_USED for sample used by patch_number, but this means we have to load it. Ick. */ } } buf[0] = header->number; munge_buf ((unsigned char *)&header->hdr.pr, &buf[1], WF_PROGRAM_BYTES); if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_PROGRAM, NULL, buf)) { snd_printk ("download patch failed\n"); return -EIO; } return (0); } static int wavefront_freemem (snd_wavefront_t *dev) { char rbuf[8]; if (snd_wavefront_cmd (dev, WFC_REPORT_FREE_MEMORY, rbuf, NULL)) { snd_printk ("can't get memory stats.\n"); return -1; } else { return demunge_int32 (rbuf, 4); } } static int wavefront_send_sample (snd_wavefront_t *dev, wavefront_patch_info *header, u16 __user *dataptr, int data_is_unsigned) { /* samples are downloaded via a 16-bit wide i/o port (you could think of it as 2 adjacent 8-bit wide ports but its less efficient that way). therefore, all the blocksizes and so forth listed in the documentation, and used conventionally to refer to sample sizes, which are given in 8-bit units (bytes), need to be divided by 2. 
*/ u16 sample_short = 0; u32 length; u16 __user *data_end = NULL; unsigned int i; const unsigned int max_blksize = 4096/2; unsigned int written; unsigned int blocksize; int dma_ack; int blocknum; unsigned char sample_hdr[WF_SAMPLE_HDR_BYTES]; unsigned char *shptr; int skip = 0; int initial_skip = 0; DPRINT (WF_DEBUG_LOAD_PATCH, "sample %sdownload for slot %d, " "type %d, %d bytes from 0x%lx\n", header->size ? "" : "header ", header->number, header->subkey, header->size, (unsigned long) header->dataptr); if (header->number == WAVEFRONT_FIND_FREE_SAMPLE_SLOT) { int x; x = wavefront_find_free_sample(dev); if (x < 0) return -ENOMEM; snd_printk ("unspecified sample => %d\n", x); header->number = x; } if (header->number >= WF_MAX_SAMPLE) return -EINVAL; if (header->size) { /* XXX it's a debatable point whether or not RDONLY semantics on the ROM samples should cover just the sample data or the sample header. For now, it only covers the sample data, so anyone is free at all times to rewrite sample headers. My reason for this is that we have the sample headers available in the WFB file for General MIDI, and so these can always be reset if needed. The sample data, however, cannot be recovered without a complete reset and firmware reload of the ICS2115, which is a very expensive operation. So, doing things this way allows us to honor the notion of "RESETSAMPLES" reasonably cheaply. Note however, that this is done purely at user level: there is no WFB parser in this driver, and so a complete reset (back to General MIDI, or theoretically some other configuration) is the responsibility of the user level library. To try to do this in the kernel would be a little crazy: we'd need 158K of kernel space just to hold a copy of the patch/program/sample header data. 
*/ if (dev->rom_samples_rdonly) { if (dev->sample_status[header->number] & WF_SLOT_ROM) { snd_printk ("sample slot %d " "write protected\n", header->number); return -EACCES; } } wavefront_delete_sample (dev, header->number); } if (header->size) { dev->freemem = wavefront_freemem (dev); if (dev->freemem < (int)header->size) { snd_printk ("insufficient memory to " "load %d byte sample.\n", header->size); return -ENOMEM; } } skip = WF_GET_CHANNEL(&header->hdr.s); if (skip > 0 && header->hdr.s.SampleResolution != LINEAR_16BIT) { snd_printk ("channel selection only " "possible on 16-bit samples"); return -EINVAL; } switch (skip) { case 0: initial_skip = 0; skip = 1; break; case 1: initial_skip = 0; skip = 2; break; case 2: initial_skip = 1; skip = 2; break; case 3: initial_skip = 2; skip = 3; break; case 4: initial_skip = 3; skip = 4; break; case 5: initial_skip = 4; skip = 5; break; case 6: initial_skip = 5; skip = 6; break; } DPRINT (WF_DEBUG_LOAD_PATCH, "channel selection: %d => " "initial skip = %d, skip = %d\n", WF_GET_CHANNEL (&header->hdr.s), initial_skip, skip); /* Be safe, and zero the "Unused" bits ... */ WF_SET_CHANNEL(&header->hdr.s, 0); /* adjust size for 16 bit samples by dividing by two. We always send 16 bits per write, even for 8 bit samples, so the length is always half the size of the sample data in bytes. */ length = header->size / 2; /* the data we're sent has not been munged, and in fact, the header we have to send isn't just a munged copy either. so, build the sample header right here. */ shptr = &sample_hdr[0]; shptr = munge_int32 (header->number, shptr, 2); if (header->size) { shptr = munge_int32 (length, shptr, 4); } /* Yes, a 4 byte result doesn't contain all of the offset bits, but the offset only uses 24 bits. 
*/ shptr = munge_int32 (*((u32 *) &header->hdr.s.sampleStartOffset), shptr, 4); shptr = munge_int32 (*((u32 *) &header->hdr.s.loopStartOffset), shptr, 4); shptr = munge_int32 (*((u32 *) &header->hdr.s.loopEndOffset), shptr, 4); shptr = munge_int32 (*((u32 *) &header->hdr.s.sampleEndOffset), shptr, 4); /* This one is truly weird. What kind of weirdo decided that in a system dominated by 16 and 32 bit integers, they would use a just 12 bits ? */ shptr = munge_int32 (header->hdr.s.FrequencyBias, shptr, 3); /* Why is this nybblified, when the MSB is *always* zero ? Anyway, we can't take address of bitfield, so make a good-faith guess at where it starts. */ shptr = munge_int32 (*(&header->hdr.s.FrequencyBias+1), shptr, 2); if (snd_wavefront_cmd (dev, header->size ? WFC_DOWNLOAD_SAMPLE : WFC_DOWNLOAD_SAMPLE_HEADER, NULL, sample_hdr)) { snd_printk ("sample %sdownload refused.\n", header->size ? "" : "header "); return -EIO; } if (header->size == 0) { goto sent; /* Sorry. Just had to have one somewhere */ } data_end = dataptr + length; /* Do any initial skip over an unused channel's data */ dataptr += initial_skip; for (written = 0, blocknum = 0; written < length; written += max_blksize, blocknum++) { if ((length - written) > max_blksize) { blocksize = max_blksize; } else { /* round to nearest 16-byte value */ blocksize = ALIGN(length - written, 8); } if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_BLOCK, NULL, NULL)) { snd_printk ("download block " "request refused.\n"); return -EIO; } for (i = 0; i < blocksize; i++) { if (dataptr < data_end) { if (get_user(sample_short, dataptr)) return -EFAULT; dataptr += skip; if (data_is_unsigned) { /* GUS ? */ if (WF_SAMPLE_IS_8BIT(&header->hdr.s)) { /* 8 bit sample resolution, sign extend both bytes. */ ((unsigned char*) &sample_short)[0] += 0x7f; ((unsigned char*) &sample_short)[1] += 0x7f; } else { /* 16 bit sample resolution, sign extend the MSB. 
*/ sample_short += 0x7fff; } } } else { /* In padding section of final block: Don't fetch unsupplied data from user space, just continue with whatever the final value was. */ } if (i < blocksize - 1) { outw (sample_short, dev->block_port); } else { outw (sample_short, dev->last_block_port); } } /* Get "DMA page acknowledge", even though its really nothing to do with DMA at all. */ dma_ack = wavefront_read(dev); if (dma_ack != WF_DMA_ACK) { if (dma_ack == -1) { snd_printk ("upload sample " "DMA ack timeout\n"); return -EIO; } else { snd_printk ("upload sample " "DMA ack error 0x%x\n", dma_ack); return -EIO; } } } dev->sample_status[header->number] = (WF_SLOT_FILLED|WF_ST_SAMPLE); /* Note, label is here because sending the sample header shouldn't alter the sample_status info at all. */ sent: return (0); } static int wavefront_send_alias (snd_wavefront_t *dev, wavefront_patch_info *header) { unsigned char alias_hdr[WF_ALIAS_BYTES]; DPRINT (WF_DEBUG_LOAD_PATCH, "download alias, %d is " "alias for %d\n", header->number, header->hdr.a.OriginalSample); if (header->number >= WF_MAX_SAMPLE) return -EINVAL; munge_int32 (header->number, &alias_hdr[0], 2); munge_int32 (header->hdr.a.OriginalSample, &alias_hdr[2], 2); munge_int32 (*((unsigned int *)&header->hdr.a.sampleStartOffset), &alias_hdr[4], 4); munge_int32 (*((unsigned int *)&header->hdr.a.loopStartOffset), &alias_hdr[8], 4); munge_int32 (*((unsigned int *)&header->hdr.a.loopEndOffset), &alias_hdr[12], 4); munge_int32 (*((unsigned int *)&header->hdr.a.sampleEndOffset), &alias_hdr[16], 4); munge_int32 (header->hdr.a.FrequencyBias, &alias_hdr[20], 3); munge_int32 (*(&header->hdr.a.FrequencyBias+1), &alias_hdr[23], 2); if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_SAMPLE_ALIAS, NULL, alias_hdr)) { snd_printk ("download alias failed.\n"); return -EIO; } dev->sample_status[header->number] = (WF_SLOT_FILLED|WF_ST_ALIAS); return (0); } static int wavefront_send_multisample (snd_wavefront_t *dev, wavefront_patch_info *header) { int 
i; int num_samples; unsigned char *msample_hdr; if (header->number >= WF_MAX_SAMPLE) return -EINVAL; msample_hdr = kmalloc(WF_MSAMPLE_BYTES, GFP_KERNEL); if (! msample_hdr) return -ENOMEM; munge_int32 (header->number, &msample_hdr[0], 2); /* You'll recall at this point that the "number of samples" value in a wavefront_multisample struct is actually the log2 of the real number of samples. */ num_samples = (1<<(header->hdr.ms.NumberOfSamples&7)); msample_hdr[2] = (unsigned char) header->hdr.ms.NumberOfSamples; DPRINT (WF_DEBUG_LOAD_PATCH, "multi %d with %d=%d samples\n", header->number, header->hdr.ms.NumberOfSamples, num_samples); for (i = 0; i < num_samples; i++) { DPRINT(WF_DEBUG_LOAD_PATCH|WF_DEBUG_DATA, "sample[%d] = %d\n", i, header->hdr.ms.SampleNumber[i]); munge_int32 (header->hdr.ms.SampleNumber[i], &msample_hdr[3+(i*2)], 2); } /* Need a hack here to pass in the number of bytes to be written to the synth. This is ugly, and perhaps one day, I'll fix it. */ if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_MULTISAMPLE, (unsigned char *) (long) ((num_samples*2)+3), msample_hdr)) { snd_printk ("download of multisample failed.\n"); kfree(msample_hdr); return -EIO; } dev->sample_status[header->number] = (WF_SLOT_FILLED|WF_ST_MULTISAMPLE); kfree(msample_hdr); return (0); } static int wavefront_fetch_multisample (snd_wavefront_t *dev, wavefront_patch_info *header) { int i; unsigned char log_ns[1]; unsigned char number[2]; int num_samples; munge_int32 (header->number, number, 2); if (snd_wavefront_cmd (dev, WFC_UPLOAD_MULTISAMPLE, log_ns, number)) { snd_printk ("upload multisample failed.\n"); return -EIO; } DPRINT (WF_DEBUG_DATA, "msample %d has %d samples\n", header->number, log_ns[0]); header->hdr.ms.NumberOfSamples = log_ns[0]; /* get the number of samples ... 
*/ num_samples = (1 << log_ns[0]); for (i = 0; i < num_samples; i++) { char d[2]; int val; val = wavefront_read(dev); if (val == -1) { snd_printk ("upload multisample failed " "during sample loop.\n"); return -EIO; } d[0] = val; val = wavefront_read(dev); if (val == -1) { snd_printk ("upload multisample failed " "during sample loop.\n"); return -EIO; } d[1] = val; header->hdr.ms.SampleNumber[i] = demunge_int32 ((unsigned char *) d, 2); DPRINT (WF_DEBUG_DATA, "msample sample[%d] = %d\n", i, header->hdr.ms.SampleNumber[i]); } return (0); } static int wavefront_send_drum (snd_wavefront_t *dev, wavefront_patch_info *header) { unsigned char drumbuf[WF_DRUM_BYTES]; wavefront_drum *drum = &header->hdr.d; int i; DPRINT (WF_DEBUG_LOAD_PATCH, "downloading edrum for MIDI " "note %d, patch = %d\n", header->number, drum->PatchNumber); drumbuf[0] = header->number & 0x7f; for (i = 0; i < 4; i++) { munge_int32 (((unsigned char *)drum)[i], &drumbuf[1+(i*2)], 2); } if (snd_wavefront_cmd (dev, WFC_DOWNLOAD_EDRUM_PROGRAM, NULL, drumbuf)) { snd_printk ("download drum failed.\n"); return -EIO; } return (0); } static int wavefront_find_free_sample (snd_wavefront_t *dev) { int i; for (i = 0; i < WF_MAX_SAMPLE; i++) { if (!(dev->sample_status[i] & WF_SLOT_FILLED)) { return i; } } snd_printk ("no free sample slots!\n"); return -1; } #if 0 static int wavefront_find_free_patch (snd_wavefront_t *dev) { int i; for (i = 0; i < WF_MAX_PATCH; i++) { if (!(dev->patch_status[i] & WF_SLOT_FILLED)) { return i; } } snd_printk ("no free patch slots!\n"); return -1; } #endif static int wavefront_load_patch (snd_wavefront_t *dev, const char __user *addr) { wavefront_patch_info *header; int err; header = kmalloc(sizeof(*header), GFP_KERNEL); if (! 
header) return -ENOMEM; if (copy_from_user (header, addr, sizeof(wavefront_patch_info) - sizeof(wavefront_any))) { snd_printk ("bad address for load patch.\n"); err = -EFAULT; goto __error; } DPRINT (WF_DEBUG_LOAD_PATCH, "download " "Sample type: %d " "Sample number: %d " "Sample size: %d\n", header->subkey, header->number, header->size); switch (header->subkey) { case WF_ST_SAMPLE: /* sample or sample_header, based on patch->size */ if (copy_from_user (&header->hdr.s, header->hdrptr, sizeof (wavefront_sample))) { err = -EFAULT; break; } err = wavefront_send_sample (dev, header, header->dataptr, 0); break; case WF_ST_MULTISAMPLE: if (copy_from_user (&header->hdr.s, header->hdrptr, sizeof (wavefront_multisample))) { err = -EFAULT; break; } err = wavefront_send_multisample (dev, header); break; case WF_ST_ALIAS: if (copy_from_user (&header->hdr.a, header->hdrptr, sizeof (wavefront_alias))) { err = -EFAULT; break; } err = wavefront_send_alias (dev, header); break; case WF_ST_DRUM: if (copy_from_user (&header->hdr.d, header->hdrptr, sizeof (wavefront_drum))) { err = -EFAULT; break; } err = wavefront_send_drum (dev, header); break; case WF_ST_PATCH: if (copy_from_user (&header->hdr.p, header->hdrptr, sizeof (wavefront_patch))) { err = -EFAULT; break; } err = wavefront_send_patch (dev, header); break; case WF_ST_PROGRAM: if (copy_from_user (&header->hdr.pr, header->hdrptr, sizeof (wavefront_program))) { err = -EFAULT; break; } err = wavefront_send_program (dev, header); break; default: snd_printk ("unknown patch type %d.\n", header->subkey); err = -EINVAL; break; } __error: kfree(header); return err; } /*********************************************************************** WaveFront: hardware-dependent interface ***********************************************************************/ static void process_sample_hdr (u8 *buf) { wavefront_sample s; u8 *ptr; ptr = buf; /* The board doesn't send us an exact copy of a "wavefront_sample" in response to an Upload Sample Header 
command. Instead, we have to convert the data format back into our data structure, just as in the Download Sample command, where we have to do something very similar in the reverse direction. */ *((u32 *) &s.sampleStartOffset) = demunge_int32 (ptr, 4); ptr += 4; *((u32 *) &s.loopStartOffset) = demunge_int32 (ptr, 4); ptr += 4; *((u32 *) &s.loopEndOffset) = demunge_int32 (ptr, 4); ptr += 4; *((u32 *) &s.sampleEndOffset) = demunge_int32 (ptr, 4); ptr += 4; *((u32 *) &s.FrequencyBias) = demunge_int32 (ptr, 3); ptr += 3; s.SampleResolution = *ptr & 0x3; s.Loop = *ptr & 0x8; s.Bidirectional = *ptr & 0x10; s.Reverse = *ptr & 0x40; /* Now copy it back to where it came from */ memcpy (buf, (unsigned char *) &s, sizeof (wavefront_sample)); } static int wavefront_synth_control (snd_wavefront_card_t *acard, wavefront_control *wc) { snd_wavefront_t *dev = &acard->wavefront; unsigned char patchnumbuf[2]; int i; DPRINT (WF_DEBUG_CMD, "synth control with " "cmd 0x%x\n", wc->cmd); /* Pre-handling of or for various commands */ switch (wc->cmd) { case WFC_DISABLE_INTERRUPTS: snd_printk ("interrupts disabled.\n"); outb (0x80|0x20, dev->control_port); dev->interrupts_are_midi = 1; return 0; case WFC_ENABLE_INTERRUPTS: snd_printk ("interrupts enabled.\n"); outb (0x80|0x40|0x20, dev->control_port); dev->interrupts_are_midi = 1; return 0; case WFC_INTERRUPT_STATUS: wc->rbuf[0] = dev->interrupts_are_midi; return 0; case WFC_ROMSAMPLES_RDONLY: dev->rom_samples_rdonly = wc->wbuf[0]; wc->status = 0; return 0; case WFC_IDENTIFY_SLOT_TYPE: i = wc->wbuf[0] | (wc->wbuf[1] << 7); if (i <0 || i >= WF_MAX_SAMPLE) { snd_printk ("invalid slot ID %d\n", i); wc->status = EINVAL; return -EINVAL; } wc->rbuf[0] = dev->sample_status[i]; wc->status = 0; return 0; case WFC_DEBUG_DRIVER: dev->debug = wc->wbuf[0]; snd_printk ("debug = 0x%x\n", dev->debug); return 0; case WFC_UPLOAD_PATCH: munge_int32 (*((u32 *) wc->wbuf), patchnumbuf, 2); memcpy (wc->wbuf, patchnumbuf, 2); break; case WFC_UPLOAD_MULTISAMPLE: 
/* multisamples have to be handled differently, and cannot be dealt with properly by snd_wavefront_cmd() alone. */ wc->status = wavefront_fetch_multisample (dev, (wavefront_patch_info *) wc->rbuf); return 0; case WFC_UPLOAD_SAMPLE_ALIAS: snd_printk ("support for sample alias upload " "being considered.\n"); wc->status = EINVAL; return -EINVAL; } wc->status = snd_wavefront_cmd (dev, wc->cmd, wc->rbuf, wc->wbuf); /* Post-handling of certain commands. In particular, if the command was an upload, demunge the data so that the user-level doesn't have to think about it. */ if (wc->status == 0) { switch (wc->cmd) { /* intercept any freemem requests so that we know we are always current with the user-level view of things. */ case WFC_REPORT_FREE_MEMORY: dev->freemem = demunge_int32 (wc->rbuf, 4); break; case WFC_UPLOAD_PATCH: demunge_buf (wc->rbuf, wc->rbuf, WF_PATCH_BYTES); break; case WFC_UPLOAD_PROGRAM: demunge_buf (wc->rbuf, wc->rbuf, WF_PROGRAM_BYTES); break; case WFC_UPLOAD_EDRUM_PROGRAM: demunge_buf (wc->rbuf, wc->rbuf, WF_DRUM_BYTES - 1); break; case WFC_UPLOAD_SAMPLE_HEADER: process_sample_hdr (wc->rbuf); break; case WFC_UPLOAD_SAMPLE_ALIAS: snd_printk ("support for " "sample aliases still " "being considered.\n"); break; case WFC_VMIDI_OFF: snd_wavefront_midi_disable_virtual (acard); break; case WFC_VMIDI_ON: snd_wavefront_midi_enable_virtual (acard); break; } } return 0; } int snd_wavefront_synth_open (struct snd_hwdep *hw, struct file *file) { if (!try_module_get(hw->card->module)) return -EFAULT; file->private_data = hw; return 0; } int snd_wavefront_synth_release (struct snd_hwdep *hw, struct file *file) { module_put(hw->card->module); return 0; } int snd_wavefront_synth_ioctl (struct snd_hwdep *hw, struct file *file, unsigned int cmd, unsigned long arg) { struct snd_card *card; snd_wavefront_t *dev; snd_wavefront_card_t *acard; wavefront_control *wc; void __user *argp = (void __user *)arg; int err; card = (struct snd_card *) hw->card; if (snd_BUG_ON(!card)) 
return -ENODEV; if (snd_BUG_ON(!card->private_data)) return -ENODEV; acard = card->private_data; dev = &acard->wavefront; switch (cmd) { case WFCTL_LOAD_SPP: if (wavefront_load_patch (dev, argp) != 0) { return -EIO; } break; case WFCTL_WFCMD: wc = memdup_user(argp, sizeof(*wc)); if (IS_ERR(wc)) return PTR_ERR(wc); if (wavefront_synth_control (acard, wc) < 0) err = -EIO; else if (copy_to_user (argp, wc, sizeof (*wc))) err = -EFAULT; else err = 0; kfree(wc); return err; default: return -EINVAL; } return 0; } /***********************************************************************/ /* WaveFront: interface for card-level wavefront module */ /***********************************************************************/ void snd_wavefront_internal_interrupt (snd_wavefront_card_t *card) { snd_wavefront_t *dev = &card->wavefront; /* Some comments on interrupts. I attempted a version of this driver that used interrupts throughout the code instead of doing busy and/or sleep-waiting. Alas, it appears that once the Motorola firmware is downloaded, the card *never* generates an RX interrupt. These are successfully generated during firmware loading, and after that wavefront_status() reports that an interrupt is pending on the card from time to time, but it never seems to be delivered to this driver. Note also that wavefront_status() continues to report that RX interrupts are enabled, suggesting that I didn't goof up and disable them by mistake. Thus, I stepped back to a prior version of wavefront_wait(), the only place where this really matters. Its sad, but I've looked through the code to check on things, and I really feel certain that the Motorola firmware prevents RX-ready interrupts. 
*/ if ((wavefront_status(dev) & (STAT_INTR_READ|STAT_INTR_WRITE)) == 0) { return; } spin_lock(&dev->irq_lock); dev->irq_ok = 1; dev->irq_cnt++; spin_unlock(&dev->irq_lock); wake_up(&dev->interrupt_sleeper); } /* STATUS REGISTER 0 Host Rx Interrupt Enable (1=Enabled) 1 Host Rx Register Full (1=Full) 2 Host Rx Interrupt Pending (1=Interrupt) 3 Unused 4 Host Tx Interrupt (1=Enabled) 5 Host Tx Register empty (1=Empty) 6 Host Tx Interrupt Pending (1=Interrupt) 7 Unused */ static int snd_wavefront_interrupt_bits (int irq) { int bits; switch (irq) { case 9: bits = 0x00; break; case 5: bits = 0x08; break; case 12: bits = 0x10; break; case 15: bits = 0x18; break; default: snd_printk ("invalid IRQ %d\n", irq); bits = -1; } return bits; } static void wavefront_should_cause_interrupt (snd_wavefront_t *dev, int val, int port, unsigned long timeout) { wait_queue_entry_t wait; init_waitqueue_entry(&wait, current); spin_lock_irq(&dev->irq_lock); add_wait_queue(&dev->interrupt_sleeper, &wait); dev->irq_ok = 0; outb (val,port); spin_unlock_irq(&dev->irq_lock); while (!dev->irq_ok && time_before(jiffies, timeout)) { schedule_timeout_uninterruptible(1); barrier(); } } static int wavefront_reset_to_cleanliness (snd_wavefront_t *dev) { int bits; int hwv[2]; /* IRQ already checked */ bits = snd_wavefront_interrupt_bits (dev->irq); /* try reset of port */ outb (0x0, dev->control_port); /* At this point, the board is in reset, and the H/W initialization register is accessed at the same address as the data port. Bit 7 - Enable IRQ Driver 0 - Tri-state the Wave-Board drivers for the PC Bus IRQs 1 - Enable IRQ selected by bits 5:3 to be driven onto the PC Bus. Bit 6 - MIDI Interface Select 0 - Use the MIDI Input from the 26-pin WaveBlaster compatible header as the serial MIDI source 1 - Use the MIDI Input from the 9-pin D connector as the serial MIDI source. 
Bits 5:3 - IRQ Selection 0 0 0 - IRQ 2/9 0 0 1 - IRQ 5 0 1 0 - IRQ 12 0 1 1 - IRQ 15 1 0 0 - Reserved 1 0 1 - Reserved 1 1 0 - Reserved 1 1 1 - Reserved Bits 2:1 - Reserved Bit 0 - Disable Boot ROM 0 - memory accesses to 03FC30-03FFFFH utilize the internal Boot ROM 1 - memory accesses to 03FC30-03FFFFH are directed to external storage. */ /* configure hardware: IRQ, enable interrupts, plus external 9-pin MIDI interface selected */ outb (0x80 | 0x40 | bits, dev->data_port); /* CONTROL REGISTER 0 Host Rx Interrupt Enable (1=Enabled) 0x1 1 Unused 0x2 2 Unused 0x4 3 Unused 0x8 4 Host Tx Interrupt Enable 0x10 5 Mute (0=Mute; 1=Play) 0x20 6 Master Interrupt Enable (1=Enabled) 0x40 7 Master Reset (0=Reset; 1=Run) 0x80 Take us out of reset, mute output, master + TX + RX interrupts on. We'll get an interrupt presumably to tell us that the TX register is clear. */ wavefront_should_cause_interrupt(dev, 0x80|0x40|0x10|0x1, dev->control_port, (reset_time*HZ)/100); /* Note: data port is now the data port, not the h/w initialization port. */ if (!dev->irq_ok) { snd_printk ("intr not received after h/w un-reset.\n"); goto gone_bad; } /* Note: data port is now the data port, not the h/w initialization port. At this point, only "HW VERSION" or "DOWNLOAD OS" commands will work. So, issue one of them, and wait for TX interrupt. This can take a *long* time after a cold boot, while the ISC ROM does its RAM test. The SDK says up to 4 seconds - with 12MB of RAM on a Tropez+, it takes a lot longer than that (~16secs). Note that the card understands the difference between a warm and a cold boot, so subsequent ISC2115 reboots (say, caused by module reloading) will get through this much faster. XXX Interesting question: why is no RX interrupt received first ? 
*/ wavefront_should_cause_interrupt(dev, WFC_HARDWARE_VERSION, dev->data_port, ramcheck_time*HZ); if (!dev->irq_ok) { snd_printk ("post-RAM-check interrupt not received.\n"); goto gone_bad; } if (!wavefront_wait (dev, STAT_CAN_READ)) { snd_printk ("no response to HW version cmd.\n"); goto gone_bad; } hwv[0] = wavefront_read(dev); if (hwv[0] == -1) { snd_printk ("board not responding correctly.\n"); goto gone_bad; } if (hwv[0] == 0xFF) { /* NAK */ /* Board's RAM test failed. Try to read error code, and tell us about it either way. */ hwv[0] = wavefront_read(dev); if (hwv[0] == -1) { snd_printk ("on-board RAM test failed " "(bad error code).\n"); } else { snd_printk ("on-board RAM test failed " "(error code: 0x%x).\n", hwv[0]); } goto gone_bad; } /* We're OK, just get the next byte of the HW version response */ hwv[1] = wavefront_read(dev); if (hwv[1] == -1) { snd_printk ("incorrect h/w response.\n"); goto gone_bad; } snd_printk ("hardware version %d.%d\n", hwv[0], hwv[1]); return 0; gone_bad: return (1); } static int wavefront_download_firmware (snd_wavefront_t *dev, char *path) { const unsigned char *buf; int len, err; int section_cnt_downloaded = 0; const struct firmware *firmware; err = request_firmware(&firmware, path, dev->card->dev); if (err < 0) { snd_printk(KERN_ERR "firmware (%s) download failed!!!\n", path); return 1; } len = 0; buf = firmware->data; for (;;) { int section_length = *(signed char *)buf; if (section_length == 0) break; if (section_length < 0 || section_length > WF_SECTION_MAX) { snd_printk(KERN_ERR "invalid firmware section length %d\n", section_length); goto failure; } buf++; len++; if (firmware->size < len + section_length) { snd_printk(KERN_ERR "firmware section read error.\n"); goto failure; } /* Send command */ if (wavefront_write(dev, WFC_DOWNLOAD_OS)) goto failure; for (; section_length; section_length--) { if (wavefront_write(dev, *buf)) goto failure; buf++; len++; } /* get ACK */ if (!wavefront_wait(dev, STAT_CAN_READ)) { 
snd_printk(KERN_ERR "time out for firmware ACK.\n"); goto failure; } err = inb(dev->data_port); if (err != WF_ACK) { snd_printk(KERN_ERR "download of section #%d not " "acknowledged, ack = 0x%x\n", section_cnt_downloaded + 1, err); goto failure; } section_cnt_downloaded++; } release_firmware(firmware); return 0; failure: release_firmware(firmware); snd_printk(KERN_ERR "firmware download failed!!!\n"); return 1; } static int wavefront_do_reset (snd_wavefront_t *dev) { char voices[1]; if (wavefront_reset_to_cleanliness (dev)) { snd_printk ("hw reset failed.\n"); goto gone_bad; } if (dev->israw) { if (wavefront_download_firmware (dev, ospath)) { goto gone_bad; } dev->israw = 0; /* Wait for the OS to get running. The protocol for this is non-obvious, and was determined by using port-IO tracing in DOSemu and some experimentation here. Rather than using timed waits, use interrupts creatively. */ wavefront_should_cause_interrupt (dev, WFC_NOOP, dev->data_port, (osrun_time*HZ)); if (!dev->irq_ok) { snd_printk ("no post-OS interrupt.\n"); goto gone_bad; } /* Now, do it again ! */ wavefront_should_cause_interrupt (dev, WFC_NOOP, dev->data_port, (10*HZ)); if (!dev->irq_ok) { snd_printk ("no post-OS interrupt(2).\n"); goto gone_bad; } /* OK, no (RX/TX) interrupts any more, but leave mute in effect. */ outb (0x80|0x40, dev->control_port); } /* SETUPSND.EXE asks for sample memory config here, but since i have no idea how to interpret the result, we'll forget about it. 
*/ dev->freemem = wavefront_freemem(dev); if (dev->freemem < 0) goto gone_bad; snd_printk ("available DRAM %dk\n", dev->freemem / 1024); if (wavefront_write (dev, 0xf0) || wavefront_write (dev, 1) || (wavefront_read (dev) < 0)) { dev->debug = 0; snd_printk ("MPU emulation mode not set.\n"); goto gone_bad; } voices[0] = 32; if (snd_wavefront_cmd (dev, WFC_SET_NVOICES, NULL, voices)) { snd_printk ("cannot set number of voices to 32.\n"); goto gone_bad; } return 0; gone_bad: /* reset that sucker so that it doesn't bother us. */ outb (0x0, dev->control_port); dev->interrupts_are_midi = 0; return 1; } int snd_wavefront_start (snd_wavefront_t *dev) { int samples_are_from_rom; /* IMPORTANT: assumes that snd_wavefront_detect() and/or wavefront_reset_to_cleanliness() has already been called */ if (dev->israw) { samples_are_from_rom = 1; } else { /* XXX is this always true ? */ samples_are_from_rom = 0; } if (dev->israw || fx_raw) { if (wavefront_do_reset (dev)) { return -1; } } /* Check for FX device, present only on Tropez+ */ dev->has_fx = (snd_wavefront_fx_detect (dev) == 0); if (dev->has_fx && fx_raw) { snd_wavefront_fx_start (dev); } wavefront_get_sample_status (dev, samples_are_from_rom); wavefront_get_program_status (dev); wavefront_get_patch_status (dev); /* Start normal operation: unreset, master interrupt enabled, no mute */ outb (0x80|0x40|0x20, dev->control_port); return (0); } int snd_wavefront_detect (snd_wavefront_card_t *card) { unsigned char rbuf[4], wbuf[4]; snd_wavefront_t *dev = &card->wavefront; /* returns zero if a WaveFront card is successfully detected. negative otherwise. 
*/ dev->israw = 0; dev->has_fx = 0; dev->debug = debug_default; dev->interrupts_are_midi = 0; dev->irq_cnt = 0; dev->rom_samples_rdonly = 1; if (snd_wavefront_cmd (dev, WFC_FIRMWARE_VERSION, rbuf, wbuf) == 0) { dev->fw_version[0] = rbuf[0]; dev->fw_version[1] = rbuf[1]; snd_printk ("firmware %d.%d already loaded.\n", rbuf[0], rbuf[1]); /* check that a command actually works */ if (snd_wavefront_cmd (dev, WFC_HARDWARE_VERSION, rbuf, wbuf) == 0) { dev->hw_version[0] = rbuf[0]; dev->hw_version[1] = rbuf[1]; } else { snd_printk ("not raw, but no " "hardware version!\n"); return -1; } if (!wf_raw) { return 0; } else { snd_printk ("reloading firmware as you requested.\n"); dev->israw = 1; } } else { dev->israw = 1; snd_printk ("no response to firmware probe, assume raw.\n"); } return 0; } MODULE_FIRMWARE(DEFAULT_OSPATH);
linux-master
sound/isa/wavefront/wavefront_synth.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  ALSA card-level driver for Turtle Beach Wavefront cards
 *                                              (Maui,Tropez,Tropez+)
 *
 *  Copyright (c) 1997-1999 by Paul Barton-Davis <[email protected]>
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/isa.h>
#include <linux/pnp.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/opl3.h>
#include <sound/wss.h>
#include <sound/snd_wavefront.h>

MODULE_AUTHOR("Paul Barton-Davis <[email protected]>");
MODULE_DESCRIPTION("Turtle Beach Wavefront");
MODULE_LICENSE("GPL");

/* Per-card module parameters; index 0..SNDRV_CARDS-1 selects the card slot. */
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;	    /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;	    /* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE;	    /* Enable this card */
#ifdef CONFIG_PNP
static bool isapnp[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1};
#endif
static long cs4232_pcm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */
static int cs4232_pcm_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 5,7,9,11,12,15 */
static long cs4232_mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */
static int cs4232_mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 9,11,12,15 */
static long ics2115_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */
static int ics2115_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 2,9,11,12,15 */
static long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */
static int dma1[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 0,1,3,5,6,7 */
static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 0,1,3,5,6,7 */
static bool use_cs4232_midi[SNDRV_CARDS];

module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for WaveFront soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for WaveFront soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable WaveFront soundcard.");
#ifdef CONFIG_PNP
module_param_array(isapnp, bool, NULL, 0444);
MODULE_PARM_DESC(isapnp, "ISA PnP detection for WaveFront soundcards.");
#endif
module_param_hw_array(cs4232_pcm_port, long, ioport, NULL, 0444);
MODULE_PARM_DESC(cs4232_pcm_port, "Port # for CS4232 PCM interface.");
module_param_hw_array(cs4232_pcm_irq, int, irq, NULL, 0444);
MODULE_PARM_DESC(cs4232_pcm_irq, "IRQ # for CS4232 PCM interface.");
module_param_hw_array(dma1, int, dma, NULL, 0444);
MODULE_PARM_DESC(dma1, "DMA1 # for CS4232 PCM interface.");
module_param_hw_array(dma2, int, dma, NULL, 0444);
MODULE_PARM_DESC(dma2, "DMA2 # for CS4232 PCM interface.");
module_param_hw_array(cs4232_mpu_port, long, ioport, NULL, 0444);
MODULE_PARM_DESC(cs4232_mpu_port, "port # for CS4232 MPU-401 interface.");
module_param_hw_array(cs4232_mpu_irq, int, irq, NULL, 0444);
MODULE_PARM_DESC(cs4232_mpu_irq, "IRQ # for CS4232 MPU-401 interface.");
module_param_hw_array(ics2115_irq, int, irq, NULL, 0444);
MODULE_PARM_DESC(ics2115_irq, "IRQ # for ICS2115.");
module_param_hw_array(ics2115_port, long, ioport, NULL, 0444);
MODULE_PARM_DESC(ics2115_port, "Port # for ICS2115.");
module_param_hw_array(fm_port, long, ioport, NULL, 0444);
MODULE_PARM_DESC(fm_port, "FM port #.");
module_param_array(use_cs4232_midi, bool, NULL, 0444);
MODULE_PARM_DESC(use_cs4232_midi, "Use CS4232 MPU-401 interface (inaccessibly located inside your computer)");

#ifdef CONFIG_PNP
static int isa_registered;
static int pnp_registered;

/* PnP IDs for the two cards this driver handles: each PnP card exposes
   four logical devices (WSS codec, game port/control, MPU-401, synth). */
static const struct pnp_card_device_id snd_wavefront_pnpids[] = {
	/* Tropez */
	{ .id = "CSC7532", .devs = { { "CSC0000" }, { "CSC0010" }, { "PnPb006" }, { "CSC0004" } } },
	/* Tropez+ */
	{ .id = "CSC7632", .devs = { { "CSC0000" }, { "CSC0010" }, { "PnPb006" }, { "CSC0004" } } },
	{ .id = "" }
};

MODULE_DEVICE_TABLE(pnp_card, snd_wavefront_pnpids);

/* Request and activate the PnP logical devices of one card, filling in
   the per-card port/irq/dma module-parameter slots from the PnP
   resources.  Returns 0 on success or a negative errno. */
static int
snd_wavefront_pnp (int dev, snd_wavefront_card_t *acard, struct pnp_card_link *card,
		   const struct pnp_card_device_id *id)
{
	struct pnp_dev *pdev;
	int err;

	/* Check for each logical device. */

	/* CS4232 chip (aka "windows sound system") is logical device 0 */

	acard->wss = pnp_request_card_device(card, id->devs[0].id, NULL);
	if (acard->wss == NULL)
		return -EBUSY;

	/* there is a game port at logical device 1, but we ignore it
	   completely */

	/* the control interface is logical device 2, but we ignore it
	   completely. in fact, nobody even seems to know what it
	   does. */

	/* Only configure the CS4232 MIDI interface if its been
	   specifically requested. It is logical device 3. */

	if (use_cs4232_midi[dev]) {
		acard->mpu = pnp_request_card_device(card, id->devs[2].id, NULL);
		if (acard->mpu == NULL)
			return -EBUSY;
	}

	/* The ICS2115 synth is logical device 4 */

	acard->synth = pnp_request_card_device(card, id->devs[3].id, NULL);
	if (acard->synth == NULL)
		return -EBUSY;

	/* PCM/FM initialization */

	pdev = acard->wss;

	/* An interesting note from the Tropez+ FAQ:

	   Q. [Ports] Why is the base address of the WSS I/O ports off by 4?

	   A. WSS I/O requires a block of 8 I/O addresses ("ports"). Of these, the first
	   4 are used to identify and configure the board. With the advent of PnP,
	   these first 4 addresses have become obsolete, and software applications
	   only use the last 4 addresses to control the codec chip. Therefore, the
	   base address setting "skips past" the 4 unused addresses.

	*/

	err = pnp_activate_dev(pdev);
	if (err < 0) {
		snd_printk(KERN_ERR "PnP WSS pnp configure failure\n");
		return err;
	}

	cs4232_pcm_port[dev] = pnp_port_start(pdev, 0);
	fm_port[dev] = pnp_port_start(pdev, 1);
	dma1[dev] = pnp_dma(pdev, 0);
	dma2[dev] = pnp_dma(pdev, 1);
	cs4232_pcm_irq[dev] = pnp_irq(pdev, 0);

	/* Synth initialization */

	pdev = acard->synth;

	err = pnp_activate_dev(pdev);
	if (err < 0) {
		snd_printk(KERN_ERR "PnP ICS2115 pnp configure failure\n");
		return err;
	}

	ics2115_port[dev] = pnp_port_start(pdev, 0);
	ics2115_irq[dev] = pnp_irq(pdev, 0);

	/* CS4232 MPU initialization. Configure this only if
	   explicitly requested, since its physically inaccessible and
	   consumes another IRQ.
	*/

	if (use_cs4232_midi[dev]) {

		pdev = acard->mpu;

		err = pnp_activate_dev(pdev);
		if (err < 0) {
			/* non-fatal: fall back to "no MPU port" */
			snd_printk(KERN_ERR "PnP MPU401 pnp configure failure\n");
			cs4232_mpu_port[dev] = SNDRV_AUTO_PORT;
		} else {
			cs4232_mpu_port[dev] = pnp_port_start(pdev, 0);
			cs4232_mpu_irq[dev] = pnp_irq(pdev, 0);
		}

		snd_printk (KERN_INFO "CS4232 MPU: port=0x%lx, irq=%i\n",
			    cs4232_mpu_port[dev],
			    cs4232_mpu_irq[dev]);
	}

	snd_printdd ("CS4232: pcm port=0x%lx, fm port=0x%lx, dma1=%i, dma2=%i, irq=%i\nICS2115: port=0x%lx, irq=%i\n",
		    cs4232_pcm_port[dev], fm_port[dev], dma1[dev], dma2[dev], cs4232_pcm_irq[dev],
		    ics2115_port[dev], ics2115_irq[dev]);

	return 0;
}

#endif /* CONFIG_PNP */

/* ICS2115 interrupt: dispatch to the MIDI handler once the MIDI side
   owns interrupts (interrupts_are_midi set by snd_wavefront_midi_start),
   otherwise to the synth-internal handler. */
static irqreturn_t snd_wavefront_ics2115_interrupt(int irq, void *dev_id)
{
	snd_wavefront_card_t *acard;

	acard = (snd_wavefront_card_t *) dev_id;

	if (acard == NULL)
		return IRQ_NONE;

	if (acard->wavefront.interrupts_are_midi) {
		snd_wavefront_midi_interrupt (acard);
	} else {
		snd_wavefront_internal_interrupt (acard);
	}
	return IRQ_HANDLED;
}

/* Detect and start the ICS2115 wavetable synth, then wrap it in a
   hwdep device.  Returns NULL on any failure. */
static struct snd_hwdep *snd_wavefront_new_synth(struct snd_card *card,
						 int hw_dev,
						 snd_wavefront_card_t *acard)
{
	struct snd_hwdep *wavefront_synth;

	if (snd_wavefront_detect (acard) < 0) {
		return NULL;
	}

	if (snd_wavefront_start (&acard->wavefront) < 0) {
		return NULL;
	}

	if (snd_hwdep_new(card, "WaveFront", hw_dev, &wavefront_synth) < 0)
		return NULL;
	strcpy (wavefront_synth->name,
		"WaveFront (ICS2115) wavetable synthesizer");
	wavefront_synth->ops.open = snd_wavefront_synth_open;
	wavefront_synth->ops.release = snd_wavefront_synth_release;
	wavefront_synth->ops.ioctl = snd_wavefront_synth_ioctl;

	return wavefront_synth;
}

/* Initialize the Tropez+ YSS225 FX processor and wrap it in a hwdep
   device.  Returns NULL on any failure. */
static struct snd_hwdep *snd_wavefront_new_fx(struct snd_card *card,
					      int hw_dev,
					      snd_wavefront_card_t *acard,
					      unsigned long port)
{
	struct snd_hwdep *fx_processor;

	if (snd_wavefront_fx_start (&acard->wavefront)) {
		snd_printk (KERN_ERR "cannot initialize YSS225 FX processor");
		return NULL;
	}

	if (snd_hwdep_new (card, "YSS225", hw_dev, &fx_processor) < 0)
		return NULL;

	sprintf (fx_processor->name, "YSS225 FX Processor at 0x%lx", port);
	fx_processor->ops.open = snd_wavefront_fx_open;
	fx_processor->ops.release = snd_wavefront_fx_release;
	fx_processor->ops.ioctl = snd_wavefront_fx_ioctl;

	return fx_processor;
}

/* rawmidi private_data tags distinguishing the two virtual MIDI buses */
static snd_wavefront_mpu_id internal_id = internal_mpu;
static snd_wavefront_mpu_id external_id = external_mpu;

/* Create one rawmidi device for either the internal (synth) or the
   external (connector) bus of the ICS2115.  The MIDI hardware itself
   is started only once, on the first call.  Returns NULL on failure. */
static struct snd_rawmidi *snd_wavefront_new_midi(struct snd_card *card,
						  int midi_dev,
						  snd_wavefront_card_t *acard,
						  unsigned long port,
						  snd_wavefront_mpu_id mpu)
{
	struct snd_rawmidi *rmidi;
	static int first = 1;

	if (first) {
		first = 0;
		acard->wavefront.midi.base = port;
		if (snd_wavefront_midi_start (acard)) {
			snd_printk (KERN_ERR "cannot initialize MIDI interface\n");
			return NULL;
		}
	}

	if (snd_rawmidi_new (card, "WaveFront MIDI", midi_dev, 1, 1, &rmidi) < 0)
		return NULL;

	if (mpu == internal_mpu) {
		strcpy(rmidi->name, "WaveFront MIDI (Internal)");
		rmidi->private_data = &internal_id;
	} else {
		strcpy(rmidi->name, "WaveFront MIDI (External)");
		rmidi->private_data = &external_id;
	}

	snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_wavefront_midi_output);
	snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &snd_wavefront_midi_input);

	rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT |
			     SNDRV_RAWMIDI_INFO_INPUT |
			     SNDRV_RAWMIDI_INFO_DUPLEX;

	return rmidi;
}

/* Allocate the snd_card and initialize the embedded snd_wavefront_card_t
   (locks, wait queue, back-pointer).  Device-managed, so no explicit
   free path is needed here. */
static int snd_wavefront_card_new(struct device *pdev, int dev,
				  struct snd_card **cardp)
{
	struct snd_card *card;
	snd_wavefront_card_t *acard;
	int err;

	err = snd_devm_card_new(pdev, index[dev], id[dev], THIS_MODULE,
				sizeof(snd_wavefront_card_t), &card);
	if (err < 0)
		return err;

	acard = card->private_data;
	acard->wavefront.irq = -1;
	spin_lock_init(&acard->wavefront.irq_lock);
	init_waitqueue_head(&acard->wavefront.interrupt_sleeper);
	spin_lock_init(&acard->wavefront.midi.open);
	spin_lock_init(&acard->wavefront.midi.virtual);
	acard->wavefront.card = card;
	*cardp = card;
	return 0;
}

/* Build all the sub-devices of one card (PCM, OPL3, ICS2115 synth,
   mixer, MPU-401, two ICS2115 MIDI devices, optional FX processor)
   and register the card.  All resources are devm-managed. */
static int
snd_wavefront_probe (struct snd_card *card, int dev)
{
	snd_wavefront_card_t *acard = card->private_data;
	struct snd_wss *chip;
	struct snd_hwdep *wavefront_synth;
	struct snd_rawmidi *ics2115_internal_rmidi = NULL;
	struct snd_rawmidi *ics2115_external_rmidi = NULL;
	struct snd_hwdep *fx_processor;
	int hw_dev = 0, midi_dev = 0, err;

	/* --------- PCM --------------- */

	err = snd_wss_create(card, cs4232_pcm_port[dev], -1,
			     cs4232_pcm_irq[dev],
			     dma1[dev], dma2[dev],
			     WSS_HW_DETECT, 0, &chip);
	if (err < 0) {
		snd_printk(KERN_ERR "can't allocate WSS device\n");
		return err;
	}

	err = snd_wss_pcm(chip, 0);
	if (err < 0)
		return err;

	err = snd_wss_timer(chip, 0);
	if (err < 0)
		return err;

	/* ---------- OPL3 synth --------- */

	if (fm_port[dev] > 0 && fm_port[dev] != SNDRV_AUTO_PORT) {
		struct snd_opl3 *opl3;

		err = snd_opl3_create(card, fm_port[dev], fm_port[dev] + 2,
				      OPL3_HW_OPL3_CS, 0, &opl3);
		if (err < 0) {
			snd_printk (KERN_ERR "can't allocate or detect OPL3 synth\n");
			return err;
		}

		err = snd_opl3_hwdep_new(opl3, hw_dev, 1, NULL);
		if (err < 0)
			return err;
		hw_dev++;
	}

	/* ------- ICS2115 Wavetable synth ------- */

	acard->wavefront.res_base = devm_request_region(card->dev,
							ics2115_port[dev], 16,
							"ICS2115");
	if (acard->wavefront.res_base == NULL) {
		snd_printk(KERN_ERR "unable to grab ICS2115 i/o region 0x%lx-0x%lx\n",
			   ics2115_port[dev], ics2115_port[dev] + 16 - 1);
		return -EBUSY;
	}
	if (devm_request_irq(card->dev, ics2115_irq[dev],
			     snd_wavefront_ics2115_interrupt,
			     0, "ICS2115", acard)) {
		snd_printk(KERN_ERR "unable to use ICS2115 IRQ %d\n", ics2115_irq[dev]);
		return -EBUSY;
	}

	acard->wavefront.irq = ics2115_irq[dev];
	card->sync_irq = acard->wavefront.irq;
	acard->wavefront.base = ics2115_port[dev];

	wavefront_synth = snd_wavefront_new_synth(card, hw_dev, acard);
	if (wavefront_synth == NULL) {
		snd_printk (KERN_ERR "can't create WaveFront synth device\n");
		return -ENOMEM;
	}

	strcpy (wavefront_synth->name, "ICS2115 Wavetable MIDI Synthesizer");
	wavefront_synth->iface = SNDRV_HWDEP_IFACE_ICS2115;
	hw_dev++;

	/* --------- Mixer ------------ */

	err = snd_wss_mixer(chip);
	if (err < 0) {
		snd_printk (KERN_ERR "can't allocate mixer device\n");
		return err;
	}

	/* -------- CS4232 MPU-401 interface -------- */

	if (cs4232_mpu_port[dev] > 0 && cs4232_mpu_port[dev] != SNDRV_AUTO_PORT) {
		err = snd_mpu401_uart_new(card, midi_dev, MPU401_HW_CS4232,
					  cs4232_mpu_port[dev], 0,
					  cs4232_mpu_irq[dev], NULL);
		if (err < 0) {
			snd_printk (KERN_ERR "can't allocate CS4232 MPU-401 device\n");
			return err;
		}
		midi_dev++;
	}

	/* ------ ICS2115 internal MIDI ------------ */

	if (ics2115_port[dev] > 0 && ics2115_port[dev] != SNDRV_AUTO_PORT) {
		ics2115_internal_rmidi =
			snd_wavefront_new_midi (card,
						midi_dev,
						acard,
						ics2115_port[dev],
						internal_mpu);
		if (ics2115_internal_rmidi == NULL) {
			snd_printk (KERN_ERR "can't setup ICS2115 internal MIDI device\n");
			return -ENOMEM;
		}
		midi_dev++;
	}

	/* ------ ICS2115 external MIDI ------------ */

	if (ics2115_port[dev] > 0 && ics2115_port[dev] != SNDRV_AUTO_PORT) {
		ics2115_external_rmidi =
			snd_wavefront_new_midi (card,
						midi_dev,
						acard,
						ics2115_port[dev],
						external_mpu);
		if (ics2115_external_rmidi == NULL) {
			snd_printk (KERN_ERR "can't setup ICS2115 external MIDI device\n");
			return -ENOMEM;
		}
		midi_dev++;
	}

	/* FX processor for Tropez+ */

	if (acard->wavefront.has_fx) {
		fx_processor = snd_wavefront_new_fx (card,
						     hw_dev,
						     acard,
						     ics2115_port[dev]);
		if (fx_processor == NULL) {
			snd_printk (KERN_ERR "can't setup FX device\n");
			return -ENOMEM;
		}

		hw_dev++;

		strcpy(card->driver, "Tropez+");
		strcpy(card->shortname, "Turtle Beach Tropez+");
	} else {
		/* Need a way to distinguish between Maui and Tropez */
		strcpy(card->driver, "WaveFront");
		strcpy(card->shortname, "Turtle Beach WaveFront");
	}

	/* ----- Register the card --------- */

	/* Not safe to include "Turtle Beach" in longname, due to
	   length restrictions
	*/

	sprintf(card->longname, "%s PCM 0x%lx irq %d dma %d",
		card->driver,
		chip->port,
		cs4232_pcm_irq[dev],
		dma1[dev]);

	if (dma2[dev] >= 0 && dma2[dev] < 8)
		sprintf(card->longname + strlen(card->longname), "&%d", dma2[dev]);

	if (cs4232_mpu_port[dev] > 0 && cs4232_mpu_port[dev] != SNDRV_AUTO_PORT) {
		sprintf (card->longname + strlen (card->longname),
			 " MPU-401 0x%lx irq %d",
			 cs4232_mpu_port[dev],
			 cs4232_mpu_irq[dev]);
	}

	sprintf (card->longname + strlen (card->longname),
		 " SYNTH 0x%lx irq %d",
		 ics2115_port[dev],
		 ics2115_irq[dev]);

	return snd_card_register(card);
}

/* ISA (non-PnP) match: require explicit port parameters when PnP is
   disabled or unavailable for this slot. */
static int snd_wavefront_isa_match(struct device *pdev,
				   unsigned int dev)
{
	if (!enable[dev])
		return 0;
#ifdef CONFIG_PNP
	if (isapnp[dev])
		return 0;
#endif
	if (cs4232_pcm_port[dev] == SNDRV_AUTO_PORT) {
		snd_printk(KERN_ERR "specify CS4232 port\n");
		return 0;
	}
	if (ics2115_port[dev] == SNDRV_AUTO_PORT) {
		snd_printk(KERN_ERR "specify ICS2115 port\n");
		return 0;
	}
	return 1;
}

/* ISA probe: create the card object, then run the common probe. */
static int snd_wavefront_isa_probe(struct device *pdev,
				   unsigned int dev)
{
	struct snd_card *card;
	int err;

	err = snd_wavefront_card_new(pdev, dev, &card);
	if (err < 0)
		return err;
	err = snd_wavefront_probe(card, dev);
	if (err < 0)
		return err;

	dev_set_drvdata(pdev, card);
	return 0;
}

#define DEV_NAME "wavefront"

static struct isa_driver snd_wavefront_driver = {
	.match		= snd_wavefront_isa_match,
	.probe		= snd_wavefront_isa_probe,
	/* FIXME: suspend, resume */
	.driver		= {
		.name	= DEV_NAME
	},
};

#ifdef CONFIG_PNP
/* PnP probe: find the next enabled PnP slot, configure resources via
   snd_wavefront_pnp(), then run the common probe. */
static int snd_wavefront_pnp_detect(struct pnp_card_link *pcard,
				    const struct pnp_card_device_id *pid)
{
	static int dev;
	struct snd_card *card;
	int res;

	for ( ; dev < SNDRV_CARDS; dev++) {
		if (enable[dev] && isapnp[dev])
			break;
	}
	if (dev >= SNDRV_CARDS)
		return -ENODEV;

	res = snd_wavefront_card_new(&pcard->card->dev, dev, &card);
	if (res < 0)
		return res;

	if (snd_wavefront_pnp (dev, card->private_data, pcard, pid) < 0) {
		/* PnP configuration failed; without a manually supplied
		   port there is nothing to fall back on. */
		if (cs4232_pcm_port[dev] == SNDRV_AUTO_PORT) {
			snd_printk (KERN_ERR "isapnp detection failed\n");
			return -ENODEV;
		}
	}

	res = snd_wavefront_probe(card, dev);
	if (res < 0)
		return res;

	pnp_set_card_drvdata(pcard, card);
	dev++;
	return 0;
}

static struct pnp_card_driver wavefront_pnpc_driver = {
	.flags		= PNP_DRIVER_RES_DISABLE,
	.name		= "wavefront",
	.id_table	= snd_wavefront_pnpids,
	.probe		= snd_wavefront_pnp_detect,
	/* FIXME: suspend,resume */
};

#endif /* CONFIG_PNP */

/* Register both the ISA and (when configured) the PnP drivers; succeed
   if at least one registration worked. */
static int __init alsa_card_wavefront_init(void)
{
	int err;

	err = isa_register_driver(&snd_wavefront_driver, SNDRV_CARDS);
#ifdef CONFIG_PNP
	if (!err)
		isa_registered = 1;

	err = pnp_register_card_driver(&wavefront_pnpc_driver);
	if (!err)
		pnp_registered = 1;

	/* ISA registration alone is enough to keep the module loaded */
	if (isa_registered)
		err = 0;
#endif
	return err;
}

static void __exit alsa_card_wavefront_exit(void)
{
#ifdef CONFIG_PNP
	if (pnp_registered)
		pnp_unregister_card_driver(&wavefront_pnpc_driver);
	if (isa_registered)
#endif
		isa_unregister_driver(&snd_wavefront_driver);
}

module_init(alsa_card_wavefront_init)
module_exit(alsa_card_wavefront_exit)
/* --- source boundary: linux-master, sound/isa/wavefront/wavefront.c (end) --- */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 1998-2002 by Paul Davis <[email protected]>
 */

#include <linux/io.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <sound/core.h>
#include <sound/snd_wavefront.h>
#include <sound/initval.h>

/* Control bits for the Load Control Register */

#define FX_LSB_TRANSFER 0x01	/* transfer after DSP LSB byte written */
#define FX_MSB_TRANSFER 0x02	/* transfer after DSP MSB byte written */
#define FX_AUTO_INCR    0x04	/* auto-increment DSP address after transfer */

/* sentinel byte in the firmware image: "wait for the DSP to go idle" */
#define WAIT_IDLE	0xff

/* Poll the FX status port until the busy bit (0x80) clears.
   Returns 1 when idle, 0 if the device never became idle. */
static int
wavefront_fx_idle (snd_wavefront_t *dev)

{
	int i;
	unsigned int x = 0x80;

	for (i = 0; i < 1000; i++) {
		x = inb (dev->fx_status);
		if ((x & 0x80) == 0) {
			break;
		}
	}

	if (x & 0x80) {
		snd_printk ("FX device never idle.\n");
		return 0;
	}

	return (1);
}

/* Mute (onoff != 0) or unmute the FX output; silently does nothing if
   the DSP never goes idle. */
static void
wavefront_fx_mute (snd_wavefront_t *dev, int onoff)

{
	if (!wavefront_fx_idle(dev)) {
		return;
	}

	outb (onoff ? 0x02 : 0x00, dev->fx_op);
}

/* Write cnt 16-bit words to DSP memory at page:addr.  A single word is
   written with a plain LSB-triggered transfer; longer runs use the
   auto-increment mode and poll for idle between words.  Returns 0 or a
   negative errno. */
static int
wavefront_fx_memset (snd_wavefront_t *dev,
		     int page,
		     int addr,
		     int cnt,
		     unsigned short *data)
{
	if (page < 0 || page > 7) {
		snd_printk ("FX memset: "
			"page must be >= 0 and <= 7\n");
		return -EINVAL;
	}

	if (addr < 0 || addr > 0x7f) {
		snd_printk ("FX memset: "
			"addr must be >= 0 and <= 7f\n");
		return -EINVAL;
	}

	if (cnt == 1) {

		outb (FX_LSB_TRANSFER, dev->fx_lcr);
		outb (page, dev->fx_dsp_page);
		outb (addr, dev->fx_dsp_addr);
		outb ((data[0] >> 8), dev->fx_dsp_msb);
		outb ((data[0] & 0xff), dev->fx_dsp_lsb);

		snd_printk ("FX: addr %d:%x set to 0x%x\n",
			page, addr, data[0]);

	} else {
		int i;

		outb (FX_AUTO_INCR|FX_LSB_TRANSFER, dev->fx_lcr);
		outb (page, dev->fx_dsp_page);
		outb (addr, dev->fx_dsp_addr);

		for (i = 0; i < cnt; i++) {
			outb ((data[i] >> 8), dev->fx_dsp_msb);
			outb ((data[i] & 0xff), dev->fx_dsp_lsb);
			if (!wavefront_fx_idle (dev)) {
				break;
			}
		}

		if (i != cnt) {
			snd_printk ("FX memset "
				    "(0x%x, 0x%x, 0x%lx, %d) incomplete\n",
				    page, addr, (unsigned long) data, cnt);
			return -EIO;
		}
	}

	return 0;
}

/* Returns 0 if an FX (YSS225) device appears to be present (status busy
   bit clear), -1 otherwise. */
int
snd_wavefront_fx_detect (snd_wavefront_t *dev)

{
	/* This is a crude check, but its the best one I have for now.
	   Certainly on the Maui and the Tropez, wavefront_fx_idle() will
	   report "never idle", which suggests that this test should
	   work OK.
	*/

	if (inb (dev->fx_status) & 0x80) {
		snd_printk ("Hmm, probably a Maui or Tropez.\n");
		return -1;
	}

	return 0;
}

int
snd_wavefront_fx_open (struct snd_hwdep *hw, struct file *file)

{
	if (!try_module_get(hw->card->module))
		return -EFAULT;
	file->private_data = hw;
	return 0;
}

int
snd_wavefront_fx_release (struct snd_hwdep *hw, struct file *file)

{
	module_put(hw->card->module);
	return 0;
}

/* hwdep ioctl entry point for the FX processor: supports WFFX_MUTE and
   WFFX_MEMSET (bulk DSP-memory writes copied in from user space). */
int
snd_wavefront_fx_ioctl (struct snd_hwdep *sdev, struct file *file,
			unsigned int cmd, unsigned long arg)

{
	struct snd_card *card;
	snd_wavefront_card_t *acard;
	snd_wavefront_t *dev;
	wavefront_fx_info r;
	unsigned short *page_data = NULL;
	unsigned short *pd;
	int err = 0;

	card = sdev->card;
	if (snd_BUG_ON(!card))
		return -ENODEV;
	if (snd_BUG_ON(!card->private_data))
		return -ENODEV;

	acard = card->private_data;
	dev = &acard->wavefront;

	if (copy_from_user (&r, (void __user *)arg, sizeof (wavefront_fx_info)))
		return -EFAULT;

	switch (r.request) {
	case WFFX_MUTE:
		/* NOTE(review): the mute is performed and then -EIO is
		   returned unconditionally — looks odd, but this matches
		   the long-standing upstream behavior; confirm before
		   changing. */
		wavefront_fx_mute (dev, r.data[0]);
		return -EIO;

	case WFFX_MEMSET:
		if (r.data[2] <= 0) {
			snd_printk ("cannot write "
				"<= 0 bytes to FX\n");
			return -EIO;
		} else if (r.data[2] == 1) {
			/* single word lives inline in the request */
			pd = (unsigned short *) &r.data[3];
		} else {
			/* r.data[2] counts 16-bit words: 256 words == 512 bytes */
			if (r.data[2] > 256) {
				snd_printk ("cannot write "
					"> 512 bytes to FX\n");
				return -EIO;
			}
			page_data = memdup_user((unsigned char __user *)
						r.data[3],
						r.data[2] * sizeof(short));
			if (IS_ERR(page_data))
				return PTR_ERR(page_data);
			pd = page_data;
		}

		err = wavefront_fx_memset (dev,
			     r.data[0], /* page */
			     r.data[1], /* addr */
			     r.data[2], /* cnt */
			     pd);
		kfree(page_data);
		break;

	default:
		snd_printk ("FX: ioctl %d not yet supported\n",
			    r.request);
		return -ENOTTY;
	}
	return err;
}

/* YSS225 initialization.

   This code was developed using DOSEMU. The Turtle Beach SETUPSND
   utility was run with I/O tracing in DOSEMU enabled, and a reconstruction
   of the port I/O done, using the Yamaha faxback document as a guide to
   add more logic to the code. Its really pretty weird.

   This is the approach of just dumping the whole I/O sequence as a
   series of port/value pairs and a simple loop that outputs it.
*/

/* Load the YSS225 register image from firmware: the file is a sequence
   of (offset, value) byte pairs; offsets 8..15 are written to
   base+offset, and the WAIT_IDLE sentinel forces an idle-poll.
   Returns 0 on success, -1 on any failure. */
int
snd_wavefront_fx_start (snd_wavefront_t *dev)
{
	unsigned int i;
	int err;
	const struct firmware *firmware = NULL;

	if (dev->fx_initialized)
		return 0;

	err = request_firmware(&firmware, "yamaha/yss225_registers.bin",
			       dev->card->dev);
	if (err < 0) {
		err = -1;
		goto out;
	}

	for (i = 0; i + 1 < firmware->size; i += 2) {
		if (firmware->data[i] >= 8 && firmware->data[i] < 16) {
			outb(firmware->data[i + 1],
			     dev->base + firmware->data[i]);
		} else if (firmware->data[i] == WAIT_IDLE) {
			if (!wavefront_fx_idle(dev)) {
				err = -1;
				goto out;
			}
		} else {
			snd_printk(KERN_ERR "invalid address"
				   " in register data\n");
			err = -1;
			goto out;
		}
	}

	dev->fx_initialized = 1;
	err = 0;

out:
	release_firmware(firmware);
	return err;
}

MODULE_FIRMWARE("yamaha/yss225_registers.bin");
/* --- source boundary: linux-master, sound/isa/wavefront/wavefront_fx.c (end) --- */
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) by Paul Barton-Davis 1998-1999 */ /* The low level driver for the WaveFront ICS2115 MIDI interface(s) * * Note that there is also an MPU-401 emulation (actually, a UART-401 * emulation) on the CS4232 on the Tropez and Tropez Plus. This code * has nothing to do with that interface at all. * * The interface is essentially just a UART-401, but is has the * interesting property of supporting what Turtle Beach called * "Virtual MIDI" mode. In this mode, there are effectively *two* * MIDI buses accessible via the interface, one that is routed * solely to/from the external WaveFront synthesizer and the other * corresponding to the pin/socket connector used to link external * MIDI devices to the board. * * This driver fully supports this mode, allowing two distinct MIDI * busses to be used completely independently, giving 32 channels of * MIDI routing, 16 to the WaveFront synth and 16 to the external MIDI * bus. The devices are named /dev/snd/midiCnD0 and /dev/snd/midiCnD1, * where `n' is the card number. Note that the device numbers may be * something other than 0 and 1 if the CS4232 UART/MPU-401 interface * is enabled. * * Switching between the two is accomplished externally by the driver * using the two otherwise unused MIDI bytes. See the code for more details. * * NOTE: VIRTUAL MIDI MODE IS ON BY DEFAULT (see lowlevel/isa/wavefront.c) * * The main reason to turn off Virtual MIDI mode is when you want to * tightly couple the WaveFront synth with an external MIDI * device. You won't be able to distinguish the source of any MIDI * data except via SysEx ID, but thats probably OK, since for the most * part, the WaveFront won't be sending any MIDI data at all. * * The main reason to turn on Virtual MIDI Mode is to provide two * completely independent 16-channel MIDI buses, one to the * WaveFront and one to any external MIDI devices. 
Given the 32 * voice nature of the WaveFront, its pretty easy to find a use * for all 16 channels driving just that synth. * */ #include <linux/io.h> #include <linux/init.h> #include <linux/time.h> #include <linux/wait.h> #include <sound/core.h> #include <sound/snd_wavefront.h> static inline int wf_mpu_status (snd_wavefront_midi_t *midi) { return inb (midi->mpu_status_port); } static inline int input_avail (snd_wavefront_midi_t *midi) { return !(wf_mpu_status(midi) & INPUT_AVAIL); } static inline int output_ready (snd_wavefront_midi_t *midi) { return !(wf_mpu_status(midi) & OUTPUT_READY); } static inline int read_data (snd_wavefront_midi_t *midi) { return inb (midi->mpu_data_port); } static inline void write_data (snd_wavefront_midi_t *midi, unsigned char byte) { outb (byte, midi->mpu_data_port); } static snd_wavefront_midi_t * get_wavefront_midi (struct snd_rawmidi_substream *substream) { struct snd_card *card; snd_wavefront_card_t *acard; if (substream == NULL || substream->rmidi == NULL) return NULL; card = substream->rmidi->card; if (card == NULL) return NULL; if (card->private_data == NULL) return NULL; acard = card->private_data; return &acard->wavefront.midi; } static void snd_wavefront_midi_output_write(snd_wavefront_card_t *card) { snd_wavefront_midi_t *midi = &card->wavefront.midi; snd_wavefront_mpu_id mpu; unsigned long flags; unsigned char midi_byte; int max = 256, mask = 1; int timeout; /* Its not OK to try to change the status of "virtuality" of the MIDI interface while we're outputting stuff. See snd_wavefront_midi_{enable,disable}_virtual () for the other half of this. The first loop attempts to flush any data from the current output device, and then the second emits the switch byte (if necessary), and starts outputting data for the output device currently in use. */ if (midi->substream_output[midi->output_mpu] == NULL) { goto __second; } while (max > 0) { /* XXX fix me - no hard timing loops allowed! 
*/ for (timeout = 30000; timeout > 0; timeout--) { if (output_ready (midi)) break; } spin_lock_irqsave (&midi->virtual, flags); if ((midi->mode[midi->output_mpu] & MPU401_MODE_OUTPUT) == 0) { spin_unlock_irqrestore (&midi->virtual, flags); goto __second; } if (output_ready (midi)) { if (snd_rawmidi_transmit(midi->substream_output[midi->output_mpu], &midi_byte, 1) == 1) { if (!midi->isvirtual || (midi_byte != WF_INTERNAL_SWITCH && midi_byte != WF_EXTERNAL_SWITCH)) write_data(midi, midi_byte); max--; } else { if (midi->istimer) { if (--midi->istimer <= 0) del_timer(&midi->timer); } midi->mode[midi->output_mpu] &= ~MPU401_MODE_OUTPUT_TRIGGER; spin_unlock_irqrestore (&midi->virtual, flags); goto __second; } } else { spin_unlock_irqrestore (&midi->virtual, flags); return; } spin_unlock_irqrestore (&midi->virtual, flags); } __second: if (midi->substream_output[!midi->output_mpu] == NULL) { return; } while (max > 0) { /* XXX fix me - no hard timing loops allowed! */ for (timeout = 30000; timeout > 0; timeout--) { if (output_ready (midi)) break; } spin_lock_irqsave (&midi->virtual, flags); if (!midi->isvirtual) mask = 0; mpu = midi->output_mpu ^ mask; mask = 0; /* don't invert the value from now */ if ((midi->mode[mpu] & MPU401_MODE_OUTPUT) == 0) { spin_unlock_irqrestore (&midi->virtual, flags); return; } if (snd_rawmidi_transmit_empty(midi->substream_output[mpu])) goto __timer; if (output_ready (midi)) { if (mpu != midi->output_mpu) { write_data(midi, mpu == internal_mpu ? 
WF_INTERNAL_SWITCH : WF_EXTERNAL_SWITCH); midi->output_mpu = mpu; } else if (snd_rawmidi_transmit(midi->substream_output[mpu], &midi_byte, 1) == 1) { if (!midi->isvirtual || (midi_byte != WF_INTERNAL_SWITCH && midi_byte != WF_EXTERNAL_SWITCH)) write_data(midi, midi_byte); max--; } else { __timer: if (midi->istimer) { if (--midi->istimer <= 0) del_timer(&midi->timer); } midi->mode[mpu] &= ~MPU401_MODE_OUTPUT_TRIGGER; spin_unlock_irqrestore (&midi->virtual, flags); return; } } else { spin_unlock_irqrestore (&midi->virtual, flags); return; } spin_unlock_irqrestore (&midi->virtual, flags); } } static int snd_wavefront_midi_input_open(struct snd_rawmidi_substream *substream) { unsigned long flags; snd_wavefront_midi_t *midi; snd_wavefront_mpu_id mpu; if (snd_BUG_ON(!substream || !substream->rmidi)) return -ENXIO; if (snd_BUG_ON(!substream->rmidi->private_data)) return -ENXIO; mpu = *((snd_wavefront_mpu_id *) substream->rmidi->private_data); midi = get_wavefront_midi(substream); if (!midi) return -EIO; spin_lock_irqsave (&midi->open, flags); midi->mode[mpu] |= MPU401_MODE_INPUT; midi->substream_input[mpu] = substream; spin_unlock_irqrestore (&midi->open, flags); return 0; } static int snd_wavefront_midi_output_open(struct snd_rawmidi_substream *substream) { unsigned long flags; snd_wavefront_midi_t *midi; snd_wavefront_mpu_id mpu; if (snd_BUG_ON(!substream || !substream->rmidi)) return -ENXIO; if (snd_BUG_ON(!substream->rmidi->private_data)) return -ENXIO; mpu = *((snd_wavefront_mpu_id *) substream->rmidi->private_data); midi = get_wavefront_midi(substream); if (!midi) return -EIO; spin_lock_irqsave (&midi->open, flags); midi->mode[mpu] |= MPU401_MODE_OUTPUT; midi->substream_output[mpu] = substream; spin_unlock_irqrestore (&midi->open, flags); return 0; } static int snd_wavefront_midi_input_close(struct snd_rawmidi_substream *substream) { unsigned long flags; snd_wavefront_midi_t *midi; snd_wavefront_mpu_id mpu; if (snd_BUG_ON(!substream || !substream->rmidi)) return 
-ENXIO; if (snd_BUG_ON(!substream->rmidi->private_data)) return -ENXIO; mpu = *((snd_wavefront_mpu_id *) substream->rmidi->private_data); midi = get_wavefront_midi(substream); if (!midi) return -EIO; spin_lock_irqsave (&midi->open, flags); midi->mode[mpu] &= ~MPU401_MODE_INPUT; spin_unlock_irqrestore (&midi->open, flags); return 0; } static int snd_wavefront_midi_output_close(struct snd_rawmidi_substream *substream) { unsigned long flags; snd_wavefront_midi_t *midi; snd_wavefront_mpu_id mpu; if (snd_BUG_ON(!substream || !substream->rmidi)) return -ENXIO; if (snd_BUG_ON(!substream->rmidi->private_data)) return -ENXIO; mpu = *((snd_wavefront_mpu_id *) substream->rmidi->private_data); midi = get_wavefront_midi(substream); if (!midi) return -EIO; spin_lock_irqsave (&midi->open, flags); midi->mode[mpu] &= ~MPU401_MODE_OUTPUT; spin_unlock_irqrestore (&midi->open, flags); return 0; } static void snd_wavefront_midi_input_trigger(struct snd_rawmidi_substream *substream, int up) { unsigned long flags; snd_wavefront_midi_t *midi; snd_wavefront_mpu_id mpu; if (substream == NULL || substream->rmidi == NULL) return; if (substream->rmidi->private_data == NULL) return; mpu = *((snd_wavefront_mpu_id *) substream->rmidi->private_data); midi = get_wavefront_midi(substream); if (!midi) return; spin_lock_irqsave (&midi->virtual, flags); if (up) { midi->mode[mpu] |= MPU401_MODE_INPUT_TRIGGER; } else { midi->mode[mpu] &= ~MPU401_MODE_INPUT_TRIGGER; } spin_unlock_irqrestore (&midi->virtual, flags); } static void snd_wavefront_midi_output_timer(struct timer_list *t) { snd_wavefront_midi_t *midi = from_timer(midi, t, timer); snd_wavefront_card_t *card = midi->timer_card; unsigned long flags; spin_lock_irqsave (&midi->virtual, flags); mod_timer(&midi->timer, 1 + jiffies); spin_unlock_irqrestore (&midi->virtual, flags); snd_wavefront_midi_output_write(card); } static void snd_wavefront_midi_output_trigger(struct snd_rawmidi_substream *substream, int up) { unsigned long flags; 
snd_wavefront_midi_t *midi; snd_wavefront_mpu_id mpu; if (substream == NULL || substream->rmidi == NULL) return; if (substream->rmidi->private_data == NULL) return; mpu = *((snd_wavefront_mpu_id *) substream->rmidi->private_data); midi = get_wavefront_midi(substream); if (!midi) return; spin_lock_irqsave (&midi->virtual, flags); if (up) { if ((midi->mode[mpu] & MPU401_MODE_OUTPUT_TRIGGER) == 0) { if (!midi->istimer) { timer_setup(&midi->timer, snd_wavefront_midi_output_timer, 0); mod_timer(&midi->timer, 1 + jiffies); } midi->istimer++; midi->mode[mpu] |= MPU401_MODE_OUTPUT_TRIGGER; } } else { midi->mode[mpu] &= ~MPU401_MODE_OUTPUT_TRIGGER; } spin_unlock_irqrestore (&midi->virtual, flags); if (up) snd_wavefront_midi_output_write((snd_wavefront_card_t *)substream->rmidi->card->private_data); } void snd_wavefront_midi_interrupt (snd_wavefront_card_t *card) { unsigned long flags; snd_wavefront_midi_t *midi; static struct snd_rawmidi_substream *substream = NULL; static int mpu = external_mpu; int max = 128; unsigned char byte; midi = &card->wavefront.midi; if (!input_avail (midi)) { /* not for us */ snd_wavefront_midi_output_write(card); return; } spin_lock_irqsave (&midi->virtual, flags); while (--max) { if (input_avail (midi)) { byte = read_data (midi); if (midi->isvirtual) { if (byte == WF_EXTERNAL_SWITCH) { substream = midi->substream_input[external_mpu]; mpu = external_mpu; } else if (byte == WF_INTERNAL_SWITCH) { substream = midi->substream_output[internal_mpu]; mpu = internal_mpu; } /* else just leave it as it is */ } else { substream = midi->substream_input[internal_mpu]; mpu = internal_mpu; } if (substream == NULL) { continue; } if (midi->mode[mpu] & MPU401_MODE_INPUT_TRIGGER) { snd_rawmidi_receive(substream, &byte, 1); } } else { break; } } spin_unlock_irqrestore (&midi->virtual, flags); snd_wavefront_midi_output_write(card); } void snd_wavefront_midi_enable_virtual (snd_wavefront_card_t *card) { unsigned long flags; spin_lock_irqsave 
(&card->wavefront.midi.virtual, flags); card->wavefront.midi.isvirtual = 1; card->wavefront.midi.output_mpu = internal_mpu; card->wavefront.midi.input_mpu = internal_mpu; spin_unlock_irqrestore (&card->wavefront.midi.virtual, flags); } void snd_wavefront_midi_disable_virtual (snd_wavefront_card_t *card) { unsigned long flags; spin_lock_irqsave (&card->wavefront.midi.virtual, flags); // snd_wavefront_midi_input_close (card->ics2115_external_rmidi); // snd_wavefront_midi_output_close (card->ics2115_external_rmidi); card->wavefront.midi.isvirtual = 0; spin_unlock_irqrestore (&card->wavefront.midi.virtual, flags); } int snd_wavefront_midi_start (snd_wavefront_card_t *card) { int ok, i; unsigned char rbuf[4], wbuf[4]; snd_wavefront_t *dev; snd_wavefront_midi_t *midi; dev = &card->wavefront; midi = &dev->midi; /* The ICS2115 MPU-401 interface doesn't do anything until its set into UART mode. */ /* XXX fix me - no hard timing loops allowed! */ for (i = 0; i < 30000 && !output_ready (midi); i++); if (!output_ready (midi)) { snd_printk ("MIDI interface not ready for command\n"); return -1; } /* Any interrupts received from now on are owned by the MIDI side of things. */ dev->interrupts_are_midi = 1; outb (UART_MODE_ON, midi->mpu_command_port); for (ok = 0, i = 50000; i > 0 && !ok; i--) { if (input_avail (midi)) { if (read_data (midi) == MPU_ACK) { ok = 1; break; } } } if (!ok) { snd_printk ("cannot set UART mode for MIDI interface"); dev->interrupts_are_midi = 0; return -1; } /* Route external MIDI to WaveFront synth (by default) */ if (snd_wavefront_cmd (dev, WFC_MISYNTH_ON, rbuf, wbuf)) { snd_printk ("can't enable MIDI-IN-2-synth routing.\n"); /* XXX error ? */ } /* Turn on Virtual MIDI, but first *always* turn it off, since otherwise consecutive reloads of the driver will never cause the hardware to generate the initial "internal" or "external" source bytes in the MIDI data stream. 
This is pretty important, since the internal hardware generally will be used to generate none or very little MIDI output, and thus the only source of MIDI data is actually external. Without the switch bytes, the driver will think it all comes from the internal interface. Duh. */ if (snd_wavefront_cmd (dev, WFC_VMIDI_OFF, rbuf, wbuf)) { snd_printk ("virtual MIDI mode not disabled\n"); return 0; /* We're OK, but missing the external MIDI dev */ } snd_wavefront_midi_enable_virtual (card); if (snd_wavefront_cmd (dev, WFC_VMIDI_ON, rbuf, wbuf)) { snd_printk ("cannot enable virtual MIDI mode.\n"); snd_wavefront_midi_disable_virtual (card); } return 0; } const struct snd_rawmidi_ops snd_wavefront_midi_output = { .open = snd_wavefront_midi_output_open, .close = snd_wavefront_midi_output_close, .trigger = snd_wavefront_midi_output_trigger, }; const struct snd_rawmidi_ops snd_wavefront_midi_input = { .open = snd_wavefront_midi_input_open, .close = snd_wavefront_midi_input_close, .trigger = snd_wavefront_midi_input_trigger, };
linux-master
sound/isa/wavefront/wavefront_midi.c
// SPDX-License-Identifier: GPL-2.0+ /* * virtio-snd: Virtio sound device * Copyright (C) 2021 OpenSynergy GmbH */ #include <linux/moduleparam.h> #include <linux/virtio_config.h> #include "virtio_card.h" static u32 pcm_buffer_ms = 160; module_param(pcm_buffer_ms, uint, 0644); MODULE_PARM_DESC(pcm_buffer_ms, "PCM substream buffer time in milliseconds"); static u32 pcm_periods_min = 2; module_param(pcm_periods_min, uint, 0644); MODULE_PARM_DESC(pcm_periods_min, "Minimum number of PCM periods"); static u32 pcm_periods_max = 16; module_param(pcm_periods_max, uint, 0644); MODULE_PARM_DESC(pcm_periods_max, "Maximum number of PCM periods"); static u32 pcm_period_ms_min = 10; module_param(pcm_period_ms_min, uint, 0644); MODULE_PARM_DESC(pcm_period_ms_min, "Minimum PCM period time in milliseconds"); static u32 pcm_period_ms_max = 80; module_param(pcm_period_ms_max, uint, 0644); MODULE_PARM_DESC(pcm_period_ms_max, "Maximum PCM period time in milliseconds"); /* Map for converting VirtIO format to ALSA format. 
*/ static const snd_pcm_format_t g_v2a_format_map[] = { [VIRTIO_SND_PCM_FMT_IMA_ADPCM] = SNDRV_PCM_FORMAT_IMA_ADPCM, [VIRTIO_SND_PCM_FMT_MU_LAW] = SNDRV_PCM_FORMAT_MU_LAW, [VIRTIO_SND_PCM_FMT_A_LAW] = SNDRV_PCM_FORMAT_A_LAW, [VIRTIO_SND_PCM_FMT_S8] = SNDRV_PCM_FORMAT_S8, [VIRTIO_SND_PCM_FMT_U8] = SNDRV_PCM_FORMAT_U8, [VIRTIO_SND_PCM_FMT_S16] = SNDRV_PCM_FORMAT_S16_LE, [VIRTIO_SND_PCM_FMT_U16] = SNDRV_PCM_FORMAT_U16_LE, [VIRTIO_SND_PCM_FMT_S18_3] = SNDRV_PCM_FORMAT_S18_3LE, [VIRTIO_SND_PCM_FMT_U18_3] = SNDRV_PCM_FORMAT_U18_3LE, [VIRTIO_SND_PCM_FMT_S20_3] = SNDRV_PCM_FORMAT_S20_3LE, [VIRTIO_SND_PCM_FMT_U20_3] = SNDRV_PCM_FORMAT_U20_3LE, [VIRTIO_SND_PCM_FMT_S24_3] = SNDRV_PCM_FORMAT_S24_3LE, [VIRTIO_SND_PCM_FMT_U24_3] = SNDRV_PCM_FORMAT_U24_3LE, [VIRTIO_SND_PCM_FMT_S20] = SNDRV_PCM_FORMAT_S20_LE, [VIRTIO_SND_PCM_FMT_U20] = SNDRV_PCM_FORMAT_U20_LE, [VIRTIO_SND_PCM_FMT_S24] = SNDRV_PCM_FORMAT_S24_LE, [VIRTIO_SND_PCM_FMT_U24] = SNDRV_PCM_FORMAT_U24_LE, [VIRTIO_SND_PCM_FMT_S32] = SNDRV_PCM_FORMAT_S32_LE, [VIRTIO_SND_PCM_FMT_U32] = SNDRV_PCM_FORMAT_U32_LE, [VIRTIO_SND_PCM_FMT_FLOAT] = SNDRV_PCM_FORMAT_FLOAT_LE, [VIRTIO_SND_PCM_FMT_FLOAT64] = SNDRV_PCM_FORMAT_FLOAT64_LE, [VIRTIO_SND_PCM_FMT_DSD_U8] = SNDRV_PCM_FORMAT_DSD_U8, [VIRTIO_SND_PCM_FMT_DSD_U16] = SNDRV_PCM_FORMAT_DSD_U16_LE, [VIRTIO_SND_PCM_FMT_DSD_U32] = SNDRV_PCM_FORMAT_DSD_U32_LE, [VIRTIO_SND_PCM_FMT_IEC958_SUBFRAME] = SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE }; /* Map for converting VirtIO frame rate to ALSA frame rate. 
*/ struct virtsnd_v2a_rate { unsigned int alsa_bit; unsigned int rate; }; static const struct virtsnd_v2a_rate g_v2a_rate_map[] = { [VIRTIO_SND_PCM_RATE_5512] = { SNDRV_PCM_RATE_5512, 5512 }, [VIRTIO_SND_PCM_RATE_8000] = { SNDRV_PCM_RATE_8000, 8000 }, [VIRTIO_SND_PCM_RATE_11025] = { SNDRV_PCM_RATE_11025, 11025 }, [VIRTIO_SND_PCM_RATE_16000] = { SNDRV_PCM_RATE_16000, 16000 }, [VIRTIO_SND_PCM_RATE_22050] = { SNDRV_PCM_RATE_22050, 22050 }, [VIRTIO_SND_PCM_RATE_32000] = { SNDRV_PCM_RATE_32000, 32000 }, [VIRTIO_SND_PCM_RATE_44100] = { SNDRV_PCM_RATE_44100, 44100 }, [VIRTIO_SND_PCM_RATE_48000] = { SNDRV_PCM_RATE_48000, 48000 }, [VIRTIO_SND_PCM_RATE_64000] = { SNDRV_PCM_RATE_64000, 64000 }, [VIRTIO_SND_PCM_RATE_88200] = { SNDRV_PCM_RATE_88200, 88200 }, [VIRTIO_SND_PCM_RATE_96000] = { SNDRV_PCM_RATE_96000, 96000 }, [VIRTIO_SND_PCM_RATE_176400] = { SNDRV_PCM_RATE_176400, 176400 }, [VIRTIO_SND_PCM_RATE_192000] = { SNDRV_PCM_RATE_192000, 192000 } }; /** * virtsnd_pcm_build_hw() - Parse substream config and build HW descriptor. * @vss: VirtIO substream. * @info: VirtIO substream information entry. * * Context: Any context. * Return: 0 on success, -EINVAL if configuration is invalid. */ static int virtsnd_pcm_build_hw(struct virtio_pcm_substream *vss, struct virtio_snd_pcm_info *info) { struct virtio_device *vdev = vss->snd->vdev; unsigned int i; u64 values; size_t sample_max = 0; size_t sample_min = 0; vss->features = le32_to_cpu(info->features); /* * TODO: set SNDRV_PCM_INFO_{BATCH,BLOCK_TRANSFER} if device supports * only message-based transport. 
*/ vss->hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BATCH | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE; if (!info->channels_min || info->channels_min > info->channels_max) { dev_err(&vdev->dev, "SID %u: invalid channel range [%u %u]\n", vss->sid, info->channels_min, info->channels_max); return -EINVAL; } vss->hw.channels_min = info->channels_min; vss->hw.channels_max = info->channels_max; values = le64_to_cpu(info->formats); vss->hw.formats = 0; for (i = 0; i < ARRAY_SIZE(g_v2a_format_map); ++i) if (values & (1ULL << i)) { snd_pcm_format_t alsa_fmt = g_v2a_format_map[i]; int bytes = snd_pcm_format_physical_width(alsa_fmt) / 8; if (!sample_min || sample_min > bytes) sample_min = bytes; if (sample_max < bytes) sample_max = bytes; vss->hw.formats |= pcm_format_to_bits(alsa_fmt); } if (!vss->hw.formats) { dev_err(&vdev->dev, "SID %u: no supported PCM sample formats found\n", vss->sid); return -EINVAL; } values = le64_to_cpu(info->rates); vss->hw.rates = 0; for (i = 0; i < ARRAY_SIZE(g_v2a_rate_map); ++i) if (values & (1ULL << i)) { if (!vss->hw.rate_min || vss->hw.rate_min > g_v2a_rate_map[i].rate) vss->hw.rate_min = g_v2a_rate_map[i].rate; if (vss->hw.rate_max < g_v2a_rate_map[i].rate) vss->hw.rate_max = g_v2a_rate_map[i].rate; vss->hw.rates |= g_v2a_rate_map[i].alsa_bit; } if (!vss->hw.rates) { dev_err(&vdev->dev, "SID %u: no supported PCM frame rates found\n", vss->sid); return -EINVAL; } vss->hw.periods_min = pcm_periods_min; vss->hw.periods_max = pcm_periods_max; /* * We must ensure that there is enough space in the buffer to store * pcm_buffer_ms ms for the combination (Cmax, Smax, Rmax), where: * Cmax = maximum supported number of channels, * Smax = maximum supported sample size in bytes, * Rmax = maximum supported frame rate. 
*/ vss->hw.buffer_bytes_max = PAGE_ALIGN(sample_max * vss->hw.channels_max * pcm_buffer_ms * (vss->hw.rate_max / MSEC_PER_SEC)); /* * We must ensure that the minimum period size is enough to store * pcm_period_ms_min ms for the combination (Cmin, Smin, Rmin), where: * Cmin = minimum supported number of channels, * Smin = minimum supported sample size in bytes, * Rmin = minimum supported frame rate. */ vss->hw.period_bytes_min = sample_min * vss->hw.channels_min * pcm_period_ms_min * (vss->hw.rate_min / MSEC_PER_SEC); /* * We must ensure that the maximum period size is enough to store * pcm_period_ms_max ms for the combination (Cmax, Smax, Rmax). */ vss->hw.period_bytes_max = sample_max * vss->hw.channels_max * pcm_period_ms_max * (vss->hw.rate_max / MSEC_PER_SEC); return 0; } /** * virtsnd_pcm_find() - Find the PCM device for the specified node ID. * @snd: VirtIO sound device. * @nid: Function node ID. * * Context: Any context. * Return: a pointer to the PCM device or ERR_PTR(-ENOENT). */ struct virtio_pcm *virtsnd_pcm_find(struct virtio_snd *snd, u32 nid) { struct virtio_pcm *vpcm; list_for_each_entry(vpcm, &snd->pcm_list, list) if (vpcm->nid == nid) return vpcm; return ERR_PTR(-ENOENT); } /** * virtsnd_pcm_find_or_create() - Find or create the PCM device for the * specified node ID. * @snd: VirtIO sound device. * @nid: Function node ID. * * Context: Any context that permits to sleep. * Return: a pointer to the PCM device or ERR_PTR(-errno). */ struct virtio_pcm *virtsnd_pcm_find_or_create(struct virtio_snd *snd, u32 nid) { struct virtio_device *vdev = snd->vdev; struct virtio_pcm *vpcm; vpcm = virtsnd_pcm_find(snd, nid); if (!IS_ERR(vpcm)) return vpcm; vpcm = devm_kzalloc(&vdev->dev, sizeof(*vpcm), GFP_KERNEL); if (!vpcm) return ERR_PTR(-ENOMEM); vpcm->nid = nid; list_add_tail(&vpcm->list, &snd->pcm_list); return vpcm; } /** * virtsnd_pcm_validate() - Validate if the device can be started. * @vdev: VirtIO parent device. * * Context: Any context. 
* Return: 0 on success, -EINVAL on failure. */ int virtsnd_pcm_validate(struct virtio_device *vdev) { if (pcm_periods_min < 2 || pcm_periods_min > pcm_periods_max) { dev_err(&vdev->dev, "invalid range [%u %u] of the number of PCM periods\n", pcm_periods_min, pcm_periods_max); return -EINVAL; } if (!pcm_period_ms_min || pcm_period_ms_min > pcm_period_ms_max) { dev_err(&vdev->dev, "invalid range [%u %u] of the size of the PCM period\n", pcm_period_ms_min, pcm_period_ms_max); return -EINVAL; } if (pcm_buffer_ms < pcm_periods_min * pcm_period_ms_min) { dev_err(&vdev->dev, "pcm_buffer_ms(=%u) value cannot be < %u ms\n", pcm_buffer_ms, pcm_periods_min * pcm_period_ms_min); return -EINVAL; } if (pcm_period_ms_max > pcm_buffer_ms / 2) { dev_err(&vdev->dev, "pcm_period_ms_max(=%u) value cannot be > %u ms\n", pcm_period_ms_max, pcm_buffer_ms / 2); return -EINVAL; } return 0; } /** * virtsnd_pcm_period_elapsed() - Kernel work function to handle the elapsed * period state. * @work: Elapsed period work. * * The main purpose of this function is to call snd_pcm_period_elapsed() in * a process context, not in an interrupt context. This is necessary because PCM * devices operate in non-atomic mode. * * Context: Process context. */ static void virtsnd_pcm_period_elapsed(struct work_struct *work) { struct virtio_pcm_substream *vss = container_of(work, struct virtio_pcm_substream, elapsed_period); snd_pcm_period_elapsed(vss->substream); } /** * virtsnd_pcm_parse_cfg() - Parse the stream configuration. * @snd: VirtIO sound device. * * This function is called during initial device initialization. * * Context: Any context that permits to sleep. * Return: 0 on success, -errno on failure. 
*/ int virtsnd_pcm_parse_cfg(struct virtio_snd *snd) { struct virtio_device *vdev = snd->vdev; struct virtio_snd_pcm_info *info; u32 i; int rc; virtio_cread_le(vdev, struct virtio_snd_config, streams, &snd->nsubstreams); if (!snd->nsubstreams) return 0; snd->substreams = devm_kcalloc(&vdev->dev, snd->nsubstreams, sizeof(*snd->substreams), GFP_KERNEL); if (!snd->substreams) return -ENOMEM; info = kcalloc(snd->nsubstreams, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; rc = virtsnd_ctl_query_info(snd, VIRTIO_SND_R_PCM_INFO, 0, snd->nsubstreams, sizeof(*info), info); if (rc) goto on_exit; for (i = 0; i < snd->nsubstreams; ++i) { struct virtio_pcm_substream *vss = &snd->substreams[i]; struct virtio_pcm *vpcm; vss->snd = snd; vss->sid = i; INIT_WORK(&vss->elapsed_period, virtsnd_pcm_period_elapsed); init_waitqueue_head(&vss->msg_empty); spin_lock_init(&vss->lock); rc = virtsnd_pcm_build_hw(vss, &info[i]); if (rc) goto on_exit; vss->nid = le32_to_cpu(info[i].hdr.hda_fn_nid); vpcm = virtsnd_pcm_find_or_create(snd, vss->nid); if (IS_ERR(vpcm)) { rc = PTR_ERR(vpcm); goto on_exit; } switch (info[i].direction) { case VIRTIO_SND_D_OUTPUT: vss->direction = SNDRV_PCM_STREAM_PLAYBACK; break; case VIRTIO_SND_D_INPUT: vss->direction = SNDRV_PCM_STREAM_CAPTURE; break; default: dev_err(&vdev->dev, "SID %u: unknown direction (%u)\n", vss->sid, info[i].direction); rc = -EINVAL; goto on_exit; } vpcm->streams[vss->direction].nsubstreams++; } on_exit: kfree(info); return rc; } /** * virtsnd_pcm_build_devs() - Build ALSA PCM devices. * @snd: VirtIO sound device. * * Context: Any context that permits to sleep. * Return: 0 on success, -errno on failure. 
*/ int virtsnd_pcm_build_devs(struct virtio_snd *snd) { struct virtio_device *vdev = snd->vdev; struct virtio_pcm *vpcm; u32 i; int rc; list_for_each_entry(vpcm, &snd->pcm_list, list) { unsigned int npbs = vpcm->streams[SNDRV_PCM_STREAM_PLAYBACK].nsubstreams; unsigned int ncps = vpcm->streams[SNDRV_PCM_STREAM_CAPTURE].nsubstreams; if (!npbs && !ncps) continue; rc = snd_pcm_new(snd->card, VIRTIO_SND_CARD_DRIVER, vpcm->nid, npbs, ncps, &vpcm->pcm); if (rc) { dev_err(&vdev->dev, "snd_pcm_new[%u] failed: %d\n", vpcm->nid, rc); return rc; } vpcm->pcm->info_flags = 0; vpcm->pcm->dev_class = SNDRV_PCM_CLASS_GENERIC; vpcm->pcm->dev_subclass = SNDRV_PCM_SUBCLASS_GENERIC_MIX; snprintf(vpcm->pcm->name, sizeof(vpcm->pcm->name), VIRTIO_SND_PCM_NAME " %u", vpcm->pcm->device); vpcm->pcm->private_data = vpcm; vpcm->pcm->nonatomic = true; for (i = 0; i < ARRAY_SIZE(vpcm->streams); ++i) { struct virtio_pcm_stream *stream = &vpcm->streams[i]; if (!stream->nsubstreams) continue; stream->substreams = devm_kcalloc(&vdev->dev, stream->nsubstreams, sizeof(*stream->substreams), GFP_KERNEL); if (!stream->substreams) return -ENOMEM; stream->nsubstreams = 0; } } for (i = 0; i < snd->nsubstreams; ++i) { struct virtio_pcm_stream *vs; struct virtio_pcm_substream *vss = &snd->substreams[i]; vpcm = virtsnd_pcm_find(snd, vss->nid); if (IS_ERR(vpcm)) return PTR_ERR(vpcm); vs = &vpcm->streams[vss->direction]; vs->substreams[vs->nsubstreams++] = vss; } list_for_each_entry(vpcm, &snd->pcm_list, list) { for (i = 0; i < ARRAY_SIZE(vpcm->streams); ++i) { struct virtio_pcm_stream *vs = &vpcm->streams[i]; struct snd_pcm_str *ks = &vpcm->pcm->streams[i]; struct snd_pcm_substream *kss; if (!vs->nsubstreams) continue; for (kss = ks->substream; kss; kss = kss->next) vs->substreams[kss->number]->substream = kss; snd_pcm_set_ops(vpcm->pcm, i, &virtsnd_pcm_ops); } snd_pcm_set_managed_buffer_all(vpcm->pcm, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0); } return 0; } /** * virtsnd_pcm_event() - Handle the PCM device event 
notification. * @snd: VirtIO sound device. * @event: VirtIO sound event. * * Context: Interrupt context. */ void virtsnd_pcm_event(struct virtio_snd *snd, struct virtio_snd_event *event) { struct virtio_pcm_substream *vss; u32 sid = le32_to_cpu(event->data); if (sid >= snd->nsubstreams) return; vss = &snd->substreams[sid]; switch (le32_to_cpu(event->hdr.code)) { case VIRTIO_SND_EVT_PCM_PERIOD_ELAPSED: /* TODO: deal with shmem elapsed period */ break; case VIRTIO_SND_EVT_PCM_XRUN: spin_lock(&vss->lock); if (vss->xfer_enabled) vss->xfer_xrun = true; spin_unlock(&vss->lock); break; } }
linux-master
sound/virtio/virtio_pcm.c
// SPDX-License-Identifier: GPL-2.0+ /* * virtio-snd: Virtio sound device * Copyright (C) 2021 OpenSynergy GmbH */ #include <linux/virtio_config.h> #include <sound/jack.h> #include <sound/hda_verbs.h> #include "virtio_card.h" /** * DOC: Implementation Status * * At the moment jacks have a simple implementation and can only be used to * receive notifications about a plugged in/out device. * * VIRTIO_SND_R_JACK_REMAP * is not supported */ /** * struct virtio_jack - VirtIO jack. * @jack: Kernel jack control. * @nid: Functional group node identifier. * @features: Jack virtio feature bit map (1 << VIRTIO_SND_JACK_F_XXX). * @defconf: Pin default configuration value. * @caps: Pin capabilities value. * @connected: Current jack connection status. * @type: Kernel jack type (SND_JACK_XXX). */ struct virtio_jack { struct snd_jack *jack; u32 nid; u32 features; u32 defconf; u32 caps; bool connected; int type; }; /** * virtsnd_jack_get_label() - Get the name string for the jack. * @vjack: VirtIO jack. * * Returns the jack name based on the default pin configuration value (see HDA * specification). * * Context: Any context. * Return: Name string. 
*/ static const char *virtsnd_jack_get_label(struct virtio_jack *vjack) { unsigned int defconf = vjack->defconf; unsigned int device = (defconf & AC_DEFCFG_DEVICE) >> AC_DEFCFG_DEVICE_SHIFT; unsigned int location = (defconf & AC_DEFCFG_LOCATION) >> AC_DEFCFG_LOCATION_SHIFT; switch (device) { case AC_JACK_LINE_OUT: return "Line Out"; case AC_JACK_SPEAKER: return "Speaker"; case AC_JACK_HP_OUT: return "Headphone"; case AC_JACK_CD: return "CD"; case AC_JACK_SPDIF_OUT: case AC_JACK_DIG_OTHER_OUT: if (location == AC_JACK_LOC_HDMI) return "HDMI Out"; else return "SPDIF Out"; case AC_JACK_LINE_IN: return "Line"; case AC_JACK_AUX: return "Aux"; case AC_JACK_MIC_IN: return "Mic"; case AC_JACK_SPDIF_IN: return "SPDIF In"; case AC_JACK_DIG_OTHER_IN: return "Digital In"; default: return "Misc"; } } /** * virtsnd_jack_get_type() - Get the type for the jack. * @vjack: VirtIO jack. * * Returns the jack type based on the default pin configuration value (see HDA * specification). * * Context: Any context. * Return: SND_JACK_XXX value. */ static int virtsnd_jack_get_type(struct virtio_jack *vjack) { unsigned int defconf = vjack->defconf; unsigned int device = (defconf & AC_DEFCFG_DEVICE) >> AC_DEFCFG_DEVICE_SHIFT; switch (device) { case AC_JACK_LINE_OUT: case AC_JACK_SPEAKER: return SND_JACK_LINEOUT; case AC_JACK_HP_OUT: return SND_JACK_HEADPHONE; case AC_JACK_SPDIF_OUT: case AC_JACK_DIG_OTHER_OUT: return SND_JACK_AVOUT; case AC_JACK_MIC_IN: return SND_JACK_MICROPHONE; default: return SND_JACK_LINEIN; } } /** * virtsnd_jack_parse_cfg() - Parse the jack configuration. * @snd: VirtIO sound device. * * This function is called during initial device initialization. * * Context: Any context that permits to sleep. * Return: 0 on success, -errno on failure. 
*/ int virtsnd_jack_parse_cfg(struct virtio_snd *snd) { struct virtio_device *vdev = snd->vdev; struct virtio_snd_jack_info *info; u32 i; int rc; virtio_cread_le(vdev, struct virtio_snd_config, jacks, &snd->njacks); if (!snd->njacks) return 0; snd->jacks = devm_kcalloc(&vdev->dev, snd->njacks, sizeof(*snd->jacks), GFP_KERNEL); if (!snd->jacks) return -ENOMEM; info = kcalloc(snd->njacks, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; rc = virtsnd_ctl_query_info(snd, VIRTIO_SND_R_JACK_INFO, 0, snd->njacks, sizeof(*info), info); if (rc) goto on_exit; for (i = 0; i < snd->njacks; ++i) { struct virtio_jack *vjack = &snd->jacks[i]; vjack->nid = le32_to_cpu(info[i].hdr.hda_fn_nid); vjack->features = le32_to_cpu(info[i].features); vjack->defconf = le32_to_cpu(info[i].hda_reg_defconf); vjack->caps = le32_to_cpu(info[i].hda_reg_caps); vjack->connected = info[i].connected; } on_exit: kfree(info); return rc; } /** * virtsnd_jack_build_devs() - Build ALSA controls for jacks. * @snd: VirtIO sound device. * * Context: Any context that permits to sleep. * Return: 0 on success, -errno on failure. */ int virtsnd_jack_build_devs(struct virtio_snd *snd) { u32 i; int rc; for (i = 0; i < snd->njacks; ++i) { struct virtio_jack *vjack = &snd->jacks[i]; vjack->type = virtsnd_jack_get_type(vjack); rc = snd_jack_new(snd->card, virtsnd_jack_get_label(vjack), vjack->type, &vjack->jack, true, true); if (rc) return rc; if (vjack->jack) vjack->jack->private_data = vjack; snd_jack_report(vjack->jack, vjack->connected ? vjack->type : 0); } return 0; } /** * virtsnd_jack_event() - Handle the jack event notification. * @snd: VirtIO sound device. * @event: VirtIO sound event. * * Context: Interrupt context. 
*/ void virtsnd_jack_event(struct virtio_snd *snd, struct virtio_snd_event *event) { u32 jack_id = le32_to_cpu(event->data); struct virtio_jack *vjack; if (jack_id >= snd->njacks) return; vjack = &snd->jacks[jack_id]; switch (le32_to_cpu(event->hdr.code)) { case VIRTIO_SND_EVT_JACK_CONNECTED: vjack->connected = true; break; case VIRTIO_SND_EVT_JACK_DISCONNECTED: vjack->connected = false; break; default: return; } snd_jack_report(vjack->jack, vjack->connected ? vjack->type : 0); }
linux-master
sound/virtio/virtio_jack.c
// SPDX-License-Identifier: GPL-2.0+ /* * virtio-snd: Virtio sound device * Copyright (C) 2021 OpenSynergy GmbH */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/virtio_config.h> #include <sound/initval.h> #include <uapi/linux/virtio_ids.h> #include "virtio_card.h" u32 virtsnd_msg_timeout_ms = MSEC_PER_SEC; module_param_named(msg_timeout_ms, virtsnd_msg_timeout_ms, uint, 0644); MODULE_PARM_DESC(msg_timeout_ms, "Message completion timeout in milliseconds"); static void virtsnd_remove(struct virtio_device *vdev); /** * virtsnd_event_send() - Add an event to the event queue. * @vqueue: Underlying event virtqueue. * @event: Event. * @notify: Indicates whether or not to send a notification to the device. * @gfp: Kernel flags for memory allocation. * * Context: Any context. */ static void virtsnd_event_send(struct virtqueue *vqueue, struct virtio_snd_event *event, bool notify, gfp_t gfp) { struct scatterlist sg; struct scatterlist *psgs[1] = { &sg }; /* reset event content */ memset(event, 0, sizeof(*event)); sg_init_one(&sg, event, sizeof(*event)); if (virtqueue_add_sgs(vqueue, psgs, 0, 1, event, gfp) || !notify) return; if (virtqueue_kick_prepare(vqueue)) virtqueue_notify(vqueue); } /** * virtsnd_event_dispatch() - Dispatch an event from the device side. * @snd: VirtIO sound device. * @event: VirtIO sound event. * * Context: Any context. */ static void virtsnd_event_dispatch(struct virtio_snd *snd, struct virtio_snd_event *event) { switch (le32_to_cpu(event->hdr.code)) { case VIRTIO_SND_EVT_JACK_CONNECTED: case VIRTIO_SND_EVT_JACK_DISCONNECTED: virtsnd_jack_event(snd, event); break; case VIRTIO_SND_EVT_PCM_PERIOD_ELAPSED: case VIRTIO_SND_EVT_PCM_XRUN: virtsnd_pcm_event(snd, event); break; } } /** * virtsnd_event_notify_cb() - Dispatch all reported events from the event queue. * @vqueue: Underlying event virtqueue. * * This callback function is called upon a vring interrupt request from the * device. * * Context: Interrupt context. 
*/ static void virtsnd_event_notify_cb(struct virtqueue *vqueue) { struct virtio_snd *snd = vqueue->vdev->priv; struct virtio_snd_queue *queue = virtsnd_event_queue(snd); struct virtio_snd_event *event; u32 length; unsigned long flags; spin_lock_irqsave(&queue->lock, flags); do { virtqueue_disable_cb(vqueue); while ((event = virtqueue_get_buf(vqueue, &length))) { virtsnd_event_dispatch(snd, event); virtsnd_event_send(vqueue, event, true, GFP_ATOMIC); } if (unlikely(virtqueue_is_broken(vqueue))) break; } while (!virtqueue_enable_cb(vqueue)); spin_unlock_irqrestore(&queue->lock, flags); } /** * virtsnd_find_vqs() - Enumerate and initialize all virtqueues. * @snd: VirtIO sound device. * * After calling this function, the event queue is disabled. * * Context: Any context. * Return: 0 on success, -errno on failure. */ static int virtsnd_find_vqs(struct virtio_snd *snd) { struct virtio_device *vdev = snd->vdev; static vq_callback_t *callbacks[VIRTIO_SND_VQ_MAX] = { [VIRTIO_SND_VQ_CONTROL] = virtsnd_ctl_notify_cb, [VIRTIO_SND_VQ_EVENT] = virtsnd_event_notify_cb, [VIRTIO_SND_VQ_TX] = virtsnd_pcm_tx_notify_cb, [VIRTIO_SND_VQ_RX] = virtsnd_pcm_rx_notify_cb }; static const char *names[VIRTIO_SND_VQ_MAX] = { [VIRTIO_SND_VQ_CONTROL] = "virtsnd-ctl", [VIRTIO_SND_VQ_EVENT] = "virtsnd-event", [VIRTIO_SND_VQ_TX] = "virtsnd-tx", [VIRTIO_SND_VQ_RX] = "virtsnd-rx" }; struct virtqueue *vqs[VIRTIO_SND_VQ_MAX] = { 0 }; unsigned int i; unsigned int n; int rc; rc = virtio_find_vqs(vdev, VIRTIO_SND_VQ_MAX, vqs, callbacks, names, NULL); if (rc) { dev_err(&vdev->dev, "failed to initialize virtqueues\n"); return rc; } for (i = 0; i < VIRTIO_SND_VQ_MAX; ++i) snd->queues[i].vqueue = vqs[i]; /* Allocate events and populate the event queue */ virtqueue_disable_cb(vqs[VIRTIO_SND_VQ_EVENT]); n = virtqueue_get_vring_size(vqs[VIRTIO_SND_VQ_EVENT]); snd->event_msgs = kmalloc_array(n, sizeof(*snd->event_msgs), GFP_KERNEL); if (!snd->event_msgs) return -ENOMEM; for (i = 0; i < n; ++i) 
virtsnd_event_send(vqs[VIRTIO_SND_VQ_EVENT], &snd->event_msgs[i], false, GFP_KERNEL); return 0; } /** * virtsnd_enable_event_vq() - Enable the event virtqueue. * @snd: VirtIO sound device. * * Context: Any context. */ static void virtsnd_enable_event_vq(struct virtio_snd *snd) { struct virtio_snd_queue *queue = virtsnd_event_queue(snd); if (!virtqueue_enable_cb(queue->vqueue)) virtsnd_event_notify_cb(queue->vqueue); } /** * virtsnd_disable_event_vq() - Disable the event virtqueue. * @snd: VirtIO sound device. * * Context: Any context. */ static void virtsnd_disable_event_vq(struct virtio_snd *snd) { struct virtio_snd_queue *queue = virtsnd_event_queue(snd); struct virtio_snd_event *event; u32 length; unsigned long flags; if (queue->vqueue) { spin_lock_irqsave(&queue->lock, flags); virtqueue_disable_cb(queue->vqueue); while ((event = virtqueue_get_buf(queue->vqueue, &length))) virtsnd_event_dispatch(snd, event); spin_unlock_irqrestore(&queue->lock, flags); } } /** * virtsnd_build_devs() - Read configuration and build ALSA devices. * @snd: VirtIO sound device. * * Context: Any context that permits to sleep. * Return: 0 on success, -errno on failure. 
*/ static int virtsnd_build_devs(struct virtio_snd *snd) { struct virtio_device *vdev = snd->vdev; struct device *dev = &vdev->dev; int rc; rc = snd_card_new(dev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1, THIS_MODULE, 0, &snd->card); if (rc < 0) return rc; snd->card->private_data = snd; strscpy(snd->card->driver, VIRTIO_SND_CARD_DRIVER, sizeof(snd->card->driver)); strscpy(snd->card->shortname, VIRTIO_SND_CARD_NAME, sizeof(snd->card->shortname)); if (dev->parent->bus) snprintf(snd->card->longname, sizeof(snd->card->longname), VIRTIO_SND_CARD_NAME " at %s/%s/%s", dev->parent->bus->name, dev_name(dev->parent), dev_name(dev)); else snprintf(snd->card->longname, sizeof(snd->card->longname), VIRTIO_SND_CARD_NAME " at %s/%s", dev_name(dev->parent), dev_name(dev)); rc = virtsnd_jack_parse_cfg(snd); if (rc) return rc; rc = virtsnd_pcm_parse_cfg(snd); if (rc) return rc; rc = virtsnd_chmap_parse_cfg(snd); if (rc) return rc; if (snd->njacks) { rc = virtsnd_jack_build_devs(snd); if (rc) return rc; } if (snd->nsubstreams) { rc = virtsnd_pcm_build_devs(snd); if (rc) return rc; } if (snd->nchmaps) { rc = virtsnd_chmap_build_devs(snd); if (rc) return rc; } return snd_card_register(snd->card); } /** * virtsnd_validate() - Validate if the device can be started. * @vdev: VirtIO parent device. * * Context: Any context. * Return: 0 on success, -EINVAL on failure. */ static int virtsnd_validate(struct virtio_device *vdev) { if (!vdev->config->get) { dev_err(&vdev->dev, "configuration access disabled\n"); return -EINVAL; } if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { dev_err(&vdev->dev, "device does not comply with spec version 1.x\n"); return -EINVAL; } if (!virtsnd_msg_timeout_ms) { dev_err(&vdev->dev, "msg_timeout_ms value cannot be zero\n"); return -EINVAL; } if (virtsnd_pcm_validate(vdev)) return -EINVAL; return 0; } /** * virtsnd_probe() - Create and initialize the device. * @vdev: VirtIO parent device. * * Context: Any context that permits to sleep. 
* Return: 0 on success, -errno on failure. */ static int virtsnd_probe(struct virtio_device *vdev) { struct virtio_snd *snd; unsigned int i; int rc; snd = devm_kzalloc(&vdev->dev, sizeof(*snd), GFP_KERNEL); if (!snd) return -ENOMEM; snd->vdev = vdev; INIT_LIST_HEAD(&snd->ctl_msgs); INIT_LIST_HEAD(&snd->pcm_list); vdev->priv = snd; for (i = 0; i < VIRTIO_SND_VQ_MAX; ++i) spin_lock_init(&snd->queues[i].lock); rc = virtsnd_find_vqs(snd); if (rc) goto on_exit; virtio_device_ready(vdev); rc = virtsnd_build_devs(snd); if (rc) goto on_exit; virtsnd_enable_event_vq(snd); on_exit: if (rc) virtsnd_remove(vdev); return rc; } /** * virtsnd_remove() - Remove VirtIO and ALSA devices. * @vdev: VirtIO parent device. * * Context: Any context that permits to sleep. */ static void virtsnd_remove(struct virtio_device *vdev) { struct virtio_snd *snd = vdev->priv; unsigned int i; virtsnd_disable_event_vq(snd); virtsnd_ctl_msg_cancel_all(snd); if (snd->card) snd_card_free(snd->card); vdev->config->del_vqs(vdev); virtio_reset_device(vdev); for (i = 0; snd->substreams && i < snd->nsubstreams; ++i) { struct virtio_pcm_substream *vss = &snd->substreams[i]; cancel_work_sync(&vss->elapsed_period); virtsnd_pcm_msg_free(vss); } kfree(snd->event_msgs); } #ifdef CONFIG_PM_SLEEP /** * virtsnd_freeze() - Suspend device. * @vdev: VirtIO parent device. * * Context: Any context. * Return: 0 on success, -errno on failure. */ static int virtsnd_freeze(struct virtio_device *vdev) { struct virtio_snd *snd = vdev->priv; unsigned int i; virtsnd_disable_event_vq(snd); virtsnd_ctl_msg_cancel_all(snd); vdev->config->del_vqs(vdev); virtio_reset_device(vdev); for (i = 0; i < snd->nsubstreams; ++i) cancel_work_sync(&snd->substreams[i].elapsed_period); kfree(snd->event_msgs); snd->event_msgs = NULL; return 0; } /** * virtsnd_restore() - Resume device. * @vdev: VirtIO parent device. * * Context: Any context. * Return: 0 on success, -errno on failure. 
*/ static int virtsnd_restore(struct virtio_device *vdev) { struct virtio_snd *snd = vdev->priv; int rc; rc = virtsnd_find_vqs(snd); if (rc) return rc; virtio_device_ready(vdev); virtsnd_enable_event_vq(snd); return 0; } #endif /* CONFIG_PM_SLEEP */ static const struct virtio_device_id id_table[] = { { VIRTIO_ID_SOUND, VIRTIO_DEV_ANY_ID }, { 0 }, }; static struct virtio_driver virtsnd_driver = { .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, .validate = virtsnd_validate, .probe = virtsnd_probe, .remove = virtsnd_remove, #ifdef CONFIG_PM_SLEEP .freeze = virtsnd_freeze, .restore = virtsnd_restore, #endif }; module_virtio_driver(virtsnd_driver); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio sound card driver"); MODULE_LICENSE("GPL");
linux-master
sound/virtio/virtio_card.c
// SPDX-License-Identifier: GPL-2.0+ /* * virtio-snd: Virtio sound device * Copyright (C) 2021 OpenSynergy GmbH */ #include <linux/moduleparam.h> #include <linux/virtio_config.h> #include "virtio_card.h" /** * struct virtio_snd_msg - Control message. * @sg_request: Scattergather list containing a device request (header). * @sg_response: Scattergather list containing a device response (status). * @list: Pending message list entry. * @notify: Request completed notification. * @ref_count: Reference count used to manage a message lifetime. */ struct virtio_snd_msg { struct scatterlist sg_request; struct scatterlist sg_response; struct list_head list; struct completion notify; refcount_t ref_count; }; /** * virtsnd_ctl_msg_ref() - Increment reference counter for the message. * @msg: Control message. * * Context: Any context. */ void virtsnd_ctl_msg_ref(struct virtio_snd_msg *msg) { refcount_inc(&msg->ref_count); } /** * virtsnd_ctl_msg_unref() - Decrement reference counter for the message. * @msg: Control message. * * The message will be freed when the ref_count value is 0. * * Context: Any context. */ void virtsnd_ctl_msg_unref(struct virtio_snd_msg *msg) { if (refcount_dec_and_test(&msg->ref_count)) kfree(msg); } /** * virtsnd_ctl_msg_request() - Get a pointer to the request header. * @msg: Control message. * * Context: Any context. */ void *virtsnd_ctl_msg_request(struct virtio_snd_msg *msg) { return sg_virt(&msg->sg_request); } /** * virtsnd_ctl_msg_response() - Get a pointer to the response header. * @msg: Control message. * * Context: Any context. */ void *virtsnd_ctl_msg_response(struct virtio_snd_msg *msg) { return sg_virt(&msg->sg_response); } /** * virtsnd_ctl_msg_alloc() - Allocate and initialize a control message. * @request_size: Size of request header. * @response_size: Size of response header. * @gfp: Kernel flags for memory allocation. * * The message will be automatically freed when the ref_count value is 0. * * Context: Any context. 
 *          May sleep if @gfp flags permit.
 * Return: Allocated message on success, NULL on failure.
 */
struct virtio_snd_msg *virtsnd_ctl_msg_alloc(size_t request_size,
					     size_t response_size, gfp_t gfp)
{
	struct virtio_snd_msg *msg;

	if (!request_size || !response_size)
		return NULL;

	/* Single allocation: header struct + request + response payloads. */
	msg = kzalloc(sizeof(*msg) + request_size + response_size, gfp);
	if (!msg)
		return NULL;

	sg_init_one(&msg->sg_request, (u8 *)msg + sizeof(*msg), request_size);
	sg_init_one(&msg->sg_response, (u8 *)msg + sizeof(*msg) + request_size,
		    response_size);

	INIT_LIST_HEAD(&msg->list);
	init_completion(&msg->notify);
	/* This reference is dropped in virtsnd_ctl_msg_complete(). */
	refcount_set(&msg->ref_count, 1);

	return msg;
}

/**
 * virtsnd_ctl_msg_send() - Send a control message.
 * @snd: VirtIO sound device.
 * @msg: Control message.
 * @out_sgs: Additional sg-list to attach to the request header (may be NULL).
 * @in_sgs: Additional sg-list to attach to the response header (may be NULL).
 * @nowait: Flag indicating whether to wait for completion.
 *
 * Context: Any context. Takes and releases the control queue spinlock.
 *          May sleep if @nowait is false.
 * Return: 0 on success, -errno on failure.
 */
int virtsnd_ctl_msg_send(struct virtio_snd *snd, struct virtio_snd_msg *msg,
			 struct scatterlist *out_sgs,
			 struct scatterlist *in_sgs, bool nowait)
{
	struct virtio_device *vdev = snd->vdev;
	struct virtio_snd_queue *queue = virtsnd_control_queue(snd);
	unsigned int js = msecs_to_jiffies(virtsnd_msg_timeout_ms);
	struct virtio_snd_hdr *request = virtsnd_ctl_msg_request(msg);
	struct virtio_snd_hdr *response = virtsnd_ctl_msg_response(msg);
	unsigned int nouts = 0;
	unsigned int nins = 0;
	/* At most: request + out_sgs + response + in_sgs. */
	struct scatterlist *psgs[4];
	bool notify = false;
	unsigned long flags;
	int rc;

	/* Extra reference held for the duration of this function. */
	virtsnd_ctl_msg_ref(msg);

	/* Set the default status in case the message was canceled. */
	response->code = cpu_to_le32(VIRTIO_SND_S_IO_ERR);

	/* Device-readable parts first, then device-writable parts. */
	psgs[nouts++] = &msg->sg_request;
	if (out_sgs)
		psgs[nouts++] = out_sgs;

	psgs[nouts + nins++] = &msg->sg_response;
	if (in_sgs)
		psgs[nouts + nins++] = in_sgs;

	spin_lock_irqsave(&queue->lock, flags);
	rc = virtqueue_add_sgs(queue->vqueue, psgs, nouts, nins, msg,
			       GFP_ATOMIC);
	if (!rc) {
		notify = virtqueue_kick_prepare(queue->vqueue);

		list_add_tail(&msg->list, &snd->ctl_msgs);
	}
	spin_unlock_irqrestore(&queue->lock, flags);

	if (rc) {
		dev_err(&vdev->dev, "failed to send control message (0x%08x)\n",
			le32_to_cpu(request->code));

		/*
		 * Since in this case virtsnd_ctl_msg_complete() will not be
		 * called, it is necessary to decrement the reference count.
		 */
		virtsnd_ctl_msg_unref(msg);

		goto on_exit;
	}

	if (notify)
		virtqueue_notify(queue->vqueue);

	if (nowait)
		goto on_exit;

	/* rc < 0: interrupted; rc == 0: timed out; rc > 0: completed. */
	rc = wait_for_completion_interruptible_timeout(&msg->notify, js);
	if (rc <= 0) {
		if (!rc) {
			dev_err(&vdev->dev,
				"control message (0x%08x) timeout\n",
				le32_to_cpu(request->code));
			rc = -ETIMEDOUT;
		}

		goto on_exit;
	}

	/* Map the device status code onto an errno value. */
	switch (le32_to_cpu(response->code)) {
	case VIRTIO_SND_S_OK:
		rc = 0;
		break;
	case VIRTIO_SND_S_NOT_SUPP:
		rc = -EOPNOTSUPP;
		break;
	case VIRTIO_SND_S_IO_ERR:
		rc = -EIO;
		break;
	default:
		rc = -EINVAL;
		break;
	}

on_exit:
	virtsnd_ctl_msg_unref(msg);

	return rc;
}

/**
 * virtsnd_ctl_msg_complete() - Complete a control message.
 * @msg: Control message.
 *
 * Context: Any context. Expects the control queue spinlock to be held by
 *          caller.
 */
void virtsnd_ctl_msg_complete(struct virtio_snd_msg *msg)
{
	list_del(&msg->list);
	complete(&msg->notify);

	/* Drop the initial reference taken in virtsnd_ctl_msg_alloc(). */
	virtsnd_ctl_msg_unref(msg);
}

/**
 * virtsnd_ctl_msg_cancel_all() - Cancel all pending control messages.
 * @snd: VirtIO sound device.
 *
 * Context: Any context.
 */
void virtsnd_ctl_msg_cancel_all(struct virtio_snd *snd)
{
	struct virtio_snd_queue *queue = virtsnd_control_queue(snd);
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	/* Each completion unlinks the entry, so the list drains to empty. */
	while (!list_empty(&snd->ctl_msgs)) {
		struct virtio_snd_msg *msg =
			list_first_entry(&snd->ctl_msgs, struct virtio_snd_msg,
					 list);

		virtsnd_ctl_msg_complete(msg);
	}
	spin_unlock_irqrestore(&queue->lock, flags);
}

/**
 * virtsnd_ctl_query_info() - Query the item configuration from the device.
 * @snd: VirtIO sound device.
 * @command: Control request code (VIRTIO_SND_R_XXX_INFO).
 * @start_id: Item start identifier.
 * @count: Item count to query.
 * @size: Item information size in bytes.
 * @info: Buffer for storing item information. Must hold at least
 *        @count * @size bytes (caller's responsibility).
 *
 * Context: Any context that permits to sleep.
 * Return: 0 on success, -errno on failure.
 */
int virtsnd_ctl_query_info(struct virtio_snd *snd, int command, int start_id,
			   int count, size_t size, void *info)
{
	struct virtio_snd_msg *msg;
	struct virtio_snd_query_info *query;
	struct scatterlist sg;

	msg = virtsnd_ctl_msg_alloc(sizeof(*query),
				    sizeof(struct virtio_snd_hdr), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	query = virtsnd_ctl_msg_request(msg);
	query->hdr.code = cpu_to_le32(command);
	query->start_id = cpu_to_le32(start_id);
	query->count = cpu_to_le32(count);
	query->size = cpu_to_le32(size);

	/* The device writes the item array into @info via this sg entry. */
	sg_init_one(&sg, info, count * size);

	return virtsnd_ctl_msg_send(snd, msg, NULL, &sg, false);
}

/**
 * virtsnd_ctl_notify_cb() - Process all completed control messages.
 * @vqueue: Underlying control virtqueue.
 *
 * This callback function is called upon a vring interrupt request from the
 * device.
 *
 * Context: Interrupt context. Takes and releases the control queue spinlock.
*/ void virtsnd_ctl_notify_cb(struct virtqueue *vqueue) { struct virtio_snd *snd = vqueue->vdev->priv; struct virtio_snd_queue *queue = virtsnd_control_queue(snd); struct virtio_snd_msg *msg; u32 length; unsigned long flags; spin_lock_irqsave(&queue->lock, flags); do { virtqueue_disable_cb(vqueue); while ((msg = virtqueue_get_buf(vqueue, &length))) virtsnd_ctl_msg_complete(msg); if (unlikely(virtqueue_is_broken(vqueue))) break; } while (!virtqueue_enable_cb(vqueue)); spin_unlock_irqrestore(&queue->lock, flags); }
linux-master
sound/virtio/virtio_ctl_msg.c
// SPDX-License-Identifier: GPL-2.0+ /* * virtio-snd: Virtio sound device * Copyright (C) 2021 OpenSynergy GmbH */ #include <sound/pcm_params.h> #include "virtio_card.h" /* * I/O messages lifetime * --------------------- * * Allocation: * Messages are initially allocated in the ops->hw_params() after the size and * number of periods have been successfully negotiated. * * Freeing: * Messages can be safely freed after the queue has been successfully flushed * (RELEASE command in the ops->sync_stop()) and the ops->hw_free() has been * called. * * When the substream stops, the ops->sync_stop() waits until the device has * completed all pending messages. This wait can be interrupted either by a * signal or due to a timeout. In this case, the device can still access * messages even after calling ops->hw_free(). It can also issue an interrupt, * and the interrupt handler will also try to access message structures. * * Therefore, freeing of already allocated messages occurs: * * - in ops->hw_params(), if this operator was called several times in a row, * or if ops->hw_free() failed to free messages previously; * * - in ops->hw_free(), if the queue has been successfully flushed; * * - in dev->release(). */ /* Map for converting ALSA format to VirtIO format. 
 */
struct virtsnd_a2v_format {
	/* ALSA sample format (SNDRV_PCM_FORMAT_XXX). */
	snd_pcm_format_t alsa_bit;
	/* Corresponding VirtIO format value (VIRTIO_SND_PCM_FMT_XXX). */
	unsigned int vio_bit;
};

/* One entry per supported ALSA<->VirtIO sample format pair. */
static const struct virtsnd_a2v_format g_a2v_format_map[] = {
	{ SNDRV_PCM_FORMAT_IMA_ADPCM, VIRTIO_SND_PCM_FMT_IMA_ADPCM },
	{ SNDRV_PCM_FORMAT_MU_LAW, VIRTIO_SND_PCM_FMT_MU_LAW },
	{ SNDRV_PCM_FORMAT_A_LAW, VIRTIO_SND_PCM_FMT_A_LAW },
	{ SNDRV_PCM_FORMAT_S8, VIRTIO_SND_PCM_FMT_S8 },
	{ SNDRV_PCM_FORMAT_U8, VIRTIO_SND_PCM_FMT_U8 },
	{ SNDRV_PCM_FORMAT_S16_LE, VIRTIO_SND_PCM_FMT_S16 },
	{ SNDRV_PCM_FORMAT_U16_LE, VIRTIO_SND_PCM_FMT_U16 },
	{ SNDRV_PCM_FORMAT_S18_3LE, VIRTIO_SND_PCM_FMT_S18_3 },
	{ SNDRV_PCM_FORMAT_U18_3LE, VIRTIO_SND_PCM_FMT_U18_3 },
	{ SNDRV_PCM_FORMAT_S20_3LE, VIRTIO_SND_PCM_FMT_S20_3 },
	{ SNDRV_PCM_FORMAT_U20_3LE, VIRTIO_SND_PCM_FMT_U20_3 },
	{ SNDRV_PCM_FORMAT_S24_3LE, VIRTIO_SND_PCM_FMT_S24_3 },
	{ SNDRV_PCM_FORMAT_U24_3LE, VIRTIO_SND_PCM_FMT_U24_3 },
	{ SNDRV_PCM_FORMAT_S20_LE, VIRTIO_SND_PCM_FMT_S20 },
	{ SNDRV_PCM_FORMAT_U20_LE, VIRTIO_SND_PCM_FMT_U20 },
	{ SNDRV_PCM_FORMAT_S24_LE, VIRTIO_SND_PCM_FMT_S24 },
	{ SNDRV_PCM_FORMAT_U24_LE, VIRTIO_SND_PCM_FMT_U24 },
	{ SNDRV_PCM_FORMAT_S32_LE, VIRTIO_SND_PCM_FMT_S32 },
	{ SNDRV_PCM_FORMAT_U32_LE, VIRTIO_SND_PCM_FMT_U32 },
	{ SNDRV_PCM_FORMAT_FLOAT_LE, VIRTIO_SND_PCM_FMT_FLOAT },
	{ SNDRV_PCM_FORMAT_FLOAT64_LE, VIRTIO_SND_PCM_FMT_FLOAT64 },
	{ SNDRV_PCM_FORMAT_DSD_U8, VIRTIO_SND_PCM_FMT_DSD_U8 },
	{ SNDRV_PCM_FORMAT_DSD_U16_LE, VIRTIO_SND_PCM_FMT_DSD_U16 },
	{ SNDRV_PCM_FORMAT_DSD_U32_LE, VIRTIO_SND_PCM_FMT_DSD_U32 },
	{ SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE,
	  VIRTIO_SND_PCM_FMT_IEC958_SUBFRAME }
};

/* Map for converting ALSA frame rate to VirtIO frame rate.
 */
struct virtsnd_a2v_rate {
	/* ALSA frame rate in Hz. */
	unsigned int rate;
	/* Corresponding VirtIO rate value (VIRTIO_SND_PCM_RATE_XXX). */
	unsigned int vio_bit;
};

/* One entry per supported ALSA<->VirtIO frame rate pair. */
static const struct virtsnd_a2v_rate g_a2v_rate_map[] = {
	{ 5512, VIRTIO_SND_PCM_RATE_5512 },
	{ 8000, VIRTIO_SND_PCM_RATE_8000 },
	{ 11025, VIRTIO_SND_PCM_RATE_11025 },
	{ 16000, VIRTIO_SND_PCM_RATE_16000 },
	{ 22050, VIRTIO_SND_PCM_RATE_22050 },
	{ 32000, VIRTIO_SND_PCM_RATE_32000 },
	{ 44100, VIRTIO_SND_PCM_RATE_44100 },
	{ 48000, VIRTIO_SND_PCM_RATE_48000 },
	{ 64000, VIRTIO_SND_PCM_RATE_64000 },
	{ 88200, VIRTIO_SND_PCM_RATE_88200 },
	{ 96000, VIRTIO_SND_PCM_RATE_96000 },
	{ 176400, VIRTIO_SND_PCM_RATE_176400 },
	{ 192000, VIRTIO_SND_PCM_RATE_192000 }
};

static int virtsnd_pcm_sync_stop(struct snd_pcm_substream *substream);

/**
 * virtsnd_pcm_open() - Open the PCM substream.
 * @substream: Kernel ALSA substream.
 *
 * Context: Process context.
 * Return: 0 on success, -errno on failure.
 */
static int virtsnd_pcm_open(struct snd_pcm_substream *substream)
{
	struct virtio_pcm *vpcm = snd_pcm_substream_chip(substream);
	struct virtio_pcm_stream *vs = &vpcm->streams[substream->stream];
	struct virtio_pcm_substream *vss = vs->substreams[substream->number];

	substream->runtime->hw = vss->hw;
	substream->private_data = vss;

	snd_pcm_hw_constraint_integer(substream->runtime,
				      SNDRV_PCM_HW_PARAM_PERIODS);

	/* Pending I/O messages mean the previous user left us "stopped". */
	vss->stopped = !!virtsnd_pcm_msg_pending_num(vss);
	vss->suspended = false;

	/*
	 * If the substream has already been used, then the I/O queue may be in
	 * an invalid state. Just in case, we do a check and try to return the
	 * queue to its original state, if necessary.
	 */
	return virtsnd_pcm_sync_stop(substream);
}

/**
 * virtsnd_pcm_close() - Close the PCM substream.
 * @substream: Kernel ALSA substream.
 *
 * Context: Process context.
 * Return: 0.
 */
static int virtsnd_pcm_close(struct snd_pcm_substream *substream)
{
	return 0;
}

/**
 * virtsnd_pcm_dev_set_params() - Set the parameters of the PCM substream on
 *                                the device side.
 * @vss: VirtIO PCM substream.
 * @buffer_bytes: Size of the hardware buffer.
* @period_bytes: Size of the hardware period. * @channels: Selected number of channels. * @format: Selected sample format (SNDRV_PCM_FORMAT_XXX). * @rate: Selected frame rate. * * Context: Any context that permits to sleep. * Return: 0 on success, -errno on failure. */ static int virtsnd_pcm_dev_set_params(struct virtio_pcm_substream *vss, unsigned int buffer_bytes, unsigned int period_bytes, unsigned int channels, snd_pcm_format_t format, unsigned int rate) { struct virtio_snd_msg *msg; struct virtio_snd_pcm_set_params *request; unsigned int i; int vformat = -1; int vrate = -1; for (i = 0; i < ARRAY_SIZE(g_a2v_format_map); ++i) if (g_a2v_format_map[i].alsa_bit == format) { vformat = g_a2v_format_map[i].vio_bit; break; } for (i = 0; i < ARRAY_SIZE(g_a2v_rate_map); ++i) if (g_a2v_rate_map[i].rate == rate) { vrate = g_a2v_rate_map[i].vio_bit; break; } if (vformat == -1 || vrate == -1) return -EINVAL; msg = virtsnd_pcm_ctl_msg_alloc(vss, VIRTIO_SND_R_PCM_SET_PARAMS, GFP_KERNEL); if (!msg) return -ENOMEM; request = virtsnd_ctl_msg_request(msg); request->buffer_bytes = cpu_to_le32(buffer_bytes); request->period_bytes = cpu_to_le32(period_bytes); request->channels = channels; request->format = vformat; request->rate = vrate; if (vss->features & (1U << VIRTIO_SND_PCM_F_MSG_POLLING)) request->features |= cpu_to_le32(1U << VIRTIO_SND_PCM_F_MSG_POLLING); if (vss->features & (1U << VIRTIO_SND_PCM_F_EVT_XRUNS)) request->features |= cpu_to_le32(1U << VIRTIO_SND_PCM_F_EVT_XRUNS); return virtsnd_ctl_msg_send_sync(vss->snd, msg); } /** * virtsnd_pcm_hw_params() - Set the parameters of the PCM substream. * @substream: Kernel ALSA substream. * @hw_params: Hardware parameters. * * Context: Process context. * Return: 0 on success, -errno on failure. 
 */
static int virtsnd_pcm_hw_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *hw_params)
{
	struct virtio_pcm_substream *vss = snd_pcm_substream_chip(substream);
	struct virtio_device *vdev = vss->snd->vdev;
	int rc;

	/* Pending I/O messages mean the device still owns the old buffers. */
	if (virtsnd_pcm_msg_pending_num(vss)) {
		dev_err(&vdev->dev, "SID %u: invalid I/O queue state\n",
			vss->sid);
		return -EBADFD;
	}

	rc = virtsnd_pcm_dev_set_params(vss, params_buffer_bytes(hw_params),
					params_period_bytes(hw_params),
					params_channels(hw_params),
					params_format(hw_params),
					params_rate(hw_params));
	if (rc)
		return rc;

	/*
	 * Free previously allocated messages if ops->hw_params() is called
	 * several times in a row, or if ops->hw_free() failed to free messages.
	 */
	virtsnd_pcm_msg_free(vss);

	return virtsnd_pcm_msg_alloc(vss, params_periods(hw_params),
				     params_period_bytes(hw_params));
}

/**
 * virtsnd_pcm_hw_free() - Reset the parameters of the PCM substream.
 * @substream: Kernel ALSA substream.
 *
 * Context: Process context.
 * Return: 0
 */
static int virtsnd_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct virtio_pcm_substream *vss = snd_pcm_substream_chip(substream);

	/* If the queue is flushed, we can safely free the messages here. */
	if (!virtsnd_pcm_msg_pending_num(vss))
		virtsnd_pcm_msg_free(vss);

	return 0;
}

/**
 * virtsnd_pcm_prepare() - Prepare the PCM substream.
 * @substream: Kernel ALSA substream.
 *
 * Context: Process context.
 * Return: 0 on success, -errno on failure.
 */
static int virtsnd_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct virtio_pcm_substream *vss = snd_pcm_substream_chip(substream);
	struct virtio_device *vdev = vss->snd->vdev;
	struct virtio_snd_msg *msg;

	if (!vss->suspended) {
		/* Normal prepare: the queue must be empty at this point. */
		if (virtsnd_pcm_msg_pending_num(vss)) {
			dev_err(&vdev->dev, "SID %u: invalid I/O queue state\n",
				vss->sid);
			return -EBADFD;
		}

		vss->buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
		vss->hw_ptr = 0;
		vss->msg_last_enqueued = -1;
	} else {
		/*
		 * Resume path: the device was reset, so the stream parameters
		 * negotiated before suspend must be set again.
		 */
		struct snd_pcm_runtime *runtime = substream->runtime;
		unsigned int buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
		unsigned int period_bytes = snd_pcm_lib_period_bytes(substream);
		int rc;

		rc = virtsnd_pcm_dev_set_params(vss, buffer_bytes, period_bytes,
						runtime->channels,
						runtime->format, runtime->rate);
		if (rc)
			return rc;
	}

	vss->xfer_xrun = false;
	vss->suspended = false;
	vss->msg_count = 0;

	msg = virtsnd_pcm_ctl_msg_alloc(vss, VIRTIO_SND_R_PCM_PREPARE,
					GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	return virtsnd_ctl_msg_send_sync(vss->snd, msg);
}

/**
 * virtsnd_pcm_trigger() - Process command for the PCM substream.
 * @substream: Kernel ALSA substream.
 * @command: Substream command (SNDRV_PCM_TRIGGER_XXX).
 *
 * Context: Any context. Takes and releases the VirtIO substream spinlock.
 *          May take and release the tx/rx queue spinlock.
 * Return: 0 on success, -errno on failure.
 */
static int virtsnd_pcm_trigger(struct snd_pcm_substream *substream, int command)
{
	struct virtio_pcm_substream *vss = snd_pcm_substream_chip(substream);
	struct virtio_snd *snd = vss->snd;
	struct virtio_snd_queue *queue;
	struct virtio_snd_msg *msg;
	unsigned long flags;
	int rc;

	switch (command) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		queue = virtsnd_pcm_queue(vss);

		/*
		 * Lock order: tx/rx queue lock first, then substream lock
		 * (virtsnd_pcm_msg_send() expects both to be held).
		 */
		spin_lock_irqsave(&queue->lock, flags);
		spin_lock(&vss->lock);
		rc = virtsnd_pcm_msg_send(vss);
		if (!rc)
			vss->xfer_enabled = true;
		spin_unlock(&vss->lock);
		spin_unlock_irqrestore(&queue->lock, flags);
		if (rc)
			return rc;

		msg = virtsnd_pcm_ctl_msg_alloc(vss, VIRTIO_SND_R_PCM_START,
						GFP_KERNEL);
		if (!msg) {
			/* Roll back the transfer enable on allocation failure. */
			spin_lock_irqsave(&vss->lock, flags);
			vss->xfer_enabled = false;
			spin_unlock_irqrestore(&vss->lock, flags);

			return -ENOMEM;
		}

		return virtsnd_ctl_msg_send_sync(snd, msg);
	case SNDRV_PCM_TRIGGER_SUSPEND:
		vss->suspended = true;
		fallthrough;
	case SNDRV_PCM_TRIGGER_STOP:
		vss->stopped = true;
		fallthrough;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		spin_lock_irqsave(&vss->lock, flags);
		vss->xfer_enabled = false;
		spin_unlock_irqrestore(&vss->lock, flags);

		msg = virtsnd_pcm_ctl_msg_alloc(vss, VIRTIO_SND_R_PCM_STOP,
						GFP_KERNEL);
		if (!msg)
			return -ENOMEM;

		return virtsnd_ctl_msg_send_sync(snd, msg);
	default:
		return -EINVAL;
	}
}

/**
 * virtsnd_pcm_sync_stop() - Synchronous PCM substream stop.
 * @substream: Kernel ALSA substream.
 *
 * The function can be called both from the upper level or from the driver
 * itself.
 *
 * Context: Process context. Takes and releases the VirtIO substream spinlock.
 * Return: 0 on success, -errno on failure.
 */
static int virtsnd_pcm_sync_stop(struct snd_pcm_substream *substream)
{
	struct virtio_pcm_substream *vss = snd_pcm_substream_chip(substream);
	struct virtio_snd *snd = vss->snd;
	struct virtio_snd_msg *msg;
	unsigned int js = msecs_to_jiffies(virtsnd_msg_timeout_ms);
	int rc;

	cancel_work_sync(&vss->elapsed_period);

	if (!vss->stopped)
		return 0;

	msg = virtsnd_pcm_ctl_msg_alloc(vss, VIRTIO_SND_R_PCM_RELEASE,
					GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	rc = virtsnd_ctl_msg_send_sync(snd, msg);
	if (rc)
		return rc;

	/*
	 * The spec states that upon receipt of the RELEASE command "the device
	 * MUST complete all pending I/O messages for the specified stream ID".
	 * Thus, we consider the absence of I/O messages in the queue as an
	 * indication that the substream has been released.
	 */
	rc = wait_event_interruptible_timeout(vss->msg_empty,
					      !virtsnd_pcm_msg_pending_num(vss),
					      js);
	if (rc <= 0) {
		dev_warn(&snd->vdev->dev, "SID %u: failed to flush I/O queue\n",
			 vss->sid);

		/* rc == 0: timeout; rc < 0: interrupted by a signal. */
		return !rc ? -ETIMEDOUT : rc;
	}

	vss->stopped = false;

	return 0;
}

/**
 * virtsnd_pcm_pointer() - Get the current hardware position for the PCM
 *                         substream.
 * @substream: Kernel ALSA substream.
 *
 * Context: Any context. Takes and releases the VirtIO substream spinlock.
 * Return: Hardware position in frames inside [0 ... buffer_size) range.
 */
static snd_pcm_uframes_t
virtsnd_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct virtio_pcm_substream *vss = snd_pcm_substream_chip(substream);
	/* Report an XRUN to the ALSA core if one was detected. */
	snd_pcm_uframes_t hw_ptr = SNDRV_PCM_POS_XRUN;
	unsigned long flags;

	spin_lock_irqsave(&vss->lock, flags);
	if (!vss->xfer_xrun)
		hw_ptr = bytes_to_frames(substream->runtime, vss->hw_ptr);
	spin_unlock_irqrestore(&vss->lock, flags);

	return hw_ptr;
}

/* PCM substream operators map.
 */
const struct snd_pcm_ops virtsnd_pcm_ops = {
	.open = virtsnd_pcm_open,
	.close = virtsnd_pcm_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = virtsnd_pcm_hw_params,
	.hw_free = virtsnd_pcm_hw_free,
	.prepare = virtsnd_pcm_prepare,
	.trigger = virtsnd_pcm_trigger,
	.sync_stop = virtsnd_pcm_sync_stop,
	.pointer = virtsnd_pcm_pointer,
};
linux-master
sound/virtio/virtio_pcm_ops.c
// SPDX-License-Identifier: GPL-2.0+ /* * virtio-snd: Virtio sound device * Copyright (C) 2021 OpenSynergy GmbH */ #include <sound/pcm_params.h> #include "virtio_card.h" /** * struct virtio_pcm_msg - VirtIO I/O message. * @substream: VirtIO PCM substream. * @xfer: Request header payload. * @status: Response header payload. * @length: Data length in bytes. * @sgs: Payload scatter-gather table. */ struct virtio_pcm_msg { struct virtio_pcm_substream *substream; struct virtio_snd_pcm_xfer xfer; struct virtio_snd_pcm_status status; size_t length; struct scatterlist sgs[]; }; /** * enum pcm_msg_sg_index - Index values for the virtio_pcm_msg->sgs field in * an I/O message. * @PCM_MSG_SG_XFER: Element containing a virtio_snd_pcm_xfer structure. * @PCM_MSG_SG_STATUS: Element containing a virtio_snd_pcm_status structure. * @PCM_MSG_SG_DATA: The first element containing a data buffer. */ enum pcm_msg_sg_index { PCM_MSG_SG_XFER = 0, PCM_MSG_SG_STATUS, PCM_MSG_SG_DATA }; /** * virtsnd_pcm_sg_num() - Count the number of sg-elements required to represent * vmalloc'ed buffer. * @data: Pointer to vmalloc'ed buffer. * @length: Buffer size. * * Context: Any context. * Return: Number of physically contiguous parts in the @data. */ static int virtsnd_pcm_sg_num(u8 *data, unsigned int length) { phys_addr_t sg_address; unsigned int sg_length; int num = 0; while (length) { struct page *pg = vmalloc_to_page(data); phys_addr_t pg_address = page_to_phys(pg); size_t pg_length; pg_length = PAGE_SIZE - offset_in_page(data); if (pg_length > length) pg_length = length; if (!num || sg_address + sg_length != pg_address) { sg_address = pg_address; sg_length = pg_length; num++; } else { sg_length += pg_length; } data += pg_length; length -= pg_length; } return num; } /** * virtsnd_pcm_sg_from() - Build sg-list from vmalloc'ed buffer. * @sgs: Preallocated sg-list to populate. * @nsgs: The maximum number of elements in the @sgs. * @data: Pointer to vmalloc'ed buffer. * @length: Buffer size. 
 *
 * Splits the buffer into physically contiguous parts and makes an sg-list of
 * such parts.
 *
 * Context: Any context.
 */
static void virtsnd_pcm_sg_from(struct scatterlist *sgs, int nsgs, u8 *data,
				unsigned int length)
{
	int idx = -1;

	while (length) {
		struct page *pg = vmalloc_to_page(data);
		size_t pg_length;

		/* Clamp to the remainder of the current page. */
		pg_length = PAGE_SIZE - offset_in_page(data);
		if (pg_length > length)
			pg_length = length;

		/* Start a new element unless physically adjacent to the last. */
		if (idx == -1 ||
		    sg_phys(&sgs[idx]) + sgs[idx].length != page_to_phys(pg)) {
			/* Stop if the preallocated list is exhausted. */
			if (idx + 1 == nsgs)
				break;

			sg_set_page(&sgs[++idx], pg, pg_length,
				    offset_in_page(data));
		} else {
			sgs[idx].length += pg_length;
		}

		data += pg_length;
		length -= pg_length;
	}

	sg_mark_end(&sgs[idx]);
}

/**
 * virtsnd_pcm_msg_alloc() - Allocate I/O messages.
 * @vss: VirtIO PCM substream.
 * @periods: Current number of periods.
 * @period_bytes: Current period size in bytes.
 *
 * The function slices the buffer into @periods parts (each with the size of
 * @period_bytes), and creates @periods corresponding I/O messages.
 *
 * Context: Any context that permits to sleep.
 * Return: 0 on success, -ENOMEM on failure.
 */
int virtsnd_pcm_msg_alloc(struct virtio_pcm_substream *vss,
			  unsigned int periods, unsigned int period_bytes)
{
	struct snd_pcm_runtime *runtime = vss->substream->runtime;
	unsigned int i;

	vss->msgs = kcalloc(periods, sizeof(*vss->msgs), GFP_KERNEL);
	if (!vss->msgs)
		return -ENOMEM;

	vss->nmsgs = periods;

	for (i = 0; i < periods; ++i) {
		/* Each message maps exactly one period of the DMA buffer. */
		u8 *data = runtime->dma_area + period_bytes * i;
		int sg_num = virtsnd_pcm_sg_num(data, period_bytes);
		struct virtio_pcm_msg *msg;

		/* +2 for the xfer and status sg-elements. */
		msg = kzalloc(struct_size(msg, sgs, sg_num + 2), GFP_KERNEL);
		if (!msg)
			return -ENOMEM;

		msg->substream = vss;
		sg_init_one(&msg->sgs[PCM_MSG_SG_XFER], &msg->xfer,
			    sizeof(msg->xfer));
		sg_init_one(&msg->sgs[PCM_MSG_SG_STATUS], &msg->status,
			    sizeof(msg->status));
		msg->length = period_bytes;
		virtsnd_pcm_sg_from(&msg->sgs[PCM_MSG_SG_DATA], sg_num, data,
				    period_bytes);

		vss->msgs[i] = msg;
	}

	return 0;
}

/**
 * virtsnd_pcm_msg_free() - Free all allocated I/O messages.
 * @vss: VirtIO PCM substream.
 *
 * Context: Any context.
 */
void virtsnd_pcm_msg_free(struct virtio_pcm_substream *vss)
{
	unsigned int i;

	/* vss->msgs may be NULL or contain NULL entries after a failed
	 * virtsnd_pcm_msg_alloc(); kfree(NULL) is a no-op either way.
	 */
	for (i = 0; vss->msgs && i < vss->nmsgs; ++i)
		kfree(vss->msgs[i]);
	kfree(vss->msgs);

	vss->msgs = NULL;
	vss->nmsgs = 0;
}

/**
 * virtsnd_pcm_msg_send() - Send asynchronous I/O messages.
 * @vss: VirtIO PCM substream.
 *
 * All messages are organized in an ordered circular list. Each time the
 * function is called, all currently non-enqueued messages are added to the
 * virtqueue. For this, the function keeps track of two values:
 *
 *   msg_last_enqueued = index of the last enqueued message,
 *   msg_count = # of pending messages in the virtqueue.
 *
 * Context: Any context. Expects the tx/rx queue and the VirtIO substream
 *          spinlocks to be held by caller.
 * Return: 0 on success, -errno on failure.
 */
int virtsnd_pcm_msg_send(struct virtio_pcm_substream *vss)
{
	struct snd_pcm_runtime *runtime = vss->substream->runtime;
	struct virtio_snd *snd = vss->snd;
	struct virtio_device *vdev = snd->vdev;
	struct virtqueue *vqueue = virtsnd_pcm_queue(vss)->vqueue;
	int i;
	int n;
	bool notify = false;

	/* Resume enqueueing right after the last enqueued message. */
	i = (vss->msg_last_enqueued + 1) % runtime->periods;
	n = runtime->periods - vss->msg_count;

	for (; n; --n, i = (i + 1) % runtime->periods) {
		struct virtio_pcm_msg *msg = vss->msgs[i];
		struct scatterlist *psgs[] = {
			&msg->sgs[PCM_MSG_SG_XFER],
			&msg->sgs[PCM_MSG_SG_DATA],
			&msg->sgs[PCM_MSG_SG_STATUS]
		};
		int rc;

		msg->xfer.stream_id = cpu_to_le32(vss->sid);
		memset(&msg->status, 0, sizeof(msg->status));

		/*
		 * Playback: xfer + data are device-readable, status writable.
		 * Capture: only xfer is readable, data + status are writable.
		 */
		if (vss->direction == SNDRV_PCM_STREAM_PLAYBACK)
			rc = virtqueue_add_sgs(vqueue, psgs, 2, 1, msg,
					       GFP_ATOMIC);
		else
			rc = virtqueue_add_sgs(vqueue, psgs, 1, 2, msg,
					       GFP_ATOMIC);

		if (rc) {
			dev_err(&vdev->dev,
				"SID %u: failed to send I/O message\n",
				vss->sid);
			return rc;
		}

		vss->msg_last_enqueued = i;
		vss->msg_count++;
	}

	/* With MSG_POLLING the device polls the ring; no kick needed. */
	if (!(vss->features & (1U << VIRTIO_SND_PCM_F_MSG_POLLING)))
		notify = virtqueue_kick_prepare(vqueue);

	if (notify)
		virtqueue_notify(vqueue);

	return 0;
}

/**
 * virtsnd_pcm_msg_pending_num() - Returns the number of pending I/O messages.
 * @vss: VirtIO substream.
 *
 * Context: Any context.
 * Return: Number of messages.
 */
unsigned int virtsnd_pcm_msg_pending_num(struct virtio_pcm_substream *vss)
{
	unsigned int num;
	unsigned long flags;

	spin_lock_irqsave(&vss->lock, flags);
	num = vss->msg_count;
	spin_unlock_irqrestore(&vss->lock, flags);

	return num;
}

/**
 * virtsnd_pcm_msg_complete() - Complete an I/O message.
 * @msg: I/O message.
 * @written_bytes: Number of bytes written to the message.
 *
 * Completion of the message means the elapsed period. If transmission is
 * allowed, then each completed message is immediately placed back at the end
 * of the queue.
 *
 * For the playback substream, @written_bytes is equal to sizeof(msg->status).
 *
 * For the capture substream, @written_bytes is equal to sizeof(msg->status)
 * plus the number of captured bytes.
 *
 * Context: Interrupt context. Takes and releases the VirtIO substream spinlock.
 */
static void virtsnd_pcm_msg_complete(struct virtio_pcm_msg *msg,
				     size_t written_bytes)
{
	struct virtio_pcm_substream *vss = msg->substream;

	/*
	 * hw_ptr always indicates the buffer position of the first I/O message
	 * in the virtqueue. Therefore, on each completion of an I/O message,
	 * the hw_ptr value is unconditionally advanced.
	 */
	spin_lock(&vss->lock);
	/*
	 * If the capture substream returned an incorrect status, then just
	 * increase the hw_ptr by the message size.
	 */
	if (vss->direction == SNDRV_PCM_STREAM_PLAYBACK ||
	    written_bytes <= sizeof(msg->status))
		vss->hw_ptr += msg->length;
	else
		vss->hw_ptr += written_bytes - sizeof(msg->status);

	/* Wrap around the ring buffer boundary. */
	if (vss->hw_ptr >= vss->buffer_bytes)
		vss->hw_ptr -= vss->buffer_bytes;

	vss->xfer_xrun = false;
	vss->msg_count--;

	if (vss->xfer_enabled) {
		struct snd_pcm_runtime *runtime = vss->substream->runtime;

		/* Propagate the device-reported latency to the ALSA core. */
		runtime->delay =
			bytes_to_frames(runtime,
					le32_to_cpu(msg->status.latency_bytes));

		schedule_work(&vss->elapsed_period);

		/* Re-enqueue the completed message to keep the stream fed. */
		virtsnd_pcm_msg_send(vss);
	} else if (!vss->msg_count) {
		/* Queue fully drained: unblock virtsnd_pcm_sync_stop(). */
		wake_up_all(&vss->msg_empty);
	}
	spin_unlock(&vss->lock);
}

/**
 * virtsnd_pcm_notify_cb() - Process all completed I/O messages.
 * @queue: Underlying tx/rx virtqueue.
 *
 * Context: Interrupt context. Takes and releases the tx/rx queue spinlock.
 */
static inline void virtsnd_pcm_notify_cb(struct virtio_snd_queue *queue)
{
	struct virtio_pcm_msg *msg;
	u32 written_bytes;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	/* Standard virtio drain loop: disable, drain, re-enable, re-check. */
	do {
		virtqueue_disable_cb(queue->vqueue);
		while ((msg = virtqueue_get_buf(queue->vqueue, &written_bytes)))
			virtsnd_pcm_msg_complete(msg, written_bytes);
		if (unlikely(virtqueue_is_broken(queue->vqueue)))
			break;
	} while (!virtqueue_enable_cb(queue->vqueue));
	spin_unlock_irqrestore(&queue->lock, flags);
}

/**
 * virtsnd_pcm_tx_notify_cb() - Process all completed TX messages.
 * @vqueue: Underlying tx virtqueue.
 *
 * Context: Interrupt context.
 */
void virtsnd_pcm_tx_notify_cb(struct virtqueue *vqueue)
{
	struct virtio_snd *snd = vqueue->vdev->priv;

	virtsnd_pcm_notify_cb(virtsnd_tx_queue(snd));
}

/**
 * virtsnd_pcm_rx_notify_cb() - Process all completed RX messages.
 * @vqueue: Underlying rx virtqueue.
 *
 * Context: Interrupt context.
 */
void virtsnd_pcm_rx_notify_cb(struct virtqueue *vqueue)
{
	struct virtio_snd *snd = vqueue->vdev->priv;

	virtsnd_pcm_notify_cb(virtsnd_rx_queue(snd));
}

/**
 * virtsnd_pcm_ctl_msg_alloc() - Allocate and initialize the PCM device control
 *                               message for the specified substream.
 * @vss: VirtIO PCM substream.
 * @command: Control request code (VIRTIO_SND_R_PCM_XXX).
 * @gfp: Kernel flags for memory allocation.
 *
 * Context: Any context. May sleep if @gfp flags permit.
 * Return: Allocated message on success, NULL on failure.
*/ struct virtio_snd_msg * virtsnd_pcm_ctl_msg_alloc(struct virtio_pcm_substream *vss, unsigned int command, gfp_t gfp) { size_t request_size = sizeof(struct virtio_snd_pcm_hdr); size_t response_size = sizeof(struct virtio_snd_hdr); struct virtio_snd_msg *msg; switch (command) { case VIRTIO_SND_R_PCM_SET_PARAMS: request_size = sizeof(struct virtio_snd_pcm_set_params); break; } msg = virtsnd_ctl_msg_alloc(request_size, response_size, gfp); if (msg) { struct virtio_snd_pcm_hdr *hdr = virtsnd_ctl_msg_request(msg); hdr->hdr.code = cpu_to_le32(command); hdr->stream_id = cpu_to_le32(vss->sid); } return msg; }
linux-master
sound/virtio/virtio_pcm_msg.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * virtio-snd: Virtio sound device
 * Copyright (C) 2021 OpenSynergy GmbH
 */
#include <linux/virtio_config.h>

#include "virtio_card.h"

/* VirtIO->ALSA channel position map */
static const u8 g_v2a_position_map[] = {
	[VIRTIO_SND_CHMAP_NONE] = SNDRV_CHMAP_UNKNOWN,
	[VIRTIO_SND_CHMAP_NA] = SNDRV_CHMAP_NA,
	[VIRTIO_SND_CHMAP_MONO] = SNDRV_CHMAP_MONO,
	[VIRTIO_SND_CHMAP_FL] = SNDRV_CHMAP_FL,
	[VIRTIO_SND_CHMAP_FR] = SNDRV_CHMAP_FR,
	[VIRTIO_SND_CHMAP_RL] = SNDRV_CHMAP_RL,
	[VIRTIO_SND_CHMAP_RR] = SNDRV_CHMAP_RR,
	[VIRTIO_SND_CHMAP_FC] = SNDRV_CHMAP_FC,
	[VIRTIO_SND_CHMAP_LFE] = SNDRV_CHMAP_LFE,
	[VIRTIO_SND_CHMAP_SL] = SNDRV_CHMAP_SL,
	[VIRTIO_SND_CHMAP_SR] = SNDRV_CHMAP_SR,
	[VIRTIO_SND_CHMAP_RC] = SNDRV_CHMAP_RC,
	[VIRTIO_SND_CHMAP_FLC] = SNDRV_CHMAP_FLC,
	[VIRTIO_SND_CHMAP_FRC] = SNDRV_CHMAP_FRC,
	[VIRTIO_SND_CHMAP_RLC] = SNDRV_CHMAP_RLC,
	[VIRTIO_SND_CHMAP_RRC] = SNDRV_CHMAP_RRC,
	[VIRTIO_SND_CHMAP_FLW] = SNDRV_CHMAP_FLW,
	[VIRTIO_SND_CHMAP_FRW] = SNDRV_CHMAP_FRW,
	[VIRTIO_SND_CHMAP_FLH] = SNDRV_CHMAP_FLH,
	[VIRTIO_SND_CHMAP_FCH] = SNDRV_CHMAP_FCH,
	[VIRTIO_SND_CHMAP_FRH] = SNDRV_CHMAP_FRH,
	[VIRTIO_SND_CHMAP_TC] = SNDRV_CHMAP_TC,
	[VIRTIO_SND_CHMAP_TFL] = SNDRV_CHMAP_TFL,
	[VIRTIO_SND_CHMAP_TFR] = SNDRV_CHMAP_TFR,
	[VIRTIO_SND_CHMAP_TFC] = SNDRV_CHMAP_TFC,
	[VIRTIO_SND_CHMAP_TRL] = SNDRV_CHMAP_TRL,
	[VIRTIO_SND_CHMAP_TRR] = SNDRV_CHMAP_TRR,
	[VIRTIO_SND_CHMAP_TRC] = SNDRV_CHMAP_TRC,
	[VIRTIO_SND_CHMAP_TFLC] = SNDRV_CHMAP_TFLC,
	[VIRTIO_SND_CHMAP_TFRC] = SNDRV_CHMAP_TFRC,
	[VIRTIO_SND_CHMAP_TSL] = SNDRV_CHMAP_TSL,
	[VIRTIO_SND_CHMAP_TSR] = SNDRV_CHMAP_TSR,
	[VIRTIO_SND_CHMAP_LLFE] = SNDRV_CHMAP_LLFE,
	[VIRTIO_SND_CHMAP_RLFE] = SNDRV_CHMAP_RLFE,
	[VIRTIO_SND_CHMAP_BC] = SNDRV_CHMAP_BC,
	[VIRTIO_SND_CHMAP_BLC] = SNDRV_CHMAP_BLC,
	[VIRTIO_SND_CHMAP_BRC] = SNDRV_CHMAP_BRC
};

/**
 * virtsnd_chmap_parse_cfg() - Parse the channel map configuration.
 * @snd: VirtIO sound device.
 *
 * This function is called during initial device initialization.
 *
 * Context: Any context that permits to sleep.
 * Return: 0 on success, -errno on failure.
 */
int virtsnd_chmap_parse_cfg(struct virtio_snd *snd)
{
	struct virtio_device *vdev = snd->vdev;
	u32 i;
	int rc;

	virtio_cread_le(vdev, struct virtio_snd_config, chmaps, &snd->nchmaps);
	if (!snd->nchmaps)
		return 0;

	/* devm-managed: freed automatically on device teardown. */
	snd->chmaps = devm_kcalloc(&vdev->dev, snd->nchmaps,
				   sizeof(*snd->chmaps), GFP_KERNEL);
	if (!snd->chmaps)
		return -ENOMEM;

	rc = virtsnd_ctl_query_info(snd, VIRTIO_SND_R_CHMAP_INFO, 0,
				    snd->nchmaps, sizeof(*snd->chmaps),
				    snd->chmaps);
	if (rc)
		return rc;

	/* Count the number of channel maps per each PCM device/stream. */
	for (i = 0; i < snd->nchmaps; ++i) {
		struct virtio_snd_chmap_info *info = &snd->chmaps[i];
		u32 nid = le32_to_cpu(info->hdr.hda_fn_nid);
		struct virtio_pcm *vpcm;
		struct virtio_pcm_stream *vs;

		vpcm = virtsnd_pcm_find_or_create(snd, nid);
		if (IS_ERR(vpcm))
			return PTR_ERR(vpcm);

		switch (info->direction) {
		case VIRTIO_SND_D_OUTPUT:
			vs = &vpcm->streams[SNDRV_PCM_STREAM_PLAYBACK];
			break;
		case VIRTIO_SND_D_INPUT:
			vs = &vpcm->streams[SNDRV_PCM_STREAM_CAPTURE];
			break;
		default:
			dev_err(&vdev->dev,
				"chmap #%u: unknown direction (%u)\n", i,
				info->direction);
			return -EINVAL;
		}

		vs->nchmaps++;
	}

	return 0;
}

/**
 * virtsnd_chmap_add_ctls() - Create an ALSA control for channel maps.
 * @pcm: ALSA PCM device.
 * @direction: PCM stream direction (SNDRV_PCM_STREAM_XXX).
 * @vs: VirtIO PCM stream.
 *
 * Context: Any context.
 * Return: 0 on success, -errno on failure.
 */
static int virtsnd_chmap_add_ctls(struct snd_pcm *pcm, int direction,
				  struct virtio_pcm_stream *vs)
{
	u32 i;
	int max_channels = 0;

	/* The control's channel limit is the widest map of this stream. */
	for (i = 0; i < vs->nchmaps; i++)
		if (max_channels < vs->chmaps[i].channels)
			max_channels = vs->chmaps[i].channels;

	return snd_pcm_add_chmap_ctls(pcm, direction, vs->chmaps, max_channels,
				      0, NULL);
}

/**
 * virtsnd_chmap_build_devs() - Build ALSA controls for channel maps.
 * @snd: VirtIO sound device.
 *
 * Context: Any context.
 * Return: 0 on success, -errno on failure.
 */
int virtsnd_chmap_build_devs(struct virtio_snd *snd)
{
	struct virtio_device *vdev = snd->vdev;
	struct virtio_pcm *vpcm;
	struct virtio_pcm_stream *vs;
	u32 i;
	int rc;

	/* Allocate channel map elements per each PCM device/stream. */
	list_for_each_entry(vpcm, &snd->pcm_list, list) {
		for (i = 0; i < ARRAY_SIZE(vpcm->streams); ++i) {
			vs = &vpcm->streams[i];

			if (!vs->nchmaps)
				continue;

			/*
			 * nchmaps + 1: the extra zeroed element terminates
			 * the list handed to snd_pcm_add_chmap_ctls().
			 */
			vs->chmaps = devm_kcalloc(&vdev->dev,
						  vs->nchmaps + 1,
						  sizeof(*vs->chmaps),
						  GFP_KERNEL);
			if (!vs->chmaps)
				return -ENOMEM;

			/* Reset; reused below as a fill cursor. */
			vs->nchmaps = 0;
		}
	}

	/* Initialize channel maps per each PCM device/stream. */
	for (i = 0; i < snd->nchmaps; ++i) {
		struct virtio_snd_chmap_info *info = &snd->chmaps[i];
		unsigned int channels = info->channels;
		unsigned int ch;
		struct snd_pcm_chmap_elem *chmap;

		vpcm = virtsnd_pcm_find(snd, le32_to_cpu(info->hdr.hda_fn_nid));
		if (IS_ERR(vpcm))
			return PTR_ERR(vpcm);

		if (info->direction == VIRTIO_SND_D_OUTPUT)
			vs = &vpcm->streams[SNDRV_PCM_STREAM_PLAYBACK];
		else
			vs = &vpcm->streams[SNDRV_PCM_STREAM_CAPTURE];

		chmap = &vs->chmaps[vs->nchmaps++];

		/* Clamp to what a snd_pcm_chmap_elem can hold. */
		if (channels > ARRAY_SIZE(chmap->map))
			channels = ARRAY_SIZE(chmap->map);

		chmap->channels = channels;

		for (ch = 0; ch < channels; ++ch) {
			u8 position = info->positions[ch];

			if (position >= ARRAY_SIZE(g_v2a_position_map))
				return -EINVAL;

			chmap->map[ch] = g_v2a_position_map[position];
		}
	}

	/* Create an ALSA control per each PCM device/stream. */
	list_for_each_entry(vpcm, &snd->pcm_list, list) {
		if (!vpcm->pcm)
			continue;

		for (i = 0; i < ARRAY_SIZE(vpcm->streams); ++i) {
			vs = &vpcm->streams[i];

			if (!vs->nchmaps)
				continue;

			rc = virtsnd_chmap_add_ctls(vpcm->pcm, i, vs);
			if (rc)
				return rc;
		}
	}

	return 0;
}
/* --- end of sound/virtio/virtio_chmap.c (linux-master) --- */
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
// Copyright(c) 2015-2021 Intel Corporation.

/*
 * SDW Intel ACPI scan helpers
 */

#include <linux/acpi.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/fwnode.h>
#include <linux/module.h>
#include <linux/soundwire/sdw_intel.h>
#include <linux/string.h>

#define SDW_LINK_TYPE		4 /* from Intel ACPI documentation */
#define SDW_MAX_LINKS		4

/* User-supplied allow-mask of links; 0 means "no filtering". */
static int ctrl_link_mask;
module_param_named(sdw_link_mask, ctrl_link_mask, int, 0444);
MODULE_PARM_DESC(sdw_link_mask, "Intel link mask (one bit per link)");

/*
 * Check whether link @idx is enabled in firmware, i.e. its
 * "mipi-sdw-link-N-subproperties" node exists and does not carry the
 * BUS_DISABLE quirk.
 */
static bool is_link_enabled(struct fwnode_handle *fw_node, u8 idx)
{
	struct fwnode_handle *link;
	char name[32];
	u32 quirk_mask = 0;

	/* Find master handle */
	snprintf(name, sizeof(name),
		 "mipi-sdw-link-%hhu-subproperties", idx);

	link = fwnode_get_named_child_node(fw_node, name);
	if (!link)
		return false;

	fwnode_property_read_u32(link, "intel-quirk-mask", &quirk_mask);

	if (quirk_mask & SDW_INTEL_QUIRK_MASK_BUS_DISABLE)
		return false;

	return true;
}

/*
 * Read the link count from ACPI and compute info->link_mask from the
 * firmware-enabled links, further filtered by the sdw_link_mask module
 * parameter.
 */
static int
sdw_intel_scan_controller(struct sdw_intel_acpi_info *info)
{
	struct acpi_device *adev = acpi_fetch_acpi_dev(info->handle);
	u8 count, i;
	int ret;

	if (!adev)
		return -EINVAL;

	/* Found controller, find links supported */
	count = 0;
	ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
					    "mipi-sdw-master-count", &count, 1);

	/*
	 * In theory we could check the number of links supported in
	 * hardware, but in that step we cannot assume SoundWire IP is
	 * powered.
	 *
	 * In addition, if the BIOS doesn't even provide this
	 * 'master-count' property then all the inits based on link
	 * masks will fail as well.
	 *
	 * We will check the hardware capabilities in the startup() step
	 */

	if (ret) {
		dev_err(&adev->dev,
			"Failed to read mipi-sdw-master-count: %d\n", ret);
		return -EINVAL;
	}

	/* Check count is within bounds */
	if (count > SDW_MAX_LINKS) {
		dev_err(&adev->dev, "Link count %d exceeds max %d\n",
			count, SDW_MAX_LINKS);
		return -EINVAL;
	}

	if (!count) {
		dev_warn(&adev->dev, "No SoundWire links detected\n");
		return -EINVAL;
	}
	dev_dbg(&adev->dev, "ACPI reports %d SDW Link devices\n", count);

	info->count = count;
	info->link_mask = 0;

	for (i = 0; i < count; i++) {
		if (ctrl_link_mask && !(ctrl_link_mask & BIT(i))) {
			dev_dbg(&adev->dev,
				"Link %d masked, will not be enabled\n", i);
			continue;
		}

		if (!is_link_enabled(acpi_fwnode_handle(adev), i)) {
			dev_dbg(&adev->dev,
				"Link %d not selected in firmware\n", i);
			continue;
		}

		info->link_mask |= BIT(i);
	}

	return 0;
}

/* acpi_walk_namespace() callback: locate the SoundWire controller node. */
static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
				     void *cdata, void **return_value)
{
	struct sdw_intel_acpi_info *info = cdata;
	acpi_status status;
	u64 adr;

	status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &adr);
	if (ACPI_FAILURE(status))
		return AE_OK; /* keep going */

	if (!acpi_fetch_acpi_dev(handle)) {
		pr_err("%s: Couldn't find ACPI handle\n", __func__);
		return AE_NOT_FOUND;
	}

	/*
	 * On some Intel platforms, multiple children of the HDAS
	 * device can be found, but only one of them is the SoundWire
	 * controller. The SNDW device is always exposed with
	 * Name(_ADR, 0x40000000), with bits 31..28 representing the
	 * SoundWire link so filter accordingly
	 */
	if (FIELD_GET(GENMASK(31, 28), adr) != SDW_LINK_TYPE)
		return AE_OK; /* keep going */

	/* found the correct SoundWire controller */
	info->handle = handle;

	/* device found, stop namespace walk */
	return AE_CTRL_TERMINATE;
}

/**
 * sdw_intel_acpi_scan() - SoundWire Intel init routine
 * @parent_handle: ACPI parent handle
 * @info: description of what firmware/DSDT tables expose
 *
 * This scans the namespace and queries firmware to figure out which
 * links to enable. A follow-up use of sdw_intel_probe() and
 * sdw_intel_startup() is required for creation of devices and bus
 * startup
 */
int sdw_intel_acpi_scan(acpi_handle *parent_handle,
			struct sdw_intel_acpi_info *info)
{
	acpi_status status;

	info->handle = NULL;
	/*
	 * In the HDAS ACPI scope, 'SNDW' may be either the child of
	 * 'HDAS' or the grandchild of 'HDAS'. So let's go through
	 * the ACPI from 'HDAS' at max depth of 2 to find the 'SNDW'
	 * device.
	 */
	status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
				     parent_handle,
				     2,
				     sdw_intel_acpi_cb,
				     NULL, info, NULL);
	if (ACPI_FAILURE(status) || info->handle == NULL)
		return -ENODEV;

	return sdw_intel_scan_controller(info);
}
EXPORT_SYMBOL_NS(sdw_intel_acpi_scan, SND_INTEL_SOUNDWIRE_ACPI);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Intel Soundwire ACPI helpers");
/* --- end of sound/hda/intel-sdw-acpi.c (linux-master) --- */
/* * tracepoint definitions for HD-audio core drivers */ #define CREATE_TRACE_POINTS #include "trace.h"
/* --- end of sound/hda/trace.c (linux-master) --- */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * HD-audio codec core device
 */

#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <sound/hdaudio.h>
#include <sound/hda_regmap.h>
#include <sound/pcm.h>
#include "local.h"

static void setup_fg_nodes(struct hdac_device *codec);
static int get_codec_vendor_name(struct hdac_device *codec);

/* device release callback: tears down the hdac_device state */
static void default_release(struct device *dev)
{
	snd_hdac_device_exit(dev_to_hdac_dev(dev));
}

/**
 * snd_hdac_device_init - initialize the HD-audio codec base device
 * @codec: device to initialize
 * @bus: bus to attach
 * @name: device name string
 * @addr: codec address
 *
 * Returns zero for success or a negative error code.
 *
 * This function increments the runtime PM counter and marks it active.
 * The caller needs to turn it off appropriately later.
 *
 * The caller needs to set the device's release op properly by itself.
 */
int snd_hdac_device_init(struct hdac_device *codec, struct hdac_bus *bus,
			 const char *name, unsigned int addr)
{
	struct device *dev;
	hda_nid_t fg;
	int err;

	dev = &codec->dev;
	device_initialize(dev);
	dev->parent = bus->dev;
	dev->bus = &snd_hda_bus_type;
	dev->release = default_release;
	dev->groups = hdac_dev_attr_groups;
	dev_set_name(dev, "%s", name);
	device_enable_async_suspend(dev);

	codec->bus = bus;
	codec->addr = addr;
	codec->type = HDA_DEV_CORE;
	mutex_init(&codec->widget_lock);
	mutex_init(&codec->regmap_lock);
	/* mark active and pin the PM count; caller releases it later */
	pm_runtime_set_active(&codec->dev);
	pm_runtime_get_noresume(&codec->dev);
	atomic_set(&codec->in_pm, 0);

	err = snd_hdac_bus_add_device(bus, codec);
	if (err < 0)
		goto error;

	/* fill parameters */
	codec->vendor_id = snd_hdac_read_parm(codec, AC_NODE_ROOT,
					      AC_PAR_VENDOR_ID);
	if (codec->vendor_id == -1) {
		/* read again, hopefully the access method was corrected
		 * in the last read...
		 */
		codec->vendor_id = snd_hdac_read_parm(codec, AC_NODE_ROOT,
						      AC_PAR_VENDOR_ID);
	}

	codec->subsystem_id = snd_hdac_read_parm(codec, AC_NODE_ROOT,
						 AC_PAR_SUBSYSTEM_ID);
	codec->revision_id = snd_hdac_read_parm(codec, AC_NODE_ROOT,
						AC_PAR_REV_ID);

	setup_fg_nodes(codec);
	if (!codec->afg && !codec->mfg) {
		dev_err(dev, "no AFG or MFG node found\n");
		err = -ENODEV;
		goto error;
	}

	fg = codec->afg ? codec->afg : codec->mfg;

	err = snd_hdac_refresh_widgets(codec);
	if (err < 0)
		goto error;

	codec->power_caps = snd_hdac_read_parm(codec, fg, AC_PAR_POWER_STATE);
	/* reread ssid if not set by parameter */
	if (codec->subsystem_id == -1 || codec->subsystem_id == 0)
		snd_hdac_read(codec, fg, AC_VERB_GET_SUBSYSTEM_ID, 0,
			      &codec->subsystem_id);

	err = get_codec_vendor_name(codec);
	if (err < 0)
		goto error;

	codec->chip_name = kasprintf(GFP_KERNEL, "ID %x",
				     codec->vendor_id & 0xffff);
	if (!codec->chip_name) {
		err = -ENOMEM;
		goto error;
	}

	return 0;

 error:
	/* put_device() triggers default_release() -> snd_hdac_device_exit() */
	put_device(&codec->dev);
	return err;
}
EXPORT_SYMBOL_GPL(snd_hdac_device_init);

/**
 * snd_hdac_device_exit - clean up the HD-audio codec base device
 * @codec: device to clean up
 */
void snd_hdac_device_exit(struct hdac_device *codec)
{
	pm_runtime_put_noidle(&codec->dev);
	/* keep balance of runtime PM child_count in parent device */
	pm_runtime_set_suspended(&codec->dev);
	snd_hdac_bus_remove_device(codec->bus, codec);
	kfree(codec->vendor_name);
	kfree(codec->chip_name);
}
EXPORT_SYMBOL_GPL(snd_hdac_device_exit);

/**
 * snd_hdac_device_register - register the hd-audio codec base device
 * @codec: the device to register
 */
int snd_hdac_device_register(struct hdac_device *codec)
{
	int err;

	err = device_add(&codec->dev);
	if (err < 0)
		return err;
	mutex_lock(&codec->widget_lock);
	err = hda_widget_sysfs_init(codec);
	mutex_unlock(&codec->widget_lock);
	if (err < 0) {
		device_del(&codec->dev);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_device_register);

/**
 * snd_hdac_device_unregister - unregister the hd-audio codec base device
 * @codec: the device to unregister
 */
void snd_hdac_device_unregister(struct hdac_device *codec)
{
	if (device_is_registered(&codec->dev)) {
		mutex_lock(&codec->widget_lock);
		hda_widget_sysfs_exit(codec);
		mutex_unlock(&codec->widget_lock);
		device_del(&codec->dev);
		snd_hdac_bus_remove_device(codec->bus, codec);
	}
}
EXPORT_SYMBOL_GPL(snd_hdac_device_unregister);

/**
 * snd_hdac_device_set_chip_name - set/update the codec name
 * @codec: the HDAC device
 * @name: name string to set
 *
 * Returns 0 if the name is set or updated, or a negative error code.
 */
int snd_hdac_device_set_chip_name(struct hdac_device *codec, const char *name)
{
	char *newname;

	if (!name)
		return 0;
	newname = kstrdup(name, GFP_KERNEL);
	if (!newname)
		return -ENOMEM;
	kfree(codec->chip_name);
	codec->chip_name = newname;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_device_set_chip_name);

/**
 * snd_hdac_codec_modalias - give the module alias name
 * @codec: HDAC device
 * @buf: string buffer to store
 * @size: string buffer size
 *
 * Returns the size of string, like snprintf(), or a negative error code.
 */
int snd_hdac_codec_modalias(const struct hdac_device *codec, char *buf,
			    size_t size)
{
	return scnprintf(buf, size, "hdaudio:v%08Xr%08Xa%02X\n",
			codec->vendor_id, codec->revision_id, codec->type);
}
EXPORT_SYMBOL_GPL(snd_hdac_codec_modalias);

/**
 * snd_hdac_make_cmd - compose a 32bit command word to be sent to the
 *	HD-audio controller
 * @codec: the codec object
 * @nid: NID to encode
 * @verb: verb to encode
 * @parm: parameter to encode
 *
 * Return an encoded command verb or -1 for error.
 */
static unsigned int snd_hdac_make_cmd(struct hdac_device *codec, hda_nid_t nid,
				      unsigned int verb, unsigned int parm)
{
	u32 val, addr;

	addr = codec->addr;
	/* reject fields that would overflow their bit positions */
	if ((addr & ~0xf) || (nid & ~0x7f) ||
	    (verb & ~0xfff) || (parm & ~0xffff)) {
		dev_err(&codec->dev, "out of range cmd %x:%x:%x:%x\n",
			addr, nid, verb, parm);
		return -1;
	}

	val = addr << 28;
	val |= (u32)nid << 20;
	val |= verb << 8;
	val |= parm;
	return val;
}

/**
 * snd_hdac_exec_verb - execute an encoded verb
 * @codec: the codec object
 * @cmd: encoded verb to execute
 * @flags: optional flags, pass zero for default
 * @res: the pointer to store the result, NULL if running async
 *
 * Returns zero if successful, or a negative error code.
 *
 * This calls the exec_verb op when set in hdac_codec.  If not,
 * call the default snd_hdac_bus_exec_verb().
 */
int snd_hdac_exec_verb(struct hdac_device *codec, unsigned int cmd,
		       unsigned int flags, unsigned int *res)
{
	if (codec->exec_verb)
		return codec->exec_verb(codec, cmd, flags, res);
	return snd_hdac_bus_exec_verb(codec->bus, codec->addr, cmd, res);
}

/**
 * snd_hdac_read - execute a verb
 * @codec: the codec object
 * @nid: NID to execute a verb
 * @verb: verb to execute
 * @parm: parameter for a verb
 * @res: the pointer to store the result, NULL if running async
 *
 * Returns zero if successful, or a negative error code.
 */
int snd_hdac_read(struct hdac_device *codec, hda_nid_t nid,
		  unsigned int verb, unsigned int parm, unsigned int *res)
{
	unsigned int cmd = snd_hdac_make_cmd(codec, nid, verb, parm);

	return snd_hdac_exec_verb(codec, cmd, 0, res);
}
EXPORT_SYMBOL_GPL(snd_hdac_read);

/**
 * _snd_hdac_read_parm - read a parmeter
 * @codec: the codec object
 * @nid: NID to read a parameter
 * @parm: parameter to read
 * @res: pointer to store the read value
 *
 * This function returns zero or an error unlike snd_hdac_read_parm().
 */
int _snd_hdac_read_parm(struct hdac_device *codec, hda_nid_t nid, int parm,
			unsigned int *res)
{
	unsigned int cmd;

	cmd = snd_hdac_regmap_encode_verb(nid, AC_VERB_PARAMETERS) | parm;
	return snd_hdac_regmap_read_raw(codec, cmd, res);
}
EXPORT_SYMBOL_GPL(_snd_hdac_read_parm);

/**
 * snd_hdac_read_parm_uncached - read a codec parameter without caching
 * @codec: the codec object
 * @nid: NID to read a parameter
 * @parm: parameter to read
 *
 * Returns -1 for error.  If you need to distinguish the error more
 * strictly, use snd_hdac_read() directly.
 */
int snd_hdac_read_parm_uncached(struct hdac_device *codec, hda_nid_t nid,
				int parm)
{
	unsigned int cmd, val;

	cmd = snd_hdac_regmap_encode_verb(nid, AC_VERB_PARAMETERS) | parm;
	if (snd_hdac_regmap_read_raw_uncached(codec, cmd, &val) < 0)
		return -1;
	return val;
}
EXPORT_SYMBOL_GPL(snd_hdac_read_parm_uncached);

/**
 * snd_hdac_override_parm - override read-only parameters
 * @codec: the codec object
 * @nid: NID for the parameter
 * @parm: the parameter to change
 * @val: the parameter value to overwrite
 */
int snd_hdac_override_parm(struct hdac_device *codec, hda_nid_t nid,
			   unsigned int parm, unsigned int val)
{
	unsigned int verb = (AC_VERB_PARAMETERS << 8) | (nid << 20) | parm;
	int err;

	if (!codec->regmap)
		return -EINVAL;

	/* temporarily allow writes to read-only (cached) parameters */
	codec->caps_overwriting = true;
	err = snd_hdac_regmap_write_raw(codec, verb, val);
	codec->caps_overwriting = false;
	return err;
}
EXPORT_SYMBOL_GPL(snd_hdac_override_parm);

/**
 * snd_hdac_get_sub_nodes - get start NID and number of subtree nodes
 * @codec: the codec object
 * @nid: NID to inspect
 * @start_id: the pointer to store the starting NID
 *
 * Returns the number of subtree nodes or zero if not found.
 * This function reads parameters always without caching.
 */
int snd_hdac_get_sub_nodes(struct hdac_device *codec, hda_nid_t nid,
			   hda_nid_t *start_id)
{
	unsigned int parm;

	parm = snd_hdac_read_parm_uncached(codec, nid, AC_PAR_NODE_COUNT);
	if (parm == -1) {
		*start_id = 0;
		return 0;
	}
	/* upper half: first sub-node NID; lower half: node count */
	*start_id = (parm >> 16) & 0x7fff;
	return (int)(parm & 0x7fff);
}
EXPORT_SYMBOL_GPL(snd_hdac_get_sub_nodes);

/*
 * look for an AFG and MFG nodes
 */
static void setup_fg_nodes(struct hdac_device *codec)
{
	int i, total_nodes, function_id;
	hda_nid_t nid;

	total_nodes = snd_hdac_get_sub_nodes(codec, AC_NODE_ROOT, &nid);
	for (i = 0; i < total_nodes; i++, nid++) {
		function_id = snd_hdac_read_parm(codec, nid,
						 AC_PAR_FUNCTION_TYPE);
		switch (function_id & 0xff) {
		case AC_GRP_AUDIO_FUNCTION:
			codec->afg = nid;
			codec->afg_function_id = function_id & 0xff;
			/* bit 8 flags unsolicited-event capability */
			codec->afg_unsol = (function_id >> 8) & 1;
			break;
		case AC_GRP_MODEM_FUNCTION:
			codec->mfg = nid;
			codec->mfg_function_id = function_id & 0xff;
			codec->mfg_unsol = (function_id >> 8) & 1;
			break;
		default:
			break;
		}
	}
}

/**
 * snd_hdac_refresh_widgets - Reset the widget start/end nodes
 * @codec: the codec object
 */
int snd_hdac_refresh_widgets(struct hdac_device *codec)
{
	hda_nid_t start_nid;
	int nums, err = 0;

	/*
	 * Serialize against multiple threads trying to update the sysfs
	 * widgets array.
	 */
	mutex_lock(&codec->widget_lock);
	nums = snd_hdac_get_sub_nodes(codec, codec->afg, &start_nid);
	if (!start_nid || nums <= 0 || nums >= 0xff) {
		dev_err(&codec->dev, "cannot read sub nodes for FG 0x%02x\n",
			codec->afg);
		err = -EINVAL;
		goto unlock;
	}

	err = hda_widget_sysfs_reinit(codec, start_nid, nums);
	if (err < 0)
		goto unlock;

	codec->num_nodes = nums;
	codec->start_nid = start_nid;
	codec->end_nid = start_nid + nums;
 unlock:
	mutex_unlock(&codec->widget_lock);
	return err;
}
EXPORT_SYMBOL_GPL(snd_hdac_refresh_widgets);

/* return CONNLIST_LEN parameter of the given widget */
static unsigned int get_num_conns(struct hdac_device *codec, hda_nid_t nid)
{
	unsigned int wcaps = get_wcaps(codec, nid);
	unsigned int parm;

	if (!(wcaps & AC_WCAP_CONN_LIST) &&
	    get_wcaps_type(wcaps) != AC_WID_VOL_KNB)
		return 0;

	parm = snd_hdac_read_parm(codec, nid, AC_PAR_CONNLIST_LEN);
	if (parm == -1)
		parm = 0;
	return parm;
}

/**
 * snd_hdac_get_connections - get a widget connection list
 * @codec: the codec object
 * @nid: NID
 * @conn_list: the array to store the results, can be NULL
 * @max_conns: the max size of the given array
 *
 * Returns the number of connected widgets, zero for no connection, or a
 * negative error code.  When the number of elements don't fit with the
 * given array size, it returns -ENOSPC.
 *
 * When @conn_list is NULL, it just checks the number of connections.
 */
int snd_hdac_get_connections(struct hdac_device *codec, hda_nid_t nid,
			     hda_nid_t *conn_list, int max_conns)
{
	unsigned int parm;
	int i, conn_len, conns, err;
	unsigned int shift, num_elems, mask;
	hda_nid_t prev_nid;
	int null_count = 0;

	parm = get_num_conns(codec, nid);
	if (!parm)
		return 0;

	if (parm & AC_CLIST_LONG) {
		/* long form: 16-bit entries, two per response word */
		shift = 16;
		num_elems = 2;
	} else {
		/* short form: 8-bit entries, four per response word */
		shift = 8;
		num_elems = 4;
	}
	conn_len = parm & AC_CLIST_LENGTH;
	mask = (1 << (shift-1)) - 1;

	if (!conn_len)
		return 0; /* no connection */

	if (conn_len == 1) {
		/* single connection */
		err = snd_hdac_read(codec, nid, AC_VERB_GET_CONNECT_LIST, 0,
				    &parm);
		if (err < 0)
			return err;
		if (conn_list)
			conn_list[0] = parm & mask;
		return 1;
	}

	/* multi connection */
	conns = 0;
	prev_nid = 0;
	for (i = 0; i < conn_len; i++) {
		int range_val;
		hda_nid_t val, n;

		/* fetch a new response word every num_elems entries */
		if (i % num_elems == 0) {
			err = snd_hdac_read(codec, nid,
					    AC_VERB_GET_CONNECT_LIST, i,
					    &parm);
			if (err < 0)
				return -EIO;
		}
		range_val = !!(parm & (1 << (shift-1))); /* ranges */
		val = parm & mask;
		if (val == 0 && null_count++) {  /* no second chance */
			dev_dbg(&codec->dev,
				"invalid CONNECT_LIST verb %x[%i]:%x\n",
				nid, i, parm);
			return 0;
		}
		parm >>= shift;
		if (range_val) {
			/* ranges between the previous and this one */
			if (!prev_nid || prev_nid >= val) {
				dev_warn(&codec->dev,
					 "invalid dep_range_val %x:%x\n",
					 prev_nid, val);
				continue;
			}
			for (n = prev_nid + 1; n <= val; n++) {
				if (conn_list) {
					if (conns >= max_conns)
						return -ENOSPC;
					conn_list[conns] = n;
				}
				conns++;
			}
		} else {
			if (conn_list) {
				if (conns >= max_conns)
					return -ENOSPC;
				conn_list[conns] = val;
			}
			conns++;
		}
		prev_nid = val;
	}
	return conns;
}
EXPORT_SYMBOL_GPL(snd_hdac_get_connections);

#ifdef CONFIG_PM
/**
 * snd_hdac_power_up - power up the codec
 * @codec: the codec object
 *
 * This function calls the runtime PM helper to power up the given codec.
 * Unlike snd_hdac_power_up_pm(), you should call this only for the code
 * path that isn't included in PM path.  Otherwise it gets stuck.
 *
 * Returns zero if successful, or a negative error code.
 */
int snd_hdac_power_up(struct hdac_device *codec)
{
	return pm_runtime_get_sync(&codec->dev);
}
EXPORT_SYMBOL_GPL(snd_hdac_power_up);

/**
 * snd_hdac_power_down - power down the codec
 * @codec: the codec object
 *
 * Returns zero if successful, or a negative error code.
 */
int snd_hdac_power_down(struct hdac_device *codec)
{
	struct device *dev = &codec->dev;

	pm_runtime_mark_last_busy(dev);
	return pm_runtime_put_autosuspend(dev);
}
EXPORT_SYMBOL_GPL(snd_hdac_power_down);

/**
 * snd_hdac_power_up_pm - power up the codec
 * @codec: the codec object
 *
 * This function can be called in a recursive code path like init code
 * which may be called by PM suspend/resume again.  OTOH, if a power-up
 * call must wake up the sleeper (e.g. in a kctl callback), use
 * snd_hdac_power_up() instead.
 *
 * Returns zero if successful, or a negative error code.
 */
int snd_hdac_power_up_pm(struct hdac_device *codec)
{
	/* skip the runtime-PM call while inside a PM transition (in_pm > 0) */
	if (!atomic_inc_not_zero(&codec->in_pm))
		return snd_hdac_power_up(codec);
	return 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_power_up_pm);

/* like snd_hdac_power_up_pm(), but only increment the pm count when
 * already powered up.  Returns -1 if not powered up, 1 if incremented
 * or 0 if unchanged.  Only used in hdac_regmap.c
 */
int snd_hdac_keep_power_up(struct hdac_device *codec)
{
	if (!atomic_inc_not_zero(&codec->in_pm)) {
		int ret = pm_runtime_get_if_active(&codec->dev, true);

		if (!ret)
			return -1;
		if (ret < 0)
			return 0;
	}
	return 1;
}

/**
 * snd_hdac_power_down_pm - power down the codec
 * @codec: the codec object
 *
 * Like snd_hdac_power_up_pm(), this function is used in a recursive
 * code path like init code which may be called by PM suspend/resume again.
 *
 * Returns zero if successful, or a negative error code.
 */
int snd_hdac_power_down_pm(struct hdac_device *codec)
{
	if (atomic_dec_if_positive(&codec->in_pm) < 0)
		return snd_hdac_power_down(codec);
	return 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_power_down_pm);
#endif

/* codec vendor labels */
struct hda_vendor_id {
	unsigned int id;
	const char *name;
};

static const struct hda_vendor_id hda_vendor_ids[] = {
	{ 0x0014, "Loongson" },
	{ 0x1002, "ATI" },
	{ 0x1013, "Cirrus Logic" },
	{ 0x1057, "Motorola" },
	{ 0x1095, "Silicon Image" },
	{ 0x10de, "Nvidia" },
	{ 0x10ec, "Realtek" },
	{ 0x1102, "Creative" },
	{ 0x1106, "VIA" },
	{ 0x111d, "IDT" },
	{ 0x11c1, "LSI" },
	{ 0x11d4, "Analog Devices" },
	{ 0x13f6, "C-Media" },
	{ 0x14f1, "Conexant" },
	{ 0x17e8, "Chrontel" },
	{ 0x1854, "LG" },
	{ 0x19e5, "Huawei" },
	{ 0x1aec, "Wolfson Microelectronics" },
	{ 0x1af4, "QEMU" },
	{ 0x434d, "C-Media" },
	{ 0x8086, "Intel" },
	{ 0x8384, "SigmaTel" },
	{} /* terminator */
};

/* store the codec vendor name */
static int get_codec_vendor_name(struct hdac_device *codec)
{
	const struct hda_vendor_id *c;
	u16 vendor_id = codec->vendor_id >> 16;

	for (c = hda_vendor_ids; c->id; c++) {
		if (c->id == vendor_id) {
			codec->vendor_name = kstrdup(c->name, GFP_KERNEL);
			return codec->vendor_name ? 0 : -ENOMEM;
		}
	}

	/* unknown vendor: fall back to a generic hex label */
	codec->vendor_name = kasprintf(GFP_KERNEL, "Generic %04x", vendor_id);
	return codec->vendor_name ? 0 : -ENOMEM;
}

/*
 * stream formats
 */
struct hda_rate_tbl {
	unsigned int hz;
	unsigned int alsa_bits;
	unsigned int hda_fmt;
};

/* rate = base * mult / div */
#define HDA_RATE(base, mult, div) \
	(AC_FMT_BASE_##base##K | (((mult) - 1) << AC_FMT_MULT_SHIFT) | \
	 (((div) - 1) << AC_FMT_DIV_SHIFT))

static const struct hda_rate_tbl rate_bits[] = {
	/* rate in Hz, ALSA rate bitmask, HDA format value */

	/* autodetected value used in snd_hda_query_supported_pcm */
	{ 8000, SNDRV_PCM_RATE_8000, HDA_RATE(48, 1, 6) },
	{ 11025, SNDRV_PCM_RATE_11025, HDA_RATE(44, 1, 4) },
	{ 16000, SNDRV_PCM_RATE_16000, HDA_RATE(48, 1, 3) },
	{ 22050, SNDRV_PCM_RATE_22050, HDA_RATE(44, 1, 2) },
	{ 32000, SNDRV_PCM_RATE_32000, HDA_RATE(48, 2, 3) },
	{ 44100, SNDRV_PCM_RATE_44100, HDA_RATE(44, 1, 1) },
	{ 48000, SNDRV_PCM_RATE_48000, HDA_RATE(48, 1, 1) },
	{ 88200, SNDRV_PCM_RATE_88200, HDA_RATE(44, 2, 1) },
	{ 96000, SNDRV_PCM_RATE_96000, HDA_RATE(48, 2, 1) },
	{ 176400, SNDRV_PCM_RATE_176400, HDA_RATE(44, 4, 1) },
	{ 192000, SNDRV_PCM_RATE_192000, HDA_RATE(48, 4, 1) },
#define AC_PAR_PCM_RATE_BITS	11
	/* up to bits 10, 384kHZ isn't supported properly */

	/* not autodetected value */
	{ 9600, SNDRV_PCM_RATE_KNOT, HDA_RATE(48, 1, 5) },

	{ 0 } /* terminator */
};

/**
 * snd_hdac_calc_stream_format - calculate the format bitset
 * @rate: the sample rate
 * @channels: the number of channels
 * @format: the PCM format (SNDRV_PCM_FORMAT_XXX)
 * @maxbps: the max. bps
 * @spdif_ctls: HD-audio SPDIF status bits (0 if irrelevant)
 *
 * Calculate the format bitset from the given rate, channels and th PCM format.
 *
 * Return zero if invalid.
 */
unsigned int snd_hdac_calc_stream_format(unsigned int rate,
					 unsigned int channels,
					 snd_pcm_format_t format,
					 unsigned int maxbps,
					 unsigned short spdif_ctls)
{
	int i;
	unsigned int val = 0;

	for (i = 0; rate_bits[i].hz; i++)
		if (rate_bits[i].hz == rate) {
			val = rate_bits[i].hda_fmt;
			break;
		}
	if (!rate_bits[i].hz)
		return 0;

	if (channels == 0 || channels > 8)
		return 0;
	val |= channels - 1;

	switch (snd_pcm_format_width(format)) {
	case 8:
		val |= AC_FMT_BITS_8;
		break;
	case 16:
		val |= AC_FMT_BITS_16;
		break;
	case 20:
	case 24:
	case 32:
		/* pick the widest container the hardware supports */
		if (maxbps >= 32 || format == SNDRV_PCM_FORMAT_FLOAT_LE)
			val |= AC_FMT_BITS_32;
		else if (maxbps >= 24)
			val |= AC_FMT_BITS_24;
		else
			val |= AC_FMT_BITS_20;
		break;
	default:
		return 0;
	}

	if (spdif_ctls & AC_DIG1_NONAUDIO)
		val |= AC_FMT_TYPE_NON_PCM;

	return val;
}
EXPORT_SYMBOL_GPL(snd_hdac_calc_stream_format);

/* query the PCM parameter, falling back to the AFG node's value */
static unsigned int query_pcm_param(struct hdac_device *codec, hda_nid_t nid)
{
	unsigned int val = 0;

	if (nid != codec->afg &&
	    (get_wcaps(codec, nid) & AC_WCAP_FORMAT_OVRD))
		val = snd_hdac_read_parm(codec, nid, AC_PAR_PCM);
	if (!val || val == -1)
		val = snd_hdac_read_parm(codec, codec->afg, AC_PAR_PCM);
	if (!val || val == -1)
		return 0;
	return val;
}

/* query the stream parameter, falling back to the AFG node's value */
static unsigned int query_stream_param(struct hdac_device *codec, hda_nid_t nid)
{
	unsigned int streams = snd_hdac_read_parm(codec, nid, AC_PAR_STREAM);

	if (!streams || streams == -1)
		streams = snd_hdac_read_parm(codec, codec->afg, AC_PAR_STREAM);
	if (!streams || streams == -1)
		return 0;
	return streams;
}

/**
 * snd_hdac_query_supported_pcm - query the supported PCM rates and formats
 * @codec: the codec object
 * @nid: NID to query
 * @ratesp: the pointer to store the detected rate bitflags
 * @formatsp: the pointer to store the detected formats
 * @bpsp: the pointer to store the detected format widths
 *
 * Queries the supported PCM rates and formats.  The NULL @ratesp, @formatsp
 * or @bsps argument is ignored.
 *
 * Returns 0 if successful, otherwise a negative error code.
 */
int snd_hdac_query_supported_pcm(struct hdac_device *codec, hda_nid_t nid,
				 u32 *ratesp, u64 *formatsp,
				 unsigned int *bpsp)
{
	unsigned int i, val, wcaps;

	wcaps = get_wcaps(codec, nid);
	val = query_pcm_param(codec, nid);

	if (ratesp) {
		u32 rates = 0;
		for (i = 0; i < AC_PAR_PCM_RATE_BITS; i++) {
			if (val & (1 << i))
				rates |= rate_bits[i].alsa_bits;
		}
		if (rates == 0) {
			dev_err(&codec->dev,
				"rates == 0 (nid=0x%x, val=0x%x, ovrd=%i)\n",
				nid, val,
				(wcaps & AC_WCAP_FORMAT_OVRD) ? 1 : 0);
			return -EIO;
		}
		*ratesp = rates;
	}

	if (formatsp || bpsp) {
		u64 formats = 0;
		unsigned int streams, bps;

		streams = query_stream_param(codec, nid);
		if (!streams)
			return -EIO;

		bps = 0;
		if (streams & AC_SUPFMT_PCM) {
			if (val & AC_SUPPCM_BITS_8) {
				formats |= SNDRV_PCM_FMTBIT_U8;
				bps = 8;
			}
			if (val & AC_SUPPCM_BITS_16) {
				formats |= SNDRV_PCM_FMTBIT_S16_LE;
				bps = 16;
			}
			if (wcaps & AC_WCAP_DIGITAL) {
				if (val & AC_SUPPCM_BITS_32)
					formats |= SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE;
				if (val & (AC_SUPPCM_BITS_20|AC_SUPPCM_BITS_24))
					formats |= SNDRV_PCM_FMTBIT_S32_LE;
				if (val & AC_SUPPCM_BITS_24)
					bps = 24;
				else if (val & AC_SUPPCM_BITS_20)
					bps = 20;
			} else if (val & (AC_SUPPCM_BITS_20|AC_SUPPCM_BITS_24|
					  AC_SUPPCM_BITS_32)) {
				formats |= SNDRV_PCM_FMTBIT_S32_LE;
				if (val & AC_SUPPCM_BITS_32)
					bps = 32;
				else if (val & AC_SUPPCM_BITS_24)
					bps = 24;
				else if (val & AC_SUPPCM_BITS_20)
					bps = 20;
			}
		}
#if 0 /* FIXME: CS4206 doesn't work, which is the only codec supporting float */
		if (streams & AC_SUPFMT_FLOAT32) {
			formats |= SNDRV_PCM_FMTBIT_FLOAT_LE;
			if (!bps)
				bps = 32;
		}
#endif
		if (streams == AC_SUPFMT_AC3) {
			/* should be exclusive */
			/* temporary hack: we have still no proper support
			 * for the direct AC3 stream...
			 */
			formats |= SNDRV_PCM_FMTBIT_U8;
			bps = 8;
		}
		if (formats == 0) {
			dev_err(&codec->dev,
				"formats == 0 (nid=0x%x, val=0x%x, ovrd=%i, streams=0x%x)\n",
				nid, val,
				(wcaps & AC_WCAP_FORMAT_OVRD) ? 1 : 0,
				streams);
			return -EIO;
		}
		if (formatsp)
			*formatsp = formats;
		if (bpsp)
			*bpsp = bps;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_query_supported_pcm);

/**
 * snd_hdac_is_supported_format - Check the validity of the format
 * @codec: the codec object
 * @nid: NID to check
 * @format: the HD-audio format value to check
 *
 * Check whether the given node supports the format value.
 *
 * Returns true if supported, false if not.
 */
bool snd_hdac_is_supported_format(struct hdac_device *codec, hda_nid_t nid,
				  unsigned int format)
{
	int i;
	unsigned int val = 0, rate, stream;

	val = query_pcm_param(codec, nid);
	if (!val)
		return false;

	rate = format & 0xff00;
	for (i = 0; i < AC_PAR_PCM_RATE_BITS; i++)
		if (rate_bits[i].hda_fmt == rate) {
			/* rate matched: supported only if its cap bit is set */
			if (val & (1 << i))
				break;
			return false;
		}
	if (i >= AC_PAR_PCM_RATE_BITS)
		return false;

	stream = query_stream_param(codec, nid);
	if (!stream)
		return false;

	if (stream & AC_SUPFMT_PCM) {
		switch (format & 0xf0) {
		case 0x00:
			if (!(val & AC_SUPPCM_BITS_8))
				return false;
			break;
		case 0x10:
			if (!(val & AC_SUPPCM_BITS_16))
				return false;
			break;
		case 0x20:
			if (!(val & AC_SUPPCM_BITS_20))
				return false;
			break;
		case 0x30:
			if (!(val & AC_SUPPCM_BITS_24))
				return false;
			break;
		case 0x40:
			if (!(val & AC_SUPPCM_BITS_32))
				return false;
			break;
		default:
			return false;
		}
	} else {
		/* FIXME: check for float32 and AC3? */
	}

	return true;
}
EXPORT_SYMBOL_GPL(snd_hdac_is_supported_format);

/* send a verb and return the response, or -1 on error */
static unsigned int codec_read(struct hdac_device *hdac, hda_nid_t nid,
			       int flags, unsigned int verb, unsigned int parm)
{
	unsigned int cmd = snd_hdac_make_cmd(hdac, nid, verb, parm);
	unsigned int res;

	if (snd_hdac_exec_verb(hdac, cmd, flags, &res))
		return -1;
	return res;
}

/* send a verb without reading the response */
static int codec_write(struct hdac_device *hdac, hda_nid_t nid,
		       int flags, unsigned int verb, unsigned int parm)
{
	unsigned int cmd = snd_hdac_make_cmd(hdac, nid, verb, parm);

	return snd_hdac_exec_verb(hdac, cmd, flags, NULL);
}

/**
 * snd_hdac_codec_read - send a command and get the response
 * @hdac: the HDAC device
 * @nid: NID to send the command
 * @flags: optional bit flags
 * @verb: the verb to send
 * @parm: the parameter for the verb
 *
 * Send a single command and read the corresponding response.
 *
 * Returns the obtained response value, or -1 for an error.
 */
int snd_hdac_codec_read(struct hdac_device *hdac, hda_nid_t nid,
			int flags, unsigned int verb, unsigned int parm)
{
	return codec_read(hdac, nid, flags, verb, parm);
}
EXPORT_SYMBOL_GPL(snd_hdac_codec_read);

/**
 * snd_hdac_codec_write - send a single command without waiting for response
 * @hdac: the HDAC device
 * @nid: NID to send the command
 * @flags: optional bit flags
 * @verb: the verb to send
 * @parm: the parameter for the verb
 *
 * Send a single command without waiting for response.
 *
 * Returns 0 if successful, or a negative error code.
*/ int snd_hdac_codec_write(struct hdac_device *hdac, hda_nid_t nid, int flags, unsigned int verb, unsigned int parm) { return codec_write(hdac, nid, flags, verb, parm); } EXPORT_SYMBOL_GPL(snd_hdac_codec_write); /** * snd_hdac_check_power_state - check whether the actual power state matches * with the target state * * @hdac: the HDAC device * @nid: NID to send the command * @target_state: target state to check for * * Return true if state matches, false if not */ bool snd_hdac_check_power_state(struct hdac_device *hdac, hda_nid_t nid, unsigned int target_state) { unsigned int state = codec_read(hdac, nid, 0, AC_VERB_GET_POWER_STATE, 0); if (state & AC_PWRST_ERROR) return true; state = (state >> 4) & 0x0f; return (state == target_state); } EXPORT_SYMBOL_GPL(snd_hdac_check_power_state); /** * snd_hdac_sync_power_state - wait until actual power state matches * with the target state * * @codec: the HDAC device * @nid: NID to send the command * @power_state: target power state to wait for * * Return power state or PS_ERROR if codec rejects GET verb. */ unsigned int snd_hdac_sync_power_state(struct hdac_device *codec, hda_nid_t nid, unsigned int power_state) { unsigned long end_time = jiffies + msecs_to_jiffies(500); unsigned int state, actual_state, count; for (count = 0; count < 500; count++) { state = snd_hdac_codec_read(codec, nid, 0, AC_VERB_GET_POWER_STATE, 0); if (state & AC_PWRST_ERROR) { msleep(20); break; } actual_state = (state >> 4) & 0x0f; if (actual_state == power_state) break; if (time_after_eq(jiffies, end_time)) break; /* wait until the codec reachs to the target state */ msleep(1); } return state; } EXPORT_SYMBOL_GPL(snd_hdac_sync_power_state);
/* source: linux-master — sound/hda/hdac_device.c */
// SPDX-License-Identifier: GPL-2.0-only /* * Regmap support for HD-audio verbs * * A virtual register is translated to one or more hda verbs for write, * vice versa for read. * * A few limitations: * - Provided for not all verbs but only subset standard non-volatile verbs. * - For reading, only AC_VERB_GET_* variants can be used. * - For writing, mapped to the *corresponding* AC_VERB_SET_* variants, * so can't handle asymmetric verbs for read and write */ #include <linux/slab.h> #include <linux/device.h> #include <linux/regmap.h> #include <linux/export.h> #include <linux/pm.h> #include <sound/core.h> #include <sound/hdaudio.h> #include <sound/hda_regmap.h> #include "local.h" static int codec_pm_lock(struct hdac_device *codec) { return snd_hdac_keep_power_up(codec); } static void codec_pm_unlock(struct hdac_device *codec, int lock) { if (lock == 1) snd_hdac_power_down_pm(codec); } #define get_verb(reg) (((reg) >> 8) & 0xfff) static bool hda_volatile_reg(struct device *dev, unsigned int reg) { struct hdac_device *codec = dev_to_hdac_dev(dev); unsigned int verb = get_verb(reg); switch (verb) { case AC_VERB_GET_PROC_COEF: return !codec->cache_coef; case AC_VERB_GET_COEF_INDEX: case AC_VERB_GET_PROC_STATE: case AC_VERB_GET_POWER_STATE: case AC_VERB_GET_PIN_SENSE: case AC_VERB_GET_HDMI_DIP_SIZE: case AC_VERB_GET_HDMI_ELDD: case AC_VERB_GET_HDMI_DIP_INDEX: case AC_VERB_GET_HDMI_DIP_DATA: case AC_VERB_GET_HDMI_DIP_XMIT: case AC_VERB_GET_HDMI_CP_CTRL: case AC_VERB_GET_HDMI_CHAN_SLOT: case AC_VERB_GET_DEVICE_SEL: case AC_VERB_GET_DEVICE_LIST: /* read-only volatile */ return true; } return false; } static bool hda_writeable_reg(struct device *dev, unsigned int reg) { struct hdac_device *codec = dev_to_hdac_dev(dev); unsigned int verb = get_verb(reg); const unsigned int *v; int i; snd_array_for_each(&codec->vendor_verbs, i, v) { if (verb == *v) return true; } if (codec->caps_overwriting) return true; switch (verb & 0xf00) { case AC_VERB_GET_STREAM_FORMAT: case 
AC_VERB_GET_AMP_GAIN_MUTE: return true; case AC_VERB_GET_PROC_COEF: return codec->cache_coef; case 0xf00: break; default: return false; } switch (verb) { case AC_VERB_GET_CONNECT_SEL: case AC_VERB_GET_SDI_SELECT: case AC_VERB_GET_PIN_WIDGET_CONTROL: case AC_VERB_GET_UNSOLICITED_RESPONSE: /* only as SET_UNSOLICITED_ENABLE */ case AC_VERB_GET_BEEP_CONTROL: case AC_VERB_GET_EAPD_BTLENABLE: case AC_VERB_GET_DIGI_CONVERT_1: case AC_VERB_GET_DIGI_CONVERT_2: /* only for beep control */ case AC_VERB_GET_VOLUME_KNOB_CONTROL: case AC_VERB_GET_GPIO_MASK: case AC_VERB_GET_GPIO_DIRECTION: case AC_VERB_GET_GPIO_DATA: /* not for volatile read */ case AC_VERB_GET_GPIO_WAKE_MASK: case AC_VERB_GET_GPIO_UNSOLICITED_RSP_MASK: case AC_VERB_GET_GPIO_STICKY_MASK: return true; } return false; } static bool hda_readable_reg(struct device *dev, unsigned int reg) { struct hdac_device *codec = dev_to_hdac_dev(dev); unsigned int verb = get_verb(reg); if (codec->caps_overwriting) return true; switch (verb) { case AC_VERB_PARAMETERS: case AC_VERB_GET_CONNECT_LIST: case AC_VERB_GET_SUBSYSTEM_ID: return true; /* below are basically writable, but disabled for reducing unnecessary * writes at sync */ case AC_VERB_GET_CONFIG_DEFAULT: /* usually just read */ case AC_VERB_GET_CONV: /* managed in PCM code */ case AC_VERB_GET_CVT_CHAN_COUNT: /* managed in HDMI CA code */ return true; } return hda_writeable_reg(dev, reg); } /* * Stereo amp pseudo register: * for making easier to handle the stereo volume control, we provide a * fake register to deal both left and right channels by a single * (pseudo) register access. A verb consisting of SET_AMP_GAIN with * *both* SET_LEFT and SET_RIGHT bits takes a 16bit value, the lower 8bit * for the left and the upper 8bit for the right channel. 
*/ static bool is_stereo_amp_verb(unsigned int reg) { if (((reg >> 8) & 0x700) != AC_VERB_SET_AMP_GAIN_MUTE) return false; return (reg & (AC_AMP_SET_LEFT | AC_AMP_SET_RIGHT)) == (AC_AMP_SET_LEFT | AC_AMP_SET_RIGHT); } /* read a pseudo stereo amp register (16bit left+right) */ static int hda_reg_read_stereo_amp(struct hdac_device *codec, unsigned int reg, unsigned int *val) { unsigned int left, right; int err; reg &= ~(AC_AMP_SET_LEFT | AC_AMP_SET_RIGHT); err = snd_hdac_exec_verb(codec, reg | AC_AMP_GET_LEFT, 0, &left); if (err < 0) return err; err = snd_hdac_exec_verb(codec, reg | AC_AMP_GET_RIGHT, 0, &right); if (err < 0) return err; *val = left | (right << 8); return 0; } /* write a pseudo stereo amp register (16bit left+right) */ static int hda_reg_write_stereo_amp(struct hdac_device *codec, unsigned int reg, unsigned int val) { int err; unsigned int verb, left, right; verb = AC_VERB_SET_AMP_GAIN_MUTE << 8; if (reg & AC_AMP_GET_OUTPUT) verb |= AC_AMP_SET_OUTPUT; else verb |= AC_AMP_SET_INPUT | ((reg & 0xf) << 8); reg = (reg & ~0xfffff) | verb; left = val & 0xff; right = (val >> 8) & 0xff; if (left == right) { reg |= AC_AMP_SET_LEFT | AC_AMP_SET_RIGHT; return snd_hdac_exec_verb(codec, reg | left, 0, NULL); } err = snd_hdac_exec_verb(codec, reg | AC_AMP_SET_LEFT | left, 0, NULL); if (err < 0) return err; err = snd_hdac_exec_verb(codec, reg | AC_AMP_SET_RIGHT | right, 0, NULL); if (err < 0) return err; return 0; } /* read a pseudo coef register (16bit) */ static int hda_reg_read_coef(struct hdac_device *codec, unsigned int reg, unsigned int *val) { unsigned int verb; int err; if (!codec->cache_coef) return -EINVAL; /* LSB 8bit = coef index */ verb = (reg & ~0xfff00) | (AC_VERB_SET_COEF_INDEX << 8); err = snd_hdac_exec_verb(codec, verb, 0, NULL); if (err < 0) return err; verb = (reg & ~0xfffff) | (AC_VERB_GET_COEF_INDEX << 8); return snd_hdac_exec_verb(codec, verb, 0, val); } /* write a pseudo coef register (16bit) */ static int hda_reg_write_coef(struct hdac_device 
*codec, unsigned int reg, unsigned int val) { unsigned int verb; int err; if (!codec->cache_coef) return -EINVAL; /* LSB 8bit = coef index */ verb = (reg & ~0xfff00) | (AC_VERB_SET_COEF_INDEX << 8); err = snd_hdac_exec_verb(codec, verb, 0, NULL); if (err < 0) return err; verb = (reg & ~0xfffff) | (AC_VERB_GET_COEF_INDEX << 8) | (val & 0xffff); return snd_hdac_exec_verb(codec, verb, 0, NULL); } static int hda_reg_read(void *context, unsigned int reg, unsigned int *val) { struct hdac_device *codec = context; int verb = get_verb(reg); int err; int pm_lock = 0; if (verb != AC_VERB_GET_POWER_STATE) { pm_lock = codec_pm_lock(codec); if (pm_lock < 0) return -EAGAIN; } reg |= (codec->addr << 28); if (is_stereo_amp_verb(reg)) { err = hda_reg_read_stereo_amp(codec, reg, val); goto out; } if (verb == AC_VERB_GET_PROC_COEF) { err = hda_reg_read_coef(codec, reg, val); goto out; } if ((verb & 0x700) == AC_VERB_SET_AMP_GAIN_MUTE) reg &= ~AC_AMP_FAKE_MUTE; err = snd_hdac_exec_verb(codec, reg, 0, val); if (err < 0) goto out; /* special handling for asymmetric reads */ if (verb == AC_VERB_GET_POWER_STATE) { if (*val & AC_PWRST_ERROR) *val = -1; else /* take only the actual state */ *val = (*val >> 4) & 0x0f; } out: codec_pm_unlock(codec, pm_lock); return err; } static int hda_reg_write(void *context, unsigned int reg, unsigned int val) { struct hdac_device *codec = context; unsigned int verb; int i, bytes, err; int pm_lock = 0; if (codec->caps_overwriting) return 0; reg &= ~0x00080000U; /* drop GET bit */ reg |= (codec->addr << 28); verb = get_verb(reg); if (verb != AC_VERB_SET_POWER_STATE) { pm_lock = codec_pm_lock(codec); if (pm_lock < 0) return codec->lazy_cache ? 
0 : -EAGAIN; } if (is_stereo_amp_verb(reg)) { err = hda_reg_write_stereo_amp(codec, reg, val); goto out; } if (verb == AC_VERB_SET_PROC_COEF) { err = hda_reg_write_coef(codec, reg, val); goto out; } switch (verb & 0xf00) { case AC_VERB_SET_AMP_GAIN_MUTE: if ((reg & AC_AMP_FAKE_MUTE) && (val & AC_AMP_MUTE)) val = 0; verb = AC_VERB_SET_AMP_GAIN_MUTE; if (reg & AC_AMP_GET_LEFT) verb |= AC_AMP_SET_LEFT >> 8; else verb |= AC_AMP_SET_RIGHT >> 8; if (reg & AC_AMP_GET_OUTPUT) { verb |= AC_AMP_SET_OUTPUT >> 8; } else { verb |= AC_AMP_SET_INPUT >> 8; verb |= reg & 0xf; } break; } switch (verb) { case AC_VERB_SET_DIGI_CONVERT_1: bytes = 2; break; case AC_VERB_SET_CONFIG_DEFAULT_BYTES_0: bytes = 4; break; default: bytes = 1; break; } for (i = 0; i < bytes; i++) { reg &= ~0xfffff; reg |= (verb + i) << 8 | ((val >> (8 * i)) & 0xff); err = snd_hdac_exec_verb(codec, reg, 0, NULL); if (err < 0) goto out; } out: codec_pm_unlock(codec, pm_lock); return err; } static const struct regmap_config hda_regmap_cfg = { .name = "hdaudio", .reg_bits = 32, .val_bits = 32, .max_register = 0xfffffff, .writeable_reg = hda_writeable_reg, .readable_reg = hda_readable_reg, .volatile_reg = hda_volatile_reg, .cache_type = REGCACHE_MAPLE, .reg_read = hda_reg_read, .reg_write = hda_reg_write, .use_single_read = true, .use_single_write = true, .disable_locking = true, }; /** * snd_hdac_regmap_init - Initialize regmap for HDA register accesses * @codec: the codec object * * Returns zero for success or a negative error code. 
*/ int snd_hdac_regmap_init(struct hdac_device *codec) { struct regmap *regmap; regmap = regmap_init(&codec->dev, NULL, codec, &hda_regmap_cfg); if (IS_ERR(regmap)) return PTR_ERR(regmap); codec->regmap = regmap; snd_array_init(&codec->vendor_verbs, sizeof(unsigned int), 8); return 0; } EXPORT_SYMBOL_GPL(snd_hdac_regmap_init); /** * snd_hdac_regmap_exit - Release the regmap from HDA codec * @codec: the codec object */ void snd_hdac_regmap_exit(struct hdac_device *codec) { if (codec->regmap) { regmap_exit(codec->regmap); codec->regmap = NULL; snd_array_free(&codec->vendor_verbs); } } EXPORT_SYMBOL_GPL(snd_hdac_regmap_exit); /** * snd_hdac_regmap_add_vendor_verb - add a vendor-specific verb to regmap * @codec: the codec object * @verb: verb to allow accessing via regmap * * Returns zero for success or a negative error code. */ int snd_hdac_regmap_add_vendor_verb(struct hdac_device *codec, unsigned int verb) { unsigned int *p = snd_array_new(&codec->vendor_verbs); if (!p) return -ENOMEM; *p = verb | 0x800; /* set GET bit */ return 0; } EXPORT_SYMBOL_GPL(snd_hdac_regmap_add_vendor_verb); /* * helper functions */ /* write a pseudo-register value (w/o power sequence) */ static int reg_raw_write(struct hdac_device *codec, unsigned int reg, unsigned int val) { int err; mutex_lock(&codec->regmap_lock); if (!codec->regmap) err = hda_reg_write(codec, reg, val); else err = regmap_write(codec->regmap, reg, val); mutex_unlock(&codec->regmap_lock); return err; } /* a helper macro to call @func_call; retry with power-up if failed */ #define CALL_RAW_FUNC(codec, func_call) \ ({ \ int _err = func_call; \ if (_err == -EAGAIN) { \ _err = snd_hdac_power_up_pm(codec); \ if (_err >= 0) \ _err = func_call; \ snd_hdac_power_down_pm(codec); \ } \ _err;}) /** * snd_hdac_regmap_write_raw - write a pseudo register with power mgmt * @codec: the codec object * @reg: pseudo register * @val: value to write * * Returns zero if successful or a negative error code. 
*/ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg, unsigned int val) { return CALL_RAW_FUNC(codec, reg_raw_write(codec, reg, val)); } EXPORT_SYMBOL_GPL(snd_hdac_regmap_write_raw); static int reg_raw_read(struct hdac_device *codec, unsigned int reg, unsigned int *val, bool uncached) { int err; mutex_lock(&codec->regmap_lock); if (uncached || !codec->regmap) err = hda_reg_read(codec, reg, val); else err = regmap_read(codec->regmap, reg, val); mutex_unlock(&codec->regmap_lock); return err; } static int __snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg, unsigned int *val, bool uncached) { return CALL_RAW_FUNC(codec, reg_raw_read(codec, reg, val, uncached)); } /** * snd_hdac_regmap_read_raw - read a pseudo register with power mgmt * @codec: the codec object * @reg: pseudo register * @val: pointer to store the read value * * Returns zero if successful or a negative error code. */ int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg, unsigned int *val) { return __snd_hdac_regmap_read_raw(codec, reg, val, false); } EXPORT_SYMBOL_GPL(snd_hdac_regmap_read_raw); /* Works like snd_hdac_regmap_read_raw(), but this doesn't read from the * cache but always via hda verbs. */ int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec, unsigned int reg, unsigned int *val) { return __snd_hdac_regmap_read_raw(codec, reg, val, true); } static int reg_raw_update(struct hdac_device *codec, unsigned int reg, unsigned int mask, unsigned int val) { unsigned int orig; bool change; int err; mutex_lock(&codec->regmap_lock); if (codec->regmap) { err = regmap_update_bits_check(codec->regmap, reg, mask, val, &change); if (!err) err = change ? 
1 : 0; } else { err = hda_reg_read(codec, reg, &orig); if (!err) { val &= mask; val |= orig & ~mask; if (val != orig) { err = hda_reg_write(codec, reg, val); if (!err) err = 1; } } } mutex_unlock(&codec->regmap_lock); return err; } /** * snd_hdac_regmap_update_raw - update a pseudo register with power mgmt * @codec: the codec object * @reg: pseudo register * @mask: bit mask to update * @val: value to update * * Returns zero if successful or a negative error code. */ int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg, unsigned int mask, unsigned int val) { return CALL_RAW_FUNC(codec, reg_raw_update(codec, reg, mask, val)); } EXPORT_SYMBOL_GPL(snd_hdac_regmap_update_raw); static int reg_raw_update_once(struct hdac_device *codec, unsigned int reg, unsigned int mask, unsigned int val) { int err = 0; if (!codec->regmap) return reg_raw_update(codec, reg, mask, val); mutex_lock(&codec->regmap_lock); /* Discard any updates to already initialised registers. */ if (!regcache_reg_cached(codec->regmap, reg)) err = regmap_update_bits(codec->regmap, reg, mask, val); mutex_unlock(&codec->regmap_lock); return err; } /** * snd_hdac_regmap_update_raw_once - initialize the register value only once * @codec: the codec object * @reg: pseudo register * @mask: bit mask to update * @val: value to update * * Performs the update of the register bits only once when the register * hasn't been initialized yet. Used in HD-audio legacy driver. 
* Returns zero if successful or a negative error code */ int snd_hdac_regmap_update_raw_once(struct hdac_device *codec, unsigned int reg, unsigned int mask, unsigned int val) { return CALL_RAW_FUNC(codec, reg_raw_update_once(codec, reg, mask, val)); } EXPORT_SYMBOL_GPL(snd_hdac_regmap_update_raw_once); /** * snd_hdac_regmap_sync - sync out the cached values for PM resume * @codec: the codec object */ void snd_hdac_regmap_sync(struct hdac_device *codec) { mutex_lock(&codec->regmap_lock); if (codec->regmap) regcache_sync(codec->regmap); mutex_unlock(&codec->regmap_lock); } EXPORT_SYMBOL_GPL(snd_hdac_regmap_sync);
/* source: linux-master — sound/hda/hdac_regmap.c */
// SPDX-License-Identifier: GPL-2.0-only /* * generic arrays */ #include <linux/slab.h> #include <sound/core.h> #include <sound/hdaudio.h> /** * snd_array_new - get a new element from the given array * @array: the array object * * Get a new element from the given array. If it exceeds the * pre-allocated array size, re-allocate the array. * * Returns NULL if allocation failed. */ void *snd_array_new(struct snd_array *array) { if (snd_BUG_ON(!array->elem_size)) return NULL; if (array->used >= array->alloced) { int num = array->alloced + array->alloc_align; int oldsize = array->alloced * array->elem_size; int size = (num + 1) * array->elem_size; void *nlist; if (snd_BUG_ON(num >= 4096)) return NULL; nlist = krealloc(array->list, size, GFP_KERNEL); if (!nlist) return NULL; memset(nlist + oldsize, 0, size - oldsize); array->list = nlist; array->alloced = num; } return snd_array_elem(array, array->used++); } EXPORT_SYMBOL_GPL(snd_array_new); /** * snd_array_free - free the given array elements * @array: the array object */ void snd_array_free(struct snd_array *array) { kfree(array->list); array->used = 0; array->alloced = 0; array->list = NULL; } EXPORT_SYMBOL_GPL(snd_array_free);
/* source: linux-master — sound/hda/array.c */
// SPDX-License-Identifier: GPL-2.0
/*
 * sysfs support for HD-audio core device
 */

#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/device.h>
#include <sound/core.h>
#include <sound/hdaudio.h>
#include "local.h"

/* per-codec kobject tree backing /sys/...<codec>/widgets/ */
struct hdac_widget_tree {
	struct kobject *root;
	struct kobject *afg;
	struct kobject **nodes;
};

/* define a read-only hex attribute showing the codec field of the same name */
#define CODEC_ATTR(type)					\
static ssize_t type##_show(struct device *dev,			\
			   struct device_attribute *attr,	\
			   char *buf)				\
{								\
	struct hdac_device *codec = dev_to_hdac_dev(dev);	\
	return sysfs_emit(buf, "0x%x\n", codec->type);		\
} \
static DEVICE_ATTR_RO(type)

/* like CODEC_ATTR but for string fields (empty string when unset) */
#define CODEC_ATTR_STR(type)					\
static ssize_t type##_show(struct device *dev,			\
			     struct device_attribute *attr,	\
					char *buf)		\
{								\
	struct hdac_device *codec = dev_to_hdac_dev(dev);	\
	return sysfs_emit(buf, "%s\n",				\
			  codec->type ? codec->type : "");	\
} \
static DEVICE_ATTR_RO(type)

CODEC_ATTR(type);
CODEC_ATTR(vendor_id);
CODEC_ATTR(subsystem_id);
CODEC_ATTR(revision_id);
CODEC_ATTR(afg);
CODEC_ATTR(mfg);
CODEC_ATTR_STR(vendor_name);
CODEC_ATTR_STR(chip_name);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return snd_hdac_codec_modalias(dev_to_hdac_dev(dev), buf, 256);
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *hdac_dev_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_subsystem_id.attr,
	&dev_attr_revision_id.attr,
	&dev_attr_afg.attr,
	&dev_attr_mfg.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_chip_name.attr,
	&dev_attr_modalias.attr,
	NULL
};

static const struct attribute_group hdac_dev_attr_group = {
	.attrs	= hdac_dev_attrs,
};

const struct attribute_group *hdac_dev_attr_groups[] = {
	&hdac_dev_attr_group,
	NULL
};

/*
 * Widget tree sysfs
 *
 * This is a tree showing the attributes of each widget.  It appears like
 * /sys/bus/hdaudioC0D0/widgets/04/caps
 */

struct widget_attribute;

struct widget_attribute {
	struct attribute	attr;
	ssize_t (*show)(struct hdac_device *codec, hda_nid_t nid,
			struct widget_attribute *attr, char *buf);
	ssize_t (*store)(struct hdac_device *codec, hda_nid_t nid,
			 struct widget_attribute *attr,
			 const char *buf, size_t count);
};

/* parse the widget NID from the kobject name (hex) and resolve the codec;
 * returns the NID or a negative error code */
static int get_codec_nid(struct kobject *kobj, struct hdac_device **codecp)
{
	struct device *dev = kobj_to_dev(kobj->parent->parent);
	int nid;
	ssize_t ret;

	ret = kstrtoint(kobj->name, 16, &nid);
	if (ret < 0)
		return ret;
	*codecp = dev_to_hdac_dev(dev);
	return nid;
}

static ssize_t widget_attr_show(struct kobject *kobj, struct attribute *attr,
				char *buf)
{
	struct widget_attribute *wid_attr =
		container_of(attr, struct widget_attribute, attr);
	struct hdac_device *codec;
	int nid;

	if (!wid_attr->show)
		return -EIO;
	nid = get_codec_nid(kobj, &codec);
	if (nid < 0)
		return nid;
	return wid_attr->show(codec, nid, wid_attr, buf);
}

static ssize_t widget_attr_store(struct kobject *kobj, struct attribute *attr,
				 const char *buf, size_t count)
{
	struct widget_attribute *wid_attr =
		container_of(attr, struct widget_attribute, attr);
	struct hdac_device *codec;
	int nid;

	if (!wid_attr->store)
		return -EIO;
	nid = get_codec_nid(kobj, &codec);
	if (nid < 0)
		return nid;
	return wid_attr->store(codec, nid, wid_attr, buf, count);
}

static const struct sysfs_ops widget_sysfs_ops = {
	.show	= widget_attr_show,
	.store	= widget_attr_store,
};

static void widget_release(struct kobject *kobj)
{
	kfree(kobj);
}

static const struct kobj_type widget_ktype = {
	.release	= widget_release,
	.sysfs_ops	= &widget_sysfs_ops,
};

#define WIDGET_ATTR_RO(_name) \
	struct widget_attribute wid_attr_##_name = __ATTR_RO(_name)
#define WIDGET_ATTR_RW(_name) \
	struct widget_attribute wid_attr_##_name = __ATTR_RW(_name)

static ssize_t caps_show(struct hdac_device *codec, hda_nid_t nid,
			struct widget_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "0x%08x\n", get_wcaps(codec, nid));
}

static ssize_t pin_caps_show(struct hdac_device *codec, hda_nid_t nid,
			     struct widget_attribute *attr, char *buf)
{
	if (get_wcaps_type(get_wcaps(codec, nid)) != AC_WID_PIN)
		return 0;
	return sysfs_emit(buf, "0x%08x\n",
			  snd_hdac_read_parm(codec, nid, AC_PAR_PIN_CAP));
}

static ssize_t pin_cfg_show(struct hdac_device *codec, hda_nid_t nid,
			    struct widget_attribute *attr, char *buf)
{
	unsigned int val;

	if (get_wcaps_type(get_wcaps(codec, nid)) != AC_WID_PIN)
		return 0;
	if (snd_hdac_read(codec, nid, AC_VERB_GET_CONFIG_DEFAULT, 0, &val))
		return 0;
	return sysfs_emit(buf, "0x%08x\n", val);
}

/* whether the widget carries PCM capabilities worth exposing */
static bool has_pcm_cap(struct hdac_device *codec, hda_nid_t nid)
{
	if (nid == codec->afg || nid == codec->mfg)
		return true;
	switch (get_wcaps_type(get_wcaps(codec, nid))) {
	case AC_WID_AUD_OUT:
	case AC_WID_AUD_IN:
		return true;
	default:
		return false;
	}
}

static ssize_t pcm_caps_show(struct hdac_device *codec, hda_nid_t nid,
			     struct widget_attribute *attr, char *buf)
{
	if (!has_pcm_cap(codec, nid))
		return 0;
	return sysfs_emit(buf, "0x%08x\n",
			  snd_hdac_read_parm(codec, nid, AC_PAR_PCM));
}

static ssize_t pcm_formats_show(struct hdac_device *codec, hda_nid_t nid,
				struct widget_attribute *attr, char *buf)
{
	if (!has_pcm_cap(codec, nid))
		return 0;
	return sysfs_emit(buf, "0x%08x\n",
			  snd_hdac_read_parm(codec, nid, AC_PAR_STREAM));
}

static ssize_t amp_in_caps_show(struct hdac_device *codec, hda_nid_t nid,
				struct widget_attribute *attr, char *buf)
{
	if (nid != codec->afg && !(get_wcaps(codec, nid) & AC_WCAP_IN_AMP))
		return 0;
	return sysfs_emit(buf, "0x%08x\n",
			  snd_hdac_read_parm(codec, nid, AC_PAR_AMP_IN_CAP));
}

static ssize_t amp_out_caps_show(struct hdac_device *codec, hda_nid_t nid,
				 struct widget_attribute *attr, char *buf)
{
	if (nid != codec->afg && !(get_wcaps(codec, nid) & AC_WCAP_OUT_AMP))
		return 0;
	return sysfs_emit(buf, "0x%08x\n",
			  snd_hdac_read_parm(codec, nid, AC_PAR_AMP_OUT_CAP));
}

static ssize_t power_caps_show(struct hdac_device *codec, hda_nid_t nid,
			       struct widget_attribute *attr, char *buf)
{
	if (nid != codec->afg && !(get_wcaps(codec, nid) & AC_WCAP_POWER))
		return 0;
	return sysfs_emit(buf, "0x%08x\n",
			  snd_hdac_read_parm(codec, nid, AC_PAR_POWER_STATE));
}

static ssize_t gpio_caps_show(struct hdac_device *codec, hda_nid_t nid,
			      struct widget_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "0x%08x\n",
			  snd_hdac_read_parm(codec, nid, AC_PAR_GPIO_CAP));
}

static ssize_t connections_show(struct hdac_device *codec, hda_nid_t nid,
				struct widget_attribute *attr, char *buf)
{
	hda_nid_t list[32];
	int i, nconns;
	ssize_t ret = 0;

	nconns = snd_hdac_get_connections(codec, nid, list, ARRAY_SIZE(list));
	if (nconns <= 0)
		return nconns;
	for (i = 0; i < nconns; i++)
		ret += sysfs_emit_at(buf, ret, "%s0x%02x", i ? " " : "",
				     list[i]);
	ret += sysfs_emit_at(buf, ret, "\n");
	return ret;
}

static WIDGET_ATTR_RO(caps);
static WIDGET_ATTR_RO(pin_caps);
static WIDGET_ATTR_RO(pin_cfg);
static WIDGET_ATTR_RO(pcm_caps);
static WIDGET_ATTR_RO(pcm_formats);
static WIDGET_ATTR_RO(amp_in_caps);
static WIDGET_ATTR_RO(amp_out_caps);
static WIDGET_ATTR_RO(power_caps);
static WIDGET_ATTR_RO(gpio_caps);
static WIDGET_ATTR_RO(connections);

/* attributes exposed for every ordinary widget node */
static struct attribute *widget_node_attrs[] = {
	&wid_attr_caps.attr,
	&wid_attr_pin_caps.attr,
	&wid_attr_pin_cfg.attr,
	&wid_attr_pcm_caps.attr,
	&wid_attr_pcm_formats.attr,
	&wid_attr_amp_in_caps.attr,
	&wid_attr_amp_out_caps.attr,
	&wid_attr_power_caps.attr,
	&wid_attr_connections.attr,
	NULL,
};

/* attributes exposed for the audio function group node */
static struct attribute *widget_afg_attrs[] = {
	&wid_attr_pcm_caps.attr,
	&wid_attr_pcm_formats.attr,
	&wid_attr_amp_in_caps.attr,
	&wid_attr_amp_out_caps.attr,
	&wid_attr_power_caps.attr,
	&wid_attr_gpio_caps.attr,
	NULL,
};

static const struct attribute_group widget_node_group = {
	.attrs = widget_node_attrs,
};

static const struct attribute_group widget_afg_group = {
	.attrs = widget_afg_attrs,
};

static void free_widget_node(struct kobject *kobj,
			     const struct attribute_group *group)
{
	if (kobj) {
		sysfs_remove_group(kobj, group);
		kobject_put(kobj);
	}
}

static void widget_tree_free(struct hdac_device *codec)
{
	struct hdac_widget_tree *tree = codec->widgets;
	struct kobject **p;

	if (!tree)
		return;
	free_widget_node(tree->afg, &widget_afg_group);
	if (tree->nodes) {
		for (p = tree->nodes; *p; p++)
			free_widget_node(*p, &widget_node_group);
		kfree(tree->nodes);
	}
	kobject_put(tree->root);
	kfree(tree);
	codec->widgets = NULL;
}

static int add_widget_node(struct kobject *parent, hda_nid_t nid,
			   const struct attribute_group *group,
			   struct kobject **res)
{
	struct kobject *kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
	int err;

	if (!kobj)
		return -ENOMEM;
	kobject_init(kobj, &widget_ktype);
	err = kobject_add(kobj, parent, "%02x", nid);
	if (err < 0) {
		kobject_put(kobj);
		return err;
	}
	err = sysfs_create_group(kobj, group);
	if (err < 0) {
		kobject_put(kobj);
		return err;
	}

	*res = kobj;
	return 0;
}

static int widget_tree_create(struct hdac_device *codec)
{
	struct hdac_widget_tree *tree;
	int i, err;
	hda_nid_t nid;

	tree = codec->widgets = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree)
		return -ENOMEM;

	tree->root = kobject_create_and_add("widgets", &codec->dev.kobj);
	if (!tree->root)
		return -ENOMEM;

	/* extra NULL terminator slot lets widget_tree_free() stop at *p */
	tree->nodes = kcalloc(codec->num_nodes + 1, sizeof(*tree->nodes),
			      GFP_KERNEL);
	if (!tree->nodes)
		return -ENOMEM;

	for (i = 0, nid = codec->start_nid; i < codec->num_nodes; i++, nid++) {
		err = add_widget_node(tree->root, nid, &widget_node_group,
				      &tree->nodes[i]);
		if (err < 0)
			return err;
	}

	if (codec->afg) {
		err = add_widget_node(tree->root, codec->afg,
				      &widget_afg_group, &tree->afg);
		if (err < 0)
			return err;
	}

	kobject_uevent(tree->root, KOBJ_CHANGE);
	return 0;
}

/* call with codec->widget_lock held */
int hda_widget_sysfs_init(struct hdac_device *codec)
{
	int err;

	if (codec->widgets)
		return 0; /* already created */

	err = widget_tree_create(codec);
	if (err < 0) {
		widget_tree_free(codec);
		return err;
	}

	return 0;
}

/* call with codec->widget_lock held */
void hda_widget_sysfs_exit(struct hdac_device *codec)
{
	widget_tree_free(codec);
}

/* call with codec->widget_lock held */
int hda_widget_sysfs_reinit(struct hdac_device *codec,
			    hda_nid_t start_nid, int num_nodes)
{
	struct hdac_widget_tree *tree;
	hda_nid_t end_nid = start_nid + num_nodes;
	hda_nid_t nid;
	int i;

	if (!codec->widgets)
		return 0;

	tree = kmemdup(codec->widgets, sizeof(*tree), GFP_KERNEL);
	if (!tree)
		return -ENOMEM;

	tree->nodes = kcalloc(num_nodes + 1, sizeof(*tree->nodes), GFP_KERNEL);
	if (!tree->nodes) {
		kfree(tree);
		return -ENOMEM;
	}

	/* prune non-existing nodes */
	for (i = 0, nid = codec->start_nid; i < codec->num_nodes; i++, nid++) {
		if (nid < start_nid || nid >= end_nid)
			free_widget_node(codec->widgets->nodes[i],
					 &widget_node_group);
	}

	/* add new nodes */
	for (i = 0, nid = start_nid; i < num_nodes; i++, nid++) {
		if (nid < codec->start_nid || nid >= codec->end_nid)
			add_widget_node(tree->root, nid, &widget_node_group,
					&tree->nodes[i]);
		else
			tree->nodes[i] =
				codec->widgets->nodes[nid - codec->start_nid];
	}

	/* replace with the new tree */
	kfree(codec->widgets->nodes);
	kfree(codec->widgets);
	codec->widgets = tree;

	kobject_uevent(tree->root, KOBJ_CHANGE);
	return 0;
}
/* source: linux-master — sound/hda/hdac_sysfs.c */
// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2019 Jaroslav Kysela <[email protected]> #include <linux/acpi.h> #include <linux/bits.h> #include <linux/dmi.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/soundwire/sdw.h> #include <linux/soundwire/sdw_intel.h> #include <sound/core.h> #include <sound/intel-dsp-config.h> #include <sound/intel-nhlt.h> #include <sound/soc-acpi.h> static int dsp_driver; module_param(dsp_driver, int, 0444); MODULE_PARM_DESC(dsp_driver, "Force the DSP driver for Intel DSP (0=auto, 1=legacy, 2=SST, 3=SOF)"); #define FLAG_SST BIT(0) #define FLAG_SOF BIT(1) #define FLAG_SST_ONLY_IF_DMIC BIT(15) #define FLAG_SOF_ONLY_IF_DMIC BIT(16) #define FLAG_SOF_ONLY_IF_SOUNDWIRE BIT(17) #define FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE (FLAG_SOF_ONLY_IF_DMIC | \ FLAG_SOF_ONLY_IF_SOUNDWIRE) struct config_entry { u32 flags; u16 device; u8 acpi_hid[ACPI_ID_LEN]; const struct dmi_system_id *dmi_table; const struct snd_soc_acpi_codecs *codec_hid; }; static const struct snd_soc_acpi_codecs __maybe_unused essx_83x6 = { .num_codecs = 3, .codecs = { "ESSX8316", "ESSX8326", "ESSX8336"}, }; /* * configuration table * - the order of similar PCI ID entries is important! 
* - the first successful match will win */ static const struct config_entry config_table[] = { /* Merrifield */ #if IS_ENABLED(CONFIG_SND_SOC_SOF_MERRIFIELD) { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_SST_TNG, }, #endif /* * Apollolake (Broxton-P) * the legacy HDAudio driver is used except on Up Squared (SOF) and * Chromebooks (SST), as well as devices based on the ES8336 codec */ #if IS_ENABLED(CONFIG_SND_SOC_SOF_APOLLOLAKE) { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_HDA_APL, .dmi_table = (const struct dmi_system_id []) { { .ident = "Up Squared", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "AAEON"), DMI_MATCH(DMI_BOARD_NAME, "UP-APL01"), } }, {} } }, { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_HDA_APL, .codec_hid = &essx_83x6, }, #endif #if IS_ENABLED(CONFIG_SND_SOC_INTEL_APL) { .flags = FLAG_SST, .device = PCI_DEVICE_ID_INTEL_HDA_APL, .dmi_table = (const struct dmi_system_id []) { { .ident = "Google Chromebooks", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Google"), } }, {} } }, #endif /* * Skylake and Kabylake use legacy HDAudio driver except for Google * Chromebooks (SST) */ /* Sunrise Point-LP */ #if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKL) { .flags = FLAG_SST, .device = PCI_DEVICE_ID_INTEL_HDA_SKL_LP, .dmi_table = (const struct dmi_system_id []) { { .ident = "Google Chromebooks", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Google"), } }, {} } }, { .flags = FLAG_SST | FLAG_SST_ONLY_IF_DMIC, .device = PCI_DEVICE_ID_INTEL_HDA_SKL_LP, }, #endif /* Kabylake-LP */ #if IS_ENABLED(CONFIG_SND_SOC_INTEL_KBL) { .flags = FLAG_SST, .device = PCI_DEVICE_ID_INTEL_HDA_KBL_LP, .dmi_table = (const struct dmi_system_id []) { { .ident = "Google Chromebooks", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Google"), } }, {} } }, { .flags = FLAG_SST | FLAG_SST_ONLY_IF_DMIC, .device = PCI_DEVICE_ID_INTEL_HDA_KBL_LP, }, #endif /* * Geminilake uses legacy HDAudio driver except for Google * Chromebooks and devices based on the ES8336 codec */ /* Geminilake */ #if 
IS_ENABLED(CONFIG_SND_SOC_SOF_GEMINILAKE) { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_HDA_GML, .dmi_table = (const struct dmi_system_id []) { { .ident = "Google Chromebooks", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Google"), } }, {} } }, { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_HDA_GML, .codec_hid = &essx_83x6, }, #endif /* * CoffeeLake, CannonLake, CometLake, IceLake, TigerLake, AlderLake, * RaptorLake use legacy HDAudio driver except for Google Chromebooks * and when DMICs are present. Two cases are required since Coreboot * does not expose NHLT tables. * * When the Chromebook quirk is not present, it's based on information * that no such device exists. When the quirk is present, it could be * either based on product information or a placeholder. */ /* Cannonlake */ #if IS_ENABLED(CONFIG_SND_SOC_SOF_CANNONLAKE) { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_HDA_CNL_LP, .dmi_table = (const struct dmi_system_id []) { { .ident = "Google Chromebooks", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Google"), } }, { .ident = "UP-WHL", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "AAEON"), } }, {} } }, { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_HDA_CNL_LP, .codec_hid = &essx_83x6, }, { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = PCI_DEVICE_ID_INTEL_HDA_CNL_LP, }, #endif /* Coffelake */ #if IS_ENABLED(CONFIG_SND_SOC_SOF_COFFEELAKE) { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_HDA_CNL_H, .dmi_table = (const struct dmi_system_id []) { { .ident = "Google Chromebooks", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Google"), } }, {} } }, { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = PCI_DEVICE_ID_INTEL_HDA_CNL_H, }, #endif #if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE) /* Cometlake-LP */ { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_HDA_CML_LP, .dmi_table = (const struct dmi_system_id []) { { .ident = "Google Chromebooks", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Google"), } }, { .matches = { 
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"), DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "09C6") }, }, { /* early version of SKU 09C6 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"), DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0983") }, }, {} } }, { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_HDA_CML_LP, .codec_hid = &essx_83x6, }, { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = PCI_DEVICE_ID_INTEL_HDA_CML_LP, }, /* Cometlake-H */ { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_HDA_CML_H, .dmi_table = (const struct dmi_system_id []) { { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"), DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "098F"), }, }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"), DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0990"), }, }, {} } }, { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_HDA_CML_H, .codec_hid = &essx_83x6, }, { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = PCI_DEVICE_ID_INTEL_HDA_CML_H, }, #endif /* Icelake */ #if IS_ENABLED(CONFIG_SND_SOC_SOF_ICELAKE) { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_HDA_ICL_LP, .dmi_table = (const struct dmi_system_id []) { { .ident = "Google Chromebooks", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Google"), } }, {} } }, { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_HDA_ICL_LP, .codec_hid = &essx_83x6, }, { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = PCI_DEVICE_ID_INTEL_HDA_ICL_LP, }, #endif /* Jasper Lake */ #if IS_ENABLED(CONFIG_SND_SOC_SOF_JASPERLAKE) { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_HDA_JSL_N, .dmi_table = (const struct dmi_system_id []) { { .ident = "Google Chromebooks", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Google"), } }, {} } }, { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_HDA_JSL_N, .codec_hid = &essx_83x6, }, { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC, .device = PCI_DEVICE_ID_INTEL_HDA_JSL_N, }, #endif /* Tigerlake */ #if IS_ENABLED(CONFIG_SND_SOC_SOF_TIGERLAKE) { .flags = FLAG_SOF, .device = 
PCI_DEVICE_ID_INTEL_HDA_TGL_LP, .dmi_table = (const struct dmi_system_id []) { { .ident = "Google Chromebooks", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Google"), } }, { .ident = "UPX-TGL", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "AAEON"), } }, {} } }, { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_HDA_TGL_LP, .codec_hid = &essx_83x6, }, { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = PCI_DEVICE_ID_INTEL_HDA_TGL_LP, }, { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = PCI_DEVICE_ID_INTEL_HDA_TGL_H, }, #endif /* Elkhart Lake */ #if IS_ENABLED(CONFIG_SND_SOC_SOF_ELKHARTLAKE) { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC, .device = PCI_DEVICE_ID_INTEL_HDA_EHL_0, }, { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC, .device = PCI_DEVICE_ID_INTEL_HDA_EHL_3, }, #endif /* Alder Lake / Raptor Lake */ #if IS_ENABLED(CONFIG_SND_SOC_SOF_ALDERLAKE) { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = PCI_DEVICE_ID_INTEL_HDA_ADL_S, }, { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = PCI_DEVICE_ID_INTEL_HDA_RPL_S, }, { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_HDA_ADL_P, .dmi_table = (const struct dmi_system_id []) { { .ident = "Google Chromebooks", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Google"), } }, {} } }, { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_HDA_ADL_P, .codec_hid = &essx_83x6, }, { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = PCI_DEVICE_ID_INTEL_HDA_ADL_P, }, { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = PCI_DEVICE_ID_INTEL_HDA_ADL_PX, }, { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_HDA_ADL_PS, .codec_hid = &essx_83x6, }, { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = PCI_DEVICE_ID_INTEL_HDA_ADL_PS, }, { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = PCI_DEVICE_ID_INTEL_HDA_ADL_M, }, { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_HDA_ADL_N, .dmi_table = (const struct dmi_system_id []) { { .ident 
= "Google Chromebooks", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Google"), } }, {} } }, { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = PCI_DEVICE_ID_INTEL_HDA_ADL_N, }, { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_HDA_RPL_P_0, .dmi_table = (const struct dmi_system_id []) { { .ident = "Google Chromebooks", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Google"), } }, {} } }, { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = PCI_DEVICE_ID_INTEL_HDA_RPL_P_0, }, { .flags = FLAG_SOF, .device = PCI_DEVICE_ID_INTEL_HDA_RPL_P_1, .dmi_table = (const struct dmi_system_id []) { { .ident = "Google Chromebooks", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Google"), } }, {} } }, { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = PCI_DEVICE_ID_INTEL_HDA_RPL_P_1, }, { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = PCI_DEVICE_ID_INTEL_HDA_RPL_M, }, { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = PCI_DEVICE_ID_INTEL_HDA_RPL_PX, }, #endif /* Meteor Lake */ #if IS_ENABLED(CONFIG_SND_SOC_SOF_METEORLAKE) /* Meteorlake-P */ { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = PCI_DEVICE_ID_INTEL_HDA_MTL, }, #endif /* Lunar Lake */ #if IS_ENABLED(CONFIG_SND_SOC_SOF_LUNARLAKE) /* Lunarlake-P */ { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = PCI_DEVICE_ID_INTEL_HDA_LNL_P, }, #endif }; static const struct config_entry *snd_intel_dsp_find_config (struct pci_dev *pci, const struct config_entry *table, u32 len) { u16 device; device = pci->device; for (; len > 0; len--, table++) { if (table->device != device) continue; if (table->dmi_table && !dmi_check_system(table->dmi_table)) continue; if (table->codec_hid) { int i; for (i = 0; i < table->codec_hid->num_codecs; i++) if (acpi_dev_present(table->codec_hid->codecs[i], NULL, -1)) break; if (i == table->codec_hid->num_codecs) continue; } return table; } return NULL; } static int snd_intel_dsp_check_dmic(struct pci_dev *pci) 
/*
 * NOTE(review): this opening brace belongs to snd_intel_dsp_check_dmic(),
 * whose signature sits just above this chunk.  Returns 1 when the ACPI
 * NHLT table advertises a DMIC endpoint, 0 otherwise.
 */
{
	struct nhlt_acpi_table *nhlt;
	int ret = 0;

	nhlt = intel_nhlt_init(&pci->dev);
	if (nhlt) {
		if (intel_nhlt_has_endpoint_type(nhlt, NHLT_LINK_DMIC))
			ret = 1;
		intel_nhlt_free(nhlt);
	}
	return ret;
}

#if IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
/*
 * Scan the ACPI companion for Intel SoundWire links; returns the link
 * bitmask (> 0 means SoundWire is usable) or a negative error code.
 */
static int snd_intel_dsp_check_soundwire(struct pci_dev *pci)
{
	struct sdw_intel_acpi_info info;
	acpi_handle handle;
	int ret;

	handle = ACPI_HANDLE(&pci->dev);

	ret = sdw_intel_acpi_scan(handle, &info);
	if (ret < 0)
		return ret;

	return info.link_mask;
}
#else
/* SoundWire support not built in: report no links */
static int snd_intel_dsp_check_soundwire(struct pci_dev *pci)
{
	return 0;
}
#endif

/*
 * Select the DSP driver (legacy HDAudio / SST / SOF) for an Intel HDAudio
 * PCI device.  Order of precedence: non-Intel and legacy (no-DSP) devices
 * always get SND_INTEL_DSP_DRIVER_ANY; then the dsp_driver module
 * parameter; then PCI class-based DSP detection; finally the per-device
 * quirk table, whose conditional flags are resolved against NHLT DMIC and
 * SoundWire presence.
 */
int snd_intel_dsp_driver_probe(struct pci_dev *pci)
{
	const struct config_entry *cfg;

	/* Intel vendor only */
	if (pci->vendor != PCI_VENDOR_ID_INTEL)
		return SND_INTEL_DSP_DRIVER_ANY;

	/*
	 * Legacy devices don't have a PCI-based DSP and use HDaudio
	 * for HDMI/DP support, ignore kernel parameter
	 */
	switch (pci->device) {
	case PCI_DEVICE_ID_INTEL_HDA_BDW:
	case PCI_DEVICE_ID_INTEL_HDA_HSW_0:
	case PCI_DEVICE_ID_INTEL_HDA_HSW_2:
	case PCI_DEVICE_ID_INTEL_HDA_HSW_3:
	case PCI_DEVICE_ID_INTEL_HDA_BYT:
	case PCI_DEVICE_ID_INTEL_HDA_BSW:
		return SND_INTEL_DSP_DRIVER_ANY;
	}

	/* explicit user override via the module parameter wins */
	if (dsp_driver > 0 && dsp_driver <= SND_INTEL_DSP_DRIVER_LAST)
		return dsp_driver;

	/*
	 * detect DSP by checking class/subclass/prog-id information
	 * class=04 subclass 03 prog-if 00: no DSP, use legacy driver
	 * class=04 subclass 01 prog-if 00: DSP is present
	 *  (and may be required e.g. for DMIC or SSP support)
	 * class=04 subclass 03 prog-if 80: use DSP or legacy mode
	 */
	if (pci->class == 0x040300)
		return SND_INTEL_DSP_DRIVER_LEGACY;
	if (pci->class != 0x040100 && pci->class != 0x040380) {
		dev_err(&pci->dev, "Unknown PCI class/subclass/prog-if information (0x%06x) found, selecting HDAudio legacy driver\n", pci->class);
		return SND_INTEL_DSP_DRIVER_LEGACY;
	}

	dev_info(&pci->dev, "DSP detected with PCI class/subclass/prog-if info 0x%06x\n", pci->class);

	/* find the configuration for the specific device */
	cfg = snd_intel_dsp_find_config(pci, config_table, ARRAY_SIZE(config_table));
	if (!cfg)
		return SND_INTEL_DSP_DRIVER_ANY;

	if (cfg->flags & FLAG_SOF) {
		if (cfg->flags & FLAG_SOF_ONLY_IF_SOUNDWIRE &&
		    snd_intel_dsp_check_soundwire(pci) > 0) {
			dev_info(&pci->dev, "SoundWire enabled on CannonLake+ platform, using SOF driver\n");
			return SND_INTEL_DSP_DRIVER_SOF;
		}
		if (cfg->flags & FLAG_SOF_ONLY_IF_DMIC &&
		    snd_intel_dsp_check_dmic(pci)) {
			dev_info(&pci->dev, "Digital mics found on Skylake+ platform, using SOF driver\n");
			return SND_INTEL_DSP_DRIVER_SOF;
		}
		/* unconditional SOF entry */
		if (!(cfg->flags & FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE))
			return SND_INTEL_DSP_DRIVER_SOF;
	}

	if (cfg->flags & FLAG_SST) {
		if (cfg->flags & FLAG_SST_ONLY_IF_DMIC) {
			if (snd_intel_dsp_check_dmic(pci)) {
				dev_info(&pci->dev, "Digital mics found on Skylake+ platform, using SST driver\n");
				return SND_INTEL_DSP_DRIVER_SST;
			}
		} else {
			return SND_INTEL_DSP_DRIVER_SST;
		}
	}

	return SND_INTEL_DSP_DRIVER_LEGACY;
}
EXPORT_SYMBOL_GPL(snd_intel_dsp_driver_probe);

/* Should we default to SOF or SST for BYT/CHT ? */
#if IS_ENABLED(CONFIG_SND_INTEL_BYT_PREFER_SOF) || \
	!IS_ENABLED(CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_ACPI)
#define FLAG_SST_OR_SOF_BYT	FLAG_SOF
#else
#define FLAG_SST_OR_SOF_BYT	FLAG_SST
#endif

/*
 * configuration table
 * - the order of similar ACPI ID entries is important!
* - the first successful match will win */ static const struct config_entry acpi_config_table[] = { #if IS_ENABLED(CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_ACPI) || \ IS_ENABLED(CONFIG_SND_SOC_SOF_BAYTRAIL) /* BayTrail */ { .flags = FLAG_SST_OR_SOF_BYT, .acpi_hid = "80860F28", }, /* CherryTrail */ { .flags = FLAG_SST_OR_SOF_BYT, .acpi_hid = "808622A8", }, #endif /* Broadwell */ #if IS_ENABLED(CONFIG_SND_SOC_INTEL_CATPT) { .flags = FLAG_SST, .acpi_hid = "INT3438" }, #endif #if IS_ENABLED(CONFIG_SND_SOC_SOF_BROADWELL) { .flags = FLAG_SOF, .acpi_hid = "INT3438" }, #endif /* Haswell - not supported by SOF but added for consistency */ #if IS_ENABLED(CONFIG_SND_SOC_INTEL_CATPT) { .flags = FLAG_SST, .acpi_hid = "INT33C8" }, #endif }; static const struct config_entry *snd_intel_acpi_dsp_find_config(const u8 acpi_hid[ACPI_ID_LEN], const struct config_entry *table, u32 len) { for (; len > 0; len--, table++) { if (memcmp(table->acpi_hid, acpi_hid, ACPI_ID_LEN)) continue; if (table->dmi_table && !dmi_check_system(table->dmi_table)) continue; return table; } return NULL; } int snd_intel_acpi_dsp_driver_probe(struct device *dev, const u8 acpi_hid[ACPI_ID_LEN]) { const struct config_entry *cfg; if (dsp_driver > SND_INTEL_DSP_DRIVER_LEGACY && dsp_driver <= SND_INTEL_DSP_DRIVER_LAST) return dsp_driver; if (dsp_driver == SND_INTEL_DSP_DRIVER_LEGACY) { dev_warn(dev, "dsp_driver parameter %d not supported, using automatic detection\n", SND_INTEL_DSP_DRIVER_LEGACY); } /* find the configuration for the specific device */ cfg = snd_intel_acpi_dsp_find_config(acpi_hid, acpi_config_table, ARRAY_SIZE(acpi_config_table)); if (!cfg) return SND_INTEL_DSP_DRIVER_ANY; if (cfg->flags & FLAG_SST) return SND_INTEL_DSP_DRIVER_SST; if (cfg->flags & FLAG_SOF) return SND_INTEL_DSP_DRIVER_SOF; return SND_INTEL_DSP_DRIVER_SST; } EXPORT_SYMBOL_GPL(snd_intel_acpi_dsp_driver_probe); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Intel DSP config driver"); MODULE_IMPORT_NS(SND_INTEL_SOUNDWIRE_ACPI);
linux-master
sound/hda/intel-dsp-config.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * HD-audio stream operations
 *
 * NOTE(review): a new translation unit (hdac_stream.c) begins here.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/clocksource.h>
#include <sound/compress_driver.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/hdaudio.h>
#include <sound/hda_register.h>
#include "trace.h"

/*
 * the hdac_stream library is intended to be used with the following
 * transitions. The states are not formally defined in the code but loosely
 * inspired by boolean variables. Note that the 'prepared' field is not used
 * in this library but by the callers during the hw_params/prepare transitions
 *
 *                       |
 *        stream_init()  |
 *                       v
 *                  +----------+
 *                  |  unused  |
 *                  +--+----+--+
 *                     |    ^
 *     stream_assign() |    | stream_release()
 *                     v    |
 *                  +--+----+--+
 *                  |  opened  |
 *                  +--+----+--+
 *                     |    ^
 *      stream_reset() |    |
 *      stream_setup() |    | stream_cleanup()
 *                     v    |
 *                  +--+----+--+
 *                  | prepared |
 *                  +--+----+--+
 *                     |    ^
 *      stream_start() |    | stream_stop()
 *                     v    |
 *                  +--+----+--+
 *                  | running  |
 *                  +----------+
 */

/**
 * snd_hdac_get_stream_stripe_ctl - get stripe control value
 * @bus: HD-audio core bus
 * @substream: PCM substream
 *
 * Computes the SDO stripe setting from the stream's bandwidth, per the
 * formula in the HD-audio spec, capped by the controller's GCAP NSDO field.
 */
int snd_hdac_get_stream_stripe_ctl(struct hdac_bus *bus,
				   struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int channels = runtime->channels,
		     rate = runtime->rate,
		     bits_per_sample = runtime->sample_bits,
		     max_sdo_lines, value, sdo_line;

	/* T_AZA_GCAP_NSDO is 1:2 bitfields in GCAP */
	max_sdo_lines = snd_hdac_chip_readl(bus, GCAP) & AZX_GCAP_NSDO;

	/* following is from HD audio spec */
	for (sdo_line = max_sdo_lines; sdo_line > 0; sdo_line >>= 1) {
		if (rate > 48000)
			value = (channels * bits_per_sample * (rate / 48000)) /
				sdo_line;
		else
			value = (channels * bits_per_sample) / sdo_line;

		if (value >= bus->sdo_limit)
			break;
	}

	/* stripe value: 0 for 1SDO, 1 for 2SDO, 2 for 4SDO lines */
	return sdo_line >> 1;
}
EXPORT_SYMBOL_GPL(snd_hdac_get_stream_stripe_ctl);

/**
 * snd_hdac_stream_init - initialize each stream (aka device)
 * @bus: HD-audio core bus
 * @azx_dev: HD-audio core stream object to initialize
 * @idx: stream index number
 * @direction: stream direction (SNDRV_PCM_STREAM_PLAYBACK or SNDRV_PCM_STREAM_CAPTURE)
 * @tag: the tag id to assign
 *
 * Assign the starting bdl address to each stream (device) and initialize.
 * Also precomputes the per-stream register addresses (SPIB/FIFO/DPIBR)
 * when the corresponding capabilities are present, and links the stream
 * into bus->stream_list.
 */
void snd_hdac_stream_init(struct hdac_bus *bus, struct hdac_stream *azx_dev,
			  int idx, int direction, int tag)
{
	azx_dev->bus = bus;
	/* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
	azx_dev->sd_addr = bus->remap_addr + (0x20 * idx + 0x80);
	/* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
	azx_dev->sd_int_sta_mask = 1 << idx;
	azx_dev->index = idx;
	azx_dev->direction = direction;
	azx_dev->stream_tag = tag;
	snd_hdac_dsp_lock_init(azx_dev);
	list_add_tail(&azx_dev->list, &bus->stream_list);

	if (bus->spbcap) {
		azx_dev->spib_addr = bus->spbcap + AZX_SPB_BASE +
			AZX_SPB_INTERVAL * idx + AZX_SPB_SPIB;

		azx_dev->fifo_addr = bus->spbcap + AZX_SPB_BASE +
			AZX_SPB_INTERVAL * idx + AZX_SPB_MAXFIFO;
	}

	if (bus->drsmcap)
		azx_dev->dpibr_addr = bus->drsmcap + AZX_DRSM_BASE +
			AZX_DRSM_INTERVAL * idx;
}
EXPORT_SYMBOL_GPL(snd_hdac_stream_init);

/**
 * snd_hdac_stream_start - start a stream
 * @azx_dev: HD-audio core stream to start
 *
 * Start a stream, set start_wallclk and set the running flag.
 */
void snd_hdac_stream_start(struct hdac_stream *azx_dev)
{
	struct hdac_bus *bus = azx_dev->bus;
	int stripe_ctl;

	trace_snd_hdac_stream_start(bus, azx_dev);

	/* snapshot the wall clock for later position/delay computation */
	azx_dev->start_wallclk = snd_hdac_chip_readl(bus, WALLCLK);

	/* enable SIE */
	snd_hdac_chip_updatel(bus, INTCTL,
			      1 << azx_dev->index, 1 << azx_dev->index);
	/* set stripe control */
	if (azx_dev->stripe) {
		if (azx_dev->substream)
			stripe_ctl = snd_hdac_get_stream_stripe_ctl(bus, azx_dev->substream);
		else
			stripe_ctl = 0;
		snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK,
					stripe_ctl);
	}
	/*
	 * set DMA start and interrupt mask; some controllers require
	 * dword access to SDnCTL (bus->access_sdnctl_in_dword)
	 */
	if (bus->access_sdnctl_in_dword)
		snd_hdac_stream_updatel(azx_dev, SD_CTL,
					0, SD_CTL_DMA_START | SD_INT_MASK);
	else
		snd_hdac_stream_updateb(azx_dev, SD_CTL,
					0, SD_CTL_DMA_START | SD_INT_MASK);
	azx_dev->running = true;
}
EXPORT_SYMBOL_GPL(snd_hdac_stream_start);

/**
 * snd_hdac_stream_clear - helper to clear stream registers and stop DMA transfers
 * @azx_dev: HD-audio core stream to stop
 */
static void snd_hdac_stream_clear(struct hdac_stream *azx_dev)
{
	snd_hdac_stream_updateb(azx_dev, SD_CTL,
				SD_CTL_DMA_START | SD_INT_MASK, 0);
	snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
	if (azx_dev->stripe)
		snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0);
	azx_dev->running = false;
}

/**
 * snd_hdac_stream_stop - stop a stream
 * @azx_dev: HD-audio core stream to stop
 *
 * Stop a stream DMA and disable stream interrupt
 */
void snd_hdac_stream_stop(struct hdac_stream *azx_dev)
{
	trace_snd_hdac_stream_stop(azx_dev->bus, azx_dev);

	snd_hdac_stream_clear(azx_dev);
	/* disable SIE */
	snd_hdac_chip_updatel(azx_dev->bus, INTCTL, 1 << azx_dev->index, 0);
}
EXPORT_SYMBOL_GPL(snd_hdac_stream_stop);

/**
 * snd_hdac_stop_streams - stop all streams
 * @bus: HD-audio core bus
 */
void snd_hdac_stop_streams(struct hdac_bus *bus)
{
	struct hdac_stream *stream;

	list_for_each_entry(stream, &bus->stream_list, list)
		snd_hdac_stream_stop(stream);
}
EXPORT_SYMBOL_GPL(snd_hdac_stop_streams);

/**
 * snd_hdac_stop_streams_and_chip - stop all streams and chip if running
 * @bus: HD-audio core bus
 */
void snd_hdac_stop_streams_and_chip(struct hdac_bus *bus)
{
	if (bus->chip_init) {
		snd_hdac_stop_streams(bus);
		snd_hdac_bus_stop_chip(bus);
	}
}
EXPORT_SYMBOL_GPL(snd_hdac_stop_streams_and_chip);

/**
 * snd_hdac_stream_reset - reset a stream
 * @azx_dev: HD-audio core stream to reset
 *
 * Stops the DMA, pulses SD_CTL_STREAM_RESET (polling the hardware for
 * entry into and exit from reset), and clears the first position-buffer
 * entry.
 */
void snd_hdac_stream_reset(struct hdac_stream *azx_dev)
{
	unsigned char val;
	int dma_run_state;

	snd_hdac_stream_clear(azx_dev);

	/* remember whether DMA was running before the clear above */
	dma_run_state = snd_hdac_stream_readb(azx_dev, SD_CTL) & SD_CTL_DMA_START;

	snd_hdac_stream_updateb(azx_dev, SD_CTL, 0, SD_CTL_STREAM_RESET);

	/* wait for hardware to report that the stream entered reset */
	snd_hdac_stream_readb_poll(azx_dev, SD_CTL, val, (val & SD_CTL_STREAM_RESET), 3, 300);

	/* optional quirk delay after stopping a running DMA */
	if (azx_dev->bus->dma_stop_delay && dma_run_state)
		udelay(azx_dev->bus->dma_stop_delay);

	snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_CTL_STREAM_RESET, 0);

	/* wait for hardware to report that the stream is out of reset */
	snd_hdac_stream_readb_poll(azx_dev, SD_CTL, val, !(val & SD_CTL_STREAM_RESET), 3, 300);

	/* reset first position - may not be synced with hw at this time */
	if (azx_dev->posbuf)
		*azx_dev->posbuf = 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_stream_reset);

/**
 * snd_hdac_stream_setup - set up the SD for streaming
 * @azx_dev: HD-audio core stream to set up
 *
 * Programs the stream descriptor registers (tag, buffer length, format,
 * LVI, BDL address), enables the position buffer and stream interrupts,
 * and caches fifo_size and the LPIB delay threshold.  Always returns 0.
 */
int snd_hdac_stream_setup(struct hdac_stream *azx_dev)
{
	struct hdac_bus *bus = azx_dev->bus;
	struct snd_pcm_runtime *runtime;
	unsigned int val;

	/* runtime is NULL for non-PCM (e.g. compress) usage */
	if (azx_dev->substream)
		runtime = azx_dev->substream->runtime;
	else
		runtime = NULL;
	/* make sure the run bit is zero for SD */
	snd_hdac_stream_clear(azx_dev);
	/* program the stream_tag */
	val = snd_hdac_stream_readl(azx_dev, SD_CTL);
	val = (val & ~SD_CTL_STREAM_TAG_MASK) |
		(azx_dev->stream_tag << SD_CTL_STREAM_TAG_SHIFT);
	if (!bus->snoop)
		val |= SD_CTL_TRAFFIC_PRIO;
	snd_hdac_stream_writel(azx_dev, SD_CTL, val);

	/* program the length of samples in cyclic buffer */
	snd_hdac_stream_writel(azx_dev, SD_CBL, azx_dev->bufsize);

	/* program the stream format */
	/* this value needs to be the same as the one programmed */
	snd_hdac_stream_writew(azx_dev, SD_FORMAT, azx_dev->format_val);

	/* program the stream LVI (last valid index) of the BDL */
	snd_hdac_stream_writew(azx_dev, SD_LVI, azx_dev->frags - 1);

	/* program the BDL address */
	/* lower BDL address */
	snd_hdac_stream_writel(azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr);
	/* upper BDL address */
	snd_hdac_stream_writel(azx_dev, SD_BDLPU,
			       upper_32_bits(azx_dev->bdl.addr));

	/* enable the position buffer */
	if (bus->use_posbuf && bus->posbuf.addr) {
		if (!(snd_hdac_chip_readl(bus, DPLBASE) & AZX_DPLBASE_ENABLE))
			snd_hdac_chip_writel(bus, DPLBASE,
				(u32)bus->posbuf.addr | AZX_DPLBASE_ENABLE);
	}

	/* set the interrupt enable bits in the descriptor control register */
	snd_hdac_stream_updatel(azx_dev, SD_CTL, 0, SD_INT_MASK);

	azx_dev->fifo_size = snd_hdac_stream_readw(azx_dev, SD_FIFOSIZE) + 1;

	/* when LPIB delay correction gives a small negative value,
	 * we ignore it; currently set the threshold statically to
	 * 64 frames
	 */
	if (runtime && runtime->period_size > 64)
		azx_dev->delay_negative_threshold =
			-frames_to_bytes(runtime, 64);
	else
		azx_dev->delay_negative_threshold = 0;

	/* wallclk has 24Mhz clock source */
	if (runtime)
		azx_dev->period_wallclk = (((runtime->period_size * 24000) /
					    runtime->rate) * 1000);
	return 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_stream_setup);

/**
 * snd_hdac_stream_cleanup - cleanup a stream
 * @azx_dev: HD-audio core stream to clean up
 */
void snd_hdac_stream_cleanup(struct hdac_stream *azx_dev)
{
	snd_hdac_stream_writel(azx_dev, SD_BDLPL, 0);
	snd_hdac_stream_writel(azx_dev, SD_BDLPU, 0);
	snd_hdac_stream_writel(azx_dev, SD_CTL, 0);
	azx_dev->bufsize = 0;
	azx_dev->period_bytes = 0;
	azx_dev->format_val = 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_stream_cleanup);

/**
 * snd_hdac_stream_assign - assign a stream for the PCM
 * @bus: HD-audio core bus
 * @substream: PCM substream to assign
 *
 * Look for an unused stream for the given PCM substream, assign it
 * and return the stream object. If no stream is free, returns NULL.
 * The function tries to keep using the same stream object when it's used
 * beforehand. Also, when bus->reverse_assign flag is set, the last free
 * or matching entry is returned. This is needed for some strange codecs.
 */
struct hdac_stream *snd_hdac_stream_assign(struct hdac_bus *bus,
					   struct snd_pcm_substream *substream)
{
	struct hdac_stream *azx_dev;
	struct hdac_stream *res = NULL;

	/* make a non-zero unique key for the substream */
	int key = (substream->pcm->device << 16) | (substream->number << 2) |
		(substream->stream + 1);

	spin_lock_irq(&bus->reg_lock);
	list_for_each_entry(azx_dev, &bus->stream_list, list) {
		if (azx_dev->direction != substream->stream)
			continue;
		if (azx_dev->opened)
			continue;
		/* exact key match is preferred and ends the search */
		if (azx_dev->assigned_key == key) {
			res = azx_dev;
			break;
		}
		if (!res || bus->reverse_assign)
			res = azx_dev;
	}
	if (res) {
		res->opened = 1;
		res->running = 0;
		res->assigned_key = key;
		res->substream = substream;
	}
	spin_unlock_irq(&bus->reg_lock);
	return res;
}
EXPORT_SYMBOL_GPL(snd_hdac_stream_assign);

/**
 * snd_hdac_stream_release_locked - release the assigned stream
 * @azx_dev: HD-audio core stream to release
 *
 * Release the stream that has been assigned by snd_hdac_stream_assign().
 * The bus->reg_lock needs to be taken at a higher level
 */
void snd_hdac_stream_release_locked(struct hdac_stream *azx_dev)
{
	azx_dev->opened = 0;
	azx_dev->running = 0;
	azx_dev->substream = NULL;
}
EXPORT_SYMBOL_GPL(snd_hdac_stream_release_locked);

/**
 * snd_hdac_stream_release - release the assigned stream
 * @azx_dev: HD-audio core stream to release
 *
 * Release the stream that has been assigned by snd_hdac_stream_assign().
 */
void snd_hdac_stream_release(struct hdac_stream *azx_dev)
{
	struct hdac_bus *bus = azx_dev->bus;

	spin_lock_irq(&bus->reg_lock);
	snd_hdac_stream_release_locked(azx_dev);
	spin_unlock_irq(&bus->reg_lock);
}
EXPORT_SYMBOL_GPL(snd_hdac_stream_release);

/**
 * snd_hdac_get_stream - return hdac_stream based on stream_tag and
 * direction
 *
 * @bus: HD-audio core bus
 * @dir: direction for the stream to be found
 * @stream_tag: stream tag for stream to be found
 */
struct hdac_stream *snd_hdac_get_stream(struct hdac_bus *bus,
					int dir, int stream_tag)
{
	struct hdac_stream *s;

	list_for_each_entry(s, &bus->stream_list, list) {
		if (s->direction == dir && s->stream_tag == stream_tag)
			return s;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(snd_hdac_get_stream);

/*
 * set up a BDL entry
 *
 * Appends BDL entries for @size bytes starting at @ofs of @dmab, honoring
 * the 4K-boundary quirk and the AZX_MAX_BDL_ENTRIES limit.  Returns the
 * new buffer offset, or -EINVAL when the BDL is full.
 */
static int setup_bdle(struct hdac_bus *bus,
		      struct snd_dma_buffer *dmab,
		      struct hdac_stream *azx_dev, __le32 **bdlp,
		      int ofs, int size, int with_ioc)
{
	__le32 *bdl = *bdlp;

	while (size > 0) {
		dma_addr_t addr;
		int chunk;

		if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES)
			return -EINVAL;

		addr = snd_sgbuf_get_addr(dmab, ofs);
		/* program the address field of the BDL entry */
		bdl[0] = cpu_to_le32((u32)addr);
		bdl[1] = cpu_to_le32(upper_32_bits(addr));
		/* program the size field of the BDL entry */
		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size);
		/* one BDLE cannot cross 4K boundary on CTHDA chips */
		if (bus->align_bdle_4k) {
			u32 remain = 0x1000 - (ofs & 0xfff);

			if (chunk > remain)
				chunk = remain;
		}
		bdl[2] = cpu_to_le32(chunk);
		/* program the IOC to enable interrupt
		 * only when the whole fragment is processed
		 */
		size -= chunk;
		bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
		bdl += 4;
		azx_dev->frags++;
		ofs += chunk;
	}
	*bdlp = bdl;
	return ofs;
}

/**
 * snd_hdac_stream_setup_periods - set up BDL entries
 * @azx_dev: HD-audio core stream to set up
 *
 * Set up the buffer descriptor table of the given stream based on the
 * period and buffer sizes of the assigned PCM substream.
*/ int snd_hdac_stream_setup_periods(struct hdac_stream *azx_dev) { struct hdac_bus *bus = azx_dev->bus; struct snd_pcm_substream *substream = azx_dev->substream; struct snd_compr_stream *cstream = azx_dev->cstream; struct snd_pcm_runtime *runtime = NULL; struct snd_dma_buffer *dmab; __le32 *bdl; int i, ofs, periods, period_bytes; int pos_adj, pos_align; if (substream) { runtime = substream->runtime; dmab = snd_pcm_get_dma_buf(substream); } else if (cstream) { dmab = snd_pcm_get_dma_buf(cstream); } else { WARN(1, "No substream or cstream assigned\n"); return -EINVAL; } /* reset BDL address */ snd_hdac_stream_writel(azx_dev, SD_BDLPL, 0); snd_hdac_stream_writel(azx_dev, SD_BDLPU, 0); period_bytes = azx_dev->period_bytes; periods = azx_dev->bufsize / period_bytes; /* program the initial BDL entries */ bdl = (__le32 *)azx_dev->bdl.area; ofs = 0; azx_dev->frags = 0; pos_adj = bus->bdl_pos_adj; if (runtime && !azx_dev->no_period_wakeup && pos_adj > 0) { pos_align = pos_adj; pos_adj = DIV_ROUND_UP(pos_adj * runtime->rate, 48000); if (!pos_adj) pos_adj = pos_align; else pos_adj = roundup(pos_adj, pos_align); pos_adj = frames_to_bytes(runtime, pos_adj); if (pos_adj >= period_bytes) { dev_warn(bus->dev, "Too big adjustment %d\n", pos_adj); pos_adj = 0; } else { ofs = setup_bdle(bus, dmab, azx_dev, &bdl, ofs, pos_adj, true); if (ofs < 0) goto error; } } else pos_adj = 0; for (i = 0; i < periods; i++) { if (i == periods - 1 && pos_adj) ofs = setup_bdle(bus, dmab, azx_dev, &bdl, ofs, period_bytes - pos_adj, 0); else ofs = setup_bdle(bus, dmab, azx_dev, &bdl, ofs, period_bytes, !azx_dev->no_period_wakeup); if (ofs < 0) goto error; } return 0; error: dev_err(bus->dev, "Too many BDL entries: buffer=%d, period=%d\n", azx_dev->bufsize, period_bytes); return -EINVAL; } EXPORT_SYMBOL_GPL(snd_hdac_stream_setup_periods); /** * snd_hdac_stream_set_params - set stream parameters * @azx_dev: HD-audio core stream for which parameters are to be set * @format_val: format value parameter * * 
Setup the HD-audio core stream parameters from substream of the stream * and passed format value */ int snd_hdac_stream_set_params(struct hdac_stream *azx_dev, unsigned int format_val) { struct snd_pcm_substream *substream = azx_dev->substream; struct snd_compr_stream *cstream = azx_dev->cstream; unsigned int bufsize, period_bytes; unsigned int no_period_wakeup; int err; if (substream) { bufsize = snd_pcm_lib_buffer_bytes(substream); period_bytes = snd_pcm_lib_period_bytes(substream); no_period_wakeup = substream->runtime->no_period_wakeup; } else if (cstream) { bufsize = cstream->runtime->buffer_size; period_bytes = cstream->runtime->fragment_size; no_period_wakeup = 0; } else { return -EINVAL; } if (bufsize != azx_dev->bufsize || period_bytes != azx_dev->period_bytes || format_val != azx_dev->format_val || no_period_wakeup != azx_dev->no_period_wakeup) { azx_dev->bufsize = bufsize; azx_dev->period_bytes = period_bytes; azx_dev->format_val = format_val; azx_dev->no_period_wakeup = no_period_wakeup; err = snd_hdac_stream_setup_periods(azx_dev); if (err < 0) return err; } return 0; } EXPORT_SYMBOL_GPL(snd_hdac_stream_set_params); static u64 azx_cc_read(const struct cyclecounter *cc) { struct hdac_stream *azx_dev = container_of(cc, struct hdac_stream, cc); return snd_hdac_chip_readl(azx_dev->bus, WALLCLK); } static void azx_timecounter_init(struct hdac_stream *azx_dev, bool force, u64 last) { struct timecounter *tc = &azx_dev->tc; struct cyclecounter *cc = &azx_dev->cc; u64 nsec; cc->read = azx_cc_read; cc->mask = CLOCKSOURCE_MASK(32); /* * Calculate the optimal mult/shift values. The counter wraps * around after ~178.9 seconds. 
*/ clocks_calc_mult_shift(&cc->mult, &cc->shift, 24000000, NSEC_PER_SEC, 178); nsec = 0; /* audio time is elapsed time since trigger */ timecounter_init(tc, cc, nsec); if (force) { /* * force timecounter to use predefined value, * used for synchronized starts */ tc->cycle_last = last; } } /** * snd_hdac_stream_timecounter_init - initialize time counter * @azx_dev: HD-audio core stream (master stream) * @streams: bit flags of streams to set up * * Initializes the time counter of streams marked by the bit flags (each * bit corresponds to the stream index). * The trigger timestamp of PCM substream assigned to the given stream is * updated accordingly, too. */ void snd_hdac_stream_timecounter_init(struct hdac_stream *azx_dev, unsigned int streams) { struct hdac_bus *bus = azx_dev->bus; struct snd_pcm_runtime *runtime = azx_dev->substream->runtime; struct hdac_stream *s; bool inited = false; u64 cycle_last = 0; int i = 0; list_for_each_entry(s, &bus->stream_list, list) { if (streams & (1 << i)) { azx_timecounter_init(s, inited, cycle_last); if (!inited) { inited = true; cycle_last = s->tc.cycle_last; } } i++; } snd_pcm_gettime(runtime, &runtime->trigger_tstamp); runtime->trigger_tstamp_latched = true; } EXPORT_SYMBOL_GPL(snd_hdac_stream_timecounter_init); /** * snd_hdac_stream_sync_trigger - turn on/off stream sync register * @azx_dev: HD-audio core stream (master stream) * @set: true = set, false = clear * @streams: bit flags of streams to sync * @reg: the stream sync register address */ void snd_hdac_stream_sync_trigger(struct hdac_stream *azx_dev, bool set, unsigned int streams, unsigned int reg) { struct hdac_bus *bus = azx_dev->bus; unsigned int val; if (!reg) reg = AZX_REG_SSYNC; val = _snd_hdac_chip_readl(bus, reg); if (set) val |= streams; else val &= ~streams; _snd_hdac_chip_writel(bus, reg, val); } EXPORT_SYMBOL_GPL(snd_hdac_stream_sync_trigger); /** * snd_hdac_stream_sync - sync with start/stop trigger operation * @azx_dev: HD-audio core stream (master 
stream) * @start: true = start, false = stop * @streams: bit flags of streams to sync * * For @start = true, wait until all FIFOs get ready. * For @start = false, wait until all RUN bits are cleared. */ void snd_hdac_stream_sync(struct hdac_stream *azx_dev, bool start, unsigned int streams) { struct hdac_bus *bus = azx_dev->bus; int i, nwait, timeout; struct hdac_stream *s; for (timeout = 5000; timeout; timeout--) { nwait = 0; i = 0; list_for_each_entry(s, &bus->stream_list, list) { if (!(streams & (1 << i++))) continue; if (start) { /* check FIFO gets ready */ if (!(snd_hdac_stream_readb(s, SD_STS) & SD_STS_FIFO_READY)) nwait++; } else { /* check RUN bit is cleared */ if (snd_hdac_stream_readb(s, SD_CTL) & SD_CTL_DMA_START) { nwait++; /* * Perform stream reset if DMA RUN * bit not cleared within given timeout */ if (timeout == 1) snd_hdac_stream_reset(s); } } } if (!nwait) break; cpu_relax(); } } EXPORT_SYMBOL_GPL(snd_hdac_stream_sync); /** * snd_hdac_stream_spbcap_enable - enable SPIB for a stream * @bus: HD-audio core bus * @enable: flag to enable/disable SPIB * @index: stream index for which SPIB need to be enabled */ void snd_hdac_stream_spbcap_enable(struct hdac_bus *bus, bool enable, int index) { u32 mask = 0; if (!bus->spbcap) { dev_err(bus->dev, "Address of SPB capability is NULL\n"); return; } mask |= (1 << index); if (enable) snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, mask); else snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, 0); } EXPORT_SYMBOL_GPL(snd_hdac_stream_spbcap_enable); /** * snd_hdac_stream_set_spib - sets the spib value of a stream * @bus: HD-audio core bus * @azx_dev: hdac_stream * @value: spib value to set */ int snd_hdac_stream_set_spib(struct hdac_bus *bus, struct hdac_stream *azx_dev, u32 value) { if (!bus->spbcap) { dev_err(bus->dev, "Address of SPB capability is NULL\n"); return -EINVAL; } writel(value, azx_dev->spib_addr); return 0; } EXPORT_SYMBOL_GPL(snd_hdac_stream_set_spib); /** * 
snd_hdac_stream_get_spbmaxfifo - gets the spib value of a stream * @bus: HD-audio core bus * @azx_dev: hdac_stream * * Return maxfifo for the stream */ int snd_hdac_stream_get_spbmaxfifo(struct hdac_bus *bus, struct hdac_stream *azx_dev) { if (!bus->spbcap) { dev_err(bus->dev, "Address of SPB capability is NULL\n"); return -EINVAL; } return readl(azx_dev->fifo_addr); } EXPORT_SYMBOL_GPL(snd_hdac_stream_get_spbmaxfifo); /** * snd_hdac_stream_drsm_enable - enable DMA resume for a stream * @bus: HD-audio core bus * @enable: flag to enable/disable DRSM * @index: stream index for which DRSM need to be enabled */ void snd_hdac_stream_drsm_enable(struct hdac_bus *bus, bool enable, int index) { u32 mask = 0; if (!bus->drsmcap) { dev_err(bus->dev, "Address of DRSM capability is NULL\n"); return; } mask |= (1 << index); if (enable) snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, mask); else snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, 0); } EXPORT_SYMBOL_GPL(snd_hdac_stream_drsm_enable); /* * snd_hdac_stream_wait_drsm - wait for HW to clear RSM for a stream * @azx_dev: HD-audio core stream to await RSM for * * Returns 0 on success and -ETIMEDOUT upon a timeout. 
*/ int snd_hdac_stream_wait_drsm(struct hdac_stream *azx_dev) { struct hdac_bus *bus = azx_dev->bus; u32 mask, reg; int ret; mask = 1 << azx_dev->index; ret = read_poll_timeout(snd_hdac_reg_readl, reg, !(reg & mask), 250, 2000, false, bus, bus->drsmcap + AZX_REG_DRSM_CTL); if (ret) dev_dbg(bus->dev, "polling RSM 0x%08x failed: %d\n", mask, ret); return ret; } EXPORT_SYMBOL_GPL(snd_hdac_stream_wait_drsm); /** * snd_hdac_stream_set_dpibr - sets the dpibr value of a stream * @bus: HD-audio core bus * @azx_dev: hdac_stream * @value: dpib value to set */ int snd_hdac_stream_set_dpibr(struct hdac_bus *bus, struct hdac_stream *azx_dev, u32 value) { if (!bus->drsmcap) { dev_err(bus->dev, "Address of DRSM capability is NULL\n"); return -EINVAL; } writel(value, azx_dev->dpibr_addr); return 0; } EXPORT_SYMBOL_GPL(snd_hdac_stream_set_dpibr); /** * snd_hdac_stream_set_lpib - sets the lpib value of a stream * @azx_dev: hdac_stream * @value: lpib value to set */ int snd_hdac_stream_set_lpib(struct hdac_stream *azx_dev, u32 value) { snd_hdac_stream_writel(azx_dev, SD_LPIB, value); return 0; } EXPORT_SYMBOL_GPL(snd_hdac_stream_set_lpib); #ifdef CONFIG_SND_HDA_DSP_LOADER /** * snd_hdac_dsp_prepare - prepare for DSP loading * @azx_dev: HD-audio core stream used for DSP loading * @format: HD-audio stream format * @byte_size: data chunk byte size * @bufp: allocated buffer * * Allocate the buffer for the given size and set up the given stream for * DSP loading. Returns the stream tag (>= 0), or a negative error code. 
*/ int snd_hdac_dsp_prepare(struct hdac_stream *azx_dev, unsigned int format, unsigned int byte_size, struct snd_dma_buffer *bufp) { struct hdac_bus *bus = azx_dev->bus; __le32 *bdl; int err; snd_hdac_dsp_lock(azx_dev); spin_lock_irq(&bus->reg_lock); if (azx_dev->running || azx_dev->locked) { spin_unlock_irq(&bus->reg_lock); err = -EBUSY; goto unlock; } azx_dev->locked = true; spin_unlock_irq(&bus->reg_lock); err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, bus->dev, byte_size, bufp); if (err < 0) goto err_alloc; azx_dev->substream = NULL; azx_dev->bufsize = byte_size; azx_dev->period_bytes = byte_size; azx_dev->format_val = format; snd_hdac_stream_reset(azx_dev); /* reset BDL address */ snd_hdac_stream_writel(azx_dev, SD_BDLPL, 0); snd_hdac_stream_writel(azx_dev, SD_BDLPU, 0); azx_dev->frags = 0; bdl = (__le32 *)azx_dev->bdl.area; err = setup_bdle(bus, bufp, azx_dev, &bdl, 0, byte_size, 0); if (err < 0) goto error; snd_hdac_stream_setup(azx_dev); snd_hdac_dsp_unlock(azx_dev); return azx_dev->stream_tag; error: snd_dma_free_pages(bufp); err_alloc: spin_lock_irq(&bus->reg_lock); azx_dev->locked = false; spin_unlock_irq(&bus->reg_lock); unlock: snd_hdac_dsp_unlock(azx_dev); return err; } EXPORT_SYMBOL_GPL(snd_hdac_dsp_prepare); /** * snd_hdac_dsp_trigger - start / stop DSP loading * @azx_dev: HD-audio core stream used for DSP loading * @start: trigger start or stop */ void snd_hdac_dsp_trigger(struct hdac_stream *azx_dev, bool start) { if (start) snd_hdac_stream_start(azx_dev); else snd_hdac_stream_stop(azx_dev); } EXPORT_SYMBOL_GPL(snd_hdac_dsp_trigger); /** * snd_hdac_dsp_cleanup - clean up the stream from DSP loading to normal * @azx_dev: HD-audio core stream used for DSP loading * @dmab: buffer used by DSP loading */ void snd_hdac_dsp_cleanup(struct hdac_stream *azx_dev, struct snd_dma_buffer *dmab) { struct hdac_bus *bus = azx_dev->bus; if (!dmab->area || !azx_dev->locked) return; snd_hdac_dsp_lock(azx_dev); /* reset BDL address */ 
snd_hdac_stream_writel(azx_dev, SD_BDLPL, 0); snd_hdac_stream_writel(azx_dev, SD_BDLPU, 0); snd_hdac_stream_writel(azx_dev, SD_CTL, 0); azx_dev->bufsize = 0; azx_dev->period_bytes = 0; azx_dev->format_val = 0; snd_dma_free_pages(dmab); dmab->area = NULL; spin_lock_irq(&bus->reg_lock); azx_dev->locked = false; spin_unlock_irq(&bus->reg_lock); snd_hdac_dsp_unlock(azx_dev); } EXPORT_SYMBOL_GPL(snd_hdac_dsp_cleanup); #endif /* CONFIG_SND_HDA_DSP_LOADER */
/* linux-master — sound/hda/hdac_stream.c */
// SPDX-License-Identifier: GPL-2.0-only /* * HD-audio bus */ #include <linux/init.h> #include <linux/device.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/export.h> #include <sound/hdaudio.h> MODULE_DESCRIPTION("HD-audio bus"); MODULE_LICENSE("GPL"); /** * hdac_get_device_id - gets the hdac device id entry * @hdev: HD-audio core device * @drv: HD-audio codec driver * * Compares the hdac device vendor_id and revision_id to the hdac_device * driver id_table and returns the matching device id entry. */ const struct hda_device_id * hdac_get_device_id(struct hdac_device *hdev, struct hdac_driver *drv) { if (drv->id_table) { const struct hda_device_id *id = drv->id_table; while (id->vendor_id) { if (hdev->vendor_id == id->vendor_id && (!id->rev_id || id->rev_id == hdev->revision_id)) return id; id++; } } return NULL; } EXPORT_SYMBOL_GPL(hdac_get_device_id); static int hdac_codec_match(struct hdac_device *dev, struct hdac_driver *drv) { if (hdac_get_device_id(dev, drv)) return 1; else return 0; } static int hda_bus_match(struct device *dev, struct device_driver *drv) { struct hdac_device *hdev = dev_to_hdac_dev(dev); struct hdac_driver *hdrv = drv_to_hdac_driver(drv); if (hdev->type != hdrv->type) return 0; /* * if driver provided a match function use that otherwise we will * use hdac_codec_match function */ if (hdrv->match) return hdrv->match(hdev, hdrv); else return hdac_codec_match(hdev, hdrv); return 1; } static int hda_uevent(const struct device *dev, struct kobj_uevent_env *env) { char modalias[32]; snd_hdac_codec_modalias(dev_to_hdac_dev(dev), modalias, sizeof(modalias)); if (add_uevent_var(env, "MODALIAS=%s", modalias)) return -ENOMEM; return 0; } struct bus_type snd_hda_bus_type = { .name = "hdaudio", .match = hda_bus_match, .uevent = hda_uevent, }; EXPORT_SYMBOL_GPL(snd_hda_bus_type); static int __init hda_bus_init(void) { return bus_register(&snd_hda_bus_type); } static void __exit hda_bus_exit(void) { 
bus_unregister(&snd_hda_bus_type); } subsys_initcall(hda_bus_init); module_exit(hda_bus_exit);
/* linux-master — sound/hda/hda_bus_type.c */
// SPDX-License-Identifier: GPL-2.0-only /* * HDMI Channel map support helpers */ #include <linux/module.h> #include <sound/control.h> #include <sound/tlv.h> #include <sound/hda_chmap.h> /* * CEA speaker placement: * * FLH FCH FRH * FLW FL FLC FC FRC FR FRW * * LFE * TC * * RL RLC RC RRC RR * * The Left/Right Surround channel _notions_ LS/RS in SMPTE 320M corresponds to * CEA RL/RR; The SMPTE channel _assignment_ C/LFE is swapped to CEA LFE/FC. */ enum cea_speaker_placement { FL = (1 << 0), /* Front Left */ FC = (1 << 1), /* Front Center */ FR = (1 << 2), /* Front Right */ FLC = (1 << 3), /* Front Left Center */ FRC = (1 << 4), /* Front Right Center */ RL = (1 << 5), /* Rear Left */ RC = (1 << 6), /* Rear Center */ RR = (1 << 7), /* Rear Right */ RLC = (1 << 8), /* Rear Left Center */ RRC = (1 << 9), /* Rear Right Center */ LFE = (1 << 10), /* Low Frequency Effect */ FLW = (1 << 11), /* Front Left Wide */ FRW = (1 << 12), /* Front Right Wide */ FLH = (1 << 13), /* Front Left High */ FCH = (1 << 14), /* Front Center High */ FRH = (1 << 15), /* Front Right High */ TC = (1 << 16), /* Top Center */ }; static const char * const cea_speaker_allocation_names[] = { /* 0 */ "FL/FR", /* 1 */ "LFE", /* 2 */ "FC", /* 3 */ "RL/RR", /* 4 */ "RC", /* 5 */ "FLC/FRC", /* 6 */ "RLC/RRC", /* 7 */ "FLW/FRW", /* 8 */ "FLH/FRH", /* 9 */ "TC", /* 10 */ "FCH", }; /* * ELD SA bits in the CEA Speaker Allocation data block */ static const int eld_speaker_allocation_bits[] = { [0] = FL | FR, [1] = LFE, [2] = FC, [3] = RL | RR, [4] = RC, [5] = FLC | FRC, [6] = RLC | RRC, /* the following are not defined in ELD yet */ [7] = FLW | FRW, [8] = FLH | FRH, [9] = TC, [10] = FCH, }; /* * ALSA sequence is: * * surround40 surround41 surround50 surround51 surround71 * ch0 front left = = = = * ch1 front right = = = = * ch2 rear left = = = = * ch3 rear right = = = = * ch4 LFE center center center * ch5 LFE LFE * ch6 side left * ch7 side right * * surround71 = {FL, FR, RLC, RRC, FC, LFE, RL, RR} */ static 
int hdmi_channel_mapping[0x32][8] = { /* stereo */ [0x00] = { 0x00, 0x11, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7 }, /* 2.1 */ [0x01] = { 0x00, 0x11, 0x22, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7 }, /* Dolby Surround */ [0x02] = { 0x00, 0x11, 0x23, 0xf2, 0xf4, 0xf5, 0xf6, 0xf7 }, /* surround40 */ [0x08] = { 0x00, 0x11, 0x24, 0x35, 0xf3, 0xf2, 0xf6, 0xf7 }, /* 4ch */ [0x03] = { 0x00, 0x11, 0x23, 0x32, 0x44, 0xf5, 0xf6, 0xf7 }, /* surround41 */ [0x09] = { 0x00, 0x11, 0x24, 0x35, 0x42, 0xf3, 0xf6, 0xf7 }, /* surround50 */ [0x0a] = { 0x00, 0x11, 0x24, 0x35, 0x43, 0xf2, 0xf6, 0xf7 }, /* surround51 */ [0x0b] = { 0x00, 0x11, 0x24, 0x35, 0x43, 0x52, 0xf6, 0xf7 }, /* 7.1 */ [0x13] = { 0x00, 0x11, 0x26, 0x37, 0x43, 0x52, 0x64, 0x75 }, }; /* * This is an ordered list! * * The preceding ones have better chances to be selected by * hdmi_channel_allocation(). */ static struct hdac_cea_channel_speaker_allocation channel_allocations[] = { /* channel: 7 6 5 4 3 2 1 0 */ { .ca_index = 0x00, .speakers = { 0, 0, 0, 0, 0, 0, FR, FL } }, /* 2.1 */ { .ca_index = 0x01, .speakers = { 0, 0, 0, 0, 0, LFE, FR, FL } }, /* Dolby Surround */ { .ca_index = 0x02, .speakers = { 0, 0, 0, 0, FC, 0, FR, FL } }, /* surround40 */ { .ca_index = 0x08, .speakers = { 0, 0, RR, RL, 0, 0, FR, FL } }, /* surround41 */ { .ca_index = 0x09, .speakers = { 0, 0, RR, RL, 0, LFE, FR, FL } }, /* surround50 */ { .ca_index = 0x0a, .speakers = { 0, 0, RR, RL, FC, 0, FR, FL } }, /* surround51 */ { .ca_index = 0x0b, .speakers = { 0, 0, RR, RL, FC, LFE, FR, FL } }, /* 6.1 */ { .ca_index = 0x0f, .speakers = { 0, RC, RR, RL, FC, LFE, FR, FL } }, /* surround71 */ { .ca_index = 0x13, .speakers = { RRC, RLC, RR, RL, FC, LFE, FR, FL } }, { .ca_index = 0x03, .speakers = { 0, 0, 0, 0, FC, LFE, FR, FL } }, { .ca_index = 0x04, .speakers = { 0, 0, 0, RC, 0, 0, FR, FL } }, { .ca_index = 0x05, .speakers = { 0, 0, 0, RC, 0, LFE, FR, FL } }, { .ca_index = 0x06, .speakers = { 0, 0, 0, RC, FC, 0, FR, FL } }, { .ca_index = 0x07, .speakers = { 0, 0, 0, RC, 
FC, LFE, FR, FL } }, { .ca_index = 0x0c, .speakers = { 0, RC, RR, RL, 0, 0, FR, FL } }, { .ca_index = 0x0d, .speakers = { 0, RC, RR, RL, 0, LFE, FR, FL } }, { .ca_index = 0x0e, .speakers = { 0, RC, RR, RL, FC, 0, FR, FL } }, { .ca_index = 0x10, .speakers = { RRC, RLC, RR, RL, 0, 0, FR, FL } }, { .ca_index = 0x11, .speakers = { RRC, RLC, RR, RL, 0, LFE, FR, FL } }, { .ca_index = 0x12, .speakers = { RRC, RLC, RR, RL, FC, 0, FR, FL } }, { .ca_index = 0x14, .speakers = { FRC, FLC, 0, 0, 0, 0, FR, FL } }, { .ca_index = 0x15, .speakers = { FRC, FLC, 0, 0, 0, LFE, FR, FL } }, { .ca_index = 0x16, .speakers = { FRC, FLC, 0, 0, FC, 0, FR, FL } }, { .ca_index = 0x17, .speakers = { FRC, FLC, 0, 0, FC, LFE, FR, FL } }, { .ca_index = 0x18, .speakers = { FRC, FLC, 0, RC, 0, 0, FR, FL } }, { .ca_index = 0x19, .speakers = { FRC, FLC, 0, RC, 0, LFE, FR, FL } }, { .ca_index = 0x1a, .speakers = { FRC, FLC, 0, RC, FC, 0, FR, FL } }, { .ca_index = 0x1b, .speakers = { FRC, FLC, 0, RC, FC, LFE, FR, FL } }, { .ca_index = 0x1c, .speakers = { FRC, FLC, RR, RL, 0, 0, FR, FL } }, { .ca_index = 0x1d, .speakers = { FRC, FLC, RR, RL, 0, LFE, FR, FL } }, { .ca_index = 0x1e, .speakers = { FRC, FLC, RR, RL, FC, 0, FR, FL } }, { .ca_index = 0x1f, .speakers = { FRC, FLC, RR, RL, FC, LFE, FR, FL } }, { .ca_index = 0x20, .speakers = { 0, FCH, RR, RL, FC, 0, FR, FL } }, { .ca_index = 0x21, .speakers = { 0, FCH, RR, RL, FC, LFE, FR, FL } }, { .ca_index = 0x22, .speakers = { TC, 0, RR, RL, FC, 0, FR, FL } }, { .ca_index = 0x23, .speakers = { TC, 0, RR, RL, FC, LFE, FR, FL } }, { .ca_index = 0x24, .speakers = { FRH, FLH, RR, RL, 0, 0, FR, FL } }, { .ca_index = 0x25, .speakers = { FRH, FLH, RR, RL, 0, LFE, FR, FL } }, { .ca_index = 0x26, .speakers = { FRW, FLW, RR, RL, 0, 0, FR, FL } }, { .ca_index = 0x27, .speakers = { FRW, FLW, RR, RL, 0, LFE, FR, FL } }, { .ca_index = 0x28, .speakers = { TC, RC, RR, RL, FC, 0, FR, FL } }, { .ca_index = 0x29, .speakers = { TC, RC, RR, RL, FC, LFE, FR, FL } }, { .ca_index = 
0x2a, .speakers = { FCH, RC, RR, RL, FC, 0, FR, FL } }, { .ca_index = 0x2b, .speakers = { FCH, RC, RR, RL, FC, LFE, FR, FL } }, { .ca_index = 0x2c, .speakers = { TC, FCH, RR, RL, FC, 0, FR, FL } }, { .ca_index = 0x2d, .speakers = { TC, FCH, RR, RL, FC, LFE, FR, FL } }, { .ca_index = 0x2e, .speakers = { FRH, FLH, RR, RL, FC, 0, FR, FL } }, { .ca_index = 0x2f, .speakers = { FRH, FLH, RR, RL, FC, LFE, FR, FL } }, { .ca_index = 0x30, .speakers = { FRW, FLW, RR, RL, FC, 0, FR, FL } }, { .ca_index = 0x31, .speakers = { FRW, FLW, RR, RL, FC, LFE, FR, FL } }, }; static int hdmi_pin_set_slot_channel(struct hdac_device *codec, hda_nid_t pin_nid, int asp_slot, int channel) { return snd_hdac_codec_write(codec, pin_nid, 0, AC_VERB_SET_HDMI_CHAN_SLOT, (channel << 4) | asp_slot); } static int hdmi_pin_get_slot_channel(struct hdac_device *codec, hda_nid_t pin_nid, int asp_slot) { return (snd_hdac_codec_read(codec, pin_nid, 0, AC_VERB_GET_HDMI_CHAN_SLOT, asp_slot) & 0xf0) >> 4; } static int hdmi_get_channel_count(struct hdac_device *codec, hda_nid_t cvt_nid) { return 1 + snd_hdac_codec_read(codec, cvt_nid, 0, AC_VERB_GET_CVT_CHAN_COUNT, 0); } static void hdmi_set_channel_count(struct hdac_device *codec, hda_nid_t cvt_nid, int chs) { if (chs != hdmi_get_channel_count(codec, cvt_nid)) snd_hdac_codec_write(codec, cvt_nid, 0, AC_VERB_SET_CVT_CHAN_COUNT, chs - 1); } /* * Channel mapping routines */ /* * Compute derived values in channel_allocations[]. 
*/ static void init_channel_allocations(void) { int i, j; struct hdac_cea_channel_speaker_allocation *p; for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) { p = channel_allocations + i; p->channels = 0; p->spk_mask = 0; for (j = 0; j < ARRAY_SIZE(p->speakers); j++) if (p->speakers[j]) { p->channels++; p->spk_mask |= p->speakers[j]; } } } static int get_channel_allocation_order(int ca) { int i; for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) { if (channel_allocations[i].ca_index == ca) break; } return i; } void snd_hdac_print_channel_allocation(int spk_alloc, char *buf, int buflen) { int i, j; for (i = 0, j = 0; i < ARRAY_SIZE(cea_speaker_allocation_names); i++) { if (spk_alloc & (1 << i)) j += scnprintf(buf + j, buflen - j, " %s", cea_speaker_allocation_names[i]); } buf[j] = '\0'; /* necessary when j == 0 */ } EXPORT_SYMBOL_GPL(snd_hdac_print_channel_allocation); /* * The transformation takes two steps: * * eld->spk_alloc => (eld_speaker_allocation_bits[]) => spk_mask * spk_mask => (channel_allocations[]) => ai->CA * * TODO: it could select the wrong CA from multiple candidates. */ static int hdmi_channel_allocation_spk_alloc_blk(struct hdac_device *codec, int spk_alloc, int channels) { int i; int ca = 0; int spk_mask = 0; char buf[SND_PRINT_CHANNEL_ALLOCATION_ADVISED_BUFSIZE]; /* * CA defaults to 0 for basic stereo audio */ if (channels <= 2) return 0; /* * expand ELD's speaker allocation mask * * ELD tells the speaker mask in a compact(paired) form, * expand ELD's notions to match the ones used by Audio InfoFrame. 
*/ for (i = 0; i < ARRAY_SIZE(eld_speaker_allocation_bits); i++) { if (spk_alloc & (1 << i)) spk_mask |= eld_speaker_allocation_bits[i]; } /* search for the first working match in the CA table */ for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) { if (channels == channel_allocations[i].channels && (spk_mask & channel_allocations[i].spk_mask) == channel_allocations[i].spk_mask) { ca = channel_allocations[i].ca_index; break; } } if (!ca) { /* * if there was no match, select the regular ALSA channel * allocation with the matching number of channels */ for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) { if (channels == channel_allocations[i].channels) { ca = channel_allocations[i].ca_index; break; } } } snd_hdac_print_channel_allocation(spk_alloc, buf, sizeof(buf)); dev_dbg(&codec->dev, "HDMI: select CA 0x%x for %d-channel allocation: %s\n", ca, channels, buf); return ca; } static void hdmi_debug_channel_mapping(struct hdac_chmap *chmap, hda_nid_t pin_nid) { #ifdef CONFIG_SND_DEBUG_VERBOSE int i; int channel; for (i = 0; i < 8; i++) { channel = chmap->ops.pin_get_slot_channel( chmap->hdac, pin_nid, i); dev_dbg(&chmap->hdac->dev, "HDMI: ASP channel %d => slot %d\n", channel, i); } #endif } static void hdmi_std_setup_channel_mapping(struct hdac_chmap *chmap, hda_nid_t pin_nid, bool non_pcm, int ca) { struct hdac_cea_channel_speaker_allocation *ch_alloc; int i; int err; int order; int non_pcm_mapping[8]; order = get_channel_allocation_order(ca); ch_alloc = &channel_allocations[order]; if (hdmi_channel_mapping[ca][1] == 0) { int hdmi_slot = 0; /* fill actual channel mappings in ALSA channel (i) order */ for (i = 0; i < ch_alloc->channels; i++) { while (!WARN_ON(hdmi_slot >= 8) && !ch_alloc->speakers[7 - hdmi_slot]) hdmi_slot++; /* skip zero slots */ hdmi_channel_mapping[ca][i] = (i << 4) | hdmi_slot++; } /* fill the rest of the slots with ALSA channel 0xf */ for (hdmi_slot = 0; hdmi_slot < 8; hdmi_slot++) if (!ch_alloc->speakers[7 - hdmi_slot]) 
hdmi_channel_mapping[ca][i++] = (0xf << 4) | hdmi_slot;
	}

	if (non_pcm) {
		/*
		 * Non-PCM (pass-through) streams use the trivial identity
		 * map: slot i carries channel i; leftover slots are muted
		 * (channel 0xf).  Each entry packs (channel << 4) | slot.
		 */
		for (i = 0; i < ch_alloc->channels; i++)
			non_pcm_mapping[i] = (i << 4) | i;
		for (; i < 8; i++)
			non_pcm_mapping[i] = (0xf << 4) | i;
	}

	/* program all 8 converter slots from the chosen mapping table */
	for (i = 0; i < 8; i++) {
		int slotsetup = non_pcm ? non_pcm_mapping[i] : hdmi_channel_mapping[ca][i];
		int hdmi_slot = slotsetup & 0x0f;
		int channel = (slotsetup & 0xf0) >> 4;

		err = chmap->ops.pin_set_slot_channel(chmap->hdac, pin_nid,
						      hdmi_slot, channel);
		if (err) {
			dev_dbg(&chmap->hdac->dev, "HDMI: channel mapping failed\n");
			break;
		}
	}
}

/* pairing of an ALSA channel-map position with its CEA speaker bit */
struct channel_map_table {
	unsigned char map;	/* ALSA API channel map position */
	int spk_mask;		/* speaker position bit mask */
};

static struct channel_map_table map_tables[] = {
	{ SNDRV_CHMAP_FL,	FL },
	{ SNDRV_CHMAP_FR,	FR },
	{ SNDRV_CHMAP_RL,	RL },
	{ SNDRV_CHMAP_RR,	RR },
	{ SNDRV_CHMAP_LFE,	LFE },
	{ SNDRV_CHMAP_FC,	FC },
	{ SNDRV_CHMAP_RLC,	RLC },
	{ SNDRV_CHMAP_RRC,	RRC },
	{ SNDRV_CHMAP_RC,	RC },
	{ SNDRV_CHMAP_FLC,	FLC },
	{ SNDRV_CHMAP_FRC,	FRC },
	{ SNDRV_CHMAP_TFL,	FLH },
	{ SNDRV_CHMAP_TFR,	FRH },
	{ SNDRV_CHMAP_FLW,	FLW },
	{ SNDRV_CHMAP_FRW,	FRW },
	{ SNDRV_CHMAP_TC,	TC },
	{ SNDRV_CHMAP_TFC,	FCH },
	{} /* terminator */
};

/*
 * from ALSA API channel position to speaker bit mask;
 * returns 0 when the position has no table entry
 */
int snd_hdac_chmap_to_spk_mask(unsigned char c)
{
	struct channel_map_table *t = map_tables;

	for (; t->map; t++) {
		if (t->map == c)
			return t->spk_mask;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_chmap_to_spk_mask);

/* from ALSA API channel position to CEA slot; -1 when unassignable */
static int to_cea_slot(int ordered_ca, unsigned char pos)
{
	int mask = snd_hdac_chmap_to_spk_mask(pos);
	int i;

	/* Add sanity check to pass klockwork check.
	 * This should never happen.
	 */
	if (ordered_ca >= ARRAY_SIZE(channel_allocations))
		return -1;

	if (mask) {
		/* speakers[] is stored MSB-first, hence the 7 - i index */
		for (i = 0; i < 8; i++) {
			if (channel_allocations[ordered_ca].speakers[7 - i] == mask)
				return i;
		}
	}

	return -1;
}

/* from speaker bit mask to ALSA API channel position */
int snd_hdac_spk_to_chmap(int spk)
{
	struct channel_map_table *t = map_tables;

	for (; t->map; t++) {
		if (t->spk_mask == spk)
			return t->map;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_spk_to_chmap);

/* from CEA slot to ALSA API channel position */
static int from_cea_slot(int ordered_ca, unsigned char slot)
{
	int mask;

	/* Add sanity check to pass klockwork check.
	 * This should never happen.
	 */
	if (slot >= 8)
		return 0;

	mask = channel_allocations[ordered_ca].speakers[7 - slot];
	return snd_hdac_spk_to_chmap(mask);
}

/* get the CA index corresponding to the given ALSA API channel map */
static int hdmi_manual_channel_allocation(int chs, unsigned char *map)
{
	int i, spks = 0, spk_mask = 0;

	/* collect the speaker positions requested by the map */
	for (i = 0; i < chs; i++) {
		int mask = snd_hdac_chmap_to_spk_mask(map[i]);

		if (mask) {
			spk_mask |= mask;
			spks++;
		}
	}

	for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) {
		/*
		 * a CA matches when its channel count fits (either the raw
		 * count or the mapped-speaker count) and it provides every
		 * requested speaker position
		 */
		if ((chs == channel_allocations[i].channels ||
		     spks == channel_allocations[i].channels) &&
		    (spk_mask & channel_allocations[i].spk_mask) ==
		    channel_allocations[i].spk_mask)
			return channel_allocations[i].ca_index;
	}
	return -1;
}

/* set up the channel slots for the given ALSA API channel map */
static int hdmi_manual_setup_channel_mapping(struct hdac_chmap *chmap,
					     hda_nid_t pin_nid,
					     int chs, unsigned char *map,
					     int ca)
{
	int ordered_ca = get_channel_allocation_order(ca);
	int alsa_pos, hdmi_slot;
	int assignments[8] = {[0 ... 7] = 0xf};	/* 0xf = slot muted */

	for (alsa_pos = 0; alsa_pos < chs; alsa_pos++) {
		hdmi_slot = to_cea_slot(ordered_ca, map[alsa_pos]);
		if (hdmi_slot < 0)
			continue; /* unassigned channel */
		assignments[hdmi_slot] = alsa_pos;
	}

	for (hdmi_slot = 0; hdmi_slot < 8; hdmi_slot++) {
		int err;

		err = chmap->ops.pin_set_slot_channel(chmap->hdac, pin_nid,
						      hdmi_slot, assignments[hdmi_slot]);
		if (err)
			return -EINVAL;
	}
	return 0;
}

/* store ALSA API channel map from the current default map */
static void hdmi_setup_fake_chmap(unsigned char *map, int ca)
{
	int i;
	int ordered_ca = get_channel_allocation_order(ca);

	for (i = 0; i < 8; i++) {
		if (ordered_ca < ARRAY_SIZE(channel_allocations) &&
		    i < channel_allocations[ordered_ca].channels)
			map[i] = from_cea_slot(ordered_ca,
					       hdmi_channel_mapping[ca][i] & 0x0f);
		else
			map[i] = 0;
	}
}

/*
 * program the pin's slot/channel mapping: honor a user-set map for PCM
 * streams, otherwise apply the standard mapping for the CA and report
 * the resulting map back through *map
 */
void snd_hdac_setup_channel_mapping(struct hdac_chmap *chmap,
				    hda_nid_t pin_nid, bool non_pcm, int ca,
				    int channels, unsigned char *map,
				    bool chmap_set)
{
	if (!non_pcm && chmap_set) {
		hdmi_manual_setup_channel_mapping(chmap, pin_nid,
						  channels, map, ca);
	} else {
		hdmi_std_setup_channel_mapping(chmap, pin_nid, non_pcm, ca);
		hdmi_setup_fake_chmap(map, ca);
	}

	hdmi_debug_channel_mapping(chmap, pin_nid);
}
EXPORT_SYMBOL_GPL(snd_hdac_setup_channel_mapping);

/* number of channels carried by the given CA index */
int snd_hdac_get_active_channels(int ca)
{
	int ordered_ca = get_channel_allocation_order(ca);

	/* Add sanity check to pass klockwork check.
	 * This should never happen.
*/
	if (ordered_ca >= ARRAY_SIZE(channel_allocations))
		ordered_ca = 0;

	return channel_allocations[ordered_ca].channels;
}
EXPORT_SYMBOL_GPL(snd_hdac_get_active_channels);

/* return the channel-allocation table entry for the given CA index */
struct hdac_cea_channel_speaker_allocation *snd_hdac_get_ch_alloc_from_ca(int ca)
{
	return &channel_allocations[get_channel_allocation_order(ca)];
}
EXPORT_SYMBOL_GPL(snd_hdac_get_ch_alloc_from_ca);

/*
 * pick a CA index: honor a user-set channel map for PCM streams,
 * otherwise derive it from the sink's speaker-allocation block;
 * falls back to CA 0 when no match is found
 */
int snd_hdac_channel_allocation(struct hdac_device *hdac, int spk_alloc,
				int channels, bool chmap_set, bool non_pcm,
				unsigned char *map)
{
	int ca;

	if (!non_pcm && chmap_set)
		ca = hdmi_manual_channel_allocation(channels, map);
	else
		ca = hdmi_channel_allocation_spk_alloc_blk(hdac, spk_alloc, channels);

	if (ca < 0)
		ca = 0;

	return ca;
}
EXPORT_SYMBOL_GPL(snd_hdac_channel_allocation);

/*
 * ALSA API channel-map control callbacks
 */
static int hdmi_chmap_ctl_info(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_info *uinfo)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
	struct hdac_chmap *chmap = info->private_data;

	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = chmap->channels_max;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = SNDRV_CHMAP_LAST;
	return 0;
}

static int hdmi_chmap_cea_alloc_validate_get_type(struct hdac_chmap *chmap,
		struct hdac_cea_channel_speaker_allocation *cap, int channels)
{
	/* If the speaker allocation matches the channel count, it is OK.*/
	if (cap->channels != channels)
		return -1;

	/* all channels are remappable freely */
	return SNDRV_CTL_TLVT_CHMAP_VAR;
}

static void hdmi_cea_alloc_to_tlv_chmap(struct hdac_chmap *hchmap,
		struct hdac_cea_channel_speaker_allocation *cap,
		unsigned int *chmap, int channels)
{
	int count = 0;
	int c;

	/* walk speakers[] MSB-first so chmap[] comes out in slot order */
	for (c = 7; c >= 0; c--) {
		int spk = cap->speakers[c];

		if (!spk)
			continue;

		chmap[count++] = snd_hdac_spk_to_chmap(spk);
	}

	WARN_ON(count != channels);
}

/* expand an ELD speaker-allocation byte into a speaker-position bit mask */
static int spk_mask_from_spk_alloc(int spk_alloc)
{
	int i;
	/* FL/FR (bit 0) is always assumed present */
	int spk_mask = eld_speaker_allocation_bits[0];

	for (i = 0; i < ARRAY_SIZE(eld_speaker_allocation_bits); i++) {
		if (spk_alloc & (1 << i))
			spk_mask |= eld_speaker_allocation_bits[i];
	}

	return spk_mask;
}

/*
 * build the TLV container listing every channel map the attached sink
 * can accept, copying it to user space with explicit size accounting
 */
static int hdmi_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
			      unsigned int size, unsigned int __user *tlv)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
	struct hdac_chmap *chmap = info->private_data;
	int pcm_idx = kcontrol->private_value;
	unsigned int __user *dst;
	int chs, count = 0;
	unsigned long max_chs;
	int type;
	int spk_alloc, spk_mask;

	/* 8 bytes = container TLV header (type + length) */
	if (size < 8)
		return -ENOMEM;
	if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
		return -EFAULT;
	size -= 8;
	dst = tlv + 2;

	spk_alloc = chmap->ops.get_spk_alloc(chmap->hdac, pcm_idx);
	spk_mask = spk_mask_from_spk_alloc(spk_alloc);

	max_chs = hweight_long(spk_mask);

	for (chs = 2; chs <= max_chs; chs++) {
		int i;
		struct hdac_cea_channel_speaker_allocation *cap;

		cap = channel_allocations;
		for (i = 0; i < ARRAY_SIZE(channel_allocations); i++, cap++) {
			int chs_bytes = chs * 4;
			unsigned int tlv_chmap[8];

			if (cap->channels != chs)
				continue;

			/* skip CAs needing speakers the sink doesn't have */
			if (!(cap->spk_mask == (spk_mask & cap->spk_mask)))
				continue;

			type = chmap->ops.chmap_cea_alloc_validate_get_type(
								chmap, cap, chs);
			if (type < 0)
				return -ENODEV;
			if (size < 8)
				return -ENOMEM;

			if (put_user(type, dst) ||
			    put_user(chs_bytes, dst + 1))
				return -EFAULT;

			dst += 2;
			size -= 8;
			count += 8;

			if (size < chs_bytes)
				return -ENOMEM;

			size -= chs_bytes;
			count += chs_bytes;
			chmap->ops.cea_alloc_to_tlv_chmap(chmap, cap,
							  tlv_chmap, chs);

			if (copy_to_user(dst, tlv_chmap, chs_bytes))
				return -EFAULT;
			dst += chs;
		}
	}

	/* patch the total payload length into the container header */
	if (put_user(count, tlv + 1))
		return -EFAULT;

	return 0;
}

/* report the currently programmed per-PCM channel map to user space */
static int hdmi_chmap_ctl_get(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
	struct hdac_chmap *chmap = info->private_data;
	int pcm_idx = kcontrol->private_value;
	unsigned char pcm_chmap[8];
	int i;

	memset(pcm_chmap, 0, sizeof(pcm_chmap));
	chmap->ops.get_chmap(chmap->hdac, pcm_idx, pcm_chmap);

	for (i = 0; i < ARRAY_SIZE(pcm_chmap); i++)
ucontrol->value.integer.value[i] = pcm_chmap[i];

	return 0;
}

/*
 * apply a user-requested channel map; only allowed while the substream
 * is OPEN/SETUP/PREPARED, and validated against the CA table first
 */
static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
	struct hdac_chmap *hchmap = info->private_data;
	int pcm_idx = kcontrol->private_value;
	unsigned int ctl_idx;
	struct snd_pcm_substream *substream;
	unsigned char chmap[8], per_pin_chmap[8];
	int i, err, ca, prepared = 0;

	/* No monitor is connected in dyn_pcm_assign.
	 * It's invalid to setup the chmap
	 */
	if (!hchmap->ops.is_pcm_attached(hchmap->hdac, pcm_idx))
		return 0;

	ctl_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
	substream = snd_pcm_chmap_substream(info, ctl_idx);
	if (!substream || !substream->runtime)
		return 0; /* just for avoiding error from alsactl restore */
	switch (substream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
		break;
	case SNDRV_PCM_STATE_PREPARED:
		prepared = 1;
		break;
	default:
		return -EBUSY;
	}
	memset(chmap, 0, sizeof(chmap));
	for (i = 0; i < ARRAY_SIZE(chmap); i++)
		chmap[i] = ucontrol->value.integer.value[i];

	/* no-op when the requested map equals the current one */
	hchmap->ops.get_chmap(hchmap->hdac, pcm_idx, per_pin_chmap);
	if (!memcmp(chmap, per_pin_chmap, sizeof(chmap)))
		return 0;
	ca = hdmi_manual_channel_allocation(ARRAY_SIZE(chmap), chmap);
	if (ca < 0)
		return -EINVAL;
	if (hchmap->ops.chmap_validate) {
		err = hchmap->ops.chmap_validate(hchmap, ca,
						 ARRAY_SIZE(chmap), chmap);
		if (err)
			return err;
	}

	hchmap->ops.set_chmap(hchmap->hdac, pcm_idx, chmap, prepared);

	return 0;
}

/* default ops; drivers may override individual hooks after registration */
static const struct hdac_chmap_ops chmap_ops = {
	.chmap_cea_alloc_validate_get_type	= hdmi_chmap_cea_alloc_validate_get_type,
	.cea_alloc_to_tlv_chmap			= hdmi_cea_alloc_to_tlv_chmap,
	.pin_get_slot_channel			= hdmi_pin_get_slot_channel,
	.pin_set_slot_channel			= hdmi_pin_set_slot_channel,
	.set_channel_count			= hdmi_set_channel_count,
};

/* attach the default chmap ops to a codec and build the CA tables */
void snd_hdac_register_chmap_ops(struct hdac_device *hdac,
				 struct hdac_chmap *chmap)
{
	chmap->ops = chmap_ops;
	chmap->hdac = hdac;
	init_channel_allocations();
}
EXPORT_SYMBOL_GPL(snd_hdac_register_chmap_ops);

/*
 * create the ALSA channel-map controls for a playback PCM and override
 * the generic handlers with the HDMI-aware ones above
 */
int snd_hdac_add_chmap_ctls(struct snd_pcm *pcm, int pcm_idx,
				struct hdac_chmap *hchmap)
{
	struct snd_pcm_chmap *chmap;
	struct snd_kcontrol *kctl;
	int err, i;

	err = snd_pcm_add_chmap_ctls(pcm,
				     SNDRV_PCM_STREAM_PLAYBACK,
				     NULL, 0, pcm_idx, &chmap);
	if (err < 0)
		return err;
	/* override handlers */
	chmap->private_data = hchmap;
	kctl = chmap->kctl;
	for (i = 0; i < kctl->count; i++)
		kctl->vd[i].access |= SNDRV_CTL_ELEM_ACCESS_WRITE;
	kctl->info = hdmi_chmap_ctl_info;
	kctl->get = hdmi_chmap_ctl_get;
	kctl->put = hdmi_chmap_ctl_put;
	kctl->tlv.c = hdmi_chmap_ctl_tlv;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_add_chmap_ctls);
linux-master
sound/hda/hdmi_chmap.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * HD-audio controller helpers
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <sound/core.h>
#include <sound/hdaudio.h>
#include <sound/hda_register.h>
#include "local.h"

/* clear CORB read pointer properly */
static void azx_clear_corbrp(struct hdac_bus *bus)
{
	int timeout;

	/* first wait for the hardware to latch the reset bit ... */
	for (timeout = 1000; timeout > 0; timeout--) {
		if (snd_hdac_chip_readw(bus, CORBRP) & AZX_CORBRP_RST)
			break;
		udelay(1);
	}
	if (timeout <= 0)
		dev_err(bus->dev, "CORB reset timeout#1, CORBRP = %d\n",
			snd_hdac_chip_readw(bus, CORBRP));

	/* ... then clear it and wait for the pointer to read back as 0 */
	snd_hdac_chip_writew(bus, CORBRP, 0);
	for (timeout = 1000; timeout > 0; timeout--) {
		if (snd_hdac_chip_readw(bus, CORBRP) == 0)
			break;
		udelay(1);
	}
	if (timeout <= 0)
		dev_err(bus->dev, "CORB reset timeout#2, CORBRP = %d\n",
			snd_hdac_chip_readw(bus, CORBRP));
}

/**
 * snd_hdac_bus_init_cmd_io - set up CORB/RIRB buffers
 * @bus: HD-audio core bus
 *
 * Programs the CORB/RIRB base addresses (both rings live in the single
 * bus->rb page: CORB at offset 0, RIRB at offset 2048), resets the
 * hardware read/write pointers and starts both DMA engines, all under
 * bus->reg_lock.
 */
void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus)
{
	WARN_ON_ONCE(!bus->rb.area);

	spin_lock_irq(&bus->reg_lock);
	/* CORB set up */
	bus->corb.addr = bus->rb.addr;
	bus->corb.buf = (__le32 *)bus->rb.area;
	snd_hdac_chip_writel(bus, CORBLBASE, (u32)bus->corb.addr);
	snd_hdac_chip_writel(bus, CORBUBASE, upper_32_bits(bus->corb.addr));

	/* set the corb size to 256 entries (ULI requires explicitly) */
	snd_hdac_chip_writeb(bus, CORBSIZE, 0x02);
	/* set the corb write pointer to 0 */
	snd_hdac_chip_writew(bus, CORBWP, 0);

	/* reset the corb hw read pointer */
	snd_hdac_chip_writew(bus, CORBRP, AZX_CORBRP_RST);
	if (!bus->corbrp_self_clear)
		azx_clear_corbrp(bus);

	/* enable corb dma */
	snd_hdac_chip_writeb(bus, CORBCTL, AZX_CORBCTL_RUN);

	/* RIRB set up */
	bus->rirb.addr = bus->rb.addr + 2048;
	bus->rirb.buf = (__le32 *)(bus->rb.area + 2048);
	bus->rirb.wp = bus->rirb.rp = 0;
	memset(bus->rirb.cmds, 0, sizeof(bus->rirb.cmds));
	snd_hdac_chip_writel(bus, RIRBLBASE, (u32)bus->rirb.addr);
	snd_hdac_chip_writel(bus, RIRBUBASE, upper_32_bits(bus->rirb.addr));

	/* set the rirb size to 256 entries (ULI requires explicitly) */
	snd_hdac_chip_writeb(bus, RIRBSIZE, 0x02);
	/* reset the rirb hw write pointer */
	snd_hdac_chip_writew(bus, RIRBWP, AZX_RIRBWP_RST);
	/* set N=1, get RIRB response interrupt for new entry */
	snd_hdac_chip_writew(bus, RINTCNT, 1);
	/* enable rirb dma and response irq */
	if (bus->not_use_interrupts)
		snd_hdac_chip_writeb(bus, RIRBCTL, AZX_RBCTL_DMA_EN);
	else
		snd_hdac_chip_writeb(bus, RIRBCTL,
				     AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN);
	/* Accept unsolicited responses */
	snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_UNSOL, AZX_GCTL_UNSOL);
	spin_unlock_irq(&bus->reg_lock);
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_init_cmd_io);

/* wait for cmd dmas till they are stopped */
static void hdac_wait_for_cmd_dmas(struct hdac_bus *bus)
{
	unsigned long timeout;

	/* allow each ring-buffer DMA engine up to 100ms to stop */
	timeout = jiffies + msecs_to_jiffies(100);
	while ((snd_hdac_chip_readb(bus, RIRBCTL) & AZX_RBCTL_DMA_EN)
		&& time_before(jiffies, timeout))
		udelay(10);

	timeout = jiffies + msecs_to_jiffies(100);
	while ((snd_hdac_chip_readb(bus, CORBCTL) & AZX_CORBCTL_RUN)
		&& time_before(jiffies, timeout))
		udelay(10);
}

/**
 * snd_hdac_bus_stop_cmd_io - clean up CORB/RIRB buffers
 * @bus: HD-audio core bus
 *
 * Stops both DMA engines, waits (unlocked) for them to actually halt,
 * then disables unsolicited responses.
 */
void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus)
{
	spin_lock_irq(&bus->reg_lock);
	/* disable ringbuffer DMAs */
	snd_hdac_chip_writeb(bus, RIRBCTL, 0);
	snd_hdac_chip_writeb(bus, CORBCTL, 0);
	spin_unlock_irq(&bus->reg_lock);

	hdac_wait_for_cmd_dmas(bus);

	spin_lock_irq(&bus->reg_lock);
	/* disable unsolicited responses */
	snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_UNSOL, 0);
	spin_unlock_irq(&bus->reg_lock);
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_stop_cmd_io);

/* extract the codec address (top nibble) from an encoded verb */
static unsigned int azx_command_addr(u32 cmd)
{
	unsigned int addr = cmd >> 28;

	if (snd_BUG_ON(addr >= HDA_MAX_CODECS))
		addr = 0;

	return addr;
}

/**
 * snd_hdac_bus_send_cmd - send a command verb via CORB
 * @bus: HD-audio core bus
 * @val: encoded verb value to send
 *
 * Returns zero for success or a negative error code.
*/
int snd_hdac_bus_send_cmd(struct hdac_bus *bus, unsigned int val)
{
	unsigned int addr = azx_command_addr(val);
	unsigned int wp, rp;

	spin_lock_irq(&bus->reg_lock);

	/* remember the last verb per codec for spurious-response diagnostics */
	bus->last_cmd[azx_command_addr(val)] = val;

	/* add command to corb */
	wp = snd_hdac_chip_readw(bus, CORBWP);
	if (wp == 0xffff) {
		/* something wrong, controller likely turned to D3 */
		spin_unlock_irq(&bus->reg_lock);
		return -EIO;
	}
	wp++;
	wp %= AZX_MAX_CORB_ENTRIES;

	rp = snd_hdac_chip_readw(bus, CORBRP);
	if (wp == rp) {
		/* oops, it's full */
		spin_unlock_irq(&bus->reg_lock);
		return -EAGAIN;
	}

	/* account for one outstanding response before kicking the DMA */
	bus->rirb.cmds[addr]++;
	bus->corb.buf[wp] = cpu_to_le32(val);
	snd_hdac_chip_writew(bus, CORBWP, wp);

	spin_unlock_irq(&bus->reg_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_send_cmd);

#define AZX_RIRB_EX_UNSOL_EV	(1<<4)

/**
 * snd_hdac_bus_update_rirb - retrieve RIRB entries
 * @bus: HD-audio core bus
 *
 * Usually called from interrupt handler.
 * The caller needs bus->reg_lock spinlock before calling this.
 */
void snd_hdac_bus_update_rirb(struct hdac_bus *bus)
{
	unsigned int rp, wp;
	unsigned int addr;
	u32 res, res_ex;

	wp = snd_hdac_chip_readw(bus, RIRBWP);
	if (wp == 0xffff) {
		/* something wrong, controller likely turned to D3 */
		return;
	}

	if (wp == bus->rirb.wp)
		return;
	bus->rirb.wp = wp;

	while (bus->rirb.rp != wp) {
		bus->rirb.rp++;
		bus->rirb.rp %= AZX_MAX_RIRB_ENTRIES;

		rp = bus->rirb.rp << 1; /* an RIRB entry is 8-bytes */
		res_ex = le32_to_cpu(bus->rirb.buf[rp + 1]);
		res = le32_to_cpu(bus->rirb.buf[rp]);
		addr = res_ex & 0xf;
		if (addr >= HDA_MAX_CODECS) {
			dev_err(bus->dev,
				"spurious response %#x:%#x, rp = %d, wp = %d",
				res, res_ex, bus->rirb.rp, wp);
			snd_BUG();
		} else if (res_ex & AZX_RIRB_EX_UNSOL_EV)
			snd_hdac_bus_queue_event(bus, res, res_ex);
		else if (bus->rirb.cmds[addr]) {
			/* solicited response: store it and wake any waiter */
			bus->rirb.res[addr] = res;
			bus->rirb.cmds[addr]--;
			if (!bus->rirb.cmds[addr] &&
			    waitqueue_active(&bus->rirb_wq))
				wake_up(&bus->rirb_wq);
		} else {
			dev_err_ratelimited(bus->dev,
				"spurious response %#x:%#x, last cmd=%#08x\n",
				res, res_ex, bus->last_cmd[addr]);
		}
	}
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_update_rirb);

/**
 * snd_hdac_bus_get_response - receive a response via RIRB
 * @bus: HD-audio core bus
 * @addr: codec address
 * @res: pointer to store the value, NULL when not needed
 *
 * Waits (up to 1 second) until the outstanding command count for @addr
 * drops to zero, either sleeping on the RIRB waitqueue (IRQ mode) or
 * actively polling the ring (polling mode).
 *
 * Returns zero if a value is read, or a negative error code.
 */
int snd_hdac_bus_get_response(struct hdac_bus *bus, unsigned int addr,
			      unsigned int *res)
{
	unsigned long timeout;
	unsigned long loopcounter;
	wait_queue_entry_t wait;
	bool warned = false;

	init_wait_entry(&wait, 0);
	timeout = jiffies + msecs_to_jiffies(1000);

	for (loopcounter = 0;; loopcounter++) {
		spin_lock_irq(&bus->reg_lock);
		/* must register on the waitqueue before checking the
		 * condition to avoid losing a wakeup from the IRQ handler
		 */
		if (!bus->polling_mode)
			prepare_to_wait(&bus->rirb_wq, &wait,
					TASK_UNINTERRUPTIBLE);
		if (bus->polling_mode)
			snd_hdac_bus_update_rirb(bus);
		if (!bus->rirb.cmds[addr]) {
			if (res)
				*res = bus->rirb.res[addr]; /* the last value */
			if (!bus->polling_mode)
				finish_wait(&bus->rirb_wq, &wait);
			spin_unlock_irq(&bus->reg_lock);
			return 0;
		}
		spin_unlock_irq(&bus->reg_lock);
		if (time_after(jiffies, timeout))
			break;
#define LOOP_COUNT_MAX	3000
		if (!bus->polling_mode) {
			schedule_timeout(msecs_to_jiffies(2));
		} else if (bus->needs_damn_long_delay ||
			   loopcounter > LOOP_COUNT_MAX) {
			if (loopcounter > LOOP_COUNT_MAX && !warned) {
				dev_dbg_ratelimited(bus->dev,
						    "too slow response, last cmd=%#08x\n",
						    bus->last_cmd[addr]);
				warned = true;
			}
			msleep(2); /* temporary workaround */
		} else {
			udelay(10);
			cond_resched();
		}
	}

	if (!bus->polling_mode)
		finish_wait(&bus->rirb_wq, &wait);

	return -EIO;
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_get_response);

#define HDAC_MAX_CAPS 10
/**
 * snd_hdac_bus_parse_capabilities - parse capability structure
 * @bus: the pointer to bus object
 *
 * Returns 0 if successful, or a negative error code.
*/
int snd_hdac_bus_parse_capabilities(struct hdac_bus *bus)
{
	unsigned int cur_cap;
	unsigned int offset;
	unsigned int counter = 0;

	offset = snd_hdac_chip_readw(bus, LLCH);

	/* Lets walk the linked capabilities list */
	do {
		cur_cap = _snd_hdac_chip_readl(bus, offset);

		dev_dbg(bus->dev, "Capability version: 0x%x\n",
			(cur_cap & AZX_CAP_HDR_VER_MASK) >> AZX_CAP_HDR_VER_OFF);

		dev_dbg(bus->dev, "HDA capability ID: 0x%x\n",
			(cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF);

		/* all-ones read means the register space is gone/invalid */
		if (cur_cap == -1) {
			dev_dbg(bus->dev, "Invalid capability reg read\n");
			break;
		}

		switch ((cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF) {
		case AZX_ML_CAP_ID:
			dev_dbg(bus->dev, "Found ML capability\n");
			bus->mlcap = bus->remap_addr + offset;
			break;

		case AZX_GTS_CAP_ID:
			dev_dbg(bus->dev, "Found GTS capability offset=%x\n", offset);
			bus->gtscap = bus->remap_addr + offset;
			break;

		case AZX_PP_CAP_ID:
			/* PP capability found, the Audio DSP is present */
			dev_dbg(bus->dev, "Found PP capability offset=%x\n", offset);
			bus->ppcap = bus->remap_addr + offset;
			break;

		case AZX_SPB_CAP_ID:
			/* SPIB capability found, handler function */
			dev_dbg(bus->dev, "Found SPB capability\n");
			bus->spbcap = bus->remap_addr + offset;
			break;

		case AZX_DRSM_CAP_ID:
			/* DMA resume capability found, handler function */
			dev_dbg(bus->dev, "Found DRSM capability\n");
			bus->drsmcap = bus->remap_addr + offset;
			break;

		default:
			/* unknown ID: record nothing and stop following links */
			dev_err(bus->dev, "Unknown capability %d\n", cur_cap);
			cur_cap = 0;
			break;
		}

		counter++;

		/* defensive bound against a cyclic capability list */
		if (counter > HDAC_MAX_CAPS) {
			dev_err(bus->dev, "We exceeded HDAC capabilities!!!\n");
			break;
		}

		/* read the offset of next capability */
		offset = cur_cap & AZX_CAP_HDR_NXT_PTR_MASK;

	} while (offset);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_parse_capabilities);

/*
 * Lowlevel interface
 */

/**
 * snd_hdac_bus_enter_link_reset - enter link reset
 * @bus: HD-audio core bus
 *
 * Enter to the link reset state.
 */
void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus)
{
	unsigned long timeout;

	/* reset controller */
	snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_RESET, 0);

	/* wait up to 100ms for the reset bit to read back as 0 */
	timeout = jiffies + msecs_to_jiffies(100);
	while ((snd_hdac_chip_readb(bus, GCTL) & AZX_GCTL_RESET) &&
	       time_before(jiffies, timeout))
		usleep_range(500, 1000);
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_enter_link_reset);

/**
 * snd_hdac_bus_exit_link_reset - exit link reset
 * @bus: HD-audio core bus
 *
 * Exit from the link reset state.
 */
void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus)
{
	unsigned long timeout;

	snd_hdac_chip_updateb(bus, GCTL, AZX_GCTL_RESET, AZX_GCTL_RESET);

	/* wait up to 100ms for GCTL to become non-zero again */
	timeout = jiffies + msecs_to_jiffies(100);
	while (!snd_hdac_chip_readb(bus, GCTL) && time_before(jiffies, timeout))
		usleep_range(500, 1000);
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_exit_link_reset);

/* reset codec link; with full_reset=false only verify controller state */
int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset)
{
	if (!full_reset)
		goto skip_reset;

	/* clear STATESTS if not in reset */
	if (snd_hdac_chip_readb(bus, GCTL) & AZX_GCTL_RESET)
		snd_hdac_chip_writew(bus, STATESTS, STATESTS_INT_MASK);

	/* reset controller */
	snd_hdac_bus_enter_link_reset(bus);

	/* delay for >= 100us for codec PLL to settle per spec
	 * Rev 0.9 section 5.5.1
	 */
	usleep_range(500, 1000);

	/* Bring controller out of reset */
	snd_hdac_bus_exit_link_reset(bus);

	/* Brent Chartrand said to wait >= 540us for codecs to initialize */
	usleep_range(1000, 1200);

 skip_reset:
	/* check to see if controller is ready */
	if (!snd_hdac_chip_readb(bus, GCTL)) {
		dev_dbg(bus->dev, "controller not ready!\n");
		return -EBUSY;
	}

	/* detect codecs */
	if (!bus->codec_mask) {
		bus->codec_mask = snd_hdac_chip_readw(bus, STATESTS);
		dev_dbg(bus->dev, "codec_mask = 0x%lx\n", bus->codec_mask);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_reset_link);

/* enable interrupts */
static void azx_int_enable(struct hdac_bus *bus)
{
	/* enable controller CIE and GIE */
	snd_hdac_chip_updatel(bus, INTCTL, AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN,
AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN);
}

/* disable interrupts */
static void azx_int_disable(struct hdac_bus *bus)
{
	struct hdac_stream *azx_dev;

	/* disable interrupts in stream descriptor */
	list_for_each_entry(azx_dev, &bus->stream_list, list)
		snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0);

	/* disable SIE for all streams & disable controller CIE and GIE */
	snd_hdac_chip_writel(bus, INTCTL, 0);
}

/* clear interrupts */
static void azx_int_clear(struct hdac_bus *bus)
{
	struct hdac_stream *azx_dev;

	/* clear stream status */
	list_for_each_entry(azx_dev, &bus->stream_list, list)
		snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK);

	/* clear STATESTS */
	snd_hdac_chip_writew(bus, STATESTS, STATESTS_INT_MASK);

	/* clear rirb status */
	snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);

	/* clear int status */
	snd_hdac_chip_writel(bus, INTSTS, AZX_INT_CTRL_EN | AZX_INT_ALL_STREAM);
}

/**
 * snd_hdac_bus_init_chip - reset and start the controller registers
 * @bus: HD-audio core bus
 * @full_reset: Do full reset
 *
 * Returns true when the chip was (re)initialized, false if it was
 * already initialized.
 */
bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
{
	if (bus->chip_init)
		return false;

	/* reset controller */
	snd_hdac_bus_reset_link(bus, full_reset);

	/* clear interrupts */
	azx_int_clear(bus);

	/* initialize the codec command I/O */
	snd_hdac_bus_init_cmd_io(bus);

	/* enable interrupts after CORB/RIRB buffers are initialized above */
	azx_int_enable(bus);

	/* program the position buffer */
	if (bus->use_posbuf && bus->posbuf.addr) {
		snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr);
		snd_hdac_chip_writel(bus, DPUBASE, upper_32_bits(bus->posbuf.addr));
	}

	bus->chip_init = true;

	return true;
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_init_chip);

/**
 * snd_hdac_bus_stop_chip - disable the whole IRQ and I/Os
 * @bus: HD-audio core bus
 */
void snd_hdac_bus_stop_chip(struct hdac_bus *bus)
{
	if (!bus->chip_init)
		return;

	/* disable interrupts */
	azx_int_disable(bus);
	azx_int_clear(bus);

	/* disable CORB/RIRB */
	snd_hdac_bus_stop_cmd_io(bus);

	/* disable position buffer */
	if (bus->posbuf.addr) {
		snd_hdac_chip_writel(bus, DPLBASE, 0);
		snd_hdac_chip_writel(bus, DPUBASE, 0);
	}

	bus->chip_init = false;
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_stop_chip);

/**
 * snd_hdac_bus_handle_stream_irq - interrupt handler for streams
 * @bus: HD-audio core bus
 * @status: INTSTS register value
 * @ack: callback to be called for woken streams
 *
 * Returns the bits of handled streams, or zero if no stream is handled.
 */
int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
				   void (*ack)(struct hdac_bus *,
					       struct hdac_stream *))
{
	struct hdac_stream *azx_dev;
	u8 sd_status;
	int handled = 0;

	list_for_each_entry(azx_dev, &bus->stream_list, list) {
		if (status & azx_dev->sd_int_sta_mask) {
			/* ack the stream status before dispatching */
			sd_status = snd_hdac_stream_readb(azx_dev, SD_STS);
			snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK);
			handled |= 1 << azx_dev->index;

			/* only notify running streams on buffer completion */
			if ((!azx_dev->substream && !azx_dev->cstream) ||
			    !azx_dev->running ||
			    !(sd_status & SD_INT_COMPLETE))
				continue;
			if (ack)
				ack(bus, azx_dev);
		}
	}
	return handled;
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_handle_stream_irq);

/**
 * snd_hdac_bus_alloc_stream_pages - allocate BDL and other buffers
 * @bus: HD-audio core bus
 *
 * Call this after assigning the all streams.
 * Returns zero for success, or a negative error code.
 */
int snd_hdac_bus_alloc_stream_pages(struct hdac_bus *bus)
{
	struct hdac_stream *s;
	int num_streams = 0;
	int dma_type = bus->dma_type ? bus->dma_type : SNDRV_DMA_TYPE_DEV;
	int err;

	list_for_each_entry(s, &bus->stream_list, list) {
		/* allocate memory for the BDL for each stream */
		err = snd_dma_alloc_pages(dma_type, bus->dev,
					  BDL_SIZE, &s->bdl);
		num_streams++;
		if (err < 0)
			return -ENOMEM;
	}

	if (WARN_ON(!num_streams))
		return -EINVAL;
	/* allocate memory for the position buffer (8 bytes per stream) */
	err = snd_dma_alloc_pages(dma_type, bus->dev,
				  num_streams * 8, &bus->posbuf);
	if (err < 0)
		return -ENOMEM;
	list_for_each_entry(s, &bus->stream_list, list)
		s->posbuf = (__le32 *)(bus->posbuf.area + s->index * 8);

	/* single page (at least 4096 bytes) must suffice for both ringbuffes */
	return snd_dma_alloc_pages(dma_type, bus->dev, PAGE_SIZE, &bus->rb);
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_alloc_stream_pages);

/**
 * snd_hdac_bus_free_stream_pages - release BDL and other buffers
 * @bus: HD-audio core bus
 */
void snd_hdac_bus_free_stream_pages(struct hdac_bus *bus)
{
	struct hdac_stream *s;

	list_for_each_entry(s, &bus->stream_list, list) {
		if (s->bdl.area)
			snd_dma_free_pages(&s->bdl);
	}

	if (bus->rb.area)
		snd_dma_free_pages(&bus->rb);
	if (bus->posbuf.area)
		snd_dma_free_pages(&bus->posbuf);
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_free_stream_pages);

/**
 * snd_hdac_bus_link_power - power up/down codec link
 * @codec: HD-audio device
 * @enable: whether to power-up the link
 */
void snd_hdac_bus_link_power(struct hdac_device *codec, bool enable)
{
	if (enable)
		set_bit(codec->addr, &codec->bus->codec_powered);
	else
		clear_bit(codec->addr, &codec->bus->codec_powered);
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_link_power);
linux-master
sound/hda/hdac_controller.c
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2015-2019 Intel Corporation

#include <linux/acpi.h>
#include <sound/intel-nhlt.h>

/* map the platform's NHLT ACPI table; returns NULL when not present */
struct nhlt_acpi_table *intel_nhlt_init(struct device *dev)
{
	struct nhlt_acpi_table *nhlt;
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_NHLT, 0,
				(struct acpi_table_header **)&nhlt);
	if (ACPI_FAILURE(status)) {
		dev_warn(dev, "NHLT table not found\n");
		return NULL;
	}

	return nhlt;
}
EXPORT_SYMBOL_GPL(intel_nhlt_init);

/* drop the table reference taken by intel_nhlt_init() */
void intel_nhlt_free(struct nhlt_acpi_table *nhlt)
{
	acpi_put_table((struct acpi_table_header *)nhlt);
}
EXPORT_SYMBOL_GPL(intel_nhlt_free);

/*
 * determine the DMIC count/geometry from the NHLT table;
 * returns 0 when no DMIC endpoint is described
 */
int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt)
{
	struct nhlt_endpoint *epnt;
	struct nhlt_dmic_array_config *cfg;
	struct nhlt_vendor_dmic_array_config *cfg_vendor;
	struct nhlt_fmt *fmt_configs;
	unsigned int dmic_geo = 0;
	u16 max_ch = 0;
	u8 i, j;

	if (!nhlt)
		return 0;

	if (nhlt->header.length <= sizeof(struct acpi_table_header)) {
		dev_warn(dev, "Invalid DMIC description table\n");
		return 0;
	}

	/* endpoints are variable-length records packed back to back */
	for (j = 0, epnt = nhlt->desc; j < nhlt->endpoint_count; j++,
	     epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length)) {

		if (epnt->linktype != NHLT_LINK_DMIC)
			continue;

		cfg = (struct nhlt_dmic_array_config *)(epnt->config.caps);
		fmt_configs = (struct nhlt_fmt *)(epnt->config.caps + epnt->config.size);

		/* find max number of channels based on format_configuration */
		if (fmt_configs->fmt_count) {
			struct nhlt_fmt_cfg *fmt_cfg = fmt_configs->fmt_config;

			dev_dbg(dev, "found %d format definitions\n",
				fmt_configs->fmt_count);

			for (i = 0; i < fmt_configs->fmt_count; i++) {
				struct wav_fmt_ext *fmt_ext;

				fmt_ext = &fmt_cfg->fmt_ext;

				if (fmt_ext->fmt.channels > max_ch)
					max_ch = fmt_ext->fmt.channels;

				/* Move to the next nhlt_fmt_cfg */
				fmt_cfg = (struct nhlt_fmt_cfg *)(fmt_cfg->config.caps +
								  fmt_cfg->config.size);
			}
			dev_dbg(dev, "max channels found %d\n", max_ch);
		} else {
			dev_dbg(dev, "No format information found\n");
		}

		if (cfg->device_config.config_type != NHLT_CONFIG_TYPE_MIC_ARRAY) {
			/* no mic-array record: fall back to the format count */
			dmic_geo = max_ch;
		} else {
			switch (cfg->array_type) {
			case NHLT_MIC_ARRAY_2CH_SMALL:
			case NHLT_MIC_ARRAY_2CH_BIG:
				dmic_geo = MIC_ARRAY_2CH;
				break;

			case NHLT_MIC_ARRAY_4CH_1ST_GEOM:
			case NHLT_MIC_ARRAY_4CH_L_SHAPED:
			case NHLT_MIC_ARRAY_4CH_2ND_GEOM:
				dmic_geo = MIC_ARRAY_4CH;
				break;

			case NHLT_MIC_ARRAY_VENDOR_DEFINED:
				cfg_vendor = (struct nhlt_vendor_dmic_array_config *)cfg;
				dmic_geo = cfg_vendor->nb_mics;
				break;

			default:
				dev_warn(dev, "%s: undefined DMIC array_type 0x%0x\n",
					 __func__, cfg->array_type);
			}

			if (dmic_geo > 0) {
				dev_dbg(dev, "Array with %d dmics\n", dmic_geo);
			}
			if (max_ch > dmic_geo) {
				dev_dbg(dev, "max channels %d exceed dmic number %d\n",
					max_ch, dmic_geo);
			}
		}
	}

	dev_dbg(dev, "dmic number %d max_ch %d\n", dmic_geo, max_ch);

	return dmic_geo;
}
EXPORT_SYMBOL_GPL(intel_nhlt_get_dmic_geo);

/* true when the table contains at least one endpoint of @link_type */
bool intel_nhlt_has_endpoint_type(struct nhlt_acpi_table *nhlt, u8 link_type)
{
	struct nhlt_endpoint *epnt;
	int i;

	if (!nhlt)
		return false;

	epnt = (struct nhlt_endpoint *)nhlt->desc;
	for (i = 0; i < nhlt->endpoint_count; i++) {
		if (epnt->linktype == link_type)
			return true;
		epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length);
	}
	return false;
}
EXPORT_SYMBOL(intel_nhlt_has_endpoint_type);

/* bitmask of SSP ports described for @device_type (BT or I2S only) */
int intel_nhlt_ssp_endpoint_mask(struct nhlt_acpi_table *nhlt, u8 device_type)
{
	struct nhlt_endpoint *epnt;
	int ssp_mask = 0;
	int i;

	if (!nhlt || (device_type != NHLT_DEVICE_BT && device_type != NHLT_DEVICE_I2S))
		return 0;

	epnt = (struct nhlt_endpoint *)nhlt->desc;
	for (i = 0; i < nhlt->endpoint_count; i++) {
		if (epnt->linktype == NHLT_LINK_SSP &&
		    epnt->device_type == device_type) {
			/* for SSP the virtual bus id is the SSP port */
			ssp_mask |= BIT(epnt->virtual_bus_id);
		}
		epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length);
	}

	return ssp_mask;
}
EXPORT_SYMBOL(intel_nhlt_ssp_endpoint_mask);

#define SSP_BLOB_V1_0_SIZE		84
#define SSP_BLOB_V1_0_MDIVC_OFFSET	19 /* offset in u32 */

#define SSP_BLOB_V1_5_SIZE		96
#define SSP_BLOB_V1_5_MDIVC_OFFSET	21
/* offset in u32 */
#define SSP_BLOB_VER_1_5	0xEE000105

#define SSP_BLOB_V2_0_SIZE		88
#define SSP_BLOB_V2_0_MDIVC_OFFSET	20 /* offset in u32 */
#define SSP_BLOB_VER_2_0	0xEE000200

/*
 * read the MCLK divider-control bits out of every SSP blob for the given
 * SSP port; returns the 2-bit mask, or -EINVAL on malformed blobs or when
 * more than one MCLK is referenced
 */
int intel_nhlt_ssp_mclk_mask(struct nhlt_acpi_table *nhlt, int ssp_num)
{
	struct nhlt_endpoint *epnt;
	struct nhlt_fmt *fmt;
	struct nhlt_fmt_cfg *cfg;
	int mclk_mask = 0;
	int i, j;

	if (!nhlt)
		return 0;

	epnt = (struct nhlt_endpoint *)nhlt->desc;
	for (i = 0; i < nhlt->endpoint_count; i++) {

		/* we only care about endpoints connected to an audio codec over SSP */
		if (epnt->linktype == NHLT_LINK_SSP &&
		    epnt->device_type == NHLT_DEVICE_I2S &&
		    epnt->virtual_bus_id == ssp_num) {

			fmt = (struct nhlt_fmt *)(epnt->config.caps + epnt->config.size);
			cfg = fmt->fmt_config;

			/*
			 * In theory all formats should use the same MCLK but it doesn't hurt to
			 * double-check that the configuration is consistent
			 */
			for (j = 0; j < fmt->fmt_count; j++) {
				u32 *blob;
				int mdivc_offset;
				int size;

				/* first check we have enough data to read the blob type */
				if (cfg->config.size < 8)
					return -EINVAL;

				/* blob[1] carries the blob version signature */
				blob = (u32 *)cfg->config.caps;
				if (blob[1] == SSP_BLOB_VER_2_0) {
					mdivc_offset = SSP_BLOB_V2_0_MDIVC_OFFSET;
					size = SSP_BLOB_V2_0_SIZE;
				} else if (blob[1] == SSP_BLOB_VER_1_5) {
					mdivc_offset = SSP_BLOB_V1_5_MDIVC_OFFSET;
					size = SSP_BLOB_V1_5_SIZE;
				} else {
					/* unversioned blobs use the v1.0 layout */
					mdivc_offset = SSP_BLOB_V1_0_MDIVC_OFFSET;
					size = SSP_BLOB_V1_0_SIZE;
				}

				/* make sure we have enough data for the fixed part of the blob */
				if (cfg->config.size < size)
					return -EINVAL;

				mclk_mask |= blob[mdivc_offset] & GENMASK(1, 0);

				cfg = (struct nhlt_fmt_cfg *)(cfg->config.caps + cfg->config.size);
			}
		}
		epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length);
	}

	/* make sure only one MCLK is used */
	if (hweight_long(mclk_mask) != 1)
		return -EINVAL;

	return mclk_mask;
}
EXPORT_SYMBOL(intel_nhlt_ssp_mclk_mask);

/* find the format config matching channels/rate/valid-bits/bits exactly */
static struct nhlt_specific_cfg *
nhlt_get_specific_cfg(struct device *dev, struct nhlt_fmt *fmt, u8 num_ch,
		      u32 rate, u8 vbps, u8 bps)
{
	struct nhlt_fmt_cfg *cfg = fmt->fmt_config;
	struct wav_fmt *wfmt;
	u16 _bps, _vbps;
	int i;

	dev_dbg(dev, "Endpoint format count=%d\n", fmt->fmt_count);

	for (i = 0; i < fmt->fmt_count; i++) {
		wfmt = &cfg->fmt_ext.fmt;
		_bps = wfmt->bits_per_sample;
		_vbps = cfg->fmt_ext.sample.valid_bits_per_sample;

		dev_dbg(dev, "Endpoint format: ch=%d fmt=%d/%d rate=%d\n",
			wfmt->channels, _vbps, _bps, wfmt->samples_per_sec);

		if (wfmt->channels == num_ch && wfmt->samples_per_sec == rate &&
		    vbps == _vbps && bps == _bps)
			return &cfg->config;

		cfg = (struct nhlt_fmt_cfg *)(cfg->config.caps + cfg->config.size);
	}

	return NULL;
}

/* check whether an endpoint matches the requested bus/link/direction/type */
static bool nhlt_check_ep_match(struct device *dev, struct nhlt_endpoint *epnt,
				u32 bus_id, u8 link_type, u8 dir, u8 dev_type)
{
	dev_dbg(dev, "Endpoint: vbus_id=%d link_type=%d dir=%d dev_type = %d\n",
		epnt->virtual_bus_id, epnt->linktype,
		epnt->direction, epnt->device_type);

	if ((epnt->virtual_bus_id != bus_id) ||
	    (epnt->linktype != link_type) ||
	    (epnt->direction != dir))
		return false;

	/* link of type DMIC bypasses device_type check */
	return epnt->linktype == NHLT_LINK_DMIC ||
	       epnt->device_type == dev_type;
}

/*
 * look up the vendor-specific configuration blob for an endpoint that
 * matches both the topology (bus/link/dir/type) and the audio format;
 * returns NULL when no endpoint provides a matching format
 */
struct nhlt_specific_cfg *
intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
			     u32 bus_id, u8 link_type, u8 vbps, u8 bps,
			     u8 num_ch, u32 rate, u8 dir, u8 dev_type)
{
	struct nhlt_specific_cfg *cfg;
	struct nhlt_endpoint *epnt;
	struct nhlt_fmt *fmt;
	int i;

	if (!nhlt)
		return NULL;

	dev_dbg(dev, "Looking for configuration:\n");
	dev_dbg(dev, " vbus_id=%d link_type=%d dir=%d, dev_type=%d\n",
		bus_id, link_type, dir, dev_type);
	dev_dbg(dev, " ch=%d fmt=%d/%d rate=%d\n", num_ch, vbps, bps, rate);
	dev_dbg(dev, "Endpoint count=%d\n", nhlt->endpoint_count);

	epnt = (struct nhlt_endpoint *)nhlt->desc;

	for (i = 0; i < nhlt->endpoint_count; i++) {
		if (nhlt_check_ep_match(dev, epnt, bus_id, link_type, dir, dev_type)) {
			fmt = (struct nhlt_fmt *)(epnt->config.caps + epnt->config.size);

			cfg = nhlt_get_specific_cfg(dev, fmt, num_ch, rate, vbps, bps);
			if (cfg)
				return cfg;
		}

		epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length);
	}

	return NULL;
}
EXPORT_SYMBOL(intel_nhlt_get_endpoint_blob);
linux-master
sound/hda/intel-nhlt.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * HD-audio core bus driver
 */

#include <linux/init.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/export.h>
#include <sound/hdaudio.h>
#include "local.h"
#include "trace.h"

static void snd_hdac_bus_process_unsol_events(struct work_struct *work);

/* default verb ops used when the controller driver passes ops == NULL */
static const struct hdac_bus_ops default_ops = {
	.command = snd_hdac_bus_send_cmd,
	.get_response = snd_hdac_bus_get_response,
	.link_power = snd_hdac_bus_link_power,
};

/**
 * snd_hdac_bus_init - initialize a HD-audio base bus
 * @bus: the pointer to bus object
 * @dev: device pointer
 * @ops: bus verb operators
 *
 * Returns 0 if successful, or a negative error code.
 */
int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
		      const struct hdac_bus_ops *ops)
{
	memset(bus, 0, sizeof(*bus));
	bus->dev = dev;

	if (ops)
		bus->ops = ops;
	else
		bus->ops = &default_ops;

	bus->dma_type = SNDRV_DMA_TYPE_DEV;
	INIT_LIST_HEAD(&bus->stream_list);
	INIT_LIST_HEAD(&bus->codec_list);
	INIT_WORK(&bus->unsol_work, snd_hdac_bus_process_unsol_events);
	spin_lock_init(&bus->reg_lock);
	mutex_init(&bus->cmd_mutex);
	mutex_init(&bus->lock);
	INIT_LIST_HEAD(&bus->hlink_list);
	init_waitqueue_head(&bus->rirb_wq);
	bus->irq = -1;

	/*
	 * Default value of '8' is as per the HD audio specification (Rev 1.0a).
	 * Following relation is used to derive STRIPE control value.
	 * For sample rate <= 48K:
	 * { ((num_channels * bits_per_sample) / number of SDOs) >= 8 }
	 * For sample rate > 48K:
	 * { ((num_channels * bits_per_sample * rate/48000) /
	 *	number of SDOs) >= 8 }
	 */
	bus->sdo_limit = 8;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_init);

/**
 * snd_hdac_bus_exit - clean up a HD-audio base bus
 * @bus: the pointer to bus object
 */
void snd_hdac_bus_exit(struct hdac_bus *bus)
{
	WARN_ON(!list_empty(&bus->stream_list));
	WARN_ON(!list_empty(&bus->codec_list));
	cancel_work_sync(&bus->unsol_work);
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_exit);

/**
 * snd_hdac_bus_exec_verb - execute a HD-audio verb on the given bus
 * @bus: bus object
 * @addr: the HDAC device address
 * @cmd: HD-audio encoded verb
 * @res: pointer to store the response, NULL if performing asynchronously
 *
 * Returns 0 if successful, or a negative error code.
 */
int snd_hdac_bus_exec_verb(struct hdac_bus *bus, unsigned int addr,
			   unsigned int cmd, unsigned int *res)
{
	int err;

	/* serialize verb execution against concurrent callers */
	mutex_lock(&bus->cmd_mutex);
	err = snd_hdac_bus_exec_verb_unlocked(bus, addr, cmd, res);
	mutex_unlock(&bus->cmd_mutex);
	return err;
}

/**
 * snd_hdac_bus_exec_verb_unlocked - unlocked version
 * @bus: bus object
 * @addr: the HDAC device address
 * @cmd: HD-audio encoded verb
 * @res: pointer to store the response, NULL if performing asynchronously
 *
 * Returns 0 if successful, or a negative error code.
 */
int snd_hdac_bus_exec_verb_unlocked(struct hdac_bus *bus, unsigned int addr,
				    unsigned int cmd, unsigned int *res)
{
	unsigned int tmp;
	int err;

	/* ~0 is an invalid encoded verb */
	if (cmd == ~0)
		return -EINVAL;

	if (res)
		*res = -1;
	else if (bus->sync_write)
		res = &tmp; /* force a synchronous read-back */

	for (;;) {
		trace_hda_send_cmd(bus, cmd);
		err = bus->ops->command(bus, cmd);
		if (err != -EAGAIN)
			break;
		/* process pending verbs */
		err = bus->ops->get_response(bus, addr, &tmp);
		if (err)
			break;
	}

	if (!err && res) {
		err = bus->ops->get_response(bus, addr, res);
		trace_hda_get_response(bus, addr, *res);
	}
	return err;
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_exec_verb_unlocked);

/**
 * snd_hdac_bus_queue_event - add an unsolicited event to queue
 * @bus: the BUS
 * @res: unsolicited event (lower 32bit of RIRB entry)
 * @res_ex: codec addr and flags (upper 32bit of RIRB entry)
 *
 * Adds the given event to the queue.  The events are processed in
 * the workqueue asynchronously.  Call this function in the interrupt
 * handler when RIRB receives an unsolicited event.
 */
void snd_hdac_bus_queue_event(struct hdac_bus *bus, u32 res, u32 res_ex)
{
	unsigned int wp;

	if (!bus)
		return;

	trace_hda_unsol_event(bus, res, res_ex);
	wp = (bus->unsol_wp + 1) % HDA_UNSOL_QUEUE_SIZE;
	bus->unsol_wp = wp;

	/* each queue entry occupies two u32 slots: res and res_ex */
	wp <<= 1;
	bus->unsol_queue[wp] = res;
	bus->unsol_queue[wp + 1] = res_ex;

	schedule_work(&bus->unsol_work);
}

/*
 * process queued unsolicited events
 */
static void snd_hdac_bus_process_unsol_events(struct work_struct *work)
{
	struct hdac_bus *bus = container_of(work, struct hdac_bus, unsol_work);
	struct hdac_device *codec;
	struct hdac_driver *drv;
	unsigned int rp, caddr, res;

	spin_lock_irq(&bus->reg_lock);
	while (bus->unsol_rp != bus->unsol_wp) {
		rp = (bus->unsol_rp + 1) % HDA_UNSOL_QUEUE_SIZE;
		bus->unsol_rp = rp;
		rp <<= 1;
		res = bus->unsol_queue[rp];
		caddr = bus->unsol_queue[rp + 1];
		if (!(caddr & (1 << 4))) /* no unsolicited event? */
			continue;
		codec = bus->caddr_tbl[caddr & 0x0f];
		if (!codec || !codec->registered)
			continue;
		/* drop the lock while calling into the codec driver */
		spin_unlock_irq(&bus->reg_lock);
		drv = drv_to_hdac_driver(codec->dev.driver);
		if (drv->unsol_event)
			drv->unsol_event(codec, res);
		spin_lock_irq(&bus->reg_lock);
	}
	spin_unlock_irq(&bus->reg_lock);
}

/**
 * snd_hdac_bus_add_device - Add a codec to bus
 * @bus: HDA core bus
 * @codec: HDA core device to add
 *
 * Adds the given codec to the list in the bus.  The caddr_tbl array
 * and codec_powered bits are updated, as well.
 * Returns zero if success, or a negative error code.
 */
int snd_hdac_bus_add_device(struct hdac_bus *bus, struct hdac_device *codec)
{
	if (bus->caddr_tbl[codec->addr]) {
		dev_err(bus->dev, "address 0x%x is already occupied\n",
			codec->addr);
		return -EBUSY;
	}

	list_add_tail(&codec->list, &bus->codec_list);
	bus->caddr_tbl[codec->addr] = codec;
	set_bit(codec->addr, &bus->codec_powered);
	bus->num_codecs++;
	return 0;
}

/**
 * snd_hdac_bus_remove_device - Remove a codec from bus
 * @bus: HDA core bus
 * @codec: HDA core device to remove
 */
void snd_hdac_bus_remove_device(struct hdac_bus *bus,
				struct hdac_device *codec)
{
	WARN_ON(bus != codec->bus);
	if (list_empty(&codec->list))
		return;
	list_del_init(&codec->list);
	bus->caddr_tbl[codec->addr] = NULL;
	clear_bit(codec->addr, &bus->codec_powered);
	bus->num_codecs--;
	/* make sure no pending unsol event refers to this codec */
	flush_work(&bus->unsol_work);
}

#ifdef CONFIG_SND_HDA_ALIGNED_MMIO
/* Helpers for aligned read/write of mmio space, for Tegra */
unsigned int snd_hdac_aligned_read(void __iomem *addr, unsigned int mask)
{
	void __iomem *aligned_addr =
		(void __iomem *)((unsigned long)(addr) & ~0x3);
	unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
	unsigned int v;

	v = readl(aligned_addr);
	return (v >> shift) & mask;
}
EXPORT_SYMBOL_GPL(snd_hdac_aligned_read);

void snd_hdac_aligned_write(unsigned int val, void __iomem *addr,
			    unsigned int mask)
{
	void __iomem *aligned_addr =
		(void __iomem *)((unsigned long)(addr) & ~0x3);
	unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
	unsigned int v;

	/* read-modify-write on the containing aligned 32bit word */
	v = readl(aligned_addr);
	v &= ~(mask << shift);
	v |= val << shift;
	writel(v, aligned_addr);
}
EXPORT_SYMBOL_GPL(snd_hdac_aligned_write);
#endif /* CONFIG_SND_HDA_ALIGNED_MMIO */

/* enable the link power via bus ops, falling back to the default helper */
void snd_hdac_codec_link_up(struct hdac_device *codec)
{
	struct hdac_bus *bus = codec->bus;

	if (bus->ops->link_power)
		bus->ops->link_power(codec, true);
	else
		snd_hdac_bus_link_power(codec, true);
}
EXPORT_SYMBOL_GPL(snd_hdac_codec_link_up);

/* disable the link power via bus ops, falling back to the default helper */
void snd_hdac_codec_link_down(struct hdac_device *codec)
{
	struct hdac_bus *bus = codec->bus;

	if (bus->ops->link_power)
		bus->ops->link_power(codec, false);
	else
		snd_hdac_bus_link_power(codec, false);
}
EXPORT_SYMBOL_GPL(snd_hdac_codec_link_down);
linux-master
sound/hda/hdac_bus.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * hdac_i915.c - routines for sync between HD-A core and i915 display driver
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <sound/core.h>
#include <sound/hdaudio.h>
#include <sound/hda_i915.h>
#include <sound/hda_register.h>

/**
 * snd_hdac_i915_set_bclk - Reprogram BCLK for HSW/BDW
 * @bus: HDA core bus
 *
 * Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
 * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value)
 * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
 * BCLK = CDCLK * M / N
 * The values will be lost when the display power well is disabled and need to
 * be restored to avoid abnormal playback speed.
 *
 * Call this function at initializing and changing power well, as well as
 * at ELD notifier for the hotplug.
 */
void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
{
	struct drm_audio_component *acomp = bus->audio_component;
	struct pci_dev *pci = to_pci_dev(bus->dev);
	int cdclk_freq;
	unsigned int bclk_m, bclk_n;

	if (!acomp || !acomp->ops || !acomp->ops->get_cdclk_freq)
		return; /* only for i915 binding */
	if (!HDA_CONTROLLER_IS_HSW(pci))
		return; /* only HSW/BDW */

	cdclk_freq = acomp->ops->get_cdclk_freq(acomp->dev);
	/* pick the M/N divider pair matching the current CDCLK (in kHz) */
	switch (cdclk_freq) {
	case 337500:
		bclk_m = 16;
		bclk_n = 225;
		break;

	case 450000:
	default: /* default CDCLK 450MHz */
		bclk_m = 4;
		bclk_n = 75;
		break;

	case 540000:
		bclk_m = 4;
		bclk_n = 90;
		break;

	case 675000:
		bclk_m = 8;
		bclk_n = 225;
		break;
	}

	snd_hdac_chip_writew(bus, HSW_EM4, bclk_m);
	snd_hdac_chip_writew(bus, HSW_EM5, bclk_n);
}
EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk);

/* returns true if the devices can be connected for audio */
static bool connectivity_check(struct pci_dev *i915, struct pci_dev *hdac)
{
	struct pci_bus *bus_a = i915->bus, *bus_b = hdac->bus;

	/* directly connected on the same bus */
	if (bus_a == bus_b)
		return true;

	bus_a = bus_a->parent;
	bus_b = bus_b->parent;

	/* connected via parent bus (may be NULL!) */
	if (bus_a == bus_b)
		return true;

	if (!bus_a || !bus_b)
		return false;

	/*
	 * on i915 discrete GPUs with embedded HDA audio, the two
	 * devices are connected via 2nd level PCI bridge
	 */
	bus_a = bus_a->parent;
	bus_b = bus_b->parent;
	if (bus_a && bus_a == bus_b)
		return true;

	return false;
}

/* component-framework match callback: accept the i915 audio subcomponent
 * that is topologically reachable from our HDA controller
 */
static int i915_component_master_match(struct device *dev, int subcomponent,
				       void *data)
{
	struct pci_dev *hdac_pci, *i915_pci;
	struct hdac_bus *bus = data;

	if (!dev_is_pci(dev))
		return 0;

	hdac_pci = to_pci_dev(bus->dev);
	i915_pci = to_pci_dev(dev);

	if (!strcmp(dev->driver->name, "i915") &&
	    subcomponent == I915_COMPONENT_AUDIO &&
	    connectivity_check(i915_pci, hdac_pci))
		return 1;

	return 0;
}

/* check whether Intel graphics is present and reachable */
static int i915_gfx_present(struct pci_dev *hdac_pci)
{
	struct pci_dev *display_dev = NULL;

	for_each_pci_dev(display_dev) {
		if (display_dev->vendor == PCI_VENDOR_ID_INTEL &&
		    (display_dev->class >> 16) == PCI_BASE_CLASS_DISPLAY &&
		    connectivity_check(display_dev, hdac_pci)) {
			/* drop the ref taken by the iterator before returning */
			pci_dev_put(display_dev);
			return true;
		}
	}

	return false;
}

/**
 * snd_hdac_i915_init - Initialize i915 audio component
 * @bus: HDA core bus
 *
 * This function is supposed to be used only by a HD-audio controller
 * driver that needs the interaction with i915 graphics.
 *
 * This function initializes and sets up the audio component to communicate
 * with i915 graphics driver.
 *
 * Returns zero for success or a negative error code.
 */
int snd_hdac_i915_init(struct hdac_bus *bus)
{
	struct drm_audio_component *acomp;
	int err;

	if (!i915_gfx_present(to_pci_dev(bus->dev)))
		return -ENODEV;

	err = snd_hdac_acomp_init(bus, NULL,
				  i915_component_master_match,
				  sizeof(struct i915_audio_component) - sizeof(*acomp));
	if (err < 0)
		return err;
	acomp = bus->audio_component;
	if (!acomp)
		return -ENODEV;
	if (!acomp->ops) {
		/* load i915 (if modular) and wait for it to bind */
		if (!IS_ENABLED(CONFIG_MODULES) ||
		    !request_module("i915")) {
			/* 60s timeout */
			wait_for_completion_killable_timeout(&acomp->master_bind_complete,
							     msecs_to_jiffies(60 * 1000));
		}
	}
	if (!acomp->ops) {
		dev_info(bus->dev, "couldn't bind with audio component\n");
		snd_hdac_acomp_exit(bus);
		return -ENODEV;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_i915_init);
linux-master
sound/hda/hdac_i915.c
// SPDX-License-Identifier: GPL-2.0
// hdac_component.c - routines for sync between HD-A core and DRM driver

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/component.h>
#include <sound/core.h>
#include <sound/hdaudio.h>
#include <sound/hda_component.h>
#include <sound/hda_register.h>

/* devres release callback; the acomp memory itself is devres-managed */
static void hdac_acomp_release(struct device *dev, void *res)
{
}

/* look up the devres-allocated audio component attached to @dev, if any */
static struct drm_audio_component *hdac_get_acomp(struct device *dev)
{
	return devres_find(dev, hdac_acomp_release, NULL, NULL);
}

/**
 * snd_hdac_set_codec_wakeup - Enable / disable HDMI/DP codec wakeup
 * @bus: HDA core bus
 * @enable: enable or disable the wakeup
 *
 * This function is supposed to be used only by a HD-audio controller
 * driver that needs the interaction with graphics driver.
 *
 * This function should be called during the chip reset, also called at
 * resume for updating STATESTS register read.
 *
 * Returns zero for success or a negative error code.
 */
int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
{
	struct drm_audio_component *acomp = bus->audio_component;

	if (!acomp || !acomp->ops)
		return -ENODEV;

	if (!acomp->ops->codec_wake_override)
		return 0;

	dev_dbg(bus->dev, "%s codec wakeup\n",
		enable ? "enable" : "disable");

	acomp->ops->codec_wake_override(acomp->dev, enable);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_set_codec_wakeup);

/**
 * snd_hdac_display_power - Power up / down the power refcount
 * @bus: HDA core bus
 * @idx: HDA codec address, pass HDA_CODEC_IDX_CONTROLLER for controller
 * @enable: power up or down
 *
 * This function is used by either HD-audio controller or codec driver that
 * needs the interaction with graphics driver.
 *
 * This function updates the power status, and calls the get_power() and
 * put_power() ops accordingly, toggling the codec wakeup, too.
 */
void snd_hdac_display_power(struct hdac_bus *bus, unsigned int idx, bool enable)
{
	struct drm_audio_component *acomp = bus->audio_component;

	dev_dbg(bus->dev, "display power %s\n",
		enable ? "enable" : "disable");

	mutex_lock(&bus->lock);
	/* track per-codec requests as a bitmap; power stays on while any set */
	if (enable)
		set_bit(idx, &bus->display_power_status);
	else
		clear_bit(idx, &bus->display_power_status);

	if (!acomp || !acomp->ops)
		goto unlock;

	if (bus->display_power_status) {
		if (!bus->display_power_active) {
			unsigned long cookie = -1;

			if (acomp->ops->get_power)
				cookie = acomp->ops->get_power(acomp->dev);

			/* pulse the codec wakeup to refresh STATESTS */
			snd_hdac_set_codec_wakeup(bus, true);
			snd_hdac_set_codec_wakeup(bus, false);
			/* remember the wakeref cookie for put_power() */
			bus->display_power_active = cookie;
		}
	} else {
		if (bus->display_power_active) {
			unsigned long cookie = bus->display_power_active;

			if (acomp->ops->put_power)
				acomp->ops->put_power(acomp->dev, cookie);

			bus->display_power_active = 0;
		}
	}
 unlock:
	mutex_unlock(&bus->lock);
}
EXPORT_SYMBOL_GPL(snd_hdac_display_power);

/**
 * snd_hdac_sync_audio_rate - Set N/CTS based on the sample rate
 * @codec: HDA codec
 * @nid: the pin widget NID
 * @dev_id: device identifier
 * @rate: the sample rate to set
 *
 * This function is supposed to be used only by a HD-audio controller
 * driver that needs the interaction with graphics driver.
 *
 * This function sets N/CTS value based on the given sample rate.
 * Returns zero for success, or a negative error code.
 */
int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid,
			     int dev_id, int rate)
{
	struct hdac_bus *bus = codec->bus;
	struct drm_audio_component *acomp = bus->audio_component;
	int port, pipe;

	if (!acomp || !acomp->ops || !acomp->ops->sync_audio_rate)
		return -ENODEV;
	port = nid;
	if (acomp->audio_ops && acomp->audio_ops->pin2port) {
		/* translate the pin NID to the DRM port number */
		port = acomp->audio_ops->pin2port(codec, nid);
		if (port < 0)
			return -EINVAL;
	}
	pipe = dev_id;
	return acomp->ops->sync_audio_rate(acomp->dev, port, pipe, rate);
}
EXPORT_SYMBOL_GPL(snd_hdac_sync_audio_rate);

/**
 * snd_hdac_acomp_get_eld - Get the audio state and ELD via component
 * @codec: HDA codec
 * @nid: the pin widget NID
 * @dev_id: device identifier
 * @audio_enabled: the pointer to store the current audio state
 * @buffer: the buffer pointer to store ELD bytes
 * @max_bytes: the max bytes to be stored on @buffer
 *
 * This function is supposed to be used only by a HD-audio controller
 * driver that needs the interaction with graphics driver.
 *
 * This function queries the current state of the audio on the given
 * digital port and fetches the ELD bytes onto the given buffer.
 * It returns the number of bytes for the total ELD data, zero for
 * invalid ELD, or a negative error code.
 *
 * The return size is the total bytes required for the whole ELD bytes,
 * thus it may be over @max_bytes.  If it's over @max_bytes, it implies
 * that only a part of ELD bytes have been fetched.
 */
int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
			   bool *audio_enabled, char *buffer, int max_bytes)
{
	struct hdac_bus *bus = codec->bus;
	struct drm_audio_component *acomp = bus->audio_component;
	int port, pipe;

	if (!acomp || !acomp->ops || !acomp->ops->get_eld)
		return -ENODEV;

	port = nid;
	if (acomp->audio_ops && acomp->audio_ops->pin2port) {
		/* translate the pin NID to the DRM port number */
		port = acomp->audio_ops->pin2port(codec, nid);
		if (port < 0)
			return -EINVAL;
	}
	pipe = dev_id;
	return acomp->ops->get_eld(acomp->dev, port, pipe, audio_enabled,
				   buffer, max_bytes);
}
EXPORT_SYMBOL_GPL(snd_hdac_acomp_get_eld);

/* component-master bind: bind all components, pin the provider module,
 * call the optional audio_ops->master_bind hook, and signal waiters
 */
static int hdac_component_master_bind(struct device *dev)
{
	struct drm_audio_component *acomp = hdac_get_acomp(dev);
	int ret;

	if (WARN_ON(!acomp))
		return -EINVAL;

	ret = component_bind_all(dev, acomp);
	if (ret < 0)
		return ret;

	if (WARN_ON(!(acomp->dev && acomp->ops))) {
		ret = -EINVAL;
		goto out_unbind;
	}

	/* pin the module to avoid dynamic unbinding, but only if given */
	if (!try_module_get(acomp->ops->owner)) {
		ret = -ENODEV;
		goto out_unbind;
	}

	if (acomp->audio_ops && acomp->audio_ops->master_bind) {
		ret = acomp->audio_ops->master_bind(dev, acomp);
		if (ret < 0)
			goto module_put;
	}

	/* wake up anyone blocked in snd_hdac_i915_init() and the like */
	complete_all(&acomp->master_bind_complete);
	return 0;

 module_put:
	module_put(acomp->ops->owner);
 out_unbind:
	component_unbind_all(dev, acomp);
	/* complete even on failure so waiters don't hang until timeout */
	complete_all(&acomp->master_bind_complete);

	return ret;
}

/* component-master unbind: reverse of hdac_component_master_bind() */
static void hdac_component_master_unbind(struct device *dev)
{
	struct drm_audio_component *acomp = hdac_get_acomp(dev);

	if (acomp->audio_ops && acomp->audio_ops->master_unbind)
		acomp->audio_ops->master_unbind(dev, acomp);
	module_put(acomp->ops->owner);
	component_unbind_all(dev, acomp);
	WARN_ON(acomp->ops || acomp->dev);
}

static const struct component_master_ops hdac_component_master_ops = {
	.bind = hdac_component_master_bind,
	.unbind = hdac_component_master_unbind,
};

/**
 * snd_hdac_acomp_register_notifier - Register audio component ops
 * @bus: HDA core bus
 * @aops: audio component ops
 *
 * This function is supposed to be used only by a HD-audio controller
 * driver that needs the interaction with graphics driver.
 *
 * This function sets the given ops to be called by the graphics driver.
 *
 * Returns zero for success or a negative error code.
 */
int snd_hdac_acomp_register_notifier(struct hdac_bus *bus,
				     const struct drm_audio_component_audio_ops *aops)
{
	if (!bus->audio_component)
		return -ENODEV;

	bus->audio_component->audio_ops = aops;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_acomp_register_notifier);

/**
 * snd_hdac_acomp_init - Initialize audio component
 * @bus: HDA core bus
 * @aops: audio component ops
 * @match_master: match function for finding components
 * @extra_size: Extra bytes to allocate
 *
 * This function is supposed to be used only by a HD-audio controller
 * driver that needs the interaction with graphics driver.
 *
 * This function initializes and sets up the audio component to communicate
 * with graphics driver.
 *
 * Unlike snd_hdac_i915_init(), this function doesn't synchronize with the
 * binding with the DRM component.  Each caller needs to sync via master_bind
 * audio_ops.
 *
 * Returns zero for success or a negative error code.
 */
int snd_hdac_acomp_init(struct hdac_bus *bus,
			const struct drm_audio_component_audio_ops *aops,
			int (*match_master)(struct device *, int, void *),
			size_t extra_size)
{
	struct component_match *match = NULL;
	struct device *dev = bus->dev;
	struct drm_audio_component *acomp;
	int ret;

	if (WARN_ON(hdac_get_acomp(dev)))
		return -EBUSY;

	/* devres-managed so it is freed automatically on device teardown */
	acomp = devres_alloc(hdac_acomp_release, sizeof(*acomp) + extra_size,
			     GFP_KERNEL);
	if (!acomp)
		return -ENOMEM;
	acomp->audio_ops = aops;
	init_completion(&acomp->master_bind_complete);
	bus->audio_component = acomp;
	devres_add(dev, acomp);

	component_match_add_typed(dev, &match, match_master, bus);
	ret = component_master_add_with_match(dev, &hdac_component_master_ops,
					      match);
	if (ret < 0)
		goto out_err;

	return 0;

out_err:
	bus->audio_component = NULL;
	devres_destroy(dev, hdac_acomp_release, NULL, NULL);
	dev_info(dev, "failed to add audio component master (%d)\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_hdac_acomp_init);

/**
 * snd_hdac_acomp_exit - Finalize audio component
 * @bus: HDA core bus
 *
 * This function is supposed to be used only by a HD-audio controller
 * driver that needs the interaction with graphics driver.
 *
 * This function releases the audio component that has been used.
 *
 * Returns zero for success or a negative error code.
 */
int snd_hdac_acomp_exit(struct hdac_bus *bus)
{
	struct device *dev = bus->dev;
	struct drm_audio_component *acomp = bus->audio_component;

	if (!acomp)
		return 0;

	/* a leftover wakeref at exit is a bug; release it anyway */
	if (WARN_ON(bus->display_power_active) && acomp->ops)
		acomp->ops->put_power(acomp->dev, bus->display_power_active);

	bus->display_power_active = 0;
	bus->display_power_status = 0;

	component_master_del(dev, &hdac_component_master_ops);

	bus->audio_component = NULL;
	devres_destroy(dev, hdac_acomp_release, NULL, NULL);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_acomp_exit);
linux-master
sound/hda/hdac_component.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * hdac-ext-stream.c - HD-audio extended stream operations.
 *
 * Copyright (C) 2015 Intel Corp
 * Author: Jeeja KP <[email protected]>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/hda_register.h>
#include <sound/hdaudio_ext.h>
#include <sound/compress_driver.h>

/**
 * snd_hdac_ext_stream_init - initialize each stream (aka device)
 * @bus: HD-audio core bus
 * @hext_stream: HD-audio ext core stream object to initialize
 * @idx: stream index number
 * @direction: stream direction (SNDRV_PCM_STREAM_PLAYBACK or SNDRV_PCM_STREAM_CAPTURE)
 * @tag: the tag id to assign
 *
 * initialize the stream, if ppcap is enabled then init those and then
 * invoke hdac stream initialization routine
 */
static void snd_hdac_ext_stream_init(struct hdac_bus *bus,
				     struct hdac_ext_stream *hext_stream,
				     int idx, int direction, int tag)
{
	if (bus->ppcap) {
		/* per-stream host and link register windows in the PP cap space */
		hext_stream->pphc_addr = bus->ppcap + AZX_PPHC_BASE +
					 AZX_PPHC_INTERVAL * idx;

		hext_stream->pplc_addr = bus->ppcap + AZX_PPLC_BASE +
					 AZX_PPLC_MULTI * bus->num_streams +
					 AZX_PPLC_INTERVAL * idx;
	}

	hext_stream->decoupled = false;
	snd_hdac_stream_init(bus, &hext_stream->hstream, idx, direction, tag);
}

/**
 * snd_hdac_ext_stream_init_all - create and initialize the stream objects
 *   for an extended hda bus
 * @bus: HD-audio core bus
 * @start_idx: start index for streams
 * @num_stream: number of streams to initialize
 * @dir: direction of streams
 */
int snd_hdac_ext_stream_init_all(struct hdac_bus *bus, int start_idx,
				 int num_stream, int dir)
{
	int stream_tag = 0;
	int i, tag, idx = start_idx;

	for (i = 0; i < num_stream; i++) {
		struct hdac_ext_stream *hext_stream =
				kzalloc(sizeof(*hext_stream), GFP_KERNEL);
		if (!hext_stream)
			return -ENOMEM;
		/* stream tags are 1-based */
		tag = ++stream_tag;
		snd_hdac_ext_stream_init(bus, hext_stream, idx, dir, tag);
		idx++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_init_all);

/**
 * snd_hdac_ext_stream_free_all - free hdac extended stream objects
 *
 * @bus: HD-audio core bus
 */
void snd_hdac_ext_stream_free_all(struct hdac_bus *bus)
{
	struct hdac_stream *s, *_s;
	struct hdac_ext_stream *hext_stream;

	list_for_each_entry_safe(s, _s, &bus->stream_list, list) {
		hext_stream = stream_to_hdac_ext_stream(s);
		/* re-couple before freeing so hardware state is consistent */
		snd_hdac_ext_stream_decouple(bus, hext_stream, false);
		list_del(&s->list);
		kfree(hext_stream);
	}
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_free_all);

/* caller must hold bus->reg_lock; see snd_hdac_ext_stream_decouple() */
void snd_hdac_ext_stream_decouple_locked(struct hdac_bus *bus,
					 struct hdac_ext_stream *hext_stream,
					 bool decouple)
{
	struct hdac_stream *hstream = &hext_stream->hstream;
	u32 val;
	int mask = AZX_PPCTL_PROCEN(hstream->index);

	/* update the PROCEN bit only when it differs from the request */
	val = readw(bus->ppcap + AZX_REG_PP_PPCTL) & mask;

	if (decouple && !val)
		snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, mask, mask);
	else if (!decouple && val)
		snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, mask, 0);

	hext_stream->decoupled = decouple;
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_decouple_locked);

/**
 * snd_hdac_ext_stream_decouple - decouple the hdac stream
 * @bus: HD-audio core bus
 * @hext_stream: HD-audio ext core stream object to initialize
 * @decouple: flag to decouple
 */
void snd_hdac_ext_stream_decouple(struct hdac_bus *bus,
				  struct hdac_ext_stream *hext_stream,
				  bool decouple)
{
	spin_lock_irq(&bus->reg_lock);
	snd_hdac_ext_stream_decouple_locked(bus, hext_stream, decouple);
	spin_unlock_irq(&bus->reg_lock);
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_decouple);

/**
 * snd_hdac_ext_stream_start - start a stream
 * @hext_stream: HD-audio ext core stream to start
 */
void snd_hdac_ext_stream_start(struct hdac_ext_stream *hext_stream)
{
	snd_hdac_updatel(hext_stream->pplc_addr, AZX_REG_PPLCCTL,
			 AZX_PPLCCTL_RUN, AZX_PPLCCTL_RUN);
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_start);

/**
 * snd_hdac_ext_stream_clear - stop a stream DMA
 * @hext_stream: HD-audio ext core stream to stop
 */
void snd_hdac_ext_stream_clear(struct hdac_ext_stream *hext_stream)
{
	snd_hdac_updatel(hext_stream->pplc_addr, AZX_REG_PPLCCTL,
			 AZX_PPLCCTL_RUN, 0);
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_clear);

/**
 * snd_hdac_ext_stream_reset - reset a stream
 * @hext_stream: HD-audio ext core stream to reset
 */
void snd_hdac_ext_stream_reset(struct hdac_ext_stream *hext_stream)
{
	unsigned char val;
	int timeout;

	snd_hdac_ext_stream_clear(hext_stream);

	/* enter reset and poll until the hardware acknowledges it */
	snd_hdac_updatel(hext_stream->pplc_addr, AZX_REG_PPLCCTL,
			 AZX_PPLCCTL_STRST, AZX_PPLCCTL_STRST);
	udelay(3);
	timeout = 50;
	do {
		val = readl(hext_stream->pplc_addr + AZX_REG_PPLCCTL) &
				AZX_PPLCCTL_STRST;
		if (val)
			break;
		udelay(3);
	} while (--timeout);

	val &= ~AZX_PPLCCTL_STRST;
	writel(val, hext_stream->pplc_addr + AZX_REG_PPLCCTL);
	udelay(3);

	timeout = 50;
	/* waiting for hardware to report that the stream is out of reset */
	do {
		val = readl(hext_stream->pplc_addr + AZX_REG_PPLCCTL) &
				AZX_PPLCCTL_STRST;
		if (!val)
			break;
		udelay(3);
	} while (--timeout);
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_reset);

/**
 * snd_hdac_ext_stream_setup - set up the SD for streaming
 * @hext_stream: HD-audio ext core stream to set up
 * @fmt: stream format
 */
int snd_hdac_ext_stream_setup(struct hdac_ext_stream *hext_stream, int fmt)
{
	struct hdac_stream *hstream = &hext_stream->hstream;
	unsigned int val;

	/* make sure the run bit is zero for SD */
	snd_hdac_ext_stream_clear(hext_stream);
	/* program the stream_tag */
	val = readl(hext_stream->pplc_addr + AZX_REG_PPLCCTL);
	val = (val & ~AZX_PPLCCTL_STRM_MASK) |
		(hstream->stream_tag << AZX_PPLCCTL_STRM_SHIFT);
	writel(val, hext_stream->pplc_addr + AZX_REG_PPLCCTL);

	/* program the stream format */
	writew(fmt, hext_stream->pplc_addr + AZX_REG_PPLCFMT);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_setup);

/* find a free decoupled link stream for @substream and mark it busy */
static struct hdac_ext_stream *
hdac_ext_link_dma_stream_assign(struct hdac_bus *bus,
				struct snd_pcm_substream *substream)
{
	struct hdac_ext_stream *res = NULL;
	struct hdac_stream *hstream = NULL;

	if (!bus->ppcap) {
		dev_err(bus->dev, "stream type not supported\n");
		return NULL;
	}

	spin_lock_irq(&bus->reg_lock);
	list_for_each_entry(hstream, &bus->stream_list, list) {
		struct hdac_ext_stream *hext_stream = container_of(hstream,
								   struct hdac_ext_stream,
								   hstream);
		if (hstream->direction != substream->stream)
			continue;

		/* check if link stream is available */
		if (!hext_stream->link_locked) {
			res = hext_stream;
			break;
		}

	}
	if (res) {
		snd_hdac_ext_stream_decouple_locked(bus, res, true);
		res->link_locked = 1;
		res->link_substream = substream;
	}
	spin_unlock_irq(&bus->reg_lock);
	return res;
}

/* find a free decoupled host stream for @substream and mark it opened */
static struct hdac_ext_stream *
hdac_ext_host_dma_stream_assign(struct hdac_bus *bus,
				struct snd_pcm_substream *substream)
{
	struct hdac_ext_stream *res = NULL;
	struct hdac_stream *hstream = NULL;

	if (!bus->ppcap) {
		dev_err(bus->dev, "stream type not supported\n");
		return NULL;
	}

	spin_lock_irq(&bus->reg_lock);
	list_for_each_entry(hstream, &bus->stream_list, list) {
		struct hdac_ext_stream *hext_stream = container_of(hstream,
								   struct hdac_ext_stream,
								   hstream);
		if (hstream->direction != substream->stream)
			continue;

		if (!hstream->opened) {
			res = hext_stream;
			break;
		}
	}
	if (res) {
		snd_hdac_ext_stream_decouple_locked(bus, res, true);
		res->hstream.opened = 1;
		res->hstream.running = 0;
		res->hstream.substream = substream;
	}
	spin_unlock_irq(&bus->reg_lock);

	return res;
}

/**
 * snd_hdac_ext_stream_assign - assign a stream for the PCM
 * @bus: HD-audio core bus
 * @substream: PCM substream to assign
 * @type: type of stream (coupled, host or link stream)
 *
 * This assigns the stream based on the type (coupled/host/link), for the
 * given PCM substream, assigns it and returns the stream object
 *
 * coupled: Looks for an unused stream
 * host: Looks for an unused decoupled host stream
 * link: Looks for an unused decoupled link stream
 *
 * If no stream is free, returns NULL.  The function tries to keep using
 * the same stream object when it's used beforehand.  when a stream is
 * decoupled, it becomes a host stream and link stream.
 */
struct hdac_ext_stream *snd_hdac_ext_stream_assign(struct hdac_bus *bus,
						   struct snd_pcm_substream *substream,
						   int type)
{
	struct hdac_ext_stream *hext_stream = NULL;
	struct hdac_stream *hstream = NULL;

	switch (type) {
	case HDAC_EXT_STREAM_TYPE_COUPLED:
		hstream = snd_hdac_stream_assign(bus, substream);
		if (hstream)
			hext_stream = container_of(hstream,
						   struct hdac_ext_stream,
						   hstream);
		return hext_stream;

	case HDAC_EXT_STREAM_TYPE_HOST:
		return hdac_ext_host_dma_stream_assign(bus, substream);

	case HDAC_EXT_STREAM_TYPE_LINK:
		return hdac_ext_link_dma_stream_assign(bus, substream);

	default:
		return NULL;
	}
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_assign);

/**
 * snd_hdac_ext_stream_release - release the assigned stream
 * @hext_stream: HD-audio ext core stream to release
 * @type: type of stream (coupled, host or link stream)
 *
 * Release the stream that has been assigned by snd_hdac_ext_stream_assign().
 */
void snd_hdac_ext_stream_release(struct hdac_ext_stream *hext_stream, int type)
{
	struct hdac_bus *bus = hext_stream->hstream.bus;

	switch (type) {
	case HDAC_EXT_STREAM_TYPE_COUPLED:
		snd_hdac_stream_release(&hext_stream->hstream);
		break;

	case HDAC_EXT_STREAM_TYPE_HOST:
		spin_lock_irq(&bus->reg_lock);
		/* couple link only if not in use */
		if (!hext_stream->link_locked)
			snd_hdac_ext_stream_decouple_locked(bus, hext_stream, false);
		snd_hdac_stream_release_locked(&hext_stream->hstream);
		spin_unlock_irq(&bus->reg_lock);
		break;

	case HDAC_EXT_STREAM_TYPE_LINK:
		spin_lock_irq(&bus->reg_lock);
		/* couple host only if not in use */
		if (!hext_stream->hstream.opened)
			snd_hdac_ext_stream_decouple_locked(bus, hext_stream, false);
		hext_stream->link_locked = 0;
		hext_stream->link_substream = NULL;
		spin_unlock_irq(&bus->reg_lock);
		break;

	default:
		dev_dbg(bus->dev, "Invalid type %d\n", type);
	}
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_release);

/**
 * snd_hdac_ext_cstream_assign - assign a host stream for compress
 * @bus: HD-audio core bus
 * @cstream: Compress stream to assign
 *
 * Assign an unused host stream for the given compress stream.
 * If no stream is free, NULL is returned.  Stream is decoupled
 * before assignment.
 */
struct hdac_ext_stream *snd_hdac_ext_cstream_assign(struct hdac_bus *bus,
						    struct snd_compr_stream *cstream)
{
	struct hdac_ext_stream *res = NULL;
	struct hdac_stream *hstream;

	spin_lock_irq(&bus->reg_lock);
	list_for_each_entry(hstream, &bus->stream_list, list) {
		struct hdac_ext_stream *hext_stream = stream_to_hdac_ext_stream(hstream);

		if (hstream->direction != cstream->direction)
			continue;

		if (!hstream->opened) {
			res = hext_stream;
			break;
		}
	}
	if (res) {
		snd_hdac_ext_stream_decouple_locked(bus, res, true);
		res->hstream.opened = 1;
		res->hstream.running = 0;
		res->hstream.cstream = cstream;
	}
	spin_unlock_irq(&bus->reg_lock);

	return res;
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_cstream_assign);
linux-master
sound/hda/ext/hdac_ext_stream.c
// SPDX-License-Identifier: GPL-2.0-only /* * hdac-ext-bus.c - HD-audio extended core bus functions. * * Copyright (C) 2014-2015 Intel Corp * Author: Jeeja KP <[email protected]> * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/module.h> #include <linux/slab.h> #include <linux/io.h> #include <sound/hdaudio_ext.h> MODULE_DESCRIPTION("HDA extended core"); MODULE_LICENSE("GPL v2"); /** * snd_hdac_ext_bus_init - initialize a HD-audio extended bus * @bus: the pointer to HDAC bus object * @dev: device pointer * @ops: bus verb operators * @ext_ops: operators used for ASoC HDA codec drivers * * Returns 0 if successful, or a negative error code. */ int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev, const struct hdac_bus_ops *ops, const struct hdac_ext_bus_ops *ext_ops) { int ret; ret = snd_hdac_bus_init(bus, dev, ops); if (ret < 0) return ret; bus->ext_ops = ext_ops; /* FIXME: * Currently only one bus is supported, if there is device with more * buses, bus->idx should be greater than 0, but there needs to be a * reliable way to always assign same number. 
*/ bus->idx = 0; bus->cmd_dma_state = true; return 0; } EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_init); /** * snd_hdac_ext_bus_exit - clean up a HD-audio extended bus * @bus: the pointer to HDAC bus object */ void snd_hdac_ext_bus_exit(struct hdac_bus *bus) { snd_hdac_bus_exit(bus); WARN_ON(!list_empty(&bus->hlink_list)); } EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_exit); /** * snd_hdac_ext_bus_device_remove - remove HD-audio extended codec base devices * * @bus: the pointer to HDAC bus object */ void snd_hdac_ext_bus_device_remove(struct hdac_bus *bus) { struct hdac_device *codec, *__codec; /* * we need to remove all the codec devices objects created in the * snd_hdac_ext_bus_device_init */ list_for_each_entry_safe(codec, __codec, &bus->codec_list, list) { snd_hdac_device_unregister(codec); put_device(&codec->dev); } } EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_device_remove); #define dev_to_hdac(dev) (container_of((dev), \ struct hdac_device, dev)) static inline struct hdac_driver *get_hdrv(struct device *dev) { struct hdac_driver *hdrv = drv_to_hdac_driver(dev->driver); return hdrv; } static inline struct hdac_device *get_hdev(struct device *dev) { struct hdac_device *hdev = dev_to_hdac_dev(dev); return hdev; } static int hda_ext_drv_probe(struct device *dev) { return (get_hdrv(dev))->probe(get_hdev(dev)); } static int hdac_ext_drv_remove(struct device *dev) { return (get_hdrv(dev))->remove(get_hdev(dev)); } static void hdac_ext_drv_shutdown(struct device *dev) { return (get_hdrv(dev))->shutdown(get_hdev(dev)); } /** * snd_hda_ext_driver_register - register a driver for ext hda devices * * @drv: ext hda driver structure */ int snd_hda_ext_driver_register(struct hdac_driver *drv) { drv->type = HDA_DEV_ASOC; drv->driver.bus = &snd_hda_bus_type; /* we use default match */ if (drv->probe) drv->driver.probe = hda_ext_drv_probe; if (drv->remove) drv->driver.remove = hdac_ext_drv_remove; if (drv->shutdown) drv->driver.shutdown = hdac_ext_drv_shutdown; return driver_register(&drv->driver); 
} EXPORT_SYMBOL_GPL(snd_hda_ext_driver_register); /** * snd_hda_ext_driver_unregister - unregister a driver for ext hda devices * * @drv: ext hda driver structure */ void snd_hda_ext_driver_unregister(struct hdac_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(snd_hda_ext_driver_unregister);
linux-master
sound/hda/ext/hdac_ext_bus.c
// SPDX-License-Identifier: GPL-2.0-only /* * hdac-ext-controller.c - HD-audio extended controller functions. * * Copyright (C) 2014-2015 Intel Corp * Author: Jeeja KP <[email protected]> * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/delay.h> #include <linux/slab.h> #include <sound/hda_register.h> #include <sound/hdaudio_ext.h> /* * processing pipe helpers - these helpers are useful for dealing with HDA * new capability of processing pipelines */ /** * snd_hdac_ext_bus_ppcap_enable - enable/disable processing pipe capability * @bus: the pointer to HDAC bus object * @enable: flag to turn on/off the capability */ void snd_hdac_ext_bus_ppcap_enable(struct hdac_bus *bus, bool enable) { if (!bus->ppcap) { dev_err(bus->dev, "Address of PP capability is NULL"); return; } if (enable) snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, AZX_PPCTL_GPROCEN, AZX_PPCTL_GPROCEN); else snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, AZX_PPCTL_GPROCEN, 0); } EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_ppcap_enable); /** * snd_hdac_ext_bus_ppcap_int_enable - ppcap interrupt enable/disable * @bus: the pointer to HDAC bus object * @enable: flag to enable/disable interrupt */ void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_bus *bus, bool enable) { if (!bus->ppcap) { dev_err(bus->dev, "Address of PP capability is NULL\n"); return; } if (enable) snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, AZX_PPCTL_PIE, AZX_PPCTL_PIE); else snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, AZX_PPCTL_PIE, 0); } EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_ppcap_int_enable); /* * Multilink helpers - these helpers are useful for dealing with HDA * new multilink capability */ /** * snd_hdac_ext_bus_get_ml_capabilities - get multilink capability * @bus: the pointer to HDAC bus object * * This will parse all links and read the mlink capabilities and add them * in hlink_list of extended hdac bus * Note: 
this will be freed on bus exit by driver */ int snd_hdac_ext_bus_get_ml_capabilities(struct hdac_bus *bus) { int idx; u32 link_count; struct hdac_ext_link *hlink; link_count = readl(bus->mlcap + AZX_REG_ML_MLCD) + 1; dev_dbg(bus->dev, "In %s Link count: %d\n", __func__, link_count); for (idx = 0; idx < link_count; idx++) { hlink = kzalloc(sizeof(*hlink), GFP_KERNEL); if (!hlink) return -ENOMEM; hlink->index = idx; hlink->bus = bus; hlink->ml_addr = bus->mlcap + AZX_ML_BASE + (AZX_ML_INTERVAL * idx); hlink->lcaps = readl(hlink->ml_addr + AZX_REG_ML_LCAP); hlink->lsdiid = readw(hlink->ml_addr + AZX_REG_ML_LSDIID); /* since link in On, update the ref */ hlink->ref_count = 1; list_add_tail(&hlink->list, &bus->hlink_list); } return 0; } EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_get_ml_capabilities); /** * snd_hdac_ext_link_free_all- free hdac extended link objects * * @bus: the pointer to HDAC bus object */ void snd_hdac_ext_link_free_all(struct hdac_bus *bus) { struct hdac_ext_link *hlink; while (!list_empty(&bus->hlink_list)) { hlink = list_first_entry(&bus->hlink_list, struct hdac_ext_link, list); list_del(&hlink->list); kfree(hlink); } } EXPORT_SYMBOL_GPL(snd_hdac_ext_link_free_all); /** * snd_hdac_ext_bus_get_hlink_by_addr - get hlink at specified address * @bus: hlink's parent bus device * @addr: codec device address * * Returns hlink object or NULL if matching hlink is not found. 
*/ struct hdac_ext_link *snd_hdac_ext_bus_get_hlink_by_addr(struct hdac_bus *bus, int addr) { struct hdac_ext_link *hlink; list_for_each_entry(hlink, &bus->hlink_list, list) if (hlink->lsdiid & (0x1 << addr)) return hlink; return NULL; } EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_get_hlink_by_addr); /** * snd_hdac_ext_bus_get_hlink_by_name - get hlink based on codec name * @bus: the pointer to HDAC bus object * @codec_name: codec name */ struct hdac_ext_link *snd_hdac_ext_bus_get_hlink_by_name(struct hdac_bus *bus, const char *codec_name) { int bus_idx, addr; if (sscanf(codec_name, "ehdaudio%dD%d", &bus_idx, &addr) != 2) return NULL; if (bus->idx != bus_idx) return NULL; if (addr < 0 || addr > 31) return NULL; return snd_hdac_ext_bus_get_hlink_by_addr(bus, addr); } EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_get_hlink_by_name); static int check_hdac_link_power_active(struct hdac_ext_link *hlink, bool enable) { int timeout; u32 val; int mask = (1 << AZX_ML_LCTL_CPA_SHIFT); udelay(3); timeout = 150; do { val = readl(hlink->ml_addr + AZX_REG_ML_LCTL); if (enable) { if (((val & mask) >> AZX_ML_LCTL_CPA_SHIFT)) return 0; } else { if (!((val & mask) >> AZX_ML_LCTL_CPA_SHIFT)) return 0; } udelay(3); } while (--timeout); return -EIO; } /** * snd_hdac_ext_bus_link_power_up -power up hda link * @hlink: HD-audio extended link */ int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *hlink) { snd_hdac_updatel(hlink->ml_addr, AZX_REG_ML_LCTL, AZX_ML_LCTL_SPA, AZX_ML_LCTL_SPA); return check_hdac_link_power_active(hlink, true); } EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_power_up); /** * snd_hdac_ext_bus_link_power_down -power down hda link * @hlink: HD-audio extended link */ int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *hlink) { snd_hdac_updatel(hlink->ml_addr, AZX_REG_ML_LCTL, AZX_ML_LCTL_SPA, 0); return check_hdac_link_power_active(hlink, false); } EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_power_down); /** * snd_hdac_ext_bus_link_power_up_all -power up all hda link * @bus: the 
pointer to HDAC bus object */ int snd_hdac_ext_bus_link_power_up_all(struct hdac_bus *bus) { struct hdac_ext_link *hlink = NULL; int ret; list_for_each_entry(hlink, &bus->hlink_list, list) { ret = snd_hdac_ext_bus_link_power_up(hlink); if (ret < 0) return ret; } return 0; } EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_power_up_all); /** * snd_hdac_ext_bus_link_power_down_all -power down all hda link * @bus: the pointer to HDAC bus object */ int snd_hdac_ext_bus_link_power_down_all(struct hdac_bus *bus) { struct hdac_ext_link *hlink = NULL; int ret; list_for_each_entry(hlink, &bus->hlink_list, list) { ret = snd_hdac_ext_bus_link_power_down(hlink); if (ret < 0) return ret; } return 0; } EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_power_down_all); /** * snd_hdac_ext_bus_link_set_stream_id - maps stream id to link output * @link: HD-audio ext link to set up * @stream: stream id */ void snd_hdac_ext_bus_link_set_stream_id(struct hdac_ext_link *link, int stream) { snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV, (1 << stream), 1 << stream); } EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_set_stream_id); /** * snd_hdac_ext_bus_link_clear_stream_id - maps stream id to link output * @link: HD-audio ext link to set up * @stream: stream id */ void snd_hdac_ext_bus_link_clear_stream_id(struct hdac_ext_link *link, int stream) { snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV, (1 << stream), 0); } EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_clear_stream_id); int snd_hdac_ext_bus_link_get(struct hdac_bus *bus, struct hdac_ext_link *hlink) { unsigned long codec_mask; int ret = 0; mutex_lock(&bus->lock); /* * if we move from 0 to 1, count will be 1 so power up this link * as well, also check the dma status and trigger that */ if (++hlink->ref_count == 1) { if (!bus->cmd_dma_state) { snd_hdac_bus_init_cmd_io(bus); bus->cmd_dma_state = true; } ret = snd_hdac_ext_bus_link_power_up(hlink); /* * clear the register to invalidate all the output streams */ snd_hdac_updatew(hlink->ml_addr, 
AZX_REG_ML_LOSIDV, AZX_ML_LOSIDV_STREAM_MASK, 0); /* * wait for 521usec for codec to report status * HDA spec section 4.3 - Codec Discovery */ udelay(521); codec_mask = snd_hdac_chip_readw(bus, STATESTS); dev_dbg(bus->dev, "codec_mask = 0x%lx\n", codec_mask); snd_hdac_chip_writew(bus, STATESTS, codec_mask); if (!bus->codec_mask) bus->codec_mask = codec_mask; } mutex_unlock(&bus->lock); return ret; } EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_get); int snd_hdac_ext_bus_link_put(struct hdac_bus *bus, struct hdac_ext_link *hlink) { int ret = 0; struct hdac_ext_link *hlink_tmp; bool link_up = false; mutex_lock(&bus->lock); /* * if we move from 1 to 0, count will be 0 * so power down this link as well */ if (--hlink->ref_count == 0) { ret = snd_hdac_ext_bus_link_power_down(hlink); /* * now check if all links are off, if so turn off * cmd dma as well */ list_for_each_entry(hlink_tmp, &bus->hlink_list, list) { if (hlink_tmp->ref_count) { link_up = true; break; } } if (!link_up) { snd_hdac_bus_stop_cmd_io(bus); bus->cmd_dma_state = false; } } mutex_unlock(&bus->lock); return ret; } EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_put); static void hdac_ext_codec_link_up(struct hdac_device *codec) { const char *devname = dev_name(&codec->dev); struct hdac_ext_link *hlink = snd_hdac_ext_bus_get_hlink_by_name(codec->bus, devname); if (hlink) snd_hdac_ext_bus_link_get(codec->bus, hlink); } static void hdac_ext_codec_link_down(struct hdac_device *codec) { const char *devname = dev_name(&codec->dev); struct hdac_ext_link *hlink = snd_hdac_ext_bus_get_hlink_by_name(codec->bus, devname); if (hlink) snd_hdac_ext_bus_link_put(codec->bus, hlink); } void snd_hdac_ext_bus_link_power(struct hdac_device *codec, bool enable) { struct hdac_bus *bus = codec->bus; bool oldstate = test_bit(codec->addr, &bus->codec_powered); if (enable == oldstate) return; snd_hdac_bus_link_power(codec, enable); if (enable) hdac_ext_codec_link_up(codec); else hdac_ext_codec_link_down(codec); } 
EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_power);
linux-master
sound/hda/ext/hdac_ext_controller.c
// SPDX-License-Identifier: GPL-2.0 // // soc-component.c // // Copyright 2009-2011 Wolfson Microelectronics PLC. // Copyright (C) 2019 Renesas Electronics Corp. // // Mark Brown <[email protected]> // Kuninori Morimoto <[email protected]> // #include <linux/module.h> #include <linux/pm_runtime.h> #include <sound/soc.h> #include <linux/bitops.h> #define soc_component_ret(dai, ret) _soc_component_ret(dai, __func__, ret, -1) #define soc_component_ret_reg_rw(dai, ret, reg) _soc_component_ret(dai, __func__, ret, reg) static inline int _soc_component_ret(struct snd_soc_component *component, const char *func, int ret, int reg) { /* Positive/Zero values are not errors */ if (ret >= 0) return ret; /* Negative values might be errors */ switch (ret) { case -EPROBE_DEFER: case -ENOTSUPP: break; default: if (reg == -1) dev_err(component->dev, "ASoC: error at %s on %s: %d\n", func, component->name, ret); else dev_err(component->dev, "ASoC: error at %s on %s for register: [0x%08x] %d\n", func, component->name, reg, ret); } return ret; } static inline int soc_component_field_shift(struct snd_soc_component *component, unsigned int mask) { if (!mask) { dev_err(component->dev, "ASoC: error field mask is zero for %s\n", component->name); return 0; } return (ffs(mask) - 1); } /* * We might want to check substream by using list. * In such case, we can update these macros. */ #define soc_component_mark_push(component, substream, tgt) ((component)->mark_##tgt = substream) #define soc_component_mark_pop(component, substream, tgt) ((component)->mark_##tgt = NULL) #define soc_component_mark_match(component, substream, tgt) ((component)->mark_##tgt == substream) void snd_soc_component_set_aux(struct snd_soc_component *component, struct snd_soc_aux_dev *aux) { component->init = (aux) ? 
aux->init : NULL; } int snd_soc_component_init(struct snd_soc_component *component) { int ret = 0; if (component->init) ret = component->init(component); return soc_component_ret(component, ret); } /** * snd_soc_component_set_sysclk - configure COMPONENT system or master clock. * @component: COMPONENT * @clk_id: DAI specific clock ID * @source: Source for the clock * @freq: new clock frequency in Hz * @dir: new clock direction - input/output. * * Configures the CODEC master (MCLK) or system (SYSCLK) clocking. */ int snd_soc_component_set_sysclk(struct snd_soc_component *component, int clk_id, int source, unsigned int freq, int dir) { int ret = -ENOTSUPP; if (component->driver->set_sysclk) ret = component->driver->set_sysclk(component, clk_id, source, freq, dir); return soc_component_ret(component, ret); } EXPORT_SYMBOL_GPL(snd_soc_component_set_sysclk); /* * snd_soc_component_set_pll - configure component PLL. * @component: COMPONENT * @pll_id: DAI specific PLL ID * @source: DAI specific source for the PLL * @freq_in: PLL input clock frequency in Hz * @freq_out: requested PLL output clock frequency in Hz * * Configures and enables PLL to generate output clock based on input clock. 
*/ int snd_soc_component_set_pll(struct snd_soc_component *component, int pll_id, int source, unsigned int freq_in, unsigned int freq_out) { int ret = -EINVAL; if (component->driver->set_pll) ret = component->driver->set_pll(component, pll_id, source, freq_in, freq_out); return soc_component_ret(component, ret); } EXPORT_SYMBOL_GPL(snd_soc_component_set_pll); void snd_soc_component_seq_notifier(struct snd_soc_component *component, enum snd_soc_dapm_type type, int subseq) { if (component->driver->seq_notifier) component->driver->seq_notifier(component, type, subseq); } int snd_soc_component_stream_event(struct snd_soc_component *component, int event) { int ret = 0; if (component->driver->stream_event) ret = component->driver->stream_event(component, event); return soc_component_ret(component, ret); } int snd_soc_component_set_bias_level(struct snd_soc_component *component, enum snd_soc_bias_level level) { int ret = 0; if (component->driver->set_bias_level) ret = component->driver->set_bias_level(component, level); return soc_component_ret(component, ret); } int snd_soc_component_enable_pin(struct snd_soc_component *component, const char *pin) { struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component); return snd_soc_dapm_enable_pin(dapm, pin); } EXPORT_SYMBOL_GPL(snd_soc_component_enable_pin); int snd_soc_component_enable_pin_unlocked(struct snd_soc_component *component, const char *pin) { struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component); return snd_soc_dapm_enable_pin_unlocked(dapm, pin); } EXPORT_SYMBOL_GPL(snd_soc_component_enable_pin_unlocked); int snd_soc_component_disable_pin(struct snd_soc_component *component, const char *pin) { struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component); return snd_soc_dapm_disable_pin(dapm, pin); } EXPORT_SYMBOL_GPL(snd_soc_component_disable_pin); int snd_soc_component_disable_pin_unlocked(struct snd_soc_component *component, const char *pin) { struct 
snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component); return snd_soc_dapm_disable_pin_unlocked(dapm, pin); } EXPORT_SYMBOL_GPL(snd_soc_component_disable_pin_unlocked); int snd_soc_component_nc_pin(struct snd_soc_component *component, const char *pin) { struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component); return snd_soc_dapm_nc_pin(dapm, pin); } EXPORT_SYMBOL_GPL(snd_soc_component_nc_pin); int snd_soc_component_nc_pin_unlocked(struct snd_soc_component *component, const char *pin) { struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component); return snd_soc_dapm_nc_pin_unlocked(dapm, pin); } EXPORT_SYMBOL_GPL(snd_soc_component_nc_pin_unlocked); int snd_soc_component_get_pin_status(struct snd_soc_component *component, const char *pin) { struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component); return snd_soc_dapm_get_pin_status(dapm, pin); } EXPORT_SYMBOL_GPL(snd_soc_component_get_pin_status); int snd_soc_component_force_enable_pin(struct snd_soc_component *component, const char *pin) { struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component); return snd_soc_dapm_force_enable_pin(dapm, pin); } EXPORT_SYMBOL_GPL(snd_soc_component_force_enable_pin); int snd_soc_component_force_enable_pin_unlocked( struct snd_soc_component *component, const char *pin) { struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component); return snd_soc_dapm_force_enable_pin_unlocked(dapm, pin); } EXPORT_SYMBOL_GPL(snd_soc_component_force_enable_pin_unlocked); int snd_soc_component_notify_control(struct snd_soc_component *component, const char * const ctl) { char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; struct snd_kcontrol *kctl; if (component->name_prefix) snprintf(name, ARRAY_SIZE(name), "%s %s", component->name_prefix, ctl); else snprintf(name, ARRAY_SIZE(name), "%s", ctl); kctl = snd_soc_card_get_kcontrol(component->card, name); if (!kctl) return soc_component_ret(component, -EINVAL); 
snd_ctl_notify(component->card->snd_card, SNDRV_CTL_EVENT_MASK_VALUE, &kctl->id); return 0; } EXPORT_SYMBOL_GPL(snd_soc_component_notify_control); /** * snd_soc_component_set_jack - configure component jack. * @component: COMPONENTs * @jack: structure to use for the jack * @data: can be used if codec driver need extra data for configuring jack * * Configures and enables jack detection function. */ int snd_soc_component_set_jack(struct snd_soc_component *component, struct snd_soc_jack *jack, void *data) { int ret = -ENOTSUPP; if (component->driver->set_jack) ret = component->driver->set_jack(component, jack, data); return soc_component_ret(component, ret); } EXPORT_SYMBOL_GPL(snd_soc_component_set_jack); /** * snd_soc_component_get_jack_type * @component: COMPONENTs * * Returns the jack type of the component * This can either be the supported type or one read from * devicetree with the property: jack-type. */ int snd_soc_component_get_jack_type( struct snd_soc_component *component) { int ret = -ENOTSUPP; if (component->driver->get_jack_type) ret = component->driver->get_jack_type(component); return soc_component_ret(component, ret); } EXPORT_SYMBOL_GPL(snd_soc_component_get_jack_type); int snd_soc_component_module_get(struct snd_soc_component *component, void *mark, int upon_open) { int ret = 0; if (component->driver->module_get_upon_open == !!upon_open && !try_module_get(component->dev->driver->owner)) ret = -ENODEV; /* mark module if succeeded */ if (ret == 0) soc_component_mark_push(component, mark, module); return soc_component_ret(component, ret); } void snd_soc_component_module_put(struct snd_soc_component *component, void *mark, int upon_open, int rollback) { if (rollback && !soc_component_mark_match(component, mark, module)) return; if (component->driver->module_get_upon_open == !!upon_open) module_put(component->dev->driver->owner); /* remove the mark from module */ soc_component_mark_pop(component, mark, module); } int snd_soc_component_open(struct 
snd_soc_component *component, struct snd_pcm_substream *substream) { int ret = 0; if (component->driver->open) ret = component->driver->open(component, substream); /* mark substream if succeeded */ if (ret == 0) soc_component_mark_push(component, substream, open); return soc_component_ret(component, ret); } int snd_soc_component_close(struct snd_soc_component *component, struct snd_pcm_substream *substream, int rollback) { int ret = 0; if (rollback && !soc_component_mark_match(component, substream, open)) return 0; if (component->driver->close) ret = component->driver->close(component, substream); /* remove marked substream */ soc_component_mark_pop(component, substream, open); return soc_component_ret(component, ret); } void snd_soc_component_suspend(struct snd_soc_component *component) { if (component->driver->suspend) component->driver->suspend(component); component->suspended = 1; } void snd_soc_component_resume(struct snd_soc_component *component) { if (component->driver->resume) component->driver->resume(component); component->suspended = 0; } int snd_soc_component_is_suspended(struct snd_soc_component *component) { return component->suspended; } int snd_soc_component_probe(struct snd_soc_component *component) { int ret = 0; if (component->driver->probe) ret = component->driver->probe(component); return soc_component_ret(component, ret); } void snd_soc_component_remove(struct snd_soc_component *component) { if (component->driver->remove) component->driver->remove(component); } int snd_soc_component_of_xlate_dai_id(struct snd_soc_component *component, struct device_node *ep) { int ret = -ENOTSUPP; if (component->driver->of_xlate_dai_id) ret = component->driver->of_xlate_dai_id(component, ep); return soc_component_ret(component, ret); } int snd_soc_component_of_xlate_dai_name(struct snd_soc_component *component, const struct of_phandle_args *args, const char **dai_name) { if (component->driver->of_xlate_dai_name) return 
component->driver->of_xlate_dai_name(component, args, dai_name); /* * Don't use soc_component_ret here because we may not want to report * the error just yet. If a device has more than one component, the * first may not match and we don't want spam the log with this. */ return -ENOTSUPP; } void snd_soc_component_setup_regmap(struct snd_soc_component *component) { int val_bytes = regmap_get_val_bytes(component->regmap); /* Errors are legitimate for non-integer byte multiples */ if (val_bytes > 0) component->val_bytes = val_bytes; } #ifdef CONFIG_REGMAP /** * snd_soc_component_init_regmap() - Initialize regmap instance for the * component * @component: The component for which to initialize the regmap instance * @regmap: The regmap instance that should be used by the component * * This function allows deferred assignment of the regmap instance that is * associated with the component. Only use this if the regmap instance is not * yet ready when the component is registered. The function must also be called * before the first IO attempt of the component. */ void snd_soc_component_init_regmap(struct snd_soc_component *component, struct regmap *regmap) { component->regmap = regmap; snd_soc_component_setup_regmap(component); } EXPORT_SYMBOL_GPL(snd_soc_component_init_regmap); /** * snd_soc_component_exit_regmap() - De-initialize regmap instance for the * component * @component: The component for which to de-initialize the regmap instance * * Calls regmap_exit() on the regmap instance associated to the component and * removes the regmap instance from the component. * * This function should only be used if snd_soc_component_init_regmap() was used * to initialize the regmap instance. 
*/ void snd_soc_component_exit_regmap(struct snd_soc_component *component) { regmap_exit(component->regmap); component->regmap = NULL; } EXPORT_SYMBOL_GPL(snd_soc_component_exit_regmap); #endif int snd_soc_component_compr_open(struct snd_soc_component *component, struct snd_compr_stream *cstream) { int ret = 0; if (component->driver->compress_ops && component->driver->compress_ops->open) ret = component->driver->compress_ops->open(component, cstream); /* mark substream if succeeded */ if (ret == 0) soc_component_mark_push(component, cstream, compr_open); return soc_component_ret(component, ret); } EXPORT_SYMBOL_GPL(snd_soc_component_compr_open); void snd_soc_component_compr_free(struct snd_soc_component *component, struct snd_compr_stream *cstream, int rollback) { if (rollback && !soc_component_mark_match(component, cstream, compr_open)) return; if (component->driver->compress_ops && component->driver->compress_ops->free) component->driver->compress_ops->free(component, cstream); /* remove marked substream */ soc_component_mark_pop(component, cstream, compr_open); } EXPORT_SYMBOL_GPL(snd_soc_component_compr_free); int snd_soc_component_compr_trigger(struct snd_compr_stream *cstream, int cmd) { struct snd_soc_pcm_runtime *rtd = cstream->private_data; struct snd_soc_component *component; int i, ret; for_each_rtd_components(rtd, i, component) { if (component->driver->compress_ops && component->driver->compress_ops->trigger) { ret = component->driver->compress_ops->trigger( component, cstream, cmd); if (ret < 0) return soc_component_ret(component, ret); } } return 0; } EXPORT_SYMBOL_GPL(snd_soc_component_compr_trigger); int snd_soc_component_compr_set_params(struct snd_compr_stream *cstream, struct snd_compr_params *params) { struct snd_soc_pcm_runtime *rtd = cstream->private_data; struct snd_soc_component *component; int i, ret; for_each_rtd_components(rtd, i, component) { if (component->driver->compress_ops && component->driver->compress_ops->set_params) { ret = 
component->driver->compress_ops->set_params( component, cstream, params); if (ret < 0) return soc_component_ret(component, ret); } } return 0; } EXPORT_SYMBOL_GPL(snd_soc_component_compr_set_params); int snd_soc_component_compr_get_params(struct snd_compr_stream *cstream, struct snd_codec *params) { struct snd_soc_pcm_runtime *rtd = cstream->private_data; struct snd_soc_component *component; int i, ret; for_each_rtd_components(rtd, i, component) { if (component->driver->compress_ops && component->driver->compress_ops->get_params) { ret = component->driver->compress_ops->get_params( component, cstream, params); return soc_component_ret(component, ret); } } return 0; } EXPORT_SYMBOL_GPL(snd_soc_component_compr_get_params); int snd_soc_component_compr_get_caps(struct snd_compr_stream *cstream, struct snd_compr_caps *caps) { struct snd_soc_pcm_runtime *rtd = cstream->private_data; struct snd_soc_component *component; int i, ret = 0; snd_soc_dpcm_mutex_lock(rtd); for_each_rtd_components(rtd, i, component) { if (component->driver->compress_ops && component->driver->compress_ops->get_caps) { ret = component->driver->compress_ops->get_caps( component, cstream, caps); break; } } snd_soc_dpcm_mutex_unlock(rtd); return soc_component_ret(component, ret); } EXPORT_SYMBOL_GPL(snd_soc_component_compr_get_caps); int snd_soc_component_compr_get_codec_caps(struct snd_compr_stream *cstream, struct snd_compr_codec_caps *codec) { struct snd_soc_pcm_runtime *rtd = cstream->private_data; struct snd_soc_component *component; int i, ret = 0; snd_soc_dpcm_mutex_lock(rtd); for_each_rtd_components(rtd, i, component) { if (component->driver->compress_ops && component->driver->compress_ops->get_codec_caps) { ret = component->driver->compress_ops->get_codec_caps( component, cstream, codec); break; } } snd_soc_dpcm_mutex_unlock(rtd); return soc_component_ret(component, ret); } EXPORT_SYMBOL_GPL(snd_soc_component_compr_get_codec_caps); int snd_soc_component_compr_ack(struct snd_compr_stream 
*cstream, size_t bytes) { struct snd_soc_pcm_runtime *rtd = cstream->private_data; struct snd_soc_component *component; int i, ret; for_each_rtd_components(rtd, i, component) { if (component->driver->compress_ops && component->driver->compress_ops->ack) { ret = component->driver->compress_ops->ack( component, cstream, bytes); if (ret < 0) return soc_component_ret(component, ret); } } return 0; } EXPORT_SYMBOL_GPL(snd_soc_component_compr_ack); int snd_soc_component_compr_pointer(struct snd_compr_stream *cstream, struct snd_compr_tstamp *tstamp) { struct snd_soc_pcm_runtime *rtd = cstream->private_data; struct snd_soc_component *component; int i, ret; for_each_rtd_components(rtd, i, component) { if (component->driver->compress_ops && component->driver->compress_ops->pointer) { ret = component->driver->compress_ops->pointer( component, cstream, tstamp); return soc_component_ret(component, ret); } } return 0; } EXPORT_SYMBOL_GPL(snd_soc_component_compr_pointer); int snd_soc_component_compr_copy(struct snd_compr_stream *cstream, char __user *buf, size_t count) { struct snd_soc_pcm_runtime *rtd = cstream->private_data; struct snd_soc_component *component; int i, ret = 0; snd_soc_dpcm_mutex_lock(rtd); for_each_rtd_components(rtd, i, component) { if (component->driver->compress_ops && component->driver->compress_ops->copy) { ret = component->driver->compress_ops->copy( component, cstream, buf, count); break; } } snd_soc_dpcm_mutex_unlock(rtd); return soc_component_ret(component, ret); } EXPORT_SYMBOL_GPL(snd_soc_component_compr_copy); int snd_soc_component_compr_set_metadata(struct snd_compr_stream *cstream, struct snd_compr_metadata *metadata) { struct snd_soc_pcm_runtime *rtd = cstream->private_data; struct snd_soc_component *component; int i, ret; for_each_rtd_components(rtd, i, component) { if (component->driver->compress_ops && component->driver->compress_ops->set_metadata) { ret = component->driver->compress_ops->set_metadata( component, cstream, metadata); if (ret < 
0) return soc_component_ret(component, ret); } } return 0; } EXPORT_SYMBOL_GPL(snd_soc_component_compr_set_metadata); int snd_soc_component_compr_get_metadata(struct snd_compr_stream *cstream, struct snd_compr_metadata *metadata) { struct snd_soc_pcm_runtime *rtd = cstream->private_data; struct snd_soc_component *component; int i, ret; for_each_rtd_components(rtd, i, component) { if (component->driver->compress_ops && component->driver->compress_ops->get_metadata) { ret = component->driver->compress_ops->get_metadata( component, cstream, metadata); return soc_component_ret(component, ret); } } return 0; } EXPORT_SYMBOL_GPL(snd_soc_component_compr_get_metadata); static unsigned int soc_component_read_no_lock( struct snd_soc_component *component, unsigned int reg) { int ret; unsigned int val = 0; if (component->regmap) ret = regmap_read(component->regmap, reg, &val); else if (component->driver->read) { ret = 0; val = component->driver->read(component, reg); } else ret = -EIO; if (ret < 0) return soc_component_ret_reg_rw(component, ret, reg); return val; } /** * snd_soc_component_read() - Read register value * @component: Component to read from * @reg: Register to read * * Return: read value */ unsigned int snd_soc_component_read(struct snd_soc_component *component, unsigned int reg) { unsigned int val; mutex_lock(&component->io_mutex); val = soc_component_read_no_lock(component, reg); mutex_unlock(&component->io_mutex); return val; } EXPORT_SYMBOL_GPL(snd_soc_component_read); static int soc_component_write_no_lock( struct snd_soc_component *component, unsigned int reg, unsigned int val) { int ret = -EIO; if (component->regmap) ret = regmap_write(component->regmap, reg, val); else if (component->driver->write) ret = component->driver->write(component, reg, val); return soc_component_ret_reg_rw(component, ret, reg); } /** * snd_soc_component_write() - Write register value * @component: Component to write to * @reg: Register to write * @val: Value to write to the 
register * * Return: 0 on success, a negative error code otherwise. */ int snd_soc_component_write(struct snd_soc_component *component, unsigned int reg, unsigned int val) { int ret; mutex_lock(&component->io_mutex); ret = soc_component_write_no_lock(component, reg, val); mutex_unlock(&component->io_mutex); return ret; } EXPORT_SYMBOL_GPL(snd_soc_component_write); static int snd_soc_component_update_bits_legacy( struct snd_soc_component *component, unsigned int reg, unsigned int mask, unsigned int val, bool *change) { unsigned int old, new; int ret = 0; mutex_lock(&component->io_mutex); old = soc_component_read_no_lock(component, reg); new = (old & ~mask) | (val & mask); *change = old != new; if (*change) ret = soc_component_write_no_lock(component, reg, new); mutex_unlock(&component->io_mutex); return soc_component_ret_reg_rw(component, ret, reg); } /** * snd_soc_component_update_bits() - Perform read/modify/write cycle * @component: Component to update * @reg: Register to update * @mask: Mask that specifies which bits to update * @val: New value for the bits specified by mask * * Return: 1 if the operation was successful and the value of the register * changed, 0 if the operation was successful, but the value did not change. * Returns a negative error code otherwise. 
*/ int snd_soc_component_update_bits(struct snd_soc_component *component, unsigned int reg, unsigned int mask, unsigned int val) { bool change; int ret; if (component->regmap) ret = regmap_update_bits_check(component->regmap, reg, mask, val, &change); else ret = snd_soc_component_update_bits_legacy(component, reg, mask, val, &change); if (ret < 0) return soc_component_ret_reg_rw(component, ret, reg); return change; } EXPORT_SYMBOL_GPL(snd_soc_component_update_bits); /** * snd_soc_component_update_bits_async() - Perform asynchronous * read/modify/write cycle * @component: Component to update * @reg: Register to update * @mask: Mask that specifies which bits to update * @val: New value for the bits specified by mask * * This function is similar to snd_soc_component_update_bits(), but the update * operation is scheduled asynchronously. This means it may not be completed * when the function returns. To make sure that all scheduled updates have been * completed snd_soc_component_async_complete() must be called. * * Return: 1 if the operation was successful and the value of the register * changed, 0 if the operation was successful, but the value did not change. * Returns a negative error code otherwise. */ int snd_soc_component_update_bits_async(struct snd_soc_component *component, unsigned int reg, unsigned int mask, unsigned int val) { bool change; int ret; if (component->regmap) ret = regmap_update_bits_check_async(component->regmap, reg, mask, val, &change); else ret = snd_soc_component_update_bits_legacy(component, reg, mask, val, &change); if (ret < 0) return soc_component_ret_reg_rw(component, ret, reg); return change; } EXPORT_SYMBOL_GPL(snd_soc_component_update_bits_async); /** * snd_soc_component_read_field() - Read register field value * @component: Component to read from * @reg: Register to read * @mask: mask of the register field * * Return: read value of register field. 
*/ unsigned int snd_soc_component_read_field(struct snd_soc_component *component, unsigned int reg, unsigned int mask) { unsigned int val; val = snd_soc_component_read(component, reg); val = (val & mask) >> soc_component_field_shift(component, mask); return val; } EXPORT_SYMBOL_GPL(snd_soc_component_read_field); /** * snd_soc_component_write_field() - write to register field * @component: Component to write to * @reg: Register to write * @mask: mask of the register field to update * @val: value of the field to write * * Return: 1 for change, otherwise 0. */ int snd_soc_component_write_field(struct snd_soc_component *component, unsigned int reg, unsigned int mask, unsigned int val) { val = (val << soc_component_field_shift(component, mask)) & mask; return snd_soc_component_update_bits(component, reg, mask, val); } EXPORT_SYMBOL_GPL(snd_soc_component_write_field); /** * snd_soc_component_async_complete() - Ensure asynchronous I/O has completed * @component: Component for which to wait * * This function blocks until all asynchronous I/O which has previously been * scheduled using snd_soc_component_update_bits_async() has completed. */ void snd_soc_component_async_complete(struct snd_soc_component *component) { if (component->regmap) regmap_async_complete(component->regmap); } EXPORT_SYMBOL_GPL(snd_soc_component_async_complete); /** * snd_soc_component_test_bits - Test register for change * @component: component * @reg: Register to test * @mask: Mask that specifies which bits to test * @value: Value to test against * * Tests a register with a new value and checks if the new value is * different from the old value. * * Return: 1 for change, otherwise 0. 
*/ int snd_soc_component_test_bits(struct snd_soc_component *component, unsigned int reg, unsigned int mask, unsigned int value) { unsigned int old, new; old = snd_soc_component_read(component, reg); new = (old & ~mask) | value; return old != new; } EXPORT_SYMBOL_GPL(snd_soc_component_test_bits); int snd_soc_pcm_component_pointer(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int i; /* FIXME: use 1st pointer */ for_each_rtd_components(rtd, i, component) if (component->driver->pointer) return component->driver->pointer(component, substream); return 0; } static bool snd_soc_component_is_codec_on_rtd(struct snd_soc_pcm_runtime *rtd, struct snd_soc_component *component) { struct snd_soc_dai *dai; int i; for_each_rtd_codec_dais(rtd, i, dai) { if (dai->component == component) return true; } return false; } void snd_soc_pcm_component_delay(struct snd_pcm_substream *substream, snd_pcm_sframes_t *cpu_delay, snd_pcm_sframes_t *codec_delay) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; snd_pcm_sframes_t delay; int i; /* * We're looking for the delay through the full audio path so it needs to * be the maximum of the Components doing transmit and the maximum of the * Components doing receive (ie, all CPUs and all CODECs) rather than * just the maximum of all Components. 
*/ for_each_rtd_components(rtd, i, component) { if (!component->driver->delay) continue; delay = component->driver->delay(component, substream); if (snd_soc_component_is_codec_on_rtd(rtd, component)) *codec_delay = max(*codec_delay, delay); else *cpu_delay = max(*cpu_delay, delay); } } int snd_soc_pcm_component_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void *arg) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int i; /* FIXME: use 1st ioctl */ for_each_rtd_components(rtd, i, component) if (component->driver->ioctl) return soc_component_ret( component, component->driver->ioctl(component, substream, cmd, arg)); return snd_pcm_lib_ioctl(substream, cmd, arg); } int snd_soc_pcm_component_sync_stop(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int i, ret; for_each_rtd_components(rtd, i, component) { if (component->driver->sync_stop) { ret = component->driver->sync_stop(component, substream); if (ret < 0) return soc_component_ret(component, ret); } } return 0; } int snd_soc_pcm_component_copy(struct snd_pcm_substream *substream, int channel, unsigned long pos, struct iov_iter *iter, unsigned long bytes) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int i; /* FIXME. it returns 1st copy now */ for_each_rtd_components(rtd, i, component) if (component->driver->copy) return soc_component_ret(component, component->driver->copy(component, substream, channel, pos, iter, bytes)); return -EINVAL; } struct page *snd_soc_pcm_component_page(struct snd_pcm_substream *substream, unsigned long offset) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; struct page *page; int i; /* FIXME. 
it returns 1st page now */ for_each_rtd_components(rtd, i, component) { if (component->driver->page) { page = component->driver->page(component, substream, offset); if (page) return page; } } return NULL; } int snd_soc_pcm_component_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int i; /* FIXME. it returns 1st mmap now */ for_each_rtd_components(rtd, i, component) if (component->driver->mmap) return soc_component_ret( component, component->driver->mmap(component, substream, vma)); return -EINVAL; } int snd_soc_pcm_component_new(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_component *component; int ret; int i; for_each_rtd_components(rtd, i, component) { if (component->driver->pcm_construct) { ret = component->driver->pcm_construct(component, rtd); if (ret < 0) return soc_component_ret(component, ret); } } return 0; } void snd_soc_pcm_component_free(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_component *component; int i; if (!rtd->pcm) return; for_each_rtd_components(rtd, i, component) if (component->driver->pcm_destruct) component->driver->pcm_destruct(component, rtd->pcm); } int snd_soc_pcm_component_prepare(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int i, ret; for_each_rtd_components(rtd, i, component) { if (component->driver->prepare) { ret = component->driver->prepare(component, substream); if (ret < 0) return soc_component_ret(component, ret); } } return 0; } int snd_soc_pcm_component_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int i, ret; for_each_rtd_components(rtd, i, component) { if (component->driver->hw_params) { ret = component->driver->hw_params(component, substream, 
params); if (ret < 0) return soc_component_ret(component, ret); } /* mark substream if succeeded */ soc_component_mark_push(component, substream, hw_params); } return 0; } void snd_soc_pcm_component_hw_free(struct snd_pcm_substream *substream, int rollback) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int i, ret; for_each_rtd_components(rtd, i, component) { if (rollback && !soc_component_mark_match(component, substream, hw_params)) continue; if (component->driver->hw_free) { ret = component->driver->hw_free(component, substream); if (ret < 0) soc_component_ret(component, ret); } /* remove marked substream */ soc_component_mark_pop(component, substream, hw_params); } } static int soc_component_trigger(struct snd_soc_component *component, struct snd_pcm_substream *substream, int cmd) { int ret = 0; if (component->driver->trigger) ret = component->driver->trigger(component, substream, cmd); return soc_component_ret(component, ret); } int snd_soc_pcm_component_trigger(struct snd_pcm_substream *substream, int cmd, int rollback) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int i, r, ret = 0; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: for_each_rtd_components(rtd, i, component) { ret = soc_component_trigger(component, substream, cmd); if (ret < 0) break; soc_component_mark_push(component, substream, trigger); } break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: for_each_rtd_components(rtd, i, component) { if (rollback && !soc_component_mark_match(component, substream, trigger)) continue; r = soc_component_trigger(component, substream, cmd); if (r < 0) ret = r; /* use last ret */ soc_component_mark_pop(component, substream, trigger); } } return ret; } int snd_soc_pcm_component_pm_runtime_get(struct snd_soc_pcm_runtime *rtd, void 
*stream) { struct snd_soc_component *component; int i; for_each_rtd_components(rtd, i, component) { int ret = pm_runtime_get_sync(component->dev); if (ret < 0 && ret != -EACCES) { pm_runtime_put_noidle(component->dev); return soc_component_ret(component, ret); } /* mark stream if succeeded */ soc_component_mark_push(component, stream, pm); } return 0; } void snd_soc_pcm_component_pm_runtime_put(struct snd_soc_pcm_runtime *rtd, void *stream, int rollback) { struct snd_soc_component *component; int i; for_each_rtd_components(rtd, i, component) { if (rollback && !soc_component_mark_match(component, stream, pm)) continue; pm_runtime_mark_last_busy(component->dev); pm_runtime_put_autosuspend(component->dev); /* remove marked stream */ soc_component_mark_pop(component, stream, pm); } } int snd_soc_pcm_component_ack(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int i; /* FIXME: use 1st pointer */ for_each_rtd_components(rtd, i, component) if (component->driver->ack) return component->driver->ack(component, substream); return 0; }
linux-master
sound/soc/soc-component.c
// SPDX-License-Identifier: GPL-2.0+ // // Copyright (C) 2013, Analog Devices Inc. // Author: Lars-Peter Clausen <[email protected]> #include <linux/module.h> #include <linux/init.h> #include <linux/dmaengine.h> #include <linux/slab.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <linux/dma-mapping.h> #include <linux/of.h> #include <sound/dmaengine_pcm.h> static unsigned int prealloc_buffer_size_kbytes = 512; module_param(prealloc_buffer_size_kbytes, uint, 0444); MODULE_PARM_DESC(prealloc_buffer_size_kbytes, "Preallocate DMA buffer size (KB)."); /* * The platforms dmaengine driver does not support reporting the amount of * bytes that are still left to transfer. */ #define SND_DMAENGINE_PCM_FLAG_NO_RESIDUE BIT(31) static struct device *dmaengine_dma_dev(struct dmaengine_pcm *pcm, struct snd_pcm_substream *substream) { if (!pcm->chan[substream->stream]) return NULL; return pcm->chan[substream->stream]->device->dev; } /** * snd_dmaengine_pcm_prepare_slave_config() - Generic prepare_slave_config callback * @substream: PCM substream * @params: hw_params * @slave_config: DMA slave config to prepare * * This function can be used as a generic prepare_slave_config callback for * platforms which make use of the snd_dmaengine_dai_dma_data struct for their * DAI DMA data. Internally the function will first call * snd_hwparams_to_dma_slave_config to fill in the slave config based on the * hw_params, followed by snd_dmaengine_set_config_from_dai_data to fill in the * remaining fields based on the DAI DMA data. 
*/ int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_dmaengine_dai_dma_data *dma_data; int ret; if (rtd->dai_link->num_cpus > 1) { dev_err(rtd->dev, "%s doesn't support Multi CPU yet\n", __func__); return -EINVAL; } dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream); ret = snd_hwparams_to_dma_slave_config(substream, params, slave_config); if (ret) return ret; snd_dmaengine_pcm_set_config_from_dai_data(substream, dma_data, slave_config); return 0; } EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_prepare_slave_config); static int dmaengine_pcm_hw_params(struct snd_soc_component *component, struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct dmaengine_pcm *pcm = soc_component_to_pcm(component); struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream); struct dma_slave_config slave_config; int ret; if (!pcm->config->prepare_slave_config) return 0; memset(&slave_config, 0, sizeof(slave_config)); ret = pcm->config->prepare_slave_config(substream, params, &slave_config); if (ret) return ret; return dmaengine_slave_config(chan, &slave_config); } static int dmaengine_pcm_set_runtime_hwparams(struct snd_soc_component *component, struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct dmaengine_pcm *pcm = soc_component_to_pcm(component); struct device *dma_dev = dmaengine_dma_dev(pcm, substream); struct dma_chan *chan = pcm->chan[substream->stream]; struct snd_dmaengine_dai_dma_data *dma_data; struct snd_pcm_hardware hw; if (rtd->dai_link->num_cpus > 1) { dev_err(rtd->dev, "%s doesn't support Multi CPU yet\n", __func__); return -EINVAL; } if (pcm->config->pcm_hardware) return snd_soc_set_runtime_hwparams(substream, pcm->config->pcm_hardware); dma_data = 
snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream); memset(&hw, 0, sizeof(hw)); hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED; hw.periods_min = 2; hw.periods_max = UINT_MAX; hw.period_bytes_min = dma_data->maxburst * DMA_SLAVE_BUSWIDTH_8_BYTES; if (!hw.period_bytes_min) hw.period_bytes_min = 256; hw.period_bytes_max = dma_get_max_seg_size(dma_dev); hw.buffer_bytes_max = SIZE_MAX; hw.fifo_size = dma_data->fifo_size; if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE) hw.info |= SNDRV_PCM_INFO_BATCH; /** * FIXME: Remove the return value check to align with the code * before adding snd_dmaengine_pcm_refine_runtime_hwparams * function. */ snd_dmaengine_pcm_refine_runtime_hwparams(substream, dma_data, &hw, chan); return snd_soc_set_runtime_hwparams(substream, &hw); } static int dmaengine_pcm_open(struct snd_soc_component *component, struct snd_pcm_substream *substream) { struct dmaengine_pcm *pcm = soc_component_to_pcm(component); struct dma_chan *chan = pcm->chan[substream->stream]; int ret; ret = dmaengine_pcm_set_runtime_hwparams(component, substream); if (ret) return ret; return snd_dmaengine_pcm_open(substream, chan); } static int dmaengine_pcm_close(struct snd_soc_component *component, struct snd_pcm_substream *substream) { return snd_dmaengine_pcm_close(substream); } static int dmaengine_pcm_trigger(struct snd_soc_component *component, struct snd_pcm_substream *substream, int cmd) { return snd_dmaengine_pcm_trigger(substream, cmd); } static struct dma_chan *dmaengine_pcm_compat_request_channel( struct snd_soc_component *component, struct snd_soc_pcm_runtime *rtd, struct snd_pcm_substream *substream) { struct dmaengine_pcm *pcm = soc_component_to_pcm(component); struct snd_dmaengine_dai_dma_data *dma_data; if (rtd->dai_link->num_cpus > 1) { dev_err(rtd->dev, "%s doesn't support Multi CPU yet\n", __func__); return NULL; } dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream); if ((pcm->flags & 
SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) && pcm->chan[0]) return pcm->chan[0]; if (pcm->config->compat_request_channel) return pcm->config->compat_request_channel(rtd, substream); return snd_dmaengine_pcm_request_channel(pcm->config->compat_filter_fn, dma_data->filter_data); } static bool dmaengine_pcm_can_report_residue(struct device *dev, struct dma_chan *chan) { struct dma_slave_caps dma_caps; int ret; ret = dma_get_slave_caps(chan, &dma_caps); if (ret != 0) { dev_warn(dev, "Failed to get DMA channel capabilities, falling back to period counting: %d\n", ret); return false; } if (dma_caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) return false; return true; } static int dmaengine_pcm_new(struct snd_soc_component *component, struct snd_soc_pcm_runtime *rtd) { struct dmaengine_pcm *pcm = soc_component_to_pcm(component); const struct snd_dmaengine_pcm_config *config = pcm->config; struct device *dev = component->dev; size_t prealloc_buffer_size; size_t max_buffer_size; unsigned int i; if (config->prealloc_buffer_size) prealloc_buffer_size = config->prealloc_buffer_size; else prealloc_buffer_size = prealloc_buffer_size_kbytes * 1024; if (config->pcm_hardware && config->pcm_hardware->buffer_bytes_max) max_buffer_size = config->pcm_hardware->buffer_bytes_max; else max_buffer_size = SIZE_MAX; for_each_pcm_streams(i) { struct snd_pcm_substream *substream = rtd->pcm->streams[i].substream; if (!substream) continue; if (!pcm->chan[i] && config->chan_names[i]) pcm->chan[i] = dma_request_slave_channel(dev, config->chan_names[i]); if (!pcm->chan[i] && (pcm->flags & SND_DMAENGINE_PCM_FLAG_COMPAT)) { pcm->chan[i] = dmaengine_pcm_compat_request_channel( component, rtd, substream); } if (!pcm->chan[i]) { dev_err(component->dev, "Missing dma channel for stream: %d\n", i); return -EINVAL; } snd_pcm_set_managed_buffer(substream, SNDRV_DMA_TYPE_DEV_IRAM, dmaengine_dma_dev(pcm, substream), prealloc_buffer_size, max_buffer_size); if (!dmaengine_pcm_can_report_residue(dev, 
pcm->chan[i])) pcm->flags |= SND_DMAENGINE_PCM_FLAG_NO_RESIDUE; if (rtd->pcm->streams[i].pcm->name[0] == '\0') { strscpy_pad(rtd->pcm->streams[i].pcm->name, rtd->pcm->streams[i].pcm->id, sizeof(rtd->pcm->streams[i].pcm->name)); } } return 0; } static snd_pcm_uframes_t dmaengine_pcm_pointer( struct snd_soc_component *component, struct snd_pcm_substream *substream) { struct dmaengine_pcm *pcm = soc_component_to_pcm(component); if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE) return snd_dmaengine_pcm_pointer_no_residue(substream); else return snd_dmaengine_pcm_pointer(substream); } static int dmaengine_copy(struct snd_soc_component *component, struct snd_pcm_substream *substream, int channel, unsigned long hwoff, struct iov_iter *iter, unsigned long bytes) { struct snd_pcm_runtime *runtime = substream->runtime; struct dmaengine_pcm *pcm = soc_component_to_pcm(component); int (*process)(struct snd_pcm_substream *substream, int channel, unsigned long hwoff, unsigned long bytes) = pcm->config->process; bool is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; void *dma_ptr = runtime->dma_area + hwoff + channel * (runtime->dma_bytes / runtime->channels); if (is_playback) if (copy_from_iter(dma_ptr, bytes, iter) != bytes) return -EFAULT; if (process) { int ret = process(substream, channel, hwoff, bytes); if (ret < 0) return ret; } if (!is_playback) if (copy_to_iter(dma_ptr, bytes, iter) != bytes) return -EFAULT; return 0; } static const struct snd_soc_component_driver dmaengine_pcm_component = { .name = SND_DMAENGINE_PCM_DRV_NAME, .probe_order = SND_SOC_COMP_ORDER_LATE, .open = dmaengine_pcm_open, .close = dmaengine_pcm_close, .hw_params = dmaengine_pcm_hw_params, .trigger = dmaengine_pcm_trigger, .pointer = dmaengine_pcm_pointer, .pcm_construct = dmaengine_pcm_new, }; static const struct snd_soc_component_driver dmaengine_pcm_component_process = { .name = SND_DMAENGINE_PCM_DRV_NAME, .probe_order = SND_SOC_COMP_ORDER_LATE, .open = dmaengine_pcm_open, .close = 
dmaengine_pcm_close, .hw_params = dmaengine_pcm_hw_params, .trigger = dmaengine_pcm_trigger, .pointer = dmaengine_pcm_pointer, .copy = dmaengine_copy, .pcm_construct = dmaengine_pcm_new, }; static const char * const dmaengine_pcm_dma_channel_names[] = { [SNDRV_PCM_STREAM_PLAYBACK] = "tx", [SNDRV_PCM_STREAM_CAPTURE] = "rx", }; static int dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm, struct device *dev, const struct snd_dmaengine_pcm_config *config) { unsigned int i; const char *name; struct dma_chan *chan; if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_DT) || (!dev->of_node && !(config->dma_dev && config->dma_dev->of_node))) return 0; if (config->dma_dev) { /* * If this warning is seen, it probably means that your Linux * device structure does not match your HW device structure. * It would be best to refactor the Linux device structure to * correctly match the HW structure. */ dev_warn(dev, "DMA channels sourced from device %s", dev_name(config->dma_dev)); dev = config->dma_dev; } for_each_pcm_streams(i) { if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) name = "rx-tx"; else name = dmaengine_pcm_dma_channel_names[i]; if (config->chan_names[i]) name = config->chan_names[i]; chan = dma_request_chan(dev, name); if (IS_ERR(chan)) { /* * Only report probe deferral errors, channels * might not be present for devices that * support only TX or only RX. 
*/ if (PTR_ERR(chan) == -EPROBE_DEFER) return -EPROBE_DEFER; pcm->chan[i] = NULL; } else { pcm->chan[i] = chan; } if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) break; } if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) pcm->chan[1] = pcm->chan[0]; return 0; } static void dmaengine_pcm_release_chan(struct dmaengine_pcm *pcm) { unsigned int i; for_each_pcm_streams(i) { if (!pcm->chan[i]) continue; dma_release_channel(pcm->chan[i]); if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) break; } } static const struct snd_dmaengine_pcm_config snd_dmaengine_pcm_default_config = { .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config, }; /** * snd_dmaengine_pcm_register - Register a dmaengine based PCM device * @dev: The parent device for the PCM device * @config: Platform specific PCM configuration * @flags: Platform specific quirks */ int snd_dmaengine_pcm_register(struct device *dev, const struct snd_dmaengine_pcm_config *config, unsigned int flags) { const struct snd_soc_component_driver *driver; struct dmaengine_pcm *pcm; int ret; pcm = kzalloc(sizeof(*pcm), GFP_KERNEL); if (!pcm) return -ENOMEM; #ifdef CONFIG_DEBUG_FS pcm->component.debugfs_prefix = "dma"; #endif if (!config) config = &snd_dmaengine_pcm_default_config; pcm->config = config; pcm->flags = flags; ret = dmaengine_pcm_request_chan_of(pcm, dev, config); if (ret) goto err_free_dma; if (config->process) driver = &dmaengine_pcm_component_process; else driver = &dmaengine_pcm_component; ret = snd_soc_component_initialize(&pcm->component, driver, dev); if (ret) goto err_free_dma; ret = snd_soc_add_component(&pcm->component, NULL, 0); if (ret) goto err_free_dma; return 0; err_free_dma: dmaengine_pcm_release_chan(pcm); kfree(pcm); return ret; } EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register); /** * snd_dmaengine_pcm_unregister - Removes a dmaengine based PCM device * @dev: Parent device the PCM was register with * * Removes a dmaengine based PCM device previously registered with * 
snd_dmaengine_pcm_register. */ void snd_dmaengine_pcm_unregister(struct device *dev) { struct snd_soc_component *component; struct dmaengine_pcm *pcm; component = snd_soc_lookup_component(dev, SND_DMAENGINE_PCM_DRV_NAME); if (!component) return; pcm = soc_component_to_pcm(component); snd_soc_unregister_component_by_driver(dev, component->driver); dmaengine_pcm_release_chan(pcm); kfree(pcm); } EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister); MODULE_LICENSE("GPL");
linux-master
sound/soc/soc-generic-dmaengine-pcm.c
// SPDX-License-Identifier: GPL-2.0+
//
// soc-compress.c -- ALSA SoC Compress
//
// Copyright (C) 2012 Intel Corp.
//
// Authors: Namarta Kohli <[email protected]>
// Ramesh Babu K V <[email protected]>
// Vinod Koul <[email protected]>

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <sound/core.h>
#include <sound/compress_params.h>
#include <sound/compress_driver.h>
#include <sound/soc.h>
#include <sound/initval.h>
#include <sound/soc-dpcm.h>
#include <sound/soc-link.h>

/* Open the compressed stream on every component of the runtime, taking the
 * component's module reference first; stops at the first failure. */
static int snd_soc_compr_components_open(struct snd_compr_stream *cstream)
{
	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
	struct snd_soc_component *component;
	int ret = 0;
	int i;

	for_each_rtd_components(rtd, i, component) {
		ret = snd_soc_component_module_get_when_open(component, cstream);
		if (ret < 0)
			break;

		ret = snd_soc_component_compr_open(component, cstream);
		if (ret < 0)
			break;
	}

	return ret;
}

/* Mirror of snd_soc_compr_components_open(): close the stream on every
 * component and drop the module references taken at open time. */
static void snd_soc_compr_components_free(struct snd_compr_stream *cstream,
					  int rollback)
{
	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
	struct snd_soc_component *component;
	int i;

	for_each_rtd_components(rtd, i, component) {
		snd_soc_component_compr_free(component, cstream, rollback);
		snd_soc_component_module_put_when_close(component, cstream, rollback);
	}
}

/* Common teardown for close (rollback == 0) and failed-open rollback
 * (rollback == 1); in the rollback case activation/DAPM steps that never
 * happened are skipped. */
static int soc_compr_clean(struct snd_compr_stream *cstream, int rollback)
{
	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
	int stream = cstream->direction; /* SND_COMPRESS_xxx is same as SNDRV_PCM_STREAM_xxx */

	snd_soc_dpcm_mutex_lock(rtd);

	if (!rollback)
		snd_soc_runtime_deactivate(rtd, stream);

	snd_soc_dai_digital_mute(codec_dai, 1, stream);

	/* Clear cached rates once the DAIs have no remaining users */
	if (!snd_soc_dai_active(cpu_dai))
		cpu_dai->rate = 0;

	if (!snd_soc_dai_active(codec_dai))
		codec_dai->rate = 0;

	snd_soc_link_compr_shutdown(cstream, rollback);

	snd_soc_compr_components_free(cstream, rollback);

	snd_soc_dai_compr_shutdown(cpu_dai, cstream, rollback);

	if (!rollback)
		snd_soc_dapm_stream_stop(rtd, stream);

	snd_soc_dpcm_mutex_unlock(rtd);

	/* Must happen outside the dpcm mutex */
	snd_soc_pcm_component_pm_runtime_put(rtd, cstream, rollback);

	return 0;
}

/* Normal (non-rollback) close path */
static int soc_compr_free(struct snd_compr_stream *cstream)
{
	return soc_compr_clean(cstream, 0);
}

/* Open path for plain (non-DPCM) compressed links; on failure everything
 * done so far is undone via soc_compr_clean(..., 1). */
static int soc_compr_open(struct snd_compr_stream *cstream)
{
	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	int stream = cstream->direction; /* SND_COMPRESS_xxx is same as SNDRV_PCM_STREAM_xxx */
	int ret;

	ret = snd_soc_pcm_component_pm_runtime_get(rtd, cstream);
	if (ret < 0)
		goto err_no_lock;

	snd_soc_dpcm_mutex_lock(rtd);

	ret = snd_soc_dai_compr_startup(cpu_dai, cstream);
	if (ret < 0)
		goto err;

	ret = snd_soc_compr_components_open(cstream);
	if (ret < 0)
		goto err;

	ret = snd_soc_link_compr_startup(cstream);
	if (ret < 0)
		goto err;

	snd_soc_runtime_activate(rtd, stream);
err:
	snd_soc_dpcm_mutex_unlock(rtd);
err_no_lock:
	if (ret < 0)
		soc_compr_clean(cstream, 1);

	return ret;
}

/* Open path for a DPCM front end: route to the back ends first, then open
 * DAI, components and machine link; errors unwind in reverse order. */
static int soc_compr_open_fe(struct snd_compr_stream *cstream)
{
	struct snd_soc_pcm_runtime *fe = cstream->private_data;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(fe, 0);
	struct snd_soc_dpcm *dpcm;
	struct snd_soc_dapm_widget_list *list;
	int stream = cstream->direction; /* SND_COMPRESS_xxx is same as SNDRV_PCM_STREAM_xxx */
	int ret;

	snd_soc_card_mutex_lock(fe->card);

	ret = dpcm_path_get(fe, stream, &list);
	if (ret < 0)
		goto be_err;

	snd_soc_dpcm_mutex_lock(fe);

	/* calculate valid and active FE <-> BE dpcms */
	dpcm_process_paths(fe, stream, &list, 1);

	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;

	ret = dpcm_be_dai_startup(fe, stream);
	if (ret < 0) {
		/* clean up all links */
		for_each_dpcm_be(fe, stream, dpcm)
			dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;

		dpcm_be_disconnect(fe, stream);
		goto out;
	}

	ret = snd_soc_dai_compr_startup(cpu_dai, cstream);
	if (ret < 0)
		goto out;

	ret = snd_soc_compr_components_open(cstream);
	if (ret < 0)
		goto open_err;

	ret = snd_soc_link_compr_startup(cstream);
	if (ret < 0)
		goto machine_err;

	dpcm_clear_pending_state(fe, stream);
	dpcm_path_put(&list);

	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_OPEN;
	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
	snd_soc_runtime_activate(fe, stream);
	snd_soc_dpcm_mutex_unlock(fe);

	snd_soc_card_mutex_unlock(fe->card);

	return 0;

machine_err:
	snd_soc_compr_components_free(cstream, 1);
open_err:
	snd_soc_dai_compr_shutdown(cpu_dai, cstream, 1);
out:
	dpcm_path_put(&list);
	snd_soc_dpcm_mutex_unlock(fe);
be_err:
	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
	snd_soc_card_mutex_unlock(fe->card);
	return ret;
}

/* Close path for a DPCM front end: tear down the BE chain, then the FE's
 * link, components and DAI. */
static int soc_compr_free_fe(struct snd_compr_stream *cstream)
{
	struct snd_soc_pcm_runtime *fe = cstream->private_data;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(fe, 0);
	struct snd_soc_dpcm *dpcm;
	int stream = cstream->direction; /* SND_COMPRESS_xxx is same as SNDRV_PCM_STREAM_xxx */

	snd_soc_card_mutex_lock(fe->card);
	snd_soc_dpcm_mutex_lock(fe);
	snd_soc_runtime_deactivate(fe, stream);

	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;

	dpcm_be_dai_hw_free(fe, stream);

	dpcm_be_dai_shutdown(fe, stream);

	/* mark FE's links ready to prune */
	for_each_dpcm_be(fe, stream, dpcm)
		dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;

	dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);

	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;

	dpcm_be_disconnect(fe, stream);

	snd_soc_dpcm_mutex_unlock(fe);

	snd_soc_link_compr_shutdown(cstream, 0);

	snd_soc_compr_components_free(cstream, 0);

	snd_soc_dai_compr_shutdown(cpu_dai, cstream, 0);

	snd_soc_card_mutex_unlock(fe->card);
	return 0;
}

/* Trigger for plain links; START/STOP also unmute/mute the codec DAI */
static int soc_compr_trigger(struct snd_compr_stream *cstream, int cmd)
{
	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
	struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	int stream = cstream->direction; /* SND_COMPRESS_xxx is same as SNDRV_PCM_STREAM_xxx */
	int ret;

	snd_soc_dpcm_mutex_lock(rtd);

	ret = snd_soc_component_compr_trigger(cstream, cmd);
	if (ret < 0)
		goto out;

	ret = snd_soc_dai_compr_trigger(cpu_dai, cstream, cmd);
	if (ret < 0)
		goto out;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		snd_soc_dai_digital_mute(codec_dai, 0, stream);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		snd_soc_dai_digital_mute(codec_dai, 1, stream);
		break;
	}

out:
	snd_soc_dpcm_mutex_unlock(rtd);
	return ret;
}

/* Trigger for DPCM front ends; drain commands bypass DPCM and go straight
 * to the component. */
static int soc_compr_trigger_fe(struct snd_compr_stream *cstream, int cmd)
{
	struct snd_soc_pcm_runtime *fe = cstream->private_data;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(fe, 0);
	int stream = cstream->direction; /* SND_COMPRESS_xxx is same as SNDRV_PCM_STREAM_xxx */
	int ret;

	if (cmd == SND_COMPR_TRIGGER_PARTIAL_DRAIN ||
	    cmd == SND_COMPR_TRIGGER_DRAIN)
		return snd_soc_component_compr_trigger(cstream, cmd);

	snd_soc_card_mutex_lock(fe->card);

	ret = snd_soc_dai_compr_trigger(cpu_dai, cstream, cmd);
	if (ret < 0)
		goto out;

	ret = snd_soc_component_compr_trigger(cstream, cmd);
	if (ret < 0)
		goto out;

	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;

	ret = dpcm_be_dai_trigger(fe, stream, cmd);

	/* Track the FE state for the DPCM core */
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED;
		break;
	}

out:
	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
	snd_soc_card_mutex_unlock(fe->card);
	return ret;
}

/* set_params for plain links: DAI, then component, then machine link */
static int soc_compr_set_params(struct snd_compr_stream *cstream,
				struct snd_compr_params *params)
{
	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	int stream = cstream->direction; /* SND_COMPRESS_xxx is same as SNDRV_PCM_STREAM_xxx */
	int ret;

	snd_soc_dpcm_mutex_lock(rtd);

	/*
	 * First we call set_params for the CPU DAI, then the component;
	 * this should configure the SoC side. If the machine has
	 * compressed ops then we call that as well. The expectation is
	 * that these callbacks will configure everything for this compress
	 * path, like configuring a PCM port for a CODEC.
	 */
	ret = snd_soc_dai_compr_set_params(cpu_dai, cstream, params);
	if (ret < 0)
		goto err;

	ret = snd_soc_component_compr_set_params(cstream, params);
	if (ret < 0)
		goto err;

	ret = snd_soc_link_compr_set_params(cstream);
	if (ret < 0)
		goto err;

	snd_soc_dapm_stream_event(rtd, stream, SND_SOC_DAPM_STREAM_START);

	/* cancel any delayed stream shutdown that is pending */
	rtd->pop_wait = 0;
	snd_soc_dpcm_mutex_unlock(rtd);

	/* Outside the mutex: the delayed work itself takes it */
	cancel_delayed_work_sync(&rtd->delayed_work);

	return 0;

err:
	snd_soc_dpcm_mutex_unlock(rtd);
	return ret;
}

/* set_params for DPCM front ends: configure the BEs, then the FE itself */
static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
				   struct snd_compr_params *params)
{
	struct snd_soc_pcm_runtime *fe = cstream->private_data;
	struct snd_pcm_substream *fe_substream =
		fe->pcm->streams[cstream->direction].substream;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(fe, 0);
	int stream = cstream->direction; /* SND_COMPRESS_xxx is same as SNDRV_PCM_STREAM_xxx */
	int ret;

	snd_soc_card_mutex_lock(fe->card);

	/*
	 * Create an empty hw_params for the BE as the machine driver must
	 * fix this up to match DSP decoder and ASRC configuration.
	 * I.e. machine driver fixup for compressed BE is mandatory.
	 */
	memset(&fe->dpcm[fe_substream->stream].hw_params, 0,
	       sizeof(struct snd_pcm_hw_params));

	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;

	ret = dpcm_be_dai_hw_params(fe, stream);
	if (ret < 0)
		goto out;

	ret = dpcm_be_dai_prepare(fe, stream);
	if (ret < 0)
		goto out;

	ret = snd_soc_dai_compr_set_params(cpu_dai, cstream, params);
	if (ret < 0)
		goto out;

	ret = snd_soc_component_compr_set_params(cstream, params);
	if (ret < 0)
		goto out;

	ret = snd_soc_link_compr_set_params(cstream);
	if (ret < 0)
		goto out;

	snd_soc_dpcm_mutex_lock(fe);
	dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_START);
	snd_soc_dpcm_mutex_unlock(fe);

	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;

out:
	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
	snd_soc_card_mutex_unlock(fe->card);
	return ret;
}

/* Read back the current codec parameters: DAI first, then component */
static int soc_compr_get_params(struct snd_compr_stream *cstream,
				struct snd_codec *params)
{
	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	int ret = 0;

	snd_soc_dpcm_mutex_lock(rtd);

	ret = snd_soc_dai_compr_get_params(cpu_dai, cstream, params);
	if (ret < 0)
		goto err;

	ret = snd_soc_component_compr_get_params(cstream, params);
err:
	snd_soc_dpcm_mutex_unlock(rtd);
	return ret;
}

/* Acknowledge @bytes consumed/produced to DAI and component */
static int soc_compr_ack(struct snd_compr_stream *cstream, size_t bytes)
{
	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	int ret;

	snd_soc_dpcm_mutex_lock(rtd);

	ret = snd_soc_dai_compr_ack(cpu_dai, cstream, bytes);
	if (ret < 0)
		goto err;

	ret = snd_soc_component_compr_ack(cstream, bytes);
err:
	snd_soc_dpcm_mutex_unlock(rtd);
	return ret;
}

/* Query the current stream position/timestamp */
static int soc_compr_pointer(struct snd_compr_stream *cstream,
			     struct snd_compr_tstamp *tstamp)
{
	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
	int ret;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);

	snd_soc_dpcm_mutex_lock(rtd);

	ret = snd_soc_dai_compr_pointer(cpu_dai, cstream, tstamp);
	if (ret < 0)
		goto out;

	ret = snd_soc_component_compr_pointer(cstream, tstamp);
out:
	snd_soc_dpcm_mutex_unlock(rtd);
	return ret;
}

/* Forward metadata writes to DAI then component (no locking needed here) */
static int soc_compr_set_metadata(struct snd_compr_stream *cstream,
				  struct snd_compr_metadata *metadata)
{
	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	int ret;

	ret = snd_soc_dai_compr_set_metadata(cpu_dai, cstream, metadata);
	if (ret < 0)
		return ret;

	return snd_soc_component_compr_set_metadata(cstream, metadata);
}

/* Forward metadata reads to DAI then component */
static int soc_compr_get_metadata(struct snd_compr_stream *cstream,
				  struct snd_compr_metadata *metadata)
{
	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	int ret;

	ret = snd_soc_dai_compr_get_metadata(cpu_dai, cstream, metadata);
	if (ret < 0)
		return ret;

	return snd_soc_component_compr_get_metadata(cstream, metadata);
}

/* ASoC Compress operations */
static struct snd_compr_ops soc_compr_ops = {
	.open		= soc_compr_open,
	.free		= soc_compr_free,
	.set_params	= soc_compr_set_params,
	.set_metadata	= soc_compr_set_metadata,
	.get_metadata	= soc_compr_get_metadata,
	.get_params	= soc_compr_get_params,
	.trigger	= soc_compr_trigger,
	.pointer	= soc_compr_pointer,
	.ack		= soc_compr_ack,
	.get_caps	= snd_soc_component_compr_get_caps,
	.get_codec_caps	= snd_soc_component_compr_get_codec_caps,
};

/* ASoC Dynamic Compress operations */
static struct snd_compr_ops soc_compr_dyn_ops = {
	.open		= soc_compr_open_fe,
	.free		= soc_compr_free_fe,
	.set_params	= soc_compr_set_params_fe,
	.get_params	= soc_compr_get_params,
	.set_metadata	= soc_compr_set_metadata,
	.get_metadata	= soc_compr_get_metadata,
	.trigger	= soc_compr_trigger_fe,
	.pointer	= soc_compr_pointer,
	.ack		= soc_compr_ack,
	.get_caps	= snd_soc_component_compr_get_caps,
	.get_codec_caps	= snd_soc_component_compr_get_codec_caps,
};

/**
 * snd_soc_new_compress - create a new compress.
 *
 * @rtd: The runtime for which we will create compress
 * @num: the device index number (zero based - shared with normal PCMs)
 *
 * Return: 0 for success, else error.
 */
int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
{
	struct snd_soc_component *component;
	struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	struct snd_compr *compr;
	struct snd_pcm *be_pcm;
	char new_name[64];
	int ret = 0, direction = 0;
	int playback = 0, capture = 0;
	int i;

	/*
	 * make sure these are same value,
	 * and then use these as equally
	 */
	BUILD_BUG_ON((int)SNDRV_PCM_STREAM_PLAYBACK != (int)SND_COMPRESS_PLAYBACK);
	BUILD_BUG_ON((int)SNDRV_PCM_STREAM_CAPTURE != (int)SND_COMPRESS_CAPTURE);

	if (rtd->dai_link->num_cpus > 1 ||
	    rtd->dai_link->num_codecs > 1) {
		dev_err(rtd->card->dev,
			"Compress ASoC: Multi CPU/Codec not supported\n");
		return -EINVAL;
	}

	if (!codec_dai) {
		dev_err(rtd->card->dev, "Missing codec\n");
		return -EINVAL;
	}

	/* check client and interface hw capabilities */
	if (snd_soc_dai_stream_valid(codec_dai, SNDRV_PCM_STREAM_PLAYBACK) &&
	    snd_soc_dai_stream_valid(cpu_dai, SNDRV_PCM_STREAM_PLAYBACK))
		playback = 1;
	if (snd_soc_dai_stream_valid(codec_dai, SNDRV_PCM_STREAM_CAPTURE) &&
	    snd_soc_dai_stream_valid(cpu_dai, SNDRV_PCM_STREAM_CAPTURE))
		capture = 1;

	/*
	 * Compress devices are unidirectional so only one of the directions
	 * should be set, check for that (xor)
	 */
	if (playback + capture != 1) {
		dev_err(rtd->card->dev,
			"Compress ASoC: Invalid direction for P %d, C %d\n",
			playback, capture);
		return -EINVAL;
	}

	if (playback)
		direction = SND_COMPRESS_PLAYBACK;
	else
		direction = SND_COMPRESS_CAPTURE;

	compr = devm_kzalloc(rtd->card->dev, sizeof(*compr), GFP_KERNEL);
	if (!compr)
		return -ENOMEM;

	compr->ops = devm_kzalloc(rtd->card->dev, sizeof(soc_compr_ops),
				  GFP_KERNEL);
	if (!compr->ops)
		return -ENOMEM;

	if (rtd->dai_link->dynamic) {
		/* DPCM FE: back it with an internal PCM for the BE path */
		snprintf(new_name, sizeof(new_name), "(%s)",
			rtd->dai_link->stream_name);

		ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num,
				rtd->dai_link->dpcm_playback,
				rtd->dai_link->dpcm_capture, &be_pcm);
		if (ret < 0) {
			dev_err(rtd->card->dev,
				"Compress ASoC: can't create compressed for %s: %d\n",
				rtd->dai_link->name, ret);
			return ret;
		}

		/* inherit atomicity from DAI link */
		be_pcm->nonatomic = rtd->dai_link->nonatomic;

		rtd->pcm = be_pcm;
		rtd->fe_compr = 1;
		if (rtd->dai_link->dpcm_playback)
			be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
		if (rtd->dai_link->dpcm_capture)
			be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
		memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops));
	} else {
		snprintf(new_name, sizeof(new_name), "%s %s-%d",
			rtd->dai_link->stream_name, codec_dai->name, num);

		memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));
	}

	/* Wire up copy() only if some component implements it */
	for_each_rtd_components(rtd, i, component) {
		if (!component->driver->compress_ops ||
		    !component->driver->compress_ops->copy)
			continue;

		compr->ops->copy = snd_soc_component_compr_copy;
		break;
	}

	ret = snd_compress_new(rtd->card->snd_card, num, direction,
				new_name, compr);
	if (ret < 0) {
		component = asoc_rtd_to_codec(rtd, 0)->component;
		dev_err(component->dev,
			"Compress ASoC: can't create compress for codec %s: %d\n",
			component->name, ret);
		return ret;
	}

	/* DAPM dai link stream work */
	rtd->close_delayed_work_func = snd_soc_close_delayed_work;

	rtd->compr = compr;
	compr->private_data = rtd;

	dev_dbg(rtd->card->dev, "Compress ASoC: %s <-> %s mapping ok\n",
		codec_dai->name, cpu_dai->name);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_new_compress);
linux-master
sound/soc/soc-compress.c
// SPDX-License-Identifier: GPL-2.0+ // // soc-ac97.c -- ALSA SoC Audio Layer AC97 support // // Copyright 2005 Wolfson Microelectronics PLC. // Copyright 2005 Openedhand Ltd. // Copyright (C) 2010 Slimlogic Ltd. // Copyright (C) 2010 Texas Instruments Inc. // // Author: Liam Girdwood <[email protected]> // with code, comments and ideas from :- // Richard Purdie <[email protected]> #include <linux/ctype.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/gpio/consumer.h> #include <linux/gpio/driver.h> #include <linux/init.h> #include <linux/of.h> #include <linux/pinctrl/consumer.h> #include <linux/slab.h> #include <sound/ac97_codec.h> #include <sound/soc.h> struct snd_ac97_reset_cfg { struct pinctrl *pctl; struct pinctrl_state *pstate_reset; struct pinctrl_state *pstate_warm_reset; struct pinctrl_state *pstate_run; struct gpio_desc *reset_gpio; struct gpio_desc *sdata_gpio; struct gpio_desc *sync_gpio; }; static struct snd_ac97_bus soc_ac97_bus = { .ops = NULL, /* Gets initialized in snd_soc_set_ac97_ops() */ }; static void soc_ac97_device_release(struct device *dev) { kfree(to_ac97_t(dev)); } #ifdef CONFIG_GPIOLIB struct snd_ac97_gpio_priv { struct gpio_chip gpio_chip; unsigned int gpios_set; struct snd_soc_component *component; }; static inline struct snd_soc_component *gpio_to_component(struct gpio_chip *chip) { struct snd_ac97_gpio_priv *gpio_priv = gpiochip_get_data(chip); return gpio_priv->component; } static int snd_soc_ac97_gpio_request(struct gpio_chip *chip, unsigned int offset) { if (offset >= AC97_NUM_GPIOS) return -EINVAL; return 0; } static int snd_soc_ac97_gpio_direction_in(struct gpio_chip *chip, unsigned int offset) { struct snd_soc_component *component = gpio_to_component(chip); dev_dbg(component->dev, "set gpio %d to output\n", offset); return snd_soc_component_update_bits(component, AC97_GPIO_CFG, 1 << offset, 1 << offset); } static int snd_soc_ac97_gpio_get(struct gpio_chip *chip, unsigned int offset) { struct 
	       snd_soc_component *component = gpio_to_component(chip);
	int ret;

	ret = snd_soc_component_read(component, AC97_GPIO_STATUS);

	dev_dbg(component->dev, "get gpio %d : %d\n", offset,
		ret & (1 << offset));

	return !!(ret & (1 << offset));
}

/* Drive @offset to @value by updating the shadowed output register; the
 * shadow avoids read-modify-write over the slow AC-link. */
static void snd_soc_ac97_gpio_set(struct gpio_chip *chip, unsigned int offset,
				  int value)
{
	struct snd_ac97_gpio_priv *gpio_priv = gpiochip_get_data(chip);
	struct snd_soc_component *component = gpio_to_component(chip);

	gpio_priv->gpios_set &= ~(1 << offset);
	gpio_priv->gpios_set |= (!!value) << offset;
	snd_soc_component_write(component, AC97_GPIO_STATUS,
				gpio_priv->gpios_set);
	dev_dbg(component->dev, "set gpio %d to %d\n", offset, !!value);
}

/* Configure @offset as an output (clear the AC97_GPIO_CFG bit) after first
 * latching the requested level. */
static int snd_soc_ac97_gpio_direction_out(struct gpio_chip *chip,
					   unsigned offset, int value)
{
	struct snd_soc_component *component = gpio_to_component(chip);

	dev_dbg(component->dev, "set gpio %d to output\n", offset);
	snd_soc_ac97_gpio_set(chip, offset, value);
	return snd_soc_component_update_bits(component, AC97_GPIO_CFG,
					     1 << offset, 0);
}

/* Template; ngpio/parent/base are filled in per codec at init time */
static const struct gpio_chip snd_soc_ac97_gpio_chip = {
	.label			= "snd_soc_ac97",
	.owner			= THIS_MODULE,
	.request		= snd_soc_ac97_gpio_request,
	.direction_input	= snd_soc_ac97_gpio_direction_in,
	.get			= snd_soc_ac97_gpio_get,
	.direction_output	= snd_soc_ac97_gpio_direction_out,
	.set			= snd_soc_ac97_gpio_set,
	.can_sleep		= 1,
};

/* Register a gpiochip exposing the codec's AC'97 GPIO pins */
static int snd_soc_ac97_init_gpio(struct snd_ac97 *ac97,
				  struct snd_soc_component *component)
{
	struct snd_ac97_gpio_priv *gpio_priv;
	int ret;

	gpio_priv = devm_kzalloc(component->dev, sizeof(*gpio_priv), GFP_KERNEL);
	if (!gpio_priv)
		return -ENOMEM;
	ac97->gpio_priv = gpio_priv;
	gpio_priv->component = component;
	gpio_priv->gpio_chip = snd_soc_ac97_gpio_chip;
	gpio_priv->gpio_chip.ngpio = AC97_NUM_GPIOS;
	gpio_priv->gpio_chip.parent = component->dev;
	gpio_priv->gpio_chip.base = -1;	/* dynamic GPIO number allocation */

	ret = gpiochip_add_data(&gpio_priv->gpio_chip, gpio_priv);
	if (ret != 0)
		dev_err(component->dev, "Failed to add GPIOs: %d\n", ret);
	return ret;
}

static void snd_soc_ac97_free_gpio(struct snd_ac97 *ac97)
{
	gpiochip_remove(&ac97->gpio_priv->gpio_chip);
}
#else
/* No GPIOLIB: the GPIO helpers collapse to no-ops */
static int snd_soc_ac97_init_gpio(struct snd_ac97 *ac97,
				  struct snd_soc_component *component)
{
	return 0;
}

static void snd_soc_ac97_free_gpio(struct snd_ac97 *ac97)
{
}
#endif

/**
 * snd_soc_alloc_ac97_component() - Allocate new a AC'97 device
 * @component: The COMPONENT for which to create the AC'97 device
 *
 * Allocates a new snd_ac97 device and initializes it, but does not yet
 * register it. The caller is responsible to either call device_add(&ac97->dev)
 * to register the device, or to call put_device(&ac97->dev) to free the
 * device.
 *
 * Returns: A snd_ac97 device or a PTR_ERR in case of an error.
 */
struct snd_ac97 *snd_soc_alloc_ac97_component(struct snd_soc_component *component)
{
	struct snd_ac97 *ac97;

	ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL);
	if (ac97 == NULL)
		return ERR_PTR(-ENOMEM);

	ac97->bus = &soc_ac97_bus;
	ac97->num = 0;

	ac97->dev.bus = &ac97_bus_type;
	ac97->dev.parent = component->card->dev;
	ac97->dev.release = soc_ac97_device_release;

	dev_set_name(&ac97->dev, "%d-%d:%s",
		     component->card->snd_card->number, 0,
		     component->name);

	device_initialize(&ac97->dev);

	return ac97;
}
EXPORT_SYMBOL(snd_soc_alloc_ac97_component);

/**
 * snd_soc_new_ac97_component - initialise AC97 device
 * @component: audio component
 * @id: The expected device ID
 * @id_mask: Mask that is applied to the device ID before comparing with @id
 *
 * Initialises AC97 component resources for use by ad-hoc devices only.
 *
 * If @id is not 0 this function will reset the device, then read the ID from
 * the device and check if it matches the expected ID. If it doesn't match an
 * error will be returned and device will not be registered.
 *
 * Returns: A PTR_ERR() on failure or a valid snd_ac97 struct on success.
 */
struct snd_ac97 *snd_soc_new_ac97_component(struct snd_soc_component *component,
	unsigned int id, unsigned int id_mask)
{
	struct snd_ac97 *ac97;
	int ret;

	ac97 = snd_soc_alloc_ac97_component(component);
	if (IS_ERR(ac97))
		return ac97;

	if (id) {
		ret = snd_ac97_reset(ac97, false, id, id_mask);
		if (ret < 0) {
			dev_err(component->dev, "Failed to reset AC97 device: %d\n",
				ret);
			goto err_put_device;
		}
	}

	ret = device_add(&ac97->dev);
	if (ret)
		goto err_put_device;

	ret = snd_soc_ac97_init_gpio(ac97, component);
	if (ret)
		goto err_put_device;

	return ac97;

err_put_device:
	put_device(&ac97->dev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(snd_soc_new_ac97_component);

/**
 * snd_soc_free_ac97_component - free AC97 component device
 * @ac97: snd_ac97 device to be freed
 *
 * Frees AC97 component device resources.
 */
void snd_soc_free_ac97_component(struct snd_ac97 *ac97)
{
	snd_soc_ac97_free_gpio(ac97);
	device_del(&ac97->dev);
	ac97->bus = NULL;
	put_device(&ac97->dev);
}
EXPORT_SYMBOL_GPL(snd_soc_free_ac97_component);

static struct snd_ac97_reset_cfg snd_ac97_rst_cfg;

/* Generic warm reset: pulse SYNC high for 10us with the link in the
 * warm-reset pinctrl state, then return to the running state. */
static void snd_soc_ac97_warm_reset(struct snd_ac97 *ac97)
{
	struct pinctrl *pctl = snd_ac97_rst_cfg.pctl;

	pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_warm_reset);

	gpiod_direction_output_raw(snd_ac97_rst_cfg.sync_gpio, 1);

	udelay(10);

	gpiod_direction_output_raw(snd_ac97_rst_cfg.sync_gpio, 0);

	pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_run);

	msleep(2);
}

/* Generic cold reset: hold SYNC/SDATA low and pulse RESET# low for 10us,
 * then return to the running pinctrl state. */
static void snd_soc_ac97_reset(struct snd_ac97 *ac97)
{
	struct pinctrl *pctl = snd_ac97_rst_cfg.pctl;

	pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_reset);

	gpiod_direction_output_raw(snd_ac97_rst_cfg.sync_gpio, 0);
	gpiod_direction_output_raw(snd_ac97_rst_cfg.sdata_gpio, 0);
	gpiod_direction_output_raw(snd_ac97_rst_cfg.reset_gpio, 0);

	udelay(10);

	gpiod_direction_output_raw(snd_ac97_rst_cfg.reset_gpio, 1);

	pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_run);

	msleep(2);
}

/* Parse the pinctrl states and the three "ac97" GPIOs (sync, sdata, reset
 * in that index order) needed by the generic reset helpers. */
static int snd_soc_ac97_parse_pinctl(struct device *dev,
		struct snd_ac97_reset_cfg *cfg)
{
	struct pinctrl *p;
	struct pinctrl_state *state;

	p = devm_pinctrl_get(dev);
	if (IS_ERR(p)) {
		dev_err(dev, "Failed to get pinctrl\n");
		return PTR_ERR(p);
	}
	cfg->pctl = p;

	state = pinctrl_lookup_state(p, "ac97-reset");
	if (IS_ERR(state)) {
		dev_err(dev, "Can't find pinctrl state ac97-reset\n");
		return PTR_ERR(state);
	}
	cfg->pstate_reset = state;

	state = pinctrl_lookup_state(p, "ac97-warm-reset");
	if (IS_ERR(state)) {
		dev_err(dev, "Can't find pinctrl state ac97-warm-reset\n");
		return PTR_ERR(state);
	}
	cfg->pstate_warm_reset = state;

	state = pinctrl_lookup_state(p, "ac97-running");
	if (IS_ERR(state)) {
		dev_err(dev, "Can't find pinctrl state ac97-running\n");
		return PTR_ERR(state);
	}
	cfg->pstate_run = state;

	cfg->sync_gpio = devm_gpiod_get_index(dev, "ac97", 0, GPIOD_ASIS);
	if (IS_ERR(cfg->sync_gpio))
		return dev_err_probe(dev, PTR_ERR(cfg->sync_gpio),
				     "Can't find ac97-sync gpio\n");
	gpiod_set_consumer_name(cfg->sync_gpio, "AC97 link sync");

	cfg->sdata_gpio = devm_gpiod_get_index(dev, "ac97", 1, GPIOD_ASIS);
	if (IS_ERR(cfg->sdata_gpio))
		return dev_err_probe(dev, PTR_ERR(cfg->sdata_gpio),
				     "Can't find ac97-sdata gpio\n");
	gpiod_set_consumer_name(cfg->sdata_gpio, "AC97 link sdata");

	cfg->reset_gpio = devm_gpiod_get_index(dev, "ac97", 2, GPIOD_ASIS);
	if (IS_ERR(cfg->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(cfg->reset_gpio),
				     "Can't find ac97-reset gpio\n");
	gpiod_set_consumer_name(cfg->reset_gpio, "AC97 link reset");

	return 0;
}

struct snd_ac97_bus_ops *soc_ac97_ops;
EXPORT_SYMBOL_GPL(soc_ac97_ops);

/* Install the global AC'97 bus ops; only one set may be active at a time
 * (passing NULL clears them). */
int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops)
{
	if (ops == soc_ac97_ops)
		return 0;

	if (soc_ac97_ops && ops)
		return -EBUSY;

	soc_ac97_ops = ops;
	soc_ac97_bus.ops = ops;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_set_ac97_ops);

/**
 * snd_soc_set_ac97_ops_of_reset - Set ac97 ops with generic ac97 reset functions
 * @ops: bus ops
 * @pdev: platform device
 *
 * This function sets the reset and warm_reset properties of ops and parses
 * the device node of pdev to get pinctrl states and gpio numbers to use.
 */
int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
		struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct snd_ac97_reset_cfg cfg;
	int ret;

	ret = snd_soc_ac97_parse_pinctl(dev, &cfg);
	if (ret)
		return ret;

	ret = snd_soc_set_ac97_ops(ops);
	if (ret)
		return ret;

	ops->warm_reset = snd_soc_ac97_warm_reset;
	ops->reset = snd_soc_ac97_reset;

	snd_ac97_rst_cfg = cfg;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_set_ac97_ops_of_reset);
linux-master
sound/soc/soc-ac97.c
// SPDX-License-Identifier: GPL-2.0+ // // soc-topology.c -- ALSA SoC Topology // // Copyright (C) 2012 Texas Instruments Inc. // Copyright (C) 2015 Intel Corporation. // // Authors: Liam Girdwood <[email protected]> // K, Mythri P <[email protected]> // Prusty, Subhransu S <[email protected]> // B, Jayachandran <[email protected]> // Abdullah, Omair M <[email protected]> // Jin, Yao <[email protected]> // Lin, Mengdong <[email protected]> // // Add support to read audio firmware topology alongside firmware text. The // topology data can contain kcontrols, DAPM graphs, widgets, DAIs, DAI links, // equalizers, firmware, coefficients etc. // // This file only manages the core ALSA and ASoC components, all other bespoke // firmware topology data is passed to component drivers for bespoke handling. #include <linux/kernel.h> #include <linux/export.h> #include <linux/list.h> #include <linux/firmware.h> #include <linux/slab.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/soc-topology.h> #include <sound/tlv.h> #define SOC_TPLG_MAGIC_BIG_ENDIAN 0x436F5341 /* ASoC in reverse */ /* * We make several passes over the data (since it wont necessarily be ordered) * and process objects in the following order. This guarantees the component * drivers will be ready with any vendor data before the mixers and DAPM objects * are loaded (that may make use of the vendor data). 
*/ #define SOC_TPLG_PASS_MANIFEST 0 #define SOC_TPLG_PASS_VENDOR 1 #define SOC_TPLG_PASS_CONTROL 2 #define SOC_TPLG_PASS_WIDGET 3 #define SOC_TPLG_PASS_PCM_DAI 4 #define SOC_TPLG_PASS_GRAPH 5 #define SOC_TPLG_PASS_BE_DAI 6 #define SOC_TPLG_PASS_LINK 7 #define SOC_TPLG_PASS_START SOC_TPLG_PASS_MANIFEST #define SOC_TPLG_PASS_END SOC_TPLG_PASS_LINK /* topology context */ struct soc_tplg { const struct firmware *fw; /* runtime FW parsing */ const u8 *pos; /* read position */ const u8 *hdr_pos; /* header position */ unsigned int pass; /* pass number */ /* component caller */ struct device *dev; struct snd_soc_component *comp; u32 index; /* current block index */ /* vendor specific kcontrol operations */ const struct snd_soc_tplg_kcontrol_ops *io_ops; int io_ops_count; /* vendor specific bytes ext handlers, for TLV bytes controls */ const struct snd_soc_tplg_bytes_ext_ops *bytes_ext_ops; int bytes_ext_ops_count; /* optional fw loading callbacks to component drivers */ struct snd_soc_tplg_ops *ops; }; /* check we dont overflow the data for this control chunk */ static int soc_tplg_check_elem_count(struct soc_tplg *tplg, size_t elem_size, unsigned int count, size_t bytes, const char *elem_type) { const u8 *end = tplg->pos + elem_size * count; if (end > tplg->fw->data + tplg->fw->size) { dev_err(tplg->dev, "ASoC: %s overflow end of data\n", elem_type); return -EINVAL; } /* check there is enough room in chunk for control. 
extra bytes at the end of control are for vendor data here */ if (elem_size * count > bytes) { dev_err(tplg->dev, "ASoC: %s count %d of size %zu is bigger than chunk %zu\n", elem_type, count, elem_size, bytes); return -EINVAL; } return 0; } static inline bool soc_tplg_is_eof(struct soc_tplg *tplg) { const u8 *end = tplg->hdr_pos; if (end >= tplg->fw->data + tplg->fw->size) return true; return false; } static inline unsigned long soc_tplg_get_hdr_offset(struct soc_tplg *tplg) { return (unsigned long)(tplg->hdr_pos - tplg->fw->data); } static inline unsigned long soc_tplg_get_offset(struct soc_tplg *tplg) { return (unsigned long)(tplg->pos - tplg->fw->data); } /* mapping of Kcontrol types and associated operations. */ static const struct snd_soc_tplg_kcontrol_ops io_ops[] = { {SND_SOC_TPLG_CTL_VOLSW, snd_soc_get_volsw, snd_soc_put_volsw, snd_soc_info_volsw}, {SND_SOC_TPLG_CTL_VOLSW_SX, snd_soc_get_volsw_sx, snd_soc_put_volsw_sx, NULL}, {SND_SOC_TPLG_CTL_ENUM, snd_soc_get_enum_double, snd_soc_put_enum_double, snd_soc_info_enum_double}, {SND_SOC_TPLG_CTL_ENUM_VALUE, snd_soc_get_enum_double, snd_soc_put_enum_double, NULL}, {SND_SOC_TPLG_CTL_BYTES, snd_soc_bytes_get, snd_soc_bytes_put, snd_soc_bytes_info}, {SND_SOC_TPLG_CTL_RANGE, snd_soc_get_volsw_range, snd_soc_put_volsw_range, snd_soc_info_volsw_range}, {SND_SOC_TPLG_CTL_VOLSW_XR_SX, snd_soc_get_xr_sx, snd_soc_put_xr_sx, snd_soc_info_xr_sx}, {SND_SOC_TPLG_CTL_STROBE, snd_soc_get_strobe, snd_soc_put_strobe, NULL}, {SND_SOC_TPLG_DAPM_CTL_VOLSW, snd_soc_dapm_get_volsw, snd_soc_dapm_put_volsw, snd_soc_info_volsw}, {SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE, snd_soc_dapm_get_enum_double, snd_soc_dapm_put_enum_double, snd_soc_info_enum_double}, {SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT, snd_soc_dapm_get_enum_double, snd_soc_dapm_put_enum_double, NULL}, {SND_SOC_TPLG_DAPM_CTL_ENUM_VALUE, snd_soc_dapm_get_enum_double, snd_soc_dapm_put_enum_double, NULL}, {SND_SOC_TPLG_DAPM_CTL_PIN, snd_soc_dapm_get_pin_switch, snd_soc_dapm_put_pin_switch, 
snd_soc_dapm_info_pin_switch}, }; struct soc_tplg_map { int uid; int kid; }; /* mapping of widget types from UAPI IDs to kernel IDs */ static const struct soc_tplg_map dapm_map[] = { {SND_SOC_TPLG_DAPM_INPUT, snd_soc_dapm_input}, {SND_SOC_TPLG_DAPM_OUTPUT, snd_soc_dapm_output}, {SND_SOC_TPLG_DAPM_MUX, snd_soc_dapm_mux}, {SND_SOC_TPLG_DAPM_MIXER, snd_soc_dapm_mixer}, {SND_SOC_TPLG_DAPM_PGA, snd_soc_dapm_pga}, {SND_SOC_TPLG_DAPM_OUT_DRV, snd_soc_dapm_out_drv}, {SND_SOC_TPLG_DAPM_ADC, snd_soc_dapm_adc}, {SND_SOC_TPLG_DAPM_DAC, snd_soc_dapm_dac}, {SND_SOC_TPLG_DAPM_SWITCH, snd_soc_dapm_switch}, {SND_SOC_TPLG_DAPM_PRE, snd_soc_dapm_pre}, {SND_SOC_TPLG_DAPM_POST, snd_soc_dapm_post}, {SND_SOC_TPLG_DAPM_AIF_IN, snd_soc_dapm_aif_in}, {SND_SOC_TPLG_DAPM_AIF_OUT, snd_soc_dapm_aif_out}, {SND_SOC_TPLG_DAPM_DAI_IN, snd_soc_dapm_dai_in}, {SND_SOC_TPLG_DAPM_DAI_OUT, snd_soc_dapm_dai_out}, {SND_SOC_TPLG_DAPM_DAI_LINK, snd_soc_dapm_dai_link}, {SND_SOC_TPLG_DAPM_BUFFER, snd_soc_dapm_buffer}, {SND_SOC_TPLG_DAPM_SCHEDULER, snd_soc_dapm_scheduler}, {SND_SOC_TPLG_DAPM_EFFECT, snd_soc_dapm_effect}, {SND_SOC_TPLG_DAPM_SIGGEN, snd_soc_dapm_siggen}, {SND_SOC_TPLG_DAPM_SRC, snd_soc_dapm_src}, {SND_SOC_TPLG_DAPM_ASRC, snd_soc_dapm_asrc}, {SND_SOC_TPLG_DAPM_ENCODER, snd_soc_dapm_encoder}, {SND_SOC_TPLG_DAPM_DECODER, snd_soc_dapm_decoder}, }; static int tplg_chan_get_reg(struct soc_tplg *tplg, struct snd_soc_tplg_channel *chan, int map) { int i; for (i = 0; i < SND_SOC_TPLG_MAX_CHAN; i++) { if (le32_to_cpu(chan[i].id) == map) return le32_to_cpu(chan[i].reg); } return -EINVAL; } static int tplg_chan_get_shift(struct soc_tplg *tplg, struct snd_soc_tplg_channel *chan, int map) { int i; for (i = 0; i < SND_SOC_TPLG_MAX_CHAN; i++) { if (le32_to_cpu(chan[i].id) == map) return le32_to_cpu(chan[i].shift); } return -EINVAL; } static int get_widget_id(int tplg_type) { int i; for (i = 0; i < ARRAY_SIZE(dapm_map); i++) { if (tplg_type == dapm_map[i].uid) return dapm_map[i].kid; } return -EINVAL; } static 
inline void soc_bind_err(struct soc_tplg *tplg, struct snd_soc_tplg_ctl_hdr *hdr, int index) { dev_err(tplg->dev, "ASoC: invalid control type (g,p,i) %d:%d:%d index %d at 0x%lx\n", hdr->ops.get, hdr->ops.put, hdr->ops.info, index, soc_tplg_get_offset(tplg)); } static inline void soc_control_err(struct soc_tplg *tplg, struct snd_soc_tplg_ctl_hdr *hdr, const char *name) { dev_err(tplg->dev, "ASoC: no complete control IO handler for %s type (g,p,i) %d:%d:%d at 0x%lx\n", name, hdr->ops.get, hdr->ops.put, hdr->ops.info, soc_tplg_get_offset(tplg)); } /* pass vendor data to component driver for processing */ static int soc_tplg_vendor_load(struct soc_tplg *tplg, struct snd_soc_tplg_hdr *hdr) { int ret = 0; if (tplg->ops && tplg->ops->vendor_load) ret = tplg->ops->vendor_load(tplg->comp, tplg->index, hdr); else { dev_err(tplg->dev, "ASoC: no vendor load callback for ID %d\n", hdr->vendor_type); return -EINVAL; } if (ret < 0) dev_err(tplg->dev, "ASoC: vendor load failed at hdr offset %ld/0x%lx for type %d:%d\n", soc_tplg_get_hdr_offset(tplg), soc_tplg_get_hdr_offset(tplg), hdr->type, hdr->vendor_type); return ret; } /* optionally pass new dynamic widget to component driver. This is mainly for * external widgets where we can assign private data/ops */ static int soc_tplg_widget_load(struct soc_tplg *tplg, struct snd_soc_dapm_widget *w, struct snd_soc_tplg_dapm_widget *tplg_w) { if (tplg->ops && tplg->ops->widget_load) return tplg->ops->widget_load(tplg->comp, tplg->index, w, tplg_w); return 0; } /* optionally pass new dynamic widget to component driver. 
This is mainly for
 * external widgets where we can assign private data/ops */
static int soc_tplg_widget_ready(struct soc_tplg *tplg,
	struct snd_soc_dapm_widget *w, struct snd_soc_tplg_dapm_widget *tplg_w)
{
	if (tplg->ops && tplg->ops->widget_ready)
		return tplg->ops->widget_ready(tplg->comp, tplg->index, w,
			tplg_w);

	return 0;
}

/* pass DAI configurations to component driver for extra initialization */
static int soc_tplg_dai_load(struct soc_tplg *tplg,
	struct snd_soc_dai_driver *dai_drv,
	struct snd_soc_tplg_pcm *pcm, struct snd_soc_dai *dai)
{
	if (tplg->ops && tplg->ops->dai_load)
		return tplg->ops->dai_load(tplg->comp, tplg->index, dai_drv,
			pcm, dai);

	return 0;
}

/* pass link configurations to component driver for extra initialization */
static int soc_tplg_dai_link_load(struct soc_tplg *tplg,
	struct snd_soc_dai_link *link, struct snd_soc_tplg_link_config *cfg)
{
	if (tplg->ops && tplg->ops->link_load)
		return tplg->ops->link_load(tplg->comp, tplg->index, link, cfg);

	return 0;
}

/* tell the component driver that all firmware has been loaded in this request */
static int soc_tplg_complete(struct soc_tplg *tplg)
{
	if (tplg->ops && tplg->ops->complete)
		return tplg->ops->complete(tplg->comp);

	return 0;
}

/* add a dynamic kcontrol. On success *kcontrol holds the registered control. */
static int soc_tplg_add_dcontrol(struct snd_card *card, struct device *dev,
	const struct snd_kcontrol_new *control_new, const char *prefix,
	void *data, struct snd_kcontrol **kcontrol)
{
	int err;

	*kcontrol = snd_soc_cnew(control_new, data, control_new->name, prefix);
	if (*kcontrol == NULL) {
		dev_err(dev, "ASoC: Failed to create new kcontrol %s\n",
			control_new->name);
		return -ENOMEM;
	}

	err = snd_ctl_add(card, *kcontrol);
	if (err < 0) {
		dev_err(dev, "ASoC: Failed to add %s: %d\n",
			control_new->name, err);
		return err;
	}

	return 0;
}

/* add a dynamic kcontrol for component driver */
static int soc_tplg_add_kcontrol(struct soc_tplg *tplg,
	struct snd_kcontrol_new *k, struct snd_kcontrol **kcontrol)
{
	struct snd_soc_component *comp = tplg->comp;

	return soc_tplg_add_dcontrol(comp->card->snd_card,
				tplg->dev, k, comp->name_prefix, comp, kcontrol);
}

/* remove kcontrol */
static void soc_tplg_remove_kcontrol(struct snd_soc_component *comp,
				     struct snd_soc_dobj *dobj, int pass)
{
	struct snd_card *card = comp->card->snd_card;

	/* controls are only torn down on the control pass */
	if (pass != SOC_TPLG_PASS_CONTROL)
		return;

	if (dobj->unload)
		dobj->unload(comp, dobj);

	snd_ctl_remove(card, dobj->control.kcontrol);
	list_del(&dobj->list);
}

/* remove a route */
static void soc_tplg_remove_route(struct snd_soc_component *comp,
				  struct snd_soc_dobj *dobj, int pass)
{
	if (pass != SOC_TPLG_PASS_GRAPH)
		return;

	if (dobj->unload)
		dobj->unload(comp, dobj);

	list_del(&dobj->list);
}

/* remove a widget and its kcontrols - routes must be removed first */
static void soc_tplg_remove_widget(struct snd_soc_component *comp,
				   struct snd_soc_dobj *dobj, int pass)
{
	struct snd_card *card = comp->card->snd_card;
	struct snd_soc_dapm_widget *w =
		container_of(dobj, struct snd_soc_dapm_widget, dobj);
	int i;

	if (pass != SOC_TPLG_PASS_WIDGET)
		return;

	if (dobj->unload)
		dobj->unload(comp, dobj);

	if (!w->kcontrols)
		goto free_news;

	for (i = 0; w->kcontrols && i < w->num_kcontrols; i++)
		snd_ctl_remove(card, w->kcontrols[i]);

free_news:
	list_del(&dobj->list);

	/* widget w is freed by soc-dapm.c */
}

/* remove DAI configurations */
static void soc_tplg_remove_dai(struct snd_soc_component *comp,
				struct snd_soc_dobj *dobj, int pass)
{
	struct snd_soc_dai_driver *dai_drv =
		container_of(dobj, struct snd_soc_dai_driver, dobj);
	struct snd_soc_dai *dai, *_dai;

	if (pass != SOC_TPLG_PASS_PCM_DAI)
		return;

	if (dobj->unload)
		dobj->unload(comp, dobj);

	/* unregister every DAI instantiated from this driver */
	for_each_component_dais_safe(comp, dai, _dai)
		if (dai->driver == dai_drv)
			snd_soc_unregister_dai(dai);

	list_del(&dobj->list);
}

/* remove link configurations */
static void soc_tplg_remove_link(struct snd_soc_component *comp,
	struct snd_soc_dobj *dobj, int pass)
{
	struct snd_soc_dai_link *link =
		container_of(dobj, struct snd_soc_dai_link, dobj);

	if (pass != SOC_TPLG_PASS_PCM_DAI)
		return;

	if (dobj->unload)
		dobj->unload(comp, dobj);

	list_del(&dobj->list);
	snd_soc_remove_pcm_runtime(comp->card,
			snd_soc_get_pcm_runtime(comp->card, link));
}

/* unload dai link */
static void remove_backend_link(struct snd_soc_component *comp,
	struct snd_soc_dobj *dobj, int pass)
{
	if (pass != SOC_TPLG_PASS_LINK)
		return;

	if (dobj->unload)
		dobj->unload(comp, dobj);

	/*
	 * We don't free the link here as what soc_tplg_remove_link() do since BE
	 * links are not allocated by topology.
	 * We however need to reset the dobj type to its initial values
	 */
	dobj->type = SND_SOC_DOBJ_NONE;
	list_del(&dobj->list);
}

/* bind a kcontrol to its I/O handlers. Resolution order: TLV bytes-ext
 * special case first, then vendor-specific ops, then the standard io_ops
 * table. Returns -EINVAL if no complete get/put/info set could be bound. */
static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
	struct snd_kcontrol_new *k,
	const struct soc_tplg *tplg)
{
	const struct snd_soc_tplg_kcontrol_ops *ops;
	const struct snd_soc_tplg_bytes_ext_ops *ext_ops;
	int num_ops, i;

	if (le32_to_cpu(hdr->ops.info) == SND_SOC_TPLG_CTL_BYTES
		&& k->iface & SNDRV_CTL_ELEM_IFACE_MIXER
		&& (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_READ
		    || k->access & SNDRV_CTL_ELEM_ACCESS_TLV_WRITE)
		&& k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
		struct soc_bytes_ext *sbe;
		struct snd_soc_tplg_bytes_control *be;

		sbe = (struct soc_bytes_ext *)k->private_value;
		be = container_of(hdr, struct snd_soc_tplg_bytes_control, hdr);

		/* TLV bytes controls need standard kcontrol info handler,
		 * TLV callback and extended put/get handlers.
		 */
		k->info = snd_soc_bytes_info_ext;
		k->tlv.c = snd_soc_bytes_tlv_callback;

		/*
		 * When a topology-based implementation abuses the
		 * control interface and uses bytes_ext controls of
		 * more than 512 bytes, we need to disable the size
		 * checks, otherwise accesses to such controls will
		 * return an -EINVAL error and prevent the card from
		 * being configured.
		 */
		if (sbe->max > 512)
			k->access |= SNDRV_CTL_ELEM_ACCESS_SKIP_CHECK;

		ext_ops = tplg->bytes_ext_ops;
		num_ops = tplg->bytes_ext_ops_count;
		for (i = 0; i < num_ops; i++) {
			if (!sbe->put &&
			    ext_ops[i].id == le32_to_cpu(be->ext_ops.put))
				sbe->put = ext_ops[i].put;
			if (!sbe->get &&
			    ext_ops[i].id == le32_to_cpu(be->ext_ops.get))
				sbe->get = ext_ops[i].get;
		}
		/* a readable/writable TLV control must have its handler bound */
		if ((k->access & SNDRV_CTL_ELEM_ACCESS_TLV_READ) && !sbe->get)
			return -EINVAL;
		if ((k->access & SNDRV_CTL_ELEM_ACCESS_TLV_WRITE) && !sbe->put)
			return -EINVAL;
		return 0;
	}

	/* try and map vendor specific kcontrol handlers first */
	ops = tplg->io_ops;
	num_ops = tplg->io_ops_count;
	for (i = 0; i < num_ops; i++) {
		if (k->put == NULL && ops[i].id == le32_to_cpu(hdr->ops.put))
			k->put = ops[i].put;
		if (k->get == NULL && ops[i].id == le32_to_cpu(hdr->ops.get))
			k->get = ops[i].get;
		if (k->info == NULL && ops[i].id == le32_to_cpu(hdr->ops.info))
			k->info = ops[i].info;
	}

	/* vendor specific handlers found ? */
	if (k->put && k->get && k->info)
		return 0;

	/* none found so try standard kcontrol handlers */
	ops = io_ops;
	num_ops = ARRAY_SIZE(io_ops);
	for (i = 0; i < num_ops; i++) {
		if (k->put == NULL && ops[i].id == le32_to_cpu(hdr->ops.put))
			k->put = ops[i].put;
		if (k->get == NULL && ops[i].id == le32_to_cpu(hdr->ops.get))
			k->get = ops[i].get;
		if (k->info == NULL && ops[i].id == le32_to_cpu(hdr->ops.info))
			k->info = ops[i].info;
	}

	/* standard handlers found ? */
	if (k->put && k->get && k->info)
		return 0;

	/* nothing to bind */
	return -EINVAL;
}

/* bind a widget to its event handlers */
int snd_soc_tplg_widget_bind_event(struct snd_soc_dapm_widget *w,
		const struct snd_soc_tplg_widget_events *events,
		int num_events, u16 event_type)
{
	int i;

	w->event = NULL;

	for (i = 0; i < num_events; i++) {
		if (event_type == events[i].type) {

			/* found - so assign event */
			w->event = events[i].event_handler;
			return 0;
		}
	}

	/* not found */
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_bind_event);

/* optionally pass new dynamic kcontrol to component driver. */
static int soc_tplg_control_load(struct soc_tplg *tplg,
	struct snd_kcontrol_new *k, struct snd_soc_tplg_ctl_hdr *hdr)
{
	int ret = 0;

	if (tplg->ops && tplg->ops->control_load)
		ret = tplg->ops->control_load(tplg->comp, tplg->index, k,
			hdr);

	if (ret)
		dev_err(tplg->dev, "ASoC: failed to init %s\n", hdr->name);

	return ret;
}

/* build a dB-scale TLV blob for a volume control and attach it to kc */
static int soc_tplg_create_tlv_db_scale(struct soc_tplg *tplg,
	struct snd_kcontrol_new *kc, struct snd_soc_tplg_tlv_dbscale *scale)
{
	unsigned int item_len = 2 * sizeof(unsigned int);
	unsigned int *p;

	/* 2 words of TLV header (type, length) + 2 words of payload */
	p = devm_kzalloc(tplg->dev, item_len + 2 * sizeof(unsigned int), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p[0] = SNDRV_CTL_TLVT_DB_SCALE;
	p[1] = item_len;
	p[2] = le32_to_cpu(scale->min);
	p[3] = (le32_to_cpu(scale->step) & TLV_DB_SCALE_MASK)
		| (le32_to_cpu(scale->mute) ? TLV_DB_SCALE_MUTE : 0);

	kc->tlv.p = (void *)p;
	return 0;
}

/* create TLV data for a control unless it uses a TLV callback instead */
static int soc_tplg_create_tlv(struct soc_tplg *tplg,
	struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_hdr *tc)
{
	struct snd_soc_tplg_ctl_tlv *tplg_tlv;
	u32 access = le32_to_cpu(tc->access);

	if (!(access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE))
		return 0;

	if (!(access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK)) {
		tplg_tlv = &tc->tlv;
		switch (le32_to_cpu(tplg_tlv->type)) {
		case SNDRV_CTL_TLVT_DB_SCALE:
			return soc_tplg_create_tlv_db_scale(tplg, kc,
					&tplg_tlv->scale);

		/* TODO: add support for other TLV types */
		default:
			dev_dbg(tplg->dev, "Unsupported TLV type %d\n",
					tplg_tlv->type);
			return -EINVAL;
		}
	}

	return 0;
}

/* create a standalone bytes kcontrol from the data at tplg->pos and
 * register it; advances tplg->pos past the element and its private data */
static int soc_tplg_dbytes_create(struct soc_tplg *tplg, size_t size)
{
	struct snd_soc_tplg_bytes_control *be;
	struct soc_bytes_ext *sbe;
	struct snd_kcontrol_new kc;
	int ret = 0;

	if (soc_tplg_check_elem_count(tplg, sizeof(struct snd_soc_tplg_bytes_control),
				      1, size, "mixer bytes"))
		return -EINVAL;

	be = (struct snd_soc_tplg_bytes_control *)tplg->pos;

	/* validate kcontrol */
	if (strnlen(be->hdr.name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
		SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
		return -EINVAL;

	sbe = devm_kzalloc(tplg->dev, sizeof(*sbe), GFP_KERNEL);
	if (sbe == NULL)
		return -ENOMEM;

	tplg->pos += (sizeof(struct snd_soc_tplg_bytes_control) +
		      le32_to_cpu(be->priv.size));

	dev_dbg(tplg->dev,
		"ASoC: adding bytes kcontrol %s with access 0x%x\n",
		be->hdr.name, be->hdr.access);

	memset(&kc, 0, sizeof(kc));
	kc.name = be->hdr.name;
	kc.private_value = (long)sbe;
	kc.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
	kc.access = le32_to_cpu(be->hdr.access);

	sbe->max = le32_to_cpu(be->max);
	sbe->dobj.type = SND_SOC_DOBJ_BYTES;
	if (tplg->ops)
		sbe->dobj.unload = tplg->ops->control_unload;
	INIT_LIST_HEAD(&sbe->dobj.list);

	/* map io handlers */
	ret = soc_tplg_kcontrol_bind_io(&be->hdr, &kc, tplg);
	if (ret) {
		soc_control_err(tplg, &be->hdr, be->hdr.name);
		goto err;
	}

	/* pass control to driver for optional further init */
	ret = soc_tplg_control_load(tplg, &kc, &be->hdr);
	if (ret < 0)
		goto err;

	/* register control here */
	ret = soc_tplg_add_kcontrol(tplg, &kc, &sbe->dobj.control.kcontrol);
	if (ret < 0)
		goto err;

	list_add(&sbe->dobj.list, &tplg->comp->dobj_list);

err:
	return ret;
}

/* create a standalone mixer kcontrol from the data at tplg->pos and
 * register it; advances tplg->pos past the element and its private data */
static int soc_tplg_dmixer_create(struct soc_tplg *tplg, size_t size)
{
	struct snd_soc_tplg_mixer_control *mc;
	struct soc_mixer_control *sm;
	struct snd_kcontrol_new kc;
	int ret = 0;

	if (soc_tplg_check_elem_count(tplg, sizeof(struct snd_soc_tplg_mixer_control),
				      1, size, "mixers"))
		return -EINVAL;

	mc = (struct snd_soc_tplg_mixer_control *)tplg->pos;

	/* validate kcontrol */
	if (strnlen(mc->hdr.name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
		SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
		return -EINVAL;

	sm = devm_kzalloc(tplg->dev, sizeof(*sm), GFP_KERNEL);
	if (sm == NULL)
		return -ENOMEM;
	tplg->pos += (sizeof(struct snd_soc_tplg_mixer_control) +
		      le32_to_cpu(mc->priv.size));

	dev_dbg(tplg->dev, "ASoC: adding mixer kcontrol %s with access 0x%x\n",
		mc->hdr.name, mc->hdr.access);

	memset(&kc, 0, sizeof(kc));
	kc.name = mc->hdr.name;
	kc.private_value = (long)sm;
	kc.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
	kc.access = le32_to_cpu(mc->hdr.access);

	/* we only support FL/FR channel mapping atm */
	sm->reg = tplg_chan_get_reg(tplg, mc->channel, SNDRV_CHMAP_FL);
	sm->rreg = tplg_chan_get_reg(tplg, mc->channel, SNDRV_CHMAP_FR);
	sm->shift = tplg_chan_get_shift(tplg, mc->channel, SNDRV_CHMAP_FL);
	sm->rshift = tplg_chan_get_shift(tplg, mc->channel, SNDRV_CHMAP_FR);

	sm->max = le32_to_cpu(mc->max);
	sm->min = le32_to_cpu(mc->min);
	sm->invert = le32_to_cpu(mc->invert);
	sm->platform_max = le32_to_cpu(mc->platform_max);
	sm->dobj.index = tplg->index;
	sm->dobj.type = SND_SOC_DOBJ_MIXER;
	if (tplg->ops)
		sm->dobj.unload = tplg->ops->control_unload;
	INIT_LIST_HEAD(&sm->dobj.list);

	/* map io handlers */
	ret = soc_tplg_kcontrol_bind_io(&mc->hdr, &kc, tplg);
	if (ret) {
		soc_control_err(tplg, &mc->hdr, mc->hdr.name);
		goto err;
	}

	/* create any TLV data */
	ret = soc_tplg_create_tlv(tplg, &kc, &mc->hdr);
	if (ret < 0) {
		dev_err(tplg->dev, "ASoC: failed to create TLV %s\n",
			mc->hdr.name);
		goto err;
	}

	/* pass control to driver for optional further init */
	ret = soc_tplg_control_load(tplg, &kc, &mc->hdr);
	if (ret < 0)
		goto err;

	/* register control here */
	ret = soc_tplg_add_kcontrol(tplg, &kc, &sm->dobj.control.kcontrol);
	if (ret < 0)
		goto err;

	list_add(&sm->dobj.list, &tplg->comp->dobj_list);

err:
	return ret;
}

/* duplicate the enum's text strings into driver-managed memory */
static int soc_tplg_denum_create_texts(struct soc_tplg *tplg, struct soc_enum *se,
				       struct snd_soc_tplg_enum_control *ec)
{
	int i, ret;

	if (le32_to_cpu(ec->items) > ARRAY_SIZE(ec->texts))
		return -EINVAL;

	se->dobj.control.dtexts =
		devm_kcalloc(tplg->dev, le32_to_cpu(ec->items), sizeof(char *), GFP_KERNEL);
	if (se->dobj.control.dtexts == NULL)
		return -ENOMEM;

	for (i = 0; i < le32_to_cpu(ec->items); i++) {

		if (strnlen(ec->texts[i], SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
			SNDRV_CTL_ELEM_ID_NAME_MAXLEN) {
			ret = -EINVAL;
			goto err;
		}

		se->dobj.control.dtexts[i] = devm_kstrdup(tplg->dev, ec->texts[i], GFP_KERNEL);
		if (!se->dobj.control.dtexts[i]) {
			ret = -ENOMEM;
			goto err;
		}
	}

	se->items = le32_to_cpu(ec->items);
	se->texts = (const char * const *)se->dobj.control.dtexts;
	return 0;

err:
	return ret;
}

/* copy the enum's value table, converting each entry from little-endian */
static int soc_tplg_denum_create_values(struct soc_tplg *tplg, struct soc_enum *se,
					struct snd_soc_tplg_enum_control *ec)
{
	int i;

	/*
	 * Following "if" checks if we have at most SND_SOC_TPLG_NUM_TEXTS
	 * values instead of using ARRAY_SIZE(ec->values) due to the fact that
	 * it is oversized for its purpose. Additionally it is done so because
	 * it is defined in UAPI header where it can't be easily changed.
	 */
	if (le32_to_cpu(ec->items) > SND_SOC_TPLG_NUM_TEXTS)
		return -EINVAL;

	se->dobj.control.dvalues = devm_kcalloc(tplg->dev, le32_to_cpu(ec->items),
					   sizeof(*se->dobj.control.dvalues),
					   GFP_KERNEL);
	if (!se->dobj.control.dvalues)
		return -ENOMEM;

	/* convert from little-endian */
	for (i = 0; i < le32_to_cpu(ec->items); i++) {
		se->dobj.control.dvalues[i] = le32_to_cpu(ec->values[i]);
	}

	return 0;
}

/* create a standalone enum kcontrol from the data at tplg->pos and
 * register it; advances tplg->pos past the element and its private data */
static int soc_tplg_denum_create(struct soc_tplg *tplg, size_t size)
{
	struct snd_soc_tplg_enum_control *ec;
	struct soc_enum *se;
	struct snd_kcontrol_new kc;
	int ret = 0;

	if (soc_tplg_check_elem_count(tplg, sizeof(struct snd_soc_tplg_enum_control),
				      1, size, "enums"))
		return -EINVAL;

	ec = (struct snd_soc_tplg_enum_control *)tplg->pos;

	/* validate kcontrol */
	if (strnlen(ec->hdr.name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
		SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
		return -EINVAL;

	se = devm_kzalloc(tplg->dev, (sizeof(*se)), GFP_KERNEL);
	if (se == NULL)
		return -ENOMEM;

	tplg->pos += (sizeof(struct snd_soc_tplg_enum_control) +
		      le32_to_cpu(ec->priv.size));

	dev_dbg(tplg->dev, "ASoC: adding enum kcontrol %s size %d\n",
		ec->hdr.name, ec->items);

	memset(&kc, 0, sizeof(kc));
	kc.name = ec->hdr.name;
	kc.private_value = (long)se;
	kc.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
	kc.access = le32_to_cpu(ec->hdr.access);

	/* NOTE(review): shift_r is looked up with SNDRV_CHMAP_FL (not FR),
	 * unlike the DAPM variant below — looks intentional upstream, confirm */
	se->reg = tplg_chan_get_reg(tplg, ec->channel, SNDRV_CHMAP_FL);
	se->shift_l = tplg_chan_get_shift(tplg, ec->channel, SNDRV_CHMAP_FL);
	se->shift_r = tplg_chan_get_shift(tplg, ec->channel, SNDRV_CHMAP_FL);

	se->mask = le32_to_cpu(ec->mask);
	se->dobj.index = tplg->index;
	se->dobj.type = SND_SOC_DOBJ_ENUM;
	if (tplg->ops)
		se->dobj.unload = tplg->ops->control_unload;
	INIT_LIST_HEAD(&se->dobj.list);

	switch (le32_to_cpu(ec->hdr.ops.info)) {
	case SND_SOC_TPLG_DAPM_CTL_ENUM_VALUE:
	case SND_SOC_TPLG_CTL_ENUM_VALUE:
		ret = soc_tplg_denum_create_values(tplg, se, ec);
		if (ret < 0) {
			dev_err(tplg->dev, "ASoC: could not create values for %s\n",
				ec->hdr.name);
			goto err;
		}
		fallthrough;
	case SND_SOC_TPLG_CTL_ENUM:
	case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE:
	case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT:
		ret = soc_tplg_denum_create_texts(tplg, se, ec);
		if (ret < 0) {
			dev_err(tplg->dev, "ASoC: could not create texts for %s\n",
				ec->hdr.name);
			goto err;
		}
		break;
	default:
		ret = -EINVAL;
		dev_err(tplg->dev, "ASoC: invalid enum control type %d for %s\n",
			ec->hdr.ops.info, ec->hdr.name);
		goto err;
	}

	/* map io handlers */
	ret = soc_tplg_kcontrol_bind_io(&ec->hdr, &kc, tplg);
	if (ret) {
		soc_control_err(tplg, &ec->hdr, ec->hdr.name);
		goto err;
	}

	/* pass control to driver for optional further init */
	ret = soc_tplg_control_load(tplg, &kc, &ec->hdr);
	if (ret < 0)
		goto err;

	/* register control here */
	ret = soc_tplg_add_kcontrol(tplg, &kc, &se->dobj.control.kcontrol);
	if (ret < 0)
		goto err;

	list_add(&se->dobj.list, &tplg->comp->dobj_list);

err:
	return ret;
}

/* dispatch each control element in this header chunk to the matching
 * mixer/enum/bytes creator, based on the element's info op */
static int soc_tplg_kcontrol_elems_load(struct soc_tplg *tplg,
	struct snd_soc_tplg_hdr *hdr)
{
	int ret;
	int i;

	dev_dbg(tplg->dev, "ASoC: adding %d kcontrols at 0x%lx\n", hdr->count,
		soc_tplg_get_offset(tplg));

	for (i = 0; i < le32_to_cpu(hdr->count); i++) {
		struct snd_soc_tplg_ctl_hdr *control_hdr = (struct snd_soc_tplg_ctl_hdr *)tplg->pos;

		if (le32_to_cpu(control_hdr->size) != sizeof(*control_hdr)) {
			dev_err(tplg->dev, "ASoC: invalid control size\n");
			return -EINVAL;
		}

		switch (le32_to_cpu(control_hdr->ops.info)) {
		case SND_SOC_TPLG_CTL_VOLSW:
		case SND_SOC_TPLG_CTL_STROBE:
		case SND_SOC_TPLG_CTL_VOLSW_SX:
		case SND_SOC_TPLG_CTL_VOLSW_XR_SX:
		case SND_SOC_TPLG_CTL_RANGE:
		case SND_SOC_TPLG_DAPM_CTL_VOLSW:
		case SND_SOC_TPLG_DAPM_CTL_PIN:
			ret = soc_tplg_dmixer_create(tplg, le32_to_cpu(hdr->payload_size));
			break;
		case SND_SOC_TPLG_CTL_ENUM:
		case SND_SOC_TPLG_CTL_ENUM_VALUE:
		case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE:
		case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT:
		case SND_SOC_TPLG_DAPM_CTL_ENUM_VALUE:
			ret = soc_tplg_denum_create(tplg, le32_to_cpu(hdr->payload_size));
			break;
		case SND_SOC_TPLG_CTL_BYTES:
			ret = soc_tplg_dbytes_create(tplg, le32_to_cpu(hdr->payload_size));
			break;
		default:
			soc_bind_err(tplg, control_hdr, i);
			return -EINVAL;
		}
		if (ret < 0) {
			dev_err(tplg->dev, "ASoC: invalid control\n");
			return ret;
		}
	}

	return 0;
}

/* optionally pass new dynamic route to component driver. */
static int soc_tplg_add_route(struct soc_tplg *tplg,
	struct snd_soc_dapm_route *route)
{
	if (tplg->ops && tplg->ops->dapm_route_load)
		return tplg->ops->dapm_route_load(tplg->comp, tplg->index,
			route);

	return 0;
}

/* parse the DAPM graph elements in this chunk and add them as routes */
static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg,
	struct snd_soc_tplg_hdr *hdr)
{
	struct snd_soc_dapm_context *dapm = &tplg->comp->dapm;
	struct snd_soc_tplg_dapm_graph_elem *elem;
	struct snd_soc_dapm_route *route;
	int count, i;
	int ret = 0;

	count = le32_to_cpu(hdr->count);

	if (soc_tplg_check_elem_count(tplg, sizeof(struct snd_soc_tplg_dapm_graph_elem),
				      count, le32_to_cpu(hdr->payload_size), "graph"))
		return -EINVAL;

	dev_dbg(tplg->dev, "ASoC: adding %d DAPM routes for index %d\n", count,
		hdr->index);

	for (i = 0; i < count; i++) {
		route = devm_kzalloc(tplg->dev, sizeof(*route), GFP_KERNEL);
		if (!route)
			return -ENOMEM;
		elem = (struct snd_soc_tplg_dapm_graph_elem *)tplg->pos;
		tplg->pos += sizeof(struct snd_soc_tplg_dapm_graph_elem);

		/* validate routes */
		if (strnlen(elem->source, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
			    SNDRV_CTL_ELEM_ID_NAME_MAXLEN) {
			ret = -EINVAL;
			break;
		}
		if (strnlen(elem->sink, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
			    SNDRV_CTL_ELEM_ID_NAME_MAXLEN) {
			ret = -EINVAL;
			break;
		}
		if (strnlen(elem->control, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
			    SNDRV_CTL_ELEM_ID_NAME_MAXLEN) {
			ret = -EINVAL;
			break;
		}

		route->source = elem->source;
		route->sink = elem->sink;

		/* set to NULL atm for tplg users */
		route->connected = NULL;
		if (strnlen(elem->control, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) == 0)
			route->control = NULL;
		else
			route->control = elem->control;

		/* add route dobj to dobj_list */
		route->dobj.type = SND_SOC_DOBJ_GRAPH;
		if (tplg->ops)
			route->dobj.unload = tplg->ops->dapm_route_unload;
		route->dobj.index = tplg->index;
		list_add(&route->dobj.list, &tplg->comp->dobj_list);

		ret = soc_tplg_add_route(tplg, route);
		if (ret < 0) {
			dev_err(tplg->dev, "ASoC: topology: add_route failed: %d\n", ret);
			break;
		}

		/* add route, but keep going if some fail */
		snd_soc_dapm_add_routes(dapm, route, 1);
	}

	return ret;
}

/* create one mixer control embedded in a DAPM widget; fills in *kc and
 * advances tplg->pos past the element and its private data */
static int soc_tplg_dapm_widget_dmixer_create(struct soc_tplg *tplg,
	struct snd_kcontrol_new *kc)
{
	struct soc_mixer_control *sm;
	struct snd_soc_tplg_mixer_control *mc;
	int err;

	mc = (struct snd_soc_tplg_mixer_control *)tplg->pos;

	/* validate kcontrol */
	if (strnlen(mc->hdr.name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
	    SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
		return -EINVAL;

	sm = devm_kzalloc(tplg->dev, sizeof(*sm), GFP_KERNEL);
	if (!sm)
		return -ENOMEM;

	tplg->pos += sizeof(struct snd_soc_tplg_mixer_control) +
		     le32_to_cpu(mc->priv.size);

	dev_dbg(tplg->dev, " adding DAPM widget mixer control %s\n",
		mc->hdr.name);

	kc->private_value = (long)sm;
	kc->name = devm_kstrdup(tplg->dev, mc->hdr.name, GFP_KERNEL);
	if (!kc->name)
		return -ENOMEM;
	kc->iface = SNDRV_CTL_ELEM_IFACE_MIXER;
	kc->access = le32_to_cpu(mc->hdr.access);

	/* we only support FL/FR channel mapping atm */
	sm->reg = tplg_chan_get_reg(tplg, mc->channel, SNDRV_CHMAP_FL);
	sm->rreg = tplg_chan_get_reg(tplg, mc->channel, SNDRV_CHMAP_FR);
	sm->shift = tplg_chan_get_shift(tplg, mc->channel, SNDRV_CHMAP_FL);
	sm->rshift = tplg_chan_get_shift(tplg, mc->channel, SNDRV_CHMAP_FR);

	sm->max = le32_to_cpu(mc->max);
	sm->min = le32_to_cpu(mc->min);
	sm->invert = le32_to_cpu(mc->invert);
	sm->platform_max = le32_to_cpu(mc->platform_max);
	sm->dobj.index = tplg->index;
	INIT_LIST_HEAD(&sm->dobj.list);

	/* map io handlers */
	err = soc_tplg_kcontrol_bind_io(&mc->hdr, kc, tplg);
	if (err) {
		soc_control_err(tplg, &mc->hdr, mc->hdr.name);
		return err;
	}

	/* create any TLV data */
	err = soc_tplg_create_tlv(tplg, kc, &mc->hdr);
	if (err < 0) {
		dev_err(tplg->dev, "ASoC: failed to create TLV %s\n",
			mc->hdr.name);
		return err;
	}

	/* pass control to driver for optional further init */
	err = soc_tplg_control_load(tplg, kc, &mc->hdr);
	if (err < 0)
		return err;

	return 0;
}

/* create one enum control embedded in a DAPM widget; fills in *kc and
 * advances tplg->pos past the element and its private data */
static int soc_tplg_dapm_widget_denum_create(struct soc_tplg *tplg,
	struct snd_kcontrol_new *kc)
{
	struct snd_soc_tplg_enum_control *ec;
	struct soc_enum *se;
	int err;

	ec = (struct snd_soc_tplg_enum_control *)tplg->pos;

	/* validate kcontrol */
	if (strnlen(ec->hdr.name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
	    SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
		return -EINVAL;

	se = devm_kzalloc(tplg->dev, sizeof(*se), GFP_KERNEL);
	if (!se)
		return -ENOMEM;

	tplg->pos += (sizeof(struct snd_soc_tplg_enum_control) +
		      le32_to_cpu(ec->priv.size));

	dev_dbg(tplg->dev, " adding DAPM widget enum control %s\n",
		ec->hdr.name);

	kc->private_value = (long)se;
	kc->name = devm_kstrdup(tplg->dev, ec->hdr.name, GFP_KERNEL);
	if (!kc->name)
		return -ENOMEM;
	kc->iface = SNDRV_CTL_ELEM_IFACE_MIXER;
	kc->access = le32_to_cpu(ec->hdr.access);

	/* we only support FL/FR channel mapping atm */
	se->reg = tplg_chan_get_reg(tplg, ec->channel, SNDRV_CHMAP_FL);
	se->shift_l = tplg_chan_get_shift(tplg, ec->channel, SNDRV_CHMAP_FL);
	se->shift_r = tplg_chan_get_shift(tplg, ec->channel, SNDRV_CHMAP_FR);

	se->items = le32_to_cpu(ec->items);
	se->mask = le32_to_cpu(ec->mask);
	se->dobj.index = tplg->index;

	switch (le32_to_cpu(ec->hdr.ops.info)) {
	case SND_SOC_TPLG_CTL_ENUM_VALUE:
	case SND_SOC_TPLG_DAPM_CTL_ENUM_VALUE:
		err = soc_tplg_denum_create_values(tplg, se, ec);
		if (err < 0) {
			dev_err(tplg->dev, "ASoC: could not create values for %s\n",
				ec->hdr.name);
			return err;
		}
		fallthrough;
	case SND_SOC_TPLG_CTL_ENUM:
	case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE:
	case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT:
		err = soc_tplg_denum_create_texts(tplg, se, ec);
		if (err < 0) {
			dev_err(tplg->dev, "ASoC: could not create texts for %s\n",
				ec->hdr.name);
			return err;
		}
		break;
	default:
		dev_err(tplg->dev, "ASoC: invalid enum control type %d for %s\n",
			ec->hdr.ops.info, ec->hdr.name);
		return -EINVAL;
	}

	/* map io handlers */
	err = soc_tplg_kcontrol_bind_io(&ec->hdr, kc, tplg);
	if (err) {
		soc_control_err(tplg, &ec->hdr, ec->hdr.name);
		return err;
	}

	/* pass control to driver for optional further init */
	err = soc_tplg_control_load(tplg, kc, &ec->hdr);
	if (err < 0)
		return err;

	return 0;
}

/* create one bytes control embedded in a DAPM widget; fills in *kc and
 * advances tplg->pos past the element and its private data */
static int soc_tplg_dapm_widget_dbytes_create(struct soc_tplg *tplg,
	struct snd_kcontrol_new *kc)
{
	struct snd_soc_tplg_bytes_control *be;
	struct soc_bytes_ext *sbe;
	int err;

	be = (struct snd_soc_tplg_bytes_control *)tplg->pos;

	/* validate kcontrol */
	if (strnlen(be->hdr.name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
	    SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
		return -EINVAL;

	sbe = devm_kzalloc(tplg->dev, sizeof(*sbe), GFP_KERNEL);
	if (!sbe)
		return -ENOMEM;

	tplg->pos += (sizeof(struct snd_soc_tplg_bytes_control) +
		      le32_to_cpu(be->priv.size));

	dev_dbg(tplg->dev, "ASoC: adding bytes kcontrol %s with access 0x%x\n",
		be->hdr.name, be->hdr.access);

	kc->private_value = (long)sbe;
	kc->name = devm_kstrdup(tplg->dev, be->hdr.name, GFP_KERNEL);
	if (!kc->name)
		return -ENOMEM;
	kc->iface = SNDRV_CTL_ELEM_IFACE_MIXER;
	kc->access = le32_to_cpu(be->hdr.access);

	sbe->max = le32_to_cpu(be->max);
	INIT_LIST_HEAD(&sbe->dobj.list);

	/* map standard io handlers and check for external handlers */
	err = soc_tplg_kcontrol_bind_io(&be->hdr, kc, tplg);
	if (err) {
		soc_control_err(tplg, &be->hdr, be->hdr.name);
		return err;
	}

	/* pass control to driver for optional further init */
	err = soc_tplg_control_load(tplg, kc, &be->hdr);
	if (err < 0)
		return err;

	return 0;
}

/* instantiate a DAPM widget (and its embedded kcontrols) from topology data */
static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
	struct snd_soc_tplg_dapm_widget *w)
{
	struct snd_soc_dapm_context *dapm = &tplg->comp->dapm;
	struct snd_soc_dapm_widget template, *widget;
	struct snd_soc_tplg_ctl_hdr *control_hdr;
	struct snd_soc_card *card = tplg->comp->card;
	unsigned int *kcontrol_type = NULL;
	struct snd_kcontrol_new *kc;
	int mixer_count = 0;
	int bytes_count = 0;
	int enum_count = 0;
	int ret = 0;
	int i;

	if (strnlen(w->name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
		SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
		return -EINVAL;
	if (strnlen(w->sname, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
		SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
		return -EINVAL;

	dev_dbg(tplg->dev, "ASoC: creating DAPM widget %s id %d\n",
		w->name, w->id);

	memset(&template, 0, sizeof(template));

	/* map user to kernel widget ID */
	template.id = get_widget_id(le32_to_cpu(w->id));
	if ((int)template.id < 0)
		return template.id;

	/* strings are allocated here, but used and freed by the widget */
	template.name = kstrdup(w->name, GFP_KERNEL);
	if (!template.name)
		return -ENOMEM;
	template.sname = kstrdup(w->sname, GFP_KERNEL);
	if (!template.sname) {
		ret = -ENOMEM;
		goto err;
	}
	template.reg = le32_to_cpu(w->reg);
	template.shift = le32_to_cpu(w->shift);
	template.mask = le32_to_cpu(w->mask);
	template.subseq = le32_to_cpu(w->subseq);
	template.on_val = w->invert ? 0 : 1;
	template.off_val = w->invert ? 1 : 0;
	template.ignore_suspend = le32_to_cpu(w->ignore_suspend);
	template.event_flags = le16_to_cpu(w->event_flags);
	template.dobj.index = tplg->index;

	tplg->pos +=
		(sizeof(struct snd_soc_tplg_dapm_widget) +
		 le32_to_cpu(w->priv.size));

	if (w->num_kcontrols == 0) {
		template.num_kcontrols = 0;
		goto widget;
	}

	template.num_kcontrols = le32_to_cpu(w->num_kcontrols);
	kc = devm_kcalloc(tplg->dev, le32_to_cpu(w->num_kcontrols), sizeof(*kc), GFP_KERNEL);
	if (!kc) {
		ret = -ENOMEM;
		goto hdr_err;
	}

	kcontrol_type = devm_kcalloc(tplg->dev, le32_to_cpu(w->num_kcontrols), sizeof(unsigned int),
				     GFP_KERNEL);
	if (!kcontrol_type) {
		ret = -ENOMEM;
		goto hdr_err;
	}

	for (i = 0; i < le32_to_cpu(w->num_kcontrols); i++) {
		control_hdr = (struct snd_soc_tplg_ctl_hdr *)tplg->pos;
		switch (le32_to_cpu(control_hdr->ops.info)) {
		case SND_SOC_TPLG_CTL_VOLSW:
		case SND_SOC_TPLG_CTL_STROBE:
		case SND_SOC_TPLG_CTL_VOLSW_SX:
		case SND_SOC_TPLG_CTL_VOLSW_XR_SX:
		case SND_SOC_TPLG_CTL_RANGE:
		case SND_SOC_TPLG_DAPM_CTL_VOLSW:
			/* volume mixer */
			kc[i].index = mixer_count;
			kcontrol_type[i] = SND_SOC_TPLG_TYPE_MIXER;
			mixer_count++;
			ret = soc_tplg_dapm_widget_dmixer_create(tplg, &kc[i]);
			if (ret < 0)
				goto hdr_err;
			break;
		case
SND_SOC_TPLG_CTL_ENUM: case SND_SOC_TPLG_CTL_ENUM_VALUE: case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE: case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT: case SND_SOC_TPLG_DAPM_CTL_ENUM_VALUE: /* enumerated mixer */ kc[i].index = enum_count; kcontrol_type[i] = SND_SOC_TPLG_TYPE_ENUM; enum_count++; ret = soc_tplg_dapm_widget_denum_create(tplg, &kc[i]); if (ret < 0) goto hdr_err; break; case SND_SOC_TPLG_CTL_BYTES: /* bytes control */ kc[i].index = bytes_count; kcontrol_type[i] = SND_SOC_TPLG_TYPE_BYTES; bytes_count++; ret = soc_tplg_dapm_widget_dbytes_create(tplg, &kc[i]); if (ret < 0) goto hdr_err; break; default: dev_err(tplg->dev, "ASoC: invalid widget control type %d:%d:%d\n", control_hdr->ops.get, control_hdr->ops.put, le32_to_cpu(control_hdr->ops.info)); ret = -EINVAL; goto hdr_err; } } template.kcontrol_news = kc; dev_dbg(tplg->dev, "ASoC: template %s with %d/%d/%d (mixer/enum/bytes) control\n", w->name, mixer_count, enum_count, bytes_count); widget: ret = soc_tplg_widget_load(tplg, &template, w); if (ret < 0) goto hdr_err; /* card dapm mutex is held by the core if we are loading topology * data during sound card init. 
*/ if (snd_soc_card_is_instantiated(card)) widget = snd_soc_dapm_new_control(dapm, &template); else widget = snd_soc_dapm_new_control_unlocked(dapm, &template); if (IS_ERR(widget)) { ret = PTR_ERR(widget); goto hdr_err; } widget->dobj.type = SND_SOC_DOBJ_WIDGET; widget->dobj.widget.kcontrol_type = kcontrol_type; if (tplg->ops) widget->dobj.unload = tplg->ops->widget_unload; widget->dobj.index = tplg->index; list_add(&widget->dobj.list, &tplg->comp->dobj_list); ret = soc_tplg_widget_ready(tplg, widget, w); if (ret < 0) goto ready_err; kfree(template.sname); kfree(template.name); return 0; ready_err: soc_tplg_remove_widget(widget->dapm->component, &widget->dobj, SOC_TPLG_PASS_WIDGET); snd_soc_dapm_free_widget(widget); hdr_err: kfree(template.sname); err: kfree(template.name); return ret; } static int soc_tplg_dapm_widget_elems_load(struct soc_tplg *tplg, struct snd_soc_tplg_hdr *hdr) { int count, i; count = le32_to_cpu(hdr->count); dev_dbg(tplg->dev, "ASoC: adding %d DAPM widgets\n", count); for (i = 0; i < count; i++) { struct snd_soc_tplg_dapm_widget *widget = (struct snd_soc_tplg_dapm_widget *) tplg->pos; int ret; /* * check if widget itself fits within topology file * use sizeof instead of widget->size, as we can't be sure * it is set properly yet (file may end before it is present) */ if (soc_tplg_get_offset(tplg) + sizeof(*widget) >= tplg->fw->size) { dev_err(tplg->dev, "ASoC: invalid widget data size\n"); return -EINVAL; } /* check if widget has proper size */ if (le32_to_cpu(widget->size) != sizeof(*widget)) { dev_err(tplg->dev, "ASoC: invalid widget size\n"); return -EINVAL; } /* check if widget private data fits within topology file */ if (soc_tplg_get_offset(tplg) + le32_to_cpu(widget->priv.size) >= tplg->fw->size) { dev_err(tplg->dev, "ASoC: invalid widget private data size\n"); return -EINVAL; } ret = soc_tplg_dapm_widget_create(tplg, widget); if (ret < 0) { dev_err(tplg->dev, "ASoC: failed to load widget %s\n", widget->name); return ret; } } return 0; } 
/*
 * Tell DAPM that all topology widgets for this component are loaded, so it
 * can instantiate any new widgets now — but only if the parent card has
 * already been instantiated; otherwise widget binding is deferred to card
 * probe and this returns success.
 */
static int soc_tplg_dapm_complete(struct soc_tplg *tplg)
{
	struct snd_soc_card *card = tplg->comp->card;
	int ret;

	/* Card might not have been registered at this point.
	 * If so, just return success.
	 */
	if (!snd_soc_card_is_instantiated(card)) {
		dev_warn(tplg->dev, "ASoC: Parent card not yet available, widget card binding deferred\n");
		return 0;
	}

	ret = snd_soc_dapm_new_widgets(card);
	if (ret < 0)
		dev_err(tplg->dev, "ASoC: failed to create new widgets %d\n",
			ret);

	return ret;
}

/*
 * Copy stream capabilities from a topology caps descriptor into a runtime
 * snd_soc_pcm_stream. Multi-byte fields in @caps are little-endian on disk
 * and converted to CPU endianness here.
 *
 * Return: 0 on success, -ENOMEM if the stream name cannot be duplicated.
 */
static int set_stream_info(struct soc_tplg *tplg, struct snd_soc_pcm_stream *stream,
			   struct snd_soc_tplg_stream_caps *caps)
{
	/* devm allocation: lifetime is tied to the topology device */
	stream->stream_name = devm_kstrdup(tplg->dev, caps->name, GFP_KERNEL);
	if (!stream->stream_name)
		return -ENOMEM;

	stream->channels_min = le32_to_cpu(caps->channels_min);
	stream->channels_max = le32_to_cpu(caps->channels_max);
	stream->rates = le32_to_cpu(caps->rates);
	stream->rate_min = le32_to_cpu(caps->rate_min);
	stream->rate_max = le32_to_cpu(caps->rate_max);
	stream->formats = le64_to_cpu(caps->formats);
	stream->sig_bits = le32_to_cpu(caps->sig_bits);

	return 0;
}

/*
 * Apply topology symmetry flags to a DAI driver. Only bits selected by
 * @flag_mask are touched; each selected bit's new value comes from @flags.
 */
static void set_dai_flags(struct snd_soc_dai_driver *dai_drv,
			  unsigned int flag_mask, unsigned int flags)
{
	if (flag_mask & SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_RATES)
		dai_drv->symmetric_rate =
			(flags & SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_RATES) ?
			1 : 0;

	if (flag_mask & SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_CHANNELS)
		dai_drv->symmetric_channels =
			(flags & SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_CHANNELS) ?
			1 : 0;

	if (flag_mask & SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_SAMPLEBITS)
		dai_drv->symmetric_sample_bits =
			(flags & SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_SAMPLEBITS) ?
			1 : 0;
}

/* DAI ops installed on topology-created DAIs that support compressed PCM */
static const struct snd_soc_dai_ops tplg_dai_ops = {
	.compress_new	= snd_soc_new_compress,
};

/*
 * Create and register a new FE DAI from a topology PCM object: allocate the
 * DAI driver, fill playback/capture stream info, let the component driver
 * do optional further init, register the DAI and create its DAPM widgets.
 *
 * Return: 0 on success or a negative errno.
 */
static int soc_tplg_dai_create(struct soc_tplg *tplg,
			       struct snd_soc_tplg_pcm *pcm)
{
	struct snd_soc_dai_driver *dai_drv;
	struct snd_soc_pcm_stream *stream;
	struct snd_soc_tplg_stream_caps *caps;
	struct snd_soc_dai *dai;
	struct snd_soc_dapm_context *dapm =
		snd_soc_component_get_dapm(tplg->comp);
	int ret;

	/* devm allocation: freed with the topology device on removal */
	dai_drv = devm_kzalloc(tplg->dev, sizeof(struct snd_soc_dai_driver), GFP_KERNEL);
	if (dai_drv == NULL)
		return -ENOMEM;

	if (strlen(pcm->dai_name)) {
		dai_drv->name = devm_kstrdup(tplg->dev, pcm->dai_name, GFP_KERNEL);
		if (!dai_drv->name) {
			ret = -ENOMEM;
			goto err;
		}
	}
	dai_drv->id = le32_to_cpu(pcm->dai_id);

	if (pcm->playback) {
		stream = &dai_drv->playback;
		caps = &pcm->caps[SND_SOC_TPLG_STREAM_PLAYBACK];
		ret = set_stream_info(tplg, stream, caps);
		if (ret < 0)
			goto err;
	}

	if (pcm->capture) {
		stream = &dai_drv->capture;
		caps = &pcm->caps[SND_SOC_TPLG_STREAM_CAPTURE];
		ret = set_stream_info(tplg, stream, caps);
		if (ret < 0)
			goto err;
	}

	/* compressed streams get the compress_new hook via tplg_dai_ops */
	if (pcm->compress)
		dai_drv->ops = &tplg_dai_ops;

	/* pass control to component driver for optional further init */
	ret = soc_tplg_dai_load(tplg, dai_drv, pcm, NULL);
	if (ret < 0) {
		dev_err(tplg->dev, "ASoC: DAI loading failed\n");
		goto err;
	}

	/* track the dynamic object so it can be removed on topology unload */
	dai_drv->dobj.index = tplg->index;
	dai_drv->dobj.type = SND_SOC_DOBJ_PCM;
	if (tplg->ops)
		dai_drv->dobj.unload = tplg->ops->dai_unload;
	list_add(&dai_drv->dobj.list, &tplg->comp->dobj_list);

	/* register the DAI to the component */
	dai = snd_soc_register_dai(tplg->comp, dai_drv, false);
	if (!dai)
		return -ENOMEM;

	/* Create the DAI widgets here */
	ret = snd_soc_dapm_new_dai_widgets(dapm, dai);
	if (ret != 0) {
		dev_err(dai->dev, "Failed to create DAI widgets %d\n", ret);
		snd_soc_unregister_dai(dai);
		return ret;
	}

	return 0;

err:
	return ret;
}

/*
 * Apply topology flags to a DAI link; only bits selected by @flag_mask are
 * modified, taking their value from @flags.
 */
static void set_link_flags(struct snd_soc_dai_link *link,
			   unsigned int flag_mask, unsigned int flags)
{
	if (flag_mask & SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_RATES)
link->symmetric_rate = (flags & SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_RATES) ? 1 : 0; if (flag_mask & SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_CHANNELS) link->symmetric_channels = (flags & SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_CHANNELS) ? 1 : 0; if (flag_mask & SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_SAMPLEBITS) link->symmetric_sample_bits = (flags & SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_SAMPLEBITS) ? 1 : 0; if (flag_mask & SND_SOC_TPLG_LNK_FLGBIT_VOICE_WAKEUP) link->ignore_suspend = (flags & SND_SOC_TPLG_LNK_FLGBIT_VOICE_WAKEUP) ? 1 : 0; } /* create the FE DAI link */ static int soc_tplg_fe_link_create(struct soc_tplg *tplg, struct snd_soc_tplg_pcm *pcm) { struct snd_soc_dai_link *link; struct snd_soc_dai_link_component *dlc; int ret; /* link + cpu + codec + platform */ link = devm_kzalloc(tplg->dev, sizeof(*link) + (3 * sizeof(*dlc)), GFP_KERNEL); if (link == NULL) return -ENOMEM; dlc = (struct snd_soc_dai_link_component *)(link + 1); link->cpus = &dlc[0]; link->num_cpus = 1; link->dobj.index = tplg->index; link->dobj.type = SND_SOC_DOBJ_DAI_LINK; if (tplg->ops) link->dobj.unload = tplg->ops->link_unload; if (strlen(pcm->pcm_name)) { link->name = devm_kstrdup(tplg->dev, pcm->pcm_name, GFP_KERNEL); link->stream_name = devm_kstrdup(tplg->dev, pcm->pcm_name, GFP_KERNEL); if (!link->name || !link->stream_name) { ret = -ENOMEM; goto err; } } link->id = le32_to_cpu(pcm->pcm_id); if (strlen(pcm->dai_name)) { link->cpus->dai_name = devm_kstrdup(tplg->dev, pcm->dai_name, GFP_KERNEL); if (!link->cpus->dai_name) { ret = -ENOMEM; goto err; } } /* * Many topology are assuming link has Codec / Platform, and * these might be overwritten at soc_tplg_dai_link_load(). * Don't use &asoc_dummy_dlc here. 
*/ link->codecs = &dlc[1]; /* Don't use &asoc_dummy_dlc here */ link->codecs->name = "snd-soc-dummy"; link->codecs->dai_name = "snd-soc-dummy-dai"; link->num_codecs = 1; link->platforms = &dlc[2]; /* Don't use &asoc_dummy_dlc here */ link->platforms->name = "snd-soc-dummy"; link->num_platforms = 1; /* enable DPCM */ link->dynamic = 1; link->ignore_pmdown_time = 1; link->dpcm_playback = le32_to_cpu(pcm->playback); link->dpcm_capture = le32_to_cpu(pcm->capture); if (pcm->flag_mask) set_link_flags(link, le32_to_cpu(pcm->flag_mask), le32_to_cpu(pcm->flags)); /* pass control to component driver for optional further init */ ret = soc_tplg_dai_link_load(tplg, link, NULL); if (ret < 0) { dev_err(tplg->dev, "ASoC: FE link loading failed\n"); goto err; } ret = snd_soc_add_pcm_runtimes(tplg->comp->card, link, 1); if (ret < 0) { if (ret != -EPROBE_DEFER) dev_err(tplg->dev, "ASoC: adding FE link failed\n"); goto err; } list_add(&link->dobj.list, &tplg->comp->dobj_list); return 0; err: return ret; } /* create a FE DAI and DAI link from the PCM object */ static int soc_tplg_pcm_create(struct soc_tplg *tplg, struct snd_soc_tplg_pcm *pcm) { int ret; ret = soc_tplg_dai_create(tplg, pcm); if (ret < 0) return ret; return soc_tplg_fe_link_create(tplg, pcm); } /* copy stream caps from the old version 4 of source */ static void stream_caps_new_ver(struct snd_soc_tplg_stream_caps *dest, struct snd_soc_tplg_stream_caps_v4 *src) { dest->size = cpu_to_le32(sizeof(*dest)); memcpy(dest->name, src->name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN); dest->formats = src->formats; dest->rates = src->rates; dest->rate_min = src->rate_min; dest->rate_max = src->rate_max; dest->channels_min = src->channels_min; dest->channels_max = src->channels_max; dest->periods_min = src->periods_min; dest->periods_max = src->periods_max; dest->period_size_min = src->period_size_min; dest->period_size_max = src->period_size_max; dest->buffer_size_min = src->buffer_size_min; dest->buffer_size_max = src->buffer_size_max; } /** * 
pcm_new_ver - Create the new version of PCM from the old version. * @tplg: topology context * @src: older version of pcm as a source * @pcm: latest version of pcm created from the source * * Support from version 4. User should free the returned pcm manually. */ static int pcm_new_ver(struct soc_tplg *tplg, struct snd_soc_tplg_pcm *src, struct snd_soc_tplg_pcm **pcm) { struct snd_soc_tplg_pcm *dest; struct snd_soc_tplg_pcm_v4 *src_v4; int i; *pcm = NULL; if (le32_to_cpu(src->size) != sizeof(*src_v4)) { dev_err(tplg->dev, "ASoC: invalid PCM size\n"); return -EINVAL; } dev_warn(tplg->dev, "ASoC: old version of PCM\n"); src_v4 = (struct snd_soc_tplg_pcm_v4 *)src; dest = kzalloc(sizeof(*dest), GFP_KERNEL); if (!dest) return -ENOMEM; dest->size = cpu_to_le32(sizeof(*dest)); /* size of latest abi version */ memcpy(dest->pcm_name, src_v4->pcm_name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN); memcpy(dest->dai_name, src_v4->dai_name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN); dest->pcm_id = src_v4->pcm_id; dest->dai_id = src_v4->dai_id; dest->playback = src_v4->playback; dest->capture = src_v4->capture; dest->compress = src_v4->compress; dest->num_streams = src_v4->num_streams; for (i = 0; i < le32_to_cpu(dest->num_streams); i++) memcpy(&dest->stream[i], &src_v4->stream[i], sizeof(struct snd_soc_tplg_stream)); for (i = 0; i < 2; i++) stream_caps_new_ver(&dest->caps[i], &src_v4->caps[i]); *pcm = dest; return 0; } static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg, struct snd_soc_tplg_hdr *hdr) { struct snd_soc_tplg_pcm *pcm, *_pcm; int count; int size; int i; bool abi_match; int ret; count = le32_to_cpu(hdr->count); /* check the element size and count */ pcm = (struct snd_soc_tplg_pcm *)tplg->pos; size = le32_to_cpu(pcm->size); if (size > sizeof(struct snd_soc_tplg_pcm) || size < sizeof(struct snd_soc_tplg_pcm_v4)) { dev_err(tplg->dev, "ASoC: invalid size %d for PCM elems\n", size); return -EINVAL; } if (soc_tplg_check_elem_count(tplg, size, count, le32_to_cpu(hdr->payload_size), "PCM DAI")) 
return -EINVAL; for (i = 0; i < count; i++) { pcm = (struct snd_soc_tplg_pcm *)tplg->pos; size = le32_to_cpu(pcm->size); /* check ABI version by size, create a new version of pcm * if abi not match. */ if (size == sizeof(*pcm)) { abi_match = true; _pcm = pcm; } else { abi_match = false; ret = pcm_new_ver(tplg, pcm, &_pcm); if (ret < 0) return ret; } /* create the FE DAIs and DAI links */ ret = soc_tplg_pcm_create(tplg, _pcm); if (ret < 0) { if (!abi_match) kfree(_pcm); return ret; } /* offset by version-specific struct size and * real priv data size */ tplg->pos += size + le32_to_cpu(_pcm->priv.size); if (!abi_match) kfree(_pcm); /* free the duplicated one */ } dev_dbg(tplg->dev, "ASoC: adding %d PCM DAIs\n", count); return 0; } /** * set_link_hw_format - Set the HW audio format of the physical DAI link. * @link: &snd_soc_dai_link which should be updated * @cfg: physical link configs. * * Topology context contains a list of supported HW formats (configs) and * a default format ID for the physical link. This function will use this * default ID to choose the HW format to set the link's DAI format for init. 
*/ static void set_link_hw_format(struct snd_soc_dai_link *link, struct snd_soc_tplg_link_config *cfg) { struct snd_soc_tplg_hw_config *hw_config; unsigned char bclk_provider, fsync_provider; unsigned char invert_bclk, invert_fsync; int i; for (i = 0; i < le32_to_cpu(cfg->num_hw_configs); i++) { hw_config = &cfg->hw_config[i]; if (hw_config->id != cfg->default_hw_config_id) continue; link->dai_fmt = le32_to_cpu(hw_config->fmt) & SND_SOC_DAIFMT_FORMAT_MASK; /* clock gating */ switch (hw_config->clock_gated) { case SND_SOC_TPLG_DAI_CLK_GATE_GATED: link->dai_fmt |= SND_SOC_DAIFMT_GATED; break; case SND_SOC_TPLG_DAI_CLK_GATE_CONT: link->dai_fmt |= SND_SOC_DAIFMT_CONT; break; default: /* ignore the value */ break; } /* clock signal polarity */ invert_bclk = hw_config->invert_bclk; invert_fsync = hw_config->invert_fsync; if (!invert_bclk && !invert_fsync) link->dai_fmt |= SND_SOC_DAIFMT_NB_NF; else if (!invert_bclk && invert_fsync) link->dai_fmt |= SND_SOC_DAIFMT_NB_IF; else if (invert_bclk && !invert_fsync) link->dai_fmt |= SND_SOC_DAIFMT_IB_NF; else link->dai_fmt |= SND_SOC_DAIFMT_IB_IF; /* clock masters */ bclk_provider = (hw_config->bclk_provider == SND_SOC_TPLG_BCLK_CP); fsync_provider = (hw_config->fsync_provider == SND_SOC_TPLG_FSYNC_CP); if (bclk_provider && fsync_provider) link->dai_fmt |= SND_SOC_DAIFMT_CBP_CFP; else if (!bclk_provider && fsync_provider) link->dai_fmt |= SND_SOC_DAIFMT_CBC_CFP; else if (bclk_provider && !fsync_provider) link->dai_fmt |= SND_SOC_DAIFMT_CBP_CFC; else link->dai_fmt |= SND_SOC_DAIFMT_CBC_CFC; } } /** * link_new_ver - Create a new physical link config from the old * version of source. * @tplg: topology context * @src: old version of phyical link config as a source * @link: latest version of physical link config created from the source * * Support from version 4. User need free the returned link config manually. 
*/ static int link_new_ver(struct soc_tplg *tplg, struct snd_soc_tplg_link_config *src, struct snd_soc_tplg_link_config **link) { struct snd_soc_tplg_link_config *dest; struct snd_soc_tplg_link_config_v4 *src_v4; int i; *link = NULL; if (le32_to_cpu(src->size) != sizeof(struct snd_soc_tplg_link_config_v4)) { dev_err(tplg->dev, "ASoC: invalid physical link config size\n"); return -EINVAL; } dev_warn(tplg->dev, "ASoC: old version of physical link config\n"); src_v4 = (struct snd_soc_tplg_link_config_v4 *)src; dest = kzalloc(sizeof(*dest), GFP_KERNEL); if (!dest) return -ENOMEM; dest->size = cpu_to_le32(sizeof(*dest)); dest->id = src_v4->id; dest->num_streams = src_v4->num_streams; for (i = 0; i < le32_to_cpu(dest->num_streams); i++) memcpy(&dest->stream[i], &src_v4->stream[i], sizeof(struct snd_soc_tplg_stream)); *link = dest; return 0; } /** * snd_soc_find_dai_link - Find a DAI link * * @card: soc card * @id: DAI link ID to match * @name: DAI link name to match, optional * @stream_name: DAI link stream name to match, optional * * This function will search all existing DAI links of the soc card to * find the link of the same ID. Since DAI links may not have their * unique ID, so name and stream name should also match if being * specified. * * Return: pointer of DAI link, or NULL if not found. 
 */
static struct snd_soc_dai_link *snd_soc_find_dai_link(struct snd_soc_card *card,
						      int id, const char *name,
						      const char *stream_name)
{
	struct snd_soc_pcm_runtime *rtd;

	for_each_card_rtds(card, rtd) {
		struct snd_soc_dai_link *link = rtd->dai_link;

		if (link->id != id)
			continue;

		/* name and stream_name are optional filters; substring match */
		if (name && (!link->name || !strstr(link->name, name)))
			continue;

		if (stream_name && (!link->stream_name ||
				    !strstr(link->stream_name, stream_name)))
			continue;

		return link;
	}

	return NULL;
}

/* Find and configure an existing physical DAI link */
static int soc_tplg_link_config(struct soc_tplg *tplg,
				struct snd_soc_tplg_link_config *cfg)
{
	struct snd_soc_dai_link *link;
	const char *name, *stream_name;
	size_t len;
	int ret;

	/* unterminated names are invalid; empty names mean "no filter" */
	len = strnlen(cfg->name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN);
	if (len == SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
		return -EINVAL;
	else if (len)
		name = cfg->name;
	else
		name = NULL;

	len = strnlen(cfg->stream_name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN);
	if (len == SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
		return -EINVAL;
	else if (len)
		stream_name = cfg->stream_name;
	else
		stream_name = NULL;

	link = snd_soc_find_dai_link(tplg->comp->card, le32_to_cpu(cfg->id),
				     name, stream_name);
	if (!link) {
		dev_err(tplg->dev, "ASoC: physical link %s (id %d) not exist\n",
			name, cfg->id);
		return -EINVAL;
	}

	/* hw format */
	if (cfg->num_hw_configs)
		set_link_hw_format(link, cfg);

	/* flags */
	if (cfg->flag_mask)
		set_link_flags(link,
			       le32_to_cpu(cfg->flag_mask),
			       le32_to_cpu(cfg->flags));

	/* pass control to component driver for optional further init */
	ret = soc_tplg_dai_link_load(tplg, link, cfg);
	if (ret < 0) {
		dev_err(tplg->dev, "ASoC: physical link loading failed\n");
		return ret;
	}

	/* for unloading it in snd_soc_tplg_component_remove */
	link->dobj.index = tplg->index;
	link->dobj.type = SND_SOC_DOBJ_BACKEND_LINK;
	if (tplg->ops)
		link->dobj.unload = tplg->ops->link_unload;
	list_add(&link->dobj.list, &tplg->comp->dobj_list);

	return 0;
}

/* Load physical link config elements from the topology context */
static int
soc_tplg_link_elems_load(struct soc_tplg *tplg, struct snd_soc_tplg_hdr *hdr) { struct snd_soc_tplg_link_config *link, *_link; int count; int size; int i, ret; bool abi_match; count = le32_to_cpu(hdr->count); /* check the element size and count */ link = (struct snd_soc_tplg_link_config *)tplg->pos; size = le32_to_cpu(link->size); if (size > sizeof(struct snd_soc_tplg_link_config) || size < sizeof(struct snd_soc_tplg_link_config_v4)) { dev_err(tplg->dev, "ASoC: invalid size %d for physical link elems\n", size); return -EINVAL; } if (soc_tplg_check_elem_count(tplg, size, count, le32_to_cpu(hdr->payload_size), "physical link config")) return -EINVAL; /* config physical DAI links */ for (i = 0; i < count; i++) { link = (struct snd_soc_tplg_link_config *)tplg->pos; size = le32_to_cpu(link->size); if (size == sizeof(*link)) { abi_match = true; _link = link; } else { abi_match = false; ret = link_new_ver(tplg, link, &_link); if (ret < 0) return ret; } ret = soc_tplg_link_config(tplg, _link); if (ret < 0) { if (!abi_match) kfree(_link); return ret; } /* offset by version-specific struct size and * real priv data size */ tplg->pos += size + le32_to_cpu(_link->priv.size); if (!abi_match) kfree(_link); /* free the duplicated one */ } return 0; } /** * soc_tplg_dai_config - Find and configure an existing physical DAI. * @tplg: topology context * @d: physical DAI configs. * * The physical dai should already be registered by the platform driver. * The platform driver should specify the DAI name and ID for matching. 
*/ static int soc_tplg_dai_config(struct soc_tplg *tplg, struct snd_soc_tplg_dai *d) { struct snd_soc_dai_link_component dai_component; struct snd_soc_dai *dai; struct snd_soc_dai_driver *dai_drv; struct snd_soc_pcm_stream *stream; struct snd_soc_tplg_stream_caps *caps; int ret; memset(&dai_component, 0, sizeof(dai_component)); dai_component.dai_name = d->dai_name; dai = snd_soc_find_dai(&dai_component); if (!dai) { dev_err(tplg->dev, "ASoC: physical DAI %s not registered\n", d->dai_name); return -EINVAL; } if (le32_to_cpu(d->dai_id) != dai->id) { dev_err(tplg->dev, "ASoC: physical DAI %s id mismatch\n", d->dai_name); return -EINVAL; } dai_drv = dai->driver; if (!dai_drv) return -EINVAL; if (d->playback) { stream = &dai_drv->playback; caps = &d->caps[SND_SOC_TPLG_STREAM_PLAYBACK]; ret = set_stream_info(tplg, stream, caps); if (ret < 0) goto err; } if (d->capture) { stream = &dai_drv->capture; caps = &d->caps[SND_SOC_TPLG_STREAM_CAPTURE]; ret = set_stream_info(tplg, stream, caps); if (ret < 0) goto err; } if (d->flag_mask) set_dai_flags(dai_drv, le32_to_cpu(d->flag_mask), le32_to_cpu(d->flags)); /* pass control to component driver for optional further init */ ret = soc_tplg_dai_load(tplg, dai_drv, NULL, dai); if (ret < 0) { dev_err(tplg->dev, "ASoC: DAI loading failed\n"); goto err; } return 0; err: return ret; } /* load physical DAI elements */ static int soc_tplg_dai_elems_load(struct soc_tplg *tplg, struct snd_soc_tplg_hdr *hdr) { int count; int i; count = le32_to_cpu(hdr->count); /* config the existing BE DAIs */ for (i = 0; i < count; i++) { struct snd_soc_tplg_dai *dai = (struct snd_soc_tplg_dai *)tplg->pos; int ret; if (le32_to_cpu(dai->size) != sizeof(*dai)) { dev_err(tplg->dev, "ASoC: invalid physical DAI size\n"); return -EINVAL; } ret = soc_tplg_dai_config(tplg, dai); if (ret < 0) { dev_err(tplg->dev, "ASoC: failed to configure DAI\n"); return ret; } tplg->pos += (sizeof(*dai) + le32_to_cpu(dai->priv.size)); } dev_dbg(tplg->dev, "ASoC: Configure %d BE 
DAIs\n", count); return 0; } /** * manifest_new_ver - Create a new version of manifest from the old version * of source. * @tplg: topology context * @src: old version of manifest as a source * @manifest: latest version of manifest created from the source * * Support from version 4. Users need free the returned manifest manually. */ static int manifest_new_ver(struct soc_tplg *tplg, struct snd_soc_tplg_manifest *src, struct snd_soc_tplg_manifest **manifest) { struct snd_soc_tplg_manifest *dest; struct snd_soc_tplg_manifest_v4 *src_v4; int size; *manifest = NULL; size = le32_to_cpu(src->size); if (size != sizeof(*src_v4)) { dev_warn(tplg->dev, "ASoC: invalid manifest size %d\n", size); if (size) return -EINVAL; src->size = cpu_to_le32(sizeof(*src_v4)); } dev_warn(tplg->dev, "ASoC: old version of manifest\n"); src_v4 = (struct snd_soc_tplg_manifest_v4 *)src; dest = kzalloc(sizeof(*dest) + le32_to_cpu(src_v4->priv.size), GFP_KERNEL); if (!dest) return -ENOMEM; dest->size = cpu_to_le32(sizeof(*dest)); /* size of latest abi version */ dest->control_elems = src_v4->control_elems; dest->widget_elems = src_v4->widget_elems; dest->graph_elems = src_v4->graph_elems; dest->pcm_elems = src_v4->pcm_elems; dest->dai_link_elems = src_v4->dai_link_elems; dest->priv.size = src_v4->priv.size; if (dest->priv.size) memcpy(dest->priv.data, src_v4->priv.data, le32_to_cpu(src_v4->priv.size)); *manifest = dest; return 0; } static int soc_tplg_manifest_load(struct soc_tplg *tplg, struct snd_soc_tplg_hdr *hdr) { struct snd_soc_tplg_manifest *manifest, *_manifest; bool abi_match; int ret = 0; manifest = (struct snd_soc_tplg_manifest *)tplg->pos; /* check ABI version by size, create a new manifest if abi not match */ if (le32_to_cpu(manifest->size) == sizeof(*manifest)) { abi_match = true; _manifest = manifest; } else { abi_match = false; ret = manifest_new_ver(tplg, manifest, &_manifest); if (ret < 0) return ret; } /* pass control to component driver for optional further init */ if (tplg->ops 
	    && tplg->ops->manifest)
		ret = tplg->ops->manifest(tplg->comp, tplg->index, _manifest);

	if (!abi_match)	/* free the duplicated one */
		kfree(_manifest);

	return ret;
}

/* validate header magic, size and type */
static int soc_tplg_valid_header(struct soc_tplg *tplg,
				 struct snd_soc_tplg_hdr *hdr)
{
	/* a header must be exactly sizeof(*hdr); anything else is corrupt */
	if (le32_to_cpu(hdr->size) != sizeof(*hdr)) {
		dev_err(tplg->dev,
			"ASoC: invalid header size for type %d at offset 0x%lx size 0x%zx.\n",
			le32_to_cpu(hdr->type), soc_tplg_get_hdr_offset(tplg),
			tplg->fw->size);
		return -EINVAL;
	}

	/* the declared payload must fit inside the firmware image */
	if (soc_tplg_get_hdr_offset(tplg) + le32_to_cpu(hdr->payload_size) >= tplg->fw->size) {
		dev_err(tplg->dev,
			"ASoC: invalid header of type %d at offset %ld payload_size %d\n",
			le32_to_cpu(hdr->type), soc_tplg_get_hdr_offset(tplg),
			hdr->payload_size);
		return -EINVAL;
	}

	/* big endian firmware objects not supported atm */
	if (le32_to_cpu(hdr->magic) == SOC_TPLG_MAGIC_BIG_ENDIAN) {
		dev_err(tplg->dev,
			"ASoC: pass %d big endian not supported header got %x at offset 0x%lx size 0x%zx.\n",
			tplg->pass, hdr->magic,
			soc_tplg_get_hdr_offset(tplg), tplg->fw->size);
		return -EINVAL;
	}

	if (le32_to_cpu(hdr->magic) != SND_SOC_TPLG_MAGIC) {
		dev_err(tplg->dev,
			"ASoC: pass %d does not have a valid header got %x at offset 0x%lx size 0x%zx.\n",
			tplg->pass, hdr->magic,
			soc_tplg_get_hdr_offset(tplg), tplg->fw->size);
		return -EINVAL;
	}

	/* Support ABI from version 4 */
	if (le32_to_cpu(hdr->abi) > SND_SOC_TPLG_ABI_VERSION ||
	    le32_to_cpu(hdr->abi) < SND_SOC_TPLG_ABI_VERSION_MIN) {
		dev_err(tplg->dev,
			"ASoC: pass %d invalid ABI version got 0x%x need 0x%x at offset 0x%lx size 0x%zx.\n",
			tplg->pass, hdr->abi,
			SND_SOC_TPLG_ABI_VERSION, soc_tplg_get_hdr_offset(tplg),
			tplg->fw->size);
		return -EINVAL;
	}

	if (hdr->payload_size == 0) {
		dev_err(tplg->dev,
			"ASoC: header has 0 size at offset 0x%lx.\n",
			soc_tplg_get_hdr_offset(tplg));
		return -EINVAL;
	}

	return 0;
}

/* check header type and call appropriate handler */
static int soc_tplg_load_header(struct soc_tplg *tplg,
				struct snd_soc_tplg_hdr *hdr)
{ int (*elem_load)(struct soc_tplg *tplg, struct snd_soc_tplg_hdr *hdr); unsigned int hdr_pass; tplg->pos = tplg->hdr_pos + sizeof(struct snd_soc_tplg_hdr); tplg->index = le32_to_cpu(hdr->index); switch (le32_to_cpu(hdr->type)) { case SND_SOC_TPLG_TYPE_MIXER: case SND_SOC_TPLG_TYPE_ENUM: case SND_SOC_TPLG_TYPE_BYTES: hdr_pass = SOC_TPLG_PASS_CONTROL; elem_load = soc_tplg_kcontrol_elems_load; break; case SND_SOC_TPLG_TYPE_DAPM_GRAPH: hdr_pass = SOC_TPLG_PASS_GRAPH; elem_load = soc_tplg_dapm_graph_elems_load; break; case SND_SOC_TPLG_TYPE_DAPM_WIDGET: hdr_pass = SOC_TPLG_PASS_WIDGET; elem_load = soc_tplg_dapm_widget_elems_load; break; case SND_SOC_TPLG_TYPE_PCM: hdr_pass = SOC_TPLG_PASS_PCM_DAI; elem_load = soc_tplg_pcm_elems_load; break; case SND_SOC_TPLG_TYPE_DAI: hdr_pass = SOC_TPLG_PASS_BE_DAI; elem_load = soc_tplg_dai_elems_load; break; case SND_SOC_TPLG_TYPE_DAI_LINK: case SND_SOC_TPLG_TYPE_BACKEND_LINK: /* physical link configurations */ hdr_pass = SOC_TPLG_PASS_LINK; elem_load = soc_tplg_link_elems_load; break; case SND_SOC_TPLG_TYPE_MANIFEST: hdr_pass = SOC_TPLG_PASS_MANIFEST; elem_load = soc_tplg_manifest_load; break; default: /* bespoke vendor data object */ hdr_pass = SOC_TPLG_PASS_VENDOR; elem_load = soc_tplg_vendor_load; break; } if (tplg->pass == hdr_pass) { dev_dbg(tplg->dev, "ASoC: Got 0x%x bytes of type %d version %d vendor %d at pass %d\n", hdr->payload_size, hdr->type, hdr->version, hdr->vendor_type, tplg->pass); return elem_load(tplg, hdr); } return 0; } /* process the topology file headers */ static int soc_tplg_process_headers(struct soc_tplg *tplg) { int ret; /* process the header types from start to end */ for (tplg->pass = SOC_TPLG_PASS_START; tplg->pass <= SOC_TPLG_PASS_END; tplg->pass++) { struct snd_soc_tplg_hdr *hdr; tplg->hdr_pos = tplg->fw->data; hdr = (struct snd_soc_tplg_hdr *)tplg->hdr_pos; while (!soc_tplg_is_eof(tplg)) { /* make sure header is valid before loading */ ret = soc_tplg_valid_header(tplg, hdr); if (ret < 0) return ret; 
			/* load the header object */
			ret = soc_tplg_load_header(tplg, hdr);
			if (ret < 0) {
				if (ret != -EPROBE_DEFER) {
					dev_err(tplg->dev,
						"ASoC: topology: could not load header: %d\n",
						ret);
				}
				return ret;
			}

			/* goto next header */
			tplg->hdr_pos += le32_to_cpu(hdr->payload_size) +
				sizeof(struct snd_soc_tplg_hdr);
			hdr = (struct snd_soc_tplg_hdr *)tplg->hdr_pos;
		}

	}

	/* signal DAPM we are complete */
	ret = soc_tplg_dapm_complete(tplg);

	return ret;
}

/*
 * Run all loading passes over the firmware and, on success, finish by
 * binding dynamic controls/widgets via soc_tplg_complete().
 */
static int soc_tplg_load(struct soc_tplg *tplg)
{
	int ret;

	ret = soc_tplg_process_headers(tplg);
	if (ret == 0)
		return soc_tplg_complete(tplg);

	return ret;
}

/* load audio component topology from "firmware" file */
int snd_soc_tplg_component_load(struct snd_soc_component *comp,
				struct snd_soc_tplg_ops *ops, const struct firmware *fw)
{
	struct soc_tplg tplg;
	int ret;

	/*
	 * check if we have sane parameters:
	 * comp - needs to exist to keep and reference data while parsing
	 * comp->card - used for setting card related parameters
	 * comp->card->dev - used for resource management and prints
	 * fw - we need it, as it is the very thing we parse
	 */
	if (!comp || !comp->card || !comp->card->dev || !fw)
		return -EINVAL;

	/* setup parsing context */
	memset(&tplg, 0, sizeof(tplg));
	tplg.fw = fw;
	tplg.dev = comp->card->dev;
	tplg.comp = comp;
	if (ops) {
		tplg.ops = ops;
		tplg.io_ops = ops->io_ops;
		tplg.io_ops_count = ops->io_ops_count;
		tplg.bytes_ext_ops = ops->bytes_ext_ops;
		tplg.bytes_ext_ops_count = ops->bytes_ext_ops_count;
	}

	ret = soc_tplg_load(&tplg);
	/* free the created components if fail to load topology */
	if (ret)
		snd_soc_tplg_component_remove(comp);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load);

/* remove dynamic controls from the component driver */
int snd_soc_tplg_component_remove(struct snd_soc_component *comp)
{
	struct snd_soc_dobj *dobj, *next_dobj;
	int pass;

	/* process the header types from end to start */
	for (pass = SOC_TPLG_PASS_END; pass >= SOC_TPLG_PASS_START; pass--) {

		/* remove mixer controls */
list_for_each_entry_safe(dobj, next_dobj, &comp->dobj_list, list) { switch (dobj->type) { case SND_SOC_DOBJ_BYTES: case SND_SOC_DOBJ_ENUM: case SND_SOC_DOBJ_MIXER: soc_tplg_remove_kcontrol(comp, dobj, pass); break; case SND_SOC_DOBJ_GRAPH: soc_tplg_remove_route(comp, dobj, pass); break; case SND_SOC_DOBJ_WIDGET: soc_tplg_remove_widget(comp, dobj, pass); break; case SND_SOC_DOBJ_PCM: soc_tplg_remove_dai(comp, dobj, pass); break; case SND_SOC_DOBJ_DAI_LINK: soc_tplg_remove_link(comp, dobj, pass); break; case SND_SOC_DOBJ_BACKEND_LINK: /* * call link_unload ops if extra * deinitialization is needed. */ remove_backend_link(comp, dobj, pass); break; default: dev_err(comp->dev, "ASoC: invalid component type %d for removal\n", dobj->type); break; } } } /* let caller know if FW can be freed when no objects are left */ return !list_empty(&comp->dobj_list); } EXPORT_SYMBOL_GPL(snd_soc_tplg_component_remove);
/*
 * NOTE(review): the two lines below were dataset-join metadata, not source
 * code: repo "linux-master", path "sound/soc/soc-topology.c". The content
 * that follows begins a different file (sound/soc/soc-pcm.c).
 */
// SPDX-License-Identifier: GPL-2.0+
//
// soc-pcm.c -- ALSA SoC PCM
//
// Copyright 2005 Wolfson Microelectronics PLC.
// Copyright 2005 Openedhand Ltd.
// Copyright (C) 2010 Slimlogic Ltd.
// Copyright (C) 2010 Texas Instruments Inc.
//
// Authors: Liam Girdwood <[email protected]>
//          Mark Brown <[email protected]>

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pinctrl/consumer.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dpcm.h>
#include <sound/soc-link.h>
#include <sound/initval.h>

/*
 * Pass a return value through, logging it only when it is a negative code
 * that is not one of the "expected" ones (-EPROBE_DEFER/-ENOTSUPP/-EINVAL).
 */
#define soc_pcm_ret(rtd, ret) _soc_pcm_ret(rtd, __func__, ret)
static inline int _soc_pcm_ret(struct snd_soc_pcm_runtime *rtd,
			       const char *func, int ret)
{
	/* Positive, Zero values are not errors */
	if (ret >= 0)
		return ret;

	/* Negative values might be errors */
	switch (ret) {
	case -EPROBE_DEFER:
	case -ENOTSUPP:
	case -EINVAL:
		break;
	default:
		dev_err(rtd->dev,
			"ASoC: error at %s on %s: %d\n",
			func, rtd->dai_link->name, ret);
	}

	return ret;
}

/* Lock/unlock the PCM stream of @rtd for the given direction */
static inline void
snd_soc_dpcm_stream_lock_irq(struct snd_soc_pcm_runtime *rtd, int stream)
{
	snd_pcm_stream_lock_irq(snd_soc_dpcm_get_substream(rtd, stream));
}

#define snd_soc_dpcm_stream_lock_irqsave_nested(rtd, stream, flags) \
	snd_pcm_stream_lock_irqsave_nested(snd_soc_dpcm_get_substream(rtd, stream), flags)

static inline void
snd_soc_dpcm_stream_unlock_irq(struct snd_soc_pcm_runtime *rtd, int stream)
{
	snd_pcm_stream_unlock_irq(snd_soc_dpcm_get_substream(rtd, stream));
}

#define snd_soc_dpcm_stream_unlock_irqrestore(rtd, stream, flags) \
	snd_pcm_stream_unlock_irqrestore(snd_soc_dpcm_get_substream(rtd, stream), flags)

/* NOTE(review): presumably the max FEs that may share one BE — confirm at use sites */
#define DPCM_MAX_BE_USERS 8

/* DAI names for log messages; "multicpu"/"multicodec" when more than one DAI */
static inline const char *soc_cpu_dai_name(struct snd_soc_pcm_runtime *rtd)
{
	return (rtd)->dai_link->num_cpus == 1 ?
		asoc_rtd_to_cpu(rtd, 0)->name : "multicpu";
}
static inline const char *soc_codec_dai_name(struct snd_soc_pcm_runtime *rtd)
{
	return (rtd)->dai_link->num_codecs == 1 ?
		asoc_rtd_to_codec(rtd, 0)->name : "multicodec";
}

#ifdef CONFIG_DEBUG_FS
/* Human-readable DPCM state name for debugfs output */
static const char *dpcm_state_string(enum snd_soc_dpcm_state state)
{
	switch (state) {
	case SND_SOC_DPCM_STATE_NEW:
		return "new";
	case SND_SOC_DPCM_STATE_OPEN:
		return "open";
	case SND_SOC_DPCM_STATE_HW_PARAMS:
		return "hw_params";
	case SND_SOC_DPCM_STATE_PREPARE:
		return "prepare";
	case SND_SOC_DPCM_STATE_START:
		return "start";
	case SND_SOC_DPCM_STATE_STOP:
		return "stop";
	case SND_SOC_DPCM_STATE_SUSPEND:
		return "suspend";
	case SND_SOC_DPCM_STATE_PAUSED:
		return "paused";
	case SND_SOC_DPCM_STATE_HW_FREE:
		return "hw_free";
	case SND_SOC_DPCM_STATE_CLOSE:
		return "close";
	}

	return "unknown";
}

/*
 * Render the FE state (and, further below, the states of all connected BEs)
 * into @buf for the debugfs "state" file; returns the number of bytes written.
 */
static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe, int stream,
			       char *buf, size_t size)
{
	struct snd_pcm_hw_params *params = &fe->dpcm[stream].hw_params;
	struct snd_soc_dpcm *dpcm;
	ssize_t offset = 0;

	/* FE state */
	offset += scnprintf(buf + offset, size - offset,
			    "[%s - %s]\n", fe->dai_link->name,
			    stream ?
"Capture" : "Playback"); offset += scnprintf(buf + offset, size - offset, "State: %s\n", dpcm_state_string(fe->dpcm[stream].state)); if ((fe->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) && (fe->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP)) offset += scnprintf(buf + offset, size - offset, "Hardware Params: " "Format = %s, Channels = %d, Rate = %d\n", snd_pcm_format_name(params_format(params)), params_channels(params), params_rate(params)); /* BEs state */ offset += scnprintf(buf + offset, size - offset, "Backends:\n"); if (list_empty(&fe->dpcm[stream].be_clients)) { offset += scnprintf(buf + offset, size - offset, " No active DSP links\n"); goto out; } for_each_dpcm_be(fe, stream, dpcm) { struct snd_soc_pcm_runtime *be = dpcm->be; params = &be->dpcm[stream].hw_params; offset += scnprintf(buf + offset, size - offset, "- %s\n", be->dai_link->name); offset += scnprintf(buf + offset, size - offset, " State: %s\n", dpcm_state_string(be->dpcm[stream].state)); if ((be->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) && (be->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP)) offset += scnprintf(buf + offset, size - offset, " Hardware Params: " "Format = %s, Channels = %d, Rate = %d\n", snd_pcm_format_name(params_format(params)), params_channels(params), params_rate(params)); } out: return offset; } static ssize_t dpcm_state_read_file(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct snd_soc_pcm_runtime *fe = file->private_data; ssize_t out_count = PAGE_SIZE, offset = 0, ret = 0; int stream; char *buf; if (fe->dai_link->num_cpus > 1) { dev_err(fe->dev, "%s doesn't support Multi CPU yet\n", __func__); return -EINVAL; } buf = kmalloc(out_count, GFP_KERNEL); if (!buf) return -ENOMEM; snd_soc_dpcm_mutex_lock(fe); for_each_pcm_streams(stream) if (snd_soc_dai_stream_valid(asoc_rtd_to_cpu(fe, 0), stream)) offset += dpcm_show_state(fe, stream, buf + offset, out_count - offset); snd_soc_dpcm_mutex_unlock(fe); ret = 
simple_read_from_buffer(user_buf, count, ppos, buf, offset); kfree(buf); return ret; } static const struct file_operations dpcm_state_fops = { .open = simple_open, .read = dpcm_state_read_file, .llseek = default_llseek, }; void soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd) { if (!rtd->dai_link->dynamic) return; if (!rtd->card->debugfs_card_root) return; rtd->debugfs_dpcm_root = debugfs_create_dir(rtd->dai_link->name, rtd->card->debugfs_card_root); debugfs_create_file("state", 0444, rtd->debugfs_dpcm_root, rtd, &dpcm_state_fops); } static void dpcm_create_debugfs_state(struct snd_soc_dpcm *dpcm, int stream) { char *name; name = kasprintf(GFP_KERNEL, "%s:%s", dpcm->be->dai_link->name, stream ? "capture" : "playback"); if (name) { dpcm->debugfs_state = debugfs_create_dir( name, dpcm->fe->debugfs_dpcm_root); debugfs_create_u32("state", 0644, dpcm->debugfs_state, &dpcm->state); kfree(name); } } static void dpcm_remove_debugfs_state(struct snd_soc_dpcm *dpcm) { debugfs_remove_recursive(dpcm->debugfs_state); } #else static inline void dpcm_create_debugfs_state(struct snd_soc_dpcm *dpcm, int stream) { } static inline void dpcm_remove_debugfs_state(struct snd_soc_dpcm *dpcm) { } #endif /* Set FE's runtime_update state; the state is protected via PCM stream lock * for avoiding the race with trigger callback. * If the state is unset and a trigger is pending while the previous operation, * process the pending trigger action here. 
*/ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd); static void dpcm_set_fe_update_state(struct snd_soc_pcm_runtime *fe, int stream, enum snd_soc_dpcm_update state) { struct snd_pcm_substream *substream = snd_soc_dpcm_get_substream(fe, stream); snd_soc_dpcm_stream_lock_irq(fe, stream); if (state == SND_SOC_DPCM_UPDATE_NO && fe->dpcm[stream].trigger_pending) { dpcm_fe_dai_do_trigger(substream, fe->dpcm[stream].trigger_pending - 1); fe->dpcm[stream].trigger_pending = 0; } fe->dpcm[stream].runtime_update = state; snd_soc_dpcm_stream_unlock_irq(fe, stream); } static void dpcm_set_be_update_state(struct snd_soc_pcm_runtime *be, int stream, enum snd_soc_dpcm_update state) { be->dpcm[stream].runtime_update = state; } /** * snd_soc_runtime_action() - Increment/Decrement active count for * PCM runtime components * @rtd: ASoC PCM runtime that is activated * @stream: Direction of the PCM stream * @action: Activate stream if 1. Deactivate if -1. * * Increments/Decrements the active count for all the DAIs and components * attached to a PCM runtime. * Should typically be called when a stream is opened. * * Must be called with the rtd->card->pcm_mutex being held */ void snd_soc_runtime_action(struct snd_soc_pcm_runtime *rtd, int stream, int action) { struct snd_soc_dai *dai; int i; snd_soc_dpcm_mutex_assert_held(rtd); for_each_rtd_dais(rtd, i, dai) snd_soc_dai_action(dai, stream, action); } EXPORT_SYMBOL_GPL(snd_soc_runtime_action); /** * snd_soc_runtime_ignore_pmdown_time() - Check whether to ignore the power down delay * @rtd: The ASoC PCM runtime that should be checked. * * This function checks whether the power down delay should be ignored for a * specific PCM runtime. Returns true if the delay is 0, if it the DAI link has * been configured to ignore the delay, or if none of the components benefits * from having the delay. 
*/ bool snd_soc_runtime_ignore_pmdown_time(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_component *component; bool ignore = true; int i; if (!rtd->pmdown_time || rtd->dai_link->ignore_pmdown_time) return true; for_each_rtd_components(rtd, i, component) ignore &= !component->driver->use_pmdown_time; return ignore; } /** * snd_soc_set_runtime_hwparams - set the runtime hardware parameters * @substream: the pcm substream * @hw: the hardware parameters * * Sets the substream runtime hardware parameters. */ int snd_soc_set_runtime_hwparams(struct snd_pcm_substream *substream, const struct snd_pcm_hardware *hw) { substream->runtime->hw = *hw; return 0; } EXPORT_SYMBOL_GPL(snd_soc_set_runtime_hwparams); /* DPCM stream event, send event to FE and all active BEs. */ int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir, int event) { struct snd_soc_dpcm *dpcm; snd_soc_dpcm_mutex_assert_held(fe); for_each_dpcm_be(fe, dir, dpcm) { struct snd_soc_pcm_runtime *be = dpcm->be; dev_dbg(be->dev, "ASoC: BE %s event %d dir %d\n", be->dai_link->name, event, dir); if ((event == SND_SOC_DAPM_STREAM_STOP) && (be->dpcm[dir].users >= 1)) continue; snd_soc_dapm_stream_event(be, dir, event); } snd_soc_dapm_stream_event(fe, dir, event); return 0; } static void soc_pcm_set_dai_params(struct snd_soc_dai *dai, struct snd_pcm_hw_params *params) { if (params) { dai->rate = params_rate(params); dai->channels = params_channels(params); dai->sample_bits = snd_pcm_format_physical_width(params_format(params)); } else { dai->rate = 0; dai->channels = 0; dai->sample_bits = 0; } } static int soc_pcm_apply_symmetry(struct snd_pcm_substream *substream, struct snd_soc_dai *soc_dai) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); int ret; if (!snd_soc_dai_active(soc_dai)) return 0; #define __soc_pcm_apply_symmetry(name, NAME) \ if (soc_dai->name && (soc_dai->driver->symmetric_##name || \ rtd->dai_link->symmetric_##name)) { \ dev_dbg(soc_dai->dev, "ASoC: Symmetry forces 
%s to %d\n",\ #name, soc_dai->name); \ \ ret = snd_pcm_hw_constraint_single(substream->runtime, \ SNDRV_PCM_HW_PARAM_##NAME,\ soc_dai->name); \ if (ret < 0) { \ dev_err(soc_dai->dev, \ "ASoC: Unable to apply %s constraint: %d\n",\ #name, ret); \ return ret; \ } \ } __soc_pcm_apply_symmetry(rate, RATE); __soc_pcm_apply_symmetry(channels, CHANNELS); __soc_pcm_apply_symmetry(sample_bits, SAMPLE_BITS); return 0; } static int soc_pcm_params_symmetry(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_dai d; struct snd_soc_dai *dai; struct snd_soc_dai *cpu_dai; unsigned int symmetry, i; d.name = __func__; soc_pcm_set_dai_params(&d, params); #define __soc_pcm_params_symmetry(xxx) \ symmetry = rtd->dai_link->symmetric_##xxx; \ for_each_rtd_dais(rtd, i, dai) \ symmetry |= dai->driver->symmetric_##xxx; \ \ if (symmetry) \ for_each_rtd_cpu_dais(rtd, i, cpu_dai) \ if (!snd_soc_dai_is_dummy(cpu_dai) && \ cpu_dai->xxx && cpu_dai->xxx != d.xxx) { \ dev_err(rtd->dev, "ASoC: unmatched %s symmetry: %s:%d - %s:%d\n", \ #xxx, cpu_dai->name, cpu_dai->xxx, d.name, d.xxx); \ return -EINVAL; \ } /* reject unmatched parameters when applying symmetry */ __soc_pcm_params_symmetry(rate); __soc_pcm_params_symmetry(channels); __soc_pcm_params_symmetry(sample_bits); return 0; } static void soc_pcm_update_symmetry(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_dai_link *link = rtd->dai_link; struct snd_soc_dai *dai; unsigned int symmetry, i; symmetry = link->symmetric_rate || link->symmetric_channels || link->symmetric_sample_bits; for_each_rtd_dais(rtd, i, dai) symmetry = symmetry || dai->driver->symmetric_rate || dai->driver->symmetric_channels || dai->driver->symmetric_sample_bits; if (symmetry) substream->runtime->hw.info |= SNDRV_PCM_INFO_JOINT_DUPLEX; } static void soc_pcm_set_msb(struct snd_pcm_substream 
*substream, int bits) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); int ret; if (!bits) return; ret = snd_pcm_hw_constraint_msbits(substream->runtime, 0, 0, bits); if (ret != 0) dev_warn(rtd->dev, "ASoC: Failed to set MSB %d: %d\n", bits, ret); } static void soc_pcm_apply_msb(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_dai *cpu_dai; struct snd_soc_dai *codec_dai; int stream = substream->stream; int i; unsigned int bits = 0, cpu_bits = 0; for_each_rtd_codec_dais(rtd, i, codec_dai) { struct snd_soc_pcm_stream *pcm_codec = snd_soc_dai_get_pcm_stream(codec_dai, stream); if (pcm_codec->sig_bits == 0) { bits = 0; break; } bits = max(pcm_codec->sig_bits, bits); } for_each_rtd_cpu_dais(rtd, i, cpu_dai) { struct snd_soc_pcm_stream *pcm_cpu = snd_soc_dai_get_pcm_stream(cpu_dai, stream); if (pcm_cpu->sig_bits == 0) { cpu_bits = 0; break; } cpu_bits = max(pcm_cpu->sig_bits, cpu_bits); } soc_pcm_set_msb(substream, bits); soc_pcm_set_msb(substream, cpu_bits); } static void soc_pcm_hw_init(struct snd_pcm_hardware *hw) { hw->rates = UINT_MAX; hw->rate_min = 0; hw->rate_max = UINT_MAX; hw->channels_min = 0; hw->channels_max = UINT_MAX; hw->formats = ULLONG_MAX; } static void soc_pcm_hw_update_rate(struct snd_pcm_hardware *hw, struct snd_soc_pcm_stream *p) { hw->rates = snd_pcm_rate_mask_intersect(hw->rates, p->rates); /* setup hw->rate_min/max via hw->rates first */ snd_pcm_hw_limit_rates(hw); /* update hw->rate_min/max by snd_soc_pcm_stream */ hw->rate_min = max(hw->rate_min, p->rate_min); hw->rate_max = min_not_zero(hw->rate_max, p->rate_max); } static void soc_pcm_hw_update_chan(struct snd_pcm_hardware *hw, struct snd_soc_pcm_stream *p) { hw->channels_min = max(hw->channels_min, p->channels_min); hw->channels_max = min(hw->channels_max, p->channels_max); } static void soc_pcm_hw_update_format(struct snd_pcm_hardware *hw, struct snd_soc_pcm_stream *p) { hw->formats &= p->formats; 
} /** * snd_soc_runtime_calc_hw() - Calculate hw limits for a PCM stream * @rtd: ASoC PCM runtime * @hw: PCM hardware parameters (output) * @stream: Direction of the PCM stream * * Calculates the subset of stream parameters supported by all DAIs * associated with the PCM stream. */ int snd_soc_runtime_calc_hw(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hardware *hw, int stream) { struct snd_soc_dai *codec_dai; struct snd_soc_dai *cpu_dai; struct snd_soc_pcm_stream *codec_stream; struct snd_soc_pcm_stream *cpu_stream; unsigned int cpu_chan_min = 0, cpu_chan_max = UINT_MAX; int i; soc_pcm_hw_init(hw); /* first calculate min/max only for CPUs in the DAI link */ for_each_rtd_cpu_dais(rtd, i, cpu_dai) { /* * Skip CPUs which don't support the current stream type. * Otherwise, since the rate, channel, and format values will * zero in that case, we would have no usable settings left, * causing the resulting setup to fail. */ if (!snd_soc_dai_stream_valid(cpu_dai, stream)) continue; cpu_stream = snd_soc_dai_get_pcm_stream(cpu_dai, stream); soc_pcm_hw_update_chan(hw, cpu_stream); soc_pcm_hw_update_rate(hw, cpu_stream); soc_pcm_hw_update_format(hw, cpu_stream); } cpu_chan_min = hw->channels_min; cpu_chan_max = hw->channels_max; /* second calculate min/max only for CODECs in the DAI link */ for_each_rtd_codec_dais(rtd, i, codec_dai) { /* * Skip CODECs which don't support the current stream type. * Otherwise, since the rate, channel, and format values will * zero in that case, we would have no usable settings left, * causing the resulting setup to fail. 
*/ if (!snd_soc_dai_stream_valid(codec_dai, stream)) continue; codec_stream = snd_soc_dai_get_pcm_stream(codec_dai, stream); soc_pcm_hw_update_chan(hw, codec_stream); soc_pcm_hw_update_rate(hw, codec_stream); soc_pcm_hw_update_format(hw, codec_stream); } /* Verify both a valid CPU DAI and a valid CODEC DAI were found */ if (!hw->channels_min) return -EINVAL; /* * chan min/max cannot be enforced if there are multiple CODEC DAIs * connected to CPU DAI(s), use CPU DAI's directly and let * channel allocation be fixed up later */ if (rtd->dai_link->num_codecs > 1) { hw->channels_min = cpu_chan_min; hw->channels_max = cpu_chan_max; } return 0; } EXPORT_SYMBOL_GPL(snd_soc_runtime_calc_hw); static void soc_pcm_init_runtime_hw(struct snd_pcm_substream *substream) { struct snd_pcm_hardware *hw = &substream->runtime->hw; struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); u64 formats = hw->formats; /* * At least one CPU and one CODEC should match. Otherwise, we should * have bailed out on a higher level, since there would be no CPU or * CODEC to support the transfer direction in that case. 
*/ snd_soc_runtime_calc_hw(rtd, hw, substream->stream); if (formats) hw->formats &= formats; } static int soc_pcm_components_open(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int i, ret = 0; for_each_rtd_components(rtd, i, component) { ret = snd_soc_component_module_get_when_open(component, substream); if (ret < 0) break; ret = snd_soc_component_open(component, substream); if (ret < 0) break; } return ret; } static int soc_pcm_components_close(struct snd_pcm_substream *substream, int rollback) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int i, ret = 0; for_each_rtd_components(rtd, i, component) { int r = snd_soc_component_close(component, substream, rollback); if (r < 0) ret = r; /* use last ret */ snd_soc_component_module_put_when_close(component, substream, rollback); } return ret; } static int soc_pcm_clean(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_substream *substream, int rollback) { struct snd_soc_component *component; struct snd_soc_dai *dai; int i; snd_soc_dpcm_mutex_assert_held(rtd); if (!rollback) { snd_soc_runtime_deactivate(rtd, substream->stream); /* clear the corresponding DAIs parameters when going to be inactive */ for_each_rtd_dais(rtd, i, dai) { if (snd_soc_dai_active(dai) == 0) soc_pcm_set_dai_params(dai, NULL); if (snd_soc_dai_stream_active(dai, substream->stream) == 0) snd_soc_dai_digital_mute(dai, 1, substream->stream); } } for_each_rtd_dais(rtd, i, dai) snd_soc_dai_shutdown(dai, substream, rollback); snd_soc_link_shutdown(substream, rollback); soc_pcm_components_close(substream, rollback); snd_soc_pcm_component_pm_runtime_put(rtd, substream, rollback); for_each_rtd_components(rtd, i, component) if (!snd_soc_component_active(component)) pinctrl_pm_select_sleep_state(component->dev); return 0; } /* * Called by ALSA when a PCM substream is closed. Private data can be * freed here. 
 * The cpu DAI, codec DAI, machine and components are also
 * shutdown.
 */
static int __soc_pcm_close(struct snd_soc_pcm_runtime *rtd,
			   struct snd_pcm_substream *substream)
{
	/* rollback = 0: this is a full, non-error-path teardown */
	return soc_pcm_clean(rtd, substream, 0);
}

/* PCM close ops for non-DPCM streams */
static int soc_pcm_close(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);

	snd_soc_dpcm_mutex_lock(rtd);
	__soc_pcm_close(rtd, substream);
	snd_soc_dpcm_mutex_unlock(rtd);
	return 0;
}

/*
 * Sanity-check the computed runtime hardware capabilities: reject (and log)
 * configurations where the CPU/CODEC DAIs have no overlapping rates, formats
 * or channel counts. Returns 0 on success, -EINVAL otherwise.
 */
static int soc_hw_sanity_check(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct snd_pcm_hardware *hw = &substream->runtime->hw;
	const char *name_cpu = soc_cpu_dai_name(rtd);
	const char *name_codec = soc_codec_dai_name(rtd);
	const char *err_msg;
	struct device *dev = rtd->dev;

	err_msg = "rates";
	if (!hw->rates)
		goto config_err;

	err_msg = "formats";
	if (!hw->formats)
		goto config_err;

	err_msg = "channels";
	if (!hw->channels_min || !hw->channels_max ||
	    hw->channels_min > hw->channels_max)
		goto config_err;

	dev_dbg(dev, "ASoC: %s <-> %s info:\n", name_codec, name_cpu);
	dev_dbg(dev, "ASoC: rate mask 0x%x\n", hw->rates);
	dev_dbg(dev, "ASoC: ch min %d max %d\n", hw->channels_min,
		hw->channels_max);
	dev_dbg(dev, "ASoC: rate min %d max %d\n", hw->rate_min,
		hw->rate_max);

	return 0;

config_err:
	dev_err(dev, "ASoC: %s <-> %s No matching %s\n",
		name_codec, name_cpu, err_msg);
	return -EINVAL;
}

/*
 * Called by ALSA when a PCM substream is opened, the runtime->hw record is
 * then initialized and any private data can be allocated. This also calls
 * startup for the cpu DAI, component, machine and codec DAI.
*/ static int __soc_pcm_open(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_substream *substream) { struct snd_soc_component *component; struct snd_soc_dai *dai; int i, ret = 0; snd_soc_dpcm_mutex_assert_held(rtd); for_each_rtd_components(rtd, i, component) pinctrl_pm_select_default_state(component->dev); ret = snd_soc_pcm_component_pm_runtime_get(rtd, substream); if (ret < 0) goto err; ret = soc_pcm_components_open(substream); if (ret < 0) goto err; ret = snd_soc_link_startup(substream); if (ret < 0) goto err; /* startup the audio subsystem */ for_each_rtd_dais(rtd, i, dai) { ret = snd_soc_dai_startup(dai, substream); if (ret < 0) goto err; } /* Dynamic PCM DAI links compat checks use dynamic capabilities */ if (rtd->dai_link->dynamic || rtd->dai_link->no_pcm) goto dynamic; /* Check that the codec and cpu DAIs are compatible */ soc_pcm_init_runtime_hw(substream); soc_pcm_update_symmetry(substream); ret = soc_hw_sanity_check(substream); if (ret < 0) goto err; soc_pcm_apply_msb(substream); /* Symmetry only applies if we've already got an active stream. */ for_each_rtd_dais(rtd, i, dai) { ret = soc_pcm_apply_symmetry(substream, dai); if (ret != 0) goto err; } dynamic: snd_soc_runtime_activate(rtd, substream->stream); ret = 0; err: if (ret < 0) soc_pcm_clean(rtd, substream, 1); return soc_pcm_ret(rtd, ret); } /* PCM open ops for non-DPCM streams */ static int soc_pcm_open(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); int ret; snd_soc_dpcm_mutex_lock(rtd); ret = __soc_pcm_open(rtd, substream); snd_soc_dpcm_mutex_unlock(rtd); return ret; } /* * Called by ALSA when the PCM substream is prepared, can set format, sample * rate, etc. This function is non atomic and can be called multiple times, * it can refer to the runtime info. 
*/ static int __soc_pcm_prepare(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_substream *substream) { struct snd_soc_dai *dai; int i, ret = 0; snd_soc_dpcm_mutex_assert_held(rtd); ret = snd_soc_link_prepare(substream); if (ret < 0) goto out; ret = snd_soc_pcm_component_prepare(substream); if (ret < 0) goto out; ret = snd_soc_pcm_dai_prepare(substream); if (ret < 0) goto out; /* cancel any delayed stream shutdown that is pending */ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && rtd->pop_wait) { rtd->pop_wait = 0; cancel_delayed_work(&rtd->delayed_work); } snd_soc_dapm_stream_event(rtd, substream->stream, SND_SOC_DAPM_STREAM_START); for_each_rtd_dais(rtd, i, dai) snd_soc_dai_digital_mute(dai, 0, substream->stream); out: return soc_pcm_ret(rtd, ret); } /* PCM prepare ops for non-DPCM streams */ static int soc_pcm_prepare(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); int ret; snd_soc_dpcm_mutex_lock(rtd); ret = __soc_pcm_prepare(rtd, substream); snd_soc_dpcm_mutex_unlock(rtd); return ret; } static void soc_pcm_codec_params_fixup(struct snd_pcm_hw_params *params, unsigned int mask) { struct snd_interval *interval; int channels = hweight_long(mask); interval = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); interval->min = channels; interval->max = channels; } static int soc_pcm_hw_clean(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_substream *substream, int rollback) { struct snd_soc_dai *dai; int i; snd_soc_dpcm_mutex_assert_held(rtd); /* run the stream event */ snd_soc_dapm_stream_stop(rtd, substream->stream); /* free any machine hw params */ snd_soc_link_hw_free(substream, rollback); /* free any component resources */ snd_soc_pcm_component_hw_free(substream, rollback); /* now free hw params for the DAIs */ for_each_rtd_dais(rtd, i, dai) if (snd_soc_dai_stream_valid(dai, substream->stream)) snd_soc_dai_hw_free(dai, substream, rollback); return 0; } /* * Frees resources allocated by 
hw_params, can be called multiple times */ static int __soc_pcm_hw_free(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_substream *substream) { return soc_pcm_hw_clean(rtd, substream, 0); } /* hw_free PCM ops for non-DPCM streams */ static int soc_pcm_hw_free(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); int ret; snd_soc_dpcm_mutex_lock(rtd); ret = __soc_pcm_hw_free(rtd, substream); snd_soc_dpcm_mutex_unlock(rtd); return ret; } /* * Called by ALSA when the hardware params are set by application. This * function can also be called multiple times and can allocate buffers * (using snd_pcm_lib_* ). It's non-atomic. */ static int __soc_pcm_hw_params(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_dai *cpu_dai; struct snd_soc_dai *codec_dai; struct snd_pcm_hw_params tmp_params; int i, ret = 0; snd_soc_dpcm_mutex_assert_held(rtd); ret = soc_pcm_params_symmetry(substream, params); if (ret) goto out; ret = snd_soc_link_hw_params(substream, params); if (ret < 0) goto out; for_each_rtd_codec_dais(rtd, i, codec_dai) { unsigned int tdm_mask = snd_soc_dai_tdm_mask_get(codec_dai, substream->stream); /* * Skip CODECs which don't support the current stream type, * the idea being that if a CODEC is not used for the currently * set up transfer direction, it should not need to be * configured, especially since the configuration used might * not even be supported by that CODEC. There may be cases * however where a CODEC needs to be set up although it is * actually not being used for the transfer, e.g. if a * capture-only CODEC is acting as an LRCLK and/or BCLK master * for the DAI link including a playback-only CODEC. * If this becomes necessary, we will have to augment the * machine driver setup with information on how to act, so * we can do the right thing here. 
*/ if (!snd_soc_dai_stream_valid(codec_dai, substream->stream)) continue; /* copy params for each codec */ tmp_params = *params; /* fixup params based on TDM slot masks */ if (tdm_mask) soc_pcm_codec_params_fixup(&tmp_params, tdm_mask); ret = snd_soc_dai_hw_params(codec_dai, substream, &tmp_params); if(ret < 0) goto out; soc_pcm_set_dai_params(codec_dai, &tmp_params); snd_soc_dapm_update_dai(substream, &tmp_params, codec_dai); } for_each_rtd_cpu_dais(rtd, i, cpu_dai) { unsigned int ch_mask = 0; int j; /* * Skip CPUs which don't support the current stream * type. See soc_pcm_init_runtime_hw() for more details */ if (!snd_soc_dai_stream_valid(cpu_dai, substream->stream)) continue; /* copy params for each cpu */ tmp_params = *params; if (!rtd->dai_link->codec_ch_maps) goto hw_params; /* * construct cpu channel mask by combining ch_mask of each * codec which maps to the cpu. */ for_each_rtd_codec_dais(rtd, j, codec_dai) { if (rtd->dai_link->codec_ch_maps[j].connected_cpu_id == i) ch_mask |= rtd->dai_link->codec_ch_maps[j].ch_mask; } /* fixup cpu channel number */ if (ch_mask) soc_pcm_codec_params_fixup(&tmp_params, ch_mask); hw_params: ret = snd_soc_dai_hw_params(cpu_dai, substream, &tmp_params); if (ret < 0) goto out; /* store the parameters for each DAI */ soc_pcm_set_dai_params(cpu_dai, &tmp_params); snd_soc_dapm_update_dai(substream, &tmp_params, cpu_dai); } ret = snd_soc_pcm_component_hw_params(substream, params); out: if (ret < 0) soc_pcm_hw_clean(rtd, substream, 1); return soc_pcm_ret(rtd, ret); } /* hw_params PCM ops for non-DPCM streams */ static int soc_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); int ret; snd_soc_dpcm_mutex_lock(rtd); ret = __soc_pcm_hw_params(rtd, substream, params); snd_soc_dpcm_mutex_unlock(rtd); return ret; } #define TRIGGER_MAX 3 static int (* const trigger[][TRIGGER_MAX])(struct snd_pcm_substream *substream, int cmd, int 
rollback) = { [SND_SOC_TRIGGER_ORDER_DEFAULT] = { snd_soc_link_trigger, snd_soc_pcm_component_trigger, snd_soc_pcm_dai_trigger, }, [SND_SOC_TRIGGER_ORDER_LDC] = { snd_soc_link_trigger, snd_soc_pcm_dai_trigger, snd_soc_pcm_component_trigger, }, }; static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component; int ret = 0, r = 0, i; int rollback = 0; int start = 0, stop = 0; /* * select START/STOP sequence */ for_each_rtd_components(rtd, i, component) { if (component->driver->trigger_start) start = component->driver->trigger_start; if (component->driver->trigger_stop) stop = component->driver->trigger_stop; } if (rtd->dai_link->trigger_start) start = rtd->dai_link->trigger_start; if (rtd->dai_link->trigger_stop) stop = rtd->dai_link->trigger_stop; if (start < 0 || start >= SND_SOC_TRIGGER_ORDER_MAX || stop < 0 || stop >= SND_SOC_TRIGGER_ORDER_MAX) return -EINVAL; /* * START */ switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: for (i = 0; i < TRIGGER_MAX; i++) { r = trigger[start][i](substream, cmd, 0); if (r < 0) break; } } /* * Rollback if START failed * find correspond STOP command */ if (r < 0) { rollback = 1; ret = r; switch (cmd) { case SNDRV_PCM_TRIGGER_START: cmd = SNDRV_PCM_TRIGGER_STOP; break; case SNDRV_PCM_TRIGGER_RESUME: cmd = SNDRV_PCM_TRIGGER_SUSPEND; break; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: cmd = SNDRV_PCM_TRIGGER_PAUSE_PUSH; break; } } /* * STOP */ switch (cmd) { case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: for (i = TRIGGER_MAX; i > 0; i--) { r = trigger[stop][i - 1](substream, cmd, rollback); if (r < 0) ret = r; } } return ret; } /* * soc level wrapper for pointer callback * If cpu_dai, codec_dai, component driver has the delay callback, then * the runtime->delay will be updated via snd_soc_pcm_component/dai_delay(). 
 */
static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t offset = 0;
	snd_pcm_sframes_t codec_delay = 0;
	snd_pcm_sframes_t cpu_delay = 0;

	offset = snd_soc_pcm_component_pointer(substream);

	/* should be called *after* snd_soc_pcm_component_pointer() */
	snd_soc_pcm_dai_delay(substream, &cpu_delay, &codec_delay);
	snd_soc_pcm_component_delay(substream, &cpu_delay, &codec_delay);

	/* total delay seen by userspace is DAI delay + component delay */
	runtime->delay = cpu_delay + codec_delay;

	return offset;
}

/*
 * connect a FE and BE
 *
 * Allocates a snd_soc_dpcm link and adds it to both the FE's BE-client
 * list and the BE's FE-client list, under the FE stream lock.
 *
 * Returns 1 when a new connection was made, 0 when the pair is already
 * connected, -EINVAL for an atomic FE / nonatomic BE mismatch, or
 * -ENOMEM on allocation failure.  Caller must hold the FE's DPCM mutex
 * (asserted below).
 */
static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
		struct snd_soc_pcm_runtime *be, int stream)
{
	struct snd_pcm_substream *fe_substream;
	struct snd_pcm_substream *be_substream;
	struct snd_soc_dpcm *dpcm;

	snd_soc_dpcm_mutex_assert_held(fe);

	/* only add new dpcms */
	for_each_dpcm_be(fe, stream, dpcm) {
		if (dpcm->be == be && dpcm->fe == fe)
			return 0;
	}

	fe_substream = snd_soc_dpcm_get_substream(fe, stream);
	be_substream = snd_soc_dpcm_get_substream(be, stream);

	/* an atomic FE cannot drive a nonatomic BE */
	if (!fe_substream->pcm->nonatomic && be_substream->pcm->nonatomic) {
		dev_err(be->dev, "%s: FE is atomic but BE is nonatomic, invalid configuration\n",
			__func__);
		return -EINVAL;
	}
	/* a nonatomic FE forces the BE nonatomic too */
	if (fe_substream->pcm->nonatomic && !be_substream->pcm->nonatomic) {
		dev_dbg(be->dev, "FE is nonatomic but BE is not, forcing BE as nonatomic\n");
		be_substream->pcm->nonatomic = 1;
	}

	dpcm = kzalloc(sizeof(struct snd_soc_dpcm), GFP_KERNEL);
	if (!dpcm)
		return -ENOMEM;

	dpcm->be = be;
	dpcm->fe = fe;
	dpcm->state = SND_SOC_DPCM_LINK_STATE_NEW;

	/* publish the link on both lists atomically w.r.t. the stream lock */
	snd_soc_dpcm_stream_lock_irq(fe, stream);
	list_add(&dpcm->list_be, &fe->dpcm[stream].be_clients);
	list_add(&dpcm->list_fe, &be->dpcm[stream].fe_clients);
	snd_soc_dpcm_stream_unlock_irq(fe, stream);

	dev_dbg(fe->dev, "connected new DPCM %s path %s %s %s\n",
			stream ? "capture" : "playback",  fe->dai_link->name,
			stream ?
"<-" : "->", be->dai_link->name);

	dpcm_create_debugfs_state(dpcm, stream);

	/* 1 == newly connected (0 would mean "already connected") */
	return 1;
}

/*
 * reparent a BE onto another FE
 *
 * When a BE is still in use by another FE, point its substream runtime
 * at that other FE's runtime so the BE stays valid after the current FE
 * disconnects.  No-op if the BE has no remaining users or no substream.
 */
static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe,
			struct snd_soc_pcm_runtime *be, int stream)
{
	struct snd_soc_dpcm *dpcm;
	struct snd_pcm_substream *fe_substream, *be_substream;

	/* reparent if BE is connected to other FEs */
	if (!be->dpcm[stream].users)
		return;

	be_substream = snd_soc_dpcm_get_substream(be, stream);
	if (!be_substream)
		return;

	for_each_dpcm_fe(be, stream, dpcm) {
		if (dpcm->fe == fe)
			continue;

		dev_dbg(fe->dev, "reparent %s path %s %s %s\n",
			stream ? "capture" : "playback",
			dpcm->fe->dai_link->name,
			stream ? "<-" : "->", dpcm->be->dai_link->name);

		/* first other FE found becomes the new runtime owner */
		fe_substream = snd_soc_dpcm_get_substream(dpcm->fe, stream);
		be_substream->runtime = fe_substream->runtime;
		break;
	}
}

/*
 * disconnect a BE and FE
 *
 * Unlinks every FE<->BE dpcm in SND_SOC_DPCM_LINK_STATE_FREE under the
 * stream lock, then frees the detached entries (and their debugfs state)
 * outside the lock via a local "deleted" list.  Caller must hold the
 * FE's DPCM mutex.
 */
void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
{
	struct snd_soc_dpcm *dpcm, *d;
	LIST_HEAD(deleted_dpcms);

	snd_soc_dpcm_mutex_assert_held(fe);

	snd_soc_dpcm_stream_lock_irq(fe, stream);
	for_each_dpcm_be_safe(fe, stream, dpcm, d) {
		dev_dbg(fe->dev, "ASoC: BE %s disconnect check for %s\n",
				stream ? "capture" : "playback",
				dpcm->be->dai_link->name);

		/* only links already marked FREE are torn down */
		if (dpcm->state != SND_SOC_DPCM_LINK_STATE_FREE)
			continue;

		dev_dbg(fe->dev, "freed DSP %s path %s %s %s\n",
			stream ? "capture" : "playback", fe->dai_link->name,
			stream ?
"<-" : "->", dpcm->be->dai_link->name); /* BEs still alive need new FE */ dpcm_be_reparent(fe, dpcm->be, stream); list_del(&dpcm->list_be); list_move(&dpcm->list_fe, &deleted_dpcms); } snd_soc_dpcm_stream_unlock_irq(fe, stream); while (!list_empty(&deleted_dpcms)) { dpcm = list_first_entry(&deleted_dpcms, struct snd_soc_dpcm, list_fe); list_del(&dpcm->list_fe); dpcm_remove_debugfs_state(dpcm); kfree(dpcm); } } /* get BE for DAI widget and stream */ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card, struct snd_soc_dapm_widget *widget, int stream) { struct snd_soc_pcm_runtime *be; struct snd_soc_dapm_widget *w; struct snd_soc_dai *dai; int i; dev_dbg(card->dev, "ASoC: find BE for widget %s\n", widget->name); for_each_card_rtds(card, be) { if (!be->dai_link->no_pcm) continue; if (!snd_soc_dpcm_get_substream(be, stream)) continue; for_each_rtd_dais(be, i, dai) { w = snd_soc_dai_get_widget(dai, stream); dev_dbg(card->dev, "ASoC: try BE : %s\n", w ? w->name : "(not set)"); if (w == widget) return be; } } /* Widget provided is not a BE */ return NULL; } int widget_in_list(struct snd_soc_dapm_widget_list *list, struct snd_soc_dapm_widget *widget) { struct snd_soc_dapm_widget *w; int i; for_each_dapm_widgets(list, i, w) if (widget == w) return 1; return 0; } EXPORT_SYMBOL_GPL(widget_in_list); bool dpcm_end_walk_at_be(struct snd_soc_dapm_widget *widget, enum snd_soc_dapm_direction dir) { struct snd_soc_card *card = widget->dapm->card; struct snd_soc_pcm_runtime *rtd; int stream; /* adjust dir to stream */ if (dir == SND_SOC_DAPM_DIR_OUT) stream = SNDRV_PCM_STREAM_PLAYBACK; else stream = SNDRV_PCM_STREAM_CAPTURE; rtd = dpcm_get_be(card, widget, stream); if (rtd) return true; return false; } EXPORT_SYMBOL_GPL(dpcm_end_walk_at_be); int dpcm_path_get(struct snd_soc_pcm_runtime *fe, int stream, struct snd_soc_dapm_widget_list **list) { struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(fe, 0); int paths; if (fe->dai_link->num_cpus > 1) { dev_err(fe->dev, "%s 
doesn't support Multi CPU yet\n", __func__); return -EINVAL; } /* get number of valid DAI paths and their widgets */ paths = snd_soc_dapm_dai_get_connected_widgets(cpu_dai, stream, list, fe->card->component_chaining ? NULL : dpcm_end_walk_at_be); if (paths > 0) dev_dbg(fe->dev, "ASoC: found %d audio %s paths\n", paths, stream ? "capture" : "playback"); else if (paths == 0) dev_dbg(fe->dev, "ASoC: %s no valid %s path\n", fe->dai_link->name, stream ? "capture" : "playback"); return paths; } void dpcm_path_put(struct snd_soc_dapm_widget_list **list) { snd_soc_dapm_dai_free_widgets(list); } static bool dpcm_be_is_active(struct snd_soc_dpcm *dpcm, int stream, struct snd_soc_dapm_widget_list *list) { struct snd_soc_dai *dai; unsigned int i; /* is there a valid DAI widget for this BE */ for_each_rtd_dais(dpcm->be, i, dai) { struct snd_soc_dapm_widget *widget = snd_soc_dai_get_widget(dai, stream); /* * The BE is pruned only if none of the dai * widgets are in the active list. */ if (widget && widget_in_list(list, widget)) return true; } return false; } static int dpcm_prune_paths(struct snd_soc_pcm_runtime *fe, int stream, struct snd_soc_dapm_widget_list **list_) { struct snd_soc_dpcm *dpcm; int prune = 0; /* Destroy any old FE <--> BE connections */ for_each_dpcm_be(fe, stream, dpcm) { if (dpcm_be_is_active(dpcm, stream, *list_)) continue; dev_dbg(fe->dev, "ASoC: pruning %s BE %s for %s\n", stream ? 
"capture" : "playback", dpcm->be->dai_link->name, fe->dai_link->name); dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE; dpcm_set_be_update_state(dpcm->be, stream, SND_SOC_DPCM_UPDATE_BE); prune++; } dev_dbg(fe->dev, "ASoC: found %d old BE paths for pruning\n", prune); return prune; } static int dpcm_add_paths(struct snd_soc_pcm_runtime *fe, int stream, struct snd_soc_dapm_widget_list **list_) { struct snd_soc_card *card = fe->card; struct snd_soc_dapm_widget_list *list = *list_; struct snd_soc_pcm_runtime *be; struct snd_soc_dapm_widget *widget; struct snd_pcm_substream *fe_substream = snd_soc_dpcm_get_substream(fe, stream); int i, new = 0, err; /* don't connect if FE is not running */ if (!fe_substream->runtime && !fe->fe_compr) return new; /* Create any new FE <--> BE connections */ for_each_dapm_widgets(list, i, widget) { switch (widget->id) { case snd_soc_dapm_dai_in: if (stream != SNDRV_PCM_STREAM_PLAYBACK) continue; break; case snd_soc_dapm_dai_out: if (stream != SNDRV_PCM_STREAM_CAPTURE) continue; break; default: continue; } /* is there a valid BE rtd for this widget */ be = dpcm_get_be(card, widget, stream); if (!be) { dev_dbg(fe->dev, "ASoC: no BE found for %s\n", widget->name); continue; } /* * Filter for systems with 'component_chaining' enabled. * This helps to avoid unnecessary re-configuration of an * already active BE on such systems. */ if (fe->card->component_chaining && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_NEW) && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_CLOSE)) continue; /* newly connected FE and BE */ err = dpcm_be_connect(fe, be, stream); if (err < 0) { dev_err(fe->dev, "ASoC: can't connect %s\n", widget->name); break; } else if (err == 0) /* already connected */ continue; /* new */ dpcm_set_be_update_state(be, stream, SND_SOC_DPCM_UPDATE_BE); new++; } dev_dbg(fe->dev, "ASoC: found %d new BE paths\n", new); return new; } /* * Find the corresponding BE DAIs that source or sink audio to this * FE substream. 
 */
int dpcm_process_paths(struct snd_soc_pcm_runtime *fe,
	int stream, struct snd_soc_dapm_widget_list **list, int new)
{
	/* "new" selects between growing and pruning the FE's BE set */
	if (new)
		return dpcm_add_paths(fe, stream, list);
	else
		return dpcm_prune_paths(fe, stream, list);
}

/*
 * Reset the update state of every BE connected to this FE/stream to
 * SND_SOC_DPCM_UPDATE_NO.
 */
void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream)
{
	struct snd_soc_dpcm *dpcm;

	for_each_dpcm_be(fe, stream, dpcm)
		dpcm_set_be_update_state(dpcm->be, stream, SND_SOC_DPCM_UPDATE_NO);
}

/*
 * dpcm_be_dai_stop - drop this FE's reference on its BEs and close the
 * BEs whose user count hits zero.
 *
 * @last: stop iterating when this dpcm is reached (used for partial
 *        rollback); NULL means walk the full list.
 * @do_hw_free: also hw_free a BE that is past the OPEN state before
 *        closing it.
 */
void dpcm_be_dai_stop(struct snd_soc_pcm_runtime *fe, int stream,
		      int do_hw_free, struct snd_soc_dpcm *last)
{
	struct snd_soc_dpcm *dpcm;

	/* disable any enabled and non active backends */
	for_each_dpcm_be(fe, stream, dpcm) {
		struct snd_soc_pcm_runtime *be = dpcm->be;
		struct snd_pcm_substream *be_substream =
			snd_soc_dpcm_get_substream(be, stream);

		if (dpcm == last)
			return;

		/* is this op for this BE ? */
		if (!snd_soc_dpcm_be_can_update(fe, be, stream))
			continue;

		if (be->dpcm[stream].users == 0) {
			dev_err(be->dev, "ASoC: no users %s at close - state %d\n",
				stream ? "capture" : "playback",
				be->dpcm[stream].state);
			continue;
		}

		/* other FEs still hold this BE open */
		if (--be->dpcm[stream].users != 0)
			continue;

		if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN) {
			if (!do_hw_free)
				continue;

			if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) {
				__soc_pcm_hw_free(be, be_substream);
				be->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE;
			}
		}

		__soc_pcm_close(be, be_substream);
		/* runtime was borrowed from the FE; drop the reference */
		be_substream->runtime = NULL;
		be->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
	}
}

/*
 * dpcm_be_dai_startup - open all BEs connected to this FE/stream.
 *
 * Takes a user reference on each BE and calls __soc_pcm_open() only for
 * the first user.  On any open failure the already-started BEs are
 * rolled back.  Returns the number of BEs opened, or a negative error.
 */
int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream)
{
	struct snd_pcm_substream *fe_substream =
		snd_soc_dpcm_get_substream(fe, stream);
	struct snd_soc_pcm_runtime *be;
	struct snd_soc_dpcm *dpcm;
	int err, count = 0;

	/* only startup BE DAIs that are either sinks or sources to this FE DAI */
	for_each_dpcm_be(fe, stream, dpcm) {
		struct snd_pcm_substream *be_substream;

		be = dpcm->be;
		be_substream = snd_soc_dpcm_get_substream(be, stream);

		if (!be_substream) {
			dev_err(be->dev, "ASoC: no backend %s stream\n",
				stream ? "capture" : "playback");
			continue;
		}

		/* is this op for this BE ? */
		if (!snd_soc_dpcm_be_can_update(fe, be, stream))
			continue;

		/* first time the dpcm is open ? */
		if (be->dpcm[stream].users == DPCM_MAX_BE_USERS) {
			dev_err(be->dev, "ASoC: too many users %s at open %d\n",
				stream ? "capture" : "playback",
				be->dpcm[stream].state);
			continue;
		}

		/* only the first user actually opens the BE */
		if (be->dpcm[stream].users++ != 0)
			continue;

		if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_NEW) &&
		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_CLOSE))
			continue;

		dev_dbg(be->dev, "ASoC: open %s BE %s\n",
			stream ? "capture" : "playback", be->dai_link->name);

		/* BE borrows the FE's runtime for the open */
		be_substream->runtime = fe_substream->runtime;
		err = __soc_pcm_open(be, be_substream);
		if (err < 0) {
			/* undo the reference taken above */
			be->dpcm[stream].users--;
			if (be->dpcm[stream].users < 0)
				dev_err(be->dev, "ASoC: no users %s at unwind %d\n",
					stream ? "capture" : "playback",
					be->dpcm[stream].state);

			be->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
			goto unwind;
		}
		be->dpcm[stream].be_start = 0;
		be->dpcm[stream].state = SND_SOC_DPCM_STATE_OPEN;
		count++;
	}

	return count;

unwind:
	/* roll back the BEs opened before the failing one */
	dpcm_be_dai_startup_rollback(fe, stream, dpcm);

	return soc_pcm_ret(fe, err);
}

/*
 * Initialise the FE runtime hardware description, then intersect it with
 * the capabilities of every CPU DAI that supports this stream direction.
 */
static void dpcm_runtime_setup_fe(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_pcm_hardware *hw = &runtime->hw;
	struct snd_soc_dai *dai;
	int stream = substream->stream;
	u64 formats = hw->formats;
	int i;

	soc_pcm_hw_init(hw);

	/* keep any format restriction that was already in place */
	if (formats)
		hw->formats &= formats;

	for_each_rtd_cpu_dais(fe, i, dai) {
		struct snd_soc_pcm_stream *cpu_stream;

		/*
		 * Skip CPUs which don't support the current stream
		 * type.
See soc_pcm_init_runtime_hw() for more details */ if (!snd_soc_dai_stream_valid(dai, stream)) continue; cpu_stream = snd_soc_dai_get_pcm_stream(dai, stream); soc_pcm_hw_update_rate(hw, cpu_stream); soc_pcm_hw_update_chan(hw, cpu_stream); soc_pcm_hw_update_format(hw, cpu_stream); } } static void dpcm_runtime_setup_be_format(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_pcm_hardware *hw = &runtime->hw; struct snd_soc_dpcm *dpcm; struct snd_soc_dai *dai; int stream = substream->stream; if (!fe->dai_link->dpcm_merged_format) return; /* * It returns merged BE codec format * if FE want to use it (= dpcm_merged_format) */ for_each_dpcm_be(fe, stream, dpcm) { struct snd_soc_pcm_runtime *be = dpcm->be; struct snd_soc_pcm_stream *codec_stream; int i; for_each_rtd_codec_dais(be, i, dai) { /* * Skip CODECs which don't support the current stream * type. See soc_pcm_init_runtime_hw() for more details */ if (!snd_soc_dai_stream_valid(dai, stream)) continue; codec_stream = snd_soc_dai_get_pcm_stream(dai, stream); soc_pcm_hw_update_format(hw, codec_stream); } } } static void dpcm_runtime_setup_be_chan(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_pcm_hardware *hw = &runtime->hw; struct snd_soc_dpcm *dpcm; int stream = substream->stream; if (!fe->dai_link->dpcm_merged_chan) return; /* * It returns merged BE codec channel; * if FE want to use it (= dpcm_merged_chan) */ for_each_dpcm_be(fe, stream, dpcm) { struct snd_soc_pcm_runtime *be = dpcm->be; struct snd_soc_pcm_stream *cpu_stream; struct snd_soc_dai *dai; int i; for_each_rtd_cpu_dais(be, i, dai) { /* * Skip CPUs which don't support the current stream * type. 
See soc_pcm_init_runtime_hw() for more details */ if (!snd_soc_dai_stream_valid(dai, stream)) continue; cpu_stream = snd_soc_dai_get_pcm_stream(dai, stream); soc_pcm_hw_update_chan(hw, cpu_stream); } /* * chan min/max cannot be enforced if there are multiple CODEC * DAIs connected to a single CPU DAI, use CPU DAI's directly */ if (be->dai_link->num_codecs == 1) { struct snd_soc_pcm_stream *codec_stream = snd_soc_dai_get_pcm_stream( asoc_rtd_to_codec(be, 0), stream); soc_pcm_hw_update_chan(hw, codec_stream); } } } static void dpcm_runtime_setup_be_rate(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_pcm_hardware *hw = &runtime->hw; struct snd_soc_dpcm *dpcm; int stream = substream->stream; if (!fe->dai_link->dpcm_merged_rate) return; /* * It returns merged BE codec channel; * if FE want to use it (= dpcm_merged_chan) */ for_each_dpcm_be(fe, stream, dpcm) { struct snd_soc_pcm_runtime *be = dpcm->be; struct snd_soc_pcm_stream *pcm; struct snd_soc_dai *dai; int i; for_each_rtd_dais(be, i, dai) { /* * Skip DAIs which don't support the current stream * type. See soc_pcm_init_runtime_hw() for more details */ if (!snd_soc_dai_stream_valid(dai, stream)) continue; pcm = snd_soc_dai_get_pcm_stream(dai, stream); soc_pcm_hw_update_rate(hw, pcm); } } } static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream, int stream) { struct snd_soc_dpcm *dpcm; struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(fe_substream); struct snd_soc_dai *fe_cpu_dai; int err = 0; int i; /* apply symmetry for FE */ soc_pcm_update_symmetry(fe_substream); for_each_rtd_cpu_dais (fe, i, fe_cpu_dai) { /* Symmetry only applies if we've got an active stream. 
*/ err = soc_pcm_apply_symmetry(fe_substream, fe_cpu_dai); if (err < 0) goto error; } /* apply symmetry for BE */ for_each_dpcm_be(fe, stream, dpcm) { struct snd_soc_pcm_runtime *be = dpcm->be; struct snd_pcm_substream *be_substream = snd_soc_dpcm_get_substream(be, stream); struct snd_soc_pcm_runtime *rtd; struct snd_soc_dai *dai; /* A backend may not have the requested substream */ if (!be_substream) continue; rtd = asoc_substream_to_rtd(be_substream); if (rtd->dai_link->be_hw_params_fixup) continue; soc_pcm_update_symmetry(be_substream); /* Symmetry only applies if we've got an active stream. */ for_each_rtd_dais(rtd, i, dai) { err = soc_pcm_apply_symmetry(fe_substream, dai); if (err < 0) goto error; } } error: return soc_pcm_ret(fe, err); } static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream) { struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(fe_substream); int stream = fe_substream->stream, ret = 0; dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE); ret = dpcm_be_dai_startup(fe, stream); if (ret < 0) goto be_err; dev_dbg(fe->dev, "ASoC: open FE %s\n", fe->dai_link->name); /* start the DAI frontend */ ret = __soc_pcm_open(fe, fe_substream); if (ret < 0) goto unwind; fe->dpcm[stream].state = SND_SOC_DPCM_STATE_OPEN; dpcm_runtime_setup_fe(fe_substream); dpcm_runtime_setup_be_format(fe_substream); dpcm_runtime_setup_be_chan(fe_substream); dpcm_runtime_setup_be_rate(fe_substream); ret = dpcm_apply_symmetry(fe_substream, stream); unwind: if (ret < 0) dpcm_be_dai_startup_unwind(fe, stream); be_err: dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); return soc_pcm_ret(fe, ret); } static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream); int stream = substream->stream; snd_soc_dpcm_mutex_assert_held(fe); dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE); /* shutdown the BEs */ dpcm_be_dai_shutdown(fe, stream); dev_dbg(fe->dev, "ASoC: 
close FE %s\n", fe->dai_link->name); /* now shutdown the frontend */ __soc_pcm_close(fe, substream); /* run the stream stop event */ dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP); fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE; dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); return 0; } void dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream) { struct snd_soc_dpcm *dpcm; /* only hw_params backends that are either sinks or sources * to this frontend DAI */ for_each_dpcm_be(fe, stream, dpcm) { struct snd_soc_pcm_runtime *be = dpcm->be; struct snd_pcm_substream *be_substream = snd_soc_dpcm_get_substream(be, stream); /* is this op for this BE ? */ if (!snd_soc_dpcm_be_can_update(fe, be, stream)) continue; /* only free hw when no longer used - check all FEs */ if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream)) continue; /* do not free hw if this BE is used by other FE */ if (be->dpcm[stream].users > 1) continue; if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND)) continue; dev_dbg(be->dev, "ASoC: hw_free BE %s\n", be->dai_link->name); __soc_pcm_hw_free(be, be_substream); be->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE; } } static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream); int stream = substream->stream; snd_soc_dpcm_mutex_lock(fe); dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE); dev_dbg(fe->dev, "ASoC: hw_free FE %s\n", fe->dai_link->name); /* call hw_free on the frontend */ soc_pcm_hw_clean(fe, substream, 0); /* only hw_params backends that are either sinks or sources * to this frontend DAI */ dpcm_be_dai_hw_free(fe, stream); 
fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE; dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); snd_soc_dpcm_mutex_unlock(fe); return 0; } int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream) { struct snd_soc_pcm_runtime *be; struct snd_pcm_substream *be_substream; struct snd_soc_dpcm *dpcm; int ret; for_each_dpcm_be(fe, stream, dpcm) { struct snd_pcm_hw_params hw_params; be = dpcm->be; be_substream = snd_soc_dpcm_get_substream(be, stream); /* is this op for this BE ? */ if (!snd_soc_dpcm_be_can_update(fe, be, stream)) continue; /* copy params for each dpcm */ memcpy(&hw_params, &fe->dpcm[stream].hw_params, sizeof(struct snd_pcm_hw_params)); /* perform any hw_params fixups */ ret = snd_soc_link_be_hw_params_fixup(be, &hw_params); if (ret < 0) goto unwind; /* copy the fixed-up hw params for BE dai */ memcpy(&be->dpcm[stream].hw_params, &hw_params, sizeof(struct snd_pcm_hw_params)); /* only allow hw_params() if no connected FEs are running */ if (!snd_soc_dpcm_can_be_params(fe, be, stream)) continue; if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN) && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE)) continue; dev_dbg(be->dev, "ASoC: hw_params BE %s\n", be->dai_link->name); ret = __soc_pcm_hw_params(be, be_substream, &hw_params); if (ret < 0) goto unwind; be->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_PARAMS; } return 0; unwind: dev_dbg(fe->dev, "ASoC: %s() failed at %s (%d)\n", __func__, be->dai_link->name, ret); /* disable any enabled and non active backends */ for_each_dpcm_be_rollback(fe, stream, dpcm) { be = dpcm->be; be_substream = snd_soc_dpcm_get_substream(be, stream); if (!snd_soc_dpcm_be_can_update(fe, be, stream)) continue; /* only allow hw_free() if no connected FEs are running */ if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream)) continue; if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN) && (be->dpcm[stream].state != 
SND_SOC_DPCM_STATE_HW_PARAMS) && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP)) continue; __soc_pcm_hw_free(be, be_substream); } return ret; } static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream); int ret, stream = substream->stream; snd_soc_dpcm_mutex_lock(fe); dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE); memcpy(&fe->dpcm[stream].hw_params, params, sizeof(struct snd_pcm_hw_params)); ret = dpcm_be_dai_hw_params(fe, stream); if (ret < 0) goto out; dev_dbg(fe->dev, "ASoC: hw_params FE %s rate %d chan %x fmt %d\n", fe->dai_link->name, params_rate(params), params_channels(params), params_format(params)); /* call hw_params on the frontend */ ret = __soc_pcm_hw_params(fe, substream, params); if (ret < 0) dpcm_be_dai_hw_free(fe, stream); else fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_PARAMS; out: dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); snd_soc_dpcm_mutex_unlock(fe); return soc_pcm_ret(fe, ret); } int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, int cmd) { struct snd_soc_pcm_runtime *be; bool pause_stop_transition; struct snd_soc_dpcm *dpcm; unsigned long flags; int ret = 0; for_each_dpcm_be(fe, stream, dpcm) { struct snd_pcm_substream *be_substream; be = dpcm->be; be_substream = snd_soc_dpcm_get_substream(be, stream); snd_soc_dpcm_stream_lock_irqsave_nested(be, stream, flags); /* is this op for this BE ? 
*/ if (!snd_soc_dpcm_be_can_update(fe, be, stream)) goto next; dev_dbg(be->dev, "ASoC: trigger BE %s cmd %d\n", be->dai_link->name, cmd); switch (cmd) { case SNDRV_PCM_TRIGGER_START: if (!be->dpcm[stream].be_start && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED)) goto next; be->dpcm[stream].be_start++; if (be->dpcm[stream].be_start != 1) goto next; if (be->dpcm[stream].state == SND_SOC_DPCM_STATE_PAUSED) ret = soc_pcm_trigger(be_substream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE); else ret = soc_pcm_trigger(be_substream, SNDRV_PCM_TRIGGER_START); if (ret) { be->dpcm[stream].be_start--; goto next; } be->dpcm[stream].state = SND_SOC_DPCM_STATE_START; break; case SNDRV_PCM_TRIGGER_RESUME: if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND)) goto next; be->dpcm[stream].be_start++; if (be->dpcm[stream].be_start != 1) goto next; ret = soc_pcm_trigger(be_substream, cmd); if (ret) { be->dpcm[stream].be_start--; goto next; } be->dpcm[stream].state = SND_SOC_DPCM_STATE_START; break; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: if (!be->dpcm[stream].be_start && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED)) goto next; fe->dpcm[stream].fe_pause = false; be->dpcm[stream].be_pause--; be->dpcm[stream].be_start++; if (be->dpcm[stream].be_start != 1) goto next; ret = soc_pcm_trigger(be_substream, cmd); if (ret) { be->dpcm[stream].be_start--; goto next; } be->dpcm[stream].state = SND_SOC_DPCM_STATE_START; break; case SNDRV_PCM_TRIGGER_STOP: if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED)) goto next; if (be->dpcm[stream].state == SND_SOC_DPCM_STATE_START) be->dpcm[stream].be_start--; if (be->dpcm[stream].be_start != 0) goto next; pause_stop_transition = false; if (fe->dpcm[stream].fe_pause) { pause_stop_transition = true; 
fe->dpcm[stream].fe_pause = false; be->dpcm[stream].be_pause--; } if (be->dpcm[stream].be_pause != 0) ret = soc_pcm_trigger(be_substream, SNDRV_PCM_TRIGGER_PAUSE_PUSH); else ret = soc_pcm_trigger(be_substream, SNDRV_PCM_TRIGGER_STOP); if (ret) { if (be->dpcm[stream].state == SND_SOC_DPCM_STATE_START) be->dpcm[stream].be_start++; if (pause_stop_transition) { fe->dpcm[stream].fe_pause = true; be->dpcm[stream].be_pause++; } goto next; } if (be->dpcm[stream].be_pause != 0) be->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED; else be->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP; break; case SNDRV_PCM_TRIGGER_SUSPEND: if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) goto next; be->dpcm[stream].be_start--; if (be->dpcm[stream].be_start != 0) goto next; ret = soc_pcm_trigger(be_substream, cmd); if (ret) { be->dpcm[stream].be_start++; goto next; } be->dpcm[stream].state = SND_SOC_DPCM_STATE_SUSPEND; break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) goto next; fe->dpcm[stream].fe_pause = true; be->dpcm[stream].be_pause++; be->dpcm[stream].be_start--; if (be->dpcm[stream].be_start != 0) goto next; ret = soc_pcm_trigger(be_substream, cmd); if (ret) { be->dpcm[stream].be_start++; goto next; } be->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED; break; } next: snd_soc_dpcm_stream_unlock_irqrestore(be, stream, flags); if (ret) break; } return soc_pcm_ret(fe, ret); } EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger); static int dpcm_dai_trigger_fe_be(struct snd_pcm_substream *substream, int cmd, bool fe_first) { struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream); int ret; /* call trigger on the frontend before the backend. */ if (fe_first) { dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n", fe->dai_link->name, cmd); ret = soc_pcm_trigger(substream, cmd); if (ret < 0) return ret; ret = dpcm_be_dai_trigger(fe, substream->stream, cmd); return ret; } /* call trigger on the frontend after the backend. 
 */
	ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
	if (ret < 0)
		return ret;

	dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n",
		fe->dai_link->name, cmd);

	ret = soc_pcm_trigger(substream, cmd);

	return ret;
}

/*
 * dpcm_fe_dai_do_trigger - trigger the FE and its BEs in the order
 * selected by the dai_link's per-stream trigger mode.
 *
 * PRE triggers the FE before the BEs for start-family commands (and
 * after for stop-family); POST is the mirror image; BESPOKE delegates
 * everything to the DAI's bespoke trigger.  On success the FE's DPCM
 * state is advanced to match the command.  runtime_update is set to
 * UPDATE_FE for the duration to fence concurrent path updates.
 */
static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream);
	int stream = substream->stream;
	int ret = 0;
	enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream];

	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;

	switch (trigger) {
	case SND_SOC_DPCM_TRIGGER_PRE:
		/* FE first on start, BEs first on stop */
		switch (cmd) {
		case SNDRV_PCM_TRIGGER_START:
		case SNDRV_PCM_TRIGGER_RESUME:
		case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		case SNDRV_PCM_TRIGGER_DRAIN:
			ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
			break;
		case SNDRV_PCM_TRIGGER_STOP:
		case SNDRV_PCM_TRIGGER_SUSPEND:
		case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
			ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	case SND_SOC_DPCM_TRIGGER_POST:
		/* BEs first on start, FE first on stop */
		switch (cmd) {
		case SNDRV_PCM_TRIGGER_START:
		case SNDRV_PCM_TRIGGER_RESUME:
		case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		case SNDRV_PCM_TRIGGER_DRAIN:
			ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
			break;
		case SNDRV_PCM_TRIGGER_STOP:
		case SNDRV_PCM_TRIGGER_SUSPEND:
		case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
			ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	case SND_SOC_DPCM_TRIGGER_BESPOKE:
		/* bespoke trigger() - handles both FE and BEs */
		dev_dbg(fe->dev, "ASoC: bespoke trigger FE %s cmd %d\n",
				fe->dai_link->name, cmd);
		ret = snd_soc_pcm_dai_bespoke_trigger(substream, cmd);
		break;
	default:
		dev_err(fe->dev, "ASoC: invalid trigger cmd %d for %s\n", cmd,
				fe->dai_link->name);
		ret = -EINVAL;
		goto out;
	}

	if (ret < 0) {
		dev_err(fe->dev, "ASoC: trigger FE cmd: %d failed: %d\n",
			cmd, ret);
		goto out;
	}

	/* advance the FE state to reflect the successful command */
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED;
		break;
	}

out:
	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
	return ret;
}

/*
 * FE trigger entry point: defer the trigger if a runtime update is in
 * flight, otherwise run it immediately.
 */
static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream);
	int stream = substream->stream;

	/* if FE's runtime_update is already set, we're in race;
	 * process this trigger later at exit
	 */
	if (fe->dpcm[stream].runtime_update != SND_SOC_DPCM_UPDATE_NO) {
		/* +1 so that cmd 0 is distinguishable from "no trigger pending" */
		fe->dpcm[stream].trigger_pending = cmd + 1;
		return 0; /* delayed, assuming it's successful */
	}

	/* we're alone, let's trigger */
	return dpcm_fe_dai_do_trigger(substream, cmd);
}

/*
 * dpcm_be_dai_prepare - prepare every BE of this FE/stream that is in a
 * state from which prepare is valid.  Returns 0 or the first error.
 */
int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
{
	struct snd_soc_dpcm *dpcm;
	int ret = 0;

	for_each_dpcm_be(fe, stream, dpcm) {
		struct snd_soc_pcm_runtime *be = dpcm->be;
		struct snd_pcm_substream *be_substream =
			snd_soc_dpcm_get_substream(be, stream);

		/* is this op for this BE ?
*/ if (!snd_soc_dpcm_be_can_update(fe, be, stream)) continue; if (!snd_soc_dpcm_can_be_prepared(fe, be, stream)) continue; if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND) && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED)) continue; dev_dbg(be->dev, "ASoC: prepare BE %s\n", be->dai_link->name); ret = __soc_pcm_prepare(be, be_substream); if (ret < 0) break; be->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE; } return soc_pcm_ret(fe, ret); } static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream); int stream = substream->stream, ret = 0; snd_soc_dpcm_mutex_lock(fe); dev_dbg(fe->dev, "ASoC: prepare FE %s\n", fe->dai_link->name); dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE); /* there is no point preparing this FE if there are no BEs */ if (list_empty(&fe->dpcm[stream].be_clients)) { /* dev_err_once() for visibility, dev_dbg() for debugging UCM profiles */ dev_err_once(fe->dev, "ASoC: no backend DAIs enabled for %s, possibly missing ALSA mixer-based routing or UCM profile\n", fe->dai_link->name); dev_dbg(fe->dev, "ASoC: no backend DAIs enabled for %s\n", fe->dai_link->name); ret = -EINVAL; goto out; } ret = dpcm_be_dai_prepare(fe, stream); if (ret < 0) goto out; /* call prepare on the frontend */ ret = __soc_pcm_prepare(fe, substream); if (ret < 0) goto out; fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE; out: dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); snd_soc_dpcm_mutex_unlock(fe); return soc_pcm_ret(fe, ret); } static int dpcm_run_update_shutdown(struct snd_soc_pcm_runtime *fe, int stream) { struct snd_pcm_substream *substream = snd_soc_dpcm_get_substream(fe, stream); enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream]; int err; dev_dbg(fe->dev, "ASoC: runtime %s close on FE %s\n", stream ? 
"capture" : "playback", fe->dai_link->name); if (trigger == SND_SOC_DPCM_TRIGGER_BESPOKE) { /* call bespoke trigger - FE takes care of all BE triggers */ dev_dbg(fe->dev, "ASoC: bespoke trigger FE %s cmd stop\n", fe->dai_link->name); err = snd_soc_pcm_dai_bespoke_trigger(substream, SNDRV_PCM_TRIGGER_STOP); } else { dev_dbg(fe->dev, "ASoC: trigger FE %s cmd stop\n", fe->dai_link->name); err = dpcm_be_dai_trigger(fe, stream, SNDRV_PCM_TRIGGER_STOP); } dpcm_be_dai_hw_free(fe, stream); dpcm_be_dai_shutdown(fe, stream); /* run the stream event for each BE */ dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_NOP); return soc_pcm_ret(fe, err); } static int dpcm_run_update_startup(struct snd_soc_pcm_runtime *fe, int stream) { struct snd_pcm_substream *substream = snd_soc_dpcm_get_substream(fe, stream); struct snd_soc_dpcm *dpcm; enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream]; int ret = 0; dev_dbg(fe->dev, "ASoC: runtime %s open on FE %s\n", stream ? "capture" : "playback", fe->dai_link->name); /* Only start the BE if the FE is ready */ if (fe->dpcm[stream].state == SND_SOC_DPCM_STATE_HW_FREE || fe->dpcm[stream].state == SND_SOC_DPCM_STATE_CLOSE) { dev_err(fe->dev, "ASoC: FE %s is not ready %d\n", fe->dai_link->name, fe->dpcm[stream].state); ret = -EINVAL; goto disconnect; } /* startup must always be called for new BEs */ ret = dpcm_be_dai_startup(fe, stream); if (ret < 0) goto disconnect; /* keep going if FE state is > open */ if (fe->dpcm[stream].state == SND_SOC_DPCM_STATE_OPEN) return 0; ret = dpcm_be_dai_hw_params(fe, stream); if (ret < 0) goto close; /* keep going if FE state is > hw_params */ if (fe->dpcm[stream].state == SND_SOC_DPCM_STATE_HW_PARAMS) return 0; ret = dpcm_be_dai_prepare(fe, stream); if (ret < 0) goto hw_free; /* run the stream event for each BE */ dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_NOP); /* keep going if FE state is > prepare */ if (fe->dpcm[stream].state == SND_SOC_DPCM_STATE_PREPARE || 
fe->dpcm[stream].state == SND_SOC_DPCM_STATE_STOP) return 0; if (trigger == SND_SOC_DPCM_TRIGGER_BESPOKE) { /* call trigger on the frontend - FE takes care of all BE triggers */ dev_dbg(fe->dev, "ASoC: bespoke trigger FE %s cmd start\n", fe->dai_link->name); ret = snd_soc_pcm_dai_bespoke_trigger(substream, SNDRV_PCM_TRIGGER_START); if (ret < 0) goto hw_free; } else { dev_dbg(fe->dev, "ASoC: trigger FE %s cmd start\n", fe->dai_link->name); ret = dpcm_be_dai_trigger(fe, stream, SNDRV_PCM_TRIGGER_START); if (ret < 0) goto hw_free; } return 0; hw_free: dpcm_be_dai_hw_free(fe, stream); close: dpcm_be_dai_shutdown(fe, stream); disconnect: /* disconnect any pending BEs */ for_each_dpcm_be(fe, stream, dpcm) { struct snd_soc_pcm_runtime *be = dpcm->be; /* is this op for this BE ? */ if (!snd_soc_dpcm_be_can_update(fe, be, stream)) continue; if (be->dpcm[stream].state == SND_SOC_DPCM_STATE_CLOSE || be->dpcm[stream].state == SND_SOC_DPCM_STATE_NEW) dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE; } return soc_pcm_ret(fe, ret); } static int soc_dpcm_fe_runtime_update(struct snd_soc_pcm_runtime *fe, int new) { struct snd_soc_dapm_widget_list *list; int stream; int count, paths; if (!fe->dai_link->dynamic) return 0; if (fe->dai_link->num_cpus > 1) { dev_err(fe->dev, "%s doesn't support Multi CPU yet\n", __func__); return -EINVAL; } /* only check active links */ if (!snd_soc_dai_active(asoc_rtd_to_cpu(fe, 0))) return 0; /* DAPM sync will call this to update DSP paths */ dev_dbg(fe->dev, "ASoC: DPCM %s runtime update for FE %s\n", new ? 
"new" : "old", fe->dai_link->name); for_each_pcm_streams(stream) { /* skip if FE doesn't have playback/capture capability */ if (!snd_soc_dai_stream_valid(asoc_rtd_to_cpu(fe, 0), stream) || !snd_soc_dai_stream_valid(asoc_rtd_to_codec(fe, 0), stream)) continue; /* skip if FE isn't currently playing/capturing */ if (!snd_soc_dai_stream_active(asoc_rtd_to_cpu(fe, 0), stream) || !snd_soc_dai_stream_active(asoc_rtd_to_codec(fe, 0), stream)) continue; paths = dpcm_path_get(fe, stream, &list); if (paths < 0) return paths; /* update any playback/capture paths */ count = dpcm_process_paths(fe, stream, &list, new); if (count) { dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE); if (new) dpcm_run_update_startup(fe, stream); else dpcm_run_update_shutdown(fe, stream); dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); dpcm_clear_pending_state(fe, stream); dpcm_be_disconnect(fe, stream); } dpcm_path_put(&list); } return 0; } /* Called by DAPM mixer/mux changes to update audio routing between PCMs and * any DAI links. 
*/ int snd_soc_dpcm_runtime_update(struct snd_soc_card *card) { struct snd_soc_pcm_runtime *fe; int ret = 0; snd_soc_dpcm_mutex_lock(card); /* shutdown all old paths first */ for_each_card_rtds(card, fe) { ret = soc_dpcm_fe_runtime_update(fe, 0); if (ret) goto out; } /* bring new paths up */ for_each_card_rtds(card, fe) { ret = soc_dpcm_fe_runtime_update(fe, 1); if (ret) goto out; } out: snd_soc_dpcm_mutex_unlock(card); return ret; } EXPORT_SYMBOL_GPL(snd_soc_dpcm_runtime_update); static void dpcm_fe_dai_cleanup(struct snd_pcm_substream *fe_substream) { struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(fe_substream); struct snd_soc_dpcm *dpcm; int stream = fe_substream->stream; snd_soc_dpcm_mutex_assert_held(fe); /* mark FE's links ready to prune */ for_each_dpcm_be(fe, stream, dpcm) dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE; dpcm_be_disconnect(fe, stream); } static int dpcm_fe_dai_close(struct snd_pcm_substream *fe_substream) { struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(fe_substream); int ret; snd_soc_dpcm_mutex_lock(fe); ret = dpcm_fe_dai_shutdown(fe_substream); dpcm_fe_dai_cleanup(fe_substream); snd_soc_dpcm_mutex_unlock(fe); return ret; } static int dpcm_fe_dai_open(struct snd_pcm_substream *fe_substream) { struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(fe_substream); struct snd_soc_dapm_widget_list *list; int ret; int stream = fe_substream->stream; snd_soc_dpcm_mutex_lock(fe); ret = dpcm_path_get(fe, stream, &list); if (ret < 0) goto open_end; /* calculate valid and active FE <-> BE dpcms */ dpcm_process_paths(fe, stream, &list, 1); ret = dpcm_fe_dai_startup(fe_substream); if (ret < 0) dpcm_fe_dai_cleanup(fe_substream); dpcm_clear_pending_state(fe, stream); dpcm_path_put(&list); open_end: snd_soc_dpcm_mutex_unlock(fe); return ret; } static int soc_get_playback_capture(struct snd_soc_pcm_runtime *rtd, int *playback, int *capture) { struct snd_soc_dai_link *dai_link = rtd->dai_link; struct snd_soc_dai *cpu_dai; int has_playback = 0; int 
has_capture = 0; int i; if (dai_link->dynamic && dai_link->num_cpus > 1) { dev_err(rtd->dev, "DPCM doesn't support Multi CPU for Front-Ends yet\n"); return -EINVAL; } if (dai_link->dynamic || dai_link->no_pcm) { int stream; if (dai_link->dpcm_playback) { stream = SNDRV_PCM_STREAM_PLAYBACK; for_each_rtd_cpu_dais(rtd, i, cpu_dai) { if (snd_soc_dai_stream_valid(cpu_dai, stream)) { has_playback = 1; break; } } if (!has_playback) { dev_err(rtd->card->dev, "No CPU DAIs support playback for stream %s\n", dai_link->stream_name); return -EINVAL; } } if (dai_link->dpcm_capture) { stream = SNDRV_PCM_STREAM_CAPTURE; for_each_rtd_cpu_dais(rtd, i, cpu_dai) { if (snd_soc_dai_stream_valid(cpu_dai, stream)) { has_capture = 1; break; } } if (!has_capture) { dev_err(rtd->card->dev, "No CPU DAIs support capture for stream %s\n", dai_link->stream_name); return -EINVAL; } } } else { struct snd_soc_dai *codec_dai; /* Adapt stream for codec2codec links */ int cpu_capture = snd_soc_get_stream_cpu(dai_link, SNDRV_PCM_STREAM_CAPTURE); int cpu_playback = snd_soc_get_stream_cpu(dai_link, SNDRV_PCM_STREAM_PLAYBACK); for_each_rtd_codec_dais(rtd, i, codec_dai) { if (dai_link->num_cpus == 1) { cpu_dai = asoc_rtd_to_cpu(rtd, 0); } else if (dai_link->num_cpus == dai_link->num_codecs) { cpu_dai = asoc_rtd_to_cpu(rtd, i); } else if (rtd->dai_link->num_codecs > rtd->dai_link->num_cpus) { int cpu_id; if (!rtd->dai_link->codec_ch_maps) { dev_err(rtd->card->dev, "%s: no codec channel mapping table provided\n", __func__); return -EINVAL; } cpu_id = rtd->dai_link->codec_ch_maps[i].connected_cpu_id; cpu_dai = asoc_rtd_to_cpu(rtd, cpu_id); } else { dev_err(rtd->card->dev, "%s codec number %d < cpu number %d is not supported\n", __func__, rtd->dai_link->num_codecs, rtd->dai_link->num_cpus); return -EINVAL; } if (snd_soc_dai_stream_valid(codec_dai, SNDRV_PCM_STREAM_PLAYBACK) && snd_soc_dai_stream_valid(cpu_dai, cpu_playback)) has_playback = 1; if (snd_soc_dai_stream_valid(codec_dai, SNDRV_PCM_STREAM_CAPTURE) && 
snd_soc_dai_stream_valid(cpu_dai, cpu_capture)) has_capture = 1; } } if (dai_link->playback_only) has_capture = 0; if (dai_link->capture_only) has_playback = 0; if (!has_playback && !has_capture) { dev_err(rtd->dev, "substream %s has no playback, no capture\n", dai_link->stream_name); return -EINVAL; } *playback = has_playback; *capture = has_capture; return 0; } static int soc_create_pcm(struct snd_pcm **pcm, struct snd_soc_pcm_runtime *rtd, int playback, int capture, int num) { char new_name[64]; int ret; /* create the PCM */ if (rtd->dai_link->c2c_params) { snprintf(new_name, sizeof(new_name), "codec2codec(%s)", rtd->dai_link->stream_name); ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num, playback, capture, pcm); } else if (rtd->dai_link->no_pcm) { snprintf(new_name, sizeof(new_name), "(%s)", rtd->dai_link->stream_name); ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num, playback, capture, pcm); } else { if (rtd->dai_link->dynamic) snprintf(new_name, sizeof(new_name), "%s (*)", rtd->dai_link->stream_name); else snprintf(new_name, sizeof(new_name), "%s %s-%d", rtd->dai_link->stream_name, soc_codec_dai_name(rtd), num); ret = snd_pcm_new(rtd->card->snd_card, new_name, num, playback, capture, pcm); } if (ret < 0) { dev_err(rtd->card->dev, "ASoC: can't create pcm %s for dailink %s: %d\n", new_name, rtd->dai_link->name, ret); return ret; } dev_dbg(rtd->card->dev, "ASoC: registered pcm #%d %s\n",num, new_name); return 0; } /* create a new pcm */ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num) { struct snd_soc_component *component; struct snd_pcm *pcm; int ret = 0, playback = 0, capture = 0; int i; ret = soc_get_playback_capture(rtd, &playback, &capture); if (ret < 0) return ret; ret = soc_create_pcm(&pcm, rtd, playback, capture, num); if (ret < 0) return ret; /* DAPM dai link stream work */ /* * Currently nothing to do for c2c links * Since c2c links are internal nodes in the DAPM graph and * don't interface with the outside world or 
application layer * we don't have to do any special handling on close. */ if (!rtd->dai_link->c2c_params) rtd->close_delayed_work_func = snd_soc_close_delayed_work; rtd->pcm = pcm; pcm->nonatomic = rtd->dai_link->nonatomic; pcm->private_data = rtd; pcm->no_device_suspend = true; if (rtd->dai_link->no_pcm || rtd->dai_link->c2c_params) { if (playback) pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd; if (capture) pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd; goto out; } /* ASoC PCM operations */ if (rtd->dai_link->dynamic) { rtd->ops.open = dpcm_fe_dai_open; rtd->ops.hw_params = dpcm_fe_dai_hw_params; rtd->ops.prepare = dpcm_fe_dai_prepare; rtd->ops.trigger = dpcm_fe_dai_trigger; rtd->ops.hw_free = dpcm_fe_dai_hw_free; rtd->ops.close = dpcm_fe_dai_close; rtd->ops.pointer = soc_pcm_pointer; } else { rtd->ops.open = soc_pcm_open; rtd->ops.hw_params = soc_pcm_hw_params; rtd->ops.prepare = soc_pcm_prepare; rtd->ops.trigger = soc_pcm_trigger; rtd->ops.hw_free = soc_pcm_hw_free; rtd->ops.close = soc_pcm_close; rtd->ops.pointer = soc_pcm_pointer; } for_each_rtd_components(rtd, i, component) { const struct snd_soc_component_driver *drv = component->driver; if (drv->ioctl) rtd->ops.ioctl = snd_soc_pcm_component_ioctl; if (drv->sync_stop) rtd->ops.sync_stop = snd_soc_pcm_component_sync_stop; if (drv->copy) rtd->ops.copy = snd_soc_pcm_component_copy; if (drv->page) rtd->ops.page = snd_soc_pcm_component_page; if (drv->mmap) rtd->ops.mmap = snd_soc_pcm_component_mmap; if (drv->ack) rtd->ops.ack = snd_soc_pcm_component_ack; } if (playback) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &rtd->ops); if (capture) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &rtd->ops); ret = snd_soc_pcm_component_new(rtd); if (ret < 0) return ret; out: dev_dbg(rtd->card->dev, "%s <-> %s mapping ok\n", soc_codec_dai_name(rtd), soc_cpu_dai_name(rtd)); return ret; } /* is the current PCM operation for this FE ? 
 */
int snd_soc_dpcm_fe_can_update(struct snd_soc_pcm_runtime *fe, int stream)
{
	/* FE-scoped updates (normal FE PCM ops) may touch this stream */
	if (fe->dpcm[stream].runtime_update == SND_SOC_DPCM_UPDATE_FE)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_dpcm_fe_can_update);

/* is the current PCM operation for this BE ? */
int snd_soc_dpcm_be_can_update(struct snd_soc_pcm_runtime *fe,
			       struct snd_soc_pcm_runtime *be, int stream)
{
	/*
	 * A BE may be updated either during an FE-scoped operation, or during
	 * a BE-scoped (DAPM path change) operation when this particular BE was
	 * flagged for update (be->dpcm[stream].runtime_update non-zero).
	 */
	if ((fe->dpcm[stream].runtime_update == SND_SOC_DPCM_UPDATE_FE) ||
	    ((fe->dpcm[stream].runtime_update == SND_SOC_DPCM_UPDATE_BE) &&
	     be->dpcm[stream].runtime_update))
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_dpcm_be_can_update);

/* get the substream for this BE */
struct snd_pcm_substream *
	snd_soc_dpcm_get_substream(struct snd_soc_pcm_runtime *be, int stream)
{
	return be->pcm->streams[stream].substream;
}
EXPORT_SYMBOL_GPL(snd_soc_dpcm_get_substream);

/*
 * Check whether the BE is free to change state: returns 1 when no FE other
 * than @fe that is connected to @be (for @stream) is currently in any of the
 * @num_states states listed in @states, otherwise 0.
 */
static int snd_soc_dpcm_check_state(struct snd_soc_pcm_runtime *fe,
				    struct snd_soc_pcm_runtime *be,
				    int stream,
				    const enum snd_soc_dpcm_state *states,
				    int num_states)
{
	struct snd_soc_dpcm *dpcm;
	int state;
	int ret = 1;
	int i;

	for_each_dpcm_fe(be, stream, dpcm) {

		/* the asking FE itself does not block the transition */
		if (dpcm->fe == fe)
			continue;

		state = dpcm->fe->dpcm[stream].state;

		/* any other FE in a blocking state vetoes the operation */
		for (i = 0; i < num_states; i++) {
			if (state == states[i]) {
				ret = 0;
				break;
			}
		}
	}

	/* it's safe to do this BE DAI */
	return ret;
}

/*
 * We can only hw_free, stop, pause or suspend a BE DAI if any of it's FE
 * are not running, paused or suspended for the specified stream direction.
 */
int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
				  struct snd_soc_pcm_runtime *be, int stream)
{
	/* states in which another FE still actively uses the BE */
	const enum snd_soc_dpcm_state state[] = {
		SND_SOC_DPCM_STATE_START,
		SND_SOC_DPCM_STATE_PAUSED,
		SND_SOC_DPCM_STATE_SUSPEND,
	};

	return snd_soc_dpcm_check_state(fe, be, stream, state, ARRAY_SIZE(state));
}
EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_free_stop);

/*
 * We can only change hw params a BE DAI if any of it's FE are not prepared,
 * running, paused or suspended for the specified stream direction.
 */
int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
			       struct snd_soc_pcm_runtime *be, int stream)
{
	/* PREPARE also blocks hw_params: another FE already fixed the params */
	const enum snd_soc_dpcm_state state[] = {
		SND_SOC_DPCM_STATE_START,
		SND_SOC_DPCM_STATE_PAUSED,
		SND_SOC_DPCM_STATE_SUSPEND,
		SND_SOC_DPCM_STATE_PREPARE,
	};

	return snd_soc_dpcm_check_state(fe, be, stream, state, ARRAY_SIZE(state));
}
EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_params);

/*
 * We can only prepare a BE DAI if any of it's FE are not prepared,
 * running or paused for the specified stream direction.
 */
int snd_soc_dpcm_can_be_prepared(struct snd_soc_pcm_runtime *fe,
				 struct snd_soc_pcm_runtime *be, int stream)
{
	/* note: SUSPEND does not block prepare, unlike free_stop above */
	const enum snd_soc_dpcm_state state[] = {
		SND_SOC_DPCM_STATE_START,
		SND_SOC_DPCM_STATE_PAUSED,
		SND_SOC_DPCM_STATE_PREPARE,
	};

	return snd_soc_dpcm_check_state(fe, be, stream, state, ARRAY_SIZE(state));
}
EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_prepared);
/* origin: linux-master, sound/soc/soc-pcm.c */
// SPDX-License-Identifier: GPL-2.0+ // // soc-jack.c -- ALSA SoC jack handling // // Copyright 2008 Wolfson Microelectronics PLC. // // Author: Mark Brown <[email protected]> #include <sound/jack.h> #include <sound/soc.h> #include <linux/gpio.h> #include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/suspend.h> #include <trace/events/asoc.h> /** * snd_soc_jack_report - Report the current status for a jack * * @jack: the jack * @status: a bitmask of enum snd_jack_type values that are currently detected. * @mask: a bitmask of enum snd_jack_type values that being reported. * * If configured using snd_soc_jack_add_pins() then the associated * DAPM pins will be enabled or disabled as appropriate and DAPM * synchronised. * * Note: This function uses mutexes and should be called from a * context which can sleep (such as a workqueue). */ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask) { struct snd_soc_dapm_context *dapm; struct snd_soc_jack_pin *pin; unsigned int sync = 0; if (!jack || !jack->jack) return; trace_snd_soc_jack_report(jack, mask, status); dapm = &jack->card->dapm; mutex_lock(&jack->mutex); jack->status &= ~mask; jack->status |= status & mask; trace_snd_soc_jack_notify(jack, status); list_for_each_entry(pin, &jack->pins, list) { int enable = pin->mask & jack->status; if (pin->invert) enable = !enable; if (enable) snd_soc_dapm_enable_pin(dapm, pin->pin); else snd_soc_dapm_disable_pin(dapm, pin->pin); /* we need to sync for this case only */ sync = 1; } /* Report before the DAPM sync to help users updating micbias status */ blocking_notifier_call_chain(&jack->notifier, jack->status, jack); if (sync) snd_soc_dapm_sync(dapm); snd_jack_report(jack->jack, jack->status); mutex_unlock(&jack->mutex); } EXPORT_SYMBOL_GPL(snd_soc_jack_report); /** * snd_soc_jack_add_zones - Associate voltage zones with jack * * @jack: ASoC jack * @count: Number 
of zones * @zones: Array of zones * * After this function has been called the zones specified in the * array will be associated with the jack. */ int snd_soc_jack_add_zones(struct snd_soc_jack *jack, int count, struct snd_soc_jack_zone *zones) { int i; for (i = 0; i < count; i++) { INIT_LIST_HEAD(&zones[i].list); list_add(&(zones[i].list), &jack->jack_zones); } return 0; } EXPORT_SYMBOL_GPL(snd_soc_jack_add_zones); /** * snd_soc_jack_get_type - Based on the mic bias value, this function returns * the type of jack from the zones declared in the jack type * * @jack: ASoC jack * @micbias_voltage: mic bias voltage at adc channel when jack is plugged in * * Based on the mic bias value passed, this function helps identify * the type of jack from the already declared jack zones */ int snd_soc_jack_get_type(struct snd_soc_jack *jack, int micbias_voltage) { struct snd_soc_jack_zone *zone; list_for_each_entry(zone, &jack->jack_zones, list) { if (micbias_voltage >= zone->min_mv && micbias_voltage < zone->max_mv) return zone->jack_type; } return 0; } EXPORT_SYMBOL_GPL(snd_soc_jack_get_type); /** * snd_soc_jack_add_pins - Associate DAPM pins with an ASoC jack * * @jack: ASoC jack created with snd_soc_card_jack_new_pins() * @count: Number of pins * @pins: Array of pins * * After this function has been called the DAPM pins specified in the * pins array will have their status updated to reflect the current * state of the jack whenever the jack status is updated. 
 */
int snd_soc_jack_add_pins(struct snd_soc_jack *jack, int count,
			  struct snd_soc_jack_pin *pins)
{
	int i;

	/* validate every pin before linking it into the jack's pin list */
	for (i = 0; i < count; i++) {
		if (!pins[i].pin) {
			dev_err(jack->card->dev, "ASoC: No name for pin %d\n",
				i);
			return -EINVAL;
		}
		if (!pins[i].mask) {
			dev_err(jack->card->dev, "ASoC: No mask for pin %d"
				" (%s)\n", i, pins[i].pin);
			return -EINVAL;
		}

		INIT_LIST_HEAD(&pins[i].list);
		list_add(&(pins[i].list), &jack->pins);
		/* expose the pin as a jack kcontrol as well */
		snd_jack_add_new_kctl(jack->jack, pins[i].pin, pins[i].mask);
	}

	/* Update to reflect the last reported status; canned jack
	 * implementations are likely to set their state before the
	 * card has an opportunity to associate pins.
	 */
	snd_soc_jack_report(jack, 0, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_jack_add_pins);

/**
 * snd_soc_jack_notifier_register - Register a notifier for jack status
 *
 * @jack: ASoC jack
 * @nb: Notifier block to register
 *
 * Register for notification of the current status of the jack.  Note
 * that it is not possible to report additional jack events in the
 * callback from the notifier, this is intended to support
 * applications such as enabling electrical detection only when a
 * mechanical detection event has occurred.
 */
void snd_soc_jack_notifier_register(struct snd_soc_jack *jack,
				    struct notifier_block *nb)
{
	blocking_notifier_chain_register(&jack->notifier, nb);
}
EXPORT_SYMBOL_GPL(snd_soc_jack_notifier_register);

/**
 * snd_soc_jack_notifier_unregister - Unregister a notifier for jack status
 *
 * @jack: ASoC jack
 * @nb: Notifier block to unregister
 *
 * Stop notifying for status changes.
*/ void snd_soc_jack_notifier_unregister(struct snd_soc_jack *jack, struct notifier_block *nb) { blocking_notifier_chain_unregister(&jack->notifier, nb); } EXPORT_SYMBOL_GPL(snd_soc_jack_notifier_unregister); #ifdef CONFIG_GPIOLIB struct jack_gpio_tbl { int count; struct snd_soc_jack *jack; struct snd_soc_jack_gpio *gpios; }; /* gpio detect */ static void snd_soc_jack_gpio_detect(struct snd_soc_jack_gpio *gpio) { struct snd_soc_jack *jack = gpio->jack; int enable; int report; enable = gpiod_get_value_cansleep(gpio->desc); if (gpio->invert) enable = !enable; if (enable) report = gpio->report; else report = 0; if (gpio->jack_status_check) report = gpio->jack_status_check(gpio->data); snd_soc_jack_report(jack, report, gpio->report); } /* irq handler for gpio pin */ static irqreturn_t gpio_handler(int irq, void *data) { struct snd_soc_jack_gpio *gpio = data; struct device *dev = gpio->jack->card->dev; trace_snd_soc_jack_irq(gpio->name); if (device_may_wakeup(dev)) pm_wakeup_event(dev, gpio->debounce_time + 50); queue_delayed_work(system_power_efficient_wq, &gpio->work, msecs_to_jiffies(gpio->debounce_time)); return IRQ_HANDLED; } /* gpio work */ static void gpio_work(struct work_struct *work) { struct snd_soc_jack_gpio *gpio; gpio = container_of(work, struct snd_soc_jack_gpio, work.work); snd_soc_jack_gpio_detect(gpio); } static int snd_soc_jack_pm_notifier(struct notifier_block *nb, unsigned long action, void *data) { struct snd_soc_jack_gpio *gpio = container_of(nb, struct snd_soc_jack_gpio, pm_notifier); switch (action) { case PM_POST_SUSPEND: case PM_POST_HIBERNATION: case PM_POST_RESTORE: /* * Use workqueue so we do not have to care about running * concurrently with work triggered by the interrupt handler. 
*/ queue_delayed_work(system_power_efficient_wq, &gpio->work, 0); break; } return NOTIFY_DONE; } static void jack_free_gpios(struct snd_soc_jack *jack, int count, struct snd_soc_jack_gpio *gpios) { int i; for (i = 0; i < count; i++) { gpiod_unexport(gpios[i].desc); unregister_pm_notifier(&gpios[i].pm_notifier); free_irq(gpiod_to_irq(gpios[i].desc), &gpios[i]); cancel_delayed_work_sync(&gpios[i].work); gpiod_put(gpios[i].desc); gpios[i].jack = NULL; } } static void jack_devres_free_gpios(struct device *dev, void *res) { struct jack_gpio_tbl *tbl = res; jack_free_gpios(tbl->jack, tbl->count, tbl->gpios); } /** * snd_soc_jack_add_gpios - Associate GPIO pins with an ASoC jack * * @jack: ASoC jack * @count: number of pins * @gpios: array of gpio pins * * This function will request gpio, set data direction and request irq * for each gpio in the array. */ int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count, struct snd_soc_jack_gpio *gpios) { int i, ret; struct jack_gpio_tbl *tbl; tbl = devres_alloc(jack_devres_free_gpios, sizeof(*tbl), GFP_KERNEL); if (!tbl) return -ENOMEM; tbl->jack = jack; tbl->count = count; tbl->gpios = gpios; for (i = 0; i < count; i++) { if (!gpios[i].name) { dev_err(jack->card->dev, "ASoC: No name for gpio at index %d\n", i); ret = -EINVAL; goto undo; } if (gpios[i].desc) { /* Already have a GPIO descriptor. 
*/ goto got_gpio; } else if (gpios[i].gpiod_dev) { /* Get a GPIO descriptor */ gpios[i].desc = gpiod_get_index(gpios[i].gpiod_dev, gpios[i].name, gpios[i].idx, GPIOD_IN); if (IS_ERR(gpios[i].desc)) { ret = PTR_ERR(gpios[i].desc); dev_err(gpios[i].gpiod_dev, "ASoC: Cannot get gpio at index %d: %d", i, ret); goto undo; } } else { /* legacy GPIO number */ if (!gpio_is_valid(gpios[i].gpio)) { dev_err(jack->card->dev, "ASoC: Invalid gpio %d\n", gpios[i].gpio); ret = -EINVAL; goto undo; } ret = gpio_request_one(gpios[i].gpio, GPIOF_IN, gpios[i].name); if (ret) goto undo; gpios[i].desc = gpio_to_desc(gpios[i].gpio); } got_gpio: INIT_DELAYED_WORK(&gpios[i].work, gpio_work); gpios[i].jack = jack; ret = request_any_context_irq(gpiod_to_irq(gpios[i].desc), gpio_handler, IRQF_SHARED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, gpios[i].name, &gpios[i]); if (ret < 0) goto err; if (gpios[i].wake) { ret = irq_set_irq_wake(gpiod_to_irq(gpios[i].desc), 1); if (ret != 0) dev_err(jack->card->dev, "ASoC: Failed to mark GPIO at index %d as wake source: %d\n", i, ret); } /* * Register PM notifier so we do not miss state transitions * happening while system is asleep. */ gpios[i].pm_notifier.notifier_call = snd_soc_jack_pm_notifier; register_pm_notifier(&gpios[i].pm_notifier); /* Expose GPIO value over sysfs for diagnostic purposes */ gpiod_export(gpios[i].desc, false); /* Update initial jack status */ schedule_delayed_work(&gpios[i].work, msecs_to_jiffies(gpios[i].debounce_time)); } devres_add(jack->card->dev, tbl); return 0; err: gpio_free(gpios[i].gpio); undo: jack_free_gpios(jack, i, gpios); devres_free(tbl); return ret; } EXPORT_SYMBOL_GPL(snd_soc_jack_add_gpios); /** * snd_soc_jack_add_gpiods - Associate GPIO descriptor pins with an ASoC jack * * @gpiod_dev: GPIO consumer device * @jack: ASoC jack * @count: number of pins * @gpios: array of gpio pins * * This function will request gpio, set data direction and request irq * for each gpio in the array. 
*/ int snd_soc_jack_add_gpiods(struct device *gpiod_dev, struct snd_soc_jack *jack, int count, struct snd_soc_jack_gpio *gpios) { int i; for (i = 0; i < count; i++) gpios[i].gpiod_dev = gpiod_dev; return snd_soc_jack_add_gpios(jack, count, gpios); } EXPORT_SYMBOL_GPL(snd_soc_jack_add_gpiods); /** * snd_soc_jack_free_gpios - Release GPIO pins' resources of an ASoC jack * * @jack: ASoC jack * @count: number of pins * @gpios: array of gpio pins * * Release gpio and irq resources for gpio pins associated with an ASoC jack. */ void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count, struct snd_soc_jack_gpio *gpios) { jack_free_gpios(jack, count, gpios); devres_destroy(jack->card->dev, jack_devres_free_gpios, NULL, NULL); } EXPORT_SYMBOL_GPL(snd_soc_jack_free_gpios); #endif /* CONFIG_GPIOLIB */
/* origin: linux-master, sound/soc/soc-jack.c */
// SPDX-License-Identifier: GPL-2.0+ // // soc-util.c -- ALSA SoC Audio Layer utility functions // // Copyright 2009 Wolfson Microelectronics PLC. // // Author: Mark Brown <[email protected]> // Liam Girdwood <[email protected]> #include <linux/platform_device.h> #include <linux/export.h> #include <linux/math.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots) { return sample_size * channels * tdm_slots; } EXPORT_SYMBOL_GPL(snd_soc_calc_frame_size); int snd_soc_params_to_frame_size(struct snd_pcm_hw_params *params) { int sample_size; sample_size = snd_pcm_format_width(params_format(params)); if (sample_size < 0) return sample_size; return snd_soc_calc_frame_size(sample_size, params_channels(params), 1); } EXPORT_SYMBOL_GPL(snd_soc_params_to_frame_size); int snd_soc_calc_bclk(int fs, int sample_size, int channels, int tdm_slots) { return fs * snd_soc_calc_frame_size(sample_size, channels, tdm_slots); } EXPORT_SYMBOL_GPL(snd_soc_calc_bclk); int snd_soc_params_to_bclk(struct snd_pcm_hw_params *params) { int ret; ret = snd_soc_params_to_frame_size(params); if (ret > 0) return ret * params_rate(params); else return ret; } EXPORT_SYMBOL_GPL(snd_soc_params_to_bclk); /** * snd_soc_tdm_params_to_bclk - calculate bclk from params and tdm slot info. * * Calculate the bclk from the params sample rate, the tdm slot count and the * tdm slot width. Optionally round-up the slot count to a given multiple. * Either or both of tdm_width and tdm_slots can be 0. * * If tdm_width == 0: use params_width() as the slot width. * If tdm_slots == 0: use params_channels() as the slot count. * * If slot_multiple > 1 the slot count (or params_channels() if tdm_slots == 0) * will be rounded up to a multiple of slot_multiple. This is mainly useful for * I2S mode, which has a left and right phase so the number of slots is always * a multiple of 2. 
* * If tdm_width == 0 && tdm_slots == 0 && slot_multiple < 2, this is equivalent * to calling snd_soc_params_to_bclk(). * * @params: Pointer to struct_pcm_hw_params. * @tdm_width: Width in bits of the tdm slots. Must be >= 0. * @tdm_slots: Number of tdm slots per frame. Must be >= 0. * @slot_multiple: If >1 roundup slot count to a multiple of this value. * * Return: bclk frequency in Hz, else a negative error code if params format * is invalid. */ int snd_soc_tdm_params_to_bclk(struct snd_pcm_hw_params *params, int tdm_width, int tdm_slots, int slot_multiple) { if (!tdm_slots) tdm_slots = params_channels(params); if (slot_multiple > 1) tdm_slots = roundup(tdm_slots, slot_multiple); if (!tdm_width) { tdm_width = snd_pcm_format_width(params_format(params)); if (tdm_width < 0) return tdm_width; } return snd_soc_calc_bclk(params_rate(params), tdm_width, 1, tdm_slots); } EXPORT_SYMBOL_GPL(snd_soc_tdm_params_to_bclk); static const struct snd_pcm_hardware dummy_dma_hardware = { /* Random values to keep userspace happy when checking constraints */ .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER, .buffer_bytes_max = 128*1024, .period_bytes_min = PAGE_SIZE, .period_bytes_max = PAGE_SIZE*2, .periods_min = 2, .periods_max = 128, }; static const struct snd_soc_component_driver dummy_platform; static int dummy_dma_open(struct snd_soc_component *component, struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); int i; /* * If there are other components associated with rtd, we shouldn't * override their hwparams */ for_each_rtd_components(rtd, i, component) { if (component->driver == &dummy_platform) return 0; } /* BE's dont need dummy params */ if (!rtd->dai_link->no_pcm) snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware); return 0; } static const struct snd_soc_component_driver dummy_platform = { .open = dummy_dma_open, }; static const struct snd_soc_component_driver dummy_codec = { .idle_bias_on = 
1, .use_pmdown_time = 1, .endianness = 1, }; #define STUB_RATES SNDRV_PCM_RATE_8000_384000 #define STUB_FORMATS (SNDRV_PCM_FMTBIT_S8 | \ SNDRV_PCM_FMTBIT_U8 | \ SNDRV_PCM_FMTBIT_S16_LE | \ SNDRV_PCM_FMTBIT_U16_LE | \ SNDRV_PCM_FMTBIT_S24_LE | \ SNDRV_PCM_FMTBIT_S24_3LE | \ SNDRV_PCM_FMTBIT_U24_LE | \ SNDRV_PCM_FMTBIT_S32_LE | \ SNDRV_PCM_FMTBIT_U32_LE | \ SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE) /* * Select these from Sound Card Manually * SND_SOC_POSSIBLE_DAIFMT_CBP_CFP * SND_SOC_POSSIBLE_DAIFMT_CBP_CFC * SND_SOC_POSSIBLE_DAIFMT_CBC_CFP * SND_SOC_POSSIBLE_DAIFMT_CBC_CFC */ static u64 dummy_dai_formats = SND_SOC_POSSIBLE_DAIFMT_I2S | SND_SOC_POSSIBLE_DAIFMT_RIGHT_J | SND_SOC_POSSIBLE_DAIFMT_LEFT_J | SND_SOC_POSSIBLE_DAIFMT_DSP_A | SND_SOC_POSSIBLE_DAIFMT_DSP_B | SND_SOC_POSSIBLE_DAIFMT_AC97 | SND_SOC_POSSIBLE_DAIFMT_PDM | SND_SOC_POSSIBLE_DAIFMT_GATED | SND_SOC_POSSIBLE_DAIFMT_CONT | SND_SOC_POSSIBLE_DAIFMT_NB_NF | SND_SOC_POSSIBLE_DAIFMT_NB_IF | SND_SOC_POSSIBLE_DAIFMT_IB_NF | SND_SOC_POSSIBLE_DAIFMT_IB_IF; static const struct snd_soc_dai_ops dummy_dai_ops = { .auto_selectable_formats = &dummy_dai_formats, .num_auto_selectable_formats = 1, }; /* * The dummy CODEC is only meant to be used in situations where there is no * actual hardware. * * If there is actual hardware even if it does not have a control bus * the hardware will still have constraints like supported samplerates, etc. * which should be modelled. And the data flow graph also should be modelled * using DAPM. 
*/ static struct snd_soc_dai_driver dummy_dai = { .name = "snd-soc-dummy-dai", .playback = { .stream_name = "Playback", .channels_min = 1, .channels_max = 384, .rates = STUB_RATES, .formats = STUB_FORMATS, }, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 384, .rates = STUB_RATES, .formats = STUB_FORMATS, }, .ops = &dummy_dai_ops, }; int snd_soc_dai_is_dummy(struct snd_soc_dai *dai) { if (dai->driver == &dummy_dai) return 1; return 0; } EXPORT_SYMBOL_GPL(snd_soc_dai_is_dummy); int snd_soc_component_is_dummy(struct snd_soc_component *component) { return ((component->driver == &dummy_platform) || (component->driver == &dummy_codec)); } struct snd_soc_dai_link_component asoc_dummy_dlc = { .of_node = NULL, .dai_name = "snd-soc-dummy-dai", .name = "snd-soc-dummy", }; EXPORT_SYMBOL_GPL(asoc_dummy_dlc); static int snd_soc_dummy_probe(struct platform_device *pdev) { int ret; ret = devm_snd_soc_register_component(&pdev->dev, &dummy_codec, &dummy_dai, 1); if (ret < 0) return ret; ret = devm_snd_soc_register_component(&pdev->dev, &dummy_platform, NULL, 0); return ret; } static struct platform_driver soc_dummy_driver = { .driver = { .name = "snd-soc-dummy", }, .probe = snd_soc_dummy_probe, }; static struct platform_device *soc_dummy_dev; int __init snd_soc_util_init(void) { int ret; soc_dummy_dev = platform_device_register_simple("snd-soc-dummy", -1, NULL, 0); if (IS_ERR(soc_dummy_dev)) return PTR_ERR(soc_dummy_dev); ret = platform_driver_register(&soc_dummy_driver); if (ret != 0) platform_device_unregister(soc_dummy_dev); return ret; } void snd_soc_util_exit(void) { platform_driver_unregister(&soc_dummy_driver); platform_device_unregister(soc_dummy_dev); }
/* origin: linux-master, sound/soc/soc-utils.c */
// SPDX-License-Identifier: GPL-2.0+ // // soc-ops.c -- Generic ASoC operations // // Copyright 2005 Wolfson Microelectronics PLC. // Copyright 2005 Openedhand Ltd. // Copyright (C) 2010 Slimlogic Ltd. // Copyright (C) 2010 Texas Instruments Inc. // // Author: Liam Girdwood <[email protected]> // with code, comments and ideas from :- // Richard Purdie <[email protected]> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/pm.h> #include <linux/bitops.h> #include <linux/ctype.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/jack.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-dpcm.h> #include <sound/initval.h> /** * snd_soc_info_enum_double - enumerated double mixer info callback * @kcontrol: mixer control * @uinfo: control element information * * Callback to provide information about a double enumerated * mixer control. * * Returns 0 for success. */ int snd_soc_info_enum_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; return snd_ctl_enum_info(uinfo, e->shift_l == e->shift_r ? 1 : 2, e->items, e->texts); } EXPORT_SYMBOL_GPL(snd_soc_info_enum_double); /** * snd_soc_get_enum_double - enumerated double mixer get callback * @kcontrol: mixer control * @ucontrol: control element information * * Callback to get the value of a double enumerated mixer. * * Returns 0 for success. 
*/ int snd_soc_get_enum_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; unsigned int val, item; unsigned int reg_val; reg_val = snd_soc_component_read(component, e->reg); val = (reg_val >> e->shift_l) & e->mask; item = snd_soc_enum_val_to_item(e, val); ucontrol->value.enumerated.item[0] = item; if (e->shift_l != e->shift_r) { val = (reg_val >> e->shift_r) & e->mask; item = snd_soc_enum_val_to_item(e, val); ucontrol->value.enumerated.item[1] = item; } return 0; } EXPORT_SYMBOL_GPL(snd_soc_get_enum_double); /** * snd_soc_put_enum_double - enumerated double mixer put callback * @kcontrol: mixer control * @ucontrol: control element information * * Callback to set the value of a double enumerated mixer. * * Returns 0 for success. */ int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; unsigned int *item = ucontrol->value.enumerated.item; unsigned int val; unsigned int mask; if (item[0] >= e->items) return -EINVAL; val = snd_soc_enum_item_to_val(e, item[0]) << e->shift_l; mask = e->mask << e->shift_l; if (e->shift_l != e->shift_r) { if (item[1] >= e->items) return -EINVAL; val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_r; mask |= e->mask << e->shift_r; } return snd_soc_component_update_bits(component, e->reg, mask, val); } EXPORT_SYMBOL_GPL(snd_soc_put_enum_double); /** * snd_soc_read_signed - Read a codec register and interpret as signed value * @component: component * @reg: Register to read * @mask: Mask to use after shifting the register value * @shift: Right shift of register value * @sign_bit: Bit that describes if a number is negative or not. 
 * @signed_val: Pointer to where the read value should be stored
 *
 * This function reads a codec register. The register value is shifted right
 * by 'shift' bits and masked with the given 'mask'. Afterwards it translates
 * the given register value into a signed integer if sign_bit is non-zero.
 *
 * Returns 0 on success, otherwise an error value
 */
static int snd_soc_read_signed(struct snd_soc_component *component,
	unsigned int reg, unsigned int mask, unsigned int shift,
	unsigned int sign_bit, int *signed_val)
{
	int ret;
	unsigned int val;

	val = snd_soc_component_read(component, reg);
	val = (val >> shift) & mask;

	/* sign_bit == 0 means the field is unsigned: return it as-is */
	if (!sign_bit) {
		*signed_val = val;
		return 0;
	}

	/* non-negative number */
	if (!(val & BIT(sign_bit))) {
		*signed_val = val;
		return 0;
	}

	ret = val;

	/*
	 * The register most probably does not contain a full-sized int.
	 * Instead we have an arbitrary number of bits in a signed
	 * representation which has to be translated into a full-sized int.
	 * This is done by filling up all bits above the sign-bit
	 * (manual sign extension).
	 */
	ret |= ~((int)(BIT(sign_bit) - 1));

	*signed_val = ret;

	return 0;
}

/**
 * snd_soc_info_volsw - single mixer info callback
 * @kcontrol: mixer control
 * @uinfo: control element information
 *
 * Callback to provide information about a single mixer control, or a double
 * mixer control that spans 2 registers.
 *
 * Returns 0 for success.
*/ int snd_soc_info_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; const char *vol_string = NULL; int max; max = uinfo->value.integer.max = mc->max - mc->min; if (mc->platform_max && mc->platform_max < max) max = mc->platform_max; if (max == 1) { /* Even two value controls ending in Volume should always be integer */ vol_string = strstr(kcontrol->id.name, " Volume"); if (vol_string && !strcmp(vol_string, " Volume")) uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; else uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; } else { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; } uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = max; return 0; } EXPORT_SYMBOL_GPL(snd_soc_info_volsw); /** * snd_soc_info_volsw_sx - Mixer info callback for SX TLV controls * @kcontrol: mixer control * @uinfo: control element information * * Callback to provide information about a single mixer control, or a double * mixer control that spans 2 registers of the SX TLV type. SX TLV controls * have a range that represents both positive and negative values either side * of zero but without a sign bit. min is the minimum register value, max is * the number of steps. * * Returns 0 for success. */ int snd_soc_info_volsw_sx(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; int max; if (mc->platform_max) max = mc->platform_max; else max = mc->max; if (max == 1 && !strstr(kcontrol->id.name, " Volume")) uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; else uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = snd_soc_volsw_is_stereo(mc) ? 
2 : 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = max; return 0; } EXPORT_SYMBOL_GPL(snd_soc_info_volsw_sx); /** * snd_soc_get_volsw - single mixer get callback * @kcontrol: mixer control * @ucontrol: control element information * * Callback to get the value of a single mixer control, or a double mixer * control that spans 2 registers. * * Returns 0 for success. */ int snd_soc_get_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; unsigned int reg = mc->reg; unsigned int reg2 = mc->rreg; unsigned int shift = mc->shift; unsigned int rshift = mc->rshift; int max = mc->max; int min = mc->min; int sign_bit = mc->sign_bit; unsigned int mask = (1 << fls(max)) - 1; unsigned int invert = mc->invert; int val; int ret; if (sign_bit) mask = BIT(sign_bit + 1) - 1; ret = snd_soc_read_signed(component, reg, mask, shift, sign_bit, &val); if (ret) return ret; ucontrol->value.integer.value[0] = val - min; if (invert) ucontrol->value.integer.value[0] = max - ucontrol->value.integer.value[0]; if (snd_soc_volsw_is_stereo(mc)) { if (reg == reg2) ret = snd_soc_read_signed(component, reg, mask, rshift, sign_bit, &val); else ret = snd_soc_read_signed(component, reg2, mask, shift, sign_bit, &val); if (ret) return ret; ucontrol->value.integer.value[1] = val - min; if (invert) ucontrol->value.integer.value[1] = max - ucontrol->value.integer.value[1]; } return 0; } EXPORT_SYMBOL_GPL(snd_soc_get_volsw); /** * snd_soc_put_volsw - single mixer put callback * @kcontrol: mixer control * @ucontrol: control element information * * Callback to set the value of a single mixer control, or a double mixer * control that spans 2 registers. * * Returns 0 for success. 
*/ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; unsigned int reg = mc->reg; unsigned int reg2 = mc->rreg; unsigned int shift = mc->shift; unsigned int rshift = mc->rshift; int max = mc->max; int min = mc->min; unsigned int sign_bit = mc->sign_bit; unsigned int mask = (1 << fls(max)) - 1; unsigned int invert = mc->invert; int err, ret; bool type_2r = false; unsigned int val2 = 0; unsigned int val, val_mask; if (sign_bit) mask = BIT(sign_bit + 1) - 1; if (ucontrol->value.integer.value[0] < 0) return -EINVAL; val = ucontrol->value.integer.value[0]; if (mc->platform_max && ((int)val + min) > mc->platform_max) return -EINVAL; if (val > max - min) return -EINVAL; val = (val + min) & mask; if (invert) val = max - val; val_mask = mask << shift; val = val << shift; if (snd_soc_volsw_is_stereo(mc)) { if (ucontrol->value.integer.value[1] < 0) return -EINVAL; val2 = ucontrol->value.integer.value[1]; if (mc->platform_max && ((int)val2 + min) > mc->platform_max) return -EINVAL; if (val2 > max - min) return -EINVAL; val2 = (val2 + min) & mask; if (invert) val2 = max - val2; if (reg == reg2) { val_mask |= mask << rshift; val |= val2 << rshift; } else { val2 = val2 << shift; type_2r = true; } } err = snd_soc_component_update_bits(component, reg, val_mask, val); if (err < 0) return err; ret = err; if (type_2r) { err = snd_soc_component_update_bits(component, reg2, val_mask, val2); /* Don't discard any error code or drop change flag */ if (ret == 0 || err < 0) { ret = err; } } return ret; } EXPORT_SYMBOL_GPL(snd_soc_put_volsw); /** * snd_soc_get_volsw_sx - single mixer get callback * @kcontrol: mixer control * @ucontrol: control element information * * Callback to get the value of a single mixer control, or a double mixer * control that spans 2 registers. * * Returns 0 for success. 
*/
int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol,
			 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	unsigned int reg = mc->reg;
	unsigned int reg2 = mc->rreg;
	unsigned int shift = mc->shift;
	unsigned int rshift = mc->rshift;
	int max = mc->max;
	int min = mc->min;
	/*
	 * SX controls have no sign bit; the usable field width is derived
	 * from min + max (min is the minimum register value, max the number
	 * of steps - see snd_soc_info_volsw_sx above).
	 */
	unsigned int mask = (1U << (fls(min + max) - 1)) - 1;
	unsigned int val;

	/* channel 0: shift field down, rebase by min, clip to field width */
	val = snd_soc_component_read(component, reg);
	ucontrol->value.integer.value[0] = ((val >> shift) - min) & mask;

	/* channel 1 (stereo only) lives in reg2 at rshift */
	if (snd_soc_volsw_is_stereo(mc)) {
		val = snd_soc_component_read(component, reg2);
		val = ((val >> rshift) - min) & mask;
		ucontrol->value.integer.value[1] = val;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_get_volsw_sx);

/**
 * snd_soc_put_volsw_sx - double mixer set callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to set the value of a double mixer control that spans 2 registers.
 *
 * Returns 0 for success.
*/
int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
			 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	unsigned int reg = mc->reg;
	unsigned int reg2 = mc->rreg;
	unsigned int shift = mc->shift;
	unsigned int rshift = mc->rshift;
	int max = mc->max;
	int min = mc->min;
	/* SX field width derived from min + max, no sign bit */
	unsigned int mask = (1U << (fls(min + max) - 1)) - 1;
	int err = 0;
	int ret;
	unsigned int val, val_mask;

	/*
	 * Reject negative user values before assigning into the unsigned
	 * local: a negative long would wrap to a huge unsigned value and
	 * could slip past the range checks via masking.
	 */
	if (ucontrol->value.integer.value[0] < 0)
		return -EINVAL;
	val = ucontrol->value.integer.value[0];
	if (mc->platform_max && val > mc->platform_max)
		return -EINVAL;
	if (val > max)
		return -EINVAL;

	/* rebase by min, clip to field width, move into register position */
	val_mask = mask << shift;
	val = (val + min) & mask;
	val = val << shift;

	err = snd_soc_component_update_bits(component, reg, val_mask, val);
	if (err < 0)
		return err;
	ret = err;

	if (snd_soc_volsw_is_stereo(mc)) {
		unsigned int val2;

		/*
		 * Bug fix: the second channel needs the same negative-value
		 * check as the first; previously a negative value[1] was
		 * assigned to the unsigned local unchecked.
		 */
		if (ucontrol->value.integer.value[1] < 0)
			return -EINVAL;
		val2 = ucontrol->value.integer.value[1];
		if (mc->platform_max && val2 > mc->platform_max)
			return -EINVAL;
		if (val2 > max)
			return -EINVAL;

		val_mask = mask << rshift;
		val2 = (val2 + min) & mask;
		val2 = val2 << rshift;

		err = snd_soc_component_update_bits(component, reg2, val_mask,
			val2);

		/* Don't discard any error code or drop change flag */
		if (ret == 0 || err < 0) {
			ret = err;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_put_volsw_sx);

/**
 * snd_soc_info_volsw_range - single mixer info callback with range.
 * @kcontrol: mixer control
 * @uinfo: control element information
 *
 * Callback to provide information, within a range, about a single
 * mixer control.
 *
 * returns 0 for success.
*/ int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; int platform_max; int min = mc->min; if (!mc->platform_max) mc->platform_max = mc->max; platform_max = mc->platform_max; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = platform_max - min; return 0; } EXPORT_SYMBOL_GPL(snd_soc_info_volsw_range); /** * snd_soc_put_volsw_range - single mixer put value callback with range. * @kcontrol: mixer control * @ucontrol: control element information * * Callback to set the value, within a range, for a single mixer control. * * Returns 0 for success. */ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); unsigned int reg = mc->reg; unsigned int rreg = mc->rreg; unsigned int shift = mc->shift; int min = mc->min; int max = mc->max; unsigned int mask = (1 << fls(max)) - 1; unsigned int invert = mc->invert; unsigned int val, val_mask; int err, ret, tmp; tmp = ucontrol->value.integer.value[0]; if (tmp < 0) return -EINVAL; if (mc->platform_max && tmp > mc->platform_max) return -EINVAL; if (tmp > mc->max - mc->min) return -EINVAL; if (invert) val = (max - ucontrol->value.integer.value[0]) & mask; else val = ((ucontrol->value.integer.value[0] + min) & mask); val_mask = mask << shift; val = val << shift; err = snd_soc_component_update_bits(component, reg, val_mask, val); if (err < 0) return err; ret = err; if (snd_soc_volsw_is_stereo(mc)) { tmp = ucontrol->value.integer.value[1]; if (tmp < 0) return -EINVAL; if (mc->platform_max && tmp > mc->platform_max) return -EINVAL; if (tmp > mc->max - mc->min) return -EINVAL; if (invert) val = (max - 
ucontrol->value.integer.value[1]) & mask; else val = ((ucontrol->value.integer.value[1] + min) & mask); val_mask = mask << shift; val = val << shift; err = snd_soc_component_update_bits(component, rreg, val_mask, val); /* Don't discard any error code or drop change flag */ if (ret == 0 || err < 0) { ret = err; } } return ret; } EXPORT_SYMBOL_GPL(snd_soc_put_volsw_range); /** * snd_soc_get_volsw_range - single mixer get callback with range * @kcontrol: mixer control * @ucontrol: control element information * * Callback to get the value, within a range, of a single mixer control. * * Returns 0 for success. */ int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; unsigned int reg = mc->reg; unsigned int rreg = mc->rreg; unsigned int shift = mc->shift; int min = mc->min; int max = mc->max; unsigned int mask = (1 << fls(max)) - 1; unsigned int invert = mc->invert; unsigned int val; val = snd_soc_component_read(component, reg); ucontrol->value.integer.value[0] = (val >> shift) & mask; if (invert) ucontrol->value.integer.value[0] = max - ucontrol->value.integer.value[0]; else ucontrol->value.integer.value[0] = ucontrol->value.integer.value[0] - min; if (snd_soc_volsw_is_stereo(mc)) { val = snd_soc_component_read(component, rreg); ucontrol->value.integer.value[1] = (val >> shift) & mask; if (invert) ucontrol->value.integer.value[1] = max - ucontrol->value.integer.value[1]; else ucontrol->value.integer.value[1] = ucontrol->value.integer.value[1] - min; } return 0; } EXPORT_SYMBOL_GPL(snd_soc_get_volsw_range); /** * snd_soc_limit_volume - Set new limit to an existing volume control. * * @card: where to look for the control * @name: Name of the control * @max: new maximum limit * * Return 0 for success, else error. 
*/ int snd_soc_limit_volume(struct snd_soc_card *card, const char *name, int max) { struct snd_kcontrol *kctl; int ret = -EINVAL; /* Sanity check for name and max */ if (unlikely(!name || max <= 0)) return -EINVAL; kctl = snd_soc_card_get_kcontrol(card, name); if (kctl) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kctl->private_value; if (max <= mc->max) { mc->platform_max = max; ret = 0; } } return ret; } EXPORT_SYMBOL_GPL(snd_soc_limit_volume); int snd_soc_bytes_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); struct soc_bytes *params = (void *)kcontrol->private_value; uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES; uinfo->count = params->num_regs * component->val_bytes; return 0; } EXPORT_SYMBOL_GPL(snd_soc_bytes_info); int snd_soc_bytes_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); struct soc_bytes *params = (void *)kcontrol->private_value; int ret; if (component->regmap) ret = regmap_raw_read(component->regmap, params->base, ucontrol->value.bytes.data, params->num_regs * component->val_bytes); else ret = -EINVAL; /* Hide any masked bytes to ensure consistent data reporting */ if (ret == 0 && params->mask) { switch (component->val_bytes) { case 1: ucontrol->value.bytes.data[0] &= ~params->mask; break; case 2: ((u16 *)(&ucontrol->value.bytes.data))[0] &= cpu_to_be16(~params->mask); break; case 4: ((u32 *)(&ucontrol->value.bytes.data))[0] &= cpu_to_be32(~params->mask); break; default: return -EINVAL; } } return ret; } EXPORT_SYMBOL_GPL(snd_soc_bytes_get); int snd_soc_bytes_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); struct soc_bytes *params = (void *)kcontrol->private_value; int ret, len; unsigned int val, mask; void *data; if (!component->regmap || !params->num_regs) return 
-EINVAL; len = params->num_regs * component->val_bytes; data = kmemdup(ucontrol->value.bytes.data, len, GFP_KERNEL | GFP_DMA); if (!data) return -ENOMEM; /* * If we've got a mask then we need to preserve the register * bits. We shouldn't modify the incoming data so take a * copy. */ if (params->mask) { ret = regmap_read(component->regmap, params->base, &val); if (ret != 0) goto out; val &= params->mask; switch (component->val_bytes) { case 1: ((u8 *)data)[0] &= ~params->mask; ((u8 *)data)[0] |= val; break; case 2: mask = ~params->mask; ret = regmap_parse_val(component->regmap, &mask, &mask); if (ret != 0) goto out; ((u16 *)data)[0] &= mask; ret = regmap_parse_val(component->regmap, &val, &val); if (ret != 0) goto out; ((u16 *)data)[0] |= val; break; case 4: mask = ~params->mask; ret = regmap_parse_val(component->regmap, &mask, &mask); if (ret != 0) goto out; ((u32 *)data)[0] &= mask; ret = regmap_parse_val(component->regmap, &val, &val); if (ret != 0) goto out; ((u32 *)data)[0] |= val; break; default: ret = -EINVAL; goto out; } } ret = regmap_raw_write(component->regmap, params->base, data, len); out: kfree(data); return ret; } EXPORT_SYMBOL_GPL(snd_soc_bytes_put); int snd_soc_bytes_info_ext(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *ucontrol) { struct soc_bytes_ext *params = (void *)kcontrol->private_value; ucontrol->type = SNDRV_CTL_ELEM_TYPE_BYTES; ucontrol->count = params->max; return 0; } EXPORT_SYMBOL_GPL(snd_soc_bytes_info_ext); int snd_soc_bytes_tlv_callback(struct snd_kcontrol *kcontrol, int op_flag, unsigned int size, unsigned int __user *tlv) { struct soc_bytes_ext *params = (void *)kcontrol->private_value; unsigned int count = size < params->max ? 
size : params->max; int ret = -ENXIO; switch (op_flag) { case SNDRV_CTL_TLV_OP_READ: if (params->get) ret = params->get(kcontrol, tlv, count); break; case SNDRV_CTL_TLV_OP_WRITE: if (params->put) ret = params->put(kcontrol, tlv, count); break; } return ret; } EXPORT_SYMBOL_GPL(snd_soc_bytes_tlv_callback); /** * snd_soc_info_xr_sx - signed multi register info callback * @kcontrol: mreg control * @uinfo: control element information * * Callback to provide information of a control that can * span multiple codec registers which together * forms a single signed value in a MSB/LSB manner. * * Returns 0 for success. */ int snd_soc_info_xr_sx(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct soc_mreg_control *mc = (struct soc_mreg_control *)kcontrol->private_value; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = mc->min; uinfo->value.integer.max = mc->max; return 0; } EXPORT_SYMBOL_GPL(snd_soc_info_xr_sx); /** * snd_soc_get_xr_sx - signed multi register get callback * @kcontrol: mreg control * @ucontrol: control element information * * Callback to get the value of a control that can span * multiple codec registers which together forms a single * signed value in a MSB/LSB manner. The control supports * specifying total no of bits used to allow for bitfields * across the multiple codec registers. * * Returns 0 for success. 
*/
int snd_soc_get_xr_sx(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
	struct soc_mreg_control *mc =
		(struct soc_mreg_control *)kcontrol->private_value;
	unsigned int regbase = mc->regbase;
	unsigned int regcount = mc->regcount;
	/* bits per register word, from the component's value width */
	unsigned int regwshift = component->val_bytes * BITS_PER_BYTE;
	unsigned int regwmask = (1UL<<regwshift)-1;
	unsigned int invert = mc->invert;
	unsigned long mask = (1UL<<mc->nbits)-1;
	long min = mc->min;
	long max = mc->max;
	long val = 0;
	unsigned int i;

	/* assemble the value MSB-first: regbase holds the most
	 * significant word, regbase+regcount-1 the least */
	for (i = 0; i < regcount; i++) {
		unsigned int regval = snd_soc_component_read(component, regbase+i);
		val |= (regval & regwmask) << (regwshift*(regcount-i-1));
	}
	val &= mask;
	/* sign-extend: for signed ranges, a value above max must be a
	 * negative number in nbits two's complement */
	if (min < 0 && val > max)
		val |= ~mask;
	if (invert)
		val = max - val;
	ucontrol->value.integer.value[0] = val;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_get_xr_sx);

/**
 * snd_soc_put_xr_sx - signed multi register get callback
 * @kcontrol: mreg control
 * @ucontrol: control element information
 *
 * Callback to set the value of a control that can span
 * multiple codec registers which together forms a single
 * signed value in a MSB/LSB manner. The control supports
 * specifying total no of bits used to allow for bitfields
 * across the multiple codec registers.
 *
 * Returns 0 for success.
*/
int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
	struct soc_mreg_control *mc =
		(struct soc_mreg_control *)kcontrol->private_value;
	unsigned int regbase = mc->regbase;
	unsigned int regcount = mc->regcount;
	/* bits per register word, from the component's value width */
	unsigned int regwshift = component->val_bytes * BITS_PER_BYTE;
	unsigned int regwmask = (1UL<<regwshift)-1;
	unsigned int invert = mc->invert;
	unsigned long mask = (1UL<<mc->nbits)-1;
	long max = mc->max;
	long val = ucontrol->value.integer.value[0];
	int ret = 0;
	unsigned int i;

	/* range-check the user value before touching hardware */
	if (val < mc->min || val > mc->max)
		return -EINVAL;
	if (invert)
		val = max - val;
	val &= mask;
	/* write back MSB-first, one register word at a time, masking each
	 * update to only the bits this control owns in that word */
	for (i = 0; i < regcount; i++) {
		unsigned int regval = (val >> (regwshift*(regcount-i-1))) &
			regwmask;
		unsigned int regmask = (mask >> (regwshift*(regcount-i-1))) &
			regwmask;
		int err = snd_soc_component_update_bits(component, regbase+i,
							regmask, regval);
		if (err < 0)
			return err;
		/* remember "changed" (positive) status across all words */
		if (err > 0)
			ret = err;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_put_xr_sx);

/**
 * snd_soc_get_strobe - strobe get callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback get the value of a strobe mixer control.
 *
 * Returns 0 for success.
*/
int snd_soc_get_strobe(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	unsigned int reg = mc->reg;
	unsigned int shift = mc->shift;
	unsigned int mask = 1 << shift;
	/* normalize invert to 0/1 so the XOR below is well-defined */
	unsigned int invert = mc->invert != 0;
	unsigned int val;

	val = snd_soc_component_read(component, reg);
	val &= mask;

	/* reduce the isolated bit to 0/1 */
	if (shift != 0 && val != 0)
		val = val >> shift;
	ucontrol->value.enumerated.item[0] = val ^ invert;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_get_strobe);

/**
 * snd_soc_put_strobe - strobe put callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback strobe a register bit to high then low (or the inverse)
 * in one pass of a single mixer enum control.
 *
 * Returns 1 for success.
 */
int snd_soc_put_strobe(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	unsigned int reg = mc->reg;
	unsigned int shift = mc->shift;
	unsigned int mask = 1 << shift;
	unsigned int invert = mc->invert != 0;
	unsigned int strobe = ucontrol->value.enumerated.item[0] != 0;
	/* first write asserts the bit, second write deasserts it
	 * (or the other way round when inverted) */
	unsigned int val1 = (strobe ^ invert) ? mask : 0;
	unsigned int val2 = (strobe ^ invert) ? 0 : mask;
	int err;

	err = snd_soc_component_update_bits(component, reg, mask, val1);
	if (err < 0)
		return err;

	return snd_soc_component_update_bits(component, reg, mask, val2);
}
EXPORT_SYMBOL_GPL(snd_soc_put_strobe);
/*
 * ===== data-join artifact (repo: linux-master) =====
 * End of sound/soc/soc-ops.c; sound/soc/soc-link.c follows.
 */
// SPDX-License-Identifier: GPL-2.0 // // soc-link.c // // Copyright (C) 2019 Renesas Electronics Corp. // Kuninori Morimoto <[email protected]> // #include <sound/soc.h> #include <sound/soc-link.h> #define soc_link_ret(rtd, ret) _soc_link_ret(rtd, __func__, ret) static inline int _soc_link_ret(struct snd_soc_pcm_runtime *rtd, const char *func, int ret) { /* Positive, Zero values are not errors */ if (ret >= 0) return ret; /* Negative values might be errors */ switch (ret) { case -EPROBE_DEFER: case -ENOTSUPP: break; default: dev_err(rtd->dev, "ASoC: error at %s on %s: %d\n", func, rtd->dai_link->name, ret); } return ret; } /* * We might want to check substream by using list. * In such case, we can update these macros. */ #define soc_link_mark_push(rtd, substream, tgt) ((rtd)->mark_##tgt = substream) #define soc_link_mark_pop(rtd, substream, tgt) ((rtd)->mark_##tgt = NULL) #define soc_link_mark_match(rtd, substream, tgt) ((rtd)->mark_##tgt == substream) int snd_soc_link_init(struct snd_soc_pcm_runtime *rtd) { int ret = 0; if (rtd->dai_link->init) ret = rtd->dai_link->init(rtd); return soc_link_ret(rtd, ret); } void snd_soc_link_exit(struct snd_soc_pcm_runtime *rtd) { if (rtd->dai_link->exit) rtd->dai_link->exit(rtd); } int snd_soc_link_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hw_params *params) { int ret = 0; if (rtd->dai_link->be_hw_params_fixup) ret = rtd->dai_link->be_hw_params_fixup(rtd, params); return soc_link_ret(rtd, ret); } int snd_soc_link_startup(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); int ret = 0; if (rtd->dai_link->ops && rtd->dai_link->ops->startup) ret = rtd->dai_link->ops->startup(substream); /* mark substream if succeeded */ if (ret == 0) soc_link_mark_push(rtd, substream, startup); return soc_link_ret(rtd, ret); } void snd_soc_link_shutdown(struct snd_pcm_substream *substream, int rollback) { struct snd_soc_pcm_runtime *rtd = 
asoc_substream_to_rtd(substream); if (rollback && !soc_link_mark_match(rtd, substream, startup)) return; if (rtd->dai_link->ops && rtd->dai_link->ops->shutdown) rtd->dai_link->ops->shutdown(substream); /* remove marked substream */ soc_link_mark_pop(rtd, substream, startup); } int snd_soc_link_prepare(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); int ret = 0; if (rtd->dai_link->ops && rtd->dai_link->ops->prepare) ret = rtd->dai_link->ops->prepare(substream); return soc_link_ret(rtd, ret); } int snd_soc_link_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); int ret = 0; if (rtd->dai_link->ops && rtd->dai_link->ops->hw_params) ret = rtd->dai_link->ops->hw_params(substream, params); /* mark substream if succeeded */ if (ret == 0) soc_link_mark_push(rtd, substream, hw_params); return soc_link_ret(rtd, ret); } void snd_soc_link_hw_free(struct snd_pcm_substream *substream, int rollback) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); if (rollback && !soc_link_mark_match(rtd, substream, hw_params)) return; if (rtd->dai_link->ops && rtd->dai_link->ops->hw_free) rtd->dai_link->ops->hw_free(substream); /* remove marked substream */ soc_link_mark_pop(rtd, substream, hw_params); } static int soc_link_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); int ret = 0; if (rtd->dai_link->ops && rtd->dai_link->ops->trigger) ret = rtd->dai_link->ops->trigger(substream, cmd); return soc_link_ret(rtd, ret); } int snd_soc_link_trigger(struct snd_pcm_substream *substream, int cmd, int rollback) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); int ret = 0; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: ret = soc_link_trigger(substream, cmd); if (ret < 
0) break; soc_link_mark_push(rtd, substream, trigger); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: if (rollback && !soc_link_mark_match(rtd, substream, trigger)) break; ret = soc_link_trigger(substream, cmd); soc_link_mark_pop(rtd, substream, startup); } return ret; } int snd_soc_link_compr_startup(struct snd_compr_stream *cstream) { struct snd_soc_pcm_runtime *rtd = cstream->private_data; int ret = 0; if (rtd->dai_link->compr_ops && rtd->dai_link->compr_ops->startup) ret = rtd->dai_link->compr_ops->startup(cstream); if (ret == 0) soc_link_mark_push(rtd, cstream, compr_startup); return soc_link_ret(rtd, ret); } EXPORT_SYMBOL_GPL(snd_soc_link_compr_startup); void snd_soc_link_compr_shutdown(struct snd_compr_stream *cstream, int rollback) { struct snd_soc_pcm_runtime *rtd = cstream->private_data; if (rollback && !soc_link_mark_match(rtd, cstream, compr_startup)) return; if (rtd->dai_link->compr_ops && rtd->dai_link->compr_ops->shutdown) rtd->dai_link->compr_ops->shutdown(cstream); soc_link_mark_pop(rtd, cstream, compr_startup); } EXPORT_SYMBOL_GPL(snd_soc_link_compr_shutdown); int snd_soc_link_compr_set_params(struct snd_compr_stream *cstream) { struct snd_soc_pcm_runtime *rtd = cstream->private_data; int ret = 0; if (rtd->dai_link->compr_ops && rtd->dai_link->compr_ops->set_params) ret = rtd->dai_link->compr_ops->set_params(cstream); return soc_link_ret(rtd, ret); } EXPORT_SYMBOL_GPL(snd_soc_link_compr_set_params);
/*
 * ===== data-join artifact (repo: linux-master) =====
 * End of sound/soc/soc-link.c; sound/soc/soc-dapm.c follows.
 */
// SPDX-License-Identifier: GPL-2.0+
//
// soc-dapm.c -- ALSA SoC Dynamic Audio Power Management
//
// Copyright 2005 Wolfson Microelectronics PLC.
// Author: Liam Girdwood <[email protected]>
//
// Features:
// o Changes power status of internal codec blocks depending on the
// dynamic configuration of codec internal audio paths and active
// DACs/ADCs.
// o Platform power domain - can support external components i.e. amps and
// mic/headphone insertion events.
// o Automatic Mic Bias support
// o Jack insertion power event initiation - e.g. hp insertion will enable
// sinks, dacs, etc
// o Delayed power down of audio subsystem to reduce pops between a quick
// device reopen.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/async.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/jiffies.h>
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/initval.h>

#include <trace/events/asoc.h>

/* bump one of the per-card DAPM statistics counters (dapm_stats) */
#define DAPM_UPDATE_STAT(widget, val) widget->dapm->card->dapm_stats.val++;

/* flip a graph-walk direction: IN <-> OUT */
#define SND_SOC_DAPM_DIR_REVERSE(x) ((x == SND_SOC_DAPM_DIR_IN) ? \
	SND_SOC_DAPM_DIR_OUT : SND_SOC_DAPM_DIR_IN)

/* iterate both walk directions (IN then OUT) */
#define snd_soc_dapm_for_each_direction(dir) \
	for ((dir) = SND_SOC_DAPM_DIR_IN; (dir) <= SND_SOC_DAPM_DIR_OUT; \
		(dir)++)

static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm,
	struct snd_soc_dapm_widget *wsource,
	struct snd_soc_dapm_widget *wsink,
	const char *control,
	int (*connected)(struct snd_soc_dapm_widget *source,
			 struct snd_soc_dapm_widget *sink));

struct snd_soc_dapm_widget *
snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
			 const struct snd_soc_dapm_widget *widget);

struct snd_soc_dapm_widget *
snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
				  const struct snd_soc_dapm_widget *widget);

static unsigned int soc_dapm_read(struct snd_soc_dapm_context *dapm, int reg);

/* dapm power sequences - make this per codec in the future */
/* lower numbers power up earlier; supplies/clocks come before consumers */
static int dapm_up_seq[] = {
	[snd_soc_dapm_pre] = 1,
	[snd_soc_dapm_regulator_supply] = 2,
	[snd_soc_dapm_pinctrl] = 2,
	[snd_soc_dapm_clock_supply] = 2,
	[snd_soc_dapm_supply] = 3,
	[snd_soc_dapm_dai_link] = 3,
	[snd_soc_dapm_micbias] = 4,
	[snd_soc_dapm_vmid] = 4,
	[snd_soc_dapm_dai_in] = 5,
	[snd_soc_dapm_dai_out] = 5,
	[snd_soc_dapm_aif_in] = 5,
	[snd_soc_dapm_aif_out] = 5,
	[snd_soc_dapm_mic] = 6,
	[snd_soc_dapm_siggen] = 6,
	[snd_soc_dapm_input] = 6,
	[snd_soc_dapm_output] = 6,
	[snd_soc_dapm_mux] = 7,
	[snd_soc_dapm_demux] = 7,
	[snd_soc_dapm_dac] = 8,
	[snd_soc_dapm_switch] = 9,
	[snd_soc_dapm_mixer] = 9,
	[snd_soc_dapm_mixer_named_ctl] = 9,
	[snd_soc_dapm_pga] = 10,
	[snd_soc_dapm_buffer] = 10,
	[snd_soc_dapm_scheduler] = 10,
	[snd_soc_dapm_effect] = 10,
	[snd_soc_dapm_src] = 10,
	[snd_soc_dapm_asrc] = 10,
	[snd_soc_dapm_encoder] = 10,
	[snd_soc_dapm_decoder] = 10,
	[snd_soc_dapm_adc] = 11,
	[snd_soc_dapm_out_drv] = 12,
	[snd_soc_dapm_hp] = 12,
	[snd_soc_dapm_line] = 12,
	[snd_soc_dapm_sink] = 12,
	[snd_soc_dapm_spk] = 13,
	[snd_soc_dapm_kcontrol] = 14,
	[snd_soc_dapm_post] = 15,
};

/* power-down ordering: roughly the reverse of dapm_up_seq */
static int dapm_down_seq[] = {
	[snd_soc_dapm_pre] = 1,
	[snd_soc_dapm_kcontrol] = 2,
	[snd_soc_dapm_adc] = 3,
	[snd_soc_dapm_spk] = 4,
	[snd_soc_dapm_hp] = 5,
	[snd_soc_dapm_line] = 5,
	[snd_soc_dapm_out_drv] = 5,
	[snd_soc_dapm_sink] = 6,
	[snd_soc_dapm_pga] = 6,
	[snd_soc_dapm_buffer] = 6,
	[snd_soc_dapm_scheduler] = 6,
	[snd_soc_dapm_effect] = 6,
	[snd_soc_dapm_src] = 6,
	[snd_soc_dapm_asrc] = 6,
	[snd_soc_dapm_encoder] = 6,
	[snd_soc_dapm_decoder] = 6,
	[snd_soc_dapm_switch] = 7,
	[snd_soc_dapm_mixer_named_ctl] = 7,
	[snd_soc_dapm_mixer] = 7,
	[snd_soc_dapm_dac] = 8,
	[snd_soc_dapm_mic] = 9,
	[snd_soc_dapm_siggen] = 9,
	[snd_soc_dapm_input] = 9,
	[snd_soc_dapm_output] = 9,
	[snd_soc_dapm_micbias] = 10,
	[snd_soc_dapm_vmid] = 10,
	[snd_soc_dapm_mux] = 11,
	[snd_soc_dapm_demux] = 11,
	[snd_soc_dapm_aif_in] = 12,
	[snd_soc_dapm_aif_out] = 12,
	[snd_soc_dapm_dai_in] = 12,
	[snd_soc_dapm_dai_out] = 12,
	[snd_soc_dapm_dai_link] = 13,
	[snd_soc_dapm_supply] = 14,
	[snd_soc_dapm_clock_supply] = 15,
	[snd_soc_dapm_pinctrl] = 15,
	[snd_soc_dapm_regulator_supply] = 15,
	[snd_soc_dapm_post] = 16,
};

/* assert the DAPM mutex is held, but only once the card is instantiated */
static void dapm_assert_locked(struct snd_soc_dapm_context *dapm)
{
	if (snd_soc_card_is_instantiated(dapm->card))
		snd_soc_dapm_mutex_assert_held(dapm);
}

/* sleep between power steps when pop-suppression timing is enabled */
static void pop_wait(u32 pop_time)
{
	if (pop_time)
		schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time));
}

/* pop-debug printf; only emits when pop_time debugging is enabled */
__printf(3, 4)
static void pop_dbg(struct device *dev, u32 pop_time, const char *fmt, ...)
{
	va_list args;
	char *buf;

	if (!pop_time)
		return;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (buf == NULL)
		return;

	va_start(args, fmt);
	vsnprintf(buf, PAGE_SIZE, fmt, args);
	dev_info(dev, "%s", buf);
	va_end(args);

	kfree(buf);
}

/* a widget is "dirty" when it is already queued on the card's dirty list */
static bool dapm_dirty_widget(struct snd_soc_dapm_widget *w)
{
	return !list_empty(&w->dirty);
}

/* queue @w for re-evaluation on the next power walk (idempotent) */
static void dapm_mark_dirty(struct snd_soc_dapm_widget *w, const char *reason)
{
	dapm_assert_locked(w->dapm);

	if (!dapm_dirty_widget(w)) {
		dev_vdbg(w->dapm->dev, "Marking %s dirty due to %s\n",
			 w->name, reason);
		list_add_tail(&w->dirty, &w->dapm->card->dapm_dirty);
	}
}

/*
 * Common implementation for dapm_widget_invalidate_input_paths() and
 * dapm_widget_invalidate_output_paths(). The function is inlined since the
 * combined size of the two specialized functions is only marginally larger
 * then the size of the generic function and at the same time the fast path
 * of the specialized functions is significantly smaller than the generic
 * function.
 */
static __always_inline void dapm_widget_invalidate_paths(
	struct snd_soc_dapm_widget *w, enum snd_soc_dapm_direction dir)
{
	enum snd_soc_dapm_direction rdir = SND_SOC_DAPM_DIR_REVERSE(dir);
	struct snd_soc_dapm_widget *node;
	struct snd_soc_dapm_path *p;
	LIST_HEAD(list);

	dapm_assert_locked(w->dapm);

	/* endpoints[dir] == -1 means the cache is already invalid */
	if (w->endpoints[dir] == -1)
		return;

	/* iterative BFS over connected neighbours via the work_list */
	list_add_tail(&w->work_list, &list);
	w->endpoints[dir] = -1;

	list_for_each_entry(w, &list, work_list) {
		snd_soc_dapm_widget_for_each_path(w, dir, p) {
			if (p->is_supply || p->weak || !p->connect)
				continue;
			node = p->node[rdir];
			if (node->endpoints[dir] != -1) {
				node->endpoints[dir] = -1;
				list_add_tail(&node->work_list, &list);
			}
		}
	}
}

/*
 * dapm_widget_invalidate_input_paths() - Invalidate the cached number of
 * input paths
 * @w: The widget for which to invalidate the cached number of input paths
 *
 * Resets the cached number of inputs for the specified widget and all
 * widgets that can be reached via outcoming paths from the widget.
 *
 * This function must be called if the number of output paths for a widget
 * might have changed. E.g. if the source state of a widget changes or a path
 * is added or activated with the widget as the sink.
 */
static void dapm_widget_invalidate_input_paths(struct snd_soc_dapm_widget *w)
{
	dapm_widget_invalidate_paths(w, SND_SOC_DAPM_DIR_IN);
}

/*
 * dapm_widget_invalidate_output_paths() - Invalidate the cached number of
 * output paths
 * @w: The widget for which to invalidate the cached number of output paths
 *
 * Resets the cached number of outputs for the specified widget and all
 * widgets that can be reached via incoming paths from the widget.
 *
 * This function must be called if the number of output paths for a widget
 * might have changed. E.g. if the sink state of a widget changes or a path
 * is added or activated with the widget as the source.
 */
static void dapm_widget_invalidate_output_paths(struct snd_soc_dapm_widget *w)
{
	dapm_widget_invalidate_paths(w, SND_SOC_DAPM_DIR_OUT);
}

/*
 * dapm_path_invalidate() - Invalidates the cached number of inputs and
 * outputs for the widgets connected to a path
 * @p: The path to invalidate
 *
 * Resets the cached number of inputs for the sink of the path and the cached
 * number of outputs for the source of the path.
 *
 * This function must be called when a path is added, removed or the
 * connected state changes.
 */
static void dapm_path_invalidate(struct snd_soc_dapm_path *p)
{
	/*
	 * Weak paths or supply paths do not influence the number of input or
	 * output paths of their neighbors.
	 */
	if (p->weak || p->is_supply)
		return;

	/*
	 * The number of connected endpoints is the sum of the number of
	 * connected endpoints of all neighbors. If a node with 0 connected
	 * endpoints is either connected or disconnected that sum won't
	 * change, so there is no need to re-check the path.
	 */
	if (p->source->endpoints[SND_SOC_DAPM_DIR_IN] != 0)
		dapm_widget_invalidate_input_paths(p->sink);
	if (p->sink->endpoints[SND_SOC_DAPM_DIR_OUT] != 0)
		dapm_widget_invalidate_output_paths(p->source);
}

/* mark every endpoint widget dirty and drop its cached path counts */
void dapm_mark_endpoints_dirty(struct snd_soc_card *card)
{
	struct snd_soc_dapm_widget *w;

	snd_soc_dapm_mutex_lock_root(card);

	for_each_card_widgets(card, w) {
		if (w->is_ep) {
			dapm_mark_dirty(w, "Rechecking endpoints");
			if (w->is_ep & SND_SOC_DAPM_EP_SINK)
				dapm_widget_invalidate_output_paths(w);
			if (w->is_ep & SND_SOC_DAPM_EP_SOURCE)
				dapm_widget_invalidate_input_paths(w);
		}
	}

	snd_soc_dapm_mutex_unlock(card);
}
EXPORT_SYMBOL_GPL(dapm_mark_endpoints_dirty);

/* create a new dapm widget */
static inline struct snd_soc_dapm_widget *dapm_cnew_widget(
	const struct snd_soc_dapm_widget *_widget)
{
	struct snd_soc_dapm_widget *w;

	w = kmemdup(_widget, sizeof(*_widget), GFP_KERNEL);
	if (!w)
		return NULL;

	/*
	 * w->name is duplicated in caller, but w->sname isn't.
	 * Duplicate it here if defined
	 */
	if (_widget->sname) {
		w->sname = kstrdup_const(_widget->sname, GFP_KERNEL);
		if (!w->sname) {
			kfree(w);
			return NULL;
		}
	}
	return w;
}

/* per-kcontrol private data: cached value, optional autodisable widget,
 * the paths the control gates and the widgets sharing the control */
struct dapm_kcontrol_data {
	unsigned int value;
	struct snd_soc_dapm_widget *widget;
	struct list_head paths;
	struct snd_soc_dapm_widget_list *wlist;
};

/* allocate dapm_kcontrol_data for @kcontrol; for autodisable controls this
 * also creates the hidden snd_soc_dapm_kcontrol companion widget */
static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
	struct snd_kcontrol *kcontrol, const char *ctrl_name)
{
	struct dapm_kcontrol_data *data;
	struct soc_mixer_control *mc;
	struct soc_enum *e;
	const char *name;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->paths);

	switch (widget->id) {
	case snd_soc_dapm_switch:
	case snd_soc_dapm_mixer:
	case snd_soc_dapm_mixer_named_ctl:
		mc = (struct soc_mixer_control *)kcontrol->private_value;

		if (mc->autodisable) {
			struct snd_soc_dapm_widget template;

			if (snd_soc_volsw_is_stereo(mc))
				dev_warn(widget->dapm->dev,
					 "ASoC: Unsupported stereo autodisable control '%s'\n",
					 ctrl_name);

			name = kasprintf(GFP_KERNEL,
					 "%s %s", ctrl_name, "Autodisable");
			if (!name) {
				ret = -ENOMEM;
				goto err_data;
			}

			memset(&template, 0, sizeof(template));
			template.reg = mc->reg;
			template.mask = (1 << fls(mc->max)) - 1;
			template.shift = mc->shift;
			if (mc->invert)
				template.off_val = mc->max;
			else
				template.off_val = 0;
			template.on_val = template.off_val;
			template.id = snd_soc_dapm_kcontrol;
			template.name = name;

			data->value = template.on_val;

			data->widget =
				snd_soc_dapm_new_control_unlocked(widget->dapm,
								  &template);
			kfree(name);
			if (IS_ERR(data->widget)) {
				ret = PTR_ERR(data->widget);
				goto err_data;
			}
		}
		break;
	case snd_soc_dapm_demux:
	case snd_soc_dapm_mux:
		e = (struct soc_enum *)kcontrol->private_value;

		if (e->autodisable) {
			struct snd_soc_dapm_widget template;

			name = kasprintf(GFP_KERNEL, "%s %s", ctrl_name,
					 "Autodisable");
			if (!name) {
				ret = -ENOMEM;
				goto err_data;
			}

			memset(&template, 0, sizeof(template));
			template.reg = e->reg;
			template.mask = e->mask;
			template.shift = e->shift_l;
			template.off_val = snd_soc_enum_item_to_val(e, 0);
			template.on_val = template.off_val;
			template.id = snd_soc_dapm_kcontrol;
			template.name = name;

			data->value = template.on_val;

			data->widget = snd_soc_dapm_new_control_unlocked(
						widget->dapm, &template);
			kfree(name);
			if (IS_ERR(data->widget)) {
				ret = PTR_ERR(data->widget);
				goto err_data;
			}

			snd_soc_dapm_add_path(widget->dapm, data->widget,
					      widget, NULL, NULL);
		} else if (e->reg != SND_SOC_NOPM) {
			/* seed the cached value from the hardware register */
			data->value = soc_dapm_read(widget->dapm, e->reg) &
				      (e->mask << e->shift_l);
		}
		break;
	default:
		break;
	}

	kcontrol->private_data = data;

	return 0;

err_data:
	kfree(data);
	return ret;
}

/* private_free callback: release a control's dapm_kcontrol_data */
static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
{
	struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);

	list_del(&data->paths);
	kfree(data->wlist);
	kfree(data);
}

static struct snd_soc_dapm_widget_list *dapm_kcontrol_get_wlist(
	const struct snd_kcontrol *kcontrol)
{
	struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);

	return data->wlist;
}

/* grow the control's widget list by one and append @widget */
static int
dapm_kcontrol_add_widget(struct snd_kcontrol *kcontrol,
			 struct snd_soc_dapm_widget *widget)
{
	struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);
	struct snd_soc_dapm_widget_list *new_wlist;
	unsigned int n;

	if (data->wlist)
		n = data->wlist->num_widgets + 1;
	else
		n = 1;

	new_wlist = krealloc(data->wlist,
			     struct_size(new_wlist, widgets, n), GFP_KERNEL);
	if (!new_wlist)
		return -ENOMEM;

	new_wlist->widgets[n - 1] = widget;
	new_wlist->num_widgets = n;

	data->wlist = new_wlist;

	return 0;
}

/* associate a graph path with the kcontrol that gates it */
static void dapm_kcontrol_add_path(const struct snd_kcontrol *kcontrol,
	struct snd_soc_dapm_path *path)
{
	struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);

	list_add_tail(&path->list_kcontrol, &data->paths);
}

/* true when the (optional) autodisable companion widget is powered */
static bool dapm_kcontrol_is_powered(const struct snd_kcontrol *kcontrol)
{
	struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);

	if (!data->widget)
		return true;

	return data->widget->power;
}

static struct list_head *dapm_kcontrol_get_path_list(
	const struct snd_kcontrol *kcontrol)
{
	struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);

	return &data->paths;
}

#define dapm_kcontrol_for_each_path(path, kcontrol) \
	list_for_each_entry(path, dapm_kcontrol_get_path_list(kcontrol), \
		list_kcontrol)

unsigned int dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol)
{
	struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);

	return data->value;
}
EXPORT_SYMBOL_GPL(dapm_kcontrol_get_value);

/* update the cached control value (and the autodisable widget's on_val);
 * returns true when the value actually changed */
static bool dapm_kcontrol_set_value(const struct snd_kcontrol *kcontrol,
	unsigned int value)
{
	struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);

	if (data->value == value)
		return false;

	if (data->widget) {
		switch (dapm_kcontrol_get_wlist(kcontrol)->widgets[0]->id) {
		case snd_soc_dapm_switch:
		case snd_soc_dapm_mixer:
		case snd_soc_dapm_mixer_named_ctl:
			data->widget->on_val = value & data->widget->mask;
			break;
		case snd_soc_dapm_demux:
		case snd_soc_dapm_mux:
			data->widget->on_val = value >> data->widget->shift;
			break;
		default:
			data->widget->on_val
						= value;
			break;
		}
	}

	data->value = value;

	return true;
}

/**
 * snd_soc_dapm_kcontrol_widget() - Returns the widget associated to a
 * kcontrol
 * @kcontrol: The kcontrol
 */
struct snd_soc_dapm_widget *snd_soc_dapm_kcontrol_widget(
	struct snd_kcontrol *kcontrol)
{
	return dapm_kcontrol_get_wlist(kcontrol)->widgets[0];
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_kcontrol_widget);

/**
 * snd_soc_dapm_kcontrol_dapm() - Returns the dapm context associated to a
 * kcontrol
 * @kcontrol: The kcontrol
 *
 * Note: This function must only be used on kcontrols that are known to have
 * been registered for a CODEC. Otherwise the behaviour is undefined.
 */
struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_dapm(
	struct snd_kcontrol *kcontrol)
{
	return dapm_kcontrol_get_wlist(kcontrol)->widgets[0]->dapm;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_kcontrol_dapm);

/* reset per-walk state (stats, new_power, power_checked) before a run */
static void dapm_reset(struct snd_soc_card *card)
{
	struct snd_soc_dapm_widget *w;

	snd_soc_dapm_mutex_assert_held(card);

	memset(&card->dapm_stats, 0, sizeof(card->dapm_stats));

	for_each_card_widgets(card, w) {
		w->new_power = w->power;
		w->power_checked = false;
	}
}

/* component name prefix, or NULL for card-level contexts */
static const char *soc_dapm_prefix(struct snd_soc_dapm_context *dapm)
{
	if (!dapm->component)
		return NULL;
	return dapm->component->name_prefix;
}

static unsigned int soc_dapm_read(struct snd_soc_dapm_context *dapm, int reg)
{
	if (!dapm->component)
		return -EIO;
	return snd_soc_component_read(dapm->component, reg);
}

static int soc_dapm_update_bits(struct snd_soc_dapm_context *dapm,
	int reg, unsigned int mask, unsigned int value)
{
	if (!dapm->component)
		return -EIO;
	return snd_soc_component_update_bits(dapm->component, reg,
					     mask, value);
}

static int soc_dapm_test_bits(struct snd_soc_dapm_context *dapm,
	int reg, unsigned int mask, unsigned int value)
{
	if (!dapm->component)
		return -EIO;
	return snd_soc_component_test_bits(dapm->component, reg, mask, value);
}

static void soc_dapm_async_complete(struct snd_soc_dapm_context *dapm)
{
	if (dapm->component)
		snd_soc_component_async_complete(dapm->component);
}

/*
 * Look up @name in the card widget list starting at @w, but only probe a
 * couple of entries ("depth") — a cheap cache for the common case where
 * consecutively-registered widgets are looked up in order.
 */
static struct snd_soc_dapm_widget *
dapm_wcache_lookup(struct snd_soc_dapm_widget *w, const char *name)
{
	if (w) {
		struct list_head *wlist = &w->dapm->card->widgets;
		const int depth = 2;
		int i = 0;

		list_for_each_entry_from(w, wlist, list) {
			if (!strcmp(name, w->name))
				return w;

			if (++i == depth)
				break;
		}
	}

	return NULL;
}

/**
 * snd_soc_dapm_force_bias_level() - Sets the DAPM bias level
 * @dapm: The DAPM context for which to set the level
 * @level: The level to set
 *
 * Forces the DAPM bias level to a specific state. It will call the bias level
 * callback of DAPM context with the specified level. This will even happen if
 * the context is already at the same level. Furthermore it will not go
 * through the normal bias level sequencing, meaning any intermediate states
 * between the current and the target state will not be entered.
 *
 * Note that the change in bias level is only temporary and the next time
 * snd_soc_dapm_sync() is called the state will be set to the level as
 * determined by the DAPM core. The function is mainly intended to be used to
 * used during probe or resume from suspend to power up the device so
 * initialization can be done, before the DAPM core takes over.
 */
int snd_soc_dapm_force_bias_level(struct snd_soc_dapm_context *dapm,
				  enum snd_soc_bias_level level)
{
	int ret = 0;

	if (dapm->component)
		ret = snd_soc_component_set_bias_level(dapm->component, level);

	if (ret == 0)
		dapm->bias_level = level;

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_force_bias_level);

/**
 * snd_soc_dapm_set_bias_level - set the bias level for the system
 * @dapm: DAPM context
 * @level: level to configure
 *
 * Configure the bias (power) levels for the SoC audio device.
 *
 * Returns 0 for success else error.
 */
static int snd_soc_dapm_set_bias_level(struct snd_soc_dapm_context *dapm,
				       enum snd_soc_bias_level level)
{
	struct snd_soc_card *card = dapm->card;
	int ret = 0;

	trace_snd_soc_bias_level_start(card, level);

	ret = snd_soc_card_set_bias_level(card, dapm, level);
	if (ret != 0)
		goto out;

	/* NOTE(review): the !card test looks defensive/legacy — card has
	 * already been dereferenced via dapm->card above; the real intent is
	 * "not the card-level context". Kept as-is. */
	if (!card || dapm != &card->dapm)
		ret = snd_soc_dapm_force_bias_level(dapm, level);

	if (ret != 0)
		goto out;

	ret = snd_soc_card_set_bias_level_post(card, dapm, level);
out:
	trace_snd_soc_bias_level_done(card, level);

	return ret;
}

/* connect mux widget to its interconnecting audio paths */
static int dapm_connect_mux(struct snd_soc_dapm_context *dapm,
	struct snd_soc_dapm_path *path, const char *control_name,
	struct snd_soc_dapm_widget *w)
{
	const struct snd_kcontrol_new *kcontrol = &w->kcontrol_news[0];
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
	unsigned int item;
	int i;

	if (e->reg != SND_SOC_NOPM) {
		unsigned int val;

		val = soc_dapm_read(dapm, e->reg);
		val = (val >> e->shift_l) & e->mask;
		item = snd_soc_enum_val_to_item(e, val);
	} else {
		/* since a virtual mux has no backing registers to
		 * decide which path to connect, it will try to match
		 * with the first enumeration. This is to ensure
		 * that the default mux choice (the first) will be
		 * correctly powered up during initialization.
		 */
		item = 0;
	}

	i = match_string(e->texts, e->items, control_name);
	if (i < 0)
		return -ENODEV;

	path->name = e->texts[i];
	path->connect = (i == item);
	return 0;
}

/* set up initial codec paths */
static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i,
				       int nth_path)
{
	struct soc_mixer_control *mc = (struct soc_mixer_control *)
		p->sink->kcontrol_news[i].private_value;
	unsigned int reg = mc->reg;
	unsigned int invert = mc->invert;

	if (reg != SND_SOC_NOPM) {
		unsigned int shift = mc->shift;
		unsigned int max = mc->max;
		unsigned int mask = (1 << fls(max)) - 1;
		unsigned int val = soc_dapm_read(p->sink->dapm, reg);

		/*
		 * The nth_path argument allows this function to know
		 * which path of a kcontrol it is setting the initial
		 * status for. Ideally this would support any number
		 * of paths and channels. But since kcontrols only come
		 * in mono and stereo variants, we are limited to 2
		 * channels.
		 *
		 * The following code assumes for stereo controls the
		 * first path is the left channel, and all remaining
		 * paths are the right channel.
		 */
		if (snd_soc_volsw_is_stereo(mc) && nth_path > 0) {
			if (reg != mc->rreg)
				val = soc_dapm_read(p->sink->dapm, mc->rreg);
			val = (val >> mc->rshift) & mask;
		} else {
			val = (val >> shift) & mask;
		}
		if (invert)
			val = max - val;
		p->connect = !!val;
	} else {
		/* since a virtual mixer has no backing registers to
		 * decide which path to connect, it will try to match
		 * with initial state. This is to ensure
		 * that the default mixer choice will be
		 * correctly powered up during initialization.
		 */
		p->connect = invert;
	}
}

/* connect mixer widget to its interconnecting audio paths */
static int dapm_connect_mixer(struct snd_soc_dapm_context *dapm,
	struct snd_soc_dapm_path *path, const char *control_name)
{
	int i, nth_path = 0;

	/* search for mixer kcontrol */
	for (i = 0; i < path->sink->num_kcontrols; i++) {
		if (!strcmp(control_name, path->sink->kcontrol_news[i].name)) {
			path->name = path->sink->kcontrol_news[i].name;
			dapm_set_mixer_path_status(path, i, nth_path++);
			return 0;
		}
	}
	return -ENODEV;
}

/* returns 1 (and *kcontrol if already created) when another widget of the
 * same context references the same snd_kcontrol_new, i.e. the control is
 * shared */
static int dapm_is_shared_kcontrol(struct snd_soc_dapm_context *dapm,
	struct snd_soc_dapm_widget *kcontrolw,
	const struct snd_kcontrol_new *kcontrol_new,
	struct snd_kcontrol **kcontrol)
{
	struct snd_soc_dapm_widget *w;
	int i;

	*kcontrol = NULL;

	for_each_card_widgets(dapm->card, w) {
		if (w == kcontrolw || w->dapm != kcontrolw->dapm)
			continue;
		for (i = 0; i < w->num_kcontrols; i++) {
			if (&w->kcontrol_news[i] == kcontrol_new) {
				if (w->kcontrols)
					*kcontrol = w->kcontrols[i];
				return 1;
			}
		}
	}

	return 0;
}

/*
 * Determine if a kcontrol is shared. If it is, look it up. If it isn't,
 * create it. Either way, add the widget into the control's widget list
 */
static int dapm_create_or_share_kcontrol(struct snd_soc_dapm_widget *w,
	int kci)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct snd_card *card = dapm->card->snd_card;
	const char *prefix;
	size_t prefix_len;
	int shared;
	struct snd_kcontrol *kcontrol;
	bool wname_in_long_name, kcname_in_long_name;
	char *long_name = NULL;
	const char *name;
	int ret = 0;

	prefix = soc_dapm_prefix(dapm);
	if (prefix)
		prefix_len = strlen(prefix) + 1;
	else
		prefix_len = 0;

	shared = dapm_is_shared_kcontrol(dapm, w, &w->kcontrol_news[kci],
					 &kcontrol);

	if (!kcontrol) {
		if (shared) {
			wname_in_long_name = false;
			kcname_in_long_name = true;
		} else {
			/* control-name composition depends on widget type */
			switch (w->id) {
			case snd_soc_dapm_switch:
			case snd_soc_dapm_mixer:
			case snd_soc_dapm_pga:
			case snd_soc_dapm_effect:
			case snd_soc_dapm_out_drv:
				wname_in_long_name = true;
				kcname_in_long_name = true;
				break;
			case snd_soc_dapm_mixer_named_ctl:
				wname_in_long_name = false;
				kcname_in_long_name = true;
				break;
			case snd_soc_dapm_demux:
			case snd_soc_dapm_mux:
				wname_in_long_name = true;
				kcname_in_long_name = false;
				break;
			default:
				return -EINVAL;
			}
		}
		if (w->no_wname_in_kcontrol_name)
			wname_in_long_name = false;

		if (wname_in_long_name && kcname_in_long_name) {
			/*
			 * The control will get a prefix from the control
			 * creation process but we're also using the same
			 * prefix for widgets so cut the prefix off the
			 * front of the widget name.
			 */
			long_name = kasprintf(GFP_KERNEL, "%s %s",
					      w->name + prefix_len,
					      w->kcontrol_news[kci].name);
			if (long_name == NULL)
				return -ENOMEM;

			name = long_name;
		} else if (wname_in_long_name) {
			long_name = NULL;
			name = w->name + prefix_len;
		} else {
			long_name = NULL;
			name = w->kcontrol_news[kci].name;
		}

		kcontrol = snd_soc_cnew(&w->kcontrol_news[kci], NULL, name,
					prefix);
		if (!kcontrol) {
			ret = -ENOMEM;
			goto exit_free;
		}

		kcontrol->private_free = dapm_kcontrol_free;

		ret = dapm_kcontrol_data_alloc(w, kcontrol, name);
		if (ret) {
			snd_ctl_free_one(kcontrol);
			goto exit_free;
		}

		ret = snd_ctl_add(card, kcontrol);
		if (ret < 0) {
			dev_err(dapm->dev,
				"ASoC: failed to add widget %s dapm kcontrol %s: %d\n",
				w->name, name, ret);
			goto exit_free;
		}
	}

	ret = dapm_kcontrol_add_widget(kcontrol, w);
	if (ret == 0)
		w->kcontrols[kci] = kcontrol;

exit_free:
	kfree(long_name);

	return ret;
}

/* create new dapm mixer control */
static int dapm_new_mixer(struct snd_soc_dapm_widget *w)
{
	int i, ret;
	struct snd_soc_dapm_path *path;
	struct dapm_kcontrol_data *data;

	/* add kcontrol */
	for (i = 0; i < w->num_kcontrols; i++) {
		/* match name */
		snd_soc_dapm_widget_for_each_source_path(w, path) {
			/* mixer/mux paths name must match control name */
			if (path->name != (char *)w->kcontrol_news[i].name)
				continue;

			if (!w->kcontrols[i]) {
				ret = dapm_create_or_share_kcontrol(w, i);
				if (ret < 0)
					return ret;
			}

			dapm_kcontrol_add_path(w->kcontrols[i], path);

			data = snd_kcontrol_chip(w->kcontrols[i]);
			if (data->widget)
				snd_soc_dapm_add_path(data->widget->dapm,
						      data->widget,
						      path->source,
						      NULL, NULL);
		}
	}

	return 0;
}

/* create new dapm mux control */
static int dapm_new_mux(struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	enum snd_soc_dapm_direction dir;
	struct snd_soc_dapm_path *path;
	const char *type;
	int ret;

	switch (w->id) {
	case snd_soc_dapm_mux:
		dir = SND_SOC_DAPM_DIR_OUT;
		type = "mux";
		break;
	case snd_soc_dapm_demux:
		dir = SND_SOC_DAPM_DIR_IN;
		type = "demux";
		break;
	default:
		return -EINVAL;
	}

	if (w->num_kcontrols != 1) {
		dev_err(dapm->dev,
			"ASoC: %s %s has incorrect number of controls\n", type,
			w->name);
		return -EINVAL;
	}

	if (list_empty(&w->edges[dir])) {
		dev_err(dapm->dev, "ASoC: %s %s has no paths\n", type, w->name);
		return -EINVAL;
	}

	ret = dapm_create_or_share_kcontrol(w, 0);
	if (ret < 0)
		return ret;

	snd_soc_dapm_widget_for_each_path(w, dir, path) {
		if (path->name)
			dapm_kcontrol_add_path(w->kcontrols[0], path);
	}

	return 0;
}

/* create new dapm volume control */
static int dapm_new_pga(struct snd_soc_dapm_widget *w)
{
	int i;

	for (i = 0; i < w->num_kcontrols; i++) {
		int ret = dapm_create_or_share_kcontrol(w, i);

		if (ret < 0)
			return ret;
	}

	return 0;
}

/* create new dapm dai link control */
static int dapm_new_dai_link(struct snd_soc_dapm_widget *w)
{
	int i;
	struct snd_soc_pcm_runtime *rtd = w->priv;

	/* create control for links with > 1 config */
	if (rtd->dai_link->num_c2c_params <= 1)
		return 0;

	/* add kcontrol */
	for (i = 0; i < w->num_kcontrols; i++) {
		struct snd_soc_dapm_context *dapm = w->dapm;
		struct snd_card *card = dapm->card->snd_card;
		struct snd_kcontrol *kcontrol =
			snd_soc_cnew(&w->kcontrol_news[i], w, w->name, NULL);
		int ret = snd_ctl_add(card, kcontrol);

		if (ret < 0) {
			dev_err(dapm->dev,
				"ASoC: failed to add widget %s dapm kcontrol %s: %d\n",
				w->name, w->kcontrol_news[i].name, ret);
			return ret;
		}
		kcontrol->private_data = w;
		w->kcontrols[i] = kcontrol;
	}

	return 0;
}

/* We implement power down on suspend by checking the power state of
 * the ALSA card - when we are suspending the ALSA state for the card
 * is set to D3.
*/ static int snd_soc_dapm_suspend_check(struct snd_soc_dapm_widget *widget) { int level = snd_power_get_state(widget->dapm->card->snd_card); switch (level) { case SNDRV_CTL_POWER_D3hot: case SNDRV_CTL_POWER_D3cold: if (widget->ignore_suspend) dev_dbg(widget->dapm->dev, "ASoC: %s ignoring suspend\n", widget->name); return widget->ignore_suspend; default: return 1; } } static void dapm_widget_list_free(struct snd_soc_dapm_widget_list **list) { kfree(*list); } static int dapm_widget_list_create(struct snd_soc_dapm_widget_list **list, struct list_head *widgets) { struct snd_soc_dapm_widget *w; struct list_head *it; unsigned int size = 0; unsigned int i = 0; list_for_each(it, widgets) size++; *list = kzalloc(struct_size(*list, widgets, size), GFP_KERNEL); if (*list == NULL) return -ENOMEM; list_for_each_entry(w, widgets, work_list) (*list)->widgets[i++] = w; (*list)->num_widgets = i; return 0; } /* * Recursively reset the cached number of inputs or outputs for the specified * widget and all widgets that can be reached via incoming or outcoming paths * from the widget. */ static void invalidate_paths_ep(struct snd_soc_dapm_widget *widget, enum snd_soc_dapm_direction dir) { enum snd_soc_dapm_direction rdir = SND_SOC_DAPM_DIR_REVERSE(dir); struct snd_soc_dapm_path *path; widget->endpoints[dir] = -1; snd_soc_dapm_widget_for_each_path(widget, rdir, path) { if (path->weak || path->is_supply) continue; if (path->walking) return; if (path->connect) { path->walking = 1; invalidate_paths_ep(path->node[dir], dir); path->walking = 0; } } } /* * Common implementation for is_connected_output_ep() and * is_connected_input_ep(). The function is inlined since the combined size of * the two specialized functions is only marginally larger then the size of the * generic function and at the same time the fast path of the specialized * functions is significantly smaller than the generic function. 
*/ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget, struct list_head *list, enum snd_soc_dapm_direction dir, int (*fn)(struct snd_soc_dapm_widget *, struct list_head *, bool (*custom_stop_condition)(struct snd_soc_dapm_widget *, enum snd_soc_dapm_direction)), bool (*custom_stop_condition)(struct snd_soc_dapm_widget *, enum snd_soc_dapm_direction)) { enum snd_soc_dapm_direction rdir = SND_SOC_DAPM_DIR_REVERSE(dir); struct snd_soc_dapm_path *path; int con = 0; if (widget->endpoints[dir] >= 0) return widget->endpoints[dir]; DAPM_UPDATE_STAT(widget, path_checks); /* do we need to add this widget to the list ? */ if (list) list_add_tail(&widget->work_list, list); if (custom_stop_condition && custom_stop_condition(widget, dir)) { list = NULL; custom_stop_condition = NULL; } if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) { widget->endpoints[dir] = snd_soc_dapm_suspend_check(widget); return widget->endpoints[dir]; } snd_soc_dapm_widget_for_each_path(widget, rdir, path) { DAPM_UPDATE_STAT(widget, neighbour_checks); if (path->weak || path->is_supply) continue; if (path->walking) return 1; trace_snd_soc_dapm_path(widget, dir, path); if (path->connect) { path->walking = 1; con += fn(path->node[dir], list, custom_stop_condition); path->walking = 0; } } widget->endpoints[dir] = con; return con; } /* * Recursively check for a completed path to an active or physically connected * output widget. Returns number of complete paths. * * Optionally, can be supplied with a function acting as a stopping condition. * This function takes the dapm widget currently being examined and the walk * direction as an arguments, it should return true if widgets from that point * in the graph onwards should not be added to the widget list. 
*/ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget, struct list_head *list, bool (*custom_stop_condition)(struct snd_soc_dapm_widget *i, enum snd_soc_dapm_direction)) { return is_connected_ep(widget, list, SND_SOC_DAPM_DIR_OUT, is_connected_output_ep, custom_stop_condition); } /* * Recursively check for a completed path to an active or physically connected * input widget. Returns number of complete paths. * * Optionally, can be supplied with a function acting as a stopping condition. * This function takes the dapm widget currently being examined and the walk * direction as an arguments, it should return true if the walk should be * stopped and false otherwise. */ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget, struct list_head *list, bool (*custom_stop_condition)(struct snd_soc_dapm_widget *i, enum snd_soc_dapm_direction)) { return is_connected_ep(widget, list, SND_SOC_DAPM_DIR_IN, is_connected_input_ep, custom_stop_condition); } /** * snd_soc_dapm_dai_get_connected_widgets - query audio path and it's widgets. * @dai: the soc DAI. * @stream: stream direction. * @list: list of active widgets for this stream. * @custom_stop_condition: (optional) a function meant to stop the widget graph * walk based on custom logic. * * Queries DAPM graph as to whether a valid audio stream path exists for * the initial stream specified by name. This takes into account * current mixer and mux kcontrol settings. Creates list of valid widgets. * * Optionally, can be supplied with a function acting as a stopping condition. * This function takes the dapm widget currently being examined and the walk * direction as an arguments, it should return true if the walk should be * stopped and false otherwise. * * Returns the number of valid paths or negative error. 
 */
int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
	struct snd_soc_dapm_widget_list **list,
	bool (*custom_stop_condition)(struct snd_soc_dapm_widget *,
				      enum snd_soc_dapm_direction))
{
	struct snd_soc_card *card = dai->component->card;
	struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(dai, stream);
	LIST_HEAD(widgets);
	int paths;
	int ret;

	snd_soc_dapm_mutex_lock(card);

	/*
	 * Reset the memoized endpoint counts for the relevant direction,
	 * then walk from the DAI widget collecting the active path.
	 */
	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		invalidate_paths_ep(w, SND_SOC_DAPM_DIR_OUT);
		paths = is_connected_output_ep(w, &widgets,
			custom_stop_condition);
	} else {
		invalidate_paths_ep(w, SND_SOC_DAPM_DIR_IN);
		paths = is_connected_input_ep(w, &widgets,
			custom_stop_condition);
	}

	/* Drop starting point */
	list_del(widgets.next);

	/* Snapshot the collected widgets into an array for the caller. */
	ret = dapm_widget_list_create(list, &widgets);
	if (ret)
		paths = ret;

	trace_snd_soc_dapm_connected(paths, stream);
	snd_soc_dapm_mutex_unlock(card);

	return paths;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_dai_get_connected_widgets);

/* Free a widget list previously created by
 * snd_soc_dapm_dai_get_connected_widgets().
 */
void snd_soc_dapm_dai_free_widgets(struct snd_soc_dapm_widget_list **list)
{
	dapm_widget_list_free(list);
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_dai_free_widgets);

/*
 * Handler for regulator supply widget.
 * Enables/disables the regulator on power up/down, optionally toggling
 * bypass mode first when SND_SOC_DAPM_REGULATOR_BYPASS is set in on_val.
 */
int dapm_regulator_event(struct snd_soc_dapm_widget *w,
			 struct snd_kcontrol *kcontrol, int event)
{
	int ret;

	soc_dapm_async_complete(w->dapm);

	if (SND_SOC_DAPM_EVENT_ON(event)) {
		/* Leave bypass before enabling; a failure is only warned. */
		if (w->on_val & SND_SOC_DAPM_REGULATOR_BYPASS) {
			ret = regulator_allow_bypass(w->regulator, false);
			if (ret != 0)
				dev_warn(w->dapm->dev,
					 "ASoC: Failed to unbypass %s: %d\n",
					 w->name, ret);
		}

		return regulator_enable(w->regulator);
	} else {
		/* Re-enter bypass before the deferred disable. */
		if (w->on_val & SND_SOC_DAPM_REGULATOR_BYPASS) {
			ret = regulator_allow_bypass(w->regulator, true);
			if (ret != 0)
				dev_warn(w->dapm->dev,
					 "ASoC: Failed to bypass %s: %d\n",
					 w->name, ret);
		}

		/* w->shift holds the disable delay here (deferred disable). */
		return regulator_disable_deferred(w->regulator, w->shift);
	}
}
EXPORT_SYMBOL_GPL(dapm_regulator_event);

/*
 * Handler for pinctrl widget.
*/ int dapm_pinctrl_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_dapm_pinctrl_priv *priv = w->priv; struct pinctrl *p = w->pinctrl; struct pinctrl_state *s; if (!p || !priv) return -EIO; if (SND_SOC_DAPM_EVENT_ON(event)) s = pinctrl_lookup_state(p, priv->active_state); else s = pinctrl_lookup_state(p, priv->sleep_state); if (IS_ERR(s)) return PTR_ERR(s); return pinctrl_select_state(p, s); } EXPORT_SYMBOL_GPL(dapm_pinctrl_event); /* * Handler for clock supply widget. */ int dapm_clock_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { if (!w->clk) return -EIO; soc_dapm_async_complete(w->dapm); if (SND_SOC_DAPM_EVENT_ON(event)) { return clk_prepare_enable(w->clk); } else { clk_disable_unprepare(w->clk); return 0; } return 0; } EXPORT_SYMBOL_GPL(dapm_clock_event); static int dapm_widget_power_check(struct snd_soc_dapm_widget *w) { if (w->power_checked) return w->new_power; if (w->force) w->new_power = 1; else w->new_power = w->power_check(w); w->power_checked = true; return w->new_power; } /* Generic check to see if a widget should be powered. 
 */
static int dapm_generic_check_power(struct snd_soc_dapm_widget *w)
{
	int in, out;

	DAPM_UPDATE_STAT(w, power_checks);

	/* Powered only when a complete input AND output path exists. */
	in = is_connected_input_ep(w, NULL, NULL);
	out = is_connected_output_ep(w, NULL, NULL);
	return out != 0 && in != 0;
}

/* Check to see if a power supply is needed */
static int dapm_supply_check_power(struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *path;

	DAPM_UPDATE_STAT(w, power_checks);

	/* Check if one of our outputs is connected */
	snd_soc_dapm_widget_for_each_sink_path(w, path) {
		DAPM_UPDATE_STAT(w, neighbour_checks);

		if (path->weak)
			continue;

		/* Dynamic connection check, if the path provides one. */
		if (path->connected &&
		    !path->connected(path->source, path->sink))
			continue;

		/* One powered consumer is enough to keep the supply on. */
		if (dapm_widget_power_check(path->sink))
			return 1;
	}

	return 0;
}

/* Always-on widgets are powered whenever the pin is connected. */
static int dapm_always_on_check_power(struct snd_soc_dapm_widget *w)
{
	return w->connected;
}

/*
 * Ordering comparator for the power-sequence lists: compares by
 * per-type sequence number, then subsequence (inverted for power-down),
 * then register, then DAPM context, so that widgets sharing a register
 * end up adjacent and can be coalesced into one write.
 */
static int dapm_seq_compare(struct snd_soc_dapm_widget *a,
			    struct snd_soc_dapm_widget *b,
			    bool power_up)
{
	int *sort;

	BUILD_BUG_ON(ARRAY_SIZE(dapm_up_seq) != SND_SOC_DAPM_TYPE_COUNT);
	BUILD_BUG_ON(ARRAY_SIZE(dapm_down_seq) != SND_SOC_DAPM_TYPE_COUNT);

	if (power_up)
		sort = dapm_up_seq;
	else
		sort = dapm_down_seq;

	WARN_ONCE(sort[a->id] == 0, "offset a->id %d not initialized\n", a->id);
	WARN_ONCE(sort[b->id] == 0, "offset b->id %d not initialized\n", b->id);

	if (sort[a->id] != sort[b->id])
		return sort[a->id] - sort[b->id];
	if (a->subseq != b->subseq) {
		if (power_up)
			return a->subseq - b->subseq;
		else
			return b->subseq - a->subseq;
	}
	if (a->reg != b->reg)
		return a->reg - b->reg;
	if (a->dapm != b->dapm)
		/*
		 * Only an ordering tie-breaker; the pointer difference is
		 * truncated to int, which is fine for inequality ordering.
		 */
		return (unsigned long)a->dapm - (unsigned long)b->dapm;

	return 0;
}

/* Insert a widget in order into a DAPM power sequence.
 */
static void dapm_seq_insert(struct snd_soc_dapm_widget *new_widget,
			    struct list_head *list,
			    bool power_up)
{
	struct snd_soc_dapm_widget *w;

	/* Insertion sort keyed by dapm_seq_compare(). */
	list_for_each_entry(w, list, power_list)
		if (dapm_seq_compare(new_widget, w, power_up) < 0) {
			list_add_tail(&new_widget->power_list, &w->power_list);
			return;
		}

	list_add_tail(&new_widget->power_list, list);
}

/*
 * Dispatch one DAPM event (PRE/POST/WILL PMU/PMD) to a widget's event
 * callback, but only when the widget's pending power state matches the
 * direction of the event and the widget has registered for it.
 */
static void dapm_seq_check_event(struct snd_soc_card *card,
				 struct snd_soc_dapm_widget *w, int event)
{
	const char *ev_name;
	int power;

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		ev_name = "PRE_PMU";
		power = 1;
		break;
	case SND_SOC_DAPM_POST_PMU:
		ev_name = "POST_PMU";
		power = 1;
		break;
	case SND_SOC_DAPM_PRE_PMD:
		ev_name = "PRE_PMD";
		power = 0;
		break;
	case SND_SOC_DAPM_POST_PMD:
		ev_name = "POST_PMD";
		power = 0;
		break;
	case SND_SOC_DAPM_WILL_PMU:
		ev_name = "WILL_PMU";
		power = 1;
		break;
	case SND_SOC_DAPM_WILL_PMD:
		ev_name = "WILL_PMD";
		power = 0;
		break;
	default:
		WARN(1, "Unknown event %d\n", event);
		return;
	}

	/* Skip if the widget isn't transitioning in this direction. */
	if (w->new_power != power)
		return;

	if (w->event && (w->event_flags & event)) {
		int ret;

		pop_dbg(w->dapm->dev, card->pop_time, "pop test : %s %s\n",
			w->name, ev_name);
		soc_dapm_async_complete(w->dapm);
		trace_snd_soc_dapm_widget_event_start(w, event);
		ret = w->event(w, NULL, event);
		trace_snd_soc_dapm_widget_event_done(w, event);
		if (ret < 0)
			dev_err(w->dapm->dev, "ASoC: %s: %s event failed: %d\n",
			       ev_name, w->name, ret);
	}
}

/* Apply the coalesced changes from a DAPM sequence */
static void dapm_seq_run_coalesced(struct snd_soc_card *card,
				   struct list_head *pending)
{
	struct snd_soc_dapm_context *dapm;
	struct snd_soc_dapm_widget *w;
	int reg;
	unsigned int value = 0;
	unsigned int mask = 0;

	/* All pending widgets share the same register and context. */
	w = list_first_entry(pending, struct snd_soc_dapm_widget, power_list);
	reg = w->reg;
	dapm = w->dapm;

	list_for_each_entry(w, pending, power_list) {
		WARN_ON(reg != w->reg || dapm != w->dapm);
		w->power = w->new_power;

		/* Accumulate the per-widget bitfields into one update. */
		mask |= w->mask << w->shift;
		if (w->power)
			value |= w->on_val << w->shift;
		else
			value |= w->off_val << w->shift;

		pop_dbg(dapm->dev, card->pop_time,
			"pop test : Queue %s: reg=0x%x, 0x%x/0x%x\n",
			w->name, reg, value, mask);

		/* Check for events */
		dapm_seq_check_event(card, w, SND_SOC_DAPM_PRE_PMU);
		dapm_seq_check_event(card, w, SND_SOC_DAPM_PRE_PMD);
	}

	if (reg >= 0) {
		/* Any widget will do, they should all be updating the
		 * same register.
		 */

		pop_dbg(dapm->dev, card->pop_time,
			"pop test : Applying 0x%x/0x%x to %x in %dms\n",
			value, mask, reg, card->pop_time);
		pop_wait(card->pop_time);
		soc_dapm_update_bits(dapm, reg, mask, value);
	}

	list_for_each_entry(w, pending, power_list) {
		dapm_seq_check_event(card, w, SND_SOC_DAPM_POST_PMU);
		dapm_seq_check_event(card, w, SND_SOC_DAPM_POST_PMD);
	}
}

/* Apply a DAPM power sequence.
 *
 * We walk over a pre-sorted list of widgets to apply power to.  In
 * order to minimise the number of writes to the device required
 * multiple widgets will be updated in a single write where possible.
 * Currently anything that requires more than a single write is not
 * handled.
 */
static void dapm_seq_run(struct snd_soc_card *card,
			 struct list_head *list, int event, bool power_up)
{
	struct snd_soc_dapm_widget *w, *n;
	struct snd_soc_dapm_context *d;
	LIST_HEAD(pending);
	int cur_sort = -1;
	int cur_subseq = -1;
	int cur_reg = SND_SOC_NOPM;
	struct snd_soc_dapm_context *cur_dapm = NULL;
	int i;
	int *sort;

	if (power_up)
		sort = dapm_up_seq;
	else
		sort = dapm_down_seq;

	list_for_each_entry_safe(w, n, list, power_list) {
		int ret = 0;

		/* Do we need to apply any queued changes?
		 */
		if (sort[w->id] != cur_sort || w->reg != cur_reg ||
		    w->dapm != cur_dapm || w->subseq != cur_subseq) {
			/* Flush the batch accumulated so far. */
			if (!list_empty(&pending))
				dapm_seq_run_coalesced(card, &pending);

			/* Notify the component about subsequence boundaries. */
			if (cur_dapm && cur_dapm->component) {
				for (i = 0; i < ARRAY_SIZE(dapm_up_seq); i++)
					if (sort[i] == cur_sort)
						snd_soc_component_seq_notifier(
							cur_dapm->component,
							i, cur_subseq);
			}

			if (cur_dapm && w->dapm != cur_dapm)
				soc_dapm_async_complete(cur_dapm);

			INIT_LIST_HEAD(&pending);
			cur_sort = -1;
			cur_subseq = INT_MIN;
			cur_reg = SND_SOC_NOPM;
			cur_dapm = NULL;
		}

		switch (w->id) {
		case snd_soc_dapm_pre:
			if (!w->event)
				continue;

			if (event == SND_SOC_DAPM_STREAM_START)
				ret = w->event(w,
					       NULL, SND_SOC_DAPM_PRE_PMU);
			else if (event == SND_SOC_DAPM_STREAM_STOP)
				ret = w->event(w,
					       NULL, SND_SOC_DAPM_PRE_PMD);
			break;

		case snd_soc_dapm_post:
			if (!w->event)
				continue;

			if (event == SND_SOC_DAPM_STREAM_START)
				ret = w->event(w,
					       NULL, SND_SOC_DAPM_POST_PMU);
			else if (event == SND_SOC_DAPM_STREAM_STOP)
				ret = w->event(w,
					       NULL, SND_SOC_DAPM_POST_PMD);
			break;

		default:
			/* Queue it up for application */
			cur_sort = sort[w->id];
			cur_subseq = w->subseq;
			cur_reg = w->reg;
			cur_dapm = w->dapm;
			list_move(&w->power_list, &pending);
			break;
		}

		if (ret < 0)
			dev_err(w->dapm->dev,
				"ASoC: Failed to apply widget power: %d\n", ret);
	}

	/* Flush whatever is left in the last batch. */
	if (!list_empty(&pending))
		dapm_seq_run_coalesced(card, &pending);

	if (cur_dapm && cur_dapm->component) {
		for (i = 0; i < ARRAY_SIZE(dapm_up_seq); i++)
			if (sort[i] == cur_sort)
				snd_soc_component_seq_notifier(
					cur_dapm->component, i, cur_subseq);
	}

	for_each_card_dapms(card, d)
		soc_dapm_async_complete(d);
}

/*
 * Apply the register update queued on card->update (if any), bracketed
 * by the PRE_REG/POST_REG widget events of every widget attached to the
 * kcontrol.
 */
static void dapm_widget_update(struct snd_soc_card *card)
{
	struct snd_soc_dapm_update *update = card->update;
	struct snd_soc_dapm_widget_list *wlist;
	struct snd_soc_dapm_widget *w = NULL;
	unsigned int wi;
	int ret;

	if (!update || !dapm_kcontrol_is_powered(update->kcontrol))
		return;

	wlist = dapm_kcontrol_get_wlist(update->kcontrol);

	for_each_dapm_widgets(wlist, wi, w) {
		if (w->event && (w->event_flags & SND_SOC_DAPM_PRE_REG)) {
			ret = w->event(w, update->kcontrol,
				       SND_SOC_DAPM_PRE_REG);
			if (ret != 0)
				dev_err(w->dapm->dev, "ASoC: %s DAPM pre-event failed: %d\n",
					   w->name, ret);
		}
	}

	/* w stays NULL only when the widget list was empty. */
	if (!w)
		return;

	ret = soc_dapm_update_bits(w->dapm, update->reg, update->mask,
		update->val);
	if (ret < 0)
		dev_err(w->dapm->dev, "ASoC: %s DAPM update failed: %d\n",
			w->name, ret);

	if (update->has_second_set) {
		ret = soc_dapm_update_bits(w->dapm, update->reg2,
					   update->mask2, update->val2);
		if (ret < 0)
			dev_err(w->dapm->dev,
				"ASoC: %s DAPM update failed: %d\n",
				w->name, ret);
	}

	for_each_dapm_widgets(wlist, wi, w) {
		if (w->event && (w->event_flags & SND_SOC_DAPM_POST_REG)) {
			ret = w->event(w, update->kcontrol,
				       SND_SOC_DAPM_POST_REG);
			if (ret != 0)
				dev_err(w->dapm->dev, "ASoC: %s DAPM post-event failed: %d\n",
					   w->name, ret);
		}
	}
}

/* Async callback run prior to DAPM sequences - brings to _PREPARE if
 * they're changing state.
 */
static void dapm_pre_sequence_async(void *data, async_cookie_t cookie)
{
	struct snd_soc_dapm_context *d = data;
	int ret;

	/* If we're off and we're not supposed to go into STANDBY */
	if (d->bias_level == SND_SOC_BIAS_OFF &&
	    d->target_bias_level != SND_SOC_BIAS_OFF) {
		/* cookie is 0 for the card context, which skips runtime PM. */
		if (d->dev && cookie)
			pm_runtime_get_sync(d->dev);

		ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_STANDBY);
		if (ret != 0)
			dev_err(d->dev,
				"ASoC: Failed to turn on bias: %d\n", ret);
	}

	/* Prepare for a transition to ON or away from ON */
	if ((d->target_bias_level == SND_SOC_BIAS_ON &&
	     d->bias_level != SND_SOC_BIAS_ON) ||
	    (d->target_bias_level != SND_SOC_BIAS_ON &&
	     d->bias_level == SND_SOC_BIAS_ON)) {
		ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_PREPARE);
		if (ret != 0)
			dev_err(d->dev,
				"ASoC: Failed to prepare bias: %d\n", ret);
	}
}

/* Async callback run prior to DAPM sequences - brings to their final
 * state.
 */
static void dapm_post_sequence_async(void *data, async_cookie_t cookie)
{
	struct snd_soc_dapm_context *d = data;
	int ret;

	/* If we just powered the last thing off drop to standby bias */
	if (d->bias_level == SND_SOC_BIAS_PREPARE &&
	    (d->target_bias_level == SND_SOC_BIAS_STANDBY ||
	     d->target_bias_level == SND_SOC_BIAS_OFF)) {
		ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_STANDBY);
		if (ret != 0)
			dev_err(d->dev, "ASoC: Failed to apply standby bias: %d\n",
				ret);
	}

	/* If we're in standby and can support bias off then do that */
	if (d->bias_level == SND_SOC_BIAS_STANDBY &&
	    d->target_bias_level == SND_SOC_BIAS_OFF) {
		ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_OFF);
		if (ret != 0)
			dev_err(d->dev, "ASoC: Failed to turn off bias: %d\n",
				ret);

		/* Mirrors the pm_runtime_get_sync() in the pre-sequence. */
		if (d->dev && cookie)
			pm_runtime_put(d->dev);
	}

	/* If we just powered up then move to active bias */
	if (d->bias_level == SND_SOC_BIAS_PREPARE &&
	    d->target_bias_level == SND_SOC_BIAS_ON) {
		ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_ON);
		if (ret != 0)
			dev_err(d->dev, "ASoC: Failed to apply active bias: %d\n",
				ret);
	}
}

static void dapm_widget_set_peer_power(struct snd_soc_dapm_widget *peer,
				       bool power, bool connect)
{
	/* If a connection is being made or broken then that update
	 * will have marked the peer dirty, otherwise the widgets are
	 * not connected and this update has no impact. */
	if (!connect)
		return;

	/* If the peer is already in the state we're moving to then we
	 * won't have an impact on it. */
	if (power != peer->power)
		dapm_mark_dirty(peer, "peer state change");
}

/*
 * Re-evaluate one dirty widget's power state and queue it on the
 * appropriate power-up or power-down list; propagate dirtiness to
 * connected neighbours whose state may be affected.
 */
static void dapm_power_one_widget(struct snd_soc_dapm_widget *w,
				  struct list_head *up_list,
				  struct list_head *down_list)
{
	struct snd_soc_dapm_path *path;
	int power;

	switch (w->id) {
	case snd_soc_dapm_pre:
		/* Always queued: on the down list so they run on STREAM_STOP. */
		power = 0;
		goto end;
	case snd_soc_dapm_post:
		/* Always queued: on the up list so they run on STREAM_START. */
		power = 1;
		goto end;
	default:
		break;
	}

	power = dapm_widget_power_check(w);

	/* No state change - nothing to queue. */
	if (w->power == power)
		return;

	trace_snd_soc_dapm_widget_power(w, power);

	/*
	 * If we changed our power state perhaps our neigbours
	 * changed also.
	 */
	snd_soc_dapm_widget_for_each_source_path(w, path)
		dapm_widget_set_peer_power(path->source, power, path->connect);

	/*
	 * Supplies can't affect their outputs, only their inputs
	 */
	if (!w->is_supply)
		snd_soc_dapm_widget_for_each_sink_path(w, path)
			dapm_widget_set_peer_power(path->sink, power,
						   path->connect);

end:
	if (power)
		dapm_seq_insert(w, up_list, true);
	else
		dapm_seq_insert(w, down_list, false);
}

/*
 * Whether this context should drop to BIAS_OFF when idle: either it
 * always does, or it is configured to do so while the card is suspended.
 */
static bool dapm_idle_bias_off(struct snd_soc_dapm_context *dapm)
{
	if (dapm->idle_bias_off)
		return true;

	switch (snd_power_get_state(dapm->card->snd_card)) {
	case SNDRV_CTL_POWER_D3hot:
	case SNDRV_CTL_POWER_D3cold:
		return dapm->suspend_bias_off;
	default:
		break;
	}

	return false;
}

/*
 * Scan each dapm widget for complete audio path.
 * A complete path is a route that has valid endpoints i.e.:-
 *
 *  o DAC to output pin.
 *  o Input pin to ADC.
 *  o Input pin to Output pin (bypass, sidetone)
 *  o DAC to ADC (loopback).
 */
static int dapm_power_widgets(struct snd_soc_card *card, int event)
{
	struct snd_soc_dapm_widget *w;
	struct snd_soc_dapm_context *d;
	LIST_HEAD(up_list);
	LIST_HEAD(down_list);
	ASYNC_DOMAIN_EXCLUSIVE(async_domain);
	enum snd_soc_bias_level bias;
	int ret;

	snd_soc_dapm_mutex_assert_held(card);

	trace_snd_soc_dapm_start(card);

	/* Start from the lowest acceptable bias for every context. */
	for_each_card_dapms(card, d) {
		if (dapm_idle_bias_off(d))
			d->target_bias_level = SND_SOC_BIAS_OFF;
		else
			d->target_bias_level = SND_SOC_BIAS_STANDBY;
	}

	dapm_reset(card);

	/* Check which widgets we need to power and store them in
	 * lists indicating if they should be powered up or down.  We
	 * only check widgets that have been flagged as dirty but note
	 * that new widgets may be added to the dirty list while we
	 * iterate.
	 */
	list_for_each_entry(w, &card->dapm_dirty, dirty) {
		dapm_power_one_widget(w, &up_list, &down_list);
	}

	for_each_card_widgets(card, w) {
		switch (w->id) {
		case snd_soc_dapm_pre:
		case snd_soc_dapm_post:
			/* These widgets always need to be powered */
			break;
		default:
			list_del_init(&w->dirty);
			break;
		}

		if (w->new_power) {
			d = w->dapm;

			/* Supplies and micbiases only bring the
			 * context up to STANDBY as unless something
			 * else is active and passing audio they
			 * generally don't require full power.  Signal
			 * generators are virtual pins and have no
			 * power impact themselves.
			 */
			switch (w->id) {
			case snd_soc_dapm_siggen:
			case snd_soc_dapm_vmid:
				break;
			case snd_soc_dapm_supply:
			case snd_soc_dapm_regulator_supply:
			case snd_soc_dapm_pinctrl:
			case snd_soc_dapm_clock_supply:
			case snd_soc_dapm_micbias:
				if (d->target_bias_level < SND_SOC_BIAS_STANDBY)
					d->target_bias_level = SND_SOC_BIAS_STANDBY;
				break;
			default:
				d->target_bias_level = SND_SOC_BIAS_ON;
				break;
			}
		}

	}

	/* Force all contexts in the card to the same bias state if
	 * they're not ground referenced.
	 */
	bias = SND_SOC_BIAS_OFF;
	for_each_card_dapms(card, d)
		if (d->target_bias_level > bias)
			bias = d->target_bias_level;
	for_each_card_dapms(card, d)
		if (!dapm_idle_bias_off(d))
			d->target_bias_level = bias;

	trace_snd_soc_dapm_walk_done(card);

	/* Run card bias changes at first */
	dapm_pre_sequence_async(&card->dapm, 0);
	/* Run other bias changes in parallel */
	for_each_card_dapms(card, d) {
		if (d != &card->dapm && d->bias_level != d->target_bias_level)
			async_schedule_domain(dapm_pre_sequence_async, d,
						&async_domain);
	}
	async_synchronize_full_domain(&async_domain);

	list_for_each_entry(w, &down_list, power_list) {
		dapm_seq_check_event(card, w, SND_SOC_DAPM_WILL_PMD);
	}

	list_for_each_entry(w, &up_list, power_list) {
		dapm_seq_check_event(card, w, SND_SOC_DAPM_WILL_PMU);
	}

	/* Power down widgets first; try to avoid amplifying pops. */
	dapm_seq_run(card, &down_list, event, false);

	dapm_widget_update(card);

	/* Now power up. */
	dapm_seq_run(card, &up_list, event, true);

	/* Run all the bias changes in parallel */
	for_each_card_dapms(card, d) {
		if (d != &card->dapm && d->bias_level != d->target_bias_level)
			async_schedule_domain(dapm_post_sequence_async, d,
						&async_domain);
	}
	async_synchronize_full_domain(&async_domain);
	/* Run card bias changes at last */
	dapm_post_sequence_async(&card->dapm, 0);

	/* do we need to notify any clients that DAPM event is complete */
	for_each_card_dapms(card, d) {
		if (!d->component)
			continue;

		ret = snd_soc_component_stream_event(d->component, event);
		if (ret < 0)
			return ret;
	}

	pop_dbg(card->dev, card->pop_time,
		"DAPM sequencing finished, waiting %dms\n", card->pop_time);
	pop_wait(card->pop_time);

	trace_snd_soc_dapm_done(card);

	return 0;
}

#ifdef CONFIG_DEBUG_FS
/* debugfs read: dump one widget's power state, register and paths. */
static ssize_t dapm_widget_power_read_file(struct file *file,
					   char __user *user_buf,
					   size_t count, loff_t *ppos)
{
	struct snd_soc_dapm_widget *w = file->private_data;
	enum snd_soc_dapm_direction dir, rdir;
	char *buf;
	int in, out;
	ssize_t ret;
	struct snd_soc_dapm_path *p = NULL;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	snd_soc_dapm_mutex_lock_root(w->dapm);

	/* Supply widgets are not handled by is_connected_{input,output}_ep() */
	if (w->is_supply) {
		in = 0;
		out = 0;
	} else {
		in = is_connected_input_ep(w, NULL, NULL);
		out = is_connected_output_ep(w, NULL, NULL);
	}

	ret = scnprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d",
		       w->name, w->power ? "On" : "Off",
		       w->force ? " (forced)" : "", in, out);

	if (w->reg >= 0)
		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
				" - R%d(0x%x) mask 0x%x",
				w->reg, w->reg, w->mask << w->shift);

	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");

	if (w->sname)
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n",
				w->sname,
				w->active ? "active" : "inactive");

	snd_soc_dapm_for_each_direction(dir) {
		rdir = SND_SOC_DAPM_DIR_REVERSE(dir);
		snd_soc_dapm_widget_for_each_path(w, dir, p) {
			if (p->connected && !p->connected(p->source, p->sink))
				continue;

			if (!p->connect)
				continue;

			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					" %s  \"%s\" \"%s\"\n",
					(rdir == SND_SOC_DAPM_DIR_IN) ? "in" : "out",
					p->name ? p->name : "static",
					p->node[rdir]->name);
		}
	}

	snd_soc_dapm_mutex_unlock(w->dapm);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);

	kfree(buf);
	return ret;
}

static const struct file_operations dapm_widget_power_fops = {
	.open = simple_open,
	.read = dapm_widget_power_read_file,
	.llseek = default_llseek,
};

/* debugfs read: report this context's current bias level. */
static ssize_t dapm_bias_read_file(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct snd_soc_dapm_context *dapm = file->private_data;
	char *level;

	switch (dapm->bias_level) {
	case SND_SOC_BIAS_ON:
		level = "On\n";
		break;
	case SND_SOC_BIAS_PREPARE:
		level = "Prepare\n";
		break;
	case SND_SOC_BIAS_STANDBY:
		level = "Standby\n";
		break;
	case SND_SOC_BIAS_OFF:
		level = "Off\n";
		break;
	default:
		WARN(1, "Unknown bias_level %d\n", dapm->bias_level);
		level = "Unknown\n";
		break;
	}

	return simple_read_from_buffer(user_buf, count, ppos, level,
				       strlen(level));
}

static const struct file_operations dapm_bias_fops = {
	.open = simple_open,
	.read = dapm_bias_read_file,
	.llseek = default_llseek,
};

/* Create the per-context "dapm" debugfs directory and bias_level file. */
void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm,
	struct dentry *parent)
{
	if (!parent || IS_ERR(parent))
		return;

	dapm->debugfs_dapm = debugfs_create_dir("dapm", parent);

	debugfs_create_file("bias_level", 0444, dapm->debugfs_dapm, dapm,
			    &dapm_bias_fops);
}

static void dapm_debugfs_add_widget(struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_context *dapm = w->dapm;

	if (!dapm->debugfs_dapm || !w->name)
		return;

	debugfs_create_file(w->name, 0444, dapm->debugfs_dapm, w,
			    &dapm_widget_power_fops);
}

static void dapm_debugfs_free_widget(struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_context *dapm = w->dapm;

	if (!dapm->debugfs_dapm || !w->name)
		return;

	debugfs_lookup_and_remove(w->name, dapm->debugfs_dapm);
}

static void dapm_debugfs_cleanup(struct snd_soc_dapm_context *dapm)
{
	debugfs_remove_recursive(dapm->debugfs_dapm);
	dapm->debugfs_dapm = NULL;
}

#else
/* CONFIG_DEBUG_FS disabled: all debugfs hooks are no-ops. */
void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm,
	struct dentry *parent)
{
}

static inline void dapm_debugfs_add_widget(struct snd_soc_dapm_widget *w)
{
}

static inline void dapm_debugfs_free_widget(struct snd_soc_dapm_widget *w)
{
}

static inline void dapm_debugfs_cleanup(struct snd_soc_dapm_context *dapm)
{
}

#endif

/*
 * soc_dapm_connect_path() - Connects or disconnects a path
 * @path: The path to update
 * @connect: The new connect state of the path. True if the path is connected,
 *  false if it is disconnected.
 * @reason: The reason why the path changed (for debugging only)
 */
static void soc_dapm_connect_path(struct snd_soc_dapm_path *path,
	bool connect, const char *reason)
{
	if (path->connect == connect)
		return;

	path->connect = connect;
	dapm_mark_dirty(path->source, reason);
	dapm_mark_dirty(path->sink, reason);
	dapm_path_invalidate(path);
}

/* test and update the power status of a mux widget */
static int soc_dapm_mux_update_power(struct snd_soc_card *card,
				 struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e)
{
	struct snd_soc_dapm_path *path;
	int found = 0;
	bool connect;

	snd_soc_dapm_mutex_assert_held(card);

	/* find dapm widget path assoc with kcontrol */
	dapm_kcontrol_for_each_path(path, kcontrol) {
		found = 1;
		/* we now need to match the string in the enum to the path */
		if (e && !(strcmp(path->name, e->texts[mux])))
			connect = true;
		else
			connect = false;

		soc_dapm_connect_path(path, connect, "mux update");
	}

	if (found)
		dapm_power_widgets(card, SND_SOC_DAPM_STREAM_NOP);

	return found;
}

/* Locked wrapper around soc_dapm_mux_update_power(); installs the
 * register update on card->update for the duration of the run.
 */
int snd_soc_dapm_mux_update_power(struct snd_soc_dapm_context *dapm,
	struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e,
	struct snd_soc_dapm_update *update)
{
	struct snd_soc_card *card = dapm->card;
	int ret;

	snd_soc_dapm_mutex_lock(card);
	card->update = update;
	ret = soc_dapm_mux_update_power(card, kcontrol, mux, e);
	card->update = NULL;
	snd_soc_dapm_mutex_unlock(card);
	if (ret > 0)
		snd_soc_dpcm_runtime_update(card);
	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_mux_update_power);

/* test and update the power status of a
 mixer or switch widget */
static int soc_dapm_mixer_update_power(struct snd_soc_card *card,
				   struct snd_kcontrol *kcontrol,
				   int connect, int rconnect)
{
	struct snd_soc_dapm_path *path;
	int found = 0;

	snd_soc_dapm_mutex_assert_held(card);

	/* find dapm widget path assoc with kcontrol */
	dapm_kcontrol_for_each_path(path, kcontrol) {
		/*
		 * Ideally this function should support any number of
		 * paths and channels. But since kcontrols only come
		 * in mono and stereo variants, we are limited to 2
		 * channels.
		 *
		 * The following code assumes for stereo controls the
		 * first path (when 'found == 0') is the left channel,
		 * and all remaining paths (when 'found == 1') are the
		 * right channel.
		 *
		 * A stereo control is signified by a valid 'rconnect'
		 * value, either 0 for unconnected, or >= 0 for connected.
		 * This is chosen instead of using snd_soc_volsw_is_stereo,
		 * so that the behavior of snd_soc_dapm_mixer_update_power
		 * doesn't change even when the kcontrol passed in is
		 * stereo.
		 *
		 * It passes 'connect' as the path connect status for
		 * the left channel, and 'rconnect' for the right
		 * channel.
		 */
		if (found && rconnect >= 0)
			soc_dapm_connect_path(path, rconnect, "mixer update");
		else
			soc_dapm_connect_path(path, connect, "mixer update");
		found = 1;
	}

	if (found)
		dapm_power_widgets(card, SND_SOC_DAPM_STREAM_NOP);

	return found;
}

/* Locked wrapper around soc_dapm_mixer_update_power(); rconnect of -1
 * means "mono control".
 */
int snd_soc_dapm_mixer_update_power(struct snd_soc_dapm_context *dapm,
	struct snd_kcontrol *kcontrol, int connect,
	struct snd_soc_dapm_update *update)
{
	struct snd_soc_card *card = dapm->card;
	int ret;

	snd_soc_dapm_mutex_lock(card);
	card->update = update;
	ret = soc_dapm_mixer_update_power(card, kcontrol, connect, -1);
	card->update = NULL;
	snd_soc_dapm_mutex_unlock(card);
	if (ret > 0)
		snd_soc_dpcm_runtime_update(card);
	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_mixer_update_power);

/* Append one component's power-consuming widget states to a sysfs buffer. */
static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
					  char *buf, int count)
{
	struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(cmpnt);
	struct snd_soc_dapm_widget *w;
	char *state = "not set";

	/* card won't be set for the dummy component, as a spot fix
	 * we're checking for that case specifically here but in future
	 * we will ensure that the dummy component looks like others.
	 */
	if (!cmpnt->card)
		return 0;

	for_each_card_widgets(cmpnt->card, w) {
		if (w->dapm != dapm)
			continue;

		/* only display widgets that burn power */
		switch (w->id) {
		case snd_soc_dapm_hp:
		case snd_soc_dapm_mic:
		case snd_soc_dapm_spk:
		case snd_soc_dapm_line:
		case snd_soc_dapm_micbias:
		case snd_soc_dapm_dac:
		case snd_soc_dapm_adc:
		case snd_soc_dapm_pga:
		case snd_soc_dapm_effect:
		case snd_soc_dapm_out_drv:
		case snd_soc_dapm_mixer:
		case snd_soc_dapm_mixer_named_ctl:
		case snd_soc_dapm_supply:
		case snd_soc_dapm_regulator_supply:
		case snd_soc_dapm_pinctrl:
		case snd_soc_dapm_clock_supply:
			if (w->name)
				count += sysfs_emit_at(buf, count, "%s: %s\n",
					w->name, w->power ? "On":"Off");
			break;
		default:
			break;
		}
	}

	switch (snd_soc_dapm_get_bias_level(dapm)) {
	case SND_SOC_BIAS_ON:
		state = "On";
		break;
	case SND_SOC_BIAS_PREPARE:
		state = "Prepare";
		break;
	case SND_SOC_BIAS_STANDBY:
		state = "Standby";
		break;
	case SND_SOC_BIAS_OFF:
		state = "Off";
		break;
	}
	count += sysfs_emit_at(buf, count, "PM State: %s\n", state);

	return count;
}

/* show dapm widget status in sys fs */
static ssize_t dapm_widget_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
	struct snd_soc_dai *codec_dai;
	int i, count = 0;

	snd_soc_dapm_mutex_lock_root(rtd->card);

	for_each_rtd_codec_dais(rtd, i, codec_dai) {
		struct snd_soc_component *cmpnt = codec_dai->component;

		count = dapm_widget_show_component(cmpnt, buf, count);
	}

	snd_soc_dapm_mutex_unlock(rtd->card);

	return count;
}

static DEVICE_ATTR_RO(dapm_widget);

struct attribute *soc_dapm_dev_attrs[] = {
	&dev_attr_dapm_widget.attr,
	NULL
};

/* Unlink a path from every list it is on and free it. */
static void dapm_free_path(struct snd_soc_dapm_path *path)
{
	list_del(&path->list_node[SND_SOC_DAPM_DIR_IN]);
	list_del(&path->list_node[SND_SOC_DAPM_DIR_OUT]);
	list_del(&path->list_kcontrol);
	list_del(&path->list);
	kfree(path);
}

/**
 * snd_soc_dapm_free_widget - Free specified widget
 * @w: widget to free
 *
 * Removes widget from all paths and frees memory occupied by it.
 */
void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p, *next_p;
	enum snd_soc_dapm_direction dir;

	if (!w)
		return;

	list_del(&w->list);
	list_del(&w->dirty);
	/*
	 * remove source and sink paths associated to this widget.
	 * While removing the path, remove reference to it from both
	 * source and sink widgets so that path is removed only once.
	 */
	snd_soc_dapm_for_each_direction(dir) {
		snd_soc_dapm_widget_for_each_path_safe(w, dir, p, next_p)
			dapm_free_path(p);
	}

	dapm_debugfs_free_widget(w);

	kfree(w->kcontrols);
	kfree_const(w->name);
	kfree_const(w->sname);
	kfree(w);
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_free_widget);

/* free all dapm widgets and resources */
static void dapm_free_widgets(struct snd_soc_dapm_context *dapm)
{
	struct snd_soc_dapm_widget *w, *next_w;

	for_each_card_widgets_safe(dapm->card, w, next_w) {
		if (w->dapm != dapm)
			continue;
		snd_soc_dapm_free_widget(w);
	}

	/* The freed widgets may have been cached; drop the cache. */
	dapm->wcache_sink = NULL;
	dapm->wcache_source = NULL;
}

/*
 * Look up a widget by pin name in this context, honouring the context's
 * name prefix; optionally fall back to a same-named widget from another
 * context on the card.
 */
static struct snd_soc_dapm_widget *dapm_find_widget(
			struct snd_soc_dapm_context *dapm, const char *pin,
			bool search_other_contexts)
{
	struct snd_soc_dapm_widget *w;
	struct snd_soc_dapm_widget *fallback = NULL;
	char prefixed_pin[80];
	const char *pin_name;
	const char *prefix = soc_dapm_prefix(dapm);

	if (prefix) {
		snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
			 prefix, pin);
		pin_name = prefixed_pin;
	} else {
		pin_name = pin;
	}

	for_each_card_widgets(dapm->card, w) {
		if (!strcmp(w->name, pin_name)) {
			if (w->dapm == dapm)
				return w;
			else
				fallback = w;
		}
	}

	if (search_other_contexts)
		return fallback;

	return NULL;
}

/*
 * set the DAPM pin status:
 * returns 1 when the value has been updated, 0 when unchanged, or a negative
 * error code; called from kcontrol put callback
 */
static int __snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
				  const char *pin, int status)
{
	struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);
	int ret = 0;

	dapm_assert_locked(dapm);

	if (!w) {
		dev_err(dapm->dev, "ASoC: DAPM unknown pin %s\n", pin);
		return -EINVAL;
	}

	if (w->connected != status) {
		dapm_mark_dirty(w, "pin configuration");
		dapm_widget_invalidate_input_paths(w);
		dapm_widget_invalidate_output_paths(w);
		ret = 1;
	}

	w->connected = status;
	/* Disconnecting a pin also clears any force-enable. */
	if (status == 0)
		w->force = 0;

	return ret;
}

/*
 * similar as __snd_soc_dapm_set_pin(), but returns 0 when successful;
 * called from several API functions
 below */
static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
				const char *pin, int status)
{
	int ret = __snd_soc_dapm_set_pin(dapm, pin, status);

	/* Collapse the "changed" indication to plain success. */
	return ret < 0 ? ret : 0;
}

/**
 * snd_soc_dapm_sync_unlocked - scan and power dapm paths
 * @dapm: DAPM context
 *
 * Walks all dapm audio paths and powers widgets according to their
 * stream or path usage.
 *
 * Requires external locking.
 *
 * Returns 0 for success.
 */
int snd_soc_dapm_sync_unlocked(struct snd_soc_dapm_context *dapm)
{
	/*
	 * Suppress early reports (eg, jacks syncing their state) to avoid
	 * silly DAPM runs during card startup.
	 */
	if (!snd_soc_card_is_instantiated(dapm->card))
		return 0;

	return dapm_power_widgets(dapm->card, SND_SOC_DAPM_STREAM_NOP);
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_sync_unlocked);

/**
 * snd_soc_dapm_sync - scan and power dapm paths
 * @dapm: DAPM context
 *
 * Walks all dapm audio paths and powers widgets according to their
 * stream or path usage.
 *
 * Returns 0 for success.
 */
int snd_soc_dapm_sync(struct snd_soc_dapm_context *dapm)
{
	int ret;

	snd_soc_dapm_mutex_lock(dapm);
	ret = snd_soc_dapm_sync_unlocked(dapm);
	snd_soc_dapm_mutex_unlock(dapm);
	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_sync);

/*
 * Connect or disconnect a single per-channel AIF path depending on
 * whether the widget's channel is within the stream's channel count.
 */
static int dapm_update_dai_chan(struct snd_soc_dapm_path *p,
				struct snd_soc_dapm_widget *w,
				int channels)
{
	/* Only per-channel AIF widgets are affected. */
	switch (w->id) {
	case snd_soc_dapm_aif_out:
	case snd_soc_dapm_aif_in:
		break;
	default:
		return 0;
	}

	dev_dbg(w->dapm->dev, "%s DAI route %s -> %s\n",
		w->channel < channels ? "Connecting" : "Disconnecting",
		p->source->name, p->sink->name);

	if (w->channel < channels)
		soc_dapm_connect_path(p, true, "dai update");
	else
		soc_dapm_connect_path(p, false, "dai update");

	return 0;
}

/* Re-evaluate all per-channel routes of a DAI widget against the
 * channel count negotiated in hw_params.
 */
static int dapm_update_dai_unlocked(struct snd_pcm_substream *substream,
				    struct snd_pcm_hw_params *params,
				    struct snd_soc_dai *dai)
{
	int dir = substream->stream;
	int channels = params_channels(params);
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *w;
	int ret;

	w = snd_soc_dai_get_widget(dai, dir);
	if (!w)
		return 0;

	dev_dbg(dai->dev, "Update DAI routes for %s %s\n", dai->name,
		dir == SNDRV_PCM_STREAM_PLAYBACK ? "playback" : "capture");

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		ret = dapm_update_dai_chan(p, p->sink, channels);
		if (ret < 0)
			return ret;
	}

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		ret = dapm_update_dai_chan(p, p->source, channels);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Locked wrapper around dapm_update_dai_unlocked(). */
int snd_soc_dapm_update_dai(struct snd_pcm_substream *substream,
			    struct snd_pcm_hw_params *params,
			    struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	int ret;

	snd_soc_dapm_mutex_lock(rtd->card);
	ret = dapm_update_dai_unlocked(substream, params, dai);
	snd_soc_dapm_mutex_unlock(rtd->card);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_update_dai);

/*
 * dapm_update_widget_flags() - Re-compute widget sink and source flags
 * @w: The widget for which to update the flags
 *
 * Some widgets have a dynamic category which depends on which neighbors they
 * are connected to. This function update the category for these widgets.
 *
 * This function must be called whenever a path is added or removed to a widget.
*/ static void dapm_update_widget_flags(struct snd_soc_dapm_widget *w) { enum snd_soc_dapm_direction dir; struct snd_soc_dapm_path *p; unsigned int ep; switch (w->id) { case snd_soc_dapm_input: /* On a fully routed card an input is never a source */ if (w->dapm->card->fully_routed) return; ep = SND_SOC_DAPM_EP_SOURCE; snd_soc_dapm_widget_for_each_source_path(w, p) { if (p->source->id == snd_soc_dapm_micbias || p->source->id == snd_soc_dapm_mic || p->source->id == snd_soc_dapm_line || p->source->id == snd_soc_dapm_output) { ep = 0; break; } } break; case snd_soc_dapm_output: /* On a fully routed card a output is never a sink */ if (w->dapm->card->fully_routed) return; ep = SND_SOC_DAPM_EP_SINK; snd_soc_dapm_widget_for_each_sink_path(w, p) { if (p->sink->id == snd_soc_dapm_spk || p->sink->id == snd_soc_dapm_hp || p->sink->id == snd_soc_dapm_line || p->sink->id == snd_soc_dapm_input) { ep = 0; break; } } break; case snd_soc_dapm_line: ep = 0; snd_soc_dapm_for_each_direction(dir) { if (!list_empty(&w->edges[dir])) ep |= SND_SOC_DAPM_DIR_TO_EP(dir); } break; default: return; } w->is_ep = ep; } static int snd_soc_dapm_check_dynamic_path(struct snd_soc_dapm_context *dapm, struct snd_soc_dapm_widget *source, struct snd_soc_dapm_widget *sink, const char *control) { bool dynamic_source = false; bool dynamic_sink = false; if (!control) return 0; switch (source->id) { case snd_soc_dapm_demux: dynamic_source = true; break; default: break; } switch (sink->id) { case snd_soc_dapm_mux: case snd_soc_dapm_switch: case snd_soc_dapm_mixer: case snd_soc_dapm_mixer_named_ctl: dynamic_sink = true; break; default: break; } if (dynamic_source && dynamic_sink) { dev_err(dapm->dev, "Direct connection between demux and mixer/mux not supported for path %s -> [%s] -> %s\n", source->name, control, sink->name); return -EINVAL; } else if (!dynamic_source && !dynamic_sink) { dev_err(dapm->dev, "Control not supported for path %s -> [%s] -> %s\n", source->name, control, sink->name); return -EINVAL; } 
return 0; } static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm, struct snd_soc_dapm_widget *wsource, struct snd_soc_dapm_widget *wsink, const char *control, int (*connected)(struct snd_soc_dapm_widget *source, struct snd_soc_dapm_widget *sink)) { enum snd_soc_dapm_direction dir; struct snd_soc_dapm_path *path; int ret; if (wsink->is_supply && !wsource->is_supply) { dev_err(dapm->dev, "Connecting non-supply widget to supply widget is not supported (%s -> %s)\n", wsource->name, wsink->name); return -EINVAL; } if (connected && !wsource->is_supply) { dev_err(dapm->dev, "connected() callback only supported for supply widgets (%s -> %s)\n", wsource->name, wsink->name); return -EINVAL; } if (wsource->is_supply && control) { dev_err(dapm->dev, "Conditional paths are not supported for supply widgets (%s -> [%s] -> %s)\n", wsource->name, control, wsink->name); return -EINVAL; } ret = snd_soc_dapm_check_dynamic_path(dapm, wsource, wsink, control); if (ret) return ret; path = kzalloc(sizeof(struct snd_soc_dapm_path), GFP_KERNEL); if (!path) return -ENOMEM; path->node[SND_SOC_DAPM_DIR_IN] = wsource; path->node[SND_SOC_DAPM_DIR_OUT] = wsink; path->connected = connected; INIT_LIST_HEAD(&path->list); INIT_LIST_HEAD(&path->list_kcontrol); if (wsource->is_supply || wsink->is_supply) path->is_supply = 1; /* connect static paths */ if (control == NULL) { path->connect = 1; } else { switch (wsource->id) { case snd_soc_dapm_demux: ret = dapm_connect_mux(dapm, path, control, wsource); if (ret) goto err; break; default: break; } switch (wsink->id) { case snd_soc_dapm_mux: ret = dapm_connect_mux(dapm, path, control, wsink); if (ret != 0) goto err; break; case snd_soc_dapm_switch: case snd_soc_dapm_mixer: case snd_soc_dapm_mixer_named_ctl: ret = dapm_connect_mixer(dapm, path, control); if (ret != 0) goto err; break; default: break; } } list_add(&path->list, &dapm->card->paths); snd_soc_dapm_for_each_direction(dir) list_add(&path->list_node[dir], 
&path->node[dir]->edges[dir]); snd_soc_dapm_for_each_direction(dir) { dapm_update_widget_flags(path->node[dir]); dapm_mark_dirty(path->node[dir], "Route added"); } if (snd_soc_card_is_instantiated(dapm->card) && path->connect) dapm_path_invalidate(path); return 0; err: kfree(path); return ret; } static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm, const struct snd_soc_dapm_route *route) { struct snd_soc_dapm_widget *wsource = NULL, *wsink = NULL, *w; struct snd_soc_dapm_widget *wtsource = NULL, *wtsink = NULL; const char *sink; const char *source; char prefixed_sink[80]; char prefixed_source[80]; const char *prefix; unsigned int sink_ref = 0; unsigned int source_ref = 0; int ret; prefix = soc_dapm_prefix(dapm); if (prefix) { snprintf(prefixed_sink, sizeof(prefixed_sink), "%s %s", prefix, route->sink); sink = prefixed_sink; snprintf(prefixed_source, sizeof(prefixed_source), "%s %s", prefix, route->source); source = prefixed_source; } else { sink = route->sink; source = route->source; } wsource = dapm_wcache_lookup(dapm->wcache_source, source); wsink = dapm_wcache_lookup(dapm->wcache_sink, sink); if (wsink && wsource) goto skip_search; /* * find src and dest widgets over all widgets but favor a widget from * current DAPM context */ for_each_card_widgets(dapm->card, w) { if (!wsink && !(strcmp(w->name, sink))) { wtsink = w; if (w->dapm == dapm) { wsink = w; if (wsource) break; } sink_ref++; if (sink_ref > 1) dev_warn(dapm->dev, "ASoC: sink widget %s overwritten\n", w->name); continue; } if (!wsource && !(strcmp(w->name, source))) { wtsource = w; if (w->dapm == dapm) { wsource = w; if (wsink) break; } source_ref++; if (source_ref > 1) dev_warn(dapm->dev, "ASoC: source widget %s overwritten\n", w->name); } } /* use widget from another DAPM context if not found from this */ if (!wsink) wsink = wtsink; if (!wsource) wsource = wtsource; ret = -ENODEV; if (!wsource) goto err; if (!wsink) goto err; skip_search: /* update cache */ dapm->wcache_sink = wsink; 
dapm->wcache_source = wsource; ret = snd_soc_dapm_add_path(dapm, wsource, wsink, route->control, route->connected); err: if (ret) dev_err(dapm->dev, "ASoC: Failed to add route %s%s -%s%s%s> %s%s\n", source, !wsource ? "(*)" : "", !route->control ? "" : "> [", !route->control ? "" : route->control, !route->control ? "" : "] -", sink, !wsink ? "(*)" : ""); return ret; } static int snd_soc_dapm_del_route(struct snd_soc_dapm_context *dapm, const struct snd_soc_dapm_route *route) { struct snd_soc_dapm_path *path, *p; const char *sink; const char *source; char prefixed_sink[80]; char prefixed_source[80]; const char *prefix; if (route->control) { dev_err(dapm->dev, "ASoC: Removal of routes with controls not supported\n"); return -EINVAL; } prefix = soc_dapm_prefix(dapm); if (prefix) { snprintf(prefixed_sink, sizeof(prefixed_sink), "%s %s", prefix, route->sink); sink = prefixed_sink; snprintf(prefixed_source, sizeof(prefixed_source), "%s %s", prefix, route->source); source = prefixed_source; } else { sink = route->sink; source = route->source; } path = NULL; list_for_each_entry(p, &dapm->card->paths, list) { if (strcmp(p->source->name, source) != 0) continue; if (strcmp(p->sink->name, sink) != 0) continue; path = p; break; } if (path) { struct snd_soc_dapm_widget *wsource = path->source; struct snd_soc_dapm_widget *wsink = path->sink; dapm_mark_dirty(wsource, "Route removed"); dapm_mark_dirty(wsink, "Route removed"); if (path->connect) dapm_path_invalidate(path); dapm_free_path(path); /* Update any path related flags */ dapm_update_widget_flags(wsource); dapm_update_widget_flags(wsink); } else { dev_warn(dapm->dev, "ASoC: Route %s->%s does not exist\n", source, sink); } return 0; } /** * snd_soc_dapm_add_routes - Add routes between DAPM widgets * @dapm: DAPM context * @route: audio routes * @num: number of routes * * Connects 2 dapm widgets together via a named audio path. 
The sink is * the widget receiving the audio signal, whilst the source is the sender * of the audio signal. * * Returns 0 for success else error. On error all resources can be freed * with a call to snd_soc_card_free(). */ int snd_soc_dapm_add_routes(struct snd_soc_dapm_context *dapm, const struct snd_soc_dapm_route *route, int num) { int i, ret = 0; snd_soc_dapm_mutex_lock(dapm); for (i = 0; i < num; i++) { int r = snd_soc_dapm_add_route(dapm, route); if (r < 0) ret = r; route++; } snd_soc_dapm_mutex_unlock(dapm); return ret; } EXPORT_SYMBOL_GPL(snd_soc_dapm_add_routes); /** * snd_soc_dapm_del_routes - Remove routes between DAPM widgets * @dapm: DAPM context * @route: audio routes * @num: number of routes * * Removes routes from the DAPM context. */ int snd_soc_dapm_del_routes(struct snd_soc_dapm_context *dapm, const struct snd_soc_dapm_route *route, int num) { int i; snd_soc_dapm_mutex_lock(dapm); for (i = 0; i < num; i++) { snd_soc_dapm_del_route(dapm, route); route++; } snd_soc_dapm_mutex_unlock(dapm); return 0; } EXPORT_SYMBOL_GPL(snd_soc_dapm_del_routes); static int snd_soc_dapm_weak_route(struct snd_soc_dapm_context *dapm, const struct snd_soc_dapm_route *route) { struct snd_soc_dapm_widget *source = dapm_find_widget(dapm, route->source, true); struct snd_soc_dapm_widget *sink = dapm_find_widget(dapm, route->sink, true); struct snd_soc_dapm_path *path; int count = 0; if (!source) { dev_err(dapm->dev, "ASoC: Unable to find source %s for weak route\n", route->source); return -ENODEV; } if (!sink) { dev_err(dapm->dev, "ASoC: Unable to find sink %s for weak route\n", route->sink); return -ENODEV; } if (route->control || route->connected) dev_warn(dapm->dev, "ASoC: Ignoring control for weak route %s->%s\n", route->source, route->sink); snd_soc_dapm_widget_for_each_sink_path(source, path) { if (path->sink == sink) { path->weak = 1; count++; } } if (count == 0) dev_err(dapm->dev, "ASoC: No path found for weak route %s->%s\n", route->source, route->sink); if (count 
> 1) dev_warn(dapm->dev, "ASoC: %d paths found for weak route %s->%s\n", count, route->source, route->sink); return 0; } /** * snd_soc_dapm_weak_routes - Mark routes between DAPM widgets as weak * @dapm: DAPM context * @route: audio routes * @num: number of routes * * Mark existing routes matching those specified in the passed array * as being weak, meaning that they are ignored for the purpose of * power decisions. The main intended use case is for sidetone paths * which couple audio between other independent paths if they are both * active in order to make the combination work better at the user * level but which aren't intended to be "used". * * Note that CODEC drivers should not use this as sidetone type paths * can frequently also be used as bypass paths. */ int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm, const struct snd_soc_dapm_route *route, int num) { int i; int ret = 0; snd_soc_dapm_mutex_lock_root(dapm); for (i = 0; i < num; i++) { int err = snd_soc_dapm_weak_route(dapm, route); if (err) ret = err; route++; } snd_soc_dapm_mutex_unlock(dapm); return ret; } EXPORT_SYMBOL_GPL(snd_soc_dapm_weak_routes); /** * snd_soc_dapm_new_widgets - add new dapm widgets * @card: card to be checked for new dapm widgets * * Checks the codec for any new dapm widgets and creates them if found. * * Returns 0 for success. 
 */
int snd_soc_dapm_new_widgets(struct snd_soc_card *card)
{
	struct snd_soc_dapm_widget *w;
	unsigned int val;

	snd_soc_dapm_mutex_lock_root(card);

	for_each_card_widgets(card, w) {
		/* skip widgets that have already been instantiated */
		if (w->new)
			continue;

		if (w->num_kcontrols) {
			w->kcontrols = kcalloc(w->num_kcontrols,
					       sizeof(struct snd_kcontrol *),
					       GFP_KERNEL);
			if (!w->kcontrols) {
				snd_soc_dapm_mutex_unlock(card);
				return -ENOMEM;
			}
		}

		/* create the per-type kcontrols for the widget */
		switch (w->id) {
		case snd_soc_dapm_switch:
		case snd_soc_dapm_mixer:
		case snd_soc_dapm_mixer_named_ctl:
			dapm_new_mixer(w);
			break;
		case snd_soc_dapm_mux:
		case snd_soc_dapm_demux:
			dapm_new_mux(w);
			break;
		case snd_soc_dapm_pga:
		case snd_soc_dapm_effect:
		case snd_soc_dapm_out_drv:
			dapm_new_pga(w);
			break;
		case snd_soc_dapm_dai_link:
			dapm_new_dai_link(w);
			break;
		default:
			break;
		}

		/* Read the initial power state from the device */
		if (w->reg >= 0) {
			val = soc_dapm_read(w->dapm, w->reg);
			val = val >> w->shift;
			val &= w->mask;
			if (val == w->on_val)
				w->power = 1;
		}

		w->new = 1;

		dapm_mark_dirty(w, "new widget");
		dapm_debugfs_add_widget(w);
	}

	/* power everything now that the new widgets are wired in */
	dapm_power_widgets(card, SND_SOC_DAPM_STREAM_NOP);
	snd_soc_dapm_mutex_unlock(card);
	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_new_widgets);

/**
 * snd_soc_dapm_get_volsw - dapm mixer get callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to get the value of a dapm mixer control.
 *
 * Returns 0 for success.
 */
int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
			   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	int reg = mc->reg;
	unsigned int shift = mc->shift;
	int max = mc->max;
	unsigned int width = fls(max);
	unsigned int mask = (1 << fls(max)) - 1;
	unsigned int invert = mc->invert;
	unsigned int reg_val, val, rval = 0;

	snd_soc_dapm_mutex_lock(dapm);
	if (dapm_kcontrol_is_powered(kcontrol) && reg != SND_SOC_NOPM) {
		/* widget is powered: read the live register value(s) */
		reg_val = soc_dapm_read(dapm, reg);
		val = (reg_val >> shift) & mask;

		/* stereo controls may keep the right channel in a 2nd reg */
		if (reg != mc->rreg)
			reg_val = soc_dapm_read(dapm, mc->rreg);

		if (snd_soc_volsw_is_stereo(mc))
			rval = (reg_val >> mc->rshift) & mask;
	} else {
		/*
		 * Widget is off (or has no register): report the cached
		 * value; both channels are packed into one word, right
		 * channel above bit 'width'.
		 */
		reg_val = dapm_kcontrol_get_value(kcontrol);
		val = reg_val & mask;

		if (snd_soc_volsw_is_stereo(mc))
			rval = (reg_val >> width) & mask;
	}
	snd_soc_dapm_mutex_unlock(dapm);

	if (invert)
		ucontrol->value.integer.value[0] = max - val;
	else
		ucontrol->value.integer.value[0] = val;

	if (snd_soc_volsw_is_stereo(mc)) {
		if (invert)
			ucontrol->value.integer.value[1] = max - rval;
		else
			ucontrol->value.integer.value[1] = rval;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_get_volsw);

/**
 * snd_soc_dapm_put_volsw - dapm mixer set callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to set the value of a dapm mixer control.
 *
 * Returns 0 for success.
*/ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); struct snd_soc_card *card = dapm->card; struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; int reg = mc->reg; unsigned int shift = mc->shift; int max = mc->max; unsigned int width = fls(max); unsigned int mask = (1 << width) - 1; unsigned int invert = mc->invert; unsigned int val, rval = 0; int connect, rconnect = -1, change, reg_change = 0; struct snd_soc_dapm_update update = {}; int ret = 0; val = (ucontrol->value.integer.value[0] & mask); connect = !!val; if (invert) val = max - val; if (snd_soc_volsw_is_stereo(mc)) { rval = (ucontrol->value.integer.value[1] & mask); rconnect = !!rval; if (invert) rval = max - rval; } snd_soc_dapm_mutex_lock(card); /* This assumes field width < (bits in unsigned int / 2) */ if (width > sizeof(unsigned int) * 8 / 2) dev_warn(dapm->dev, "ASoC: control %s field width limit exceeded\n", kcontrol->id.name); change = dapm_kcontrol_set_value(kcontrol, val | (rval << width)); if (reg != SND_SOC_NOPM) { val = val << shift; rval = rval << mc->rshift; reg_change = soc_dapm_test_bits(dapm, reg, mask << shift, val); if (snd_soc_volsw_is_stereo(mc)) reg_change |= soc_dapm_test_bits(dapm, mc->rreg, mask << mc->rshift, rval); } if (change || reg_change) { if (reg_change) { if (snd_soc_volsw_is_stereo(mc)) { update.has_second_set = true; update.reg2 = mc->rreg; update.mask2 = mask << mc->rshift; update.val2 = rval; } update.kcontrol = kcontrol; update.reg = reg; update.mask = mask << shift; update.val = val; card->update = &update; } ret = soc_dapm_mixer_update_power(card, kcontrol, connect, rconnect); card->update = NULL; } snd_soc_dapm_mutex_unlock(card); if (ret > 0) snd_soc_dpcm_runtime_update(card); return change; } EXPORT_SYMBOL_GPL(snd_soc_dapm_put_volsw); /** * snd_soc_dapm_get_enum_double - dapm enumerated double mixer get callback * 
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to get the value of a dapm enumerated double mixer control.
 *
 * Returns 0 for success.
 */
int snd_soc_dapm_get_enum_double(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
	unsigned int reg_val, val;

	snd_soc_dapm_mutex_lock(dapm);
	if (e->reg != SND_SOC_NOPM && dapm_kcontrol_is_powered(kcontrol)) {
		/* widget is powered: read the live register */
		reg_val = soc_dapm_read(dapm, e->reg);
	} else {
		/* otherwise report the cached kcontrol value */
		reg_val = dapm_kcontrol_get_value(kcontrol);
	}
	snd_soc_dapm_mutex_unlock(dapm);

	val = (reg_val >> e->shift_l) & e->mask;
	ucontrol->value.enumerated.item[0] = snd_soc_enum_val_to_item(e, val);
	if (e->shift_l != e->shift_r) {
		/* double control: decode the second field as well */
		val = (reg_val >> e->shift_r) & e->mask;
		val = snd_soc_enum_val_to_item(e, val);
		ucontrol->value.enumerated.item[1] = val;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_get_enum_double);

/**
 * snd_soc_dapm_put_enum_double - dapm enumerated double mixer set callback
 * @kcontrol: mixer control
 * @ucontrol: control element information
 *
 * Callback to set the value of a dapm enumerated double mixer control.
 *
 * Returns 0 for success.
*/ int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol); struct snd_soc_card *card = dapm->card; struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; unsigned int *item = ucontrol->value.enumerated.item; unsigned int val, change, reg_change = 0; unsigned int mask; struct snd_soc_dapm_update update = {}; int ret = 0; if (item[0] >= e->items) return -EINVAL; val = snd_soc_enum_item_to_val(e, item[0]) << e->shift_l; mask = e->mask << e->shift_l; if (e->shift_l != e->shift_r) { if (item[1] > e->items) return -EINVAL; val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_r; mask |= e->mask << e->shift_r; } snd_soc_dapm_mutex_lock(card); change = dapm_kcontrol_set_value(kcontrol, val); if (e->reg != SND_SOC_NOPM) reg_change = soc_dapm_test_bits(dapm, e->reg, mask, val); if (change || reg_change) { if (reg_change) { update.kcontrol = kcontrol; update.reg = e->reg; update.mask = mask; update.val = val; card->update = &update; } ret = soc_dapm_mux_update_power(card, kcontrol, item[0], e); card->update = NULL; } snd_soc_dapm_mutex_unlock(card); if (ret > 0) snd_soc_dpcm_runtime_update(card); return change; } EXPORT_SYMBOL_GPL(snd_soc_dapm_put_enum_double); /** * snd_soc_dapm_info_pin_switch - Info for a pin switch * * @kcontrol: mixer control * @uinfo: control element information * * Callback to provide information about a pin switch control. 
 */
int snd_soc_dapm_info_pin_switch(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_info *uinfo)
{
	/* pin switches are simple on/off booleans */
	uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
	uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_info_pin_switch);

/**
 * snd_soc_dapm_get_pin_switch - Get information for a pin switch
 *
 * @kcontrol: mixer control
 * @ucontrol: Value
 */
int snd_soc_dapm_get_pin_switch(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
	/* pin name is stashed in the control's private_value */
	const char *pin = (const char *)kcontrol->private_value;

	snd_soc_dapm_mutex_lock(card);

	ucontrol->value.integer.value[0] =
		snd_soc_dapm_get_pin_status(&card->dapm, pin);

	snd_soc_dapm_mutex_unlock(card);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_get_pin_switch);

/**
 * snd_soc_dapm_put_pin_switch - Set information for a pin switch
 *
 * @kcontrol: mixer control
 * @ucontrol: Value
 */
int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
	const char *pin = (const char *)kcontrol->private_value;
	int ret;

	snd_soc_dapm_mutex_lock(card);
	ret = __snd_soc_dapm_set_pin(&card->dapm, pin,
				     !!ucontrol->value.integer.value[0]);
	snd_soc_dapm_mutex_unlock(card);

	/* re-run the power sequence outside the lock */
	snd_soc_dapm_sync(&card->dapm);
	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_put_pin_switch);

/*
 * Instantiate a widget from a template.  Caller must hold the DAPM
 * mutex; errors are returned as ERR_PTR().
 */
struct snd_soc_dapm_widget *
snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
				  const struct snd_soc_dapm_widget *widget)
{
	enum snd_soc_dapm_direction dir;
	struct snd_soc_dapm_widget *w;
	const char *prefix;
	int ret = -ENOMEM;

	if ((w = dapm_cnew_widget(widget)) == NULL)
		goto cnew_failed;

	/* apply the component name prefix, if any */
	prefix = soc_dapm_prefix(dapm);
	if (prefix)
		w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
	else
		w->name = kstrdup_const(widget->name, GFP_KERNEL);
	if (!w->name)
		goto name_failed;

	/* acquire any external resource the widget type needs */
	switch (w->id) {
	case snd_soc_dapm_regulator_supply:
		w->regulator =
devm_regulator_get(dapm->dev, widget->name); if (IS_ERR(w->regulator)) { ret = PTR_ERR(w->regulator); goto request_failed; } if (w->on_val & SND_SOC_DAPM_REGULATOR_BYPASS) { ret = regulator_allow_bypass(w->regulator, true); if (ret != 0) dev_warn(dapm->dev, "ASoC: Failed to bypass %s: %d\n", w->name, ret); } break; case snd_soc_dapm_pinctrl: w->pinctrl = devm_pinctrl_get(dapm->dev); if (IS_ERR(w->pinctrl)) { ret = PTR_ERR(w->pinctrl); goto request_failed; } /* set to sleep_state when initializing */ dapm_pinctrl_event(w, NULL, SND_SOC_DAPM_POST_PMD); break; case snd_soc_dapm_clock_supply: w->clk = devm_clk_get(dapm->dev, w->name); if (IS_ERR(w->clk)) { ret = PTR_ERR(w->clk); goto request_failed; } break; default: break; } switch (w->id) { case snd_soc_dapm_mic: w->is_ep = SND_SOC_DAPM_EP_SOURCE; w->power_check = dapm_generic_check_power; break; case snd_soc_dapm_input: if (!dapm->card->fully_routed) w->is_ep = SND_SOC_DAPM_EP_SOURCE; w->power_check = dapm_generic_check_power; break; case snd_soc_dapm_spk: case snd_soc_dapm_hp: w->is_ep = SND_SOC_DAPM_EP_SINK; w->power_check = dapm_generic_check_power; break; case snd_soc_dapm_output: if (!dapm->card->fully_routed) w->is_ep = SND_SOC_DAPM_EP_SINK; w->power_check = dapm_generic_check_power; break; case snd_soc_dapm_vmid: case snd_soc_dapm_siggen: w->is_ep = SND_SOC_DAPM_EP_SOURCE; w->power_check = dapm_always_on_check_power; break; case snd_soc_dapm_sink: w->is_ep = SND_SOC_DAPM_EP_SINK; w->power_check = dapm_always_on_check_power; break; case snd_soc_dapm_mux: case snd_soc_dapm_demux: case snd_soc_dapm_switch: case snd_soc_dapm_mixer: case snd_soc_dapm_mixer_named_ctl: case snd_soc_dapm_adc: case snd_soc_dapm_aif_out: case snd_soc_dapm_dac: case snd_soc_dapm_aif_in: case snd_soc_dapm_pga: case snd_soc_dapm_buffer: case snd_soc_dapm_scheduler: case snd_soc_dapm_effect: case snd_soc_dapm_src: case snd_soc_dapm_asrc: case snd_soc_dapm_encoder: case snd_soc_dapm_decoder: case snd_soc_dapm_out_drv: case 
snd_soc_dapm_micbias: case snd_soc_dapm_line: case snd_soc_dapm_dai_link: case snd_soc_dapm_dai_out: case snd_soc_dapm_dai_in: w->power_check = dapm_generic_check_power; break; case snd_soc_dapm_supply: case snd_soc_dapm_regulator_supply: case snd_soc_dapm_pinctrl: case snd_soc_dapm_clock_supply: case snd_soc_dapm_kcontrol: w->is_supply = 1; w->power_check = dapm_supply_check_power; break; default: w->power_check = dapm_always_on_check_power; break; } w->dapm = dapm; INIT_LIST_HEAD(&w->list); INIT_LIST_HEAD(&w->dirty); /* see for_each_card_widgets */ list_add_tail(&w->list, &dapm->card->widgets); snd_soc_dapm_for_each_direction(dir) { INIT_LIST_HEAD(&w->edges[dir]); w->endpoints[dir] = -1; } /* machine layer sets up unconnected pins and insertions */ w->connected = 1; return w; request_failed: dev_err_probe(dapm->dev, ret, "ASoC: Failed to request %s\n", w->name); kfree_const(w->name); name_failed: kfree_const(w->sname); kfree(w); cnew_failed: return ERR_PTR(ret); } /** * snd_soc_dapm_new_control - create new dapm control * @dapm: DAPM context * @widget: widget template * * Creates new DAPM control based upon a template. * * Returns a widget pointer on success or an error pointer on failure */ struct snd_soc_dapm_widget * snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm, const struct snd_soc_dapm_widget *widget) { struct snd_soc_dapm_widget *w; snd_soc_dapm_mutex_lock(dapm); w = snd_soc_dapm_new_control_unlocked(dapm, widget); snd_soc_dapm_mutex_unlock(dapm); return w; } EXPORT_SYMBOL_GPL(snd_soc_dapm_new_control); /** * snd_soc_dapm_new_controls - create new dapm controls * @dapm: DAPM context * @widget: widget array * @num: number of widgets * * Creates new DAPM controls based upon the templates. * * Returns 0 for success else error. 
*/ int snd_soc_dapm_new_controls(struct snd_soc_dapm_context *dapm, const struct snd_soc_dapm_widget *widget, int num) { int i; int ret = 0; snd_soc_dapm_mutex_lock_root(dapm); for (i = 0; i < num; i++) { struct snd_soc_dapm_widget *w = snd_soc_dapm_new_control_unlocked(dapm, widget); if (IS_ERR(w)) { ret = PTR_ERR(w); break; } widget++; } snd_soc_dapm_mutex_unlock(dapm); return ret; } EXPORT_SYMBOL_GPL(snd_soc_dapm_new_controls); static int snd_soc_dai_link_event_pre_pmu(struct snd_soc_dapm_widget *w, struct snd_pcm_substream *substream) { struct snd_soc_dapm_path *path; struct snd_soc_dai *source, *sink; struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_pcm_hw_params *params = NULL; const struct snd_soc_pcm_stream *config = NULL; struct snd_pcm_runtime *runtime = NULL; unsigned int fmt; int ret = 0; /* * NOTE * * snd_pcm_hw_params is quite large (608 bytes on arm64) and is * starting to get a bit excessive for allocation on the stack, * especially when you're building with some of the KASAN type * stuff that increases stack usage. * So, we use kzalloc()/kfree() for params in this function. 
*/ params = kzalloc(sizeof(*params), GFP_KERNEL); if (!params) return -ENOMEM; runtime = kzalloc(sizeof(*runtime), GFP_KERNEL); if (!runtime) { ret = -ENOMEM; goto out; } substream->runtime = runtime; substream->stream = SNDRV_PCM_STREAM_CAPTURE; snd_soc_dapm_widget_for_each_source_path(w, path) { source = path->source->priv; ret = snd_soc_dai_startup(source, substream); if (ret < 0) goto out; snd_soc_dai_activate(source, substream->stream); } substream->stream = SNDRV_PCM_STREAM_PLAYBACK; snd_soc_dapm_widget_for_each_sink_path(w, path) { sink = path->sink->priv; ret = snd_soc_dai_startup(sink, substream); if (ret < 0) goto out; snd_soc_dai_activate(sink, substream->stream); } substream->hw_opened = 1; /* * Note: getting the config after .startup() gives a chance to * either party on the link to alter the configuration if * necessary */ config = rtd->dai_link->c2c_params + rtd->c2c_params_select; if (!config) { dev_err(w->dapm->dev, "ASoC: link config missing\n"); ret = -EINVAL; goto out; } /* Be a little careful as we don't want to overflow the mask array */ if (!config->formats) { dev_warn(w->dapm->dev, "ASoC: Invalid format was specified\n"); ret = -EINVAL; goto out; } fmt = ffs(config->formats) - 1; snd_mask_set(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), fmt); hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE)->min = config->rate_min; hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE)->max = config->rate_max; hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS)->min = config->channels_min; hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS)->max = config->channels_max; substream->stream = SNDRV_PCM_STREAM_CAPTURE; snd_soc_dapm_widget_for_each_source_path(w, path) { source = path->source->priv; ret = snd_soc_dai_hw_params(source, substream, params); if (ret < 0) goto out; dapm_update_dai_unlocked(substream, params, source); } substream->stream = SNDRV_PCM_STREAM_PLAYBACK; snd_soc_dapm_widget_for_each_sink_path(w, path) { sink = path->sink->priv; ret 
= snd_soc_dai_hw_params(sink, substream, params); if (ret < 0) goto out; dapm_update_dai_unlocked(substream, params, sink); } runtime->format = params_format(params); runtime->subformat = params_subformat(params); runtime->channels = params_channels(params); runtime->rate = params_rate(params); out: /* see above NOTE */ kfree(params); return ret; } static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_dapm_path *path; struct snd_soc_dai *source, *sink; struct snd_pcm_substream *substream = w->priv; int ret = 0, saved_stream = substream->stream; if (WARN_ON(list_empty(&w->edges[SND_SOC_DAPM_DIR_OUT]) || list_empty(&w->edges[SND_SOC_DAPM_DIR_IN]))) return -EINVAL; switch (event) { case SND_SOC_DAPM_PRE_PMU: ret = snd_soc_dai_link_event_pre_pmu(w, substream); if (ret < 0) goto out; break; case SND_SOC_DAPM_POST_PMU: snd_soc_dapm_widget_for_each_sink_path(w, path) { sink = path->sink->priv; snd_soc_dai_digital_mute(sink, 0, SNDRV_PCM_STREAM_PLAYBACK); ret = 0; } break; case SND_SOC_DAPM_PRE_PMD: snd_soc_dapm_widget_for_each_sink_path(w, path) { sink = path->sink->priv; snd_soc_dai_digital_mute(sink, 1, SNDRV_PCM_STREAM_PLAYBACK); ret = 0; } substream->stream = SNDRV_PCM_STREAM_CAPTURE; snd_soc_dapm_widget_for_each_source_path(w, path) { source = path->source->priv; snd_soc_dai_hw_free(source, substream, 0); } substream->stream = SNDRV_PCM_STREAM_PLAYBACK; snd_soc_dapm_widget_for_each_sink_path(w, path) { sink = path->sink->priv; snd_soc_dai_hw_free(sink, substream, 0); } substream->stream = SNDRV_PCM_STREAM_CAPTURE; snd_soc_dapm_widget_for_each_source_path(w, path) { source = path->source->priv; snd_soc_dai_deactivate(source, substream->stream); snd_soc_dai_shutdown(source, substream, 0); } substream->stream = SNDRV_PCM_STREAM_PLAYBACK; snd_soc_dapm_widget_for_each_sink_path(w, path) { sink = path->sink->priv; snd_soc_dai_deactivate(sink, substream->stream); snd_soc_dai_shutdown(sink, substream, 0); 
} break; case SND_SOC_DAPM_POST_PMD: kfree(substream->runtime); break; default: WARN(1, "Unknown event %d\n", event); ret = -EINVAL; } out: /* Restore the substream direction */ substream->stream = saved_stream; return ret; } static int snd_soc_dapm_dai_link_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dapm_widget *w = snd_kcontrol_chip(kcontrol); struct snd_soc_pcm_runtime *rtd = w->priv; ucontrol->value.enumerated.item[0] = rtd->c2c_params_select; return 0; } static int snd_soc_dapm_dai_link_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dapm_widget *w = snd_kcontrol_chip(kcontrol); struct snd_soc_pcm_runtime *rtd = w->priv; /* Can't change the config when widget is already powered */ if (w->power) return -EBUSY; if (ucontrol->value.enumerated.item[0] == rtd->c2c_params_select) return 0; if (ucontrol->value.enumerated.item[0] >= rtd->dai_link->num_c2c_params) return -EINVAL; rtd->c2c_params_select = ucontrol->value.enumerated.item[0]; return 1; } static void snd_soc_dapm_free_kcontrol(struct snd_soc_card *card, unsigned long *private_value, int num_c2c_params, const char **w_param_text) { int count; devm_kfree(card->dev, (void *)*private_value); if (!w_param_text) return; for (count = 0 ; count < num_c2c_params; count++) devm_kfree(card->dev, (void *)w_param_text[count]); devm_kfree(card->dev, w_param_text); } static struct snd_kcontrol_new * snd_soc_dapm_alloc_kcontrol(struct snd_soc_card *card, char *link_name, const struct snd_soc_pcm_stream *c2c_params, int num_c2c_params, const char **w_param_text, unsigned long *private_value) { struct soc_enum w_param_enum[] = { SOC_ENUM_SINGLE(0, 0, 0, NULL), }; struct snd_kcontrol_new kcontrol_dai_link[] = { SOC_ENUM_EXT(NULL, w_param_enum[0], snd_soc_dapm_dai_link_get, snd_soc_dapm_dai_link_put), }; struct snd_kcontrol_new *kcontrol_news; const struct snd_soc_pcm_stream *config = c2c_params; int count; for (count = 0 ; count < 
num_c2c_params; count++) { if (!config->stream_name) { dev_warn(card->dapm.dev, "ASoC: anonymous config %d for dai link %s\n", count, link_name); w_param_text[count] = devm_kasprintf(card->dev, GFP_KERNEL, "Anonymous Configuration %d", count); } else { w_param_text[count] = devm_kmemdup(card->dev, config->stream_name, strlen(config->stream_name) + 1, GFP_KERNEL); } if (!w_param_text[count]) goto outfree_w_param; config++; } w_param_enum[0].items = num_c2c_params; w_param_enum[0].texts = w_param_text; *private_value = (unsigned long) devm_kmemdup(card->dev, (void *)(kcontrol_dai_link[0].private_value), sizeof(struct soc_enum), GFP_KERNEL); if (!*private_value) { dev_err(card->dev, "ASoC: Failed to create control for %s widget\n", link_name); goto outfree_w_param; } kcontrol_dai_link[0].private_value = *private_value; /* duplicate kcontrol_dai_link on heap so that memory persists */ kcontrol_news = devm_kmemdup(card->dev, &kcontrol_dai_link[0], sizeof(struct snd_kcontrol_new), GFP_KERNEL); if (!kcontrol_news) { dev_err(card->dev, "ASoC: Failed to create control for %s widget\n", link_name); goto outfree_w_param; } return kcontrol_news; outfree_w_param: snd_soc_dapm_free_kcontrol(card, private_value, num_c2c_params, w_param_text); return NULL; } static struct snd_soc_dapm_widget * snd_soc_dapm_new_dai(struct snd_soc_card *card, struct snd_pcm_substream *substream, char *id) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_dapm_widget template; struct snd_soc_dapm_widget *w; const struct snd_kcontrol_new *kcontrol_news; int num_kcontrols; const char **w_param_text; unsigned long private_value = 0; char *link_name; int ret = -ENOMEM; link_name = devm_kasprintf(card->dev, GFP_KERNEL, "%s-%s", rtd->dai_link->name, id); if (!link_name) goto name_fail; /* allocate memory for control, only in case of multiple configs */ w_param_text = NULL; kcontrol_news = NULL; num_kcontrols = 0; if (rtd->dai_link->num_c2c_params > 1) { w_param_text = 
devm_kcalloc(card->dev, rtd->dai_link->num_c2c_params, sizeof(char *), GFP_KERNEL); if (!w_param_text) goto param_fail; num_kcontrols = 1; kcontrol_news = snd_soc_dapm_alloc_kcontrol(card, link_name, rtd->dai_link->c2c_params, rtd->dai_link->num_c2c_params, w_param_text, &private_value); if (!kcontrol_news) goto param_fail; } memset(&template, 0, sizeof(template)); template.reg = SND_SOC_NOPM; template.id = snd_soc_dapm_dai_link; template.name = link_name; template.event = snd_soc_dai_link_event; template.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD; template.kcontrol_news = kcontrol_news; template.num_kcontrols = num_kcontrols; dev_dbg(card->dev, "ASoC: adding %s widget\n", link_name); w = snd_soc_dapm_new_control_unlocked(&card->dapm, &template); if (IS_ERR(w)) { ret = PTR_ERR(w); goto outfree_kcontrol_news; } w->priv = substream; return w; outfree_kcontrol_news: devm_kfree(card->dev, (void *)template.kcontrol_news); snd_soc_dapm_free_kcontrol(card, &private_value, rtd->dai_link->num_c2c_params, w_param_text); param_fail: devm_kfree(card->dev, link_name); name_fail: dev_err(rtd->dev, "ASoC: Failed to create %s-%s widget: %d\n", rtd->dai_link->name, id, ret); return ERR_PTR(ret); } /** * snd_soc_dapm_new_dai_widgets - Create new DAPM widgets * @dapm: DAPM context * @dai: parent DAI * * Returns 0 on success, error code otherwise. 
*/ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm, struct snd_soc_dai *dai) { struct snd_soc_dapm_widget template; struct snd_soc_dapm_widget *w; WARN_ON(dapm->dev != dai->dev); memset(&template, 0, sizeof(template)); template.reg = SND_SOC_NOPM; if (dai->driver->playback.stream_name) { template.id = snd_soc_dapm_dai_in; template.name = dai->driver->playback.stream_name; template.sname = dai->driver->playback.stream_name; dev_dbg(dai->dev, "ASoC: adding %s widget\n", template.name); w = snd_soc_dapm_new_control_unlocked(dapm, &template); if (IS_ERR(w)) return PTR_ERR(w); w->priv = dai; snd_soc_dai_set_widget_playback(dai, w); } if (dai->driver->capture.stream_name) { template.id = snd_soc_dapm_dai_out; template.name = dai->driver->capture.stream_name; template.sname = dai->driver->capture.stream_name; dev_dbg(dai->dev, "ASoC: adding %s widget\n", template.name); w = snd_soc_dapm_new_control_unlocked(dapm, &template); if (IS_ERR(w)) return PTR_ERR(w); w->priv = dai; snd_soc_dai_set_widget_capture(dai, w); } return 0; } EXPORT_SYMBOL_GPL(snd_soc_dapm_new_dai_widgets); int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card) { struct snd_soc_dapm_widget *dai_w, *w; struct snd_soc_dapm_widget *src, *sink; struct snd_soc_dai *dai; /* For each DAI widget... 
*/ for_each_card_widgets(card, dai_w) { switch (dai_w->id) { case snd_soc_dapm_dai_in: case snd_soc_dapm_dai_out: break; default: continue; } /* let users know there is no DAI to link */ if (!dai_w->priv) { dev_dbg(card->dev, "dai widget %s has no DAI\n", dai_w->name); continue; } dai = dai_w->priv; /* ...find all widgets with the same stream and link them */ for_each_card_widgets(card, w) { if (w->dapm != dai_w->dapm) continue; switch (w->id) { case snd_soc_dapm_dai_in: case snd_soc_dapm_dai_out: continue; default: break; } if (!w->sname || !strstr(w->sname, dai_w->sname)) continue; if (dai_w->id == snd_soc_dapm_dai_in) { src = dai_w; sink = w; } else { src = w; sink = dai_w; } dev_dbg(dai->dev, "%s -> %s\n", src->name, sink->name); snd_soc_dapm_add_path(w->dapm, src, sink, NULL, NULL); } } return 0; } static void dapm_connect_dai_routes(struct snd_soc_dapm_context *dapm, struct snd_soc_dai *src_dai, struct snd_soc_dapm_widget *src, struct snd_soc_dapm_widget *dai, struct snd_soc_dai *sink_dai, struct snd_soc_dapm_widget *sink) { dev_dbg(dapm->dev, "connected DAI link %s:%s -> %s:%s\n", src_dai->component->name, src->name, sink_dai->component->name, sink->name); if (dai) { snd_soc_dapm_add_path(dapm, src, dai, NULL, NULL); src = dai; } snd_soc_dapm_add_path(dapm, src, sink, NULL, NULL); } static void dapm_connect_dai_pair(struct snd_soc_card *card, struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *codec_dai, struct snd_soc_dai *cpu_dai) { struct snd_soc_dai_link *dai_link = rtd->dai_link; struct snd_soc_dapm_widget *codec, *cpu; struct snd_soc_dai *src_dai[] = { cpu_dai, codec_dai }; struct snd_soc_dai *sink_dai[] = { codec_dai, cpu_dai }; struct snd_soc_dapm_widget **src[] = { &cpu, &codec }; struct snd_soc_dapm_widget **sink[] = { &codec, &cpu }; char *widget_name[] = { "playback", "capture" }; int stream; for_each_pcm_streams(stream) { int stream_cpu, stream_codec; stream_cpu = snd_soc_get_stream_cpu(dai_link, stream); stream_codec = stream; /* connect BE 
DAI playback if widgets are valid */ cpu = snd_soc_dai_get_widget(cpu_dai, stream_cpu); codec = snd_soc_dai_get_widget(codec_dai, stream_codec); if (!cpu || !codec) continue; /* special handling for [Codec2Codec] */ if (dai_link->c2c_params && !rtd->c2c_widget[stream]) { struct snd_pcm_substream *substream = rtd->pcm->streams[stream].substream; struct snd_soc_dapm_widget *dai = snd_soc_dapm_new_dai(card, substream, widget_name[stream]); if (IS_ERR(dai)) continue; rtd->c2c_widget[stream] = dai; } dapm_connect_dai_routes(&card->dapm, src_dai[stream], *src[stream], rtd->c2c_widget[stream], sink_dai[stream], *sink[stream]); } } static void soc_dapm_dai_stream_event(struct snd_soc_dai *dai, int stream, int event) { struct snd_soc_dapm_widget *w; w = snd_soc_dai_get_widget(dai, stream); if (w) { unsigned int ep; dapm_mark_dirty(w, "stream event"); if (w->id == snd_soc_dapm_dai_in) { ep = SND_SOC_DAPM_EP_SOURCE; dapm_widget_invalidate_input_paths(w); } else { ep = SND_SOC_DAPM_EP_SINK; dapm_widget_invalidate_output_paths(w); } switch (event) { case SND_SOC_DAPM_STREAM_START: w->active = 1; w->is_ep = ep; break; case SND_SOC_DAPM_STREAM_STOP: w->active = 0; w->is_ep = 0; break; case SND_SOC_DAPM_STREAM_SUSPEND: case SND_SOC_DAPM_STREAM_RESUME: case SND_SOC_DAPM_STREAM_PAUSE_PUSH: case SND_SOC_DAPM_STREAM_PAUSE_RELEASE: break; } } } void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card) { struct snd_soc_pcm_runtime *rtd; struct snd_soc_dai *codec_dai; int i; /* for each BE DAI link... */ for_each_card_rtds(card, rtd) { /* * dynamic FE links have no fixed DAI mapping. * CODEC<->CODEC links have no direct connection. 
*/ if (rtd->dai_link->dynamic) continue; if (rtd->dai_link->num_cpus == 1) { for_each_rtd_codec_dais(rtd, i, codec_dai) dapm_connect_dai_pair(card, rtd, codec_dai, asoc_rtd_to_cpu(rtd, 0)); } else if (rtd->dai_link->num_codecs == rtd->dai_link->num_cpus) { for_each_rtd_codec_dais(rtd, i, codec_dai) dapm_connect_dai_pair(card, rtd, codec_dai, asoc_rtd_to_cpu(rtd, i)); } else if (rtd->dai_link->num_codecs > rtd->dai_link->num_cpus) { int cpu_id; if (!rtd->dai_link->codec_ch_maps) { dev_err(card->dev, "%s: no codec channel mapping table provided\n", __func__); continue; } for_each_rtd_codec_dais(rtd, i, codec_dai) { cpu_id = rtd->dai_link->codec_ch_maps[i].connected_cpu_id; if (cpu_id >= rtd->dai_link->num_cpus) { dev_err(card->dev, "%s: dai_link %s cpu_id %d too large, num_cpus is %d\n", __func__, rtd->dai_link->name, cpu_id, rtd->dai_link->num_cpus); continue; } dapm_connect_dai_pair(card, rtd, codec_dai, asoc_rtd_to_cpu(rtd, cpu_id)); } } else { dev_err(card->dev, "%s: codec number %d < cpu number %d is not supported\n", __func__, rtd->dai_link->num_codecs, rtd->dai_link->num_cpus); } } } static void soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream, int event) { struct snd_soc_dai *dai; int i; for_each_rtd_dais(rtd, i, dai) soc_dapm_dai_stream_event(dai, stream, event); dapm_power_widgets(rtd->card, event); } /** * snd_soc_dapm_stream_event - send a stream event to the dapm core * @rtd: PCM runtime data * @stream: stream name * @event: stream event * * Sends a stream event to the dapm core. The core then makes any * necessary widget power changes. * * Returns 0 for success else error. 
 */
void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream,
	int event)
{
	struct snd_soc_card *card = rtd->card;

	/* serialize against all other DAPM graph updates on this card */
	snd_soc_dapm_mutex_lock(card);
	soc_dapm_stream_event(rtd, stream, event);
	snd_soc_dapm_mutex_unlock(card);
}

/*
 * snd_soc_dapm_stream_stop - power down a stopped stream
 *
 * Playback streams are either powered down immediately (when pmdown time
 * is ignored for this runtime) or via the runtime's delayed work, which
 * avoids audible pops on quick stop/start cycles. Capture streams never
 * pop this way and are powered down immediately.
 */
void snd_soc_dapm_stream_stop(struct snd_soc_pcm_runtime *rtd, int stream)
{
	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (snd_soc_runtime_ignore_pmdown_time(rtd)) {
			/* powered down playback stream now */
			snd_soc_dapm_stream_event(rtd,
						  SNDRV_PCM_STREAM_PLAYBACK,
						  SND_SOC_DAPM_STREAM_STOP);
		} else {
			/* start delayed pop wq here for playback streams */
			rtd->pop_wait = 1;
			queue_delayed_work(system_power_efficient_wq,
					   &rtd->delayed_work,
					   msecs_to_jiffies(rtd->pmdown_time));
		}
	} else {
		/* capture streams can be powered down now */
		snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_CAPTURE,
					  SND_SOC_DAPM_STREAM_STOP);
	}
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_stream_stop);

/**
 * snd_soc_dapm_enable_pin_unlocked - enable pin.
 * @dapm: DAPM context
 * @pin: pin name
 *
 * Enables input/output pin and its parents or children widgets iff there is
 * a valid audio route and active audio stream.
 *
 * Requires external locking.
 *
 * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
 * do any widget power switching.
 */
int snd_soc_dapm_enable_pin_unlocked(struct snd_soc_dapm_context *dapm,
				     const char *pin)
{
	return snd_soc_dapm_set_pin(dapm, pin, 1);
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_enable_pin_unlocked);

/**
 * snd_soc_dapm_enable_pin - enable pin.
 * @dapm: DAPM context
 * @pin: pin name
 *
 * Enables input/output pin and its parents or children widgets iff there is
 * a valid audio route and active audio stream.
 *
 * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
 * do any widget power switching.
 */
int snd_soc_dapm_enable_pin(struct snd_soc_dapm_context *dapm, const char *pin)
{
	int ret;

	snd_soc_dapm_mutex_lock(dapm);
	ret = snd_soc_dapm_set_pin(dapm, pin, 1);
	snd_soc_dapm_mutex_unlock(dapm);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_enable_pin);

/**
 * snd_soc_dapm_force_enable_pin_unlocked - force a pin to be enabled
 * @dapm: DAPM context
 * @pin: pin name
 *
 * Enables input/output pin regardless of any other state. This is
 * intended for use with microphone bias supplies used in microphone
 * jack detection.
 *
 * Requires external locking.
 *
 * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
 * do any widget power switching.
 */
int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm,
					   const char *pin)
{
	struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);

	if (!w) {
		dev_err(dapm->dev, "ASoC: unknown pin %s\n", pin);
		return -EINVAL;
	}

	dev_dbg(w->dapm->dev, "ASoC: force enable pin %s\n", pin);
	if (!w->connected) {
		/*
		 * w->force does not affect the number of input or output paths,
		 * so we only have to recheck if w->connected is changed
		 */
		dapm_widget_invalidate_input_paths(w);
		dapm_widget_invalidate_output_paths(w);
		w->connected = 1;
	}
	w->force = 1;
	dapm_mark_dirty(w, "force enable");

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_force_enable_pin_unlocked);

/**
 * snd_soc_dapm_force_enable_pin - force a pin to be enabled
 * @dapm: DAPM context
 * @pin: pin name
 *
 * Enables input/output pin regardless of any other state. This is
 * intended for use with microphone bias supplies used in microphone
 * jack detection.
 *
 * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
 * do any widget power switching.
 */
int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm,
				  const char *pin)
{
	int ret;

	snd_soc_dapm_mutex_lock(dapm);
	ret = snd_soc_dapm_force_enable_pin_unlocked(dapm, pin);
	snd_soc_dapm_mutex_unlock(dapm);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_force_enable_pin);

/**
 * snd_soc_dapm_disable_pin_unlocked - disable pin.
 * @dapm: DAPM context
 * @pin: pin name
 *
 * Disables input/output pin and its parents or children widgets.
 *
 * Requires external locking.
 *
 * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
 * do any widget power switching.
 */
int snd_soc_dapm_disable_pin_unlocked(struct snd_soc_dapm_context *dapm,
				      const char *pin)
{
	return snd_soc_dapm_set_pin(dapm, pin, 0);
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_disable_pin_unlocked);

/**
 * snd_soc_dapm_disable_pin - disable pin.
 * @dapm: DAPM context
 * @pin: pin name
 *
 * Disables input/output pin and its parents or children widgets.
 *
 * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
 * do any widget power switching.
 */
int snd_soc_dapm_disable_pin(struct snd_soc_dapm_context *dapm,
			     const char *pin)
{
	int ret;

	snd_soc_dapm_mutex_lock(dapm);
	ret = snd_soc_dapm_set_pin(dapm, pin, 0);
	snd_soc_dapm_mutex_unlock(dapm);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_disable_pin);

/**
 * snd_soc_dapm_nc_pin_unlocked - permanently disable pin.
 * @dapm: DAPM context
 * @pin: pin name
 *
 * Marks the specified pin as being not connected, disabling it along
 * any parent or child widgets. At present this is identical to
 * snd_soc_dapm_disable_pin() but in future it will be extended to do
 * additional things such as disabling controls which only affect
 * paths through the pin.
 *
 * Requires external locking.
 *
 * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
 * do any widget power switching.
 */
int snd_soc_dapm_nc_pin_unlocked(struct snd_soc_dapm_context *dapm,
				 const char *pin)
{
	return snd_soc_dapm_set_pin(dapm, pin, 0);
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_nc_pin_unlocked);

/**
 * snd_soc_dapm_nc_pin - permanently disable pin.
 * @dapm: DAPM context
 * @pin: pin name
 *
 * Marks the specified pin as being not connected, disabling it along
 * any parent or child widgets. At present this is identical to
 * snd_soc_dapm_disable_pin() but in future it will be extended to do
 * additional things such as disabling controls which only affect
 * paths through the pin.
 *
 * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
 * do any widget power switching.
 */
int snd_soc_dapm_nc_pin(struct snd_soc_dapm_context *dapm, const char *pin)
{
	int ret;

	snd_soc_dapm_mutex_lock(dapm);
	ret = snd_soc_dapm_set_pin(dapm, pin, 0);
	snd_soc_dapm_mutex_unlock(dapm);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_nc_pin);

/**
 * snd_soc_dapm_get_pin_status - get audio pin status
 * @dapm: DAPM context
 * @pin: audio signal pin endpoint (or start point)
 *
 * Get audio pin status - connected or disconnected.
 *
 * Returns 1 for connected otherwise 0.
 */
int snd_soc_dapm_get_pin_status(struct snd_soc_dapm_context *dapm,
				const char *pin)
{
	struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);

	if (w)
		return w->connected;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_get_pin_status);

/**
 * snd_soc_dapm_ignore_suspend - ignore suspend status for DAPM endpoint
 * @dapm: DAPM context
 * @pin: audio signal pin endpoint (or start point)
 *
 * Mark the given endpoint or pin as ignoring suspend. When the
 * system is disabled a path between two endpoints flagged as ignoring
 * suspend will not be disabled. The path must already be enabled via
 * normal means at suspend time, it will not be turned on if it was not
 * already enabled.
 */
int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm,
				const char *pin)
{
	struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, false);

	if (!w) {
		dev_err(dapm->dev, "ASoC: unknown pin %s\n", pin);
		return -EINVAL;
	}

	w->ignore_suspend = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_ignore_suspend);

/**
 * snd_soc_dapm_free - free dapm resources
 * @dapm: DAPM context
 *
 * Free all dapm widgets and resources.
 */
void snd_soc_dapm_free(struct snd_soc_dapm_context *dapm)
{
	dapm_debugfs_cleanup(dapm);
	dapm_free_widgets(dapm);
	/* unlink from the card's dapm_list, see snd_soc_dapm_init() */
	list_del(&dapm->list);
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_free);

/*
 * snd_soc_dapm_init - initialise a DAPM context and attach it to a card
 *
 * For a component context the device and bias policy come from the
 * component driver; for the card-level context they come from the card.
 */
void snd_soc_dapm_init(struct snd_soc_dapm_context *dapm,
		       struct snd_soc_card *card,
		       struct snd_soc_component *component)
{
	dapm->card = card;
	dapm->component = component;
	dapm->bias_level = SND_SOC_BIAS_OFF;

	if (component) {
		dapm->dev = component->dev;
		dapm->idle_bias_off = !component->driver->idle_bias_on;
		dapm->suspend_bias_off = component->driver->suspend_bias_off;
	} else {
		dapm->dev = card->dev;
	}

	INIT_LIST_HEAD(&dapm->list);
	/* see for_each_card_dapms */
	list_add(&dapm->list, &card->dapm_list);
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_init);

/*
 * Power down every powered widget belonging to @dapm and step the bias
 * level down towards standby. Used only on the system-shutdown path.
 */
static void soc_dapm_shutdown_dapm(struct snd_soc_dapm_context *dapm)
{
	struct snd_soc_card *card = dapm->card;
	struct snd_soc_dapm_widget *w;
	LIST_HEAD(down_list);
	int powerdown = 0;

	snd_soc_dapm_mutex_lock_root(card);

	for_each_card_widgets(dapm->card, w) {
		if (w->dapm != dapm)
			continue;
		if (w->power) {
			dapm_seq_insert(w, &down_list, false);
			w->new_power = 0;
			powerdown = 1;
		}
	}

	/* If there were no widgets to power down we're already in
	 * standby.
	 */
	if (powerdown) {
		if (dapm->bias_level == SND_SOC_BIAS_ON)
			snd_soc_dapm_set_bias_level(dapm,
						    SND_SOC_BIAS_PREPARE);
		dapm_seq_run(card, &down_list, 0, false);
		if (dapm->bias_level == SND_SOC_BIAS_PREPARE)
			snd_soc_dapm_set_bias_level(dapm,
						    SND_SOC_BIAS_STANDBY);
	}

	snd_soc_dapm_mutex_unlock(card);
}

/*
 * snd_soc_dapm_shutdown - callback for system shutdown
 */
void snd_soc_dapm_shutdown(struct snd_soc_card *card)
{
	struct snd_soc_dapm_context *dapm;

	/* shut down all component contexts first, the card context last */
	for_each_card_dapms(card, dapm) {
		if (dapm != &card->dapm) {
			soc_dapm_shutdown_dapm(dapm);
			if (dapm->bias_level == SND_SOC_BIAS_STANDBY)
				snd_soc_dapm_set_bias_level(dapm,
							    SND_SOC_BIAS_OFF);
		}
	}

	soc_dapm_shutdown_dapm(&card->dapm);
	if (card->dapm.bias_level == SND_SOC_BIAS_STANDBY)
		snd_soc_dapm_set_bias_level(&card->dapm,
					    SND_SOC_BIAS_OFF);
}

/* Module information */
MODULE_AUTHOR("Liam Girdwood, [email protected]");
MODULE_DESCRIPTION("Dynamic Audio Power Management core for ALSA SoC");
MODULE_LICENSE("GPL");
linux-master
sound/soc/soc-dapm.c
// SPDX-License-Identifier: GPL-2.0
//
// soc-card.c
//
// Copyright (C) 2019 Renesas Electronics Corp.
// Kuninori Morimoto <[email protected]>
//
#include <sound/soc.h>
#include <sound/jack.h>

#define soc_card_ret(dai, ret) _soc_card_ret(dai, __func__, ret)
/*
 * Common return-value filter for the card callbacks below: log anything
 * that is a real error, but stay silent for success, -EPROBE_DEFER and
 * -ENOTSUPP. Always passes @ret through unchanged.
 */
static inline int _soc_card_ret(struct snd_soc_card *card,
				const char *func, int ret)
{
	switch (ret) {
	case -EPROBE_DEFER:
	case -ENOTSUPP:
	case 0:
		break;
	default:
		dev_err(card->dev,
			"ASoC: error at %s on %s: %d\n",
			func, card->name, ret);
	}

	return ret;
}

/*
 * Look up a kcontrol on the card by name; returns NULL when @name is
 * NULL or no control matches. NOTE(review): the compare is bounded by
 * sizeof(kctl->id.name), so an over-long @name matches on its truncated
 * prefix - presumably acceptable since control names are bounded too.
 */
struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
					       const char *name)
{
	struct snd_card *card = soc_card->snd_card;
	struct snd_kcontrol *kctl;

	if (unlikely(!name))
		return NULL;

	list_for_each_entry(kctl, &card->controls, list)
		if (!strncmp(kctl->id.name, name, sizeof(kctl->id.name)))
			return kctl;
	return NULL;
}
EXPORT_SYMBOL_GPL(snd_soc_card_get_kcontrol);

/*
 * Shared constructor for both jack_new entry points: initialise the ASoC
 * jack bookkeeping, then create the underlying ALSA jack object.
 */
static int jack_new(struct snd_soc_card *card, const char *id, int type,
		    struct snd_soc_jack *jack, bool initial_kctl)
{
	mutex_init(&jack->mutex);
	jack->card = card;
	INIT_LIST_HEAD(&jack->pins);
	INIT_LIST_HEAD(&jack->jack_zones);
	BLOCKING_INIT_NOTIFIER_HEAD(&jack->notifier);

	return snd_jack_new(card->snd_card, id, type, &jack->jack,
			    initial_kctl, false);
}

/**
 * snd_soc_card_jack_new - Create a new jack without pins
 * @card:  ASoC card
 * @id:    an identifying string for this jack
 * @type:  a bitmask of enum snd_jack_type values that can be detected by
 *         this jack
 * @jack:  structure to use for the jack
 *
 * Creates a new jack object without pins. If adding pins later,
 * snd_soc_card_jack_new_pins() should be used instead with 0 as num_pins
 * argument.
 *
 * Returns zero if successful, or a negative error code on failure.
 * On success jack will be initialised.
 */
int snd_soc_card_jack_new(struct snd_soc_card *card, const char *id, int type,
			  struct snd_soc_jack *jack)
{
	return soc_card_ret(card, jack_new(card, id, type, jack, true));
}
EXPORT_SYMBOL_GPL(snd_soc_card_jack_new);

/**
 * snd_soc_card_jack_new_pins - Create a new jack with pins
 * @card:     ASoC card
 * @id:       an identifying string for this jack
 * @type:     a bitmask of enum snd_jack_type values that can be detected by
 *            this jack
 * @jack:     structure to use for the jack
 * @pins:     Array of jack pins to be added to the jack or NULL
 * @num_pins: Number of elements in the @pins array
 *
 * Creates a new jack object with pins. If not adding pins,
 * snd_soc_card_jack_new() should be used instead.
 *
 * Returns zero if successful, or a negative error code on failure.
 * On success jack will be initialised.
 */
int snd_soc_card_jack_new_pins(struct snd_soc_card *card, const char *id,
			       int type, struct snd_soc_jack *jack,
			       struct snd_soc_jack_pin *pins,
			       unsigned int num_pins)
{
	int ret;

	ret = jack_new(card, id, type, jack, false);
	if (ret)
		goto end;

	if (num_pins)
		ret = snd_soc_jack_add_pins(jack, num_pins, pins);
end:
	return soc_card_ret(card, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_card_jack_new_pins);

/* Invoke the card's optional suspend_pre callback, if any. */
int snd_soc_card_suspend_pre(struct snd_soc_card *card)
{
	int ret = 0;

	if (card->suspend_pre)
		ret = card->suspend_pre(card);

	return soc_card_ret(card, ret);
}

/* Invoke the card's optional suspend_post callback, if any. */
int snd_soc_card_suspend_post(struct snd_soc_card *card)
{
	int ret = 0;

	if (card->suspend_post)
		ret = card->suspend_post(card);

	return soc_card_ret(card, ret);
}

/* Invoke the card's optional resume_pre callback, if any. */
int snd_soc_card_resume_pre(struct snd_soc_card *card)
{
	int ret = 0;

	if (card->resume_pre)
		ret = card->resume_pre(card);

	return soc_card_ret(card, ret);
}

/* Invoke the card's optional resume_post callback, if any. */
int snd_soc_card_resume_post(struct snd_soc_card *card)
{
	int ret = 0;

	if (card->resume_post)
		ret = card->resume_post(card);

	return soc_card_ret(card, ret);
}

/* Run the card's probe callback and mark the card probed on success. */
int snd_soc_card_probe(struct snd_soc_card *card)
{
	if (card->probe) {
		int ret = card->probe(card);

		if (ret < 0)
			return soc_card_ret(card, ret);

		/*
		 * It has "card->probe" and "card->late_probe" callbacks.
		 * So, set "probed" flag here, because it needs to care
		 * about "late_probe".
		 *
		 * see
		 *	snd_soc_bind_card()
		 *	snd_soc_card_late_probe()
		 */
		card->probed = 1;
	}

	return 0;
}

/* Run the card's late_probe callback; always marks the card probed. */
int snd_soc_card_late_probe(struct snd_soc_card *card)
{
	if (card->late_probe) {
		int ret = card->late_probe(card);

		if (ret < 0)
			return soc_card_ret(card, ret);
	}

	/*
	 * It has "card->probe" and "card->late_probe" callbacks,
	 * and "late_probe" callback is called after "probe".
	 * This means, we can set "card->probed" flag after "late_probe"
	 * for all cases.
	 *
	 * see
	 *	snd_soc_bind_card()
	 *	snd_soc_card_probe()
	 */
	card->probed = 1;

	return 0;
}

/* Invoke the card's optional fixup_controls callback, if any. */
void snd_soc_card_fixup_controls(struct snd_soc_card *card)
{
	if (card->fixup_controls)
		card->fixup_controls(card);
}

/*
 * Run the card's remove callback (only if the card was probed) and
 * clear the probed flag unconditionally.
 */
int snd_soc_card_remove(struct snd_soc_card *card)
{
	int ret = 0;

	if (card->probed &&
	    card->remove)
		ret = card->remove(card);

	card->probed = 0;

	return soc_card_ret(card, ret);
}

/* Forward a bias-level change to the card, if it implements the hook. */
int snd_soc_card_set_bias_level(struct snd_soc_card *card,
				struct snd_soc_dapm_context *dapm,
				enum snd_soc_bias_level level)
{
	int ret = 0;

	if (card && card->set_bias_level)
		ret = card->set_bias_level(card, dapm, level);

	return soc_card_ret(card, ret);
}

/* Forward a post bias-level change to the card, if it implements the hook. */
int snd_soc_card_set_bias_level_post(struct snd_soc_card *card,
				     struct snd_soc_dapm_context *dapm,
				     enum snd_soc_bias_level level)
{
	int ret = 0;

	if (card && card->set_bias_level_post)
		ret = card->set_bias_level_post(card, dapm, level);

	return soc_card_ret(card, ret);
}

/* Notify the card of a newly added DAI link, if it implements the hook. */
int snd_soc_card_add_dai_link(struct snd_soc_card *card,
			      struct snd_soc_dai_link *dai_link)
{
	int ret = 0;

	if (card->add_dai_link)
		ret = card->add_dai_link(card, dai_link);

	return soc_card_ret(card, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_card_add_dai_link);

/* Notify the card of a DAI link removal, if it implements the hook. */
void snd_soc_card_remove_dai_link(struct snd_soc_card *card,
				  struct snd_soc_dai_link *dai_link)
{
	if (card->remove_dai_link)
		card->remove_dai_link(card, dai_link);
}
EXPORT_SYMBOL_GPL(snd_soc_card_remove_dai_link);
linux-master
sound/soc/soc-card.c
// SPDX-License-Identifier: GPL-2.0+
//
// soc-devres.c -- ALSA SoC Audio Layer devres functions
//
// Copyright (C) 2013 Linaro Ltd

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>

/* devres destructor: unregister the DAI stored in the devres payload */
static void devm_dai_release(struct device *dev, void *res)
{
	snd_soc_unregister_dai(*(struct snd_soc_dai **)res);
}

/**
 * devm_snd_soc_register_dai - resource-managed dai registration
 * @dev: Device used to manage component
 * @component: The component the DAIs are registered for
 * @dai_drv: DAI driver to use for the DAI
 * @legacy_dai_naming: if %true, use legacy single-name format;
 * 		if %false, use multiple-name format;
 *
 * Returns the new DAI, or NULL on allocation or registration failure.
 * The DAI is unregistered automatically when @dev is unbound.
 */
struct snd_soc_dai *devm_snd_soc_register_dai(struct device *dev,
					      struct snd_soc_component *component,
					      struct snd_soc_dai_driver *dai_drv,
					      bool legacy_dai_naming)
{
	struct snd_soc_dai **ptr;
	struct snd_soc_dai *dai;

	/* allocate the devres node first so failure leaves nothing registered */
	ptr = devres_alloc(devm_dai_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	dai = snd_soc_register_dai(component, dai_drv, legacy_dai_naming);
	if (dai) {
		*ptr = dai;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return dai;
}
EXPORT_SYMBOL_GPL(devm_snd_soc_register_dai);

/* devres destructor: unregister the component by its driver pointer */
static void devm_component_release(struct device *dev, void *res)
{
	const struct snd_soc_component_driver **cmpnt_drv = res;

	snd_soc_unregister_component_by_driver(dev, *cmpnt_drv);
}

/**
 * devm_snd_soc_register_component - resource managed component registration
 * @dev: Device used to manage component
 * @cmpnt_drv: Component driver
 * @dai_drv: DAI driver
 * @num_dai: Number of DAIs to register
 *
 * Register a component with automatic unregistration when the device is
 * unregistered.
 */
int devm_snd_soc_register_component(struct device *dev,
			 const struct snd_soc_component_driver *cmpnt_drv,
			 struct snd_soc_dai_driver *dai_drv, int num_dai)
{
	const struct snd_soc_component_driver **ptr;
	int ret;

	ptr = devres_alloc(devm_component_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = snd_soc_register_component(dev, cmpnt_drv, dai_drv, num_dai);
	if (ret == 0) {
		*ptr = cmpnt_drv;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_snd_soc_register_component);

/* devres destructor: unregister the card stored in the devres payload */
static void devm_card_release(struct device *dev, void *res)
{
	snd_soc_unregister_card(*(struct snd_soc_card **)res);
}

/**
 * devm_snd_soc_register_card - resource managed card registration
 * @dev: Device used to manage card
 * @card: Card to register
 *
 * Register a card with automatic unregistration when the device is
 * unregistered.
 */
int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card)
{
	struct snd_soc_card **ptr;
	int ret;

	ptr = devres_alloc(devm_card_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = snd_soc_register_card(card);
	if (ret == 0) {
		*ptr = card;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_snd_soc_register_card);

#ifdef CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM

/* devres destructor: unregister the dmaengine PCM for the stored device */
static void devm_dmaengine_pcm_release(struct device *dev, void *res)
{
	snd_dmaengine_pcm_unregister(*(struct device **)res);
}

/**
 * devm_snd_dmaengine_pcm_register - resource managed dmaengine PCM registration
 * @dev: The parent device for the PCM device
 * @config: Platform specific PCM configuration
 * @flags: Platform specific quirks
 *
 * Register a dmaengine based PCM device with automatic unregistration when the
 * device is unregistered.
 */
int devm_snd_dmaengine_pcm_register(struct device *dev,
	const struct snd_dmaengine_pcm_config *config, unsigned int flags)
{
	struct device **ptr;
	int ret;

	ptr = devres_alloc(devm_dmaengine_pcm_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = snd_dmaengine_pcm_register(dev, config, flags);
	if (ret == 0) {
		*ptr = dev;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_snd_dmaengine_pcm_register);

#endif
linux-master
sound/soc/soc-devres.c
// SPDX-License-Identifier: GPL-2.0 // // soc-dai.c // // Copyright (C) 2019 Renesas Electronics Corp. // Kuninori Morimoto <[email protected]> // #include <sound/soc.h> #include <sound/soc-dai.h> #include <sound/soc-link.h> #define soc_dai_ret(dai, ret) _soc_dai_ret(dai, __func__, ret) static inline int _soc_dai_ret(struct snd_soc_dai *dai, const char *func, int ret) { /* Positive, Zero values are not errors */ if (ret >= 0) return ret; /* Negative values might be errors */ switch (ret) { case -EPROBE_DEFER: case -ENOTSUPP: break; default: dev_err(dai->dev, "ASoC: error at %s on %s: %d\n", func, dai->name, ret); } return ret; } /* * We might want to check substream by using list. * In such case, we can update these macros. */ #define soc_dai_mark_push(dai, substream, tgt) ((dai)->mark_##tgt = substream) #define soc_dai_mark_pop(dai, substream, tgt) ((dai)->mark_##tgt = NULL) #define soc_dai_mark_match(dai, substream, tgt) ((dai)->mark_##tgt == substream) /** * snd_soc_dai_set_sysclk - configure DAI system or master clock. * @dai: DAI * @clk_id: DAI specific clock ID * @freq: new clock frequency in Hz * @dir: new clock direction - input/output. * * Configures the DAI master (MCLK) or system (SYSCLK) clocking. */ int snd_soc_dai_set_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir) { int ret; if (dai->driver->ops && dai->driver->ops->set_sysclk) ret = dai->driver->ops->set_sysclk(dai, clk_id, freq, dir); else ret = snd_soc_component_set_sysclk(dai->component, clk_id, 0, freq, dir); return soc_dai_ret(dai, ret); } EXPORT_SYMBOL_GPL(snd_soc_dai_set_sysclk); /** * snd_soc_dai_set_clkdiv - configure DAI clock dividers. * @dai: DAI * @div_id: DAI specific clock divider ID * @div: new clock divisor. * * Configures the clock dividers. This is used to derive the best DAI bit and * frame clocks from the system or master clock. It's best to set the DAI bit * and frame clocks as low as possible to save system power. 
 */
int snd_soc_dai_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div)
{
	int ret = -EINVAL;

	/* forward to the DAI op when implemented, -EINVAL otherwise */
	if (dai->driver->ops &&
	    dai->driver->ops->set_clkdiv)
		ret = dai->driver->ops->set_clkdiv(dai, div_id, div);

	return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_clkdiv);

/**
 * snd_soc_dai_set_pll - configure DAI PLL.
 * @dai: DAI
 * @pll_id: DAI specific PLL ID
 * @source: DAI specific source for the PLL
 * @freq_in: PLL input clock frequency in Hz
 * @freq_out: requested PLL output clock frequency in Hz
 *
 * Configures and enables PLL to generate output clock based on input clock.
 */
int snd_soc_dai_set_pll(struct snd_soc_dai *dai, int pll_id, int source,
			unsigned int freq_in, unsigned int freq_out)
{
	int ret;

	/* prefer the DAI-level op; fall back to the component-level one */
	if (dai->driver->ops &&
	    dai->driver->ops->set_pll)
		ret = dai->driver->ops->set_pll(dai, pll_id, source,
						freq_in, freq_out);
	else
		ret = snd_soc_component_set_pll(dai->component, pll_id, source,
						freq_in, freq_out);

	return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_pll);

/**
 * snd_soc_dai_set_bclk_ratio - configure BCLK to sample rate ratio.
 * @dai: DAI
 * @ratio: Ratio of BCLK to Sample rate.
 *
 * Configures the DAI for a preset BCLK to sample rate ratio.
 */
int snd_soc_dai_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
{
	int ret = -ENOTSUPP;

	if (dai->driver->ops &&
	    dai->driver->ops->set_bclk_ratio)
		ret = dai->driver->ops->set_bclk_ratio(dai, ratio);

	return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_bclk_ratio);

/*
 * Returns the smallest num_auto_selectable_formats across the runtime's
 * DAIs, or 0 as soon as any DAI does not provide the table at all.
 */
int snd_soc_dai_get_fmt_max_priority(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_dai *dai;
	int i, max = 0;

	/*
	 * return max num if *ALL* DAIs have .auto_selectable_formats
	 */
	for_each_rtd_dais(rtd, i, dai) {
		if (dai->driver->ops &&
		    dai->driver->ops->num_auto_selectable_formats)
			max = max(max,
				  dai->driver->ops->num_auto_selectable_formats);
		else
			return 0;
	}

	return max;
}

/**
 * snd_soc_dai_get_fmt - get supported audio format.
 * @dai: DAI
 * @priority: priority level of supported audio format.
 *
 * This should return only formats implemented with high
 * quality by the DAI so that the core can configure a
 * format which will work well with other devices.
 * For example devices which don't support both edges of the
 * LRCLK signal in I2S style formats should only list DSP
 * modes.  This will mean that sometimes fewer formats
 * are reported here than are supported by set_fmt().
 */
u64 snd_soc_dai_get_fmt(struct snd_soc_dai *dai, int priority)
{
	const struct snd_soc_dai_ops *ops = dai->driver->ops;
	u64 fmt = 0;
	int i, max = 0, until = priority;

	/*
	 * Collect auto_selectable_formats until priority
	 *
	 * ex)
	 *	auto_selectable_formats[] = { A, B, C };
	 *	(A, B, C = SND_SOC_POSSIBLE_DAIFMT_xxx)
	 *
	 *	priority = 1 :	A
	 *	priority = 2 :	A | B
	 *	priority = 3 :	A | B | C
	 *	priority = 4 :	A | B | C
	 *	...
	 */
	if (ops)
		max = ops->num_auto_selectable_formats;
	/* clamp the requested priority to the number of available entries */
	if (max < until)
		until = max;

	for (i = 0; i < until; i++)
		fmt |= ops->auto_selectable_formats[i];

	return fmt;
}

/**
 * snd_soc_dai_set_fmt - configure DAI hardware audio format.
 * @dai: DAI
 * @fmt: SND_SOC_DAIFMT_* format value.
 *
 * Configures the DAI hardware format and clocking.
 */
int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	int ret = -ENOTSUPP;

	if (dai->driver->ops &&
	    dai->driver->ops->set_fmt)
		ret = dai->driver->ops->set_fmt(dai, fmt);

	return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_fmt);

/**
 * snd_soc_xlate_tdm_slot_mask - generate tx/rx slot mask.
 * @slots: Number of slots in use.
 * @tx_mask: bitmask representing active TX slots.
 * @rx_mask: bitmask representing active RX slots.
 *
 * Generates the TDM tx and rx slot default masks for DAI.
*/ static int snd_soc_xlate_tdm_slot_mask(unsigned int slots, unsigned int *tx_mask, unsigned int *rx_mask) { if (*tx_mask || *rx_mask) return 0; if (!slots) return -EINVAL; *tx_mask = (1 << slots) - 1; *rx_mask = (1 << slots) - 1; return 0; } /** * snd_soc_dai_set_tdm_slot() - Configures a DAI for TDM operation * @dai: The DAI to configure * @tx_mask: bitmask representing active TX slots. * @rx_mask: bitmask representing active RX slots. * @slots: Number of slots in use. * @slot_width: Width in bits for each slot. * * This function configures the specified DAI for TDM operation. @slot contains * the total number of slots of the TDM stream and @slot_with the width of each * slot in bit clock cycles. @tx_mask and @rx_mask are bitmasks specifying the * active slots of the TDM stream for the specified DAI, i.e. which slots the * DAI should write to or read from. If a bit is set the corresponding slot is * active, if a bit is cleared the corresponding slot is inactive. Bit 0 maps to * the first slot, bit 1 to the second slot and so on. The first active slot * maps to the first channel of the DAI, the second active slot to the second * channel and so on. * * TDM mode can be disabled by passing 0 for @slots. In this case @tx_mask, * @rx_mask and @slot_width will be ignored. * * Returns 0 on success, a negative error code otherwise. 
*/ int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) { int ret = -ENOTSUPP; int stream; unsigned int *tdm_mask[] = { &tx_mask, &rx_mask, }; if (dai->driver->ops && dai->driver->ops->xlate_tdm_slot_mask) dai->driver->ops->xlate_tdm_slot_mask(slots, &tx_mask, &rx_mask); else snd_soc_xlate_tdm_slot_mask(slots, &tx_mask, &rx_mask); for_each_pcm_streams(stream) snd_soc_dai_tdm_mask_set(dai, stream, *tdm_mask[stream]); if (dai->driver->ops && dai->driver->ops->set_tdm_slot) ret = dai->driver->ops->set_tdm_slot(dai, tx_mask, rx_mask, slots, slot_width); return soc_dai_ret(dai, ret); } EXPORT_SYMBOL_GPL(snd_soc_dai_set_tdm_slot); /** * snd_soc_dai_set_channel_map - configure DAI audio channel map * @dai: DAI * @tx_num: how many TX channels * @tx_slot: pointer to an array which imply the TX slot number channel * 0~num-1 uses * @rx_num: how many RX channels * @rx_slot: pointer to an array which imply the RX slot number channel * 0~num-1 uses * * configure the relationship between channel number and TDM slot number. 
 */
int snd_soc_dai_set_channel_map(struct snd_soc_dai *dai,
				unsigned int tx_num, unsigned int *tx_slot,
				unsigned int rx_num, unsigned int *rx_slot)
{
	int ret = -ENOTSUPP;

	if (dai->driver->ops &&
	    dai->driver->ops->set_channel_map)
		ret = dai->driver->ops->set_channel_map(dai, tx_num, tx_slot,
							rx_num, rx_slot);
	return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_channel_map);

/**
 * snd_soc_dai_get_channel_map - Get DAI audio channel map
 * @dai: DAI
 * @tx_num: how many TX channels
 * @tx_slot: pointer to an array which imply the TX slot number channel
 *           0~num-1 uses
 * @rx_num: how many RX channels
 * @rx_slot: pointer to an array which imply the RX slot number channel
 *           0~num-1 uses
 */
int snd_soc_dai_get_channel_map(struct snd_soc_dai *dai,
				unsigned int *tx_num, unsigned int *tx_slot,
				unsigned int *rx_num, unsigned int *rx_slot)
{
	int ret = -ENOTSUPP;

	if (dai->driver->ops &&
	    dai->driver->ops->get_channel_map)
		ret = dai->driver->ops->get_channel_map(dai, tx_num, tx_slot,
							rx_num, rx_slot);
	return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_get_channel_map);

/**
 * snd_soc_dai_set_tristate - configure DAI system or master clock.
 * @dai: DAI
 * @tristate: tristate enable
 *
 * Tristates the DAI so that others can use it.
 */
int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate)
{
	int ret = -EINVAL;

	if (dai->driver->ops &&
	    dai->driver->ops->set_tristate)
		ret = dai->driver->ops->set_tristate(dai, tristate);

	return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_tristate);

/**
 * snd_soc_dai_digital_mute - configure DAI system or master clock.
 * @dai: DAI
 * @mute: mute enable
 * @direction: stream to mute
 *
 * Mutes the DAI DAC.
 */
int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute,
			     int direction)
{
	int ret = -ENOTSUPP;

	/*
	 * ignore if direction was CAPTURE
	 * and it had .no_capture_mute flag
	 */
	if (dai->driver->ops &&
	    dai->driver->ops->mute_stream &&
	    (direction == SNDRV_PCM_STREAM_PLAYBACK ||
	     !dai->driver->ops->no_capture_mute))
		ret = dai->driver->ops->mute_stream(dai, mute, direction);

	return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_digital_mute);

/* call the DAI hw_params op and, on success, mark the substream for rollback */
int snd_soc_dai_hw_params(struct snd_soc_dai *dai,
			  struct snd_pcm_substream *substream,
			  struct snd_pcm_hw_params *params)
{
	int ret = 0;

	if (dai->driver->ops &&
	    dai->driver->ops->hw_params)
		ret = dai->driver->ops->hw_params(substream, params, dai);

	/* mark substream if succeeded */
	if (ret == 0)
		soc_dai_mark_push(dai, substream, hw_params);

	return soc_dai_ret(dai, ret);
}

/*
 * Undo hw_params.  With @rollback set, only act on a substream previously
 * marked by a successful snd_soc_dai_hw_params().
 */
void snd_soc_dai_hw_free(struct snd_soc_dai *dai,
			 struct snd_pcm_substream *substream,
			 int rollback)
{
	if (rollback && !soc_dai_mark_match(dai, substream, hw_params))
		return;

	if (dai->driver->ops &&
	    dai->driver->ops->hw_free)
		dai->driver->ops->hw_free(substream, dai);

	/* remove marked substream */
	soc_dai_mark_pop(dai, substream, hw_params);
}

/* open the DAI for @substream; no-op for stream directions it lacks */
int snd_soc_dai_startup(struct snd_soc_dai *dai,
			struct snd_pcm_substream *substream)
{
	int ret = 0;

	if (!snd_soc_dai_stream_valid(dai, substream->stream))
		return 0;

	if (dai->driver->ops &&
	    dai->driver->ops->startup)
		ret = dai->driver->ops->startup(substream, dai);

	/* mark substream if succeeded */
	if (ret == 0)
		soc_dai_mark_push(dai, substream, startup);

	return soc_dai_ret(dai, ret);
}

/*
 * Close the DAI.  With @rollback set, only act on a substream previously
 * marked by a successful snd_soc_dai_startup().
 */
void snd_soc_dai_shutdown(struct snd_soc_dai *dai,
			  struct snd_pcm_substream *substream,
			  int rollback)
{
	if (!snd_soc_dai_stream_valid(dai, substream->stream))
		return;

	if (rollback && !soc_dai_mark_match(dai, substream, startup))
		return;

	if (dai->driver->ops &&
	    dai->driver->ops->shutdown)
		dai->driver->ops->shutdown(substream, dai);

	/* remove marked substream */
	soc_dai_mark_pop(dai, substream, startup);
}

int
snd_soc_dai_compress_new(struct snd_soc_dai *dai,
			 struct snd_soc_pcm_runtime *rtd, int num)
{
	int ret = -ENOTSUPP;

	if (dai->driver->ops &&
	    dai->driver->ops->compress_new)
		ret = dai->driver->ops->compress_new(rtd, num);

	return soc_dai_ret(dai, ret);
}

/*
 * snd_soc_dai_stream_valid() - check if a DAI supports the given stream
 *
 * Returns true if the DAI supports the indicated stream type.
 */
bool snd_soc_dai_stream_valid(struct snd_soc_dai *dai, int dir)
{
	struct snd_soc_pcm_stream *stream = snd_soc_dai_get_pcm_stream(dai, dir);

	/* If the codec specifies any channels at all, it supports the stream */
	return stream->channels_min;
}

/*
 * snd_soc_dai_link_set_capabilities() - set dai_link properties based on its DAIs
 */
void snd_soc_dai_link_set_capabilities(struct snd_soc_dai_link *dai_link)
{
	bool supported[SNDRV_PCM_STREAM_LAST + 1];
	int direction;

	for_each_pcm_streams(direction) {
		struct snd_soc_dai_link_component *cpu;
		struct snd_soc_dai_link_component *codec;
		struct snd_soc_dai *dai;
		bool supported_cpu = false;
		bool supported_codec = false;
		int i;

		/* a direction works if at least one CPU DAI supports it... */
		for_each_link_cpus(dai_link, i, cpu) {
			dai = snd_soc_find_dai_with_mutex(cpu);
			if (dai && snd_soc_dai_stream_valid(dai, direction)) {
				supported_cpu = true;
				break;
			}
		}
		/* ...and at least one codec DAI supports it too */
		for_each_link_codecs(dai_link, i, codec) {
			dai = snd_soc_find_dai_with_mutex(codec);
			if (dai && snd_soc_dai_stream_valid(dai, direction)) {
				supported_codec = true;
				break;
			}
		}
		supported[direction] = supported_cpu && supported_codec;
	}

	dai_link->dpcm_playback = supported[SNDRV_PCM_STREAM_PLAYBACK];
	dai_link->dpcm_capture  = supported[SNDRV_PCM_STREAM_CAPTURE];
}
EXPORT_SYMBOL_GPL(snd_soc_dai_link_set_capabilities);

/* adjust per-stream and per-component activity counters by @action */
void snd_soc_dai_action(struct snd_soc_dai *dai,
			int stream, int action)
{
	/* see snd_soc_dai_stream_active() */
	dai->stream[stream].active += action;

	/* see snd_soc_component_active() */
	dai->component->active += action;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_action);

/* total active count across both stream directions of @dai */
int snd_soc_dai_active(struct snd_soc_dai *dai)
{
	int stream, active;

	active = 0;
	for_each_pcm_streams(stream)
		active += dai->stream[stream].active;

	return active;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_active);

/* probe every not-yet-probed DAI of @rtd whose probe_order matches @order */
int snd_soc_pcm_dai_probe(struct snd_soc_pcm_runtime *rtd, int order)
{
	struct snd_soc_dai *dai;
	int i;

	for_each_rtd_dais(rtd, i, dai) {
		if (dai->probed)
			continue;

		if (dai->driver->ops) {
			if (dai->driver->ops->probe_order != order)
				continue;

			if (dai->driver->ops->probe) {
				int ret = dai->driver->ops->probe(dai);

				if (ret < 0)
					return soc_dai_ret(dai, ret);
			}
		}
		dai->probed = 1;
	}

	return 0;
}

/* remove every probed DAI of @rtd whose remove_order matches @order */
int snd_soc_pcm_dai_remove(struct snd_soc_pcm_runtime *rtd, int order)
{
	struct snd_soc_dai *dai;
	int i, r, ret = 0;

	for_each_rtd_dais(rtd, i, dai) {
		if (!dai->probed)
			continue;

		if (dai->driver->ops) {
			if (dai->driver->ops->remove_order != order)
				continue;

			if (dai->driver->ops->remove) {
				r = dai->driver->ops->remove(dai);
				if (r < 0)
					ret = r; /* use last error */
			}
		}
		dai->probed = 0;
	}

	return ret;
}

/* run the optional pcm_new op of every DAI of @rtd; stop on first error */
int snd_soc_pcm_dai_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_dai *dai;
	int i;

	for_each_rtd_dais(rtd, i, dai) {
		if (dai->driver->ops &&
		    dai->driver->ops->pcm_new) {
			int ret = dai->driver->ops->pcm_new(rtd, dai);

			if (ret < 0)
				return soc_dai_ret(dai, ret);
		}
	}

	return 0;
}

/* call prepare on every DAI of the substream's runtime; stop on first error */
int snd_soc_pcm_dai_prepare(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *dai;
	int i, ret;

	for_each_rtd_dais(rtd, i, dai) {
		if (!snd_soc_dai_stream_valid(dai, substream->stream))
			continue;
		if (dai->driver->ops &&
		    dai->driver->ops->prepare) {
			ret = dai->driver->ops->prepare(substream, dai);
			if (ret < 0)
				return soc_dai_ret(dai, ret);
		}
	}

	return 0;
}

/* trigger a single DAI; no-op for stream directions it lacks */
static int soc_dai_trigger(struct snd_soc_dai *dai,
			   struct snd_pcm_substream *substream, int cmd)
{
	int ret = 0;

	if (!snd_soc_dai_stream_valid(dai, substream->stream))
		return 0;

	if (dai->driver->ops &&
	    dai->driver->ops->trigger)
		ret = dai->driver->ops->trigger(substream, cmd, dai);

	return soc_dai_ret(dai, ret);
}

int snd_soc_pcm_dai_trigger(struct snd_pcm_substream
			    *substream,
			    int cmd, int rollback)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *dai;
	int i, r, ret = 0;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		/* start-type commands: stop at the first failure */
		for_each_rtd_dais(rtd, i, dai) {
			ret = soc_dai_trigger(dai, substream, cmd);
			if (ret < 0)
				break;
			soc_dai_mark_push(dai, substream, trigger);
		}
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		/* stop-type commands: keep going, remember the last error */
		for_each_rtd_dais(rtd, i, dai) {
			if (rollback &&
			    !soc_dai_mark_match(dai, substream, trigger))
				continue;

			r = soc_dai_trigger(dai, substream, cmd);
			if (r < 0)
				ret = r; /* use last ret */
			soc_dai_mark_pop(dai, substream, trigger);
		}
	}

	return ret;
}

/* run the optional bespoke_trigger op on all DAIs; stop on first error */
int snd_soc_pcm_dai_bespoke_trigger(struct snd_pcm_substream *substream,
				    int cmd)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *dai;
	int i, ret;

	for_each_rtd_dais(rtd, i, dai) {
		if (dai->driver->ops &&
		    dai->driver->ops->bespoke_trigger) {
			ret = dai->driver->ops->bespoke_trigger(substream,
								cmd, dai);
			if (ret < 0)
				return soc_dai_ret(dai, ret);
		}
	}

	return 0;
}

void snd_soc_pcm_dai_delay(struct snd_pcm_substream *substream,
			   snd_pcm_sframes_t *cpu_delay,
			   snd_pcm_sframes_t *codec_delay)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *dai;
	int i;

	/*
	 * We're looking for the delay through the full audio path so it needs to
	 * be the maximum of the DAIs doing transmit and the maximum of the DAIs
	 * doing receive (ie, all CPUs and all CODECs) rather than just the maximum
	 * of all DAIs.
	 */

	/* for CPU */
	for_each_rtd_cpu_dais(rtd, i, dai)
		if (dai->driver->ops &&
		    dai->driver->ops->delay)
			*cpu_delay = max(*cpu_delay,
					 dai->driver->ops->delay(substream, dai));

	/* for Codec */
	for_each_rtd_codec_dais(rtd, i, dai)
		if (dai->driver->ops &&
		    dai->driver->ops->delay)
			*codec_delay = max(*codec_delay,
					   dai->driver->ops->delay(substream, dai));
}

/* compressed-stream open; marks the cstream for rollback on success */
int snd_soc_dai_compr_startup(struct snd_soc_dai *dai,
			      struct snd_compr_stream *cstream)
{
	int ret = 0;

	if (dai->driver->cops &&
	    dai->driver->cops->startup)
		ret = dai->driver->cops->startup(cstream, dai);

	/* mark cstream if succeeded */
	if (ret == 0)
		soc_dai_mark_push(dai, cstream, compr_startup);

	return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_compr_startup);

/*
 * Compressed-stream close.  With @rollback set, only act on a cstream
 * previously marked by a successful snd_soc_dai_compr_startup().
 */
void snd_soc_dai_compr_shutdown(struct snd_soc_dai *dai,
				struct snd_compr_stream *cstream,
				int rollback)
{
	if (rollback && !soc_dai_mark_match(dai, cstream, compr_startup))
		return;

	if (dai->driver->cops &&
	    dai->driver->cops->shutdown)
		dai->driver->cops->shutdown(cstream, dai);

	/* remove marked cstream */
	soc_dai_mark_pop(dai, cstream, compr_startup);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_compr_shutdown);

int snd_soc_dai_compr_trigger(struct snd_soc_dai *dai,
			      struct snd_compr_stream *cstream, int cmd)
{
	int ret = 0;

	if (dai->driver->cops &&
	    dai->driver->cops->trigger)
		ret = dai->driver->cops->trigger(cstream, cmd, dai);

	return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_compr_trigger);

int snd_soc_dai_compr_set_params(struct snd_soc_dai *dai,
				 struct snd_compr_stream *cstream,
				 struct snd_compr_params *params)
{
	int ret = 0;

	if (dai->driver->cops &&
	    dai->driver->cops->set_params)
		ret = dai->driver->cops->set_params(cstream, params, dai);

	return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_compr_set_params);

int snd_soc_dai_compr_get_params(struct snd_soc_dai *dai,
				 struct snd_compr_stream *cstream,
				 struct snd_codec *params)
{
	int ret = 0;

	if (dai->driver->cops &&
	    dai->driver->cops->get_params)
		ret = dai->driver->cops->get_params(cstream,
						    params, dai);

	return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_compr_get_params);

int snd_soc_dai_compr_ack(struct snd_soc_dai *dai,
			  struct snd_compr_stream *cstream,
			  size_t bytes)
{
	int ret = 0;

	if (dai->driver->cops &&
	    dai->driver->cops->ack)
		ret = dai->driver->cops->ack(cstream, bytes, dai);

	return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_compr_ack);

int snd_soc_dai_compr_pointer(struct snd_soc_dai *dai,
			      struct snd_compr_stream *cstream,
			      struct snd_compr_tstamp *tstamp)
{
	int ret = 0;

	if (dai->driver->cops &&
	    dai->driver->cops->pointer)
		ret = dai->driver->cops->pointer(cstream, tstamp, dai);

	return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_compr_pointer);

int snd_soc_dai_compr_set_metadata(struct snd_soc_dai *dai,
				   struct snd_compr_stream *cstream,
				   struct snd_compr_metadata *metadata)
{
	int ret = 0;

	if (dai->driver->cops &&
	    dai->driver->cops->set_metadata)
		ret = dai->driver->cops->set_metadata(cstream, metadata, dai);

	return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_compr_set_metadata);

int snd_soc_dai_compr_get_metadata(struct snd_soc_dai *dai,
				   struct snd_compr_stream *cstream,
				   struct snd_compr_metadata *metadata)
{
	int ret = 0;

	if (dai->driver->cops &&
	    dai->driver->cops->get_metadata)
		ret = dai->driver->cops->get_metadata(cstream, metadata, dai);

	return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_compr_get_metadata);
linux-master
sound/soc/soc-dai.c
// SPDX-License-Identifier: GPL-2.0+ // // soc-core.c -- ALSA SoC Audio Layer // // Copyright 2005 Wolfson Microelectronics PLC. // Copyright 2005 Openedhand Ltd. // Copyright (C) 2010 Slimlogic Ltd. // Copyright (C) 2010 Texas Instruments Inc. // // Author: Liam Girdwood <[email protected]> // with code, comments and ideas from :- // Richard Purdie <[email protected]> // // TODO: // o Add hw rules to enforce rates, etc. // o More testing with other codecs/machines. // o Add more codecs and platforms to ensure good API coverage. // o Support TDM on PCM and I2S #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/bitops.h> #include <linux/debugfs.h> #include <linux/platform_device.h> #include <linux/pinctrl/consumer.h> #include <linux/ctype.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/of_graph.h> #include <linux/dmi.h> #include <linux/acpi.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-dpcm.h> #include <sound/soc-topology.h> #include <sound/soc-link.h> #include <sound/initval.h> #define CREATE_TRACE_POINTS #include <trace/events/asoc.h> static DEFINE_MUTEX(client_mutex); static LIST_HEAD(component_list); static LIST_HEAD(unbind_card_list); #define for_each_component(component) \ list_for_each_entry(component, &component_list, list) /* * This is used if driver don't need to have CPU/Codec/Platform * dai_link. see soc.h */ struct snd_soc_dai_link_component null_dailink_component[0]; EXPORT_SYMBOL_GPL(null_dailink_component); /* * This is a timeout to do a DAPM powerdown after a stream is closed(). * It can be used to eliminate pops between different playback streams, e.g. * between two audio tracks. 
 */
static int pmdown_time = 5000;
module_param(pmdown_time, int, 0);
MODULE_PARM_DESC(pmdown_time, "DAPM stream powerdown time (msecs)");

/* sysfs: show the runtime's current powerdown delay in msecs */
static ssize_t pmdown_time_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%ld\n", rtd->pmdown_time);
}

/* sysfs: update the runtime's powerdown delay from a decimal string */
static ssize_t pmdown_time_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
	int ret;

	ret = kstrtol(buf, 10, &rtd->pmdown_time);
	if (ret)
		return ret;

	return count;
}

static DEVICE_ATTR_RW(pmdown_time);

static struct attribute *soc_dev_attrs[] = {
	&dev_attr_pmdown_time.attr,
	NULL
};

/* hide codec-related attributes on runtimes that have no codec */
static umode_t soc_dev_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);

	if (!rtd)
		return 0;

	if (attr == &dev_attr_pmdown_time.attr)
		return attr->mode; /* always visible */
	return rtd->dai_link->num_codecs ?
		attr->mode : 0; /* enabled only with codec */
}

static const struct attribute_group soc_dapm_dev_group = {
	.attrs = soc_dapm_dev_attrs,
	.is_visible = soc_dev_attr_is_visible,
};

static const struct attribute_group soc_dev_group = {
	.attrs = soc_dev_attrs,
	.is_visible = soc_dev_attr_is_visible,
};

static const struct attribute_group *soc_dev_attr_groups[] = {
	&soc_dapm_dev_group,
	&soc_dev_group,
	NULL
};

#ifdef CONFIG_DEBUG_FS
struct dentry *snd_soc_debugfs_root;
EXPORT_SYMBOL_GPL(snd_soc_debugfs_root);

/* create the component's debugfs dir under its card's debugfs dir */
static void soc_init_component_debugfs(struct snd_soc_component *component)
{
	if (!component->card->debugfs_card_root)
		return;

	if (component->debugfs_prefix) {
		char *name;

		/* prefix disambiguates same-named components on one card */
		name = kasprintf(GFP_KERNEL, "%s:%s",
				 component->debugfs_prefix, component->name);
		if (name) {
			component->debugfs_root = debugfs_create_dir(name,
					component->card->debugfs_card_root);
			kfree(name);
		}
	} else {
		component->debugfs_root = debugfs_create_dir(component->name,
				component->card->debugfs_card_root);
	}

	snd_soc_dapm_debugfs_init(snd_soc_component_get_dapm(component),
				  component->debugfs_root);
}

static void soc_cleanup_component_debugfs(struct snd_soc_component *component)
{
	if (!component->debugfs_root)
		return;
	debugfs_remove_recursive(component->debugfs_root);
	component->debugfs_root = NULL;
}

/* debugfs "dais": list every registered DAI name, one per line */
static int dai_list_show(struct seq_file *m, void *v)
{
	struct snd_soc_component *component;
	struct snd_soc_dai *dai;

	mutex_lock(&client_mutex);

	for_each_component(component)
		for_each_component_dais(component, dai)
			seq_printf(m, "%s\n", dai->name);

	mutex_unlock(&client_mutex);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dai_list);

/* debugfs "components": list every registered component name */
static int component_list_show(struct seq_file *m, void *v)
{
	struct snd_soc_component *component;

	mutex_lock(&client_mutex);

	for_each_component(component)
		seq_printf(m, "%s\n", component->name);

	mutex_unlock(&client_mutex);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(component_list);

static void soc_init_card_debugfs(struct snd_soc_card *card)
{
	card->debugfs_card_root =
		debugfs_create_dir(card->name, snd_soc_debugfs_root);

	debugfs_create_u32("dapm_pop_time", 0644, card->debugfs_card_root,
			   &card->pop_time);

	snd_soc_dapm_debugfs_init(&card->dapm, card->debugfs_card_root);
}

static void soc_cleanup_card_debugfs(struct snd_soc_card *card)
{
	debugfs_remove_recursive(card->debugfs_card_root);
	card->debugfs_card_root = NULL;
}

/* create the top-level "asoc" debugfs dir and its global listings */
static void snd_soc_debugfs_init(void)
{
	snd_soc_debugfs_root = debugfs_create_dir("asoc", NULL);

	debugfs_create_file("dais", 0444, snd_soc_debugfs_root, NULL,
			    &dai_list_fops);

	debugfs_create_file("components", 0444, snd_soc_debugfs_root, NULL,
			    &component_list_fops);
}

static void snd_soc_debugfs_exit(void)
{
	debugfs_remove_recursive(snd_soc_debugfs_root);
}

#else

/* !CONFIG_DEBUG_FS: all debugfs hooks compile away to no-ops */

static inline void soc_init_component_debugfs(struct snd_soc_component *component)
{
}

static inline void soc_cleanup_component_debugfs(struct snd_soc_component *component)
{
}

static inline void soc_init_card_debugfs(struct snd_soc_card *card)
{
}

static inline void soc_cleanup_card_debugfs(struct snd_soc_card *card)
{
}

static inline void snd_soc_debugfs_init(void)
{
}

static inline void snd_soc_debugfs_exit(void)
{
}

#endif

/* compare two OF phandle arg sets: same node and identical argument list */
static int snd_soc_is_match_dai_args(struct of_phandle_args *args1,
				     struct of_phandle_args *args2)
{
	if (!args1 || !args2)
		return 0;

	if (args1->np != args2->np)
		return 0;

	for (int i = 0; i < args1->args_count; i++)
		if (args1->args[i] != args2->args[i])
			return 0;

	return 1;
}

/* a dai_link component entry with none of args/name/of_node set is empty */
static inline int snd_soc_dlc_component_is_empty(struct snd_soc_dai_link_component *dlc)
{
	return !(dlc->dai_args || dlc->name || dlc->of_node);
}

/* a name AND an of_node at the same time is ambiguous, hence invalid */
static inline int snd_soc_dlc_component_is_invalid(struct snd_soc_dai_link_component *dlc)
{
	return (dlc->name && dlc->of_node);
}

static inline int snd_soc_dlc_dai_is_empty(struct snd_soc_dai_link_component *dlc)
{
	return !(dlc->dai_args || dlc->dai_name);
}

/* does @dai match the dai_link component description @dlc? */
static int snd_soc_is_matching_dai(const struct snd_soc_dai_link_component *dlc,
				   struct snd_soc_dai *dai)
{
	if (!dlc)
		return 0;

	if (dlc->dai_args)
		return
			snd_soc_is_match_dai_args(dai->driver->dai_args,
						  dlc->dai_args);

	if (!dlc->dai_name)
		return 1;

	/* see snd_soc_dai_name_get() */

	if (strcmp(dlc->dai_name, dai->name) == 0)
		return 1;

	if (dai->driver->name &&
	    strcmp(dai->driver->name, dlc->dai_name) == 0)
		return 1;

	if (dai->component->name &&
	    strcmp(dlc->dai_name, dai->component->name) == 0)
		return 1;

	return 0;
}

/* best available display name for @dai: DAI > driver > component name */
const char *snd_soc_dai_name_get(struct snd_soc_dai *dai)
{
	/* see snd_soc_is_matching_dai() */
	if (dai->name)
		return dai->name;

	if (dai->driver->name)
		return dai->driver->name;

	if (dai->component->name)
		return dai->component->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(snd_soc_dai_name_get);

/* append @component to @rtd's component array unless already present */
static int snd_soc_rtd_add_component(struct snd_soc_pcm_runtime *rtd,
				     struct snd_soc_component *component)
{
	struct snd_soc_component *comp;
	int i;

	for_each_rtd_components(rtd, i, comp) {
		/* already connected */
		if (comp == component)
			return 0;
	}

	/* see for_each_rtd_components */
	rtd->components[rtd->num_components] = component;
	rtd->num_components++;

	return 0;
}

struct snd_soc_component *snd_soc_rtdcom_lookup(struct snd_soc_pcm_runtime *rtd,
						const char *driver_name)
{
	struct snd_soc_component *component;
	int i;

	if (!driver_name)
		return NULL;

	/*
	 * NOTE
	 *
	 * snd_soc_rtdcom_lookup() will find component from rtd by using
	 * specified driver name.
	 * But, if many components which have same driver name are connected
	 * to 1 rtd, this function will return 1st found component.
	 */
	for_each_rtd_components(rtd, i, component) {
		const char *component_name = component->driver->name;

		if (!component_name)
			continue;

		/* pointer compare first as a fast path, then string compare */
		if ((component_name == driver_name) ||
		    strcmp(component_name, driver_name) == 0)
			return component;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(snd_soc_rtdcom_lookup);

/* caller must hold client_mutex; see snd_soc_lookup_component() */
struct snd_soc_component
*snd_soc_lookup_component_nolocked(struct device *dev, const char *driver_name)
{
	struct snd_soc_component *component;
	struct snd_soc_component *found_component;

	found_component = NULL;
	for_each_component(component) {
		if ((dev == component->dev) &&
		    (!driver_name ||
		     (driver_name == component->driver->name) ||
		     (strcmp(component->driver->name, driver_name) == 0))) {
			found_component = component;
			break;
		}
	}

	return found_component;
}
EXPORT_SYMBOL_GPL(snd_soc_lookup_component_nolocked);

/* locked wrapper: find the component registered for @dev (and driver name) */
struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
						   const char *driver_name)
{
	struct snd_soc_component *component;

	mutex_lock(&client_mutex);
	component = snd_soc_lookup_component_nolocked(dev, driver_name);
	mutex_unlock(&client_mutex);

	return component;
}
EXPORT_SYMBOL_GPL(snd_soc_lookup_component);

/* find the runtime instantiated for @dai_link on @card, or NULL */
struct snd_soc_pcm_runtime
*snd_soc_get_pcm_runtime(struct snd_soc_card *card,
			 struct snd_soc_dai_link *dai_link)
{
	struct snd_soc_pcm_runtime *rtd;

	for_each_card_rtds(card, rtd) {
		if (rtd->dai_link == dai_link)
			return rtd;
	}
	dev_dbg(card->dev, "ASoC: failed to find rtd %s\n", dai_link->name);
	return NULL;
}
EXPORT_SYMBOL_GPL(snd_soc_get_pcm_runtime);

/*
 * Power down the audio subsystem pmdown_time msecs after close is called.
 * This is to ensure there are no pops or clicks in between any music tracks
 * due to DAPM power cycling.
 */
void snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
	int playback = SNDRV_PCM_STREAM_PLAYBACK;

	snd_soc_dpcm_mutex_lock(rtd);

	dev_dbg(rtd->dev,
		"ASoC: pop wq checking: %s status: %s waiting: %s\n",
		codec_dai->driver->playback.stream_name,
		snd_soc_dai_stream_active(codec_dai, playback) ?
		"active" : "inactive",
		rtd->pop_wait ? "yes" : "no");

	/* are we waiting on this codec DAI stream */
	if (rtd->pop_wait == 1) {
		rtd->pop_wait = 0;
		snd_soc_dapm_stream_event(rtd, playback,
					  SND_SOC_DAPM_STREAM_STOP);
	}

	snd_soc_dpcm_mutex_unlock(rtd);
}
EXPORT_SYMBOL_GPL(snd_soc_close_delayed_work);

/* struct device release callback for the rtd's embedded device */
static void soc_release_rtd_dev(struct device *dev)
{
	/* "dev" means "rtd->dev" */
	kfree(dev);
}

static void soc_free_pcm_runtime(struct snd_soc_pcm_runtime *rtd)
{
	if (!rtd)
		return;

	list_del(&rtd->list);

	/* make sure no deferred pop-suppression work is still queued */
	if (delayed_work_pending(&rtd->delayed_work))
		flush_delayed_work(&rtd->delayed_work);
	snd_soc_pcm_component_free(rtd);

	/*
	 * we don't need to call kfree() for rtd->dev
	 * see
	 *	soc_release_rtd_dev()
	 *
	 * We don't need rtd->dev NULL check, because
	 * it is alloced *before* rtd.
	 * see
	 *	soc_new_pcm_runtime()
	 *
	 * We don't need to mind freeing for rtd,
	 * because it was created from dev (= rtd->dev)
	 * see
	 *	soc_new_pcm_runtime()
	 *
	 *		rtd = devm_kzalloc(dev, ...);
	 *		rtd->dev = dev
	 */
	device_unregister(rtd->dev);
}

/* delayed-work trampoline: invoke the runtime's close handler if set */
static void close_delayed_work(struct work_struct *work)
{
	struct snd_soc_pcm_runtime *rtd =
			container_of(work, struct snd_soc_pcm_runtime,
				     delayed_work.work);

	if (rtd->close_delayed_work_func)
		rtd->close_delayed_work_func(rtd);
}

/*
 * Allocate and register a runtime (and its backing struct device) for
 * @dai_link on @card.  Returns NULL on any failure; all partially created
 * state is torn down via soc_free_pcm_runtime()/device_unregister().
 */
static struct snd_soc_pcm_runtime *soc_new_pcm_runtime(
	struct snd_soc_card *card, struct snd_soc_dai_link *dai_link)
{
	struct snd_soc_pcm_runtime *rtd;
	struct snd_soc_component *component;
	struct device *dev;
	int ret;
	int stream;

	/*
	 * for rtd->dev
	 */
	dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!dev)
		return NULL;

	dev->parent	= card->dev;
	dev->release	= soc_release_rtd_dev;

	dev_set_name(dev, "%s", dai_link->name);

	ret = device_register(dev);
	if (ret < 0) {
		put_device(dev); /* soc_release_rtd_dev */
		return NULL;
	}

	/*
	 * for rtd: the component pointer array for all CPUs, codecs and
	 * platforms of this link is allocated inline after the struct
	 */
	rtd = devm_kzalloc(dev,
			   sizeof(*rtd) +
			   sizeof(component) * (dai_link->num_cpus +
						dai_link->num_codecs +
						dai_link->num_platforms),
			   GFP_KERNEL);
	if (!rtd) {
		device_unregister(dev);
		return NULL;
	}

	rtd->dev = dev;
	INIT_LIST_HEAD(&rtd->list);
	for_each_pcm_streams(stream) {
		INIT_LIST_HEAD(&rtd->dpcm[stream].be_clients);
		INIT_LIST_HEAD(&rtd->dpcm[stream].fe_clients);
	}
	dev_set_drvdata(dev, rtd);
	INIT_DELAYED_WORK(&rtd->delayed_work, close_delayed_work);

	/*
	 * for rtd->dais
	 */
	rtd->dais = devm_kcalloc(dev, dai_link->num_cpus + dai_link->num_codecs,
					sizeof(struct snd_soc_dai *),
					GFP_KERNEL);
	if (!rtd->dais)
		goto free_rtd;

	/*
	 * dais = [][][][][][][][][][][][][][][][][][]
	 *	  ^cpu_dais         ^codec_dais
	 *	  |--- num_cpus ---|--- num_codecs --|
	 * see
	 *	asoc_rtd_to_cpu()
	 *	asoc_rtd_to_codec()
	 */
	rtd->card	= card;
	rtd->dai_link	= dai_link;
	rtd->num	= card->num_rtd++;
	rtd->pmdown_time = pmdown_time;	/* default power off timeout */

	/* see for_each_card_rtds */
	list_add_tail(&rtd->list,
&card->rtd_list); ret = device_add_groups(dev, soc_dev_attr_groups); if (ret < 0) goto free_rtd; return rtd; free_rtd: soc_free_pcm_runtime(rtd); return NULL; } static void snd_soc_flush_all_delayed_work(struct snd_soc_card *card) { struct snd_soc_pcm_runtime *rtd; for_each_card_rtds(card, rtd) flush_delayed_work(&rtd->delayed_work); } #ifdef CONFIG_PM_SLEEP static void soc_playback_digital_mute(struct snd_soc_card *card, int mute) { struct snd_soc_pcm_runtime *rtd; struct snd_soc_dai *dai; int playback = SNDRV_PCM_STREAM_PLAYBACK; int i; for_each_card_rtds(card, rtd) { if (rtd->dai_link->ignore_suspend) continue; for_each_rtd_dais(rtd, i, dai) { if (snd_soc_dai_stream_active(dai, playback)) snd_soc_dai_digital_mute(dai, mute, playback); } } } static void soc_dapm_suspend_resume(struct snd_soc_card *card, int event) { struct snd_soc_pcm_runtime *rtd; int stream; for_each_card_rtds(card, rtd) { if (rtd->dai_link->ignore_suspend) continue; for_each_pcm_streams(stream) snd_soc_dapm_stream_event(rtd, stream, event); } } /* powers down audio subsystem for suspend */ int snd_soc_suspend(struct device *dev) { struct snd_soc_card *card = dev_get_drvdata(dev); struct snd_soc_component *component; struct snd_soc_pcm_runtime *rtd; int i; /* If the card is not initialized yet there is nothing to do */ if (!snd_soc_card_is_instantiated(card)) return 0; /* * Due to the resume being scheduled into a workqueue we could * suspend before that's finished - wait for it to complete. 
*/ snd_power_wait(card->snd_card); /* we're going to block userspace touching us until resume completes */ snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D3hot); /* mute any active DACs */ soc_playback_digital_mute(card, 1); /* suspend all pcms */ for_each_card_rtds(card, rtd) { if (rtd->dai_link->ignore_suspend) continue; snd_pcm_suspend_all(rtd->pcm); } snd_soc_card_suspend_pre(card); /* close any waiting streams */ snd_soc_flush_all_delayed_work(card); soc_dapm_suspend_resume(card, SND_SOC_DAPM_STREAM_SUSPEND); /* Recheck all endpoints too, their state is affected by suspend */ dapm_mark_endpoints_dirty(card); snd_soc_dapm_sync(&card->dapm); /* suspend all COMPONENTs */ for_each_card_rtds(card, rtd) { if (rtd->dai_link->ignore_suspend) continue; for_each_rtd_components(rtd, i, component) { struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component); /* * ignore if component was already suspended */ if (snd_soc_component_is_suspended(component)) continue; /* * If there are paths active then the COMPONENT will be * held with bias _ON and should not be suspended. */ switch (snd_soc_dapm_get_bias_level(dapm)) { case SND_SOC_BIAS_STANDBY: /* * If the COMPONENT is capable of idle * bias off then being in STANDBY * means it's doing something, * otherwise fall through. 
*/ if (dapm->idle_bias_off) { dev_dbg(component->dev, "ASoC: idle_bias_off CODEC on over suspend\n"); break; } fallthrough; case SND_SOC_BIAS_OFF: snd_soc_component_suspend(component); if (component->regmap) regcache_mark_dirty(component->regmap); /* deactivate pins to sleep state */ pinctrl_pm_select_sleep_state(component->dev); break; default: dev_dbg(component->dev, "ASoC: COMPONENT is on over suspend\n"); break; } } } snd_soc_card_suspend_post(card); return 0; } EXPORT_SYMBOL_GPL(snd_soc_suspend); /* * deferred resume work, so resume can complete before we finished * setting our codec back up, which can be very slow on I2C */ static void soc_resume_deferred(struct work_struct *work) { struct snd_soc_card *card = container_of(work, struct snd_soc_card, deferred_resume_work); struct snd_soc_component *component; /* * our power state is still SNDRV_CTL_POWER_D3hot from suspend time, * so userspace apps are blocked from touching us */ dev_dbg(card->dev, "ASoC: starting resume work\n"); /* Bring us up into D2 so that DAPM starts enabling things */ snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D2); snd_soc_card_resume_pre(card); for_each_card_components(card, component) { if (snd_soc_component_is_suspended(component)) snd_soc_component_resume(component); } soc_dapm_suspend_resume(card, SND_SOC_DAPM_STREAM_RESUME); /* unmute any active DACs */ soc_playback_digital_mute(card, 0); snd_soc_card_resume_post(card); dev_dbg(card->dev, "ASoC: resume work completed\n"); /* Recheck all endpoints too, their state is affected by suspend */ dapm_mark_endpoints_dirty(card); snd_soc_dapm_sync(&card->dapm); /* userspace can access us now we are back as we were before */ snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D0); } /* powers up audio subsystem after a suspend */ int snd_soc_resume(struct device *dev) { struct snd_soc_card *card = dev_get_drvdata(dev); struct snd_soc_component *component; /* If the card is not initialized yet there is nothing to do */ if 
(!snd_soc_card_is_instantiated(card))
		return 0;

	/* activate pins from sleep state */
	for_each_card_components(card, component)
		if (snd_soc_component_active(component))
			pinctrl_pm_select_default_state(component->dev);

	dev_dbg(dev, "ASoC: Scheduling resume work\n");
	if (!schedule_work(&card->deferred_resume_work))
		dev_err(dev, "ASoC: resume work item may be lost\n");

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_resume);

static void soc_resume_init(struct snd_soc_card *card)
{
	/* deferred resume work */
	INIT_WORK(&card->deferred_resume_work, soc_resume_deferred);
}

#else
/* no PM: suspend/resume callbacks are absent and resume init is a no-op */
#define snd_soc_suspend NULL
#define snd_soc_resume NULL

static inline void soc_resume_init(struct snd_soc_card *card) { }
#endif

/*
 * Resolve the DT node for a component, falling back to its parent
 * device's node when the component's own device has none (common for
 * child devices created by an MFD or bus parent).
 */
static struct device_node *soc_component_to_node(struct snd_soc_component *component)
{
	struct device_node *of_node;

	of_node = component->dev->of_node;
	if (!of_node && component->dev->parent)
		of_node = component->dev->parent->of_node;

	return of_node;
}

/*
 * devm-duplicate a phandle-args descriptor so the caller can keep it
 * past the lifetime of the original (shallow copy; the referenced
 * of_node is not acquired here).
 */
struct of_phandle_args *snd_soc_copy_dai_args(struct device *dev,
					      struct of_phandle_args *args)
{
	struct of_phandle_args *ret = devm_kzalloc(dev, sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	*ret = *args;

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_copy_dai_args);

/*
 * Does @component match the dai_link_component @dlc?
 *
 * Three matching schemes, in priority order:
 *  - dai_args set: match if any of the component's DAIs matches;
 *  - of_node set: the component's (or its parent's) DT node must match;
 *  - name set: string compare against the component name.
 * Returns 1 on match, 0 otherwise.
 */
static int snd_soc_is_matching_component(
	const struct snd_soc_dai_link_component *dlc,
	struct snd_soc_component *component)
{
	struct device_node *component_of_node;

	if (!dlc)
		return 0;

	if (dlc->dai_args) {
		struct snd_soc_dai *dai;

		for_each_component_dais(component, dai)
			if (snd_soc_is_matching_dai(dlc, dai))
				return 1;
		return 0;
	}

	component_of_node = soc_component_to_node(component);

	if (dlc->of_node && component_of_node != dlc->of_node)
		return 0;
	if (dlc->name && strcmp(component->name, dlc->name))
		return 0;

	return 1;
}

/* Look up a registered component matching @dlc; caller holds client_mutex */
static struct snd_soc_component *soc_find_component(
	const struct snd_soc_dai_link_component *dlc)
{
	struct snd_soc_component *component;

	lockdep_assert_held(&client_mutex);

	/*
	 * NOTE
	 *
	 * It returns *1st* found component, but some driver
	 * has few components by same of_node/name
	 * ex)
	 *	CPU component and generic DMAEngine component
	 */
	for_each_component(component)
		if (snd_soc_is_matching_component(dlc, component))
			return component;

	return NULL;
}

/**
 * snd_soc_find_dai - Find a registered DAI
 *
 * @dlc: name of the DAI or the DAI driver and optional component info to match
 *
 * This function will search all registered components and their DAIs to
 * find the DAI of the same name. The component's of_node and name
 * should also match if being specified.
 *
 * Return: pointer of DAI, or NULL if not found.
 */
struct snd_soc_dai *snd_soc_find_dai(
	const struct snd_soc_dai_link_component *dlc)
{
	struct snd_soc_component *component;
	struct snd_soc_dai *dai;

	lockdep_assert_held(&client_mutex);

	/* Find CPU DAI from registered DAIs */
	for_each_component(component)
		if (snd_soc_is_matching_component(dlc, component))
			for_each_component_dais(component, dai)
				if (snd_soc_is_matching_dai(dlc, dai))
					return dai;

	return NULL;
}
EXPORT_SYMBOL_GPL(snd_soc_find_dai);

/* Locked wrapper around snd_soc_find_dai() for callers outside client_mutex */
struct snd_soc_dai *snd_soc_find_dai_with_mutex(
	const struct snd_soc_dai_link_component *dlc)
{
	struct snd_soc_dai *dai;

	mutex_lock(&client_mutex);
	dai = snd_soc_find_dai(dlc);
	mutex_unlock(&client_mutex);

	return dai;
}
EXPORT_SYMBOL_GPL(snd_soc_find_dai_with_mutex);

/*
 * Validate a dai_link's codec/platform/CPU descriptors before binding.
 * Returns 0 when valid, -EINVAL for malformed descriptors, or
 * -EPROBE_DEFER when a referenced component is not yet registered.
 */
static int soc_dai_link_sanity_check(struct snd_soc_card *card,
				     struct snd_soc_dai_link *link)
{
	int i;
	struct snd_soc_dai_link_component *dlc;

	/* Codec check */
	for_each_link_codecs(link, i, dlc) {
		/*
		 * Codec must be specified by 1 of name or OF node,
		 * not both or neither.
		 */
		if (snd_soc_dlc_component_is_invalid(dlc))
			goto component_invalid;

		if (snd_soc_dlc_component_is_empty(dlc))
			goto component_empty;

		/* Codec DAI name must be specified */
		if (snd_soc_dlc_dai_is_empty(dlc))
			goto dai_empty;

		/*
		 * Defer card registration if codec component is not added to
		 * component list.
		 */
		if (!soc_find_component(dlc))
			goto component_not_found;
	}

	/* Platform check */
	for_each_link_platforms(link, i, dlc) {
		/*
		 * Platform may be specified by either name or OF node, but it
		 * can be left unspecified, then no components will be inserted
		 * in the rtdcom list
		 */
		if (snd_soc_dlc_component_is_invalid(dlc))
			goto component_invalid;

		if (snd_soc_dlc_component_is_empty(dlc))
			goto component_empty;

		/*
		 * Defer card registration if platform component is not added to
		 * component list.
		 */
		if (!soc_find_component(dlc))
			goto component_not_found;
	}

	/* CPU check */
	for_each_link_cpus(link, i, dlc) {
		/*
		 * CPU device may be specified by either name or OF node, but
		 * can be left unspecified, and will be matched based on DAI
		 * name alone..
		 */
		if (snd_soc_dlc_component_is_invalid(dlc))
			goto component_invalid;

		if (snd_soc_dlc_component_is_empty(dlc)) {
			/*
			 * At least one of CPU DAI name or CPU device name/node must be specified
			 */
			if (snd_soc_dlc_dai_is_empty(dlc))
				goto component_dai_empty;
		} else {
			/*
			 * Defer card registration if Component is not added
			 */
			if (!soc_find_component(dlc))
				goto component_not_found;
		}
	}

	return 0;

component_invalid:
	dev_err(card->dev, "ASoC: Both Component name/of_node are set for %s\n", link->name);
	return -EINVAL;

component_empty:
	dev_err(card->dev, "ASoC: Neither Component name/of_node are set for %s\n", link->name);
	return -EINVAL;

component_not_found:
	dev_dbg(card->dev, "ASoC: Component %s not found for link %s\n", dlc->name, link->name);
	return -EPROBE_DEFER;

dai_empty:
	dev_err(card->dev, "ASoC: DAI name is not set for %s\n", link->name);
	return -EINVAL;

component_dai_empty:
	dev_err(card->dev,
		"ASoC: Neither DAI/Component name/of_node are set for %s\n", link->name);
	return -EINVAL;
}

/**
 * snd_soc_remove_pcm_runtime - Remove a pcm_runtime from card
 * @card: The ASoC card to which the pcm_runtime has
 * @rtd: The pcm_runtime to remove
 *
 * This function removes a pcm_runtime from the ASoC card.
 */
void snd_soc_remove_pcm_runtime(struct snd_soc_card *card,
				struct snd_soc_pcm_runtime *rtd)
{
	lockdep_assert_held(&client_mutex);

	/*
	 * Notify the machine driver for extra destruction
	 */
	snd_soc_card_remove_dai_link(card, rtd->dai_link);

	soc_free_pcm_runtime(rtd);
}
EXPORT_SYMBOL_GPL(snd_soc_remove_pcm_runtime);

/**
 * snd_soc_add_pcm_runtime - Add a pcm_runtime dynamically via dai_link
 * @card: The ASoC card to which the pcm_runtime is added
 * @dai_link: The DAI link to find pcm_runtime
 *
 * This function adds a pcm_runtime ASoC card by using dai_link.
 *
 * Note: Topology can use this API to add pcm_runtime when probing the
 * topology component. And machine drivers can still define static
 * DAI links in dai_link array.
 */
static int snd_soc_add_pcm_runtime(struct snd_soc_card *card,
				   struct snd_soc_dai_link *dai_link)
{
	struct snd_soc_pcm_runtime *rtd;
	struct snd_soc_dai_link_component *codec, *platform, *cpu;
	struct snd_soc_component *component;
	int i, ret;

	lockdep_assert_held(&client_mutex);

	/*
	 * Notify the machine driver for extra initialization
	 */
	ret = snd_soc_card_add_dai_link(card, dai_link);
	if (ret < 0)
		return ret;

	if (dai_link->ignore)
		return 0;

	dev_dbg(card->dev, "ASoC: binding %s\n", dai_link->name);

	ret = soc_dai_link_sanity_check(card, dai_link);
	if (ret < 0)
		return ret;

	rtd = soc_new_pcm_runtime(card, dai_link);
	if (!rtd)
		return -ENOMEM;

	/* Resolve each CPU DAI and attach its component to the rtd */
	for_each_link_cpus(dai_link, i, cpu) {
		asoc_rtd_to_cpu(rtd, i) = snd_soc_find_dai(cpu);
		if (!asoc_rtd_to_cpu(rtd, i)) {
			dev_info(card->dev, "ASoC: CPU DAI %s not registered\n",
				 cpu->dai_name);
			goto _err_defer;
		}
		snd_soc_rtd_add_component(rtd, asoc_rtd_to_cpu(rtd, i)->component);
	}

	/* Find CODEC from registered CODECs */
	for_each_link_codecs(dai_link, i, codec) {
		asoc_rtd_to_codec(rtd, i) = snd_soc_find_dai(codec);
		if (!asoc_rtd_to_codec(rtd, i)) {
			dev_info(card->dev, "ASoC: CODEC DAI %s not registered\n",
				 codec->dai_name);
			goto _err_defer;
		}
		snd_soc_rtd_add_component(rtd, asoc_rtd_to_codec(rtd, i)->component);
	}

	/*
Find PLATFORM from registered PLATFORMs */ for_each_link_platforms(dai_link, i, platform) { for_each_component(component) { if (!snd_soc_is_matching_component(platform, component)) continue; snd_soc_rtd_add_component(rtd, component); } } return 0; _err_defer: snd_soc_remove_pcm_runtime(card, rtd); return -EPROBE_DEFER; } int snd_soc_add_pcm_runtimes(struct snd_soc_card *card, struct snd_soc_dai_link *dai_link, int num_dai_link) { for (int i = 0; i < num_dai_link; i++) { int ret = snd_soc_add_pcm_runtime(card, dai_link + i); if (ret < 0) return ret; } return 0; } EXPORT_SYMBOL_GPL(snd_soc_add_pcm_runtimes); static void snd_soc_runtime_get_dai_fmt(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_dai_link *dai_link = rtd->dai_link; struct snd_soc_dai *dai, *not_used; u64 pos, possible_fmt; unsigned int mask = 0, dai_fmt = 0; int i, j, priority, pri, until; /* * Get selectable format from each DAIs. * **************************** * NOTE * Using .auto_selectable_formats is not mandatory, * we can select format manually from Sound Card. * When use it, driver should list well tested format only. 
**************************** * * ex) * auto_selectable_formats (= SND_SOC_POSSIBLE_xxx) * (A) (B) (C) * DAI0_: { 0x000F, 0x00F0, 0x0F00 }; * DAI1 : { 0xF000, 0x0F00 }; * (X) (Y) * * "until" will be 3 in this case (MAX array size from DAI0 and DAI1) * Here is dev_dbg() message and comments * * priority = 1 * DAI0: (pri, fmt) = (1, 000000000000000F) // 1st check (A) DAI1 is not selected * DAI1: (pri, fmt) = (0, 0000000000000000) // Necessary Waste * DAI0: (pri, fmt) = (1, 000000000000000F) // 2nd check (A) * DAI1: (pri, fmt) = (1, 000000000000F000) // (X) * priority = 2 * DAI0: (pri, fmt) = (2, 00000000000000FF) // 3rd check (A) + (B) * DAI1: (pri, fmt) = (1, 000000000000F000) // (X) * DAI0: (pri, fmt) = (2, 00000000000000FF) // 4th check (A) + (B) * DAI1: (pri, fmt) = (2, 000000000000FF00) // (X) + (Y) * priority = 3 * DAI0: (pri, fmt) = (3, 0000000000000FFF) // 5th check (A) + (B) + (C) * DAI1: (pri, fmt) = (2, 000000000000FF00) // (X) + (Y) * found auto selected format: 0000000000000F00 */ until = snd_soc_dai_get_fmt_max_priority(rtd); for (priority = 1; priority <= until; priority++) { for_each_rtd_dais(rtd, j, not_used) { possible_fmt = ULLONG_MAX; for_each_rtd_dais(rtd, i, dai) { u64 fmt = 0; pri = (j >= i) ? priority : priority - 1; fmt = snd_soc_dai_get_fmt(dai, pri); possible_fmt &= fmt; } if (possible_fmt) goto found; } } /* Not Found */ return; found: /* * convert POSSIBLE_DAIFMT to DAIFMT * * Some basic/default settings on each is defined as 0. * see * SND_SOC_DAIFMT_NB_NF * SND_SOC_DAIFMT_GATED * * SND_SOC_DAIFMT_xxx_MASK can't notice it if Sound Card specify * these value, and will be overwrite to auto selected value. * * To avoid such issue, loop from 63 to 0 here. * Small number of SND_SOC_POSSIBLE_xxx will be Hi priority. * Basic/Default settings of each part and aboves are defined * as Hi priority (= small number) of SND_SOC_POSSIBLE_xxx. 
*/ for (i = 63; i >= 0; i--) { pos = 1ULL << i; switch (possible_fmt & pos) { /* * for format */ case SND_SOC_POSSIBLE_DAIFMT_I2S: case SND_SOC_POSSIBLE_DAIFMT_RIGHT_J: case SND_SOC_POSSIBLE_DAIFMT_LEFT_J: case SND_SOC_POSSIBLE_DAIFMT_DSP_A: case SND_SOC_POSSIBLE_DAIFMT_DSP_B: case SND_SOC_POSSIBLE_DAIFMT_AC97: case SND_SOC_POSSIBLE_DAIFMT_PDM: dai_fmt = (dai_fmt & ~SND_SOC_DAIFMT_FORMAT_MASK) | i; break; /* * for clock */ case SND_SOC_POSSIBLE_DAIFMT_CONT: dai_fmt = (dai_fmt & ~SND_SOC_DAIFMT_CLOCK_MASK) | SND_SOC_DAIFMT_CONT; break; case SND_SOC_POSSIBLE_DAIFMT_GATED: dai_fmt = (dai_fmt & ~SND_SOC_DAIFMT_CLOCK_MASK) | SND_SOC_DAIFMT_GATED; break; /* * for clock invert */ case SND_SOC_POSSIBLE_DAIFMT_NB_NF: dai_fmt = (dai_fmt & ~SND_SOC_DAIFMT_INV_MASK) | SND_SOC_DAIFMT_NB_NF; break; case SND_SOC_POSSIBLE_DAIFMT_NB_IF: dai_fmt = (dai_fmt & ~SND_SOC_DAIFMT_INV_MASK) | SND_SOC_DAIFMT_NB_IF; break; case SND_SOC_POSSIBLE_DAIFMT_IB_NF: dai_fmt = (dai_fmt & ~SND_SOC_DAIFMT_INV_MASK) | SND_SOC_DAIFMT_IB_NF; break; case SND_SOC_POSSIBLE_DAIFMT_IB_IF: dai_fmt = (dai_fmt & ~SND_SOC_DAIFMT_INV_MASK) | SND_SOC_DAIFMT_IB_IF; break; /* * for clock provider / consumer */ case SND_SOC_POSSIBLE_DAIFMT_CBP_CFP: dai_fmt = (dai_fmt & ~SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) | SND_SOC_DAIFMT_CBP_CFP; break; case SND_SOC_POSSIBLE_DAIFMT_CBC_CFP: dai_fmt = (dai_fmt & ~SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) | SND_SOC_DAIFMT_CBC_CFP; break; case SND_SOC_POSSIBLE_DAIFMT_CBP_CFC: dai_fmt = (dai_fmt & ~SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) | SND_SOC_DAIFMT_CBP_CFC; break; case SND_SOC_POSSIBLE_DAIFMT_CBC_CFC: dai_fmt = (dai_fmt & ~SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) | SND_SOC_DAIFMT_CBC_CFC; break; } } /* * Some driver might have very complex limitation. * In such case, user want to auto-select non-limitation part, * and want to manually specify complex part. * * Or for example, if both CPU and Codec can be clock provider, * but because of its quality, user want to specify it manually. 
* * Use manually specified settings if sound card did. */ if (!(dai_link->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK)) mask |= SND_SOC_DAIFMT_FORMAT_MASK; if (!(dai_link->dai_fmt & SND_SOC_DAIFMT_CLOCK_MASK)) mask |= SND_SOC_DAIFMT_CLOCK_MASK; if (!(dai_link->dai_fmt & SND_SOC_DAIFMT_INV_MASK)) mask |= SND_SOC_DAIFMT_INV_MASK; if (!(dai_link->dai_fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK)) mask |= SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK; dai_link->dai_fmt |= (dai_fmt & mask); } /** * snd_soc_runtime_set_dai_fmt() - Change DAI link format for a ASoC runtime * @rtd: The runtime for which the DAI link format should be changed * @dai_fmt: The new DAI link format * * This function updates the DAI link format for all DAIs connected to the DAI * link for the specified runtime. * * Note: For setups with a static format set the dai_fmt field in the * corresponding snd_dai_link struct instead of using this function. * * Returns 0 on success, otherwise a negative error code. */ int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd, unsigned int dai_fmt) { struct snd_soc_dai *cpu_dai; struct snd_soc_dai *codec_dai; unsigned int i; int ret; if (!dai_fmt) return 0; for_each_rtd_codec_dais(rtd, i, codec_dai) { ret = snd_soc_dai_set_fmt(codec_dai, dai_fmt); if (ret != 0 && ret != -ENOTSUPP) return ret; } /* Flip the polarity for the "CPU" end of link */ dai_fmt = snd_soc_daifmt_clock_provider_flipped(dai_fmt); for_each_rtd_cpu_dais(rtd, i, cpu_dai) { ret = snd_soc_dai_set_fmt(cpu_dai, dai_fmt); if (ret != 0 && ret != -ENOTSUPP) return ret; } return 0; } EXPORT_SYMBOL_GPL(snd_soc_runtime_set_dai_fmt); static int soc_init_pcm_runtime(struct snd_soc_card *card, struct snd_soc_pcm_runtime *rtd) { struct snd_soc_dai_link *dai_link = rtd->dai_link; struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0); struct snd_soc_component *component; int ret, num, i; /* do machine specific initialization */ ret = snd_soc_link_init(rtd); if (ret < 0) return ret; snd_soc_runtime_get_dai_fmt(rtd); ret = 
snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt); if (ret) return ret; /* add DPCM sysfs entries */ soc_dpcm_debugfs_add(rtd); num = rtd->num; /* * most drivers will register their PCMs using DAI link ordering but * topology based drivers can use the DAI link id field to set PCM * device number and then use rtd + a base offset of the BEs. */ for_each_rtd_components(rtd, i, component) { if (!component->driver->use_dai_pcm_id) continue; if (rtd->dai_link->no_pcm) num += component->driver->be_pcm_base; else num = rtd->dai_link->id; } /* create compress_device if possible */ ret = snd_soc_dai_compress_new(cpu_dai, rtd, num); if (ret != -ENOTSUPP) return ret; /* create the pcm */ ret = soc_new_pcm(rtd, num); if (ret < 0) { dev_err(card->dev, "ASoC: can't create pcm %s :%d\n", dai_link->stream_name, ret); return ret; } return snd_soc_pcm_dai_new(rtd); } static void soc_set_name_prefix(struct snd_soc_card *card, struct snd_soc_component *component) { struct device_node *of_node = soc_component_to_node(component); const char *str; int ret, i; for (i = 0; i < card->num_configs; i++) { struct snd_soc_codec_conf *map = &card->codec_conf[i]; if (snd_soc_is_matching_component(&map->dlc, component) && map->name_prefix) { component->name_prefix = map->name_prefix; return; } } /* * If there is no configuration table or no match in the table, * check if a prefix is provided in the node */ ret = of_property_read_string(of_node, "sound-name-prefix", &str); if (ret < 0) return; component->name_prefix = str; } static void soc_remove_component(struct snd_soc_component *component, int probed) { if (!component->card) return; if (probed) snd_soc_component_remove(component); list_del_init(&component->card_list); snd_soc_dapm_free(snd_soc_component_get_dapm(component)); soc_cleanup_component_debugfs(component); component->card = NULL; snd_soc_component_module_put_when_remove(component); } static int soc_probe_component(struct snd_soc_card *card, struct snd_soc_component *component) { 
struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component); struct snd_soc_dai *dai; int probed = 0; int ret; if (snd_soc_component_is_dummy(component)) return 0; if (component->card) { if (component->card != card) { dev_err(component->dev, "Trying to bind component to card \"%s\" but is already bound to card \"%s\"\n", card->name, component->card->name); return -ENODEV; } return 0; } ret = snd_soc_component_module_get_when_probe(component); if (ret < 0) return ret; component->card = card; soc_set_name_prefix(card, component); soc_init_component_debugfs(component); snd_soc_dapm_init(dapm, card, component); ret = snd_soc_dapm_new_controls(dapm, component->driver->dapm_widgets, component->driver->num_dapm_widgets); if (ret != 0) { dev_err(component->dev, "Failed to create new controls %d\n", ret); goto err_probe; } for_each_component_dais(component, dai) { ret = snd_soc_dapm_new_dai_widgets(dapm, dai); if (ret != 0) { dev_err(component->dev, "Failed to create DAI widgets %d\n", ret); goto err_probe; } } ret = snd_soc_component_probe(component); if (ret < 0) goto err_probe; WARN(dapm->idle_bias_off && dapm->bias_level != SND_SOC_BIAS_OFF, "codec %s can not start from non-off bias with idle_bias_off==1\n", component->name); probed = 1; /* * machine specific init * see * snd_soc_component_set_aux() */ ret = snd_soc_component_init(component); if (ret < 0) goto err_probe; ret = snd_soc_add_component_controls(component, component->driver->controls, component->driver->num_controls); if (ret < 0) goto err_probe; ret = snd_soc_dapm_add_routes(dapm, component->driver->dapm_routes, component->driver->num_dapm_routes); if (ret < 0) { if (card->disable_route_checks) { dev_info(card->dev, "%s: disable_route_checks set, ignoring errors on add_routes\n", __func__); } else { dev_err(card->dev, "%s: snd_soc_dapm_add_routes failed: %d\n", __func__, ret); goto err_probe; } } /* see for_each_card_components */ list_add(&component->card_list, &card->component_dev_list); 
err_probe: if (ret < 0) soc_remove_component(component, probed); return ret; } static void soc_remove_link_dais(struct snd_soc_card *card) { struct snd_soc_pcm_runtime *rtd; int order; for_each_comp_order(order) { for_each_card_rtds(card, rtd) { /* remove all rtd connected DAIs in good order */ snd_soc_pcm_dai_remove(rtd, order); } } } static int soc_probe_link_dais(struct snd_soc_card *card) { struct snd_soc_pcm_runtime *rtd; int order, ret; for_each_comp_order(order) { for_each_card_rtds(card, rtd) { /* probe all rtd connected DAIs in good order */ ret = snd_soc_pcm_dai_probe(rtd, order); if (ret) return ret; } } return 0; } static void soc_remove_link_components(struct snd_soc_card *card) { struct snd_soc_component *component; struct snd_soc_pcm_runtime *rtd; int i, order; for_each_comp_order(order) { for_each_card_rtds(card, rtd) { for_each_rtd_components(rtd, i, component) { if (component->driver->remove_order != order) continue; soc_remove_component(component, 1); } } } } static int soc_probe_link_components(struct snd_soc_card *card) { struct snd_soc_component *component; struct snd_soc_pcm_runtime *rtd; int i, ret, order; for_each_comp_order(order) { for_each_card_rtds(card, rtd) { for_each_rtd_components(rtd, i, component) { if (component->driver->probe_order != order) continue; ret = soc_probe_component(card, component); if (ret < 0) return ret; } } } return 0; } static void soc_unbind_aux_dev(struct snd_soc_card *card) { struct snd_soc_component *component, *_component; for_each_card_auxs_safe(card, component, _component) { /* for snd_soc_component_init() */ snd_soc_component_set_aux(component, NULL); list_del(&component->card_aux_list); } } static int soc_bind_aux_dev(struct snd_soc_card *card) { struct snd_soc_component *component; struct snd_soc_aux_dev *aux; int i; for_each_card_pre_auxs(card, i, aux) { /* codecs, usually analog devices */ component = soc_find_component(&aux->dlc); if (!component) return -EPROBE_DEFER; /* for snd_soc_component_init() 
	 */
		snd_soc_component_set_aux(component, aux);
		/* see for_each_card_auxs */
		list_add(&component->card_aux_list, &card->aux_comp_list);
	}
	return 0;
}

/* Probe every bound auxiliary component, honouring each driver's probe_order */
static int soc_probe_aux_devices(struct snd_soc_card *card)
{
	struct snd_soc_component *component;
	int order;
	int ret;

	for_each_comp_order(order) {
		for_each_card_auxs(card, component) {
			if (component->driver->probe_order != order)
				continue;

			ret = soc_probe_component(card, component);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}

/* Remove auxiliary components in their drivers' remove_order */
static void soc_remove_aux_devices(struct snd_soc_card *card)
{
	struct snd_soc_component *comp, *_comp;
	int order;

	for_each_comp_order(order) {
		for_each_card_auxs_safe(card, comp, _comp) {
			if (comp->driver->remove_order == order)
				soc_remove_component(comp, 1);
		}
	}
}

#ifdef CONFIG_DMI
/*
 * If a DMI field contains strings in this blacklist (e.g.
 * "Type2 - Board Manufacturer" or "Type1 - TBD by OEM"), it will be taken
 * as invalid and dropped when setting the card long name from DMI info.
 */
static const char * const dmi_blacklist[] = {
	"To be filled by OEM",
	"TBD by OEM",
	"Default String",
	"Board Manufacturer",
	"Board Vendor Name",
	"Board Product Name",
	NULL,	/* terminator */
};

/*
 * Trim special characters, and replace '-' with '_' since '-' is used to
 * separate different DMI fields in the card long name. Only number and
 * alphabet characters and a few separator characters are kept.
 *
 * Compacts @name in place; the result is never longer than the input.
 */
static void cleanup_dmi_name(char *name)
{
	int i, j = 0;

	for (i = 0; name[i]; i++) {
		if (isalnum(name[i]) || (name[i] == '.')
		    || (name[i] == '_'))
			name[j++] = name[i];
		else if (name[i] == '-')
			name[j++] = '_';
	}

	name[j] = '\0';
}

/*
 * Check if a DMI field is valid, i.e. not containing any string
 * in the black list. Returns 1 when valid, 0 when blacklisted.
 */
static int is_dmi_valid(const char *field)
{
	int i = 0;

	while (dmi_blacklist[i]) {
		if (strstr(field, dmi_blacklist[i]))
			return 0;
		i++;
	}

	return 1;
}

/*
 * Append a string to card->dmi_longname with character cleanups.
 * A "-" separator is inserted before @str; the appended portion is
 * then normalized via cleanup_dmi_name(). Truncation by snprintf()
 * is silently accepted.
 */
static void append_dmi_string(struct snd_soc_card *card, const char *str)
{
	char *dst = card->dmi_longname;
	size_t dst_len = sizeof(card->dmi_longname);
	size_t len;

	len = strlen(dst);
	snprintf(dst + len, dst_len - len, "-%s", str);

	len++;	/* skip the separator "-" */
	if (len < dst_len)
		cleanup_dmi_name(dst + len);
}

/**
 * snd_soc_set_dmi_name() - Register DMI names to card
 * @card: The card to register DMI names
 * @flavour: The flavour "differentiator" for the card amongst its peers.
 *
 * An Intel machine driver may be used by many different devices but are
 * difficult for userspace to differentiate, since machine drivers usually
 * use their own name as the card short name and leave the card long name
 * blank. To differentiate such devices and fix bugs due to lack of
 * device-specific configurations, this function allows DMI info to be used
 * as the sound card long name, in the format of
 * "vendor-product-version-board"
 * (Character '-' is used to separate different DMI fields here).
 * This will help the user space to load the device-specific Use Case Manager
 * (UCM) configurations for the card.
 *
 * Possible card long names may be:
 * DellInc.-XPS139343-01-0310JH
 * ASUSTeKCOMPUTERINC.-T100TA-1.0-T100TA
 * Circuitco-MinnowboardMaxD0PLATFORM-D0-MinnowBoardMAX
 *
 * This function also supports flavoring the card longname to provide
 * the extra differentiation, like "vendor-product-version-board-flavor".
 *
 * We only keep number and alphabet characters and a few separator characters
 * in the card long name since UCM in the user space uses the card long names
 * as card configuration directory names and AudoConf cannot support special
 * characters like SPACE.
 *
 * Returns 0 on success, otherwise a negative error code.
*/ int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour) { const char *vendor, *product, *board; if (card->long_name) return 0; /* long name already set by driver or from DMI */ if (!dmi_available) return 0; /* make up dmi long name as: vendor-product-version-board */ vendor = dmi_get_system_info(DMI_BOARD_VENDOR); if (!vendor || !is_dmi_valid(vendor)) { dev_warn(card->dev, "ASoC: no DMI vendor name!\n"); return 0; } snprintf(card->dmi_longname, sizeof(card->dmi_longname), "%s", vendor); cleanup_dmi_name(card->dmi_longname); product = dmi_get_system_info(DMI_PRODUCT_NAME); if (product && is_dmi_valid(product)) { const char *product_version = dmi_get_system_info(DMI_PRODUCT_VERSION); append_dmi_string(card, product); /* * some vendors like Lenovo may only put a self-explanatory * name in the product version field */ if (product_version && is_dmi_valid(product_version)) append_dmi_string(card, product_version); } board = dmi_get_system_info(DMI_BOARD_NAME); if (board && is_dmi_valid(board)) { if (!product || strcasecmp(board, product)) append_dmi_string(card, board); } else if (!product) { /* fall back to using legacy name */ dev_warn(card->dev, "ASoC: no DMI board/product name!\n"); return 0; } /* Add flavour to dmi long name */ if (flavour) append_dmi_string(card, flavour); /* set the card long name */ card->long_name = card->dmi_longname; return 0; } EXPORT_SYMBOL_GPL(snd_soc_set_dmi_name); #endif /* CONFIG_DMI */ static void soc_check_tplg_fes(struct snd_soc_card *card) { struct snd_soc_component *component; const struct snd_soc_component_driver *comp_drv; struct snd_soc_dai_link *dai_link; int i; for_each_component(component) { /* does this component override BEs ? */ if (!component->driver->ignore_machine) continue; /* for this machine ? 
*/ if (!strcmp(component->driver->ignore_machine, card->dev->driver->name)) goto match; if (strcmp(component->driver->ignore_machine, dev_name(card->dev))) continue; match: /* machine matches, so override the rtd data */ for_each_card_prelinks(card, i, dai_link) { /* ignore this FE */ if (dai_link->dynamic) { dai_link->ignore = true; continue; } dev_dbg(card->dev, "info: override BE DAI link %s\n", card->dai_link[i].name); /* override platform component */ if (!dai_link->platforms) { dev_err(card->dev, "init platform error"); continue; } if (component->dev->of_node) dai_link->platforms->of_node = component->dev->of_node; else dai_link->platforms->name = component->name; /* convert non BE into BE */ if (!dai_link->no_pcm) { dai_link->no_pcm = 1; if (dai_link->dpcm_playback) dev_warn(card->dev, "invalid configuration, dailink %s has flags no_pcm=0 and dpcm_playback=1\n", dai_link->name); if (dai_link->dpcm_capture) dev_warn(card->dev, "invalid configuration, dailink %s has flags no_pcm=0 and dpcm_capture=1\n", dai_link->name); /* convert normal link into DPCM one */ if (!(dai_link->dpcm_playback || dai_link->dpcm_capture)) { dai_link->dpcm_playback = !dai_link->capture_only; dai_link->dpcm_capture = !dai_link->playback_only; } } /* * override any BE fixups * see * snd_soc_link_be_hw_params_fixup() */ dai_link->be_hw_params_fixup = component->driver->be_hw_params_fixup; /* * most BE links don't set stream name, so set it to * dai link name if it's NULL to help bind widgets. */ if (!dai_link->stream_name) dai_link->stream_name = dai_link->name; } /* Inform userspace we are using alternate topology */ if (component->driver->topology_name_prefix) { /* topology shortname created? 
*/ if (!card->topology_shortname_created) { comp_drv = component->driver; snprintf(card->topology_shortname, 32, "%s-%s", comp_drv->topology_name_prefix, card->name); card->topology_shortname_created = true; } /* use topology shortname */ card->name = card->topology_shortname; } } } #define soc_setup_card_name(card, name, name1, name2) \ __soc_setup_card_name(card, name, sizeof(name), name1, name2) static void __soc_setup_card_name(struct snd_soc_card *card, char *name, int len, const char *name1, const char *name2) { const char *src = name1 ? name1 : name2; int i; snprintf(name, len, "%s", src); if (name != card->snd_card->driver) return; /* * Name normalization (driver field) * * The driver name is somewhat special, as it's used as a key for * searches in the user-space. * * ex) * "abcd??efg" -> "abcd__efg" */ for (i = 0; i < len; i++) { switch (name[i]) { case '_': case '-': case '\0': break; default: if (!isalnum(name[i])) name[i] = '_'; break; } } /* * The driver field should contain a valid string from the user view. * The wrapping usually does not work so well here. Set a smaller string * in the specific ASoC driver. 
 */
	if (strlen(src) > len - 1)
		dev_err(card->dev,
			"ASoC: driver name too long '%s' -> '%s'\n",
			src, name);
}

/*
 * Tear down everything snd_soc_bind_card() built, in reverse order:
 * disconnect the snd_card, shut down DAPM, exit links, remove DAIs,
 * components, runtimes and aux devices, then free the snd_card.
 */
static void soc_cleanup_card_resources(struct snd_soc_card *card)
{
	struct snd_soc_pcm_runtime *rtd, *n;

	if (card->snd_card)
		snd_card_disconnect_sync(card->snd_card);

	snd_soc_dapm_shutdown(card);

	/* release machine specific resources */
	for_each_card_rtds(card, rtd)
		snd_soc_link_exit(rtd);

	/* remove and free each DAI */
	soc_remove_link_dais(card);
	soc_remove_link_components(card);

	for_each_card_rtds_safe(card, rtd, n)
		snd_soc_remove_pcm_runtime(card, rtd);

	/* remove auxiliary devices */
	soc_remove_aux_devices(card);
	soc_unbind_aux_dev(card);

	snd_soc_dapm_free(&card->dapm);
	soc_cleanup_card_debugfs(card);

	/* remove the card */
	snd_soc_card_remove(card);

	if (card->snd_card) {
		snd_card_free(card->snd_card);
		card->snd_card = NULL;
	}
}

/*
 * Unbind an instantiated card; if @unregister is false the card is
 * queued on unbind_card_list so a later component add can rebind it.
 */
static void snd_soc_unbind_card(struct snd_soc_card *card, bool unregister)
{
	if (snd_soc_card_is_instantiated(card)) {
		card->instantiated = false;
		snd_soc_flush_all_delayed_work(card);

		soc_cleanup_card_resources(card);
		if (!unregister)
			list_add(&card->list, &unbind_card_list);
	} else {
		if (unregister)
			list_del(&card->list);
	}
}

/*
 * Bind a card: resolve aux devices and DAI links, create the snd_card,
 * probe components/DAIs, wire DAPM and register with ALSA. On any
 * failure everything is unwound via soc_cleanup_card_resources().
 */
static int snd_soc_bind_card(struct snd_soc_card *card)
{
	struct snd_soc_pcm_runtime *rtd;
	struct snd_soc_component *component;
	int ret;

	mutex_lock(&client_mutex);
	snd_soc_card_mutex_lock_root(card);

	snd_soc_dapm_init(&card->dapm, card, NULL);

	/* check whether any platform is ignore machine FE and using topology */
	soc_check_tplg_fes(card);

	/* bind aux_devs too */
	ret = soc_bind_aux_dev(card);
	if (ret < 0)
		goto probe_end;

	/* add predefined DAI links to the list */
	card->num_rtd = 0;
	ret = snd_soc_add_pcm_runtimes(card, card->dai_link, card->num_links);
	if (ret < 0)
		goto probe_end;

	/* card bind complete so register a sound card */
	ret = snd_card_new(card->dev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
			   card->owner, 0, &card->snd_card);
	if (ret < 0) {
		dev_err(card->dev,
			"ASoC: can't create sound card for card %s: 
%d\n", card->name, ret);
		goto probe_end;
	}

	soc_init_card_debugfs(card);

	soc_resume_init(card);

	ret = snd_soc_dapm_new_controls(&card->dapm, card->dapm_widgets,
					card->num_dapm_widgets);
	if (ret < 0)
		goto probe_end;

	ret = snd_soc_dapm_new_controls(&card->dapm, card->of_dapm_widgets,
					card->num_of_dapm_widgets);
	if (ret < 0)
		goto probe_end;

	/* initialise the sound card only once */
	ret = snd_soc_card_probe(card);
	if (ret < 0)
		goto probe_end;

	/* probe all components used by DAI links on this card */
	ret = soc_probe_link_components(card);
	if (ret < 0) {
		/* -EPROBE_DEFER is expected while dependencies appear: stay quiet */
		if (ret != -EPROBE_DEFER) {
			dev_err(card->dev,
				"ASoC: failed to instantiate card %d\n", ret);
		}
		goto probe_end;
	}

	/* probe auxiliary components */
	ret = soc_probe_aux_devices(card);
	if (ret < 0) {
		dev_err(card->dev,
			"ASoC: failed to probe aux component %d\n", ret);
		goto probe_end;
	}

	/* probe all DAI links on this card */
	ret = soc_probe_link_dais(card);
	if (ret < 0) {
		dev_err(card->dev,
			"ASoC: failed to instantiate card %d\n", ret);
		goto probe_end;
	}

	for_each_card_rtds(card, rtd) {
		ret = soc_init_pcm_runtime(card, rtd);
		if (ret < 0)
			goto probe_end;
	}

	snd_soc_dapm_link_dai_widgets(card);
	snd_soc_dapm_connect_dai_link_widgets(card);

	ret = snd_soc_add_card_controls(card, card->controls,
					card->num_controls);
	if (ret < 0)
		goto probe_end;

	ret = snd_soc_dapm_add_routes(&card->dapm, card->dapm_routes,
				      card->num_dapm_routes);
	if (ret < 0) {
		/* route errors are fatal unless the card opted out of checks */
		if (card->disable_route_checks) {
			dev_info(card->dev,
				 "%s: disable_route_checks set, ignoring errors on add_routes\n",
				 __func__);
		} else {
			dev_err(card->dev,
				"%s: snd_soc_dapm_add_routes failed: %d\n",
				__func__, ret);
			goto probe_end;
		}
	}

	ret = snd_soc_dapm_add_routes(&card->dapm, card->of_dapm_routes,
				      card->num_of_dapm_routes);
	if (ret < 0)
		goto probe_end;

	/* try to set some sane longname if DMI is available */
	snd_soc_set_dmi_name(card, NULL);

	soc_setup_card_name(card, card->snd_card->shortname,
			    card->name, NULL);
	soc_setup_card_name(card, card->snd_card->longname,
			    card->long_name, card->name);
	soc_setup_card_name(card, card->snd_card->driver,
			    card->driver_name, card->name);

	if (card->components) {
		/* the current implementation of snd_component_add() accepts */
		/* multiple components in the string separated by space, */
		/* but the string collision (identical string) check might */
		/* not work correctly */
		ret = snd_component_add(card->snd_card, card->components);
		if (ret < 0) {
			dev_err(card->dev,
				"ASoC: %s snd_component_add() failed: %d\n",
				card->name, ret);
			goto probe_end;
		}
	}

	ret = snd_soc_card_late_probe(card);
	if (ret < 0)
		goto probe_end;

	snd_soc_dapm_new_widgets(card);
	snd_soc_card_fixup_controls(card);

	ret = snd_card_register(card->snd_card);
	if (ret < 0) {
		dev_err(card->dev,
			"ASoC: failed to register soundcard %d\n", ret);
		goto probe_end;
	}

	card->instantiated = 1;
	dapm_mark_endpoints_dirty(card);
	snd_soc_dapm_sync(&card->dapm);

	/* deactivate pins to sleep state */
	for_each_card_components(card, component)
		if (!snd_soc_component_active(component))
			pinctrl_pm_select_sleep_state(component->dev);

probe_end:
	if (ret < 0)
		soc_cleanup_card_resources(card);

	snd_soc_card_mutex_unlock(card);
	mutex_unlock(&client_mutex);

	return ret;
}

/* probes a new socdev */
static int soc_probe(struct platform_device *pdev)
{
	struct snd_soc_card *card = platform_get_drvdata(pdev);

	/*
	 * no card, so machine driver should be registering card
	 * we should not be here in that case so ret error
	 */
	if (!card)
		return -EINVAL;

	dev_warn(&pdev->dev,
		 "ASoC: machine %s should use snd_soc_register_card()\n",
		 card->name);

	/* Bodge while we unpick instantiation */
	card->dev = &pdev->dev;

	return devm_snd_soc_register_card(&pdev->dev, card);
}

int snd_soc_poweroff(struct device *dev)
{
	struct snd_soc_card *card = dev_get_drvdata(dev);
	struct snd_soc_component *component;

	if (!snd_soc_card_is_instantiated(card))
		return 0;

	/*
	 * Flush out pmdown_time work - we actually do want to run it
	 * now, we're shutting down so no imminent restart.
 */
	snd_soc_flush_all_delayed_work(card);

	snd_soc_dapm_shutdown(card);

	/* deactivate pins to sleep state */
	for_each_card_components(card, component)
		pinctrl_pm_select_sleep_state(component->dev);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_poweroff);

/* standard dev_pm_ops: suspend/resume double as freeze/thaw and poweroff/restore */
const struct dev_pm_ops snd_soc_pm_ops = {
	.suspend = snd_soc_suspend,
	.resume = snd_soc_resume,
	.freeze = snd_soc_suspend,
	.thaw = snd_soc_resume,
	.poweroff = snd_soc_poweroff,
	.restore = snd_soc_resume,
};
EXPORT_SYMBOL_GPL(snd_soc_pm_ops);

/* ASoC platform driver */
static struct platform_driver soc_driver = {
	.driver		= {
		.name		= "soc-audio",
		.pm		= &snd_soc_pm_ops,
	},
	.probe		= soc_probe,
};

/**
 * snd_soc_cnew - create new control
 * @_template: control template
 * @data: control private data
 * @long_name: control long name
 * @prefix: control name prefix
 *
 * Create a new mixer control from a template control.
 *
 * Returns 0 for success, else error.
 */
struct snd_kcontrol *snd_soc_cnew(const struct snd_kcontrol_new *_template,
				  void *data, const char *long_name,
				  const char *prefix)
{
	struct snd_kcontrol_new template;
	struct snd_kcontrol *kcontrol;
	char *name = NULL;

	memcpy(&template, _template, sizeof(template));
	template.index = 0;

	if (!long_name)
		long_name = template.name;

	if (prefix) {
		name = kasprintf(GFP_KERNEL, "%s %s", prefix, long_name);
		if (!name)
			return NULL;

		template.name = name;
	} else {
		template.name = long_name;
	}

	kcontrol = snd_ctl_new1(&template, data);

	/* snd_ctl_new1() copied the name, so the temporary can go */
	kfree(name);

	return kcontrol;
}
EXPORT_SYMBOL_GPL(snd_soc_cnew);

/*
 * Add @num_controls template controls to @card, instantiating each via
 * snd_soc_cnew() with the given name @prefix and private @data.
 * Stops and returns the error of the first control that fails to add.
 */
static int snd_soc_add_controls(struct snd_card *card, struct device *dev,
				const struct snd_kcontrol_new *controls,
				int num_controls, const char *prefix,
				void *data)
{
	int i;

	for (i = 0; i < num_controls; i++) {
		const struct snd_kcontrol_new *control = &controls[i];
		int err = snd_ctl_add(card, snd_soc_cnew(control, data,
							 control->name, prefix));
		if (err < 0) {
			dev_err(dev, "ASoC: Failed to add %s: %d\n",
				control->name, err);
			return err;
		}
	}

	return 0;
}

/**
 * snd_soc_add_component_controls - Add an 
array of controls to a component.
 *
 * @component: Component to add controls to
 * @controls: Array of controls to add
 * @num_controls: Number of elements in the array
 *
 * Return: 0 for success, else error.
 */
int snd_soc_add_component_controls(struct snd_soc_component *component,
				   const struct snd_kcontrol_new *controls,
				   unsigned int num_controls)
{
	struct snd_card *card = component->card->snd_card;

	return snd_soc_add_controls(card, component->dev, controls,
				    num_controls, component->name_prefix,
				    component);
}
EXPORT_SYMBOL_GPL(snd_soc_add_component_controls);

/**
 * snd_soc_add_card_controls - add an array of controls to a SoC card.
 * Convenience function to add a list of controls.
 *
 * @soc_card: SoC card to add controls to
 * @controls: array of controls to add
 * @num_controls: number of elements in the array
 *
 * Return 0 for success, else error.
 */
int snd_soc_add_card_controls(struct snd_soc_card *soc_card,
			      const struct snd_kcontrol_new *controls,
			      int num_controls)
{
	struct snd_card *card = soc_card->snd_card;

	return snd_soc_add_controls(card, soc_card->dev, controls,
				    num_controls, NULL, soc_card);
}
EXPORT_SYMBOL_GPL(snd_soc_add_card_controls);

/**
 * snd_soc_add_dai_controls - add an array of controls to a DAI.
 * Convenience function to add a list of controls.
 *
 * @dai: DAI to add controls to
 * @controls: array of controls to add
 * @num_controls: number of elements in the array
 *
 * Return 0 for success, else error.
 */
int snd_soc_add_dai_controls(struct snd_soc_dai *dai,
			     const struct snd_kcontrol_new *controls,
			     int num_controls)
{
	struct snd_card *card = dai->component->card->snd_card;

	return snd_soc_add_controls(card, dai->dev, controls, num_controls,
				    NULL, dai);
}
EXPORT_SYMBOL_GPL(snd_soc_add_dai_controls);

/**
 * snd_soc_register_card - Register a card with the ASoC core
 *
 * @card: Card to register
 *
 */
int snd_soc_register_card(struct snd_soc_card *card)
{
	if (!card->name || !card->dev)
		return -EINVAL;

	dev_set_drvdata(card->dev, card);

	INIT_LIST_HEAD(&card->widgets);
	INIT_LIST_HEAD(&card->paths);
	INIT_LIST_HEAD(&card->dapm_list);
	INIT_LIST_HEAD(&card->aux_comp_list);
	INIT_LIST_HEAD(&card->component_dev_list);
	INIT_LIST_HEAD(&card->list);
	INIT_LIST_HEAD(&card->rtd_list);
	INIT_LIST_HEAD(&card->dapm_dirty);
	INIT_LIST_HEAD(&card->dobj_list);

	card->instantiated = 0;
	mutex_init(&card->mutex);
	mutex_init(&card->dapm_mutex);
	mutex_init(&card->pcm_mutex);

	return snd_soc_bind_card(card);
}
EXPORT_SYMBOL_GPL(snd_soc_register_card);

/**
 * snd_soc_unregister_card - Unregister a card with the ASoC core
 *
 * @card: Card to unregister
 *
 */
void snd_soc_unregister_card(struct snd_soc_card *card)
{
	mutex_lock(&client_mutex);
	snd_soc_unbind_card(card, true);
	mutex_unlock(&client_mutex);
	dev_dbg(card->dev, "ASoC: Unregistered card '%s'\n", card->name);
}
EXPORT_SYMBOL_GPL(snd_soc_unregister_card);

/*
 * Simplify DAI link configuration by removing ".-1" from device names
 * and sanitizing names.
 */
static char *fmt_single_name(struct device *dev, int *id)
{
	const char *devname = dev_name(dev);
	char *found, *name;
	unsigned int id1, id2;

	if (devname == NULL)
		return NULL;

	name = devm_kstrdup(dev, devname, GFP_KERNEL);
	if (!name)
		return NULL;

	/* are we a "%s.%d" name (platform and SPI components) */
	found = strstr(name, dev->driver->name);
	if (found) {
		/* get ID */
		if (sscanf(&found[strlen(dev->driver->name)], ".%d", id) == 1) {

			/* discard ID from name if ID == -1 */
			if (*id == -1)
				found[strlen(dev->driver->name)] = '\0';
		}
	/* I2C component devices are named "bus-addr" */
	} else if (sscanf(name, "%x-%x", &id1, &id2) == 2) {
		/* create unique ID number from I2C addr and bus */
		*id = ((id1 & 0xffff) << 16) + id2;

		devm_kfree(dev, name);

		/* sanitize component name for DAI link creation */
		name = devm_kasprintf(dev, GFP_KERNEL, "%s.%s",
				      dev->driver->name, devname);
	} else {
		/* no recognisable pattern - keep the device name, ID 0 */
		*id = 0;
	}

	return name;
}

/*
 * Simplify DAI link naming for single devices with multiple DAIs by removing
 * any ".-1" and using the DAI name (instead of device name).
 */
static inline char *fmt_multiple_name(struct device *dev,
				      struct snd_soc_dai_driver *dai_drv)
{
	if (dai_drv->name == NULL) {
		dev_err(dev,
			"ASoC: error - multiple DAI %s registered with no name\n",
			dev_name(dev));
		return NULL;
	}

	return devm_kstrdup(dev, dai_drv->name, GFP_KERNEL);
}

/* remove a DAI from its component's list; the DAI memory is devm-managed */
void snd_soc_unregister_dai(struct snd_soc_dai *dai)
{
	dev_dbg(dai->dev, "ASoC: Unregistered DAI '%s'\n", dai->name);
	list_del(&dai->list);
}
EXPORT_SYMBOL_GPL(snd_soc_unregister_dai);

/**
 * snd_soc_register_dai - Register a DAI dynamically & create its widgets
 *
 * @component: The component the DAIs are registered for
 * @dai_drv: DAI driver to use for the DAI
 * @legacy_dai_naming: if %true, use legacy single-name format;
 *                     if %false, use multiple-name format;
 *
 * Topology can use this API to register DAIs when probing a component.
 * These DAIs' widgets will be freed in the card cleanup and the DAIs
 * will be freed in the component cleanup.
 */
struct snd_soc_dai *snd_soc_register_dai(struct snd_soc_component *component,
					 struct snd_soc_dai_driver *dai_drv,
					 bool legacy_dai_naming)
{
	struct device *dev = component->dev;
	struct snd_soc_dai *dai;

	lockdep_assert_held(&client_mutex);

	dai = devm_kzalloc(dev, sizeof(*dai), GFP_KERNEL);
	if (dai == NULL)
		return NULL;

	/*
	 * Back in the old days when we still had component-less DAIs,
	 * instead of having a static name, component-less DAIs would
	 * inherit the name of the parent device so it is possible to
	 * register multiple instances of the DAI. We still need to keep
	 * the same naming style even though those DAIs are not
	 * component-less anymore.
	 */
	if (legacy_dai_naming &&
	    (dai_drv->id == 0 || dai_drv->name == NULL)) {
		dai->name = fmt_single_name(dev, &dai->id);
	} else {
		dai->name = fmt_multiple_name(dev, dai_drv);
		if (dai_drv->id)
			dai->id = dai_drv->id;
		else
			dai->id = component->num_dai;
	}
	if (!dai->name)
		return NULL;

	dai->component = component;
	dai->dev = dev;
	dai->driver = dai_drv;

	/* see for_each_component_dais */
	list_add_tail(&dai->list, &component->dai_list);
	component->num_dai++;

	dev_dbg(dev, "ASoC: Registered DAI '%s'\n", dai->name);

	return dai;
}
EXPORT_SYMBOL_GPL(snd_soc_register_dai);

/**
 * snd_soc_unregister_dais - Unregister DAIs from the ASoC core
 *
 * @component: The component for which the DAIs should be unregistered
 */
static void snd_soc_unregister_dais(struct snd_soc_component *component)
{
	struct snd_soc_dai *dai, *_dai;

	for_each_component_dais_safe(component, dai, _dai)
		snd_soc_unregister_dai(dai);
}

/**
 * snd_soc_register_dais - Register a DAI with the ASoC core
 *
 * @component: The component the DAIs are registered for
 * @dai_drv: DAI driver to use for the DAIs
 * @count: Number of DAIs
 */
static int snd_soc_register_dais(struct snd_soc_component *component,
				 struct snd_soc_dai_driver *dai_drv,
				 size_t count)
{
	struct snd_soc_dai *dai;
	unsigned int i;
	int ret;

	for (i = 0; i < count; i++) {
		/* legacy naming only applies to single-DAI components */
		dai = snd_soc_register_dai(component, dai_drv + i, count == 1 &&
					   component->driver->legacy_dai_naming);
		if (dai == NULL) {
			ret = -ENOMEM;
			goto err;
		}
	}

	return 0;

err:
	/* roll back every DAI registered so far */
	snd_soc_unregister_dais(component);

	return ret;
}

/* pair the LE and BE variants of a PCM format bit */
#define ENDIANNESS_MAP(name) \
	(SNDRV_PCM_FMTBIT_##name##LE | SNDRV_PCM_FMTBIT_##name##BE)
static u64 endianness_format_map[] = {
	ENDIANNESS_MAP(S16_),
	ENDIANNESS_MAP(U16_),
	ENDIANNESS_MAP(S24_),
	ENDIANNESS_MAP(U24_),
	ENDIANNESS_MAP(S32_),
	ENDIANNESS_MAP(U32_),
	ENDIANNESS_MAP(S24_3),
	ENDIANNESS_MAP(U24_3),
	ENDIANNESS_MAP(S20_3),
	ENDIANNESS_MAP(U20_3),
	ENDIANNESS_MAP(S18_3),
	ENDIANNESS_MAP(U18_3),
	ENDIANNESS_MAP(FLOAT_),
	ENDIANNESS_MAP(FLOAT64_),
	ENDIANNESS_MAP(IEC958_SUBFRAME_),
};

/*
 * Fix up the DAI formats for endianness: codecs don't actually see
 * the endianness of the data but we're using the CPU format
 * definitions which do need to include endianness so we ensure that
 * codec DAIs always have both big and little endian variants set.
 */
static void convert_endianness_formats(struct snd_soc_pcm_stream *stream)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(endianness_format_map); i++)
		if (stream->formats & endianness_format_map[i])
			stream->formats |= endianness_format_map[i];
}

/* retry binding of cards parked on unbind_card_list by snd_soc_unbind_card() */
static void snd_soc_try_rebind_card(void)
{
	struct snd_soc_card *card, *c;

	list_for_each_entry_safe(card, c, &unbind_card_list, list)
		if (!snd_soc_bind_card(card))
			list_del(&card->list);
}

/* caller must hold client_mutex */
static void snd_soc_del_component_unlocked(struct snd_soc_component *component)
{
	struct snd_soc_card *card = component->card;

	snd_soc_unregister_dais(component);

	if (card)
		snd_soc_unbind_card(card, false);

	list_del(&component->list);
}

int snd_soc_component_initialize(struct snd_soc_component *component,
				 const struct snd_soc_component_driver *driver,
				 struct device *dev)
{
	INIT_LIST_HEAD(&component->dai_list);
	INIT_LIST_HEAD(&component->dobj_list);
	INIT_LIST_HEAD(&component->card_list);
	INIT_LIST_HEAD(&component->list);
	mutex_init(&component->io_mutex);

	component->name = fmt_single_name(dev, &component->id);
	if (!component->name) {
		dev_err(dev, "ASoC: 
Failed to allocate name\n");
		return -ENOMEM;
	}

	component->dev = dev;
	component->driver = driver;

#ifdef CONFIG_DEBUG_FS
	if (!component->debugfs_prefix)
		component->debugfs_prefix = driver->debugfs_prefix;
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_component_initialize);

int snd_soc_add_component(struct snd_soc_component *component,
			  struct snd_soc_dai_driver *dai_drv,
			  int num_dai)
{
	int ret;
	int i;

	mutex_lock(&client_mutex);

	if (component->driver->endianness) {
		for (i = 0; i < num_dai; i++) {
			convert_endianness_formats(&dai_drv[i].playback);
			convert_endianness_formats(&dai_drv[i].capture);
		}
	}

	ret = snd_soc_register_dais(component, dai_drv, num_dai);
	if (ret < 0) {
		dev_err(component->dev, "ASoC: Failed to register DAIs: %d\n",
			ret);
		goto err_cleanup;
	}

	/* components without their own I/O callbacks may use a regmap */
	if (!component->driver->write && !component->driver->read) {
		if (!component->regmap)
			component->regmap = dev_get_regmap(component->dev,
							   NULL);
		if (component->regmap)
			snd_soc_component_setup_regmap(component);
	}

	/* see for_each_component */
	list_add(&component->list, &component_list);

err_cleanup:
	if (ret < 0)
		snd_soc_del_component_unlocked(component);

	mutex_unlock(&client_mutex);

	/* a new component may satisfy a previously unbindable card */
	if (ret == 0)
		snd_soc_try_rebind_card();

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_add_component);

int snd_soc_register_component(struct device *dev,
			       const struct snd_soc_component_driver *component_driver,
			       struct snd_soc_dai_driver *dai_drv,
			       int num_dai)
{
	struct snd_soc_component *component;
	int ret;

	component = devm_kzalloc(dev, sizeof(*component), GFP_KERNEL);
	if (!component)
		return -ENOMEM;

	ret = snd_soc_component_initialize(component, component_driver, dev);
	if (ret < 0)
		return ret;

	return snd_soc_add_component(component, dai_drv, num_dai);
}
EXPORT_SYMBOL_GPL(snd_soc_register_component);

/**
 * snd_soc_unregister_component_by_driver - Unregister component using a given driver
 * from the ASoC core
 *
 * @dev: The device to unregister
 * @component_driver: The component driver to unregister
 */
void 
snd_soc_unregister_component_by_driver(struct device *dev,
				       const struct snd_soc_component_driver *component_driver)
{
	struct snd_soc_component *component;

	if (!component_driver)
		return;

	mutex_lock(&client_mutex);
	component = snd_soc_lookup_component_nolocked(dev, component_driver->name);
	if (!component)
		goto out;

	snd_soc_del_component_unlocked(component);

out:
	mutex_unlock(&client_mutex);
}
EXPORT_SYMBOL_GPL(snd_soc_unregister_component_by_driver);

/**
 * snd_soc_unregister_component - Unregister all related component
 * from the ASoC core
 *
 * @dev: The device to unregister
 */
void snd_soc_unregister_component(struct device *dev)
{
	mutex_lock(&client_mutex);
	/* keep looking up until no component of this device remains */
	while (1) {
		struct snd_soc_component *component = snd_soc_lookup_component_nolocked(dev, NULL);

		if (!component)
			break;

		snd_soc_del_component_unlocked(component);
	}
	mutex_unlock(&client_mutex);
}
EXPORT_SYMBOL_GPL(snd_soc_unregister_component);

/* Retrieve a card's name from device tree */
int snd_soc_of_parse_card_name(struct snd_soc_card *card,
			       const char *propname)
{
	struct device_node *np;
	int ret;

	if (!card->dev) {
		pr_err("card->dev is not set before calling %s\n", __func__);
		return -EINVAL;
	}

	np = card->dev->of_node;

	ret = of_property_read_string_index(np, propname, 0, &card->name);
	/*
	 * EINVAL means the property does not exist. This is fine providing
	 * card->name was previously set, which is checked later in
	 * snd_soc_register_card.
	 */
	if (ret < 0 && ret != -EINVAL) {
		dev_err(card->dev,
			"ASoC: Property '%s' could not be read: %d\n",
			propname, ret);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_of_parse_card_name);

/* widget templates matched by name prefix in the DT property below */
static const struct snd_soc_dapm_widget simple_widgets[] = {
	SND_SOC_DAPM_MIC("Microphone", NULL),
	SND_SOC_DAPM_LINE("Line", NULL),
	SND_SOC_DAPM_HP("Headphone", NULL),
	SND_SOC_DAPM_SPK("Speaker", NULL),
};

int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card,
					  const char *propname)
{
	struct device_node *np = card->dev->of_node;
	struct snd_soc_dapm_widget *widgets;
	const char *template, *wname;
	int i, j, num_widgets;

	/* the property is a list of (template, name) string pairs */
	num_widgets = of_property_count_strings(np, propname);
	if (num_widgets < 0) {
		dev_err(card->dev,
			"ASoC: Property '%s' does not exist\n",	propname);
		return -EINVAL;
	}
	if (!num_widgets) {
		dev_err(card->dev, "ASoC: Property '%s's length is zero\n",
			propname);
		return -EINVAL;
	}
	if (num_widgets & 1) {
		dev_err(card->dev,
			"ASoC: Property '%s' length is not even\n", propname);
		return -EINVAL;
	}

	num_widgets /= 2;

	widgets = devm_kcalloc(card->dev, num_widgets, sizeof(*widgets),
			       GFP_KERNEL);
	if (!widgets) {
		dev_err(card->dev,
			"ASoC: Could not allocate memory for widgets\n");
		return -ENOMEM;
	}

	for (i = 0; i < num_widgets; i++) {
		int ret = of_property_read_string_index(np, propname,
							2 * i, &template);
		if (ret) {
			dev_err(card->dev,
				"ASoC: Property '%s' index %d read error:%d\n",
				propname, 2 * i, ret);
			return -EINVAL;
		}

		for (j = 0; j < ARRAY_SIZE(simple_widgets); j++) {
			if (!strncmp(template, simple_widgets[j].name,
				     strlen(simple_widgets[j].name))) {
				widgets[i] = simple_widgets[j];
				break;
			}
		}

		if (j >= ARRAY_SIZE(simple_widgets)) {
			dev_err(card->dev,
				"ASoC: DAPM widget '%s' is not supported\n",
				template);
			return -EINVAL;
		}

		ret = of_property_read_string_index(np, propname,
						    (2 * i) + 1,
						    &wname);
		if (ret) {
			dev_err(card->dev,
				"ASoC: Property '%s' index %d read error:%d\n",
				propname, (2 * i) + 1, ret);
			return -EINVAL;
		}

		widgets[i].name = wname;
	}

	card->of_dapm_widgets = 
widgets;
	card->num_of_dapm_widgets = num_widgets;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_simple_widgets);

int snd_soc_of_parse_pin_switches(struct snd_soc_card *card, const char *prop)
{
	const unsigned int nb_controls_max = 16;
	const char **strings, *control_name;
	struct snd_kcontrol_new *controls;
	struct device *dev = card->dev;
	unsigned int i, nb_controls;
	int ret;

	if (!of_property_read_bool(dev->of_node, prop))
		return 0;

	strings = devm_kcalloc(dev, nb_controls_max,
			       sizeof(*strings), GFP_KERNEL);
	if (!strings)
		return -ENOMEM;

	ret = of_property_read_string_array(dev->of_node, prop,
					    strings, nb_controls_max);
	if (ret < 0)
		return ret;

	/* on success the return value is the number of strings read */
	nb_controls = (unsigned int)ret;

	controls = devm_kcalloc(dev, nb_controls,
				sizeof(*controls), GFP_KERNEL);
	if (!controls)
		return -ENOMEM;

	/* build one "<pin> Switch" mixer control per listed pin */
	for (i = 0; i < nb_controls; i++) {
		control_name = devm_kasprintf(dev, GFP_KERNEL,
					      "%s Switch", strings[i]);
		if (!control_name)
			return -ENOMEM;

		controls[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
		controls[i].name = control_name;
		controls[i].info = snd_soc_dapm_info_pin_switch;
		controls[i].get = snd_soc_dapm_get_pin_switch;
		controls[i].put = snd_soc_dapm_put_pin_switch;
		controls[i].private_value = (unsigned long)strings[i];
	}

	card->controls = controls;
	card->num_controls = nb_controls;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_of_parse_pin_switches);

int snd_soc_of_get_slot_mask(struct device_node *np,
			     const char *prop_name,
			     unsigned int *mask)
{
	u32 val;
	const __be32 *of_slot_mask = of_get_property(np, prop_name, &val);
	int i;

	if (!of_slot_mask)
		return 0;

	/* of_get_property() returned the length in bytes; convert to cells */
	val /= sizeof(u32);
	for (i = 0; i < val; i++)
		if (be32_to_cpup(&of_slot_mask[i]))
			*mask |= (1 << i);

	return val;
}
EXPORT_SYMBOL_GPL(snd_soc_of_get_slot_mask);

int snd_soc_of_parse_tdm_slot(struct device_node *np,
			      unsigned int *tx_mask,
			      unsigned int *rx_mask,
			      unsigned int *slots,
			      unsigned int *slot_width)
{
	u32 val;
	int ret;

	if (tx_mask)
		snd_soc_of_get_slot_mask(np, "dai-tdm-slot-tx-mask", tx_mask);
	if (rx_mask)
		snd_soc_of_get_slot_mask(np, 
"dai-tdm-slot-rx-mask", rx_mask); if (of_property_read_bool(np, "dai-tdm-slot-num")) { ret = of_property_read_u32(np, "dai-tdm-slot-num", &val); if (ret) return ret; if (slots) *slots = val; } if (of_property_read_bool(np, "dai-tdm-slot-width")) { ret = of_property_read_u32(np, "dai-tdm-slot-width", &val); if (ret) return ret; if (slot_width) *slot_width = val; } return 0; } EXPORT_SYMBOL_GPL(snd_soc_of_parse_tdm_slot); void snd_soc_dlc_use_cpu_as_platform(struct snd_soc_dai_link_component *platforms, struct snd_soc_dai_link_component *cpus) { platforms->of_node = cpus->of_node; platforms->dai_args = cpus->dai_args; } EXPORT_SYMBOL_GPL(snd_soc_dlc_use_cpu_as_platform); void snd_soc_of_parse_node_prefix(struct device_node *np, struct snd_soc_codec_conf *codec_conf, struct device_node *of_node, const char *propname) { const char *str; int ret; ret = of_property_read_string(np, propname, &str); if (ret < 0) { /* no prefix is not error */ return; } codec_conf->dlc.of_node = of_node; codec_conf->name_prefix = str; } EXPORT_SYMBOL_GPL(snd_soc_of_parse_node_prefix); int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, const char *propname) { struct device_node *np = card->dev->of_node; int num_routes; struct snd_soc_dapm_route *routes; int i; num_routes = of_property_count_strings(np, propname); if (num_routes < 0 || num_routes & 1) { dev_err(card->dev, "ASoC: Property '%s' does not exist or its length is not even\n", propname); return -EINVAL; } num_routes /= 2; routes = devm_kcalloc(card->dev, num_routes, sizeof(*routes), GFP_KERNEL); if (!routes) { dev_err(card->dev, "ASoC: Could not allocate DAPM route table\n"); return -ENOMEM; } for (i = 0; i < num_routes; i++) { int ret = of_property_read_string_index(np, propname, 2 * i, &routes[i].sink); if (ret) { dev_err(card->dev, "ASoC: Property '%s' index %d could not be read: %d\n", propname, 2 * i, ret); return -EINVAL; } ret = of_property_read_string_index(np, propname, (2 * i) + 1, &routes[i].source); if (ret) { 
dev_err(card->dev, "ASoC: Property '%s' index %d could not be read: %d\n", propname, (2 * i) + 1, ret); return -EINVAL; } } card->num_of_dapm_routes = num_routes; card->of_dapm_routes = routes; return 0; } EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_routing); int snd_soc_of_parse_aux_devs(struct snd_soc_card *card, const char *propname) { struct device_node *node = card->dev->of_node; struct snd_soc_aux_dev *aux; int num, i; num = of_count_phandle_with_args(node, propname, NULL); if (num == -ENOENT) { return 0; } else if (num < 0) { dev_err(card->dev, "ASOC: Property '%s' could not be read: %d\n", propname, num); return num; } aux = devm_kcalloc(card->dev, num, sizeof(*aux), GFP_KERNEL); if (!aux) return -ENOMEM; card->aux_dev = aux; card->num_aux_devs = num; for_each_card_pre_auxs(card, i, aux) { aux->dlc.of_node = of_parse_phandle(node, propname, i); if (!aux->dlc.of_node) return -EINVAL; } return 0; } EXPORT_SYMBOL_GPL(snd_soc_of_parse_aux_devs); unsigned int snd_soc_daifmt_clock_provider_flipped(unsigned int dai_fmt) { unsigned int inv_dai_fmt = dai_fmt & ~SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK; switch (dai_fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) { case SND_SOC_DAIFMT_CBP_CFP: inv_dai_fmt |= SND_SOC_DAIFMT_CBC_CFC; break; case SND_SOC_DAIFMT_CBP_CFC: inv_dai_fmt |= SND_SOC_DAIFMT_CBC_CFP; break; case SND_SOC_DAIFMT_CBC_CFP: inv_dai_fmt |= SND_SOC_DAIFMT_CBP_CFC; break; case SND_SOC_DAIFMT_CBC_CFC: inv_dai_fmt |= SND_SOC_DAIFMT_CBP_CFP; break; } return inv_dai_fmt; } EXPORT_SYMBOL_GPL(snd_soc_daifmt_clock_provider_flipped); unsigned int snd_soc_daifmt_clock_provider_from_bitmap(unsigned int bit_frame) { /* * bit_frame is return value from * snd_soc_daifmt_parse_clock_provider_raw() */ /* Codec base */ switch (bit_frame) { case 0x11: return SND_SOC_DAIFMT_CBP_CFP; case 0x10: return SND_SOC_DAIFMT_CBP_CFC; case 0x01: return SND_SOC_DAIFMT_CBC_CFP; default: return SND_SOC_DAIFMT_CBC_CFC; } return 0; } EXPORT_SYMBOL_GPL(snd_soc_daifmt_clock_provider_from_bitmap); 
/*
 * snd_soc_daifmt_parse_format - parse the DAI hardware audio format from DT
 * @np: device node to parse
 * @prefix: optional property-name prefix (NULL is treated as "")
 *
 * Returns the SND_SOC_DAIFMT_* format/clock/inversion bits described by the
 * node's properties. Unrecognized format strings simply contribute no
 * FORMAT bits.
 */
unsigned int snd_soc_daifmt_parse_format(struct device_node *np,
					 const char *prefix)
{
	int ret;
	char prop[128];
	unsigned int format = 0;
	int bit, frame;
	const char *str;
	struct {
		char *name;
		unsigned int val;
	} of_fmt_table[] = {
		{ "i2s",	SND_SOC_DAIFMT_I2S },
		{ "right_j",	SND_SOC_DAIFMT_RIGHT_J },
		{ "left_j",	SND_SOC_DAIFMT_LEFT_J },
		{ "dsp_a",	SND_SOC_DAIFMT_DSP_A },
		{ "dsp_b",	SND_SOC_DAIFMT_DSP_B },
		{ "ac97",	SND_SOC_DAIFMT_AC97 },
		{ "pdm",	SND_SOC_DAIFMT_PDM},
		{ "msb",	SND_SOC_DAIFMT_MSB },
		{ "lsb",	SND_SOC_DAIFMT_LSB },
	};

	if (!prefix)
		prefix = "";

	/*
	 * check "dai-format = xxx"
	 * or    "[prefix]format = xxx"
	 * SND_SOC_DAIFMT_FORMAT_MASK area
	 */
	ret = of_property_read_string(np, "dai-format", &str);
	if (ret < 0) {
		snprintf(prop, sizeof(prop), "%sformat", prefix);
		ret = of_property_read_string(np, prop, &str);
	}
	if (ret == 0) {
		int i;

		for (i = 0; i < ARRAY_SIZE(of_fmt_table); i++) {
			if (strcmp(str, of_fmt_table[i].name) == 0) {
				format |= of_fmt_table[i].val;
				break;
			}
		}
	}

	/*
	 * check "[prefix]continuous-clock"
	 * SND_SOC_DAIFMT_CLOCK_MASK area
	 */
	snprintf(prop, sizeof(prop), "%scontinuous-clock", prefix);
	if (of_property_read_bool(np, prop))
		format |= SND_SOC_DAIFMT_CONT;
	else
		format |= SND_SOC_DAIFMT_GATED;

	/*
	 * check "[prefix]bitclock-inversion"
	 * check "[prefix]frame-inversion"
	 * SND_SOC_DAIFMT_INV_MASK area
	 */
	snprintf(prop, sizeof(prop), "%sbitclock-inversion", prefix);
	bit = !!of_get_property(np, prop, NULL);

	snprintf(prop, sizeof(prop), "%sframe-inversion", prefix);
	frame = !!of_get_property(np, prop, NULL);

	/* encode presence of the two properties as a 0xBF-style nibble pair */
	switch ((bit << 4) + frame) {
	case 0x11:
		format |= SND_SOC_DAIFMT_IB_IF;
		break;
	case 0x10:
		format |= SND_SOC_DAIFMT_IB_NF;
		break;
	case 0x01:
		format |= SND_SOC_DAIFMT_NB_IF;
		break;
	default:
		/* SND_SOC_DAIFMT_NB_NF is default */
		break;
	}

	return format;
}
EXPORT_SYMBOL_GPL(snd_soc_daifmt_parse_format);

/*
 * snd_soc_daifmt_parse_clock_provider_raw - parse bitclock/frame masters
 * @np: device node to parse
 * @prefix: optional property-name prefix (NULL is treated as "")
 * @bitclkmaster: if non-NULL and the property exists, receives the phandle
 *	target node (caller owns the reference)
 * @framemaster: likewise for "[prefix]frame-master"
 *
 * Returns a bitmap (bit 4 = bitclock-master present, bit 0 = frame-master
 * present) suitable for snd_soc_daifmt_clock_provider_from_bitmap().
 */
unsigned int snd_soc_daifmt_parse_clock_provider_raw(struct device_node *np,
						     const char *prefix,
						     struct device_node **bitclkmaster,
						     struct device_node **framemaster)
{
	char prop[128];
	unsigned int bit, frame;

	if (!prefix)
		prefix = "";

	/*
	 * check "[prefix]bitclock-master"
	 * check "[prefix]frame-master"
	 */
	snprintf(prop, sizeof(prop), "%sbitclock-master", prefix);
	bit = !!of_get_property(np, prop, NULL);
	if (bit && bitclkmaster)
		*bitclkmaster = of_parse_phandle(np, prop, 0);

	snprintf(prop, sizeof(prop), "%sframe-master", prefix);
	frame = !!of_get_property(np, prop, NULL);
	if (frame && framemaster)
		*framemaster = of_parse_phandle(np, prop, 0);

	/*
	 * return bitmap.
	 * It will be parameter of
	 *	snd_soc_daifmt_clock_provider_from_bitmap()
	 */
	return (bit << 4) + frame;
}
EXPORT_SYMBOL_GPL(snd_soc_daifmt_parse_clock_provider_raw);

/*
 * snd_soc_get_stream_cpu - map a link stream direction to the CPU side
 * @dai_link: link being examined
 * @stream: SNDRV_PCM_STREAM_PLAYBACK or SNDRV_PCM_STREAM_CAPTURE
 *
 * For normal links the CPU uses the same direction; for Codec2Codec links
 * (dai_link->c2c_params set) the CPU direction is the opposite of the codec.
 */
int snd_soc_get_stream_cpu(struct snd_soc_dai_link *dai_link, int stream)
{
	/*
	 * [Normal]
	 *
	 * Playback
	 *	CPU  : SNDRV_PCM_STREAM_PLAYBACK
	 *	Codec: SNDRV_PCM_STREAM_PLAYBACK
	 *
	 * Capture
	 *	CPU  : SNDRV_PCM_STREAM_CAPTURE
	 *	Codec: SNDRV_PCM_STREAM_CAPTURE
	 */
	if (!dai_link->c2c_params)
		return stream;

	/*
	 * [Codec2Codec]
	 *
	 * Playback
	 *	CPU  : SNDRV_PCM_STREAM_CAPTURE
	 *	Codec: SNDRV_PCM_STREAM_PLAYBACK
	 *
	 * Capture
	 *	CPU  : SNDRV_PCM_STREAM_PLAYBACK
	 *	Codec: SNDRV_PCM_STREAM_CAPTURE
	 */
	if (stream == SNDRV_PCM_STREAM_CAPTURE)
		return SNDRV_PCM_STREAM_PLAYBACK;

	return SNDRV_PCM_STREAM_CAPTURE;
}
EXPORT_SYMBOL_GPL(snd_soc_get_stream_cpu);

/*
 * snd_soc_get_dai_id - translate an OF-graph endpoint to a DAI id
 * @ep: OF-graph endpoint node
 *
 * Asks the owning component's .of_xlate_dai_id callback; returns -ENOTSUPP
 * when no component (or no callback) can translate the endpoint.
 */
int snd_soc_get_dai_id(struct device_node *ep)
{
	struct snd_soc_component *component;
	struct snd_soc_dai_link_component dlc = {
		.of_node = of_graph_get_port_parent(ep),
	};
	int ret;

	/*
	 * For example HDMI case, HDMI has video/sound port,
	 * but ALSA SoC needs sound port number only.
	 * Thus counting HDMI DT port/endpoint doesn't work.
	 * Then, it should have .of_xlate_dai_id
	 */
	ret = -ENOTSUPP;
	mutex_lock(&client_mutex);
	component = soc_find_component(&dlc);
	if (component)
		ret = snd_soc_component_of_xlate_dai_id(component, ep);
	mutex_unlock(&client_mutex);

	/* drop the reference taken by of_graph_get_port_parent() */
	of_node_put(dlc.of_node);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_get_dai_id);

/*
 * snd_soc_get_dlc - resolve a "sound-dai" phandle into a dai_link_component
 * @args: parsed phandle arguments identifying component node and DAI index
 * @dlc: dai_link_component to fill in (dai_name, of_node)
 *
 * Returns 0 on success, -EPROBE_DEFER when no matching component is
 * registered yet, or a negative error code.
 */
int snd_soc_get_dlc(const struct of_phandle_args *args, struct snd_soc_dai_link_component *dlc)
{
	struct snd_soc_component *pos;
	int ret = -EPROBE_DEFER;

	mutex_lock(&client_mutex);
	for_each_component(pos) {
		struct device_node *component_of_node = soc_component_to_node(pos);

		if (component_of_node != args->np || !pos->num_dai)
			continue;

		ret = snd_soc_component_of_xlate_dai_name(pos, args, &dlc->dai_name);
		if (ret == -ENOTSUPP) {
			/* component has no custom translation: index directly */
			struct snd_soc_dai *dai;
			int id = -1;

			switch (args->args_count) {
			case 0:
				id = 0; /* same as dai_drv[0] */
				break;
			case 1:
				id = args->args[0];
				break;
			default:
				/* not supported */
				break;
			}

			if (id < 0 || id >= pos->num_dai) {
				ret = -EINVAL;
				continue;
			}

			ret = 0;

			/* find target DAI */
			for_each_component_dais(pos, dai) {
				if (id == 0)
					break;
				id--;
			}

			dlc->dai_name = snd_soc_dai_name_get(dai);
		} else if (ret) {
			/*
			 * if another error than ENOTSUPP is returned go on and
			 * check if another component is provided with the same
			 * node. This may happen if a device provides several
			 * components
			 */
			continue;
		}

		break;
	}

	if (ret == 0)
		dlc->of_node = args->np;

	mutex_unlock(&client_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_get_dlc);

/*
 * snd_soc_of_get_dlc - parse "sound-dai" at @index and resolve it
 * @of_node: node carrying the "sound-dai" property
 * @args: optional out-parameter for the parsed phandle args (may be NULL)
 * @dlc: dai_link_component to fill in
 * @index: which entry of the "sound-dai" list to parse
 */
int snd_soc_of_get_dlc(struct device_node *of_node,
		       struct of_phandle_args *args,
		       struct snd_soc_dai_link_component *dlc,
		       int index)
{
	struct of_phandle_args __args;
	int ret;

	/* caller may not care about the raw args; use a local then */
	if (!args)
		args = &__args;

	ret = of_parse_phandle_with_args(of_node, "sound-dai",
					 "#sound-dai-cells", index, args);
	if (ret)
		return ret;

	return snd_soc_get_dlc(args, dlc);
}
EXPORT_SYMBOL_GPL(snd_soc_of_get_dlc);

/* Convenience wrapper around snd_soc_get_dlc() returning only the DAI name */
int snd_soc_get_dai_name(const struct of_phandle_args *args,
			 const char **dai_name)
{
	struct snd_soc_dai_link_component dlc;
	int ret = snd_soc_get_dlc(args, &dlc);

	if (ret == 0)
		*dai_name = dlc.dai_name;

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_get_dai_name);

/* Convenience wrapper around snd_soc_of_get_dlc() returning only the DAI name */
int snd_soc_of_get_dai_name(struct device_node *of_node,
			    const char **dai_name, int index)
{
	struct snd_soc_dai_link_component dlc;
	int ret = snd_soc_of_get_dlc(of_node, NULL, &dlc, index);

	if (ret == 0)
		*dai_name = dlc.dai_name;

	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_of_get_dai_name);

/*
 * snd_soc_get_dai_via_args - find a registered DAI matching @dai_args
 * @dai_args: phandle args to match against each DAI driver's dai_args
 *
 * Returns the matching DAI, or NULL when none is registered.
 */
struct snd_soc_dai *snd_soc_get_dai_via_args(struct of_phandle_args *dai_args)
{
	struct snd_soc_dai *dai;
	struct snd_soc_component *component;

	mutex_lock(&client_mutex);
	for_each_component(component) {
		for_each_component_dais(component, dai)
			if (snd_soc_is_match_dai_args(dai->driver->dai_args, dai_args))
				goto found;
	}
	dai = NULL;
found:
	mutex_unlock(&client_mutex);
	return dai;
}
EXPORT_SYMBOL_GPL(snd_soc_get_dai_via_args);

/* Drop and clear one component's OF node reference, if any */
static void __snd_soc_of_put_component(struct snd_soc_dai_link_component *component)
{
	if (component->of_node) {
		of_node_put(component->of_node);
		component->of_node = NULL;
	}
}

/*
 * Count the "sound-dai" entries on @of_node and devm-allocate a matching
 * dai_link_component array. On success stores the array and count in the
 * out-parameters and returns 0; otherwise returns a negative error.
 */
static int __snd_soc_of_get_dai_link_component_alloc(
	struct device *dev, struct device_node *of_node,
	struct snd_soc_dai_link_component **ret_component,
	int *ret_num)
{
	struct snd_soc_dai_link_component *component;
	int num;

	/* Count the number of CPUs/CODECs */
	num = of_count_phandle_with_args(of_node, "sound-dai", "#sound-dai-cells");
	if (num <= 0) {
		if (num == -ENOENT)
			dev_err(dev, "No 'sound-dai' property\n");
		else
			dev_err(dev, "Bad phandle in 'sound-dai'\n");
		return num;
	}
	component = devm_kcalloc(dev, num, sizeof(*component), GFP_KERNEL);
	if (!component)
		return -ENOMEM;

	*ret_component	= component;
	*ret_num	= num;

	return 0;
}

/*
 * snd_soc_of_put_dai_link_codecs - Dereference device nodes in the codecs array
 * @dai_link: DAI link
 *
 * Dereference device nodes acquired by snd_soc_of_get_dai_link_codecs().
 */
void snd_soc_of_put_dai_link_codecs(struct snd_soc_dai_link *dai_link)
{
	struct snd_soc_dai_link_component *component;
	int index;

	for_each_link_codecs(dai_link, index, component)
		__snd_soc_of_put_component(component);
}
EXPORT_SYMBOL_GPL(snd_soc_of_put_dai_link_codecs);

/*
 * snd_soc_of_get_dai_link_codecs - Parse a list of CODECs in the devicetree
 * @dev: Card device
 * @of_node: Device node
 * @dai_link: DAI link
 *
 * Builds an array of CODEC DAI components from the DAI link property
 * 'sound-dai'.
 * The array is set in the DAI link and the number of DAIs is set accordingly.
 * The device nodes in the array (of_node) must be dereferenced by calling
 * snd_soc_of_put_dai_link_codecs() on @dai_link.
 *
 * Returns 0 for success
 */
int snd_soc_of_get_dai_link_codecs(struct device *dev,
				   struct device_node *of_node,
				   struct snd_soc_dai_link *dai_link)
{
	struct snd_soc_dai_link_component *component;
	int index, ret;

	ret = __snd_soc_of_get_dai_link_component_alloc(dev, of_node,
					 &dai_link->codecs, &dai_link->num_codecs);
	if (ret < 0)
		return ret;

	/* Parse the list */
	for_each_link_codecs(dai_link, index, component) {
		ret = snd_soc_of_get_dlc(of_node, NULL, component, index);
		if (ret)
			goto err;
	}
	return 0;
err:
	/* undo partially acquired references and reset the link */
	snd_soc_of_put_dai_link_codecs(dai_link);
	dai_link->codecs = NULL;
	dai_link->num_codecs = 0;
	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_of_get_dai_link_codecs);

/*
 * snd_soc_of_put_dai_link_cpus - Dereference device nodes in the codecs array
 * @dai_link: DAI link
 *
 * Dereference device nodes acquired by snd_soc_of_get_dai_link_cpus().
 */
void snd_soc_of_put_dai_link_cpus(struct snd_soc_dai_link *dai_link)
{
	struct snd_soc_dai_link_component *component;
	int index;

	for_each_link_cpus(dai_link, index, component)
		__snd_soc_of_put_component(component);
}
EXPORT_SYMBOL_GPL(snd_soc_of_put_dai_link_cpus);

/*
 * snd_soc_of_get_dai_link_cpus - Parse a list of CPU DAIs in the devicetree
 * @dev: Card device
 * @of_node: Device node
 * @dai_link: DAI link
 *
 * Is analogous to snd_soc_of_get_dai_link_codecs but parses a list of CPU DAIs
 * instead.
 *
 * Returns 0 for success
 */
int snd_soc_of_get_dai_link_cpus(struct device *dev,
				 struct device_node *of_node,
				 struct snd_soc_dai_link *dai_link)
{
	struct snd_soc_dai_link_component *component;
	int index, ret;

	/* Count the number of CPUs */
	ret = __snd_soc_of_get_dai_link_component_alloc(dev, of_node,
					 &dai_link->cpus, &dai_link->num_cpus);
	if (ret < 0)
		return ret;

	/* Parse the list */
	for_each_link_cpus(dai_link, index, component) {
		ret = snd_soc_of_get_dlc(of_node, NULL, component, index);
		if (ret)
			goto err;
	}
	return 0;
err:
	/* undo partially acquired references and reset the link */
	snd_soc_of_put_dai_link_cpus(dai_link);
	dai_link->cpus = NULL;
	dai_link->num_cpus = 0;
	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_of_get_dai_link_cpus);

/* Module entry: debugfs + utils first, then the soc platform driver */
static int __init snd_soc_init(void)
{
	int ret;

	snd_soc_debugfs_init();
	ret = snd_soc_util_init();
	if (ret)
		goto err_util_init;

	ret = platform_driver_register(&soc_driver);
	if (ret)
		goto err_register;

	return 0;

err_register:
	snd_soc_util_exit();
err_util_init:
	snd_soc_debugfs_exit();
	return ret;
}
module_init(snd_soc_init);

static void __exit snd_soc_exit(void)
{
	snd_soc_util_exit();
	snd_soc_debugfs_exit();

	platform_driver_unregister(&soc_driver);
}
module_exit(snd_soc_exit);

/* Module information */
MODULE_AUTHOR("Liam Girdwood, [email protected]");
MODULE_DESCRIPTION("ALSA SoC Core");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:soc-audio");
linux-master
sound/soc/soc-core.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * soc-topology-test.c  --  ALSA SoC Topology Kernel Unit Tests
 *
 * Copyright(c) 2021 Intel Corporation. All rights reserved.
 */

#include <linux/firmware.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <sound/soc-topology.h>
#include <kunit/test.h>

/* ===== HELPER FUNCTIONS =================================================== */

/*
 * snd_soc_component needs device to operate on (primarily for prints), create
 * fake one, as we don't register with PCI or anything else
 * device_driver name is used in some of the prints (fmt_single_name) so
 * we also mock up minimal one
 */
static struct device *test_dev;

static struct device_driver test_drv = {
	.name = "sound-soc-topology-test-driver",
};

/* per-test setup: register the fake root device used by every test case */
static int snd_soc_tplg_test_init(struct kunit *test)
{
	test_dev = root_device_register("sound-soc-topology-test");
	test_dev = get_device(test_dev);
	if (!test_dev)
		return -ENODEV;

	test_dev->driver = &test_drv;

	return 0;
}

/* per-test teardown: drop our reference and unregister the fake device */
static void snd_soc_tplg_test_exit(struct kunit *test)
{
	put_device(test_dev);
	root_device_unregister(test_dev);
}

/*
 * helper struct we use when registering component, as we load topology during
 * component probe, we need to pass struct kunit somehow to probe function, so
 * we can report test result
 */
struct kunit_soc_component {
	struct kunit *kunit;
	int expect; /* what result we expect when loading topology */
	struct snd_soc_component comp;
	struct snd_soc_card card;
	struct firmware fw;
};

/* generic probe: load the prepared topology and check the expected result */
static int d_probe(struct snd_soc_component *component)
{
	struct kunit_soc_component *kunit_comp =
			container_of(component, struct kunit_soc_component, comp);
	int ret;

	ret = snd_soc_tplg_component_load(component, NULL, &kunit_comp->fw);
	KUNIT_EXPECT_EQ_MSG(kunit_comp->kunit, kunit_comp->expect, ret,
			    "Failed topology load");

	return 0;
}

static void d_remove(struct snd_soc_component *component)
{
	struct kunit_soc_component *kunit_comp =
			container_of(component, struct kunit_soc_component, comp);
	int ret;

	ret = snd_soc_tplg_component_remove(component);
	KUNIT_EXPECT_EQ(kunit_comp->kunit, 0, ret);
}

/*
 * ASoC minimal boiler plate
 */
SND_SOC_DAILINK_DEF(dummy, DAILINK_COMP_ARRAY(COMP_DUMMY()));

SND_SOC_DAILINK_DEF(platform, DAILINK_COMP_ARRAY(COMP_PLATFORM("sound-soc-topology-test")));

static struct snd_soc_dai_link kunit_dai_links[] = {
	{
		.name = "KUNIT Audio Port",
		.id = 0,
		.stream_name = "Audio Playback/Capture",
		.nonatomic = 1,
		.dynamic = 1,
		.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
		.dpcm_playback = 1,
		.dpcm_capture = 1,
		SND_SOC_DAILINK_REG(dummy, dummy, platform),
	},
};

static const struct snd_soc_component_driver test_component = {
	.name = "sound-soc-topology-test",
	.probe = d_probe,
	.remove = d_remove,
};

/* ===== TOPOLOGY TEMPLATES ================================================= */

// Structural representation of topology which can be generated with:
// $ touch empty
// $ alsatplg -c empty -o empty.tplg
// $ xxd -i empty.tplg

struct tplg_tmpl_001 {
	struct snd_soc_tplg_hdr header;
	struct snd_soc_tplg_manifest manifest;
} __packed;

static struct tplg_tmpl_001 tplg_tmpl_empty = {
	.header = {
		.magic = cpu_to_le32(SND_SOC_TPLG_MAGIC),
		.abi = cpu_to_le32(5),
		.version = 0,
		.type = cpu_to_le32(SND_SOC_TPLG_TYPE_MANIFEST),
		.size = cpu_to_le32(sizeof(struct snd_soc_tplg_hdr)),
		.vendor_type = 0,
		.payload_size = cpu_to_le32(sizeof(struct snd_soc_tplg_manifest)),
		.index = 0,
		.count = cpu_to_le32(1),
	},
	.manifest = {
		.size = cpu_to_le32(sizeof(struct snd_soc_tplg_manifest)),
		/* rest of fields is 0 */
	},
};

// Structural representation of topology containing SectionPCM

struct tplg_tmpl_002 {
	struct snd_soc_tplg_hdr header;
	struct snd_soc_tplg_manifest manifest;
	struct snd_soc_tplg_hdr pcm_header;
	struct snd_soc_tplg_pcm pcm;
} __packed;

static struct tplg_tmpl_002 tplg_tmpl_with_pcm = {
	.header = {
		.magic = cpu_to_le32(SND_SOC_TPLG_MAGIC),
		.abi = cpu_to_le32(5),
		.version = 0,
		.type = cpu_to_le32(SND_SOC_TPLG_TYPE_MANIFEST),
		.size = cpu_to_le32(sizeof(struct snd_soc_tplg_hdr)),
		.vendor_type = 0,
		.payload_size = cpu_to_le32(sizeof(struct snd_soc_tplg_manifest)),
		.index = 0,
		.count = cpu_to_le32(1),
	},
	.manifest = {
		.size = cpu_to_le32(sizeof(struct snd_soc_tplg_manifest)),
		.pcm_elems = cpu_to_le32(1),
		/* rest of fields is 0 */
	},

	.pcm_header = {
		.magic = cpu_to_le32(SND_SOC_TPLG_MAGIC),
		.abi = cpu_to_le32(5),
		.version = 0,
		.type = cpu_to_le32(SND_SOC_TPLG_TYPE_PCM),
		.size = cpu_to_le32(sizeof(struct snd_soc_tplg_hdr)),
		.vendor_type = 0,
		.payload_size = cpu_to_le32(sizeof(struct snd_soc_tplg_pcm)),
		.index = 0,
		.count = cpu_to_le32(1),
	},
	.pcm = {
		.size = cpu_to_le32(sizeof(struct snd_soc_tplg_pcm)),
		.pcm_name = "KUNIT Audio",
		.dai_name = "kunit-audio-dai",
		.pcm_id = 0,
		.dai_id = 0,
		.playback = cpu_to_le32(1),
		.capture = cpu_to_le32(1),
		.compress = 0,
		.stream = {
			[0] = {
				.channels = cpu_to_le32(2),
			},
			[1] = {
				.channels = cpu_to_le32(2),
			},
		},
		.num_streams = 0,
		.caps = {
			[0] = {
				.name = "kunit-audio-playback",
				.channels_min = cpu_to_le32(2),
				.channels_max = cpu_to_le32(2),
			},
			[1] = {
				.name = "kunit-audio-capture",
				.channels_min = cpu_to_le32(2),
				.channels_max = cpu_to_le32(2),
			},
		},
		.flag_mask = 0,
		.flags = 0,
		.priv = { 0 },
	},
};

/* ===== TEST CASES ========================================================= */

// TEST CASE
// Test passing NULL component as parameter to snd_soc_tplg_component_load

/*
 * need to override generic probe function with one using NULL when calling
 * topology load during component initialization, we don't need .remove
 * handler as load should fail
 */
static int d_probe_null_comp(struct snd_soc_component *component)
{
	struct kunit_soc_component *kunit_comp =
			container_of(component, struct kunit_soc_component, comp);
	int ret;

	/* instead of passing component pointer as first argument, pass NULL here */
	ret = snd_soc_tplg_component_load(NULL, NULL, &kunit_comp->fw);
	KUNIT_EXPECT_EQ_MSG(kunit_comp->kunit, kunit_comp->expect, ret,
			    "Failed topology load");

	return 0;
}

static const struct snd_soc_component_driver test_component_null_comp = {
	.name = "sound-soc-topology-test",
	.probe = d_probe_null_comp,
};

static void snd_soc_tplg_test_load_with_null_comp(struct kunit *test)
{
	struct kunit_soc_component *kunit_comp;
	int ret;

	/* prepare */
	kunit_comp = kunit_kzalloc(test, sizeof(*kunit_comp), GFP_KERNEL);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, kunit_comp);
	kunit_comp->kunit = test;
	kunit_comp->expect = -EINVAL; /* expect failure */

	kunit_comp->card.dev = test_dev,
	kunit_comp->card.name = "kunit-card",
	kunit_comp->card.owner = THIS_MODULE,
	kunit_comp->card.dai_link = kunit_dai_links,
	kunit_comp->card.num_links = ARRAY_SIZE(kunit_dai_links),
	kunit_comp->card.fully_routed = true,

	/* run test */
	ret = snd_soc_register_card(&kunit_comp->card);
	if (ret != 0 && ret != -EPROBE_DEFER)
		KUNIT_FAIL(test, "Failed to register card");

	ret = snd_soc_component_initialize(&kunit_comp->comp, &test_component_null_comp, test_dev);
	KUNIT_EXPECT_EQ(test, 0, ret);

	ret = snd_soc_add_component(&kunit_comp->comp, NULL, 0);
	KUNIT_EXPECT_EQ(test, 0, ret);

	/* cleanup */
	snd_soc_unregister_card(&kunit_comp->card);

	snd_soc_unregister_component(test_dev);
}

// TEST CASE
// Test passing NULL ops as parameter to snd_soc_tplg_component_load

/*
 * NULL ops is default case, we pass empty topology (fw), so we don't have
 * anything to parse and just do nothing, which results in return 0; from
 * calling soc_tplg_dapm_complete in soc_tplg_process_headers
 */
static void snd_soc_tplg_test_load_with_null_ops(struct kunit *test)
{
	struct kunit_soc_component *kunit_comp;
	int ret;

	/* prepare */
	kunit_comp = kunit_kzalloc(test, sizeof(*kunit_comp), GFP_KERNEL);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, kunit_comp);
	kunit_comp->kunit = test;
	kunit_comp->expect = 0; /* expect success */

	kunit_comp->card.dev = test_dev,
	kunit_comp->card.name = "kunit-card",
	kunit_comp->card.owner = THIS_MODULE,
	kunit_comp->card.dai_link = kunit_dai_links,
	kunit_comp->card.num_links = ARRAY_SIZE(kunit_dai_links),
	kunit_comp->card.fully_routed = true,

	/* run test */
	ret = snd_soc_register_card(&kunit_comp->card);
	if (ret != 0 && ret != -EPROBE_DEFER)
		KUNIT_FAIL(test, "Failed to register card");

	ret = snd_soc_component_initialize(&kunit_comp->comp, &test_component, test_dev);
	KUNIT_EXPECT_EQ(test, 0, ret);

	ret = snd_soc_add_component(&kunit_comp->comp, NULL, 0);
	KUNIT_EXPECT_EQ(test, 0, ret);

	/* cleanup */
	snd_soc_unregister_card(&kunit_comp->card);

	snd_soc_unregister_component(test_dev);
}

// TEST CASE
// Test passing NULL fw as parameter to snd_soc_tplg_component_load

/*
 * need to override generic probe function with one using NULL pointer to fw
 * when calling topology load during component initialization, we don't need
 * .remove handler as load should fail
 */
static int d_probe_null_fw(struct snd_soc_component *component)
{
	struct kunit_soc_component *kunit_comp =
			container_of(component, struct kunit_soc_component, comp);
	int ret;

	/* instead of passing fw pointer as third argument, pass NULL here */
	ret = snd_soc_tplg_component_load(component, NULL, NULL);
	KUNIT_EXPECT_EQ_MSG(kunit_comp->kunit, kunit_comp->expect, ret,
			    "Failed topology load");

	return 0;
}

static const struct snd_soc_component_driver test_component_null_fw = {
	.name = "sound-soc-topology-test",
	.probe = d_probe_null_fw,
};

static void snd_soc_tplg_test_load_with_null_fw(struct kunit *test)
{
	struct kunit_soc_component *kunit_comp;
	int ret;

	/* prepare */
	kunit_comp = kunit_kzalloc(test, sizeof(*kunit_comp), GFP_KERNEL);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, kunit_comp);
	kunit_comp->kunit = test;
	kunit_comp->expect = -EINVAL; /* expect failure */

	kunit_comp->card.dev = test_dev,
	kunit_comp->card.name = "kunit-card",
	kunit_comp->card.owner = THIS_MODULE,
	kunit_comp->card.dai_link = kunit_dai_links,
	kunit_comp->card.num_links = ARRAY_SIZE(kunit_dai_links),
	kunit_comp->card.fully_routed = true,

	/* run test */
	ret = snd_soc_register_card(&kunit_comp->card);
	if (ret != 0 && ret != -EPROBE_DEFER)
		KUNIT_FAIL(test, "Failed to register card");

	ret = snd_soc_component_initialize(&kunit_comp->comp, &test_component_null_fw, test_dev);
	KUNIT_EXPECT_EQ(test, 0, ret);

	ret = snd_soc_add_component(&kunit_comp->comp, NULL, 0);
	KUNIT_EXPECT_EQ(test, 0, ret);

	/* cleanup */
	snd_soc_unregister_card(&kunit_comp->card);

	snd_soc_unregister_component(test_dev);
}

// TEST CASE
// Test passing "empty" topology file
static void snd_soc_tplg_test_load_empty_tplg(struct kunit *test)
{
	struct kunit_soc_component *kunit_comp;
	struct tplg_tmpl_001 *data;
	int size;
	int ret;

	/* prepare */
	kunit_comp = kunit_kzalloc(test, sizeof(*kunit_comp), GFP_KERNEL);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, kunit_comp);
	kunit_comp->kunit = test;
	kunit_comp->expect = 0; /* expect success */

	size = sizeof(tplg_tmpl_empty);
	data = kunit_kzalloc(kunit_comp->kunit, size, GFP_KERNEL);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(kunit_comp->kunit, data);

	memcpy(data, &tplg_tmpl_empty, sizeof(tplg_tmpl_empty));

	kunit_comp->fw.data = (u8 *)data;
	kunit_comp->fw.size = size;

	kunit_comp->card.dev = test_dev,
	kunit_comp->card.name = "kunit-card",
	kunit_comp->card.owner = THIS_MODULE,
	kunit_comp->card.dai_link = kunit_dai_links,
	kunit_comp->card.num_links = ARRAY_SIZE(kunit_dai_links),
	kunit_comp->card.fully_routed = true,

	/* run test */
	ret = snd_soc_register_card(&kunit_comp->card);
	if (ret != 0 && ret != -EPROBE_DEFER)
		KUNIT_FAIL(test, "Failed to register card");

	ret = snd_soc_component_initialize(&kunit_comp->comp, &test_component, test_dev);
	KUNIT_EXPECT_EQ(test, 0, ret);

	ret = snd_soc_add_component(&kunit_comp->comp, NULL, 0);
	KUNIT_EXPECT_EQ(test, 0, ret);

	/* cleanup */
	snd_soc_unregister_card(&kunit_comp->card);

	snd_soc_unregister_component(test_dev);
}

// TEST CASE
// Test "empty" topology file, but with bad "magic"
// In theory we could loop through all possible bad values, but it takes too
// long, so just use SND_SOC_TPLG_MAGIC + 1
static void snd_soc_tplg_test_load_empty_tplg_bad_magic(struct kunit *test)
{
	struct kunit_soc_component *kunit_comp;
	struct tplg_tmpl_001 *data;
	int size;
	int ret;

	/* prepare */
	kunit_comp = kunit_kzalloc(test, sizeof(*kunit_comp), GFP_KERNEL);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, kunit_comp);
	kunit_comp->kunit = test;
	kunit_comp->expect = -EINVAL; /* expect failure */

	size = sizeof(tplg_tmpl_empty);
	data = kunit_kzalloc(kunit_comp->kunit, size, GFP_KERNEL);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(kunit_comp->kunit, data);

	memcpy(data, &tplg_tmpl_empty, sizeof(tplg_tmpl_empty));
	/*
	 * override magic (comment previously said "abi" — it is the magic
	 * number being corrupted here)
	 * any value != magic number is wrong
	 */
	data->header.magic = cpu_to_le32(SND_SOC_TPLG_MAGIC + 1);

	kunit_comp->fw.data = (u8 *)data;
	kunit_comp->fw.size = size;

	kunit_comp->card.dev = test_dev,
	kunit_comp->card.name = "kunit-card",
	kunit_comp->card.owner = THIS_MODULE,
	kunit_comp->card.dai_link = kunit_dai_links,
	kunit_comp->card.num_links = ARRAY_SIZE(kunit_dai_links),
	kunit_comp->card.fully_routed = true,

	/* run test */
	ret = snd_soc_register_card(&kunit_comp->card);
	if (ret != 0 && ret != -EPROBE_DEFER)
		KUNIT_FAIL(test, "Failed to register card");

	ret = snd_soc_component_initialize(&kunit_comp->comp, &test_component, test_dev);
	KUNIT_EXPECT_EQ(test, 0, ret);

	ret = snd_soc_add_component(&kunit_comp->comp, NULL, 0);
	KUNIT_EXPECT_EQ(test, 0, ret);

	/* cleanup */
	snd_soc_unregister_card(&kunit_comp->card);

	snd_soc_unregister_component(test_dev);
}

// TEST CASE
// Test "empty" topology file, but with bad "abi"
// In theory we could loop through all possible bad values, but it takes too
// long, so just use SND_SOC_TPLG_ABI_VERSION + 1
static void snd_soc_tplg_test_load_empty_tplg_bad_abi(struct kunit *test)
{
	struct kunit_soc_component *kunit_comp;
	struct tplg_tmpl_001 *data;
	int size;
	int ret;

	/* prepare */
	kunit_comp = kunit_kzalloc(test, sizeof(*kunit_comp), GFP_KERNEL);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, kunit_comp);
	kunit_comp->kunit = test;
	kunit_comp->expect = -EINVAL; /* expect failure */

	size = sizeof(tplg_tmpl_empty);
	data = kunit_kzalloc(kunit_comp->kunit, size, GFP_KERNEL);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(kunit_comp->kunit, data);

	memcpy(data, &tplg_tmpl_empty, sizeof(tplg_tmpl_empty));
	/*
	 * override abi
	 * any value != accepted range is wrong
	 */
	data->header.abi = cpu_to_le32(SND_SOC_TPLG_ABI_VERSION + 1);

	kunit_comp->fw.data = (u8 *)data;
	kunit_comp->fw.size = size;

	kunit_comp->card.dev = test_dev,
	kunit_comp->card.name = "kunit-card",
	kunit_comp->card.owner = THIS_MODULE,
	kunit_comp->card.dai_link = kunit_dai_links,
	kunit_comp->card.num_links = ARRAY_SIZE(kunit_dai_links),
	kunit_comp->card.fully_routed = true,

	/* run test */
	ret = snd_soc_register_card(&kunit_comp->card);
	if (ret != 0 && ret != -EPROBE_DEFER)
		KUNIT_FAIL(test, "Failed to register card");

	ret = snd_soc_component_initialize(&kunit_comp->comp, &test_component, test_dev);
	KUNIT_EXPECT_EQ(test, 0, ret);

	ret = snd_soc_add_component(&kunit_comp->comp, NULL, 0);
	KUNIT_EXPECT_EQ(test, 0, ret);

	/* cleanup */
	snd_soc_unregister_card(&kunit_comp->card);

	snd_soc_unregister_component(test_dev);
}

// TEST CASE
// Test "empty" topology file, but with bad "size"
// In theory we could loop through all possible bad values, but it takes too
// long, so just use sizeof(struct snd_soc_tplg_hdr) + 1
static void snd_soc_tplg_test_load_empty_tplg_bad_size(struct kunit *test)
{
	struct kunit_soc_component *kunit_comp;
	struct tplg_tmpl_001 *data;
	int size;
	int ret;

	/* prepare */
	kunit_comp = kunit_kzalloc(test, sizeof(*kunit_comp), GFP_KERNEL);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, kunit_comp);
	kunit_comp->kunit = test;
	kunit_comp->expect = -EINVAL; /* expect failure */

	size = sizeof(tplg_tmpl_empty);
	data = kunit_kzalloc(kunit_comp->kunit, size, GFP_KERNEL);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(kunit_comp->kunit, data);

	memcpy(data, &tplg_tmpl_empty, sizeof(tplg_tmpl_empty));
	/*
	 * override size
	 * any value != struct size is wrong
	 */
	data->header.size = cpu_to_le32(sizeof(struct snd_soc_tplg_hdr) + 1);

	kunit_comp->fw.data = (u8 *)data;
	kunit_comp->fw.size = size;

	kunit_comp->card.dev = test_dev,
	kunit_comp->card.name = "kunit-card",
	kunit_comp->card.owner = THIS_MODULE,
	kunit_comp->card.dai_link = kunit_dai_links,
	kunit_comp->card.num_links = ARRAY_SIZE(kunit_dai_links),
	kunit_comp->card.fully_routed = true,

	/* run test */
	ret = snd_soc_register_card(&kunit_comp->card);
	if (ret != 0 && ret != -EPROBE_DEFER)
		KUNIT_FAIL(test, "Failed to register card");

	ret = snd_soc_component_initialize(&kunit_comp->comp, &test_component, test_dev);
	KUNIT_EXPECT_EQ(test, 0, ret);

	ret = snd_soc_add_component(&kunit_comp->comp, NULL, 0);
	KUNIT_EXPECT_EQ(test, 0, ret);

	/* cleanup */
	snd_soc_unregister_card(&kunit_comp->card);

	snd_soc_unregister_component(test_dev);
}

// TEST CASE
// Test "empty" topology file, but with bad "payload_size"
// In theory we could loop through all possible bad values, but it takes too
// long, so just use the known wrong one
static void snd_soc_tplg_test_load_empty_tplg_bad_payload_size(struct kunit *test)
{
	struct kunit_soc_component *kunit_comp;
	struct tplg_tmpl_001 *data;
	int size;
	int ret;

	/* prepare */
	kunit_comp = kunit_kzalloc(test, sizeof(*kunit_comp), GFP_KERNEL);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, kunit_comp);
	kunit_comp->kunit = test;
	kunit_comp->expect = -EINVAL; /* expect failure */

	size = sizeof(tplg_tmpl_empty);
	data = kunit_kzalloc(kunit_comp->kunit, size, GFP_KERNEL);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(kunit_comp->kunit, data);

	memcpy(data, &tplg_tmpl_empty, sizeof(tplg_tmpl_empty));
	/*
	 * override payload size
	 * there is only explicit check for 0, so check with it, other values
	 * are handled by just not reading behind EOF
	 */
	data->header.payload_size = 0;

	kunit_comp->fw.data = (u8 *)data;
	kunit_comp->fw.size = size;

	kunit_comp->card.dev = test_dev,
	kunit_comp->card.name = "kunit-card",
	kunit_comp->card.owner = THIS_MODULE,
	kunit_comp->card.dai_link = kunit_dai_links,
	kunit_comp->card.num_links = ARRAY_SIZE(kunit_dai_links),
	kunit_comp->card.fully_routed = true,

	/* run test */
	ret = snd_soc_register_card(&kunit_comp->card);
	if (ret != 0 && ret != -EPROBE_DEFER)
		KUNIT_FAIL(test, "Failed to register card");

	ret = snd_soc_component_initialize(&kunit_comp->comp, &test_component, test_dev);
	KUNIT_EXPECT_EQ(test, 0, ret);

	ret = snd_soc_add_component(&kunit_comp->comp, NULL, 0);
	KUNIT_EXPECT_EQ(test, 0, ret);

	/* cleanup (note: component is unregistered before the card here) */
	snd_soc_unregister_component(test_dev);

	snd_soc_unregister_card(&kunit_comp->card);
}

// TEST CASE
// Test passing topology file with PCM definition
static void snd_soc_tplg_test_load_pcm_tplg(struct kunit *test)
{
	struct kunit_soc_component *kunit_comp;
	u8 *data;
	int size;
	int ret;

	/* prepare */
	kunit_comp = kunit_kzalloc(test, sizeof(*kunit_comp), GFP_KERNEL);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, kunit_comp);
	kunit_comp->kunit = test;
	kunit_comp->expect = 0; /* expect success */

	size = sizeof(tplg_tmpl_with_pcm);
	data = kunit_kzalloc(kunit_comp->kunit, size, GFP_KERNEL);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(kunit_comp->kunit, data);

	memcpy(data, &tplg_tmpl_with_pcm, sizeof(tplg_tmpl_with_pcm));

	kunit_comp->fw.data = data;
	kunit_comp->fw.size = size;

	kunit_comp->card.dev = test_dev,
	kunit_comp->card.name = "kunit-card",
	kunit_comp->card.owner = THIS_MODULE,
	kunit_comp->card.dai_link = kunit_dai_links,
	kunit_comp->card.num_links = ARRAY_SIZE(kunit_dai_links),
	kunit_comp->card.fully_routed = true,

	/* run test */
	ret = snd_soc_register_card(&kunit_comp->card);
	if (ret != 0 && ret != -EPROBE_DEFER)
		KUNIT_FAIL(test, "Failed to register card");

	ret = snd_soc_component_initialize(&kunit_comp->comp, &test_component, test_dev);
	KUNIT_EXPECT_EQ(test, 0, ret);

	ret = snd_soc_add_component(&kunit_comp->comp, NULL, 0);
	KUNIT_EXPECT_EQ(test, 0, ret);

	snd_soc_unregister_component(test_dev);

	/* cleanup */
	snd_soc_unregister_card(&kunit_comp->card);
}

// TEST CASE
// Test passing topology file with PCM definition
// with component reload
static void snd_soc_tplg_test_load_pcm_tplg_reload_comp(struct kunit *test)
{
	struct kunit_soc_component *kunit_comp;
	u8 *data;
	int size;
	int ret;
	int i;

	/* prepare */
	kunit_comp = kunit_kzalloc(test, sizeof(*kunit_comp), GFP_KERNEL);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, kunit_comp);
	kunit_comp->kunit = test;
	kunit_comp->expect = 0; /* expect success */

	size = sizeof(tplg_tmpl_with_pcm);
	data = kunit_kzalloc(kunit_comp->kunit, size, GFP_KERNEL);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(kunit_comp->kunit, data);

	memcpy(data, &tplg_tmpl_with_pcm, sizeof(tplg_tmpl_with_pcm));

	kunit_comp->fw.data = data;
	kunit_comp->fw.size = size;

	kunit_comp->card.dev = test_dev,
	kunit_comp->card.name = "kunit-card",
	kunit_comp->card.owner = THIS_MODULE,
	kunit_comp->card.dai_link = kunit_dai_links,
	kunit_comp->card.num_links = ARRAY_SIZE(kunit_dai_links),
	kunit_comp->card.fully_routed = true,

	/* run test */
	ret = snd_soc_register_card(&kunit_comp->card);
	if (ret != 0 && ret != -EPROBE_DEFER)
		KUNIT_FAIL(test, "Failed to register card");

	/* repeatedly add/remove the component against the same card */
	for (i = 0; i < 100; i++) {
		ret = snd_soc_component_initialize(&kunit_comp->comp, &test_component, test_dev);
		KUNIT_EXPECT_EQ(test, 0, ret);

		ret = snd_soc_add_component(&kunit_comp->comp, NULL, 0);
		KUNIT_EXPECT_EQ(test, 0, ret);

		snd_soc_unregister_component(test_dev);
	}

	/* cleanup */
	snd_soc_unregister_card(&kunit_comp->card);
}

// TEST CASE
// Test passing topology file with PCM definition
// with card reload
static void snd_soc_tplg_test_load_pcm_tplg_reload_card(struct kunit *test)
{
	struct kunit_soc_component *kunit_comp;
	u8 *data;
	int size;
	int ret;
	int i;

	/* prepare */
	kunit_comp = kunit_kzalloc(test, sizeof(*kunit_comp), GFP_KERNEL);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, kunit_comp);
	kunit_comp->kunit = test;
	kunit_comp->expect = 0; /* expect success */

	size = sizeof(tplg_tmpl_with_pcm);
	data = kunit_kzalloc(kunit_comp->kunit, size, GFP_KERNEL);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(kunit_comp->kunit, data);

	memcpy(data, &tplg_tmpl_with_pcm, sizeof(tplg_tmpl_with_pcm));

	kunit_comp->fw.data = data;
	kunit_comp->fw.size = size;

	kunit_comp->card.dev = test_dev,
	kunit_comp->card.name = "kunit-card",
	kunit_comp->card.owner = THIS_MODULE,
	kunit_comp->card.dai_link = kunit_dai_links,
	kunit_comp->card.num_links = ARRAY_SIZE(kunit_dai_links),
	kunit_comp->card.fully_routed = true,

	/* run test */
	ret = snd_soc_component_initialize(&kunit_comp->comp, &test_component, test_dev);
	KUNIT_EXPECT_EQ(test, 0, ret);

	ret = snd_soc_add_component(&kunit_comp->comp, NULL, 0);
	KUNIT_EXPECT_EQ(test, 0, ret);

	/* repeatedly register/unregister the card against the same component */
	for (i = 0; i < 100; i++) {
		ret = snd_soc_register_card(&kunit_comp->card);
		if (ret != 0 && ret != -EPROBE_DEFER)
			KUNIT_FAIL(test, "Failed to register card");

		snd_soc_unregister_card(&kunit_comp->card);
	}

	/* cleanup */
	snd_soc_unregister_component(test_dev);
}

/* ===== KUNIT MODULE DEFINITIONS =========================================== */

static struct kunit_case snd_soc_tplg_test_cases[] = {
	KUNIT_CASE(snd_soc_tplg_test_load_with_null_comp),
	KUNIT_CASE(snd_soc_tplg_test_load_with_null_ops),
	KUNIT_CASE(snd_soc_tplg_test_load_with_null_fw),
	KUNIT_CASE(snd_soc_tplg_test_load_empty_tplg),
	KUNIT_CASE(snd_soc_tplg_test_load_empty_tplg_bad_magic),
	KUNIT_CASE(snd_soc_tplg_test_load_empty_tplg_bad_abi),
	KUNIT_CASE(snd_soc_tplg_test_load_empty_tplg_bad_size),
	KUNIT_CASE(snd_soc_tplg_test_load_empty_tplg_bad_payload_size),
	KUNIT_CASE(snd_soc_tplg_test_load_pcm_tplg),
	KUNIT_CASE(snd_soc_tplg_test_load_pcm_tplg_reload_comp),
	KUNIT_CASE(snd_soc_tplg_test_load_pcm_tplg_reload_card),
	{}
};

static struct kunit_suite snd_soc_tplg_test_suite = {
	.name = "snd_soc_tplg_test",
	.init = snd_soc_tplg_test_init,
	.exit = snd_soc_tplg_test_exit,
	.test_cases = snd_soc_tplg_test_cases,
};

kunit_test_suites(&snd_soc_tplg_test_suite);

MODULE_LICENSE("GPL");
linux-master
sound/soc/soc-topology-test.c
// SPDX-License-Identifier: GPL-2.0
//
// soc-acpi.c - support for ACPI enumeration.
//
// Copyright (c) 2013-15, Intel Corporation.

#include <linux/export.h>
#include <linux/module.h>
#include <sound/soc-acpi.h>

/*
 * Check whether any of the ACPI IDs associated with @machine is present
 * in the ACPI namespace.  A match against one of the compatible IDs is
 * copied back into machine->id so later consumers see the ID that was
 * actually found.
 */
static bool snd_soc_acpi_id_present(struct snd_soc_acpi_mach *machine)
{
	const struct snd_soc_acpi_codecs *comp_ids = machine->comp_ids;
	int i;

	/* primary HID, if one was provided */
	if (machine->id[0]) {
		if (acpi_dev_present(machine->id, NULL, -1))
			return true;
	}

	/* fall back to the list of compatible HIDs */
	if (comp_ids) {
		for (i = 0; i < comp_ids->num_codecs; i++) {
			if (acpi_dev_present(comp_ids->codecs[i], NULL, -1)) {
				/* record which compatible ID matched */
				strscpy(machine->id, comp_ids->codecs[i], ACPI_ID_LEN);
				return true;
			}
		}
	}

	return false;
}

/*
 * snd_soc_acpi_find_machine - find the first machine table entry whose
 * codec ACPI ID is present on this platform.
 *
 * @machines: table terminated by an entry with neither id nor comp_ids.
 *
 * If the matched entry has a machine_quirk callback, the callback may
 * substitute an alternative entry or reject the match (NULL return ->
 * keep scanning).  Returns the selected entry or NULL if none matches.
 */
struct snd_soc_acpi_mach *
snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines)
{
	struct snd_soc_acpi_mach *mach;
	struct snd_soc_acpi_mach *mach_alt;

	for (mach = machines; mach->id[0] || mach->comp_ids; mach++) {
		if (snd_soc_acpi_id_present(mach)) {
			if (mach->machine_quirk) {
				mach_alt = mach->machine_quirk(mach);
				if (!mach_alt)
					continue; /* not full match, ignore */
				mach = mach_alt;
			}
			return mach;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(snd_soc_acpi_find_machine);

/*
 * acpi_get_devices() callback: evaluate the named ACPI package on the
 * device behind @handle and extract it into the caller-supplied
 * snd_soc_acpi_package_context (@context).
 *
 * Returns AE_OK to continue the namespace walk on any failure, and
 * AE_CTRL_TERMINATE once a package was successfully extracted
 * (pkg_ctx->data_valid is set to report success to the caller).
 */
static acpi_status snd_soc_acpi_find_package(acpi_handle handle, u32 level,
					     void *context, void **ret)
{
	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
	acpi_status status;
	struct snd_soc_acpi_package_context *pkg_ctx = context;

	pkg_ctx->data_valid = false;

	if (adev && adev->status.present && adev->status.functional) {
		/* ACPICA allocates buffer.pointer; we must kfree() it */
		struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
		union acpi_object *myobj = NULL;

		status = acpi_evaluate_object_typed(handle, pkg_ctx->name,
						NULL, &buffer,
						ACPI_TYPE_PACKAGE);
		if (ACPI_FAILURE(status))
			return AE_OK;

		myobj = buffer.pointer;
		/* package must have exactly the expected element count */
		if (!myobj || myobj->package.count != pkg_ctx->length) {
			kfree(buffer.pointer);
			return AE_OK;
		}

		status = acpi_extract_package(myobj,
					pkg_ctx->format, pkg_ctx->state);
		if (ACPI_FAILURE(status)) {
			kfree(buffer.pointer);
			return AE_OK;
		}

		kfree(buffer.pointer);
		pkg_ctx->data_valid = true;
		return AE_CTRL_TERMINATE;
	}

	return AE_OK;
}

/*
 * snd_soc_acpi_find_package_from_hid - locate a device by HID and extract
 * a named ACPI package from it into @ctx (see snd_soc_acpi_find_package).
 *
 * Returns true only if the walk succeeded and a package was extracted.
 */
bool snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
				struct snd_soc_acpi_package_context *ctx)
{
	acpi_status status;

	status = acpi_get_devices(hid, snd_soc_acpi_find_package, ctx, NULL);

	if (ACPI_FAILURE(status) || !ctx->data_valid)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(snd_soc_acpi_find_package_from_hid);

/*
 * Machine-quirk helper: accept the machine entry only if every codec in
 * mach->quirk_data (a snd_soc_acpi_codecs list) is present in ACPI.
 * With no quirk_data the entry is accepted unconditionally.
 */
struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
{
	struct snd_soc_acpi_mach *mach = arg;
	struct snd_soc_acpi_codecs *codec_list =
		(struct snd_soc_acpi_codecs *) mach->quirk_data;
	int i;

	if (mach->quirk_data == NULL)
		return mach;

	for (i = 0; i < codec_list->num_codecs; i++) {
		if (!acpi_dev_present(codec_list->codecs[i], NULL, -1))
			return NULL;
	}

	return mach;
}
EXPORT_SYMBOL_GPL(snd_soc_acpi_codec_list);

/* Identity of a part on a link, ignoring the per-instance unique ID */
#define SDW_CODEC_ADR_MASK(_adr) ((_adr) & (SDW_DISCO_LINK_ID_MASK | SDW_VERSION_MASK | \
				  SDW_MFG_ID_MASK | SDW_PART_ID_MASK))

/* Check if all Slaves defined on the link can be found */
bool snd_soc_acpi_sdw_link_slaves_found(struct device *dev,
					const struct snd_soc_acpi_link_adr *link,
					struct sdw_extended_slave_id *ids,
					int num_slaves)
{
	unsigned int part_id, link_id, unique_id, mfg_id, version;
	int i, j, k;

	/* every address expected on the link must have been reported */
	for (i = 0; i < link->num_adr; i++) {
		u64 adr = link->adr_d[i].adr;
		int reported_part_count = 0;

		mfg_id = SDW_MFG_ID(adr);
		part_id = SDW_PART_ID(adr);
		link_id = SDW_DISCO_LINK_ID(adr);
		version = SDW_VERSION(adr);

		for (j = 0; j < num_slaves; j++) {
			/* find out how many identical parts were reported on that link */
			if (ids[j].link_id == link_id &&
			    ids[j].id.part_id == part_id &&
			    ids[j].id.mfg_id == mfg_id &&
			    ids[j].id.sdw_version == version)
				reported_part_count++;
		}

		for (j = 0; j < num_slaves; j++) {
			int expected_part_count = 0;

			if (ids[j].link_id != link_id ||
			    ids[j].id.part_id != part_id ||
			    ids[j].id.mfg_id != mfg_id ||
			    ids[j].id.sdw_version != version)
				continue;

			/* find out how many identical parts are expected */
			for (k = 0; k < link->num_adr; k++) {
				u64 adr2 = link->adr_d[k].adr;

				if (SDW_CODEC_ADR_MASK(adr2) == SDW_CODEC_ADR_MASK(adr))
					expected_part_count++;
			}

			if (reported_part_count == expected_part_count) {
				/*
				 * we have to check unique id
				 * if there is more than one
				 * Slave on the link
				 */
				unique_id = SDW_UNIQUE_ID(adr);
				if (reported_part_count == 1 ||
				    ids[j].id.unique_id == unique_id) {
					dev_dbg(dev, "found part_id %#x at link %d\n", part_id, link_id);
					break;
				}
			} else {
				dev_dbg(dev, "part_id %#x reported %d expected %d on link %d, skipping\n",
					part_id, reported_part_count, expected_part_count, link_id);
			}
		}
		if (j == num_slaves) {
			/* loop above ran to completion: no acceptable match */
			dev_dbg(dev, "Slave part_id %#x not found\n", part_id);
			return false;
		}
	}
	return true;
}
EXPORT_SYMBOL_GPL(snd_soc_acpi_sdw_link_slaves_found);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ALSA SoC ACPI module");
linux-master
sound/soc/soc-acpi.c
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2022 Cirrus Logic, Inc. and
//                    Cirrus Logic International Semiconductor Ltd.

#include <kunit/test.h>
#include <linux/module.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <uapi/sound/asound.h>

/*
 * Test vectors shared by the BCLK-calculation tests below.
 *
 * tdm_width/tdm_slots/slot_multiple == 0 means "derive that value from
 * the hw_params"; non-zero values override the hw_params-derived value.
 * bclk is the expected result for the given combination.
 */
static const struct {
	u32 rate;
	snd_pcm_format_t fmt;
	u8 channels;
	u8 tdm_width;
	u8 tdm_slots;
	u8 slot_multiple;
	u32 bclk;
} tdm_params_to_bclk_cases[] = {
	/* rate		fmt	   channels tdm_width tdm_slots slot_multiple bclk */

	/* From params only */
	{   8000, SNDRV_PCM_FORMAT_S16_LE, 1,  0, 0, 0,   128000 },
	{   8000, SNDRV_PCM_FORMAT_S16_LE, 2,  0, 0, 0,   256000 },
	{   8000, SNDRV_PCM_FORMAT_S24_LE, 1,  0, 0, 0,   192000 },
	{   8000, SNDRV_PCM_FORMAT_S24_LE, 2,  0, 0, 0,   384000 },
	{   8000, SNDRV_PCM_FORMAT_S32_LE, 1,  0, 0, 0,   256000 },
	{   8000, SNDRV_PCM_FORMAT_S32_LE, 2,  0, 0, 0,   512000 },
	{  44100, SNDRV_PCM_FORMAT_S16_LE, 1,  0, 0, 0,   705600 },
	{  44100, SNDRV_PCM_FORMAT_S16_LE, 2,  0, 0, 0,  1411200 },
	{  44100, SNDRV_PCM_FORMAT_S24_LE, 1,  0, 0, 0,  1058400 },
	{  44100, SNDRV_PCM_FORMAT_S24_LE, 2,  0, 0, 0,  2116800 },
	{  44100, SNDRV_PCM_FORMAT_S32_LE, 1,  0, 0, 0,  1411200 },
	{  44100, SNDRV_PCM_FORMAT_S32_LE, 2,  0, 0, 0,  2822400 },
	{ 384000, SNDRV_PCM_FORMAT_S16_LE, 1,  0, 0, 0,  6144000 },
	{ 384000, SNDRV_PCM_FORMAT_S16_LE, 2,  0, 0, 0, 12288000 },
	{ 384000, SNDRV_PCM_FORMAT_S24_LE, 1,  0, 0, 0,  9216000 },
	{ 384000, SNDRV_PCM_FORMAT_S24_LE, 2,  0, 0, 0, 18432000 },
	{ 384000, SNDRV_PCM_FORMAT_S32_LE, 1,  0, 0, 0, 12288000 },
	{ 384000, SNDRV_PCM_FORMAT_S32_LE, 2,  0, 0, 0, 24576000 },

	/* I2S from params */
	{   8000, SNDRV_PCM_FORMAT_S16_LE, 1,  0, 0, 2,   256000 },
	{   8000, SNDRV_PCM_FORMAT_S16_LE, 2,  0, 0, 2,   256000 },
	{   8000, SNDRV_PCM_FORMAT_S24_LE, 1,  0, 0, 2,   384000 },
	{   8000, SNDRV_PCM_FORMAT_S24_LE, 2,  0, 0, 2,   384000 },
	{   8000, SNDRV_PCM_FORMAT_S32_LE, 1,  0, 0, 2,   512000 },
	{   8000, SNDRV_PCM_FORMAT_S32_LE, 2,  0, 0, 2,   512000 },
	{  44100, SNDRV_PCM_FORMAT_S16_LE, 1,  0, 0, 2,  1411200 },
	{  44100, SNDRV_PCM_FORMAT_S16_LE, 2,  0, 0, 2,  1411200 },
	{  44100, SNDRV_PCM_FORMAT_S24_LE, 1,  0, 0, 2,  2116800 },
	{  44100, SNDRV_PCM_FORMAT_S24_LE, 2,  0, 0, 2,  2116800 },
	{  44100, SNDRV_PCM_FORMAT_S32_LE, 1,  0, 0, 2,  2822400 },
	{  44100, SNDRV_PCM_FORMAT_S32_LE, 2,  0, 0, 2,  2822400 },
	{ 384000, SNDRV_PCM_FORMAT_S16_LE, 1,  0, 0, 2, 12288000 },
	{ 384000, SNDRV_PCM_FORMAT_S16_LE, 2,  0, 0, 2, 12288000 },
	{ 384000, SNDRV_PCM_FORMAT_S24_LE, 1,  0, 0, 2, 18432000 },
	{ 384000, SNDRV_PCM_FORMAT_S24_LE, 2,  0, 0, 2, 18432000 },
	{ 384000, SNDRV_PCM_FORMAT_S32_LE, 1,  0, 0, 2, 24576000 },
	{ 384000, SNDRV_PCM_FORMAT_S32_LE, 2,  0, 0, 2, 24576000 },

	/* Fixed 8-slot TDM, other values from params */
	{   8000, SNDRV_PCM_FORMAT_S16_LE, 1,  0, 8, 0,  1024000 },
	{   8000, SNDRV_PCM_FORMAT_S16_LE, 2,  0, 8, 0,  1024000 },
	{   8000, SNDRV_PCM_FORMAT_S16_LE, 3,  0, 8, 0,  1024000 },
	{   8000, SNDRV_PCM_FORMAT_S16_LE, 4,  0, 8, 0,  1024000 },
	{   8000, SNDRV_PCM_FORMAT_S32_LE, 1,  0, 8, 0,  2048000 },
	{   8000, SNDRV_PCM_FORMAT_S32_LE, 2,  0, 8, 0,  2048000 },
	{   8000, SNDRV_PCM_FORMAT_S32_LE, 3,  0, 8, 0,  2048000 },
	{   8000, SNDRV_PCM_FORMAT_S32_LE, 4,  0, 8, 0,  2048000 },
	{ 384000, SNDRV_PCM_FORMAT_S16_LE, 1,  0, 8, 0, 49152000 },
	{ 384000, SNDRV_PCM_FORMAT_S16_LE, 2,  0, 8, 0, 49152000 },
	{ 384000, SNDRV_PCM_FORMAT_S16_LE, 3,  0, 8, 0, 49152000 },
	{ 384000, SNDRV_PCM_FORMAT_S16_LE, 4,  0, 8, 0, 49152000 },
	{ 384000, SNDRV_PCM_FORMAT_S32_LE, 1,  0, 8, 0, 98304000 },
	{ 384000, SNDRV_PCM_FORMAT_S32_LE, 2,  0, 8, 0, 98304000 },
	{ 384000, SNDRV_PCM_FORMAT_S32_LE, 3,  0, 8, 0, 98304000 },
	{ 384000, SNDRV_PCM_FORMAT_S32_LE, 4,  0, 8, 0, 98304000 },

	/* Fixed 32-bit TDM, other values from params */
	{   8000, SNDRV_PCM_FORMAT_S16_LE, 1, 32, 0, 0,   256000 },
	{   8000, SNDRV_PCM_FORMAT_S16_LE, 2, 32, 0, 0,   512000 },
	{   8000, SNDRV_PCM_FORMAT_S16_LE, 3, 32, 0, 0,   768000 },
	{   8000, SNDRV_PCM_FORMAT_S16_LE, 4, 32, 0, 0,  1024000 },
	{   8000, SNDRV_PCM_FORMAT_S32_LE, 1, 32, 0, 0,   256000 },
	{   8000, SNDRV_PCM_FORMAT_S32_LE, 2, 32, 0, 0,   512000 },
	{   8000, SNDRV_PCM_FORMAT_S32_LE, 3, 32, 0, 0,   768000 },
	{   8000, SNDRV_PCM_FORMAT_S32_LE, 4, 32, 0, 0,  1024000 },
	{ 384000, SNDRV_PCM_FORMAT_S16_LE, 1, 32, 0, 0, 12288000 },
	{ 384000, SNDRV_PCM_FORMAT_S16_LE, 2, 32, 0, 0, 24576000 },
	{ 384000, SNDRV_PCM_FORMAT_S16_LE, 3, 32, 0, 0, 36864000 },
	{ 384000, SNDRV_PCM_FORMAT_S16_LE, 4, 32, 0, 0, 49152000 },
	{ 384000, SNDRV_PCM_FORMAT_S32_LE, 1, 32, 0, 0, 12288000 },
	{ 384000, SNDRV_PCM_FORMAT_S32_LE, 2, 32, 0, 0, 24576000 },
	{ 384000, SNDRV_PCM_FORMAT_S32_LE, 3, 32, 0, 0, 36864000 },
	{ 384000, SNDRV_PCM_FORMAT_S32_LE, 4, 32, 0, 0, 49152000 },

	/* Fixed 6-slot 24-bit TDM, other values from params */
	{   8000, SNDRV_PCM_FORMAT_S16_LE, 1, 24, 6, 0,  1152000 },
	{   8000, SNDRV_PCM_FORMAT_S16_LE, 2, 24, 6, 0,  1152000 },
	{   8000, SNDRV_PCM_FORMAT_S16_LE, 3, 24, 6, 0,  1152000 },
	{   8000, SNDRV_PCM_FORMAT_S16_LE, 4, 24, 6, 0,  1152000 },
	{   8000, SNDRV_PCM_FORMAT_S24_LE, 1, 24, 6, 0,  1152000 },
	{   8000, SNDRV_PCM_FORMAT_S24_LE, 2, 24, 6, 0,  1152000 },
	{   8000, SNDRV_PCM_FORMAT_S24_LE, 3, 24, 6, 0,  1152000 },
	{   8000, SNDRV_PCM_FORMAT_S24_LE, 4, 24, 6, 0,  1152000 },
	{ 192000, SNDRV_PCM_FORMAT_S16_LE, 1, 24, 6, 0, 27648000 },
	{ 192000, SNDRV_PCM_FORMAT_S16_LE, 2, 24, 6, 0, 27648000 },
	{ 192000, SNDRV_PCM_FORMAT_S16_LE, 3, 24, 6, 0, 27648000 },
	{ 192000, SNDRV_PCM_FORMAT_S16_LE, 4, 24, 6, 0, 27648000 },
	{ 192000, SNDRV_PCM_FORMAT_S24_LE, 1, 24, 6, 0, 27648000 },
	{ 192000, SNDRV_PCM_FORMAT_S24_LE, 2, 24, 6, 0, 27648000 },
	{ 192000, SNDRV_PCM_FORMAT_S24_LE, 3, 24, 6, 0, 27648000 },
	{ 192000, SNDRV_PCM_FORMAT_S24_LE, 4, 24, 6, 0, 27648000 },
};

/*
 * Build a snd_pcm_hw_params describing one table entry and check that
 * snd_soc_tdm_params_to_bclk() returns the expected BCLK.
 */
static void test_tdm_params_to_bclk_one(struct kunit *test,
					unsigned int rate, snd_pcm_format_t fmt,
					unsigned int channels,
					unsigned int tdm_width, unsigned int tdm_slots,
					unsigned int slot_multiple,
					unsigned int expected_bclk)
{
	struct snd_pcm_hw_params params;
	int got_bclk;

	/* pin rate, channels and format to single values in the params */
	_snd_pcm_hw_params_any(&params);
	snd_mask_none(hw_param_mask(&params, SNDRV_PCM_HW_PARAM_FORMAT));
	hw_param_interval(&params, SNDRV_PCM_HW_PARAM_RATE)->min = rate;
	hw_param_interval(&params, SNDRV_PCM_HW_PARAM_RATE)->max = rate;
	hw_param_interval(&params, SNDRV_PCM_HW_PARAM_CHANNELS)->min = channels;
	hw_param_interval(&params, SNDRV_PCM_HW_PARAM_CHANNELS)->max = channels;
	params_set_format(&params, fmt);

	got_bclk = snd_soc_tdm_params_to_bclk(&params, tdm_width, tdm_slots, slot_multiple);
	pr_debug("%s: r=%u sb=%u ch=%u tw=%u ts=%u sm=%u expected=%u got=%d\n",
		 __func__,
		 rate, params_width(&params), channels, tdm_width, tdm_slots,
		 slot_multiple, expected_bclk, got_bclk);
	KUNIT_ASSERT_EQ(test, expected_bclk, (unsigned int)got_bclk);
}

/* Run snd_soc_tdm_params_to_bclk() against every table entry */
static void test_tdm_params_to_bclk(struct kunit *test)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tdm_params_to_bclk_cases); ++i) {
		test_tdm_params_to_bclk_one(test,
					    tdm_params_to_bclk_cases[i].rate,
					    tdm_params_to_bclk_cases[i].fmt,
					    tdm_params_to_bclk_cases[i].channels,
					    tdm_params_to_bclk_cases[i].tdm_width,
					    tdm_params_to_bclk_cases[i].tdm_slots,
					    tdm_params_to_bclk_cases[i].slot_multiple,
					    tdm_params_to_bclk_cases[i].bclk);

		if (tdm_params_to_bclk_cases[i].slot_multiple > 0)
			continue;

		/* Slot multiple 1 should have the same effect as multiple 0 */
		test_tdm_params_to_bclk_one(test,
					    tdm_params_to_bclk_cases[i].rate,
					    tdm_params_to_bclk_cases[i].fmt,
					    tdm_params_to_bclk_cases[i].channels,
					    tdm_params_to_bclk_cases[i].tdm_width,
					    tdm_params_to_bclk_cases[i].tdm_slots,
					    1,
					    tdm_params_to_bclk_cases[i].bclk);
	}
}

/*
 * Build a snd_pcm_hw_params and check that snd_soc_params_to_bclk()
 * (the params-only variant, no TDM overrides) returns the expected BCLK.
 */
static void test_snd_soc_params_to_bclk_one(struct kunit *test,
					    unsigned int rate, snd_pcm_format_t fmt,
					    unsigned int channels,
					    unsigned int expected_bclk)
{
	struct snd_pcm_hw_params params;
	int got_bclk;

	/* pin rate, channels and format to single values in the params */
	_snd_pcm_hw_params_any(&params);
	snd_mask_none(hw_param_mask(&params, SNDRV_PCM_HW_PARAM_FORMAT));
	hw_param_interval(&params, SNDRV_PCM_HW_PARAM_RATE)->min = rate;
	hw_param_interval(&params, SNDRV_PCM_HW_PARAM_RATE)->max = rate;
	hw_param_interval(&params, SNDRV_PCM_HW_PARAM_CHANNELS)->min = channels;
	hw_param_interval(&params, SNDRV_PCM_HW_PARAM_CHANNELS)->max = channels;
	params_set_format(&params, fmt);

	got_bclk = snd_soc_params_to_bclk(&params);
	pr_debug("%s: r=%u sb=%u ch=%u expected=%u got=%d\n",
		 __func__,
		 rate, params_width(&params), channels, expected_bclk, got_bclk);
	KUNIT_ASSERT_EQ(test, expected_bclk, (unsigned int)got_bclk);
}

static void test_snd_soc_params_to_bclk(struct kunit *test)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tdm_params_to_bclk_cases); ++i) {
		/*
		 * snd_soc_params_to_bclk() is all the test cases where
		 * snd_pcm_hw_params values are not overridden.
		 */
		if (tdm_params_to_bclk_cases[i].tdm_width |
		    tdm_params_to_bclk_cases[i].tdm_slots |
		    tdm_params_to_bclk_cases[i].slot_multiple)
			continue;

		test_snd_soc_params_to_bclk_one(test,
						tdm_params_to_bclk_cases[i].rate,
						tdm_params_to_bclk_cases[i].fmt,
						tdm_params_to_bclk_cases[i].channels,
						tdm_params_to_bclk_cases[i].bclk);
	}
}

static struct kunit_case soc_utils_test_cases[] = {
	KUNIT_CASE(test_tdm_params_to_bclk),
	KUNIT_CASE(test_snd_soc_params_to_bclk),
	{}
};

static struct kunit_suite soc_utils_test_suite = {
	.name = "soc-utils",
	.test_cases = soc_utils_test_cases,
};

kunit_test_suites(&soc_utils_test_suite);

MODULE_DESCRIPTION("ASoC soc-utils kunit test");
MODULE_LICENSE("GPL");
linux-master
sound/soc/soc-utils-test.c
// SPDX-License-Identifier: GPL-2.0-only // // Apple SoCs MCA driver // // Copyright (C) The Asahi Linux Contributors // // The MCA peripheral is made up of a number of identical units called clusters. // Each cluster has its separate clock parent, SYNC signal generator, carries // four SERDES units and has a dedicated I2S port on the SoC's periphery. // // The clusters can operate independently, or can be combined together in a // configurable manner. We mostly treat them as self-contained independent // units and don't configure any cross-cluster connections except for the I2S // ports. The I2S ports can be routed to any of the clusters (irrespective // of their native cluster). We map this onto ASoC's (DPCM) notion of backend // and frontend DAIs. The 'cluster guts' are frontends which are dynamically // routed to backend I2S ports. // // DAI references in devicetree are resolved to backends. The routing between // frontends and backends is determined by the machine driver in the DAPM paths // it supplies. 
#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_clk.h> #include <linux/of_dma.h> #include <linux/platform_device.h> #include <linux/pm_domain.h> #include <linux/regmap.h> #include <linux/reset.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/dmaengine_pcm.h> #define USE_RXB_FOR_CAPTURE /* Relative to cluster base */ #define REG_STATUS 0x0 #define STATUS_MCLK_EN BIT(0) #define REG_MCLK_CONF 0x4 #define MCLK_CONF_DIV GENMASK(11, 8) #define REG_SYNCGEN_STATUS 0x100 #define SYNCGEN_STATUS_EN BIT(0) #define REG_SYNCGEN_MCLK_SEL 0x104 #define SYNCGEN_MCLK_SEL GENMASK(3, 0) #define REG_SYNCGEN_HI_PERIOD 0x108 #define REG_SYNCGEN_LO_PERIOD 0x10c #define REG_PORT_ENABLES 0x600 #define PORT_ENABLES_CLOCKS GENMASK(2, 1) #define PORT_ENABLES_TX_DATA BIT(3) #define REG_PORT_CLOCK_SEL 0x604 #define PORT_CLOCK_SEL GENMASK(11, 8) #define REG_PORT_DATA_SEL 0x608 #define PORT_DATA_SEL_TXA(cl) (1 << ((cl)*2)) #define PORT_DATA_SEL_TXB(cl) (2 << ((cl)*2)) #define REG_INTSTATE 0x700 #define REG_INTMASK 0x704 /* Bases of serdes units (relative to cluster) */ #define CLUSTER_RXA_OFF 0x200 #define CLUSTER_TXA_OFF 0x300 #define CLUSTER_RXB_OFF 0x400 #define CLUSTER_TXB_OFF 0x500 #define CLUSTER_TX_OFF CLUSTER_TXA_OFF #ifndef USE_RXB_FOR_CAPTURE #define CLUSTER_RX_OFF CLUSTER_RXA_OFF #else #define CLUSTER_RX_OFF CLUSTER_RXB_OFF #endif /* Relative to serdes unit base */ #define REG_SERDES_STATUS 0x00 #define SERDES_STATUS_EN BIT(0) #define SERDES_STATUS_RST BIT(1) #define REG_TX_SERDES_CONF 0x04 #define REG_RX_SERDES_CONF 0x08 #define SERDES_CONF_NCHANS GENMASK(3, 0) #define SERDES_CONF_WIDTH_MASK GENMASK(8, 4) #define SERDES_CONF_WIDTH_16BIT 0x40 #define SERDES_CONF_WIDTH_20BIT 0x80 #define SERDES_CONF_WIDTH_24BIT 0xc0 #define SERDES_CONF_WIDTH_32BIT 
0x100 #define SERDES_CONF_BCLK_POL 0x400 #define SERDES_CONF_LSB_FIRST 0x800 #define SERDES_CONF_UNK1 BIT(12) #define SERDES_CONF_UNK2 BIT(13) #define SERDES_CONF_UNK3 BIT(14) #define SERDES_CONF_NO_DATA_FEEDBACK BIT(15) #define SERDES_CONF_SYNC_SEL GENMASK(18, 16) #define REG_TX_SERDES_BITSTART 0x08 #define REG_RX_SERDES_BITSTART 0x0c #define REG_TX_SERDES_SLOTMASK 0x0c #define REG_RX_SERDES_SLOTMASK 0x10 #define REG_RX_SERDES_PORT 0x04 /* Relative to switch base */ #define REG_DMA_ADAPTER_A(cl) (0x8000 * (cl)) #define REG_DMA_ADAPTER_B(cl) (0x8000 * (cl) + 0x4000) #define DMA_ADAPTER_TX_LSB_PAD GENMASK(4, 0) #define DMA_ADAPTER_TX_NCHANS GENMASK(6, 5) #define DMA_ADAPTER_RX_MSB_PAD GENMASK(12, 8) #define DMA_ADAPTER_RX_NCHANS GENMASK(14, 13) #define DMA_ADAPTER_NCHANS GENMASK(22, 20) #define SWITCH_STRIDE 0x8000 #define CLUSTER_STRIDE 0x4000 #define MAX_NCLUSTERS 6 #define APPLE_MCA_FMTBITS (SNDRV_PCM_FMTBIT_S16_LE | \ SNDRV_PCM_FMTBIT_S24_LE | \ SNDRV_PCM_FMTBIT_S32_LE) struct mca_cluster { int no; __iomem void *base; struct mca_data *host; struct device *pd_dev; struct clk *clk_parent; struct dma_chan *dma_chans[SNDRV_PCM_STREAM_LAST + 1]; bool port_started[SNDRV_PCM_STREAM_LAST + 1]; int port_driver; /* The cluster driving this cluster's port */ bool clocks_in_use[SNDRV_PCM_STREAM_LAST + 1]; struct device_link *pd_link; unsigned int bclk_ratio; /* Masks etc. 
picked up via the set_tdm_slot method */ int tdm_slots; int tdm_slot_width; unsigned int tdm_tx_mask; unsigned int tdm_rx_mask; }; struct mca_data { struct device *dev; __iomem void *switch_base; struct device *pd_dev; struct reset_control *rstc; struct device_link *pd_link; /* Mutex for accessing port_driver of foreign clusters */ struct mutex port_mutex; int nclusters; struct mca_cluster clusters[]; }; static void mca_modify(struct mca_cluster *cl, int regoffset, u32 mask, u32 val) { __iomem void *ptr = cl->base + regoffset; u32 newval; newval = (val & mask) | (readl_relaxed(ptr) & ~mask); writel_relaxed(newval, ptr); } /* * Get the cluster of FE or BE DAI */ static struct mca_cluster *mca_dai_to_cluster(struct snd_soc_dai *dai) { struct mca_data *mca = snd_soc_dai_get_drvdata(dai); /* * FE DAIs are 0 ... nclusters - 1 * BE DAIs are nclusters ... 2*nclusters - 1 */ int cluster_no = dai->id % mca->nclusters; return &mca->clusters[cluster_no]; } /* called before PCM trigger */ static void mca_fe_early_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct mca_cluster *cl = mca_dai_to_cluster(dai); bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; int serdes_unit = is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF; int serdes_conf = serdes_unit + (is_tx ? REG_TX_SERDES_CONF : REG_RX_SERDES_CONF); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL, FIELD_PREP(SERDES_CONF_SYNC_SEL, 0)); mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL, FIELD_PREP(SERDES_CONF_SYNC_SEL, 7)); mca_modify(cl, serdes_unit + REG_SERDES_STATUS, SERDES_STATUS_EN | SERDES_STATUS_RST, SERDES_STATUS_RST); /* * Experiments suggest that it takes at most ~1 us * for the bit to clear, so wait 2 us for good measure. 
*/ udelay(2); WARN_ON(readl_relaxed(cl->base + serdes_unit + REG_SERDES_STATUS) & SERDES_STATUS_RST); mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL, FIELD_PREP(SERDES_CONF_SYNC_SEL, 0)); mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL, FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1)); break; default: break; } } static int mca_fe_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct mca_cluster *cl = mca_dai_to_cluster(dai); bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; int serdes_unit = is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: mca_modify(cl, serdes_unit + REG_SERDES_STATUS, SERDES_STATUS_EN | SERDES_STATUS_RST, SERDES_STATUS_EN); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: mca_modify(cl, serdes_unit + REG_SERDES_STATUS, SERDES_STATUS_EN, 0); break; default: return -EINVAL; } return 0; } static int mca_fe_enable_clocks(struct mca_cluster *cl) { struct mca_data *mca = cl->host; int ret; ret = clk_prepare_enable(cl->clk_parent); if (ret) { dev_err(mca->dev, "cluster %d: unable to enable clock parent: %d\n", cl->no, ret); return ret; } /* * We can't power up the device earlier than this because * the power state driver would error out on seeing the device * as clock-gated. 
*/ cl->pd_link = device_link_add(mca->dev, cl->pd_dev, DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); if (!cl->pd_link) { dev_err(mca->dev, "cluster %d: unable to prop-up power domain\n", cl->no); clk_disable_unprepare(cl->clk_parent); return -EINVAL; } writel_relaxed(cl->no + 1, cl->base + REG_SYNCGEN_MCLK_SEL); mca_modify(cl, REG_SYNCGEN_STATUS, SYNCGEN_STATUS_EN, SYNCGEN_STATUS_EN); mca_modify(cl, REG_STATUS, STATUS_MCLK_EN, STATUS_MCLK_EN); return 0; } static void mca_fe_disable_clocks(struct mca_cluster *cl) { mca_modify(cl, REG_SYNCGEN_STATUS, SYNCGEN_STATUS_EN, 0); mca_modify(cl, REG_STATUS, STATUS_MCLK_EN, 0); device_link_del(cl->pd_link); clk_disable_unprepare(cl->clk_parent); } static bool mca_fe_clocks_in_use(struct mca_cluster *cl) { struct mca_data *mca = cl->host; struct mca_cluster *be_cl; int stream, i; mutex_lock(&mca->port_mutex); for (i = 0; i < mca->nclusters; i++) { be_cl = &mca->clusters[i]; if (be_cl->port_driver != cl->no) continue; for_each_pcm_streams(stream) { if (be_cl->clocks_in_use[stream]) { mutex_unlock(&mca->port_mutex); return true; } } } mutex_unlock(&mca->port_mutex); return false; } static int mca_be_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct mca_cluster *cl = mca_dai_to_cluster(dai); struct mca_data *mca = cl->host; struct mca_cluster *fe_cl; int ret; if (cl->port_driver < 0) return -EINVAL; fe_cl = &mca->clusters[cl->port_driver]; /* * Typically the CODECs we are paired with will require clocks * to be present at time of unmute with the 'mute_stream' op * or at time of DAPM widget power-up. We need to enable clocks * here at the latest (frontend prepare would be too late). 
*/ if (!mca_fe_clocks_in_use(fe_cl)) { ret = mca_fe_enable_clocks(fe_cl); if (ret < 0) return ret; } cl->clocks_in_use[substream->stream] = true; return 0; } static int mca_be_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct mca_cluster *cl = mca_dai_to_cluster(dai); struct mca_data *mca = cl->host; struct mca_cluster *fe_cl; if (cl->port_driver < 0) return -EINVAL; /* * We are operating on a foreign cluster here, but since we * belong to the same PCM, accesses should have been * synchronized at ASoC level. */ fe_cl = &mca->clusters[cl->port_driver]; if (!mca_fe_clocks_in_use(fe_cl)) return 0; /* Nothing to do */ cl->clocks_in_use[substream->stream] = false; if (!mca_fe_clocks_in_use(fe_cl)) mca_fe_disable_clocks(fe_cl); return 0; } static unsigned int mca_crop_mask(unsigned int mask, int nchans) { while (hweight32(mask) > nchans) mask &= ~(1 << __fls(mask)); return mask; } static int mca_configure_serdes(struct mca_cluster *cl, int serdes_unit, unsigned int mask, int slots, int nchans, int slot_width, bool is_tx, int port) { __iomem void *serdes_base = cl->base + serdes_unit; u32 serdes_conf, serdes_conf_mask; serdes_conf_mask = SERDES_CONF_WIDTH_MASK | SERDES_CONF_NCHANS; serdes_conf = FIELD_PREP(SERDES_CONF_NCHANS, max(slots, 1) - 1); switch (slot_width) { case 16: serdes_conf |= SERDES_CONF_WIDTH_16BIT; break; case 20: serdes_conf |= SERDES_CONF_WIDTH_20BIT; break; case 24: serdes_conf |= SERDES_CONF_WIDTH_24BIT; break; case 32: serdes_conf |= SERDES_CONF_WIDTH_32BIT; break; default: goto err; } serdes_conf_mask |= SERDES_CONF_SYNC_SEL; serdes_conf |= FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1); if (is_tx) { serdes_conf_mask |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 | SERDES_CONF_UNK3; serdes_conf |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 | SERDES_CONF_UNK3; } else { serdes_conf_mask |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 | SERDES_CONF_UNK3 | SERDES_CONF_NO_DATA_FEEDBACK; serdes_conf |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 | 
SERDES_CONF_NO_DATA_FEEDBACK; } mca_modify(cl, serdes_unit + (is_tx ? REG_TX_SERDES_CONF : REG_RX_SERDES_CONF), serdes_conf_mask, serdes_conf); if (is_tx) { writel_relaxed(0xffffffff, serdes_base + REG_TX_SERDES_SLOTMASK); writel_relaxed(~((u32)mca_crop_mask(mask, nchans)), serdes_base + REG_TX_SERDES_SLOTMASK + 0x4); writel_relaxed(0xffffffff, serdes_base + REG_TX_SERDES_SLOTMASK + 0x8); writel_relaxed(~((u32)mask), serdes_base + REG_TX_SERDES_SLOTMASK + 0xc); } else { writel_relaxed(0xffffffff, serdes_base + REG_RX_SERDES_SLOTMASK); writel_relaxed(~((u32)mca_crop_mask(mask, nchans)), serdes_base + REG_RX_SERDES_SLOTMASK + 0x4); writel_relaxed(1 << port, serdes_base + REG_RX_SERDES_PORT); } return 0; err: dev_err(cl->host->dev, "unsupported SERDES configuration requested (mask=0x%x slots=%d slot_width=%d)\n", mask, slots, slot_width); return -EINVAL; } static int mca_fe_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) { struct mca_cluster *cl = mca_dai_to_cluster(dai); cl->tdm_slots = slots; cl->tdm_slot_width = slot_width; cl->tdm_tx_mask = tx_mask; cl->tdm_rx_mask = rx_mask; return 0; } static int mca_fe_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct mca_cluster *cl = mca_dai_to_cluster(dai); struct mca_data *mca = cl->host; bool fpol_inv = false; u32 serdes_conf = 0; u32 bitstart; if ((fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) != SND_SOC_DAIFMT_BP_FP) goto err; switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: fpol_inv = 0; bitstart = 1; break; case SND_SOC_DAIFMT_LEFT_J: fpol_inv = 1; bitstart = 0; break; default: goto err; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_IF: case SND_SOC_DAIFMT_IB_IF: fpol_inv ^= 1; break; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: case SND_SOC_DAIFMT_NB_IF: serdes_conf |= SERDES_CONF_BCLK_POL; break; } if (!fpol_inv) goto err; mca_modify(cl, CLUSTER_TX_OFF + REG_TX_SERDES_CONF, 
SERDES_CONF_BCLK_POL, serdes_conf); mca_modify(cl, CLUSTER_RX_OFF + REG_RX_SERDES_CONF, SERDES_CONF_BCLK_POL, serdes_conf); writel_relaxed(bitstart, cl->base + CLUSTER_TX_OFF + REG_TX_SERDES_BITSTART); writel_relaxed(bitstart, cl->base + CLUSTER_RX_OFF + REG_RX_SERDES_BITSTART); return 0; err: dev_err(mca->dev, "unsupported DAI format (0x%x) requested\n", fmt); return -EINVAL; } static int mca_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio) { struct mca_cluster *cl = mca_dai_to_cluster(dai); cl->bclk_ratio = ratio; return 0; } static int mca_fe_get_port(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream); struct snd_soc_pcm_runtime *be; struct snd_soc_dpcm *dpcm; be = NULL; for_each_dpcm_be(fe, substream->stream, dpcm) { be = dpcm->be; break; } if (!be) return -EINVAL; return mca_dai_to_cluster(asoc_rtd_to_cpu(be, 0))->no; } static int mca_fe_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct mca_cluster *cl = mca_dai_to_cluster(dai); struct mca_data *mca = cl->host; struct device *dev = mca->dev; unsigned int samp_rate = params_rate(params); bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; bool refine_tdm = false; unsigned long bclk_ratio; unsigned int tdm_slots, tdm_slot_width, tdm_mask; u32 regval, pad; int ret, port, nchans_ceiled; if (!cl->tdm_slot_width) { /* * We were not given TDM settings from above, set initial * guesses which will later be refined. */ tdm_slot_width = params_width(params); tdm_slots = params_channels(params); refine_tdm = true; } else { tdm_slot_width = cl->tdm_slot_width; tdm_slots = cl->tdm_slots; tdm_mask = is_tx ? 
cl->tdm_tx_mask : cl->tdm_rx_mask; } if (cl->bclk_ratio) bclk_ratio = cl->bclk_ratio; else bclk_ratio = tdm_slot_width * tdm_slots; if (refine_tdm) { int nchannels = params_channels(params); if (nchannels > 2) { dev_err(dev, "missing TDM for stream with two or more channels\n"); return -EINVAL; } if ((bclk_ratio % nchannels) != 0) { dev_err(dev, "BCLK ratio (%ld) not divisible by no. of channels (%d)\n", bclk_ratio, nchannels); return -EINVAL; } tdm_slot_width = bclk_ratio / nchannels; if (tdm_slot_width > 32 && nchannels == 1) tdm_slot_width = 32; if (tdm_slot_width < params_width(params)) { dev_err(dev, "TDM slots too narrow (tdm=%d params=%d)\n", tdm_slot_width, params_width(params)); return -EINVAL; } tdm_mask = (1 << tdm_slots) - 1; } port = mca_fe_get_port(substream); if (port < 0) return port; ret = mca_configure_serdes(cl, is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF, tdm_mask, tdm_slots, params_channels(params), tdm_slot_width, is_tx, port); if (ret) return ret; pad = 32 - params_width(params); /* * TODO: Here the register semantics aren't clear. */ nchans_ceiled = min_t(int, params_channels(params), 4); regval = FIELD_PREP(DMA_ADAPTER_NCHANS, nchans_ceiled) | FIELD_PREP(DMA_ADAPTER_TX_NCHANS, 0x2) | FIELD_PREP(DMA_ADAPTER_RX_NCHANS, 0x2) | FIELD_PREP(DMA_ADAPTER_TX_LSB_PAD, pad) | FIELD_PREP(DMA_ADAPTER_RX_MSB_PAD, pad); #ifndef USE_RXB_FOR_CAPTURE writel_relaxed(regval, mca->switch_base + REG_DMA_ADAPTER_A(cl->no)); #else if (is_tx) writel_relaxed(regval, mca->switch_base + REG_DMA_ADAPTER_A(cl->no)); else writel_relaxed(regval, mca->switch_base + REG_DMA_ADAPTER_B(cl->no)); #endif if (!mca_fe_clocks_in_use(cl)) { /* * Set up FSYNC duty cycle as even as possible. 
*/ writel_relaxed((bclk_ratio / 2) - 1, cl->base + REG_SYNCGEN_HI_PERIOD); writel_relaxed(((bclk_ratio + 1) / 2) - 1, cl->base + REG_SYNCGEN_LO_PERIOD); writel_relaxed(FIELD_PREP(MCLK_CONF_DIV, 0x1), cl->base + REG_MCLK_CONF); ret = clk_set_rate(cl->clk_parent, bclk_ratio * samp_rate); if (ret) { dev_err(mca->dev, "cluster %d: unable to set clock parent: %d\n", cl->no, ret); return ret; } } return 0; } static const struct snd_soc_dai_ops mca_fe_ops = { .set_fmt = mca_fe_set_fmt, .set_bclk_ratio = mca_set_bclk_ratio, .set_tdm_slot = mca_fe_set_tdm_slot, .hw_params = mca_fe_hw_params, .trigger = mca_fe_trigger, }; static bool mca_be_started(struct mca_cluster *cl) { int stream; for_each_pcm_streams(stream) if (cl->port_started[stream]) return true; return false; } static int mca_be_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *be = asoc_substream_to_rtd(substream); struct snd_soc_pcm_runtime *fe; struct mca_cluster *cl = mca_dai_to_cluster(dai); struct mca_cluster *fe_cl; struct mca_data *mca = cl->host; struct snd_soc_dpcm *dpcm; fe = NULL; for_each_dpcm_fe(be, substream->stream, dpcm) { if (fe && dpcm->fe != fe) { dev_err(mca->dev, "many FE per one BE unsupported\n"); return -EINVAL; } fe = dpcm->fe; } if (!fe) return -EINVAL; fe_cl = mca_dai_to_cluster(asoc_rtd_to_cpu(fe, 0)); if (mca_be_started(cl)) { /* * Port is already started in the other direction. * Make sure there isn't a conflict with another cluster * driving the port. 
*/ if (cl->port_driver != fe_cl->no) return -EINVAL; cl->port_started[substream->stream] = true; return 0; } writel_relaxed(PORT_ENABLES_CLOCKS | PORT_ENABLES_TX_DATA, cl->base + REG_PORT_ENABLES); writel_relaxed(FIELD_PREP(PORT_CLOCK_SEL, fe_cl->no + 1), cl->base + REG_PORT_CLOCK_SEL); writel_relaxed(PORT_DATA_SEL_TXA(fe_cl->no), cl->base + REG_PORT_DATA_SEL); mutex_lock(&mca->port_mutex); cl->port_driver = fe_cl->no; mutex_unlock(&mca->port_mutex); cl->port_started[substream->stream] = true; return 0; } static void mca_be_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct mca_cluster *cl = mca_dai_to_cluster(dai); struct mca_data *mca = cl->host; cl->port_started[substream->stream] = false; if (!mca_be_started(cl)) { /* * Were we the last direction to shutdown? * Turn off the lights. */ writel_relaxed(0, cl->base + REG_PORT_ENABLES); writel_relaxed(0, cl->base + REG_PORT_DATA_SEL); mutex_lock(&mca->port_mutex); cl->port_driver = -1; mutex_unlock(&mca->port_mutex); } } static const struct snd_soc_dai_ops mca_be_ops = { .prepare = mca_be_prepare, .hw_free = mca_be_hw_free, .startup = mca_be_startup, .shutdown = mca_be_shutdown, }; static int mca_set_runtime_hwparams(struct snd_soc_component *component, struct snd_pcm_substream *substream, struct dma_chan *chan) { struct device *dma_dev = chan->device->dev; struct snd_dmaengine_dai_dma_data dma_data = {}; int ret; struct snd_pcm_hardware hw; memset(&hw, 0, sizeof(hw)); hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED; hw.periods_min = 2; hw.periods_max = UINT_MAX; hw.period_bytes_min = 256; hw.period_bytes_max = dma_get_max_seg_size(dma_dev); hw.buffer_bytes_max = SIZE_MAX; hw.fifo_size = 16; ret = snd_dmaengine_pcm_refine_runtime_hwparams(substream, &dma_data, &hw, chan); if (ret) return ret; return snd_soc_set_runtime_hwparams(substream, &hw); } static int mca_pcm_open(struct snd_soc_component *component, struct snd_pcm_substream *substream) { 
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct mca_cluster *cl = mca_dai_to_cluster(asoc_rtd_to_cpu(rtd, 0)); struct dma_chan *chan = cl->dma_chans[substream->stream]; int ret; if (rtd->dai_link->no_pcm) return 0; ret = mca_set_runtime_hwparams(component, substream, chan); if (ret) return ret; return snd_dmaengine_pcm_open(substream, chan); } static int mca_hw_params(struct snd_soc_component *component, struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream); struct dma_slave_config slave_config; int ret; if (rtd->dai_link->no_pcm) return 0; memset(&slave_config, 0, sizeof(slave_config)); ret = snd_hwparams_to_dma_slave_config(substream, params, &slave_config); if (ret < 0) return ret; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) slave_config.dst_port_window_size = min_t(u32, params_channels(params), 4); else slave_config.src_port_window_size = min_t(u32, params_channels(params), 4); return dmaengine_slave_config(chan, &slave_config); } static int mca_close(struct snd_soc_component *component, struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); if (rtd->dai_link->no_pcm) return 0; return snd_dmaengine_pcm_close(substream); } static int mca_trigger(struct snd_soc_component *component, struct snd_pcm_substream *substream, int cmd) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); if (rtd->dai_link->no_pcm) return 0; /* * Before we do the PCM trigger proper, insert an opportunity * to reset the frontend's SERDES. 
*/ mca_fe_early_trigger(substream, cmd, asoc_rtd_to_cpu(rtd, 0)); return snd_dmaengine_pcm_trigger(substream, cmd); } static snd_pcm_uframes_t mca_pointer(struct snd_soc_component *component, struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); if (rtd->dai_link->no_pcm) return -ENOTSUPP; return snd_dmaengine_pcm_pointer(substream); } static struct dma_chan *mca_request_dma_channel(struct mca_cluster *cl, unsigned int stream) { bool is_tx = (stream == SNDRV_PCM_STREAM_PLAYBACK); #ifndef USE_RXB_FOR_CAPTURE char *name = devm_kasprintf(cl->host->dev, GFP_KERNEL, is_tx ? "tx%da" : "rx%da", cl->no); #else char *name = devm_kasprintf(cl->host->dev, GFP_KERNEL, is_tx ? "tx%da" : "rx%db", cl->no); #endif return of_dma_request_slave_channel(cl->host->dev->of_node, name); } static void mca_pcm_free(struct snd_soc_component *component, struct snd_pcm *pcm) { struct snd_soc_pcm_runtime *rtd = snd_pcm_chip(pcm); struct mca_cluster *cl = mca_dai_to_cluster(asoc_rtd_to_cpu(rtd, 0)); unsigned int i; if (rtd->dai_link->no_pcm) return; for_each_pcm_streams(i) { struct snd_pcm_substream *substream = rtd->pcm->streams[i].substream; if (!substream || !cl->dma_chans[i]) continue; dma_release_channel(cl->dma_chans[i]); cl->dma_chans[i] = NULL; } } static int mca_pcm_new(struct snd_soc_component *component, struct snd_soc_pcm_runtime *rtd) { struct mca_cluster *cl = mca_dai_to_cluster(asoc_rtd_to_cpu(rtd, 0)); unsigned int i; if (rtd->dai_link->no_pcm) return 0; for_each_pcm_streams(i) { struct snd_pcm_substream *substream = rtd->pcm->streams[i].substream; struct dma_chan *chan; if (!substream) continue; chan = mca_request_dma_channel(cl, i); if (IS_ERR_OR_NULL(chan)) { mca_pcm_free(component, rtd->pcm); if (chan && PTR_ERR(chan) == -EPROBE_DEFER) return PTR_ERR(chan); dev_err(component->dev, "unable to obtain DMA channel (stream %d cluster %d): %pe\n", i, cl->no, chan); if (!chan) return -EINVAL; return PTR_ERR(chan); } 
cl->dma_chans[i] = chan; snd_pcm_set_managed_buffer(substream, SNDRV_DMA_TYPE_DEV_IRAM, chan->device->dev, 512 * 1024 * 6, SIZE_MAX); } return 0; } static const struct snd_soc_component_driver mca_component = { .name = "apple-mca", .open = mca_pcm_open, .close = mca_close, .hw_params = mca_hw_params, .trigger = mca_trigger, .pointer = mca_pointer, .pcm_construct = mca_pcm_new, .pcm_destruct = mca_pcm_free, }; static void apple_mca_release(struct mca_data *mca) { int i; for (i = 0; i < mca->nclusters; i++) { struct mca_cluster *cl = &mca->clusters[i]; if (!IS_ERR_OR_NULL(cl->clk_parent)) clk_put(cl->clk_parent); if (!IS_ERR_OR_NULL(cl->pd_dev)) dev_pm_domain_detach(cl->pd_dev, true); } if (mca->pd_link) device_link_del(mca->pd_link); if (!IS_ERR_OR_NULL(mca->pd_dev)) dev_pm_domain_detach(mca->pd_dev, true); reset_control_rearm(mca->rstc); } static int apple_mca_probe(struct platform_device *pdev) { struct mca_data *mca; struct mca_cluster *clusters; struct snd_soc_dai_driver *dai_drivers; struct resource *res; void __iomem *base; int nclusters; int ret, i; base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(base)) return PTR_ERR(base); if (resource_size(res) < CLUSTER_STRIDE) return -EINVAL; nclusters = (resource_size(res) - CLUSTER_STRIDE) / CLUSTER_STRIDE + 1; mca = devm_kzalloc(&pdev->dev, struct_size(mca, clusters, nclusters), GFP_KERNEL); if (!mca) return -ENOMEM; mca->dev = &pdev->dev; mca->nclusters = nclusters; mutex_init(&mca->port_mutex); platform_set_drvdata(pdev, mca); clusters = mca->clusters; mca->switch_base = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(mca->switch_base)) return PTR_ERR(mca->switch_base); mca->rstc = devm_reset_control_get_optional_shared(&pdev->dev, NULL); if (IS_ERR(mca->rstc)) return PTR_ERR(mca->rstc); dai_drivers = devm_kzalloc( &pdev->dev, sizeof(*dai_drivers) * 2 * nclusters, GFP_KERNEL); if (!dai_drivers) return -ENOMEM; mca->pd_dev = dev_pm_domain_attach_by_id(&pdev->dev, 0); if 
(IS_ERR(mca->pd_dev)) return -EINVAL; mca->pd_link = device_link_add(&pdev->dev, mca->pd_dev, DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); if (!mca->pd_link) { ret = -EINVAL; /* Prevent an unbalanced reset rearm */ mca->rstc = NULL; goto err_release; } reset_control_reset(mca->rstc); for (i = 0; i < nclusters; i++) { struct mca_cluster *cl = &clusters[i]; struct snd_soc_dai_driver *fe = &dai_drivers[mca->nclusters + i]; struct snd_soc_dai_driver *be = &dai_drivers[i]; cl->host = mca; cl->no = i; cl->base = base + CLUSTER_STRIDE * i; cl->port_driver = -1; cl->clk_parent = of_clk_get(pdev->dev.of_node, i); if (IS_ERR(cl->clk_parent)) { dev_err(&pdev->dev, "unable to obtain clock %d: %ld\n", i, PTR_ERR(cl->clk_parent)); ret = PTR_ERR(cl->clk_parent); goto err_release; } cl->pd_dev = dev_pm_domain_attach_by_id(&pdev->dev, i + 1); if (IS_ERR(cl->pd_dev)) { dev_err(&pdev->dev, "unable to obtain cluster %d PD: %ld\n", i, PTR_ERR(cl->pd_dev)); ret = PTR_ERR(cl->pd_dev); goto err_release; } fe->id = i; fe->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "mca-pcm-%d", i); if (!fe->name) { ret = -ENOMEM; goto err_release; } fe->ops = &mca_fe_ops; fe->playback.channels_min = 1; fe->playback.channels_max = 32; fe->playback.rates = SNDRV_PCM_RATE_8000_192000; fe->playback.formats = APPLE_MCA_FMTBITS; fe->capture.channels_min = 1; fe->capture.channels_max = 32; fe->capture.rates = SNDRV_PCM_RATE_8000_192000; fe->capture.formats = APPLE_MCA_FMTBITS; fe->symmetric_rate = 1; fe->playback.stream_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "PCM%d TX", i); fe->capture.stream_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "PCM%d RX", i); if (!fe->playback.stream_name || !fe->capture.stream_name) { ret = -ENOMEM; goto err_release; } be->id = i + nclusters; be->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "mca-i2s-%d", i); if (!be->name) { ret = -ENOMEM; goto err_release; } be->ops = &mca_be_ops; be->playback.channels_min = 1; be->playback.channels_max = 32; 
be->playback.rates = SNDRV_PCM_RATE_8000_192000; be->playback.formats = APPLE_MCA_FMTBITS; be->capture.channels_min = 1; be->capture.channels_max = 32; be->capture.rates = SNDRV_PCM_RATE_8000_192000; be->capture.formats = APPLE_MCA_FMTBITS; be->playback.stream_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "I2S%d TX", i); be->capture.stream_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "I2S%d RX", i); if (!be->playback.stream_name || !be->capture.stream_name) { ret = -ENOMEM; goto err_release; } } ret = snd_soc_register_component(&pdev->dev, &mca_component, dai_drivers, nclusters * 2); if (ret) { dev_err(&pdev->dev, "unable to register ASoC component: %d\n", ret); goto err_release; } return 0; err_release: apple_mca_release(mca); return ret; } static void apple_mca_remove(struct platform_device *pdev) { struct mca_data *mca = platform_get_drvdata(pdev); snd_soc_unregister_component(&pdev->dev); apple_mca_release(mca); } static const struct of_device_id apple_mca_of_match[] = { { .compatible = "apple,mca", }, {} }; MODULE_DEVICE_TABLE(of, apple_mca_of_match); static struct platform_driver apple_mca_driver = { .driver = { .name = "apple-mca", .of_match_table = apple_mca_of_match, }, .probe = apple_mca_probe, .remove_new = apple_mca_remove, }; module_platform_driver(apple_mca_driver); MODULE_AUTHOR("Martin Povišer <[email protected]>"); MODULE_DESCRIPTION("ASoC Apple MCA driver"); MODULE_LICENSE("GPL");
/* ==== end of file: sound/soc/apple/mca.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <[email protected]>
//          Amadeusz Slawinski <[email protected]>
//

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/hdaudio.h>
#include <sound/hdaudio_ext.h>
#include "avs.h"
#include "cldma.h"
#include "messages.h"
#include "registers.h"
#include "topology.h"

/* ROM/firmware status-register fields and boot timeouts. */
#define AVS_ROM_STS_MASK		0xFF
#define AVS_ROM_INIT_DONE		0x1
#define SKL_ROM_BASEFW_ENTERED		0xF
#define APL_ROM_FW_ENTERED		0x5
#define AVS_ROM_INIT_POLLING_US		5
#define SKL_ROM_INIT_TIMEOUT_US		1000000
#define APL_ROM_INIT_TIMEOUT_US		300000
#define APL_ROM_INIT_RETRIES		3
#define AVS_FW_INIT_POLLING_US		500
#define AVS_FW_INIT_TIMEOUT_MS		3000
#define AVS_FW_INIT_TIMEOUT_US		(AVS_FW_INIT_TIMEOUT_MS * 1000)

#define AVS_CLDMA_START_DELAY_MS	100

/* Firmware file locations and per-platform manifest layout constants. */
#define AVS_ROOT_DIR			"intel/avs"
#define AVS_BASEFW_FILENAME		"dsp_basefw.bin"
#define AVS_EXT_MANIFEST_MAGIC		0x31454124
#define SKL_MANIFEST_MAGIC		0x00000006
#define SKL_ADSPFW_OFFSET		0x284
#define APL_MANIFEST_MAGIC		0x44504324
#define APL_ADSPFW_OFFSET		0x2000

/* Occasionally, engineering (release candidate) firmware is provided for testing. */
static bool debug_ignore_fw_version;
module_param_named(ignore_fw_version, debug_ignore_fw_version, bool, 0444);
MODULE_PARM_DESC(ignore_fw_version, "Ignore firmware version check 0=no (default), 1=yes");

#define AVS_LIB_NAME_SIZE	8

/* On-disk module/library manifest embedded in the firmware image. */
struct avs_fw_manifest {
	u32 id;
	u32 len;
	char name[AVS_LIB_NAME_SIZE];
	u32 preload_page_count;
	u32 img_flags;
	u32 feature_mask;
	struct avs_fw_version version;
} __packed;

/* Optional extended-manifest header that may prefix the firmware image. */
struct avs_fw_ext_manifest {
	u32 id;
	u32 len;
	u16 version_major;
	u16 version_minor;
	u32 entries;
} __packed;

/*
 * Strip the extended manifest (if present) by advancing the firmware
 * data/size window in place. Returns -EINVAL on a too-short image.
 */
static int avs_fw_ext_manifest_strip(struct firmware *fw)
{
	struct avs_fw_ext_manifest *man;

	if (fw->size < sizeof(*man))
		return -EINVAL;

	man = (struct avs_fw_ext_manifest *)fw->data;
	if (man->id == AVS_EXT_MANIFEST_MAGIC) {
		fw->data += man->len;
		fw->size -= man->len;
	}

	return 0;
}

/*
 * Map the image's leading magic to the platform-specific offset of the
 * module manifest, or -EINVAL for an unrecognized header.
 */
static int avs_fw_manifest_offset(struct firmware *fw)
{
	/* Header type found in first DWORD of fw binary. */
	u32 magic = *(u32 *)fw->data;

	switch (magic) {
	case SKL_MANIFEST_MAGIC:
		return SKL_ADSPFW_OFFSET;
	case APL_MANIFEST_MAGIC:
		return APL_ADSPFW_OFFSET;
	default:
		return -EINVAL;
	}
}

/*
 * Strip the extended manifest and, when @min is given, verify the image's
 * version: major/minor/hotfix must match exactly, build may be newer.
 * The check can be bypassed with the ignore_fw_version module parameter.
 */
static int avs_fw_manifest_strip_verify(struct avs_dev *adev,
					struct firmware *fw,
					const struct avs_fw_version *min)
{
	struct avs_fw_manifest *man;
	int offset, ret;

	ret = avs_fw_ext_manifest_strip(fw);
	if (ret)
		return ret;

	offset = avs_fw_manifest_offset(fw);
	if (offset < 0)
		return offset;

	if (fw->size < offset + sizeof(*man))
		return -EINVAL;
	if (!min)
		return 0;

	man = (struct avs_fw_manifest *)(fw->data + offset);
	if (man->version.major != min->major ||
	    man->version.minor != min->minor ||
	    man->version.hotfix != min->hotfix ||
	    man->version.build < min->build) {
		dev_warn(adev->dev, "bad FW version %d.%d.%d.%d, expected %d.%d.%d.%d or newer\n",
			 man->version.major, man->version.minor,
			 man->version.hotfix, man->version.build,
			 min->major, min->minor, min->hotfix, min->build);

		if (!debug_ignore_fw_version)
			return -EINVAL;
	}

	return 0;
}

/*
 * Boot the base firmware over CLDMA (SKL-era flow): power/reset/unstall
 * the main core, wait for ROM init, then stream the image and wait for
 * the ROM to report basefw entry. Disables the core on any failure.
 */
int avs_cldma_load_basefw(struct avs_dev *adev, struct firmware *fw)
{
	struct hda_cldma *cl = &code_loader;
	unsigned int reg;
	int ret;

	ret = avs_dsp_op(adev, power, AVS_MAIN_CORE_MASK, true);
	if (ret < 0)
		return ret;

	ret = avs_dsp_op(adev, reset, AVS_MAIN_CORE_MASK, false);
	if (ret < 0)
		return ret;

	ret = hda_cldma_reset(cl);
	if (ret < 0) {
		dev_err(adev->dev, "cldma reset failed: %d\n", ret);
		return ret;
	}
	hda_cldma_setup(cl);

	ret = avs_dsp_op(adev, stall, AVS_MAIN_CORE_MASK, false);
	if (ret < 0)
		return ret;

	reinit_completion(&adev->fw_ready);
	avs_dsp_op(adev, int_control, true);

	/* await ROM init */
	ret = snd_hdac_adsp_readl_poll(adev, AVS_FW_REG_STATUS(adev), reg,
				       (reg & AVS_ROM_INIT_DONE) == AVS_ROM_INIT_DONE,
				       AVS_ROM_INIT_POLLING_US, SKL_ROM_INIT_TIMEOUT_US);
	if (ret < 0) {
		dev_err(adev->dev, "rom init timeout: %d\n", ret);
		avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
		return ret;
	}

	hda_cldma_set_data(cl, (void *)fw->data, fw->size);
	/* transfer firmware */
	hda_cldma_transfer(cl, 0);
	ret = snd_hdac_adsp_readl_poll(adev, AVS_FW_REG_STATUS(adev), reg,
				       (reg & AVS_ROM_STS_MASK) == SKL_ROM_BASEFW_ENTERED,
				       AVS_FW_INIT_POLLING_US, AVS_FW_INIT_TIMEOUT_US);
	hda_cldma_stop(cl);
	if (ret < 0) {
		dev_err(adev->dev, "transfer fw failed: %d\n", ret);
		avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
		return ret;
	}

	return 0;
}

/* Stream a library image over CLDMA and tell firmware to consume it. */
int avs_cldma_load_library(struct avs_dev *adev, struct firmware *lib, u32 id)
{
	struct hda_cldma *cl = &code_loader;
	int ret;

	hda_cldma_set_data(cl, (void *)lib->data, lib->size);
	/* transfer modules manifest */
	hda_cldma_transfer(cl, msecs_to_jiffies(AVS_CLDMA_START_DELAY_MS));

	/* DMA id ignored as there is only ever one code-loader DMA */
	ret = avs_ipc_load_library(adev, 0, id);
	hda_cldma_stop(cl);

	if (ret) {
		ret = AVS_IPC_RET(ret);
		dev_err(adev->dev, "transfer lib %d failed: %d\n", id, ret);
	}

	return ret;
}

/*
 * Load a single module image (named after its UUID) over CLDMA with
 * power/clock gating and L1SEN temporarily disabled for the transfer.
 */
static int avs_cldma_load_module(struct avs_dev *adev, struct avs_module_entry *mentry)
{
	struct hda_cldma *cl = &code_loader;
	const struct firmware *mod;
	char *mod_name;
	int ret;

	mod_name = kasprintf(GFP_KERNEL, "%s/%s/dsp_mod_%pUL.bin", AVS_ROOT_DIR,
			     adev->spec->name, mentry->uuid.b);
	if (!mod_name)
		return -ENOMEM;

	ret = avs_request_firmware(adev, &mod, mod_name);
	kfree(mod_name);
	if (ret < 0)
		return ret;

	avs_hda_power_gating_enable(adev, false);
	avs_hda_clock_gating_enable(adev, false);
	avs_hda_l1sen_enable(adev, false);

	hda_cldma_set_data(cl, (void *)mod->data, mod->size);
	hda_cldma_transfer(cl, msecs_to_jiffies(AVS_CLDMA_START_DELAY_MS));
	ret = avs_ipc_load_modules(adev, &mentry->module_id, 1);
	hda_cldma_stop(cl);

	avs_hda_l1sen_enable(adev, true);
	avs_hda_clock_gating_enable(adev, true);
	avs_hda_power_gating_enable(adev, true);

	if (ret) {
		dev_err(adev->dev, "load module %d failed: %d\n",
			mentry->module_id, ret);
		avs_release_last_firmware(adev);
		return AVS_IPC_RET(ret);
	}

	return 0;
}

/*
 * Load (@load=true) each module via CLDMA, or unload the whole batch
 * with a single IPC carrying the module-id list.
 */
int avs_cldma_transfer_modules(struct avs_dev *adev, bool load,
			       struct avs_module_entry *mods, u32 num_mods)
{
	u16 *mod_ids;
	int ret, i;

	/* Either load to DSP or unload them to free space. */
	if (load) {
		for (i = 0; i < num_mods; i++) {
			ret = avs_cldma_load_module(adev, &mods[i]);
			if (ret)
				return ret;
		}

		return 0;
	}

	mod_ids = kcalloc(num_mods, sizeof(u16), GFP_KERNEL);
	if (!mod_ids)
		return -ENOMEM;

	for (i = 0; i < num_mods; i++)
		mod_ids[i] = mods[i].module_id;

	ret = avs_ipc_unload_modules(adev, mod_ids, num_mods);
	kfree(mod_ids);
	if (ret)
		return AVS_IPC_RET(ret);

	return 0;
}

/*
 * Bring the ROM up for an HDA-host-DMA (or IMR) boot: power all init
 * cores, unreset the main core, send the boot config IPC and poll the
 * ROM status register; non-main cores are powered back down afterwards.
 * All cores from core_init_mask are disabled on failure.
 */
static int avs_hda_init_rom(struct avs_dev *adev, unsigned int dma_id, bool purge)
{
	const struct avs_spec *const spec = adev->spec;
	unsigned int corex_mask, reg;
	int ret;

	corex_mask = spec->core_init_mask & ~AVS_MAIN_CORE_MASK;

	ret = avs_dsp_op(adev, power, spec->core_init_mask, true);
	if (ret < 0)
		goto err;

	ret = avs_dsp_op(adev, reset, AVS_MAIN_CORE_MASK, false);
	if (ret < 0)
		goto err;

	reinit_completion(&adev->fw_ready);
	avs_dsp_op(adev, int_control, true);

	/* set boot config */
	ret = avs_ipc_set_boot_config(adev, dma_id, purge);
	if (ret) {
		ret = AVS_IPC_RET(ret);
		goto err;
	}

	/*
	 * await ROM init
	 * NOTE(review): low nibble (0xF) appears to carry the boot state here,
	 * while AVS_ROM_STS_MASK (0xFF) is used elsewhere — confirm against
	 * the platform register spec.
	 */
	ret = snd_hdac_adsp_readq_poll(adev, spec->rom_status, reg,
				       (reg & 0xF) == AVS_ROM_INIT_DONE ||
				       (reg & 0xF) == APL_ROM_FW_ENTERED,
				       AVS_ROM_INIT_POLLING_US, APL_ROM_INIT_TIMEOUT_US);
	if (ret < 0) {
		dev_err(adev->dev, "rom init timeout: %d\n", ret);
		goto err;
	}

	/* power down non-main cores */
	if (corex_mask) {
		ret = avs_dsp_op(adev, power, corex_mask, false);
		if (ret < 0)
			goto err;
	}

	return 0;

err:
	avs_dsp_core_disable(adev, spec->core_init_mask);
	return ret;
}

/*
 * Boot base firmware straight from IMR (retained DSP memory) — no host
 * transfer needed; just kick the ROM and wait for the FW_READY notification.
 */
static int avs_imr_load_basefw(struct avs_dev *adev)
{
	int ret;

	/* DMA id ignored when flashing from IMR as no transfer occurs. */
	ret = avs_hda_init_rom(adev, 0, false);
	if (ret < 0) {
		dev_err(adev->dev, "rom init failed: %d\n", ret);
		return ret;
	}

	ret = wait_for_completion_timeout(&adev->fw_ready,
					  msecs_to_jiffies(AVS_FW_INIT_TIMEOUT_MS));
	if (!ret) {
		dev_err(adev->dev, "firmware ready timeout\n");
		avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * Boot the base firmware using a borrowed HDA host playback stream as the
 * code-loader DMA (APL-era flow). ROM init is retried a few times; stream,
 * SPIB and DMA buffer are torn down on all paths.
 */
int avs_hda_load_basefw(struct avs_dev *adev, struct firmware *fw)
{
	struct snd_pcm_substream substream;
	struct snd_dma_buffer dmab;
	struct hdac_ext_stream *estream;
	struct hdac_stream *hstream;
	struct hdac_bus *bus = &adev->base.core;
	unsigned int sdfmt, reg;
	int ret, i;

	/* configure hda dma */
	memset(&substream, 0, sizeof(substream));
	substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
	estream = snd_hdac_ext_stream_assign(bus, &substream,
					     HDAC_EXT_STREAM_TYPE_HOST);
	if (!estream)
		return -ENODEV;
	hstream = hdac_stream(estream);

	/* code loading performed with default format */
	sdfmt = snd_hdac_calc_stream_format(48000, 1, SNDRV_PCM_FORMAT_S32_LE, 32, 0);
	ret = snd_hdac_dsp_prepare(hstream, sdfmt, fw->size, &dmab);
	if (ret < 0)
		goto release_stream;

	/* enable SPIB for hda stream */
	snd_hdac_stream_spbcap_enable(bus, true, hstream->index);
	ret = snd_hdac_stream_set_spib(bus, hstream, fw->size);
	if (ret)
		goto cleanup_resources;

	memcpy(dmab.area, fw->data, fw->size);

	for (i = 0; i < APL_ROM_INIT_RETRIES; i++) {
		unsigned int dma_id = hstream->stream_tag - 1;

		ret = avs_hda_init_rom(adev, dma_id, true);
		if (!ret)
			break;
		dev_info(adev->dev, "#%d rom init fail: %d\n", i + 1, ret);
	}
	if (ret < 0)
		goto cleanup_resources;

	/* transfer firmware */
	snd_hdac_dsp_trigger(hstream, true);
	ret = snd_hdac_adsp_readl_poll(adev, AVS_FW_REG_STATUS(adev), reg,
				       (reg & AVS_ROM_STS_MASK) == APL_ROM_FW_ENTERED,
				       AVS_FW_INIT_POLLING_US, AVS_FW_INIT_TIMEOUT_US);
	snd_hdac_dsp_trigger(hstream, false);
	if (ret < 0) {
		dev_err(adev->dev, "transfer fw failed: %d\n", ret);
		avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
	}

cleanup_resources:
	/* disable SPIB for hda stream */
	snd_hdac_stream_spbcap_enable(bus, false, hstream->index);
	snd_hdac_stream_set_spib(bus, hstream, 0);

	snd_hdac_dsp_cleanup(hstream, &dmab);
release_stream:
	snd_hdac_ext_stream_release(estream, HDAC_EXT_STREAM_TYPE_HOST);

	return ret;
}

/*
 * Load a library image over a borrowed HDA host stream; the stream tag
 * (minus one) is passed as the DMA id in the LOAD_LIBRARY IPC.
 */
int avs_hda_load_library(struct avs_dev *adev, struct firmware *lib, u32 id)
{
	struct snd_pcm_substream substream;
	struct snd_dma_buffer dmab;
	struct hdac_ext_stream *estream;
	struct hdac_stream *stream;
	struct hdac_bus *bus = &adev->base.core;
	unsigned int sdfmt;
	int ret;

	/* configure hda dma */
	memset(&substream, 0, sizeof(substream));
	substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
	estream = snd_hdac_ext_stream_assign(bus, &substream,
					     HDAC_EXT_STREAM_TYPE_HOST);
	if (!estream)
		return -ENODEV;
	stream = hdac_stream(estream);

	/* code loading performed with default format */
	sdfmt = snd_hdac_calc_stream_format(48000, 1, SNDRV_PCM_FORMAT_S32_LE, 32, 0);
	ret = snd_hdac_dsp_prepare(stream, sdfmt, lib->size, &dmab);
	if (ret < 0)
		goto release_stream;

	/* enable SPIB for hda stream */
	snd_hdac_stream_spbcap_enable(bus, true, stream->index);
	snd_hdac_stream_set_spib(bus, stream, lib->size);

	memcpy(dmab.area, lib->data, lib->size);

	/* transfer firmware */
	snd_hdac_dsp_trigger(stream, true);
	ret = avs_ipc_load_library(adev, stream->stream_tag - 1, id);
	snd_hdac_dsp_trigger(stream, false);
	if (ret) {
		dev_err(adev->dev, "transfer lib %d failed: %d\n", id, ret);
		ret = AVS_IPC_RET(ret);
	}

	/* disable SPIB for hda stream */
	snd_hdac_stream_spbcap_enable(bus, false, stream->index);
	snd_hdac_stream_set_spib(bus, stream, 0);

	snd_hdac_dsp_cleanup(stream, &dmab);
release_stream:
	snd_hdac_ext_stream_release(estream, HDAC_EXT_STREAM_TYPE_HOST);

	return ret;
}

/* No-op by design on non-CLDMA platforms; see comment below. */
int avs_hda_transfer_modules(struct avs_dev *adev, bool load,
			     struct avs_module_entry *mods, u32 num_mods)
{
	/*
	 * All platforms without CLDMA are equipped with IMR,
	 * and thus the module transferring is offloaded to DSP.
	 */
	return 0;
}

/*
 * Load topology-requested libraries into free lib_names slots, skipping
 * any already resident. Returns 1 when nothing new was loaded, 0 when at
 * least one library was loaded, negative errno on failure.
 */
int avs_dsp_load_libraries(struct avs_dev *adev, struct avs_tplg_library *libs, u32 num_libs)
{
	int start, id, i = 0;
	int ret;

	/* Calculate the id to assign for the next lib. */
	for (id = 0; id < adev->fw_cfg.max_libs_count; id++)
		if (adev->lib_names[id][0] == '\0')
			break;
	/*
	 * NOTE(review): '>=' also rejects a batch that would exactly fill the
	 * table (slots id..max-1); possibly intentional headroom — confirm.
	 */
	if (id + num_libs >= adev->fw_cfg.max_libs_count)
		return -EINVAL;

	start = id;
	while (i < num_libs) {
		struct avs_fw_manifest *man;
		const struct firmware *fw;
		struct firmware stripped_fw;
		char *filename;
		int j;

		filename = kasprintf(GFP_KERNEL, "%s/%s/%s", AVS_ROOT_DIR,
				     adev->spec->name, libs[i].name);
		if (!filename)
			return -ENOMEM;

		/*
		 * If any call after this one fails, requested firmware is not released with
		 * avs_release_last_firmware() as failing to load code results in need for reload
		 * of entire driver module. And then avs_release_firmwares() is in place already.
		 */
		ret = avs_request_firmware(adev, &fw, filename);
		kfree(filename);
		if (ret < 0)
			return ret;

		stripped_fw = *fw;
		ret = avs_fw_manifest_strip_verify(adev, &stripped_fw, NULL);
		if (ret) {
			dev_err(adev->dev, "invalid library data: %d\n", ret);
			return ret;
		}

		ret = avs_fw_manifest_offset(&stripped_fw);
		if (ret < 0)
			return ret;
		man = (struct avs_fw_manifest *)(stripped_fw.data + ret);

		/* Don't load anything that's already in DSP memory. */
		for (j = 0; j < id; j++)
			if (!strncmp(adev->lib_names[j], man->name, AVS_LIB_NAME_SIZE))
				goto next_lib;

		ret = avs_dsp_op(adev, load_lib, &stripped_fw, id);
		if (ret)
			return ret;

		/* Bounded; all readers use strncmp/first-byte checks. */
		strncpy(adev->lib_names[id], man->name, AVS_LIB_NAME_SIZE);
		id++;
next_lib:
		i++;
	}

	return start == id ? 1 : 0;
}

/*
 * Fetch the platform base firmware from disk, verify its manifest against
 * the spec's minimum version, hand it to the platform load op and wait
 * for the FW_READY notification.
 */
static int avs_dsp_load_basefw(struct avs_dev *adev)
{
	const struct avs_fw_version *min_req;
	const struct avs_spec *const spec = adev->spec;
	const struct firmware *fw;
	struct firmware stripped_fw;
	char *filename;
	int ret;

	filename = kasprintf(GFP_KERNEL, "%s/%s/%s", AVS_ROOT_DIR, spec->name,
			     AVS_BASEFW_FILENAME);
	if (!filename)
		return -ENOMEM;

	ret = avs_request_firmware(adev, &fw, filename);
	kfree(filename);
	if (ret < 0) {
		dev_err(adev->dev, "request firmware failed: %d\n", ret);
		return ret;
	}

	stripped_fw = *fw;
	min_req = &adev->spec->min_fw_version;
	ret = avs_fw_manifest_strip_verify(adev, &stripped_fw, min_req);
	if (ret < 0) {
		dev_err(adev->dev, "invalid firmware data: %d\n", ret);
		goto release_fw;
	}

	ret = avs_dsp_op(adev, load_basefw, &stripped_fw);
	if (ret < 0) {
		dev_err(adev->dev, "basefw load failed: %d\n", ret);
		goto release_fw;
	}

	ret = wait_for_completion_timeout(&adev->fw_ready,
					  msecs_to_jiffies(AVS_FW_INIT_TIMEOUT_MS));
	if (!ret) {
		dev_err(adev->dev, "firmware ready timeout\n");
		avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
		ret = -ETIMEDOUT;
		goto release_fw;
	}

	return 0;

release_fw:
	avs_release_last_firmware(adev);
	return ret;
}

/*
 * Full DSP boot: try IMR fast-path first (unless @purge), otherwise load
 * basefw plus all topology libraries with gating disabled, then refresh
 * the module info cache.
 */
int avs_dsp_boot_firmware(struct avs_dev *adev, bool purge)
{
	struct avs_soc_component *acomp;
	int ret, i;

	/* Forgo full boot if flash from IMR succeeds. */
	if (!purge && avs_platattr_test(adev, IMR)) {
		ret = avs_imr_load_basefw(adev);
		if (!ret)
			return 0;

		dev_dbg(adev->dev, "firmware flash from imr failed: %d\n", ret);
	}

	/* Full boot, clear cached data except for basefw (slot 0). */
	for (i = 1; i < adev->fw_cfg.max_libs_count; i++)
		memset(adev->lib_names[i], 0, AVS_LIB_NAME_SIZE);

	avs_hda_power_gating_enable(adev, false);
	avs_hda_clock_gating_enable(adev, false);
	avs_hda_l1sen_enable(adev, false);

	ret = avs_dsp_load_basefw(adev);
	if (ret)
		goto reenable_gating;

	mutex_lock(&adev->comp_list_mutex);
	list_for_each_entry(acomp, &adev->comp_list, node) {
		struct avs_tplg *tplg = acomp->tplg;

		ret = avs_dsp_load_libraries(adev, tplg->libs, tplg->num_libs);
		if (ret < 0)
			break;
	}
	mutex_unlock(&adev->comp_list_mutex);

reenable_gating:
	avs_hda_l1sen_enable(adev, true);
	avs_hda_clock_gating_enable(adev, true);
	avs_hda_power_gating_enable(adev, true);

	if (ret < 0)
		return ret;

	/* With all code loaded, refresh module information. */
	ret = avs_module_info_init(adev, true);
	if (ret) {
		dev_err(adev->dev, "init module info failed: %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * One-time boot path run at probe: init CLDMA if present, do a purging
 * boot, query hw/fw configs and allocate the per-core refcounts and the
 * library-name table ("BASEFW" permanently occupies slot 0).
 */
int avs_dsp_first_boot_firmware(struct avs_dev *adev)
{
	int ret, i;

	if (avs_platattr_test(adev, CLDMA)) {
		ret = hda_cldma_init(&code_loader, &adev->base.core,
				     adev->dsp_ba, AVS_CL_DEFAULT_BUFFER_SIZE);
		if (ret < 0) {
			dev_err(adev->dev, "cldma init failed: %d\n", ret);
			return ret;
		}
	}

	ret = avs_dsp_boot_firmware(adev, true);
	if (ret < 0) {
		dev_err(adev->dev, "firmware boot failed: %d\n", ret);
		return ret;
	}

	ret = avs_ipc_get_hw_config(adev, &adev->hw_cfg);
	if (ret) {
		dev_err(adev->dev, "get hw cfg failed: %d\n", ret);
		return AVS_IPC_RET(ret);
	}

	ret = avs_ipc_get_fw_config(adev, &adev->fw_cfg);
	if (ret) {
		dev_err(adev->dev, "get fw cfg failed: %d\n", ret);
		return AVS_IPC_RET(ret);
	}

	adev->core_refs = devm_kcalloc(adev->dev, adev->hw_cfg.dsp_cores,
				       sizeof(*adev->core_refs), GFP_KERNEL);
	adev->lib_names = devm_kcalloc(adev->dev, adev->fw_cfg.max_libs_count,
				       sizeof(*adev->lib_names), GFP_KERNEL);
	if (!adev->core_refs || !adev->lib_names)
		return -ENOMEM;

	for (i = 0; i < adev->fw_cfg.max_libs_count; i++) {
		adev->lib_names[i] = devm_kzalloc(adev->dev, AVS_LIB_NAME_SIZE,
						  GFP_KERNEL);
		if (!adev->lib_names[i])
			return -ENOMEM;
	}

	/* basefw always occupies slot 0 */
	strcpy(&adev->lib_names[0][0], "BASEFW");

	ida_init(&adev->ppl_ida);
	return 0;
}
/* ==== end of file: sound/soc/intel/avs/loader.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0-only // // Copyright(c) 2021-2022 Intel Corporation. All rights reserved. // // Authors: Cezary Rojewski <[email protected]> // Amadeusz Slawinski <[email protected]> // #include <linux/acpi.h> #include <linux/module.h> #include <linux/dmi.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <sound/hda_codec.h> #include <sound/hda_register.h> #include <sound/intel-nhlt.h> #include <sound/soc-acpi.h> #include <sound/soc-component.h> #include "avs.h" static bool i2s_test; module_param(i2s_test, bool, 0444); MODULE_PARM_DESC(i2s_test, "Probe I2S test-board and skip all other I2S boards"); static const struct dmi_system_id kbl_dmi_table[] = { { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), DMI_MATCH(DMI_BOARD_NAME, "Skylake Y LPDDR3 RVP3"), }, }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), DMI_MATCH(DMI_BOARD_NAME, "AmberLake Y"), }, }, {} }; static const struct dmi_system_id kblr_dmi_table[] = { { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), DMI_MATCH(DMI_BOARD_NAME, "Kabylake R DDR4 RVP"), }, }, {} }; static struct snd_soc_acpi_mach *dmi_match_quirk(void *arg) { struct snd_soc_acpi_mach *mach = arg; const struct dmi_system_id *dmi_id; struct dmi_system_id *dmi_table; if (mach->quirk_data == NULL) return mach; dmi_table = (struct dmi_system_id *)mach->quirk_data; dmi_id = dmi_first_match(dmi_table); if (!dmi_id) return NULL; return mach; } #define AVS_SSP(x) (BIT(x)) #define AVS_SSP_RANGE(a, b) (GENMASK(b, a)) /* supported I2S board codec configurations */ static struct snd_soc_acpi_mach avs_skl_i2s_machines[] = { { .id = "INT343A", .drv_name = "avs_rt286", .mach_params = { .i2s_link_mask = AVS_SSP(0), }, .tplg_filename = "rt286-tplg.bin", }, { .id = "10508825", .drv_name = "avs_nau8825", .mach_params = { .i2s_link_mask = AVS_SSP(1), }, .tplg_filename = "nau8825-tplg.bin", }, { .id = "INT343B", .drv_name = "avs_ssm4567", .mach_params = { .i2s_link_mask = AVS_SSP(0), 
}, .tplg_filename = "ssm4567-tplg.bin", }, { .id = "MX98357A", .drv_name = "avs_max98357a", .mach_params = { .i2s_link_mask = AVS_SSP(0), }, .tplg_filename = "max98357a-tplg.bin", }, {}, }; static struct snd_soc_acpi_mach avs_kbl_i2s_machines[] = { { .id = "INT343A", .drv_name = "avs_rt286", .mach_params = { .i2s_link_mask = AVS_SSP(0), }, .quirk_data = &kbl_dmi_table, .machine_quirk = dmi_match_quirk, .tplg_filename = "rt286-tplg.bin", }, { .id = "INT343A", .drv_name = "avs_rt298", .mach_params = { .i2s_link_mask = AVS_SSP(0), }, .quirk_data = &kblr_dmi_table, .machine_quirk = dmi_match_quirk, .tplg_filename = "rt298-tplg.bin", }, { .id = "MX98927", .drv_name = "avs_max98927", .mach_params = { .i2s_link_mask = AVS_SSP(0), }, .tplg_filename = "max98927-tplg.bin", }, { .id = "10EC5663", .drv_name = "avs_rt5663", .mach_params = { .i2s_link_mask = AVS_SSP(1), }, .tplg_filename = "rt5663-tplg.bin", }, { .id = "MX98373", .drv_name = "avs_max98373", .mach_params = { .i2s_link_mask = AVS_SSP(0), }, .tplg_filename = "max98373-tplg.bin", }, { .id = "MX98357A", .drv_name = "avs_max98357a", .mach_params = { .i2s_link_mask = AVS_SSP(0), }, .tplg_filename = "max98357a-tplg.bin", }, { .id = "DLGS7219", .drv_name = "avs_da7219", .mach_params = { .i2s_link_mask = AVS_SSP(1), }, .tplg_filename = "da7219-tplg.bin", }, { .id = "ESSX8336", .drv_name = "avs_es8336", .mach_params = { .i2s_link_mask = AVS_SSP(0), }, .tplg_filename = "es8336-tplg.bin", }, {}, }; static struct snd_soc_acpi_mach avs_apl_i2s_machines[] = { { .id = "INT343A", .drv_name = "avs_rt298", .mach_params = { .i2s_link_mask = AVS_SSP(5), }, .tplg_filename = "rt298-tplg.bin", }, { .id = "INT34C3", .drv_name = "avs_tdf8532", .mach_params = { .i2s_link_mask = AVS_SSP_RANGE(0, 5), }, .pdata = (unsigned long[]){ 0, 0, 0x14, 0, 0, 0 }, /* SSP2 TDMs */ .tplg_filename = "tdf8532-tplg.bin", }, { .id = "MX98357A", .drv_name = "avs_max98357a", .mach_params = { .i2s_link_mask = AVS_SSP(5), }, .tplg_filename = 
"max98357a-tplg.bin", }, { .id = "DLGS7219", .drv_name = "avs_da7219", .mach_params = { .i2s_link_mask = AVS_SSP(1), }, .tplg_filename = "da7219-tplg.bin", }, {}, }; static struct snd_soc_acpi_mach avs_gml_i2s_machines[] = { { .id = "INT343A", .drv_name = "avs_rt298", .mach_params = { .i2s_link_mask = AVS_SSP(2), }, .tplg_filename = "rt298-tplg.bin", }, {}, }; static struct snd_soc_acpi_mach avs_test_i2s_machines[] = { { .drv_name = "avs_i2s_test", .mach_params = { .i2s_link_mask = AVS_SSP(0), }, .tplg_filename = "i2s-test-tplg.bin", }, { .drv_name = "avs_i2s_test", .mach_params = { .i2s_link_mask = AVS_SSP(1), }, .tplg_filename = "i2s-test-tplg.bin", }, { .drv_name = "avs_i2s_test", .mach_params = { .i2s_link_mask = AVS_SSP(2), }, .tplg_filename = "i2s-test-tplg.bin", }, { .drv_name = "avs_i2s_test", .mach_params = { .i2s_link_mask = AVS_SSP(3), }, .tplg_filename = "i2s-test-tplg.bin", }, { .drv_name = "avs_i2s_test", .mach_params = { .i2s_link_mask = AVS_SSP(4), }, .tplg_filename = "i2s-test-tplg.bin", }, { .drv_name = "avs_i2s_test", .mach_params = { .i2s_link_mask = AVS_SSP(5), }, .tplg_filename = "i2s-test-tplg.bin", }, /* no NULL terminator, as we depend on ARRAY SIZE due to .id == NULL */ }; struct avs_acpi_boards { int id; struct snd_soc_acpi_mach *machs; }; #define AVS_MACH_ENTRY(_id, _mach) \ { .id = PCI_DEVICE_ID_INTEL_##_id, .machs = (_mach), } /* supported I2S boards per platform */ static const struct avs_acpi_boards i2s_boards[] = { AVS_MACH_ENTRY(HDA_SKL_LP, avs_skl_i2s_machines), AVS_MACH_ENTRY(HDA_KBL_LP, avs_kbl_i2s_machines), AVS_MACH_ENTRY(HDA_APL, avs_apl_i2s_machines), AVS_MACH_ENTRY(HDA_GML, avs_gml_i2s_machines), {}, }; static const struct avs_acpi_boards *avs_get_i2s_boards(struct avs_dev *adev) { int id, i; id = adev->base.pci->device; for (i = 0; i < ARRAY_SIZE(i2s_boards); i++) if (i2s_boards[i].id == id) return &i2s_boards[i]; return NULL; } /* platform devices owned by AVS audio are removed with this hook */ static void 
board_pdev_unregister(void *data) { platform_device_unregister(data); } static int __maybe_unused avs_register_probe_board(struct avs_dev *adev) { struct platform_device *board; struct snd_soc_acpi_mach mach = {{0}}; int ret; ret = avs_probe_platform_register(adev, "probe-platform"); if (ret < 0) return ret; mach.mach_params.platform = "probe-platform"; board = platform_device_register_data(NULL, "avs_probe_mb", PLATFORM_DEVID_NONE, (const void *)&mach, sizeof(mach)); if (IS_ERR(board)) { dev_err(adev->dev, "probe board register failed\n"); return PTR_ERR(board); } ret = devm_add_action(adev->dev, board_pdev_unregister, board); if (ret < 0) { platform_device_unregister(board); return ret; } return 0; } static int avs_register_dmic_board(struct avs_dev *adev) { struct platform_device *codec, *board; struct snd_soc_acpi_mach mach = {{0}}; int ret; if (!adev->nhlt || !intel_nhlt_has_endpoint_type(adev->nhlt, NHLT_LINK_DMIC)) { dev_dbg(adev->dev, "no DMIC endpoints present\n"); return 0; } codec = platform_device_register_simple("dmic-codec", PLATFORM_DEVID_NONE, NULL, 0); if (IS_ERR(codec)) { dev_err(adev->dev, "dmic codec register failed\n"); return PTR_ERR(codec); } ret = devm_add_action(adev->dev, board_pdev_unregister, codec); if (ret < 0) { platform_device_unregister(codec); return ret; } ret = avs_dmic_platform_register(adev, "dmic-platform"); if (ret < 0) return ret; mach.tplg_filename = "dmic-tplg.bin"; mach.mach_params.platform = "dmic-platform"; board = platform_device_register_data(NULL, "avs_dmic", PLATFORM_DEVID_NONE, (const void *)&mach, sizeof(mach)); if (IS_ERR(board)) { dev_err(adev->dev, "dmic board register failed\n"); return PTR_ERR(board); } ret = devm_add_action(adev->dev, board_pdev_unregister, board); if (ret < 0) { platform_device_unregister(board); return ret; } return 0; } static int avs_register_i2s_board(struct avs_dev *adev, struct snd_soc_acpi_mach *mach) { struct platform_device *board; int num_ssps; char *name; int ret; num_ssps = 
adev->hw_cfg.i2s_caps.ctrl_count; if (fls(mach->mach_params.i2s_link_mask) > num_ssps) { dev_err(adev->dev, "Platform supports %d SSPs but board %s requires SSP%ld\n", num_ssps, mach->drv_name, (unsigned long)__fls(mach->mach_params.i2s_link_mask)); return -ENODEV; } name = devm_kasprintf(adev->dev, GFP_KERNEL, "%s.%d-platform", mach->drv_name, mach->mach_params.i2s_link_mask); if (!name) return -ENOMEM; ret = avs_i2s_platform_register(adev, name, mach->mach_params.i2s_link_mask, mach->pdata); if (ret < 0) return ret; mach->mach_params.platform = name; board = platform_device_register_data(NULL, mach->drv_name, mach->mach_params.i2s_link_mask, (const void *)mach, sizeof(*mach)); if (IS_ERR(board)) { dev_err(adev->dev, "ssp board register failed\n"); return PTR_ERR(board); } ret = devm_add_action(adev->dev, board_pdev_unregister, board); if (ret < 0) { platform_device_unregister(board); return ret; } return 0; } static int avs_register_i2s_boards(struct avs_dev *adev) { const struct avs_acpi_boards *boards; struct snd_soc_acpi_mach *mach; int ret; if (!adev->nhlt || !intel_nhlt_has_endpoint_type(adev->nhlt, NHLT_LINK_SSP)) { dev_dbg(adev->dev, "no I2S endpoints present\n"); return 0; } if (i2s_test) { int i, num_ssps; num_ssps = adev->hw_cfg.i2s_caps.ctrl_count; /* constrain just in case FW says there can be more SSPs than possible */ num_ssps = min_t(int, ARRAY_SIZE(avs_test_i2s_machines), num_ssps); mach = avs_test_i2s_machines; for (i = 0; i < num_ssps; i++) { ret = avs_register_i2s_board(adev, &mach[i]); if (ret < 0) dev_warn(adev->dev, "register i2s %s failed: %d\n", mach->drv_name, ret); } return 0; } boards = avs_get_i2s_boards(adev); if (!boards) { dev_dbg(adev->dev, "no I2S endpoints supported\n"); return 0; } for (mach = boards->machs; mach->id[0]; mach++) { if (!acpi_dev_present(mach->id, mach->uid, -1)) continue; if (mach->machine_quirk) if (!mach->machine_quirk(mach)) continue; ret = avs_register_i2s_board(adev, mach); if (ret < 0) dev_warn(adev->dev, 
"register i2s %s failed: %d\n", mach->drv_name, ret); } return 0; } static int avs_register_hda_board(struct avs_dev *adev, struct hda_codec *codec) { struct snd_soc_acpi_mach mach = {{0}}; struct platform_device *board; struct hdac_device *hdev = &codec->core; char *pname; int ret, id; pname = devm_kasprintf(adev->dev, GFP_KERNEL, "%s-platform", dev_name(&hdev->dev)); if (!pname) return -ENOMEM; ret = avs_hda_platform_register(adev, pname); if (ret < 0) return ret; mach.pdata = codec; mach.mach_params.platform = pname; mach.tplg_filename = devm_kasprintf(adev->dev, GFP_KERNEL, "hda-%08x-tplg.bin", hdev->vendor_id); if (!mach.tplg_filename) return -ENOMEM; id = adev->base.core.idx * HDA_MAX_CODECS + hdev->addr; board = platform_device_register_data(NULL, "avs_hdaudio", id, (const void *)&mach, sizeof(mach)); if (IS_ERR(board)) { dev_err(adev->dev, "hda board register failed\n"); return PTR_ERR(board); } ret = devm_add_action(adev->dev, board_pdev_unregister, board); if (ret < 0) { platform_device_unregister(board); return ret; } return 0; } static int avs_register_hda_boards(struct avs_dev *adev) { struct hdac_bus *bus = &adev->base.core; struct hdac_device *hdev; int ret; if (!bus->num_codecs) { dev_dbg(adev->dev, "no HDA endpoints present\n"); return 0; } list_for_each_entry(hdev, &bus->codec_list, list) { struct hda_codec *codec; codec = dev_to_hda_codec(&hdev->dev); ret = avs_register_hda_board(adev, codec); if (ret < 0) dev_warn(adev->dev, "register hda-%08x failed: %d\n", codec->core.vendor_id, ret); } return 0; } int avs_register_all_boards(struct avs_dev *adev) { int ret; #ifdef CONFIG_DEBUG_FS ret = avs_register_probe_board(adev); if (ret < 0) dev_warn(adev->dev, "enumerate PROBE endpoints failed: %d\n", ret); #endif ret = avs_register_dmic_board(adev); if (ret < 0) dev_warn(adev->dev, "enumerate DMIC endpoints failed: %d\n", ret); ret = avs_register_i2s_boards(adev); if (ret < 0) dev_warn(adev->dev, "enumerate I2S endpoints failed: %d\n", ret); ret = 
avs_register_hda_boards(adev); if (ret < 0) dev_warn(adev->dev, "enumerate HDA endpoints failed: %d\n", ret); return 0; } void avs_unregister_all_boards(struct avs_dev *adev) { snd_soc_unregister_component(adev->dev); }
linux-master
sound/soc/intel/avs/board_selection.c
// SPDX-License-Identifier: GPL-2.0-only // // Copyright(c) 2021-2022 Intel Corporation. All rights reserved. // // Author: Cezary Rojewski <[email protected]> // Amadeusz Slawinski <[email protected]> // #include <linux/types.h> #define CREATE_TRACE_POINTS #include "trace.h" #define BYTES_PER_LINE 16 #define MAX_CHUNK_SIZE ((PAGE_SIZE - 150) /* Place for trace header */ \ / (2 * BYTES_PER_LINE + 4) /* chars per line */ \ * BYTES_PER_LINE) void trace_avs_msg_payload(const void *data, size_t size) { size_t remaining = size; size_t offset = 0; while (remaining > 0) { u32 chunk; chunk = min_t(size_t, remaining, MAX_CHUNK_SIZE); trace_avs_ipc_msg_payload(data, chunk, offset, size); remaining -= chunk; offset += chunk; } }
linux-master
sound/soc/intel/avs/trace.c
// SPDX-License-Identifier: GPL-2.0-only // // Copyright(c) 2021-2022 Intel Corporation. All rights reserved. // // Authors: Cezary Rojewski <[email protected]> // Amadeusz Slawinski <[email protected]> // #include <linux/devcoredump.h> #include <linux/slab.h> #include <sound/hdaudio_ext.h> #include "avs.h" #include "messages.h" static int __maybe_unused skl_enable_logs(struct avs_dev *adev, enum avs_log_enable enable, u32 aging_period, u32 fifo_full_period, unsigned long resource_mask, u32 *priorities) { struct skl_log_state_info *info; u32 size, num_cores = adev->hw_cfg.dsp_cores; int ret, i; if (fls_long(resource_mask) > num_cores) return -EINVAL; size = struct_size(info, logs_core, num_cores); info = kzalloc(size, GFP_KERNEL); if (!info) return -ENOMEM; info->core_mask = resource_mask; if (enable) for_each_set_bit(i, &resource_mask, num_cores) { info->logs_core[i].enable = enable; info->logs_core[i].min_priority = *priorities++; } else for_each_set_bit(i, &resource_mask, num_cores) info->logs_core[i].enable = enable; ret = avs_ipc_set_enable_logs(adev, (u8 *)info, size); kfree(info); if (ret) return AVS_IPC_RET(ret); return 0; } int skl_log_buffer_offset(struct avs_dev *adev, u32 core) { return core * avs_log_buffer_size(adev); } /* fw DbgLogWp registers */ #define FW_REGS_DBG_LOG_WP(core) (0x30 + 0x4 * core) static int skl_log_buffer_status(struct avs_dev *adev, union avs_notify_msg *msg) { void __iomem *buf; u16 size, write, offset; if (!avs_logging_fw(adev)) return 0; size = avs_log_buffer_size(adev) / 2; write = readl(avs_sram_addr(adev, AVS_FW_REGS_WINDOW) + FW_REGS_DBG_LOG_WP(msg->log.core)); /* determine buffer half */ offset = (write < size) ? size : 0; /* Address is guaranteed to exist in SRAM2. 
*/ buf = avs_log_buffer_addr(adev, msg->log.core) + offset; avs_dump_fw_log_wakeup(adev, buf, size); return 0; } static int skl_coredump(struct avs_dev *adev, union avs_notify_msg *msg) { u8 *dump; dump = vzalloc(AVS_FW_REGS_SIZE); if (!dump) return -ENOMEM; memcpy_fromio(dump, avs_sram_addr(adev, AVS_FW_REGS_WINDOW), AVS_FW_REGS_SIZE); dev_coredumpv(adev->dev, dump, AVS_FW_REGS_SIZE, GFP_KERNEL); return 0; } static bool skl_d0ix_toggle(struct avs_dev *adev, struct avs_ipc_msg *tx, bool wake) { /* unsupported on cAVS 1.5 hw */ return false; } static int skl_set_d0ix(struct avs_dev *adev, bool enable) { /* unsupported on cAVS 1.5 hw */ return 0; } const struct avs_dsp_ops skl_dsp_ops = { .power = avs_dsp_core_power, .reset = avs_dsp_core_reset, .stall = avs_dsp_core_stall, .irq_handler = avs_dsp_irq_handler, .irq_thread = avs_dsp_irq_thread, .int_control = avs_dsp_interrupt_control, .load_basefw = avs_cldma_load_basefw, .load_lib = avs_cldma_load_library, .transfer_mods = avs_cldma_transfer_modules, .log_buffer_offset = skl_log_buffer_offset, .log_buffer_status = skl_log_buffer_status, .coredump = skl_coredump, .d0ix_toggle = skl_d0ix_toggle, .set_d0ix = skl_set_d0ix, AVS_SET_ENABLE_LOGS_OP(skl) };
linux-master
sound/soc/intel/avs/skl.c
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <[email protected]>
//          Amadeusz Slawinski <[email protected]>
//

#include <sound/hdaudio_ext.h>
#include "avs.h"
#include "registers.h"
#include "trace.h"

/* ADSPCS register polling parameters. */
#define AVS_ADSPCS_INTERVAL_US 500
#define AVS_ADSPCS_TIMEOUT_US 50000
#define AVS_ADSPCS_DELAY_US 1000

/*
 * Power the cores in @core_mask up or down via ADSPCS.SPA and poll CPA until
 * hardware confirms the new state. Returns 0 on success, negative errno on
 * poll timeout.
 */
int avs_dsp_core_power(struct avs_dev *adev, u32 core_mask, bool power)
{
	u32 value, mask, reg;
	int ret;

	value = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPCS);
	trace_avs_dsp_core_op(value, core_mask, "power", power);

	mask = AVS_ADSPCS_SPA_MASK(core_mask);
	value = power ? mask : 0;

	snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPCS, mask, value);

	/* Delay the polling to avoid false positives. */
	usleep_range(AVS_ADSPCS_DELAY_US, 2 * AVS_ADSPCS_DELAY_US);

	mask = AVS_ADSPCS_CPA_MASK(core_mask);
	value = power ? mask : 0;

	ret = snd_hdac_adsp_readl_poll(adev, AVS_ADSP_REG_ADSPCS, reg,
				       (reg & mask) == value,
				       AVS_ADSPCS_INTERVAL_US,
				       AVS_ADSPCS_TIMEOUT_US);
	if (ret)
		dev_err(adev->dev, "core_mask %d power %s failed: %d\n",
			core_mask, power ? "on" : "off", ret);

	return ret;
}

/*
 * Assert or deassert reset (ADSPCS.CRST) for the cores in @core_mask and
 * poll until the register reflects the request.
 */
int avs_dsp_core_reset(struct avs_dev *adev, u32 core_mask, bool reset)
{
	u32 value, mask, reg;
	int ret;

	value = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPCS);
	trace_avs_dsp_core_op(value, core_mask, "reset", reset);

	mask = AVS_ADSPCS_CRST_MASK(core_mask);
	value = reset ? mask : 0;

	snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPCS, mask, value);

	ret = snd_hdac_adsp_readl_poll(adev, AVS_ADSP_REG_ADSPCS, reg,
				       (reg & mask) == value,
				       AVS_ADSPCS_INTERVAL_US,
				       AVS_ADSPCS_TIMEOUT_US);
	if (ret)
		dev_err(adev->dev, "core_mask %d %s reset failed: %d\n",
			core_mask, reset ? "enter" : "exit", ret);

	return ret;
}

/*
 * Stall or unstall execution (ADSPCS.CSTALL) for the cores in @core_mask,
 * poll for confirmation, then wait for the change to settle.
 */
int avs_dsp_core_stall(struct avs_dev *adev, u32 core_mask, bool stall)
{
	u32 value, mask, reg;
	int ret;

	value = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPCS);
	trace_avs_dsp_core_op(value, core_mask, "stall", stall);

	mask = AVS_ADSPCS_CSTALL_MASK(core_mask);
	value = stall ? mask : 0;

	snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPCS, mask, value);

	ret = snd_hdac_adsp_readl_poll(adev, AVS_ADSP_REG_ADSPCS, reg,
				       (reg & mask) == value,
				       AVS_ADSPCS_INTERVAL_US,
				       AVS_ADSPCS_TIMEOUT_US);
	if (ret) {
		dev_err(adev->dev, "core_mask %d %sstall failed: %d\n",
			core_mask, stall ? "" : "un", ret);
		return ret;
	}

	/* Give HW time to propagate the change. */
	usleep_range(AVS_ADSPCS_DELAY_US, 2 * AVS_ADSPCS_DELAY_US);
	return 0;
}

/* Bring cores online: power up, leave reset, unstall - in that order. */
int avs_dsp_core_enable(struct avs_dev *adev, u32 core_mask)
{
	int ret;

	ret = avs_dsp_op(adev, power, core_mask, true);
	if (ret)
		return ret;

	ret = avs_dsp_op(adev, reset, core_mask, false);
	if (ret)
		return ret;

	return avs_dsp_op(adev, stall, core_mask, false);
}

/* Mirror of avs_dsp_core_enable() in reverse order. */
int avs_dsp_core_disable(struct avs_dev *adev, u32 core_mask)
{
	/* No error checks to allow for complete DSP shutdown. */
	avs_dsp_op(adev, stall, core_mask, true);
	avs_dsp_op(adev, reset, core_mask, true);

	return avs_dsp_op(adev, power, core_mask, false);
}

/* Enable cores in hardware and move the non-main ones to D0 via IPC. */
static int avs_dsp_enable(struct avs_dev *adev, u32 core_mask)
{
	u32 mask;
	int ret;

	ret = avs_dsp_core_enable(adev, core_mask);
	if (ret < 0)
		return ret;

	mask = core_mask & ~AVS_MAIN_CORE_MASK;
	if (!mask)
		/*
		 * without main core, fw is dead anyway
		 * so setting D0 for it is futile.
		 */
		return 0;

	ret = avs_ipc_set_dx(adev, mask, true);
	return AVS_IPC_RET(ret);
}

/* Request D3 for @core_mask via IPC, then disable the cores in hardware. */
static int avs_dsp_disable(struct avs_dev *adev, u32 core_mask)
{
	int ret;

	ret = avs_ipc_set_dx(adev, core_mask, false);
	if (ret)
		return AVS_IPC_RET(ret);

	return avs_dsp_core_disable(adev, core_mask);
}

/*
 * Take a reference on @core_id; the first reference powers the core up and
 * disables d0ix for the duration. The main core is always on and is not
 * refcounted.
 */
static int avs_dsp_get_core(struct avs_dev *adev, u32 core_id)
{
	u32 mask;
	int ret;

	mask = BIT_MASK(core_id);
	if (mask == AVS_MAIN_CORE_MASK)
		/* nothing to do for main core */
		return 0;
	if (core_id >= adev->hw_cfg.dsp_cores) {
		ret = -EINVAL;
		goto err;
	}

	adev->core_refs[core_id]++;
	if (adev->core_refs[core_id] == 1) {
		/*
		 * No cores other than main-core can be running for DSP
		 * to achieve d0ix. Conscious SET_D0IX IPC failure is permitted,
		 * simply d0ix power state will no longer be attempted.
		 */
		ret = avs_dsp_disable_d0ix(adev);
		if (ret && ret != -AVS_EIPC)
			goto err_disable_d0ix;

		ret = avs_dsp_enable(adev, mask);
		if (ret)
			goto err_enable_dsp;
	}

	return 0;

err_enable_dsp:
	avs_dsp_enable_d0ix(adev);
err_disable_d0ix:
	adev->core_refs[core_id]--;
err:
	dev_err(adev->dev, "get core %d failed: %d\n", core_id, ret);
	return ret;
}

/*
 * Drop a reference on @core_id; the last reference powers the core down and
 * re-allows d0ix.
 */
static int avs_dsp_put_core(struct avs_dev *adev, u32 core_id)
{
	u32 mask;
	int ret;

	mask = BIT_MASK(core_id);
	if (mask == AVS_MAIN_CORE_MASK)
		/* nothing to do for main core */
		return 0;
	if (core_id >= adev->hw_cfg.dsp_cores) {
		ret = -EINVAL;
		goto err;
	}

	adev->core_refs[core_id]--;
	if (!adev->core_refs[core_id]) {
		ret = avs_dsp_disable(adev, mask);
		if (ret)
			goto err;

		/* Match disable_d0ix in avs_dsp_get_core(). */
		avs_dsp_enable_d0ix(adev);
	}

	return 0;
err:
	dev_err(adev->dev, "put core %d failed: %d\n", core_id, ret);
	return ret;
}

/*
 * Instantiate a firmware module: allocate an instance id, grab the target
 * core, transfer the module's code on first use and send INIT_INSTANCE.
 * On success *@instance_id holds the new instance's id.
 */
int avs_dsp_init_module(struct avs_dev *adev, u16 module_id, u8 ppl_instance_id,
			u8 core_id, u8 domain, void *param, u32 param_size,
			u8 *instance_id)
{
	struct avs_module_entry mentry;
	bool was_loaded = false;
	int ret, id;

	id = avs_module_id_alloc(adev, module_id);
	if (id < 0)
		return id;

	ret = avs_get_module_id_entry(adev, module_id, &mentry);
	if (ret)
		goto err_mod_entry;

	ret = avs_dsp_get_core(adev, core_id);
	if (ret)
		goto err_mod_entry;

	/* Load code into memory if this is the first instance. */
	if (!id && !avs_module_entry_is_loaded(&mentry)) {
		ret = avs_dsp_op(adev, transfer_mods, true, &mentry, 1);
		if (ret) {
			dev_err(adev->dev, "load modules failed: %d\n", ret);
			/*
			 * NOTE(review): this path jumps past avs_dsp_put_core(),
			 * apparently leaking the core reference taken above -
			 * confirm whether err_ipc was intended here.
			 */
			goto err_mod_entry;
		}
		was_loaded = true;
	}

	ret = avs_ipc_init_instance(adev, module_id, id, ppl_instance_id,
				    core_id, domain, param, param_size);
	if (ret) {
		ret = AVS_IPC_RET(ret);
		goto err_ipc;
	}

	*instance_id = id;
	return 0;

err_ipc:
	if (was_loaded)
		avs_dsp_op(adev, transfer_mods, false, &mentry, 1);
	avs_dsp_put_core(adev, core_id);
err_mod_entry:
	avs_module_id_free(adev, module_id, id);
	return ret;
}

/*
 * Tear down a module instance: delete it over IPC when not owned by a
 * pipeline, release its id, unload its code if it was the last user and
 * drop the core reference.
 */
void avs_dsp_delete_module(struct avs_dev *adev, u16 module_id, u8 instance_id,
			   u8 ppl_instance_id, u8 core_id)
{
	struct avs_module_entry mentry;
	int ret;

	/* Modules not owned by any pipeline need to be freed explicitly. */
	if (ppl_instance_id == INVALID_PIPELINE_ID)
		avs_ipc_delete_instance(adev, module_id, instance_id);

	avs_module_id_free(adev, module_id, instance_id);

	ret = avs_get_module_id_entry(adev, module_id, &mentry);
	/* Unload occupied memory if this was the last instance. */
	if (!ret && mentry.type.load_type == AVS_MODULE_LOAD_TYPE_LOADABLE) {
		if (avs_is_module_ida_empty(adev, module_id)) {
			ret = avs_dsp_op(adev, transfer_mods, false, &mentry, 1);
			if (ret)
				dev_err(adev->dev, "unload modules failed: %d\n", ret);
		}
	}

	avs_dsp_put_core(adev, core_id);
}

/*
 * Create a pipeline: reserve an instance id bounded by the firmware's
 * max_ppl_count and send CREATE_PIPELINE. On success *@instance_id holds
 * the new pipeline's id.
 */
int avs_dsp_create_pipeline(struct avs_dev *adev, u16 req_size, u8 priority,
			    bool lp, u16 attributes, u8 *instance_id)
{
	struct avs_fw_cfg *fw_cfg = &adev->fw_cfg;
	int ret, id;

	id = ida_alloc_max(&adev->ppl_ida, fw_cfg->max_ppl_count - 1, GFP_KERNEL);
	if (id < 0)
		return id;

	ret = avs_ipc_create_pipeline(adev, req_size, priority, id, lp, attributes);
	if (ret) {
		ida_free(&adev->ppl_ida, id);
		return AVS_IPC_RET(ret);
	}

	*instance_id = id;
	return 0;
}

/* Delete a pipeline over IPC; the id is returned to the allocator either way. */
int avs_dsp_delete_pipeline(struct avs_dev *adev, u8 instance_id)
{
	int ret;

	ret = avs_ipc_delete_pipeline(adev, instance_id);
	if (ret)
		ret = AVS_IPC_RET(ret);

	ida_free(&adev->ppl_ida, instance_id);

	return ret;
}
linux-master
sound/soc/intel/avs/dsp.c
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <[email protected]>
//          Amadeusz Slawinski <[email protected]>
//

#include <linux/debugfs.h>
#include <linux/kfifo.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
#include <sound/soc.h>
#include "avs.h"
#include "messages.h"

/*
 * Copy up to @len bytes from SRAM (@src, iomem) into @fifo, honoring the
 * fifo's free space and ring-buffer wraparound. Returns the number of bytes
 * actually copied.
 */
static unsigned int __kfifo_fromio(struct kfifo *fifo, const void __iomem *src, unsigned int len)
{
	struct __kfifo *__fifo = &fifo->kfifo;
	unsigned int l, off;

	len = min(len, kfifo_avail(fifo));
	off = __fifo->in & __fifo->mask;
	l = min(len, kfifo_size(fifo) - off);

	/* Two-part copy handles wraparound at the end of the ring. */
	memcpy_fromio(__fifo->data + off, src, l);
	memcpy_fromio(__fifo->data, src + l, len - l);
	/* Make sure data copied from SRAM is visible to all CPUs. */
	smp_mb();
	__fifo->in += len;
	return len;
}

/* Logging is considered active while the trace fifo is allocated. */
bool avs_logging_fw(struct avs_dev *adev)
{
	return kfifo_initialized(&adev->trace_fifo);
}

/* Append a firmware log chunk from SRAM to the trace fifo. */
void avs_dump_fw_log(struct avs_dev *adev, const void __iomem *src, unsigned int len)
{
	__kfifo_fromio(&adev->trace_fifo, src, len);
}

/* Same as avs_dump_fw_log() but also wakes readers blocked in strace_read(). */
void avs_dump_fw_log_wakeup(struct avs_dev *adev, const void __iomem *src, unsigned int len)
{
	avs_dump_fw_log(adev, src, len);
	wake_up(&adev->trace_waitq);
}

/* debugfs "fw_regs": dump the firmware registers SRAM window. */
static ssize_t fw_regs_read(struct file *file, char __user *to, size_t count, loff_t *ppos)
{
	struct avs_dev *adev = file->private_data;
	char *buf;
	int ret;

	buf = kzalloc(AVS_FW_REGS_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	memcpy_fromio(buf, avs_sram_addr(adev, AVS_FW_REGS_WINDOW), AVS_FW_REGS_SIZE);

	ret = simple_read_from_buffer(to, count, ppos, buf, AVS_FW_REGS_SIZE);
	kfree(buf);
	return ret;
}

static const struct file_operations fw_regs_fops = {
	.open = simple_open,
	.read = fw_regs_read,
	.llseek = no_llseek,
};

/* debugfs "debug_window": dump the per-core debug SRAM window. */
static ssize_t debug_window_read(struct file *file, char __user *to, size_t count, loff_t *ppos)
{
	struct avs_dev *adev = file->private_data;
	size_t size;
	char *buf;
	int ret;

	/* One chunk per DSP core. */
	size = adev->hw_cfg.dsp_cores * AVS_WINDOW_CHUNK_SIZE;
	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	memcpy_fromio(buf, avs_sram_addr(adev, AVS_DEBUG_WINDOW), size);

	ret = simple_read_from_buffer(to, count, ppos, buf, size);
	kfree(buf);
	return ret;
}

static const struct file_operations debug_window_fops = {
	.open = simple_open,
	.read = debug_window_read,
	.llseek = no_llseek,
};

/* debugfs "probe_points" read: list currently connected probe points. */
static ssize_t probe_points_read(struct file *file, char __user *to, size_t count, loff_t *ppos)
{
	struct avs_dev *adev = file->private_data;
	struct avs_probe_point_desc *desc;
	size_t num_desc, len = 0;
	char *buf;
	int i, ret;

	/* Prevent chaining, send and dump IPC value just once. */
	if (*ppos)
		return 0;

	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = avs_ipc_probe_get_points(adev, &desc, &num_desc);
	if (ret) {
		ret = AVS_IPC_RET(ret);
		goto exit;
	}

	for (i = 0; i < num_desc; i++) {
		ret = snprintf(buf + len, PAGE_SIZE - len,
			       "Id: %#010x  Purpose: %d  Node id: %#x\n",
			       desc[i].id.value, desc[i].purpose, desc[i].node_id.val);
		if (ret < 0)
			goto free_desc;
		len += ret;
	}

	ret = simple_read_from_buffer(to, count, ppos, buf, len);
free_desc:
	kfree(desc);
exit:
	kfree(buf);
	return ret;
}

/*
 * debugfs "probe_points" write: connect probe points. Input is a list of
 * integers forming whole avs_probe_point_desc records.
 */
static ssize_t probe_points_write(struct file *file, const char __user *from, size_t count,
				  loff_t *ppos)
{
	struct avs_dev *adev = file->private_data;
	struct avs_probe_point_desc *desc;
	u32 *array, num_elems;
	size_t bytes;
	int ret;

	ret = parse_int_array_user(from, count, (int **)&array);
	if (ret < 0)
		return ret;

	/* array[0] holds the element count; payload starts at array[1]. */
	num_elems = *array;
	bytes = sizeof(*array) * num_elems;
	if (bytes % sizeof(*desc)) {
		ret = -EINVAL;
		goto exit;
	}

	desc = (struct avs_probe_point_desc *)&array[1];
	ret = avs_ipc_probe_connect_points(adev, desc, bytes / sizeof(*desc));
	if (ret)
		ret = AVS_IPC_RET(ret);
	else
		ret = count;
exit:
	kfree(array);
	return ret;
}

static const struct file_operations probe_points_fops = {
	.open = simple_open,
	.read = probe_points_read,
	.write = probe_points_write,
	.llseek = no_llseek,
};

/* debugfs "probe_points_disconnect": disconnect probe points by id list. */
static ssize_t probe_points_disconnect_write(struct file *file, const char __user *from,
					     size_t count, loff_t *ppos)
{
	struct avs_dev *adev = file->private_data;
	union avs_probe_point_id *id;
	u32 *array, num_elems;
	size_t bytes;
	int ret;

	ret = parse_int_array_user(from, count, (int **)&array);
	if (ret < 0)
		return ret;

	num_elems = *array;
	bytes = sizeof(*array) * num_elems;
	if (bytes % sizeof(*id)) {
		ret = -EINVAL;
		goto exit;
	}

	id = (union avs_probe_point_id *)&array[1];
	ret = avs_ipc_probe_disconnect_points(adev, id, bytes / sizeof(*id));
	if (ret)
		ret = AVS_IPC_RET(ret);
	else
		ret = count;
exit:
	kfree(array);
	return ret;
}

static const struct file_operations probe_points_disconnect_fops = {
	.open = simple_open,
	.write = probe_points_disconnect_write,
	.llseek = default_llseek,
};

/* debugfs "strace" read: block until firmware log data arrives, then copy it out. */
static ssize_t strace_read(struct file *file, char __user *to, size_t count, loff_t *ppos)
{
	struct avs_dev *adev = file->private_data;
	struct kfifo *fifo = &adev->trace_fifo;
	unsigned int copied;

	if (kfifo_is_empty(fifo)) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&adev->trace_waitq, &wait, TASK_INTERRUPTIBLE);
		if (!signal_pending(current))
			schedule();
		finish_wait(&adev->trace_waitq, &wait);
	}

	if (kfifo_to_user(fifo, to, count, &copied))
		return -EFAULT;

	*ppos += copied;
	return copied;
}

/* Allocate the trace fifo; only a single reader is permitted at a time. */
static int strace_open(struct inode *inode, struct file *file)
{
	struct avs_dev *adev = inode->i_private;
	int ret;

	if (kfifo_initialized(&adev->trace_fifo))
		return -EBUSY;

	ret = kfifo_alloc(&adev->trace_fifo, PAGE_SIZE, GFP_KERNEL);
	if (ret < 0)
		return ret;

	file->private_data = adev;
	return 0;
}

/* Flush any remaining per-core logs and free the trace fifo. */
static int strace_release(struct inode *inode, struct file *file)
{
	union avs_notify_msg msg = AVS_NOTIFICATION(LOG_BUFFER_STATUS);
	struct avs_dev *adev = file->private_data;
	unsigned long resource_mask;
	unsigned long flags, i;
	u32 num_cores;

	resource_mask = adev->logged_resources;
	num_cores = adev->hw_cfg.dsp_cores;

	/*
	 * NOTE(review): kfifo_free() allocates-frees under a spinlock with
	 * IRQs disabled here - confirm this is intended/safe in this tree.
	 */
	spin_lock_irqsave(&adev->trace_lock, flags);

	/* Gather any remaining logs. */
	for_each_set_bit(i, &resource_mask, num_cores) {
		msg.log.core = i;
		avs_dsp_op(adev, log_buffer_status, &msg);
	}

	kfifo_free(&adev->trace_fifo);

	spin_unlock_irqrestore(&adev->trace_lock, flags);

	return 0;
}

static const struct file_operations strace_fops = {
	.llseek = default_llseek,
	.read = strace_read,
	.open = strace_open,
	.release = strace_release,
};

#define DISABLE_TIMERS	UINT_MAX

/*
 * Enable firmware logging for @resource_mask with per-core @priorities.
 * The first enabled resource pins the device in D0i0 for the duration.
 */
static int enable_logs(struct avs_dev *adev, u32 resource_mask, u32 *priorities)
{
	int ret;

	/* Logging demands D0i0 state from DSP. */
	if (!adev->logged_resources) {
		pm_runtime_get_sync(adev->dev);

		ret = avs_dsp_disable_d0ix(adev);
		if (ret)
			goto err_d0ix;
	}

	ret = avs_ipc_set_system_time(adev);
	if (ret && ret != AVS_IPC_NOT_SUPPORTED) {
		ret = AVS_IPC_RET(ret);
		goto err_ipc;
	}

	ret = avs_dsp_op(adev, enable_logs, AVS_LOG_ENABLE, adev->aging_timer_period,
			 adev->fifo_full_timer_period, resource_mask, priorities);
	if (ret)
		goto err_ipc;

	adev->logged_resources |= resource_mask;
	return 0;

err_ipc:
	if (!adev->logged_resources) {
		avs_dsp_enable_d0ix(adev);
err_d0ix:
		pm_runtime_mark_last_busy(adev->dev);
		pm_runtime_put_autosuspend(adev->dev);
	}

	return ret;
}

/* Disable logging for @resource_mask; the last resource re-allows D3. */
static int disable_logs(struct avs_dev *adev, u32 resource_mask)
{
	int ret;

	/* Check if there's anything to do. */
	if (!adev->logged_resources)
		return 0;

	ret = avs_dsp_op(adev, enable_logs, AVS_LOG_DISABLE, DISABLE_TIMERS, DISABLE_TIMERS,
			 resource_mask, NULL);

	/*
	 * If IPC fails causing recovery, logged_resources is already zero
	 * so unsetting bits is still safe.
	 */
	adev->logged_resources &= ~resource_mask;
	/* If that's the last resource, allow for D3. */
	if (!adev->logged_resources) {
		avs_dsp_enable_d0ix(adev);
		pm_runtime_mark_last_busy(adev->dev);
		pm_runtime_put_autosuspend(adev->dev);
	}

	return ret;
}

/* debugfs "trace_control" read: show mask of currently logged resources. */
static ssize_t trace_control_read(struct file *file, char __user *to, size_t count, loff_t *ppos)
{
	struct avs_dev *adev = file->private_data;
	char buf[64];
	int len;

	len = snprintf(buf, sizeof(buf), "0x%08x\n", adev->logged_resources);

	return simple_read_from_buffer(to, count, ppos, buf, len);
}

/* debugfs "trace_control" write: enable/disable logging per the input mask. */
static ssize_t trace_control_write(struct file *file, const char __user *from, size_t count,
				   loff_t *ppos)
{
	struct avs_dev *adev = file->private_data;
	u32 *array, num_elems;
	u32 resource_mask;
	int ret;

	ret = parse_int_array_user(from, count, (int **)&array);
	if (ret < 0)
		return ret;

	num_elems = *array;
	/*
	 * NOTE(review): array[1] is read before num_elems is validated -
	 * confirm parse_int_array_user() guarantees at least one element,
	 * otherwise empty input reads past the allocation.
	 */
	resource_mask = array[1];

	/*
	 * Disable if just resource mask is provided - no log priority flags.
	 *
	 * Enable input format: mask, prio1, .., prioN
	 * Where 'N' equals number of bits set in the 'mask'.
	 */
	if (num_elems == 1) {
		ret = disable_logs(adev, resource_mask);
	} else {
		if (num_elems != (hweight_long(resource_mask) + 1)) {
			ret = -EINVAL;
			goto free_array;
		}
		ret = enable_logs(adev, resource_mask, &array[2]);
	}

	if (!ret)
		ret = count;
free_array:
	kfree(array);
	return ret;
}

static const struct file_operations trace_control_fops = {
	.llseek = default_llseek,
	.read = trace_control_read,
	.write = trace_control_write,
	.open = simple_open,
};

/* Create the "avs" debugfs tree with all trace/probe/register entries. */
void avs_debugfs_init(struct avs_dev *adev)
{
	init_waitqueue_head(&adev->trace_waitq);
	spin_lock_init(&adev->trace_lock);

	adev->debugfs_root = debugfs_create_dir("avs", snd_soc_debugfs_root);

	/* Initialize timer periods with recommended defaults. */
	adev->aging_timer_period = 10;
	adev->fifo_full_timer_period = 10;

	debugfs_create_file("strace", 0444, adev->debugfs_root, adev, &strace_fops);
	debugfs_create_file("trace_control", 0644, adev->debugfs_root, adev, &trace_control_fops);
	debugfs_create_file("fw_regs", 0444, adev->debugfs_root, adev, &fw_regs_fops);
	debugfs_create_file("debug_window", 0444, adev->debugfs_root, adev, &debug_window_fops);

	debugfs_create_u32("trace_aging_period", 0644, adev->debugfs_root,
			   &adev->aging_timer_period);
	debugfs_create_u32("trace_fifo_full_period", 0644, adev->debugfs_root,
			   &adev->fifo_full_timer_period);

	debugfs_create_file("probe_points", 0644, adev->debugfs_root, adev, &probe_points_fops);
	debugfs_create_file("probe_points_disconnect", 0200, adev->debugfs_root, adev,
			    &probe_points_disconnect_fops);
}

/* Remove the entire "avs" debugfs tree. */
void avs_debugfs_exit(struct avs_dev *adev)
{
	debugfs_remove_recursive(adev->debugfs_root);
}
linux-master
sound/soc/intel/avs/debugfs.c
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <[email protected]>
//          Amadeusz Slawinski <[email protected]>
//

#include <linux/firmware.h>
#include <linux/uuid.h>
#include <sound/soc.h>
#include <sound/soc-acpi.h>
#include <sound/soc-topology.h>
#include <uapi/sound/intel/avs/tokens.h>
#include "avs.h"
#include "control.h"
#include "topology.h"

/* Get pointer to vendor array at the specified offset. */
#define avs_tplg_vendor_array_at(array, offset) \
	((struct snd_soc_tplg_vendor_array *)((u8 *)array + offset))

/* Get pointer to vendor array that is next in line. */
#define avs_tplg_vendor_array_next(array) \
	(avs_tplg_vendor_array_at(array, le32_to_cpu((array)->size)))

/*
 * Scan provided block of tuples for the specified token. If found,
 * @offset is updated with position at which first matching token is
 * located.
 *
 * Returns 0 on success, -ENOENT if not found and error code otherwise.
 *
 * NOTE(review): only the first value element of each vendor array is
 * compared - presumably marker tokens are always placed first within an
 * array; confirm against topology layout.
 */
static int avs_tplg_vendor_array_lookup(struct snd_soc_tplg_vendor_array *tuples,
					u32 block_size, u32 token, u32 *offset)
{
	u32 pos = 0;

	while (block_size > 0) {
		struct snd_soc_tplg_vendor_value_elem *tuple;
		u32 tuples_size = le32_to_cpu(tuples->size);

		/* Malformed data: array claims to be larger than the block. */
		if (tuples_size > block_size)
			return -EINVAL;

		tuple = tuples->value;
		if (le32_to_cpu(tuple->token) == token) {
			*offset = pos;
			return 0;
		}

		block_size -= tuples_size;
		pos += tuples_size;
		tuples = avs_tplg_vendor_array_next(tuples);
	}

	return -ENOENT;
}

/*
 * See avs_tplg_vendor_array_lookup() for description.
 *
 * Behaves exactly like avs_tplg_vendor_lookup() but starts from the
 * next vendor array in line. Useful when searching for the finish line
 * of an arbitrary entry in a list of entries where each is composed of
 * several vendor tuples and a specific token marks the beginning of
 * a new entry block.
 */
static int avs_tplg_vendor_array_lookup_next(struct snd_soc_tplg_vendor_array *tuples,
					     u32 block_size, u32 token, u32 *offset)
{
	u32 tuples_size = le32_to_cpu(tuples->size);
	int ret;

	if (tuples_size > block_size)
		return -EINVAL;

	tuples = avs_tplg_vendor_array_next(tuples);
	block_size -= tuples_size;

	ret = avs_tplg_vendor_array_lookup(tuples, block_size, token, offset);
	if (!ret)
		/* Report position relative to the original starting array. */
		*offset += tuples_size;
	return ret;
}

/*
 * Scan provided block of tuples for the specified token which marks
 * the border of an entry block. Behavior is similar to
 * avs_tplg_vendor_array_lookup() except 0 is also returned if no
 * matching token has been found. In such case, returned @size is
 * assigned to @block_size as the entire block belongs to the current
 * entry.
 *
 * Returns 0 on success, error code otherwise.
 */
static int avs_tplg_vendor_entry_size(struct snd_soc_tplg_vendor_array *tuples,
				      u32 block_size, u32 entry_id_token, u32 *size)
{
	int ret;

	ret = avs_tplg_vendor_array_lookup_next(tuples, block_size, entry_id_token, size);
	if (ret == -ENOENT) {
		/* No next-entry marker - the remainder is the current entry. */
		*size = block_size;
		ret = 0;
	}

	return ret;
}

/*
 * Vendor tuple parsing descriptor.
 *
 * @token: vendor specific token that identifies tuple
 * @type: tuple type, one of SND_SOC_TPLG_TUPLE_TYPE_XXX
 * @offset: offset of a struct's field to initialize
 * @parse: parsing function, extracts and assigns value to object's field
 */
struct avs_tplg_token_parser {
	enum avs_tplg_token token;
	u32 type;
	u32 offset;
	int (*parse)(struct snd_soc_component *comp, void *elem, void *object, u32 offset);
};

/* Copy a UUID tuple into the guid_t field at @offset within @object. */
static int avs_parse_uuid_token(struct snd_soc_component *comp, void *elem, void *object,
				u32 offset)
{
	struct snd_soc_tplg_vendor_uuid_elem *tuple = elem;
	guid_t *val = (guid_t *)((u8 *)object + offset);

	guid_copy((guid_t *)val, (const guid_t *)&tuple->uuid);

	return 0;
}

/* Assign a value tuple to the bool field at @offset within @object. */
static int avs_parse_bool_token(struct snd_soc_component *comp, void *elem, void *object,
				u32 offset)
{
	struct snd_soc_tplg_vendor_value_elem *tuple = elem;
	bool *val = (bool *)((u8 *)object + offset);

	*val = le32_to_cpu(tuple->value);

	return 0;
}

/* Assign a value tuple to the u8 field at @offset within @object. */
static int avs_parse_byte_token(struct snd_soc_component *comp, void *elem, void *object,
				u32 offset)
{
	struct snd_soc_tplg_vendor_value_elem *tuple = elem;
	u8 *val = ((u8 *)object + offset);

	*val = le32_to_cpu(tuple->value);

	return 0;
}

/* Assign a value tuple to the u16 field at @offset within @object. */
static int avs_parse_short_token(struct snd_soc_component *comp, void *elem, void *object,
				 u32 offset)
{
	struct snd_soc_tplg_vendor_value_elem *tuple = elem;
	u16 *val = (u16 *)((u8 *)object + offset);

	*val = le32_to_cpu(tuple->value);

	return 0;
}

/* Assign a value tuple to the u32 field at @offset within @object. */
static int avs_parse_word_token(struct snd_soc_component *comp, void *elem, void *object,
				u32 offset)
{
	struct snd_soc_tplg_vendor_value_elem *tuple = elem;
	u32 *val = (u32 *)((u8 *)object + offset);

	*val = le32_to_cpu(tuple->value);

	return 0;
}

/* Copy a string tuple into the char buffer at @offset within @object. */
static int avs_parse_string_token(struct snd_soc_component *comp, void *elem, void *object,
				  u32 offset)
{
	struct snd_soc_tplg_vendor_string_elem *tuple = elem;
	char *val = (char *)((u8 *)object + offset);

	snprintf(val, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s", tuple->string);

	return 0;
}

/* Apply all matching UUID-typed parsers from @parsers to every element of @tuples. */
static int avs_parse_uuid_tokens(struct snd_soc_component *comp, void *object,
				 const struct avs_tplg_token_parser *parsers, int count,
				 struct snd_soc_tplg_vendor_array *tuples)
{
	struct snd_soc_tplg_vendor_uuid_elem *tuple;
	int ret, i, j;

	/* Parse element by element. */
	for (i = 0; i < le32_to_cpu(tuples->num_elems); i++) {
		tuple = &tuples->uuid[i];

		for (j = 0; j < count; j++) {
			/* Ignore non-UUID tokens. */
			if (parsers[j].type != SND_SOC_TPLG_TUPLE_TYPE_UUID ||
			    parsers[j].token != le32_to_cpu(tuple->token))
				continue;

			ret = parsers[j].parse(comp, tuple, object, parsers[j].offset);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Apply all matching string-typed parsers from @parsers to every element of @tuples. */
static int avs_parse_string_tokens(struct snd_soc_component *comp, void *object,
				   const struct avs_tplg_token_parser *parsers, int count,
				   struct snd_soc_tplg_vendor_array *tuples)
{
	struct snd_soc_tplg_vendor_string_elem *tuple;
	int ret, i, j;

	/* Parse element by element. */
	for (i = 0; i < le32_to_cpu(tuples->num_elems); i++) {
		tuple = &tuples->string[i];

		for (j = 0; j < count; j++) {
			/* Ignore non-string tokens. */
			if (parsers[j].type != SND_SOC_TPLG_TUPLE_TYPE_STRING ||
			    parsers[j].token != le32_to_cpu(tuple->token))
				continue;

			ret = parsers[j].parse(comp, tuple, object, parsers[j].offset);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Apply all matching integer-typed parsers (bool/byte/short/word) to @tuples. */
static int avs_parse_word_tokens(struct snd_soc_component *comp, void *object,
				 const struct avs_tplg_token_parser *parsers, int count,
				 struct snd_soc_tplg_vendor_array *tuples)
{
	struct snd_soc_tplg_vendor_value_elem *tuple;
	int ret, i, j;

	/* Parse element by element. */
	for (i = 0; i < le32_to_cpu(tuples->num_elems); i++) {
		tuple = &tuples->value[i];

		for (j = 0; j < count; j++) {
			/* Ignore non-integer tokens. */
			if (!(parsers[j].type == SND_SOC_TPLG_TUPLE_TYPE_WORD ||
			      parsers[j].type == SND_SOC_TPLG_TUPLE_TYPE_SHORT ||
			      parsers[j].type == SND_SOC_TPLG_TUPLE_TYPE_BYTE ||
			      parsers[j].type == SND_SOC_TPLG_TUPLE_TYPE_BOOL))
				continue;

			if (parsers[j].token != le32_to_cpu(tuple->token))
				continue;

			ret = parsers[j].parse(comp, tuple, object, parsers[j].offset);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * Walk @priv_size bytes worth of vendor arrays and dispatch each array
 * to the type-specific parsing routine above. Sizes are validated so a
 * malformed array cannot run past the private data block.
 */
static int avs_parse_tokens(struct snd_soc_component *comp, void *object,
			    const struct avs_tplg_token_parser *parsers, size_t count,
			    struct snd_soc_tplg_vendor_array *tuples, int priv_size)
{
	int array_size, ret;

	while (priv_size > 0) {
		array_size = le32_to_cpu(tuples->size);

		if (array_size <= 0) {
			dev_err(comp->dev, "invalid array size 0x%x\n", array_size);
			return -EINVAL;
		}

		/* Make sure there is enough data before parsing. */
		priv_size -= array_size;
		if (priv_size < 0) {
			dev_err(comp->dev, "invalid array size 0x%x\n", array_size);
			return -EINVAL;
		}

		switch (le32_to_cpu(tuples->type)) {
		case SND_SOC_TPLG_TUPLE_TYPE_UUID:
			ret = avs_parse_uuid_tokens(comp, object, parsers, count, tuples);
			break;
		case SND_SOC_TPLG_TUPLE_TYPE_STRING:
			ret = avs_parse_string_tokens(comp, object, parsers, count, tuples);
			break;
		case SND_SOC_TPLG_TUPLE_TYPE_BOOL:
		case SND_SOC_TPLG_TUPLE_TYPE_BYTE:
		case SND_SOC_TPLG_TUPLE_TYPE_SHORT:
		case SND_SOC_TPLG_TUPLE_TYPE_WORD:
			ret = avs_parse_word_tokens(comp, object, parsers, count, tuples);
			break;
		default:
			dev_err(comp->dev, "unknown token type %d\n", tuples->type);
			ret = -EINVAL;
		}

		if (ret) {
			dev_err(comp->dev, "parsing %zu tokens of %d type failed: %d\n",
				count, tuples->type, ret);
			return ret;
		}

		tuples = avs_tplg_vendor_array_next(tuples);
	}

	return 0;
}

/*
 * Generate avs_parse_<name>_ptr(): translates a tuple value into an
 * index within acomp->tplg->member[] and stores that element's address
 * in the pointer field at @offset.
 */
#define AVS_DEFINE_PTR_PARSER(name, type, member) \
static int \
avs_parse_##name##_ptr(struct snd_soc_component *comp, void *elem, void *object, u32 offset) \
{ \
	struct snd_soc_tplg_vendor_value_elem *tuple = elem; \
	struct avs_soc_component *acomp = to_avs_soc_component(comp); \
	type **val = (type **)(object + offset); \
	u32 idx; \
	\
	idx = le32_to_cpu(tuple->value); \
	if (idx >= acomp->tplg->num_##member) \
		return -EINVAL; \
	\
	*val = &acomp->tplg->member[idx]; \
	\
	return 0; \
}

AVS_DEFINE_PTR_PARSER(audio_format, struct avs_audio_format, fmts);
AVS_DEFINE_PTR_PARSER(modcfg_base, struct avs_tplg_modcfg_base, modcfgs_base);
AVS_DEFINE_PTR_PARSER(modcfg_ext, struct avs_tplg_modcfg_ext, modcfgs_ext);
AVS_DEFINE_PTR_PARSER(pplcfg, struct avs_tplg_pplcfg, pplcfgs);
AVS_DEFINE_PTR_PARSER(binding, struct avs_tplg_binding, bindings);

/*
 * For bitfield members plain offsetof() does not work, so @offset
 * carries the token id instead and the switch selects the member.
 */
static int parse_audio_format_bitfield(struct snd_soc_component *comp, void *elem, void *object,
				       u32 offset)
{
	struct snd_soc_tplg_vendor_value_elem *velem = elem;
	struct avs_audio_format *audio_format = object;

	switch (offset) {
	case AVS_TKN_AFMT_NUM_CHANNELS_U32:
		audio_format->num_channels = le32_to_cpu(velem->value);
		break;
	case AVS_TKN_AFMT_VALID_BIT_DEPTH_U32:
		audio_format->valid_bit_depth = le32_to_cpu(velem->value);
		break;
	case AVS_TKN_AFMT_SAMPLE_TYPE_U32:
		audio_format->sample_type = le32_to_cpu(velem->value);
		break;
	}

	return 0;
}

static int parse_link_formatted_string(struct snd_soc_component *comp, void *elem, void *object,
				       u32 offset)
{
	struct snd_soc_tplg_vendor_string_elem *tuple = elem;
	struct snd_soc_acpi_mach *mach = dev_get_platdata(comp->card->dev);
	char *val = (char *)((u8 *)object + offset);

	/*
	 * Dynamic naming - string formats, e.g.: ssp%d - supported only for
	 * topologies describing single device e.g.: an I2S codec on SSP0.
	 */
	if (hweight_long(mach->mach_params.i2s_link_mask) != 1)
		return avs_parse_string_token(comp, elem, object, offset);

	snprintf(val, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, tuple->string,
		 __ffs(mach->mach_params.i2s_link_mask));

	return 0;
}

/*
 * Parse a dictionary header - a single tuple carrying the entry count -
 * and allocate storage for @num_entries entries of @entry_size bytes.
 */
static int parse_dictionary_header(struct snd_soc_component *comp,
				   struct snd_soc_tplg_vendor_array *tuples,
				   void **dict, u32 *num_entries, size_t entry_size,
				   u32 num_entries_token)
{
	struct snd_soc_tplg_vendor_value_elem *tuple;

	/* Dictionary header consists of single tuple - entry count. */
	tuple = tuples->value;
	if (le32_to_cpu(tuple->token) != num_entries_token) {
		dev_err(comp->dev, "invalid dictionary header, expected: %d\n",
			num_entries_token);
		return -EINVAL;
	}

	*num_entries = le32_to_cpu(tuple->value);
	*dict = devm_kcalloc(comp->card->dev, *num_entries, entry_size, GFP_KERNEL);
	if (!*dict)
		return -ENOMEM;

	return 0;
}

/*
 * Parse @num_entries dictionary entries, each @entry_size bytes wide,
 * delimited in the tuple stream by @entry_id_token.
 */
static int parse_dictionary_entries(struct snd_soc_component *comp,
				    struct snd_soc_tplg_vendor_array *tuples, u32 block_size,
				    void *dict, u32 num_entries, size_t entry_size,
				    u32 entry_id_token,
				    const struct avs_tplg_token_parser *parsers,
				    size_t num_parsers)
{
	void *pos = dict;
	int i;

	for (i = 0; i < num_entries; i++) {
		u32 esize;
		int ret;

		ret = avs_tplg_vendor_entry_size(tuples, block_size, entry_id_token, &esize);
		if (ret)
			return ret;

		ret = avs_parse_tokens(comp, pos, parsers, num_parsers, tuples, esize);
		if (ret < 0) {
			dev_err(comp->dev, "parse entry: %d of type: %d failed: %d\n",
				i, entry_id_token, ret);
			return ret;
		}

		pos += entry_size;
		block_size -= esize;
		tuples = avs_tplg_vendor_array_at(tuples, esize);
	}

	return 0;
}

/* Parse a complete dictionary: header followed by its entries. */
static int parse_dictionary(struct snd_soc_component *comp,
			    struct snd_soc_tplg_vendor_array *tuples, u32 block_size,
			    void **dict, u32 *num_entries, size_t entry_size,
			    u32 num_entries_token, u32 entry_id_token,
			    const struct avs_tplg_token_parser *parsers, size_t num_parsers)
{
	int ret;

	ret = parse_dictionary_header(comp, tuples, dict, num_entries, entry_size,
				      num_entries_token);
	if (ret)
		return ret;

	block_size -= le32_to_cpu(tuples->size);
	/* With header parsed, move on to parsing entries. */
	tuples = avs_tplg_vendor_array_next(tuples);

	return parse_dictionary_entries(comp, tuples, block_size, *dict, *num_entries,
					entry_size, entry_id_token, parsers, num_parsers);
}

static const struct avs_tplg_token_parser library_parsers[] = {
	{
		.token = AVS_TKN_LIBRARY_NAME_STRING,
		.type = SND_SOC_TPLG_TUPLE_TYPE_STRING,
		.offset = offsetof(struct avs_tplg_library, name),
		.parse = avs_parse_string_token,
	},
};

/* Parse the manifest's library dictionary into tplg->libs. */
static int avs_tplg_parse_libraries(struct snd_soc_component *comp,
				    struct snd_soc_tplg_vendor_array *tuples, u32 block_size)
{
	struct avs_soc_component *acomp = to_avs_soc_component(comp);
	struct avs_tplg *tplg = acomp->tplg;

	return parse_dictionary(comp, tuples, block_size, (void **)&tplg->libs,
				&tplg->num_libs, sizeof(*tplg->libs),
				AVS_TKN_MANIFEST_NUM_LIBRARIES_U32, AVS_TKN_LIBRARY_ID_U32,
				library_parsers, ARRAY_SIZE(library_parsers));
}

static const struct avs_tplg_token_parser audio_format_parsers[] = {
	{
		.token = AVS_TKN_AFMT_SAMPLE_RATE_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_audio_format, sampling_freq),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_AFMT_BIT_DEPTH_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_audio_format, bit_depth),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_AFMT_CHANNEL_MAP_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_audio_format, channel_map),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_AFMT_CHANNEL_CFG_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_audio_format, channel_config),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_AFMT_INTERLEAVING_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_audio_format, interleaving),
		.parse = avs_parse_word_token,
	},
	/* Bitfield members: .offset carries the token id, not a byte offset. */
	{
		.token = AVS_TKN_AFMT_NUM_CHANNELS_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = AVS_TKN_AFMT_NUM_CHANNELS_U32,
		.parse = parse_audio_format_bitfield,
	},
	{
		.token = AVS_TKN_AFMT_VALID_BIT_DEPTH_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = AVS_TKN_AFMT_VALID_BIT_DEPTH_U32,
		.parse = parse_audio_format_bitfield,
	},
	{
		.token = AVS_TKN_AFMT_SAMPLE_TYPE_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = AVS_TKN_AFMT_SAMPLE_TYPE_U32,
		.parse = parse_audio_format_bitfield,
	},
};

/* Parse the manifest's audio format dictionary into tplg->fmts. */
static int avs_tplg_parse_audio_formats(struct snd_soc_component *comp,
					struct snd_soc_tplg_vendor_array *tuples,
					u32 block_size)
{
	struct avs_soc_component *acomp = to_avs_soc_component(comp);
	struct avs_tplg *tplg = acomp->tplg;

	return parse_dictionary(comp, tuples, block_size, (void **)&tplg->fmts,
				&tplg->num_fmts, sizeof(*tplg->fmts),
				AVS_TKN_MANIFEST_NUM_AFMTS_U32, AVS_TKN_AFMT_ID_U32,
				audio_format_parsers, ARRAY_SIZE(audio_format_parsers));
}

static const struct avs_tplg_token_parser modcfg_base_parsers[] = {
	{
		.token = AVS_TKN_MODCFG_BASE_CPC_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_base, cpc),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_MODCFG_BASE_IBS_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_base, ibs),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_MODCFG_BASE_OBS_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_base, obs),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_MODCFG_BASE_PAGES_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_base, is_pages),
		.parse = avs_parse_word_token,
	},
};

/* Parse the manifest's base module-config dictionary into tplg->modcfgs_base. */
static int avs_tplg_parse_modcfgs_base(struct snd_soc_component *comp,
				       struct snd_soc_tplg_vendor_array *tuples,
				       u32 block_size)
{
	struct avs_soc_component *acomp = to_avs_soc_component(comp);
	struct avs_tplg *tplg = acomp->tplg;

	return parse_dictionary(comp, tuples, block_size, (void **)&tplg->modcfgs_base,
				&tplg->num_modcfgs_base, sizeof(*tplg->modcfgs_base),
				AVS_TKN_MANIFEST_NUM_MODCFGS_BASE_U32, AVS_TKN_MODCFG_BASE_ID_U32,
				modcfg_base_parsers, ARRAY_SIZE(modcfg_base_parsers));
}

/* Extended module config tokens; unions keyed by module type (copier, src, aec, ...). */
static const struct avs_tplg_token_parser modcfg_ext_parsers[] = {
	{
		.token = AVS_TKN_MODCFG_EXT_TYPE_UUID,
		.type = SND_SOC_TPLG_TUPLE_TYPE_UUID,
		.offset = offsetof(struct avs_tplg_modcfg_ext, type),
		.parse = avs_parse_uuid_token,
	},
	{
		.token = AVS_TKN_MODCFG_CPR_OUT_AFMT_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, copier.out_fmt),
		.parse = avs_parse_audio_format_ptr,
	},
	{
		.token = AVS_TKN_MODCFG_CPR_FEATURE_MASK_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, copier.feature_mask),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_MODCFG_CPR_VINDEX_U8,
		.type = SND_SOC_TPLG_TUPLE_TYPE_BYTE,
		.offset = offsetof(struct avs_tplg_modcfg_ext, copier.vindex),
		.parse = avs_parse_byte_token,
	},
	{
		.token = AVS_TKN_MODCFG_CPR_DMA_TYPE_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, copier.dma_type),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_MODCFG_CPR_DMABUFF_SIZE_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, copier.dma_buffer_size),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_MODCFG_CPR_BLOB_FMT_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, copier.blob_fmt),
		.parse = avs_parse_audio_format_ptr,
	},
	{
		.token = AVS_TKN_MODCFG_MICSEL_OUT_AFMT_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, micsel.out_fmt),
		.parse = avs_parse_audio_format_ptr,
	},
	{
		.token = AVS_TKN_MODCFG_INTELWOV_CPC_LP_MODE_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, wov.cpc_lp_mode),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_MODCFG_SRC_OUT_FREQ_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, src.out_freq),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_MODCFG_MUX_REF_AFMT_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, mux.ref_fmt),
		.parse = avs_parse_audio_format_ptr,
	},
	{
		.token = AVS_TKN_MODCFG_MUX_OUT_AFMT_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, mux.out_fmt),
		.parse = avs_parse_audio_format_ptr,
	},
	{
		.token = AVS_TKN_MODCFG_AEC_REF_AFMT_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, aec.ref_fmt),
		.parse = avs_parse_audio_format_ptr,
	},
	{
		.token = AVS_TKN_MODCFG_AEC_OUT_AFMT_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, aec.out_fmt),
		.parse = avs_parse_audio_format_ptr,
	},
	{
		.token = AVS_TKN_MODCFG_AEC_CPC_LP_MODE_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, aec.cpc_lp_mode),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_MODCFG_ASRC_OUT_FREQ_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, asrc.out_freq),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_MODCFG_ASRC_MODE_U8,
		.type = SND_SOC_TPLG_TUPLE_TYPE_BYTE,
		.offset = offsetof(struct avs_tplg_modcfg_ext, asrc.mode),
		.parse = avs_parse_byte_token,
	},
	{
		.token = AVS_TKN_MODCFG_ASRC_DISABLE_JITTER_U8,
		.type = SND_SOC_TPLG_TUPLE_TYPE_BYTE,
		.offset = offsetof(struct avs_tplg_modcfg_ext, asrc.disable_jitter_buffer),
		.parse = avs_parse_byte_token,
	},
	{
		.token = AVS_TKN_MODCFG_UPDOWN_MIX_OUT_CHAN_CFG_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, updown_mix.out_channel_config),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_SELECT_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, updown_mix.coefficients_select),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_0_S32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, updown_mix.coefficients[0]),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_1_S32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, updown_mix.coefficients[1]),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_2_S32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, updown_mix.coefficients[2]),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_3_S32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, updown_mix.coefficients[3]),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_4_S32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, updown_mix.coefficients[4]),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_5_S32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, updown_mix.coefficients[5]),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_6_S32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, updown_mix.coefficients[6]),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_MODCFG_UPDOWN_MIX_COEFF_7_S32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, updown_mix.coefficients[7]),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_MODCFG_UPDOWN_MIX_CHAN_MAP_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_modcfg_ext, updown_mix.channel_map),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_MODCFG_EXT_NUM_INPUT_PINS_U16,
		.type = SND_SOC_TPLG_TUPLE_TYPE_SHORT,
		.offset = offsetof(struct avs_tplg_modcfg_ext, generic.num_input_pins),
		.parse = avs_parse_short_token,
	},
	{
		.token = AVS_TKN_MODCFG_EXT_NUM_OUTPUT_PINS_U16,
		.type = SND_SOC_TPLG_TUPLE_TYPE_SHORT,
		.offset = offsetof(struct avs_tplg_modcfg_ext, generic.num_output_pins),
		.parse = avs_parse_short_token,
	},
};

static const struct avs_tplg_token_parser pin_format_parsers[] = {
	{
		.token = AVS_TKN_PIN_FMT_INDEX_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_pin_format, pin_index),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_PIN_FMT_IOBS_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_pin_format, iobs),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_PIN_FMT_AFMT_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_pin_format, fmt),
		.parse = avs_parse_audio_format_ptr,
	},
};

/*
 * For I2S copiers on single-SSP boards, derive the gateway's port
 * instance from the machine's i2s_link_mask when topology left it zero.
 */
static void assign_copier_gtw_instance(struct snd_soc_component *comp,
				       struct avs_tplg_modcfg_ext *cfg)
{
	struct snd_soc_acpi_mach *mach;

	if (!guid_equal(&cfg->type, &AVS_COPIER_MOD_UUID))
		return;

	/* Only I2S boards assign port instance in ->i2s_link_mask. */
	switch (cfg->copier.dma_type) {
	case AVS_DMA_I2S_LINK_OUTPUT:
	case AVS_DMA_I2S_LINK_INPUT:
		break;
	default:
		return;
	}

	mach = dev_get_platdata(comp->card->dev);

	/* Automatic assignment only when board describes single SSP. */
	if (hweight_long(mach->mach_params.i2s_link_mask) == 1 &&
	    !cfg->copier.vindex.i2s.instance)
		cfg->copier.vindex.i2s.instance = __ffs(mach->mach_params.i2s_link_mask);
}

/* Parse one extended module config entry, optionally followed by pin formats. */
static int avs_tplg_parse_modcfg_ext(struct snd_soc_component *comp,
				     struct avs_tplg_modcfg_ext *cfg,
				     struct snd_soc_tplg_vendor_array *tuples,
				     u32 block_size)
{
	u32 esize;
	int ret;

	/* See where pin block starts. */
	ret = avs_tplg_vendor_entry_size(tuples, block_size,
					 AVS_TKN_PIN_FMT_INDEX_U32, &esize);
	if (ret)
		return ret;

	ret = avs_parse_tokens(comp, cfg, modcfg_ext_parsers,
			       ARRAY_SIZE(modcfg_ext_parsers), tuples, esize);
	if (ret)
		return ret;

	/* Update copier gateway based on board's i2s_link_mask. */
	assign_copier_gtw_instance(comp, cfg);

	block_size -= esize;
	/* Parse trailing in/out pin formats if any.
	 */
	if (block_size) {
		struct avs_tplg_pin_format *pins;
		u32 num_pins;

		num_pins = cfg->generic.num_input_pins + cfg->generic.num_output_pins;
		if (!num_pins)
			return -EINVAL;

		pins = devm_kcalloc(comp->card->dev, num_pins, sizeof(*pins), GFP_KERNEL);
		if (!pins)
			return -ENOMEM;

		tuples = avs_tplg_vendor_array_at(tuples, esize);
		ret = parse_dictionary_entries(comp, tuples, block_size,
					       pins, num_pins, sizeof(*pins),
					       AVS_TKN_PIN_FMT_INDEX_U32,
					       pin_format_parsers,
					       ARRAY_SIZE(pin_format_parsers));
		if (ret)
			return ret;
		cfg->generic.pin_fmts = pins;
	}

	return 0;
}

/*
 * Parse the extended module-config dictionary. Entries may carry
 * trailing pin formats, so each is handed to avs_tplg_parse_modcfg_ext()
 * rather than the generic dictionary-entry walker.
 */
static int avs_tplg_parse_modcfgs_ext(struct snd_soc_component *comp,
				      struct snd_soc_tplg_vendor_array *tuples,
				      u32 block_size)
{
	struct avs_soc_component *acomp = to_avs_soc_component(comp);
	struct avs_tplg *tplg = acomp->tplg;
	int ret, i;

	ret = parse_dictionary_header(comp, tuples, (void **)&tplg->modcfgs_ext,
				      &tplg->num_modcfgs_ext,
				      sizeof(*tplg->modcfgs_ext),
				      AVS_TKN_MANIFEST_NUM_MODCFGS_EXT_U32);
	if (ret)
		return ret;

	block_size -= le32_to_cpu(tuples->size);
	/* With header parsed, move on to parsing entries. */
	tuples = avs_tplg_vendor_array_next(tuples);

	for (i = 0; i < tplg->num_modcfgs_ext; i++) {
		struct avs_tplg_modcfg_ext *cfg = &tplg->modcfgs_ext[i];
		u32 esize;

		ret = avs_tplg_vendor_entry_size(tuples, block_size,
						 AVS_TKN_MODCFG_EXT_ID_U32, &esize);
		if (ret)
			return ret;

		ret = avs_tplg_parse_modcfg_ext(comp, cfg, tuples, esize);
		if (ret)
			return ret;

		block_size -= esize;
		tuples = avs_tplg_vendor_array_at(tuples, esize);
	}

	return 0;
}

static const struct avs_tplg_token_parser pplcfg_parsers[] = {
	{
		.token = AVS_TKN_PPLCFG_REQ_SIZE_U16,
		.type = SND_SOC_TPLG_TUPLE_TYPE_SHORT,
		.offset = offsetof(struct avs_tplg_pplcfg, req_size),
		.parse = avs_parse_short_token,
	},
	{
		.token = AVS_TKN_PPLCFG_PRIORITY_U8,
		.type = SND_SOC_TPLG_TUPLE_TYPE_BYTE,
		.offset = offsetof(struct avs_tplg_pplcfg, priority),
		.parse = avs_parse_byte_token,
	},
	{
		.token = AVS_TKN_PPLCFG_LOW_POWER_BOOL,
		.type = SND_SOC_TPLG_TUPLE_TYPE_BOOL,
		.offset = offsetof(struct avs_tplg_pplcfg, lp),
		.parse = avs_parse_bool_token,
	},
	{
		.token = AVS_TKN_PPLCFG_ATTRIBUTES_U16,
		.type = SND_SOC_TPLG_TUPLE_TYPE_SHORT,
		.offset = offsetof(struct avs_tplg_pplcfg, attributes),
		.parse = avs_parse_short_token,
	},
	{
		.token = AVS_TKN_PPLCFG_TRIGGER_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_pplcfg, trigger),
		.parse = avs_parse_word_token,
	},
};

/* Parse the pipeline-config dictionary into tplg->pplcfgs. */
static int avs_tplg_parse_pplcfgs(struct snd_soc_component *comp,
				  struct snd_soc_tplg_vendor_array *tuples, u32 block_size)
{
	struct avs_soc_component *acomp = to_avs_soc_component(comp);
	struct avs_tplg *tplg = acomp->tplg;

	return parse_dictionary(comp, tuples, block_size, (void **)&tplg->pplcfgs,
				&tplg->num_pplcfgs, sizeof(*tplg->pplcfgs),
				AVS_TKN_MANIFEST_NUM_PPLCFGS_U32, AVS_TKN_PPLCFG_ID_U32,
				pplcfg_parsers, ARRAY_SIZE(pplcfg_parsers));
}

static const struct avs_tplg_token_parser binding_parsers[] = {
	{
		.token = AVS_TKN_BINDING_TARGET_TPLG_NAME_STRING,
		.type = SND_SOC_TPLG_TUPLE_TYPE_STRING,
		.offset = offsetof(struct avs_tplg_binding, target_tplg_name),
		.parse = parse_link_formatted_string,
	},
	{
		.token = AVS_TKN_BINDING_TARGET_PATH_TMPL_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_binding, target_path_tmpl_id),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_BINDING_TARGET_PPL_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_binding, target_ppl_id),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_BINDING_TARGET_MOD_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_binding, target_mod_id),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_BINDING_TARGET_MOD_PIN_U8,
		.type = SND_SOC_TPLG_TUPLE_TYPE_BYTE,
		.offset = offsetof(struct avs_tplg_binding, target_mod_pin),
		.parse = avs_parse_byte_token,
	},
	{
		.token = AVS_TKN_BINDING_MOD_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_binding, mod_id),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_BINDING_MOD_PIN_U8,
		.type = SND_SOC_TPLG_TUPLE_TYPE_BYTE,
		.offset = offsetof(struct avs_tplg_binding, mod_pin),
		.parse = avs_parse_byte_token,
	},
	{
		.token = AVS_TKN_BINDING_IS_SINK_U8,
		.type = SND_SOC_TPLG_TUPLE_TYPE_BYTE,
		.offset = offsetof(struct avs_tplg_binding, is_sink),
		.parse = avs_parse_byte_token,
	},
};

/* Parse the binding dictionary into tplg->bindings. */
static int avs_tplg_parse_bindings(struct snd_soc_component *comp,
				   struct snd_soc_tplg_vendor_array *tuples, u32 block_size)
{
	struct avs_soc_component *acomp = to_avs_soc_component(comp);
	struct avs_tplg *tplg = acomp->tplg;

	return parse_dictionary(comp, tuples, block_size, (void **)&tplg->bindings,
				&tplg->num_bindings, sizeof(*tplg->bindings),
				AVS_TKN_MANIFEST_NUM_BINDINGS_U32, AVS_TKN_BINDING_ID_U32,
				binding_parsers, ARRAY_SIZE(binding_parsers));
}

static const struct avs_tplg_token_parser module_parsers[] = {
	{
		.token = AVS_TKN_MOD_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_module, id),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_MOD_MODCFG_BASE_ID_U32,
/* (continuation of module_parsers[]: maps vendor-tuple tokens onto struct avs_tplg_module fields) */
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_module, cfg_base),
		.parse = avs_parse_modcfg_base_ptr,
	},
	{
		.token = AVS_TKN_MOD_IN_AFMT_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_module, in_fmt),
		.parse = avs_parse_audio_format_ptr,
	},
	{
		.token = AVS_TKN_MOD_CORE_ID_U8,
		.type = SND_SOC_TPLG_TUPLE_TYPE_BYTE,
		.offset = offsetof(struct avs_tplg_module, core_id),
		.parse = avs_parse_byte_token,
	},
	{
		.token = AVS_TKN_MOD_PROC_DOMAIN_U8,
		.type = SND_SOC_TPLG_TUPLE_TYPE_BYTE,
		.offset = offsetof(struct avs_tplg_module, domain),
		.parse = avs_parse_byte_token,
	},
	{
		.token = AVS_TKN_MOD_MODCFG_EXT_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_module, cfg_ext),
		.parse = avs_parse_modcfg_ext_ptr,
	},
	{
		.token = AVS_TKN_MOD_KCONTROL_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_module, ctl_id),
		/* NOTE(review): WORD-typed token parsed with avs_parse_byte_token — confirm ctl_id width/intent. */
		.parse = avs_parse_byte_token,
	},
};

/*
 * Allocate a module node (devm-managed), fill it from the token block and
 * attach it to @owner pipeline. Returns ERR_PTR() on failure.
 */
static struct avs_tplg_module *
avs_tplg_module_create(struct snd_soc_component *comp, struct avs_tplg_pipeline *owner,
		       struct snd_soc_tplg_vendor_array *tuples, u32 block_size)
{
	struct avs_tplg_module *module;
	int ret;

	module = devm_kzalloc(comp->card->dev, sizeof(*module), GFP_KERNEL);
	if (!module)
		return ERR_PTR(-ENOMEM);

	ret = avs_parse_tokens(comp, module, module_parsers, ARRAY_SIZE(module_parsers),
			       tuples, block_size);
	if (ret < 0)
		return ERR_PTR(ret);

	module->owner = owner;
	INIT_LIST_HEAD(&module->node);

	return module;
}

/* Maps pipeline-header tokens onto struct avs_tplg_pipeline fields. */
static const struct avs_tplg_token_parser pipeline_parsers[] = {
	{
		.token = AVS_TKN_PPL_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_pipeline, id),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_PPL_PPLCFG_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_pipeline, cfg),
		.parse = avs_parse_pplcfg_ptr,
	},
	{
		.token = AVS_TKN_PPL_NUM_BINDING_IDS_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_pipeline, num_bindings),
		.parse = avs_parse_word_token,
	},
};

static const struct avs_tplg_token_parser bindings_parsers[] = {
	{
		.token = AVS_TKN_PPL_BINDING_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = 0, /* to treat pipeline->bindings as dictionary */
		.parse = avs_parse_binding_ptr,
	},
};

/*
 * Parse one pipeline block: header tokens, then one or more module entries,
 * optionally followed by binding entries. Layout within the block:
 *   [pipeline header][module][module]...[binding]...
 * Returns ERR_PTR() on any parse/allocation failure.
 */
static struct avs_tplg_pipeline *
avs_tplg_pipeline_create(struct snd_soc_component *comp, struct avs_tplg_path *owner,
			 struct snd_soc_tplg_vendor_array *tuples, u32 block_size)
{
	struct avs_tplg_pipeline *pipeline;
	u32 modblk_size, offset;
	int ret;

	pipeline = devm_kzalloc(comp->card->dev, sizeof(*pipeline), GFP_KERNEL);
	if (!pipeline)
		return ERR_PTR(-ENOMEM);

	pipeline->owner = owner;
	INIT_LIST_HEAD(&pipeline->mod_list);

	/* Pipeline header MUST be followed by at least one module. */
	ret = avs_tplg_vendor_array_lookup(tuples, block_size, AVS_TKN_MOD_ID_U32, &offset);
	if (!ret && !offset)
		ret = -EINVAL;
	if (ret)
		return ERR_PTR(ret);

	/* Process header which precedes module sections. */
	ret = avs_parse_tokens(comp, pipeline, pipeline_parsers, ARRAY_SIZE(pipeline_parsers),
			       tuples, offset);
	if (ret < 0)
		return ERR_PTR(ret);

	block_size -= offset;
	tuples = avs_tplg_vendor_array_at(tuples, offset);

	/* Optionally, binding sections follow module ones. */
	ret = avs_tplg_vendor_array_lookup_next(tuples, block_size, AVS_TKN_PPL_BINDING_ID_U32,
						&offset);
	if (ret) {
		if (ret != -ENOENT)
			return ERR_PTR(ret);

		/* Does header information match actual block layout? */
		if (pipeline->num_bindings)
			return ERR_PTR(-EINVAL);

		modblk_size = block_size;
	} else {
		pipeline->bindings = devm_kcalloc(comp->card->dev, pipeline->num_bindings,
						  sizeof(*pipeline->bindings), GFP_KERNEL);
		if (!pipeline->bindings)
			return ERR_PTR(-ENOMEM);

		/* Bindings start at @offset; modules occupy everything before. */
		modblk_size = offset;
	}

	block_size -= modblk_size;

	do {
		struct avs_tplg_module *module;
		u32 esize;

		ret = avs_tplg_vendor_entry_size(tuples, modblk_size, AVS_TKN_MOD_ID_U32, &esize);
		if (ret)
			return ERR_PTR(ret);

		module = avs_tplg_module_create(comp, pipeline, tuples, esize);
		if (IS_ERR(module)) {
			dev_err(comp->dev, "parse module failed: %ld\n", PTR_ERR(module));
			return ERR_CAST(module);
		}

		list_add_tail(&module->node, &pipeline->mod_list);
		modblk_size -= esize;
		tuples = avs_tplg_vendor_array_at(tuples, esize);
	} while (modblk_size > 0);

	/* What's left is optional range of bindings. */
	ret = parse_dictionary_entries(comp, tuples, block_size, pipeline->bindings,
				       pipeline->num_bindings, sizeof(*pipeline->bindings),
				       AVS_TKN_PPL_BINDING_ID_U32,
				       bindings_parsers, ARRAY_SIZE(bindings_parsers));
	if (ret)
		return ERR_PTR(ret);

	return pipeline;
}

/* Maps path-variant header tokens onto struct avs_tplg_path fields. */
static const struct avs_tplg_token_parser path_parsers[] = {
	{
		.token = AVS_TKN_PATH_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_path, id),
		.parse = avs_parse_word_token,
	},
	{
		.token = AVS_TKN_PATH_FE_FMT_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_path, fe_fmt),
		.parse = avs_parse_audio_format_ptr,
	},
	{
		.token = AVS_TKN_PATH_BE_FMT_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_path, be_fmt),
		.parse = avs_parse_audio_format_ptr,
	},
};

/*
 * Parse one path variant: header tokens followed by zero or more pipeline
 * blocks. @parsers/@num_parsers select the header token table so templates
 * with differing headers can share this routine. Returns ERR_PTR() on error.
 */
static struct avs_tplg_path *
avs_tplg_path_create(struct snd_soc_component *comp, struct avs_tplg_path_template *owner,
		     struct snd_soc_tplg_vendor_array *tuples, u32 block_size,
		     const struct avs_tplg_token_parser *parsers, u32 num_parsers)
{
	struct avs_tplg_pipeline *pipeline;
	struct avs_tplg_path *path;
	u32 offset;
	int ret;

	path =
devm_kzalloc(comp->card->dev, sizeof(*path), GFP_KERNEL);
	if (!path)
		return ERR_PTR(-ENOMEM);

	path->owner = owner;
	INIT_LIST_HEAD(&path->ppl_list);
	INIT_LIST_HEAD(&path->node);

	/* Path header MAY be followed by one or more pipelines. */
	ret = avs_tplg_vendor_array_lookup(tuples, block_size, AVS_TKN_PPL_ID_U32, &offset);
	if (ret == -ENOENT)
		offset = block_size; /* no pipelines: the whole block is header */
	else if (ret)
		return ERR_PTR(ret);
	else if (!offset)
		return ERR_PTR(-EINVAL);

	/* Process header which precedes pipeline sections. */
	ret = avs_parse_tokens(comp, path, parsers, num_parsers, tuples, offset);
	if (ret < 0)
		return ERR_PTR(ret);

	block_size -= offset;
	tuples = avs_tplg_vendor_array_at(tuples, offset);

	while (block_size > 0) {
		u32 esize;

		ret = avs_tplg_vendor_entry_size(tuples, block_size, AVS_TKN_PPL_ID_U32, &esize);
		if (ret)
			return ERR_PTR(ret);

		pipeline = avs_tplg_pipeline_create(comp, path, tuples, esize);
		if (IS_ERR(pipeline)) {
			dev_err(comp->dev, "parse pipeline failed: %ld\n", PTR_ERR(pipeline));
			return ERR_CAST(pipeline);
		}

		list_add_tail(&pipeline->node, &path->ppl_list);
		block_size -= esize;
		tuples = avs_tplg_vendor_array_at(tuples, esize);
	}

	return path;
}

static const struct avs_tplg_token_parser path_tmpl_parsers[] = {
	{
		.token = AVS_TKN_PATH_TMPL_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg_path_template, id),
		.parse = avs_parse_word_token,
	},
};

/*
 * Parse a path-template block: template header tokens followed by at least
 * one path variant; each parsed variant is appended to @template->path_list.
 * Returns 0 on success, negative errno otherwise.
 */
static int parse_path_template(struct snd_soc_component *comp,
			       struct snd_soc_tplg_vendor_array *tuples, u32 block_size,
			       struct avs_tplg_path_template *template,
			       const struct avs_tplg_token_parser *tmpl_tokens,
			       u32 num_tmpl_tokens,
			       const struct avs_tplg_token_parser *path_tokens,
			       u32 num_path_tokens)
{
	struct avs_tplg_path *path;
	u32 offset;
	int ret;

	/* Path template header MUST be followed by at least one path variant. */
	ret = avs_tplg_vendor_array_lookup(tuples, block_size, AVS_TKN_PATH_ID_U32, &offset);
	if (ret)
		return ret;

	/* Process header which precedes path variants sections. */
	ret = avs_parse_tokens(comp, template, tmpl_tokens, num_tmpl_tokens, tuples, offset);
	if (ret < 0)
		return ret;

	block_size -= offset;
	tuples = avs_tplg_vendor_array_at(tuples, offset);
	do {
		u32 esize;

		ret = avs_tplg_vendor_entry_size(tuples, block_size, AVS_TKN_PATH_ID_U32, &esize);
		if (ret)
			return ret;

		path = avs_tplg_path_create(comp, template, tuples, esize, path_tokens,
					    num_path_tokens);
		if (IS_ERR(path)) {
			dev_err(comp->dev, "parse path failed: %ld\n", PTR_ERR(path));
			return PTR_ERR(path);
		}

		list_add_tail(&path->node, &template->path_list);
		block_size -= esize;
		tuples = avs_tplg_vendor_array_at(tuples, esize);
	} while (block_size > 0);

	return 0;
}

/*
 * Allocate a path template (devm-managed) and populate it from the widget's
 * private token block. Returns ERR_PTR() on failure.
 */
static struct avs_tplg_path_template *
avs_tplg_path_template_create(struct snd_soc_component *comp, struct avs_tplg *owner,
			      struct snd_soc_tplg_vendor_array *tuples, u32 block_size)
{
	struct avs_tplg_path_template *template;
	int ret;

	template = devm_kzalloc(comp->card->dev, sizeof(*template), GFP_KERNEL);
	if (!template)
		return ERR_PTR(-ENOMEM);

	template->owner = owner; /* Used to access component tplg is assigned to. */
	INIT_LIST_HEAD(&template->path_list);
	INIT_LIST_HEAD(&template->node);

	ret = parse_path_template(comp, tuples, block_size, template, path_tmpl_parsers,
				  ARRAY_SIZE(path_tmpl_parsers), path_parsers,
				  ARRAY_SIZE(path_parsers));
	if (ret)
		return ERR_PTR(ret);

	return template;
}

/*
 * soc-topology route-load callback: when exactly one I2S link is in use,
 * expand the "%d"-style route names in place with the link number.
 */
static int avs_route_load(struct snd_soc_component *comp, int index,
			  struct snd_soc_dapm_route *route)
{
	struct snd_soc_acpi_mach *mach = dev_get_platdata(comp->card->dev);
	size_t len = SNDRV_CTL_ELEM_ID_NAME_MAXLEN;
	char buf[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
	u32 port;

	/* See parse_link_formatted_string() for dynamic naming when(s).
*/
	if (hweight_long(mach->mach_params.i2s_link_mask) == 1) {
		port = __ffs(mach->mach_params.i2s_link_mask);

		/* route->source/sink/control act as printf formats here. */
		snprintf(buf, len, route->source, port);
		strscpy((char *)route->source, buf, len);
		snprintf(buf, len, route->sink, port);
		strscpy((char *)route->sink, buf, len);
		if (route->control) {
			snprintf(buf, len, route->control, port);
			strscpy((char *)route->control, buf, len);
		}
	}

	return 0;
}

/*
 * soc-topology widget-load callback: parse the widget's private data into a
 * path template and link it to the widget via w->priv.
 */
static int avs_widget_load(struct snd_soc_component *comp, int index,
			   struct snd_soc_dapm_widget *w, struct snd_soc_tplg_dapm_widget *dw)
{
	struct snd_soc_acpi_mach *mach;
	struct avs_tplg_path_template *template;
	struct avs_soc_component *acomp = to_avs_soc_component(comp);
	struct avs_tplg *tplg;

	/* Nothing to parse for widgets without private data. */
	if (!le32_to_cpu(dw->priv.size))
		return 0;

	if (w->ignore_suspend && !AVS_S0IX_SUPPORTED) {
		dev_info_once(comp->dev, "Device does not support S0IX, check BIOS settings\n");
		w->ignore_suspend = false;
	}

	tplg = acomp->tplg;
	mach = dev_get_platdata(comp->card->dev);

	/* See parse_link_formatted_string() for dynamic naming when(s). */
	if (hweight_long(mach->mach_params.i2s_link_mask) == 1) {
		kfree(w->name);
		/* w->name is freed later by soc_tplg_dapm_widget_create() */
		w->name = kasprintf(GFP_KERNEL, dw->name,
				    __ffs(mach->mach_params.i2s_link_mask));
		if (!w->name)
			return -ENOMEM;
	}

	template = avs_tplg_path_template_create(comp, tplg, dw->priv.array,
						 le32_to_cpu(dw->priv.size));
	if (IS_ERR(template)) {
		dev_err(comp->dev, "widget %s load failed: %ld\n", dw->name,
			PTR_ERR(template));
		return PTR_ERR(template);
	}

	w->priv = template; /* link path information to widget */
	list_add_tail(&template->node, &tplg->path_tmpl_list);
	return 0;
}

/* soc-topology widget-ready callback: back-link the widget into its template. */
static int avs_widget_ready(struct snd_soc_component *comp, int index,
			    struct snd_soc_dapm_widget *w, struct snd_soc_tplg_dapm_widget *dw)
{
	struct avs_tplg_path_template *template = w->priv;

	template->w = w;
	return 0;
}

/* soc-topology dai-load callback: FE DAIs (pcm != NULL) get the FE op table. */
static int avs_dai_load(struct snd_soc_component *comp, int index,
			struct snd_soc_dai_driver *dai_drv, struct snd_soc_tplg_pcm *pcm,
			struct snd_soc_dai *dai)
{
	if (pcm)
		dai_drv->ops = &avs_dai_fe_ops;
	return 0;
}

/* soc-topology link-load callback: adjust BE link flags and trigger ordering. */
static int avs_link_load(struct snd_soc_component *comp, int index,
			 struct snd_soc_dai_link *link, struct snd_soc_tplg_link_config *cfg)
{
	if (link->ignore_suspend && !AVS_S0IX_SUPPORTED) {
		dev_info_once(comp->dev, "Device does not support S0IX, check BIOS settings\n");
		link->ignore_suspend = false;
	}

	if (!link->no_pcm) {
		/* Stream control handled by IPCs. */
		link->nonatomic = true;

		/* Open LINK (BE) pipes last and close them first to prevent xruns.
*/
		link->trigger[0] = SND_SOC_DPCM_TRIGGER_PRE;
		link->trigger[1] = SND_SOC_DPCM_TRIGGER_PRE;
	}

	return 0;
}

/* Maps manifest-header tokens onto struct avs_tplg fields. */
static const struct avs_tplg_token_parser manifest_parsers[] = {
	{
		.token = AVS_TKN_MANIFEST_NAME_STRING,
		.type = SND_SOC_TPLG_TUPLE_TYPE_STRING,
		.offset = offsetof(struct avs_tplg, name),
		.parse = parse_link_formatted_string,
	},
	{
		.token = AVS_TKN_MANIFEST_VERSION_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_tplg, version),
		.parse = avs_parse_word_token,
	},
};

/*
 * soc-topology manifest callback. The manifest is a header followed by
 * fixed-order dictionaries: libraries, audio formats, modcfgs-base,
 * modcfgs-ext, pplcfgs, bindings. Each lookup below locates the marker of
 * the NEXT dictionary, which bounds the size of the CURRENT one being
 * parsed — hence parse calls appear one step "behind" the lookups.
 */
static int avs_manifest(struct snd_soc_component *comp, int index,
			struct snd_soc_tplg_manifest *manifest)
{
	struct snd_soc_tplg_vendor_array *tuples = manifest->priv.array;
	struct avs_soc_component *acomp = to_avs_soc_component(comp);
	size_t remaining = le32_to_cpu(manifest->priv.size);
	u32 offset;
	int ret;

	ret = avs_tplg_vendor_array_lookup(tuples, remaining,
					   AVS_TKN_MANIFEST_NUM_LIBRARIES_U32, &offset);
	/* Manifest MUST begin with a header. */
	if (!ret && !offset)
		ret = -EINVAL;
	if (ret) {
		dev_err(comp->dev, "incorrect manifest format: %d\n", ret);
		return ret;
	}

	/* Process header which precedes any of the dictionaries. */
	ret = avs_parse_tokens(comp, acomp->tplg, manifest_parsers,
			       ARRAY_SIZE(manifest_parsers), tuples, offset);
	if (ret < 0)
		return ret;

	remaining -= offset;
	tuples = avs_tplg_vendor_array_at(tuples, offset);

	ret = avs_tplg_vendor_array_lookup(tuples, remaining,
					   AVS_TKN_MANIFEST_NUM_AFMTS_U32, &offset);
	if (ret) {
		dev_err(comp->dev, "audio formats lookup failed: %d\n", ret);
		return ret;
	}

	/* Libraries dictionary. */
	ret = avs_tplg_parse_libraries(comp, tuples, offset);
	if (ret < 0)
		return ret;

	remaining -= offset;
	tuples = avs_tplg_vendor_array_at(tuples, offset);

	ret = avs_tplg_vendor_array_lookup(tuples, remaining,
					   AVS_TKN_MANIFEST_NUM_MODCFGS_BASE_U32, &offset);
	if (ret) {
		dev_err(comp->dev, "modcfgs_base lookup failed: %d\n", ret);
		return ret;
	}

	/* Audio formats dictionary. */
	ret = avs_tplg_parse_audio_formats(comp, tuples, offset);
	if (ret < 0)
		return ret;

	remaining -= offset;
	tuples = avs_tplg_vendor_array_at(tuples, offset);

	ret = avs_tplg_vendor_array_lookup(tuples, remaining,
					   AVS_TKN_MANIFEST_NUM_MODCFGS_EXT_U32, &offset);
	if (ret) {
		dev_err(comp->dev, "modcfgs_ext lookup failed: %d\n", ret);
		return ret;
	}

	/* Module configs-base dictionary. */
	ret = avs_tplg_parse_modcfgs_base(comp, tuples, offset);
	if (ret < 0)
		return ret;

	remaining -= offset;
	tuples = avs_tplg_vendor_array_at(tuples, offset);

	ret = avs_tplg_vendor_array_lookup(tuples, remaining,
					   AVS_TKN_MANIFEST_NUM_PPLCFGS_U32, &offset);
	if (ret) {
		dev_err(comp->dev, "pplcfgs lookup failed: %d\n", ret);
		return ret;
	}

	/* Module configs-ext dictionary. */
	ret = avs_tplg_parse_modcfgs_ext(comp, tuples, offset);
	if (ret < 0)
		return ret;

	remaining -= offset;
	tuples = avs_tplg_vendor_array_at(tuples, offset);

	ret = avs_tplg_vendor_array_lookup(tuples, remaining,
					   AVS_TKN_MANIFEST_NUM_BINDINGS_U32, &offset);
	if (ret) {
		dev_err(comp->dev, "bindings lookup failed: %d\n", ret);
		return ret;
	}

	/* Pipeline configs dictionary. */
	ret = avs_tplg_parse_pplcfgs(comp, tuples, offset);
	if (ret < 0)
		return ret;

	remaining -= offset;
	tuples = avs_tplg_vendor_array_at(tuples, offset);

	/* Bindings dictionary.
*/
	return avs_tplg_parse_bindings(comp, tuples, remaining);
}

#define AVS_CONTROL_OPS_VOLUME	257

static const struct snd_soc_tplg_kcontrol_ops avs_control_ops[] = {
	{
		.id = AVS_CONTROL_OPS_VOLUME,
		.get = avs_control_volume_get,
		.put = avs_control_volume_put,
	},
};

static const struct avs_tplg_token_parser control_parsers[] = {
	{
		.token = AVS_TKN_KCONTROL_ID_U32,
		.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
		.offset = offsetof(struct avs_control_data, id),
		.parse = avs_parse_word_token,
	},
};

/*
 * soc-topology control-load callback: only MIXER controls are supported.
 * Allocates per-control data parsed from the control's private tuples and
 * stashes it in the mixer's dobj for later use by the volume get/put ops.
 */
static int
avs_control_load(struct snd_soc_component *comp, int index, struct snd_kcontrol_new *ctmpl,
		 struct snd_soc_tplg_ctl_hdr *hdr)
{
	struct snd_soc_tplg_vendor_array *tuples;
	struct snd_soc_tplg_mixer_control *tmc;
	struct avs_control_data *ctl_data;
	struct soc_mixer_control *mc;
	size_t block_size;
	int ret;

	switch (le32_to_cpu(hdr->type)) {
	case SND_SOC_TPLG_TYPE_MIXER:
		tmc = container_of(hdr, typeof(*tmc), hdr);
		tuples = tmc->priv.array;
		block_size = le32_to_cpu(tmc->priv.size);
		break;
	default:
		return -EINVAL;
	}

	ctl_data = devm_kzalloc(comp->card->dev, sizeof(*ctl_data), GFP_KERNEL);
	if (!ctl_data)
		return -ENOMEM;

	ret = parse_dictionary_entries(comp, tuples, block_size, ctl_data, 1, sizeof(*ctl_data),
				       AVS_TKN_KCONTROL_ID_U32, control_parsers,
				       ARRAY_SIZE(control_parsers));
	if (ret)
		return ret;

	mc = (struct soc_mixer_control *)ctmpl->private_value;
	mc->dobj.private = ctl_data;
	return 0;
}

static struct snd_soc_tplg_ops avs_tplg_ops = {
	.io_ops			= avs_control_ops,
	.io_ops_count		= ARRAY_SIZE(avs_control_ops),
	.control_load		= avs_control_load,
	.dapm_route_load	= avs_route_load,
	.widget_load		= avs_widget_load,
	.widget_ready		= avs_widget_ready,
	.dai_load		= avs_dai_load,
	.link_load		= avs_link_load,
	.manifest		= avs_manifest,
};

/* Allocate an empty, devm-managed topology descriptor bound to @comp. */
struct avs_tplg *avs_tplg_new(struct snd_soc_component *comp)
{
	struct avs_tplg *tplg;

	tplg = devm_kzalloc(comp->card->dev, sizeof(*tplg), GFP_KERNEL);
	if (!tplg)
		return NULL;

	tplg->comp = comp;
	INIT_LIST_HEAD(&tplg->path_tmpl_list);

	return tplg;
}

/*
 * Fetch topology firmware @filename and feed it to the soc-topology core
 * with AVS callbacks. Firmware is released regardless of the load result.
 */
int avs_load_topology(struct snd_soc_component *comp, const char *filename)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, filename, comp->dev);
	if (ret < 0) {
		dev_err(comp->dev, "request topology \"%s\" failed: %d\n", filename, ret);
		return ret;
	}

	ret = snd_soc_tplg_component_load(comp, &avs_tplg_ops, fw);
	if (ret < 0)
		dev_err(comp->dev, "load topology \"%s\" failed: %d\n", filename, ret);

	release_firmware(fw);
	return ret;
}

/* Undo snd_soc_tplg_component_load(); always succeeds. */
int avs_remove_topology(struct snd_soc_component *comp)
{
	snd_soc_tplg_component_remove(comp);

	return 0;
}
linux-master
sound/soc/intel/avs/topology.c
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <[email protected]>
//          Amadeusz Slawinski <[email protected]>
//

#include <linux/debugfs.h>
#include <linux/device.h>
#include <sound/hda_register.h>
#include <sound/hdaudio_ext.h>
#include <sound/pcm_params.h>
#include <sound/soc-acpi.h>
#include <sound/soc-acpi-intel-match.h>
#include <sound/soc-component.h>
#include "avs.h"
#include "path.h"
#include "topology.h"

/* Per-(DAI, substream) state, attached via snd_soc_dai_set_dma_data(). */
struct avs_dma_data {
	struct avs_tplg_path_template *template;	/* topology path this stream instantiates */
	struct avs_path *path;				/* live DSP path, NULL until hw_params */
	/*
	 * link stream is stored within substream's runtime
	 * private_data to fulfill the needs of codec BE path
	 *
	 * host stream assigned
	 */
	struct hdac_ext_stream *host_stream;

	struct snd_pcm_substream *substream;
};

/*
 * Walk one DAPM edge away from the DAI widget to find the widget carrying
 * the path-template data (set by topology's widget_load). Direction of the
 * edge depends on FE/BE role and playback/capture. Returns NULL when the
 * DAI widget has no connected edge.
 */
static struct avs_tplg_path_template *
avs_dai_find_path_template(struct snd_soc_dai *dai, bool is_fe, int direction)
{
	struct snd_soc_dapm_widget *dw = snd_soc_dai_get_widget(dai, direction);
	struct snd_soc_dapm_path *dp;
	enum snd_soc_dapm_direction dir;

	if (direction == SNDRV_PCM_STREAM_CAPTURE) {
		dir = is_fe ? SND_SOC_DAPM_DIR_OUT : SND_SOC_DAPM_DIR_IN;
	} else {
		dir = is_fe ? SND_SOC_DAPM_DIR_IN : SND_SOC_DAPM_DIR_OUT;
	}

	dp = list_first_entry_or_null(&dw->edges[dir], typeof(*dp), list_node[dir]);
	if (!dp)
		return NULL;

	/* Get the other widget, with actual path template data */
	dw = (dp->source == dw) ?
dp->sink : dp->source;

	return dw->priv;
}

/*
 * Common startup for FE and BE DAIs: resolve the path template and attach a
 * fresh avs_dma_data to the (dai, substream) pair.
 * NOTE(review): @ops is unused in this body — presumably consumed by callers
 * or a later revision; confirm before removing.
 */
static int avs_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai,
			   bool is_fe, const struct snd_soc_dai_ops *ops)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct avs_dev *adev = to_avs_dev(dai->dev);
	struct avs_tplg_path_template *template;
	struct avs_dma_data *data;

	template = avs_dai_find_path_template(dai, is_fe, substream->stream);
	if (!template) {
		dev_err(dai->dev, "no %s path for dai %s, invalid tplg?\n",
			snd_pcm_stream_str(substream), dai->name);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->substream = substream;
	data->template = template;
	snd_soc_dai_set_dma_data(dai, substream, data);

	if (rtd->dai_link->ignore_suspend)
		adev->num_lp_paths++;

	return 0;
}

/*
 * Instantiate the DSP path for this stream from FE and BE hw_params.
 * On success the created path is stored in data->path.
 */
static int avs_dai_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *fe_hw_params,
			     struct snd_pcm_hw_params *be_hw_params, struct snd_soc_dai *dai,
			     int dma_id)
{
	struct avs_dma_data *data;
	struct avs_path *path;
	struct avs_dev *adev = to_avs_dev(dai->dev);
	int ret;

	data = snd_soc_dai_get_dma_data(dai, substream);

	dev_dbg(dai->dev, "%s FE hw_params str %p rtd %p", __func__, substream,
		substream->runtime);
	dev_dbg(dai->dev, "rate %d chn %d vbd %d bd %d\n", params_rate(fe_hw_params),
		params_channels(fe_hw_params), params_width(fe_hw_params),
		params_physical_width(fe_hw_params));

	dev_dbg(dai->dev, "%s BE hw_params str %p rtd %p", __func__, substream,
		substream->runtime);
	dev_dbg(dai->dev, "rate %d chn %d vbd %d bd %d\n", params_rate(be_hw_params),
		params_channels(be_hw_params), params_width(be_hw_params),
		params_physical_width(be_hw_params));

	path = avs_path_create(adev, dma_id, data->template, fe_hw_params, be_hw_params);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		dev_err(dai->dev, "create path failed: %d\n", ret);
		return ret;
	}

	data->path = path;
	return 0;
}

/*
 * BE-side hw_params: fetch FE params from the connected front-end, then
 * defer to avs_dai_hw_params().
 */
static int avs_dai_be_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params
*be_hw_params, struct snd_soc_dai *dai, int dma_id)
{
	struct snd_pcm_hw_params *fe_hw_params = NULL;
	struct snd_soc_pcm_runtime *fe, *be;
	struct snd_soc_dpcm *dpcm;

	be = asoc_substream_to_rtd(substream);
	/* Loop keeps the LAST connected FE's params; hedge: single-FE assumed. */
	for_each_dpcm_fe(be, substream->stream, dpcm) {
		fe = dpcm->fe;
		fe_hw_params = &fe->dpcm[substream->stream].hw_params;
	}

	return avs_dai_hw_params(substream, fe_hw_params, be_hw_params, dai, dma_id);
}

/*
 * Common prepare: bring the path into a known state (reset then pause) so a
 * following trigger can RUN it. No-op when no path was created yet.
 */
static int avs_dai_prepare(struct avs_dev *adev, struct snd_pcm_substream *substream,
			   struct snd_soc_dai *dai)
{
	struct avs_dma_data *data;
	int ret;

	data = snd_soc_dai_get_dma_data(dai, substream);
	if (!data->path)
		return 0;

	ret = avs_path_reset(data->path);
	if (ret < 0) {
		dev_err(dai->dev, "reset path failed: %d\n", ret);
		return ret;
	}

	ret = avs_path_pause(data->path);
	if (ret < 0)
		dev_err(dai->dev, "pause path failed: %d\n", ret);

	return ret;
}

static const struct snd_soc_dai_ops avs_dai_nonhda_be_ops;

static int avs_dai_nonhda_be_startup(struct snd_pcm_substream *substream,
				     struct snd_soc_dai *dai)
{
	return avs_dai_startup(substream, dai, false, &avs_dai_nonhda_be_ops);
}

/* Tear down per-stream data; decrement low-power path count when applicable. */
static void avs_dai_nonhda_be_shutdown(struct snd_pcm_substream *substream,
				       struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct avs_dev *adev = to_avs_dev(dai->dev);
	struct avs_dma_data *data;

	if (rtd->dai_link->ignore_suspend)
		adev->num_lp_paths--;

	data = snd_soc_dai_get_dma_data(dai, substream);

	snd_soc_dai_set_dma_data(dai, substream, NULL);
	kfree(data);
}

static int avs_dai_nonhda_be_hw_params(struct snd_pcm_substream *substream,
				       struct snd_pcm_hw_params *hw_params,
				       struct snd_soc_dai *dai)
{
	struct avs_dma_data *data;

	data = snd_soc_dai_get_dma_data(dai, substream);
	/* Already instantiated (e.g. repeated hw_params call). */
	if (data->path)
		return 0;

	/* Actual port-id comes from topology. */
	return avs_dai_be_hw_params(substream, hw_params, dai, 0);
}

static int avs_dai_nonhda_be_hw_free(struct snd_pcm_substream *substream,
				     struct snd_soc_dai *dai)
{
	struct avs_dma_data *data;

	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);

	data = snd_soc_dai_get_dma_data(dai, substream);
	if (data->path) {
		avs_path_free(data->path);
		data->path = NULL;
	}

	return 0;
}

static int avs_dai_nonhda_be_prepare(struct snd_pcm_substream *substream,
				     struct snd_soc_dai *dai)
{
	return avs_dai_prepare(to_avs_dev(dai->dev), substream, dai);
}

/*
 * Trigger for non-HDA BE: START/RELEASE pauses then runs the path;
 * STOP/PUSH pauses then resets it. SUSPEND/RESUME are skipped for
 * ignore_suspend links, otherwise fall through to the STOP/START handling.
 */
static int avs_dai_nonhda_be_trigger(struct snd_pcm_substream *substream, int cmd,
				     struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct avs_dma_data *data;
	int ret = 0;

	data = snd_soc_dai_get_dma_data(dai, substream);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_RESUME:
		if (rtd->dai_link->ignore_suspend)
			break;
		fallthrough;
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		ret = avs_path_pause(data->path);
		if (ret < 0) {
			dev_err(dai->dev, "pause BE path failed: %d\n", ret);
			break;
		}

		ret = avs_path_run(data->path, AVS_TPLG_TRIGGER_AUTO);
		if (ret < 0)
			dev_err(dai->dev, "run BE path failed: %d\n", ret);
		break;

	case SNDRV_PCM_TRIGGER_SUSPEND:
		if (rtd->dai_link->ignore_suspend)
			break;
		fallthrough;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_STOP:
		ret = avs_path_pause(data->path);
		if (ret < 0)
			dev_err(dai->dev, "pause BE path failed: %d\n", ret);

		ret = avs_path_reset(data->path);
		if (ret < 0)
			dev_err(dai->dev, "reset BE path failed: %d\n", ret);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static const struct snd_soc_dai_ops avs_dai_nonhda_be_ops = {
	.startup = avs_dai_nonhda_be_startup,
	.shutdown = avs_dai_nonhda_be_shutdown,
	.hw_params = avs_dai_nonhda_be_hw_params,
	.hw_free = avs_dai_nonhda_be_hw_free,
	.prepare = avs_dai_nonhda_be_prepare,
	.trigger = avs_dai_nonhda_be_trigger,
};

static const struct snd_soc_dai_ops avs_dai_hda_be_ops;

static int
avs_dai_hda_be_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
	return avs_dai_startup(substream, dai, false, &avs_dai_hda_be_ops);
}

static void avs_dai_hda_be_shutdown(struct snd_pcm_substream *substream,
				    struct snd_soc_dai *dai)
{
	/* HDA BE teardown is identical to the non-HDA variant. */
	return avs_dai_nonhda_be_shutdown(substream, dai);
}

/*
 * HDA BE hw_params: dma_id is derived from the link stream's tag (tag - 1);
 * the link stream itself comes from runtime->private_data (see avs_dma_data).
 */
static int avs_dai_hda_be_hw_params(struct snd_pcm_substream *substream,
				    struct snd_pcm_hw_params *hw_params,
				    struct snd_soc_dai *dai)
{
	struct avs_dma_data *data;
	struct hdac_ext_stream *link_stream;

	data = snd_soc_dai_get_dma_data(dai, substream);
	if (data->path)
		return 0;

	link_stream = substream->runtime->private_data;

	return avs_dai_be_hw_params(substream, hw_params, dai,
				    hdac_stream(link_stream)->stream_tag - 1);
}

/* Free the DSP path and unmap the link <-> stream association. */
static int avs_dai_hda_be_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
	struct avs_dma_data *data;
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct hdac_ext_stream *link_stream;
	struct hdac_ext_link *link;
	struct hda_codec *codec;

	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);

	data = snd_soc_dai_get_dma_data(dai, substream);
	if (!data->path)
		return 0;

	link_stream = substream->runtime->private_data;
	link_stream->link_prepared = false;
	avs_path_free(data->path);
	data->path = NULL;

	/* clear link <-> stream mapping */
	codec = dev_to_hda_codec(asoc_rtd_to_codec(rtd, 0)->dev);
	link = snd_hdac_ext_bus_get_hlink_by_addr(&codec->bus->core, codec->core.addr);
	if (!link)
		return -EINVAL;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		snd_hdac_ext_bus_link_clear_stream_id(link,
						      hdac_stream(link_stream)->stream_tag);

	return 0;
}

/*
 * HDA BE prepare: program the link stream (decouple, reset, format setup),
 * bind its stream tag to the codec's link for playback, then run common
 * path prepare. Idempotent via link_stream->link_prepared.
 */
static int avs_dai_hda_be_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct hdac_ext_stream *link_stream = runtime->private_data;
	struct hdac_ext_link *link;
	struct hda_codec *codec;
	struct hdac_bus *bus;
	unsigned int format_val;
	int ret;

	if (link_stream->link_prepared)
		return 0;

	codec = dev_to_hda_codec(asoc_rtd_to_codec(rtd, 0)->dev);
	bus = &codec->bus->core;
	format_val = snd_hdac_calc_stream_format(runtime->rate, runtime->channels,
						 runtime->format, runtime->sample_bits, 0);

	snd_hdac_ext_stream_decouple(bus, link_stream, true);
	snd_hdac_ext_stream_reset(link_stream);
	snd_hdac_ext_stream_setup(link_stream, format_val);

	link = snd_hdac_ext_bus_get_hlink_by_addr(bus, codec->core.addr);
	if (!link)
		return -EINVAL;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		snd_hdac_ext_bus_link_set_stream_id(link,
						    hdac_stream(link_stream)->stream_tag);

	ret = avs_dai_prepare(to_avs_dev(dai->dev), substream, dai);
	if (ret)
		return ret;

	link_stream->link_prepared = true;
	return 0;
}

/*
 * HDA BE trigger: like the non-HDA variant, but also starts/clears the HDA
 * link stream around the path pause/run/reset sequence.
 */
static int avs_dai_hda_be_trigger(struct snd_pcm_substream *substream, int cmd,
				  struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct hdac_ext_stream *link_stream;
	struct avs_dma_data *data;
	int ret = 0;

	dev_dbg(dai->dev, "entry %s cmd=%d\n", __func__, cmd);

	data = snd_soc_dai_get_dma_data(dai, substream);
	link_stream = substream->runtime->private_data;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_RESUME:
		if (rtd->dai_link->ignore_suspend)
			break;
		fallthrough;
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		snd_hdac_ext_stream_start(link_stream);

		ret = avs_path_pause(data->path);
		if (ret < 0) {
			dev_err(dai->dev, "pause BE path failed: %d\n", ret);
			break;
		}

		ret = avs_path_run(data->path, AVS_TPLG_TRIGGER_AUTO);
		if (ret < 0)
			dev_err(dai->dev, "run BE path failed: %d\n", ret);
		break;

	case SNDRV_PCM_TRIGGER_SUSPEND:
		if (rtd->dai_link->ignore_suspend)
			break;
		fallthrough;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_STOP:
		ret = avs_path_pause(data->path);
		if (ret < 0)
			dev_err(dai->dev, "pause BE path failed: %d\n", ret);

		snd_hdac_ext_stream_clear(link_stream);

		ret = avs_path_reset(data->path);
		if (ret < 0)
			dev_err(dai->dev, "reset BE path failed: %d\n", ret);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static const struct snd_soc_dai_ops avs_dai_hda_be_ops = {
	.startup = avs_dai_hda_be_startup,
	.shutdown = avs_dai_hda_be_shutdown,
	.hw_params = avs_dai_hda_be_hw_params,
	.hw_free = avs_dai_hda_be_hw_free,
	.prepare = avs_dai_hda_be_prepare,
	.trigger = avs_dai_hda_be_trigger,
};

/* Sample rates supported on FE streams, enforced via hw_rates constraint. */
static const unsigned int rates[] = {
	8000, 11025, 12000, 16000,
	22050, 24000, 32000, 44100,
	48000, 64000, 88200, 96000,
	128000, 176400, 192000,
};

static const struct snd_pcm_hw_constraint_list hw_rates = {
	.count = ARRAY_SIZE(rates),
	.list = rates,
	.mask = 0,
};

const struct snd_soc_dai_ops avs_dai_fe_ops;

/*
 * FE startup: common startup, then claim a host SDxCTL stream and apply PCM
 * constraints (integer periods, bounded buffer time, supported rates).
 * On any failure past startup, frees the per-stream data.
 */
static int avs_dai_fe_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct avs_dma_data *data;
	struct avs_dev *adev = to_avs_dev(dai->dev);
	struct hdac_bus *bus = &adev->base.core;
	struct hdac_ext_stream *host_stream;
	int ret;

	ret = avs_dai_startup(substream, dai, true, &avs_dai_fe_ops);
	if (ret)
		return ret;

	data = snd_soc_dai_get_dma_data(dai, substream);

	host_stream = snd_hdac_ext_stream_assign(bus, substream, HDAC_EXT_STREAM_TYPE_HOST);
	if (!host_stream) {
		ret = -EBUSY;
		goto err;
	}

	data->host_stream = host_stream;
	ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		goto err;

	/* avoid wrap-around with wall-clock */
	ret = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME, 20,
					   178000000);
	if (ret < 0)
		goto err;

	ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_rates);
	if (ret < 0)
		goto err;

	snd_pcm_set_sync(substream);

	dev_dbg(dai->dev, "%s fe STARTUP tag %d str %p", __func__,
		hdac_stream(host_stream)->stream_tag, substream);

	return 0;

err:
	kfree(data);
	return ret;
}

/* FE shutdown: release the host stream and free per-stream data. */
static void avs_dai_fe_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct avs_dev *adev = to_avs_dev(dai->dev);
	struct avs_dma_data *data;

	if (rtd->dai_link->ignore_suspend)
		adev->num_lp_paths--;

	data = snd_soc_dai_get_dma_data(dai, substream);

	snd_soc_dai_set_dma_data(dai, substream, NULL);
	snd_hdac_ext_stream_release(data->host_stream, HDAC_EXT_STREAM_TYPE_HOST);
	kfree(data);
}

/*
 * FE hw_params: reset cached host-stream parameters, grab BE params from the
 * connected back-end, create the DSP path and bind FE <-> BE. Cleans up the
 * path and PCM pages on failure.
 */
static int avs_dai_fe_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *hw_params, struct snd_soc_dai *dai)
{
	struct snd_pcm_hw_params *be_hw_params = NULL;
	struct snd_soc_pcm_runtime *fe, *be;
	struct snd_soc_dpcm *dpcm;
	struct avs_dma_data *data;
	struct hdac_ext_stream *host_stream;
	int ret;

	data = snd_soc_dai_get_dma_data(dai, substream);
	if (data->path)
		return 0;

	host_stream = data->host_stream;

	hdac_stream(host_stream)->bufsize = 0;
	hdac_stream(host_stream)->period_bytes = 0;
	hdac_stream(host_stream)->format_val = 0;

	fe = asoc_substream_to_rtd(substream);
	/* Loop keeps the LAST connected BE's params; hedge: single-BE assumed. */
	for_each_dpcm_be(fe, substream->stream, dpcm) {
		be = dpcm->be;
		be_hw_params = &be->dpcm[substream->stream].hw_params;
	}

	ret = avs_dai_hw_params(substream, hw_params, be_hw_params, dai,
				hdac_stream(host_stream)->stream_tag - 1);
	if (ret)
		goto create_err;

	ret = avs_path_bind(data->path);
	if (ret < 0) {
		dev_err(dai->dev, "bind FE <-> BE failed: %d\n", ret);
		goto bind_err;
	}

	return 0;

bind_err:
	avs_path_free(data->path);
	data->path = NULL;
create_err:
	snd_pcm_lib_free_pages(substream);
	return ret;
}

/*
 * Core of FE hw_free: unbind and free the path, clean up the host stream.
 * Returns the unbind result (errors are logged, teardown continues anyway).
 */
static int __avs_dai_fe_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
	struct avs_dma_data *data;
	struct hdac_ext_stream *host_stream;
	int ret;

	dev_dbg(dai->dev, "%s fe HW_FREE str %p rtd %p", __func__, substream,
		substream->runtime);

	data = snd_soc_dai_get_dma_data(dai, substream);
	if (!data->path)
		return 0;

	host_stream = data->host_stream;

	ret = avs_path_unbind(data->path);
	if (ret < 0)
		dev_err(dai->dev, "unbind FE <-> BE failed: %d\n", ret);

	avs_path_free(data->path);
	data->path = NULL;
	snd_hdac_stream_cleanup(hdac_stream(host_stream));
	hdac_stream(host_stream)->prepared = false;
/* (tail of __avs_dai_fe_hw_free) */
	return ret;
}

static int avs_dai_fe_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
	int ret;

	ret = __avs_dai_fe_hw_free(substream, dai);
	snd_pcm_lib_free_pages(substream);

	return ret;
}

/*
 * FE prepare: program the host stream (decouple, reset, format, setup) and
 * run common path prepare. Idempotent via hdac_stream()->prepared.
 */
static int avs_dai_fe_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct avs_dma_data *data;
	struct avs_dev *adev = to_avs_dev(dai->dev);
	struct hdac_ext_stream *host_stream;
	struct hdac_bus *bus;
	unsigned int format_val;
	int ret;

	data = snd_soc_dai_get_dma_data(dai, substream);
	host_stream = data->host_stream;

	if (hdac_stream(host_stream)->prepared)
		return 0;

	bus = hdac_stream(host_stream)->bus;
	snd_hdac_ext_stream_decouple(bus, data->host_stream, true);
	snd_hdac_stream_reset(hdac_stream(host_stream));

	format_val = snd_hdac_calc_stream_format(runtime->rate, runtime->channels,
						 runtime->format, runtime->sample_bits, 0);

	ret = snd_hdac_stream_set_params(hdac_stream(host_stream), format_val);
	if (ret < 0)
		return ret;

	ret = snd_hdac_stream_setup(hdac_stream(host_stream));
	if (ret < 0)
		return ret;

	ret = avs_dai_prepare(adev, substream, dai);
	if (ret)
		return ret;

	hdac_stream(host_stream)->prepared = true;
	return 0;
}

/*
 * FE trigger: starts/stops the host stream under bus->reg_lock around the
 * path pause/run/reset sequence; waits for DRSM only on RESUME.
 */
static int avs_dai_fe_trigger(struct snd_pcm_substream *substream, int cmd,
			      struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct avs_dma_data *data;
	struct hdac_ext_stream *host_stream;
	struct hdac_bus *bus;
	unsigned long flags;
	int ret = 0;

	data = snd_soc_dai_get_dma_data(dai, substream);
	host_stream = data->host_stream;
	bus = hdac_stream(host_stream)->bus;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_RESUME:
		if (rtd->dai_link->ignore_suspend)
			break;
		fallthrough;
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		spin_lock_irqsave(&bus->reg_lock, flags);
		snd_hdac_stream_start(hdac_stream(host_stream));
		spin_unlock_irqrestore(&bus->reg_lock, flags);

		/* Timeout on DRSM poll shall not stop the resume so ignore the result. */
		if (cmd == SNDRV_PCM_TRIGGER_RESUME)
			snd_hdac_stream_wait_drsm(hdac_stream(host_stream));

		ret = avs_path_pause(data->path);
		if (ret < 0) {
			dev_err(dai->dev, "pause FE path failed: %d\n", ret);
			break;
		}

		ret = avs_path_run(data->path, AVS_TPLG_TRIGGER_AUTO);
		if (ret < 0)
			dev_err(dai->dev, "run FE path failed: %d\n", ret);
		break;

	case SNDRV_PCM_TRIGGER_SUSPEND:
		if (rtd->dai_link->ignore_suspend)
			break;
		fallthrough;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_STOP:
		ret = avs_path_pause(data->path);
		if (ret < 0)
			dev_err(dai->dev, "pause FE path failed: %d\n", ret);

		spin_lock_irqsave(&bus->reg_lock, flags);
		snd_hdac_stream_stop(hdac_stream(host_stream));
		spin_unlock_irqrestore(&bus->reg_lock, flags);

		ret = avs_path_reset(data->path);
		if (ret < 0)
			dev_err(dai->dev, "reset FE path failed: %d\n", ret);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

const struct snd_soc_dai_ops avs_dai_fe_ops = {
	.startup = avs_dai_fe_startup,
	.shutdown = avs_dai_fe_shutdown,
	.hw_params = avs_dai_fe_hw_params,
	.hw_free = avs_dai_fe_hw_free,
	.prepare = avs_dai_fe_prepare,
	.trigger = avs_dai_fe_trigger,
};

/* debugfs read handler: reports "<prefix>/<tplg_filename>" for the card. */
static ssize_t topology_name_read(struct file *file, char __user *user_buf, size_t count,
				  loff_t *ppos)
{
	struct snd_soc_component *component = file->private_data;
	struct snd_soc_card *card = component->card;
	struct snd_soc_acpi_mach *mach = dev_get_platdata(card->dev);
	char buf[64];
	size_t len;

	len = scnprintf(buf, sizeof(buf), "%s/%s\n", component->driver->topology_name_prefix,
			mach->tplg_filename);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static const struct file_operations topology_name_fops = {
	.open = simple_open,
	.read = topology_name_read,
	.llseek = default_llseek,
};

/*
 * Load DSP libraries listed by the topology. Wakes the parent device and
 * temporarily disables power/clock gating and L1SEN around the IPC-heavy
 * library load, restoring them afterwards.
 */
static int avs_component_load_libraries(struct avs_soc_component *acomp)
{
	struct avs_tplg *tplg = acomp->tplg;
	struct avs_dev *adev = to_avs_dev(acomp->base.dev);
	int ret;

	if (!tplg->num_libs)
		return 0;

	/* Parent device may be asleep and library loading involves
IPCs. */ ret = pm_runtime_resume_and_get(adev->dev); if (ret < 0) return ret; avs_hda_power_gating_enable(adev, false); avs_hda_clock_gating_enable(adev, false); avs_hda_l1sen_enable(adev, false); ret = avs_dsp_load_libraries(adev, tplg->libs, tplg->num_libs); avs_hda_l1sen_enable(adev, true); avs_hda_clock_gating_enable(adev, true); avs_hda_power_gating_enable(adev, true); if (!ret) ret = avs_module_info_init(adev, false); pm_runtime_mark_last_busy(adev->dev); pm_runtime_put_autosuspend(adev->dev); return ret; } static int avs_component_probe(struct snd_soc_component *component) { struct snd_soc_card *card = component->card; struct snd_soc_acpi_mach *mach; struct avs_soc_component *acomp; struct avs_dev *adev; char *filename; int ret; dev_dbg(card->dev, "probing %s card %s\n", component->name, card->name); mach = dev_get_platdata(card->dev); acomp = to_avs_soc_component(component); adev = to_avs_dev(component->dev); acomp->tplg = avs_tplg_new(component); if (!acomp->tplg) return -ENOMEM; if (!mach->tplg_filename) goto finalize; /* Load specified topology and create debugfs for it. 
*/ filename = kasprintf(GFP_KERNEL, "%s/%s", component->driver->topology_name_prefix, mach->tplg_filename); if (!filename) return -ENOMEM; ret = avs_load_topology(component, filename); kfree(filename); if (ret == -ENOENT && !strncmp(mach->tplg_filename, "hda-", 4)) { unsigned int vendor_id; if (sscanf(mach->tplg_filename, "hda-%08x-tplg.bin", &vendor_id) != 1) return ret; if (((vendor_id >> 16) & 0xFFFF) == 0x8086) mach->tplg_filename = devm_kasprintf(adev->dev, GFP_KERNEL, "hda-8086-generic-tplg.bin"); else mach->tplg_filename = devm_kasprintf(adev->dev, GFP_KERNEL, "hda-generic-tplg.bin"); filename = kasprintf(GFP_KERNEL, "%s/%s", component->driver->topology_name_prefix, mach->tplg_filename); if (!filename) return -ENOMEM; dev_info(card->dev, "trying to load fallback topology %s\n", mach->tplg_filename); ret = avs_load_topology(component, filename); kfree(filename); } if (ret < 0) return ret; ret = avs_component_load_libraries(acomp); if (ret < 0) { dev_err(card->dev, "libraries loading failed: %d\n", ret); goto err_load_libs; } finalize: debugfs_create_file("topology_name", 0444, component->debugfs_root, component, &topology_name_fops); mutex_lock(&adev->comp_list_mutex); list_add_tail(&acomp->node, &adev->comp_list); mutex_unlock(&adev->comp_list_mutex); return 0; err_load_libs: avs_remove_topology(component); return ret; } static void avs_component_remove(struct snd_soc_component *component) { struct avs_soc_component *acomp = to_avs_soc_component(component); struct snd_soc_acpi_mach *mach; struct avs_dev *adev = to_avs_dev(component->dev); int ret; mach = dev_get_platdata(component->card->dev); mutex_lock(&adev->comp_list_mutex); list_del(&acomp->node); mutex_unlock(&adev->comp_list_mutex); if (mach->tplg_filename) { ret = avs_remove_topology(component); if (ret < 0) dev_err(component->dev, "unload topology failed: %d\n", ret); } } static int avs_dai_resume_hw_params(struct snd_soc_dai *dai, struct avs_dma_data *data) { struct snd_pcm_substream *substream; 
struct snd_soc_pcm_runtime *rtd; int ret; substream = data->substream; rtd = asoc_substream_to_rtd(substream); ret = dai->driver->ops->hw_params(substream, &rtd->dpcm[substream->stream].hw_params, dai); if (ret) dev_err(dai->dev, "hw_params on resume failed: %d\n", ret); return ret; } static int avs_dai_resume_fe_prepare(struct snd_soc_dai *dai, struct avs_dma_data *data) { struct hdac_ext_stream *host_stream; struct hdac_stream *hstream; struct hdac_bus *bus; int ret; host_stream = data->host_stream; hstream = hdac_stream(host_stream); bus = hdac_stream(host_stream)->bus; /* Set DRSM before programming stream and position registers. */ snd_hdac_stream_drsm_enable(bus, true, hstream->index); ret = dai->driver->ops->prepare(data->substream, dai); if (ret) { dev_err(dai->dev, "prepare FE on resume failed: %d\n", ret); return ret; } writel(host_stream->pphcllpl, host_stream->pphc_addr + AZX_REG_PPHCLLPL); writel(host_stream->pphcllpu, host_stream->pphc_addr + AZX_REG_PPHCLLPU); writel(host_stream->pphcldpl, host_stream->pphc_addr + AZX_REG_PPHCLDPL); writel(host_stream->pphcldpu, host_stream->pphc_addr + AZX_REG_PPHCLDPU); /* As per HW spec recommendation, program LPIB and DPIB to the same value. */ snd_hdac_stream_set_lpib(hstream, hstream->lpib); snd_hdac_stream_set_dpibr(bus, hstream, hstream->lpib); return 0; } static int avs_dai_resume_be_prepare(struct snd_soc_dai *dai, struct avs_dma_data *data) { int ret; ret = dai->driver->ops->prepare(data->substream, dai); if (ret) dev_err(dai->dev, "prepare BE on resume failed: %d\n", ret); return ret; } static int avs_dai_suspend_fe_hw_free(struct snd_soc_dai *dai, struct avs_dma_data *data) { struct hdac_ext_stream *host_stream; int ret; host_stream = data->host_stream; /* Store position addresses so we can resume from them later on. 
*/ hdac_stream(host_stream)->lpib = snd_hdac_stream_get_pos_lpib(hdac_stream(host_stream)); host_stream->pphcllpl = readl(host_stream->pphc_addr + AZX_REG_PPHCLLPL); host_stream->pphcllpu = readl(host_stream->pphc_addr + AZX_REG_PPHCLLPU); host_stream->pphcldpl = readl(host_stream->pphc_addr + AZX_REG_PPHCLDPL); host_stream->pphcldpu = readl(host_stream->pphc_addr + AZX_REG_PPHCLDPU); ret = __avs_dai_fe_hw_free(data->substream, dai); if (ret < 0) dev_err(dai->dev, "hw_free FE on suspend failed: %d\n", ret); return ret; } static int avs_dai_suspend_be_hw_free(struct snd_soc_dai *dai, struct avs_dma_data *data) { int ret; ret = dai->driver->ops->hw_free(data->substream, dai); if (ret < 0) dev_err(dai->dev, "hw_free BE on suspend failed: %d\n", ret); return ret; } static int avs_component_pm_op(struct snd_soc_component *component, bool be, int (*op)(struct snd_soc_dai *, struct avs_dma_data *)) { struct snd_soc_pcm_runtime *rtd; struct avs_dma_data *data; struct snd_soc_dai *dai; int ret; for_each_component_dais(component, dai) { data = snd_soc_dai_dma_data_get_playback(dai); if (data) { rtd = asoc_substream_to_rtd(data->substream); if (rtd->dai_link->no_pcm == be && !rtd->dai_link->ignore_suspend) { ret = op(dai, data); if (ret < 0) { __snd_pcm_set_state(data->substream->runtime, SNDRV_PCM_STATE_DISCONNECTED); return ret; } } } data = snd_soc_dai_dma_data_get_capture(dai); if (data) { rtd = asoc_substream_to_rtd(data->substream); if (rtd->dai_link->no_pcm == be && !rtd->dai_link->ignore_suspend) { ret = op(dai, data); if (ret < 0) { __snd_pcm_set_state(data->substream->runtime, SNDRV_PCM_STATE_DISCONNECTED); return ret; } } } } return 0; } static int avs_component_resume_hw_params(struct snd_soc_component *component, bool be) { return avs_component_pm_op(component, be, &avs_dai_resume_hw_params); } static int avs_component_resume_prepare(struct snd_soc_component *component, bool be) { int (*prepare_cb)(struct snd_soc_dai *dai, struct avs_dma_data *data); if (be) 
prepare_cb = &avs_dai_resume_be_prepare; else prepare_cb = &avs_dai_resume_fe_prepare; return avs_component_pm_op(component, be, prepare_cb); } static int avs_component_suspend_hw_free(struct snd_soc_component *component, bool be) { int (*hw_free_cb)(struct snd_soc_dai *dai, struct avs_dma_data *data); if (be) hw_free_cb = &avs_dai_suspend_be_hw_free; else hw_free_cb = &avs_dai_suspend_fe_hw_free; return avs_component_pm_op(component, be, hw_free_cb); } static int avs_component_suspend(struct snd_soc_component *component) { int ret; /* * When freeing paths, FEs need to be first as they perform * path unbinding. */ ret = avs_component_suspend_hw_free(component, false); if (ret) return ret; return avs_component_suspend_hw_free(component, true); } static int avs_component_resume(struct snd_soc_component *component) { int ret; /* * When creating paths, FEs need to be last as they perform * path binding. */ ret = avs_component_resume_hw_params(component, true); if (ret) return ret; ret = avs_component_resume_hw_params(component, false); if (ret) return ret; /* It is expected that the LINK stream is prepared first. 
*/ ret = avs_component_resume_prepare(component, true); if (ret) return ret; return avs_component_resume_prepare(component, false); } static const struct snd_pcm_hardware avs_pcm_hardware = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_NO_PERIOD_WAKEUP, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE, .buffer_bytes_max = AZX_MAX_BUF_SIZE, .period_bytes_min = 128, .period_bytes_max = AZX_MAX_BUF_SIZE / 2, .periods_min = 2, .periods_max = AZX_MAX_FRAG, .fifo_size = 0, }; static int avs_component_open(struct snd_soc_component *component, struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); /* only FE DAI links are handled here */ if (rtd->dai_link->no_pcm) return 0; return snd_soc_set_runtime_hwparams(substream, &avs_pcm_hardware); } static unsigned int avs_hda_stream_dpib_read(struct hdac_ext_stream *stream) { return readl(hdac_stream(stream)->bus->remap_addr + AZX_REG_VS_SDXDPIB_XBASE + (AZX_REG_VS_SDXDPIB_XINTERVAL * hdac_stream(stream)->index)); } static snd_pcm_uframes_t avs_component_pointer(struct snd_soc_component *component, struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct avs_dma_data *data; struct hdac_ext_stream *host_stream; unsigned int pos; data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream); if (!data->host_stream) return 0; host_stream = data->host_stream; pos = avs_hda_stream_dpib_read(host_stream); if (pos >= hdac_stream(host_stream)->bufsize) pos = 0; return bytes_to_frames(substream->runtime, pos); } static int avs_component_mmap(struct snd_soc_component *component, struct snd_pcm_substream *substream, struct vm_area_struct *vma) { return snd_pcm_lib_default_mmap(substream, vma); } #define MAX_PREALLOC_SIZE (32 * 1024 * 1024) static int avs_component_construct(struct 
snd_soc_component *component, struct snd_soc_pcm_runtime *rtd) { struct snd_soc_dai *dai = asoc_rtd_to_cpu(rtd, 0); struct snd_pcm *pcm = rtd->pcm; if (dai->driver->playback.channels_min) snd_pcm_set_managed_buffer(pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream, SNDRV_DMA_TYPE_DEV_SG, component->dev, 0, MAX_PREALLOC_SIZE); if (dai->driver->capture.channels_min) snd_pcm_set_managed_buffer(pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream, SNDRV_DMA_TYPE_DEV_SG, component->dev, 0, MAX_PREALLOC_SIZE); return 0; } static const struct snd_soc_component_driver avs_component_driver = { .name = "avs-pcm", .probe = avs_component_probe, .remove = avs_component_remove, .suspend = avs_component_suspend, .resume = avs_component_resume, .open = avs_component_open, .pointer = avs_component_pointer, .mmap = avs_component_mmap, .pcm_construct = avs_component_construct, .module_get_upon_open = 1, /* increment refcount when a pcm is opened */ .topology_name_prefix = "intel/avs", }; int avs_soc_component_register(struct device *dev, const char *name, const struct snd_soc_component_driver *drv, struct snd_soc_dai_driver *cpu_dais, int num_cpu_dais) { struct avs_soc_component *acomp; int ret; acomp = devm_kzalloc(dev, sizeof(*acomp), GFP_KERNEL); if (!acomp) return -ENOMEM; ret = snd_soc_component_initialize(&acomp->base, drv, dev); if (ret < 0) return ret; /* force name change after ASoC is done with its init */ acomp->base.name = name; INIT_LIST_HEAD(&acomp->node); return snd_soc_add_component(&acomp->base, cpu_dais, num_cpu_dais); } static struct snd_soc_dai_driver dmic_cpu_dais[] = { { .name = "DMIC Pin", .ops = &avs_dai_nonhda_be_ops, .capture = { .stream_name = "DMIC Rx", .channels_min = 1, .channels_max = 4, .rates = SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_48000, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, }, }, { .name = "DMIC WoV Pin", .ops = &avs_dai_nonhda_be_ops, .capture = { .stream_name = "DMIC WoV Rx", .channels_min = 1, .channels_max = 4, .rates = 
SNDRV_PCM_RATE_16000, .formats = SNDRV_PCM_FMTBIT_S16_LE, }, }, }; int avs_dmic_platform_register(struct avs_dev *adev, const char *name) { return avs_soc_component_register(adev->dev, name, &avs_component_driver, dmic_cpu_dais, ARRAY_SIZE(dmic_cpu_dais)); } static const struct snd_soc_dai_driver i2s_dai_template = { .ops = &avs_dai_nonhda_be_ops, .playback = { .channels_min = 1, .channels_max = 8, .rates = SNDRV_PCM_RATE_8000_192000 | SNDRV_PCM_RATE_KNOT, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE, }, .capture = { .channels_min = 1, .channels_max = 8, .rates = SNDRV_PCM_RATE_8000_192000 | SNDRV_PCM_RATE_KNOT, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE, }, }; int avs_i2s_platform_register(struct avs_dev *adev, const char *name, unsigned long port_mask, unsigned long *tdms) { struct snd_soc_dai_driver *cpus, *dai; size_t ssp_count, cpu_count; int i, j; ssp_count = adev->hw_cfg.i2s_caps.ctrl_count; cpu_count = hweight_long(port_mask); if (tdms) for_each_set_bit(i, &port_mask, ssp_count) cpu_count += hweight_long(tdms[i]); cpus = devm_kzalloc(adev->dev, sizeof(*cpus) * cpu_count, GFP_KERNEL); if (!cpus) return -ENOMEM; dai = cpus; for_each_set_bit(i, &port_mask, ssp_count) { memcpy(dai, &i2s_dai_template, sizeof(*dai)); dai->name = devm_kasprintf(adev->dev, GFP_KERNEL, "SSP%d Pin", i); dai->playback.stream_name = devm_kasprintf(adev->dev, GFP_KERNEL, "ssp%d Tx", i); dai->capture.stream_name = devm_kasprintf(adev->dev, GFP_KERNEL, "ssp%d Rx", i); if (!dai->name || !dai->playback.stream_name || !dai->capture.stream_name) return -ENOMEM; dai++; } if (!tdms) goto plat_register; for_each_set_bit(i, &port_mask, ssp_count) { for_each_set_bit(j, &tdms[i], ssp_count) { memcpy(dai, &i2s_dai_template, sizeof(*dai)); dai->name = devm_kasprintf(adev->dev, GFP_KERNEL, "SSP%d:%d Pin", i, j); dai->playback.stream_name = devm_kasprintf(adev->dev, GFP_KERNEL, "ssp%d:%d Tx", i, j); 
dai->capture.stream_name = devm_kasprintf(adev->dev, GFP_KERNEL, "ssp%d:%d Rx", i, j); if (!dai->name || !dai->playback.stream_name || !dai->capture.stream_name) return -ENOMEM; dai++; } } plat_register: return avs_soc_component_register(adev->dev, name, &avs_component_driver, cpus, cpu_count); } /* HD-Audio CPU DAI template */ static const struct snd_soc_dai_driver hda_cpu_dai = { .ops = &avs_dai_hda_be_ops, .playback = { .channels_min = 1, .channels_max = 8, .rates = SNDRV_PCM_RATE_8000_192000, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE, }, .capture = { .channels_min = 1, .channels_max = 8, .rates = SNDRV_PCM_RATE_8000_192000, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE, }, }; static void avs_component_hda_unregister_dais(struct snd_soc_component *component) { struct snd_soc_acpi_mach *mach; struct snd_soc_dai *dai, *save; struct hda_codec *codec; char name[32]; mach = dev_get_platdata(component->card->dev); codec = mach->pdata; sprintf(name, "%s-cpu", dev_name(&codec->core.dev)); for_each_component_dais_safe(component, dai, save) { int stream; if (!strstr(dai->driver->name, name)) continue; for_each_pcm_streams(stream) snd_soc_dapm_free_widget(snd_soc_dai_get_widget(dai, stream)); snd_soc_unregister_dai(dai); } } static int avs_component_hda_probe(struct snd_soc_component *component) { struct snd_soc_dapm_context *dapm; struct snd_soc_dai_driver *dais; struct snd_soc_acpi_mach *mach; struct hda_codec *codec; struct hda_pcm *pcm; const char *cname; int pcm_count = 0, ret, i; mach = dev_get_platdata(component->card->dev); if (!mach) return -EINVAL; codec = mach->pdata; if (list_empty(&codec->pcm_list_head)) return -EINVAL; list_for_each_entry(pcm, &codec->pcm_list_head, list) pcm_count++; dais = devm_kcalloc(component->dev, pcm_count, sizeof(*dais), GFP_KERNEL); if (!dais) return -ENOMEM; cname = dev_name(&codec->core.dev); dapm = snd_soc_component_get_dapm(component); pcm = 
list_first_entry(&codec->pcm_list_head, struct hda_pcm, list); for (i = 0; i < pcm_count; i++, pcm = list_next_entry(pcm, list)) { struct snd_soc_dai *dai; memcpy(&dais[i], &hda_cpu_dai, sizeof(*dais)); dais[i].id = i; dais[i].name = devm_kasprintf(component->dev, GFP_KERNEL, "%s-cpu%d", cname, i); if (!dais[i].name) { ret = -ENOMEM; goto exit; } if (pcm->stream[0].substreams) { dais[i].playback.stream_name = devm_kasprintf(component->dev, GFP_KERNEL, "%s-cpu%d Tx", cname, i); if (!dais[i].playback.stream_name) { ret = -ENOMEM; goto exit; } } if (pcm->stream[1].substreams) { dais[i].capture.stream_name = devm_kasprintf(component->dev, GFP_KERNEL, "%s-cpu%d Rx", cname, i); if (!dais[i].capture.stream_name) { ret = -ENOMEM; goto exit; } } dai = snd_soc_register_dai(component, &dais[i], false); if (!dai) { dev_err(component->dev, "register dai for %s failed\n", pcm->name); ret = -EINVAL; goto exit; } ret = snd_soc_dapm_new_dai_widgets(dapm, dai); if (ret < 0) { dev_err(component->dev, "create widgets failed: %d\n", ret); goto exit; } } ret = avs_component_probe(component); exit: if (ret) avs_component_hda_unregister_dais(component); return ret; } static void avs_component_hda_remove(struct snd_soc_component *component) { avs_component_hda_unregister_dais(component); avs_component_remove(component); } static int avs_component_hda_open(struct snd_soc_component *component, struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct hdac_ext_stream *link_stream; struct hda_codec *codec; if (!rtd->dai_link->no_pcm) { struct snd_pcm_hardware hwparams = avs_pcm_hardware; struct snd_soc_pcm_runtime *be; struct snd_soc_dpcm *dpcm; int dir = substream->stream; /* * Support the DPCM reparenting while still fulfilling expectations of HDAudio * common code - a valid stream pointer at substream->runtime->private_data - * by having all FEs point to the same private data. 
*/ for_each_dpcm_be(rtd, dir, dpcm) { struct snd_pcm_substream *be_substream; be = dpcm->be; if (be->dpcm[dir].users == 1) break; be_substream = snd_soc_dpcm_get_substream(be, dir); substream->runtime->private_data = be_substream->runtime->private_data; break; } /* RESUME unsupported for de-coupled HD-Audio capture. */ if (dir == SNDRV_PCM_STREAM_CAPTURE) hwparams.info &= ~SNDRV_PCM_INFO_RESUME; return snd_soc_set_runtime_hwparams(substream, &hwparams); } codec = dev_to_hda_codec(asoc_rtd_to_codec(rtd, 0)->dev); link_stream = snd_hdac_ext_stream_assign(&codec->bus->core, substream, HDAC_EXT_STREAM_TYPE_LINK); if (!link_stream) return -EBUSY; substream->runtime->private_data = link_stream; return 0; } static int avs_component_hda_close(struct snd_soc_component *component, struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct hdac_ext_stream *link_stream; /* only BE DAI links are handled here */ if (!rtd->dai_link->no_pcm) return 0; link_stream = substream->runtime->private_data; snd_hdac_ext_stream_release(link_stream, HDAC_EXT_STREAM_TYPE_LINK); substream->runtime->private_data = NULL; return 0; } static const struct snd_soc_component_driver avs_hda_component_driver = { .name = "avs-hda-pcm", .probe = avs_component_hda_probe, .remove = avs_component_hda_remove, .suspend = avs_component_suspend, .resume = avs_component_resume, .open = avs_component_hda_open, .close = avs_component_hda_close, .pointer = avs_component_pointer, .mmap = avs_component_mmap, .pcm_construct = avs_component_construct, /* * hda platform component's probe() is dependent on * codec->pcm_list_head, it needs to be initialized after codec * component. 
remove_order is here for completeness sake */ .probe_order = SND_SOC_COMP_ORDER_LATE, .remove_order = SND_SOC_COMP_ORDER_EARLY, .module_get_upon_open = 1, .topology_name_prefix = "intel/avs", }; int avs_hda_platform_register(struct avs_dev *adev, const char *name) { return avs_soc_component_register(adev->dev, name, &avs_hda_component_driver, NULL, 0); }
linux-master
sound/soc/intel/avs/pcm.c
// SPDX-License-Identifier: GPL-2.0-only // // Copyright(c) 2021-2022 Intel Corporation. All rights reserved. // // Authors: Cezary Rojewski <[email protected]> // Amadeusz Slawinski <[email protected]> // #include <sound/compress_driver.h> #include <sound/hdaudio_ext.h> #include <sound/hdaudio.h> #include <sound/soc.h> #include "avs.h" #include "messages.h" static int avs_dsp_init_probe(struct avs_dev *adev, union avs_connector_node_id node_id, size_t buffer_size) { struct avs_probe_cfg cfg = {{0}}; struct avs_module_entry mentry; u8 dummy; avs_get_module_entry(adev, &AVS_PROBE_MOD_UUID, &mentry); /* * Probe module uses no cycles, audio data format and input and output * frame sizes are unused. It is also not owned by any pipeline. */ cfg.base.ibs = 1; /* BSS module descriptor is always segment of index=2. */ cfg.base.is_pages = mentry.segments[2].flags.length; cfg.gtw_cfg.node_id = node_id; cfg.gtw_cfg.dma_buffer_size = buffer_size; return avs_dsp_init_module(adev, mentry.module_id, INVALID_PIPELINE_ID, 0, 0, &cfg, sizeof(cfg), &dummy); } static void avs_dsp_delete_probe(struct avs_dev *adev) { struct avs_module_entry mentry; avs_get_module_entry(adev, &AVS_PROBE_MOD_UUID, &mentry); /* There is only ever one probe module instance. 
*/ avs_dsp_delete_module(adev, mentry.module_id, 0, INVALID_PIPELINE_ID, 0); } static inline struct hdac_ext_stream *avs_compr_get_host_stream(struct snd_compr_stream *cstream) { return cstream->runtime->private_data; } static int avs_probe_compr_open(struct snd_compr_stream *cstream, struct snd_soc_dai *dai) { struct avs_dev *adev = to_avs_dev(dai->dev); struct hdac_bus *bus = &adev->base.core; struct hdac_ext_stream *host_stream; if (adev->extractor) { dev_err(dai->dev, "Cannot open more than one extractor stream\n"); return -EEXIST; } host_stream = snd_hdac_ext_cstream_assign(bus, cstream); if (!host_stream) { dev_err(dai->dev, "Failed to assign HDAudio stream for extraction\n"); return -EBUSY; } adev->extractor = host_stream; hdac_stream(host_stream)->curr_pos = 0; cstream->runtime->private_data = host_stream; return 0; } static int avs_probe_compr_free(struct snd_compr_stream *cstream, struct snd_soc_dai *dai) { struct hdac_ext_stream *host_stream = avs_compr_get_host_stream(cstream); struct avs_dev *adev = to_avs_dev(dai->dev); struct avs_probe_point_desc *desc; /* Extractor node identifier. */ unsigned int vindex = INVALID_NODE_ID.vindex; size_t num_desc; int i, ret; /* Disconnect all probe points. 
*/ ret = avs_ipc_probe_get_points(adev, &desc, &num_desc); if (ret) { dev_err(dai->dev, "get probe points failed: %d\n", ret); ret = AVS_IPC_RET(ret); goto exit; } for (i = 0; i < num_desc; i++) if (desc[i].node_id.vindex == vindex) avs_ipc_probe_disconnect_points(adev, &desc[i].id, 1); kfree(desc); exit: if (adev->num_probe_streams) { adev->num_probe_streams--; if (!adev->num_probe_streams) { avs_dsp_delete_probe(adev); avs_dsp_enable_d0ix(adev); } } snd_hdac_stream_cleanup(hdac_stream(host_stream)); hdac_stream(host_stream)->prepared = 0; snd_hdac_ext_stream_release(host_stream, HDAC_EXT_STREAM_TYPE_HOST); snd_compr_free_pages(cstream); adev->extractor = NULL; return ret; } static int avs_probe_compr_set_params(struct snd_compr_stream *cstream, struct snd_compr_params *params, struct snd_soc_dai *dai) { struct hdac_ext_stream *host_stream = avs_compr_get_host_stream(cstream); struct snd_compr_runtime *rtd = cstream->runtime; struct avs_dev *adev = to_avs_dev(dai->dev); /* compr params do not store bit depth, default to S32_LE. */ snd_pcm_format_t format = SNDRV_PCM_FORMAT_S32_LE; unsigned int format_val; int bps, ret; hdac_stream(host_stream)->bufsize = 0; hdac_stream(host_stream)->period_bytes = 0; hdac_stream(host_stream)->format_val = 0; cstream->dma_buffer.dev.type = SNDRV_DMA_TYPE_DEV_SG; cstream->dma_buffer.dev.dev = adev->dev; ret = snd_compr_malloc_pages(cstream, rtd->buffer_size); if (ret < 0) return ret; bps = snd_pcm_format_physical_width(format); if (bps < 0) return bps; format_val = snd_hdac_calc_stream_format(params->codec.sample_rate, params->codec.ch_out, format, bps, 0); ret = snd_hdac_stream_set_params(hdac_stream(host_stream), format_val); if (ret < 0) return ret; ret = snd_hdac_stream_setup(hdac_stream(host_stream)); if (ret < 0) return ret; hdac_stream(host_stream)->prepared = 1; if (!adev->num_probe_streams) { union avs_connector_node_id node_id; /* D0ix not allowed during probing. 
*/ ret = avs_dsp_disable_d0ix(adev); if (ret) return ret; node_id.vindex = hdac_stream(host_stream)->stream_tag - 1; node_id.dma_type = AVS_DMA_HDA_HOST_INPUT; ret = avs_dsp_init_probe(adev, node_id, rtd->dma_bytes); if (ret < 0) { dev_err(dai->dev, "probe init failed: %d\n", ret); avs_dsp_enable_d0ix(adev); return ret; } } adev->num_probe_streams++; return 0; } static int avs_probe_compr_trigger(struct snd_compr_stream *cstream, int cmd, struct snd_soc_dai *dai) { struct hdac_ext_stream *host_stream = avs_compr_get_host_stream(cstream); struct avs_dev *adev = to_avs_dev(dai->dev); struct hdac_bus *bus = &adev->base.core; unsigned long cookie; if (!hdac_stream(host_stream)->prepared) return -EPIPE; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: case SNDRV_PCM_TRIGGER_RESUME: spin_lock_irqsave(&bus->reg_lock, cookie); snd_hdac_stream_start(hdac_stream(host_stream)); spin_unlock_irqrestore(&bus->reg_lock, cookie); break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_STOP: spin_lock_irqsave(&bus->reg_lock, cookie); snd_hdac_stream_stop(hdac_stream(host_stream)); spin_unlock_irqrestore(&bus->reg_lock, cookie); break; default: return -EINVAL; } return 0; } static int avs_probe_compr_pointer(struct snd_compr_stream *cstream, struct snd_compr_tstamp *tstamp, struct snd_soc_dai *dai) { struct hdac_ext_stream *host_stream = avs_compr_get_host_stream(cstream); struct snd_soc_pcm_stream *pstream; pstream = &dai->driver->capture; tstamp->copied_total = hdac_stream(host_stream)->curr_pos; tstamp->sampling_rate = snd_pcm_rate_bit_to_rate(pstream->rates); return 0; } static int avs_probe_compr_copy(struct snd_soc_component *comp, struct snd_compr_stream *cstream, char __user *buf, size_t count) { struct snd_compr_runtime *rtd = cstream->runtime; unsigned int offset, n; void *ptr; int ret; if (count > rtd->buffer_size) count = rtd->buffer_size; div_u64_rem(rtd->total_bytes_transferred, 
rtd->buffer_size, &offset); ptr = rtd->dma_area + offset; n = rtd->buffer_size - offset; if (count < n) { ret = copy_to_user(buf, ptr, count); } else { ret = copy_to_user(buf, ptr, n); ret += copy_to_user(buf + n, rtd->dma_area, count - n); } if (ret) return count - ret; return count; } static const struct snd_soc_cdai_ops avs_probe_cdai_ops = { .startup = avs_probe_compr_open, .shutdown = avs_probe_compr_free, .set_params = avs_probe_compr_set_params, .trigger = avs_probe_compr_trigger, .pointer = avs_probe_compr_pointer, }; static const struct snd_soc_dai_ops avs_probe_dai_ops = { .compress_new = snd_soc_new_compress, }; static const struct snd_compress_ops avs_probe_compress_ops = { .copy = avs_probe_compr_copy, }; static struct snd_soc_dai_driver probe_cpu_dais[] = { { .name = "Probe Extraction CPU DAI", .cops = &avs_probe_cdai_ops, .ops = &avs_probe_dai_ops, .capture = { .stream_name = "Probe Extraction", .channels_min = 1, .channels_max = 8, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, .rate_max = 48000, }, }, }; static const struct snd_soc_component_driver avs_probe_component_driver = { .name = "avs-probe-compr", .compress_ops = &avs_probe_compress_ops, .module_get_upon_open = 1, /* increment refcount when a stream is opened */ }; int avs_probe_platform_register(struct avs_dev *adev, const char *name) { return avs_soc_component_register(adev->dev, name, &avs_probe_component_driver, probe_cpu_dais, ARRAY_SIZE(probe_cpu_dais)); }
linux-master
sound/soc/intel/avs/probes.c
// SPDX-License-Identifier: GPL-2.0-only // // Copyright(c) 2021-2022 Intel Corporation. All rights reserved. // // Authors: Amadeusz Slawinski <[email protected]> // Cezary Rojewski <[email protected]> // #include <sound/soc.h> #include "avs.h" #include "control.h" #include "messages.h" #include "path.h" static struct avs_dev *avs_get_kcontrol_adev(struct snd_kcontrol *kcontrol) { struct snd_soc_dapm_widget *w; w = snd_soc_dapm_kcontrol_widget(kcontrol); return to_avs_dev(w->dapm->component->dev); } static struct avs_path_module *avs_get_volume_module(struct avs_dev *adev, u32 id) { struct avs_path *path; struct avs_path_pipeline *ppl; struct avs_path_module *mod; spin_lock(&adev->path_list_lock); list_for_each_entry(path, &adev->path_list, node) { list_for_each_entry(ppl, &path->ppl_list, node) { list_for_each_entry(mod, &ppl->mod_list, node) { if (guid_equal(&mod->template->cfg_ext->type, &AVS_PEAKVOL_MOD_UUID) && mod->template->ctl_id == id) { spin_unlock(&adev->path_list_lock); return mod; } } } } spin_unlock(&adev->path_list_lock); return NULL; } int avs_control_volume_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; struct avs_control_data *ctl_data = (struct avs_control_data *)mc->dobj.private; struct avs_dev *adev = avs_get_kcontrol_adev(kcontrol); struct avs_volume_cfg *dspvols = NULL; struct avs_path_module *active_module; size_t num_dspvols; int ret = 0; /* prevent access to modules while path is being constructed */ mutex_lock(&adev->path_mutex); active_module = avs_get_volume_module(adev, ctl_data->id); if (active_module) { ret = avs_ipc_peakvol_get_volume(adev, active_module->module_id, active_module->instance_id, &dspvols, &num_dspvols); if (!ret) ucontrol->value.integer.value[0] = dspvols[0].target_volume; ret = AVS_IPC_RET(ret); kfree(dspvols); } else { ucontrol->value.integer.value[0] = ctl_data->volume; } mutex_unlock(&adev->path_mutex); 
return ret; } int avs_control_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; struct avs_control_data *ctl_data = (struct avs_control_data *)mc->dobj.private; struct avs_dev *adev = avs_get_kcontrol_adev(kcontrol); long *volume = &ctl_data->volume; struct avs_path_module *active_module; struct avs_volume_cfg dspvol = {0}; long ctlvol = ucontrol->value.integer.value[0]; int ret = 0, changed = 0; if (ctlvol < 0 || ctlvol > mc->max) return -EINVAL; /* prevent access to modules while path is being constructed */ mutex_lock(&adev->path_mutex); if (*volume != ctlvol) { *volume = ctlvol; changed = 1; } active_module = avs_get_volume_module(adev, ctl_data->id); if (active_module) { dspvol.channel_id = AVS_ALL_CHANNELS_MASK; dspvol.target_volume = *volume; ret = avs_ipc_peakvol_set_volume(adev, active_module->module_id, active_module->instance_id, &dspvol); ret = AVS_IPC_RET(ret); } mutex_unlock(&adev->path_mutex); return ret ? ret : changed; }
linux-master
sound/soc/intel/avs/control.c
// SPDX-License-Identifier: GPL-2.0-only // // Copyright(c) 2021-2022 Intel Corporation. All rights reserved. // // Authors: Cezary Rojewski <[email protected]> // Amadeusz Slawinski <[email protected]> // // Special thanks to: // Krzysztof Hejmowski <[email protected]> // Michal Sienkiewicz <[email protected]> // Filip Proborszcz // // for sharing Intel AudioDSP expertise and helping shape the very // foundation of this driver // #include <linux/module.h> #include <linux/pci.h> #include <sound/hda_codec.h> #include <sound/hda_i915.h> #include <sound/hda_register.h> #include <sound/hdaudio.h> #include <sound/hdaudio_ext.h> #include <sound/intel-dsp-config.h> #include <sound/intel-nhlt.h> #include "../../codecs/hda.h" #include "avs.h" #include "cldma.h" static u32 pgctl_mask = AZX_PGCTL_LSRMD_MASK; module_param(pgctl_mask, uint, 0444); MODULE_PARM_DESC(pgctl_mask, "PCI PGCTL policy override"); static u32 cgctl_mask = AZX_CGCTL_MISCBDCGE_MASK; module_param(cgctl_mask, uint, 0444); MODULE_PARM_DESC(cgctl_mask, "PCI CGCTL policy override"); static void avs_hda_update_config_dword(struct hdac_bus *bus, u32 reg, u32 mask, u32 value) { struct pci_dev *pci = to_pci_dev(bus->dev); u32 data; pci_read_config_dword(pci, reg, &data); data &= ~mask; data |= (value & mask); pci_write_config_dword(pci, reg, data); } void avs_hda_power_gating_enable(struct avs_dev *adev, bool enable) { u32 value = enable ? 0 : pgctl_mask; avs_hda_update_config_dword(&adev->base.core, AZX_PCIREG_PGCTL, pgctl_mask, value); } static void avs_hdac_clock_gating_enable(struct hdac_bus *bus, bool enable) { u32 value = enable ? cgctl_mask : 0; avs_hda_update_config_dword(bus, AZX_PCIREG_CGCTL, cgctl_mask, value); } void avs_hda_clock_gating_enable(struct avs_dev *adev, bool enable) { avs_hdac_clock_gating_enable(&adev->base.core, enable); } void avs_hda_l1sen_enable(struct avs_dev *adev, bool enable) { u32 value = enable ? 
AZX_VS_EM2_L1SEN : 0; snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN, value); } static int avs_hdac_bus_init_streams(struct hdac_bus *bus) { unsigned int cp_streams, pb_streams; unsigned int gcap; gcap = snd_hdac_chip_readw(bus, GCAP); cp_streams = (gcap >> 8) & 0x0F; pb_streams = (gcap >> 12) & 0x0F; bus->num_streams = cp_streams + pb_streams; snd_hdac_ext_stream_init_all(bus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE); snd_hdac_ext_stream_init_all(bus, cp_streams, pb_streams, SNDRV_PCM_STREAM_PLAYBACK); return snd_hdac_bus_alloc_stream_pages(bus); } static bool avs_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset) { struct hdac_ext_link *hlink; bool ret; avs_hdac_clock_gating_enable(bus, false); ret = snd_hdac_bus_init_chip(bus, full_reset); /* Reset stream-to-link mapping */ list_for_each_entry(hlink, &bus->hlink_list, list) writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV); avs_hdac_clock_gating_enable(bus, true); /* Set DUM bit to address incorrect position reporting for capture * streams. In order to do so, CTRL needs to be out of reset state */ snd_hdac_chip_updatel(bus, VS_EM2, AZX_VS_EM2_DUM, AZX_VS_EM2_DUM); return ret; } static int probe_codec(struct hdac_bus *bus, int addr) { struct hda_codec *codec; unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) | (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID; unsigned int res = -1; int ret; mutex_lock(&bus->cmd_mutex); snd_hdac_bus_send_cmd(bus, cmd); snd_hdac_bus_get_response(bus, addr, &res); mutex_unlock(&bus->cmd_mutex); if (res == -1) return -EIO; dev_dbg(bus->dev, "codec #%d probed OK: 0x%x\n", addr, res); codec = snd_hda_codec_device_init(to_hda_bus(bus), addr, "hdaudioB%dD%d", bus->idx, addr); if (IS_ERR(codec)) { dev_err(bus->dev, "init codec failed: %ld\n", PTR_ERR(codec)); return PTR_ERR(codec); } /* * Allow avs_core suspend by forcing suspended state on all * of its codec child devices. 
Component interested in * dealing with hda codecs directly takes pm responsibilities */ pm_runtime_set_suspended(hda_codec_dev(codec)); /* configure effectively creates new ASoC component */ ret = snd_hda_codec_configure(codec); if (ret < 0) { dev_err(bus->dev, "failed to config codec %d\n", ret); return ret; } return 0; } static void avs_hdac_bus_probe_codecs(struct hdac_bus *bus) { int c; /* First try to probe all given codec slots */ for (c = 0; c < HDA_MAX_CODECS; c++) { if (!(bus->codec_mask & BIT(c))) continue; if (!probe_codec(bus, c)) /* success, continue probing */ continue; /* * Some BIOSen give you wrong codec addresses * that don't exist */ dev_warn(bus->dev, "Codec #%d probe error; disabling it...\n", c); bus->codec_mask &= ~BIT(c); /* * More badly, accessing to a non-existing * codec often screws up the controller bus, * and disturbs the further communications. * Thus if an error occurs during probing, * better to reset the controller bus to get * back to the sanity state. 
*/ snd_hdac_bus_stop_chip(bus); avs_hdac_bus_init_chip(bus, true); } } static void avs_hda_probe_work(struct work_struct *work) { struct avs_dev *adev = container_of(work, struct avs_dev, probe_work); struct hdac_bus *bus = &adev->base.core; struct hdac_ext_link *hlink; int ret; pm_runtime_set_active(bus->dev); /* clear runtime_error flag */ ret = snd_hdac_i915_init(bus); if (ret < 0) dev_info(bus->dev, "i915 init unsuccessful: %d\n", ret); snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true); avs_hdac_bus_init_chip(bus, true); avs_hdac_bus_probe_codecs(bus); snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false); /* with all codecs probed, links can be powered down */ list_for_each_entry(hlink, &bus->hlink_list, list) snd_hdac_ext_bus_link_put(bus, hlink); snd_hdac_ext_bus_ppcap_enable(bus, true); snd_hdac_ext_bus_ppcap_int_enable(bus, true); ret = avs_dsp_first_boot_firmware(adev); if (ret < 0) return; adev->nhlt = intel_nhlt_init(adev->dev); if (!adev->nhlt) dev_info(bus->dev, "platform has no NHLT\n"); avs_debugfs_init(adev); avs_register_all_boards(adev); /* configure PM */ pm_runtime_set_autosuspend_delay(bus->dev, 2000); pm_runtime_use_autosuspend(bus->dev); pm_runtime_mark_last_busy(bus->dev); pm_runtime_put_autosuspend(bus->dev); pm_runtime_allow(bus->dev); } static void hdac_stream_update_pos(struct hdac_stream *stream, u64 buffer_size) { u64 prev_pos, pos, num_bytes; div64_u64_rem(stream->curr_pos, buffer_size, &prev_pos); pos = snd_hdac_stream_get_pos_posbuf(stream); if (pos < prev_pos) num_bytes = (buffer_size - prev_pos) + pos; else num_bytes = pos - prev_pos; stream->curr_pos += num_bytes; } /* called from IRQ */ static void hdac_update_stream(struct hdac_bus *bus, struct hdac_stream *stream) { if (stream->substream) { snd_pcm_period_elapsed(stream->substream); } else if (stream->cstream) { u64 buffer_size = stream->cstream->runtime->buffer_size; hdac_stream_update_pos(stream, buffer_size); snd_compr_fragment_elapsed(stream->cstream); } 
} static irqreturn_t hdac_bus_irq_handler(int irq, void *context) { struct hdac_bus *bus = context; u32 mask, int_enable; u32 status; int ret = IRQ_NONE; if (!pm_runtime_active(bus->dev)) return ret; spin_lock(&bus->reg_lock); status = snd_hdac_chip_readl(bus, INTSTS); if (status == 0 || status == UINT_MAX) { spin_unlock(&bus->reg_lock); return ret; } /* clear rirb int */ status = snd_hdac_chip_readb(bus, RIRBSTS); if (status & RIRB_INT_MASK) { if (status & RIRB_INT_RESPONSE) snd_hdac_bus_update_rirb(bus); snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK); } mask = (0x1 << bus->num_streams) - 1; status = snd_hdac_chip_readl(bus, INTSTS); status &= mask; if (status) { /* Disable stream interrupts; Re-enable in bottom half */ int_enable = snd_hdac_chip_readl(bus, INTCTL); snd_hdac_chip_writel(bus, INTCTL, (int_enable & (~mask))); ret = IRQ_WAKE_THREAD; } else { ret = IRQ_HANDLED; } spin_unlock(&bus->reg_lock); return ret; } static irqreturn_t hdac_bus_irq_thread(int irq, void *context) { struct hdac_bus *bus = context; u32 status; u32 int_enable; u32 mask; unsigned long flags; status = snd_hdac_chip_readl(bus, INTSTS); snd_hdac_bus_handle_stream_irq(bus, status, hdac_update_stream); /* Re-enable stream interrupts */ mask = (0x1 << bus->num_streams) - 1; spin_lock_irqsave(&bus->reg_lock, flags); int_enable = snd_hdac_chip_readl(bus, INTCTL); snd_hdac_chip_writel(bus, INTCTL, (int_enable | mask)); spin_unlock_irqrestore(&bus->reg_lock, flags); return IRQ_HANDLED; } static int avs_hdac_acquire_irq(struct avs_dev *adev) { struct hdac_bus *bus = &adev->base.core; struct pci_dev *pci = to_pci_dev(bus->dev); int ret; /* request one and check that we only got one interrupt */ ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_LEGACY); if (ret != 1) { dev_err(adev->dev, "Failed to allocate IRQ vector: %d\n", ret); return ret; } ret = pci_request_irq(pci, 0, hdac_bus_irq_handler, hdac_bus_irq_thread, bus, KBUILD_MODNAME); if (ret < 0) { dev_err(adev->dev, "Failed to 
request stream IRQ handler: %d\n", ret); goto free_vector; } ret = pci_request_irq(pci, 0, avs_dsp_irq_handler, avs_dsp_irq_thread, adev, KBUILD_MODNAME); if (ret < 0) { dev_err(adev->dev, "Failed to request IPC IRQ handler: %d\n", ret); goto free_stream_irq; } return 0; free_stream_irq: pci_free_irq(pci, 0, bus); free_vector: pci_free_irq_vectors(pci); return ret; } static int avs_bus_init(struct avs_dev *adev, struct pci_dev *pci, const struct pci_device_id *id) { struct hda_bus *bus = &adev->base; struct avs_ipc *ipc; struct device *dev = &pci->dev; int ret; ret = snd_hdac_ext_bus_init(&bus->core, dev, NULL, &soc_hda_ext_bus_ops); if (ret < 0) return ret; bus->core.use_posbuf = 1; bus->core.bdl_pos_adj = 0; bus->core.sync_write = 1; bus->pci = pci; bus->mixer_assigned = -1; mutex_init(&bus->prepare_mutex); ipc = devm_kzalloc(dev, sizeof(*ipc), GFP_KERNEL); if (!ipc) return -ENOMEM; ret = avs_ipc_init(ipc, dev); if (ret < 0) return ret; adev->dev = dev; adev->spec = (const struct avs_spec *)id->driver_data; adev->ipc = ipc; adev->hw_cfg.dsp_cores = hweight_long(AVS_MAIN_CORE_MASK); INIT_WORK(&adev->probe_work, avs_hda_probe_work); INIT_LIST_HEAD(&adev->comp_list); INIT_LIST_HEAD(&adev->path_list); INIT_LIST_HEAD(&adev->fw_list); init_completion(&adev->fw_ready); spin_lock_init(&adev->path_list_lock); mutex_init(&adev->modres_mutex); mutex_init(&adev->comp_list_mutex); mutex_init(&adev->path_mutex); return 0; } static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) { struct hdac_bus *bus; struct avs_dev *adev; struct device *dev = &pci->dev; int ret; ret = snd_intel_dsp_driver_probe(pci); if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_AVS) return -ENODEV; ret = pcim_enable_device(pci); if (ret < 0) return ret; adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL); if (!adev) return -ENOMEM; ret = avs_bus_init(adev, pci, id); if (ret < 0) { dev_err(dev, "failed to init avs bus: %d\n", ret); return ret; } ret = 
pci_request_regions(pci, "AVS HDAudio"); if (ret < 0) return ret; bus = &adev->base.core; bus->addr = pci_resource_start(pci, 0); bus->remap_addr = pci_ioremap_bar(pci, 0); if (!bus->remap_addr) { dev_err(bus->dev, "ioremap error\n"); ret = -ENXIO; goto err_remap_bar0; } adev->dsp_ba = pci_ioremap_bar(pci, 4); if (!adev->dsp_ba) { dev_err(bus->dev, "ioremap error\n"); ret = -ENXIO; goto err_remap_bar4; } snd_hdac_bus_parse_capabilities(bus); if (bus->mlcap) snd_hdac_ext_bus_get_ml_capabilities(bus); if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); dma_set_max_seg_size(dev, UINT_MAX); ret = avs_hdac_bus_init_streams(bus); if (ret < 0) { dev_err(dev, "failed to init streams: %d\n", ret); goto err_init_streams; } ret = avs_hdac_acquire_irq(adev); if (ret < 0) { dev_err(bus->dev, "failed to acquire irq: %d\n", ret); goto err_acquire_irq; } pci_set_master(pci); pci_set_drvdata(pci, bus); device_disable_async_suspend(dev); schedule_work(&adev->probe_work); return 0; err_acquire_irq: snd_hdac_bus_free_stream_pages(bus); snd_hdac_ext_stream_free_all(bus); err_init_streams: iounmap(adev->dsp_ba); err_remap_bar4: iounmap(bus->remap_addr); err_remap_bar0: pci_release_regions(pci); return ret; } static void avs_pci_shutdown(struct pci_dev *pci) { struct hdac_bus *bus = pci_get_drvdata(pci); struct avs_dev *adev = hdac_to_avs(bus); cancel_work_sync(&adev->probe_work); avs_ipc_block(adev->ipc); snd_hdac_stop_streams(bus); avs_dsp_op(adev, int_control, false); snd_hdac_ext_bus_ppcap_int_enable(bus, false); snd_hdac_ext_bus_link_power_down_all(bus); snd_hdac_bus_stop_chip(bus); snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false); if (avs_platattr_test(adev, CLDMA)) pci_free_irq(pci, 0, &code_loader); pci_free_irq(pci, 0, adev); pci_free_irq(pci, 0, bus); pci_free_irq_vectors(pci); } static void avs_pci_remove(struct pci_dev *pci) { struct hdac_device *hdev, *save; struct hdac_bus *bus = pci_get_drvdata(pci); struct 
avs_dev *adev = hdac_to_avs(bus); cancel_work_sync(&adev->probe_work); avs_ipc_block(adev->ipc); avs_unregister_all_boards(adev); avs_debugfs_exit(adev); if (adev->nhlt) intel_nhlt_free(adev->nhlt); if (avs_platattr_test(adev, CLDMA)) hda_cldma_free(&code_loader); snd_hdac_stop_streams_and_chip(bus); avs_dsp_op(adev, int_control, false); snd_hdac_ext_bus_ppcap_int_enable(bus, false); /* it is safe to remove all codecs from the system now */ list_for_each_entry_safe(hdev, save, &bus->codec_list, list) snd_hda_codec_unregister(hdac_to_hda_codec(hdev)); snd_hdac_bus_free_stream_pages(bus); snd_hdac_ext_stream_free_all(bus); /* reverse ml_capabilities */ snd_hdac_ext_link_free_all(bus); snd_hdac_ext_bus_exit(bus); avs_dsp_core_disable(adev, GENMASK(adev->hw_cfg.dsp_cores - 1, 0)); snd_hdac_ext_bus_ppcap_enable(bus, false); /* snd_hdac_stop_streams_and_chip does that already? */ snd_hdac_bus_stop_chip(bus); snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false); if (bus->audio_component) snd_hdac_i915_exit(bus); avs_module_info_free(adev); pci_free_irq(pci, 0, adev); pci_free_irq(pci, 0, bus); pci_free_irq_vectors(pci); iounmap(bus->remap_addr); iounmap(adev->dsp_ba); pci_release_regions(pci); /* Firmware is not needed anymore */ avs_release_firmwares(adev); /* pm_runtime_forbid() can rpm_resume() which we do not want */ pm_runtime_disable(&pci->dev); pm_runtime_forbid(&pci->dev); pm_runtime_enable(&pci->dev); pm_runtime_get_noresume(&pci->dev); } static int avs_suspend_standby(struct avs_dev *adev) { struct hdac_bus *bus = &adev->base.core; struct pci_dev *pci = adev->base.pci; if (bus->cmd_dma_state) snd_hdac_bus_stop_cmd_io(bus); snd_hdac_ext_bus_link_power_down_all(bus); enable_irq_wake(pci->irq); pci_save_state(pci); return 0; } static int __maybe_unused avs_suspend_common(struct avs_dev *adev, bool low_power) { struct hdac_bus *bus = &adev->base.core; int ret; flush_work(&adev->probe_work); if (low_power && adev->num_lp_paths) return 
avs_suspend_standby(adev); snd_hdac_ext_bus_link_power_down_all(bus); ret = avs_ipc_set_dx(adev, AVS_MAIN_CORE_MASK, false); /* * pm_runtime is blocked on DSP failure but system-wide suspend is not. * Do not block entire system from suspending if that's the case. */ if (ret && ret != -EPERM) { dev_err(adev->dev, "set dx failed: %d\n", ret); return AVS_IPC_RET(ret); } avs_ipc_block(adev->ipc); avs_dsp_op(adev, int_control, false); snd_hdac_ext_bus_ppcap_int_enable(bus, false); ret = avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK); if (ret < 0) { dev_err(adev->dev, "core_mask %ld disable failed: %d\n", AVS_MAIN_CORE_MASK, ret); return ret; } snd_hdac_ext_bus_ppcap_enable(bus, false); /* disable LP SRAM retention */ avs_hda_power_gating_enable(adev, false); snd_hdac_bus_stop_chip(bus); /* disable CG when putting controller to reset */ avs_hdac_clock_gating_enable(bus, false); snd_hdac_bus_enter_link_reset(bus); avs_hdac_clock_gating_enable(bus, true); snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false); return 0; } static int avs_resume_standby(struct avs_dev *adev) { struct hdac_bus *bus = &adev->base.core; struct pci_dev *pci = adev->base.pci; pci_restore_state(pci); disable_irq_wake(pci->irq); snd_hdac_ext_bus_link_power_up_all(bus); if (bus->cmd_dma_state) snd_hdac_bus_init_cmd_io(bus); return 0; } static int __maybe_unused avs_resume_common(struct avs_dev *adev, bool low_power, bool purge) { struct hdac_bus *bus = &adev->base.core; int ret; if (low_power && adev->num_lp_paths) return avs_resume_standby(adev); snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true); avs_hdac_bus_init_chip(bus, true); snd_hdac_ext_bus_ppcap_enable(bus, true); snd_hdac_ext_bus_ppcap_int_enable(bus, true); ret = avs_dsp_boot_firmware(adev, purge); if (ret < 0) { dev_err(adev->dev, "firmware boot failed: %d\n", ret); return ret; } return 0; } static int __maybe_unused avs_suspend(struct device *dev) { return avs_suspend_common(to_avs_dev(dev), true); } static int 
__maybe_unused avs_resume(struct device *dev) { return avs_resume_common(to_avs_dev(dev), true, true); } static int __maybe_unused avs_runtime_suspend(struct device *dev) { return avs_suspend_common(to_avs_dev(dev), true); } static int __maybe_unused avs_runtime_resume(struct device *dev) { return avs_resume_common(to_avs_dev(dev), true, false); } static int __maybe_unused avs_freeze(struct device *dev) { return avs_suspend_common(to_avs_dev(dev), false); } static int __maybe_unused avs_thaw(struct device *dev) { return avs_resume_common(to_avs_dev(dev), false, true); } static int __maybe_unused avs_poweroff(struct device *dev) { return avs_suspend_common(to_avs_dev(dev), false); } static int __maybe_unused avs_restore(struct device *dev) { return avs_resume_common(to_avs_dev(dev), false, true); } static const struct dev_pm_ops avs_dev_pm = { .suspend = avs_suspend, .resume = avs_resume, .freeze = avs_freeze, .thaw = avs_thaw, .poweroff = avs_poweroff, .restore = avs_restore, SET_RUNTIME_PM_OPS(avs_runtime_suspend, avs_runtime_resume, NULL) }; static const struct avs_spec skl_desc = { .name = "skl", .min_fw_version = { .major = 9, .minor = 21, .hotfix = 0, .build = 4732, }, .dsp_ops = &skl_dsp_ops, .core_init_mask = 1, .attributes = AVS_PLATATTR_CLDMA, .sram_base_offset = SKL_ADSP_SRAM_BASE_OFFSET, .sram_window_size = SKL_ADSP_SRAM_WINDOW_SIZE, .rom_status = SKL_ADSP_SRAM_BASE_OFFSET, }; static const struct avs_spec apl_desc = { .name = "apl", .min_fw_version = { .major = 9, .minor = 22, .hotfix = 1, .build = 4323, }, .dsp_ops = &apl_dsp_ops, .core_init_mask = 3, .attributes = AVS_PLATATTR_IMR, .sram_base_offset = APL_ADSP_SRAM_BASE_OFFSET, .sram_window_size = APL_ADSP_SRAM_WINDOW_SIZE, .rom_status = APL_ADSP_SRAM_BASE_OFFSET, }; static const struct pci_device_id avs_ids[] = { { PCI_DEVICE_DATA(INTEL, HDA_SKL_LP, &skl_desc) }, { PCI_DEVICE_DATA(INTEL, HDA_SKL, &skl_desc) }, { PCI_DEVICE_DATA(INTEL, HDA_KBL_LP, &skl_desc) }, { PCI_DEVICE_DATA(INTEL, HDA_KBL, 
&skl_desc) }, { PCI_DEVICE_DATA(INTEL, HDA_KBL_H, &skl_desc) }, { PCI_DEVICE_DATA(INTEL, HDA_CML_S, &skl_desc) }, { PCI_DEVICE_DATA(INTEL, HDA_APL, &apl_desc) }, { PCI_DEVICE_DATA(INTEL, HDA_GML, &apl_desc) }, { 0 } }; MODULE_DEVICE_TABLE(pci, avs_ids); static struct pci_driver avs_pci_driver = { .name = KBUILD_MODNAME, .id_table = avs_ids, .probe = avs_pci_probe, .remove = avs_pci_remove, .shutdown = avs_pci_shutdown, .driver = { .pm = &avs_dev_pm, }, }; module_pci_driver(avs_pci_driver); MODULE_AUTHOR("Cezary Rojewski <[email protected]>"); MODULE_AUTHOR("Amadeusz Slawinski <[email protected]>"); MODULE_DESCRIPTION("Intel cAVS sound driver"); MODULE_LICENSE("GPL");
linux-master
sound/soc/intel/avs/core.c
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <[email protected]>
//          Amadeusz Slawinski <[email protected]>
//

#include <linux/firmware.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include "avs.h"
#include "messages.h"

/*
 * Linear search of the firmware-reported module table by module UUID.
 * Returns the table index or -ENOENT.
 * Caller responsible for holding adev->modres_mutex.
 */
static int avs_module_entry_index(struct avs_dev *adev, const guid_t *uuid)
{
	int i;

	for (i = 0; i < adev->mods_info->count; i++) {
		struct avs_module_entry *module;

		module = &adev->mods_info->entries[i];
		if (guid_equal(&module->uuid, uuid))
			return i;
	}

	return -ENOENT;
}

/*
 * Linear search of the module table by numeric module id.
 * Returns the table index or -ENOENT.
 * Caller responsible for holding adev->modres_mutex.
 */
static int avs_module_id_entry_index(struct avs_dev *adev, u32 module_id)
{
	int i;

	for (i = 0; i < adev->mods_info->count; i++) {
		struct avs_module_entry *module;

		module = &adev->mods_info->entries[i];
		if (module->module_id == module_id)
			return i;
	}

	return -ENOENT;
}

/*
 * Copy the module-table entry matching @uuid into @entry.
 * Returns 0 on success, -ENOENT if no such module; @entry is untouched on
 * failure. Takes modres_mutex internally.
 */
int avs_get_module_entry(struct avs_dev *adev, const guid_t *uuid, struct avs_module_entry *entry)
{
	int idx;

	mutex_lock(&adev->modres_mutex);

	idx = avs_module_entry_index(adev, uuid);
	if (idx >= 0)
		memcpy(entry, &adev->mods_info->entries[idx], sizeof(*entry));

	mutex_unlock(&adev->modres_mutex);
	return (idx < 0) ? idx : 0;
}

/*
 * Copy the module-table entry matching @module_id into @entry.
 * Same contract as avs_get_module_entry() but keyed by numeric id.
 */
int avs_get_module_id_entry(struct avs_dev *adev, u32 module_id, struct avs_module_entry *entry)
{
	int idx;

	mutex_lock(&adev->modres_mutex);

	idx = avs_module_id_entry_index(adev, module_id);
	if (idx >= 0)
		memcpy(entry, &adev->mods_info->entries[idx], sizeof(*entry));

	mutex_unlock(&adev->modres_mutex);
	return (idx < 0) ? idx : 0;
}

/*
 * Translate a module UUID into its numeric module id.
 * Returns the id on success or -ENOENT when the module is unknown.
 */
int avs_get_module_id(struct avs_dev *adev, const guid_t *uuid)
{
	struct avs_module_entry module;
	int ret;

	ret = avs_get_module_entry(adev, uuid, &module);
	return !ret ? module.module_id : -ENOENT;
}

/*
 * Report whether no instance ids are currently allocated for @module_id.
 * Unknown module ids also report false (empty == false), since idx < 0
 * leaves the initial value untouched.
 */
bool avs_is_module_ida_empty(struct avs_dev *adev, u32 module_id)
{
	bool ret = false;
	int idx;

	mutex_lock(&adev->modres_mutex);

	idx = avs_module_id_entry_index(adev, module_id);
	if (idx >= 0)
		ret = ida_is_empty(adev->mod_idas[idx]);

	mutex_unlock(&adev->modres_mutex);
	return ret;
}

/*
 * Tear down every per-module IDA and the pointer array itself.
 * Safe to call when mods_info is NULL (count treated as 0).
 * Caller responsible for holding adev->modres_mutex.
 */
static void avs_module_ida_destroy(struct avs_dev *adev)
{
	int i = adev->mods_info ? adev->mods_info->count : 0;

	while (i--) {
		ida_destroy(adev->mod_idas[i]);
		kfree(adev->mod_idas[i]);
	}
	kfree(adev->mod_idas);
}

/*
 * (Re)build the per-module IDA pointer array for a freshly received module
 * table @newinfo.
 *
 * With @purge false and an existing table present, the first oldinfo->count
 * IDA pointers are carried over (preserving live instance-id allocations)
 * and only the new tail entries get fresh IDAs. With @purge true,
 * everything is rebuilt from scratch.
 *
 * NOTE(review): on the -ENOMEM unwind, `while (i--)` also kfree()s the
 * carried-over pointers still referenced by adev->mod_idas — looks like a
 * potential use-after-free on that error path; confirm against upstream.
 *
 * Caller responsible for holding adev->modres_mutex.
 */
static int
avs_module_ida_alloc(struct avs_dev *adev, struct avs_mods_info *newinfo, bool purge)
{
	struct avs_mods_info *oldinfo = adev->mods_info;
	struct ida **ida_ptrs;
	u32 tocopy_count = 0;
	int i;

	if (!purge && oldinfo) {
		/* Shrinking refresh is unexpected; warn but proceed. */
		if (oldinfo->count >= newinfo->count)
			dev_warn(adev->dev, "refreshing %d modules info with %d\n",
				 oldinfo->count, newinfo->count);
		tocopy_count = oldinfo->count;
	}

	ida_ptrs = kcalloc(newinfo->count, sizeof(*ida_ptrs), GFP_KERNEL);
	if (!ida_ptrs)
		return -ENOMEM;

	if (tocopy_count)
		memcpy(ida_ptrs, adev->mod_idas, tocopy_count * sizeof(*ida_ptrs));

	for (i = tocopy_count; i < newinfo->count; i++) {
		ida_ptrs[i] = kzalloc(sizeof(**ida_ptrs), GFP_KERNEL);
		if (!ida_ptrs[i]) {
			while (i--)
				kfree(ida_ptrs[i]);

			kfree(ida_ptrs);
			return -ENOMEM;
		}

		ida_init(ida_ptrs[i]);
	}

	/* If old elements have been reused, don't wipe them. */
	if (tocopy_count)
		kfree(adev->mod_idas);
	else
		avs_module_ida_destroy(adev);

	adev->mod_idas = ida_ptrs;
	return 0;
}

/*
 * Fetch the module table from firmware over IPC and install it, refreshing
 * the per-module IDAs (see avs_module_ida_alloc for @purge semantics).
 * Returns 0 on success or a negative error.
 */
int avs_module_info_init(struct avs_dev *adev, bool purge)
{
	struct avs_mods_info *info;
	int ret;

	ret = avs_ipc_get_modules_info(adev, &info);
	if (ret)
		return AVS_IPC_RET(ret);

	mutex_lock(&adev->modres_mutex);

	ret = avs_module_ida_alloc(adev, info, purge);
	if (ret < 0) {
		dev_err(adev->dev, "initialize module idas failed: %d\n", ret);
		goto exit;
	}

	/* Refresh current information with newly received table. */
	kfree(adev->mods_info);
	adev->mods_info = info;

exit:
	mutex_unlock(&adev->modres_mutex);
	return ret;
}

/* Release the module table and all associated IDAs (driver teardown). */
void avs_module_info_free(struct avs_dev *adev)
{
	mutex_lock(&adev->modres_mutex);

	avs_module_ida_destroy(adev);
	kfree(adev->mods_info);
	adev->mods_info = NULL;

	mutex_unlock(&adev->modres_mutex);
}

/*
 * Allocate a free instance id for @module_id, bounded by the firmware's
 * instance_max_count for that module.
 * Returns the instance id (>= 0) or a negative error (-EINVAL for an
 * unknown module id, or the ida_alloc_max() failure).
 */
int avs_module_id_alloc(struct avs_dev *adev, u16 module_id)
{
	int ret, idx, max_id;

	mutex_lock(&adev->modres_mutex);

	idx = avs_module_id_entry_index(adev, module_id);
	if (idx == -ENOENT) {
		dev_err(adev->dev, "invalid module id: %d", module_id);
		ret = -EINVAL;
		goto exit;
	}

	max_id = adev->mods_info->entries[idx].instance_max_count - 1;
	ret = ida_alloc_max(adev->mod_idas[idx], max_id, GFP_KERNEL);
exit:
	mutex_unlock(&adev->modres_mutex);
	return ret;
}

/* Return a previously allocated instance id of @module_id to its IDA. */
void avs_module_id_free(struct avs_dev *adev, u16 module_id, u8 instance_id)
{
	int idx;

	mutex_lock(&adev->modres_mutex);

	idx = avs_module_id_entry_index(adev, module_id);
	if (idx == -ENOENT) {
		dev_err(adev->dev, "invalid module id: %d", module_id);
		goto exit;
	}

	ida_free(adev->mod_idas[idx], instance_id);
exit:
	mutex_unlock(&adev->modres_mutex);
}

/*
 * Once driver loads FW it should keep it in memory, so we are not affected
 * by FW removal from filesystem or even worse by loading different FW at
 * runtime suspend/resume.
 */
/*
 * Look up @name in the driver's firmware cache; on a miss, load it with
 * request_firmware() and append it to adev->fw_list. On success *fw_p
 * points at the (cached) firmware, which stays owned by the list until
 * avs_release_*_firmware(). Returns 0 or a negative error.
 * NOTE(review): fw_list is walked without a lock here — presumably all
 * callers run from a single probe/boot context; confirm.
 */
int avs_request_firmware(struct avs_dev *adev, const struct firmware **fw_p, const char *name)
{
	struct avs_fw_entry *entry;
	int ret;

	/* first check in list if it is not already loaded */
	list_for_each_entry(entry, &adev->fw_list, node) {
		if (!strcmp(name, entry->name)) {
			*fw_p = entry->fw;
			return 0;
		}
	}

	/* FW is not loaded, let's load it now and add to the list */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->name = kstrdup(name, GFP_KERNEL);
	if (!entry->name) {
		kfree(entry);
		return -ENOMEM;
	}

	ret = request_firmware(&entry->fw, name, adev->dev);
	if (ret < 0) {
		kfree(entry->name);
		kfree(entry);
		return ret;
	}

	*fw_p = entry->fw;

	list_add_tail(&entry->node, &adev->fw_list);

	return 0;
}

/*
 * Release single FW entry, used to handle errors in functions calling
 * avs_request_firmware()
 */
void avs_release_last_firmware(struct avs_dev *adev)
{
	struct avs_fw_entry *entry;

	/* Pops the most recently cached entry; list must be non-empty. */
	entry = list_last_entry(&adev->fw_list, typeof(*entry), node);

	list_del(&entry->node);
	release_firmware(entry->fw);
	kfree(entry->name);
	kfree(entry);
}

/*
 * Release all FW entries, used on driver removal
 */
void avs_release_firmwares(struct avs_dev *adev)
{
	struct avs_fw_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &adev->fw_list, node) {
		list_del(&entry->node);
		release_firmware(entry->fw);
		kfree(entry->name);
		kfree(entry);
	}
}
linux-master
sound/soc/intel/avs/utils.c
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <[email protected]>
//          Amadeusz Slawinski <[email protected]>
//

#include <sound/intel-nhlt.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "avs.h"
#include "control.h"
#include "path.h"
#include "topology.h"

/* Must be called with adev->comp_list_mutex held. */
static struct avs_tplg *
avs_path_find_tplg(struct avs_dev *adev, const char *name)
{
	struct avs_soc_component *acomp;

	list_for_each_entry(acomp, &adev->comp_list, node)
		if (!strcmp(acomp->tplg->name, name))
			return acomp->tplg;
	return NULL;
}

/* Locate an instantiated module within a pipeline by its topology id. */
static struct avs_path_module *
avs_path_find_module(struct avs_path_pipeline *ppl, u32 template_id)
{
	struct avs_path_module *mod;

	list_for_each_entry(mod, &ppl->mod_list, node)
		if (mod->template->id == template_id)
			return mod;
	return NULL;
}

/* Locate an instantiated pipeline within a path by its topology id. */
static struct avs_path_pipeline *
avs_path_find_pipeline(struct avs_path *path, u32 template_id)
{
	struct avs_path_pipeline *ppl;

	list_for_each_entry(ppl, &path->ppl_list, node)
		if (ppl->template->id == template_id)
			return ppl;
	return NULL;
}

/*
 * Find a live path instantiated from the template identified by
 * (topology name, template id). Returns NULL if the topology, the
 * template or a matching instantiated path does not exist.
 */
static struct avs_path *
avs_path_find_path(struct avs_dev *adev, const char *name, u32 template_id)
{
	struct avs_tplg_path_template *pos, *template = NULL;
	struct avs_tplg *tplg;
	struct avs_path *path;

	tplg = avs_path_find_tplg(adev, name);
	if (!tplg)
		return NULL;

	list_for_each_entry(pos, &tplg->path_tmpl_list, node) {
		if (pos->id == template_id) {
			template = pos;
			break;
		}
	}
	if (!template)
		return NULL;

	spin_lock(&adev->path_list_lock);
	/* Only one variant of given path template may be instantiated at a time. */
	list_for_each_entry(path, &adev->path_list, node) {
		if (path->template->owner == template) {
			spin_unlock(&adev->path_list_lock);
			return path;
		}
	}

	spin_unlock(&adev->path_list_lock);
	return NULL;
}

/* Compare negotiated hw_params against a topology-declared audio format. */
static bool avs_test_hw_params(struct snd_pcm_hw_params *params,
			       struct avs_audio_format *fmt)
{
	return (params_rate(params) == fmt->sampling_freq &&
		params_channels(params) == fmt->num_channels &&
		params_physical_width(params) == fmt->bit_depth &&
		params_width(params) == fmt->valid_bit_depth);
}

/*
 * Select the path variant whose FE and BE formats both match the
 * negotiated hw_params.
 *
 * NOTE(review): the dev_dbg() calls dereference variant->fe_fmt and
 * variant->be_fmt unconditionally, yet the if() below guards against
 * either being NULL - if a NULL format is a legal topology state, the
 * debug prints can NULL-deref with dynamic debug enabled. Confirm
 * whether fe_fmt/be_fmt are guaranteed non-NULL here.
 */
static struct avs_tplg_path *
avs_path_find_variant(struct avs_dev *adev,
		      struct avs_tplg_path_template *template,
		      struct snd_pcm_hw_params *fe_params,
		      struct snd_pcm_hw_params *be_params)
{
	struct avs_tplg_path *variant;

	list_for_each_entry(variant, &template->path_list, node) {
		dev_dbg(adev->dev, "check FE rate %d chn %d vbd %d bd %d\n",
			variant->fe_fmt->sampling_freq, variant->fe_fmt->num_channels,
			variant->fe_fmt->valid_bit_depth, variant->fe_fmt->bit_depth);
		dev_dbg(adev->dev, "check BE rate %d chn %d vbd %d bd %d\n",
			variant->be_fmt->sampling_freq, variant->be_fmt->num_channels,
			variant->be_fmt->valid_bit_depth, variant->be_fmt->bit_depth);

		if (variant->fe_fmt && avs_test_hw_params(fe_params, variant->fe_fmt) &&
		    variant->be_fmt && avs_test_hw_params(be_params, variant->be_fmt))
			return variant;
	}

	return NULL;
}

/* DMA-type classification helpers for gateway (copier) configuration. */
__maybe_unused
static bool avs_dma_type_is_host(u32 dma_type)
{
	return dma_type == AVS_DMA_HDA_HOST_OUTPUT ||
	       dma_type == AVS_DMA_HDA_HOST_INPUT;
}

__maybe_unused
static bool avs_dma_type_is_link(u32 dma_type)
{
	return !avs_dma_type_is_host(dma_type);
}

__maybe_unused
static bool avs_dma_type_is_output(u32 dma_type)
{
	return dma_type == AVS_DMA_HDA_HOST_OUTPUT ||
	       dma_type == AVS_DMA_HDA_LINK_OUTPUT ||
	       dma_type == AVS_DMA_I2S_LINK_OUTPUT;
}

__maybe_unused
static bool avs_dma_type_is_input(u32 dma_type)
{
	return !avs_dma_type_is_output(dma_type);
}

/*
 * Instantiate a COPIER module on the DSP. Builds the gateway node id
 * and, for I2S/DMIC links, appends the matching NHLT endpoint blob to
 * the module configuration before sending the init IPC.
 */
static int avs_copier_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct nhlt_acpi_table *nhlt = adev->nhlt;
	struct avs_tplg_module *t = mod->template;
	struct avs_copier_cfg *cfg;
	struct nhlt_specific_cfg *ep_blob;
	union avs_connector_node_id node_id = {0};
	size_t cfg_size, data_size = 0;
	void *data = NULL;
	u32 dma_type;
	int ret;

	dma_type = t->cfg_ext->copier.dma_type;
	node_id.dma_type = dma_type;

	switch (dma_type) {
		struct avs_audio_format *fmt;
		int direction;

	case AVS_DMA_I2S_LINK_OUTPUT:
	case AVS_DMA_I2S_LINK_INPUT:
		if (avs_dma_type_is_input(dma_type))
			direction = SNDRV_PCM_STREAM_CAPTURE;
		else
			direction = SNDRV_PCM_STREAM_PLAYBACK;

		/* Prefer the explicit blob format; fall back per direction. */
		if (t->cfg_ext->copier.blob_fmt)
			fmt = t->cfg_ext->copier.blob_fmt;
		else if (direction == SNDRV_PCM_STREAM_CAPTURE)
			fmt = t->in_fmt;
		else
			fmt = t->cfg_ext->copier.out_fmt;

		ep_blob = intel_nhlt_get_endpoint_blob(adev->dev, nhlt,
						       t->cfg_ext->copier.vindex.i2s.instance,
						       NHLT_LINK_SSP, fmt->valid_bit_depth,
						       fmt->bit_depth, fmt->num_channels,
						       fmt->sampling_freq, direction,
						       NHLT_DEVICE_I2S);
		if (!ep_blob) {
			dev_err(adev->dev, "no I2S ep_blob found\n");
			return -ENOENT;
		}

		data = ep_blob->caps;
		data_size = ep_blob->size;
		/* I2S gateway's vindex is statically assigned in topology */
		node_id.vindex = t->cfg_ext->copier.vindex.val;

		break;

	case AVS_DMA_DMIC_LINK_INPUT:
		direction = SNDRV_PCM_STREAM_CAPTURE;

		if (t->cfg_ext->copier.blob_fmt)
			fmt = t->cfg_ext->copier.blob_fmt;
		else
			fmt = t->in_fmt;

		ep_blob = intel_nhlt_get_endpoint_blob(adev->dev, nhlt, 0, NHLT_LINK_DMIC,
						       fmt->valid_bit_depth, fmt->bit_depth,
						       fmt->num_channels, fmt->sampling_freq,
						       direction, NHLT_DEVICE_DMIC);
		if (!ep_blob) {
			dev_err(adev->dev, "no DMIC ep_blob found\n");
			return -ENOENT;
		}

		data = ep_blob->caps;
		data_size = ep_blob->size;
		/* DMIC gateway's vindex is statically assigned in topology */
		node_id.vindex = t->cfg_ext->copier.vindex.val;

		break;

	case AVS_DMA_HDA_HOST_OUTPUT:
	case AVS_DMA_HDA_HOST_INPUT:
		/* HOST gateway's vindex is dynamically assigned with DMA id */
		node_id.vindex = mod->owner->owner->dma_id;
		break;

	case AVS_DMA_HDA_LINK_OUTPUT:
	case AVS_DMA_HDA_LINK_INPUT:
		node_id.vindex = t->cfg_ext->copier.vindex.val |
				 mod->owner->owner->dma_id;
		break;

	case INVALID_OBJECT_ID:
	default:
		node_id = INVALID_NODE_ID;
		break;
	}

	cfg_size = sizeof(*cfg) + data_size;
	/* Every config-BLOB contains gateway attributes. */
	if (data_size)
		cfg_size -= sizeof(cfg->gtw_cfg.config.attrs);

	cfg = kzalloc(cfg_size, GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	cfg->base.cpc = t->cfg_base->cpc;
	cfg->base.ibs = t->cfg_base->ibs;
	cfg->base.obs = t->cfg_base->obs;
	cfg->base.is_pages = t->cfg_base->is_pages;
	cfg->base.audio_fmt = *t->in_fmt;
	cfg->out_fmt = *t->cfg_ext->copier.out_fmt;
	cfg->feature_mask = t->cfg_ext->copier.feature_mask;
	cfg->gtw_cfg.node_id = node_id;
	cfg->gtw_cfg.dma_buffer_size = t->cfg_ext->copier.dma_buffer_size;
	/* config_length in DWORDs */
	cfg->gtw_cfg.config_length = DIV_ROUND_UP(data_size, 4);
	if (data)
		memcpy(&cfg->gtw_cfg.config, data, data_size);

	mod->gtw_attrs = cfg->gtw_cfg.config.attrs;

	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				  t->core_id, t->domain, cfg, cfg_size,
				  &mod->instance_id);
	kfree(cfg);
	return ret;
}

/*
 * Walk the kcontrols of the DAPM widget owning this module's path
 * template and return the control data matching the module's ctl_id,
 * or NULL if none is attached.
 */
static struct avs_control_data *avs_get_module_control(struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_tplg_path_template *path_tmpl;
	struct snd_soc_dapm_widget *w;
	int i;

	path_tmpl = t->owner->owner->owner;
	w = path_tmpl->w;

	for (i = 0; i < w->num_kcontrols; i++) {
		struct avs_control_data *ctl_data;
		struct soc_mixer_control *mc;

		mc = (struct soc_mixer_control *)w->kcontrols[i]->private_value;
		ctl_data = (struct avs_control_data *)mc->dobj.private;
		if (ctl_data->id == t->ctl_id)
			return ctl_data;
	}

	return NULL;
}

/*
 * Instantiate a PEAKVOL/GAIN module. Initial volume comes from the
 * bound kcontrol when present, otherwise defaults to S32_MAX (0 dB).
 */
static int avs_peakvol_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_control_data *ctl_data;
	struct avs_peakvol_cfg *cfg;
	int volume = S32_MAX;
	size_t size;
	int ret;

	ctl_data = avs_get_module_control(mod);
	if (ctl_data)
		volume = ctl_data->volume;

	/* As 2+ channels controls are unsupported, have a single block for all channels. */
	size = struct_size(cfg, vols, 1);
	cfg = kzalloc(size, GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	cfg->base.cpc = t->cfg_base->cpc;
	cfg->base.ibs = t->cfg_base->ibs;
	cfg->base.obs = t->cfg_base->obs;
	cfg->base.is_pages = t->cfg_base->is_pages;
	cfg->base.audio_fmt = *t->in_fmt;
	cfg->vols[0].target_volume = volume;
	cfg->vols[0].channel_id = AVS_ALL_CHANNELS_MASK;
	cfg->vols[0].curve_type = AVS_AUDIO_CURVE_NONE;
	cfg->vols[0].curve_duration = 0;

	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				  t->core_id, t->domain, cfg, size,
				  &mod->instance_id);
	kfree(cfg);
	return ret;
}

/* Instantiate an up/down-mixer module from its topology configuration. */
static int avs_updown_mix_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_updown_mixer_cfg cfg;
	int i;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_channel_config = t->cfg_ext->updown_mix.out_channel_config;
	cfg.coefficients_select = t->cfg_ext->updown_mix.coefficients_select;
	for (i = 0; i < AVS_CHANNELS_MAX; i++)
		cfg.coefficients[i] = t->cfg_ext->updown_mix.coefficients[i];
	cfg.channel_map = t->cfg_ext->updown_mix.channel_map;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

/* Instantiate a sample-rate-converter (SRC) module. */
static int avs_src_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_src_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_freq = t->cfg_ext->src.out_freq;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

/* Instantiate an asynchronous SRC (ASRC) module. */
static int avs_asrc_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_asrc_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_freq = t->cfg_ext->asrc.out_freq;
	cfg.mode = t->cfg_ext->asrc.mode;
	cfg.disable_jitter_buffer = t->cfg_ext->asrc.disable_jitter_buffer;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

/* Instantiate an acoustic-echo-cancellation (AEC) module. */
static int avs_aec_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_aec_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.ref_fmt = *t->cfg_ext->aec.ref_fmt;
	cfg.out_fmt = *t->cfg_ext->aec.out_fmt;
	cfg.cpc_lp_mode = t->cfg_ext->aec.cpc_lp_mode;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

/* Instantiate a MUX module. */
static int avs_mux_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_mux_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.ref_fmt = *t->cfg_ext->mux.ref_fmt;
	cfg.out_fmt = *t->cfg_ext->mux.out_fmt;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

/* Instantiate a WoV (wake-on-voice) module. */
static int avs_wov_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_wov_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.cpc_lp_mode = t->cfg_ext->wov.cpc_lp_mode;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

/* Instantiate a microphone-select module. */
static int avs_micsel_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_micsel_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_fmt = *t->cfg_ext->micsel.out_fmt;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

/* Instantiate a module that needs only the base configuration. */
static int avs_modbase_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_modcfg_base cfg;

	cfg.cpc = t->cfg_base->cpc;
	cfg.ibs = t->cfg_base->ibs;
	cfg.obs = t->cfg_base->obs;
	cfg.is_pages = t->cfg_base->is_pages;
	cfg.audio_fmt = *t->in_fmt;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

/*
 * Generic fallback: instantiate a module with the extended
 * configuration (base + per-pin formats).
 */
static int avs_modext_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_tplg_modcfg_ext *tcfg = t->cfg_ext;
	struct avs_modcfg_ext *cfg;
	size_t cfg_size, num_pins;
	int ret, i;

	num_pins = tcfg->generic.num_input_pins + tcfg->generic.num_output_pins;
	cfg_size = struct_size(cfg, pin_fmts, num_pins);

	cfg = kzalloc(cfg_size, GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	cfg->base.cpc = t->cfg_base->cpc;
	cfg->base.ibs = t->cfg_base->ibs;
	cfg->base.obs = t->cfg_base->obs;
	cfg->base.is_pages = t->cfg_base->is_pages;
	cfg->base.audio_fmt = *t->in_fmt;
	cfg->num_input_pins = tcfg->generic.num_input_pins;
	cfg->num_output_pins = tcfg->generic.num_output_pins;

	/* configure pin formats */
	for (i = 0; i < num_pins; i++) {
		struct avs_tplg_pin_format *tpin = &tcfg->generic.pin_fmts[i];
		struct avs_pin_format *pin = &cfg->pin_fmts[i];

		pin->pin_index = tpin->pin_index;
		pin->iobs = tpin->iobs;
		pin->audio_fmt = *tpin->fmt;
	}

	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				  t->core_id, t->domain, cfg, cfg_size,
				  &mod->instance_id);
	kfree(cfg);
	return ret;
}

/* Probe modules are created on demand by debug code, never by topology. */
static int avs_probe_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	dev_err(adev->dev, "Probe module can't be instantiated by topology");
	return -EINVAL;
}

/* Maps a module-type UUID to its creation handler. */
struct avs_module_create {
	guid_t *guid;
	int (*create)(struct avs_dev *adev, struct avs_path_module *mod);
};

static struct avs_module_create avs_module_create[] = {
	{ &AVS_MIXIN_MOD_UUID, avs_modbase_create },
	{ &AVS_MIXOUT_MOD_UUID, avs_modbase_create },
	{ &AVS_KPBUFF_MOD_UUID, avs_modbase_create },
	{ &AVS_COPIER_MOD_UUID, avs_copier_create },
	{ &AVS_PEAKVOL_MOD_UUID, avs_peakvol_create },
	{ &AVS_GAIN_MOD_UUID, avs_peakvol_create },
	{ &AVS_MICSEL_MOD_UUID, avs_micsel_create },
	{ &AVS_MUX_MOD_UUID, avs_mux_create },
	{ &AVS_UPDWMIX_MOD_UUID, avs_updown_mix_create },
	{ &AVS_SRCINTC_MOD_UUID, avs_src_create },
	{ &AVS_AEC_MOD_UUID, avs_aec_create },
	{ &AVS_ASRC_MOD_UUID, avs_asrc_create },
	{ &AVS_INTELWOV_MOD_UUID, avs_wov_create },
	{ &AVS_PROBE_MOD_UUID, avs_probe_create },
};

/* Dispatch module creation by UUID; unknown types use the ext fallback. */
static int avs_path_module_type_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	const guid_t *type = &mod->template->cfg_ext->type;

	for (int i = 0; i < ARRAY_SIZE(avs_module_create); i++)
		if (guid_equal(type, avs_module_create[i].guid))
			return avs_module_create[i].create(adev, mod);

	return avs_modext_create(adev, mod);
}

static void avs_path_module_free(struct avs_dev *adev, struct avs_path_module *mod)
{
	kfree(mod);
}

/*
 * Allocate a path-module instance, resolve its firmware module id and
 * instantiate it on the DSP. Returns ERR_PTR() on failure.
 */
static struct avs_path_module *
avs_path_module_create(struct avs_dev *adev,
		       struct avs_path_pipeline *owner,
		       struct avs_tplg_module *template)
{
	struct avs_path_module *mod;
	int module_id, ret;

	module_id = avs_get_module_id(adev, &template->cfg_ext->type);
	if (module_id < 0)
		return ERR_PTR(module_id);

	mod = kzalloc(sizeof(*mod), GFP_KERNEL);
	if (!mod)
		return ERR_PTR(-ENOMEM);

	mod->template = template;
	mod->module_id = module_id;
	mod->owner = owner;
	INIT_LIST_HEAD(&mod->node);

	ret = avs_path_module_type_create(adev, mod);
	if (ret) {
		dev_err(adev->dev, "module-type create failed: %d\n", ret);
		kfree(mod);
		return ERR_PTR(ret);
	}

	return mod;
}

/*
 * Resolve a binding's source/sink module pointers and pins by looking
 * up the target path/pipeline/module described by the topology binding.
 */
static int avs_path_binding_arm(struct avs_dev *adev, struct avs_path_binding *binding)
{
	struct avs_path_module *this_mod, *target_mod;
	struct avs_path_pipeline *target_ppl;
	struct avs_path *target_path;
	struct avs_tplg_binding *t;

	t = binding->template;
	this_mod = avs_path_find_module(binding->owner, t->mod_id);
	if (!this_mod) {
		dev_err(adev->dev, "path mod %d not found\n", t->mod_id);
		return -EINVAL;
	}

	/* update with target_tplg_name too */
	target_path = avs_path_find_path(adev, t->target_tplg_name,
					 t->target_path_tmpl_id);
	if (!target_path) {
		dev_err(adev->dev, "target path %s:%d not found\n",
			t->target_tplg_name, t->target_path_tmpl_id);
		return -EINVAL;
	}

	target_ppl = avs_path_find_pipeline(target_path, t->target_ppl_id);
	if (!target_ppl) {
		dev_err(adev->dev, "target ppl %d not found\n", t->target_ppl_id);
		return -EINVAL;
	}

	target_mod = avs_path_find_module(target_ppl, t->target_mod_id);
	if (!target_mod) {
		dev_err(adev->dev, "target mod %d not found\n", t->target_mod_id);
		return -EINVAL;
	}

	if (t->is_sink) {
		binding->sink = this_mod;
		binding->sink_pin = t->mod_pin;
		binding->source = target_mod;
		binding->source_pin = t->target_mod_pin;
	} else {
		binding->sink = target_mod;
		binding->sink_pin = t->target_mod_pin;
		binding->source = this_mod;
		binding->source_pin = t->mod_pin;
	}

	return 0;
}

static void avs_path_binding_free(struct avs_dev *adev, struct avs_path_binding *binding)
{
	kfree(binding);
}

/* Allocate a binding shell; source/sink are filled in by _arm(). */
static struct avs_path_binding *avs_path_binding_create(struct avs_dev *adev,
							struct avs_path_pipeline *owner,
							struct avs_tplg_binding *t)
{
	struct avs_path_binding *binding;

	binding = kzalloc(sizeof(*binding), GFP_KERNEL);
	if (!binding)
		return ERR_PTR(-ENOMEM);

	binding->template = t;
	binding->owner = owner;
	INIT_LIST_HEAD(&binding->node);

	return binding;
}

/* Bind each module to its successor on the pipeline's module list. */
static int avs_path_pipeline_arm(struct avs_dev *adev, struct avs_path_pipeline *ppl)
{
	struct avs_path_module *mod;

	list_for_each_entry(mod, &ppl->mod_list, node) {
		struct avs_path_module *source, *sink;
		int ret;

		/*
		 * Only one module (so it's implicitly last) or it is the last
		 * one, either way we don't have next module to bind it to.
		 */
		if (mod == list_last_entry(&ppl->mod_list,
					   struct avs_path_module, node))
			break;

		/* bind current module to next module on list */
		source = mod;
		sink = list_next_entry(mod, node);
		if (!source || !sink)
			return -EINVAL;

		ret = avs_ipc_bind(adev, source->module_id, source->instance_id,
				   sink->module_id, sink->instance_id, 0, 0);
		if (ret)
			return AVS_IPC_RET(ret);
	}

	return 0;
}

/*
 * Tear down a pipeline: free bindings, delete the DSP pipeline, then
 * delete and free every owned module, and finally the pipeline itself.
 */
static void avs_path_pipeline_free(struct avs_dev *adev, struct avs_path_pipeline *ppl)
{
	struct avs_path_binding *binding, *bsave;
	struct avs_path_module *mod, *save;

	list_for_each_entry_safe(binding, bsave, &ppl->binding_list, node) {
		list_del(&binding->node);
		avs_path_binding_free(adev, binding);
	}

	avs_dsp_delete_pipeline(adev, ppl->instance_id);

	/* Unload resources occupied by owned modules */
	list_for_each_entry_safe(mod, save, &ppl->mod_list, node) {
		avs_dsp_delete_module(adev, mod->module_id, mod->instance_id,
				      mod->owner->instance_id,
				      mod->template->core_id);
		avs_path_module_free(adev, mod);
	}

	list_del(&ppl->node);
	kfree(ppl);
}

/*
 * Create a pipeline on the DSP and instantiate all of its modules and
 * binding shells from the topology template. On any failure, already
 * created resources are released via avs_path_pipeline_free().
 */
static struct avs_path_pipeline *
avs_path_pipeline_create(struct avs_dev *adev, struct avs_path *owner,
			 struct avs_tplg_pipeline *template)
{
	struct avs_path_pipeline *ppl;
	struct avs_tplg_pplcfg *cfg = template->cfg;
	struct avs_tplg_module *tmod;
	int ret, i;

	ppl = kzalloc(sizeof(*ppl), GFP_KERNEL);
	if (!ppl)
		return ERR_PTR(-ENOMEM);

	ppl->template = template;
	ppl->owner = owner;
	INIT_LIST_HEAD(&ppl->binding_list);
	INIT_LIST_HEAD(&ppl->mod_list);
	INIT_LIST_HEAD(&ppl->node);

	ret = avs_dsp_create_pipeline(adev, cfg->req_size, cfg->priority,
				      cfg->lp, cfg->attributes,
				      &ppl->instance_id);
	if (ret) {
		dev_err(adev->dev, "error creating pipeline %d\n", ret);
		kfree(ppl);
		return ERR_PTR(ret);
	}

	list_for_each_entry(tmod, &template->mod_list, node) {
		struct avs_path_module *mod;

		mod = avs_path_module_create(adev, ppl, tmod);
		if (IS_ERR(mod)) {
			ret = PTR_ERR(mod);
			dev_err(adev->dev, "error creating module %d\n", ret);
			goto init_err;
		}

		list_add_tail(&mod->node, &ppl->mod_list);
	}

	for (i = 0; i < template->num_bindings; i++) {
		struct avs_path_binding *binding;

		binding = avs_path_binding_create(adev, ppl,
						  template->bindings[i]);
		if (IS_ERR(binding)) {
			ret = PTR_ERR(binding);
			dev_err(adev->dev, "error creating binding %d\n", ret);
			goto init_err;
		}

		list_add_tail(&binding->node, &ppl->binding_list);
	}

	return ppl;

init_err:
	avs_path_pipeline_free(adev, ppl);
	return ERR_PTR(ret);
}

/*
 * Initialize a path object: create every pipeline from the template and
 * publish the path on adev->path_list under the path_list_lock.
 */
static int avs_path_init(struct avs_dev *adev, struct avs_path *path,
			 struct avs_tplg_path *template, u32 dma_id)
{
	struct avs_tplg_pipeline *tppl;

	path->owner = adev;
	path->template = template;
	path->dma_id = dma_id;
	INIT_LIST_HEAD(&path->ppl_list);
	INIT_LIST_HEAD(&path->node);

	/* create all the pipelines */
	list_for_each_entry(tppl, &template->ppl_list, node) {
		struct avs_path_pipeline *ppl;

		ppl = avs_path_pipeline_create(adev, path, tppl);
		if (IS_ERR(ppl))
			return PTR_ERR(ppl);

		list_add_tail(&ppl->node, &path->ppl_list);
	}

	spin_lock(&adev->path_list_lock);
	list_add_tail(&path->node, &adev->path_list);
	spin_unlock(&adev->path_list_lock);

	return 0;
}

/* Arm all bindings, then intra-pipeline module chains, per pipeline. */
static int avs_path_arm(struct avs_dev *adev, struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_path_binding *binding;
	int ret;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		/*
		 * Arm all ppl bindings before binding internal modules
		 * as it costs no IPCs which isn't true for the latter.
		 */
		list_for_each_entry(binding, &ppl->binding_list, node) {
			ret = avs_path_binding_arm(adev, binding);
			if (ret < 0)
				return ret;
		}

		ret = avs_path_pipeline_arm(adev, ppl);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Unpublish and destroy a path; caller must hold adev->path_mutex. */
static void avs_path_free_unlocked(struct avs_path *path)
{
	struct avs_path_pipeline *ppl, *save;

	spin_lock(&path->owner->path_list_lock);
	list_del(&path->node);
	spin_unlock(&path->owner->path_list_lock);

	list_for_each_entry_safe(ppl, save, &path->ppl_list, node)
		avs_path_pipeline_free(path->owner, ppl);

	kfree(path);
}

/* Build and arm a path; caller must hold adev->path_mutex. */
static struct avs_path *avs_path_create_unlocked(struct avs_dev *adev, u32 dma_id,
						 struct avs_tplg_path *template)
{
	struct avs_path *path;
	int ret;

	path = kzalloc(sizeof(*path), GFP_KERNEL);
	if (!path)
		return ERR_PTR(-ENOMEM);

	ret = avs_path_init(adev, path, template, dma_id);
	if (ret < 0)
		goto err;

	ret = avs_path_arm(adev, path);
	if (ret < 0)
		goto err;

	path->state = AVS_PPL_STATE_INVALID;
	return path;
err:
	avs_path_free_unlocked(path);
	return ERR_PTR(ret);
}

/* Public teardown entry point; serializes against path creation. */
void avs_path_free(struct avs_path *path)
{
	struct avs_dev *adev = path->owner;

	mutex_lock(&adev->path_mutex);
	avs_path_free_unlocked(path);
	mutex_unlock(&adev->path_mutex);
}

/*
 * Public creation entry point: pick the template variant matching the
 * FE/BE hw_params and instantiate it under path and component locks.
 */
struct avs_path *avs_path_create(struct avs_dev *adev, u32 dma_id,
				 struct avs_tplg_path_template *template,
				 struct snd_pcm_hw_params *fe_params,
				 struct snd_pcm_hw_params *be_params)
{
	struct avs_tplg_path *variant;
	struct avs_path *path;

	variant = avs_path_find_variant(adev, template, fe_params, be_params);
	if (!variant) {
		dev_err(adev->dev, "no matching variant found\n");
		return ERR_PTR(-ENOENT);
	}

	/* Serialize path and its components creation. */
	mutex_lock(&adev->path_mutex);
	/* Satisfy needs of avs_path_find_tplg(). */
	mutex_lock(&adev->comp_list_mutex);

	path = avs_path_create_unlocked(adev, dma_id, variant);

	mutex_unlock(&adev->comp_list_mutex);
	mutex_unlock(&adev->path_mutex);

	return path;
}

/*
 * Configure a copier's sink format before binding when the binding
 * uses an output pin other than 0.
 */
static int avs_path_bind_prepare(struct avs_dev *adev, struct avs_path_binding *binding)
{
	const struct avs_audio_format *src_fmt, *sink_fmt;
	struct avs_tplg_module *tsource = binding->source->template;
	struct avs_path_module *source = binding->source;
	int ret;

	/*
	 * only copier modules about to be bound
	 * to output pin other than 0 need preparation
	 */
	if (!binding->source_pin)
		return 0;
	if (!guid_equal(&tsource->cfg_ext->type, &AVS_COPIER_MOD_UUID))
		return 0;

	src_fmt = tsource->in_fmt;
	sink_fmt = binding->sink->template->in_fmt;

	ret = avs_ipc_copier_set_sink_format(adev, source->module_id,
					     source->instance_id,
					     binding->source_pin,
					     src_fmt, sink_fmt);
	if (ret) {
		dev_err(adev->dev, "config copier failed: %d\n", ret);
		return AVS_IPC_RET(ret);
	}

	return 0;
}

/* Issue bind IPCs for every armed binding in the path. */
int avs_path_bind(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		struct avs_path_binding *binding;

		list_for_each_entry(binding, &ppl->binding_list, node) {
			struct avs_path_module *source, *sink;

			source = binding->source;
			sink = binding->sink;

			ret = avs_path_bind_prepare(adev, binding);
			if (ret < 0)
				return ret;

			ret = avs_ipc_bind(adev, source->module_id,
					   source->instance_id,
					   sink->module_id,
					   sink->instance_id,
					   binding->sink_pin,
					   binding->source_pin);
			if (ret) {
				dev_err(adev->dev, "bind path failed: %d\n", ret);
				return AVS_IPC_RET(ret);
			}
		}
	}

	return 0;
}

/* Issue unbind IPCs for every binding in the path. */
int avs_path_unbind(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		struct avs_path_binding *binding;

		list_for_each_entry(binding, &ppl->binding_list, node) {
			struct avs_path_module *source, *sink;

			source = binding->source;
			sink = binding->sink;

			ret = avs_ipc_unbind(adev, source->module_id,
					     source->instance_id,
					     sink->module_id,
					     sink->instance_id,
					     binding->sink_pin,
					     binding->source_pin);
			if (ret) {
				dev_err(adev->dev, "unbind path failed: %d\n", ret);
				return AVS_IPC_RET(ret);
			}
		}
	}

	return 0;
}

/* Transition every pipeline of the path to RESET state. */
int avs_path_reset(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	if (path->state == AVS_PPL_STATE_RESET)
		return 0;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
						 AVS_PPL_STATE_RESET);
		if (ret) {
			dev_err(adev->dev, "reset path failed: %d\n", ret);
			path->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	path->state = AVS_PPL_STATE_RESET;
	return 0;
}

/*
 * Transition every pipeline to PAUSED, walking the list in reverse so
 * downstream pipelines stop first.
 */
int avs_path_pause(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	if (path->state == AVS_PPL_STATE_PAUSED)
		return 0;

	list_for_each_entry_reverse(ppl, &path->ppl_list, node) {
		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
						 AVS_PPL_STATE_PAUSED);
		if (ret) {
			dev_err(adev->dev, "pause path failed: %d\n", ret);
			path->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	path->state = AVS_PPL_STATE_PAUSED;
	return 0;
}

/*
 * Transition pipelines matching @trigger to RUNNING. With
 * AVS_TPLG_TRIGGER_AUTO, an already-running path is a no-op.
 */
int avs_path_run(struct avs_path *path, int trigger)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	if (path->state == AVS_PPL_STATE_RUNNING && trigger == AVS_TPLG_TRIGGER_AUTO)
		return 0;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		if (ppl->template->cfg->trigger != trigger)
			continue;

		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
						 AVS_PPL_STATE_RUNNING);
		if (ret) {
			dev_err(adev->dev, "run path failed: %d\n", ret);
			path->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	path->state = AVS_PPL_STATE_RUNNING;
	return 0;
}
linux-master
sound/soc/intel/avs/path.c
// SPDX-License-Identifier: GPL-2.0-only // // Copyright(c) 2021-2022 Intel Corporation. All rights reserved. // // Authors: Cezary Rojewski <[email protected]> // Amadeusz Slawinski <[email protected]> // #include <linux/slab.h> #include "avs.h" #include "messages.h" #define AVS_CL_TIMEOUT_MS 5000 int avs_ipc_set_boot_config(struct avs_dev *adev, u32 dma_id, u32 purge) { union avs_global_msg msg = AVS_GLOBAL_REQUEST(ROM_CONTROL); struct avs_ipc_msg request = {{0}}; int ret; msg.boot_cfg.rom_ctrl_msg_type = AVS_ROM_SET_BOOT_CONFIG; msg.boot_cfg.dma_id = dma_id; msg.boot_cfg.purge_request = purge; request.header = msg.val; ret = avs_dsp_send_rom_msg(adev, &request); if (ret) avs_ipc_err(adev, &request, "set boot config", ret); return ret; } int avs_ipc_load_modules(struct avs_dev *adev, u16 *mod_ids, u32 num_mod_ids) { union avs_global_msg msg = AVS_GLOBAL_REQUEST(LOAD_MULTIPLE_MODULES); struct avs_ipc_msg request; int ret; msg.load_multi_mods.mod_cnt = num_mod_ids; request.header = msg.val; request.data = mod_ids; request.size = sizeof(*mod_ids) * num_mod_ids; ret = avs_dsp_send_msg_timeout(adev, &request, NULL, AVS_CL_TIMEOUT_MS); if (ret) avs_ipc_err(adev, &request, "load multiple modules", ret); return ret; } int avs_ipc_unload_modules(struct avs_dev *adev, u16 *mod_ids, u32 num_mod_ids) { union avs_global_msg msg = AVS_GLOBAL_REQUEST(UNLOAD_MULTIPLE_MODULES); struct avs_ipc_msg request; int ret; msg.load_multi_mods.mod_cnt = num_mod_ids; request.header = msg.val; request.data = mod_ids; request.size = sizeof(*mod_ids) * num_mod_ids; ret = avs_dsp_send_msg(adev, &request, NULL); if (ret) avs_ipc_err(adev, &request, "unload multiple modules", ret); return ret; } int avs_ipc_load_library(struct avs_dev *adev, u32 dma_id, u32 lib_id) { union avs_global_msg msg = AVS_GLOBAL_REQUEST(LOAD_LIBRARY); struct avs_ipc_msg request = {{0}}; int ret; msg.load_lib.dma_id = dma_id; msg.load_lib.lib_id = lib_id; request.header = msg.val; ret = avs_dsp_send_msg_timeout(adev, 
&request, NULL, AVS_CL_TIMEOUT_MS); if (ret) avs_ipc_err(adev, &request, "load library", ret); return ret; } int avs_ipc_create_pipeline(struct avs_dev *adev, u16 req_size, u8 priority, u8 instance_id, bool lp, u16 attributes) { union avs_global_msg msg = AVS_GLOBAL_REQUEST(CREATE_PIPELINE); struct avs_ipc_msg request = {{0}}; int ret; msg.create_ppl.ppl_mem_size = req_size; msg.create_ppl.ppl_priority = priority; msg.create_ppl.instance_id = instance_id; msg.ext.create_ppl.lp = lp; msg.ext.create_ppl.attributes = attributes; request.header = msg.val; ret = avs_dsp_send_msg(adev, &request, NULL); if (ret) avs_ipc_err(adev, &request, "create pipeline", ret); return ret; } int avs_ipc_delete_pipeline(struct avs_dev *adev, u8 instance_id) { union avs_global_msg msg = AVS_GLOBAL_REQUEST(DELETE_PIPELINE); struct avs_ipc_msg request = {{0}}; int ret; msg.ppl.instance_id = instance_id; request.header = msg.val; ret = avs_dsp_send_msg(adev, &request, NULL); if (ret) avs_ipc_err(adev, &request, "delete pipeline", ret); return ret; } int avs_ipc_set_pipeline_state(struct avs_dev *adev, u8 instance_id, enum avs_pipeline_state state) { union avs_global_msg msg = AVS_GLOBAL_REQUEST(SET_PIPELINE_STATE); struct avs_ipc_msg request = {{0}}; int ret; msg.set_ppl_state.ppl_id = instance_id; msg.set_ppl_state.state = state; request.header = msg.val; ret = avs_dsp_send_msg(adev, &request, NULL); if (ret) avs_ipc_err(adev, &request, "set pipeline state", ret); return ret; } int avs_ipc_get_pipeline_state(struct avs_dev *adev, u8 instance_id, enum avs_pipeline_state *state) { union avs_global_msg msg = AVS_GLOBAL_REQUEST(GET_PIPELINE_STATE); struct avs_ipc_msg request = {{0}}; struct avs_ipc_msg reply = {{0}}; int ret; msg.get_ppl_state.ppl_id = instance_id; request.header = msg.val; ret = avs_dsp_send_msg(adev, &request, &reply); if (ret) { avs_ipc_err(adev, &request, "get pipeline state", ret); return ret; } *state = reply.rsp.ext.get_ppl_state.state; return ret; } /* * 
avs_ipc_init_instance - Initialize module instance * * @adev: Driver context * @module_id: Module-type id * @instance_id: Unique module instance id * @ppl_id: Parent pipeline id * @core_id: DSP core to allocate module on * @domain: Processing domain (low latency or data processing) * @param: Module-type specific configuration * @param_size: Size of @param in bytes * * Argument verification, as well as pipeline state checks are done by the * firmware. * * Note: @ppl_id and @core_id are independent of each other as single pipeline * can be composed of module instances located on different DSP cores. */ int avs_ipc_init_instance(struct avs_dev *adev, u16 module_id, u8 instance_id, u8 ppl_id, u8 core_id, u8 domain, void *param, u32 param_size) { union avs_module_msg msg = AVS_MODULE_REQUEST(INIT_INSTANCE); struct avs_ipc_msg request; int ret; msg.module_id = module_id; msg.instance_id = instance_id; /* firmware expects size provided in dwords */ msg.ext.init_instance.param_block_size = DIV_ROUND_UP(param_size, sizeof(u32)); msg.ext.init_instance.ppl_instance_id = ppl_id; msg.ext.init_instance.core_id = core_id; msg.ext.init_instance.proc_domain = domain; request.header = msg.val; request.data = param; request.size = param_size; ret = avs_dsp_send_msg(adev, &request, NULL); if (ret) avs_ipc_err(adev, &request, "init instance", ret); return ret; } /* * avs_ipc_delete_instance - Delete module instance * * @adev: Driver context * @module_id: Module-type id * @instance_id: Unique module instance id * * Argument verification, as well as pipeline state checks are done by the * firmware. * * Note: only standalone modules i.e. without a parent pipeline shall be * deleted using this IPC message. In all other cases, pipeline owning the * modules performs cleanup automatically when it is deleted. 
*/ int avs_ipc_delete_instance(struct avs_dev *adev, u16 module_id, u8 instance_id) { union avs_module_msg msg = AVS_MODULE_REQUEST(DELETE_INSTANCE); struct avs_ipc_msg request = {{0}}; int ret; msg.module_id = module_id; msg.instance_id = instance_id; request.header = msg.val; ret = avs_dsp_send_msg(adev, &request, NULL); if (ret) avs_ipc_err(adev, &request, "delete instance", ret); return ret; } /* * avs_ipc_bind - Bind two module instances * * @adev: Driver context * @module_id: Source module-type id * @instance_id: Source module instance id * @dst_module_id: Sink module-type id * @dst_instance_id: Sink module instance id * @dst_queue: Sink module pin to bind @src_queue with * @src_queue: Source module pin to bind @dst_queue with */ int avs_ipc_bind(struct avs_dev *adev, u16 module_id, u8 instance_id, u16 dst_module_id, u8 dst_instance_id, u8 dst_queue, u8 src_queue) { union avs_module_msg msg = AVS_MODULE_REQUEST(BIND); struct avs_ipc_msg request = {{0}}; int ret; msg.module_id = module_id; msg.instance_id = instance_id; msg.ext.bind_unbind.dst_module_id = dst_module_id; msg.ext.bind_unbind.dst_instance_id = dst_instance_id; msg.ext.bind_unbind.dst_queue = dst_queue; msg.ext.bind_unbind.src_queue = src_queue; request.header = msg.val; ret = avs_dsp_send_msg(adev, &request, NULL); if (ret) avs_ipc_err(adev, &request, "bind modules", ret); return ret; } /* * avs_ipc_unbind - Unbind two module instances * * @adev: Driver context * @module_id: Source module-type id * @instance_id: Source module instance id * @dst_module_id: Sink module-type id * @dst_instance_id: Sink module instance id * @dst_queue: Sink module pin to unbind @src_queue from * @src_queue: Source module pin to unbind @dst_queue from */ int avs_ipc_unbind(struct avs_dev *adev, u16 module_id, u8 instance_id, u16 dst_module_id, u8 dst_instance_id, u8 dst_queue, u8 src_queue) { union avs_module_msg msg = AVS_MODULE_REQUEST(UNBIND); struct avs_ipc_msg request = {{0}}; int ret; msg.module_id = module_id; 
msg.instance_id = instance_id; msg.ext.bind_unbind.dst_module_id = dst_module_id; msg.ext.bind_unbind.dst_instance_id = dst_instance_id; msg.ext.bind_unbind.dst_queue = dst_queue; msg.ext.bind_unbind.src_queue = src_queue; request.header = msg.val; ret = avs_dsp_send_msg(adev, &request, NULL); if (ret) avs_ipc_err(adev, &request, "unbind modules", ret); return ret; } static int __avs_ipc_set_large_config(struct avs_dev *adev, u16 module_id, u8 instance_id, u8 param_id, bool init_block, bool final_block, u8 *request_data, size_t request_size, size_t off_size) { union avs_module_msg msg = AVS_MODULE_REQUEST(LARGE_CONFIG_SET); struct avs_ipc_msg request; int ret; msg.module_id = module_id; msg.instance_id = instance_id; msg.ext.large_config.data_off_size = off_size; msg.ext.large_config.large_param_id = param_id; msg.ext.large_config.final_block = final_block; msg.ext.large_config.init_block = init_block; request.header = msg.val; request.data = request_data; request.size = request_size; ret = avs_dsp_send_msg(adev, &request, NULL); if (ret) avs_ipc_err(adev, &request, "large config set", ret); return ret; } int avs_ipc_set_large_config(struct avs_dev *adev, u16 module_id, u8 instance_id, u8 param_id, u8 *request, size_t request_size) { size_t remaining, tx_size; bool final; int ret; remaining = request_size; tx_size = min_t(size_t, AVS_MAILBOX_SIZE, remaining); final = (tx_size == remaining); /* Initial request states total payload size. */ ret = __avs_ipc_set_large_config(adev, module_id, instance_id, param_id, 1, final, request, tx_size, request_size); if (ret) return ret; remaining -= tx_size; /* Loop the rest only when payload exceeds mailbox's size. 
*/ while (remaining) { size_t offset; offset = request_size - remaining; tx_size = min_t(size_t, AVS_MAILBOX_SIZE, remaining); final = (tx_size == remaining); ret = __avs_ipc_set_large_config(adev, module_id, instance_id, param_id, 0, final, request + offset, tx_size, offset); if (ret) return ret; remaining -= tx_size; } return 0; } int avs_ipc_get_large_config(struct avs_dev *adev, u16 module_id, u8 instance_id, u8 param_id, u8 *request_data, size_t request_size, u8 **reply_data, size_t *reply_size) { union avs_module_msg msg = AVS_MODULE_REQUEST(LARGE_CONFIG_GET); struct avs_ipc_msg request; struct avs_ipc_msg reply = {{0}}; void *buf; int ret; reply.data = kzalloc(AVS_MAILBOX_SIZE, GFP_KERNEL); if (!reply.data) return -ENOMEM; msg.module_id = module_id; msg.instance_id = instance_id; msg.ext.large_config.data_off_size = request_size; msg.ext.large_config.large_param_id = param_id; /* final_block is always 0 on request. Updated by fw on reply. */ msg.ext.large_config.final_block = 0; msg.ext.large_config.init_block = 1; request.header = msg.val; request.data = request_data; request.size = request_size; reply.size = AVS_MAILBOX_SIZE; ret = avs_dsp_send_msg(adev, &request, &reply); if (ret) { avs_ipc_err(adev, &request, "large config get", ret); kfree(reply.data); return ret; } buf = krealloc(reply.data, reply.size, GFP_KERNEL); if (!buf) { kfree(reply.data); return -ENOMEM; } *reply_data = buf; *reply_size = reply.size; return 0; } int avs_ipc_set_dx(struct avs_dev *adev, u32 core_mask, bool powerup) { union avs_module_msg msg = AVS_MODULE_REQUEST(SET_DX); struct avs_ipc_msg request; struct avs_dxstate_info dx; int ret; dx.core_mask = core_mask; dx.dx_mask = powerup ? 
core_mask : 0; request.header = msg.val; request.data = &dx; request.size = sizeof(dx); ret = avs_dsp_send_pm_msg(adev, &request, NULL, true); if (ret) avs_ipc_err(adev, &request, "set dx", ret); return ret; } /* * avs_ipc_set_d0ix - Set power gating policy (entering D0IX substates) * * @enable_pg: Whether to enable or disable power gating * @streaming: Whether a stream is running when transitioning */ int avs_ipc_set_d0ix(struct avs_dev *adev, bool enable_pg, bool streaming) { union avs_module_msg msg = AVS_MODULE_REQUEST(SET_D0IX); struct avs_ipc_msg request = {{0}}; int ret; msg.ext.set_d0ix.wake = enable_pg; msg.ext.set_d0ix.streaming = streaming; request.header = msg.val; ret = avs_dsp_send_pm_msg(adev, &request, NULL, false); if (ret) avs_ipc_err(adev, &request, "set d0ix", ret); return ret; } int avs_ipc_get_fw_config(struct avs_dev *adev, struct avs_fw_cfg *cfg) { struct avs_tlv *tlv; size_t payload_size; size_t offset = 0; u8 *payload; int ret; ret = avs_ipc_get_large_config(adev, AVS_BASEFW_MOD_ID, AVS_BASEFW_INST_ID, AVS_BASEFW_FIRMWARE_CONFIG, NULL, 0, &payload, &payload_size); if (ret) return ret; /* Non-zero payload expected for FIRMWARE_CONFIG. 
*/ if (!payload_size) return -EREMOTEIO; while (offset < payload_size) { tlv = (struct avs_tlv *)(payload + offset); switch (tlv->type) { case AVS_FW_CFG_FW_VERSION: memcpy(&cfg->fw_version, tlv->value, sizeof(cfg->fw_version)); break; case AVS_FW_CFG_MEMORY_RECLAIMED: cfg->memory_reclaimed = *tlv->value; break; case AVS_FW_CFG_SLOW_CLOCK_FREQ_HZ: cfg->slow_clock_freq_hz = *tlv->value; break; case AVS_FW_CFG_FAST_CLOCK_FREQ_HZ: cfg->fast_clock_freq_hz = *tlv->value; break; case AVS_FW_CFG_ALH_SUPPORT_LEVEL: cfg->alh_support = *tlv->value; break; case AVS_FW_CFG_IPC_DL_MAILBOX_BYTES: cfg->ipc_dl_mailbox_bytes = *tlv->value; break; case AVS_FW_CFG_IPC_UL_MAILBOX_BYTES: cfg->ipc_ul_mailbox_bytes = *tlv->value; break; case AVS_FW_CFG_TRACE_LOG_BYTES: cfg->trace_log_bytes = *tlv->value; break; case AVS_FW_CFG_MAX_PPL_COUNT: cfg->max_ppl_count = *tlv->value; break; case AVS_FW_CFG_MAX_ASTATE_COUNT: cfg->max_astate_count = *tlv->value; break; case AVS_FW_CFG_MAX_MODULE_PIN_COUNT: cfg->max_module_pin_count = *tlv->value; break; case AVS_FW_CFG_MODULES_COUNT: cfg->modules_count = *tlv->value; break; case AVS_FW_CFG_MAX_MOD_INST_COUNT: cfg->max_mod_inst_count = *tlv->value; break; case AVS_FW_CFG_MAX_LL_TASKS_PER_PRI_COUNT: cfg->max_ll_tasks_per_pri_count = *tlv->value; break; case AVS_FW_CFG_LL_PRI_COUNT: cfg->ll_pri_count = *tlv->value; break; case AVS_FW_CFG_MAX_DP_TASKS_COUNT: cfg->max_dp_tasks_count = *tlv->value; break; case AVS_FW_CFG_MAX_LIBS_COUNT: cfg->max_libs_count = *tlv->value; break; case AVS_FW_CFG_XTAL_FREQ_HZ: cfg->xtal_freq_hz = *tlv->value; break; case AVS_FW_CFG_POWER_GATING_POLICY: cfg->power_gating_policy = *tlv->value; break; /* Known but not useful to us. 
*/ case AVS_FW_CFG_DMA_BUFFER_CONFIG: case AVS_FW_CFG_SCHEDULER_CONFIG: case AVS_FW_CFG_CLOCKS_CONFIG: case AVS_FW_CFG_RESERVED: break; default: dev_info(adev->dev, "Unrecognized fw param: %d\n", tlv->type); break; } offset += sizeof(*tlv) + tlv->length; } /* No longer needed, free it as it's owned by the get_large_config() caller. */ kfree(payload); return ret; } int avs_ipc_get_hw_config(struct avs_dev *adev, struct avs_hw_cfg *cfg) { struct avs_tlv *tlv; size_t payload_size; size_t size, offset = 0; u8 *payload; int ret; ret = avs_ipc_get_large_config(adev, AVS_BASEFW_MOD_ID, AVS_BASEFW_INST_ID, AVS_BASEFW_HARDWARE_CONFIG, NULL, 0, &payload, &payload_size); if (ret) return ret; /* Non-zero payload expected for HARDWARE_CONFIG. */ if (!payload_size) return -EREMOTEIO; while (offset < payload_size) { tlv = (struct avs_tlv *)(payload + offset); switch (tlv->type) { case AVS_HW_CFG_AVS_VER: cfg->avs_version = *tlv->value; break; case AVS_HW_CFG_DSP_CORES: cfg->dsp_cores = *tlv->value; break; case AVS_HW_CFG_MEM_PAGE_BYTES: cfg->mem_page_bytes = *tlv->value; break; case AVS_HW_CFG_TOTAL_PHYS_MEM_PAGES: cfg->total_phys_mem_pages = *tlv->value; break; case AVS_HW_CFG_I2S_CAPS: cfg->i2s_caps.i2s_version = tlv->value[0]; size = tlv->value[1]; cfg->i2s_caps.ctrl_count = size; if (!size) break; /* Multiply to get entire array size. 
*/ size *= sizeof(*cfg->i2s_caps.ctrl_base_addr); cfg->i2s_caps.ctrl_base_addr = devm_kmemdup(adev->dev, &tlv->value[2], size, GFP_KERNEL); if (!cfg->i2s_caps.ctrl_base_addr) { ret = -ENOMEM; goto exit; } break; case AVS_HW_CFG_GATEWAY_COUNT: cfg->gateway_count = *tlv->value; break; case AVS_HW_CFG_HP_EBB_COUNT: cfg->hp_ebb_count = *tlv->value; break; case AVS_HW_CFG_LP_EBB_COUNT: cfg->lp_ebb_count = *tlv->value; break; case AVS_HW_CFG_EBB_SIZE_BYTES: cfg->ebb_size_bytes = *tlv->value; break; case AVS_HW_CFG_GPDMA_CAPS: break; default: dev_info(adev->dev, "Unrecognized hw config: %d\n", tlv->type); break; } offset += sizeof(*tlv) + tlv->length; } exit: /* No longer needed, free it as it's owned by the get_large_config() caller. */ kfree(payload); return ret; } int avs_ipc_get_modules_info(struct avs_dev *adev, struct avs_mods_info **info) { size_t payload_size; u8 *payload; int ret; ret = avs_ipc_get_large_config(adev, AVS_BASEFW_MOD_ID, AVS_BASEFW_INST_ID, AVS_BASEFW_MODULES_INFO, NULL, 0, &payload, &payload_size); if (ret) return ret; /* Non-zero payload expected for MODULES_INFO. */ if (!payload_size) return -EREMOTEIO; *info = (struct avs_mods_info *)payload; return 0; } int avs_ipc_copier_set_sink_format(struct avs_dev *adev, u16 module_id, u8 instance_id, u32 sink_id, const struct avs_audio_format *src_fmt, const struct avs_audio_format *sink_fmt) { struct avs_copier_sink_format cpr_fmt; cpr_fmt.sink_id = sink_id; /* Firmware expects driver to resend copier's input format. 
*/ cpr_fmt.src_fmt = *src_fmt; cpr_fmt.sink_fmt = *sink_fmt; return avs_ipc_set_large_config(adev, module_id, instance_id, AVS_COPIER_SET_SINK_FORMAT, (u8 *)&cpr_fmt, sizeof(cpr_fmt)); } int avs_ipc_peakvol_set_volume(struct avs_dev *adev, u16 module_id, u8 instance_id, struct avs_volume_cfg *vol) { return avs_ipc_set_large_config(adev, module_id, instance_id, AVS_PEAKVOL_VOLUME, (u8 *)vol, sizeof(*vol)); } int avs_ipc_peakvol_get_volume(struct avs_dev *adev, u16 module_id, u8 instance_id, struct avs_volume_cfg **vols, size_t *num_vols) { size_t payload_size; u8 *payload; int ret; ret = avs_ipc_get_large_config(adev, module_id, instance_id, AVS_PEAKVOL_VOLUME, NULL, 0, &payload, &payload_size); if (ret) return ret; /* Non-zero payload expected for PEAKVOL_VOLUME. */ if (!payload_size) return -EREMOTEIO; *vols = (struct avs_volume_cfg *)payload; *num_vols = payload_size / sizeof(**vols); return 0; } #ifdef CONFIG_DEBUG_FS int avs_ipc_set_enable_logs(struct avs_dev *adev, u8 *log_info, size_t size) { return avs_ipc_set_large_config(adev, AVS_BASEFW_MOD_ID, AVS_BASEFW_INST_ID, AVS_BASEFW_ENABLE_LOGS, log_info, size); } int avs_ipc_set_system_time(struct avs_dev *adev) { struct avs_sys_time sys_time; u64 us; /* firmware expects UTC time in micro seconds */ us = ktime_to_us(ktime_get()); sys_time.val_l = us & UINT_MAX; sys_time.val_u = us >> 32; return avs_ipc_set_large_config(adev, AVS_BASEFW_MOD_ID, AVS_BASEFW_INST_ID, AVS_BASEFW_SYSTEM_TIME, (u8 *)&sys_time, sizeof(sys_time)); } int avs_ipc_probe_get_dma(struct avs_dev *adev, struct avs_probe_dma **dmas, size_t *num_dmas) { size_t payload_size; u32 module_id; u8 *payload; int ret; module_id = avs_get_module_id(adev, &AVS_PROBE_MOD_UUID); ret = avs_ipc_get_large_config(adev, module_id, AVS_PROBE_INST_ID, AVS_PROBE_INJECTION_DMA, NULL, 0, &payload, &payload_size); if (ret) return ret; *dmas = (struct avs_probe_dma *)payload; *num_dmas = payload_size / sizeof(**dmas); return 0; } int avs_ipc_probe_attach_dma(struct 
avs_dev *adev, struct avs_probe_dma *dmas, size_t num_dmas) { u32 module_id = avs_get_module_id(adev, &AVS_PROBE_MOD_UUID); return avs_ipc_set_large_config(adev, module_id, AVS_PROBE_INST_ID, AVS_PROBE_INJECTION_DMA, (u8 *)dmas, array_size(sizeof(*dmas), num_dmas)); } int avs_ipc_probe_detach_dma(struct avs_dev *adev, union avs_connector_node_id *node_ids, size_t num_node_ids) { u32 module_id = avs_get_module_id(adev, &AVS_PROBE_MOD_UUID); return avs_ipc_set_large_config(adev, module_id, AVS_PROBE_INST_ID, AVS_PROBE_INJECTION_DMA_DETACH, (u8 *)node_ids, array_size(sizeof(*node_ids), num_node_ids)); } int avs_ipc_probe_get_points(struct avs_dev *adev, struct avs_probe_point_desc **descs, size_t *num_descs) { size_t payload_size; u32 module_id; u8 *payload; int ret; module_id = avs_get_module_id(adev, &AVS_PROBE_MOD_UUID); ret = avs_ipc_get_large_config(adev, module_id, AVS_PROBE_INST_ID, AVS_PROBE_POINTS, NULL, 0, &payload, &payload_size); if (ret) return ret; *descs = (struct avs_probe_point_desc *)payload; *num_descs = payload_size / sizeof(**descs); return 0; } int avs_ipc_probe_connect_points(struct avs_dev *adev, struct avs_probe_point_desc *descs, size_t num_descs) { u32 module_id = avs_get_module_id(adev, &AVS_PROBE_MOD_UUID); return avs_ipc_set_large_config(adev, module_id, AVS_PROBE_INST_ID, AVS_PROBE_POINTS, (u8 *)descs, array_size(sizeof(*descs), num_descs)); } int avs_ipc_probe_disconnect_points(struct avs_dev *adev, union avs_probe_point_id *ids, size_t num_ids) { u32 module_id = avs_get_module_id(adev, &AVS_PROBE_MOD_UUID); return avs_ipc_set_large_config(adev, module_id, AVS_PROBE_INST_ID, AVS_PROBE_POINTS_DISCONNECT, (u8 *)ids, array_size(sizeof(*ids), num_ids)); } #endif
/* linux-master */
/* sound/soc/intel/avs/messages.c */
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <[email protected]>
//          Amadeusz Slawinski <[email protected]>
//

#include <linux/devcoredump.h>
#include <linux/slab.h>
#include "avs.h"
#include "messages.h"
#include "path.h"
#include "topology.h"

/*
 * Enable or disable firmware log gathering on the cores selected by
 * @resource_mask.  When enabling, @priorities supplies one minimum log
 * priority per set bit, consumed in ascending core order.
 */
static int __maybe_unused apl_enable_logs(struct avs_dev *adev, enum avs_log_enable enable,
					  u32 aging_period, u32 fifo_full_period,
					  unsigned long resource_mask, u32 *priorities)
{
	struct apl_log_state_info *info;
	u32 size, num_cores = adev->hw_cfg.dsp_cores;
	int ret, i;

	/* Reject masks referencing cores the DSP does not have. */
	if (fls_long(resource_mask) > num_cores)
		return -EINVAL;
	size = struct_size(info, logs_core, num_cores);
	info = kzalloc(size, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->aging_timer_period = aging_period;
	info->fifo_full_timer_period = fifo_full_period;
	info->core_mask = resource_mask;
	if (enable)
		for_each_set_bit(i, &resource_mask, num_cores) {
			info->logs_core[i].enable = enable;
			info->logs_core[i].min_priority = *priorities++;
		}
	else
		for_each_set_bit(i, &resource_mask, num_cores)
			info->logs_core[i].enable = enable;

	ret = avs_ipc_set_enable_logs(adev, (u8 *)info, size);
	kfree(info);
	if (ret)
		return AVS_IPC_RET(ret);

	return 0;
}

/*
 * LOG_BUFFER_STATUS notification handler: copy out new log data from the
 * core's SRAM ring buffer and advance the read pointer.  The ring layout
 * header lives at the start of the window; payload follows it.
 */
static int apl_log_buffer_status(struct avs_dev *adev, union avs_notify_msg *msg)
{
	struct apl_log_buffer_layout layout;
	void __iomem *addr, *buf;

	addr = avs_log_buffer_addr(adev, msg->log.core);
	if (!addr)
		return -ENXIO;

	memcpy_fromio(&layout, addr, sizeof(layout));

	if (!avs_logging_fw(adev))
		/* consume the logs regardless of consumer presence */
		goto update_read_ptr;

	buf = apl_log_payload_addr(addr);

	/* Ring may have wrapped; copy the tail segment first. */
	if (layout.read_ptr > layout.write_ptr) {
		avs_dump_fw_log(adev, buf + layout.read_ptr,
				apl_log_payload_size(adev) - layout.read_ptr);
		layout.read_ptr = 0;
	}
	avs_dump_fw_log_wakeup(adev, buf + layout.read_ptr,
			       layout.write_ptr - layout.read_ptr);

update_read_ptr:
	/* Acknowledge consumption by moving read ptr to write ptr. */
	writel(layout.write_ptr, addr);
	return 0;
}

/*
 * Poll (up to 10 ms) for new data in @core's log ring, refreshing
 * *layout on each iteration.  Returns 0 once read and write pointers
 * differ, -ETIMEDOUT otherwise.
 */
static int apl_wait_log_entry(struct avs_dev *adev, u32 core,
			      struct apl_log_buffer_layout *layout)
{
	unsigned long timeout;
	void __iomem *addr;

	addr = avs_log_buffer_addr(adev, core);
	if (!addr)
		return -ENXIO;

	timeout = jiffies + msecs_to_jiffies(10);

	do {
		memcpy_fromio(layout, addr, sizeof(*layout));
		if (layout->read_ptr != layout->write_ptr)
			return 0;
		usleep_range(500, 1000);
	} while (!time_after(jiffies, timeout));

	return -ETIMEDOUT;
}

/* reads log header and tests its type */
#define apl_is_entry_stackdump(addr) ((readl(addr) >> 30) & 0x1)

/*
 * EXCEPTION_CAUGHT handler: capture firmware registers plus, when
 * available, the stack dump streamed by firmware through the log ring,
 * and hand the result to devcoredump.
 */
static int apl_coredump(struct avs_dev *adev, union avs_notify_msg *msg)
{
	struct apl_log_buffer_layout layout;
	void __iomem *addr, *buf;
	size_t dump_size;
	u16 offset = 0;
	u8 *dump, *pos;

	dump_size = AVS_FW_REGS_SIZE + msg->ext.coredump.stack_dump_size;
	dump = vzalloc(dump_size);
	if (!dump)
		return -ENOMEM;

	memcpy_fromio(dump, avs_sram_addr(adev, AVS_FW_REGS_WINDOW), AVS_FW_REGS_SIZE);
	if (!msg->ext.coredump.stack_dump_size)
		goto exit;

	/* Dump the registers even if an external error prevents gathering the stack. */
	addr = avs_log_buffer_addr(adev, msg->ext.coredump.core_id);
	if (!addr)
		goto exit;

	buf = apl_log_payload_addr(addr);
	memcpy_fromio(&layout, addr, sizeof(layout));
	if (!apl_is_entry_stackdump(buf + layout.read_ptr)) {
		union avs_notify_msg lbs_msg = AVS_NOTIFICATION(LOG_BUFFER_STATUS);

		/*
		 * DSP awaits the remaining logs to be
		 * gathered before dumping stack
		 */
		lbs_msg.log.core = msg->ext.coredump.core_id;
		avs_log_buffer_status_locked(adev, &lbs_msg);
	}

	pos = dump + AVS_FW_REGS_SIZE;
	/* gather the stack */
	do {
		u32 count;

		if (apl_wait_log_entry(adev, msg->ext.coredump.core_id, &layout))
			break;

		/* Handle ring wrap-around: copy the tail segment first. */
		if (layout.read_ptr > layout.write_ptr) {
			count = apl_log_payload_size(adev) - layout.read_ptr;
			memcpy_fromio(pos + offset, buf + layout.read_ptr, count);
			layout.read_ptr = 0;
			offset += count;
		}
		count = layout.write_ptr - layout.read_ptr;
		memcpy_fromio(pos + offset, buf + layout.read_ptr, count);
		offset += count;

		/* update read pointer */
		writel(layout.write_ptr, addr);
	} while (offset < msg->ext.coredump.stack_dump_size);

exit:
	/* devcoredump takes ownership of @dump. */
	dev_coredumpv(adev->dev, dump, dump_size, GFP_KERNEL);

	return 0;
}

/*
 * Return true when all running gateway copiers stream through LP-area
 * buffers, i.e. when entering D0IX is acceptable.
 */
static bool apl_lp_streaming(struct avs_dev *adev)
{
	struct avs_path *path;

	spin_lock(&adev->path_list_lock);
	/* Any gateway without buffer allocated in LP area disqualifies D0IX. */
	list_for_each_entry(path, &adev->path_list, node) {
		struct avs_path_pipeline *ppl;

		list_for_each_entry(ppl, &path->ppl_list, node) {
			struct avs_path_module *mod;

			list_for_each_entry(mod, &ppl->mod_list, node) {
				struct avs_tplg_modcfg_ext *cfg;

				cfg = mod->template->cfg_ext;

				/* only copiers have gateway attributes */
				if (!guid_equal(&cfg->type, &AVS_COPIER_MOD_UUID))
					continue;
				/* non-gateway copiers do not prevent PG */
				if (cfg->copier.dma_type == INVALID_OBJECT_ID)
					continue;

				if (!mod->gtw_attrs.lp_buffer_alloc) {
					spin_unlock(&adev->path_list_lock);
					return false;
				}
			}
		}
	}
	spin_unlock(&adev->path_list_lock);

	return true;
}

/* Decide whether a D0IX transition may be scheduled for the given message. */
static bool apl_d0ix_toggle(struct avs_dev *adev, struct avs_ipc_msg *tx, bool wake)
{
	/* wake in all cases */
	if (wake)
		return true;

	/*
	 * If no pipelines are running, allow for d0ix schedule.
	 * If all gateways have lp=1, allow for d0ix schedule.
	 * If any gateway with lp=0 is allocated, abort scheduling d0ix.
	 *
	 * Note: for cAVS 1.5+ and 1.8, D0IX is LP-firmware transition,
	 * not the power-gating mechanism known from cAVS 2.0.
	 */
	return apl_lp_streaming(adev);
}

/* Request D0IX entry/exit, reporting whether any stream is active. */
static int apl_set_d0ix(struct avs_dev *adev, bool enable)
{
	bool streaming = false;
	int ret;

	if (enable)
		/* Either idle or all gateways with lp=1. */
		streaming = !list_empty(&adev->path_list);

	ret = avs_ipc_set_d0ix(adev, enable, streaming);
	return AVS_IPC_RET(ret);
}

/* Platform ops for Apollo Lake class DSPs. */
const struct avs_dsp_ops apl_dsp_ops = {
	.power = avs_dsp_core_power,
	.reset = avs_dsp_core_reset,
	.stall = avs_dsp_core_stall,
	.irq_handler = avs_dsp_irq_handler,
	.irq_thread = avs_dsp_irq_thread,
	.int_control = avs_dsp_interrupt_control,
	.load_basefw = avs_hda_load_basefw,
	.load_lib = avs_hda_load_library,
	.transfer_mods = avs_hda_transfer_modules,
	.log_buffer_offset = skl_log_buffer_offset,
	.log_buffer_status = apl_log_buffer_status,
	.coredump = apl_coredump,
	.d0ix_toggle = apl_d0ix_toggle,
	.set_d0ix = apl_set_d0ix,
	AVS_SET_ENABLE_LOGS_OP(apl)
};
/* linux-master */
/* sound/soc/intel/avs/apl.c */
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <[email protected]>
//          Amadeusz Slawinski <[email protected]>
//

#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/slab.h>
#include <sound/hdaudio_ext.h>
#include "avs.h"
#include "messages.h"
#include "registers.h"
#include "trace.h"

#define AVS_IPC_TIMEOUT_MS	300
#define AVS_D0IX_DELAY_MS	300

/*
 * Transition the DSP in or out of D0ix, tracking the current state in
 * ipc->in_d0ix. No-op when the DSP is already in the requested state.
 * On an IPC-level failure (-AVS_EIPC) further D0ix attempts are blocked
 * by bumping d0ix_disable_depth.
 */
static int avs_dsp_set_d0ix(struct avs_dev *adev, bool enable)
{
	struct avs_ipc *ipc = adev->ipc;
	int ret;

	/* Is transition required? */
	if (ipc->in_d0ix == enable)
		return 0;

	ret = avs_dsp_op(adev, set_d0ix, enable);
	if (ret) {
		/* Prevent further d0ix attempts on conscious IPC failure. */
		if (ret == -AVS_EIPC)
			atomic_inc(&ipc->d0ix_disable_depth);

		ipc->in_d0ix = false;
		return ret;
	}

	ipc->in_d0ix = enable;
	return 0;
}

/*
 * (Re)arm the delayed D0ix entry unless D0ix is currently disabled.
 * @tx is unused here; kept to match the caller's calling convention.
 */
static void avs_dsp_schedule_d0ix(struct avs_dev *adev, struct avs_ipc_msg *tx)
{
	if (atomic_read(&adev->ipc->d0ix_disable_depth))
		return;

	mod_delayed_work(system_power_efficient_wq, &adev->ipc->d0ix_work,
			 msecs_to_jiffies(AVS_D0IX_DELAY_MS));
}

/* Delayed-work callback: actually enter D0ix. */
static void avs_dsp_d0ix_work(struct work_struct *work)
{
	struct avs_ipc *ipc = container_of(work, struct avs_ipc, d0ix_work.work);

	avs_dsp_set_d0ix(to_avs_dev(ipc->dev), true);
}

/*
 * Bring the DSP back to D0i0 before sending a message, unless D0ix is
 * administratively disabled (in which case the DSP is already awake).
 * @tx is unused here; kept to match the caller's calling convention.
 */
static int avs_dsp_wake_d0i0(struct avs_dev *adev, struct avs_ipc_msg *tx)
{
	struct avs_ipc *ipc = adev->ipc;

	if (!atomic_read(&ipc->d0ix_disable_depth)) {
		cancel_delayed_work_sync(&ipc->d0ix_work);
		return avs_dsp_set_d0ix(adev, false);
	}

	return 0;
}

/*
 * Disable D0ix; calls may nest (depth counted). Only the outermost
 * disable performs the actual wake transition.
 */
int avs_dsp_disable_d0ix(struct avs_dev *adev)
{
	struct avs_ipc *ipc = adev->ipc;

	/* Prevent PG only on the first disable. */
	if (atomic_inc_return(&ipc->d0ix_disable_depth) == 1) {
		cancel_delayed_work_sync(&ipc->d0ix_work);
		return avs_dsp_set_d0ix(adev, false);
	}

	return 0;
}

/*
 * Re-enable D0ix; pairs with avs_dsp_disable_d0ix(). When the last
 * disable is dropped, schedule a delayed D0ix entry.
 */
int avs_dsp_enable_d0ix(struct avs_dev *adev)
{
	struct avs_ipc *ipc = adev->ipc;

	if (atomic_dec_and_test(&ipc->d0ix_disable_depth))
		queue_delayed_work(system_power_efficient_wq, &ipc->d0ix_work,
				   msecs_to_jiffies(AVS_D0IX_DELAY_MS));
	return 0;
}

/*
 * Full DSP recovery path: disconnect all active PCM streams, power off
 * every core and attempt a firmware reboot. Runs from the recovery
 * worker (nonatomic context). Clears ipc->recovering when done.
 */
static void avs_dsp_recovery(struct avs_dev *adev)
{
	struct avs_soc_component *acomp;
	unsigned int core_mask;
	int ret;

	mutex_lock(&adev->comp_list_mutex);
	/* disconnect all running streams */
	list_for_each_entry(acomp, &adev->comp_list, node) {
		struct snd_soc_pcm_runtime *rtd;
		struct snd_soc_card *card;

		card = acomp->base.card;
		if (!card)
			continue;

		for_each_card_rtds(card, rtd) {
			struct snd_pcm *pcm;
			int dir;

			pcm = rtd->pcm;
			if (!pcm || rtd->dai_link->no_pcm)
				continue;

			for_each_pcm_streams(dir) {
				struct snd_pcm_substream *substream;

				substream = pcm->streams[dir].substream;
				if (!substream || !substream->runtime)
					continue;

				/* No need for _irq() as we are in nonatomic context. */
				snd_pcm_stream_lock(substream);
				snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
				snd_pcm_stream_unlock(substream);
			}
		}
	}
	mutex_unlock(&adev->comp_list_mutex);

	/* forcibly shutdown all cores */
	core_mask = GENMASK(adev->hw_cfg.dsp_cores - 1, 0);
	avs_dsp_core_disable(adev, core_mask);

	/* attempt dsp reboot */
	ret = avs_dsp_boot_firmware(adev, true);
	if (ret < 0)
		dev_err(adev->dev, "dsp reboot failed: %d\n", ret);

	/* Counterpart of pm_runtime_disable() done in exception_caught. */
	pm_runtime_mark_last_busy(adev->dev);
	pm_runtime_enable(adev->dev);
	pm_request_autosuspend(adev->dev);

	atomic_set(&adev->ipc->recovering, 0);
}

/* Work callback wrapping avs_dsp_recovery(). */
static void avs_dsp_recovery_work(struct work_struct *work)
{
	struct avs_ipc *ipc = container_of(work, struct avs_ipc, recovery_work);

	avs_dsp_recovery(to_avs_dev(ipc->dev));
}

/*
 * Handle an EXCEPTION_CAUGHT notification (or an IPC timeout treated as
 * one): mark IPC dead, collect a coredump and schedule recovery.
 * Guarded against concurrent invocation via ipc->recovering.
 */
static void avs_dsp_exception_caught(struct avs_dev *adev, union avs_notify_msg *msg)
{
	struct avs_ipc *ipc = adev->ipc;

	/* Account for the double-exception case. */
	ipc->ready = false;

	if (!atomic_add_unless(&ipc->recovering, 1, 1)) {
		dev_err(adev->dev, "dsp recovery is already in progress\n");
		return;
	}

	dev_crit(adev->dev, "communication severed, rebooting dsp..\n");

	cancel_delayed_work_sync(&ipc->d0ix_work);
	ipc->in_d0ix = false;
	/* Re-enabled on recovery completion. */
	pm_runtime_disable(adev->dev);

	/* Process received notification. */
	avs_dsp_op(adev, coredump, msg);
	schedule_work(&ipc->recovery_work);
}

/*
 * Copy a solicited reply into ipc->rx. Caller holds ipc->rx_lock (see
 * avs_dsp_process_response()).
 */
static void avs_dsp_receive_rx(struct avs_dev *adev, u64 header)
{
	struct avs_ipc *ipc = adev->ipc;
	union avs_reply_msg msg = AVS_MSG(header);
	u64 reg;

	reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW));
	trace_avs_ipc_reply_msg(header, reg);

	ipc->rx.header = header;
	/* Abort copying payload if request processing was unsuccessful. */
	if (!msg.status) {
		/* update size in case of LARGE_CONFIG_GET */
		if (msg.msg_target == AVS_MOD_MSG && msg.global_msg_type == AVS_MOD_LARGE_CONFIG_GET)
			ipc->rx.size = min_t(u32, AVS_MAILBOX_SIZE,
					     msg.ext.large_config.data_off_size);

		memcpy_fromio(ipc->rx.data, avs_uplink_addr(adev), ipc->rx.size);
		trace_avs_msg_payload(ipc->rx.data, ipc->rx.size);
	}
}

/*
 * Handle an unsolicited notification: size and copy its payload from
 * the uplink window, then dispatch per notification type.
 */
static void avs_dsp_process_notification(struct avs_dev *adev, u64 header)
{
	struct avs_notify_mod_data mod_data;
	union avs_notify_msg msg = AVS_MSG(header);
	size_t data_size = 0;
	void *data = NULL;
	u64 reg;

	reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW));
	trace_avs_ipc_notify_msg(header, reg);

	/* Ignore spurious notifications until handshake is established. */
	if (!adev->ipc->ready && msg.notify_msg_type != AVS_NOTIFY_FW_READY) {
		dev_dbg(adev->dev, "FW not ready, skip notification: 0x%08x\n", msg.primary);
		return;
	}

	/* Calculate notification payload size. */
	switch (msg.notify_msg_type) {
	case AVS_NOTIFY_FW_READY:
		break;

	case AVS_NOTIFY_PHRASE_DETECTED:
		data_size = sizeof(struct avs_notify_voice_data);
		break;

	case AVS_NOTIFY_RESOURCE_EVENT:
		data_size = sizeof(struct avs_notify_res_data);
		break;

	case AVS_NOTIFY_LOG_BUFFER_STATUS:
	case AVS_NOTIFY_EXCEPTION_CAUGHT:
		break;

	case AVS_NOTIFY_MODULE_EVENT:
		/* To know the total payload size, header needs to be read first. */
		memcpy_fromio(&mod_data, avs_uplink_addr(adev), sizeof(mod_data));
		data_size = sizeof(mod_data) + mod_data.data_size;
		break;

	default:
		dev_info(adev->dev, "unknown notification: 0x%08x\n", msg.primary);
		break;
	}

	if (data_size) {
		data = kmalloc(data_size, GFP_KERNEL);
		if (!data)
			return;

		memcpy_fromio(data, avs_uplink_addr(adev), data_size);
		trace_avs_msg_payload(data, data_size);
	}

	/* Perform notification-specific operations. */
	switch (msg.notify_msg_type) {
	case AVS_NOTIFY_FW_READY:
		dev_dbg(adev->dev, "FW READY 0x%08x\n", msg.primary);
		adev->ipc->ready = true;
		complete(&adev->fw_ready);
		break;

	case AVS_NOTIFY_LOG_BUFFER_STATUS:
		avs_log_buffer_status_locked(adev, &msg);
		break;

	case AVS_NOTIFY_EXCEPTION_CAUGHT:
		avs_dsp_exception_caught(adev, &msg);
		break;

	default:
		break;
	}

	kfree(data);
}

/* Top-level RX dispatch for both replies and notifications. */
void avs_dsp_process_response(struct avs_dev *adev, u64 header)
{
	struct avs_ipc *ipc = adev->ipc;

	/*
	 * Response may either be solicited - a reply for a request that has
	 * been sent beforehand - or unsolicited (notification).
	 */
	if (avs_msg_is_reply(header)) {
		/* Response processing is invoked from IRQ thread. */
		spin_lock_irq(&ipc->rx_lock);
		avs_dsp_receive_rx(adev, header);
		ipc->rx_completed = true;
		spin_unlock_irq(&ipc->rx_lock);
	} else {
		avs_dsp_process_notification(adev, header);
	}

	complete(&ipc->busy_completion);
}

/*
 * Hard-IRQ half: ack DONE (request accepted by DSP) inline, defer BUSY
 * (new message from DSP) to the IRQ thread with the interrupt masked.
 */
irqreturn_t avs_dsp_irq_handler(int irq, void *dev_id)
{
	struct avs_dev *adev = dev_id;
	struct avs_ipc *ipc = adev->ipc;
	u32 adspis, hipc_rsp, hipc_ack;
	irqreturn_t ret = IRQ_NONE;

	/* UINT_MAX read indicates the device is inaccessible. */
	adspis = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPIS);
	if (adspis == UINT_MAX || !(adspis & AVS_ADSP_ADSPIS_IPC))
		return ret;

	hipc_ack = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCIE);
	hipc_rsp = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT);

	/* DSP acked host's request */
	if (hipc_ack & SKL_ADSP_HIPCIE_DONE) {
		/*
		 * As an extra precaution, mask done interrupt. Code executed
		 * due to complete() found below does not assume any masking.
		 */
		snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL, AVS_ADSP_HIPCCTL_DONE, 0);

		complete(&ipc->done_completion);

		/* tell DSP it has our attention */
		snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCIE, SKL_ADSP_HIPCIE_DONE,
				      SKL_ADSP_HIPCIE_DONE);
		/* unmask done interrupt */
		snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL, AVS_ADSP_HIPCCTL_DONE,
				      AVS_ADSP_HIPCCTL_DONE);
		ret = IRQ_HANDLED;
	}

	/* DSP sent new response to process */
	if (hipc_rsp & SKL_ADSP_HIPCT_BUSY) {
		/* mask busy interrupt */
		snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL, AVS_ADSP_HIPCCTL_BUSY, 0);
		ret = IRQ_WAKE_THREAD;
	}

	return ret;
}

/*
 * Threaded half: read and process the DSP's message, then ack BUSY and
 * unmask the interrupt masked by the hard-IRQ half.
 */
irqreturn_t avs_dsp_irq_thread(int irq, void *dev_id)
{
	struct avs_dev *adev = dev_id;
	union avs_reply_msg msg;
	u32 hipct, hipcte;

	hipct = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT);
	hipcte = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCTE);

	/* ensure DSP sent new response to process */
	if (!(hipct & SKL_ADSP_HIPCT_BUSY))
		return IRQ_NONE;

	msg.primary = hipct;
	msg.ext.val = hipcte;
	avs_dsp_process_response(adev, msg.val);

	/* tell DSP we accepted its message */
	snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCT, SKL_ADSP_HIPCT_BUSY, SKL_ADSP_HIPCT_BUSY);
	/* unmask busy interrupt */
	snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL, AVS_ADSP_HIPCCTL_BUSY,
			      AVS_ADSP_HIPCCTL_BUSY);

	return IRQ_HANDLED;
}

/* True when the DSP still flags BUSY, i.e. a response is pending. */
static bool avs_ipc_is_busy(struct avs_ipc *ipc)
{
	struct avs_dev *adev = to_avs_dev(ipc->dev);
	u32 hipc_rsp;

	hipc_rsp = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT);
	return hipc_rsp & SKL_ADSP_HIPCT_BUSY;
}

/*
 * Wait for the reply to the in-flight request. busy_completion may be
 * completed by a notification instead of the reply, hence the retry
 * loop keyed on rx_completed.
 *
 * Returns 0 on success, -EPERM when IPC died meanwhile, -ETIMEDOUT on
 * no reply within @timeout ms (or retry exhaustion).
 */
static int avs_ipc_wait_busy_completion(struct avs_ipc *ipc, int timeout)
{
	u32 repeats_left = 128; /* to avoid infinite looping */
	int ret;

again:
	ret = wait_for_completion_timeout(&ipc->busy_completion, msecs_to_jiffies(timeout));

	/* DSP could be unresponsive at this point. */
	if (!ipc->ready)
		return -EPERM;

	if (!ret) {
		if (!avs_ipc_is_busy(ipc))
			return -ETIMEDOUT;
		/*
		 * Firmware did its job, either notification or reply
		 * has been received - now wait until it's processed.
		 */
		wait_for_completion_killable(&ipc->busy_completion);
	}

	/* Ongoing notification's bottom-half may cause early wakeup */
	spin_lock(&ipc->rx_lock);
	if (!ipc->rx_completed) {
		if (repeats_left) {
			/* Reply delayed due to notification. */
			repeats_left--;
			reinit_completion(&ipc->busy_completion);
			spin_unlock(&ipc->rx_lock);
			goto again;
		}

		spin_unlock(&ipc->rx_lock);
		return -ETIMEDOUT;
	}

	spin_unlock(&ipc->rx_lock);
	return 0;
}

/* Prepare RX bookkeeping for a new request. Caller holds rx_lock. */
static void avs_ipc_msg_init(struct avs_ipc *ipc, struct avs_ipc_msg *reply)
{
	lockdep_assert_held(&ipc->rx_lock);

	ipc->rx.header = 0;
	ipc->rx.size = reply ? reply->size : 0;
	ipc->rx_completed = false;

	reinit_completion(&ipc->done_completion);
	reinit_completion(&ipc->busy_completion);
}

/*
 * Write a request to the hardware: payload to the downlink window
 * (skipped when @read_fwregs is false, e.g. before SRAM windows are
 * configured), then the 64-bit header split across HIPCIE/HIPCI.
 */
static void avs_dsp_send_tx(struct avs_dev *adev, struct avs_ipc_msg *tx, bool read_fwregs)
{
	u64 reg = ULONG_MAX;

	tx->header |= SKL_ADSP_HIPCI_BUSY;
	if (read_fwregs)
		reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW));

	trace_avs_request(tx, reg);

	if (tx->size)
		memcpy_toio(avs_downlink_addr(adev), tx->data, tx->size);
	snd_hdac_adsp_writel(adev, SKL_ADSP_REG_HIPCIE, tx->header >> 32);
	snd_hdac_adsp_writel(adev, SKL_ADSP_REG_HIPCI, tx->header & UINT_MAX);
}

/*
 * Serialized request/reply exchange. A timeout is escalated to the
 * exception path (coredump + recovery). Returns the firmware status on
 * success or a negative errno.
 */
static int avs_dsp_do_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
			       struct avs_ipc_msg *reply, int timeout)
{
	struct avs_ipc *ipc = adev->ipc;
	int ret;

	if (!ipc->ready)
		return -EPERM;

	mutex_lock(&ipc->msg_mutex);

	spin_lock(&ipc->rx_lock);
	avs_ipc_msg_init(ipc, reply);
	avs_dsp_send_tx(adev, request, true);
	spin_unlock(&ipc->rx_lock);

	ret = avs_ipc_wait_busy_completion(ipc, timeout);
	if (ret) {
		if (ret == -ETIMEDOUT) {
			union avs_notify_msg msg = AVS_NOTIFICATION(EXCEPTION_CAUGHT);

			/* Same treatment as on exception, just stack_dump=0. */
			avs_dsp_exception_caught(adev, &msg);
		}
		goto exit;
	}

	ret = ipc->rx.rsp.status;
	if (reply) {
		reply->header = ipc->rx.header;
		reply->size = ipc->rx.size;
		if (reply->data && ipc->rx.size)
			memcpy(reply->data, ipc->rx.data, reply->size);
	}

exit:
	mutex_unlock(&ipc->msg_mutex);
	return ret;
}

/* Optionally wake from D0ix, send, then optionally reschedule D0ix. */
static int avs_dsp_send_msg_sequence(struct avs_dev *adev, struct avs_ipc_msg *request,
				     struct avs_ipc_msg *reply, int timeout, bool wake_d0i0,
				     bool schedule_d0ix)
{
	int ret;

	trace_avs_d0ix("wake", wake_d0i0, request->header);
	if (wake_d0i0) {
		ret = avs_dsp_wake_d0i0(adev, request);
		if (ret)
			return ret;
	}

	ret = avs_dsp_do_send_msg(adev, request, reply, timeout);
	if (ret)
		return ret;

	trace_avs_d0ix("schedule", schedule_d0ix, request->header);
	if (schedule_d0ix)
		avs_dsp_schedule_d0ix(adev, request);

	return 0;
}

/* Send with platform-specific D0ix policy and an explicit timeout. */
int avs_dsp_send_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request,
			     struct avs_ipc_msg *reply, int timeout)
{
	bool wake_d0i0 = avs_dsp_op(adev, d0ix_toggle, request, true);
	bool schedule_d0ix = avs_dsp_op(adev, d0ix_toggle, request, false);

	return avs_dsp_send_msg_sequence(adev, request, reply, timeout, wake_d0i0, schedule_d0ix);
}

/* Send with platform-specific D0ix policy and the default timeout. */
int avs_dsp_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
		     struct avs_ipc_msg *reply)
{
	return avs_dsp_send_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms);
}

/* PM variant: caller decides on wake, D0ix is never rescheduled. */
int avs_dsp_send_pm_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request,
				struct avs_ipc_msg *reply, int timeout, bool wake_d0i0)
{
	return avs_dsp_send_msg_sequence(adev, request, reply, timeout, wake_d0i0, false);
}

/* PM variant with the default timeout. */
int avs_dsp_send_pm_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
			struct avs_ipc_msg *reply, bool wake_d0i0)
{
	return avs_dsp_send_pm_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms,
					   wake_d0i0);
}

/*
 * Send a message to ROM code while the main core is stalled; awaits the
 * DONE interrupt rather than a reply payload.
 */
static int avs_dsp_do_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout)
{
	struct avs_ipc *ipc = adev->ipc;
	int ret;

	mutex_lock(&ipc->msg_mutex);

	spin_lock(&ipc->rx_lock);
	avs_ipc_msg_init(ipc, NULL);
	/*
	 * with hw still stalled, memory windows may not be
	 * configured properly so avoid accessing SRAM
	 */
	avs_dsp_send_tx(adev, request, false);
	spin_unlock(&ipc->rx_lock);

	/* ROM messages must be sent before main core is unstalled */
	ret = avs_dsp_op(adev, stall, AVS_MAIN_CORE_MASK, false);
	if (!ret) {
		ret = wait_for_completion_timeout(&ipc->done_completion,
						  msecs_to_jiffies(timeout));
		ret = ret ? 0 : -ETIMEDOUT;
	}

	mutex_unlock(&ipc->msg_mutex);

	return ret;
}

/* ROM-message wrapper with an explicit timeout. */
int avs_dsp_send_rom_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout)
{
	return avs_dsp_do_send_rom_msg(adev, request, timeout);
}

/* ROM-message wrapper with the default timeout. */
int avs_dsp_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request)
{
	return avs_dsp_send_rom_msg_timeout(adev, request, adev->ipc->default_timeout_ms);
}

/* Globally enable or disable IPC interrupts (ADSPIC + HIPCCTL bits). */
void avs_dsp_interrupt_control(struct avs_dev *adev, bool enable)
{
	u32 value, mask;

	/*
	 * No particular bit setting order. All of these are required
	 * to have a functional SW <-> FW communication.
	 */
	value = enable ? AVS_ADSP_ADSPIC_IPC : 0;
	snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_IPC, value);

	mask = AVS_ADSP_HIPCCTL_DONE | AVS_ADSP_HIPCCTL_BUSY;
	value = enable ? mask : 0;
	snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL, mask, value);
}

/*
 * One-time initialization of the IPC context: RX mailbox buffer,
 * work items, completions and locks. Returns -ENOMEM on allocation
 * failure, 0 otherwise.
 */
int avs_ipc_init(struct avs_ipc *ipc, struct device *dev)
{
	ipc->rx.data = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL);
	if (!ipc->rx.data)
		return -ENOMEM;

	ipc->dev = dev;
	ipc->ready = false;
	ipc->default_timeout_ms = AVS_IPC_TIMEOUT_MS;
	INIT_WORK(&ipc->recovery_work, avs_dsp_recovery_work);
	INIT_DELAYED_WORK(&ipc->d0ix_work, avs_dsp_d0ix_work);
	init_completion(&ipc->done_completion);
	init_completion(&ipc->busy_completion);
	spin_lock_init(&ipc->rx_lock);
	mutex_init(&ipc->msg_mutex);

	return 0;
}

/* Mark IPC unusable and quiesce all pending background work. */
void avs_ipc_block(struct avs_ipc *ipc)
{
	ipc->ready = false;
	cancel_work_sync(&ipc->recovery_work);
	cancel_delayed_work_sync(&ipc->d0ix_work);
	ipc->in_d0ix = false;
}
linux-master
sound/soc/intel/avs/ipc.c
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Author: Cezary Rojewski <[email protected]>
//

#include <linux/pci.h>
#include <sound/hda_register.h>
#include <sound/hdaudio_ext.h>
#include "cldma.h"
#include "registers.h"

/* Stream Registers */
#define AZX_CL_SD_BASE			0x80
#define AZX_SD_CTL_STRM_MASK		GENMASK(23, 20)
#define AZX_SD_CTL_STRM(s)		(((s)->stream_tag << 20) & AZX_SD_CTL_STRM_MASK)
#define AZX_SD_BDLPL_BDLPLBA_MASK	GENMASK(31, 7)
#define AZX_SD_BDLPL_BDLPLBA(lb)	((lb) & AZX_SD_BDLPL_BDLPLBA_MASK)

/* Software Position Based FIFO Capability Registers */
#define AZX_CL_SPBFCS			0x20
#define AZX_REG_CL_SPBFCTL		(AZX_CL_SPBFCS + 0x4)
#define AZX_REG_CL_SD_SPIB		(AZX_CL_SPBFCS + 0x8)

#define AVS_CL_OP_INTERVAL_US		3
#define AVS_CL_OP_TIMEOUT_US		300
#define AVS_CL_IOC_TIMEOUT_MS		300
#define AVS_CL_STREAM_INDEX		0

/* State of the code-loader DMA stream used for firmware transfers. */
struct hda_cldma {
	struct device *dev;
	struct hdac_bus *bus;
	void __iomem *dsp_ba;

	unsigned int buffer_size;
	unsigned int num_periods;
	unsigned int stream_tag;
	void __iomem *sd_addr;

	struct snd_dma_buffer dmab_data;
	struct snd_dma_buffer dmab_bdl;

	struct delayed_work memcpy_work;
	struct completion completion;

	/* runtime */
	void *position;		/* next source byte to copy */
	unsigned int remaining;	/* source bytes left to transfer */
	unsigned int sd_status;	/* SD_STS captured by the IRQ handler */
};

static void cldma_memcpy_work(struct work_struct *work);

/*
 * Singleton code-loader instance; non-static, so presumably referenced
 * externally via cldma.h - TODO confirm against the header.
 */
struct hda_cldma code_loader = {
	.stream_tag = AVS_CL_STREAM_INDEX + 1,
	.memcpy_work = __DELAYED_WORK_INITIALIZER(code_loader.memcpy_work, cldma_memcpy_work, 0),
	.completion = COMPLETION_INITIALIZER(code_loader.completion),
};

/*
 * Copy the next chunk of source data into the cyclic DMA buffer,
 * wrapping at buffer_size, and advance the SPIB register so hardware
 * knows how far it may read.
 */
void hda_cldma_fill(struct hda_cldma *cl)
{
	unsigned int size, offset;

	if (cl->remaining > cl->buffer_size)
		size = cl->buffer_size;
	else
		size = cl->remaining;

	offset = snd_hdac_stream_readl(cl, CL_SD_SPIB);
	if (offset + size > cl->buffer_size) {
		/* wrap-around: copy the tail first, then restart at 0 */
		unsigned int ss;

		ss = cl->buffer_size - offset;
		memcpy(cl->dmab_data.area + offset, cl->position, ss);
		offset = 0;
		size -= ss;
		cl->position += ss;
		cl->remaining -= ss;
	}

	memcpy(cl->dmab_data.area + offset, cl->position, size);
	cl->position += size;
	cl->remaining -= size;

	snd_hdac_stream_writel(cl, CL_SD_SPIB, offset + size);
}

/*
 * Worker driving the transfer: start DMA, then refill the buffer after
 * each IOC interrupt until all data is out or an error/timeout occurs.
 */
static void cldma_memcpy_work(struct work_struct *work)
{
	struct hda_cldma *cl = container_of(work, struct hda_cldma, memcpy_work.work);
	int ret;

	ret = hda_cldma_start(cl);
	if (ret < 0) {
		dev_err(cl->dev, "cldma set RUN failed: %d\n", ret);
		return;
	}

	while (true) {
		ret = wait_for_completion_timeout(&cl->completion,
						  msecs_to_jiffies(AVS_CL_IOC_TIMEOUT_MS));
		if (!ret) {
			dev_err(cl->dev, "cldma IOC timeout\n");
			break;
		}

		if (!(cl->sd_status & SD_INT_COMPLETE)) {
			dev_err(cl->dev, "cldma transfer error, SD status: 0x%08x\n",
				cl->sd_status);
			break;
		}

		if (!cl->remaining)
			break;

		reinit_completion(&cl->completion);
		hda_cldma_fill(cl);
		/* enable CLDMA interrupt */
		snd_hdac_adsp_updatel(cl, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_CLDMA,
				      AVS_ADSP_ADSPIC_CLDMA);
	}
}

/*
 * Kick off a transfer of the data set via hda_cldma_set_data() after
 * @start_delay jiffies; no-op when there is nothing to send.
 */
void hda_cldma_transfer(struct hda_cldma *cl, unsigned long start_delay)
{
	if (!cl->remaining)
		return;

	reinit_completion(&cl->completion);
	/* fill buffer with the first chunk before scheduling run */
	hda_cldma_fill(cl);

	schedule_delayed_work(&cl->memcpy_work, start_delay);
}

/* Enable CLDMA interrupts and set RUN; polls until the engine starts. */
int hda_cldma_start(struct hda_cldma *cl)
{
	unsigned int reg;

	/* enable interrupts */
	snd_hdac_adsp_updatel(cl, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_CLDMA,
			      AVS_ADSP_ADSPIC_CLDMA);
	snd_hdac_stream_updateb(cl, SD_CTL, SD_INT_MASK | SD_CTL_DMA_START,
				SD_INT_MASK | SD_CTL_DMA_START);

	/* await DMA engine start */
	return snd_hdac_stream_readb_poll(cl, SD_CTL, reg, reg & SD_CTL_DMA_START,
					  AVS_CL_OP_INTERVAL_US, AVS_CL_OP_TIMEOUT_US);
}

/*
 * Disable interrupts and clear RUN; polls until the engine stops and
 * cancels any in-flight transfer worker.
 */
int hda_cldma_stop(struct hda_cldma *cl)
{
	unsigned int reg;
	int ret;

	/* disable interrupts */
	snd_hdac_adsp_updatel(cl, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_CLDMA, 0);
	snd_hdac_stream_updateb(cl, SD_CTL, SD_INT_MASK | SD_CTL_DMA_START, 0);

	/* await DMA engine stop */
	ret = snd_hdac_stream_readb_poll(cl, SD_CTL, reg, !(reg & SD_CTL_DMA_START),
					 AVS_CL_OP_INTERVAL_US, AVS_CL_OP_TIMEOUT_US);
	cancel_delayed_work_sync(&cl->memcpy_work);

	return ret;
}

/*
 * Stop the stream, then pulse SRST (enter and exit stream reset),
 * polling for each state change to take effect.
 */
int hda_cldma_reset(struct hda_cldma *cl)
{
	unsigned int reg;
	int ret;

	ret = hda_cldma_stop(cl);
	if (ret < 0) {
		dev_err(cl->dev, "cldma stop failed: %d\n", ret);
		return ret;
	}

	snd_hdac_stream_updateb(cl, SD_CTL, SD_CTL_STREAM_RESET, SD_CTL_STREAM_RESET);
	ret = snd_hdac_stream_readb_poll(cl, SD_CTL, reg, (reg & SD_CTL_STREAM_RESET),
					 AVS_CL_OP_INTERVAL_US, AVS_CL_OP_TIMEOUT_US);
	if (ret < 0) {
		dev_err(cl->dev, "cldma set SRST failed: %d\n", ret);
		return ret;
	}

	snd_hdac_stream_updateb(cl, SD_CTL, SD_CTL_STREAM_RESET, 0);
	ret = snd_hdac_stream_readb_poll(cl, SD_CTL, reg, !(reg & SD_CTL_STREAM_RESET),
					 AVS_CL_OP_INTERVAL_US, AVS_CL_OP_TIMEOUT_US);
	if (ret < 0) {
		dev_err(cl->dev, "cldma unset SRST failed: %d\n", ret);
		return ret;
	}

	return 0;
}

/* Record the source buffer for the next hda_cldma_transfer(). */
void hda_cldma_set_data(struct hda_cldma *cl, void *data, unsigned int size)
{
	/* setup runtime */
	cl->position = data;
	cl->remaining = size;
}

/*
 * Build the Buffer Descriptor List: one 16-byte entry per chunk of the
 * (possibly scattered) data buffer; IOC flag set on the final entry
 * only, so a single interrupt fires per buffer pass.
 */
static void cldma_setup_bdle(struct hda_cldma *cl, u32 bdle_size)
{
	struct snd_dma_buffer *dmab = &cl->dmab_data;
	__le32 *bdl = (__le32 *)cl->dmab_bdl.area;
	int remaining = cl->buffer_size;
	int offset = 0;

	cl->num_periods = 0;
	while (remaining > 0) {
		phys_addr_t addr;
		int chunk;

		addr = snd_sgbuf_get_addr(dmab, offset);
		bdl[0] = cpu_to_le32(lower_32_bits(addr));
		bdl[1] = cpu_to_le32(upper_32_bits(addr));
		chunk = snd_sgbuf_get_chunk_size(dmab, offset, bdle_size);
		bdl[2] = cpu_to_le32(chunk);

		remaining -= chunk;
		/* set IOC only for the last entry */
		bdl[3] = (remaining > 0) ? 0 : cpu_to_le32(0x01);

		bdl += 4;
		offset += chunk;
		cl->num_periods++;
	}
}

/*
 * Program the stream descriptor: BDL address, cyclic buffer length,
 * last valid index, stream tag, and enable the SPIB capability.
 */
void hda_cldma_setup(struct hda_cldma *cl)
{
	dma_addr_t bdl_addr = cl->dmab_bdl.addr;

	cldma_setup_bdle(cl, cl->buffer_size / 2);

	snd_hdac_stream_writel(cl, SD_BDLPL, AZX_SD_BDLPL_BDLPLBA(lower_32_bits(bdl_addr)));
	snd_hdac_stream_writel(cl, SD_BDLPU, upper_32_bits(bdl_addr));

	snd_hdac_stream_writel(cl, SD_CBL, cl->buffer_size);
	snd_hdac_stream_writeb(cl, SD_LVI, cl->num_periods - 1);

	snd_hdac_stream_updatel(cl, SD_CTL, AZX_SD_CTL_STRM_MASK, AZX_SD_CTL_STRM(cl));
	/* enable spib */
	snd_hdac_stream_writel(cl, CL_SPBFCTL, 1);
}

/*
 * CLDMA interrupt: capture stream status for the worker, mask the
 * interrupt (worker re-enables it per refill) and wake the worker.
 */
static irqreturn_t cldma_irq_handler(int irq, void *dev_id)
{
	struct hda_cldma *cl = dev_id;
	u32 adspis;

	/* UINT_MAX read indicates the device is inaccessible. */
	adspis = snd_hdac_adsp_readl(cl, AVS_ADSP_REG_ADSPIS);
	if (adspis == UINT_MAX)
		return IRQ_NONE;
	if (!(adspis & AVS_ADSP_ADSPIS_CLDMA))
		return IRQ_NONE;

	cl->sd_status = snd_hdac_stream_readb(cl, SD_STS);
	dev_warn(cl->dev, "%s sd_status: 0x%08x\n", __func__, cl->sd_status);

	/* disable CLDMA interrupt */
	snd_hdac_adsp_updatel(cl, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIS_CLDMA, 0);

	complete(&cl->completion);

	return IRQ_HANDLED;
}

/*
 * Allocate the data and BDL DMA buffers and request the CLDMA IRQ.
 * On failure, frees whatever was already allocated (goto cleanup).
 */
int hda_cldma_init(struct hda_cldma *cl, struct hdac_bus *bus, void __iomem *dsp_ba,
		   unsigned int buffer_size)
{
	struct pci_dev *pci = to_pci_dev(bus->dev);
	int ret;

	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, bus->dev, buffer_size, &cl->dmab_data);
	if (ret < 0)
		return ret;

	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, bus->dev, BDL_SIZE, &cl->dmab_bdl);
	if (ret < 0)
		goto alloc_err;

	cl->dev = bus->dev;
	cl->bus = bus;
	cl->dsp_ba = dsp_ba;
	cl->buffer_size = buffer_size;
	cl->sd_addr = dsp_ba + AZX_CL_SD_BASE;

	ret = pci_request_irq(pci, 0, cldma_irq_handler, NULL, cl, "CLDMA");
	if (ret < 0) {
		dev_err(cl->dev, "Failed to request CLDMA IRQ handler: %d\n", ret);
		goto req_err;
	}

	return 0;

req_err:
	snd_dma_free_pages(&cl->dmab_bdl);
alloc_err:
	snd_dma_free_pages(&cl->dmab_data);

	return ret;
}

/* Release the IRQ and both DMA buffers acquired by hda_cldma_init(). */
void hda_cldma_free(struct hda_cldma *cl)
{
	struct pci_dev *pci = to_pci_dev(cl->dev);

	pci_free_irq(pci, 0, cl);
	snd_dma_free_pages(&cl->dmab_data);
	snd_dma_free_pages(&cl->dmab_bdl);
}
linux-master
sound/soc/intel/avs/cldma.c
// SPDX-License-Identifier: GPL-2.0-only // // Copyright(c) 2021-2022 Intel Corporation. All rights reserved. // // Authors: Cezary Rojewski <[email protected]> // Amadeusz Slawinski <[email protected]> // #include <linux/module.h> #include <linux/platform_device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-acpi.h> #include "../../../codecs/nau8825.h" #define SKL_SSM_CODEC_DAI "ssm4567-hifi" static struct snd_soc_codec_conf card_codec_conf[] = { { .dlc = COMP_CODEC_CONF("i2c-INT343B:00"), .name_prefix = "Left", }, { .dlc = COMP_CODEC_CONF("i2c-INT343B:01"), .name_prefix = "Right", }, }; static const struct snd_kcontrol_new card_controls[] = { SOC_DAPM_PIN_SWITCH("Left Speaker"), SOC_DAPM_PIN_SWITCH("Right Speaker"), }; static const struct snd_soc_dapm_widget card_widgets[] = { SND_SOC_DAPM_SPK("Left Speaker", NULL), SND_SOC_DAPM_SPK("Right Speaker", NULL), SND_SOC_DAPM_SPK("DP1", NULL), SND_SOC_DAPM_SPK("DP2", NULL), }; static const struct snd_soc_dapm_route card_base_routes[] = { {"Left Speaker", NULL, "Left OUT"}, {"Right Speaker", NULL, "Right OUT"}, }; static int avs_ssm4567_codec_init(struct snd_soc_pcm_runtime *runtime) { int ret; /* Slot 1 for left */ ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_codec(runtime, 0), 0x01, 0x01, 2, 48); if (ret < 0) return ret; /* Slot 2 for right */ ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_codec(runtime, 1), 0x02, 0x02, 2, 48); if (ret < 0) return ret; return 0; } static int avs_ssm4567_be_fixup(struct snd_soc_pcm_runtime *runrime, struct snd_pcm_hw_params *params) { struct snd_interval *rate, *channels; struct snd_mask *fmt; rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); /* The ADSP will convert the FE rate to 48k, stereo */ rate->min = rate->max = 48000; channels->min = channels->max = 2; /* set SSP0 to 24 bit */ 
snd_mask_none(fmt); snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE); return 0; } static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, struct snd_soc_dai_link **dai_link) { struct snd_soc_dai_link_component *platform; struct snd_soc_dai_link *dl; dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL); platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); if (!dl || !platform) return -ENOMEM; platform->name = platform_name; dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port); dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL); dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs) * 2, GFP_KERNEL); if (!dl->name || !dl->cpus || !dl->codecs) return -ENOMEM; dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port); dl->codecs[0].name = devm_kasprintf(dev, GFP_KERNEL, "i2c-INT343B:00"); dl->codecs[0].dai_name = devm_kasprintf(dev, GFP_KERNEL, "ssm4567-hifi"); dl->codecs[1].name = devm_kasprintf(dev, GFP_KERNEL, "i2c-INT343B:01"); dl->codecs[1].dai_name = devm_kasprintf(dev, GFP_KERNEL, "ssm4567-hifi"); if (!dl->cpus->dai_name || !dl->codecs[0].name || !dl->codecs[0].dai_name || !dl->codecs[1].name || !dl->codecs[1].dai_name) return -ENOMEM; dl->num_cpus = 1; dl->num_codecs = 2; dl->platforms = platform; dl->num_platforms = 1; dl->id = 0; dl->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_IB_NF | SND_SOC_DAIFMT_CBS_CFS; dl->init = avs_ssm4567_codec_init; dl->be_hw_params_fixup = avs_ssm4567_be_fixup; dl->nonatomic = 1; dl->no_pcm = 1; dl->dpcm_capture = 1; dl->dpcm_playback = 1; dl->ignore_pmdown_time = 1; *dai_link = dl; return 0; } static int avs_ssm4567_probe(struct platform_device *pdev) { struct snd_soc_dai_link *dai_link; struct snd_soc_acpi_mach *mach; struct snd_soc_card *card; struct device *dev = &pdev->dev; const char *pname; int ssp_port, ret; mach = dev_get_platdata(dev); pname = mach->mach_params.platform; ssp_port = __ffs(mach->mach_params.i2s_link_mask); ret = 
avs_create_dai_link(dev, pname, ssp_port, &dai_link); if (ret) { dev_err(dev, "Failed to create dai link: %d", ret); return ret; } card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); if (!card) return -ENOMEM; card->name = "avs_ssm4567-adi"; card->dev = dev; card->owner = THIS_MODULE; card->dai_link = dai_link; card->num_links = 1; card->codec_conf = card_codec_conf; card->num_configs = ARRAY_SIZE(card_codec_conf); card->controls = card_controls; card->num_controls = ARRAY_SIZE(card_controls); card->dapm_widgets = card_widgets; card->num_dapm_widgets = ARRAY_SIZE(card_widgets); card->dapm_routes = card_base_routes; card->num_dapm_routes = ARRAY_SIZE(card_base_routes); card->fully_routed = true; card->disable_route_checks = true; ret = snd_soc_fixup_dai_links_platform_name(card, pname); if (ret) return ret; return devm_snd_soc_register_card(dev, card); } static struct platform_driver avs_ssm4567_driver = { .probe = avs_ssm4567_probe, .driver = { .name = "avs_ssm4567", .pm = &snd_soc_pm_ops, }, }; module_platform_driver(avs_ssm4567_driver) MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:avs_ssm4567");
linux-master
sound/soc/intel/avs/boards/ssm4567.c
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <[email protected]>
//          Amadeusz Slawinski <[email protected]>
//

#include <linux/module.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-acpi.h>
#include <sound/soc-dapm.h>

/*
 * avs_create_dai_link - build a single SSP loopback back-end link bound
 * to the dummy codec (no real codec on a test board).
 *
 * @dev: card device; memory is devm-managed.
 * @platform_name: AVS platform component name.
 * @ssp_port: zero-based SSP index under test.
 * @dai_link: out parameter receiving the populated link.
 *
 * Returns 0 or -ENOMEM.
 */
static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
			       struct snd_soc_dai_link **dai_link)
{
	struct snd_soc_dai_link_component *platform;
	struct snd_soc_dai_link *dl;

	dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL);
	platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
	if (!dl || !platform)
		return -ENOMEM;

	platform->name = platform_name;

	dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port);
	dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL);
	if (!dl->name || !dl->cpus)
		return -ENOMEM;

	dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port);
	/* Shared dummy codec component; its name/dai_name are static. */
	dl->codecs = &asoc_dummy_dlc;
	if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name)
		return -ENOMEM;

	dl->num_cpus = 1;
	dl->num_codecs = 1;
	dl->platforms = platform;
	dl->num_platforms = 1;
	dl->id = 0;
	dl->nonatomic = 1;
	dl->no_pcm = 1;
	dl->dpcm_capture = 1;
	dl->dpcm_playback = 1;

	*dai_link = dl;

	return 0;
}

/*
 * avs_create_dapm_routes - create the two routes connecting the SSP Tx/Rx
 * streams to the loopback playback/capture endpoint widgets.
 *
 * @routes/@num_routes: out parameters (devm-managed array of 2).
 *
 * Returns 0 or -ENOMEM.
 */
static int avs_create_dapm_routes(struct device *dev, int ssp_port,
				  struct snd_soc_dapm_route **routes, int *num_routes)
{
	struct snd_soc_dapm_route *dr;
	const int num_dr = 2;

	dr = devm_kcalloc(dev, num_dr, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	dr[0].sink = devm_kasprintf(dev, GFP_KERNEL, "ssp%dpb", ssp_port);
	dr[0].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Tx", ssp_port);
	if (!dr[0].sink || !dr[0].source)
		return -ENOMEM;

	dr[1].sink = devm_kasprintf(dev, GFP_KERNEL, "ssp%d Rx", ssp_port);
	dr[1].source = devm_kasprintf(dev, GFP_KERNEL, "ssp%dcp", ssp_port);
	if (!dr[1].sink || !dr[1].source)
		return -ENOMEM;

	*routes = dr;
	*num_routes = num_dr;

	return 0;
}

/*
 * avs_create_dapm_widgets - create the endpoint widgets referenced by the
 * routes above: "ssp%dpb" (headphone/playback) and "ssp%dcp" (mic/capture).
 *
 * Returns 0 or -ENOMEM.
 */
static int avs_create_dapm_widgets(struct device *dev, int ssp_port,
				   struct snd_soc_dapm_widget **widgets, int *num_widgets)
{
	struct snd_soc_dapm_widget *dw;
	const int num_dw = 2;

	dw = devm_kcalloc(dev, num_dw, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	dw[0].id = snd_soc_dapm_hp;
	dw[0].reg = SND_SOC_NOPM;
	dw[0].name = devm_kasprintf(dev, GFP_KERNEL, "ssp%dpb", ssp_port);
	if (!dw[0].name)
		return -ENOMEM;

	dw[1].id = snd_soc_dapm_mic;
	dw[1].reg = SND_SOC_NOPM;
	dw[1].name = devm_kasprintf(dev, GFP_KERNEL, "ssp%dcp", ssp_port);
	if (!dw[1].name)
		return -ENOMEM;

	*widgets = dw;
	*num_widgets = num_dw;

	return 0;
}

/*
 * avs_i2s_test_probe - register an "ssp%d-loopback" test card for the SSP
 * selected by the ACPI machine data. Used for I2S link validation only.
 */
static int avs_i2s_test_probe(struct platform_device *pdev)
{
	struct snd_soc_dapm_widget *widgets;
	struct snd_soc_dapm_route *routes;
	struct snd_soc_dai_link *dai_link;
	struct snd_soc_acpi_mach *mach;
	struct snd_soc_card *card;
	struct device *dev = &pdev->dev;
	const char *pname;
	int num_routes, num_widgets;
	int ssp_port, ret;

	mach = dev_get_platdata(dev);
	pname = mach->mach_params.platform;
	/* Lowest set bit selects the SSP under test. */
	ssp_port = __ffs(mach->mach_params.i2s_link_mask);

	card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
	if (!card)
		return -ENOMEM;

	card->name = devm_kasprintf(dev, GFP_KERNEL, "ssp%d-loopback", ssp_port);
	if (!card->name)
		return -ENOMEM;

	ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link);
	if (ret) {
		dev_err(dev, "Failed to create dai link: %d\n", ret);
		return ret;
	}

	ret = avs_create_dapm_routes(dev, ssp_port, &routes, &num_routes);
	if (ret) {
		dev_err(dev, "Failed to create dapm routes: %d\n", ret);
		return ret;
	}

	ret = avs_create_dapm_widgets(dev, ssp_port, &widgets, &num_widgets);
	if (ret) {
		dev_err(dev, "Failed to create dapm widgets: %d\n", ret);
		return ret;
	}

	card->dev = dev;
	card->owner = THIS_MODULE;
	card->dai_link = dai_link;
	card->num_links = 1;
	card->dapm_routes = routes;
	card->num_dapm_routes = num_routes;
	card->dapm_widgets = widgets;
	card->num_dapm_widgets = num_widgets;
	card->fully_routed = true;

	/* Replace topology placeholder with the real platform name. */
	ret = snd_soc_fixup_dai_links_platform_name(card, pname);
	if (ret)
		return ret;

	return devm_snd_soc_register_card(dev, card);
}

static struct platform_driver avs_i2s_test_driver = {
	.probe = avs_i2s_test_probe,
	.driver = {
		.name = "avs_i2s_test",
		.pm = &snd_soc_pm_ops,
	},
};

module_platform_driver(avs_i2s_test_driver);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:avs_i2s_test");
linux-master
sound/soc/intel/avs/boards/i2s_test.c
// SPDX-License-Identifier: GPL-2.0-only // // Copyright(c) 2021-2022 Intel Corporation. All rights reserved. // // Author: Cezary Rojewski <[email protected]> // #include <linux/module.h> #include <linux/platform_data/x86/soc.h> #include <linux/platform_device.h> #include <sound/jack.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-acpi.h> #include <sound/soc-dapm.h> #include <uapi/linux/input-event-codes.h> #include "../../../codecs/da7219.h" #define DA7219_DAI_NAME "da7219-hifi" static const struct snd_kcontrol_new card_controls[] = { SOC_DAPM_PIN_SWITCH("Headphone Jack"), SOC_DAPM_PIN_SWITCH("Headset Mic"), SOC_DAPM_PIN_SWITCH("Line Out"), }; static int platform_clock_control(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { struct snd_soc_dapm_context *dapm = w->dapm; struct snd_soc_card *card = dapm->card; struct snd_soc_dai *codec_dai; int ret = 0; codec_dai = snd_soc_card_get_codec_dai(card, DA7219_DAI_NAME); if (!codec_dai) { dev_err(card->dev, "Codec dai not found. 
Unable to set/unset codec pll\n"); return -EIO; } if (SND_SOC_DAPM_EVENT_OFF(event)) { ret = snd_soc_dai_set_pll(codec_dai, 0, DA7219_SYSCLK_MCLK, 0, 0); if (ret) dev_err(card->dev, "failed to stop PLL: %d\n", ret); } else if (SND_SOC_DAPM_EVENT_ON(event)) { ret = snd_soc_dai_set_pll(codec_dai, 0, DA7219_SYSCLK_PLL_SRM, 0, DA7219_PLL_FREQ_OUT_98304); if (ret) dev_err(card->dev, "failed to start PLL: %d\n", ret); } return ret; } static const struct snd_soc_dapm_widget card_widgets[] = { SND_SOC_DAPM_HP("Headphone Jack", NULL), SND_SOC_DAPM_MIC("Headset Mic", NULL), SND_SOC_DAPM_LINE("Line Out", NULL), SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0, platform_clock_control, SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU), }; static const struct snd_soc_dapm_route card_base_routes[] = { /* HP jack connectors - unknown if we have jack detection */ {"Headphone Jack", NULL, "HPL"}, {"Headphone Jack", NULL, "HPR"}, {"MIC", NULL, "Headset Mic"}, { "Headphone Jack", NULL, "Platform Clock" }, { "Headset Mic", NULL, "Platform Clock" }, { "Line Out", NULL, "Platform Clock" }, }; static const struct snd_soc_jack_pin card_headset_pins[] = { { .pin = "Headphone Jack", .mask = SND_JACK_HEADPHONE, }, { .pin = "Headset Mic", .mask = SND_JACK_MICROPHONE, }, { .pin = "Line Out", .mask = SND_JACK_LINEOUT, }, }; static int avs_da7219_codec_init(struct snd_soc_pcm_runtime *runtime) { struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(runtime, 0); struct snd_soc_component *component = codec_dai->component; struct snd_soc_card *card = runtime->card; struct snd_soc_jack_pin *pins; struct snd_soc_jack *jack; int num_pins; int clk_freq; int ret; jack = snd_soc_card_get_drvdata(card); if (soc_intel_is_apl()) clk_freq = 19200000; else /* kbl */ clk_freq = 24576000; ret = snd_soc_dai_set_sysclk(codec_dai, DA7219_CLKSRC_MCLK, clk_freq, SND_SOC_CLOCK_IN); if (ret) { dev_err(card->dev, "can't set codec sysclk configuration\n"); return ret; } num_pins = ARRAY_SIZE(card_headset_pins); pins = 
devm_kmemdup(card->dev, card_headset_pins, sizeof(*pins) * num_pins, GFP_KERNEL); if (!pins) return -ENOMEM; /* * Headset buttons map to the google Reference headset. * These can be configured by userspace. */ ret = snd_soc_card_jack_new_pins(card, "Headset Jack", SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_LINEOUT, jack, pins, num_pins); if (ret) { dev_err(card->dev, "Headset Jack creation failed: %d\n", ret); return ret; } snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE); snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP); snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN); snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND); return snd_soc_component_set_jack(component, jack, NULL); } static void avs_da7219_codec_exit(struct snd_soc_pcm_runtime *rtd) { snd_soc_component_set_jack(asoc_rtd_to_codec(rtd, 0)->component, NULL, NULL); } static int avs_da7219_be_fixup(struct snd_soc_pcm_runtime *runrime, struct snd_pcm_hw_params *params) { struct snd_interval *rate, *channels; struct snd_mask *fmt; rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); /* The ADSP will convert the FE rate to 48k, stereo */ rate->min = rate->max = 48000; channels->min = channels->max = 2; /* set SSP0 to 24 bit */ snd_mask_none(fmt); snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE); return 0; } static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, struct snd_soc_dai_link **dai_link) { struct snd_soc_dai_link_component *platform; struct snd_soc_dai_link *dl; dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL); platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); if (!dl || !platform) return -ENOMEM; platform->name = platform_name; dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port); dl->cpus = devm_kzalloc(dev, 
sizeof(*dl->cpus), GFP_KERNEL); dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL); if (!dl->name || !dl->cpus || !dl->codecs) return -ENOMEM; dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port); dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "i2c-DLGS7219:00"); dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, DA7219_DAI_NAME); if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name) return -ENOMEM; dl->num_cpus = 1; dl->num_codecs = 1; dl->platforms = platform; dl->num_platforms = 1; dl->id = 0; dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS; dl->be_hw_params_fixup = avs_da7219_be_fixup; dl->init = avs_da7219_codec_init; dl->exit = avs_da7219_codec_exit; dl->nonatomic = 1; dl->no_pcm = 1; dl->dpcm_capture = 1; dl->dpcm_playback = 1; *dai_link = dl; return 0; } static int avs_card_suspend_pre(struct snd_soc_card *card) { struct snd_soc_dai *codec_dai = snd_soc_card_get_codec_dai(card, DA7219_DAI_NAME); return snd_soc_component_set_jack(codec_dai->component, NULL, NULL); } static int avs_card_resume_post(struct snd_soc_card *card) { struct snd_soc_dai *codec_dai = snd_soc_card_get_codec_dai(card, DA7219_DAI_NAME); struct snd_soc_jack *jack = snd_soc_card_get_drvdata(card); return snd_soc_component_set_jack(codec_dai->component, jack, NULL); } static int avs_da7219_probe(struct platform_device *pdev) { struct snd_soc_dai_link *dai_link; struct snd_soc_acpi_mach *mach; struct snd_soc_card *card; struct snd_soc_jack *jack; struct device *dev = &pdev->dev; const char *pname; int ssp_port, ret; mach = dev_get_platdata(dev); pname = mach->mach_params.platform; ssp_port = __ffs(mach->mach_params.i2s_link_mask); ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link); if (ret) { dev_err(dev, "Failed to create dai link: %d", ret); return ret; } jack = devm_kzalloc(dev, sizeof(*jack), GFP_KERNEL); card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); if (!jack || !card) return 
-ENOMEM; card->name = "avs_da7219"; card->dev = dev; card->owner = THIS_MODULE; card->suspend_pre = avs_card_suspend_pre; card->resume_post = avs_card_resume_post; card->dai_link = dai_link; card->num_links = 1; card->controls = card_controls; card->num_controls = ARRAY_SIZE(card_controls); card->dapm_widgets = card_widgets; card->num_dapm_widgets = ARRAY_SIZE(card_widgets); card->dapm_routes = card_base_routes; card->num_dapm_routes = ARRAY_SIZE(card_base_routes); card->fully_routed = true; snd_soc_card_set_drvdata(card, jack); ret = snd_soc_fixup_dai_links_platform_name(card, pname); if (ret) return ret; return devm_snd_soc_register_card(dev, card); } static struct platform_driver avs_da7219_driver = { .probe = avs_da7219_probe, .driver = { .name = "avs_da7219", .pm = &snd_soc_pm_ops, }, }; module_platform_driver(avs_da7219_driver); MODULE_AUTHOR("Cezary Rojewski <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:avs_da7219");
linux-master
sound/soc/intel/avs/boards/da7219.c
// SPDX-License-Identifier: GPL-2.0-only // // Copyright(c) 2022 Intel Corporation. All rights reserved. // // Authors: Cezary Rojewski <[email protected]> // Amadeusz Slawinski <[email protected]> // #include <linux/module.h> #include <linux/platform_device.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-acpi.h> #include <sound/soc-dapm.h> #define MAX98373_DEV0_NAME "i2c-MX98373:00" #define MAX98373_DEV1_NAME "i2c-MX98373:01" #define MAX98373_CODEC_NAME "max98373-aif1" static struct snd_soc_codec_conf card_codec_conf[] = { { .dlc = COMP_CODEC_CONF(MAX98373_DEV0_NAME), .name_prefix = "Right", }, { .dlc = COMP_CODEC_CONF(MAX98373_DEV1_NAME), .name_prefix = "Left", }, }; static const struct snd_kcontrol_new card_controls[] = { SOC_DAPM_PIN_SWITCH("Left Spk"), SOC_DAPM_PIN_SWITCH("Right Spk"), }; static const struct snd_soc_dapm_widget card_widgets[] = { SND_SOC_DAPM_SPK("Left Spk", NULL), SND_SOC_DAPM_SPK("Right Spk", NULL), }; static const struct snd_soc_dapm_route card_base_routes[] = { { "Left Spk", NULL, "Left BE_OUT" }, { "Right Spk", NULL, "Right BE_OUT" }, }; static int avs_max98373_be_fixup(struct snd_soc_pcm_runtime *runrime, struct snd_pcm_hw_params *params) { struct snd_interval *rate, *channels; struct snd_mask *fmt; rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); /* The ADSP will convert the FE rate to 48k, stereo */ rate->min = rate->max = 48000; channels->min = channels->max = 2; /* set SSP0 to 16 bit */ snd_mask_none(fmt); snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE); return 0; } static int avs_max98373_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *runtime = asoc_substream_to_rtd(substream); struct snd_soc_dai *codec_dai; int ret, i; for_each_rtd_codec_dais(runtime, i, codec_dai) { if (!strcmp(codec_dai->component->name, 
MAX98373_DEV0_NAME)) { ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x30, 3, 8, 16); if (ret < 0) { dev_err(runtime->dev, "DEV0 TDM slot err:%d\n", ret); return ret; } } if (!strcmp(codec_dai->component->name, MAX98373_DEV1_NAME)) { ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xC0, 3, 8, 16); if (ret < 0) { dev_err(runtime->dev, "DEV1 TDM slot err:%d\n", ret); return ret; } } } return 0; } static const struct snd_soc_ops avs_max98373_ops = { .hw_params = avs_max98373_hw_params, }; static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, struct snd_soc_dai_link **dai_link) { struct snd_soc_dai_link_component *platform; struct snd_soc_dai_link *dl; dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL); platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); if (!dl || !platform) return -ENOMEM; platform->name = platform_name; dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port); dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL); dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs) * 2, GFP_KERNEL); if (!dl->name || !dl->cpus || !dl->codecs) return -ENOMEM; dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port); dl->codecs[0].name = devm_kasprintf(dev, GFP_KERNEL, MAX98373_DEV0_NAME); dl->codecs[0].dai_name = devm_kasprintf(dev, GFP_KERNEL, MAX98373_CODEC_NAME); dl->codecs[1].name = devm_kasprintf(dev, GFP_KERNEL, MAX98373_DEV1_NAME); dl->codecs[1].dai_name = devm_kasprintf(dev, GFP_KERNEL, MAX98373_CODEC_NAME); if (!dl->cpus->dai_name || !dl->codecs[0].name || !dl->codecs[0].dai_name || !dl->codecs[1].name || !dl->codecs[1].dai_name) return -ENOMEM; dl->num_cpus = 1; dl->num_codecs = 2; dl->platforms = platform; dl->num_platforms = 1; dl->id = 0; dl->dai_fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBC_CFC; dl->be_hw_params_fixup = avs_max98373_be_fixup; dl->nonatomic = 1; dl->no_pcm = 1; dl->dpcm_capture = 1; dl->dpcm_playback = 1; dl->ignore_pmdown_time = 1; dl->ops = 
&avs_max98373_ops; *dai_link = dl; return 0; } static int avs_max98373_probe(struct platform_device *pdev) { struct snd_soc_dai_link *dai_link; struct snd_soc_acpi_mach *mach; struct snd_soc_card *card; struct device *dev = &pdev->dev; const char *pname; int ssp_port, ret; mach = dev_get_platdata(dev); pname = mach->mach_params.platform; ssp_port = __ffs(mach->mach_params.i2s_link_mask); ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link); if (ret) { dev_err(dev, "Failed to create dai link: %d", ret); return ret; } card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); if (!card) return -ENOMEM; card->name = "avs_max98373"; card->dev = dev; card->owner = THIS_MODULE; card->dai_link = dai_link; card->num_links = 1; card->codec_conf = card_codec_conf; card->num_configs = ARRAY_SIZE(card_codec_conf); card->controls = card_controls; card->num_controls = ARRAY_SIZE(card_controls); card->dapm_widgets = card_widgets; card->num_dapm_widgets = ARRAY_SIZE(card_widgets); card->dapm_routes = card_base_routes; card->num_dapm_routes = ARRAY_SIZE(card_base_routes); card->fully_routed = true; ret = snd_soc_fixup_dai_links_platform_name(card, pname); if (ret) return ret; return devm_snd_soc_register_card(dev, card); } static struct platform_driver avs_max98373_driver = { .probe = avs_max98373_probe, .driver = { .name = "avs_max98373", .pm = &snd_soc_pm_ops, }, }; module_platform_driver(avs_max98373_driver) MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:avs_max98373");
linux-master
sound/soc/intel/avs/boards/max98373.c
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <[email protected]>
//          Amadeusz Slawinski <[email protected]>
//

#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/hda_codec.h>
#include <sound/hda_i915.h>
#include <sound/soc.h>
#include <sound/soc-acpi.h>
#include "../../../codecs/hda.h"

/*
 * avs_create_dai_links - create one BE link per PCM exposed by the HDA
 * codec, naming links/CPU DAIs after the codec device name and index.
 *
 * @codec: HDA codec whose pcm_list_head drives the iteration; pcm_count
 *         must match the list length (caller counts it beforehand).
 * @links: out parameter receiving the devm-allocated link array.
 *
 * Returns 0 or -ENOMEM.
 */
static int avs_create_dai_links(struct device *dev, struct hda_codec *codec, int pcm_count,
				const char *platform_name, struct snd_soc_dai_link **links)
{
	struct snd_soc_dai_link_component *platform;
	struct snd_soc_dai_link *dl;
	struct hda_pcm *pcm;
	const char *cname = dev_name(&codec->core.dev);
	int i;

	dl = devm_kcalloc(dev, pcm_count, sizeof(*dl), GFP_KERNEL);
	platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
	if (!dl || !platform)
		return -ENOMEM;

	platform->name = platform_name;
	pcm = list_first_entry(&codec->pcm_list_head, struct hda_pcm, list);

	/* Walk the codec's PCM list in lockstep with the link array. */
	for (i = 0; i < pcm_count; i++, pcm = list_next_entry(pcm, list)) {
		dl[i].name = devm_kasprintf(dev, GFP_KERNEL, "%s link%d", cname, i);
		if (!dl[i].name)
			return -ENOMEM;

		dl[i].id = i;
		dl[i].nonatomic = 1;
		dl[i].no_pcm = 1;
		dl[i].dpcm_playback = 1;
		dl[i].dpcm_capture = 1;
		dl[i].platforms = platform;
		dl[i].num_platforms = 1;
		dl[i].ignore_pmdown_time = 1;

		dl[i].codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL);
		dl[i].cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL);
		if (!dl[i].codecs || !dl[i].cpus)
			return -ENOMEM;

		dl[i].cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "%s-cpu%d", cname, i);
		if (!dl[i].cpus->dai_name)
			return -ENOMEM;

		dl[i].codecs->name = devm_kstrdup(dev, cname, GFP_KERNEL);
		if (!dl[i].codecs->name)
			return -ENOMEM;

		/* Codec DAI is named after the HDA PCM it backs. */
		dl[i].codecs->dai_name = pcm->name;
		dl[i].num_codecs = 1;
		dl[i].num_cpus = 1;
	}

	*links = dl;
	return 0;
}

/* Should be aligned with SectionPCM's name from topology */
#define FEDAI_NAME_PREFIX "HDMI"

/*
 * avs_card_hdmi_pcm_at - find the card's playback PCM whose topology name
 * is "HDMI<hdmi_idx>", or NULL when none matches.
 */
static struct snd_pcm *
avs_card_hdmi_pcm_at(struct snd_soc_card *card, int hdmi_idx)
{
	struct snd_soc_pcm_runtime *rtd;
	int dir = SNDRV_PCM_STREAM_PLAYBACK;

	for_each_card_rtds(card, rtd) {
		struct snd_pcm *spcm;
		int ret, n;

		spcm = rtd->pcm ? rtd->pcm->streams[dir].pcm : NULL;
		if (!spcm || !strstr(spcm->id, FEDAI_NAME_PREFIX))
			continue;

		/* Parse the numeric suffix out of e.g. "HDMI3". */
		ret = sscanf(spcm->id, FEDAI_NAME_PREFIX "%d", &n);
		if (ret != 1)
			continue;
		if (n == hdmi_idx)
			return rtd->pcm;
	}

	return NULL;
}

/*
 * avs_card_late_probe - map each HDMI converter of a display codec to its
 * topology PCM (topology indexing is 1-based), then complete codec probe.
 */
static int avs_card_late_probe(struct snd_soc_card *card)
{
	struct snd_soc_acpi_mach *mach = dev_get_platdata(card->dev);
	struct hda_codec *codec = mach->pdata;
	struct hda_pcm *hpcm;
	/* Topology pcm indexing is 1-based */
	int i = 1;

	list_for_each_entry(hpcm, &codec->pcm_list_head, list) {
		struct snd_pcm *spcm;

		spcm = avs_card_hdmi_pcm_at(card, i);
		if (spcm) {
			hpcm->pcm = spcm;
			hpcm->device = spcm->device;
			dev_info(card->dev, "%s: mapping HDMI converter %d to PCM %d (%p)\n",
				 __func__, i, hpcm->device, spcm);
		} else {
			hpcm->pcm = NULL;
			hpcm->device = SNDRV_PCM_INVALID_DEVICE;
			dev_warn(card->dev, "%s: no PCM in topology for HDMI converter %d\n",
				 __func__, i);
		}
		i++;
	}

	return hda_codec_probe_complete(codec);
}

/*
 * avs_probing_link_init - init callback of the probing link; runs once the
 * codec is reachable. Counts the codec's PCMs, builds the real BE links
 * and appends them to the already-registered card.
 */
static int avs_probing_link_init(struct snd_soc_pcm_runtime *rtm)
{
	struct snd_soc_acpi_mach *mach;
	struct snd_soc_dai_link *links = NULL;
	struct snd_soc_card *card = rtm->card;
	struct hda_codec *codec;
	struct hda_pcm *pcm;
	int ret, pcm_count = 0;

	mach = dev_get_platdata(card->dev);
	codec = mach->pdata;

	if (list_empty(&codec->pcm_list_head))
		return -EINVAL;
	list_for_each_entry(pcm, &codec->pcm_list_head, list)
		pcm_count++;

	ret = avs_create_dai_links(card->dev, codec, pcm_count,
				   mach->mach_params.platform, &links);
	if (ret < 0) {
		dev_err(card->dev, "create links failed: %d\n", ret);
		return ret;
	}

	ret = snd_soc_add_pcm_runtimes(card, links, pcm_count);
	if (ret < 0) {
		dev_err(card->dev, "add links failed: %d\n", ret);
		return ret;
	}

	return 0;
}

SND_SOC_DAILINK_DEF(dummy, DAILINK_COMP_ARRAY(COMP_DUMMY()));

/*
 * Placeholder link registered up-front; its .init hook creates the real
 * links once the HDA codec's PCM list is known.
 */
static struct snd_soc_dai_link probing_link = {
	.name = "probing-LINK",
	.id = -1,
	.nonatomic = 1,
	.no_pcm = 1,
	.dpcm_playback = 1,
	.dpcm_capture = 1,
	.cpus = dummy,
	.num_cpus = ARRAY_SIZE(dummy),
	.init = avs_probing_link_init,
};

/*
 * avs_hdaudio_probe - register a card wrapping one HDA codec. Display
 * (HDMI) codecs additionally get the late_probe PCM-mapping pass.
 */
static int avs_hdaudio_probe(struct platform_device *pdev)
{
	struct snd_soc_dai_link *binder;
	struct snd_soc_acpi_mach *mach;
	struct snd_soc_card *card;
	struct device *dev = &pdev->dev;
	struct hda_codec *codec;

	mach = dev_get_platdata(dev);
	codec = mach->pdata;

	/* codec may be unloaded before card's probe() fires */
	if (!device_is_registered(&codec->core.dev))
		return -ENODEV;

	binder = devm_kmemdup(dev, &probing_link, sizeof(probing_link), GFP_KERNEL);
	if (!binder)
		return -ENOMEM;

	binder->platforms = devm_kzalloc(dev, sizeof(*binder->platforms), GFP_KERNEL);
	binder->codecs = devm_kzalloc(dev, sizeof(*binder->codecs), GFP_KERNEL);
	if (!binder->platforms || !binder->codecs)
		return -ENOMEM;

	binder->codecs->name = devm_kstrdup(dev, dev_name(&codec->core.dev), GFP_KERNEL);
	if (!binder->codecs->name)
		return -ENOMEM;

	binder->platforms->name = mach->mach_params.platform;
	binder->num_platforms = 1;
	binder->codecs->dai_name = "codec-probing-DAI";
	binder->num_codecs = 1;

	card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
	if (!card)
		return -ENOMEM;

	card->name = binder->codecs->name;
	card->dev = dev;
	card->owner = THIS_MODULE;
	card->dai_link = binder;
	card->num_links = 1;
	card->fully_routed = true;

	if (hda_codec_is_display(codec))
		card->late_probe = avs_card_late_probe;

	return devm_snd_soc_register_card(dev, card);
}

static struct platform_driver avs_hdaudio_driver = {
	.probe = avs_hdaudio_probe,
	.driver = {
		.name = "avs_hdaudio",
		.pm = &snd_soc_pm_ops,
	},
};

/* NOTE(review): missing trailing ';' - sibling drivers in this family use one. */
module_platform_driver(avs_hdaudio_driver)

MODULE_DESCRIPTION("Intel HD-Audio machine driver");
MODULE_AUTHOR("Cezary Rojewski <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:avs_hdaudio");
linux-master
sound/soc/intel/avs/boards/hdaudio.c
// SPDX-License-Identifier: GPL-2.0-only // // Copyright(c) 2021-2022 Intel Corporation. All rights reserved. // // Authors: Cezary Rojewski <[email protected]> // Amadeusz Slawinski <[email protected]> // #include <linux/input.h> #include <linux/module.h> #include <linux/platform_device.h> #include <sound/core.h> #include <sound/jack.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-acpi.h> #include "../../../codecs/nau8825.h" #define SKL_NUVOTON_CODEC_DAI "nau8825-hifi" static int avs_nau8825_clock_control(struct snd_soc_dapm_widget *w, struct snd_kcontrol *control, int event) { struct snd_soc_dapm_context *dapm = w->dapm; struct snd_soc_card *card = dapm->card; struct snd_soc_dai *codec_dai; int ret; codec_dai = snd_soc_card_get_codec_dai(card, SKL_NUVOTON_CODEC_DAI); if (!codec_dai) { dev_err(card->dev, "Codec dai not found\n"); return -EINVAL; } if (SND_SOC_DAPM_EVENT_ON(event)) ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_MCLK, 24000000, SND_SOC_CLOCK_IN); else ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_INTERNAL, 0, SND_SOC_CLOCK_IN); if (ret < 0) dev_err(card->dev, "Set sysclk failed: %d\n", ret); return ret; } static const struct snd_kcontrol_new card_controls[] = { SOC_DAPM_PIN_SWITCH("Headphone Jack"), SOC_DAPM_PIN_SWITCH("Headset Mic"), }; static const struct snd_soc_dapm_widget card_widgets[] = { SND_SOC_DAPM_HP("Headphone Jack", NULL), SND_SOC_DAPM_MIC("Headset Mic", NULL), SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0, avs_nau8825_clock_control, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), }; static const struct snd_soc_dapm_route card_base_routes[] = { { "Headphone Jack", NULL, "HPOL" }, { "Headphone Jack", NULL, "HPOR" }, { "MIC", NULL, "Headset Mic" }, { "Headphone Jack", NULL, "Platform Clock" }, { "Headset Mic", NULL, "Platform Clock" }, }; static struct snd_soc_jack_pin card_headset_pins[] = { { .pin = "Headphone Jack", .mask = SND_JACK_HEADPHONE, }, { .pin = "Headset 
Mic", .mask = SND_JACK_MICROPHONE, }, }; static int avs_nau8825_codec_init(struct snd_soc_pcm_runtime *runtime) { struct snd_soc_card *card = runtime->card; struct snd_soc_jack_pin *pins; struct snd_soc_jack *jack; int num_pins, ret; jack = snd_soc_card_get_drvdata(card); num_pins = ARRAY_SIZE(card_headset_pins); pins = devm_kmemdup(card->dev, card_headset_pins, sizeof(*pins) * num_pins, GFP_KERNEL); if (!pins) return -ENOMEM; /* * 4 buttons here map to the google Reference headset. * The use of these buttons can be decided by the user space. */ ret = snd_soc_card_jack_new_pins(card, "Headset", SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 | SND_JACK_BTN_3, jack, pins, num_pins); if (ret) return ret; snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE); snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND); snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP); snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN); return snd_soc_component_set_jack(asoc_rtd_to_codec(runtime, 0)->component, jack, NULL); } static void avs_nau8825_codec_exit(struct snd_soc_pcm_runtime *rtd) { snd_soc_component_set_jack(asoc_rtd_to_codec(rtd, 0)->component, NULL, NULL); } static int avs_nau8825_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params) { struct snd_interval *rate, *channels; struct snd_mask *fmt; rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); /* The ADSP will convert the FE rate to 48k, stereo */ rate->min = rate->max = 48000; channels->min = channels->max = 2; /* set SSP to 24 bit */ snd_mask_none(fmt); snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE); return 0; } static int avs_nau8825_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *rtm = asoc_substream_to_rtd(substream); 
struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtm, 0); int ret = 0; switch (cmd) { case SNDRV_PCM_TRIGGER_START: ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_FLL_FS, 0, SND_SOC_CLOCK_IN); if (ret < 0) { dev_err(codec_dai->dev, "can't set FS clock %d\n", ret); break; } ret = snd_soc_dai_set_pll(codec_dai, 0, 0, runtime->rate, runtime->rate * 256); if (ret < 0) dev_err(codec_dai->dev, "can't set FLL: %d\n", ret); break; case SNDRV_PCM_TRIGGER_RESUME: ret = snd_soc_dai_set_pll(codec_dai, 0, 0, runtime->rate, runtime->rate * 256); if (ret < 0) dev_err(codec_dai->dev, "can't set FLL: %d\n", ret); break; } return ret; } static const struct snd_soc_ops avs_nau8825_ops = { .trigger = avs_nau8825_trigger, }; static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, struct snd_soc_dai_link **dai_link) { struct snd_soc_dai_link_component *platform; struct snd_soc_dai_link *dl; dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL); platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); if (!dl || !platform) return -ENOMEM; platform->name = platform_name; dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port); dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL); dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL); if (!dl->name || !dl->cpus || !dl->codecs) return -ENOMEM; dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port); dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "i2c-10508825:00"); dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, SKL_NUVOTON_CODEC_DAI); if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name) return -ENOMEM; dl->num_cpus = 1; dl->num_codecs = 1; dl->platforms = platform; dl->num_platforms = 1; dl->id = 0; dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS; dl->init = avs_nau8825_codec_init; dl->exit = avs_nau8825_codec_exit; dl->be_hw_params_fixup = avs_nau8825_be_fixup; dl->ops = &avs_nau8825_ops; 
dl->nonatomic = 1; dl->no_pcm = 1; dl->dpcm_capture = 1; dl->dpcm_playback = 1; *dai_link = dl; return 0; } static int avs_card_suspend_pre(struct snd_soc_card *card) { struct snd_soc_dai *codec_dai = snd_soc_card_get_codec_dai(card, SKL_NUVOTON_CODEC_DAI); return snd_soc_component_set_jack(codec_dai->component, NULL, NULL); } static int avs_card_resume_post(struct snd_soc_card *card) { struct snd_soc_dai *codec_dai = snd_soc_card_get_codec_dai(card, SKL_NUVOTON_CODEC_DAI); struct snd_soc_jack *jack = snd_soc_card_get_drvdata(card); int stream = SNDRV_PCM_STREAM_PLAYBACK; if (!codec_dai) { dev_err(card->dev, "Codec dai not found\n"); return -EINVAL; } if (snd_soc_dai_stream_active(codec_dai, stream) && snd_soc_dai_get_widget(codec_dai, stream)->active) snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_FLL_FS, 0, SND_SOC_CLOCK_IN); return snd_soc_component_set_jack(codec_dai->component, jack, NULL); } static int avs_nau8825_probe(struct platform_device *pdev) { struct snd_soc_dai_link *dai_link; struct snd_soc_acpi_mach *mach; struct snd_soc_card *card; struct snd_soc_jack *jack; struct device *dev = &pdev->dev; const char *pname; int ssp_port, ret; mach = dev_get_platdata(dev); pname = mach->mach_params.platform; ssp_port = __ffs(mach->mach_params.i2s_link_mask); ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link); if (ret) { dev_err(dev, "Failed to create dai link: %d", ret); return ret; } jack = devm_kzalloc(dev, sizeof(*jack), GFP_KERNEL); card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); if (!jack || !card) return -ENOMEM; card->name = "avs_nau8825"; card->dev = dev; card->owner = THIS_MODULE; card->suspend_pre = avs_card_suspend_pre; card->resume_post = avs_card_resume_post; card->dai_link = dai_link; card->num_links = 1; card->controls = card_controls; card->num_controls = ARRAY_SIZE(card_controls); card->dapm_widgets = card_widgets; card->num_dapm_widgets = ARRAY_SIZE(card_widgets); card->dapm_routes = card_base_routes; card->num_dapm_routes = 
ARRAY_SIZE(card_base_routes); card->fully_routed = true; snd_soc_card_set_drvdata(card, jack); ret = snd_soc_fixup_dai_links_platform_name(card, pname); if (ret) return ret; return devm_snd_soc_register_card(dev, card); } static struct platform_driver avs_nau8825_driver = { .probe = avs_nau8825_probe, .driver = { .name = "avs_nau8825", .pm = &snd_soc_pm_ops, }, }; module_platform_driver(avs_nau8825_driver) MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:avs_nau8825");
linux-master
sound/soc/intel/avs/boards/nau8825.c
// SPDX-License-Identifier: GPL-2.0-only // // Copyright(c) 2021-2022 Intel Corporation. All rights reserved. // // Authors: Cezary Rojewski <[email protected]> // Amadeusz Slawinski <[email protected]> // #include <linux/clk.h> #include <linux/dmi.h> #include <linux/i2c.h> #include <linux/input.h> #include <linux/module.h> #include <linux/platform_device.h> #include <sound/core.h> #include <sound/jack.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/rt5682.h> #include <sound/soc.h> #include <sound/soc-acpi.h> #include "../../common/soc-intel-quirks.h" #include "../../../codecs/rt5682.h" #define AVS_RT5682_SSP_CODEC(quirk) ((quirk) & GENMASK(2, 0)) #define AVS_RT5682_SSP_CODEC_MASK (GENMASK(2, 0)) #define AVS_RT5682_MCLK_EN BIT(3) #define AVS_RT5682_MCLK_24MHZ BIT(4) #define AVS_RT5682_CODEC_DAI_NAME "rt5682-aif1" /* Default: MCLK on, MCLK 19.2M, SSP0 */ static unsigned long avs_rt5682_quirk = AVS_RT5682_MCLK_EN | AVS_RT5682_SSP_CODEC(0); static int avs_rt5682_quirk_cb(const struct dmi_system_id *id) { avs_rt5682_quirk = (unsigned long)id->driver_data; return 1; } static const struct dmi_system_id avs_rt5682_quirk_table[] = { { .callback = avs_rt5682_quirk_cb, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "WhiskeyLake Client"), }, .driver_data = (void *)(AVS_RT5682_MCLK_EN | AVS_RT5682_MCLK_24MHZ | AVS_RT5682_SSP_CODEC(1)), }, { .callback = avs_rt5682_quirk_cb, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "Ice Lake Client"), }, .driver_data = (void *)(AVS_RT5682_MCLK_EN | AVS_RT5682_SSP_CODEC(0)), }, {} }; static const struct snd_kcontrol_new card_controls[] = { SOC_DAPM_PIN_SWITCH("Headphone Jack"), SOC_DAPM_PIN_SWITCH("Headset Mic"), }; static const struct snd_soc_dapm_widget card_widgets[] = { SND_SOC_DAPM_HP("Headphone Jack", NULL), SND_SOC_DAPM_MIC("Headset Mic", NULL), }; static const struct snd_soc_dapm_route card_base_routes[] = { /* HP jack 
connectors - unknown if we have jack detect */ { "Headphone Jack", NULL, "HPOL" }, { "Headphone Jack", NULL, "HPOR" }, /* other jacks */ { "IN1P", NULL, "Headset Mic" }, }; static struct snd_soc_jack_pin card_jack_pins[] = { { .pin = "Headphone Jack", .mask = SND_JACK_HEADPHONE, }, { .pin = "Headset Mic", .mask = SND_JACK_MICROPHONE, }, }; static int avs_rt5682_codec_init(struct snd_soc_pcm_runtime *runtime) { struct snd_soc_component *component = asoc_rtd_to_codec(runtime, 0)->component; struct snd_soc_card *card = runtime->card; struct snd_soc_jack_pin *pins; struct snd_soc_jack *jack; int num_pins, ret; jack = snd_soc_card_get_drvdata(card); num_pins = ARRAY_SIZE(card_jack_pins); pins = devm_kmemdup(card->dev, card_jack_pins, sizeof(*pins) * num_pins, GFP_KERNEL); if (!pins) return -ENOMEM; /* Need to enable ASRC function for 24MHz mclk rate */ if ((avs_rt5682_quirk & AVS_RT5682_MCLK_EN) && (avs_rt5682_quirk & AVS_RT5682_MCLK_24MHZ)) { rt5682_sel_asrc_clk_src(component, RT5682_DA_STEREO1_FILTER | RT5682_AD_STEREO1_FILTER, RT5682_CLK_SEL_I2S1_ASRC); } ret = snd_soc_card_jack_new_pins(card, "Headset Jack", SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 | SND_JACK_BTN_3, jack, pins, num_pins); if (ret) { dev_err(card->dev, "Headset Jack creation failed: %d\n", ret); return ret; } snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE); snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND); snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP); snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN); ret = snd_soc_component_set_jack(component, jack, NULL); if (ret) { dev_err(card->dev, "Headset Jack call-back failed: %d\n", ret); return ret; } return 0; }; static void avs_rt5682_codec_exit(struct snd_soc_pcm_runtime *rtd) { snd_soc_component_set_jack(asoc_rtd_to_codec(rtd, 0)->component, NULL, NULL); } static int avs_rt5682_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct 
snd_soc_pcm_runtime *runtime = asoc_substream_to_rtd(substream); struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(runtime, 0); int pll_source, freq_in, freq_out; int ret; if (avs_rt5682_quirk & AVS_RT5682_MCLK_EN) { pll_source = RT5682_PLL1_S_MCLK; if (avs_rt5682_quirk & AVS_RT5682_MCLK_24MHZ) freq_in = 24000000; else freq_in = 19200000; } else { pll_source = RT5682_PLL1_S_BCLK1; freq_in = params_rate(params) * 50; } freq_out = params_rate(params) * 512; ret = snd_soc_dai_set_pll(codec_dai, RT5682_PLL1, pll_source, freq_in, freq_out); if (ret < 0) dev_err(runtime->dev, "Set PLL failed: %d\n", ret); ret = snd_soc_dai_set_sysclk(codec_dai, RT5682_SCLK_S_PLL1, freq_out, SND_SOC_CLOCK_IN); if (ret < 0) dev_err(runtime->dev, "Set sysclk failed: %d\n", ret); /* slot_width should be equal or larger than data length. */ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x0, 0x0, 2, params_width(params)); if (ret < 0) dev_err(runtime->dev, "Set TDM slot failed: %d\n", ret); return ret; } static const struct snd_soc_ops avs_rt5682_ops = { .hw_params = avs_rt5682_hw_params, }; static int avs_rt5682_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params) { struct snd_interval *rate, *channels; struct snd_mask *fmt; rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); /* The ADSP will convert the FE rate to 48k, stereo */ rate->min = rate->max = 48000; channels->min = channels->max = 2; /* set SSPN to 24 bit */ snd_mask_none(fmt); snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE); return 0; } static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, struct snd_soc_dai_link **dai_link) { struct snd_soc_dai_link_component *platform; struct snd_soc_dai_link *dl; dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL); platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); if (!dl || !platform) return 
-ENOMEM; platform->name = platform_name; dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port); dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL); dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL); if (!dl->name || !dl->cpus || !dl->codecs) return -ENOMEM; dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port); dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "i2c-10EC5682:00"); dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, AVS_RT5682_CODEC_DAI_NAME); if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name) return -ENOMEM; dl->num_cpus = 1; dl->num_codecs = 1; dl->platforms = platform; dl->num_platforms = 1; dl->id = 0; dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBC_CFC; dl->init = avs_rt5682_codec_init; dl->exit = avs_rt5682_codec_exit; dl->be_hw_params_fixup = avs_rt5682_be_fixup; dl->ops = &avs_rt5682_ops; dl->nonatomic = 1; dl->no_pcm = 1; dl->dpcm_capture = 1; dl->dpcm_playback = 1; *dai_link = dl; return 0; } static int avs_card_suspend_pre(struct snd_soc_card *card) { struct snd_soc_dai *codec_dai = snd_soc_card_get_codec_dai(card, AVS_RT5682_CODEC_DAI_NAME); return snd_soc_component_set_jack(codec_dai->component, NULL, NULL); } static int avs_card_resume_post(struct snd_soc_card *card) { struct snd_soc_dai *codec_dai = snd_soc_card_get_codec_dai(card, AVS_RT5682_CODEC_DAI_NAME); struct snd_soc_jack *jack = snd_soc_card_get_drvdata(card); return snd_soc_component_set_jack(codec_dai->component, jack, NULL); } static int avs_rt5682_probe(struct platform_device *pdev) { struct snd_soc_dai_link *dai_link; struct snd_soc_acpi_mach *mach; struct snd_soc_card *card; struct snd_soc_jack *jack; struct device *dev = &pdev->dev; const char *pname; int ssp_port, ret; if (pdev->id_entry && pdev->id_entry->driver_data) avs_rt5682_quirk = (unsigned long)pdev->id_entry->driver_data; dmi_check_system(avs_rt5682_quirk_table); dev_dbg(dev, "avs_rt5682_quirk = 
%lx\n", avs_rt5682_quirk); mach = dev_get_platdata(dev); pname = mach->mach_params.platform; ssp_port = __ffs(mach->mach_params.i2s_link_mask); ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link); if (ret) { dev_err(dev, "Failed to create dai link: %d", ret); return ret; } jack = devm_kzalloc(dev, sizeof(*jack), GFP_KERNEL); card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); if (!jack || !card) return -ENOMEM; card->name = "avs_rt5682"; card->dev = dev; card->owner = THIS_MODULE; card->suspend_pre = avs_card_suspend_pre; card->resume_post = avs_card_resume_post; card->dai_link = dai_link; card->num_links = 1; card->controls = card_controls; card->num_controls = ARRAY_SIZE(card_controls); card->dapm_widgets = card_widgets; card->num_dapm_widgets = ARRAY_SIZE(card_widgets); card->dapm_routes = card_base_routes; card->num_dapm_routes = ARRAY_SIZE(card_base_routes); card->fully_routed = true; snd_soc_card_set_drvdata(card, jack); ret = snd_soc_fixup_dai_links_platform_name(card, pname); if (ret) return ret; return devm_snd_soc_register_card(dev, card); } static struct platform_driver avs_rt5682_driver = { .probe = avs_rt5682_probe, .driver = { .name = "avs_rt5682", .pm = &snd_soc_pm_ops, }, }; module_platform_driver(avs_rt5682_driver) MODULE_AUTHOR("Cezary Rojewski <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:avs_rt5682");
/* linux-master: sound/soc/intel/avs/boards/rt5682.c -- file boundary */
// SPDX-License-Identifier: GPL-2.0-only // // Copyright(c) 2021-2022 Intel Corporation. All rights reserved. // // Authors: Cezary Rojewski <[email protected]> // Amadeusz Slawinski <[email protected]> // #include <linux/module.h> #include <sound/jack.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-acpi.h> #include "../../../codecs/rt274.h" #define AVS_RT274_FREQ_OUT 24000000 #define AVS_RT274_BE_FIXUP_RATE 48000 #define RT274_CODEC_DAI "rt274-aif1" static const struct snd_kcontrol_new card_controls[] = { SOC_DAPM_PIN_SWITCH("Headphone Jack"), SOC_DAPM_PIN_SWITCH("Mic Jack"), }; static int avs_rt274_clock_control(struct snd_soc_dapm_widget *w, struct snd_kcontrol *control, int event) { struct snd_soc_dapm_context *dapm = w->dapm; struct snd_soc_card *card = dapm->card; struct snd_soc_dai *codec_dai; int ret; codec_dai = snd_soc_card_get_codec_dai(card, RT274_CODEC_DAI); if (!codec_dai) return -EINVAL; /* Codec needs clock for Jack detection and button press */ ret = snd_soc_dai_set_sysclk(codec_dai, RT274_SCLK_S_PLL2, AVS_RT274_FREQ_OUT, SND_SOC_CLOCK_IN); if (ret < 0) { dev_err(codec_dai->dev, "set codec sysclk failed: %d\n", ret); return ret; } if (SND_SOC_DAPM_EVENT_ON(event)) { int ratio = 100; snd_soc_dai_set_bclk_ratio(codec_dai, ratio); ret = snd_soc_dai_set_pll(codec_dai, 0, RT274_PLL2_S_BCLK, AVS_RT274_BE_FIXUP_RATE * ratio, AVS_RT274_FREQ_OUT); if (ret) { dev_err(codec_dai->dev, "failed to enable PLL2: %d\n", ret); return ret; } } return 0; } static const struct snd_soc_dapm_widget card_widgets[] = { SND_SOC_DAPM_HP("Headphone Jack", NULL), SND_SOC_DAPM_MIC("Mic Jack", NULL), SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0, avs_rt274_clock_control, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), }; static const struct snd_soc_dapm_route card_base_routes[] = { {"Headphone Jack", NULL, "HPO Pin"}, {"MIC", NULL, "Mic Jack"}, {"Headphone Jack", NULL, "Platform Clock"}, {"MIC", NULL, "Platform 
Clock"}, }; static struct snd_soc_jack_pin card_headset_pins[] = { { .pin = "Headphone Jack", .mask = SND_JACK_HEADPHONE, }, { .pin = "Mic Jack", .mask = SND_JACK_MICROPHONE, }, }; static int avs_rt274_codec_init(struct snd_soc_pcm_runtime *runtime) { struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(runtime, 0); struct snd_soc_component *component = codec_dai->component; struct snd_soc_jack_pin *pins; struct snd_soc_jack *jack; struct snd_soc_card *card = runtime->card; int num_pins, ret; jack = snd_soc_card_get_drvdata(card); num_pins = ARRAY_SIZE(card_headset_pins); pins = devm_kmemdup(card->dev, card_headset_pins, sizeof(*pins) * num_pins, GFP_KERNEL); if (!pins) return -ENOMEM; ret = snd_soc_card_jack_new_pins(card, "Headset", SND_JACK_HEADSET, jack, pins, num_pins); if (ret) return ret; snd_soc_component_set_jack(component, jack, NULL); /* TDM 4 slots 24 bit, set Rx & Tx bitmask to 4 active slots */ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xF, 0xF, 4, 24); if (ret < 0) { dev_err(card->dev, "can't set codec pcm format %d\n", ret); return ret; } card->dapm.idle_bias_off = true; return 0; } static void avs_rt274_codec_exit(struct snd_soc_pcm_runtime *rtd) { snd_soc_component_set_jack(asoc_rtd_to_codec(rtd, 0)->component, NULL, NULL); } static int avs_rt274_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params) { struct snd_interval *rate, *channels; struct snd_mask *fmt; rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); /* The ADSP will convert the FE rate to 48k, stereo */ rate->min = rate->max = AVS_RT274_BE_FIXUP_RATE; channels->min = channels->max = 2; /* set SSPN to 24 bit */ snd_mask_none(fmt); snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE); return 0; } static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, struct snd_soc_dai_link **dai_link) { struct 
snd_soc_dai_link_component *platform; struct snd_soc_dai_link *dl; dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL); platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); if (!dl || !platform) return -ENOMEM; platform->name = platform_name; dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port); dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL); dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL); if (!dl->name || !dl->cpus || !dl->codecs) return -ENOMEM; dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port); dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "i2c-INT34C2:00"); dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, RT274_CODEC_DAI); if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name) return -ENOMEM; dl->num_cpus = 1; dl->num_codecs = 1; dl->platforms = platform; dl->num_platforms = 1; dl->id = 0; dl->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS; dl->init = avs_rt274_codec_init; dl->exit = avs_rt274_codec_exit; dl->be_hw_params_fixup = avs_rt274_be_fixup; dl->nonatomic = 1; dl->no_pcm = 1; dl->dpcm_capture = 1; dl->dpcm_playback = 1; *dai_link = dl; return 0; } static int avs_card_suspend_pre(struct snd_soc_card *card) { struct snd_soc_dai *codec_dai = snd_soc_card_get_codec_dai(card, RT274_CODEC_DAI); return snd_soc_component_set_jack(codec_dai->component, NULL, NULL); } static int avs_card_resume_post(struct snd_soc_card *card) { struct snd_soc_dai *codec_dai = snd_soc_card_get_codec_dai(card, RT274_CODEC_DAI); struct snd_soc_jack *jack = snd_soc_card_get_drvdata(card); return snd_soc_component_set_jack(codec_dai->component, jack, NULL); } static int avs_rt274_probe(struct platform_device *pdev) { struct snd_soc_dai_link *dai_link; struct snd_soc_acpi_mach *mach; struct snd_soc_card *card; struct snd_soc_jack *jack; struct device *dev = &pdev->dev; const char *pname; int ssp_port, ret; mach = dev_get_platdata(dev); pname = 
mach->mach_params.platform; ssp_port = __ffs(mach->mach_params.i2s_link_mask); ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link); if (ret) { dev_err(dev, "Failed to create dai link: %d", ret); return ret; } jack = devm_kzalloc(dev, sizeof(*jack), GFP_KERNEL); card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); if (!jack || !card) return -ENOMEM; card->name = "avs_rt274"; card->dev = dev; card->owner = THIS_MODULE; card->suspend_pre = avs_card_suspend_pre; card->resume_post = avs_card_resume_post; card->dai_link = dai_link; card->num_links = 1; card->controls = card_controls; card->num_controls = ARRAY_SIZE(card_controls); card->dapm_widgets = card_widgets; card->num_dapm_widgets = ARRAY_SIZE(card_widgets); card->dapm_routes = card_base_routes; card->num_dapm_routes = ARRAY_SIZE(card_base_routes); card->fully_routed = true; snd_soc_card_set_drvdata(card, jack); ret = snd_soc_fixup_dai_links_platform_name(card, pname); if (ret) return ret; return devm_snd_soc_register_card(dev, card); } static struct platform_driver avs_rt274_driver = { .probe = avs_rt274_probe, .driver = { .name = "avs_rt274", .pm = &snd_soc_pm_ops, }, }; module_platform_driver(avs_rt274_driver); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:avs_rt274");
/* linux-master: sound/soc/intel/avs/boards/rt274.c -- file boundary */
// SPDX-License-Identifier: GPL-2.0-only // // Copyright(c) 2021-2022 Intel Corporation. All rights reserved. // // Authors: Cezary Rojewski <[email protected]> // Amadeusz Slawinski <[email protected]> // #include <linux/module.h> #include <linux/platform_device.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-acpi.h> #include <sound/soc-dapm.h> static const struct snd_kcontrol_new card_controls[] = { SOC_DAPM_PIN_SWITCH("Spk"), }; static const struct snd_soc_dapm_widget card_widgets[] = { SND_SOC_DAPM_SPK("Spk", NULL), }; static const struct snd_soc_dapm_route card_base_routes[] = { { "Spk", NULL, "Speaker" }, }; static int avs_max98357a_be_fixup(struct snd_soc_pcm_runtime *runrime, struct snd_pcm_hw_params *params) { struct snd_interval *rate, *channels; struct snd_mask *fmt; rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); /* The ADSP will convert the FE rate to 48k, stereo */ rate->min = rate->max = 48000; channels->min = channels->max = 2; /* set SSP0 to 16 bit */ snd_mask_none(fmt); snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE); return 0; } static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, struct snd_soc_dai_link **dai_link) { struct snd_soc_dai_link_component *platform; struct snd_soc_dai_link *dl; dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL); platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); if (!dl || !platform) return -ENOMEM; platform->name = platform_name; dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port); dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL); dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL); if (!dl->name || !dl->cpus || !dl->codecs) return -ENOMEM; dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port); dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, 
"MX98357A:00"); dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, "HiFi"); if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name) return -ENOMEM; dl->num_cpus = 1; dl->num_codecs = 1; dl->platforms = platform; dl->num_platforms = 1; dl->id = 0; dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS; dl->be_hw_params_fixup = avs_max98357a_be_fixup; dl->nonatomic = 1; dl->no_pcm = 1; dl->dpcm_playback = 1; *dai_link = dl; return 0; } static int avs_max98357a_probe(struct platform_device *pdev) { struct snd_soc_dai_link *dai_link; struct snd_soc_acpi_mach *mach; struct snd_soc_card *card; struct device *dev = &pdev->dev; const char *pname; int ssp_port, ret; mach = dev_get_platdata(dev); pname = mach->mach_params.platform; ssp_port = __ffs(mach->mach_params.i2s_link_mask); ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link); if (ret) { dev_err(dev, "Failed to create dai link: %d", ret); return ret; } card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); if (!card) return -ENOMEM; card->name = "avs_max98357a"; card->dev = dev; card->owner = THIS_MODULE; card->dai_link = dai_link; card->num_links = 1; card->controls = card_controls; card->num_controls = ARRAY_SIZE(card_controls); card->dapm_widgets = card_widgets; card->num_dapm_widgets = ARRAY_SIZE(card_widgets); card->dapm_routes = card_base_routes; card->num_dapm_routes = ARRAY_SIZE(card_base_routes); card->fully_routed = true; ret = snd_soc_fixup_dai_links_platform_name(card, pname); if (ret) return ret; return devm_snd_soc_register_card(dev, card); } static struct platform_driver avs_max98357a_driver = { .probe = avs_max98357a_probe, .driver = { .name = "avs_max98357a", .pm = &snd_soc_pm_ops, }, }; module_platform_driver(avs_max98357a_driver) MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:avs_max98357a");
/* linux-master: sound/soc/intel/avs/boards/max98357a.c -- file boundary */
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <[email protected]>
//          Amadeusz Slawinski <[email protected]>
//

#include <linux/device.h>
#include <linux/module.h>
#include <sound/soc.h>
#include <sound/soc-acpi.h>

SND_SOC_DAILINK_DEF(dmic_pin, DAILINK_COMP_ARRAY(COMP_CPU("DMIC Pin")));
SND_SOC_DAILINK_DEF(dmic_wov_pin, DAILINK_COMP_ARRAY(COMP_CPU("DMIC WoV Pin")));
SND_SOC_DAILINK_DEF(dmic_codec, DAILINK_COMP_ARRAY(COMP_CODEC("dmic-codec", "dmic-hifi")));
/* Name overridden on probe */
SND_SOC_DAILINK_DEF(platform, DAILINK_COMP_ARRAY(COMP_PLATFORM("")));

/* Two capture-only BE links: regular DMIC and Wake-on-Voice DMIC. */
static struct snd_soc_dai_link card_dai_links[] = {
	/* Back ends */
	{
		.name = "DMIC",
		.id = 0,
		.dpcm_capture = 1,
		.nonatomic = 1,
		.no_pcm = 1,
		SND_SOC_DAILINK_REG(dmic_pin, dmic_codec, platform),
	},
	{
		.name = "DMIC WoV",
		.id = 1,
		.dpcm_capture = 1,
		.nonatomic = 1,
		.no_pcm = 1,
		/* WoV link must stay alive through suspend to catch keyphrases. */
		.ignore_suspend = 1,
		SND_SOC_DAILINK_REG(dmic_wov_pin, dmic_codec, platform),
	},
};

static const struct snd_soc_dapm_widget card_widgets[] = {
	SND_SOC_DAPM_MIC("SoC DMIC", NULL),
};

static const struct snd_soc_dapm_route card_routes[] = {
	{"DMic", NULL, "SoC DMIC"},
};

/* Register the DMIC-only card; platform name is filled in from ACPI data. */
static int avs_dmic_probe(struct platform_device *pdev)
{
	struct snd_soc_acpi_mach *mach;
	struct snd_soc_card *card;
	struct device *dev = &pdev->dev;
	int ret;

	mach = dev_get_platdata(dev);

	card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
	if (!card)
		return -ENOMEM;

	card->name = "avs_dmic";
	card->dev = dev;
	card->owner = THIS_MODULE;
	card->dai_link = card_dai_links;
	card->num_links = ARRAY_SIZE(card_dai_links);
	card->dapm_widgets = card_widgets;
	card->num_dapm_widgets = ARRAY_SIZE(card_widgets);
	card->dapm_routes = card_routes;
	card->num_dapm_routes = ARRAY_SIZE(card_routes);
	card->fully_routed = true;

	ret = snd_soc_fixup_dai_links_platform_name(card, mach->mach_params.platform);
	if (ret)
		return ret;

	return devm_snd_soc_register_card(dev, card);
}

static struct platform_driver avs_dmic_driver = {
	.probe = avs_dmic_probe,
	.driver = {
		.name = "avs_dmic",
		.pm = &snd_soc_pm_ops,
	},
};

module_platform_driver(avs_dmic_driver);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:avs_dmic");
/* linux-master: sound/soc/intel/avs/boards/dmic.c -- file boundary */
// SPDX-License-Identifier: GPL-2.0-only // // Copyright(c) 2021-2022 Intel Corporation. All rights reserved. // // Authors: Cezary Rojewski <[email protected]> // Amadeusz Slawinski <[email protected]> // #include <linux/dmi.h> #include <linux/module.h> #include <sound/jack.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-acpi.h> #include "../../../codecs/rt298.h" #define RT298_CODEC_DAI "rt298-aif1" static const struct dmi_system_id kblr_dmi_table[] = { { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), DMI_MATCH(DMI_BOARD_NAME, "Kabylake R DDR4 RVP"), }, }, {} }; static const struct snd_kcontrol_new card_controls[] = { SOC_DAPM_PIN_SWITCH("Headphone Jack"), SOC_DAPM_PIN_SWITCH("Mic Jack"), SOC_DAPM_PIN_SWITCH("Speaker"), }; static const struct snd_soc_dapm_widget card_widgets[] = { SND_SOC_DAPM_HP("Headphone Jack", NULL), SND_SOC_DAPM_MIC("Mic Jack", NULL), SND_SOC_DAPM_SPK("Speaker", NULL), }; static const struct snd_soc_dapm_route card_base_routes[] = { /* HP jack connectors - unknown if we have jack detect */ {"Headphone Jack", NULL, "HPO Pin"}, {"MIC1", NULL, "Mic Jack"}, {"Speaker", NULL, "SPOR"}, {"Speaker", NULL, "SPOL"}, }; static struct snd_soc_jack_pin card_headset_pins[] = { { .pin = "Headphone Jack", .mask = SND_JACK_HEADPHONE, }, { .pin = "Mic Jack", .mask = SND_JACK_MICROPHONE, }, }; static int avs_rt298_codec_init(struct snd_soc_pcm_runtime *runtime) { struct snd_soc_card *card = runtime->card; struct snd_soc_jack_pin *pins; struct snd_soc_jack *jack; int num_pins, ret; jack = snd_soc_card_get_drvdata(card); num_pins = ARRAY_SIZE(card_headset_pins); pins = devm_kmemdup(card->dev, card_headset_pins, sizeof(*pins) * num_pins, GFP_KERNEL); if (!pins) return -ENOMEM; ret = snd_soc_card_jack_new_pins(card, "Headset", SND_JACK_HEADSET | SND_JACK_BTN_0, jack, pins, num_pins); if (ret) return ret; return snd_soc_component_set_jack(asoc_rtd_to_codec(runtime, 0)->component, jack, NULL); } static 
void avs_rt298_codec_exit(struct snd_soc_pcm_runtime *rtd) { snd_soc_component_set_jack(asoc_rtd_to_codec(rtd, 0)->component, NULL, NULL); } static int avs_rt298_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params) { struct snd_interval *rate, *channels; struct snd_mask *fmt; rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); /* The ADSP will convert the FE rate to 48k, stereo */ rate->min = rate->max = 48000; channels->min = channels->max = 2; /* set SSP0 to 24 bit */ snd_mask_none(fmt); snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE); return 0; } static int avs_rt298_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0); unsigned int clk_freq; int ret; if (dmi_first_match(kblr_dmi_table)) clk_freq = 24000000; else clk_freq = 19200000; ret = snd_soc_dai_set_sysclk(codec_dai, RT298_SCLK_S_PLL, clk_freq, SND_SOC_CLOCK_IN); if (ret < 0) dev_err(rtd->dev, "Set codec sysclk failed: %d\n", ret); return ret; } static const struct snd_soc_ops avs_rt298_ops = { .hw_params = avs_rt298_hw_params, }; static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, struct snd_soc_dai_link **dai_link) { struct snd_soc_dai_link_component *platform; struct snd_soc_dai_link *dl; dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL); platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); if (!dl || !platform) return -ENOMEM; platform->name = platform_name; dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port); dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL); dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL); if (!dl->name || !dl->cpus || !dl->codecs) return -ENOMEM; dl->cpus->dai_name = devm_kasprintf(dev, 
GFP_KERNEL, "SSP%d Pin", ssp_port); dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "i2c-INT343A:00"); dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, RT298_CODEC_DAI); if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name) return -ENOMEM; dl->num_cpus = 1; dl->num_codecs = 1; dl->platforms = platform; dl->num_platforms = 1; dl->id = 0; if (dmi_first_match(kblr_dmi_table)) dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS; else dl->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS; dl->init = avs_rt298_codec_init; dl->exit = avs_rt298_codec_exit; dl->be_hw_params_fixup = avs_rt298_be_fixup; dl->ops = &avs_rt298_ops; dl->nonatomic = 1; dl->no_pcm = 1; dl->dpcm_capture = 1; dl->dpcm_playback = 1; *dai_link = dl; return 0; } static int avs_card_suspend_pre(struct snd_soc_card *card) { struct snd_soc_dai *codec_dai = snd_soc_card_get_codec_dai(card, RT298_CODEC_DAI); return snd_soc_component_set_jack(codec_dai->component, NULL, NULL); } static int avs_card_resume_post(struct snd_soc_card *card) { struct snd_soc_dai *codec_dai = snd_soc_card_get_codec_dai(card, RT298_CODEC_DAI); struct snd_soc_jack *jack = snd_soc_card_get_drvdata(card); return snd_soc_component_set_jack(codec_dai->component, jack, NULL); } static int avs_rt298_probe(struct platform_device *pdev) { struct snd_soc_dai_link *dai_link; struct snd_soc_acpi_mach *mach; struct snd_soc_card *card; struct snd_soc_jack *jack; struct device *dev = &pdev->dev; const char *pname; int ssp_port, ret; mach = dev_get_platdata(dev); pname = mach->mach_params.platform; ssp_port = __ffs(mach->mach_params.i2s_link_mask); ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link); if (ret) { dev_err(dev, "Failed to create dai link: %d", ret); return ret; } jack = devm_kzalloc(dev, sizeof(*jack), GFP_KERNEL); card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); if (!jack || !card) return -ENOMEM; card->name = "avs_rt298"; card->dev = dev; 
card->owner = THIS_MODULE; card->suspend_pre = avs_card_suspend_pre; card->resume_post = avs_card_resume_post; card->dai_link = dai_link; card->num_links = 1; card->controls = card_controls; card->num_controls = ARRAY_SIZE(card_controls); card->dapm_widgets = card_widgets; card->num_dapm_widgets = ARRAY_SIZE(card_widgets); card->dapm_routes = card_base_routes; card->num_dapm_routes = ARRAY_SIZE(card_base_routes); card->fully_routed = true; snd_soc_card_set_drvdata(card, jack); ret = snd_soc_fixup_dai_links_platform_name(card, pname); if (ret) return ret; return devm_snd_soc_register_card(dev, card); } static struct platform_driver avs_rt298_driver = { .probe = avs_rt298_probe, .driver = { .name = "avs_rt298", .pm = &snd_soc_pm_ops, }, }; module_platform_driver(avs_rt298_driver); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:avs_rt298");
/* linux-master: sound/soc/intel/avs/boards/rt298.c -- file boundary */
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <[email protected]>
//          Amadeusz Slawinski <[email protected]>
//
// AVS machine board for the Realtek RT286 codec attached over SSP.
// Registers a single DPCM backend DAI link plus headset-jack handling.

#include <linux/module.h>
#include <sound/jack.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-acpi.h>
#include "../../../codecs/rt286.h"

#define RT286_CODEC_DAI	"rt286-aif1"

/* Card-level pin switches for each external endpoint. */
static const struct snd_kcontrol_new card_controls[] = {
	SOC_DAPM_PIN_SWITCH("Headphone Jack"),
	SOC_DAPM_PIN_SWITCH("Mic Jack"),
	SOC_DAPM_PIN_SWITCH("Speaker"),
};

static const struct snd_soc_dapm_widget card_widgets[] = {
	SND_SOC_DAPM_HP("Headphone Jack", NULL),
	SND_SOC_DAPM_MIC("Mic Jack", NULL),
	SND_SOC_DAPM_SPK("Speaker", NULL),
};

static const struct snd_soc_dapm_route card_base_routes[] = {
	/* HP jack connectors - unknown if we have jack detect */
	{"Headphone Jack", NULL, "HPO Pin"},
	{"MIC1", NULL, "Mic Jack"},
	{"Speaker", NULL, "SPOR"},
	{"Speaker", NULL, "SPOL"},
};

/* Jack pins; template is copied per-card in avs_rt286_codec_init(). */
static struct snd_soc_jack_pin card_headset_pins[] = {
	{
		.pin = "Headphone Jack",
		.mask = SND_JACK_HEADPHONE,
	},
	{
		.pin = "Mic Jack",
		.mask = SND_JACK_MICROPHONE,
	},
};

/*
 * Create the headset jack and hand it to the codec component.
 * The jack object itself lives in card drvdata (allocated in probe).
 */
static int avs_rt286_codec_init(struct snd_soc_pcm_runtime *runtime)
{
	struct snd_soc_card *card = runtime->card;
	struct snd_soc_jack_pin *pins;
	struct snd_soc_jack *jack;
	int num_pins, ret;

	jack = snd_soc_card_get_drvdata(card);
	num_pins = ARRAY_SIZE(card_headset_pins);

	/* Duplicate the template so each card instance owns its pin array. */
	pins = devm_kmemdup(card->dev, card_headset_pins, sizeof(*pins) * num_pins, GFP_KERNEL);
	if (!pins)
		return -ENOMEM;

	ret = snd_soc_card_jack_new_pins(card, "Headset", SND_JACK_HEADSET | SND_JACK_BTN_0,
					 jack, pins, num_pins);
	if (ret)
		return ret;

	return snd_soc_component_set_jack(asoc_rtd_to_codec(runtime, 0)->component, jack, NULL);
}

/* Detach the jack from the codec when the link is torn down. */
static void avs_rt286_codec_exit(struct snd_soc_pcm_runtime *rtd)
{
	snd_soc_component_set_jack(asoc_rtd_to_codec(rtd, 0)->component, NULL, NULL);
}

/*
 * Backend fixup: the FE stream is converted by the DSP, so pin the BE to
 * the format the SSP/codec link actually runs at.
 */
static int avs_rt286_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate, *channels;
	struct snd_mask *fmt;

	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
	channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
	fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);

	/* The ADSP will convert the FE rate to 48k, stereo */
	rate->min = rate->max = 48000;
	channels->min = channels->max = 2;

	/* set SSP0 to 24 bit */
	snd_mask_none(fmt);
	snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
	return 0;
}

/* Feed the codec PLL with a 24 MHz input clock on every stream start. */
static int avs_rt286_hw_params(struct snd_pcm_substream *substream,
			       struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *runtime = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(runtime, 0);
	int ret;

	ret = snd_soc_dai_set_sysclk(codec_dai, RT286_SCLK_S_PLL, 24000000, SND_SOC_CLOCK_IN);
	if (ret < 0)
		dev_err(runtime->dev, "Set codec sysclk failed: %d\n", ret);

	return ret;
}

static const struct snd_soc_ops avs_rt286_ops = {
	.hw_params = avs_rt286_hw_params,
};

/*
 * Build the single SSP<n>-Codec backend DAI link.
 * All allocations are devm-managed; NULL checks cover the kasprintf results.
 */
static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
			       struct snd_soc_dai_link **dai_link)
{
	struct snd_soc_dai_link_component *platform;
	struct snd_soc_dai_link *dl;

	dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL);
	platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
	if (!dl || !platform)
		return -ENOMEM;

	platform->name = platform_name;

	dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port);
	dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL);
	dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL);
	if (!dl->name || !dl->cpus || !dl->codecs)
		return -ENOMEM;

	dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port);
	dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "i2c-INT343A:00");
	dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, RT286_CODEC_DAI);
	if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name)
		return -ENOMEM;

	dl->num_cpus = 1;
	dl->num_codecs = 1;
	dl->platforms = platform;
	dl->num_platforms = 1;
	dl->id = 0;
	dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS;
	dl->init = avs_rt286_codec_init;
	dl->exit = avs_rt286_codec_exit;
	dl->be_hw_params_fixup = avs_rt286_be_fixup;
	dl->ops = &avs_rt286_ops;
	dl->nonatomic = 1;
	dl->no_pcm = 1;
	dl->dpcm_capture = 1;
	dl->dpcm_playback = 1;

	*dai_link = dl;

	return 0;
}

/* Detach the jack across suspend so the codec can power down cleanly. */
static int avs_card_suspend_pre(struct snd_soc_card *card)
{
	struct snd_soc_dai *codec_dai = snd_soc_card_get_codec_dai(card, RT286_CODEC_DAI);

	return snd_soc_component_set_jack(codec_dai->component, NULL, NULL);
}

/* Re-attach the jack stored in card drvdata after resume. */
static int avs_card_resume_post(struct snd_soc_card *card)
{
	struct snd_soc_dai *codec_dai = snd_soc_card_get_codec_dai(card, RT286_CODEC_DAI);
	struct snd_soc_jack *jack = snd_soc_card_get_drvdata(card);

	return snd_soc_component_set_jack(codec_dai->component, jack, NULL);
}

static int avs_rt286_probe(struct platform_device *pdev)
{
	struct snd_soc_dai_link *dai_link;
	struct snd_soc_acpi_mach *mach;
	struct snd_soc_card *card;
	struct snd_soc_jack *jack;
	struct device *dev = &pdev->dev;
	const char *pname;
	int ssp_port, ret;

	mach = dev_get_platdata(dev);
	pname = mach->mach_params.platform;
	/* Lowest set bit of the I2S link mask selects the SSP port. */
	ssp_port = __ffs(mach->mach_params.i2s_link_mask);

	ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link);
	if (ret) {
		dev_err(dev, "Failed to create dai link: %d", ret);
		return ret;
	}

	jack = devm_kzalloc(dev, sizeof(*jack), GFP_KERNEL);
	card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
	if (!jack || !card)
		return -ENOMEM;

	card->name = "avs_rt286";
	card->dev = dev;
	card->owner = THIS_MODULE;
	card->suspend_pre = avs_card_suspend_pre;
	card->resume_post = avs_card_resume_post;
	card->dai_link = dai_link;
	card->num_links = 1;
	card->controls = card_controls;
	card->num_controls = ARRAY_SIZE(card_controls);
	card->dapm_widgets = card_widgets;
	card->num_dapm_widgets = ARRAY_SIZE(card_widgets);
	card->dapm_routes = card_base_routes;
	card->num_dapm_routes = ARRAY_SIZE(card_base_routes);
	card->fully_routed = true;
	/* drvdata is the jack itself; resume_post relies on this. */
	snd_soc_card_set_drvdata(card, jack);

	ret = snd_soc_fixup_dai_links_platform_name(card, pname);
	if (ret)
		return ret;

	return devm_snd_soc_register_card(dev, card);
}

static struct platform_driver avs_rt286_driver = {
	.probe = avs_rt286_probe,
	.driver = {
		.name = "avs_rt286",
		.pm = &snd_soc_pm_ops,
	},
};

module_platform_driver(avs_rt286_driver);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:avs_rt286");
linux-master
sound/soc/intel/avs/boards/rt286.c
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2022-2023 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <[email protected]>
//          Amadeusz Slawinski <[email protected]>
//
// AVS machine board for the Realtek RT5663 headset codec over SSP.

#include <linux/clk.h>
#include <linux/input.h>
#include <linux/module.h>
#include <sound/jack.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-acpi.h>
#include "../../../codecs/rt5663.h"

#define RT5663_CODEC_DAI	"rt5663-aif"

/*
 * Card private data. NOTE: the jack member being first matters —
 * avs_card_resume_post() reads the drvdata pointer directly as a
 * struct snd_soc_jack *.
 */
struct rt5663_private {
	struct snd_soc_jack jack;
};

static const struct snd_kcontrol_new card_controls[] = {
	SOC_DAPM_PIN_SWITCH("Headphone Jack"),
	SOC_DAPM_PIN_SWITCH("Headset Mic"),
};

static const struct snd_soc_dapm_widget card_widgets[] = {
	SND_SOC_DAPM_HP("Headphone Jack", NULL),
	SND_SOC_DAPM_MIC("Headset Mic", NULL),
};

static const struct snd_soc_dapm_route card_routes[] = {
	/* HP jack connectors */
	{ "Headphone Jack", NULL, "HPOL" },
	{ "Headphone Jack", NULL, "HPOR" },

	/* Mic jacks */
	{ "IN1P", NULL, "Headset Mic" },
	{ "IN1N", NULL, "Headset Mic" },
};

/* Jack pins; template is copied per-card in avs_rt5663_codec_init(). */
static struct snd_soc_jack_pin card_headset_pins[] = {
	{
		.pin = "Headphone Jack",
		.mask = SND_JACK_HEADPHONE,
	},
	{
		.pin = "Headset Mic",
		.mask = SND_JACK_MICROPHONE,
	},
};

/*
 * Create the 4-button headset jack, map the buttons to media keys and
 * register the jack with the codec component.
 */
static int avs_rt5663_codec_init(struct snd_soc_pcm_runtime *runtime)
{
	struct snd_soc_card *card = runtime->card;
	struct rt5663_private *priv = snd_soc_card_get_drvdata(card);
	struct snd_soc_jack_pin *pins;
	struct snd_soc_jack *jack;
	int num_pins, ret;

	jack = &priv->jack;
	num_pins = ARRAY_SIZE(card_headset_pins);

	pins = devm_kmemdup(card->dev, card_headset_pins, sizeof(*pins) * num_pins, GFP_KERNEL);
	if (!pins)
		return -ENOMEM;

	ret = snd_soc_card_jack_new_pins(card, "Headset Jack",
					 SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
					 SND_JACK_BTN_2 | SND_JACK_BTN_3, jack, pins, num_pins);
	if (ret)
		return ret;

	snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
	snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
	snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
	snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);

	snd_soc_component_set_jack(asoc_rtd_to_codec(runtime, 0)->component, jack, NULL);

	return 0;
}

/* Detach the jack from the codec when the link is torn down. */
static void avs_rt5663_codec_exit(struct snd_soc_pcm_runtime *runtime)
{
	snd_soc_component_set_jack(asoc_rtd_to_codec(runtime, 0)->component, NULL, NULL);
}

/* Backend fixup: pin the BE to the fixed rate/format the DSP outputs. */
static int avs_rt5663_be_fixup(struct snd_soc_pcm_runtime *runtime,
			       struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate, *channels;
	struct snd_mask *fmt;

	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
	channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
	fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);

	/* The ADSP will convert the FE rate to 48k, stereo */
	rate->min = rate->max = 48000;
	channels->min = channels->max = 2;

	/* set SSPN to 24 bit */
	snd_mask_none(fmt);
	snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
	return 0;
}

/* Select ASRC clocking and a 24.576 MHz MCLK for every stream start. */
static int avs_rt5663_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
	int ret;

	/* use ASRC for internal clocks, as PLL rate isn't multiple of BCLK */
	rt5663_sel_asrc_clk_src(codec_dai->component,
				RT5663_DA_STEREO_FILTER | RT5663_AD_STEREO_FILTER,
				RT5663_CLK_SEL_I2S1_ASRC);

	ret = snd_soc_dai_set_sysclk(codec_dai, RT5663_SCLK_S_MCLK, 24576000, SND_SOC_CLOCK_IN);

	return ret;
}

static const struct snd_soc_ops avs_rt5663_ops = {
	.hw_params = avs_rt5663_hw_params,
};

/*
 * Build the single SSP<n>-Codec backend DAI link.
 * All allocations are devm-managed; NULL checks cover the kasprintf results.
 */
static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
			       struct snd_soc_dai_link **dai_link)
{
	struct snd_soc_dai_link_component *platform;
	struct snd_soc_dai_link *dl;

	dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL);
	platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
	if (!dl || !platform)
		return -ENOMEM;

	platform->name = platform_name;

	dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port);
	dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL);
	dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL);
	if (!dl->name || !dl->cpus || !dl->codecs)
		return -ENOMEM;

	dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port);
	dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "i2c-10EC5663:00");
	dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, RT5663_CODEC_DAI);
	if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name)
		return -ENOMEM;

	dl->num_cpus = 1;
	dl->num_codecs = 1;
	dl->platforms = platform;
	dl->num_platforms = 1;
	dl->id = 0;
	dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBC_CFC;
	dl->init = avs_rt5663_codec_init;
	dl->exit = avs_rt5663_codec_exit;
	dl->be_hw_params_fixup = avs_rt5663_be_fixup;
	dl->nonatomic = 1;
	dl->no_pcm = 1;
	dl->dpcm_capture = 1;
	dl->dpcm_playback = 1;
	dl->ops = &avs_rt5663_ops;

	*dai_link = dl;

	return 0;
}

/* Detach the jack across suspend so the codec can power down cleanly. */
static int avs_card_suspend_pre(struct snd_soc_card *card)
{
	struct snd_soc_dai *codec_dai = snd_soc_card_get_codec_dai(card, RT5663_CODEC_DAI);

	return snd_soc_component_set_jack(codec_dai->component, NULL, NULL);
}

/*
 * Re-attach the jack after resume. drvdata is a struct rt5663_private *,
 * read here as a jack pointer — valid only while jack is its first member.
 */
static int avs_card_resume_post(struct snd_soc_card *card)
{
	struct snd_soc_dai *codec_dai = snd_soc_card_get_codec_dai(card, RT5663_CODEC_DAI);
	struct snd_soc_jack *jack = snd_soc_card_get_drvdata(card);

	return snd_soc_component_set_jack(codec_dai->component, jack, NULL);
}

static int avs_rt5663_probe(struct platform_device *pdev)
{
	struct snd_soc_dai_link *dai_link;
	struct snd_soc_acpi_mach *mach;
	struct snd_soc_card *card;
	struct rt5663_private *priv;
	struct device *dev = &pdev->dev;
	const char *pname;
	int ssp_port, ret;

	mach = dev_get_platdata(dev);
	pname = mach->mach_params.platform;
	/* Lowest set bit of the I2S link mask selects the SSP port. */
	ssp_port = __ffs(mach->mach_params.i2s_link_mask);

	ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link);
	if (ret) {
		dev_err(dev, "Failed to create dai link: %d", ret);
		return ret;
	}

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
	if (!priv || !card)
		return -ENOMEM;

	card->name = "avs_rt5663";
	card->dev = dev;
	card->owner = THIS_MODULE;
	card->suspend_pre = avs_card_suspend_pre;
	card->resume_post = avs_card_resume_post;
	card->dai_link = dai_link;
	card->num_links = 1;
	card->controls = card_controls;
	card->num_controls = ARRAY_SIZE(card_controls);
	card->dapm_widgets = card_widgets;
	card->num_dapm_widgets = ARRAY_SIZE(card_widgets);
	card->dapm_routes = card_routes;
	card->num_dapm_routes = ARRAY_SIZE(card_routes);
	card->fully_routed = true;
	snd_soc_card_set_drvdata(card, priv);

	ret = snd_soc_fixup_dai_links_platform_name(card, pname);
	if (ret)
		return ret;

	return devm_snd_soc_register_card(dev, card);
}

static struct platform_driver avs_rt5663_driver = {
	.probe = avs_rt5663_probe,
	.driver = {
		.name = "avs_rt5663",
		.pm = &snd_soc_pm_ops,
	},
};

module_platform_driver(avs_rt5663_driver);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:avs_rt5663");
linux-master
sound/soc/intel/avs/boards/rt5663.c
// SPDX-License-Identifier: GPL-2.0-only // // Copyright(c) 2022 Intel Corporation. All rights reserved. // // Authors: Cezary Rojewski <[email protected]> // Amadeusz Slawinski <[email protected]> // #include <linux/module.h> #include <linux/platform_device.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-acpi.h> #include <sound/soc-dapm.h> #define MAX98927_DEV0_NAME "i2c-MX98927:00" #define MAX98927_DEV1_NAME "i2c-MX98927:01" #define MAX98927_CODEC_NAME "max98927-aif1" static struct snd_soc_codec_conf card_codec_conf[] = { { .dlc = COMP_CODEC_CONF(MAX98927_DEV0_NAME), .name_prefix = "Right", }, { .dlc = COMP_CODEC_CONF(MAX98927_DEV1_NAME), .name_prefix = "Left", }, }; static const struct snd_kcontrol_new card_controls[] = { SOC_DAPM_PIN_SWITCH("Left Spk"), SOC_DAPM_PIN_SWITCH("Right Spk"), }; static const struct snd_soc_dapm_widget card_widgets[] = { SND_SOC_DAPM_SPK("Left Spk", NULL), SND_SOC_DAPM_SPK("Right Spk", NULL), }; static const struct snd_soc_dapm_route card_base_routes[] = { { "Left Spk", NULL, "Left BE_OUT" }, { "Right Spk", NULL, "Right BE_OUT" }, }; static int avs_max98927_be_fixup(struct snd_soc_pcm_runtime *runrime, struct snd_pcm_hw_params *params) { struct snd_interval *rate, *channels; struct snd_mask *fmt; rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); /* The ADSP will convert the FE rate to 48k, stereo */ rate->min = rate->max = 48000; channels->min = channels->max = 2; /* set SSP0 to 16 bit */ snd_mask_none(fmt); snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE); return 0; } static int avs_max98927_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *runtime = asoc_substream_to_rtd(substream); struct snd_soc_dai *codec_dai; int ret = 0; int i; for_each_rtd_codec_dais(runtime, i, codec_dai) { if 
(!strcmp(codec_dai->component->name, MAX98927_DEV0_NAME)) ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x30, 3, 8, 16); else if (!strcmp(codec_dai->component->name, MAX98927_DEV1_NAME)) ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xC0, 3, 8, 16); if (ret < 0) { dev_err(runtime->dev, "hw_params for %s failed: %d\n", codec_dai->component->name, ret); return ret; } } return 0; } static const struct snd_soc_ops avs_max98927_ops = { .hw_params = avs_max98927_hw_params, }; static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, struct snd_soc_dai_link **dai_link) { struct snd_soc_dai_link_component *platform; struct snd_soc_dai_link *dl; dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL); platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); if (!dl || !platform) return -ENOMEM; platform->name = platform_name; dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port); dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL); dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs) * 2, GFP_KERNEL); if (!dl->name || !dl->cpus || !dl->codecs) return -ENOMEM; dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port); dl->codecs[0].name = devm_kasprintf(dev, GFP_KERNEL, MAX98927_DEV0_NAME); dl->codecs[0].dai_name = devm_kasprintf(dev, GFP_KERNEL, MAX98927_CODEC_NAME); dl->codecs[1].name = devm_kasprintf(dev, GFP_KERNEL, MAX98927_DEV1_NAME); dl->codecs[1].dai_name = devm_kasprintf(dev, GFP_KERNEL, MAX98927_CODEC_NAME); if (!dl->cpus->dai_name || !dl->codecs[0].name || !dl->codecs[0].dai_name || !dl->codecs[1].name || !dl->codecs[1].dai_name) return -ENOMEM; dl->num_cpus = 1; dl->num_codecs = 2; dl->platforms = platform; dl->num_platforms = 1; dl->id = 0; dl->dai_fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS; dl->be_hw_params_fixup = avs_max98927_be_fixup; dl->nonatomic = 1; dl->no_pcm = 1; dl->dpcm_capture = 1; dl->dpcm_playback = 1; dl->ignore_pmdown_time = 1; dl->ops = 
&avs_max98927_ops; *dai_link = dl; return 0; } static int avs_max98927_probe(struct platform_device *pdev) { struct snd_soc_dai_link *dai_link; struct snd_soc_acpi_mach *mach; struct snd_soc_card *card; struct device *dev = &pdev->dev; const char *pname; int ssp_port, ret; mach = dev_get_platdata(dev); pname = mach->mach_params.platform; ssp_port = __ffs(mach->mach_params.i2s_link_mask); ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link); if (ret) { dev_err(dev, "Failed to create dai link: %d", ret); return ret; } card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); if (!card) return -ENOMEM; card->name = "avs_max98927"; card->dev = dev; card->owner = THIS_MODULE; card->dai_link = dai_link; card->num_links = 1; card->codec_conf = card_codec_conf; card->num_configs = ARRAY_SIZE(card_codec_conf); card->controls = card_controls; card->num_controls = ARRAY_SIZE(card_controls); card->dapm_widgets = card_widgets; card->num_dapm_widgets = ARRAY_SIZE(card_widgets); card->dapm_routes = card_base_routes; card->num_dapm_routes = ARRAY_SIZE(card_base_routes); card->fully_routed = true; ret = snd_soc_fixup_dai_links_platform_name(card, pname); if (ret) return ret; return devm_snd_soc_register_card(dev, card); } static struct platform_driver avs_max98927_driver = { .probe = avs_max98927_probe, .driver = { .name = "avs_max98927", .pm = &snd_soc_pm_ops, }, }; module_platform_driver(avs_max98927_driver) MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:avs_max98927");
linux-master
sound/soc/intel/avs/boards/max98927.c
// SPDX-License-Identifier: GPL-2.0-only // // Copyright(c) 2021-2022 Intel Corporation. All rights reserved. // // Authors: Cezary Rojewski <[email protected]> // Amadeusz Slawinski <[email protected]> // #include <linux/device.h> #include <linux/module.h> #include <sound/soc.h> #include <sound/soc-acpi.h> SND_SOC_DAILINK_DEF(dummy, DAILINK_COMP_ARRAY(COMP_DUMMY())); SND_SOC_DAILINK_DEF(probe_cp, DAILINK_COMP_ARRAY(COMP_CPU("Probe Extraction CPU DAI"))); SND_SOC_DAILINK_DEF(platform, DAILINK_COMP_ARRAY(COMP_PLATFORM("probe-platform"))); static struct snd_soc_dai_link probe_mb_dai_links[] = { { .name = "Compress Probe Capture", .nonatomic = 1, SND_SOC_DAILINK_REG(probe_cp, dummy, platform), }, }; static int avs_probe_mb_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct snd_soc_acpi_mach *mach; struct snd_soc_card *card; int ret; mach = dev_get_platdata(dev); card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); if (!card) return -ENOMEM; card->name = "avs_probe_mb"; card->dev = dev; card->owner = THIS_MODULE; card->dai_link = probe_mb_dai_links; card->num_links = ARRAY_SIZE(probe_mb_dai_links); card->fully_routed = true; ret = snd_soc_fixup_dai_links_platform_name(card, mach->mach_params.platform); if (ret) return ret; return devm_snd_soc_register_card(dev, card); } static struct platform_driver avs_probe_mb_driver = { .probe = avs_probe_mb_probe, .driver = { .name = "avs_probe_mb", .pm = &snd_soc_pm_ops, }, }; module_platform_driver(avs_probe_mb_driver); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:avs_probe_mb");
linux-master
sound/soc/intel/avs/boards/probe.c
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2023 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <[email protected]>
//          Amadeusz Slawinski <[email protected]>
//
// AVS machine board for the Everest ES8336 codec. The speakers share the
// headphone outputs and are switched on/off via an ACPI-described GPIO.

#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/input.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/processor.h>
#include <linux/slab.h>
#include <sound/jack.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-acpi.h>
#include <asm/intel-family.h>

#define ES8336_CODEC_DAI	"ES8316 HiFi"

/* Per-card state: headset jack plus the speaker-enable GPIO. */
struct avs_card_drvdata {
	struct snd_soc_jack jack;
	struct gpio_desc *gpiod;
};

static const struct acpi_gpio_params enable_gpio = { 0, 0, true };

static const struct acpi_gpio_mapping speaker_gpios[] = {
	{ "speaker-enable-gpios", &enable_gpio, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO },
	{ }
};

/* DAPM event handler toggling the speaker amplifier GPIO. */
static int avs_es8336_speaker_power_event(struct snd_soc_dapm_widget *w,
					  struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_card *card = w->dapm->card;
	struct avs_card_drvdata *data;
	bool speaker_en;

	data = snd_soc_card_get_drvdata(card);
	/* As enable_gpio has active_low=true, logic is inverted. */
	speaker_en = !SND_SOC_DAPM_EVENT_ON(event);

	gpiod_set_value_cansleep(data->gpiod, speaker_en);
	return 0;
}

static const struct snd_soc_dapm_widget card_widgets[] = {
	SND_SOC_DAPM_SPK("Speaker", NULL),
	SND_SOC_DAPM_HP("Headphone", NULL),
	SND_SOC_DAPM_MIC("Headset Mic", NULL),
	SND_SOC_DAPM_MIC("Internal Mic", NULL),

	SND_SOC_DAPM_SUPPLY("Speaker Power", SND_SOC_NOPM, 0, 0,
			    avs_es8336_speaker_power_event,
			    SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
};

static const struct snd_soc_dapm_route card_routes[] = {
	{"Headphone", NULL, "HPOL"},
	{"Headphone", NULL, "HPOR"},

	/*
	 * There is no separate speaker output instead the speakers are muxed to
	 * the HP outputs. The mux is controlled by the "Speaker Power" widget.
	 */
	{"Speaker", NULL, "HPOL"},
	{"Speaker", NULL, "HPOR"},
	{"Speaker", NULL, "Speaker Power"},

	/* Mic route map */
	{"MIC1", NULL, "Internal Mic"},
	{"MIC2", NULL, "Headset Mic"},
};

static const struct snd_kcontrol_new card_controls[] = {
	SOC_DAPM_PIN_SWITCH("Speaker"),
	SOC_DAPM_PIN_SWITCH("Headphone"),
	SOC_DAPM_PIN_SWITCH("Headset Mic"),
	SOC_DAPM_PIN_SWITCH("Internal Mic"),
};

/* Jack pins; template is copied per-card in avs_es8336_codec_init(). */
static struct snd_soc_jack_pin card_headset_pins[] = {
	{
		.pin = "Headphone",
		.mask = SND_JACK_HEADPHONE,
	},
	{
		.pin = "Headset Mic",
		.mask = SND_JACK_MICROPHONE,
	},
};

/*
 * Create the headset jack, acquire the optional speaker-enable GPIO from
 * the ACPI mapping, and register the jack with the codec component.
 */
static int avs_es8336_codec_init(struct snd_soc_pcm_runtime *runtime)
{
	struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(runtime, 0);
	struct snd_soc_component *component = codec_dai->component;
	struct snd_soc_card *card = runtime->card;
	struct snd_soc_jack_pin *pins;
	struct avs_card_drvdata *data;
	struct gpio_desc *gpiod;
	int num_pins, ret;

	data = snd_soc_card_get_drvdata(card);
	num_pins = ARRAY_SIZE(card_headset_pins);

	pins = devm_kmemdup(card->dev, card_headset_pins, sizeof(*pins) * num_pins, GFP_KERNEL);
	if (!pins)
		return -ENOMEM;

	ret = snd_soc_card_jack_new_pins(card, "Headset", SND_JACK_HEADSET | SND_JACK_BTN_0,
					 &data->jack, pins, num_pins);
	if (ret)
		return ret;

	/* Mapping failure is non-fatal; the GPIO below is optional anyway. */
	ret = devm_acpi_dev_add_driver_gpios(codec_dai->dev, speaker_gpios);
	if (ret)
		dev_warn(codec_dai->dev, "Unable to add GPIO mapping table\n");

	gpiod = gpiod_get_optional(codec_dai->dev, "speaker-enable", GPIOD_OUT_LOW);
	if (IS_ERR(gpiod))
		return dev_err_probe(codec_dai->dev, PTR_ERR(gpiod), "Get gpiod failed: %ld\n",
				     PTR_ERR(gpiod));

	data->gpiod = gpiod;
	snd_jack_set_key(data->jack.jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
	snd_soc_component_set_jack(component, &data->jack, NULL);

	card->dapm.idle_bias_off = true;

	return 0;
}

/* Release the jack binding and the GPIO taken in codec_init(). */
static void avs_es8336_codec_exit(struct snd_soc_pcm_runtime *runtime)
{
	struct avs_card_drvdata *data = snd_soc_card_get_drvdata(runtime->card);
	struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(runtime, 0);

	snd_soc_component_set_jack(codec_dai->component, NULL, NULL);
	gpiod_put(data->gpiod);
}

/*
 * Configure the codec MCLK output; Kaby Lake parts run it at 24 MHz,
 * everything else at 19.2 MHz.
 */
static int avs_es8336_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *runtime = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(runtime, 0);
	int clk_freq;
	int ret;

	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_KABYLAKE_L:
	case INTEL_FAM6_KABYLAKE:
		clk_freq = 24000000;
		break;
	default:
		clk_freq = 19200000;
		break;
	}

	ret = snd_soc_dai_set_sysclk(codec_dai, 1, clk_freq, SND_SOC_CLOCK_OUT);
	if (ret < 0)
		dev_err(runtime->dev, "Set codec sysclk failed: %d\n", ret);

	return ret;
}

static const struct snd_soc_ops avs_es8336_ops = {
	.hw_params = avs_es8336_hw_params,
};

/* Backend fixup: pin the BE to the fixed rate/format the DSP outputs. */
static int avs_es8336_be_fixup(struct snd_soc_pcm_runtime *runtime,
			       struct snd_pcm_hw_params *params)
{
	struct snd_interval *rate, *channels;
	struct snd_mask *fmt;

	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
	channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
	fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);

	/* The ADSP will convert the FE rate to 48k, stereo */
	rate->min = rate->max = 48000;
	channels->min = channels->max = 2;

	/* set SSPN to 24 bit */
	snd_mask_none(fmt);
	snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_3LE);
	return 0;
}

/*
 * Build the single SSP<n>-Codec backend DAI link.
 * All allocations are devm-managed; NULL checks cover the kasprintf results.
 */
static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
			       struct snd_soc_dai_link **dai_link)
{
	struct snd_soc_dai_link_component *platform;
	struct snd_soc_dai_link *dl;

	dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL);
	platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
	if (!dl || !platform)
		return -ENOMEM;

	platform->name = platform_name;

	dl->name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d-Codec", ssp_port);
	dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL);
	dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL);
	if (!dl->name || !dl->cpus || !dl->codecs)
		return -ENOMEM;

	dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, "SSP%d Pin", ssp_port);
	dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "i2c-ESSX8336:00");
	dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, ES8336_CODEC_DAI);
	if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name)
		return -ENOMEM;

	dl->num_cpus = 1;
	dl->num_codecs = 1;
	dl->platforms = platform;
	dl->num_platforms = 1;
	dl->id = 0;
	dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBC_CFC;
	dl->init = avs_es8336_codec_init;
	dl->exit = avs_es8336_codec_exit;
	dl->be_hw_params_fixup = avs_es8336_be_fixup;
	dl->ops = &avs_es8336_ops;
	dl->nonatomic = 1;
	dl->no_pcm = 1;
	dl->dpcm_capture = 1;
	dl->dpcm_playback = 1;

	*dai_link = dl;

	return 0;
}

/* Detach the jack across suspend so the codec can power down cleanly. */
static int avs_card_suspend_pre(struct snd_soc_card *card)
{
	struct snd_soc_dai *codec_dai = snd_soc_card_get_codec_dai(card, ES8336_CODEC_DAI);

	return snd_soc_component_set_jack(codec_dai->component, NULL, NULL);
}

/* Re-attach the jack stored in card drvdata after resume. */
static int avs_card_resume_post(struct snd_soc_card *card)
{
	struct snd_soc_dai *codec_dai = snd_soc_card_get_codec_dai(card, ES8336_CODEC_DAI);
	struct avs_card_drvdata *data = snd_soc_card_get_drvdata(card);

	return snd_soc_component_set_jack(codec_dai->component, &data->jack, NULL);
}

static int avs_es8336_probe(struct platform_device *pdev)
{
	struct snd_soc_dai_link *dai_link;
	struct snd_soc_acpi_mach *mach;
	struct avs_card_drvdata *data;
	struct snd_soc_card *card;
	struct device *dev = &pdev->dev;
	const char *pname;
	int ssp_port, ret;

	mach = dev_get_platdata(dev);
	pname = mach->mach_params.platform;
	/* Lowest set bit of the I2S link mask selects the SSP port. */
	ssp_port = __ffs(mach->mach_params.i2s_link_mask);

	ret = avs_create_dai_link(dev, pname, ssp_port, &dai_link);
	if (ret) {
		dev_err(dev, "Failed to create dai link: %d", ret);
		return ret;
	}

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
	if (!data || !card)
		return -ENOMEM;

	card->name = "avs_es8336";
	card->dev = dev;
	card->owner = THIS_MODULE;
	card->suspend_pre = avs_card_suspend_pre;
	card->resume_post = avs_card_resume_post;
	card->dai_link = dai_link;
	card->num_links = 1;
	card->controls = card_controls;
	card->num_controls = ARRAY_SIZE(card_controls);
	card->dapm_widgets = card_widgets;
	card->num_dapm_widgets = ARRAY_SIZE(card_widgets);
	card->dapm_routes = card_routes;
	card->num_dapm_routes = ARRAY_SIZE(card_routes);
	card->fully_routed = true;
	snd_soc_card_set_drvdata(card, data);

	ret = snd_soc_fixup_dai_links_platform_name(card, pname);
	if (ret)
		return ret;

	return devm_snd_soc_register_card(dev, card);
}

static struct platform_driver avs_es8336_driver = {
	.probe = avs_es8336_probe,
	.driver = {
		.name = "avs_es8336",
		.pm = &snd_soc_pm_ops,
	},
};

module_platform_driver(avs_es8336_driver);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:avs_es8336");
linux-master
sound/soc/intel/avs/boards/es8336.c
// SPDX-License-Identifier: GPL-2.0-only /* * sst-atom-controls.c - Intel MID Platform driver DPCM ALSA controls for Mrfld * * Copyright (C) 2013-14 Intel Corp * Author: Omair Mohammed Abdullah <[email protected]> * Vinod Koul <[email protected]> * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * In the dpcm driver modelling when a particular FE/BE/Mixer/Pipe is active * we forward the settings and parameters, rest we keep the values in * driver and forward when DAPM enables them * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/slab.h> #include <sound/soc.h> #include <sound/tlv.h> #include "sst-mfld-platform.h" #include "sst-atom-controls.h" static int sst_fill_byte_control(struct sst_data *drv, u8 ipc_msg, u8 block, u8 task_id, u8 pipe_id, u16 len, void *cmd_data) { struct snd_sst_bytes_v2 *byte_data = drv->byte_stream; byte_data->type = SST_CMD_BYTES_SET; byte_data->ipc_msg = ipc_msg; byte_data->block = block; byte_data->task_id = task_id; byte_data->pipe_id = pipe_id; if (len > SST_MAX_BIN_BYTES - sizeof(*byte_data)) { dev_err(&drv->pdev->dev, "command length too big (%u)", len); return -EINVAL; } byte_data->len = len; memcpy(byte_data->bytes, cmd_data, len); print_hex_dump_bytes("writing to lpe: ", DUMP_PREFIX_OFFSET, byte_data, len + sizeof(*byte_data)); return 0; } static int sst_fill_and_send_cmd_unlocked(struct sst_data *drv, u8 ipc_msg, u8 block, u8 task_id, u8 pipe_id, void *cmd_data, u16 len) { int ret = 0; WARN_ON(!mutex_is_locked(&drv->lock)); ret = sst_fill_byte_control(drv, ipc_msg, block, task_id, pipe_id, len, cmd_data); if (ret < 0) return ret; return sst->ops->send_byte_stream(sst->dev, drv->byte_stream); } /** * sst_fill_and_send_cmd - generate the IPC message and send it to the FW * @drv: sst_data * @ipc_msg: type of IPC (CMD, SET_PARAMS, GET_PARAMS) * @block: block index * @task_id: task index * @pipe_id: pipe index * 
@cmd_data: the IPC payload * @len: length of data to be sent */ static int sst_fill_and_send_cmd(struct sst_data *drv, u8 ipc_msg, u8 block, u8 task_id, u8 pipe_id, void *cmd_data, u16 len) { int ret; mutex_lock(&drv->lock); ret = sst_fill_and_send_cmd_unlocked(drv, ipc_msg, block, task_id, pipe_id, cmd_data, len); mutex_unlock(&drv->lock); return ret; } /* * tx map value is a bitfield where each bit represents a FW channel * * 3 2 1 0 # 0 = codec0, 1 = codec1 * RLRLRLRL # 3, 4 = reserved * * e.g. slot 0 rx map = 00001100b -> data from slot 0 goes into codec_in1 L,R */ static u8 sst_ssp_tx_map[SST_MAX_TDM_SLOTS] = { 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80, /* default rx map */ }; /* * rx map value is a bitfield where each bit represents a slot * * 76543210 # 0 = slot 0, 1 = slot 1 * * e.g. codec1_0 tx map = 00000101b -> data from codec_out1_0 goes into slot 0, 2 */ static u8 sst_ssp_rx_map[SST_MAX_TDM_SLOTS] = { 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80, /* default tx map */ }; /* * NOTE: this is invoked with lock held */ static int sst_send_slot_map(struct sst_data *drv) { struct sst_param_sba_ssp_slot_map cmd; SST_FILL_DEFAULT_DESTINATION(cmd.header.dst); cmd.header.command_id = SBA_SET_SSP_SLOT_MAP; cmd.header.length = sizeof(struct sst_param_sba_ssp_slot_map) - sizeof(struct sst_dsp_header); cmd.param_id = SBA_SET_SSP_SLOT_MAP; cmd.param_len = sizeof(cmd.rx_slot_map) + sizeof(cmd.tx_slot_map) + sizeof(cmd.ssp_index); cmd.ssp_index = SSP_CODEC; memcpy(cmd.rx_slot_map, &sst_ssp_tx_map[0], sizeof(cmd.rx_slot_map)); memcpy(cmd.tx_slot_map, &sst_ssp_rx_map[0], sizeof(cmd.tx_slot_map)); return sst_fill_and_send_cmd_unlocked(drv, SST_IPC_IA_SET_PARAMS, SST_FLAG_BLOCKED, SST_TASK_SBA, 0, &cmd, sizeof(cmd.header) + cmd.header.length); } static int sst_slot_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct sst_enum *e = (struct sst_enum *)kcontrol->private_value; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; 
uinfo->value.enumerated.items = e->max; if (uinfo->value.enumerated.item > e->max - 1) uinfo->value.enumerated.item = e->max - 1; strcpy(uinfo->value.enumerated.name, e->texts[uinfo->value.enumerated.item]); return 0; } /** * sst_slot_get - get the status of the interleaver/deinterleaver control * @kcontrol: control pointer * @ucontrol: User data * Searches the map where the control status is stored, and gets the * channel/slot which is currently set for this enumerated control. Since it is * an enumerated control, there is only one possible value. */ static int sst_slot_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct sst_enum *e = (void *)kcontrol->private_value; struct snd_soc_component *c = snd_kcontrol_chip(kcontrol); struct sst_data *drv = snd_soc_component_get_drvdata(c); unsigned int ctl_no = e->reg; unsigned int is_tx = e->tx; unsigned int val, mux; u8 *map = is_tx ? sst_ssp_rx_map : sst_ssp_tx_map; mutex_lock(&drv->lock); val = 1 << ctl_no; /* search which slot/channel has this bit set - there should be only one */ for (mux = e->max; mux > 0; mux--) if (map[mux - 1] & val) break; ucontrol->value.enumerated.item[0] = mux; mutex_unlock(&drv->lock); dev_dbg(c->dev, "%s - %s map = %#x\n", is_tx ? "tx channel" : "rx slot", e->texts[mux], mux ? 
map[mux - 1] : -1); return 0; } /* sst_check_and_send_slot_map - helper for checking power state and sending * slot map cmd * * called with lock held */ static int sst_check_and_send_slot_map(struct sst_data *drv, struct snd_kcontrol *kcontrol) { struct sst_enum *e = (void *)kcontrol->private_value; int ret = 0; if (e->w && e->w->power) ret = sst_send_slot_map(drv); else if (!e->w) dev_err(&drv->pdev->dev, "Slot control: %s doesn't have DAPM widget!!!\n", kcontrol->id.name); return ret; } /** * sst_slot_put - set the status of interleaver/deinterleaver control * @kcontrol: control pointer * @ucontrol: User data * (de)interleaver controls are defined in opposite sense to be user-friendly * * Instead of the enum value being the value written to the register, it is the * register address; and the kcontrol number (register num) is the value written * to the register. This is so that there can be only one value for each * slot/channel since there is only one control for each slot/channel. * * This means that whenever an enum is set, we need to clear the bit * for that kcontrol_no for all the interleaver OR deinterleaver registers */ static int sst_slot_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol); struct sst_data *drv = snd_soc_component_get_drvdata(c); struct sst_enum *e = (void *)kcontrol->private_value; int i, ret = 0; unsigned int ctl_no = e->reg; unsigned int is_tx = e->tx; unsigned int slot_channel_no; unsigned int val, mux; u8 *map; map = is_tx ? 
sst_ssp_rx_map : sst_ssp_tx_map; val = 1 << ctl_no; mux = ucontrol->value.enumerated.item[0]; if (mux > e->max - 1) return -EINVAL; mutex_lock(&drv->lock); /* first clear all registers of this bit */ for (i = 0; i < e->max; i++) map[i] &= ~val; if (mux == 0) { /* kctl set to 'none' and we reset the bits so send IPC */ ret = sst_check_and_send_slot_map(drv, kcontrol); mutex_unlock(&drv->lock); return ret; } /* offset by one to take "None" into account */ slot_channel_no = mux - 1; map[slot_channel_no] |= val; dev_dbg(c->dev, "%s %s map = %#x\n", is_tx ? "tx channel" : "rx slot", e->texts[mux], map[slot_channel_no]); ret = sst_check_and_send_slot_map(drv, kcontrol); mutex_unlock(&drv->lock); return ret; } static int sst_send_algo_cmd(struct sst_data *drv, struct sst_algo_control *bc) { int len, ret = 0; struct sst_cmd_set_params *cmd; /*bc->max includes sizeof algos + length field*/ len = sizeof(cmd->dst) + sizeof(cmd->command_id) + bc->max; cmd = kzalloc(len, GFP_KERNEL); if (cmd == NULL) return -ENOMEM; SST_FILL_DESTINATION(2, cmd->dst, bc->pipe_id, bc->module_id); cmd->command_id = bc->cmd_id; memcpy(cmd->params, bc->params, bc->max); ret = sst_fill_and_send_cmd_unlocked(drv, SST_IPC_IA_SET_PARAMS, SST_FLAG_BLOCKED, bc->task_id, 0, cmd, len); kfree(cmd); return ret; } /** * sst_find_and_send_pipe_algo - send all the algo parameters for a pipe * @drv: sst_data * @pipe: string identifier * @ids: list of algorithms * The algos which are in each pipeline are sent to the firmware one by one * * Called with lock held */ static int sst_find_and_send_pipe_algo(struct sst_data *drv, const char *pipe, struct sst_ids *ids) { int ret = 0; struct sst_algo_control *bc; struct sst_module *algo; dev_dbg(&drv->pdev->dev, "Enter: widget=%s\n", pipe); list_for_each_entry(algo, &ids->algo_list, node) { bc = (void *)algo->kctl->private_value; dev_dbg(&drv->pdev->dev, "Found algo control name=%s pipe=%s\n", algo->kctl->id.name, pipe); ret = sst_send_algo_cmd(drv, bc); if (ret) return 
ret; } return ret; } static int sst_algo_bytes_ctl_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct sst_algo_control *bc = (void *)kcontrol->private_value; uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES; uinfo->count = bc->max; return 0; } static int sst_algo_control_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct sst_algo_control *bc = (void *)kcontrol->private_value; struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); switch (bc->type) { case SST_ALGO_PARAMS: memcpy(ucontrol->value.bytes.data, bc->params, bc->max); break; default: dev_err(component->dev, "Invalid Input- algo type:%d\n", bc->type); return -EINVAL; } return 0; } static int sst_algo_control_set(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int ret = 0; struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); struct sst_data *drv = snd_soc_component_get_drvdata(cmpnt); struct sst_algo_control *bc = (void *)kcontrol->private_value; dev_dbg(cmpnt->dev, "control_name=%s\n", kcontrol->id.name); mutex_lock(&drv->lock); switch (bc->type) { case SST_ALGO_PARAMS: memcpy(bc->params, ucontrol->value.bytes.data, bc->max); break; default: mutex_unlock(&drv->lock); dev_err(cmpnt->dev, "Invalid Input- algo type:%d\n", bc->type); return -EINVAL; } /*if pipe is enabled, need to send the algo params from here*/ if (bc->w && bc->w->power) ret = sst_send_algo_cmd(drv, bc); mutex_unlock(&drv->lock); return ret; } static int sst_gain_ctl_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct sst_gain_mixer_control *mc = (void *)kcontrol->private_value; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = mc->stereo ? 
2 : 1; uinfo->value.integer.min = mc->min; uinfo->value.integer.max = mc->max; return 0; } /** * sst_send_gain_cmd - send the gain algorithm IPC to the FW * @drv: sst_data * @gv:the stored value of gain (also contains rampduration) * @task_id: task index * @loc_id: location/position index * @module_id: module index * @mute: flag that indicates whether this was called from the * digital_mute callback or directly. If called from the * digital_mute callback, module will be muted/unmuted based on this * flag. The flag is always 0 if called directly. * * Called with sst_data.lock held * * The user-set gain value is sent only if the user-controllable 'mute' control * is OFF (indicated by gv->mute). Otherwise, the mute value (MIN value) is * sent. */ static int sst_send_gain_cmd(struct sst_data *drv, struct sst_gain_value *gv, u16 task_id, u16 loc_id, u16 module_id, int mute) { struct sst_cmd_set_gain_dual cmd; dev_dbg(&drv->pdev->dev, "Enter\n"); cmd.header.command_id = MMX_SET_GAIN; SST_FILL_DEFAULT_DESTINATION(cmd.header.dst); cmd.gain_cell_num = 1; if (mute || gv->mute) { cmd.cell_gains[0].cell_gain_left = SST_GAIN_MIN_VALUE; cmd.cell_gains[0].cell_gain_right = SST_GAIN_MIN_VALUE; } else { cmd.cell_gains[0].cell_gain_left = gv->l_gain; cmd.cell_gains[0].cell_gain_right = gv->r_gain; } SST_FILL_DESTINATION(2, cmd.cell_gains[0].dest, loc_id, module_id); cmd.cell_gains[0].gain_time_constant = gv->ramp_duration; cmd.header.length = sizeof(struct sst_cmd_set_gain_dual) - sizeof(struct sst_dsp_header); /* we are with lock held, so call the unlocked api to send */ return sst_fill_and_send_cmd_unlocked(drv, SST_IPC_IA_SET_PARAMS, SST_FLAG_BLOCKED, task_id, 0, &cmd, sizeof(cmd.header) + cmd.header.length); } static int sst_gain_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); struct sst_gain_mixer_control *mc = (void *)kcontrol->private_value; struct sst_gain_value *gv = mc->gain_val; 
switch (mc->type) { case SST_GAIN_TLV: ucontrol->value.integer.value[0] = gv->l_gain; ucontrol->value.integer.value[1] = gv->r_gain; break; case SST_GAIN_MUTE: ucontrol->value.integer.value[0] = gv->mute ? 0 : 1; break; case SST_GAIN_RAMP_DURATION: ucontrol->value.integer.value[0] = gv->ramp_duration; break; default: dev_err(component->dev, "Invalid Input- gain type:%d\n", mc->type); return -EINVAL; } return 0; } static int sst_gain_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int ret = 0; struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); struct sst_data *drv = snd_soc_component_get_drvdata(cmpnt); struct sst_gain_mixer_control *mc = (void *)kcontrol->private_value; struct sst_gain_value *gv = mc->gain_val; mutex_lock(&drv->lock); switch (mc->type) { case SST_GAIN_TLV: gv->l_gain = ucontrol->value.integer.value[0]; gv->r_gain = ucontrol->value.integer.value[1]; dev_dbg(cmpnt->dev, "%s: Volume %d, %d\n", mc->pname, gv->l_gain, gv->r_gain); break; case SST_GAIN_MUTE: gv->mute = !ucontrol->value.integer.value[0]; dev_dbg(cmpnt->dev, "%s: Mute %d\n", mc->pname, gv->mute); break; case SST_GAIN_RAMP_DURATION: gv->ramp_duration = ucontrol->value.integer.value[0]; dev_dbg(cmpnt->dev, "%s: Ramp Delay%d\n", mc->pname, gv->ramp_duration); break; default: mutex_unlock(&drv->lock); dev_err(cmpnt->dev, "Invalid Input- gain type:%d\n", mc->type); return -EINVAL; } if (mc->w && mc->w->power) ret = sst_send_gain_cmd(drv, gv, mc->task_id, mc->pipe_id | mc->instance_id, mc->module_id, 0); mutex_unlock(&drv->lock); return ret; } static int sst_set_pipe_gain(struct sst_ids *ids, struct sst_data *drv, int mute); static int sst_send_pipe_module_params(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol) { struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm); struct sst_data *drv = snd_soc_component_get_drvdata(c); struct sst_ids *ids = w->priv; mutex_lock(&drv->lock); sst_find_and_send_pipe_algo(drv, w->name, ids); 
sst_set_pipe_gain(ids, drv, 0); mutex_unlock(&drv->lock); return 0; } static int sst_generic_modules_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { if (SND_SOC_DAPM_EVENT_ON(event)) return sst_send_pipe_module_params(w, k); return 0; } static const DECLARE_TLV_DB_SCALE(sst_gain_tlv_common, SST_GAIN_MIN_VALUE * 10, 10, 0); /* Look up table to convert MIXER SW bit regs to SWM inputs */ static const uint swm_mixer_input_ids[SST_SWM_INPUT_COUNT] = { [SST_IP_MODEM] = SST_SWM_IN_MODEM, [SST_IP_CODEC0] = SST_SWM_IN_CODEC0, [SST_IP_CODEC1] = SST_SWM_IN_CODEC1, [SST_IP_LOOP0] = SST_SWM_IN_SPROT_LOOP, [SST_IP_LOOP1] = SST_SWM_IN_MEDIA_LOOP1, [SST_IP_LOOP2] = SST_SWM_IN_MEDIA_LOOP2, [SST_IP_PCM0] = SST_SWM_IN_PCM0, [SST_IP_PCM1] = SST_SWM_IN_PCM1, [SST_IP_MEDIA0] = SST_SWM_IN_MEDIA0, [SST_IP_MEDIA1] = SST_SWM_IN_MEDIA1, [SST_IP_MEDIA2] = SST_SWM_IN_MEDIA2, [SST_IP_MEDIA3] = SST_SWM_IN_MEDIA3, }; /** * fill_swm_input - fill in the SWM input ids given the register * @cmpnt: ASoC component * @swm_input: array of swm_input_ids * @reg: the register value is a bit-field inicated which mixer inputs are ON. * * Use the lookup table to get the input-id and fill it in the * structure. 
*/ static int fill_swm_input(struct snd_soc_component *cmpnt, struct swm_input_ids *swm_input, unsigned int reg) { uint i, is_set, nb_inputs = 0; u16 input_loc_id; dev_dbg(cmpnt->dev, "reg: %#x\n", reg); for (i = 0; i < SST_SWM_INPUT_COUNT; i++) { is_set = reg & BIT(i); if (!is_set) continue; input_loc_id = swm_mixer_input_ids[i]; SST_FILL_DESTINATION(2, swm_input->input_id, input_loc_id, SST_DEFAULT_MODULE_ID); nb_inputs++; swm_input++; dev_dbg(cmpnt->dev, "input id: %#x, nb_inputs: %d\n", input_loc_id, nb_inputs); if (nb_inputs == SST_CMD_SWM_MAX_INPUTS) { dev_warn(cmpnt->dev, "SET_SWM cmd max inputs reached"); break; } } return nb_inputs; } /* * called with lock held */ static int sst_set_pipe_gain(struct sst_ids *ids, struct sst_data *drv, int mute) { int ret = 0; struct sst_gain_mixer_control *mc; struct sst_gain_value *gv; struct sst_module *gain; list_for_each_entry(gain, &ids->gain_list, node) { struct snd_kcontrol *kctl = gain->kctl; dev_dbg(&drv->pdev->dev, "control name=%s\n", kctl->id.name); mc = (void *)kctl->private_value; gv = mc->gain_val; ret = sst_send_gain_cmd(drv, gv, mc->task_id, mc->pipe_id | mc->instance_id, mc->module_id, mute); if (ret) return ret; } return ret; } static int sst_swm_mixer_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { struct sst_cmd_set_swm cmd; struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm); struct sst_data *drv = snd_soc_component_get_drvdata(cmpnt); struct sst_ids *ids = w->priv; bool set_mixer = false; struct soc_mixer_control *mc; int val = 0; int i = 0; dev_dbg(cmpnt->dev, "widget = %s\n", w->name); /* * Identify which mixer input is on and send the bitmap of the * inputs as an IPC to the DSP. 
*/ for (i = 0; i < w->num_kcontrols; i++) { if (dapm_kcontrol_get_value(w->kcontrols[i])) { mc = (struct soc_mixer_control *)(w->kcontrols[i])->private_value; val |= 1 << mc->shift; } } dev_dbg(cmpnt->dev, "val = %#x\n", val); switch (event) { case SND_SOC_DAPM_PRE_PMU: case SND_SOC_DAPM_POST_PMD: set_mixer = true; break; case SND_SOC_DAPM_POST_REG: if (w->power) set_mixer = true; break; default: set_mixer = false; } if (!set_mixer) return 0; if (SND_SOC_DAPM_EVENT_ON(event) || event == SND_SOC_DAPM_POST_REG) cmd.switch_state = SST_SWM_ON; else cmd.switch_state = SST_SWM_OFF; SST_FILL_DEFAULT_DESTINATION(cmd.header.dst); /* MMX_SET_SWM == SBA_SET_SWM */ cmd.header.command_id = SBA_SET_SWM; SST_FILL_DESTINATION(2, cmd.output_id, ids->location_id, SST_DEFAULT_MODULE_ID); cmd.nb_inputs = fill_swm_input(cmpnt, &cmd.input[0], val); cmd.header.length = offsetof(struct sst_cmd_set_swm, input) - sizeof(struct sst_dsp_header) + (cmd.nb_inputs * sizeof(cmd.input[0])); return sst_fill_and_send_cmd(drv, SST_IPC_IA_CMD, SST_FLAG_BLOCKED, ids->task_id, 0, &cmd, sizeof(cmd.header) + cmd.header.length); } /* SBA mixers - 16 inputs */ #define SST_SBA_DECLARE_MIX_CONTROLS(kctl_name) \ static const struct snd_kcontrol_new kctl_name[] = { \ SOC_DAPM_SINGLE("modem_in Switch", SND_SOC_NOPM, SST_IP_MODEM, 1, 0), \ SOC_DAPM_SINGLE("codec_in0 Switch", SND_SOC_NOPM, SST_IP_CODEC0, 1, 0), \ SOC_DAPM_SINGLE("codec_in1 Switch", SND_SOC_NOPM, SST_IP_CODEC1, 1, 0), \ SOC_DAPM_SINGLE("sprot_loop_in Switch", SND_SOC_NOPM, SST_IP_LOOP0, 1, 0), \ SOC_DAPM_SINGLE("media_loop1_in Switch", SND_SOC_NOPM, SST_IP_LOOP1, 1, 0), \ SOC_DAPM_SINGLE("media_loop2_in Switch", SND_SOC_NOPM, SST_IP_LOOP2, 1, 0), \ SOC_DAPM_SINGLE("pcm0_in Switch", SND_SOC_NOPM, SST_IP_PCM0, 1, 0), \ SOC_DAPM_SINGLE("pcm1_in Switch", SND_SOC_NOPM, SST_IP_PCM1, 1, 0), \ } #define SST_SBA_MIXER_GRAPH_MAP(mix_name) \ { mix_name, "modem_in Switch", "modem_in" }, \ { mix_name, "codec_in0 Switch", "codec_in0" }, \ { mix_name, "codec_in1 
Switch", "codec_in1" }, \ { mix_name, "sprot_loop_in Switch", "sprot_loop_in" }, \ { mix_name, "media_loop1_in Switch", "media_loop1_in" }, \ { mix_name, "media_loop2_in Switch", "media_loop2_in" }, \ { mix_name, "pcm0_in Switch", "pcm0_in" }, \ { mix_name, "pcm1_in Switch", "pcm1_in" } #define SST_MMX_DECLARE_MIX_CONTROLS(kctl_name) \ static const struct snd_kcontrol_new kctl_name[] = { \ SOC_DAPM_SINGLE("media0_in Switch", SND_SOC_NOPM, SST_IP_MEDIA0, 1, 0), \ SOC_DAPM_SINGLE("media1_in Switch", SND_SOC_NOPM, SST_IP_MEDIA1, 1, 0), \ SOC_DAPM_SINGLE("media2_in Switch", SND_SOC_NOPM, SST_IP_MEDIA2, 1, 0), \ SOC_DAPM_SINGLE("media3_in Switch", SND_SOC_NOPM, SST_IP_MEDIA3, 1, 0), \ } SST_MMX_DECLARE_MIX_CONTROLS(sst_mix_media0_controls); SST_MMX_DECLARE_MIX_CONTROLS(sst_mix_media1_controls); /* 18 SBA mixers */ SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_pcm0_controls); SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_pcm1_controls); SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_pcm2_controls); SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_sprot_l0_controls); SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_media_l1_controls); SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_media_l2_controls); SST_SBA_DECLARE_MIX_CONTROLS(__maybe_unused sst_mix_voip_controls); SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_codec0_controls); SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_codec1_controls); SST_SBA_DECLARE_MIX_CONTROLS(sst_mix_modem_controls); /* * sst_handle_vb_timer - Start/Stop the DSP scheduler * * The DSP expects first cmd to be SBA_VB_START, so at first startup send * that. * DSP expects last cmd to be SBA_VB_IDLE, so at last shutdown send that. * * Do refcount internally so that we send command only at first start * and last end. Since SST driver does its own ref count, invoke sst's * power ops always! 
*/ int sst_handle_vb_timer(struct snd_soc_dai *dai, bool enable) { int ret = 0; struct sst_cmd_generic cmd; struct sst_data *drv = snd_soc_dai_get_drvdata(dai); static int timer_usage; if (enable) cmd.header.command_id = SBA_VB_START; else cmd.header.command_id = SBA_IDLE; dev_dbg(dai->dev, "enable=%u, usage=%d\n", enable, timer_usage); SST_FILL_DEFAULT_DESTINATION(cmd.header.dst); cmd.header.length = 0; if (enable) { ret = sst->ops->power(sst->dev, true); if (ret < 0) return ret; } mutex_lock(&drv->lock); if (enable) timer_usage++; else timer_usage--; /* * Send the command only if this call is the first enable or last * disable */ if ((enable && (timer_usage == 1)) || (!enable && (timer_usage == 0))) { ret = sst_fill_and_send_cmd_unlocked(drv, SST_IPC_IA_CMD, SST_FLAG_BLOCKED, SST_TASK_SBA, 0, &cmd, sizeof(cmd.header) + cmd.header.length); if (ret && enable) { timer_usage--; enable = false; } } mutex_unlock(&drv->lock); if (!enable) sst->ops->power(sst->dev, false); return ret; } int sst_fill_ssp_slot(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) { struct sst_data *ctx = snd_soc_dai_get_drvdata(dai); ctx->ssp_cmd.nb_slots = slots; ctx->ssp_cmd.active_tx_slot_map = tx_mask; ctx->ssp_cmd.active_rx_slot_map = rx_mask; ctx->ssp_cmd.nb_bits_per_slots = slot_width; return 0; } static int sst_get_frame_sync_polarity(struct snd_soc_dai *dai, unsigned int fmt) { int format; format = fmt & SND_SOC_DAIFMT_INV_MASK; dev_dbg(dai->dev, "Enter:%s, format=%x\n", __func__, format); switch (format) { case SND_SOC_DAIFMT_NB_NF: case SND_SOC_DAIFMT_IB_NF: return SSP_FS_ACTIVE_HIGH; case SND_SOC_DAIFMT_NB_IF: case SND_SOC_DAIFMT_IB_IF: return SSP_FS_ACTIVE_LOW; default: dev_err(dai->dev, "Invalid frame sync polarity %d\n", format); } return -EINVAL; } static int sst_get_ssp_mode(struct snd_soc_dai *dai, unsigned int fmt) { int format; format = (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK); dev_dbg(dai->dev, "Enter:%s, format=%x\n", 
__func__, format); switch (format) { case SND_SOC_DAIFMT_BP_FP: return SSP_MODE_PROVIDER; case SND_SOC_DAIFMT_BC_FC: return SSP_MODE_CONSUMER; default: dev_err(dai->dev, "Invalid ssp protocol: %d\n", format); } return -EINVAL; } int sst_fill_ssp_config(struct snd_soc_dai *dai, unsigned int fmt) { unsigned int mode; int fs_polarity; struct sst_data *ctx = snd_soc_dai_get_drvdata(dai); mode = fmt & SND_SOC_DAIFMT_FORMAT_MASK; switch (mode) { case SND_SOC_DAIFMT_DSP_B: ctx->ssp_cmd.ssp_protocol = SSP_MODE_PCM; ctx->ssp_cmd.mode = sst_get_ssp_mode(dai, fmt) | (SSP_PCM_MODE_NETWORK << 1); ctx->ssp_cmd.start_delay = 0; ctx->ssp_cmd.data_polarity = 1; ctx->ssp_cmd.frame_sync_width = 1; break; case SND_SOC_DAIFMT_DSP_A: ctx->ssp_cmd.ssp_protocol = SSP_MODE_PCM; ctx->ssp_cmd.mode = sst_get_ssp_mode(dai, fmt) | (SSP_PCM_MODE_NETWORK << 1); ctx->ssp_cmd.start_delay = 1; ctx->ssp_cmd.data_polarity = 1; ctx->ssp_cmd.frame_sync_width = 1; break; case SND_SOC_DAIFMT_I2S: ctx->ssp_cmd.ssp_protocol = SSP_MODE_I2S; ctx->ssp_cmd.mode = sst_get_ssp_mode(dai, fmt) | (SSP_PCM_MODE_NORMAL << 1); ctx->ssp_cmd.start_delay = 1; ctx->ssp_cmd.data_polarity = 0; ctx->ssp_cmd.frame_sync_width = ctx->ssp_cmd.nb_bits_per_slots; break; case SND_SOC_DAIFMT_LEFT_J: ctx->ssp_cmd.ssp_protocol = SSP_MODE_I2S; ctx->ssp_cmd.mode = sst_get_ssp_mode(dai, fmt) | (SSP_PCM_MODE_NORMAL << 1); ctx->ssp_cmd.start_delay = 0; ctx->ssp_cmd.data_polarity = 0; ctx->ssp_cmd.frame_sync_width = ctx->ssp_cmd.nb_bits_per_slots; break; default: dev_dbg(dai->dev, "using default ssp configs\n"); } fs_polarity = sst_get_frame_sync_polarity(dai, fmt); if (fs_polarity < 0) return fs_polarity; ctx->ssp_cmd.frame_sync_polarity = fs_polarity; return 0; } /* * sst_ssp_config - contains SSP configuration for media UC * this can be overwritten by set_dai_xxx APIs */ static const struct sst_ssp_config sst_ssp_configs = { .ssp_id = SSP_CODEC, .bits_per_slot = 24, .slots = 4, .ssp_mode = SSP_MODE_PROVIDER, .pcm_mode = 
SSP_PCM_MODE_NETWORK, .duplex = SSP_DUPLEX, .ssp_protocol = SSP_MODE_PCM, .fs_width = 1, .fs_frequency = SSP_FS_48_KHZ, .active_slot_map = 0xF, .start_delay = 0, .frame_sync_polarity = SSP_FS_ACTIVE_HIGH, .data_polarity = 1, }; void sst_fill_ssp_defaults(struct snd_soc_dai *dai) { const struct sst_ssp_config *config; struct sst_data *ctx = snd_soc_dai_get_drvdata(dai); config = &sst_ssp_configs; ctx->ssp_cmd.selection = config->ssp_id; ctx->ssp_cmd.nb_bits_per_slots = config->bits_per_slot; ctx->ssp_cmd.nb_slots = config->slots; ctx->ssp_cmd.mode = config->ssp_mode | (config->pcm_mode << 1); ctx->ssp_cmd.duplex = config->duplex; ctx->ssp_cmd.active_tx_slot_map = config->active_slot_map; ctx->ssp_cmd.active_rx_slot_map = config->active_slot_map; ctx->ssp_cmd.frame_sync_frequency = config->fs_frequency; ctx->ssp_cmd.frame_sync_polarity = config->frame_sync_polarity; ctx->ssp_cmd.data_polarity = config->data_polarity; ctx->ssp_cmd.frame_sync_width = config->fs_width; ctx->ssp_cmd.ssp_protocol = config->ssp_protocol; ctx->ssp_cmd.start_delay = config->start_delay; ctx->ssp_cmd.reserved1 = ctx->ssp_cmd.reserved2 = 0xFF; } int send_ssp_cmd(struct snd_soc_dai *dai, const char *id, bool enable) { struct sst_data *drv = snd_soc_dai_get_drvdata(dai); int ssp_id; dev_dbg(dai->dev, "Enter: enable=%d port_name=%s\n", enable, id); if (strcmp(id, "ssp0-port") == 0) ssp_id = SSP_MODEM; else if (strcmp(id, "ssp2-port") == 0) ssp_id = SSP_CODEC; else { dev_dbg(dai->dev, "port %s is not supported\n", id); return -1; } SST_FILL_DEFAULT_DESTINATION(drv->ssp_cmd.header.dst); drv->ssp_cmd.header.command_id = SBA_HW_SET_SSP; drv->ssp_cmd.header.length = sizeof(struct sst_cmd_sba_hw_set_ssp) - sizeof(struct sst_dsp_header); drv->ssp_cmd.selection = ssp_id; dev_dbg(dai->dev, "ssp_id: %u\n", ssp_id); if (enable) drv->ssp_cmd.switch_state = SST_SWITCH_ON; else drv->ssp_cmd.switch_state = SST_SWITCH_OFF; return sst_fill_and_send_cmd(drv, SST_IPC_IA_CMD, SST_FLAG_BLOCKED, SST_TASK_SBA, 0, 
&drv->ssp_cmd, sizeof(drv->ssp_cmd.header) + drv->ssp_cmd.header.length); } static int sst_set_be_modules(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { int ret = 0; struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm); struct sst_data *drv = snd_soc_component_get_drvdata(c); dev_dbg(c->dev, "Enter: widget=%s\n", w->name); if (SND_SOC_DAPM_EVENT_ON(event)) { mutex_lock(&drv->lock); ret = sst_send_slot_map(drv); mutex_unlock(&drv->lock); if (ret) return ret; ret = sst_send_pipe_module_params(w, k); } return ret; } static int sst_set_media_path(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { int ret = 0; struct sst_cmd_set_media_path cmd; struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm); struct sst_data *drv = snd_soc_component_get_drvdata(c); struct sst_ids *ids = w->priv; dev_dbg(c->dev, "widget=%s\n", w->name); dev_dbg(c->dev, "task=%u, location=%#x\n", ids->task_id, ids->location_id); if (SND_SOC_DAPM_EVENT_ON(event)) cmd.switch_state = SST_PATH_ON; else cmd.switch_state = SST_PATH_OFF; SST_FILL_DESTINATION(2, cmd.header.dst, ids->location_id, SST_DEFAULT_MODULE_ID); /* MMX_SET_MEDIA_PATH == SBA_SET_MEDIA_PATH */ cmd.header.command_id = MMX_SET_MEDIA_PATH; cmd.header.length = sizeof(struct sst_cmd_set_media_path) - sizeof(struct sst_dsp_header); ret = sst_fill_and_send_cmd(drv, SST_IPC_IA_CMD, SST_FLAG_BLOCKED, ids->task_id, 0, &cmd, sizeof(cmd.header) + cmd.header.length); if (ret) return ret; if (SND_SOC_DAPM_EVENT_ON(event)) ret = sst_send_pipe_module_params(w, k); return ret; } static int sst_set_media_loop(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { int ret = 0; struct sst_cmd_sba_set_media_loop_map cmd; struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm); struct sst_data *drv = snd_soc_component_get_drvdata(c); struct sst_ids *ids = w->priv; dev_dbg(c->dev, "Enter:widget=%s\n", w->name); if (SND_SOC_DAPM_EVENT_ON(event)) cmd.switch_state = 
SST_SWITCH_ON; else cmd.switch_state = SST_SWITCH_OFF; SST_FILL_DESTINATION(2, cmd.header.dst, ids->location_id, SST_DEFAULT_MODULE_ID); cmd.header.command_id = SBA_SET_MEDIA_LOOP_MAP; cmd.header.length = sizeof(struct sst_cmd_sba_set_media_loop_map) - sizeof(struct sst_dsp_header); cmd.param.part.cfg.rate = 2; /* 48khz */ cmd.param.part.cfg.format = ids->format; /* stereo/Mono */ cmd.param.part.cfg.s_length = 1; /* 24bit left justified */ cmd.map = 0; /* Algo sequence: Gain - DRP - FIR - IIR */ ret = sst_fill_and_send_cmd(drv, SST_IPC_IA_CMD, SST_FLAG_BLOCKED, SST_TASK_SBA, 0, &cmd, sizeof(cmd.header) + cmd.header.length); if (ret) return ret; if (SND_SOC_DAPM_EVENT_ON(event)) ret = sst_send_pipe_module_params(w, k); return ret; } static const struct snd_soc_dapm_widget sst_dapm_widgets[] = { SST_AIF_IN("modem_in", sst_set_be_modules), SST_AIF_IN("codec_in0", sst_set_be_modules), SST_AIF_IN("codec_in1", sst_set_be_modules), SST_AIF_OUT("modem_out", sst_set_be_modules), SST_AIF_OUT("codec_out0", sst_set_be_modules), SST_AIF_OUT("codec_out1", sst_set_be_modules), /* Media Paths */ /* MediaX IN paths are set via ALLOC, so no SET_MEDIA_PATH command */ SST_PATH_INPUT("media0_in", SST_TASK_MMX, SST_SWM_IN_MEDIA0, sst_generic_modules_event), SST_PATH_INPUT("media1_in", SST_TASK_MMX, SST_SWM_IN_MEDIA1, NULL), SST_PATH_INPUT("media2_in", SST_TASK_MMX, SST_SWM_IN_MEDIA2, sst_set_media_path), SST_PATH_INPUT("media3_in", SST_TASK_MMX, SST_SWM_IN_MEDIA3, NULL), SST_PATH_OUTPUT("media0_out", SST_TASK_MMX, SST_SWM_OUT_MEDIA0, sst_set_media_path), SST_PATH_OUTPUT("media1_out", SST_TASK_MMX, SST_SWM_OUT_MEDIA1, sst_set_media_path), /* SBA PCM Paths */ SST_PATH_INPUT("pcm0_in", SST_TASK_SBA, SST_SWM_IN_PCM0, sst_set_media_path), SST_PATH_INPUT("pcm1_in", SST_TASK_SBA, SST_SWM_IN_PCM1, sst_set_media_path), SST_PATH_OUTPUT("pcm0_out", SST_TASK_SBA, SST_SWM_OUT_PCM0, sst_set_media_path), SST_PATH_OUTPUT("pcm1_out", SST_TASK_SBA, SST_SWM_OUT_PCM1, sst_set_media_path), 
SST_PATH_OUTPUT("pcm2_out", SST_TASK_SBA, SST_SWM_OUT_PCM2, sst_set_media_path), /* SBA Loops */ SST_PATH_INPUT("sprot_loop_in", SST_TASK_SBA, SST_SWM_IN_SPROT_LOOP, NULL), SST_PATH_INPUT("media_loop1_in", SST_TASK_SBA, SST_SWM_IN_MEDIA_LOOP1, NULL), SST_PATH_INPUT("media_loop2_in", SST_TASK_SBA, SST_SWM_IN_MEDIA_LOOP2, NULL), SST_PATH_MEDIA_LOOP_OUTPUT("sprot_loop_out", SST_TASK_SBA, SST_SWM_OUT_SPROT_LOOP, SST_FMT_STEREO, sst_set_media_loop), SST_PATH_MEDIA_LOOP_OUTPUT("media_loop1_out", SST_TASK_SBA, SST_SWM_OUT_MEDIA_LOOP1, SST_FMT_STEREO, sst_set_media_loop), SST_PATH_MEDIA_LOOP_OUTPUT("media_loop2_out", SST_TASK_SBA, SST_SWM_OUT_MEDIA_LOOP2, SST_FMT_STEREO, sst_set_media_loop), /* Media Mixers */ SST_SWM_MIXER("media0_out mix 0", SND_SOC_NOPM, SST_TASK_MMX, SST_SWM_OUT_MEDIA0, sst_mix_media0_controls, sst_swm_mixer_event), SST_SWM_MIXER("media1_out mix 0", SND_SOC_NOPM, SST_TASK_MMX, SST_SWM_OUT_MEDIA1, sst_mix_media1_controls, sst_swm_mixer_event), /* SBA PCM mixers */ SST_SWM_MIXER("pcm0_out mix 0", SND_SOC_NOPM, SST_TASK_SBA, SST_SWM_OUT_PCM0, sst_mix_pcm0_controls, sst_swm_mixer_event), SST_SWM_MIXER("pcm1_out mix 0", SND_SOC_NOPM, SST_TASK_SBA, SST_SWM_OUT_PCM1, sst_mix_pcm1_controls, sst_swm_mixer_event), SST_SWM_MIXER("pcm2_out mix 0", SND_SOC_NOPM, SST_TASK_SBA, SST_SWM_OUT_PCM2, sst_mix_pcm2_controls, sst_swm_mixer_event), /* SBA Loop mixers */ SST_SWM_MIXER("sprot_loop_out mix 0", SND_SOC_NOPM, SST_TASK_SBA, SST_SWM_OUT_SPROT_LOOP, sst_mix_sprot_l0_controls, sst_swm_mixer_event), SST_SWM_MIXER("media_loop1_out mix 0", SND_SOC_NOPM, SST_TASK_SBA, SST_SWM_OUT_MEDIA_LOOP1, sst_mix_media_l1_controls, sst_swm_mixer_event), SST_SWM_MIXER("media_loop2_out mix 0", SND_SOC_NOPM, SST_TASK_SBA, SST_SWM_OUT_MEDIA_LOOP2, sst_mix_media_l2_controls, sst_swm_mixer_event), /* SBA Backend mixers */ SST_SWM_MIXER("codec_out0 mix 0", SND_SOC_NOPM, SST_TASK_SBA, SST_SWM_OUT_CODEC0, sst_mix_codec0_controls, sst_swm_mixer_event), SST_SWM_MIXER("codec_out1 mix 0", 
SND_SOC_NOPM, SST_TASK_SBA, SST_SWM_OUT_CODEC1, sst_mix_codec1_controls, sst_swm_mixer_event), SST_SWM_MIXER("modem_out mix 0", SND_SOC_NOPM, SST_TASK_SBA, SST_SWM_OUT_MODEM, sst_mix_modem_controls, sst_swm_mixer_event), }; static const struct snd_soc_dapm_route intercon[] = { {"media0_in", NULL, "Compress Playback"}, {"media1_in", NULL, "Headset Playback"}, {"media2_in", NULL, "pcm0_out"}, {"media3_in", NULL, "Deepbuffer Playback"}, {"media0_out mix 0", "media0_in Switch", "media0_in"}, {"media0_out mix 0", "media1_in Switch", "media1_in"}, {"media0_out mix 0", "media2_in Switch", "media2_in"}, {"media0_out mix 0", "media3_in Switch", "media3_in"}, {"media1_out mix 0", "media0_in Switch", "media0_in"}, {"media1_out mix 0", "media1_in Switch", "media1_in"}, {"media1_out mix 0", "media2_in Switch", "media2_in"}, {"media1_out mix 0", "media3_in Switch", "media3_in"}, {"media0_out", NULL, "media0_out mix 0"}, {"media1_out", NULL, "media1_out mix 0"}, {"pcm0_in", NULL, "media0_out"}, {"pcm1_in", NULL, "media1_out"}, {"Headset Capture", NULL, "pcm1_out"}, {"Headset Capture", NULL, "pcm2_out"}, {"pcm0_out", NULL, "pcm0_out mix 0"}, SST_SBA_MIXER_GRAPH_MAP("pcm0_out mix 0"), {"pcm1_out", NULL, "pcm1_out mix 0"}, SST_SBA_MIXER_GRAPH_MAP("pcm1_out mix 0"), {"pcm2_out", NULL, "pcm2_out mix 0"}, SST_SBA_MIXER_GRAPH_MAP("pcm2_out mix 0"), {"media_loop1_in", NULL, "media_loop1_out"}, {"media_loop1_out", NULL, "media_loop1_out mix 0"}, SST_SBA_MIXER_GRAPH_MAP("media_loop1_out mix 0"), {"media_loop2_in", NULL, "media_loop2_out"}, {"media_loop2_out", NULL, "media_loop2_out mix 0"}, SST_SBA_MIXER_GRAPH_MAP("media_loop2_out mix 0"), {"sprot_loop_in", NULL, "sprot_loop_out"}, {"sprot_loop_out", NULL, "sprot_loop_out mix 0"}, SST_SBA_MIXER_GRAPH_MAP("sprot_loop_out mix 0"), {"codec_out0", NULL, "codec_out0 mix 0"}, SST_SBA_MIXER_GRAPH_MAP("codec_out0 mix 0"), {"codec_out1", NULL, "codec_out1 mix 0"}, SST_SBA_MIXER_GRAPH_MAP("codec_out1 mix 0"), {"modem_out", NULL, "modem_out mix 0"}, 
SST_SBA_MIXER_GRAPH_MAP("modem_out mix 0"), }; static const char * const slot_names[] = { "none", "slot 0", "slot 1", "slot 2", "slot 3", "slot 4", "slot 5", "slot 6", "slot 7", /* not supported by FW */ }; static const char * const channel_names[] = { "none", "codec_out0_0", "codec_out0_1", "codec_out1_0", "codec_out1_1", "codec_out2_0", "codec_out2_1", "codec_out3_0", "codec_out3_1", /* not supported by FW */ }; #define SST_INTERLEAVER(xpname, slot_name, slotno) \ SST_SSP_SLOT_CTL(xpname, "tx interleaver", slot_name, slotno, true, \ channel_names, sst_slot_get, sst_slot_put) #define SST_DEINTERLEAVER(xpname, channel_name, channel_no) \ SST_SSP_SLOT_CTL(xpname, "rx deinterleaver", channel_name, channel_no, false, \ slot_names, sst_slot_get, sst_slot_put) static const struct snd_kcontrol_new sst_slot_controls[] = { SST_INTERLEAVER("codec_out", "slot 0", 0), SST_INTERLEAVER("codec_out", "slot 1", 1), SST_INTERLEAVER("codec_out", "slot 2", 2), SST_INTERLEAVER("codec_out", "slot 3", 3), SST_DEINTERLEAVER("codec_in", "codec_in0_0", 0), SST_DEINTERLEAVER("codec_in", "codec_in0_1", 1), SST_DEINTERLEAVER("codec_in", "codec_in1_0", 2), SST_DEINTERLEAVER("codec_in", "codec_in1_1", 3), }; /* Gain helper with min/max set */ #define SST_GAIN(name, path_id, task_id, instance, gain_var) \ SST_GAIN_KCONTROLS(name, "Gain", SST_GAIN_MIN_VALUE, SST_GAIN_MAX_VALUE, \ SST_GAIN_TC_MIN, SST_GAIN_TC_MAX, \ sst_gain_get, sst_gain_put, \ SST_MODULE_ID_GAIN_CELL, path_id, instance, task_id, \ sst_gain_tlv_common, gain_var) #define SST_VOLUME(name, path_id, task_id, instance, gain_var) \ SST_GAIN_KCONTROLS(name, "Volume", SST_GAIN_MIN_VALUE, SST_GAIN_MAX_VALUE, \ SST_GAIN_TC_MIN, SST_GAIN_TC_MAX, \ sst_gain_get, sst_gain_put, \ SST_MODULE_ID_VOLUME, path_id, instance, task_id, \ sst_gain_tlv_common, gain_var) static struct sst_gain_value sst_gains[]; static const struct snd_kcontrol_new sst_gain_controls[] = { SST_GAIN("media0_in", SST_PATH_INDEX_MEDIA0_IN, SST_TASK_MMX, 0, &sst_gains[0]), 
SST_GAIN("media1_in", SST_PATH_INDEX_MEDIA1_IN, SST_TASK_MMX, 0, &sst_gains[1]), SST_GAIN("media2_in", SST_PATH_INDEX_MEDIA2_IN, SST_TASK_MMX, 0, &sst_gains[2]), SST_GAIN("media3_in", SST_PATH_INDEX_MEDIA3_IN, SST_TASK_MMX, 0, &sst_gains[3]), SST_GAIN("pcm0_in", SST_PATH_INDEX_PCM0_IN, SST_TASK_SBA, 0, &sst_gains[4]), SST_GAIN("pcm1_in", SST_PATH_INDEX_PCM1_IN, SST_TASK_SBA, 0, &sst_gains[5]), SST_GAIN("pcm1_out", SST_PATH_INDEX_PCM1_OUT, SST_TASK_SBA, 0, &sst_gains[6]), SST_GAIN("pcm2_out", SST_PATH_INDEX_PCM2_OUT, SST_TASK_SBA, 0, &sst_gains[7]), SST_GAIN("codec_in0", SST_PATH_INDEX_CODEC_IN0, SST_TASK_SBA, 0, &sst_gains[8]), SST_GAIN("codec_in1", SST_PATH_INDEX_CODEC_IN1, SST_TASK_SBA, 0, &sst_gains[9]), SST_GAIN("codec_out0", SST_PATH_INDEX_CODEC_OUT0, SST_TASK_SBA, 0, &sst_gains[10]), SST_GAIN("codec_out1", SST_PATH_INDEX_CODEC_OUT1, SST_TASK_SBA, 0, &sst_gains[11]), SST_GAIN("media_loop1_out", SST_PATH_INDEX_MEDIA_LOOP1_OUT, SST_TASK_SBA, 0, &sst_gains[12]), SST_GAIN("media_loop2_out", SST_PATH_INDEX_MEDIA_LOOP2_OUT, SST_TASK_SBA, 0, &sst_gains[13]), SST_GAIN("sprot_loop_out", SST_PATH_INDEX_SPROT_LOOP_OUT, SST_TASK_SBA, 0, &sst_gains[14]), SST_VOLUME("media0_in", SST_PATH_INDEX_MEDIA0_IN, SST_TASK_MMX, 0, &sst_gains[15]), SST_GAIN("modem_in", SST_PATH_INDEX_MODEM_IN, SST_TASK_SBA, 0, &sst_gains[16]), SST_GAIN("modem_out", SST_PATH_INDEX_MODEM_OUT, SST_TASK_SBA, 0, &sst_gains[17]), }; #define SST_GAIN_NUM_CONTROLS 3 /* the SST_GAIN macro above will create three alsa controls for each * instance invoked, gain, mute and ramp duration, which use the same gain * cell sst_gain to keep track of data * To calculate number of gain cell instances we need to device by 3 in * below caulcation for gain cell memory. 
* This gets rid of static number and issues while adding new controls */ static struct sst_gain_value sst_gains[ARRAY_SIZE(sst_gain_controls)/SST_GAIN_NUM_CONTROLS]; static const struct snd_kcontrol_new sst_algo_controls[] = { SST_ALGO_KCONTROL_BYTES("media_loop1_out", "fir", 272, SST_MODULE_ID_FIR_24, SST_PATH_INDEX_MEDIA_LOOP1_OUT, 0, SST_TASK_SBA, SBA_VB_SET_FIR), SST_ALGO_KCONTROL_BYTES("media_loop1_out", "iir", 300, SST_MODULE_ID_IIR_24, SST_PATH_INDEX_MEDIA_LOOP1_OUT, 0, SST_TASK_SBA, SBA_VB_SET_IIR), SST_ALGO_KCONTROL_BYTES("media_loop1_out", "mdrp", 286, SST_MODULE_ID_MDRP, SST_PATH_INDEX_MEDIA_LOOP1_OUT, 0, SST_TASK_SBA, SBA_SET_MDRP), SST_ALGO_KCONTROL_BYTES("media_loop2_out", "fir", 272, SST_MODULE_ID_FIR_24, SST_PATH_INDEX_MEDIA_LOOP2_OUT, 0, SST_TASK_SBA, SBA_VB_SET_FIR), SST_ALGO_KCONTROL_BYTES("media_loop2_out", "iir", 300, SST_MODULE_ID_IIR_24, SST_PATH_INDEX_MEDIA_LOOP2_OUT, 0, SST_TASK_SBA, SBA_VB_SET_IIR), SST_ALGO_KCONTROL_BYTES("media_loop2_out", "mdrp", 286, SST_MODULE_ID_MDRP, SST_PATH_INDEX_MEDIA_LOOP2_OUT, 0, SST_TASK_SBA, SBA_SET_MDRP), SST_ALGO_KCONTROL_BYTES("sprot_loop_out", "lpro", 192, SST_MODULE_ID_SPROT, SST_PATH_INDEX_SPROT_LOOP_OUT, 0, SST_TASK_SBA, SBA_VB_LPRO), SST_ALGO_KCONTROL_BYTES("codec_in0", "dcr", 52, SST_MODULE_ID_FILT_DCR, SST_PATH_INDEX_CODEC_IN0, 0, SST_TASK_SBA, SBA_VB_SET_IIR), SST_ALGO_KCONTROL_BYTES("codec_in1", "dcr", 52, SST_MODULE_ID_FILT_DCR, SST_PATH_INDEX_CODEC_IN1, 0, SST_TASK_SBA, SBA_VB_SET_IIR), }; static int sst_algo_control_init(struct device *dev) { int i = 0; struct sst_algo_control *bc; /*allocate space to cache the algo parameters in the driver*/ for (i = 0; i < ARRAY_SIZE(sst_algo_controls); i++) { bc = (struct sst_algo_control *)sst_algo_controls[i].private_value; bc->params = devm_kzalloc(dev, bc->max, GFP_KERNEL); if (bc->params == NULL) return -ENOMEM; } return 0; } static bool is_sst_dapm_widget(struct snd_soc_dapm_widget *w) { switch (w->id) { case snd_soc_dapm_pga: case snd_soc_dapm_aif_in: 
case snd_soc_dapm_aif_out: case snd_soc_dapm_input: case snd_soc_dapm_output: case snd_soc_dapm_mixer: return true; default: return false; } } /** * sst_send_pipe_gains - send gains for the front-end DAIs * @dai: front-end dai * @stream: direction * @mute: boolean indicating mute status * * The gains in the pipes connected to the front-ends are muted/unmuted * automatically via the digital_mute() DAPM callback. This function sends the * gains for the front-end pipes. */ int sst_send_pipe_gains(struct snd_soc_dai *dai, int stream, int mute) { struct sst_data *drv = snd_soc_dai_get_drvdata(dai); struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(dai, stream); struct snd_soc_dapm_path *p; dev_dbg(dai->dev, "enter, dai-name=%s dir=%d\n", dai->name, stream); dev_dbg(dai->dev, "Stream name=%s\n", w->name); if (stream == SNDRV_PCM_STREAM_PLAYBACK) { snd_soc_dapm_widget_for_each_sink_path(w, p) { if (p->connected && !p->connected(w, p->sink)) continue; if (p->connect && p->sink->power && is_sst_dapm_widget(p->sink)) { struct sst_ids *ids = p->sink->priv; dev_dbg(dai->dev, "send gains for widget=%s\n", p->sink->name); mutex_lock(&drv->lock); sst_set_pipe_gain(ids, drv, mute); mutex_unlock(&drv->lock); } } } else { snd_soc_dapm_widget_for_each_source_path(w, p) { if (p->connected && !p->connected(w, p->source)) continue; if (p->connect && p->source->power && is_sst_dapm_widget(p->source)) { struct sst_ids *ids = p->source->priv; dev_dbg(dai->dev, "send gain for widget=%s\n", p->source->name); mutex_lock(&drv->lock); sst_set_pipe_gain(ids, drv, mute); mutex_unlock(&drv->lock); } } } return 0; } /** * sst_fill_module_list - populate the list of modules/gains for a pipe * @kctl: kcontrol pointer * @w: dapm widget * @type: widget type * * Fills the widget pointer in the kcontrol private data, and also fills the * kcontrol pointer in the widget private data. * * Widget pointer is used to send the algo/gain in the .put() handler if the * widget is powerd on. 
* * Kcontrol pointer is used to send the algo/gain in the widget power ON/OFF * event handler. Each widget (pipe) has multiple algos stored in the algo_list. */ static int sst_fill_module_list(struct snd_kcontrol *kctl, struct snd_soc_dapm_widget *w, int type) { struct sst_module *module; struct snd_soc_component *c = snd_soc_dapm_to_component(w->dapm); struct sst_ids *ids = w->priv; int ret = 0; module = devm_kzalloc(c->dev, sizeof(*module), GFP_KERNEL); if (!module) return -ENOMEM; if (type == SST_MODULE_GAIN) { struct sst_gain_mixer_control *mc = (void *)kctl->private_value; mc->w = w; module->kctl = kctl; list_add_tail(&module->node, &ids->gain_list); } else if (type == SST_MODULE_ALGO) { struct sst_algo_control *bc = (void *)kctl->private_value; bc->w = w; module->kctl = kctl; list_add_tail(&module->node, &ids->algo_list); } else { dev_err(c->dev, "invoked for unknown type %d module %s", type, kctl->id.name); ret = -EINVAL; } return ret; } /** * sst_fill_widget_module_info - fill list of gains/algos for the pipe * @w: pipe modeled as a DAPM widget * @component: ASoC component * * Fill the list of gains/algos for the widget by looking at all the card * controls and comparing the name of the widget with the first part of control * name. First part of control name contains the pipe name (widget name). 
*/ static int sst_fill_widget_module_info(struct snd_soc_dapm_widget *w, struct snd_soc_component *component) { struct snd_kcontrol *kctl; int index, ret = 0; struct snd_card *card = component->card->snd_card; char *idx; down_read(&card->controls_rwsem); list_for_each_entry(kctl, &card->controls, list) { idx = strchr(kctl->id.name, ' '); if (idx == NULL) continue; index = idx - (char*)kctl->id.name; if (strncmp(kctl->id.name, w->name, index)) continue; if (strstr(kctl->id.name, "Volume")) ret = sst_fill_module_list(kctl, w, SST_MODULE_GAIN); else if (strstr(kctl->id.name, "params")) ret = sst_fill_module_list(kctl, w, SST_MODULE_ALGO); else if (strstr(kctl->id.name, "Switch") && strstr(kctl->id.name, "Gain")) { struct sst_gain_mixer_control *mc = (void *)kctl->private_value; mc->w = w; } else if (strstr(kctl->id.name, "interleaver")) { struct sst_enum *e = (void *)kctl->private_value; e->w = w; } else if (strstr(kctl->id.name, "deinterleaver")) { struct sst_enum *e = (void *)kctl->private_value; e->w = w; } if (ret < 0) { up_read(&card->controls_rwsem); return ret; } } up_read(&card->controls_rwsem); return 0; } /** * sst_fill_linked_widgets - fill the parent pointer for the linked widget * @component: ASoC component * @ids: sst_ids array */ static void sst_fill_linked_widgets(struct snd_soc_component *component, struct sst_ids *ids) { struct snd_soc_dapm_widget *w; unsigned int len = strlen(ids->parent_wname); list_for_each_entry(w, &component->card->widgets, list) { if (!strncmp(ids->parent_wname, w->name, len)) { ids->parent_w = w; break; } } } /** * sst_map_modules_to_pipe - fill algo/gains list for all pipes * @component: ASoC component */ static int sst_map_modules_to_pipe(struct snd_soc_component *component) { struct snd_soc_dapm_widget *w; int ret = 0; list_for_each_entry(w, &component->card->widgets, list) { if (is_sst_dapm_widget(w) && (w->priv)) { struct sst_ids *ids = w->priv; dev_dbg(component->dev, "widget type=%d name=%s\n", w->id, w->name); 
INIT_LIST_HEAD(&ids->algo_list); INIT_LIST_HEAD(&ids->gain_list); ret = sst_fill_widget_module_info(w, component); if (ret < 0) return ret; /* fill linked widgets */ if (ids->parent_wname != NULL) sst_fill_linked_widgets(component, ids); } } return 0; } int sst_dsp_init_v2_dpcm(struct snd_soc_component *component) { int i, ret = 0; struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component); struct sst_data *drv = snd_soc_component_get_drvdata(component); unsigned int gains = ARRAY_SIZE(sst_gain_controls)/3; drv->byte_stream = devm_kzalloc(component->dev, SST_MAX_BIN_BYTES, GFP_KERNEL); if (!drv->byte_stream) return -ENOMEM; snd_soc_dapm_new_controls(dapm, sst_dapm_widgets, ARRAY_SIZE(sst_dapm_widgets)); snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon)); snd_soc_dapm_new_widgets(dapm->card); for (i = 0; i < gains; i++) { sst_gains[i].mute = SST_GAIN_MUTE_DEFAULT; sst_gains[i].l_gain = SST_GAIN_VOLUME_DEFAULT; sst_gains[i].r_gain = SST_GAIN_VOLUME_DEFAULT; sst_gains[i].ramp_duration = SST_GAIN_RAMP_DURATION_DEFAULT; } ret = snd_soc_add_component_controls(component, sst_gain_controls, ARRAY_SIZE(sst_gain_controls)); if (ret) return ret; /* Initialize algo control params */ ret = sst_algo_control_init(component->dev); if (ret) return ret; ret = snd_soc_add_component_controls(component, sst_algo_controls, ARRAY_SIZE(sst_algo_controls)); if (ret) return ret; ret = snd_soc_add_component_controls(component, sst_slot_controls, ARRAY_SIZE(sst_slot_controls)); if (ret) return ret; ret = sst_map_modules_to_pipe(component); return ret; }
/* ==== linux-master: end of sound/soc/intel/atom/sst-atom-controls.c ==== */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * sst_mfld_platform.c - Intel MID Platform driver
 *
 * Copyright (C) 2010-2014 Intel Corp
 * Author: Vinod Koul <[email protected]>
 * Author: Harsha Priya <[email protected]>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/compress_driver.h>
#include <asm/platform_sst_audio.h>
#include "sst-mfld-platform.h"
#include "sst-atom-controls.h"

/*
 * Single global handle to the SST DSP device; at most one DSP may be
 * registered at a time.  Guarded by sst_lock for register/unregister and
 * for the lookup in sst_media_open().
 */
struct sst_device *sst;
static DEFINE_MUTEX(sst_lock);

/*
 * sst_register_dsp - publish an SST DSP device to this platform driver.
 * Takes a reference on the DSP driver's module; fails with -EEXIST if a
 * device is already registered.
 */
int sst_register_dsp(struct sst_device *dev)
{
	if (WARN_ON(!dev))
		return -EINVAL;
	if (!try_module_get(dev->dev->driver->owner))
		return -ENODEV;
	mutex_lock(&sst_lock);
	if (sst) {
		/* only one DSP device is supported; drop the ref we took */
		dev_err(dev->dev, "we already have a device %s\n", sst->name);
		module_put(dev->dev->driver->owner);
		mutex_unlock(&sst_lock);
		return -EEXIST;
	}
	dev_dbg(dev->dev, "registering device %s\n", dev->name);
	sst = dev;
	mutex_unlock(&sst_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_register_dsp);

/*
 * sst_unregister_dsp - remove the currently registered DSP device and drop
 * the module reference taken at registration.
 *
 * NOTE(review): "dev != sst" is tested before sst_lock is taken, then sst is
 * re-checked under the lock — presumably safe because callers serialize
 * register/unregister, but confirm there is no race with sst_register_dsp().
 */
int sst_unregister_dsp(struct sst_device *dev)
{
	if (WARN_ON(!dev))
		return -EINVAL;
	if (dev != sst)
		return -EINVAL;

	mutex_lock(&sst_lock);

	if (!sst) {
		mutex_unlock(&sst_lock);
		return -EIO;
	}

	module_put(sst->dev->driver->owner);
	dev_dbg(dev->dev, "unreg %s\n", sst->name);
	sst = NULL;
	mutex_unlock(&sst_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_unregister_dsp);

/* ALSA hardware capabilities advertised for PCM streams on this platform */
static const struct snd_pcm_hardware sst_platform_pcm_hw = {
	.info =	(SNDRV_PCM_INFO_INTERLEAVED |
			SNDRV_PCM_INFO_DOUBLE |
			SNDRV_PCM_INFO_PAUSE |
			SNDRV_PCM_INFO_RESUME |
			SNDRV_PCM_INFO_MMAP|
			SNDRV_PCM_INFO_MMAP_VALID |
			SNDRV_PCM_INFO_BLOCK_TRANSFER |
			SNDRV_PCM_INFO_SYNC_START),
	.buffer_bytes_max = SST_MAX_BUFFER,
	.period_bytes_min = SST_MIN_PERIOD_BYTES,
	.period_bytes_max = SST_MAX_PERIOD_BYTES,
	.periods_min = SST_MIN_PERIODS,
	.periods_max = SST_MAX_PERIODS,
	.fifo_size = SST_FIFO_SIZE,
};

/*
 * Map front-end PCM devices to firmware pipes/tasks.  Index 0 is reserved
 * (stream ids are 1-based, see sst_get_stream_mapping()).
 */
static struct sst_dev_stream_map dpcm_strm_map[] = {
	{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, /* Reserved, not in use */
	{MERR_DPCM_AUDIO, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_MEDIA1_IN, SST_TASK_ID_MEDIA, 0},
	{MERR_DPCM_COMPR, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_MEDIA0_IN, SST_TASK_ID_MEDIA, 0},
	{MERR_DPCM_AUDIO, 0, SNDRV_PCM_STREAM_CAPTURE, PIPE_PCM1_OUT, SST_TASK_ID_MEDIA, 0},
	{MERR_DPCM_DEEP_BUFFER, 0, SNDRV_PCM_STREAM_PLAYBACK, PIPE_MEDIA3_IN, SST_TASK_ID_MEDIA, 0},
};

/* DAI mute callback: forwards mute state to the pipe gain modules */
static int sst_media_digital_mute(struct snd_soc_dai *dai, int mute,
				  int stream)
{
	return sst_send_pipe_gains(dai, stream, mute);
}

/* helper functions */

/* Set the stream state under the stream's status spinlock */
void sst_set_stream_status(struct sst_runtime_stream *stream, int state)
{
	unsigned long flags;

	spin_lock_irqsave(&stream->status_lock, flags);
	stream->stream_status = state;
	spin_unlock_irqrestore(&stream->status_lock, flags);
}

/* Read the stream state under the stream's status spinlock */
static inline int sst_get_stream_status(struct sst_runtime_stream *stream)
{
	int state;
	unsigned long flags;

	spin_lock_irqsave(&stream->status_lock, flags);
	state = stream->stream_status;
	spin_unlock_irqrestore(&stream->status_lock, flags);
	return state;
}

/*
 * Fill the firmware ring-buffer allocation parameters from the substream's
 * DMA buffer: one scatter-gather entry covering the whole buffer, with the
 * fragment size derived from the ALSA period size.
 */
static void sst_fill_alloc_params(struct snd_pcm_substream *substream,
				  struct snd_sst_alloc_params_ext *alloc_param)
{
	unsigned int channels;
	snd_pcm_uframes_t period_size;
	ssize_t periodbytes;
	ssize_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
	u32 buffer_addr = substream->runtime->dma_addr;

	channels = substream->runtime->channels;
	period_size = substream->runtime->period_size;
	periodbytes = samples_to_bytes(substream->runtime, period_size);
	alloc_param->ring_buf_info[0].addr = buffer_addr;
	alloc_param->ring_buf_info[0].size = buffer_bytes;
	alloc_param->sg_count = 1;
	alloc_param->reserved = 0;
	alloc_param->frag_size = periodbytes * channels;
}

/* Fill firmware PCM codec parameters from the ALSA runtime configuration */
static void sst_fill_pcm_params(struct snd_pcm_substream *substream,
				struct snd_sst_stream_params *param)
{
	param->uc.pcm_params.num_chan = (u8) substream->runtime->channels;
	param->uc.pcm_params.pcm_wd_sz = substream->runtime->sample_bits;
	param->uc.pcm_params.sfreq = substream->runtime->rate;

	/* PCM stream via ALSA interface */
	param->uc.pcm_params.use_offload_path = 0;
	param->uc.pcm_params.reserved2 = 0;
	/*
	 * NOTE(review): this clears only sizeof(u8) == 1 byte of
	 * channel_map — confirm whether the full map should be zeroed.
	 */
	memset(param->uc.pcm_params.channel_map, 0, sizeof(u8));
}

/*
 * Look up the stream-map index for a (device, direction) pair.
 * Returns the 1-based index on a match, 0 if not found, -EINVAL on a
 * NULL map.  'sdev' is currently unused in the comparison.
 */
static int sst_get_stream_mapping(int dev, int sdev, int dir,
	struct sst_dev_stream_map *map, int size)
{
	int i;

	if (map == NULL)
		return -EINVAL;

	/* index 0 is not used in stream map */
	for (i = 1; i < size; i++) {
		if ((map[i].dev_num == dev) && (map[i].direction == dir))
			return i;
	}
	return 0;
}

/*
 * sst_fill_stream_params - fill the SST stream descriptor (stream id,
 * device type, task, direction) for either a PCM or a compressed stream.
 * 'substream' is a struct snd_pcm_substream * when is_compress is false and
 * a struct snd_compr_stream * when it is true.
 */
int sst_fill_stream_params(void *substream,
	const struct sst_data *ctx, struct snd_sst_params *str_params, bool is_compress)
{
	int map_size;
	int index;
	struct sst_dev_stream_map *map;
	struct snd_pcm_substream *pstream = NULL;
	struct snd_compr_stream *cstream = NULL;

	map = ctx->pdata->pdev_strm_map;
	map_size = ctx->pdata->strm_map_size;

	if (is_compress)
		cstream = (struct snd_compr_stream *)substream;
	else
		pstream = (struct snd_pcm_substream *)substream;

	str_params->stream_type = SST_STREAM_TYPE_MUSIC;

	/* For pcm streams */
	if (pstream) {
		index = sst_get_stream_mapping(pstream->pcm->device,
					       pstream->number, pstream->stream,
					       map, map_size);
		if (index <= 0)
			return -EINVAL;

		str_params->stream_id = index;
		str_params->device_type = map[index].device_id;
		str_params->task = map[index].task_id;

		str_params->ops = (u8)pstream->stream;
	}

	if (cstream) {
		index = sst_get_stream_mapping(cstream->device->device,
					       0, cstream->direction,
					       map, map_size);
		if (index <= 0)
			return -EINVAL;
		str_params->stream_id = index;
		str_params->device_type = map[index].device_id;
		str_params->task = map[index].task_id;
		str_params->ops = (u8)cstream->direction;
	}
	return 0;
}

/*
 * Allocate a firmware stream for the substream: build codec + buffer
 * params and call the DSP open op.  On success the stream id is stored in
 * stream->stream_info.str_id.  Propagates the open op's return value.
 */
static int sst_platform_alloc_stream(struct snd_pcm_substream *substream,
		struct snd_soc_dai *dai)
{
	struct sst_runtime_stream *stream =
			substream->runtime->private_data;
	struct snd_sst_stream_params param = {{{0,},},};
	struct snd_sst_params str_params = {0};
	struct snd_sst_alloc_params_ext alloc_params = {0};
	int ret_val = 0;
	struct sst_data *ctx = snd_soc_dai_get_drvdata(dai);

	/* set codec params and inform SST driver the same */
	sst_fill_pcm_params(substream, &param);
	sst_fill_alloc_params(substream, &alloc_params);
	str_params.sparams = param;
	str_params.aparams = alloc_params;
	str_params.codec = SST_CODEC_TYPE_PCM;

	/* fill the device type and stream id to pass to SST driver */
	ret_val = sst_fill_stream_params(substream, ctx, &str_params, false);
	if (ret_val < 0)
		return ret_val;

	stream->stream_info.str_id = str_params.stream_id;

	ret_val = stream->ops->open(sst->dev, &str_params);
	if (ret_val <= 0)
		return ret_val;

	return ret_val;
}

/*
 * Firmware period-elapsed callback; defensively ignores torn-down streams
 * and only notifies ALSA while the stream is RUNNING.
 */
static void sst_period_elapsed(void *arg)
{
	struct snd_pcm_substream *substream = arg;
	struct sst_runtime_stream *stream;
	int status;

	if (!substream || !substream->runtime)
		return;
	stream = substream->runtime->private_data;
	if (!stream)
		return;
	status = sst_get_stream_status(stream);
	if (status != SST_PLATFORM_RUNNING)
		return;
	snd_pcm_period_elapsed(substream);
}

/*
 * Register the period-elapsed callback and initial stream info with the
 * DSP, moving the stream to the INIT state.
 */
static int sst_platform_init_stream(struct snd_pcm_substream *substream)
{
	struct sst_runtime_stream *stream =
			substream->runtime->private_data;
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	int ret_val;

	dev_dbg(rtd->dev, "setting buffer ptr param\n");
	sst_set_stream_status(stream, SST_PLATFORM_INIT);
	stream->stream_info.period_elapsed = sst_period_elapsed;
	stream->stream_info.arg = substream;
	stream->stream_info.buffer_ptr = 0;
	stream->stream_info.sfreq = substream->runtime->rate;
	ret_val = stream->ops->stream_init(sst->dev, &stream->stream_info);
	if (ret_val)
		dev_err(rtd->dev, "control_set ret error %d\n", ret_val);
	return ret_val;
}

/* Power the LPE up via the DSP power op */
static int power_up_sst(struct sst_runtime_stream *stream)
{
	return stream->ops->power(sst->dev, true);
}

/* Power the LPE down via the DSP power op */
static void power_down_sst(struct sst_runtime_stream *stream)
{
	stream->ops->power(sst->dev, false);
}

/*
 * DAI startup: allocate the runtime stream, grab the DSP module, power up
 * the LPE and install hw constraints (periods/buffer multiple of 1 ms at
 * 48 kHz, even period count).
 *
 * NOTE(review): if power_up_sst() fails we jump to out_power_up, which
 * frees the stream but does not module_put() the DSP driver module taken
 * above — looks like a module refcount leak on that error path; confirm.
 */
static int sst_media_open(struct snd_pcm_substream *substream,
		struct snd_soc_dai *dai)
{
	int ret_val = 0;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct sst_runtime_stream *stream;

	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
	if (!stream)
		return -ENOMEM;
	spin_lock_init(&stream->status_lock);

	/* get the sst ops */
	mutex_lock(&sst_lock);
	if (!sst ||
	    !try_module_get(sst->dev->driver->owner)) {
		dev_err(dai->dev, "no device available to run\n");
		ret_val = -ENODEV;
		goto out_ops;
	}
	stream->ops = sst->ops;
	mutex_unlock(&sst_lock);

	stream->stream_info.str_id = 0;

	stream->stream_info.arg = substream;
	/* allocate memory for SST API set */
	runtime->private_data = stream;

	ret_val = power_up_sst(stream);
	if (ret_val < 0)
		goto out_power_up;

	/*
	 * Make sure the period to be multiple of 1ms to align the
	 * design of firmware. Apply same rule to buffer size to make
	 * sure alsa could always find a value for period size
	 * regardless the buffer size given by user space.
	 */
	snd_pcm_hw_constraint_step(substream->runtime, 0,
			SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 48);
	snd_pcm_hw_constraint_step(substream->runtime, 0,
			SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 48);

	/* Make sure, that the period size is always even */
	snd_pcm_hw_constraint_step(substream->runtime, 0,
			SNDRV_PCM_HW_PARAM_PERIODS, 2);

	return snd_pcm_hw_constraint_integer(runtime,
			SNDRV_PCM_HW_PARAM_PERIODS);
out_ops:
	mutex_unlock(&sst_lock);
out_power_up:
	kfree(stream);
	return ret_val;
}

/*
 * DAI shutdown: power the LPE down, close the firmware stream (if one was
 * allocated), release the DSP module reference and free the stream.
 */
static void sst_media_close(struct snd_pcm_substream *substream,
		struct snd_soc_dai *dai)
{
	struct sst_runtime_stream *stream;
	int str_id;

	stream = substream->runtime->private_data;
	power_down_sst(stream);

	str_id = stream->stream_info.str_id;
	if (str_id)
		stream->ops->close(sst->dev, str_id);
	module_put(sst->dev->driver->owner);
	kfree(stream);
}

/*
 * DAI prepare: on a re-prepare (stream already allocated) just drop any
 * queued data; otherwise allocate and initialize the firmware stream.
 */
static int sst_media_prepare(struct snd_pcm_substream *substream,
		struct snd_soc_dai *dai)
{
	struct sst_runtime_stream *stream;
	int ret_val, str_id;

	stream = substream->runtime->private_data;
	str_id = stream->stream_info.str_id;
	if (stream->stream_info.str_id) {
		ret_val = stream->ops->stream_drop(sst->dev, str_id);
		return ret_val;
	}

	ret_val = sst_platform_alloc_stream(substream, dai);
	if (ret_val <= 0)
		return ret_val;
	snprintf(substream->pcm->id, sizeof(substream->pcm->id),
			"%d", stream->stream_info.str_id);

	ret_val = sst_platform_init_stream(substream);
	if (ret_val)
		return ret_val;
	substream->runtime->hw.info = SNDRV_PCM_INFO_BLOCK_TRANSFER;
	return 0;
}

/*
 * BE DAI startup: on first use of the SSP, start the VB timer and load
 * default SSP settings.
 */
static int sst_enable_ssp(struct snd_pcm_substream *substream,
			struct snd_soc_dai *dai)
{
	int ret = 0;

	if (!snd_soc_dai_active(dai)) {
		ret = sst_handle_vb_timer(dai, true);
		sst_fill_ssp_defaults(dai);
	}
	return ret;
}

/* BE hw_params: enable the SSP port only for the first active stream */
static int sst_be_hw_params(struct snd_pcm_substream *substream,
			    struct snd_pcm_hw_params *params,
			    struct snd_soc_dai *dai)
{
	int ret = 0;

	if (snd_soc_dai_active(dai) == 1)
		ret = send_ssp_cmd(dai, dai->name, 1);
	return ret;
}

/* BE set_fmt: cache the SSP format config once the DAI is active */
static int sst_set_format(struct snd_soc_dai *dai, unsigned int fmt)
{
	int ret = 0;

	if (!snd_soc_dai_active(dai))
		return 0;

	ret = sst_fill_ssp_config(dai, fmt);
	if (ret < 0)
		dev_err(dai->dev, "sst_set_format failed..\n");
	return ret;
}

/* BE set_tdm_slot: cache the SSP TDM slot config once the DAI is active */
static int sst_platform_set_ssp_slot(struct snd_soc_dai *dai,
			unsigned int tx_mask, unsigned int rx_mask,
			int slots, int slot_width)
{
	int ret = 0;

	if (!snd_soc_dai_active(dai))
		return ret;

	ret = sst_fill_ssp_slot(dai, tx_mask, rx_mask, slots, slot_width);
	if (ret < 0)
		dev_err(dai->dev, "sst_fill_ssp_slot failed..%d\n", ret);
	return ret;
}

/* BE shutdown: when the last stream closes, stop the SSP and the VB timer */
static void sst_disable_ssp(struct snd_pcm_substream *substream,
			struct snd_soc_dai *dai)
{
	if (!snd_soc_dai_active(dai)) {
		send_ssp_cmd(dai, dai->name, 0);
		sst_handle_vb_timer(dai, false);
	}
}

static const struct snd_soc_dai_ops sst_media_dai_ops = {
	.startup = sst_media_open,
	.shutdown = sst_media_close,
	.prepare = sst_media_prepare,
	.mute_stream = sst_media_digital_mute,
};

static const struct snd_soc_dai_ops sst_compr_dai_ops = {
	.compress_new = snd_soc_new_compress,
	.mute_stream = sst_media_digital_mute,
};

static const struct snd_soc_dai_ops sst_be_dai_ops = {
	.startup = sst_enable_ssp,
	.hw_params = sst_be_hw_params,
	.set_fmt = sst_set_format,
	.set_tdm_slot = sst_platform_set_ssp_slot,
	.shutdown = sst_disable_ssp,
};

/* Front-end (media/deepbuffer/compress) and back-end (SSP) CPU DAIs */
static struct snd_soc_dai_driver sst_platform_dai[] = {
{
	.name = "media-cpu-dai",
	.ops = &sst_media_dai_ops,
	.playback = {
		.stream_name = "Headset Playback",
		.channels_min = SST_STEREO,
		.channels_max = SST_STEREO,
		.rates = SNDRV_PCM_RATE_48000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE,
	},
	.capture = {
		.stream_name = "Headset Capture",
		.channels_min = 1,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_48000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE,
	},
},
{
	.name = "deepbuffer-cpu-dai",
	.ops = &sst_media_dai_ops,
	.playback = {
		.stream_name = "Deepbuffer Playback",
		.channels_min = SST_STEREO,
		.channels_max = SST_STEREO,
		.rates = SNDRV_PCM_RATE_48000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE,
	},
},
{
	.name = "compress-cpu-dai",
	.ops = &sst_compr_dai_ops,
	.playback = {
		.stream_name = "Compress Playback",
		.channels_min = 1,
	},
},
/* BE CPU Dais */
{
	.name = "ssp0-port",
	.ops = &sst_be_dai_ops,
	.playback = {
		.stream_name = "ssp0 Tx",
		.channels_min = SST_STEREO,
		.channels_max = SST_STEREO,
		.rates = SNDRV_PCM_RATE_48000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE,
	},
	.capture = {
		.stream_name = "ssp0 Rx",
		.channels_min = SST_STEREO,
		.channels_max = SST_STEREO,
		.rates = SNDRV_PCM_RATE_48000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE,
	},
},
{
	.name = "ssp1-port",
	.ops = &sst_be_dai_ops,
	.playback = {
		.stream_name = "ssp1 Tx",
		.channels_min = SST_STEREO,
		.channels_max = SST_STEREO,
		.rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_48000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE,
	},
	.capture = {
		.stream_name = "ssp1 Rx",
		.channels_min = SST_STEREO,
		.channels_max = SST_STEREO,
		.rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_48000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE,
	},
},
{
	.name = "ssp2-port",
	.ops = &sst_be_dai_ops,
	.playback = {
		.stream_name = "ssp2 Tx",
		.channels_min = SST_STEREO,
		.channels_max = SST_STEREO,
		.rates = SNDRV_PCM_RATE_48000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE,
	},
	.capture = {
		.stream_name = "ssp2 Rx",
		.channels_min = SST_STEREO,
		.channels_max = SST_STEREO,
		.rates = SNDRV_PCM_RATE_48000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE,
	},
},
};

/* Component open: advertise the platform hw caps (skip internal PCMs) */
static int sst_soc_open(struct snd_soc_component *component,
			struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;

	if (substream->pcm->internal)
		return 0;

	runtime = substream->runtime;
	runtime->hw = sst_platform_pcm_hw;
	return 0;
}

/*
 * Component trigger: forward start/stop/pause/resume to the DSP stream
 * ops and, on success, record the matching platform stream state.
 */
static int sst_soc_trigger(struct snd_soc_component *component,
			   struct snd_pcm_substream *substream, int cmd)
{
	int ret_val = 0, str_id;
	struct sst_runtime_stream *stream;
	int status;
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);

	dev_dbg(rtd->dev, "%s called\n", __func__);
	if (substream->pcm->internal)
		return 0;

	stream = substream->runtime->private_data;
	str_id = stream->stream_info.str_id;
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		dev_dbg(rtd->dev, "sst: Trigger Start\n");
		status = SST_PLATFORM_RUNNING;
		stream->stream_info.arg = substream;
		ret_val = stream->ops->stream_start(sst->dev, str_id);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		dev_dbg(rtd->dev, "sst: in stop\n");
		status = SST_PLATFORM_DROPPED;
		ret_val = stream->ops->stream_drop(sst->dev, str_id);
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		dev_dbg(rtd->dev, "sst: in pause\n");
		status = SST_PLATFORM_PAUSED;
		ret_val = stream->ops->stream_pause(sst->dev, str_id);
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		dev_dbg(rtd->dev, "sst: in pause release\n");
		status = SST_PLATFORM_RUNNING;
		ret_val = stream->ops->stream_pause_release(sst->dev, str_id);
		break;
	default:
		return -EINVAL;
	}

	if (!ret_val)
		sst_set_stream_status(stream, status);

	return ret_val;
}

/*
 * Component pointer: read the hardware buffer position (timestamp) from
 * the DSP.  Returns 0 before the stream leaves the INIT state.
 */
static snd_pcm_uframes_t sst_soc_pointer(struct snd_soc_component *component,
					 struct snd_pcm_substream *substream)
{
	struct sst_runtime_stream *stream;
	int ret_val, status;
	struct pcm_stream_info *str_info;
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);

	stream = substream->runtime->private_data;
	status = sst_get_stream_status(stream);
	if (status == SST_PLATFORM_INIT)
		return 0;
	str_info = &stream->stream_info;
	ret_val = stream->ops->stream_read_tstamp(sst->dev, str_info);
	if (ret_val) {
		dev_err(rtd->dev, "sst: error code = %d\n", ret_val);
		return ret_val;
	}
	return str_info->buffer_ptr;
}

/* Component delay: report the firmware-side PCM delay cached in stream_info */
static snd_pcm_sframes_t sst_soc_delay(struct snd_soc_component *component,
				       struct snd_pcm_substream *substream)
{
	struct sst_runtime_stream *stream = substream->runtime->private_data;
	struct pcm_stream_info *str_info = &stream->stream_info;

	if (sst_get_stream_status(stream) == SST_PLATFORM_INIT)
		return 0;

	return str_info->pcm_delay;
}

/* PCM construct: set up managed DMA buffers for DAIs that stream audio */
static int sst_soc_pcm_new(struct snd_soc_component *component,
			   struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_dai *dai = asoc_rtd_to_cpu(rtd, 0);
	struct snd_pcm *pcm = rtd->pcm;

	if (dai->driver->playback.channels_min ||
			dai->driver->capture.channels_min) {
		snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
			pcm->card->dev,
			SST_MIN_BUFFER, SST_MAX_BUFFER);
	}
	return 0;
}

/* Component probe: remember the card and build the DPCM widget/control graph */
static int sst_soc_probe(struct snd_soc_component *component)
{
	struct sst_data *drv = dev_get_drvdata(component->dev);

	drv->soc_card = component->card;
	return sst_dsp_init_v2_dpcm(component);
}

/* Component remove: forget the card pointer */
static void sst_soc_remove(struct snd_soc_component *component)
{
	struct sst_data *drv = dev_get_drvdata(component->dev);

	drv->soc_card = NULL;
}

static const struct snd_soc_component_driver sst_soc_platform_drv  = {
	.name		= DRV_NAME,
	.probe		= sst_soc_probe,
	.remove		= sst_soc_remove,
	.open		= sst_soc_open,
	.trigger	= sst_soc_trigger,
	.pointer	= sst_soc_pointer,
	.delay		= sst_soc_delay,
	.compress_ops	= &sst_platform_compress_ops,
	.pcm_construct	= sst_soc_pcm_new,
};

/*
 * Platform probe: allocate driver data, wire up the static stream map and
 * register the ASoC component with all CPU DAIs.
 */
static int sst_platform_probe(struct platform_device *pdev)
{
	struct sst_data *drv;
	int ret;
	struct sst_platform_data *pdata;

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (drv == NULL) {
		return -ENOMEM;
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (pdata == NULL) {
		return -ENOMEM;
	}

	pdata->pdev_strm_map = dpcm_strm_map;
	pdata->strm_map_size = ARRAY_SIZE(dpcm_strm_map);
	drv->pdata = pdata;
	drv->pdev = pdev;
	mutex_init(&drv->lock);
	dev_set_drvdata(&pdev->dev, drv);

	ret = devm_snd_soc_register_component(&pdev->dev, &sst_soc_platform_drv,
				sst_platform_dai, ARRAY_SIZE(sst_platform_dai));
	if (ret)
		dev_err(&pdev->dev, "registering cpu dais failed\n");
	return ret;
}

/* Platform remove: everything is devm-managed, nothing to tear down */
static void sst_platform_remove(struct platform_device *pdev)
{
	dev_dbg(&pdev->dev, "sst_platform_remove success\n");
}

#ifdef CONFIG_PM_SLEEP

/*
 * System suspend prepare: suspend/power off all PCMs first, then stop the
 * SSPs and VB timer on every still-active CPU DAI.
 */
static int sst_soc_prepare(struct device *dev)
{
	struct sst_data *drv = dev_get_drvdata(dev);
	struct snd_soc_pcm_runtime *rtd;

	if (!drv->soc_card)
		return 0;

	/* suspend all pcms first */
	snd_soc_suspend(drv->soc_card->dev);
	snd_soc_poweroff(drv->soc_card->dev);

	/* set the SSPs to idle */
	for_each_card_rtds(drv->soc_card, rtd) {
		struct snd_soc_dai *dai = asoc_rtd_to_cpu(rtd, 0);

		if (snd_soc_dai_active(dai)) {
			send_ssp_cmd(dai, dai->name, 0);
			sst_handle_vb_timer(dai, false);
		}
	}

	return 0;
}

/*
 * System resume complete: mirror of sst_soc_prepare() — restart the VB
 * timer and SSPs, then resume the card.
 */
static void sst_soc_complete(struct device *dev)
{
	struct sst_data *drv = dev_get_drvdata(dev);
	struct snd_soc_pcm_runtime *rtd;

	if (!drv->soc_card)
		return;

	/* restart SSPs */
	for_each_card_rtds(drv->soc_card, rtd) {
		struct snd_soc_dai *dai = asoc_rtd_to_cpu(rtd, 0);

		if (snd_soc_dai_active(dai)) {
			sst_handle_vb_timer(dai, true);
			send_ssp_cmd(dai, dai->name, 1);
		}
	}
	snd_soc_resume(drv->soc_card->dev);
}

#else

#define sst_soc_prepare NULL
#define sst_soc_complete NULL

#endif

static const struct dev_pm_ops sst_platform_pm = {
	.prepare	= sst_soc_prepare,
	.complete	= sst_soc_complete,
};

static struct platform_driver sst_platform_driver = {
	.driver		= {
		.name		= "sst-mfld-platform",
		.pm             = &sst_platform_pm,
	},
	.probe		= sst_platform_probe,
	.remove_new	= sst_platform_remove,
};

module_platform_driver(sst_platform_driver);

MODULE_DESCRIPTION("ASoC Intel(R) MID Platform driver");
MODULE_AUTHOR("Vinod Koul <[email protected]>");
MODULE_AUTHOR("Harsha Priya <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sst-atom-hifi2-platform");
MODULE_ALIAS("platform:sst-mfld-platform");
/* ==== linux-master: end of sound/soc/intel/atom/sst-mfld-platform-pcm.c ==== */
// SPDX-License-Identifier: GPL-2.0-only /* * sst_mfld_platform.c - Intel MID Platform driver * * Copyright (C) 2010-2014 Intel Corp * Author: Vinod Koul <[email protected]> * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/slab.h> #include <linux/io.h> #include <linux/module.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/compress_driver.h> #include "sst-mfld-platform.h" /* compress stream operations */ static void sst_compr_fragment_elapsed(void *arg) { struct snd_compr_stream *cstream = (struct snd_compr_stream *)arg; pr_debug("fragment elapsed by driver\n"); if (cstream) snd_compr_fragment_elapsed(cstream); } static void sst_drain_notify(void *arg) { struct snd_compr_stream *cstream = (struct snd_compr_stream *)arg; pr_debug("drain notify by driver\n"); if (cstream) snd_compr_drain_notify(cstream); } static int sst_platform_compr_open(struct snd_soc_component *component, struct snd_compr_stream *cstream) { int ret_val; struct snd_compr_runtime *runtime = cstream->runtime; struct sst_runtime_stream *stream; stream = kzalloc(sizeof(*stream), GFP_KERNEL); if (!stream) return -ENOMEM; spin_lock_init(&stream->status_lock); /* get the sst ops */ if (!sst || !try_module_get(sst->dev->driver->owner)) { pr_err("no device available to run\n"); ret_val = -ENODEV; goto out_ops; } stream->compr_ops = sst->compr_ops; stream->id = 0; /* Turn on LPE */ sst->compr_ops->power(sst->dev, true); sst_set_stream_status(stream, SST_PLATFORM_INIT); runtime->private_data = stream; return 0; out_ops: kfree(stream); return ret_val; } static int sst_platform_compr_free(struct snd_soc_component *component, struct snd_compr_stream *cstream) { struct sst_runtime_stream *stream; int ret_val = 0, str_id; stream = cstream->runtime->private_data; /* Turn off 
LPE */ sst->compr_ops->power(sst->dev, false); /*need to check*/ str_id = stream->id; if (str_id) ret_val = stream->compr_ops->close(sst->dev, str_id); module_put(sst->dev->driver->owner); kfree(stream); pr_debug("%s: %d\n", __func__, ret_val); return 0; } static int sst_platform_compr_set_params(struct snd_soc_component *component, struct snd_compr_stream *cstream, struct snd_compr_params *params) { struct sst_runtime_stream *stream; int retval; struct snd_sst_params str_params; struct sst_compress_cb cb; struct sst_data *ctx = snd_soc_component_get_drvdata(component); stream = cstream->runtime->private_data; /* construct fw structure for this*/ memset(&str_params, 0, sizeof(str_params)); /* fill the device type and stream id to pass to SST driver */ retval = sst_fill_stream_params(cstream, ctx, &str_params, true); pr_debug("compr_set_params: fill stream params ret_val = 0x%x\n", retval); if (retval < 0) return retval; switch (params->codec.id) { case SND_AUDIOCODEC_MP3: { str_params.codec = SST_CODEC_TYPE_MP3; str_params.sparams.uc.mp3_params.num_chan = params->codec.ch_in; str_params.sparams.uc.mp3_params.pcm_wd_sz = 16; break; } case SND_AUDIOCODEC_AAC: { str_params.codec = SST_CODEC_TYPE_AAC; str_params.sparams.uc.aac_params.num_chan = params->codec.ch_in; str_params.sparams.uc.aac_params.pcm_wd_sz = 16; if (params->codec.format == SND_AUDIOSTREAMFORMAT_MP4ADTS) str_params.sparams.uc.aac_params.bs_format = AAC_BIT_STREAM_ADTS; else if (params->codec.format == SND_AUDIOSTREAMFORMAT_RAW) str_params.sparams.uc.aac_params.bs_format = AAC_BIT_STREAM_RAW; else { pr_err("Undefined format%d\n", params->codec.format); return -EINVAL; } str_params.sparams.uc.aac_params.externalsr = params->codec.sample_rate; break; } default: pr_err("codec not supported, id =%d\n", params->codec.id); return -EINVAL; } str_params.aparams.ring_buf_info[0].addr = virt_to_phys(cstream->runtime->buffer); str_params.aparams.ring_buf_info[0].size = cstream->runtime->buffer_size; 
str_params.aparams.sg_count = 1; str_params.aparams.frag_size = cstream->runtime->fragment_size; cb.param = cstream; cb.compr_cb = sst_compr_fragment_elapsed; cb.drain_cb_param = cstream; cb.drain_notify = sst_drain_notify; retval = stream->compr_ops->open(sst->dev, &str_params, &cb); if (retval < 0) { pr_err("stream allocation failed %d\n", retval); return retval; } stream->id = retval; return 0; } static int sst_platform_compr_trigger(struct snd_soc_component *component, struct snd_compr_stream *cstream, int cmd) { struct sst_runtime_stream *stream = cstream->runtime->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: if (stream->compr_ops->stream_start) return stream->compr_ops->stream_start(sst->dev, stream->id); break; case SNDRV_PCM_TRIGGER_STOP: if (stream->compr_ops->stream_drop) return stream->compr_ops->stream_drop(sst->dev, stream->id); break; case SND_COMPR_TRIGGER_DRAIN: if (stream->compr_ops->stream_drain) return stream->compr_ops->stream_drain(sst->dev, stream->id); break; case SND_COMPR_TRIGGER_PARTIAL_DRAIN: if (stream->compr_ops->stream_partial_drain) return stream->compr_ops->stream_partial_drain(sst->dev, stream->id); break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: if (stream->compr_ops->stream_pause) return stream->compr_ops->stream_pause(sst->dev, stream->id); break; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: if (stream->compr_ops->stream_pause_release) return stream->compr_ops->stream_pause_release(sst->dev, stream->id); break; } return -EINVAL; } static int sst_platform_compr_pointer(struct snd_soc_component *component, struct snd_compr_stream *cstream, struct snd_compr_tstamp *tstamp) { struct sst_runtime_stream *stream; stream = cstream->runtime->private_data; stream->compr_ops->tstamp(sst->dev, stream->id, tstamp); tstamp->byte_offset = tstamp->copied_total % (u32)cstream->runtime->buffer_size; pr_debug("calc bytes offset/copied bytes as %d\n", tstamp->byte_offset); return 0; } static int sst_platform_compr_ack(struct snd_soc_component 
*component, struct snd_compr_stream *cstream, size_t bytes)
{
	struct sst_runtime_stream *stream;

	stream = cstream->runtime->private_data;
	/* Tell the firmware how many bytes userspace just committed */
	stream->compr_ops->ack(sst->dev, stream->id, (unsigned long)bytes);
	/* running total, used for pointer/timestamp bookkeeping */
	stream->bytes_written += bytes;
	return 0;
}

/* Forward the capability query to the SST backend ops */
static int sst_platform_compr_get_caps(struct snd_soc_component *component,
					struct snd_compr_stream *cstream,
					struct snd_compr_caps *caps)
{
	struct sst_runtime_stream *stream =
		cstream->runtime->private_data;

	return stream->compr_ops->get_caps(caps);
}

/* Forward the per-codec capability query to the SST backend ops */
static int sst_platform_compr_get_codec_caps(struct snd_soc_component *component,
					struct snd_compr_stream *cstream,
					struct snd_compr_codec_caps *codec)
{
	struct sst_runtime_stream *stream =
		cstream->runtime->private_data;

	return stream->compr_ops->get_codec_caps(codec);
}

/* Pass stream metadata (e.g. encoder delay/padding) down to the firmware */
static int sst_platform_compr_set_metadata(struct snd_soc_component *component,
					struct snd_compr_stream *cstream,
					struct snd_compr_metadata *metadata)
{
	struct sst_runtime_stream *stream =
		cstream->runtime->private_data;

	return stream->compr_ops->set_metadata(sst->dev, stream->id, metadata);
}

/* Compress-offload entry points exposed to the ASoC core */
const struct snd_compress_ops sst_platform_compress_ops = {
	.open = sst_platform_compr_open,
	.free = sst_platform_compr_free,
	.set_params = sst_platform_compr_set_params,
	.set_metadata = sst_platform_compr_set_metadata,
	.trigger = sst_platform_compr_trigger,
	.pointer = sst_platform_compr_pointer,
	.ack = sst_platform_compr_ack,
	.get_caps = sst_platform_compr_get_caps,
	.get_codec_caps = sst_platform_compr_get_codec_caps,
};
linux-master
sound/soc/intel/atom/sst-mfld-platform-compress.c
// SPDX-License-Identifier: GPL-2.0-only /* * sst_ipc.c - Intel SST Driver for audio engine * * Copyright (C) 2008-14 Intel Corporation * Authors: Vinod Koul <[email protected]> * Harsha Priya <[email protected]> * Dharageswari R <[email protected]> * KP Jeeja <[email protected]> * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/pci.h> #include <linux/firmware.h> #include <linux/sched.h> #include <linux/delay.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <sound/compress_driver.h> #include <asm/intel-mid.h> #include <asm/platform_sst_audio.h> #include "../sst-mfld-platform.h" #include "sst.h" struct sst_block *sst_create_block(struct intel_sst_drv *ctx, u32 msg_id, u32 drv_id) { struct sst_block *msg; dev_dbg(ctx->dev, "Enter\n"); msg = kzalloc(sizeof(*msg), GFP_KERNEL); if (!msg) return NULL; msg->condition = false; msg->on = true; msg->msg_id = msg_id; msg->drv_id = drv_id; spin_lock_bh(&ctx->block_lock); list_add_tail(&msg->node, &ctx->block_list); spin_unlock_bh(&ctx->block_lock); return msg; } /* * while handling the interrupts, we need to check for message status and * then if we are blocking for a message * * here we are unblocking the blocked ones, this is based on id we have * passed and search that for block threads. 
* We will not find block in two cases * a) when its small message and block in not there, so silently ignore * them * b) when we are actually not able to find the block (bug perhaps) * * Since we have bit of small messages we can spam kernel log with err * print on above so need to keep as debug prints which should be enabled * via dynamic debug while debugging IPC issues */ int sst_wake_up_block(struct intel_sst_drv *ctx, int result, u32 drv_id, u32 ipc, void *data, u32 size) { struct sst_block *block; dev_dbg(ctx->dev, "Enter\n"); spin_lock_bh(&ctx->block_lock); list_for_each_entry(block, &ctx->block_list, node) { dev_dbg(ctx->dev, "Block ipc %d, drv_id %d\n", block->msg_id, block->drv_id); if (block->msg_id == ipc && block->drv_id == drv_id) { dev_dbg(ctx->dev, "free up the block\n"); block->ret_code = result; block->data = data; block->size = size; block->condition = true; spin_unlock_bh(&ctx->block_lock); wake_up(&ctx->wait_queue); return 0; } } spin_unlock_bh(&ctx->block_lock); dev_dbg(ctx->dev, "Block not found or a response received for a short msg for ipc %d, drv_id %d\n", ipc, drv_id); return -EINVAL; } int sst_free_block(struct intel_sst_drv *ctx, struct sst_block *freed) { struct sst_block *block, *__block; dev_dbg(ctx->dev, "Enter\n"); spin_lock_bh(&ctx->block_lock); list_for_each_entry_safe(block, __block, &ctx->block_list, node) { if (block == freed) { pr_debug("pvt_id freed --> %d\n", freed->drv_id); /* toggle the index position of pvt_id */ list_del(&freed->node); spin_unlock_bh(&ctx->block_lock); kfree(freed->data); freed->data = NULL; kfree(freed); return 0; } } spin_unlock_bh(&ctx->block_lock); dev_err(ctx->dev, "block is already freed!!!\n"); return -EINVAL; } int sst_post_message_mrfld(struct intel_sst_drv *sst_drv_ctx, struct ipc_post *ipc_msg, bool sync) { struct ipc_post *msg = ipc_msg; union ipc_header_mrfld header; unsigned int loop_count = 0; int retval = 0; unsigned long irq_flags; dev_dbg(sst_drv_ctx->dev, "Enter: sync: %d\n", sync); 
spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags); header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX); if (sync) { while (header.p.header_high.part.busy) { if (loop_count > 25) { dev_err(sst_drv_ctx->dev, "sst: Busy wait failed, can't send this msg\n"); retval = -EBUSY; goto out; } cpu_relax(); loop_count++; header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX); } } else { if (list_empty(&sst_drv_ctx->ipc_dispatch_list)) { /* queue is empty, nothing to send */ spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags); dev_dbg(sst_drv_ctx->dev, "Empty msg queue... NO Action\n"); return 0; } if (header.p.header_high.part.busy) { spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags); dev_dbg(sst_drv_ctx->dev, "Busy not free... post later\n"); return 0; } /* copy msg from list */ msg = list_entry(sst_drv_ctx->ipc_dispatch_list.next, struct ipc_post, node); list_del(&msg->node); } dev_dbg(sst_drv_ctx->dev, "sst: Post message: header = %x\n", msg->mrfld_header.p.header_high.full); dev_dbg(sst_drv_ctx->dev, "sst: size = 0x%x\n", msg->mrfld_header.p.header_low_payload); if (msg->mrfld_header.p.header_high.part.large) memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND, msg->mailbox_data, msg->mrfld_header.p.header_low_payload); sst_shim_write64(sst_drv_ctx->shim, SST_IPCX, msg->mrfld_header.full); out: spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags); kfree(msg->mailbox_data); kfree(msg); return retval; } void intel_sst_clear_intr_mrfld(struct intel_sst_drv *sst_drv_ctx) { union interrupt_reg_mrfld isr; union interrupt_reg_mrfld imr; union ipc_header_mrfld clear_ipc; unsigned long irq_flags; spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags); imr.full = sst_shim_read64(sst_drv_ctx->shim, SST_IMRX); isr.full = sst_shim_read64(sst_drv_ctx->shim, SST_ISRX); /* write 1 to clear*/ isr.part.busy_interrupt = 1; sst_shim_write64(sst_drv_ctx->shim, SST_ISRX, isr.full); /* Set IA done bit */ clear_ipc.full = 
sst_shim_read64(sst_drv_ctx->shim, SST_IPCD); clear_ipc.p.header_high.part.busy = 0; clear_ipc.p.header_high.part.done = 1; clear_ipc.p.header_low_payload = IPC_ACK_SUCCESS; sst_shim_write64(sst_drv_ctx->shim, SST_IPCD, clear_ipc.full); /* un mask busy interrupt */ imr.part.busy_interrupt = 0; sst_shim_write64(sst_drv_ctx->shim, SST_IMRX, imr.full); spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags); } /* * process_fw_init - process the FW init msg * * @msg: IPC message mailbox data from FW * * This function processes the FW init msg from FW * marks FW state and prints debug info of loaded FW */ static void process_fw_init(struct intel_sst_drv *sst_drv_ctx, void *msg) { struct ipc_header_fw_init *init = (struct ipc_header_fw_init *)msg; int retval = 0; dev_dbg(sst_drv_ctx->dev, "*** FW Init msg came***\n"); if (init->result) { sst_set_fw_state_locked(sst_drv_ctx, SST_RESET); dev_err(sst_drv_ctx->dev, "FW Init failed, Error %x\n", init->result); retval = init->result; goto ret; } if (memcmp(&sst_drv_ctx->fw_version, &init->fw_version, sizeof(init->fw_version))) dev_info(sst_drv_ctx->dev, "FW Version %02x.%02x.%02x.%02x\n", init->fw_version.type, init->fw_version.major, init->fw_version.minor, init->fw_version.build); dev_dbg(sst_drv_ctx->dev, "Build date %s Time %s\n", init->build_info.date, init->build_info.time); /* Save FW version */ sst_drv_ctx->fw_version.type = init->fw_version.type; sst_drv_ctx->fw_version.major = init->fw_version.major; sst_drv_ctx->fw_version.minor = init->fw_version.minor; sst_drv_ctx->fw_version.build = init->fw_version.build; ret: sst_wake_up_block(sst_drv_ctx, retval, FW_DWNL_ID, 0 , NULL, 0); } static void process_fw_async_msg(struct intel_sst_drv *sst_drv_ctx, struct ipc_post *msg) { u32 msg_id; int str_id; u32 data_size, i; void *data_offset; struct stream_info *stream; u32 msg_low, pipe_id; msg_low = msg->mrfld_header.p.header_low_payload; msg_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->cmd_id; data_offset = 
(msg->mailbox_data + sizeof(struct ipc_dsp_hdr)); data_size = msg_low - (sizeof(struct ipc_dsp_hdr)); switch (msg_id) { case IPC_SST_PERIOD_ELAPSED_MRFLD: pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id; str_id = get_stream_id_mrfld(sst_drv_ctx, pipe_id); if (str_id > 0) { dev_dbg(sst_drv_ctx->dev, "Period elapsed rcvd for pipe id 0x%x\n", pipe_id); stream = &sst_drv_ctx->streams[str_id]; /* If stream is dropped, skip processing this message*/ if (stream->status == STREAM_INIT) break; if (stream->period_elapsed) stream->period_elapsed(stream->pcm_substream); if (stream->compr_cb) stream->compr_cb(stream->compr_cb_param); } break; case IPC_IA_DRAIN_STREAM_MRFLD: pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id; str_id = get_stream_id_mrfld(sst_drv_ctx, pipe_id); if (str_id > 0) { stream = &sst_drv_ctx->streams[str_id]; if (stream->drain_notify) stream->drain_notify(stream->drain_cb_param); } break; case IPC_IA_FW_ASYNC_ERR_MRFLD: dev_err(sst_drv_ctx->dev, "FW sent async error msg:\n"); for (i = 0; i < (data_size/4); i++) print_hex_dump(KERN_DEBUG, NULL, DUMP_PREFIX_NONE, 16, 4, data_offset, data_size, false); break; case IPC_IA_FW_INIT_CMPLT_MRFLD: process_fw_init(sst_drv_ctx, data_offset); break; case IPC_IA_BUF_UNDER_RUN_MRFLD: pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id; str_id = get_stream_id_mrfld(sst_drv_ctx, pipe_id); if (str_id > 0) dev_err(sst_drv_ctx->dev, "Buffer under-run for pipe:%#x str_id:%d\n", pipe_id, str_id); break; default: dev_err(sst_drv_ctx->dev, "Unrecognized async msg from FW msg_id %#x\n", msg_id); } } void sst_process_reply_mrfld(struct intel_sst_drv *sst_drv_ctx, struct ipc_post *msg) { unsigned int drv_id; void *data; union ipc_header_high msg_high; u32 msg_low; struct ipc_dsp_hdr *dsp_hdr; msg_high = msg->mrfld_header.p.header_high; msg_low = msg->mrfld_header.p.header_low_payload; dev_dbg(sst_drv_ctx->dev, "IPC process message header %x payload %x\n", msg->mrfld_header.p.header_high.full, 
msg->mrfld_header.p.header_low_payload); drv_id = msg_high.part.drv_id; /* Check for async messages first */ if (drv_id == SST_ASYNC_DRV_ID) { /*FW sent async large message*/ process_fw_async_msg(sst_drv_ctx, msg); return; } /* FW sent short error response for an IPC */ if (msg_high.part.result && !msg_high.part.large) { /* 32-bit FW error code in msg_low */ dev_err(sst_drv_ctx->dev, "FW sent error response 0x%x", msg_low); sst_wake_up_block(sst_drv_ctx, msg_high.part.result, msg_high.part.drv_id, msg_high.part.msg_id, NULL, 0); return; } /* * Process all valid responses * if it is a large message, the payload contains the size to * copy from mailbox **/ if (msg_high.part.large) { data = kmemdup((void *)msg->mailbox_data, msg_low, GFP_KERNEL); if (!data) return; /* Copy command id so that we can use to put sst to reset */ dsp_hdr = (struct ipc_dsp_hdr *)data; dev_dbg(sst_drv_ctx->dev, "cmd_id %d\n", dsp_hdr->cmd_id); if (sst_wake_up_block(sst_drv_ctx, msg_high.part.result, msg_high.part.drv_id, msg_high.part.msg_id, data, msg_low)) kfree(data); } else { sst_wake_up_block(sst_drv_ctx, msg_high.part.result, msg_high.part.drv_id, msg_high.part.msg_id, NULL, 0); } }
linux-master
sound/soc/intel/atom/sst/sst_ipc.c
// SPDX-License-Identifier: GPL-2.0-only /* * sst.c - Intel SST Driver for audio engine * * Copyright (C) 2008-14 Intel Corp * Authors: Vinod Koul <[email protected]> * Harsha Priya <[email protected]> * Dharageswari R <[email protected]> * KP Jeeja <[email protected]> * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/module.h> #include <linux/fs.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/firmware.h> #include <linux/pci.h> #include <linux/pm_runtime.h> #include <linux/pm_qos.h> #include <linux/async.h> #include <linux/acpi.h> #include <linux/sysfs.h> #include <sound/core.h> #include <sound/soc.h> #include <asm/platform_sst_audio.h> #include "../sst-mfld-platform.h" #include "sst.h" MODULE_AUTHOR("Vinod Koul <[email protected]>"); MODULE_AUTHOR("Harsha Priya <[email protected]>"); MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine Driver"); MODULE_LICENSE("GPL v2"); static inline bool sst_is_process_reply(u32 msg_id) { return ((msg_id & PROCESS_MSG) ? true : false); } static inline bool sst_validate_mailbox_size(unsigned int size) { return ((size <= SST_MAILBOX_SIZE) ? 
true : false); } static irqreturn_t intel_sst_interrupt_mrfld(int irq, void *context) { union interrupt_reg_mrfld isr; union ipc_header_mrfld header; union sst_imr_reg_mrfld imr; struct ipc_post *msg = NULL; unsigned int size; struct intel_sst_drv *drv = (struct intel_sst_drv *) context; irqreturn_t retval = IRQ_HANDLED; /* Interrupt arrived, check src */ isr.full = sst_shim_read64(drv->shim, SST_ISRX); if (isr.part.done_interrupt) { /* Clear done bit */ spin_lock(&drv->ipc_spin_lock); header.full = sst_shim_read64(drv->shim, drv->ipc_reg.ipcx); header.p.header_high.part.done = 0; sst_shim_write64(drv->shim, drv->ipc_reg.ipcx, header.full); /* write 1 to clear status register */; isr.part.done_interrupt = 1; sst_shim_write64(drv->shim, SST_ISRX, isr.full); spin_unlock(&drv->ipc_spin_lock); /* we can send more messages to DSP so trigger work */ queue_work(drv->post_msg_wq, &drv->ipc_post_msg_wq); retval = IRQ_HANDLED; } if (isr.part.busy_interrupt) { /* message from dsp so copy that */ spin_lock(&drv->ipc_spin_lock); imr.full = sst_shim_read64(drv->shim, SST_IMRX); imr.part.busy_interrupt = 1; sst_shim_write64(drv->shim, SST_IMRX, imr.full); spin_unlock(&drv->ipc_spin_lock); header.full = sst_shim_read64(drv->shim, drv->ipc_reg.ipcd); if (sst_create_ipc_msg(&msg, header.p.header_high.part.large)) { drv->ops->clear_interrupt(drv); return IRQ_HANDLED; } if (header.p.header_high.part.large) { size = header.p.header_low_payload; if (sst_validate_mailbox_size(size)) { memcpy_fromio(msg->mailbox_data, drv->mailbox + drv->mailbox_recv_offset, size); } else { dev_err(drv->dev, "Mailbox not copied, payload size is: %u\n", size); header.p.header_low_payload = 0; } } msg->mrfld_header = header; msg->is_process_reply = sst_is_process_reply(header.p.header_high.part.msg_id); spin_lock(&drv->rx_msg_lock); list_add_tail(&msg->node, &drv->rx_list); spin_unlock(&drv->rx_msg_lock); drv->ops->clear_interrupt(drv); retval = IRQ_WAKE_THREAD; } return retval; } static irqreturn_t 
intel_sst_irq_thread_mrfld(int irq, void *context)
{
	/*
	 * Threaded half of the IRQ handler: drain the rx_list that the
	 * hard-IRQ handler filled and dispatch each message.  The lock is
	 * dropped around the dispatch callbacks so they may sleep.
	 */
	struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
	struct ipc_post *__msg, *msg;
	unsigned long irq_flags;

	spin_lock_irqsave(&drv->rx_msg_lock, irq_flags);
	if (list_empty(&drv->rx_list)) {
		spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
		return IRQ_HANDLED;
	}

	list_for_each_entry_safe(msg, __msg, &drv->rx_list, node) {
		list_del(&msg->node);
		spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
		if (msg->is_process_reply)
			drv->ops->process_message(msg);
		else
			drv->ops->process_reply(drv, msg);

		/* large messages carry a separately allocated mailbox copy */
		if (msg->is_large)
			kfree(msg->mailbox_data);
		kfree(msg);
		spin_lock_irqsave(&drv->rx_msg_lock, irq_flags);
	}
	spin_unlock_irqrestore(&drv->rx_msg_lock, irq_flags);
	return IRQ_HANDLED;
}

/*
 * Ask the firmware to prepare for D3; returns 0 on success or -EIO if the
 * IPC failed (in which case the FW is not suspended).
 */
static int sst_save_dsp_context_v2(struct intel_sst_drv *sst)
{
	int ret = 0;

	ret = sst_prepare_and_post_msg(sst, SST_TASK_ID_MEDIA, IPC_CMD,
			IPC_PREP_D3, PIPE_RSVD, 0, NULL, NULL,
			true, true, false, true);
	if (ret < 0) {
		dev_err(sst->dev, "not suspending FW!!, Err: %d\n", ret);
		return -EIO;
	}

	return 0;
}

/* Operation table shared by all Merrifield-class (TNG/BYT/BSW) devices */
static struct intel_sst_ops mrfld_ops = {
	.interrupt = intel_sst_interrupt_mrfld,
	.irq_thread = intel_sst_irq_thread_mrfld,
	.clear_interrupt = intel_sst_clear_intr_mrfld,
	.start = sst_start_mrfld,
	.reset = intel_sst_reset_dsp_mrfld,
	.post_message = sst_post_message_mrfld,
	.process_reply = sst_process_reply_mrfld,
	.save_dsp_context = sst_save_dsp_context_v2,
	.alloc_stream = sst_alloc_stream_mrfld,
	.post_download = sst_post_download_mrfld,
};

/*
 * Select the ops table and timestamp offset for the probed device id;
 * returns -EINVAL for unknown hardware.
 */
int sst_driver_ops(struct intel_sst_drv *sst)
{
	switch (sst->dev_id) {
	case PCI_DEVICE_ID_INTEL_SST_TNG:
	case PCI_DEVICE_ID_INTEL_SST_BYT:
	case PCI_DEVICE_ID_INTEL_SST_BSW:
		sst->tstamp = SST_TIME_STAMP_MRFLD;
		sst->ops = &mrfld_ops;
		return 0;

	default:
		dev_err(sst->dev,
			"SST Driver capabilities missing for dev_id: %x",
			sst->dev_id);
		return -EINVAL;
	}
}

/* Workqueue handler: flush any IPC messages queued while the shim was busy */
void sst_process_pending_msg(struct work_struct *work)
{
	struct intel_sst_drv *ctx = container_of(work,
			struct intel_sst_drv, ipc_post_msg_wq);
ctx->ops->post_message(ctx, NULL, false); } static int sst_workqueue_init(struct intel_sst_drv *ctx) { INIT_LIST_HEAD(&ctx->memcpy_list); INIT_LIST_HEAD(&ctx->rx_list); INIT_LIST_HEAD(&ctx->ipc_dispatch_list); INIT_LIST_HEAD(&ctx->block_list); INIT_WORK(&ctx->ipc_post_msg_wq, sst_process_pending_msg); init_waitqueue_head(&ctx->wait_queue); ctx->post_msg_wq = create_singlethread_workqueue("sst_post_msg_wq"); if (!ctx->post_msg_wq) return -EBUSY; return 0; } static void sst_init_locks(struct intel_sst_drv *ctx) { mutex_init(&ctx->sst_lock); spin_lock_init(&ctx->rx_msg_lock); spin_lock_init(&ctx->ipc_spin_lock); spin_lock_init(&ctx->block_lock); } /* * Driver handles PCI IDs in ACPI - sst_acpi_probe() - and we are using only * device ID part. If real ACPI ID appears, the kstrtouint() returns error, so * we are fine with using unsigned short as dev_id type. */ int sst_alloc_drv_context(struct intel_sst_drv **ctx, struct device *dev, unsigned short dev_id) { *ctx = devm_kzalloc(dev, sizeof(struct intel_sst_drv), GFP_KERNEL); if (!(*ctx)) return -ENOMEM; (*ctx)->dev = dev; (*ctx)->dev_id = dev_id; return 0; } EXPORT_SYMBOL_GPL(sst_alloc_drv_context); static ssize_t firmware_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct intel_sst_drv *ctx = dev_get_drvdata(dev); if (ctx->fw_version.type == 0 && ctx->fw_version.major == 0 && ctx->fw_version.minor == 0 && ctx->fw_version.build == 0) return sysfs_emit(buf, "FW not yet loaded\n"); else return sysfs_emit(buf, "v%02x.%02x.%02x.%02x\n", ctx->fw_version.type, ctx->fw_version.major, ctx->fw_version.minor, ctx->fw_version.build); } static DEVICE_ATTR_RO(firmware_version); static const struct attribute *sst_fw_version_attrs[] = { &dev_attr_firmware_version.attr, NULL, }; static const struct attribute_group sst_fw_version_attr_group = { .attrs = (struct attribute **)sst_fw_version_attrs, }; int sst_context_init(struct intel_sst_drv *ctx) { int ret = 0, i; if (!ctx->pdata) return -EINVAL; if 
(!ctx->pdata->probe_data) return -EINVAL; memcpy(&ctx->info, ctx->pdata->probe_data, sizeof(ctx->info)); ret = sst_driver_ops(ctx); if (ret != 0) return -EINVAL; sst_init_locks(ctx); sst_set_fw_state_locked(ctx, SST_RESET); /* pvt_id 0 reserved for async messages */ ctx->pvt_id = 1; ctx->stream_cnt = 0; ctx->fw_in_mem = NULL; /* we use memcpy, so set to 0 */ ctx->use_dma = 0; ctx->use_lli = 0; if (sst_workqueue_init(ctx)) return -EINVAL; ctx->mailbox_recv_offset = ctx->pdata->ipc_info->mbox_recv_off; ctx->ipc_reg.ipcx = SST_IPCX + ctx->pdata->ipc_info->ipc_offset; ctx->ipc_reg.ipcd = SST_IPCD + ctx->pdata->ipc_info->ipc_offset; dev_info(ctx->dev, "Got drv data max stream %d\n", ctx->info.max_streams); for (i = 1; i <= ctx->info.max_streams; i++) { struct stream_info *stream = &ctx->streams[i]; memset(stream, 0, sizeof(*stream)); stream->pipe_id = PIPE_RSVD; mutex_init(&stream->lock); } /* Register the ISR */ ret = devm_request_threaded_irq(ctx->dev, ctx->irq_num, ctx->ops->interrupt, ctx->ops->irq_thread, 0, SST_DRV_NAME, ctx); if (ret) goto do_free_mem; dev_dbg(ctx->dev, "Registered IRQ %#x\n", ctx->irq_num); /* default intr are unmasked so set this as masked */ sst_shim_write64(ctx->shim, SST_IMRX, 0xFFFF0038); ctx->qos = devm_kzalloc(ctx->dev, sizeof(struct pm_qos_request), GFP_KERNEL); if (!ctx->qos) { ret = -ENOMEM; goto do_free_mem; } cpu_latency_qos_add_request(ctx->qos, PM_QOS_DEFAULT_VALUE); dev_dbg(ctx->dev, "Requesting FW %s now...\n", ctx->firmware_name); ret = request_firmware_nowait(THIS_MODULE, true, ctx->firmware_name, ctx->dev, GFP_KERNEL, ctx, sst_firmware_load_cb); if (ret) { dev_err(ctx->dev, "Firmware download failed:%d\n", ret); goto do_free_mem; } ret = sysfs_create_group(&ctx->dev->kobj, &sst_fw_version_attr_group); if (ret) { dev_err(ctx->dev, "Unable to create sysfs\n"); goto err_sysfs; } sst_register(ctx->dev); return 0; err_sysfs: sysfs_remove_group(&ctx->dev->kobj, &sst_fw_version_attr_group); do_free_mem: 
destroy_workqueue(ctx->post_msg_wq); return ret; } EXPORT_SYMBOL_GPL(sst_context_init); void sst_context_cleanup(struct intel_sst_drv *ctx) { pm_runtime_get_noresume(ctx->dev); pm_runtime_disable(ctx->dev); sst_unregister(ctx->dev); sst_set_fw_state_locked(ctx, SST_SHUTDOWN); sysfs_remove_group(&ctx->dev->kobj, &sst_fw_version_attr_group); destroy_workqueue(ctx->post_msg_wq); cpu_latency_qos_remove_request(ctx->qos); kfree(ctx->fw_sg_list.src); kfree(ctx->fw_sg_list.dst); ctx->fw_sg_list.list_len = 0; kfree(ctx->fw_in_mem); ctx->fw_in_mem = NULL; sst_memcpy_free_resources(ctx); } EXPORT_SYMBOL_GPL(sst_context_cleanup); void sst_configure_runtime_pm(struct intel_sst_drv *ctx) { pm_runtime_set_autosuspend_delay(ctx->dev, SST_SUSPEND_DELAY); pm_runtime_use_autosuspend(ctx->dev); /* * For acpi devices, the actual physical device state is * initially active. So change the state to active before * enabling the pm */ if (!acpi_disabled) pm_runtime_set_active(ctx->dev); pm_runtime_enable(ctx->dev); if (acpi_disabled) pm_runtime_set_active(ctx->dev); else pm_runtime_put_noidle(ctx->dev); } EXPORT_SYMBOL_GPL(sst_configure_runtime_pm); static int intel_sst_runtime_suspend(struct device *dev) { int ret = 0; struct intel_sst_drv *ctx = dev_get_drvdata(dev); if (ctx->sst_state == SST_RESET) { dev_dbg(dev, "LPE is already in RESET state, No action\n"); return 0; } /* save fw context */ if (ctx->ops->save_dsp_context(ctx)) return -EBUSY; /* Move the SST state to Reset */ sst_set_fw_state_locked(ctx, SST_RESET); synchronize_irq(ctx->irq_num); flush_workqueue(ctx->post_msg_wq); ctx->ops->reset(ctx); return ret; } static int intel_sst_suspend(struct device *dev) { struct intel_sst_drv *ctx = dev_get_drvdata(dev); struct sst_fw_save *fw_save; int i, ret; /* check first if we are already in SW reset */ if (ctx->sst_state == SST_RESET) return 0; /* * check if any stream is active and running * they should already by suspend by soc_suspend */ for (i = 1; i <= ctx->info.max_streams; i++) 
{ struct stream_info *stream = &ctx->streams[i]; if (stream->status == STREAM_RUNNING) { dev_err(dev, "stream %d is running, can't suspend, abort\n", i); return -EBUSY; } if (ctx->pdata->streams_lost_on_suspend) { stream->resume_status = stream->status; stream->resume_prev = stream->prev; if (stream->status != STREAM_UN_INIT) sst_free_stream(ctx, i); } } synchronize_irq(ctx->irq_num); flush_workqueue(ctx->post_msg_wq); /* Move the SST state to Reset */ sst_set_fw_state_locked(ctx, SST_RESET); /* tell DSP we are suspending */ if (ctx->ops->save_dsp_context(ctx)) return -EBUSY; /* save the memories */ fw_save = kzalloc(sizeof(*fw_save), GFP_KERNEL); if (!fw_save) return -ENOMEM; fw_save->iram = kvzalloc(ctx->iram_end - ctx->iram_base, GFP_KERNEL); if (!fw_save->iram) { ret = -ENOMEM; goto iram; } fw_save->dram = kvzalloc(ctx->dram_end - ctx->dram_base, GFP_KERNEL); if (!fw_save->dram) { ret = -ENOMEM; goto dram; } fw_save->sram = kvzalloc(SST_MAILBOX_SIZE, GFP_KERNEL); if (!fw_save->sram) { ret = -ENOMEM; goto sram; } fw_save->ddr = kvzalloc(ctx->ddr_end - ctx->ddr_base, GFP_KERNEL); if (!fw_save->ddr) { ret = -ENOMEM; goto ddr; } memcpy32_fromio(fw_save->iram, ctx->iram, ctx->iram_end - ctx->iram_base); memcpy32_fromio(fw_save->dram, ctx->dram, ctx->dram_end - ctx->dram_base); memcpy32_fromio(fw_save->sram, ctx->mailbox, SST_MAILBOX_SIZE); memcpy32_fromio(fw_save->ddr, ctx->ddr, ctx->ddr_end - ctx->ddr_base); ctx->fw_save = fw_save; ctx->ops->reset(ctx); return 0; ddr: kvfree(fw_save->sram); sram: kvfree(fw_save->dram); dram: kvfree(fw_save->iram); iram: kfree(fw_save); return ret; } static int intel_sst_resume(struct device *dev) { struct intel_sst_drv *ctx = dev_get_drvdata(dev); struct sst_fw_save *fw_save = ctx->fw_save; struct sst_block *block; int i, ret = 0; if (!fw_save) return 0; sst_set_fw_state_locked(ctx, SST_FW_LOADING); /* we have to restore the memory saved */ ctx->ops->reset(ctx); ctx->fw_save = NULL; memcpy32_toio(ctx->iram, fw_save->iram, 
ctx->iram_end - ctx->iram_base); memcpy32_toio(ctx->dram, fw_save->dram, ctx->dram_end - ctx->dram_base); memcpy32_toio(ctx->mailbox, fw_save->sram, SST_MAILBOX_SIZE); memcpy32_toio(ctx->ddr, fw_save->ddr, ctx->ddr_end - ctx->ddr_base); kvfree(fw_save->sram); kvfree(fw_save->dram); kvfree(fw_save->iram); kvfree(fw_save->ddr); kfree(fw_save); block = sst_create_block(ctx, 0, FW_DWNL_ID); if (block == NULL) return -ENOMEM; /* start and wait for ack */ ctx->ops->start(ctx); ret = sst_wait_timeout(ctx, block); if (ret) { dev_err(ctx->dev, "fw download failed %d\n", ret); /* FW download failed due to timeout */ ret = -EBUSY; } else { sst_set_fw_state_locked(ctx, SST_FW_RUNNING); } if (ctx->pdata->streams_lost_on_suspend) { for (i = 1; i <= ctx->info.max_streams; i++) { struct stream_info *stream = &ctx->streams[i]; if (stream->resume_status != STREAM_UN_INIT) { dev_dbg(ctx->dev, "Re-allocing stream %d status %d prev %d\n", i, stream->resume_status, stream->resume_prev); sst_realloc_stream(ctx, i); stream->status = stream->resume_status; stream->prev = stream->resume_prev; } } } sst_free_block(ctx, block); return ret; } const struct dev_pm_ops intel_sst_pm = { .suspend = intel_sst_suspend, .resume = intel_sst_resume, .runtime_suspend = intel_sst_runtime_suspend, }; EXPORT_SYMBOL_GPL(intel_sst_pm);
linux-master
sound/soc/intel/atom/sst/sst.c
// SPDX-License-Identifier: GPL-2.0-only /* * sst_drv_interface.c - Intel SST Driver for audio engine * * Copyright (C) 2008-14 Intel Corp * Authors: Vinod Koul <[email protected]> * Harsha Priya <[email protected]> * Dharageswari R <[email protected]) * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/delay.h> #include <linux/pci.h> #include <linux/fs.h> #include <linux/firmware.h> #include <linux/pm_runtime.h> #include <linux/pm_qos.h> #include <linux/math64.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <sound/compress_driver.h> #include <asm/platform_sst_audio.h> #include "../sst-mfld-platform.h" #include "sst.h" #define NUM_CODEC 2 #define MIN_FRAGMENT 2 #define MAX_FRAGMENT 4 #define MIN_FRAGMENT_SIZE (50 * 1024) #define MAX_FRAGMENT_SIZE (1024 * 1024) #define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz) (((pcm_wd_sz + 15) >> 4) << 1) #ifdef CONFIG_PM #define GET_USAGE_COUNT(dev) (atomic_read(&dev->power.usage_count)) #else #define GET_USAGE_COUNT(dev) 1 #endif int free_stream_context(struct intel_sst_drv *ctx, unsigned int str_id) { struct stream_info *stream; int ret = 0; stream = get_stream_info(ctx, str_id); if (stream) { /* str_id is valid, so stream is alloacted */ ret = sst_free_stream(ctx, str_id); if (ret) sst_clean_stream(&ctx->streams[str_id]); return ret; } else { dev_err(ctx->dev, "we tried to free stream context %d which was freed!!!\n", str_id); } return ret; } int sst_get_stream_allocated(struct intel_sst_drv *ctx, struct snd_sst_params *str_param, struct snd_sst_lib_download **lib_dnld) { int retval; retval = ctx->ops->alloc_stream(ctx, str_param); if (retval > 0) dev_dbg(ctx->dev, "Stream allocated %d\n", retval); return retval; } /* * sst_get_sfreq - this function returns the frequency of the stream * * @str_param : stream params */ int sst_get_sfreq(struct snd_sst_params *str_param) { switch 
(str_param->codec) { case SST_CODEC_TYPE_PCM: return str_param->sparams.uc.pcm_params.sfreq; case SST_CODEC_TYPE_AAC: return str_param->sparams.uc.aac_params.externalsr; case SST_CODEC_TYPE_MP3: return 0; default: return -EINVAL; } } /* * sst_get_num_channel - get number of channels for the stream * * @str_param : stream params */ int sst_get_num_channel(struct snd_sst_params *str_param) { switch (str_param->codec) { case SST_CODEC_TYPE_PCM: return str_param->sparams.uc.pcm_params.num_chan; case SST_CODEC_TYPE_MP3: return str_param->sparams.uc.mp3_params.num_chan; case SST_CODEC_TYPE_AAC: return str_param->sparams.uc.aac_params.num_chan; default: return -EINVAL; } } /* * sst_get_stream - this function prepares for stream allocation * * @str_param : stream param */ int sst_get_stream(struct intel_sst_drv *ctx, struct snd_sst_params *str_param) { int retval; struct stream_info *str_info; /* stream is not allocated, we are allocating */ retval = ctx->ops->alloc_stream(ctx, str_param); if (retval <= 0) { return -EIO; } /* store sampling freq */ str_info = &ctx->streams[retval]; str_info->sfreq = sst_get_sfreq(str_param); return retval; } static int sst_power_control(struct device *dev, bool state) { struct intel_sst_drv *ctx = dev_get_drvdata(dev); int ret = 0; int usage_count = 0; if (state) { ret = pm_runtime_resume_and_get(dev); usage_count = GET_USAGE_COUNT(dev); dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count); if (ret < 0) { dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret); return ret; } if ((ctx->sst_state == SST_RESET) && (usage_count == 1)) { ret = sst_load_fw(ctx); if (ret) { dev_err(dev, "FW download fail %d\n", ret); sst_set_fw_state_locked(ctx, SST_RESET); ret = sst_pm_runtime_put(ctx); } } } else { usage_count = GET_USAGE_COUNT(dev); dev_dbg(ctx->dev, "Disable: pm usage count: %d\n", usage_count); return sst_pm_runtime_put(ctx); } return ret; } /* * sst_open_pcm_stream - Open PCM interface * * @str_param: parameters of pcm stream 
* * This function is called by MID sound card driver to open * a new pcm interface */ static int sst_open_pcm_stream(struct device *dev, struct snd_sst_params *str_param) { int retval; struct intel_sst_drv *ctx = dev_get_drvdata(dev); if (!str_param) return -EINVAL; retval = sst_get_stream(ctx, str_param); if (retval > 0) ctx->stream_cnt++; else dev_err(ctx->dev, "sst_get_stream returned err %d\n", retval); return retval; } static int sst_cdev_open(struct device *dev, struct snd_sst_params *str_params, struct sst_compress_cb *cb) { int str_id, retval; struct stream_info *stream; struct intel_sst_drv *ctx = dev_get_drvdata(dev); retval = pm_runtime_resume_and_get(ctx->dev); if (retval < 0) return retval; str_id = sst_get_stream(ctx, str_params); if (str_id > 0) { dev_dbg(dev, "stream allocated in sst_cdev_open %d\n", str_id); stream = &ctx->streams[str_id]; stream->compr_cb = cb->compr_cb; stream->compr_cb_param = cb->param; stream->drain_notify = cb->drain_notify; stream->drain_cb_param = cb->drain_cb_param; } else { dev_err(dev, "stream encountered error during alloc %d\n", str_id); str_id = -EINVAL; sst_pm_runtime_put(ctx); } return str_id; } static int sst_cdev_close(struct device *dev, unsigned int str_id) { int retval; struct stream_info *stream; struct intel_sst_drv *ctx = dev_get_drvdata(dev); stream = get_stream_info(ctx, str_id); if (!stream) { dev_err(dev, "stream info is NULL for str %d!!!\n", str_id); return -EINVAL; } retval = sst_free_stream(ctx, str_id); stream->compr_cb_param = NULL; stream->compr_cb = NULL; if (retval) dev_err(dev, "free stream returned err %d\n", retval); dev_dbg(dev, "End\n"); return retval; } static int sst_cdev_ack(struct device *dev, unsigned int str_id, unsigned long bytes) { struct stream_info *stream; struct snd_sst_tstamp fw_tstamp = {0,}; int offset; void __iomem *addr; struct intel_sst_drv *ctx = dev_get_drvdata(dev); stream = get_stream_info(ctx, str_id); if (!stream) return -EINVAL; /* update bytes sent */ 
stream->cumm_bytes += bytes; dev_dbg(dev, "bytes copied %d inc by %ld\n", stream->cumm_bytes, bytes); addr = ((void __iomem *)(ctx->mailbox + ctx->tstamp)) + (str_id * sizeof(fw_tstamp)); memcpy_fromio(&fw_tstamp, addr, sizeof(fw_tstamp)); fw_tstamp.bytes_copied = stream->cumm_bytes; dev_dbg(dev, "bytes sent to fw %llu inc by %ld\n", fw_tstamp.bytes_copied, bytes); offset = offsetof(struct snd_sst_tstamp, bytes_copied); sst_shim_write(addr, offset, fw_tstamp.bytes_copied); return 0; } static int sst_cdev_set_metadata(struct device *dev, unsigned int str_id, struct snd_compr_metadata *metadata) { int retval = 0; struct stream_info *str_info; struct intel_sst_drv *ctx = dev_get_drvdata(dev); dev_dbg(dev, "set metadata for stream %d\n", str_id); str_info = get_stream_info(ctx, str_id); if (!str_info) return -EINVAL; dev_dbg(dev, "pipe id = %d\n", str_info->pipe_id); retval = sst_prepare_and_post_msg(ctx, str_info->task_id, IPC_CMD, IPC_IA_SET_STREAM_PARAMS_MRFLD, str_info->pipe_id, sizeof(*metadata), metadata, NULL, true, true, true, false); return retval; } static int sst_cdev_stream_pause(struct device *dev, unsigned int str_id) { struct intel_sst_drv *ctx = dev_get_drvdata(dev); return sst_pause_stream(ctx, str_id); } static int sst_cdev_stream_pause_release(struct device *dev, unsigned int str_id) { struct intel_sst_drv *ctx = dev_get_drvdata(dev); return sst_resume_stream(ctx, str_id); } static int sst_cdev_stream_start(struct device *dev, unsigned int str_id) { struct stream_info *str_info; struct intel_sst_drv *ctx = dev_get_drvdata(dev); str_info = get_stream_info(ctx, str_id); if (!str_info) return -EINVAL; str_info->prev = str_info->status; str_info->status = STREAM_RUNNING; return sst_start_stream(ctx, str_id); } static int sst_cdev_stream_drop(struct device *dev, unsigned int str_id) { struct intel_sst_drv *ctx = dev_get_drvdata(dev); return sst_drop_stream(ctx, str_id); } static int sst_cdev_stream_drain(struct device *dev, unsigned int str_id) { struct 
intel_sst_drv *ctx = dev_get_drvdata(dev); return sst_drain_stream(ctx, str_id, false); } static int sst_cdev_stream_partial_drain(struct device *dev, unsigned int str_id) { struct intel_sst_drv *ctx = dev_get_drvdata(dev); return sst_drain_stream(ctx, str_id, true); } static int sst_cdev_tstamp(struct device *dev, unsigned int str_id, struct snd_compr_tstamp *tstamp) { struct snd_sst_tstamp fw_tstamp = {0,}; struct stream_info *stream; struct intel_sst_drv *ctx = dev_get_drvdata(dev); void __iomem *addr; addr = (void __iomem *)(ctx->mailbox + ctx->tstamp) + (str_id * sizeof(fw_tstamp)); memcpy_fromio(&fw_tstamp, addr, sizeof(fw_tstamp)); stream = get_stream_info(ctx, str_id); if (!stream) return -EINVAL; dev_dbg(dev, "rb_counter %llu in bytes\n", fw_tstamp.ring_buffer_counter); tstamp->copied_total = fw_tstamp.ring_buffer_counter; tstamp->pcm_frames = fw_tstamp.frames_decoded; tstamp->pcm_io_frames = div_u64(fw_tstamp.hardware_counter, (u64)stream->num_ch * SST_GET_BYTES_PER_SAMPLE(24)); tstamp->sampling_rate = fw_tstamp.sampling_frequency; dev_dbg(dev, "PCM = %u\n", tstamp->pcm_io_frames); dev_dbg(dev, "Ptr Query on strid = %d copied_total %d, decodec %d\n", str_id, tstamp->copied_total, tstamp->pcm_frames); dev_dbg(dev, "rendered %d\n", tstamp->pcm_io_frames); return 0; } static int sst_cdev_caps(struct snd_compr_caps *caps) { caps->num_codecs = NUM_CODEC; caps->min_fragment_size = MIN_FRAGMENT_SIZE; /* 50KB */ caps->max_fragment_size = MAX_FRAGMENT_SIZE; /* 1024KB */ caps->min_fragments = MIN_FRAGMENT; caps->max_fragments = MAX_FRAGMENT; caps->codecs[0] = SND_AUDIOCODEC_MP3; caps->codecs[1] = SND_AUDIOCODEC_AAC; return 0; } static const struct snd_compr_codec_caps caps_mp3 = { .num_descriptors = 1, .descriptor[0].max_ch = 2, .descriptor[0].sample_rates[0] = 48000, .descriptor[0].sample_rates[1] = 44100, .descriptor[0].sample_rates[2] = 32000, .descriptor[0].sample_rates[3] = 16000, .descriptor[0].sample_rates[4] = 8000, .descriptor[0].num_sample_rates = 5, 
.descriptor[0].bit_rate[0] = 320, .descriptor[0].bit_rate[1] = 192, .descriptor[0].num_bitrates = 2, .descriptor[0].profiles = 0, .descriptor[0].modes = SND_AUDIOCHANMODE_MP3_STEREO, .descriptor[0].formats = 0, }; static const struct snd_compr_codec_caps caps_aac = { .num_descriptors = 2, .descriptor[1].max_ch = 2, .descriptor[0].sample_rates[0] = 48000, .descriptor[0].sample_rates[1] = 44100, .descriptor[0].sample_rates[2] = 32000, .descriptor[0].sample_rates[3] = 16000, .descriptor[0].sample_rates[4] = 8000, .descriptor[0].num_sample_rates = 5, .descriptor[1].bit_rate[0] = 320, .descriptor[1].bit_rate[1] = 192, .descriptor[1].num_bitrates = 2, .descriptor[1].profiles = 0, .descriptor[1].modes = 0, .descriptor[1].formats = (SND_AUDIOSTREAMFORMAT_MP4ADTS | SND_AUDIOSTREAMFORMAT_RAW), }; static int sst_cdev_codec_caps(struct snd_compr_codec_caps *codec) { if (codec->codec == SND_AUDIOCODEC_MP3) *codec = caps_mp3; else if (codec->codec == SND_AUDIOCODEC_AAC) *codec = caps_aac; else return -EINVAL; return 0; } void sst_cdev_fragment_elapsed(struct intel_sst_drv *ctx, int str_id) { struct stream_info *stream; dev_dbg(ctx->dev, "fragment elapsed from firmware for str_id %d\n", str_id); stream = &ctx->streams[str_id]; if (stream->compr_cb) stream->compr_cb(stream->compr_cb_param); } /* * sst_close_pcm_stream - Close PCM interface * * @str_id: stream id to be closed * * This function is called by MID sound card driver to close * an existing pcm interface */ static int sst_close_pcm_stream(struct device *dev, unsigned int str_id) { struct stream_info *stream; int retval = 0; struct intel_sst_drv *ctx = dev_get_drvdata(dev); stream = get_stream_info(ctx, str_id); if (!stream) { dev_err(ctx->dev, "stream info is NULL for str %d!!!\n", str_id); return -EINVAL; } retval = free_stream_context(ctx, str_id); stream->pcm_substream = NULL; stream->status = STREAM_UN_INIT; stream->period_elapsed = NULL; ctx->stream_cnt--; if (retval) dev_err(ctx->dev, "free stream returned err 
%d\n", retval); dev_dbg(ctx->dev, "Exit\n"); return 0; } static inline int sst_calc_tstamp(struct intel_sst_drv *ctx, struct pcm_stream_info *info, struct snd_pcm_substream *substream, struct snd_sst_tstamp *fw_tstamp) { size_t delay_bytes, delay_frames; size_t buffer_sz; u32 pointer_bytes, pointer_samples; dev_dbg(ctx->dev, "mrfld ring_buffer_counter %llu in bytes\n", fw_tstamp->ring_buffer_counter); dev_dbg(ctx->dev, "mrfld hardware_counter %llu in bytes\n", fw_tstamp->hardware_counter); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) delay_bytes = (size_t) (fw_tstamp->ring_buffer_counter - fw_tstamp->hardware_counter); else delay_bytes = (size_t) (fw_tstamp->hardware_counter - fw_tstamp->ring_buffer_counter); delay_frames = bytes_to_frames(substream->runtime, delay_bytes); buffer_sz = snd_pcm_lib_buffer_bytes(substream); div_u64_rem(fw_tstamp->ring_buffer_counter, buffer_sz, &pointer_bytes); pointer_samples = bytes_to_samples(substream->runtime, pointer_bytes); dev_dbg(ctx->dev, "pcm delay %zu in bytes\n", delay_bytes); info->buffer_ptr = pointer_samples / substream->runtime->channels; info->pcm_delay = delay_frames; dev_dbg(ctx->dev, "buffer ptr %llu pcm_delay rep: %llu\n", info->buffer_ptr, info->pcm_delay); return 0; } static int sst_read_timestamp(struct device *dev, struct pcm_stream_info *info) { struct stream_info *stream; struct snd_pcm_substream *substream; struct snd_sst_tstamp fw_tstamp; unsigned int str_id; struct intel_sst_drv *ctx = dev_get_drvdata(dev); void __iomem *addr; str_id = info->str_id; stream = get_stream_info(ctx, str_id); if (!stream) return -EINVAL; if (!stream->pcm_substream) return -EINVAL; substream = stream->pcm_substream; addr = (void __iomem *)(ctx->mailbox + ctx->tstamp) + (str_id * sizeof(fw_tstamp)); memcpy_fromio(&fw_tstamp, addr, sizeof(fw_tstamp)); return sst_calc_tstamp(ctx, info, substream, &fw_tstamp); } static int sst_stream_start(struct device *dev, int str_id) { struct stream_info *str_info; struct intel_sst_drv 
*ctx = dev_get_drvdata(dev); if (ctx->sst_state != SST_FW_RUNNING) return 0; str_info = get_stream_info(ctx, str_id); if (!str_info) return -EINVAL; str_info->prev = str_info->status; str_info->status = STREAM_RUNNING; sst_start_stream(ctx, str_id); return 0; } static int sst_stream_drop(struct device *dev, int str_id) { struct stream_info *str_info; struct intel_sst_drv *ctx = dev_get_drvdata(dev); if (ctx->sst_state != SST_FW_RUNNING) return 0; str_info = get_stream_info(ctx, str_id); if (!str_info) return -EINVAL; str_info->prev = STREAM_UN_INIT; str_info->status = STREAM_INIT; return sst_drop_stream(ctx, str_id); } static int sst_stream_pause(struct device *dev, int str_id) { struct stream_info *str_info; struct intel_sst_drv *ctx = dev_get_drvdata(dev); if (ctx->sst_state != SST_FW_RUNNING) return 0; str_info = get_stream_info(ctx, str_id); if (!str_info) return -EINVAL; return sst_pause_stream(ctx, str_id); } static int sst_stream_resume(struct device *dev, int str_id) { struct stream_info *str_info; struct intel_sst_drv *ctx = dev_get_drvdata(dev); if (ctx->sst_state != SST_FW_RUNNING) return 0; str_info = get_stream_info(ctx, str_id); if (!str_info) return -EINVAL; return sst_resume_stream(ctx, str_id); } static int sst_stream_init(struct device *dev, struct pcm_stream_info *str_info) { int str_id = 0; struct stream_info *stream; struct intel_sst_drv *ctx = dev_get_drvdata(dev); str_id = str_info->str_id; if (ctx->sst_state != SST_FW_RUNNING) return 0; stream = get_stream_info(ctx, str_id); if (!stream) return -EINVAL; dev_dbg(ctx->dev, "setting the period ptrs\n"); stream->pcm_substream = str_info->arg; stream->period_elapsed = str_info->period_elapsed; stream->sfreq = str_info->sfreq; stream->prev = stream->status; stream->status = STREAM_INIT; dev_dbg(ctx->dev, "pcm_substream %p, period_elapsed %p, sfreq %d, status %d\n", stream->pcm_substream, stream->period_elapsed, stream->sfreq, stream->status); return 0; } /* * sst_set_byte_stream - Set generic 
params * * @cmd: control cmd to be set * @arg: command argument * * This function is called by MID sound card driver to configure * SST runtime params. */ static int sst_send_byte_stream(struct device *dev, struct snd_sst_bytes_v2 *bytes) { int ret_val = 0; struct intel_sst_drv *ctx = dev_get_drvdata(dev); if (NULL == bytes) return -EINVAL; ret_val = pm_runtime_resume_and_get(ctx->dev); if (ret_val < 0) return ret_val; ret_val = sst_send_byte_stream_mrfld(ctx, bytes); sst_pm_runtime_put(ctx); return ret_val; } static struct sst_ops pcm_ops = { .open = sst_open_pcm_stream, .stream_init = sst_stream_init, .stream_start = sst_stream_start, .stream_drop = sst_stream_drop, .stream_pause = sst_stream_pause, .stream_pause_release = sst_stream_resume, .stream_read_tstamp = sst_read_timestamp, .send_byte_stream = sst_send_byte_stream, .close = sst_close_pcm_stream, .power = sst_power_control, }; static struct compress_sst_ops compr_ops = { .open = sst_cdev_open, .close = sst_cdev_close, .stream_pause = sst_cdev_stream_pause, .stream_pause_release = sst_cdev_stream_pause_release, .stream_start = sst_cdev_stream_start, .stream_drop = sst_cdev_stream_drop, .stream_drain = sst_cdev_stream_drain, .stream_partial_drain = sst_cdev_stream_partial_drain, .tstamp = sst_cdev_tstamp, .ack = sst_cdev_ack, .get_caps = sst_cdev_caps, .get_codec_caps = sst_cdev_codec_caps, .set_metadata = sst_cdev_set_metadata, .power = sst_power_control, }; static struct sst_device sst_dsp_device = { .name = "Intel(R) SST LPE", .dev = NULL, .ops = &pcm_ops, .compr_ops = &compr_ops, }; /* * sst_register - function to register DSP * * This functions registers DSP with the platform driver */ int sst_register(struct device *dev) { int ret_val; sst_dsp_device.dev = dev; ret_val = sst_register_dsp(&sst_dsp_device); if (ret_val) dev_err(dev, "Unable to register DSP with platform driver\n"); return ret_val; } int sst_unregister(struct device *dev) { return sst_unregister_dsp(&sst_dsp_device); }
/* linux-master */
/* sound/soc/intel/atom/sst/sst_drv_interface.c */
// SPDX-License-Identifier: GPL-2.0-only /* * sst_stream.c - Intel SST Driver for audio engine * * Copyright (C) 2008-14 Intel Corp * Authors: Vinod Koul <[email protected]> * Harsha Priya <[email protected]> * Dharageswari R <[email protected]> * KP Jeeja <[email protected]> * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/pci.h> #include <linux/firmware.h> #include <linux/sched.h> #include <linux/delay.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <sound/compress_driver.h> #include <asm/platform_sst_audio.h> #include "../sst-mfld-platform.h" #include "sst.h" int sst_alloc_stream_mrfld(struct intel_sst_drv *sst_drv_ctx, void *params) { struct snd_pcm_params *pcm_params; struct snd_sst_params *str_params; struct snd_sst_tstamp fw_tstamp; struct stream_info *str_info; int i, num_ch, str_id; dev_dbg(sst_drv_ctx->dev, "Enter\n"); str_params = (struct snd_sst_params *)params; str_id = str_params->stream_id; str_info = get_stream_info(sst_drv_ctx, str_id); if (!str_info) return -EINVAL; memset(&str_info->alloc_param, 0, sizeof(str_info->alloc_param)); str_info->alloc_param.operation = str_params->ops; str_info->alloc_param.codec_type = str_params->codec; str_info->alloc_param.sg_count = str_params->aparams.sg_count; str_info->alloc_param.ring_buf_info[0].addr = str_params->aparams.ring_buf_info[0].addr; str_info->alloc_param.ring_buf_info[0].size = str_params->aparams.ring_buf_info[0].size; str_info->alloc_param.frag_size = str_params->aparams.frag_size; memcpy(&str_info->alloc_param.codec_params, &str_params->sparams, sizeof(struct snd_sst_stream_params)); /* * fill channel map params for multichannel support. * Ideally channel map should be received from upper layers * for multichannel support. * Currently hardcoding as per FW reqm. 
*/ num_ch = sst_get_num_channel(str_params); pcm_params = &str_info->alloc_param.codec_params.uc.pcm_params; for (i = 0; i < 8; i++) { if (i < num_ch) pcm_params->channel_map[i] = i; else pcm_params->channel_map[i] = 0xff; } sst_drv_ctx->streams[str_id].status = STREAM_INIT; sst_drv_ctx->streams[str_id].prev = STREAM_UN_INIT; sst_drv_ctx->streams[str_id].pipe_id = str_params->device_type; sst_drv_ctx->streams[str_id].task_id = str_params->task; sst_drv_ctx->streams[str_id].num_ch = num_ch; if (sst_drv_ctx->info.lpe_viewpt_rqd) str_info->alloc_param.ts = sst_drv_ctx->info.mailbox_start + sst_drv_ctx->tstamp + (str_id * sizeof(fw_tstamp)); else str_info->alloc_param.ts = sst_drv_ctx->mailbox_add + sst_drv_ctx->tstamp + (str_id * sizeof(fw_tstamp)); dev_dbg(sst_drv_ctx->dev, "alloc tstamp location = 0x%x\n", str_info->alloc_param.ts); dev_dbg(sst_drv_ctx->dev, "assigned pipe id 0x%x to task %d\n", str_info->pipe_id, str_info->task_id); return sst_realloc_stream(sst_drv_ctx, str_id); } /** * sst_realloc_stream - Send msg for (re-)allocating a stream using the * @sst_drv_ctx: intel_sst_drv context pointer * @str_id: stream ID * * Send a msg for (re-)allocating a stream using the parameters previously * passed to sst_alloc_stream_mrfld() for the same stream ID. * Return: 0 or negative errno value. 
*/
int sst_realloc_stream(struct intel_sst_drv *sst_drv_ctx, int str_id)
{
	struct snd_sst_alloc_response *response;
	struct stream_info *str_info;
	void *data = NULL;
	int ret;

	str_info = get_stream_info(sst_drv_ctx, str_id);
	if (!str_info)
		return -EINVAL;

	dev_dbg(sst_drv_ctx->dev, "Alloc for str %d pipe %#x\n",
		str_id, str_info->pipe_id);
	/* blocking alloc request; any FW reply payload comes back in 'data' */
	ret = sst_prepare_and_post_msg(sst_drv_ctx, str_info->task_id, IPC_CMD,
			IPC_IA_ALLOC_STREAM_MRFLD, str_info->pipe_id,
			sizeof(str_info->alloc_param), &str_info->alloc_param,
			&data, true, true, false, true);

	if (ret < 0) {
		dev_err(sst_drv_ctx->dev, "FW alloc failed ret %d\n", ret);
		/* alloc failed, so reset the state to uninit */
		str_info->status = STREAM_UN_INIT;
		str_id = ret;	/* propagate the error as the return value */
	} else if (data) {
		response = (struct snd_sst_alloc_response *)data;
		ret = response->str_type.result;
		if (!ret)
			goto out;
		dev_err(sst_drv_ctx->dev, "FW alloc failed ret %d\n", ret);
		if (ret == SST_ERR_STREAM_IN_USE) {
			dev_err(sst_drv_ctx->dev,
				"FW not in clean state, send free for:%d\n",
				str_id);
			/* best-effort free so a later re-alloc can succeed */
			sst_free_stream(sst_drv_ctx, str_id);
		}
		str_id = -ret;	/* FW result codes are positive; negate */
	}
out:
	kfree(data);
	return str_id;
}

/**
 * sst_start_stream - Send msg for a starting stream
 * @sst_drv_ctx: intel_sst_drv context pointer
 * @str_id: stream ID
 *
 * This function is called by any function which wants to start
 * a stream.
*/ int sst_start_stream(struct intel_sst_drv *sst_drv_ctx, int str_id) { int retval = 0; struct stream_info *str_info; u16 data = 0; dev_dbg(sst_drv_ctx->dev, "sst_start_stream for %d\n", str_id); str_info = get_stream_info(sst_drv_ctx, str_id); if (!str_info) return -EINVAL; if (str_info->status != STREAM_RUNNING) return -EBADRQC; retval = sst_prepare_and_post_msg(sst_drv_ctx, str_info->task_id, IPC_CMD, IPC_IA_START_STREAM_MRFLD, str_info->pipe_id, sizeof(u16), &data, NULL, true, true, true, false); return retval; } int sst_send_byte_stream_mrfld(struct intel_sst_drv *sst_drv_ctx, struct snd_sst_bytes_v2 *bytes) { struct ipc_post *msg = NULL; u32 length; int pvt_id, ret = 0; struct sst_block *block = NULL; u8 bytes_block = bytes->block; dev_dbg(sst_drv_ctx->dev, "type:%u ipc_msg:%u block:%u task_id:%u pipe: %#x length:%#x\n", bytes->type, bytes->ipc_msg, bytes_block, bytes->task_id, bytes->pipe_id, bytes->len); if (sst_create_ipc_msg(&msg, true)) return -ENOMEM; pvt_id = sst_assign_pvt_id(sst_drv_ctx); sst_fill_header_mrfld(&msg->mrfld_header, bytes->ipc_msg, bytes->task_id, 1, pvt_id); msg->mrfld_header.p.header_high.part.res_rqd = bytes_block; length = bytes->len; msg->mrfld_header.p.header_low_payload = length; dev_dbg(sst_drv_ctx->dev, "length is %d\n", length); memcpy(msg->mailbox_data, &bytes->bytes, bytes->len); if (bytes_block) { block = sst_create_block(sst_drv_ctx, bytes->ipc_msg, pvt_id); if (block == NULL) { kfree(msg); ret = -ENOMEM; goto out; } } sst_add_to_dispatch_list_and_post(sst_drv_ctx, msg); dev_dbg(sst_drv_ctx->dev, "msg->mrfld_header.p.header_low_payload:%d", msg->mrfld_header.p.header_low_payload); if (bytes_block) { ret = sst_wait_timeout(sst_drv_ctx, block); if (ret) { dev_err(sst_drv_ctx->dev, "fw returned err %d\n", ret); sst_free_block(sst_drv_ctx, block); goto out; } } if (bytes->type == SND_SST_BYTES_GET) { /* * copy the reply and send back * we need to update only sz and payload */ if (bytes_block) { unsigned char *r = block->data; 
dev_dbg(sst_drv_ctx->dev, "read back %d bytes", bytes->len); memcpy(bytes->bytes, r, bytes->len); } } if (bytes_block) sst_free_block(sst_drv_ctx, block); out: test_and_clear_bit(pvt_id, &sst_drv_ctx->pvt_id); return ret; } /** * sst_pause_stream - Send msg for a pausing stream * @sst_drv_ctx: intel_sst_drv context pointer * @str_id: stream ID * * This function is called by any function which wants to pause * an already running stream. */ int sst_pause_stream(struct intel_sst_drv *sst_drv_ctx, int str_id) { int retval = 0; struct stream_info *str_info; dev_dbg(sst_drv_ctx->dev, "SST DBG:sst_pause_stream for %d\n", str_id); str_info = get_stream_info(sst_drv_ctx, str_id); if (!str_info) return -EINVAL; if (str_info->status == STREAM_PAUSED) return 0; if (str_info->status == STREAM_RUNNING || str_info->status == STREAM_INIT) { if (str_info->prev == STREAM_UN_INIT) return -EBADRQC; retval = sst_prepare_and_post_msg(sst_drv_ctx, str_info->task_id, IPC_CMD, IPC_IA_PAUSE_STREAM_MRFLD, str_info->pipe_id, 0, NULL, NULL, true, true, false, true); if (retval == 0) { str_info->prev = str_info->status; str_info->status = STREAM_PAUSED; } else if (retval == -SST_ERR_INVALID_STREAM_ID) { retval = -EINVAL; mutex_lock(&sst_drv_ctx->sst_lock); sst_clean_stream(str_info); mutex_unlock(&sst_drv_ctx->sst_lock); } } else { retval = -EBADRQC; dev_dbg(sst_drv_ctx->dev, "SST DBG:BADRQC for stream\n"); } return retval; } /** * sst_resume_stream - Send msg for resuming stream * @sst_drv_ctx: intel_sst_drv context pointer * @str_id: stream ID * * This function is called by any function which wants to resume * an already paused stream. 
*/ int sst_resume_stream(struct intel_sst_drv *sst_drv_ctx, int str_id) { int retval = 0; struct stream_info *str_info; dev_dbg(sst_drv_ctx->dev, "SST DBG:sst_resume_stream for %d\n", str_id); str_info = get_stream_info(sst_drv_ctx, str_id); if (!str_info) return -EINVAL; if (str_info->status == STREAM_RUNNING) return 0; if (str_info->resume_status == STREAM_PAUSED && str_info->resume_prev == STREAM_RUNNING) { /* * Stream was running before suspend and re-created on resume, * start it to get back to running state. */ dev_dbg(sst_drv_ctx->dev, "restart recreated stream after resume\n"); str_info->status = STREAM_RUNNING; str_info->prev = STREAM_PAUSED; retval = sst_start_stream(sst_drv_ctx, str_id); str_info->resume_status = STREAM_UN_INIT; } else if (str_info->resume_status == STREAM_PAUSED && str_info->resume_prev == STREAM_INIT) { /* * Stream was idle before suspend and re-created on resume, * keep it as is. */ dev_dbg(sst_drv_ctx->dev, "leaving recreated stream idle after resume\n"); str_info->status = STREAM_INIT; str_info->prev = STREAM_PAUSED; str_info->resume_status = STREAM_UN_INIT; } else if (str_info->status == STREAM_PAUSED) { retval = sst_prepare_and_post_msg(sst_drv_ctx, str_info->task_id, IPC_CMD, IPC_IA_RESUME_STREAM_MRFLD, str_info->pipe_id, 0, NULL, NULL, true, true, false, true); if (!retval) { if (str_info->prev == STREAM_RUNNING) str_info->status = STREAM_RUNNING; else str_info->status = STREAM_INIT; str_info->prev = STREAM_PAUSED; } else if (retval == -SST_ERR_INVALID_STREAM_ID) { retval = -EINVAL; mutex_lock(&sst_drv_ctx->sst_lock); sst_clean_stream(str_info); mutex_unlock(&sst_drv_ctx->sst_lock); } } else { retval = -EBADRQC; dev_err(sst_drv_ctx->dev, "SST ERR: BADQRC for stream\n"); } return retval; } /** * sst_drop_stream - Send msg for stopping stream * @sst_drv_ctx: intel_sst_drv context pointer * @str_id: stream ID * * This function is called by any function which wants to stop * a stream. 
*/
int sst_drop_stream(struct intel_sst_drv *sst_drv_ctx, int str_id)
{
	int retval = 0;
	struct stream_info *str_info;

	dev_dbg(sst_drv_ctx->dev, "SST DBG:sst_drop_stream for %d\n", str_id);
	str_info = get_stream_info(sst_drv_ctx, str_id);
	if (!str_info)
		return -EINVAL;

	if (str_info->status != STREAM_UN_INIT) {
		/* move the stream back to its freshly-allocated state */
		str_info->prev = STREAM_UN_INIT;
		str_info->status = STREAM_INIT;
		str_info->cumm_bytes = 0;
		retval = sst_prepare_and_post_msg(sst_drv_ctx,
				str_info->task_id, IPC_CMD,
				IPC_IA_DROP_STREAM_MRFLD, str_info->pipe_id,
				0, NULL, NULL, true, true, true, false);
	} else {
		retval = -EBADRQC;
		dev_dbg(sst_drv_ctx->dev, "BADQRC for stream, state %x\n",
			str_info->status);
	}
	return retval;
}

/**
 * sst_drain_stream - Send msg for draining stream
 * @sst_drv_ctx: intel_sst_drv context pointer
 * @str_id: stream ID
 * @partial_drain: boolean indicating if a gapless transition is taking place
 *
 * This function is called by any function which wants to drain
 * a stream.
 */
int sst_drain_stream(struct intel_sst_drv *sst_drv_ctx,
		int str_id, bool partial_drain)
{
	int retval = 0;
	struct stream_info *str_info;

	dev_dbg(sst_drv_ctx->dev, "SST DBG:sst_drain_stream for %d\n", str_id);
	str_info = get_stream_info(sst_drv_ctx, str_id);
	if (!str_info)
		return -EINVAL;

	/* draining only makes sense for an active (non-freed) stream */
	if (str_info->status != STREAM_RUNNING &&
	    str_info->status != STREAM_INIT &&
	    str_info->status != STREAM_PAUSED) {
		dev_err(sst_drv_ctx->dev, "SST ERR: BADQRC for stream = %d\n",
			str_info->status);
		return -EBADRQC;
	}

	retval = sst_prepare_and_post_msg(sst_drv_ctx, str_info->task_id,
			IPC_CMD, IPC_IA_DRAIN_STREAM_MRFLD, str_info->pipe_id,
			sizeof(u8), &partial_drain, NULL, true, true,
			false, false);
	/*
	 * with the new non-blocked drain implementation in core we don't need
	 * to wait for response, and need to only invoke callback for drain
	 * complete
	 */
	return retval;
}

/**
 * sst_free_stream - Frees a stream
 * @sst_drv_ctx: intel_sst_drv context pointer
 * @str_id: stream ID
 *
 * This function is called by any function which wants to free
 * a stream.
*/ int sst_free_stream(struct intel_sst_drv *sst_drv_ctx, int str_id) { int retval = 0; struct stream_info *str_info; dev_dbg(sst_drv_ctx->dev, "SST DBG:sst_free_stream for %d\n", str_id); mutex_lock(&sst_drv_ctx->sst_lock); if (sst_drv_ctx->sst_state == SST_RESET) { mutex_unlock(&sst_drv_ctx->sst_lock); return -ENODEV; } mutex_unlock(&sst_drv_ctx->sst_lock); str_info = get_stream_info(sst_drv_ctx, str_id); if (!str_info) return -EINVAL; mutex_lock(&str_info->lock); if (str_info->status != STREAM_UN_INIT) { str_info->prev = str_info->status; str_info->status = STREAM_UN_INIT; mutex_unlock(&str_info->lock); dev_dbg(sst_drv_ctx->dev, "Free for str %d pipe %#x\n", str_id, str_info->pipe_id); retval = sst_prepare_and_post_msg(sst_drv_ctx, str_info->task_id, IPC_CMD, IPC_IA_FREE_STREAM_MRFLD, str_info->pipe_id, 0, NULL, NULL, true, true, false, true); dev_dbg(sst_drv_ctx->dev, "sst: wait for free returned %d\n", retval); mutex_lock(&sst_drv_ctx->sst_lock); sst_clean_stream(str_info); mutex_unlock(&sst_drv_ctx->sst_lock); dev_dbg(sst_drv_ctx->dev, "SST DBG:Stream freed\n"); } else { mutex_unlock(&str_info->lock); retval = -EBADRQC; dev_dbg(sst_drv_ctx->dev, "SST DBG:BADQRC for stream\n"); } return retval; }
/* linux-master */
/* sound/soc/intel/atom/sst/sst_stream.c */
// SPDX-License-Identifier: GPL-2.0-only /* * sst_dsp.c - Intel SST Driver for audio engine * * Copyright (C) 2008-14 Intel Corp * Authors: Vinod Koul <[email protected]> * Harsha Priya <[email protected]> * Dharageswari R <[email protected]> * KP Jeeja <[email protected]> * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This file contains all dsp controlling functions like firmware download, * setting/resetting dsp cores, etc */ #include <linux/pci.h> #include <linux/delay.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/firmware.h> #include <linux/dmaengine.h> #include <linux/pm_qos.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <sound/compress_driver.h> #include <asm/platform_sst_audio.h> #include "../sst-mfld-platform.h" #include "sst.h" void memcpy32_toio(void __iomem *dst, const void *src, int count) { /* __iowrite32_copy uses 32-bit count values so divide by 4 for * right count in words */ __iowrite32_copy(dst, src, count / 4); } void memcpy32_fromio(void *dst, const void __iomem *src, int count) { /* __ioread32_copy uses 32-bit count values so divide by 4 for * right count in words */ __ioread32_copy(dst, src, count / 4); } /** * intel_sst_reset_dsp_mrfld - Resetting SST DSP * @sst_drv_ctx: intel_sst_drv context pointer * * This resets DSP in case of MRFLD platfroms */ int intel_sst_reset_dsp_mrfld(struct intel_sst_drv *sst_drv_ctx) { union config_status_reg_mrfld csr; dev_dbg(sst_drv_ctx->dev, "sst: Resetting the DSP in mrfld\n"); csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR); dev_dbg(sst_drv_ctx->dev, "value:0x%llx\n", csr.full); csr.full |= 0x7; sst_shim_write64(sst_drv_ctx->shim, SST_CSR, csr.full); csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR); dev_dbg(sst_drv_ctx->dev, "value:0x%llx\n", csr.full); csr.full &= ~(0x1); sst_shim_write64(sst_drv_ctx->shim, SST_CSR, csr.full); 
csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR); dev_dbg(sst_drv_ctx->dev, "value:0x%llx\n", csr.full); return 0; } /** * sst_start_mrfld - Start the SST DSP processor * @sst_drv_ctx: intel_sst_drv context pointer * * This starts the DSP in MERRIFIELD platfroms */ int sst_start_mrfld(struct intel_sst_drv *sst_drv_ctx) { union config_status_reg_mrfld csr; dev_dbg(sst_drv_ctx->dev, "sst: Starting the DSP in mrfld LALALALA\n"); csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR); dev_dbg(sst_drv_ctx->dev, "value:0x%llx\n", csr.full); csr.full |= 0x7; sst_shim_write64(sst_drv_ctx->shim, SST_CSR, csr.full); csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR); dev_dbg(sst_drv_ctx->dev, "value:0x%llx\n", csr.full); csr.part.xt_snoop = 1; csr.full &= ~(0x5); sst_shim_write64(sst_drv_ctx->shim, SST_CSR, csr.full); csr.full = sst_shim_read64(sst_drv_ctx->shim, SST_CSR); dev_dbg(sst_drv_ctx->dev, "sst: Starting the DSP_merrifield:%llx\n", csr.full); return 0; } static int sst_validate_fw_image(struct intel_sst_drv *ctx, unsigned long size, struct fw_module_header **module, u32 *num_modules) { struct sst_fw_header *header; const void *sst_fw_in_mem = ctx->fw_in_mem; dev_dbg(ctx->dev, "Enter\n"); /* Read the header information from the data pointer */ header = (struct sst_fw_header *)sst_fw_in_mem; dev_dbg(ctx->dev, "header sign=%s size=%x modules=%x fmt=%x size=%zx\n", header->signature, header->file_size, header->modules, header->file_format, sizeof(*header)); /* verify FW */ if ((strncmp(header->signature, SST_FW_SIGN, 4) != 0) || (size != header->file_size + sizeof(*header))) { /* Invalid FW signature */ dev_err(ctx->dev, "InvalidFW sign/filesize mismatch\n"); return -EINVAL; } *num_modules = header->modules; *module = (void *)sst_fw_in_mem + sizeof(*header); return 0; } /* * sst_fill_memcpy_list - Fill the memcpy list * * @memcpy_list: List to be filled * @destn: Destination addr to be filled in the list * @src: Source addr to be filled in the list * @size: 
Size to be filled in the list * * Adds the node to the list after required fields * are populated in the node */ static int sst_fill_memcpy_list(struct list_head *memcpy_list, void *destn, const void *src, u32 size, bool is_io) { struct sst_memcpy_list *listnode; listnode = kzalloc(sizeof(*listnode), GFP_KERNEL); if (listnode == NULL) return -ENOMEM; listnode->dstn = destn; listnode->src = src; listnode->size = size; listnode->is_io = is_io; list_add_tail(&listnode->memcpylist, memcpy_list); return 0; } /** * sst_parse_module_memcpy - Parse audio FW modules and populate the memcpy list * * @sst_drv_ctx : driver context * @module : FW module header * @memcpy_list : Pointer to the list to be populated * Create the memcpy list as the number of block to be copied * returns error or 0 if module sizes are proper */ static int sst_parse_module_memcpy(struct intel_sst_drv *sst_drv_ctx, struct fw_module_header *module, struct list_head *memcpy_list) { struct fw_block_info *block; u32 count; int ret_val = 0; void __iomem *ram_iomem; dev_dbg(sst_drv_ctx->dev, "module sign %s size %x blocks %x type %x\n", module->signature, module->mod_size, module->blocks, module->type); dev_dbg(sst_drv_ctx->dev, "module entrypoint 0x%x\n", module->entry_point); block = (void *)module + sizeof(*module); for (count = 0; count < module->blocks; count++) { if (block->size <= 0) { dev_err(sst_drv_ctx->dev, "block size invalid\n"); return -EINVAL; } switch (block->type) { case SST_IRAM: ram_iomem = sst_drv_ctx->iram; break; case SST_DRAM: ram_iomem = sst_drv_ctx->dram; break; case SST_DDR: ram_iomem = sst_drv_ctx->ddr; break; case SST_CUSTOM_INFO: block = (void *)block + sizeof(*block) + block->size; continue; default: dev_err(sst_drv_ctx->dev, "wrong ram type0x%x in block0x%x\n", block->type, count); return -EINVAL; } ret_val = sst_fill_memcpy_list(memcpy_list, ram_iomem + block->ram_offset, (void *)block + sizeof(*block), block->size, 1); if (ret_val) return ret_val; block = (void *)block + 
sizeof(*block) + block->size; } return 0; } /** * sst_parse_fw_memcpy - parse the firmware image & populate the list for memcpy * * @ctx : pointer to drv context * @size : size of the firmware * @fw_list : pointer to list_head to be populated * This function parses the FW image and saves the parsed image in the list * for memcpy */ static int sst_parse_fw_memcpy(struct intel_sst_drv *ctx, unsigned long size, struct list_head *fw_list) { struct fw_module_header *module; u32 count, num_modules; int ret_val; ret_val = sst_validate_fw_image(ctx, size, &module, &num_modules); if (ret_val) return ret_val; for (count = 0; count < num_modules; count++) { ret_val = sst_parse_module_memcpy(ctx, module, fw_list); if (ret_val) return ret_val; module = (void *)module + sizeof(*module) + module->mod_size; } return 0; } /** * sst_do_memcpy - function initiates the memcpy * * @memcpy_list: Pter to memcpy list on which the memcpy needs to be initiated * * Triggers the memcpy */ static void sst_do_memcpy(struct list_head *memcpy_list) { struct sst_memcpy_list *listnode; list_for_each_entry(listnode, memcpy_list, memcpylist) { if (listnode->is_io) memcpy32_toio((void __iomem *)listnode->dstn, listnode->src, listnode->size); else memcpy(listnode->dstn, listnode->src, listnode->size); } } void sst_memcpy_free_resources(struct intel_sst_drv *sst_drv_ctx) { struct sst_memcpy_list *listnode, *tmplistnode; /* Free the list */ list_for_each_entry_safe(listnode, tmplistnode, &sst_drv_ctx->memcpy_list, memcpylist) { list_del(&listnode->memcpylist); kfree(listnode); } } static int sst_cache_and_parse_fw(struct intel_sst_drv *sst, const struct firmware *fw) { int retval = 0; sst->fw_in_mem = kzalloc(fw->size, GFP_KERNEL); if (!sst->fw_in_mem) { retval = -ENOMEM; goto end_release; } dev_dbg(sst->dev, "copied fw to %p", sst->fw_in_mem); dev_dbg(sst->dev, "phys: %lx", (unsigned long)virt_to_phys(sst->fw_in_mem)); memcpy(sst->fw_in_mem, fw->data, fw->size); retval = sst_parse_fw_memcpy(sst, 
fw->size, &sst->memcpy_list); if (retval) { dev_err(sst->dev, "Failed to parse fw\n"); kfree(sst->fw_in_mem); sst->fw_in_mem = NULL; } end_release: release_firmware(fw); return retval; } void sst_firmware_load_cb(const struct firmware *fw, void *context) { struct intel_sst_drv *ctx = context; dev_dbg(ctx->dev, "Enter\n"); if (fw == NULL) { dev_err(ctx->dev, "request fw failed\n"); return; } mutex_lock(&ctx->sst_lock); if (ctx->sst_state != SST_RESET || ctx->fw_in_mem != NULL) { release_firmware(fw); mutex_unlock(&ctx->sst_lock); return; } dev_dbg(ctx->dev, "Request Fw completed\n"); sst_cache_and_parse_fw(ctx, fw); mutex_unlock(&ctx->sst_lock); } /* * sst_request_fw - requests audio fw from kernel and saves a copy * * This function requests the SST FW from the kernel, parses it and * saves a copy in the driver context */ static int sst_request_fw(struct intel_sst_drv *sst) { int retval = 0; const struct firmware *fw; retval = request_firmware(&fw, sst->firmware_name, sst->dev); if (retval) { dev_err(sst->dev, "request fw failed %d\n", retval); return retval; } if (fw == NULL) { dev_err(sst->dev, "fw is returning as null\n"); return -EINVAL; } mutex_lock(&sst->sst_lock); retval = sst_cache_and_parse_fw(sst, fw); mutex_unlock(&sst->sst_lock); return retval; } /* * Writing the DDR physical base to DCCM offset * so that FW can use it to setup TLB */ static void sst_dccm_config_write(void __iomem *dram_base, unsigned int ddr_base) { void __iomem *addr; u32 bss_reset = 0; addr = (void __iomem *)(dram_base + MRFLD_FW_DDR_BASE_OFFSET); memcpy32_toio(addr, (void *)&ddr_base, sizeof(u32)); bss_reset |= (1 << MRFLD_FW_BSS_RESET_BIT); addr = (void __iomem *)(dram_base + MRFLD_FW_FEATURE_BASE_OFFSET); memcpy32_toio(addr, &bss_reset, sizeof(u32)); } void sst_post_download_mrfld(struct intel_sst_drv *ctx) { sst_dccm_config_write(ctx->dram, ctx->ddr_base); dev_dbg(ctx->dev, "config written to DCCM\n"); } /** * sst_load_fw - function to load FW into DSP * @sst_drv_ctx: 
intel_sst_drv context pointer * * Transfers the FW to DSP using dma/memcpy */ int sst_load_fw(struct intel_sst_drv *sst_drv_ctx) { int ret_val = 0; struct sst_block *block; dev_dbg(sst_drv_ctx->dev, "sst_load_fw\n"); if (sst_drv_ctx->sst_state != SST_RESET) return -EAGAIN; if (!sst_drv_ctx->fw_in_mem) { dev_dbg(sst_drv_ctx->dev, "sst: FW not in memory retry to download\n"); ret_val = sst_request_fw(sst_drv_ctx); if (ret_val) return ret_val; } block = sst_create_block(sst_drv_ctx, 0, FW_DWNL_ID); if (block == NULL) return -ENOMEM; /* Prevent C-states beyond C6 */ cpu_latency_qos_update_request(sst_drv_ctx->qos, 0); sst_drv_ctx->sst_state = SST_FW_LOADING; ret_val = sst_drv_ctx->ops->reset(sst_drv_ctx); if (ret_val) goto restore; sst_do_memcpy(&sst_drv_ctx->memcpy_list); /* Write the DRAM/DCCM config before enabling FW */ if (sst_drv_ctx->ops->post_download) sst_drv_ctx->ops->post_download(sst_drv_ctx); /* bring sst out of reset */ ret_val = sst_drv_ctx->ops->start(sst_drv_ctx); if (ret_val) goto restore; ret_val = sst_wait_timeout(sst_drv_ctx, block); if (ret_val) { dev_err(sst_drv_ctx->dev, "fw download failed %d\n" , ret_val); /* FW download failed due to timeout */ ret_val = -EBUSY; } restore: /* Re-enable Deeper C-states beyond C6 */ cpu_latency_qos_update_request(sst_drv_ctx->qos, PM_QOS_DEFAULT_VALUE); sst_free_block(sst_drv_ctx, block); dev_dbg(sst_drv_ctx->dev, "fw load successful!!!\n"); if (sst_drv_ctx->ops->restore_dsp_context) sst_drv_ctx->ops->restore_dsp_context(); sst_drv_ctx->sst_state = SST_FW_RUNNING; return ret_val; }
linux-master
sound/soc/intel/atom/sst/sst_loader.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * sst_pci.c - SST (LPE) driver init file for pci enumeration.
 *
 * Copyright (C) 2008-14	Intel Corp
 * Authors:	Vinod Koul <[email protected]>
 *		Harsha Priya <[email protected]>
 *		Dharageswari R <[email protected]>
 *		KP Jeeja <[email protected]>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/firmware.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <asm/platform_sst_audio.h>
#include "../sst-mfld-platform.h"
#include "sst.h"

/*
 * Map the PCI BARs used by the SST engine: DDR (Merrifield only), SHIM,
 * mailbox SRAM, IRAM and DRAM. All mappings use managed pcim_iomap.
 */
static int sst_platform_get_resources(struct intel_sst_drv *ctx)
{
	int ddr_base, ret = 0;
	struct pci_dev *pci = ctx->pci;

	ret = pci_request_regions(pci, SST_DRV_NAME);
	if (ret)
		return ret;

	/* map registers */
	/* DDR base */
	if (ctx->dev_id == PCI_DEVICE_ID_INTEL_SST_TNG) {
		ctx->ddr_base = pci_resource_start(pci, 0);
		/* check that the relocated IMR base matches with FW Binary */
		ddr_base = relocate_imr_addr_mrfld(ctx->ddr_base);
		if (!ctx->pdata->lib_info) {
			dev_err(ctx->dev, "lib_info pointer NULL\n");
			ret = -EINVAL;
			goto do_release_regions;
		}
		if (ddr_base != ctx->pdata->lib_info->mod_base) {
			dev_err(ctx->dev,
					"FW LSP DDR BASE does not match with IFWI\n");
			ret = -EINVAL;
			goto do_release_regions;
		}
		ctx->ddr_end = pci_resource_end(pci, 0);

		ctx->ddr = pcim_iomap(pci, 0,
					pci_resource_len(pci, 0));
		if (!ctx->ddr) {
			ret = -EINVAL;
			goto do_release_regions;
		}
		dev_dbg(ctx->dev, "sst: DDR Ptr %p\n", ctx->ddr);
	} else {
		ctx->ddr = NULL;
	}
	/* SHIM */
	ctx->shim_phy_add = pci_resource_start(pci, 1);
	ctx->shim = pcim_iomap(pci, 1, pci_resource_len(pci, 1));
	if (!ctx->shim) {
		ret = -EINVAL;
		goto do_release_regions;
	}
	dev_dbg(ctx->dev, "SST Shim Ptr %p\n", ctx->shim);

	/* Shared SRAM */
	ctx->mailbox_add = pci_resource_start(pci, 2);
	ctx->mailbox = pcim_iomap(pci, 2, pci_resource_len(pci, 2));
	if (!ctx->mailbox) {
		ret = -EINVAL;
		goto do_release_regions;
	}
	dev_dbg(ctx->dev, "SRAM Ptr %p\n", ctx->mailbox);

	/* IRAM */
	ctx->iram_end = pci_resource_end(pci, 3);
	ctx->iram_base = pci_resource_start(pci, 3);
	ctx->iram = pcim_iomap(pci, 3, pci_resource_len(pci, 3));
	if (!ctx->iram) {
		ret = -EINVAL;
		goto do_release_regions;
	}
	dev_dbg(ctx->dev, "IRAM Ptr %p\n", ctx->iram);

	/* DRAM */
	ctx->dram_end = pci_resource_end(pci, 4);
	ctx->dram_base = pci_resource_start(pci, 4);
	ctx->dram = pcim_iomap(pci, 4, pci_resource_len(pci, 4));
	if (!ctx->dram) {
		ret = -EINVAL;
		goto do_release_regions;
	}
	dev_dbg(ctx->dev, "DRAM Ptr %p\n", ctx->dram);
	/*
	 * NOTE(review): this label is reached on the success path too, so
	 * the PCI regions are released even when every mapping succeeded
	 * (intel_sst_remove() releases them again). The pcim_iomap()
	 * mappings themselves stay device-managed — confirm this early
	 * release is intentional.
	 */
do_release_regions:
	pci_release_regions(pci);
	return ret;
}

/*
 * intel_sst_probe - PCI probe function
 *
 * @pci: PCI device structure
 * @pci_id: PCI device ID structure
 *
 */
static int intel_sst_probe(struct pci_dev *pci,
			const struct pci_device_id *pci_id)
{
	int ret = 0;
	struct intel_sst_drv *sst_drv_ctx;
	struct sst_platform_info *sst_pdata = pci->dev.platform_data;

	dev_dbg(&pci->dev, "Probe for DID %x\n", pci->device);
	ret = sst_alloc_drv_context(&sst_drv_ctx, &pci->dev, pci->device);
	if (ret < 0)
		return ret;

	sst_drv_ctx->pdata = sst_pdata;
	sst_drv_ctx->irq_num = pci->irq;
	/* firmware file name is derived from the PCI device id */
	snprintf(sst_drv_ctx->firmware_name, sizeof(sst_drv_ctx->firmware_name),
			"%s%04x%s", "fw_sst_",
			sst_drv_ctx->dev_id, ".bin");

	ret = sst_context_init(sst_drv_ctx);
	if (ret < 0)
		return ret;

	/* Init the device */
	ret = pcim_enable_device(pci);
	if (ret) {
		dev_err(sst_drv_ctx->dev,
			"device can't be enabled. Returned err: %d\n", ret);
		goto do_free_drv_ctx;
	}
	sst_drv_ctx->pci = pci_dev_get(pci);
	ret = sst_platform_get_resources(sst_drv_ctx);
	if (ret < 0)
		goto do_free_drv_ctx;

	pci_set_drvdata(pci, sst_drv_ctx);
	sst_configure_runtime_pm(sst_drv_ctx);

	return ret;

do_free_drv_ctx:
	sst_context_cleanup(sst_drv_ctx);
	dev_err(sst_drv_ctx->dev, "Probe failed with %d\n", ret);
	return ret;
}

/**
 * intel_sst_remove - PCI remove function
 *
 * @pci:	PCI device structure
 *
 * This function is called by OS when a device is unloaded
 * This frees the interrupt etc
 */
static void intel_sst_remove(struct pci_dev *pci)
{
	struct intel_sst_drv *sst_drv_ctx = pci_get_drvdata(pci);

	sst_context_cleanup(sst_drv_ctx);
	pci_dev_put(sst_drv_ctx->pci);
	pci_release_regions(pci);
	pci_set_drvdata(pci, NULL);
}

/* PCI Routines */
static const struct pci_device_id intel_sst_ids[] = {
	{ PCI_DEVICE_DATA(INTEL, SST_TNG, 0) },
	{ 0, }
};

static struct pci_driver sst_driver = {
	.name = SST_DRV_NAME,
	.id_table = intel_sst_ids,
	.probe = intel_sst_probe,
	.remove = intel_sst_remove,
#ifdef CONFIG_PM
	.driver = {
		.pm = &intel_sst_pm,
	},
#endif
};

module_pci_driver(sst_driver);

MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine PCI Driver");
MODULE_AUTHOR("Vinod Koul <[email protected]>");
MODULE_AUTHOR("Harsha Priya <[email protected]>");
MODULE_AUTHOR("Dharageswari R <[email protected]>");
MODULE_AUTHOR("KP Jeeja <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("sst");
linux-master
sound/soc/intel/atom/sst/sst_pci.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * sst_pvt.c - Intel SST Driver for audio engine
 *
 * Copyright (C) 2008-14	Intel Corp
 * Authors:	Vinod Koul <[email protected]>
 *		Harsha Priya <[email protected]>
 *		Dharageswari R <[email protected]>
 *		KP Jeeja <[email protected]>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kobject.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <sound/asound.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/compress_driver.h>
#include <asm/platform_sst_audio.h>
#include "../sst-mfld-platform.h"
#include "sst.h"

/* 32-bit SHIM register accessors */
int sst_shim_write(void __iomem *addr, int offset, int value)
{
	writel(value, addr + offset);
	return 0;
}

u32 sst_shim_read(void __iomem *addr, int offset)
{
	return readl(addr + offset);
}

/* 64-bit accessors go through memcpy_fromio/toio rather than readq/writeq */
u64 sst_reg_read64(void __iomem *addr, int offset)
{
	u64 val = 0;

	memcpy_fromio(&val, addr + offset, sizeof(val));

	return val;
}

int sst_shim_write64(void __iomem *addr, int offset, u64 value)
{
	memcpy_toio(addr + offset, &value, sizeof(value));
	return 0;
}

u64 sst_shim_read64(void __iomem *addr, int offset)
{
	u64 val = 0;

	memcpy_fromio(&val, addr + offset, sizeof(val));
	return val;
}

/* Set the DSP state under sst_lock */
void sst_set_fw_state_locked(
		struct intel_sst_drv *sst_drv_ctx, int sst_state)
{
	mutex_lock(&sst_drv_ctx->sst_lock);
	sst_drv_ctx->sst_state = sst_state;
	mutex_unlock(&sst_drv_ctx->sst_lock);
}

/*
 * sst_wait_interruptible - wait on event
 *
 * @sst_drv_ctx: Driver context
 * @block: Driver block to wait on
 *
 * This function waits without a timeout (and is interruptable) for a
 * given block event
 */
int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
				struct sst_block *block)
{
	int retval = 0;

	if (!wait_event_interruptible(sst_drv_ctx->wait_queue,
				block->condition)) {
		/* event wake */
		if (block->ret_code < 0) {
			dev_err(sst_drv_ctx->dev,
				"stream failed %d\n", block->ret_code);
			retval = -EBUSY;
		} else {
			dev_dbg(sst_drv_ctx->dev, "event up\n");
			retval = 0;
		}
	} else {
		dev_err(sst_drv_ctx->dev, "signal interrupted\n");
		retval = -EINTR;
	}
	return retval;

}

/*
 * sst_wait_timeout - wait on event for timeout
 *
 * @sst_drv_ctx: Driver context
 * @block: Driver block to wait on
 *
 * This function waits with a timeout value (and is not interruptible) on a
 * given block event
 */
int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx, struct sst_block *block)
{
	int retval = 0;

	/*
	 * NOTE:
	 * Observed that FW processes the alloc msg and replies even
	 * before the alloc thread has finished execution
	 */
	dev_dbg(sst_drv_ctx->dev,
		"waiting for condition %x ipc %d drv_id %d\n",
		block->condition, block->msg_id, block->drv_id);
	if (wait_event_timeout(sst_drv_ctx->wait_queue,
				block->condition,
				msecs_to_jiffies(SST_BLOCK_TIMEOUT))) {
		/* event wake */
		dev_dbg(sst_drv_ctx->dev, "Event wake %x\n",
				block->condition);
		dev_dbg(sst_drv_ctx->dev, "message ret: %d\n",
				block->ret_code);
		/* ret_code carries a positive FW error code; negate for errno */
		retval = -block->ret_code;
	} else {
		block->on = false;
		dev_err(sst_drv_ctx->dev,
			"Wait timed-out condition:%#x, msg_id:%#x fw_state %#x\n",
			block->condition, block->msg_id, sst_drv_ctx->sst_state);
		/* a timeout means the FW is unresponsive: force a reset state */
		sst_drv_ctx->sst_state = SST_RESET;

		retval = -EBUSY;
	}
	return retval;
}

/*
 * sst_create_ipc_msg - create a IPC message
 *
 * @arg: ipc message
 * @large: large or short message
 *
 * this function allocates structures to send a large or short
 * message to the firmware
 */
int sst_create_ipc_msg(struct ipc_post **arg, bool large)
{
	struct ipc_post *msg;

	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;
	if (large) {
		msg->mailbox_data = kzalloc(SST_MAILBOX_SIZE, GFP_ATOMIC);
		if (!msg->mailbox_data) {
			kfree(msg);
			return -ENOMEM;
		}
	} else {
		msg->mailbox_data = NULL;
	}
	msg->is_large = large;
	*arg = msg;
	return 0;
}

/*
 * sst_create_block_and_ipc_msg - Creates IPC message and sst block
 * @arg: passed to sst_create_ipc_message API
 * @large: large or short message
 * @sst_drv_ctx: sst driver context
 * @block: return block allocated
 * @msg_id: IPC
 * @drv_id: stream id or private id
 */
int sst_create_block_and_ipc_msg(struct ipc_post **arg, bool large,
		struct intel_sst_drv *sst_drv_ctx, struct sst_block **block,
		u32 msg_id, u32 drv_id)
{
	int retval;

	retval = sst_create_ipc_msg(arg, large);
	if (retval)
		return retval;
	*block = sst_create_block(sst_drv_ctx, msg_id, drv_id);
	if (*block == NULL) {
		kfree(*arg);
		return -ENOMEM;
	}
	return 0;
}

/*
 * sst_clean_stream - clean the stream context
 *
 * @stream: stream structure
 *
 * this function resets the stream contexts
 * should be called in free
 */
void sst_clean_stream(struct stream_info *stream)
{
	stream->status = STREAM_UN_INIT;
	stream->prev = STREAM_UN_INIT;
	mutex_lock(&stream->lock);
	stream->cumm_bytes = 0;
	mutex_unlock(&stream->lock);
}

/*
 * Build an IPC message (optionally with a DSP header + mailbox payload),
 * post it to the FW and, when @response is set, block for the reply.
 * @data, if non-NULL, receives a kmemdup'd copy of the reply payload
 * that the caller must free.
 */
int sst_prepare_and_post_msg(struct intel_sst_drv *sst,
		int task_id, int ipc_msg, int cmd_id, int pipe_id,
		size_t mbox_data_len, const void *mbox_data, void **data,
		bool large, bool fill_dsp, bool sync, bool response)
{
	struct sst_block *block = NULL;
	struct ipc_post *msg = NULL;
	struct ipc_dsp_hdr dsp_hdr;
	int ret = 0, pvt_id;

	pvt_id = sst_assign_pvt_id(sst);
	if (pvt_id < 0)
		return pvt_id;

	if (response)
		ret = sst_create_block_and_ipc_msg(
				&msg, large, sst, &block, ipc_msg, pvt_id);
	else
		ret = sst_create_ipc_msg(&msg, large);

	if (ret < 0) {
		test_and_clear_bit(pvt_id, &sst->pvt_id);
		return -ENOMEM;
	}

	dev_dbg(sst->dev, "pvt_id = %d, pipe id = %d, task = %d ipc_msg: %d\n",
		 pvt_id, pipe_id, task_id, ipc_msg);
	sst_fill_header_mrfld(&msg->mrfld_header, ipc_msg,
					task_id, large, pvt_id);
	msg->mrfld_header.p.header_low_payload = sizeof(dsp_hdr) + mbox_data_len;
	/* sync messages do not request a FW response */
	msg->mrfld_header.p.header_high.part.res_rqd = !sync;
	dev_dbg(sst->dev, "header:%x\n",
			msg->mrfld_header.p.header_high.full);
	dev_dbg(sst->dev, "response rqd: %x",
			msg->mrfld_header.p.header_high.part.res_rqd);
	dev_dbg(sst->dev, "msg->mrfld_header.p.header_low_payload:%d",
			msg->mrfld_header.p.header_low_payload);

	if (fill_dsp) {
		sst_fill_header_dsp(&dsp_hdr, cmd_id, pipe_id, mbox_data_len);
		memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
		if (mbox_data_len) {
			memcpy(msg->mailbox_data + sizeof(dsp_hdr),
					mbox_data, mbox_data_len);
		}
	}

	if (sync)
		sst->ops->post_message(sst, msg, true);
	else
		sst_add_to_dispatch_list_and_post(sst, msg);

	if (response) {
		ret = sst_wait_timeout(sst, block);
		if (ret < 0)
			goto out;

		if (data && block->data) {
			*data = kmemdup(block->data, block->size, GFP_KERNEL);
			if (!*data) {
				ret = -ENOMEM;
				goto out;
			}
		}
	}
out:
	if (response)
		sst_free_block(sst, block);
	test_and_clear_bit(pvt_id, &sst->pvt_id);
	return ret;
}

int sst_pm_runtime_put(struct intel_sst_drv *sst_drv)
{
	int ret;

	pm_runtime_mark_last_busy(sst_drv->dev);
	ret = pm_runtime_put_autosuspend(sst_drv->dev);
	if (ret < 0)
		return ret;
	return 0;
}

/* Fill the Merrifield IPC header: marks the message busy and not yet done */
void sst_fill_header_mrfld(union ipc_header_mrfld *header, int msg,
		int task_id, int large, int drv_id)
{
	header->full = 0;
	header->p.header_high.part.msg_id = msg;
	header->p.header_high.part.task_id = task_id;
	header->p.header_high.part.large = large;
	header->p.header_high.part.drv_id = drv_id;
	header->p.header_high.part.done = 0;
	header->p.header_high.part.busy = 1;
	header->p.header_high.part.res_rqd = 1;
}

/* Fill the DSP-level header that prefixes the mailbox payload */
void sst_fill_header_dsp(struct ipc_dsp_hdr *dsp, int msg,
					int pipe_id, int len)
{
	dsp->cmd_id = msg;
	dsp->mod_index_id = 0xff;
	dsp->pipe_id = pipe_id;
	dsp->length = len;
	dsp->mod_id = 0;
}

#define SST_MAX_BLOCKS 15
/*
 * sst_assign_pvt_id - assign a pvt id for stream
 *
 * @sst_drv_ctx : driver context
 *
 * this function assigns a private id for calls that dont have stream
 * context yet, should be called with lock held
 * uses bits for the id, and finds first free bits and assigns that
 */
int sst_assign_pvt_id(struct intel_sst_drv *drv)
{
	int local;

	spin_lock(&drv->block_lock);
	/* find first zero index from lsb */
	local = ffz(drv->pvt_id);
	dev_dbg(drv->dev, "pvt_id assigned --> %d\n", local);
	if (local >= SST_MAX_BLOCKS) {
		spin_unlock(&drv->block_lock);
		dev_err(drv->dev, "PVT _ID error: no free id blocks ");
		return -EINVAL;
	}
	/* toggle the index */
	change_bit(local, &drv->pvt_id);
	spin_unlock(&drv->block_lock);
	return local;
}

/* Stream ids are 1-based; 0 and > max_streams are invalid */
int sst_validate_strid(
		struct intel_sst_drv *sst_drv_ctx, int str_id)
{
	if (str_id <= 0 || str_id > sst_drv_ctx->info.max_streams) {
		dev_err(sst_drv_ctx->dev,
			"SST ERR: invalid stream id : %d, max %d\n",
			str_id, sst_drv_ctx->info.max_streams);
		return -EINVAL;
	}

	return 0;
}

/* Return the stream_info for @str_id, or NULL if the id is out of range */
struct stream_info *get_stream_info(
		struct intel_sst_drv *sst_drv_ctx, int str_id)
{
	if (sst_validate_strid(sst_drv_ctx, str_id))
		return NULL;
	return &sst_drv_ctx->streams[str_id];
}

/* Reverse-map a FW pipe id to a stream id; -1 if no stream uses it */
int get_stream_id_mrfld(struct intel_sst_drv *sst_drv_ctx,
		u32 pipe_id)
{
	int i;

	for (i = 1; i <= sst_drv_ctx->info.max_streams; i++)
		if (pipe_id == sst_drv_ctx->streams[i].pipe_id)
			return i;

	dev_dbg(sst_drv_ctx->dev, "no such pipe_id(%u)", pipe_id);
	return -1;
}

u32 relocate_imr_addr_mrfld(u32 base_addr)
{
	/* Get the difference from 512MB aligned base addr */
	/* relocate the base */
	base_addr = MRFLD_FW_VIRTUAL_BASE + (base_addr % (512 * 1024 * 1024));
	return base_addr;
}
EXPORT_SYMBOL_GPL(relocate_imr_addr_mrfld);

/* Queue @msg on the dispatch list and kick the IPC post machinery */
void sst_add_to_dispatch_list_and_post(struct intel_sst_drv *sst,
					struct ipc_post *msg)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&sst->ipc_spin_lock, irq_flags);
	list_add_tail(&msg->node, &sst->ipc_dispatch_list);
	spin_unlock_irqrestore(&sst->ipc_spin_lock, irq_flags);
	sst->ops->post_message(sst, NULL, false);
}
linux-master
sound/soc/intel/atom/sst/sst_pvt.c
// SPDX-License-Identifier: GPL-2.0-only /* * sst_acpi.c - SST (LPE) driver init file for ACPI enumeration. * * Copyright (c) 2013, Intel Corporation. * * Authors: Ramesh Babu K V <[email protected]> * Authors: Omair Mohammed Abdullah <[email protected]> */ #include <linux/module.h> #include <linux/fs.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/firmware.h> #include <linux/pm_qos.h> #include <linux/dmi.h> #include <linux/acpi.h> #include <asm/platform_sst_audio.h> #include <sound/core.h> #include <sound/intel-dsp-config.h> #include <sound/soc.h> #include <sound/compress_driver.h> #include <acpi/acbuffer.h> #include <acpi/platform/acenv.h> #include <acpi/platform/aclinux.h> #include <acpi/actypes.h> #include <acpi/acpi_bus.h> #include <sound/soc-acpi.h> #include <sound/soc-acpi-intel-match.h> #include "../sst-mfld-platform.h" #include "../../common/soc-intel-quirks.h" #include "sst.h" /* LPE viewpoint addresses */ #define SST_BYT_IRAM_PHY_START 0xff2c0000 #define SST_BYT_IRAM_PHY_END 0xff2d4000 #define SST_BYT_DRAM_PHY_START 0xff300000 #define SST_BYT_DRAM_PHY_END 0xff320000 #define SST_BYT_IMR_VIRT_START 0xc0000000 /* virtual addr in LPE */ #define SST_BYT_IMR_VIRT_END 0xc01fffff #define SST_BYT_SHIM_PHY_ADDR 0xff340000 #define SST_BYT_MBOX_PHY_ADDR 0xff344000 #define SST_BYT_DMA0_PHY_ADDR 0xff298000 #define SST_BYT_DMA1_PHY_ADDR 0xff29c000 #define SST_BYT_SSP0_PHY_ADDR 0xff2a0000 #define SST_BYT_SSP2_PHY_ADDR 0xff2a2000 #define BYT_FW_MOD_TABLE_OFFSET 0x80000 #define BYT_FW_MOD_TABLE_SIZE 0x100 #define BYT_FW_MOD_OFFSET (BYT_FW_MOD_TABLE_OFFSET + BYT_FW_MOD_TABLE_SIZE) static const struct sst_info byt_fwparse_info = { .use_elf = false, .max_streams = 25, .iram_start = SST_BYT_IRAM_PHY_START, .iram_end = SST_BYT_IRAM_PHY_END, .iram_use = true, .dram_start = SST_BYT_DRAM_PHY_START, .dram_end = SST_BYT_DRAM_PHY_END, .dram_use = true, .imr_start = SST_BYT_IMR_VIRT_START, .imr_end = 
SST_BYT_IMR_VIRT_END, .imr_use = true, .mailbox_start = SST_BYT_MBOX_PHY_ADDR, .num_probes = 0, .lpe_viewpt_rqd = true, }; static const struct sst_ipc_info byt_ipc_info = { .ipc_offset = 0, .mbox_recv_off = 0x400, }; static const struct sst_lib_dnld_info byt_lib_dnld_info = { .mod_base = SST_BYT_IMR_VIRT_START, .mod_end = SST_BYT_IMR_VIRT_END, .mod_table_offset = BYT_FW_MOD_TABLE_OFFSET, .mod_table_size = BYT_FW_MOD_TABLE_SIZE, .mod_ddr_dnld = false, }; static const struct sst_res_info byt_rvp_res_info = { .shim_offset = 0x140000, .shim_size = 0x000100, .shim_phy_addr = SST_BYT_SHIM_PHY_ADDR, .ssp0_offset = 0xa0000, .ssp0_size = 0x1000, .dma0_offset = 0x98000, .dma0_size = 0x4000, .dma1_offset = 0x9c000, .dma1_size = 0x4000, .iram_offset = 0x0c0000, .iram_size = 0x14000, .dram_offset = 0x100000, .dram_size = 0x28000, .mbox_offset = 0x144000, .mbox_size = 0x1000, .acpi_lpe_res_index = 0, .acpi_ddr_index = 2, .acpi_ipc_irq_index = 5, }; /* BYTCR has different BIOS from BYT */ static const struct sst_res_info bytcr_res_info = { .shim_offset = 0x140000, .shim_size = 0x000100, .shim_phy_addr = SST_BYT_SHIM_PHY_ADDR, .ssp0_offset = 0xa0000, .ssp0_size = 0x1000, .dma0_offset = 0x98000, .dma0_size = 0x4000, .dma1_offset = 0x9c000, .dma1_size = 0x4000, .iram_offset = 0x0c0000, .iram_size = 0x14000, .dram_offset = 0x100000, .dram_size = 0x28000, .mbox_offset = 0x144000, .mbox_size = 0x1000, .acpi_lpe_res_index = 0, .acpi_ddr_index = 2, .acpi_ipc_irq_index = 0 }; static struct sst_platform_info byt_rvp_platform_data = { .probe_data = &byt_fwparse_info, .ipc_info = &byt_ipc_info, .lib_info = &byt_lib_dnld_info, .res_info = &byt_rvp_res_info, .platform = "sst-mfld-platform", .streams_lost_on_suspend = true, }; /* Cherryview (Cherrytrail and Braswell) uses same mrfld dpcm fw as Baytrail, * so pdata is same as Baytrail, minus the streams_lost_on_suspend quirk. 
*/ static struct sst_platform_info chv_platform_data = { .probe_data = &byt_fwparse_info, .ipc_info = &byt_ipc_info, .lib_info = &byt_lib_dnld_info, .res_info = &byt_rvp_res_info, .platform = "sst-mfld-platform", }; static int sst_platform_get_resources(struct intel_sst_drv *ctx) { struct resource *rsrc; struct platform_device *pdev = to_platform_device(ctx->dev); /* All ACPI resource request here */ /* Get Shim addr */ rsrc = platform_get_resource(pdev, IORESOURCE_MEM, ctx->pdata->res_info->acpi_lpe_res_index); if (!rsrc) { dev_err(ctx->dev, "Invalid SHIM base from IFWI\n"); return -EIO; } dev_info(ctx->dev, "LPE base: %#x size:%#x", (unsigned int) rsrc->start, (unsigned int)resource_size(rsrc)); ctx->iram_base = rsrc->start + ctx->pdata->res_info->iram_offset; ctx->iram_end = ctx->iram_base + ctx->pdata->res_info->iram_size - 1; dev_info(ctx->dev, "IRAM base: %#x", ctx->iram_base); ctx->iram = devm_ioremap(ctx->dev, ctx->iram_base, ctx->pdata->res_info->iram_size); if (!ctx->iram) { dev_err(ctx->dev, "unable to map IRAM\n"); return -EIO; } ctx->dram_base = rsrc->start + ctx->pdata->res_info->dram_offset; ctx->dram_end = ctx->dram_base + ctx->pdata->res_info->dram_size - 1; dev_info(ctx->dev, "DRAM base: %#x", ctx->dram_base); ctx->dram = devm_ioremap(ctx->dev, ctx->dram_base, ctx->pdata->res_info->dram_size); if (!ctx->dram) { dev_err(ctx->dev, "unable to map DRAM\n"); return -EIO; } ctx->shim_phy_add = rsrc->start + ctx->pdata->res_info->shim_offset; dev_info(ctx->dev, "SHIM base: %#x", ctx->shim_phy_add); ctx->shim = devm_ioremap(ctx->dev, ctx->shim_phy_add, ctx->pdata->res_info->shim_size); if (!ctx->shim) { dev_err(ctx->dev, "unable to map SHIM\n"); return -EIO; } /* reassign physical address to LPE viewpoint address */ ctx->shim_phy_add = ctx->pdata->res_info->shim_phy_addr; /* Get mailbox addr */ ctx->mailbox_add = rsrc->start + ctx->pdata->res_info->mbox_offset; dev_info(ctx->dev, "Mailbox base: %#x", ctx->mailbox_add); ctx->mailbox = 
devm_ioremap(ctx->dev, ctx->mailbox_add, ctx->pdata->res_info->mbox_size); if (!ctx->mailbox) { dev_err(ctx->dev, "unable to map mailbox\n"); return -EIO; } /* reassign physical address to LPE viewpoint address */ ctx->mailbox_add = ctx->info.mailbox_start; rsrc = platform_get_resource(pdev, IORESOURCE_MEM, ctx->pdata->res_info->acpi_ddr_index); if (!rsrc) { dev_err(ctx->dev, "Invalid DDR base from IFWI\n"); return -EIO; } ctx->ddr_base = rsrc->start; ctx->ddr_end = rsrc->end; dev_info(ctx->dev, "DDR base: %#x", ctx->ddr_base); ctx->ddr = devm_ioremap(ctx->dev, ctx->ddr_base, resource_size(rsrc)); if (!ctx->ddr) { dev_err(ctx->dev, "unable to map DDR\n"); return -EIO; } /* Find the IRQ */ ctx->irq_num = platform_get_irq(pdev, ctx->pdata->res_info->acpi_ipc_irq_index); if (ctx->irq_num <= 0) return ctx->irq_num < 0 ? ctx->irq_num : -EIO; return 0; } static int sst_acpi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; int ret = 0; struct intel_sst_drv *ctx; const struct acpi_device_id *id; struct snd_soc_acpi_mach *mach; struct platform_device *mdev; struct platform_device *plat_dev; struct sst_platform_info *pdata; unsigned int dev_id; id = acpi_match_device(dev->driver->acpi_match_table, dev); if (!id) return -ENODEV; ret = snd_intel_acpi_dsp_driver_probe(dev, id->id); if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SST) { dev_dbg(dev, "SST ACPI driver not selected, aborting probe\n"); return -ENODEV; } dev_dbg(dev, "for %s\n", id->id); mach = (struct snd_soc_acpi_mach *)id->driver_data; mach = snd_soc_acpi_find_machine(mach); if (mach == NULL) { dev_err(dev, "No matching machine driver found\n"); return -ENODEV; } if (soc_intel_is_byt()) mach->pdata = &byt_rvp_platform_data; else mach->pdata = &chv_platform_data; pdata = mach->pdata; ret = kstrtouint(id->id, 16, &dev_id); if (ret < 0) { dev_err(dev, "Unique device id conversion error: %d\n", ret); return ret; } dev_dbg(dev, "ACPI device id: %x\n", dev_id); ret = 
sst_alloc_drv_context(&ctx, dev, dev_id); if (ret < 0) return ret; if (soc_intel_is_byt_cr(pdev)) { /* override resource info */ byt_rvp_platform_data.res_info = &bytcr_res_info; } /* update machine parameters */ mach->mach_params.acpi_ipc_irq_index = pdata->res_info->acpi_ipc_irq_index; plat_dev = platform_device_register_data(dev, pdata->platform, -1, NULL, 0); if (IS_ERR(plat_dev)) { dev_err(dev, "Failed to create machine device: %s\n", pdata->platform); return PTR_ERR(plat_dev); } /* * Create platform device for sst machine driver, * pass machine info as pdata */ mdev = platform_device_register_data(dev, mach->drv_name, -1, (const void *)mach, sizeof(*mach)); if (IS_ERR(mdev)) { dev_err(dev, "Failed to create machine device: %s\n", mach->drv_name); return PTR_ERR(mdev); } /* Fill sst platform data */ ctx->pdata = pdata; strcpy(ctx->firmware_name, mach->fw_filename); ret = sst_platform_get_resources(ctx); if (ret) return ret; ret = sst_context_init(ctx); if (ret < 0) return ret; sst_configure_runtime_pm(ctx); platform_set_drvdata(pdev, ctx); return ret; } /** * sst_acpi_remove - remove function * * @pdev: platform device structure * * This function is called by OS when a device is unloaded * This frees the interrupt etc */ static void sst_acpi_remove(struct platform_device *pdev) { struct intel_sst_drv *ctx; ctx = platform_get_drvdata(pdev); sst_context_cleanup(ctx); platform_set_drvdata(pdev, NULL); } static const struct acpi_device_id sst_acpi_ids[] = { { "80860F28", (unsigned long)&snd_soc_acpi_intel_baytrail_machines}, { "808622A8", (unsigned long)&snd_soc_acpi_intel_cherrytrail_machines}, { }, }; MODULE_DEVICE_TABLE(acpi, sst_acpi_ids); static struct platform_driver sst_acpi_driver = { .driver = { .name = "intel_sst_acpi", .acpi_match_table = ACPI_PTR(sst_acpi_ids), .pm = &intel_sst_pm, }, .probe = sst_acpi_probe, .remove_new = sst_acpi_remove, }; module_platform_driver(sst_acpi_driver); MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine ACPI Driver"); 
MODULE_AUTHOR("Ramesh Babu K V"); MODULE_AUTHOR("Omair Mohammed Abdullah"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("sst");
linux-master
sound/soc/intel/atom/sst/sst_acpi.c
// SPDX-License-Identifier: GPL-2.0-only // // Copyright (C) 2020 Intel Corporation. // // Intel KeemBay Platform driver. // #include <linux/bitrev.h> #include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <sound/dmaengine_pcm.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include "kmb_platform.h" #define PERIODS_MIN 2 #define PERIODS_MAX 48 #define PERIOD_BYTES_MIN 4096 #define BUFFER_BYTES_MAX (PERIODS_MAX * PERIOD_BYTES_MIN) #define TDM_OPERATION 5 #define I2S_OPERATION 0 #define DATA_WIDTH_CONFIG_BIT 6 #define TDM_CHANNEL_CONFIG_BIT 3 static const struct snd_pcm_hardware kmb_pcm_hardware = { .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BATCH | SNDRV_PCM_INFO_BLOCK_TRANSFER, .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_48000, .rate_min = 8000, .rate_max = 48000, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = BUFFER_BYTES_MAX, .period_bytes_min = PERIOD_BYTES_MIN, .period_bytes_max = BUFFER_BYTES_MAX / PERIODS_MIN, .periods_min = PERIODS_MIN, .periods_max = PERIODS_MAX, .fifo_size = 16, }; /* * Convert to ADV7511 HDMI hardware format. * ADV7511 HDMI chip need parity bit replaced by block start bit and * with the preamble bits left out. * ALSA IEC958 subframe format: * bit 0-3 = preamble (0x8 = block start) * 4-7 = AUX (=0) * 8-27 = audio data (without AUX if 24bit sample) * 28 = validity * 29 = user data * 30 = channel status * 31 = parity * * ADV7511 IEC958 subframe format: * bit 0-23 = audio data * 24 = validity * 25 = user data * 26 = channel status * 27 = block start * 28-31 = 0 * MSB to LSB bit reverse by software as hardware not supporting it. 
*/ static void hdmi_reformat_iec958(struct snd_pcm_runtime *runtime, struct kmb_i2s_info *kmb_i2s, unsigned int tx_ptr) { u32(*buf)[2] = (void *)runtime->dma_area; unsigned long temp; u32 i, j, sample; for (i = 0; i < kmb_i2s->fifo_th; i++) { j = 0; do { temp = buf[tx_ptr][j]; /* Replace parity with block start*/ assign_bit(31, &temp, (BIT(3) & temp)); sample = bitrev32(temp); buf[tx_ptr][j] = sample << 4; j++; } while (j < 2); tx_ptr++; } } static unsigned int kmb_pcm_tx_fn(struct kmb_i2s_info *kmb_i2s, struct snd_pcm_runtime *runtime, unsigned int tx_ptr, bool *period_elapsed) { unsigned int period_pos = tx_ptr % runtime->period_size; void __iomem *i2s_base = kmb_i2s->i2s_base; void *buf = runtime->dma_area; int i; if (kmb_i2s->iec958_fmt) hdmi_reformat_iec958(runtime, kmb_i2s, tx_ptr); /* KMB i2s uses two separate L/R FIFO */ for (i = 0; i < kmb_i2s->fifo_th; i++) { if (kmb_i2s->config.data_width == 16) { writel(((u16(*)[2])buf)[tx_ptr][0], i2s_base + LRBR_LTHR(0)); writel(((u16(*)[2])buf)[tx_ptr][1], i2s_base + RRBR_RTHR(0)); } else { writel(((u32(*)[2])buf)[tx_ptr][0], i2s_base + LRBR_LTHR(0)); writel(((u32(*)[2])buf)[tx_ptr][1], i2s_base + RRBR_RTHR(0)); } period_pos++; if (++tx_ptr >= runtime->buffer_size) tx_ptr = 0; } *period_elapsed = period_pos >= runtime->period_size; return tx_ptr; } static unsigned int kmb_pcm_rx_fn(struct kmb_i2s_info *kmb_i2s, struct snd_pcm_runtime *runtime, unsigned int rx_ptr, bool *period_elapsed) { unsigned int period_pos = rx_ptr % runtime->period_size; void __iomem *i2s_base = kmb_i2s->i2s_base; int chan = kmb_i2s->config.chan_nr; void *buf = runtime->dma_area; int i, j; /* KMB i2s uses two separate L/R FIFO */ for (i = 0; i < kmb_i2s->fifo_th; i++) { for (j = 0; j < chan / 2; j++) { if (kmb_i2s->config.data_width == 16) { ((u16 *)buf)[rx_ptr * chan + (j * 2)] = readl(i2s_base + LRBR_LTHR(j)); ((u16 *)buf)[rx_ptr * chan + ((j * 2) + 1)] = readl(i2s_base + RRBR_RTHR(j)); } else { ((u32 *)buf)[rx_ptr * chan + (j * 2)] = 
readl(i2s_base + LRBR_LTHR(j)); ((u32 *)buf)[rx_ptr * chan + ((j * 2) + 1)] = readl(i2s_base + RRBR_RTHR(j)); } } period_pos++; if (++rx_ptr >= runtime->buffer_size) rx_ptr = 0; } *period_elapsed = period_pos >= runtime->period_size; return rx_ptr; } static inline void kmb_i2s_disable_channels(struct kmb_i2s_info *kmb_i2s, u32 stream) { u32 i; /* Disable all channels regardless of configuration*/ if (stream == SNDRV_PCM_STREAM_PLAYBACK) { for (i = 0; i < MAX_ISR; i++) writel(0, kmb_i2s->i2s_base + TER(i)); } else { for (i = 0; i < MAX_ISR; i++) writel(0, kmb_i2s->i2s_base + RER(i)); } } static inline void kmb_i2s_clear_irqs(struct kmb_i2s_info *kmb_i2s, u32 stream) { struct i2s_clk_config_data *config = &kmb_i2s->config; u32 i; if (stream == SNDRV_PCM_STREAM_PLAYBACK) { for (i = 0; i < config->chan_nr / 2; i++) readl(kmb_i2s->i2s_base + TOR(i)); } else { for (i = 0; i < config->chan_nr / 2; i++) readl(kmb_i2s->i2s_base + ROR(i)); } } static inline void kmb_i2s_irq_trigger(struct kmb_i2s_info *kmb_i2s, u32 stream, int chan_nr, bool trigger) { u32 i, irq; u32 flag; if (stream == SNDRV_PCM_STREAM_PLAYBACK) flag = TX_INT_FLAG; else flag = RX_INT_FLAG; for (i = 0; i < chan_nr / 2; i++) { irq = readl(kmb_i2s->i2s_base + IMR(i)); if (trigger) irq = irq & ~flag; else irq = irq | flag; writel(irq, kmb_i2s->i2s_base + IMR(i)); } } static void kmb_pcm_operation(struct kmb_i2s_info *kmb_i2s, bool playback) { struct snd_pcm_substream *substream; bool period_elapsed; unsigned int new_ptr; unsigned int ptr; if (playback) substream = kmb_i2s->tx_substream; else substream = kmb_i2s->rx_substream; if (!substream || !snd_pcm_running(substream)) return; if (playback) { ptr = kmb_i2s->tx_ptr; new_ptr = kmb_pcm_tx_fn(kmb_i2s, substream->runtime, ptr, &period_elapsed); cmpxchg(&kmb_i2s->tx_ptr, ptr, new_ptr); } else { ptr = kmb_i2s->rx_ptr; new_ptr = kmb_pcm_rx_fn(kmb_i2s, substream->runtime, ptr, &period_elapsed); cmpxchg(&kmb_i2s->rx_ptr, ptr, new_ptr); } if (period_elapsed) 
snd_pcm_period_elapsed(substream); } static int kmb_pcm_open(struct snd_soc_component *component, struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct kmb_i2s_info *kmb_i2s; kmb_i2s = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0)); snd_soc_set_runtime_hwparams(substream, &kmb_pcm_hardware); snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); runtime->private_data = kmb_i2s; return 0; } static int kmb_pcm_trigger(struct snd_soc_component *component, struct snd_pcm_substream *substream, int cmd) { struct snd_pcm_runtime *runtime = substream->runtime; struct kmb_i2s_info *kmb_i2s = runtime->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { kmb_i2s->tx_ptr = 0; kmb_i2s->tx_substream = substream; } else { kmb_i2s->rx_ptr = 0; kmb_i2s->rx_substream = substream; } break; case SNDRV_PCM_TRIGGER_STOP: if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) kmb_i2s->tx_substream = NULL; else kmb_i2s->rx_substream = NULL; kmb_i2s->iec958_fmt = false; break; default: return -EINVAL; } return 0; } static irqreturn_t kmb_i2s_irq_handler(int irq, void *dev_id) { struct kmb_i2s_info *kmb_i2s = dev_id; struct i2s_clk_config_data *config = &kmb_i2s->config; irqreturn_t ret = IRQ_NONE; u32 tx_enabled = 0; u32 isr[4]; int i; for (i = 0; i < config->chan_nr / 2; i++) isr[i] = readl(kmb_i2s->i2s_base + ISR(i)); kmb_i2s_clear_irqs(kmb_i2s, SNDRV_PCM_STREAM_PLAYBACK); kmb_i2s_clear_irqs(kmb_i2s, SNDRV_PCM_STREAM_CAPTURE); /* Only check TX interrupt if TX is active */ tx_enabled = readl(kmb_i2s->i2s_base + ITER); /* * Data available. Retrieve samples from FIFO */ /* * 8 channel audio will have isr[0..2] triggered, * reading the specific isr based on the audio configuration, * to avoid reading the buffers too early. 
*/ switch (config->chan_nr) { case 2: if (isr[0] & ISR_RXDA) kmb_pcm_operation(kmb_i2s, false); ret = IRQ_HANDLED; break; case 4: if (isr[1] & ISR_RXDA) kmb_pcm_operation(kmb_i2s, false); ret = IRQ_HANDLED; break; case 8: if (isr[3] & ISR_RXDA) kmb_pcm_operation(kmb_i2s, false); ret = IRQ_HANDLED; break; } for (i = 0; i < config->chan_nr / 2; i++) { /* * Check if TX fifo is empty. If empty fill FIFO with samples */ if ((isr[i] & ISR_TXFE) && tx_enabled) { kmb_pcm_operation(kmb_i2s, true); ret = IRQ_HANDLED; } /* Error Handling: TX */ if (isr[i] & ISR_TXFO) { dev_dbg(kmb_i2s->dev, "TX overrun (ch_id=%d)\n", i); ret = IRQ_HANDLED; } /* Error Handling: RX */ if (isr[i] & ISR_RXFO) { dev_dbg(kmb_i2s->dev, "RX overrun (ch_id=%d)\n", i); ret = IRQ_HANDLED; } } return ret; } static int kmb_platform_pcm_new(struct snd_soc_component *component, struct snd_soc_pcm_runtime *soc_runtime) { size_t size = kmb_pcm_hardware.buffer_bytes_max; /* Use SNDRV_DMA_TYPE_CONTINUOUS as KMB doesn't use PCI sg buffer */ snd_pcm_set_managed_buffer_all(soc_runtime->pcm, SNDRV_DMA_TYPE_CONTINUOUS, NULL, size, size); return 0; } static snd_pcm_uframes_t kmb_pcm_pointer(struct snd_soc_component *component, struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct kmb_i2s_info *kmb_i2s = runtime->private_data; snd_pcm_uframes_t pos; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) pos = kmb_i2s->tx_ptr; else pos = kmb_i2s->rx_ptr; return pos < runtime->buffer_size ? 
pos : 0; } static const struct snd_soc_component_driver kmb_component = { .name = "kmb", .pcm_construct = kmb_platform_pcm_new, .open = kmb_pcm_open, .trigger = kmb_pcm_trigger, .pointer = kmb_pcm_pointer, .legacy_dai_naming = 1, }; static const struct snd_soc_component_driver kmb_component_dma = { .name = "kmb", .legacy_dai_naming = 1, }; static int kmb_probe(struct snd_soc_dai *cpu_dai) { struct kmb_i2s_info *kmb_i2s = snd_soc_dai_get_drvdata(cpu_dai); if (kmb_i2s->use_pio) return 0; snd_soc_dai_init_dma_data(cpu_dai, &kmb_i2s->play_dma_data, &kmb_i2s->capture_dma_data); return 0; } static inline void kmb_i2s_enable_dma(struct kmb_i2s_info *kmb_i2s, u32 stream) { u32 dma_reg; dma_reg = readl(kmb_i2s->i2s_base + I2S_DMACR); /* Enable DMA handshake for stream */ if (stream == SNDRV_PCM_STREAM_PLAYBACK) dma_reg |= I2S_DMAEN_TXBLOCK; else dma_reg |= I2S_DMAEN_RXBLOCK; writel(dma_reg, kmb_i2s->i2s_base + I2S_DMACR); } static inline void kmb_i2s_disable_dma(struct kmb_i2s_info *kmb_i2s, u32 stream) { u32 dma_reg; dma_reg = readl(kmb_i2s->i2s_base + I2S_DMACR); /* Disable DMA handshake for stream */ if (stream == SNDRV_PCM_STREAM_PLAYBACK) { dma_reg &= ~I2S_DMAEN_TXBLOCK; writel(1, kmb_i2s->i2s_base + I2S_RTXDMA); } else { dma_reg &= ~I2S_DMAEN_RXBLOCK; writel(1, kmb_i2s->i2s_base + I2S_RRXDMA); } writel(dma_reg, kmb_i2s->i2s_base + I2S_DMACR); } static void kmb_i2s_start(struct kmb_i2s_info *kmb_i2s, struct snd_pcm_substream *substream) { struct i2s_clk_config_data *config = &kmb_i2s->config; /* I2S Programming sequence in Keem_Bay_VPU_DB_v1.1 */ writel(1, kmb_i2s->i2s_base + IER); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) writel(1, kmb_i2s->i2s_base + ITER); else writel(1, kmb_i2s->i2s_base + IRER); if (kmb_i2s->use_pio) kmb_i2s_irq_trigger(kmb_i2s, substream->stream, config->chan_nr, true); else kmb_i2s_enable_dma(kmb_i2s, substream->stream); if (kmb_i2s->clock_provider) writel(1, kmb_i2s->i2s_base + CER); else writel(0, kmb_i2s->i2s_base + CER); } static 
void kmb_i2s_stop(struct kmb_i2s_info *kmb_i2s, struct snd_pcm_substream *substream) { /* I2S Programming sequence in Keem_Bay_VPU_DB_v1.1 */ kmb_i2s_clear_irqs(kmb_i2s, substream->stream); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) writel(0, kmb_i2s->i2s_base + ITER); else writel(0, kmb_i2s->i2s_base + IRER); kmb_i2s_irq_trigger(kmb_i2s, substream->stream, 8, false); if (!kmb_i2s->active) { writel(0, kmb_i2s->i2s_base + CER); writel(0, kmb_i2s->i2s_base + IER); } } static void kmb_disable_clk(void *clk) { clk_disable_unprepare(clk); } static int kmb_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) { struct kmb_i2s_info *kmb_i2s = snd_soc_dai_get_drvdata(cpu_dai); int ret; switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) { case SND_SOC_DAIFMT_BC_FC: kmb_i2s->clock_provider = false; ret = 0; break; case SND_SOC_DAIFMT_BP_FP: writel(CLOCK_PROVIDER_MODE, kmb_i2s->pss_base + I2S_GEN_CFG_0); ret = clk_prepare_enable(kmb_i2s->clk_i2s); if (ret < 0) return ret; ret = devm_add_action_or_reset(kmb_i2s->dev, kmb_disable_clk, kmb_i2s->clk_i2s); if (ret) return ret; kmb_i2s->clock_provider = true; break; default: return -EINVAL; } return ret; } static int kmb_dai_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *cpu_dai) { struct kmb_i2s_info *kmb_i2s = snd_soc_dai_get_drvdata(cpu_dai); switch (cmd) { case SNDRV_PCM_TRIGGER_START: /* Keep track of i2s activity before turn off * the i2s interface */ kmb_i2s->active++; kmb_i2s_start(kmb_i2s, substream); break; case SNDRV_PCM_TRIGGER_STOP: kmb_i2s->active--; if (kmb_i2s->use_pio) kmb_i2s_stop(kmb_i2s, substream); break; default: return -EINVAL; } return 0; } static void kmb_i2s_config(struct kmb_i2s_info *kmb_i2s, int stream) { struct i2s_clk_config_data *config = &kmb_i2s->config; u32 ch_reg; kmb_i2s_disable_channels(kmb_i2s, stream); for (ch_reg = 0; ch_reg < config->chan_nr / 2; ch_reg++) { if (stream == SNDRV_PCM_STREAM_PLAYBACK) { writel(kmb_i2s->xfer_resolution, kmb_i2s->i2s_base 
+ TCR(ch_reg)); writel(kmb_i2s->fifo_th - 1, kmb_i2s->i2s_base + TFCR(ch_reg)); writel(1, kmb_i2s->i2s_base + TER(ch_reg)); } else { writel(kmb_i2s->xfer_resolution, kmb_i2s->i2s_base + RCR(ch_reg)); writel(kmb_i2s->fifo_th - 1, kmb_i2s->i2s_base + RFCR(ch_reg)); writel(1, kmb_i2s->i2s_base + RER(ch_reg)); } } } static int kmb_dai_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params, struct snd_soc_dai *cpu_dai) { struct kmb_i2s_info *kmb_i2s = snd_soc_dai_get_drvdata(cpu_dai); struct i2s_clk_config_data *config = &kmb_i2s->config; u32 write_val; int ret; switch (params_format(hw_params)) { case SNDRV_PCM_FORMAT_S16_LE: config->data_width = 16; kmb_i2s->ccr = 0x00; kmb_i2s->xfer_resolution = 0x02; kmb_i2s->play_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; kmb_i2s->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; break; case SNDRV_PCM_FORMAT_S24_LE: config->data_width = 32; kmb_i2s->ccr = 0x14; kmb_i2s->xfer_resolution = 0x05; kmb_i2s->play_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; kmb_i2s->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; break; case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE: kmb_i2s->iec958_fmt = true; fallthrough; case SNDRV_PCM_FORMAT_S32_LE: config->data_width = 32; kmb_i2s->ccr = 0x10; kmb_i2s->xfer_resolution = 0x05; kmb_i2s->play_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; kmb_i2s->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; break; default: dev_err(kmb_i2s->dev, "kmb: unsupported PCM fmt"); return -EINVAL; } config->chan_nr = params_channels(hw_params); switch (config->chan_nr) { case 8: case 4: /* * Platform is not capable of providing clocks for * multi channel audio */ if (kmb_i2s->clock_provider) return -EINVAL; write_val = ((config->chan_nr / 2) << TDM_CHANNEL_CONFIG_BIT) | (config->data_width << DATA_WIDTH_CONFIG_BIT) | TDM_OPERATION; writel(write_val, kmb_i2s->pss_base + I2S_GEN_CFG_0); break; case 2: /* * Platform is only capable of providing clocks need for * 
2 channel master mode */ if (!(kmb_i2s->clock_provider)) return -EINVAL; write_val = ((config->chan_nr / 2) << TDM_CHANNEL_CONFIG_BIT) | (config->data_width << DATA_WIDTH_CONFIG_BIT) | CLOCK_PROVIDER_MODE | I2S_OPERATION; writel(write_val, kmb_i2s->pss_base + I2S_GEN_CFG_0); break; default: dev_dbg(kmb_i2s->dev, "channel not supported\n"); return -EINVAL; } kmb_i2s_config(kmb_i2s, substream->stream); writel(kmb_i2s->ccr, kmb_i2s->i2s_base + CCR); config->sample_rate = params_rate(hw_params); if (kmb_i2s->clock_provider) { /* Only 2 ch supported in Master mode */ u32 bitclk = config->sample_rate * config->data_width * 2; ret = clk_set_rate(kmb_i2s->clk_i2s, bitclk); if (ret) { dev_err(kmb_i2s->dev, "Can't set I2S clock rate: %d\n", ret); return ret; } } return 0; } static int kmb_dai_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct kmb_i2s_info *kmb_i2s = snd_soc_dai_get_drvdata(cpu_dai); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) writel(1, kmb_i2s->i2s_base + TXFFR); else writel(1, kmb_i2s->i2s_base + RXFFR); return 0; } static int kmb_dai_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct kmb_i2s_info *kmb_i2s = snd_soc_dai_get_drvdata(cpu_dai); struct snd_dmaengine_dai_dma_data *dma_data; if (kmb_i2s->use_pio) return 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) dma_data = &kmb_i2s->play_dma_data; else dma_data = &kmb_i2s->capture_dma_data; snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data); return 0; } static int kmb_dai_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct kmb_i2s_info *kmb_i2s = snd_soc_dai_get_drvdata(cpu_dai); /* I2S Programming sequence in Keem_Bay_VPU_DB_v1.1 */ if (kmb_i2s->use_pio) kmb_i2s_clear_irqs(kmb_i2s, substream->stream); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) writel(0, kmb_i2s->i2s_base + ITER); else writel(0, kmb_i2s->i2s_base + IRER); if (kmb_i2s->use_pio) kmb_i2s_irq_trigger(kmb_i2s, 
substream->stream, 8, false); else kmb_i2s_disable_dma(kmb_i2s, substream->stream); if (!kmb_i2s->active) { writel(0, kmb_i2s->i2s_base + CER); writel(0, kmb_i2s->i2s_base + IER); } return 0; } static const struct snd_soc_dai_ops kmb_dai_ops = { .probe = kmb_probe, .startup = kmb_dai_startup, .trigger = kmb_dai_trigger, .hw_params = kmb_dai_hw_params, .hw_free = kmb_dai_hw_free, .prepare = kmb_dai_prepare, .set_fmt = kmb_set_dai_fmt, }; static struct snd_soc_dai_driver intel_kmb_hdmi_dai[] = { { .name = "intel_kmb_hdmi_i2s", .playback = { .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, .rate_max = 48000, .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE), }, .ops = &kmb_dai_ops, }, }; static struct snd_soc_dai_driver intel_kmb_i2s_dai[] = { { .name = "intel_kmb_i2s", .playback = { .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_48000, .rate_min = 8000, .rate_max = 48000, .formats = (SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE), }, .capture = { .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_48000, .rate_min = 8000, .rate_max = 48000, .formats = (SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE), }, .ops = &kmb_dai_ops, }, }; static struct snd_soc_dai_driver intel_kmb_tdm_dai[] = { { .name = "intel_kmb_tdm", .capture = { .channels_min = 4, .channels_max = 8, .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_48000, .rate_min = 8000, .rate_max = 48000, .formats = (SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE), }, .ops = &kmb_dai_ops, }, }; static const struct of_device_id kmb_plat_of_match[] = { { .compatible = "intel,keembay-i2s", .data = &intel_kmb_i2s_dai}, { .compatible = "intel,keembay-hdmi-i2s", .data = &intel_kmb_hdmi_dai}, { .compatible = 
"intel,keembay-tdm", .data = &intel_kmb_tdm_dai}, {} }; static int kmb_plat_dai_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct snd_soc_dai_driver *kmb_i2s_dai; const struct of_device_id *match; struct device *dev = &pdev->dev; struct kmb_i2s_info *kmb_i2s; struct resource *res; int ret, irq; u32 comp1_reg; kmb_i2s = devm_kzalloc(dev, sizeof(*kmb_i2s), GFP_KERNEL); if (!kmb_i2s) return -ENOMEM; kmb_i2s_dai = devm_kzalloc(dev, sizeof(*kmb_i2s_dai), GFP_KERNEL); if (!kmb_i2s_dai) return -ENOMEM; match = of_match_device(kmb_plat_of_match, &pdev->dev); if (!match) { dev_err(&pdev->dev, "Error: No device match found\n"); return -ENODEV; } kmb_i2s_dai = (struct snd_soc_dai_driver *) match->data; /* Prepare the related clocks */ kmb_i2s->clk_apb = devm_clk_get(dev, "apb_clk"); if (IS_ERR(kmb_i2s->clk_apb)) { dev_err(dev, "Failed to get apb clock\n"); return PTR_ERR(kmb_i2s->clk_apb); } ret = clk_prepare_enable(kmb_i2s->clk_apb); if (ret < 0) return ret; ret = devm_add_action_or_reset(dev, kmb_disable_clk, kmb_i2s->clk_apb); if (ret) { dev_err(dev, "Failed to add clk_apb reset action\n"); return ret; } kmb_i2s->clk_i2s = devm_clk_get(dev, "osc"); if (IS_ERR(kmb_i2s->clk_i2s)) { dev_err(dev, "Failed to get osc clock\n"); return PTR_ERR(kmb_i2s->clk_i2s); } kmb_i2s->i2s_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(kmb_i2s->i2s_base)) return PTR_ERR(kmb_i2s->i2s_base); kmb_i2s->pss_base = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(kmb_i2s->pss_base)) return PTR_ERR(kmb_i2s->pss_base); kmb_i2s->dev = &pdev->dev; comp1_reg = readl(kmb_i2s->i2s_base + I2S_COMP_PARAM_1); kmb_i2s->fifo_th = (1 << COMP1_FIFO_DEPTH(comp1_reg)) / 2; kmb_i2s->use_pio = !(of_property_read_bool(np, "dmas")); if (kmb_i2s->use_pio) { irq = platform_get_irq_optional(pdev, 0); if (irq > 0) { ret = devm_request_irq(dev, irq, kmb_i2s_irq_handler, 0, pdev->name, kmb_i2s); if (ret < 0) { dev_err(dev, "failed to request irq\n"); return 
ret; } } ret = devm_snd_soc_register_component(dev, &kmb_component, kmb_i2s_dai, 1); } else { kmb_i2s->play_dma_data.addr = res->start + I2S_TXDMA; kmb_i2s->capture_dma_data.addr = res->start + I2S_RXDMA; ret = snd_dmaengine_pcm_register(&pdev->dev, NULL, 0); if (ret) { dev_err(&pdev->dev, "could not register dmaengine: %d\n", ret); return ret; } ret = devm_snd_soc_register_component(dev, &kmb_component_dma, kmb_i2s_dai, 1); } if (ret) { dev_err(dev, "not able to register dai\n"); return ret; } /* To ensure none of the channels are enabled at boot up */ kmb_i2s_disable_channels(kmb_i2s, SNDRV_PCM_STREAM_PLAYBACK); kmb_i2s_disable_channels(kmb_i2s, SNDRV_PCM_STREAM_CAPTURE); dev_set_drvdata(dev, kmb_i2s); return ret; } static struct platform_driver kmb_plat_dai_driver = { .driver = { .name = "kmb-plat-dai", .of_match_table = kmb_plat_of_match, }, .probe = kmb_plat_dai_probe, }; module_platform_driver(kmb_plat_dai_driver); MODULE_DESCRIPTION("ASoC Intel KeemBay Platform driver"); MODULE_AUTHOR("Sia Jee Heng <[email protected]>"); MODULE_AUTHOR("Sit, Michael Wei Hong <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:kmb_platform");
linux-master
sound/soc/intel/keembay/kmb_platform.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * skl-sst-cldma.c - Code Loader DMA handler
 *
 * Copyright (C) 2015, Intel Corporation.
 * Author: Subhransu S. Prusty <[email protected]>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/device.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <sound/hda_register.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

/* Unmask the code-loader DMA interrupt in the ADSP interrupt control reg. */
static void skl_cldma_int_enable(struct sst_dsp *ctx)
{
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPIC,
			SKL_ADSPIC_CL_DMA, SKL_ADSPIC_CL_DMA);
}

/* Mask the code-loader DMA interrupt. */
void skl_cldma_int_disable(struct sst_dsp *ctx)
{
	sst_dsp_shim_update_bits_unlocked(ctx,
			SKL_ADSP_REG_ADSPIC, SKL_ADSPIC_CL_DMA, 0);
}

/*
 * Start or stop the CL DMA stream by toggling the Run bit, then poll
 * (up to 300 * 3us) until the hardware reflects the requested state.
 */
static void skl_cldma_stream_run(struct sst_dsp *ctx, bool enable)
{
	unsigned char val;
	int timeout;

	sst_dsp_shim_update_bits_unlocked(ctx,
			SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_RUN_MASK, CL_SD_CTL_RUN(enable));

	udelay(3);
	timeout = 300;
	do {
		/* waiting for hardware to report that the stream Run bit set */
		val = sst_dsp_shim_read(ctx, SKL_ADSP_REG_CL_SD_CTL) &
			CL_SD_CTL_RUN_MASK;
		if (enable && val)
			break;
		else if (!enable && !val)
			break;
		udelay(3);
	} while (--timeout);

	if (timeout == 0)
		dev_err(ctx->dev, "Failed to set Run bit=%d enable=%d\n", val, enable);
}

/*
 * Return the CL DMA stream registers to their reset state: disable all
 * stream interrupts, clear the stream number, and zero the BDL address,
 * cyclic buffer length and last-valid-index registers.
 */
static void skl_cldma_stream_clear(struct sst_dsp *ctx)
{
	/* make sure Run bit is cleared before setting stream register */
	skl_cldma_stream_run(ctx, 0);

	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
				CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(0));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
				CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(0));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
				CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(0));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
				CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(0));

	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL, CL_SD_BDLPLBA(0));
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU, 0);

	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, 0);
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, 0);
}

/* Code loader helper APIs */

/*
 * Fill the buffer descriptor list for the scatter-gather DMA buffer.
 * Each 4-dword BDL entry holds {addr lo, addr hi, chunk size, flags};
 * the IOC flag (0x01) is set only on the final entry so a single
 * interrupt fires per full buffer.  ctx->cl_dev.frags is updated to the
 * number of entries written.
 * NOTE(review): the with_ioc parameter is not consulted here - the IOC
 * flag placement is fixed by the (remaining > 0) test.
 */
static void skl_cldma_setup_bdle(struct sst_dsp *ctx,
		struct snd_dma_buffer *dmab_data,
		__le32 **bdlp, int size, int with_ioc)
{
	__le32 *bdl = *bdlp;
	int remaining = ctx->cl_dev.bufsize;
	int offset = 0;

	ctx->cl_dev.frags = 0;
	while (remaining > 0) {
		phys_addr_t addr;
		int chunk;

		addr = snd_sgbuf_get_addr(dmab_data, offset);
		bdl[0] = cpu_to_le32(lower_32_bits(addr));
		bdl[1] = cpu_to_le32(upper_32_bits(addr));

		chunk = snd_sgbuf_get_chunk_size(dmab_data, offset, size);
		bdl[2] = cpu_to_le32(chunk);

		remaining -= chunk;
		/* interrupt-on-completion only on the last descriptor */
		bdl[3] = (remaining > 0) ? 0 : cpu_to_le32(0x01);

		bdl += 4;
		offset += chunk;
		ctx->cl_dev.frags++;
	}
}

/*
 * Setup controller
 * Configure the registers to update the dma buffer address and
 * enable interrupts.
 * Note: Using the channel 1 for transfer
 */
static void skl_cldma_setup_controller(struct sst_dsp  *ctx,
		struct snd_dma_buffer *dmab_bdl, unsigned int max_size,
		u32 count)
{
	/* start from a clean stream state before programming */
	skl_cldma_stream_clear(ctx);
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL,
			CL_SD_BDLPLBA(dmab_bdl->addr));
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU,
			CL_SD_BDLPUBA(dmab_bdl->addr));

	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, max_size);
	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, count - 1);
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(1));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(1));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(1));
	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
			CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(FW_CL_STREAM_NUMBER));
}

/*
 * Program the software position in buffer (SPIB): optionally enable the
 * SPIB mechanism, then write the write-pointer position the DMA may
 * transfer up to.
 */
static void skl_cldma_setup_spb(struct sst_dsp *ctx,
		unsigned int size, bool enable)
{
	if (enable)
		sst_dsp_shim_update_bits_unlocked(ctx,
				SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
				CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
				CL_SPBFIFO_SPBFCCTL_SPIBE(1));

	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, size);
}

/* Disable the SPIB mechanism and reset its position to 0. */
static void skl_cldma_cleanup_spb(struct sst_dsp *ctx)
{
	sst_dsp_shim_update_bits_unlocked(ctx,
			SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
			CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
			CL_SPBFIFO_SPBFCCTL_SPIBE(0));

	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, 0);
}

/* Tear down SPIB and stream registers and free both DMA buffers. */
static void skl_cldma_cleanup(struct sst_dsp *ctx)
{
	skl_cldma_cleanup_spb(ctx);
	skl_cldma_stream_clear(ctx);

	ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
	ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_bdl);
}

/*
 * Sleep until the interrupt handler signals buffer completion (or a DMA
 * error), or until SKL_WAIT_TIMEOUT ms elapse.  Returns 0 on successful
 * buffer completion, -EIO on timeout or DMA error.  wake_status is
 * always reset to NONE before returning.
 */
int skl_cldma_wait_interruptible(struct sst_dsp *ctx)
{
	int ret = 0;

	if (!wait_event_timeout(ctx->cl_dev.wait_queue,
				ctx->cl_dev.wait_condition,
				msecs_to_jiffies(SKL_WAIT_TIMEOUT))) {
		dev_err(ctx->dev, "%s: Wait timeout\n", __func__);
		ret = -EIO;
		goto cleanup;
	}

	dev_dbg(ctx->dev, "%s: Event wake\n", __func__);
	if (ctx->cl_dev.wake_status != SKL_CL_DMA_BUF_COMPLETE) {
		dev_err(ctx->dev, "%s: DMA Error\n", __func__);
		ret = -EIO;
	}

cleanup:
	ctx->cl_dev.wake_status = SKL_CL_DMA_STATUS_NONE;
	return ret;
}

/* Stop the CL DMA stream (clears the Run bit and waits for the HW). */
static void skl_cldma_stop(struct sst_dsp *ctx)
{
	skl_cldma_stream_run(ctx, false);
}

/*
 * Copy `size` bytes from curr_pos into the DMA ring buffer at the
 * current offset, wrapping at bufsize.  Then arm the transfer: clear the
 * wait condition, optionally enable the completion interrupt, program
 * SPIB to the new write pointer, and (on the first chunk) start the
 * stream.
 */
static void skl_cldma_fill_buffer(struct sst_dsp *ctx, unsigned int size,
		const void *curr_pos, bool intr_enable, bool trigger)
{
	dev_dbg(ctx->dev, "Size: %x, intr_enable: %d\n", size, intr_enable);
	dev_dbg(ctx->dev, "buf_pos_index:%d, trigger:%d\n",
			ctx->cl_dev.dma_buffer_offset, trigger);
	dev_dbg(ctx->dev, "spib position: %d\n", ctx->cl_dev.curr_spib_pos);

	/*
	 * Check if the size exceeds buffer boundary. If it exceeds
	 * max_buffer size, then copy till buffer size and then copy
	 * remaining buffer from the start of ring buffer.
	 */
	if (ctx->cl_dev.dma_buffer_offset + size > ctx->cl_dev.bufsize) {
		unsigned int size_b = ctx->cl_dev.bufsize -
					ctx->cl_dev.dma_buffer_offset;
		memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
			curr_pos, size_b);
		size -= size_b;
		curr_pos += size_b;
		ctx->cl_dev.dma_buffer_offset = 0;
	}

	memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
			curr_pos, size);

	/* next write starts at the SPIB position (0 if SPIB hit bufsize) */
	if (ctx->cl_dev.curr_spib_pos == ctx->cl_dev.bufsize)
		ctx->cl_dev.dma_buffer_offset = 0;
	else
		ctx->cl_dev.dma_buffer_offset = ctx->cl_dev.curr_spib_pos;

	ctx->cl_dev.wait_condition = false;

	if (intr_enable)
		skl_cldma_int_enable(ctx);

	ctx->cl_dev.ops.cl_setup_spb(ctx, ctx->cl_dev.curr_spib_pos, trigger);
	if (trigger)
		ctx->cl_dev.ops.cl_trigger(ctx, true);
}

/*
 * The CL dma doesn't have any way to update the transfer status until a BDL
 * buffer is fully transferred
 *
 * So Copying is divided in two parts.
 * 1. Interrupt on buffer done where the size to be transferred is more than
 *    ring buffer size.
 * 2. Polling on fw register to identify if data left to transferred doesn't
 *    fill the ring buffer. Caller takes care of polling the required status
 *    register to identify the transfer status.
 * 3. if wait flag is set, waits for DBL interrupt to copy the next chunk till
 *    bytes_left is 0.
 *    if wait flag is not set, doesn't wait for BDL interrupt. after copying
 *    the first chunk return the no of bytes_left to be copied.
 */
static int
skl_cldma_copy_to_buf(struct sst_dsp *ctx, const void *bin,
			u32 total_size, bool wait)
{
	int ret;
	bool start = true;
	unsigned int excess_bytes;
	u32 size;
	unsigned int bytes_left = total_size;
	const void *curr_pos = bin;

	/* total_size is unsigned, so this only rejects a zero-length copy */
	if (total_size <= 0)
		return -EINVAL;

	dev_dbg(ctx->dev, "%s: Total binary size: %u\n", __func__, bytes_left);

	while (bytes_left) {
		if (bytes_left > ctx->cl_dev.bufsize) {

			/*
			 * dma transfers only till the write pointer as
			 * updated in spib
			 */
			if (ctx->cl_dev.curr_spib_pos == 0)
				ctx->cl_dev.curr_spib_pos = ctx->cl_dev.bufsize;

			size = ctx->cl_dev.bufsize;
			skl_cldma_fill_buffer(ctx, size, curr_pos, true, start);

			if (wait) {
				start = false;
				ret = skl_cldma_wait_interruptible(ctx);
				if (ret < 0) {
					skl_cldma_stop(ctx);
					return ret;
				}
			}
		} else {
			/* final chunk: poll-mode, no completion interrupt */
			skl_cldma_int_disable(ctx);

			if ((ctx->cl_dev.curr_spib_pos + bytes_left)
							<= ctx->cl_dev.bufsize) {
				ctx->cl_dev.curr_spib_pos += bytes_left;
			} else {
				excess_bytes = bytes_left -
					(ctx->cl_dev.bufsize -
					ctx->cl_dev.curr_spib_pos);
				ctx->cl_dev.curr_spib_pos = excess_bytes;
			}

			size = bytes_left;
			skl_cldma_fill_buffer(ctx, size,
					curr_pos, false, start);
		}
		bytes_left -= size;
		curr_pos = curr_pos + size;
		if (!wait)
			return bytes_left;
	}

	return bytes_left;
}

/*
 * IRQ-context handler for the CL DMA stream: translate the stream status
 * into wake_status and wake the waiter in skl_cldma_wait_interruptible().
 */
void skl_cldma_process_intr(struct sst_dsp *ctx)
{
	u8 cl_dma_intr_status;

	cl_dma_intr_status =
		sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_CL_SD_STS);

	if (!(cl_dma_intr_status & SKL_CL_DMA_SD_INT_COMPLETE))
		ctx->cl_dev.wake_status = SKL_CL_DMA_ERR;
	else
		ctx->cl_dev.wake_status = SKL_CL_DMA_BUF_COMPLETE;

	ctx->cl_dev.wait_condition = true;
	wake_up(&ctx->cl_dev.wait_queue);
}

/*
 * One-time CL DMA setup: install the ops table, allocate the SG data
 * buffer and the BDL buffer, build the BDL entries, program the stream
 * controller, and initialise the ring state and wait queue.  Returns 0
 * on success or a negative errno (the data buffer is freed if the BDL
 * allocation fails).
 */
int skl_cldma_prepare(struct sst_dsp *ctx)
{
	int ret;
	__le32 *bdl;

	ctx->cl_dev.bufsize = SKL_MAX_BUFFER_SIZE;

	/* Allocate cl ops */
	ctx->cl_dev.ops.cl_setup_bdle = skl_cldma_setup_bdle;
	ctx->cl_dev.ops.cl_setup_controller = skl_cldma_setup_controller;
	ctx->cl_dev.ops.cl_setup_spb = skl_cldma_setup_spb;
	ctx->cl_dev.ops.cl_cleanup_spb = skl_cldma_cleanup_spb;
	ctx->cl_dev.ops.cl_trigger = skl_cldma_stream_run;
	ctx->cl_dev.ops.cl_cleanup_controller = skl_cldma_cleanup;
	ctx->cl_dev.ops.cl_copy_to_dmabuf = skl_cldma_copy_to_buf;
	ctx->cl_dev.ops.cl_stop_dma = skl_cldma_stop;

	/* Allocate buffer*/
	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, ctx->dev,
			ctx->cl_dev.bufsize, &ctx->cl_dev.dmab_data);
	if (ret < 0) {
		dev_err(ctx->dev, "Alloc buffer for base fw failed: %x\n", ret);
		return ret;
	}

	/* Setup Code loader BDL */
	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, ctx->dev,
			BDL_SIZE, &ctx->cl_dev.dmab_bdl);
	if (ret < 0) {
		dev_err(ctx->dev, "Alloc buffer for blde failed: %x\n", ret);
		ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
		return ret;
	}
	bdl = (__le32 *)ctx->cl_dev.dmab_bdl.area;

	/* Allocate BDLs */
	ctx->cl_dev.ops.cl_setup_bdle(ctx, &ctx->cl_dev.dmab_data,
			&bdl, ctx->cl_dev.bufsize, 1);
	ctx->cl_dev.ops.cl_setup_controller(ctx, &ctx->cl_dev.dmab_bdl,
			ctx->cl_dev.bufsize, ctx->cl_dev.frags);

	ctx->cl_dev.curr_spib_pos = 0;
	ctx->cl_dev.dma_buffer_offset = 0;
	init_waitqueue_head(&ctx->cl_dev.wait_queue);

	return ret;
}
linux-master
sound/soc/intel/skylake/skl-sst-cldma.c
// SPDX-License-Identifier: GPL-2.0-only /* * skl.c - Implementation of ASoC Intel SKL HD Audio driver * * Copyright (C) 2014-2015 Intel Corp * Author: Jeeja KP <[email protected]> * * Derived mostly from Intel HDA driver with following copyrights: * Copyright (c) 2004 Takashi Iwai <[email protected]> * PeiSen Hou <[email protected]> * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/module.h> #include <linux/pci.h> #include <linux/pm_runtime.h> #include <linux/platform_device.h> #include <linux/firmware.h> #include <linux/delay.h> #include <sound/pcm.h> #include <sound/soc-acpi.h> #include <sound/soc-acpi-intel-match.h> #include <sound/hda_register.h> #include <sound/hdaudio.h> #include <sound/hda_i915.h> #include <sound/hda_codec.h> #include <sound/intel-nhlt.h> #include <sound/intel-dsp-config.h> #include "skl.h" #include "skl-sst-dsp.h" #include "skl-sst-ipc.h" #if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC) #include "../../../soc/codecs/hdac_hda.h" #endif static int skl_pci_binding; module_param_named(pci_binding, skl_pci_binding, int, 0444); MODULE_PARM_DESC(pci_binding, "PCI binding (0=auto, 1=only legacy, 2=only asoc"); /* * initialize the PCI registers */ static void skl_update_pci_byte(struct pci_dev *pci, unsigned int reg, unsigned char mask, unsigned char val) { unsigned char data; pci_read_config_byte(pci, reg, &data); data &= ~mask; data |= (val & mask); pci_write_config_byte(pci, reg, data); } static void skl_init_pci(struct skl_dev *skl) { struct hdac_bus *bus = skl_to_bus(skl); /* * Clear bits 0-2 of PCI register TCSEL (at offset 0x44) * TCSEL == Traffic Class Select Register, which sets PCI express QOS * Ensuring these bits are 0 clears playback static on some HD Audio * codecs. * The PCI register TCSEL is defined in the Intel manuals. 
*/ dev_dbg(bus->dev, "Clearing TCSEL\n"); skl_update_pci_byte(skl->pci, AZX_PCIREG_TCSEL, 0x07, 0); } static void update_pci_dword(struct pci_dev *pci, unsigned int reg, u32 mask, u32 val) { u32 data = 0; pci_read_config_dword(pci, reg, &data); data &= ~mask; data |= (val & mask); pci_write_config_dword(pci, reg, data); } /* * skl_enable_miscbdcge - enable/dsiable CGCTL.MISCBDCGE bits * * @dev: device pointer * @enable: enable/disable flag */ static void skl_enable_miscbdcge(struct device *dev, bool enable) { struct pci_dev *pci = to_pci_dev(dev); u32 val; val = enable ? AZX_CGCTL_MISCBDCGE_MASK : 0; update_pci_dword(pci, AZX_PCIREG_CGCTL, AZX_CGCTL_MISCBDCGE_MASK, val); } /** * skl_clock_power_gating: Enable/Disable clock and power gating * * @dev: Device pointer * @enable: Enable/Disable flag */ static void skl_clock_power_gating(struct device *dev, bool enable) { struct pci_dev *pci = to_pci_dev(dev); struct hdac_bus *bus = pci_get_drvdata(pci); u32 val; /* Update PDCGE bit of CGCTL register */ val = enable ? AZX_CGCTL_ADSPDCGE : 0; update_pci_dword(pci, AZX_PCIREG_CGCTL, AZX_CGCTL_ADSPDCGE, val); /* Update L1SEN bit of EM2 register */ val = enable ? AZX_REG_VS_EM2_L1SEN : 0; snd_hdac_chip_updatel(bus, VS_EM2, AZX_REG_VS_EM2_L1SEN, val); /* Update ADSPPGD bit of PGCTL register */ val = enable ? 
0 : AZX_PGCTL_ADSPPGD; update_pci_dword(pci, AZX_PCIREG_PGCTL, AZX_PGCTL_ADSPPGD, val); } /* * While performing reset, controller may not come back properly causing * issues, so recommendation is to set CGCTL.MISCBDCGE to 0 then do reset * (init chip) and then again set CGCTL.MISCBDCGE to 1 */ static int skl_init_chip(struct hdac_bus *bus, bool full_reset) { struct hdac_ext_link *hlink; int ret; snd_hdac_set_codec_wakeup(bus, true); skl_enable_miscbdcge(bus->dev, false); ret = snd_hdac_bus_init_chip(bus, full_reset); /* Reset stream-to-link mapping */ list_for_each_entry(hlink, &bus->hlink_list, list) writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV); skl_enable_miscbdcge(bus->dev, true); snd_hdac_set_codec_wakeup(bus, false); return ret; } void skl_update_d0i3c(struct device *dev, bool enable) { struct pci_dev *pci = to_pci_dev(dev); struct hdac_bus *bus = pci_get_drvdata(pci); u8 reg; int timeout = 50; reg = snd_hdac_chip_readb(bus, VS_D0I3C); /* Do not write to D0I3C until command in progress bit is cleared */ while ((reg & AZX_REG_VS_D0I3C_CIP) && --timeout) { udelay(10); reg = snd_hdac_chip_readb(bus, VS_D0I3C); } /* Highly unlikely. But if it happens, flag error explicitly */ if (!timeout) { dev_err(bus->dev, "Before D0I3C update: D0I3C CIP timeout\n"); return; } if (enable) reg = reg | AZX_REG_VS_D0I3C_I3; else reg = reg & (~AZX_REG_VS_D0I3C_I3); snd_hdac_chip_writeb(bus, VS_D0I3C, reg); timeout = 50; /* Wait for cmd in progress to be cleared before exiting the function */ reg = snd_hdac_chip_readb(bus, VS_D0I3C); while ((reg & AZX_REG_VS_D0I3C_CIP) && --timeout) { udelay(10); reg = snd_hdac_chip_readb(bus, VS_D0I3C); } /* Highly unlikely. 
But if it happens, flag error explicitly */ if (!timeout) { dev_err(bus->dev, "After D0I3C update: D0I3C CIP timeout\n"); return; } dev_dbg(bus->dev, "D0I3C register = 0x%x\n", snd_hdac_chip_readb(bus, VS_D0I3C)); } /** * skl_dum_set - set DUM bit in EM2 register * @bus: HD-audio core bus * * Addresses incorrect position reporting for capture streams. * Used on device power up. */ static void skl_dum_set(struct hdac_bus *bus) { /* For the DUM bit to be set, CRST needs to be out of reset state */ if (!(snd_hdac_chip_readb(bus, GCTL) & AZX_GCTL_RESET)) { skl_enable_miscbdcge(bus->dev, false); snd_hdac_bus_exit_link_reset(bus); skl_enable_miscbdcge(bus->dev, true); } snd_hdac_chip_updatel(bus, VS_EM2, AZX_VS_EM2_DUM, AZX_VS_EM2_DUM); } /* called from IRQ */ static void skl_stream_update(struct hdac_bus *bus, struct hdac_stream *hstr) { snd_pcm_period_elapsed(hstr->substream); } static irqreturn_t skl_interrupt(int irq, void *dev_id) { struct hdac_bus *bus = dev_id; u32 status; if (!pm_runtime_active(bus->dev)) return IRQ_NONE; spin_lock(&bus->reg_lock); status = snd_hdac_chip_readl(bus, INTSTS); if (status == 0 || status == 0xffffffff) { spin_unlock(&bus->reg_lock); return IRQ_NONE; } /* clear rirb int */ status = snd_hdac_chip_readb(bus, RIRBSTS); if (status & RIRB_INT_MASK) { if (status & RIRB_INT_RESPONSE) snd_hdac_bus_update_rirb(bus); snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK); } spin_unlock(&bus->reg_lock); return snd_hdac_chip_readl(bus, INTSTS) ? 
IRQ_WAKE_THREAD : IRQ_HANDLED; } static irqreturn_t skl_threaded_handler(int irq, void *dev_id) { struct hdac_bus *bus = dev_id; u32 status; status = snd_hdac_chip_readl(bus, INTSTS); snd_hdac_bus_handle_stream_irq(bus, status, skl_stream_update); return IRQ_HANDLED; } static int skl_acquire_irq(struct hdac_bus *bus, int do_disconnect) { struct skl_dev *skl = bus_to_skl(bus); int ret; ret = request_threaded_irq(skl->pci->irq, skl_interrupt, skl_threaded_handler, IRQF_SHARED, KBUILD_MODNAME, bus); if (ret) { dev_err(bus->dev, "unable to grab IRQ %d, disabling device\n", skl->pci->irq); return ret; } bus->irq = skl->pci->irq; pci_intx(skl->pci, 1); return 0; } static int skl_suspend_late(struct device *dev) { struct pci_dev *pci = to_pci_dev(dev); struct hdac_bus *bus = pci_get_drvdata(pci); struct skl_dev *skl = bus_to_skl(bus); return skl_suspend_late_dsp(skl); } #ifdef CONFIG_PM static int _skl_suspend(struct hdac_bus *bus) { struct skl_dev *skl = bus_to_skl(bus); struct pci_dev *pci = to_pci_dev(bus->dev); int ret; snd_hdac_ext_bus_link_power_down_all(bus); ret = skl_suspend_dsp(skl); if (ret < 0) return ret; snd_hdac_bus_stop_chip(bus); update_pci_dword(pci, AZX_PCIREG_PGCTL, AZX_PGCTL_LSRMD_MASK, AZX_PGCTL_LSRMD_MASK); skl_enable_miscbdcge(bus->dev, false); snd_hdac_bus_enter_link_reset(bus); skl_enable_miscbdcge(bus->dev, true); skl_cleanup_resources(skl); return 0; } static int _skl_resume(struct hdac_bus *bus) { struct skl_dev *skl = bus_to_skl(bus); skl_init_pci(skl); skl_dum_set(bus); skl_init_chip(bus, true); return skl_resume_dsp(skl); } #endif #ifdef CONFIG_PM_SLEEP /* * power management */ static int skl_suspend(struct device *dev) { struct pci_dev *pci = to_pci_dev(dev); struct hdac_bus *bus = pci_get_drvdata(pci); struct skl_dev *skl = bus_to_skl(bus); int ret; /* * Do not suspend if streams which are marked ignore suspend are * running, we need to save the state for these and continue */ if (skl->supend_active) { /* turn off the links and stop the 
CORB/RIRB DMA if it is On */ snd_hdac_ext_bus_link_power_down_all(bus); if (bus->cmd_dma_state) snd_hdac_bus_stop_cmd_io(bus); enable_irq_wake(bus->irq); pci_save_state(pci); } else { ret = _skl_suspend(bus); if (ret < 0) return ret; skl->fw_loaded = false; } return 0; } static int skl_resume(struct device *dev) { struct pci_dev *pci = to_pci_dev(dev); struct hdac_bus *bus = pci_get_drvdata(pci); struct skl_dev *skl = bus_to_skl(bus); struct hdac_ext_link *hlink; int ret; /* * resume only when we are not in suspend active, otherwise need to * restore the device */ if (skl->supend_active) { pci_restore_state(pci); snd_hdac_ext_bus_link_power_up_all(bus); disable_irq_wake(bus->irq); /* * turn On the links which are On before active suspend * and start the CORB/RIRB DMA if On before * active suspend. */ list_for_each_entry(hlink, &bus->hlink_list, list) { if (hlink->ref_count) snd_hdac_ext_bus_link_power_up(hlink); } ret = 0; if (bus->cmd_dma_state) snd_hdac_bus_init_cmd_io(bus); } else { ret = _skl_resume(bus); } return ret; } #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_PM static int skl_runtime_suspend(struct device *dev) { struct pci_dev *pci = to_pci_dev(dev); struct hdac_bus *bus = pci_get_drvdata(pci); dev_dbg(bus->dev, "in %s\n", __func__); return _skl_suspend(bus); } static int skl_runtime_resume(struct device *dev) { struct pci_dev *pci = to_pci_dev(dev); struct hdac_bus *bus = pci_get_drvdata(pci); dev_dbg(bus->dev, "in %s\n", __func__); return _skl_resume(bus); } #endif /* CONFIG_PM */ static const struct dev_pm_ops skl_pm = { SET_SYSTEM_SLEEP_PM_OPS(skl_suspend, skl_resume) SET_RUNTIME_PM_OPS(skl_runtime_suspend, skl_runtime_resume, NULL) .suspend_late = skl_suspend_late, }; /* * destructor */ static int skl_free(struct hdac_bus *bus) { struct skl_dev *skl = bus_to_skl(bus); skl->init_done = 0; /* to be sure */ snd_hdac_stop_streams_and_chip(bus); if (bus->irq >= 0) free_irq(bus->irq, (void *)bus); snd_hdac_bus_free_stream_pages(bus); 
snd_hdac_ext_stream_free_all(bus); snd_hdac_ext_link_free_all(bus); if (bus->remap_addr) iounmap(bus->remap_addr); pci_release_regions(skl->pci); pci_disable_device(skl->pci); snd_hdac_ext_bus_exit(bus); if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) { snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false); snd_hdac_i915_exit(bus); } return 0; } /* * For each ssp there are 3 clocks (mclk/sclk/sclkfs). * e.g. for ssp0, clocks will be named as * "ssp0_mclk", "ssp0_sclk", "ssp0_sclkfs" * So for skl+, there are 6 ssps, so 18 clocks will be created. */ static struct skl_ssp_clk skl_ssp_clks[] = { {.name = "ssp0_mclk"}, {.name = "ssp1_mclk"}, {.name = "ssp2_mclk"}, {.name = "ssp3_mclk"}, {.name = "ssp4_mclk"}, {.name = "ssp5_mclk"}, {.name = "ssp0_sclk"}, {.name = "ssp1_sclk"}, {.name = "ssp2_sclk"}, {.name = "ssp3_sclk"}, {.name = "ssp4_sclk"}, {.name = "ssp5_sclk"}, {.name = "ssp0_sclkfs"}, {.name = "ssp1_sclkfs"}, {.name = "ssp2_sclkfs"}, {.name = "ssp3_sclkfs"}, {.name = "ssp4_sclkfs"}, {.name = "ssp5_sclkfs"}, }; static struct snd_soc_acpi_mach *skl_find_hda_machine(struct skl_dev *skl, struct snd_soc_acpi_mach *machines) { struct snd_soc_acpi_mach *mach; /* point to common table */ mach = snd_soc_acpi_intel_hda_machines; /* all entries in the machine table use the same firmware */ mach->fw_filename = machines->fw_filename; return mach; } static int skl_find_machine(struct skl_dev *skl, void *driver_data) { struct hdac_bus *bus = skl_to_bus(skl); struct snd_soc_acpi_mach *mach = driver_data; struct skl_machine_pdata *pdata; mach = snd_soc_acpi_find_machine(mach); if (!mach) { dev_dbg(bus->dev, "No matching I2S machine driver found\n"); mach = skl_find_hda_machine(skl, driver_data); if (!mach) { dev_err(bus->dev, "No matching machine driver found\n"); return -ENODEV; } } skl->mach = mach; skl->fw_name = mach->fw_filename; pdata = mach->pdata; if (pdata) { skl->use_tplg_pcm = pdata->use_tplg_pcm; mach->mach_params.dmic_num = intel_nhlt_get_dmic_geo(&skl->pci->dev, 
skl->nhlt); } return 0; } static int skl_machine_device_register(struct skl_dev *skl) { struct snd_soc_acpi_mach *mach = skl->mach; struct hdac_bus *bus = skl_to_bus(skl); struct platform_device *pdev; int ret; pdev = platform_device_alloc(mach->drv_name, -1); if (pdev == NULL) { dev_err(bus->dev, "platform device alloc failed\n"); return -EIO; } mach->mach_params.platform = dev_name(bus->dev); mach->mach_params.codec_mask = bus->codec_mask; ret = platform_device_add_data(pdev, (const void *)mach, sizeof(*mach)); if (ret) { dev_err(bus->dev, "failed to add machine device platform data\n"); platform_device_put(pdev); return ret; } ret = platform_device_add(pdev); if (ret) { dev_err(bus->dev, "failed to add machine device\n"); platform_device_put(pdev); return -EIO; } skl->i2s_dev = pdev; return 0; } static void skl_machine_device_unregister(struct skl_dev *skl) { if (skl->i2s_dev) platform_device_unregister(skl->i2s_dev); } static int skl_dmic_device_register(struct skl_dev *skl) { struct hdac_bus *bus = skl_to_bus(skl); struct platform_device *pdev; int ret; /* SKL has one dmic port, so allocate dmic device for this */ pdev = platform_device_alloc("dmic-codec", -1); if (!pdev) { dev_err(bus->dev, "failed to allocate dmic device\n"); return -ENOMEM; } ret = platform_device_add(pdev); if (ret) { dev_err(bus->dev, "failed to add dmic device: %d\n", ret); platform_device_put(pdev); return ret; } skl->dmic_dev = pdev; return 0; } static void skl_dmic_device_unregister(struct skl_dev *skl) { if (skl->dmic_dev) platform_device_unregister(skl->dmic_dev); } static struct skl_clk_parent_src skl_clk_src[] = { { .clk_id = SKL_XTAL, .name = "xtal" }, { .clk_id = SKL_CARDINAL, .name = "cardinal", .rate = 24576000 }, { .clk_id = SKL_PLL, .name = "pll", .rate = 96000000 }, }; struct skl_clk_parent_src *skl_get_parent_clk(u8 clk_id) { unsigned int i; for (i = 0; i < ARRAY_SIZE(skl_clk_src); i++) { if (skl_clk_src[i].clk_id == clk_id) return &skl_clk_src[i]; } return NULL; } static 
void init_skl_xtal_rate(int pci_id) { switch (pci_id) { case PCI_DEVICE_ID_INTEL_HDA_SKL_LP: case PCI_DEVICE_ID_INTEL_HDA_KBL_LP: skl_clk_src[0].rate = 24000000; return; default: skl_clk_src[0].rate = 19200000; return; } } static int skl_clock_device_register(struct skl_dev *skl) { struct platform_device_info pdevinfo = {NULL}; struct skl_clk_pdata *clk_pdata; if (!skl->nhlt) return 0; clk_pdata = devm_kzalloc(&skl->pci->dev, sizeof(*clk_pdata), GFP_KERNEL); if (!clk_pdata) return -ENOMEM; init_skl_xtal_rate(skl->pci->device); clk_pdata->parent_clks = skl_clk_src; clk_pdata->ssp_clks = skl_ssp_clks; clk_pdata->num_clks = ARRAY_SIZE(skl_ssp_clks); /* Query NHLT to fill the rates and parent */ skl_get_clks(skl, clk_pdata->ssp_clks); clk_pdata->pvt_data = skl; /* Register Platform device */ pdevinfo.parent = &skl->pci->dev; pdevinfo.id = -1; pdevinfo.name = "skl-ssp-clk"; pdevinfo.data = clk_pdata; pdevinfo.size_data = sizeof(*clk_pdata); skl->clk_dev = platform_device_register_full(&pdevinfo); return PTR_ERR_OR_ZERO(skl->clk_dev); } static void skl_clock_device_unregister(struct skl_dev *skl) { if (skl->clk_dev) platform_device_unregister(skl->clk_dev); } #if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC) #define IDISP_INTEL_VENDOR_ID 0x80860000 /* * load the legacy codec driver */ static void load_codec_module(struct hda_codec *codec) { #ifdef MODULE char modalias[MODULE_NAME_LEN]; const char *mod = NULL; snd_hdac_codec_modalias(&codec->core, modalias, sizeof(modalias)); mod = modalias; dev_dbg(&codec->core.dev, "loading %s codec module\n", mod); request_module(mod); #endif } #endif /* CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC */ static struct hda_codec *skl_codec_device_init(struct hdac_bus *bus, int addr) { struct hda_codec *codec; int ret; codec = snd_hda_codec_device_init(to_hda_bus(bus), addr, "ehdaudio%dD%d", bus->idx, addr); if (IS_ERR(codec)) { dev_err(bus->dev, "device init failed for hdac device\n"); return codec; } codec->core.type = 
HDA_DEV_ASOC;
	ret = snd_hdac_device_register(&codec->core);
	if (ret) {
		dev_err(bus->dev, "failed to register hdac device\n");
		put_device(&codec->core.dev);
		return ERR_PTR(ret);
	}

	return codec;
}

/*
 * Probe the given codec address
 *
 * Sends a GET_PARAMETER(VENDOR_ID) verb to the root node of the codec at
 * @addr and, on a valid response, instantiates the codec device.  Returns
 * 0 on success, -EIO when no codec answers, or a negative errno from the
 * device-init path.
 */
static int probe_codec(struct hdac_bus *bus, int addr)
{
	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
	unsigned int res = -1;
#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
	struct skl_dev *skl = bus_to_skl(bus);
	struct hdac_hda_priv *hda_codec;
#endif
	struct hda_codec *codec;

	/* serialize the verb exchange on the CORB/RIRB with other senders */
	mutex_lock(&bus->cmd_mutex);
	snd_hdac_bus_send_cmd(bus, cmd);
	snd_hdac_bus_get_response(bus, addr, &res);
	mutex_unlock(&bus->cmd_mutex);
	if (res == -1)
		return -EIO;
	dev_dbg(bus->dev, "codec #%d probed OK: %x\n", addr, res);

#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
	hda_codec = devm_kzalloc(&skl->pci->dev, sizeof(*hda_codec),
				 GFP_KERNEL);
	if (!hda_codec)
		return -ENOMEM;

	codec = skl_codec_device_init(bus, addr);
	if (IS_ERR(codec))
		return PTR_ERR(codec);

	hda_codec->codec = codec;
	dev_set_drvdata(&codec->core.dev, hda_codec);

	/* use legacy bus only for HDA codecs, idisp uses ext bus */
	if ((res & 0xFFFF0000) != IDISP_INTEL_VENDOR_ID) {
		codec->core.type = HDA_DEV_LEGACY;
		load_codec_module(hda_codec->codec);
	}
	return 0;
#else
	codec = skl_codec_device_init(bus, addr);
	return PTR_ERR_OR_ZERO(codec);
#endif /* CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC */
}

/*
 * Codec initialization: walk every slot set in bus->codec_mask and probe
 * it; slots that fail to probe are masked out and the controller is reset
 * to recover the link.
 */
static void skl_codec_create(struct hdac_bus *bus)
{
	int c, max_slots;

	max_slots = HDA_MAX_CODECS;

	/* First try to probe all given codec slots */
	for (c = 0; c < max_slots; c++) {
		if ((bus->codec_mask & (1 << c))) {
			if (probe_codec(bus, c) < 0) {
				/*
				 * Some BIOSen give you wrong codec addresses
				 * that don't exist
				 */
				dev_warn(bus->dev,
					 "Codec #%d probe error; disabling it...\n",
					 c);
				bus->codec_mask &= ~(1 << c);
				/*
				 * More badly, accessing to a non-existing
				 * codec often screws up the controller bus,
				 * and disturbs the further communications.
				 * Thus if an error occurs during probing,
				 * better to reset the controller bus to get
				 * back to the sanity state.
				 */
				snd_hdac_bus_stop_chip(bus);
				skl_init_chip(bus, true);
			}
		}
	}
}

/*
 * Power up the display audio path (HDMI codec lives in the GPU) before
 * codec probing.  Returns 0 on success or a negative errno from i915 init.
 */
static int skl_i915_init(struct hdac_bus *bus)
{
	int err;

	/*
	 * The HDMI codec is in GPU so we need to ensure that it is powered
	 * up and ready for probe
	 */
	err = snd_hdac_i915_init(bus);
	if (err < 0)
		return err;

	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);

	return 0;
}

/*
 * Deferred probe work: brings up the chip, detects codecs, registers the
 * platform component and machine device, then releases link refcounts and
 * enables runtime PM.  Runs from skl->probe_work scheduled by skl_probe().
 */
static void skl_probe_work(struct work_struct *work)
{
	struct skl_dev *skl = container_of(work, struct skl_dev, probe_work);
	struct hdac_bus *bus = skl_to_bus(skl);
	struct hdac_ext_link *hlink;
	int err;

	if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
		err = skl_i915_init(bus);
		if (err < 0)
			return;
	}

	skl_init_pci(skl);
	skl_dum_set(bus);

	err = skl_init_chip(bus, true);
	if (err < 0) {
		dev_err(bus->dev, "Init chip failed with err: %d\n", err);
		goto out_err;
	}

	/* codec detection */
	if (!bus->codec_mask)
		dev_info(bus->dev, "no hda codecs found!\n");

	/* create codec instances */
	skl_codec_create(bus);

	/* register platform dai and controls */
	err = skl_platform_register(bus->dev);
	if (err < 0) {
		dev_err(bus->dev, "platform register failed: %d\n", err);
		goto out_err;
	}

	err = skl_machine_device_register(skl);
	if (err < 0) {
		dev_err(bus->dev, "machine register failed: %d\n", err);
		goto out_err;
	}

	/*
	 * we are done probing so decrement link counts
	 */
	list_for_each_entry(hlink, &bus->hlink_list, list)
		snd_hdac_ext_bus_link_put(bus, hlink);

	if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
		snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	/* configure PM */
	pm_runtime_put_noidle(bus->dev);
	pm_runtime_allow(bus->dev);
	skl->init_done = 1;

	return;

out_err:
	if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
		snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
}

/*
 * constructor
 *
 * Allocates the skl_dev (devm, freed with the PCI device), initializes the
 * ext HDA bus and the hda_bus fields, and hands the new context back via
 * @rskl.  On failure the PCI device is disabled again.
 */
static int skl_create(struct pci_dev *pci,
		      struct skl_dev **rskl)
{
	struct hdac_ext_bus_ops *ext_ops = NULL;
	struct skl_dev *skl;
	struct hdac_bus *bus;
	struct hda_bus *hbus;
	int err;

	*rskl = NULL;

	err = pci_enable_device(pci);
	if (err < 0)
		return err;

	skl = devm_kzalloc(&pci->dev, sizeof(*skl), GFP_KERNEL);
	if (!skl) {
		pci_disable_device(pci);
		return -ENOMEM;
	}

	hbus = skl_to_hbus(skl);
	bus = skl_to_bus(skl);
	INIT_LIST_HEAD(&skl->ppl_list);
	INIT_LIST_HEAD(&skl->bind_list);

#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
	ext_ops = snd_soc_hdac_hda_get_ops();
#endif
	snd_hdac_ext_bus_init(bus, &pci->dev, NULL, ext_ops);
	bus->use_posbuf = 1;
	skl->pci = pci;
	INIT_WORK(&skl->probe_work, skl_probe_work);
	bus->bdl_pos_adj = 0;

	mutex_init(&hbus->prepare_mutex);
	hbus->pci = pci;
	hbus->mixer_assigned = -1;
	hbus->modelname = "sklbus";

	*rskl = skl;

	return 0;
}

/*
 * First-stage hardware init: map the MMIO BAR, verify the DSP (PPCAP)
 * capability, grab the interrupt, read stream counts from GCAP, configure
 * DMA masks and allocate stream pages.
 */
static int skl_first_init(struct hdac_bus *bus)
{
	struct skl_dev *skl = bus_to_skl(bus);
	struct pci_dev *pci = skl->pci;
	int err;
	unsigned short gcap;
	int cp_streams, pb_streams, start_idx;

	err = pci_request_regions(pci, "Skylake HD audio");
	if (err < 0)
		return err;

	bus->addr = pci_resource_start(pci, 0);
	bus->remap_addr = pci_ioremap_bar(pci, 0);
	if (bus->remap_addr == NULL) {
		dev_err(bus->dev, "ioremap error\n");
		return -ENXIO;
	}

	snd_hdac_bus_parse_capabilities(bus);

	/* check if PPCAP exists */
	if (!bus->ppcap) {
		dev_err(bus->dev, "bus ppcap not set, HDAudio or DSP not present?\n");
		return -ENODEV;
	}

	if (skl_acquire_irq(bus, 0) < 0)
		return -EBUSY;

	pci_set_master(pci);
	synchronize_irq(bus->irq);

	gcap = snd_hdac_chip_readw(bus, GCAP);
	dev_dbg(bus->dev, "chipset global capabilities = 0x%x\n", gcap);

	/* read number of streams from GCAP register */
	cp_streams = (gcap >> 8) & 0x0f;
	pb_streams = (gcap >> 12) & 0x0f;

	if (!pb_streams && !cp_streams) {
		dev_err(bus->dev, "no streams found in GCAP definitions?\n");
		return -EIO;
	}

	bus->num_streams = cp_streams + pb_streams;

	/* allow 64bit DMA address if supported by H/W */
	if (dma_set_mask_and_coherent(bus->dev, DMA_BIT_MASK(64)))
		dma_set_mask_and_coherent(bus->dev, DMA_BIT_MASK(32));
	dma_set_max_seg_size(bus->dev, UINT_MAX);

	/* initialize streams: capture streams occupy the low indices */
	snd_hdac_ext_stream_init_all
		(bus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE);
	start_idx = cp_streams;
	snd_hdac_ext_stream_init_all
		(bus, start_idx, pb_streams, SNDRV_PCM_STREAM_PLAYBACK);

	err = snd_hdac_bus_alloc_stream_pages(bus);
	if (err < 0)
		return err;

	return 0;
}

/*
 * PCI probe entry point.  Honors the skl_pci_binding module parameter,
 * creates and initializes the bus, loads NHLT/clock/DSP/DMIC resources and
 * finally schedules the deferred probe work.  Errors unwind in reverse
 * order through the goto ladder.
 */
static int skl_probe(struct pci_dev *pci,
		     const struct pci_device_id *pci_id)
{
	struct skl_dev *skl;
	struct hdac_bus *bus = NULL;
	int err;

	switch (skl_pci_binding) {
	case SND_SKL_PCI_BIND_AUTO:
		err = snd_intel_dsp_driver_probe(pci);
		if (err != SND_INTEL_DSP_DRIVER_ANY &&
		    err != SND_INTEL_DSP_DRIVER_SST)
			return -ENODEV;
		break;
	case SND_SKL_PCI_BIND_LEGACY:
		dev_info(&pci->dev, "Module parameter forced binding with HDAudio legacy, aborting probe\n");
		return -ENODEV;
	case SND_SKL_PCI_BIND_ASOC:
		dev_info(&pci->dev, "Module parameter forced binding with SKL driver, bypassed detection logic\n");
		break;
	default:
		dev_err(&pci->dev, "invalid value for skl_pci_binding module parameter, ignored\n");
		break;
	}

	/* we use ext core ops, so provide NULL for ops here */
	err = skl_create(pci, &skl);
	if (err < 0)
		return err;

	bus = skl_to_bus(skl);

	err = skl_first_init(bus);
	if (err < 0) {
		dev_err(bus->dev, "skl_first_init failed with err: %d\n", err);
		goto out_free;
	}

	skl->pci_id = pci->device;

	device_disable_async_suspend(bus->dev);

	skl->nhlt = intel_nhlt_init(bus->dev);

	if (skl->nhlt == NULL) {
#if !IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
		/* NHLT is mandatory when no HDAudio codec fallback exists */
		dev_err(bus->dev, "no nhlt info found\n");
		err = -ENODEV;
		goto out_free;
#else
		dev_warn(bus->dev, "no nhlt info found, continuing to try to enable HDAudio codec\n");
#endif
	} else {
		err = skl_nhlt_create_sysfs(skl);
		if (err < 0) {
			dev_err(bus->dev, "skl_nhlt_create_sysfs failed with err: %d\n", err);
			goto out_nhlt_free;
		}
		skl_nhlt_update_topology_bin(skl);

		/* create device for dsp clk */
		err = skl_clock_device_register(skl);
		if (err < 0) {
			dev_err(bus->dev, "skl_clock_device_register failed with err: %d\n",
				err);
			goto out_clk_free;
		}
	}

	pci_set_drvdata(skl->pci, bus);

	err = skl_find_machine(skl, (void *)pci_id->driver_data);
	if (err < 0) {
		dev_err(bus->dev, "skl_find_machine failed with err: %d\n", err);
		goto out_nhlt_free;
	}

	err = skl_init_dsp(skl);
	if (err < 0) {
		dev_dbg(bus->dev, "error failed to register dsp\n");
		goto out_nhlt_free;
	}
	skl->enable_miscbdcge = skl_enable_miscbdcge;
	skl->clock_power_gating = skl_clock_power_gating;

	if (bus->mlcap)
		snd_hdac_ext_bus_get_ml_capabilities(bus);

	/* create device for soc dmic */
	err = skl_dmic_device_register(skl);
	if (err < 0) {
		dev_err(bus->dev, "skl_dmic_device_register failed with err: %d\n",
			err);
		goto out_dsp_free;
	}

	schedule_work(&skl->probe_work);

	return 0;

out_dsp_free:
	skl_free_dsp(skl);
out_clk_free:
	skl_clock_device_unregister(skl);
out_nhlt_free:
	if (skl->nhlt)
		intel_nhlt_free(skl->nhlt);
out_free:
	skl_free(bus);

	return err;
}

/*
 * Shutdown hook: quiesce streams and links and stop the chip, but only if
 * the deferred probe completed (init_done).
 */
static void skl_shutdown(struct pci_dev *pci)
{
	struct hdac_bus *bus = pci_get_drvdata(pci);
	struct hdac_stream *s;
	struct hdac_ext_stream *stream;
	struct skl_dev *skl;

	if (!bus)
		return;

	skl = bus_to_skl(bus);

	if (!skl->init_done)
		return;

	snd_hdac_stop_streams(bus);
	snd_hdac_ext_bus_link_power_down_all(bus);
	skl_dsp_sleep(skl->dsp);

	list_for_each_entry(s, &bus->stream_list, list) {
		stream = stream_to_hdac_ext_stream(s);
		snd_hdac_ext_stream_decouple(bus, stream, false);
	}

	snd_hdac_bus_stop_chip(bus);
}

/*
 * PCI remove hook: cancels the probe work first so teardown cannot race
 * with deferred initialization, then unregisters everything skl_probe()
 * registered.
 */
static void skl_remove(struct pci_dev *pci)
{
	struct hdac_bus *bus = pci_get_drvdata(pci);
	struct skl_dev *skl = bus_to_skl(bus);

	cancel_work_sync(&skl->probe_work);

	pm_runtime_get_noresume(&pci->dev);

	/* codec removal, invoke bus_device_remove */
	snd_hdac_ext_bus_device_remove(bus);

	skl_platform_unregister(&pci->dev);
	skl_free_dsp(skl);
	skl_machine_device_unregister(skl);
	skl_dmic_device_unregister(skl);
	skl_clock_device_unregister(skl);
	skl_nhlt_remove_sysfs(skl);
	if (skl->nhlt)
		intel_nhlt_free(skl->nhlt);
	skl_free(bus);
}

/* PCI IDs: driver_data carries the per-platform machine table */
static const struct pci_device_id skl_ids[] = {
#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKL)
	{ PCI_DEVICE_DATA(INTEL, HDA_SKL_LP, &snd_soc_acpi_intel_skl_machines) },
#endif
#if IS_ENABLED(CONFIG_SND_SOC_INTEL_APL)
	{ PCI_DEVICE_DATA(INTEL, HDA_APL, &snd_soc_acpi_intel_bxt_machines) },
#endif
#if IS_ENABLED(CONFIG_SND_SOC_INTEL_KBL)
	{ PCI_DEVICE_DATA(INTEL, HDA_KBL_LP, &snd_soc_acpi_intel_kbl_machines) },
#endif
#if IS_ENABLED(CONFIG_SND_SOC_INTEL_GLK)
	{ PCI_DEVICE_DATA(INTEL, HDA_GML, &snd_soc_acpi_intel_glk_machines) },
#endif
#if IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL)
	{ PCI_DEVICE_DATA(INTEL, HDA_CNL_LP, &snd_soc_acpi_intel_cnl_machines) },
#endif
#if IS_ENABLED(CONFIG_SND_SOC_INTEL_CFL)
	{ PCI_DEVICE_DATA(INTEL, HDA_CNL_H, &snd_soc_acpi_intel_cnl_machines) },
#endif
#if IS_ENABLED(CONFIG_SND_SOC_INTEL_CML_LP)
	{ PCI_DEVICE_DATA(INTEL, HDA_CML_LP, &snd_soc_acpi_intel_cnl_machines) },
#endif
#if IS_ENABLED(CONFIG_SND_SOC_INTEL_CML_H)
	{ PCI_DEVICE_DATA(INTEL, HDA_CML_H, &snd_soc_acpi_intel_cnl_machines) },
#endif
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, skl_ids);

/* pci_driver definition */
static struct pci_driver skl_driver = {
	.name = KBUILD_MODNAME,
	.id_table = skl_ids,
	.probe = skl_probe,
	.remove = skl_remove,
	.shutdown = skl_shutdown,
	.driver = {
		.pm = &skl_pm,
	},
};
module_pci_driver(skl_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Skylake ASoC HDA driver");
linux-master
sound/soc/intel/skylake/skl.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * cnl-sst-dsp.c - CNL SST library generic function
 *
 * Copyright (C) 2016-17, Intel Corporation.
 * Author: Guneshwor Singh <[email protected]>
 *
 * Modified from:
 *	SKL SST library generic function
 *	Copyright (C) 2014-15, Intel Corporation.
 */
#include <linux/device.h>
#include "../common/sst-dsp.h"
#include "../common/sst-ipc.h"
#include "../common/sst-dsp-priv.h"
#include "cnl-sst-dsp.h"

/* various timeout values (passed to sst_dsp_register_poll) */
#define CNL_DSP_PU_TO		50
#define CNL_DSP_PD_TO		50
#define CNL_DSP_RESET_TO	50

/* Assert CRST for @core_mask in ADSPCS and poll until the reset sticks. */
static int
cnl_dsp_core_set_reset_state(struct sst_dsp *ctx, unsigned int core_mask)
{
	/* update bits */
	sst_dsp_shim_update_bits_unlocked(ctx, CNL_ADSP_REG_ADSPCS,
			CNL_ADSPCS_CRST(core_mask),
			CNL_ADSPCS_CRST(core_mask));

	/* poll with timeout to check if operation successful */
	return sst_dsp_register_poll(ctx,
			CNL_ADSP_REG_ADSPCS,
			CNL_ADSPCS_CRST(core_mask),
			CNL_ADSPCS_CRST(core_mask),
			CNL_DSP_RESET_TO,
			"Set reset");
}

/* Clear CRST for @core_mask in ADSPCS and poll until the cores leave reset. */
static int
cnl_dsp_core_unset_reset_state(struct sst_dsp *ctx, unsigned int core_mask)
{
	/* update bits */
	sst_dsp_shim_update_bits_unlocked(ctx, CNL_ADSP_REG_ADSPCS,
					CNL_ADSPCS_CRST(core_mask), 0);

	/* poll with timeout to check if operation successful */
	return sst_dsp_register_poll(ctx,
			CNL_ADSP_REG_ADSPCS,
			CNL_ADSPCS_CRST(core_mask),
			0,
			CNL_DSP_RESET_TO,
			"Unset reset");
}

/*
 * A core counts as enabled when it is powered (SPA set and CPA acked),
 * not held in reset (CRST clear) and not stalled (CSTALL clear).
 */
static bool is_cnl_dsp_core_enable(struct sst_dsp *ctx, unsigned int core_mask)
{
	int val;
	bool is_enable;

	val = sst_dsp_shim_read_unlocked(ctx, CNL_ADSP_REG_ADSPCS);

	is_enable = (val & CNL_ADSPCS_CPA(core_mask)) &&
			(val & CNL_ADSPCS_SPA(core_mask)) &&
			!(val & CNL_ADSPCS_CRST(core_mask)) &&
			!(val & CNL_ADSPCS_CSTALL(core_mask));

	dev_dbg(ctx->dev, "DSP core(s) enabled? %d: core_mask %#x\n",
		is_enable, core_mask);

	return is_enable;
}

/* Stall the cores first, then put them into reset. */
static int cnl_dsp_reset_core(struct sst_dsp *ctx, unsigned int core_mask)
{
	/* stall core */
	sst_dsp_shim_update_bits_unlocked(ctx, CNL_ADSP_REG_ADSPCS,
			CNL_ADSPCS_CSTALL(core_mask),
			CNL_ADSPCS_CSTALL(core_mask));

	/* set reset state */
	return cnl_dsp_core_set_reset_state(ctx, core_mask);
}

/*
 * Release reset, unstall, then verify the cores actually run; if they do
 * not, put them back into reset and report -EIO.
 */
static int cnl_dsp_start_core(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	/* unset reset state */
	ret = cnl_dsp_core_unset_reset_state(ctx, core_mask);
	if (ret < 0)
		return ret;

	/* run core */
	sst_dsp_shim_update_bits_unlocked(ctx, CNL_ADSP_REG_ADSPCS,
				CNL_ADSPCS_CSTALL(core_mask), 0);

	if (!is_cnl_dsp_core_enable(ctx, core_mask)) {
		cnl_dsp_reset_core(ctx, core_mask);
		dev_err(ctx->dev, "DSP core mask %#x enable failed\n",
			core_mask);
		ret = -EIO;
	}

	return ret;
}

/* Request power (SPA) for @core_mask and poll CPA for the hardware ack. */
static int cnl_dsp_core_power_up(struct sst_dsp *ctx, unsigned int core_mask)
{
	/* update bits */
	sst_dsp_shim_update_bits_unlocked(ctx, CNL_ADSP_REG_ADSPCS,
			CNL_ADSPCS_SPA(core_mask),
			CNL_ADSPCS_SPA(core_mask));

	/* poll with timeout to check if operation successful */
	return sst_dsp_register_poll(ctx, CNL_ADSP_REG_ADSPCS,
				    CNL_ADSPCS_CPA(core_mask),
				    CNL_ADSPCS_CPA(core_mask),
				    CNL_DSP_PU_TO,
				    "Power up");
}

/* Drop the power request and poll until CPA clears. */
static int cnl_dsp_core_power_down(struct sst_dsp *ctx, unsigned int core_mask)
{
	/* update bits */
	sst_dsp_shim_update_bits_unlocked(ctx, CNL_ADSP_REG_ADSPCS,
					CNL_ADSPCS_SPA(core_mask), 0);

	/* poll with timeout to check if operation successful */
	return sst_dsp_register_poll(ctx,
			CNL_ADSP_REG_ADSPCS,
			CNL_ADSPCS_CPA(core_mask),
			0,
			CNL_DSP_PD_TO,
			"Power down");
}

/* Power up and start the cores in @core_mask. */
int cnl_dsp_enable_core(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	/* power up */
	ret = cnl_dsp_core_power_up(ctx, core_mask);
	if (ret < 0) {
		dev_dbg(ctx->dev, "DSP core mask %#x power up failed",
			core_mask);
		return ret;
	}

	return cnl_dsp_start_core(ctx, core_mask);
}

/* Reset, power down, and verify the cores in @core_mask are disabled. */
int cnl_dsp_disable_core(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	ret = cnl_dsp_reset_core(ctx, core_mask);
	if (ret < 0) {
		dev_err(ctx->dev, "DSP core mask %#x reset failed\n",
			core_mask);
		return ret;
	}

	/* power down core*/
	ret = cnl_dsp_core_power_down(ctx, core_mask);
	if (ret < 0) {
		dev_err(ctx->dev, "DSP core mask %#x power down failed\n",
			core_mask);
		return ret;
	}

	if (is_cnl_dsp_core_enable(ctx, core_mask)) {
		dev_err(ctx->dev, "DSP core mask %#x disable failed\n",
			core_mask);
		ret = -EIO;
	}

	return ret;
}

/*
 * Primary interrupt handler.  Reads ADSPIS, stashes it in ctx->intr_status
 * for the threaded handler, and disables the IPC interrupt until the
 * thread has serviced it.  0xffffffff means the device is gone/powered off.
 */
irqreturn_t cnl_dsp_sst_interrupt(int irq, void *dev_id)
{
	struct sst_dsp *ctx = dev_id;
	u32 val;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&ctx->spinlock);

	val = sst_dsp_shim_read_unlocked(ctx, CNL_ADSP_REG_ADSPIS);
	ctx->intr_status = val;

	if (val == 0xffffffff) {
		spin_unlock(&ctx->spinlock);
		return IRQ_NONE;
	}

	if (val & CNL_ADSPIS_IPC) {
		cnl_ipc_int_disable(ctx);
		ret = IRQ_WAKE_THREAD;
	}

	spin_unlock(&ctx->spinlock);

	return ret;
}

/* Tear down: mask interrupts, release the IRQ and disable core 0. */
void cnl_dsp_free(struct sst_dsp *dsp)
{
	cnl_ipc_int_disable(dsp);

	free_irq(dsp->irq, dsp);
	cnl_ipc_op_int_disable(dsp);
	cnl_dsp_disable_core(dsp, SKL_DSP_CORE0_MASK);
}
EXPORT_SYMBOL_GPL(cnl_dsp_free);

/* Unmask the IPC interrupt in ADSPIC. */
void cnl_ipc_int_enable(struct sst_dsp *ctx)
{
	sst_dsp_shim_update_bits(ctx, CNL_ADSP_REG_ADSPIC,
				 CNL_ADSPIC_IPC, CNL_ADSPIC_IPC);
}

/* Mask the IPC interrupt in ADSPIC (unlocked: callable from the ISR). */
void cnl_ipc_int_disable(struct sst_dsp *ctx)
{
	sst_dsp_shim_update_bits_unlocked(ctx, CNL_ADSP_REG_ADSPIC,
					  CNL_ADSPIC_IPC, 0);
}

/* Enable the DONE and BUSY IPC operation interrupts in HIPCCTL. */
void cnl_ipc_op_int_enable(struct sst_dsp *ctx)
{
	/* enable IPC DONE interrupt */
	sst_dsp_shim_update_bits(ctx, CNL_ADSP_REG_HIPCCTL,
				 CNL_ADSP_REG_HIPCCTL_DONE,
				 CNL_ADSP_REG_HIPCCTL_DONE);

	/* enable IPC BUSY interrupt */
	sst_dsp_shim_update_bits(ctx, CNL_ADSP_REG_HIPCCTL,
				 CNL_ADSP_REG_HIPCCTL_BUSY,
				 CNL_ADSP_REG_HIPCCTL_BUSY);
}

/* Disable the DONE and BUSY IPC operation interrupts in HIPCCTL. */
void cnl_ipc_op_int_disable(struct sst_dsp *ctx)
{
	/* disable IPC DONE interrupt */
	sst_dsp_shim_update_bits(ctx, CNL_ADSP_REG_HIPCCTL,
				 CNL_ADSP_REG_HIPCCTL_DONE, 0);

	/* disable IPC BUSY interrupt */
	sst_dsp_shim_update_bits(ctx, CNL_ADSP_REG_HIPCCTL,
				 CNL_ADSP_REG_HIPCCTL_BUSY, 0);
}

/* Return true when an IPC interrupt is pending in ADSPIS. */
bool cnl_ipc_int_status(struct sst_dsp *ctx)
{
	return sst_dsp_shim_read_unlocked(ctx, CNL_ADSP_REG_ADSPIS) &
							CNL_ADSPIS_IPC;
}

/* Disable IPC op interrupts and finalize the generic IPC instance. */
void cnl_ipc_free(struct sst_generic_ipc *ipc)
{
	cnl_ipc_op_int_disable(ipc->dsp);
	sst_ipc_fini(ipc);
}
linux-master
sound/soc/intel/skylake/cnl-sst-dsp.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * skl-debug.c - Debugfs for skl driver
 *
 * Copyright (C) 2016-17 Intel Corp
 */

#include <linux/pci.h>
#include <linux/debugfs.h>
#include <uapi/sound/skl-tplg-interface.h>
#include "skl.h"
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl-topology.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

#define MOD_BUF		PAGE_SIZE
#define FW_REG_BUF	PAGE_SIZE
#define FW_REG_SIZE	0x60

/* Per-device debugfs state; fw_read_buff snapshots the FW register window. */
struct skl_debug {
	struct skl_dev *skl;
	struct device *dev;

	struct dentry *fs;
	struct dentry *modules;
	u8 fw_read_buff[FW_REG_BUF];
};

/*
 * Append a dump of @max_pin module pins to @buf at offset @size; returns
 * the number of bytes appended.  @direction selects the "Input"/"Output"
 * label only.
 */
static ssize_t skl_print_pins(struct skl_module_pin *m_pin, char *buf,
				int max_pin, ssize_t size, bool direction)
{
	int i;
	ssize_t ret = 0;

	for (i = 0; i < max_pin; i++) {
		ret += scnprintf(buf + size, MOD_BUF - size,
				"%s %d\n\tModule %d\n\tInstance %d\n\t"
				"In-used %s\n\tType %s\n"
				"\tState %d\n\tIndex %d\n",
				direction ? "Input Pin:" : "Output Pin:",
				i,
				m_pin[i].id.module_id,
				m_pin[i].id.instance_id,
				m_pin[i].in_use ? "Used" : "Unused",
				m_pin[i].is_dynamic ? "Dynamic" : "Static",
				m_pin[i].pin_state,
				i);
		size += ret;
	}
	return ret;
}

/* Append one audio-format description to @buf at offset @size. */
static ssize_t skl_print_fmt(struct skl_module_fmt *fmt, char *buf,
					ssize_t size, bool direction)
{
	return scnprintf(buf + size, MOD_BUF - size,
			"%s\n\tCh %d\n\tFreq %d\n\tBit depth %d\n\t"
			"Valid bit depth %d\n\tCh config %#x\n\tInterleaving %d\n\t"
			"Sample Type %d\n\tCh Map %#x\n",
			direction ? "Input Format:" : "Output Format:",
			fmt->channels, fmt->s_freq, fmt->bit_depth,
			fmt->valid_bit_depth, fmt->ch_cfg,
			fmt->interleaving_style, fmt->sample_type,
			fmt->ch_map);
}

/*
 * debugfs read for a module widget: formats the full module configuration
 * (ids, resources, formats, pins, pipeline params) into a MOD_BUF-sized
 * buffer and copies the requested window to userspace.
 */
static ssize_t module_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct skl_module_cfg *mconfig = file->private_data;
	struct skl_module *module = mconfig->module;
	struct skl_module_res *res = &module->resources[mconfig->res_idx];
	char *buf;
	ssize_t ret;

	buf = kzalloc(MOD_BUF, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = scnprintf(buf, MOD_BUF,
			"Module:\n\tUUID %pUL\n\tModule id %d\n"
			"\tInstance id %d\n\tPvt_id %d\n", mconfig->guid,
			mconfig->id.module_id, mconfig->id.instance_id,
			mconfig->id.pvt_id);

	ret += scnprintf(buf + ret, MOD_BUF - ret,
			"Resources:\n\tCPC %#x\n\tIBS %#x\n\tOBS %#x\t\n",
			res->cpc, res->ibs, res->obs);

	ret += scnprintf(buf + ret, MOD_BUF - ret,
			"Module data:\n\tCore %d\n\tIn queue %d\n\t"
			"Out queue %d\n\tType %s\n",
			mconfig->core_id, mconfig->max_in_queue,
			mconfig->max_out_queue,
			mconfig->is_loadable ? "loadable" : "inbuilt");

	ret += skl_print_fmt(mconfig->in_fmt, buf, ret, true);
	ret += skl_print_fmt(mconfig->out_fmt, buf, ret, false);

	ret += scnprintf(buf + ret, MOD_BUF - ret,
			"Fixup:\n\tParams %#x\n\tConverter %#x\n",
			mconfig->params_fixup, mconfig->converter);

	ret += scnprintf(buf + ret, MOD_BUF - ret,
			"Module Gateway:\n\tType %#x\n\tVbus %#x\n\tHW conn %#x\n\tSlot %#x\n",
			mconfig->dev_type, mconfig->vbus_id,
			mconfig->hw_conn_type, mconfig->time_slot);

	ret += scnprintf(buf + ret, MOD_BUF - ret,
			"Pipeline:\n\tID %d\n\tPriority %d\n\tConn Type %d\n\t"
			"Pages %#x\n", mconfig->pipe->ppl_id,
			mconfig->pipe->pipe_priority, mconfig->pipe->conn_type,
			mconfig->pipe->memory_pages);

	ret += scnprintf(buf + ret, MOD_BUF - ret,
			"\tParams:\n\t\tHost DMA %d\n\t\tLink DMA %d\n",
			mconfig->pipe->p_params->host_dma_id,
			mconfig->pipe->p_params->link_dma_id);

	ret += scnprintf(buf + ret, MOD_BUF - ret,
			"\tPCM params:\n\t\tCh %d\n\t\tFreq %d\n\t\tFormat %d\n",
			mconfig->pipe->p_params->ch,
			mconfig->pipe->p_params->s_freq,
			mconfig->pipe->p_params->s_fmt);

	ret += scnprintf(buf + ret, MOD_BUF - ret,
			"\tLink %#x\n\tStream %#x\n",
			mconfig->pipe->p_params->linktype,
			mconfig->pipe->p_params->stream);

	ret += scnprintf(buf + ret, MOD_BUF - ret,
			"\tState %d\n\tPassthru %s\n",
			mconfig->pipe->state,
			mconfig->pipe->passthru ? "true" : "false");

	ret += skl_print_pins(mconfig->m_in_pin, buf,
			mconfig->max_in_queue, ret, true);
	ret += skl_print_pins(mconfig->m_out_pin, buf,
			mconfig->max_out_queue, ret, false);

	ret += scnprintf(buf + ret, MOD_BUF - ret,
			"Other:\n\tDomain %d\n\tHomogeneous Input %s\n\t"
			"Homogeneous Output %s\n\tIn Queue Mask %d\n\t"
			"Out Queue Mask %d\n\tDMA ID %d\n\tMem Pages %d\n\t"
			"Module Type %d\n\tModule State %d\n",
			mconfig->domain,
			mconfig->homogenous_inputs ? "true" : "false",
			mconfig->homogenous_outputs ? "true" : "false",
			mconfig->in_queue_mask, mconfig->out_queue_mask,
			mconfig->dma_id, mconfig->mem_pages,
			mconfig->m_state, mconfig->m_type);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);

	kfree(buf);
	return ret;
}

static const struct file_operations mcfg_fops = {
	.open = simple_open,
	.read = module_read,
	.llseek = default_llseek,
};

/* Create the read-only per-widget debugfs entry under "modules". */
void skl_debug_init_module(struct skl_debug *d,
			struct snd_soc_dapm_widget *w,
			struct skl_module_cfg *mconfig)
{
	debugfs_create_file(w->name, 0444, d->modules, mconfig,
			    &mcfg_fops);
}

/*
 * debugfs read for the firmware soft registers: snapshots the window just
 * below the inbox mailbox into fw_read_buff and hex-dumps FW_REG_SIZE
 * bytes of it, 16 bytes per line.
 */
static ssize_t fw_softreg_read(struct file *file, char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	struct skl_debug *d = file->private_data;
	struct sst_dsp *sst = d->skl->dsp;
	size_t w0_stat_sz = sst->addr.w0_stat_sz;
	void __iomem *in_base = sst->mailbox.in_base;
	void __iomem *fw_reg_addr;
	unsigned int offset;
	char *tmp;
	ssize_t ret = 0;

	tmp = kzalloc(FW_REG_BUF, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	fw_reg_addr = in_base - w0_stat_sz;
	memset(d->fw_read_buff, 0, FW_REG_BUF);

	if (w0_stat_sz > 0)
		__ioread32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2);

	for (offset = 0; offset < FW_REG_SIZE; offset += 16) {
		ret += scnprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset);
		hex_dump_to_buffer(d->fw_read_buff + offset, 16, 16, 4,
				   tmp + ret, FW_REG_BUF - ret, 0);
		ret += strlen(tmp + ret);

		/* print newline for each offset */
		if (FW_REG_BUF - ret > 0)
			tmp[ret++] = '\n';
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, tmp, ret);
	kfree(tmp);

	return ret;
}

static const struct file_operations soft_regs_ctrl_fops = {
	.open = simple_open,
	.read = fw_softreg_read,
	.llseek = default_llseek,
};

/*
 * Create the "dsp" debugfs tree (modules dir + fw_soft_regs_rd file).
 * Memory is devm-managed; returns NULL only on allocation failure.
 */
struct skl_debug *skl_debugfs_init(struct skl_dev *skl)
{
	struct skl_debug *d;

	d = devm_kzalloc(&skl->pci->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return NULL;

	/* create the debugfs dir with platform component's debugfs as parent */
	d->fs = debugfs_create_dir("dsp", skl->component->debugfs_root);

	d->skl = skl;
	d->dev = &skl->pci->dev;

	/* now create the module dir */
	d->modules = debugfs_create_dir("modules", d->fs);

	debugfs_create_file("fw_soft_regs_rd", 0444, d->fs, d,
			    &soft_regs_ctrl_fops);

	return d;
}

/* Remove the whole "dsp" debugfs tree. */
void skl_debugfs_exit(struct skl_dev *skl)
{
	struct skl_debug *d = skl->debugfs;

	debugfs_remove_recursive(d->fs);

	d = NULL;
}
linux-master
sound/soc/intel/skylake/skl-debug.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * skl-sst-dsp.c - SKL SST library generic function
 *
 * Copyright (C) 2014-15, Intel Corporation.
 * Author:Rafal Redzimski <[email protected]>
 *	Jeeja KP <[email protected]>
 */
#include <sound/pcm.h>

#include "../common/sst-dsp.h"
#include "../common/sst-ipc.h"
#include "../common/sst-dsp-priv.h"
#include "skl.h"

/* various timeout values (passed to sst_dsp_register_poll) */
#define SKL_DSP_PU_TO		50
#define SKL_DSP_PD_TO		50
#define SKL_DSP_RESET_TO	50

/* Update ctx->sst_state under the context mutex. */
void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state)
{
	mutex_lock(&ctx->mutex);
	ctx->sst_state = state;
	mutex_unlock(&ctx->mutex);
}

/*
 * Initialize core power state and usage count. To be called after
 * successful first boot. Hence core 0 will be running and other cores
 * will be reset
 */
void skl_dsp_init_core_state(struct sst_dsp *ctx)
{
	struct skl_dev *skl = ctx->thread_context;
	int i;

	skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING;
	skl->cores.usage_count[SKL_DSP_CORE0_ID] = 1;

	for (i = SKL_DSP_CORE0_ID + 1; i < skl->cores.count; i++) {
		skl->cores.state[i] = SKL_DSP_RESET;
		skl->cores.usage_count[i] = 0;
	}
}

/* Get the mask for all enabled cores */
unsigned int skl_dsp_get_enabled_cores(struct sst_dsp *ctx)
{
	struct skl_dev *skl = ctx->thread_context;
	unsigned int core_mask, en_cores_mask;
	u32 val;

	core_mask = SKL_DSP_CORES_MASK(skl->cores.count);

	val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS);

	/* Cores having CPA bit set */
	en_cores_mask = (val & SKL_ADSPCS_CPA_MASK(core_mask)) >>
			SKL_ADSPCS_CPA_SHIFT;

	/* And cores having CRST bit cleared */
	en_cores_mask &= (~val & SKL_ADSPCS_CRST_MASK(core_mask)) >>
			SKL_ADSPCS_CRST_SHIFT;

	/* And cores having CSTALL bit cleared */
	en_cores_mask &= (~val & SKL_ADSPCS_CSTALL_MASK(core_mask)) >>
			SKL_ADSPCS_CSTALL_SHIFT;
	en_cores_mask &= core_mask;

	dev_dbg(ctx->dev, "DSP enabled cores mask = %x\n", en_cores_mask);

	return en_cores_mask;
}

/*
 * Assert CRST for @core_mask, poll for completion, and re-read ADSPCS to
 * double-check the reset actually took hold.
 */
static int
skl_dsp_core_set_reset_state(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	/* update bits */
	sst_dsp_shim_update_bits_unlocked(ctx,
			SKL_ADSP_REG_ADSPCS, SKL_ADSPCS_CRST_MASK(core_mask),
			SKL_ADSPCS_CRST_MASK(core_mask));

	/* poll with timeout to check if operation successful */
	ret = sst_dsp_register_poll(ctx,
			SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CRST_MASK(core_mask),
			SKL_ADSPCS_CRST_MASK(core_mask),
			SKL_DSP_RESET_TO,
			"Set reset");
	if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
				SKL_ADSPCS_CRST_MASK(core_mask)) !=
				SKL_ADSPCS_CRST_MASK(core_mask)) {
		dev_err(ctx->dev, "Set reset state failed: core_mask %x\n",
							core_mask);
		ret = -EIO;
	}

	return ret;
}

/* Clear CRST for @core_mask, poll, and verify the cores left reset. */
int skl_dsp_core_unset_reset_state(
		struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	dev_dbg(ctx->dev, "In %s\n", __func__);

	/* update bits */
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
				SKL_ADSPCS_CRST_MASK(core_mask), 0);

	/* poll with timeout to check if operation successful */
	ret = sst_dsp_register_poll(ctx,
			SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CRST_MASK(core_mask),
			0,
			SKL_DSP_RESET_TO,
			"Unset reset");

	if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
				 SKL_ADSPCS_CRST_MASK(core_mask)) != 0) {
		dev_err(ctx->dev, "Unset reset state failed: core_mask %x\n",
				core_mask);
		ret = -EIO;
	}

	return ret;
}

/*
 * A core counts as enabled when powered (CPA and SPA set), out of reset
 * (CRST clear) and not stalled (CSTALL clear).
 */
static bool
is_skl_dsp_core_enable(struct sst_dsp *ctx, unsigned int core_mask)
{
	int val;
	bool is_enable;

	val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS);

	is_enable = ((val & SKL_ADSPCS_CPA_MASK(core_mask)) &&
			(val & SKL_ADSPCS_SPA_MASK(core_mask)) &&
			!(val & SKL_ADSPCS_CRST_MASK(core_mask)) &&
			!(val & SKL_ADSPCS_CSTALL_MASK(core_mask)));

	dev_dbg(ctx->dev, "DSP core(s) enabled? %d : core_mask %x\n",
						is_enable, core_mask);

	return is_enable;
}

/* Stall the cores first, then place them into reset. */
static int skl_dsp_reset_core(struct sst_dsp *ctx, unsigned int core_mask)
{
	/* stall core */
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CSTALL_MASK(core_mask),
			SKL_ADSPCS_CSTALL_MASK(core_mask));

	/* set reset state */
	return skl_dsp_core_set_reset_state(ctx, core_mask);
}

/*
 * Release reset and unstall @core_mask; if the cores fail to come up,
 * put them back into reset and return -EIO.
 */
int skl_dsp_start_core(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	/* unset reset state */
	ret = skl_dsp_core_unset_reset_state(ctx, core_mask);
	if (ret < 0)
		return ret;

	/* run core */
	dev_dbg(ctx->dev, "unstall/run core: core_mask = %x\n", core_mask);
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CSTALL_MASK(core_mask), 0);

	if (!is_skl_dsp_core_enable(ctx, core_mask)) {
		skl_dsp_reset_core(ctx, core_mask);
		dev_err(ctx->dev, "DSP start core failed: core_mask %x\n",
							core_mask);
		ret = -EIO;
	}

	return ret;
}

/* Request power (SPA), poll CPA for the ack, and double-check the result. */
int skl_dsp_core_power_up(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	/* update bits */
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_SPA_MASK(core_mask),
			SKL_ADSPCS_SPA_MASK(core_mask));

	/* poll with timeout to check if operation successful */
	ret = sst_dsp_register_poll(ctx,
			SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CPA_MASK(core_mask),
			SKL_ADSPCS_CPA_MASK(core_mask),
			SKL_DSP_PU_TO,
			"Power up");

	if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
			SKL_ADSPCS_CPA_MASK(core_mask)) !=
			SKL_ADSPCS_CPA_MASK(core_mask)) {
		dev_err(ctx->dev, "DSP core power up failed: core_mask %x\n",
				core_mask);
		ret = -EIO;
	}

	return ret;
}

/* Drop the power request and poll until CPA clears. */
int skl_dsp_core_power_down(struct sst_dsp *ctx, unsigned int core_mask)
{
	/* update bits */
	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
				SKL_ADSPCS_SPA_MASK(core_mask), 0);

	/* poll with timeout to check if operation successful */
	return sst_dsp_register_poll(ctx,
			SKL_ADSP_REG_ADSPCS,
			SKL_ADSPCS_CPA_MASK(core_mask),
			0,
			SKL_DSP_PD_TO,
			"Power down");
}

/* Power up and start the cores in @core_mask. */
int skl_dsp_enable_core(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	/* power up */
	ret = skl_dsp_core_power_up(ctx, core_mask);
	if (ret < 0) {
		dev_err(ctx->dev, "dsp core power up failed: core_mask %x\n",
							core_mask);
		return ret;
	}

	return skl_dsp_start_core(ctx, core_mask);
}

/* Reset, power down, and verify the cores in @core_mask are disabled. */
int skl_dsp_disable_core(struct sst_dsp *ctx, unsigned int core_mask)
{
	int ret;

	ret = skl_dsp_reset_core(ctx, core_mask);
	if (ret < 0) {
		dev_err(ctx->dev, "dsp core reset failed: core_mask %x\n",
							core_mask);
		return ret;
	}

	/* power down core*/
	ret = skl_dsp_core_power_down(ctx, core_mask);
	if (ret < 0) {
		dev_err(ctx->dev, "dsp core power down fail mask %x: %d\n",
							core_mask, ret);
		return ret;
	}

	if (is_skl_dsp_core_enable(ctx, core_mask)) {
		dev_err(ctx->dev, "dsp core disable fail mask %x: %d\n",
							core_mask, ret);
		ret = -EIO;
	}

	return ret;
}

/*
 * Boot core 0: if it is already enabled, cycle it through reset and start;
 * otherwise disable then enable it for a clean power-up.
 */
int skl_dsp_boot(struct sst_dsp *ctx)
{
	int ret;

	if (is_skl_dsp_core_enable(ctx, SKL_DSP_CORE0_MASK)) {
		ret = skl_dsp_reset_core(ctx, SKL_DSP_CORE0_MASK);
		if (ret < 0) {
			dev_err(ctx->dev, "dsp core0 reset fail: %d\n", ret);
			return ret;
		}

		ret = skl_dsp_start_core(ctx, SKL_DSP_CORE0_MASK);
		if (ret < 0) {
			dev_err(ctx->dev, "dsp core0 start fail: %d\n", ret);
			return ret;
		}
	} else {
		ret = skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
		if (ret < 0) {
			dev_err(ctx->dev, "dsp core0 disable fail: %d\n", ret);
			return ret;
		}
		ret = skl_dsp_enable_core(ctx, SKL_DSP_CORE0_MASK);
	}

	return ret;
}

/*
 * Primary interrupt handler: latch ADSPIS into ctx->intr_status, mask the
 * sources that fired (IPC and/or CL DMA) and wake the threaded handler.
 * 0xffffffff means the device is gone/powered off.
 */
irqreturn_t skl_dsp_sst_interrupt(int irq, void *dev_id)
{
	struct sst_dsp *ctx = dev_id;
	u32 val;
	irqreturn_t result = IRQ_NONE;

	spin_lock(&ctx->spinlock);

	val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPIS);
	ctx->intr_status = val;

	if (val == 0xffffffff) {
		spin_unlock(&ctx->spinlock);
		return IRQ_NONE;
	}

	if (val & SKL_ADSPIS_IPC) {
		skl_ipc_int_disable(ctx);
		result = IRQ_WAKE_THREAD;
	}

	if (val & SKL_ADSPIS_CL_DMA) {
		skl_cldma_int_disable(ctx);
		result = IRQ_WAKE_THREAD;
	}

	spin_unlock(&ctx->spinlock);

	return result;
}
/*
 * skl_dsp_get_core/skl_dsp_put_core will be called inside DAPM context
 * within the dapm mutex. Hence no separate lock is used.
 */
int skl_dsp_get_core(struct sst_dsp *ctx, unsigned int core_id)
{
	struct skl_dev *skl = ctx->thread_context;
	int ret = 0;

	if (core_id >= skl->cores.count) {
		dev_err(ctx->dev, "invalid core id: %d\n", core_id);
		return -EINVAL;
	}

	skl->cores.usage_count[core_id]++;

	if (skl->cores.state[core_id] == SKL_DSP_RESET) {
		ret = ctx->fw_ops.set_state_D0(ctx, core_id);
		if (ret < 0) {
			dev_err(ctx->dev, "unable to get core%d\n", core_id);
			goto out;
		}
	}

out:
	dev_dbg(ctx->dev, "core id %d state %d usage_count %d\n",
			core_id, skl->cores.state[core_id],
			skl->cores.usage_count[core_id]);

	return ret;
}
EXPORT_SYMBOL_GPL(skl_dsp_get_core);

/*
 * Drop a reference on @core_id; when the count hits zero the core is put
 * into D3 (and the count restored if that fails).
 */
int skl_dsp_put_core(struct sst_dsp *ctx, unsigned int core_id)
{
	struct skl_dev *skl = ctx->thread_context;
	int ret = 0;

	if (core_id >= skl->cores.count) {
		dev_err(ctx->dev, "invalid core id: %d\n", core_id);
		return -EINVAL;
	}

	if ((--skl->cores.usage_count[core_id] == 0) &&
		(skl->cores.state[core_id] != SKL_DSP_RESET)) {
		ret = ctx->fw_ops.set_state_D3(ctx, core_id);
		if (ret < 0) {
			dev_err(ctx->dev, "unable to put core %d: %d\n",
					core_id, ret);
			skl->cores.usage_count[core_id]++;
		}
	}

	dev_dbg(ctx->dev, "core id %d state %d usage_count %d\n",
			core_id, skl->cores.state[core_id],
			skl->cores.usage_count[core_id]);

	return ret;
}
EXPORT_SYMBOL_GPL(skl_dsp_put_core);

/* Take a reference on core 0 (wake it if it was in reset). */
int skl_dsp_wake(struct sst_dsp *ctx)
{
	return skl_dsp_get_core(ctx, SKL_DSP_CORE0_ID);
}
EXPORT_SYMBOL_GPL(skl_dsp_wake);

/* Drop the core 0 reference taken by skl_dsp_wake(). */
int skl_dsp_sleep(struct sst_dsp *ctx)
{
	return skl_dsp_put_core(ctx, SKL_DSP_CORE0_ID);
}
EXPORT_SYMBOL_GPL(skl_dsp_sleep);

/*
 * Allocate (devm) and initialize an sst_dsp context from @sst_dev;
 * returns NULL on allocation failure or if the device init op fails.
 */
struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
		struct sst_dsp_device *sst_dev, int irq)
{
	int ret;
	struct sst_dsp *sst;

	sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
	if (sst == NULL)
		return NULL;

	spin_lock_init(&sst->spinlock);
	mutex_init(&sst->mutex);
	sst->dev = dev;
	sst->sst_dev = sst_dev;
	sst->irq = irq;
	sst->ops = sst_dev->ops;
	sst->thread_context = sst_dev->thread_context;

	/* Initialise SST Audio DSP */
	if (sst->ops->init) {
		ret = sst->ops->init(sst);
		if (ret < 0)
			return NULL;
	}

	return sst;
}

/* Request the shared threaded IRQ for the DSP. */
int skl_dsp_acquire_irq(struct sst_dsp *sst)
{
	struct sst_dsp_device *sst_dev = sst->sst_dev;
	int ret;

	/* Register the ISR */
	ret = request_threaded_irq(sst->irq, sst->ops->irq_handler,
		sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
	if (ret)
		dev_err(sst->dev, "unable to grab threaded IRQ %d, disabling device\n",
			       sst->irq);

	return ret;
}

/* Tear down: mask interrupts, release the IRQ and disable core 0. */
void skl_dsp_free(struct sst_dsp *dsp)
{
	skl_ipc_int_disable(dsp);

	free_irq(dsp->irq, dsp);
	skl_ipc_op_int_disable(dsp);
	skl_dsp_disable_core(dsp, SKL_DSP_CORE0_MASK);
}
EXPORT_SYMBOL_GPL(skl_dsp_free);

/* True when the context state machine says the firmware is running. */
bool is_skl_dsp_running(struct sst_dsp *ctx)
{
	return (ctx->sst_state == SKL_DSP_RUNNING);
}
EXPORT_SYMBOL_GPL(is_skl_dsp_running);
linux-master
sound/soc/intel/skylake/skl-sst-dsp.c