// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Authors: Liam Girdwood <[email protected]>
// Ranjani Sridharan <[email protected]>
// Rander Wang <[email protected]>
// Keyon Jie <[email protected]>
//
/*
* Hardware interface for HDA DSP code loader
*/
#include <linux/firmware.h>
#include <sound/hdaudio_ext.h>
#include <sound/hda_register.h>
#include <sound/sof.h>
#include <sound/sof/ipc4/header.h>
#include "ext_manifest.h"
#include "../ipc4-priv.h"
#include "../ops.h"
#include "../sof-priv.h"
#include "hda.h"
static void hda_ssp_set_cbp_cfp(struct snd_sof_dev *sdev)
{
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
const struct sof_intel_dsp_desc *chip = hda->desc;
int i;
/* DSP is powered up, set all SSPs to clock consumer/codec provider mode */
for (i = 0; i < chip->ssp_count; i++) {
snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
chip->ssp_base_offset
+ i * SSP_DEV_MEM_SIZE
+ SSP_SSC1_OFFSET,
SSP_SET_CBP_CFP,
SSP_SET_CBP_CFP);
}
}
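/*
 * Prepare a host stream for code loading: take a free stream in the given
 * direction, allocate an SG DMA buffer of the requested size and set the
 * stream hw params (the ICCMAX path is used for capture). Returns the
 * extended stream or an ERR_PTR on failure.
 */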
struct hdac_ext_stream *hda_cl_stream_prepare(struct snd_sof_dev *sdev, unsigned int format,
unsigned int size, struct snd_dma_buffer *dmab,
int direction)
{
struct hdac_ext_stream *hext_stream;
struct hdac_stream *hstream;
struct pci_dev *pci = to_pci_dev(sdev->dev);
int ret;
hext_stream = hda_dsp_stream_get(sdev, direction, 0);
if (!hext_stream) {
dev_err(sdev->dev, "error: no stream available\n");
return ERR_PTR(-ENODEV);
}
hstream = &hext_stream->hstream;
hstream->substream = NULL;
/* allocate DMA buffer */
ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, &pci->dev, size, dmab);
if (ret < 0) {
dev_err(sdev->dev, "error: memory alloc failed: %d\n", ret);
goto out_put;
}
hstream->period_bytes = 0; /* initialize period_bytes */
hstream->format_val = format;
hstream->bufsize = size;
if (direction == SNDRV_PCM_STREAM_CAPTURE) {
ret = hda_dsp_iccmax_stream_hw_params(sdev, hext_stream, dmab, NULL);
if (ret < 0) {
dev_err(sdev->dev, "error: iccmax stream prepare failed: %d\n", ret);
goto out_free;
}
} else {
ret = hda_dsp_stream_hw_params(sdev, hext_stream, dmab, NULL);
if (ret < 0) {
dev_err(sdev->dev, "error: hdac prepare failed: %d\n", ret);
goto out_free;
}
hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_ENABLE, size);
}
return hext_stream;
out_free:
snd_dma_free_pages(dmab);
out_put:
hda_dsp_stream_put(sdev, direction, hstream->stream_tag);
return ERR_PTR(ret);
}
/*
 * The first boot sequence has some extra steps:
 * power on all host-managed cores, unstall/run only the boot core to boot
 * the DSP, then power off any non-boot cores that were powered on.
 */
int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
{
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
const struct sof_intel_dsp_desc *chip = hda->desc;
unsigned int status, target_status;
u32 flags, ipc_hdr, j;
unsigned long mask;
char *dump_msg;
int ret;
/* step 1: power up corex */
ret = hda_dsp_core_power_up(sdev, chip->host_managed_cores_mask);
if (ret < 0) {
if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
dev_err(sdev->dev, "error: dsp core 0/1 power up failed\n");
goto err;
}
hda_ssp_set_cbp_cfp(sdev);
/* step 2: Send ROM_CONTROL command (stream_tag is ignored for IMR boot) */
ipc_hdr = chip->ipc_req_mask | HDA_DSP_ROM_IPC_CONTROL;
if (!imr_boot)
ipc_hdr |= HDA_DSP_ROM_IPC_PURGE_FW | ((stream_tag - 1) << 9);
snd_sof_dsp_write(sdev, HDA_DSP_BAR, chip->ipc_req, ipc_hdr);
/* step 3: unset core 0 reset state & unstall/run core 0 */
ret = hda_dsp_core_run(sdev, chip->init_core_mask);
if (ret < 0) {
if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
dev_err(sdev->dev,
"error: dsp core start failed %d\n", ret);
ret = -EIO;
goto err;
}
/* step 4: wait for IPC DONE bit from ROM */
ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
chip->ipc_ack, status,
((status & chip->ipc_ack_mask)
== chip->ipc_ack_mask),
HDA_DSP_REG_POLL_INTERVAL_US,
HDA_DSP_INIT_TIMEOUT_US);
if (ret < 0) {
if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
dev_err(sdev->dev,
"error: %s: timeout for HIPCIE done\n",
__func__);
goto err;
}
/* set DONE bit to clear the reply IPC message */
snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
chip->ipc_ack,
chip->ipc_ack_mask,
chip->ipc_ack_mask);
/* step 5: power down cores that are no longer needed */
ret = hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask &
~(chip->init_core_mask));
if (ret < 0) {
if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
dev_err(sdev->dev,
"error: dsp core x power down failed\n");
goto err;
}
/* step 6: enable IPC interrupts */
hda_dsp_ipc_int_enable(sdev);
/*
* step 7:
* - Cold/Full boot: wait for ROM init to proceed to download the firmware
* - IMR boot: wait for ROM firmware entered (firmware booted up from IMR)
*/
if (imr_boot)
target_status = FSR_STATE_FW_ENTERED;
else
target_status = FSR_STATE_INIT_DONE;
ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
chip->rom_status_reg, status,
(FSR_TO_STATE_CODE(status) == target_status),
HDA_DSP_REG_POLL_INTERVAL_US,
chip->rom_init_timeout *
USEC_PER_MSEC);
if (!ret) {
/* set enabled cores mask and increment ref count for cores in init_core_mask */
sdev->enabled_cores_mask |= chip->init_core_mask;
mask = sdev->enabled_cores_mask;
for_each_set_bit(j, &mask, SOF_MAX_DSP_NUM_CORES)
sdev->dsp_core_ref_count[j]++;
return 0;
}
if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
dev_err(sdev->dev,
"%s: timeout with rom_status_reg (%#x) read\n",
__func__, chip->rom_status_reg);
err:
flags = SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX | SOF_DBG_DUMP_OPTIONAL;
/* after max boot attempts make sure that the dump is printed */
if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
flags &= ~SOF_DBG_DUMP_OPTIONAL;
dump_msg = kasprintf(GFP_KERNEL, "Boot iteration failed: %d/%d",
hda->boot_iteration, HDA_FW_BOOT_ATTEMPTS);
snd_sof_dsp_dbg_dump(sdev, dump_msg, flags);
hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask);
kfree(dump_msg);
return ret;
}
static int cl_trigger(struct snd_sof_dev *sdev,
struct hdac_ext_stream *hext_stream, int cmd)
{
struct hdac_stream *hstream = &hext_stream->hstream;
int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
/* the code loader is a special case that reuses stream ops */
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
1 << hstream->index,
1 << hstream->index);
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
sd_offset,
SOF_HDA_SD_CTL_DMA_START |
SOF_HDA_CL_DMA_SD_INT_MASK,
SOF_HDA_SD_CTL_DMA_START |
SOF_HDA_CL_DMA_SD_INT_MASK);
hstream->running = true;
return 0;
default:
return hda_dsp_stream_trigger(sdev, hext_stream, cmd);
}
}
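/*
 * Tear down a code loader stream: disable SPIB (playback) or stop the DMA
 * (capture), release the stream, clear the BDL and stream registers and free
 * the DMA buffer.
 */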
int hda_cl_cleanup(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab,
struct hdac_ext_stream *hext_stream)
{
struct hdac_stream *hstream = &hext_stream->hstream;
int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
int ret = 0;
if (hstream->direction == SNDRV_PCM_STREAM_PLAYBACK)
ret = hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_DISABLE, 0);
else
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
SOF_HDA_SD_CTL_DMA_START, 0);
hda_dsp_stream_put(sdev, hstream->direction, hstream->stream_tag);
hstream->running = 0;
hstream->substream = NULL;
/* reset BDL address */
snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
sd_offset + SOF_HDA_ADSP_REG_SD_BDLPL, 0);
snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
sd_offset + SOF_HDA_ADSP_REG_SD_BDLPU, 0);
snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, sd_offset, 0);
snd_dma_free_pages(dmab);
dmab->area = NULL;
hstream->bufsize = 0;
hstream->format_val = 0;
return ret;
}
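/*
 * Start the code loader DMA, poll the ROM status register until the base
 * firmware reports FSR_STATE_FW_ENTERED, then stop the DMA. A DMA-stop error
 * is returned only if the ROM status polling succeeded.
 */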
int hda_cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream)
{
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
const struct sof_intel_dsp_desc *chip = hda->desc;
unsigned int reg;
int ret, status;
ret = cl_trigger(sdev, hext_stream, SNDRV_PCM_TRIGGER_START);
if (ret < 0) {
dev_err(sdev->dev, "error: DMA trigger start failed\n");
return ret;
}
status = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
chip->rom_status_reg, reg,
(FSR_TO_STATE_CODE(reg) == FSR_STATE_FW_ENTERED),
HDA_DSP_REG_POLL_INTERVAL_US,
HDA_DSP_BASEFW_TIMEOUT_US);
/*
* even in case of errors we still need to stop the DMAs,
* but we return the initial error should the DMA stop also fail
*/
if (status < 0) {
dev_err(sdev->dev,
"%s: timeout with rom_status_reg (%#x) read\n",
__func__, chip->rom_status_reg);
}
ret = cl_trigger(sdev, hext_stream, SNDRV_PCM_TRIGGER_STOP);
if (ret < 0) {
dev_err(sdev->dev, "error: DMA trigger stop failed\n");
if (!status)
status = ret;
}
return status;
}
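/*
 * Boot the firmware with an additional capture stream prepared for ICCMAX.
 * The LTRP guardband value is saved before the boot and restored once the
 * firmware is running.
 */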
int hda_dsp_cl_boot_firmware_iccmax(struct snd_sof_dev *sdev)
{
struct hdac_ext_stream *iccmax_stream;
struct snd_dma_buffer dmab_bdl;
int ret, ret1;
u8 original_gb;
/* save the original LTRP guardband value */
original_gb = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_LTRP) &
HDA_VS_INTEL_LTRP_GB_MASK;
/*
* Prepare capture stream for ICCMAX. We do not need to store
* the data, so use a buffer of PAGE_SIZE for receiving.
*/
iccmax_stream = hda_cl_stream_prepare(sdev, HDA_CL_STREAM_FORMAT, PAGE_SIZE,
&dmab_bdl, SNDRV_PCM_STREAM_CAPTURE);
if (IS_ERR(iccmax_stream)) {
dev_err(sdev->dev, "error: dma prepare for ICCMAX stream failed\n");
return PTR_ERR(iccmax_stream);
}
ret = hda_dsp_cl_boot_firmware(sdev);
/*
* Perform iccmax stream cleanup. This should be done even if firmware loading fails.
* If the cleanup also fails, we return the initial error
*/
ret1 = hda_cl_cleanup(sdev, &dmab_bdl, iccmax_stream);
if (ret1 < 0) {
dev_err(sdev->dev, "error: ICCMAX stream cleanup failed\n");
/* set return value to indicate cleanup failure */
if (!ret)
ret = ret1;
}
/* restore the original guardband value after FW boot */
snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_LTRP,
HDA_VS_INTEL_LTRP_GB_MASK, original_gb);
return ret;
}
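/* Boot the firmware directly from IMR: cl_init with imr_boot set and no stream tag */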
static int hda_dsp_boot_imr(struct snd_sof_dev *sdev)
{
const struct sof_intel_dsp_desc *chip_info;
int ret;
chip_info = get_chip_info(sdev->pdata);
if (chip_info->cl_init)
ret = chip_info->cl_init(sdev, 0, true);
else
ret = -EINVAL;
if (!ret)
hda_sdw_process_wakeen(sdev);
return ret;
}
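/*
 * Boot the firmware: try a direct IMR boot when supported, otherwise (or on
 * IMR failure) perform a cold boot - copy the stripped firmware image into a
 * code loader stream buffer, run the ROM init sequence with retries, then DMA
 * the image and wait for the firmware to boot. Returns the init core mask on
 * success.
 */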
int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
{
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
struct snd_sof_pdata *plat_data = sdev->pdata;
const struct sof_dev_desc *desc = plat_data->desc;
const struct sof_intel_dsp_desc *chip_info;
struct hdac_ext_stream *hext_stream;
struct firmware stripped_firmware;
struct snd_dma_buffer dmab;
int ret, ret1, i;
if (hda->imrboot_supported && !sdev->first_boot && !hda->skip_imr_boot) {
dev_dbg(sdev->dev, "IMR restore supported, booting from IMR directly\n");
hda->boot_iteration = 0;
ret = hda_dsp_boot_imr(sdev);
if (!ret) {
hda->booted_from_imr = true;
return 0;
}
dev_warn(sdev->dev, "IMR restore failed, trying to cold boot\n");
}
hda->booted_from_imr = false;
chip_info = desc->chip_info;
if (sdev->basefw.fw->size <= sdev->basefw.payload_offset) {
dev_err(sdev->dev, "error: firmware size must be greater than firmware offset\n");
return -EINVAL;
}
stripped_firmware.data = sdev->basefw.fw->data + sdev->basefw.payload_offset;
stripped_firmware.size = sdev->basefw.fw->size - sdev->basefw.payload_offset;
/* init for booting wait */
init_waitqueue_head(&sdev->boot_wait);
/* prepare DMA for code loader stream */
hext_stream = hda_cl_stream_prepare(sdev, HDA_CL_STREAM_FORMAT,
stripped_firmware.size,
&dmab, SNDRV_PCM_STREAM_PLAYBACK);
if (IS_ERR(hext_stream)) {
dev_err(sdev->dev, "error: dma prepare for fw loading failed\n");
return PTR_ERR(hext_stream);
}
memcpy(dmab.area, stripped_firmware.data,
stripped_firmware.size);
/* try ROM init a few times before giving up */
for (i = 0; i < HDA_FW_BOOT_ATTEMPTS; i++) {
dev_dbg(sdev->dev,
"Attempting iteration %d of Core En/ROM load...\n", i);
hda->boot_iteration = i + 1;
if (chip_info->cl_init)
ret = chip_info->cl_init(sdev, hext_stream->hstream.stream_tag, false);
else
ret = -EINVAL;
/* don't retry anymore if successful */
if (!ret)
break;
}
if (i == HDA_FW_BOOT_ATTEMPTS) {
dev_err(sdev->dev, "error: dsp init failed after %d attempts with err: %d\n",
i, ret);
goto cleanup;
}
/*
* When a SoundWire link is in clock stop state, a Slave
* device may trigger in-band wakes for events such as jack
* insertion or acoustic event detection. This event will lead
* to a WAKEEN interrupt, handled by the PCI device and routed
* to PME if the PCI device is in D3. The resume function in
* audio PCI driver will be invoked by ACPI for PME event and
* initialize the device and process WAKEEN interrupt.
*
* The WAKEEN interrupt should be processed ASAP to prevent an
* interrupt flood, otherwise other interrupts, such as IPC,
* cannot work normally. The WAKEEN is handled after the ROM
* is initialized successfully, which ensures power rails are
* enabled before accessing the SoundWire SHIM registers.
*/
if (!sdev->first_boot)
hda_sdw_process_wakeen(sdev);
/*
* Set the boot_iteration to the last attempt, indicating that the
* DSP ROM has been initialized and from this point there will be no
* retry done to boot.
*
* Continue with code loading and firmware boot
*/
hda->boot_iteration = HDA_FW_BOOT_ATTEMPTS;
ret = hda_cl_copy_fw(sdev, hext_stream);
if (!ret) {
dev_dbg(sdev->dev, "Firmware download successful, booting...\n");
hda->skip_imr_boot = false;
} else {
snd_sof_dsp_dbg_dump(sdev, "Firmware download failed",
SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX);
hda->skip_imr_boot = true;
}
cleanup:
/*
* Perform codeloader stream cleanup.
* This should be done even if firmware loading fails.
* If the cleanup also fails, we return the initial error
*/
ret1 = hda_cl_cleanup(sdev, &dmab, hext_stream);
if (ret1 < 0) {
dev_err(sdev->dev, "error: Code loader DSP cleanup failed\n");
/* set return value to indicate cleanup failure */
if (!ret)
ret = ret1;
}
/*
* return primary core id if both fw copy
* and stream clean up are successful
*/
if (!ret)
return chip_info->init_core_mask;
/* disable DSP */
snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR,
SOF_HDA_REG_PP_PPCTL,
SOF_HDA_PPCTL_GPROCEN, 0);
return ret;
}
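/*
 * Load an IPC4 firmware library: copy the stripped library image into a code
 * loader stream buffer and send a GLB_LOAD_LIBRARY request (carrying the
 * stream tag - 1 and the library id) while the DMA is running.
 */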
int hda_dsp_ipc4_load_library(struct snd_sof_dev *sdev,
struct sof_ipc4_fw_library *fw_lib, bool reload)
{
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
struct hdac_ext_stream *hext_stream;
struct firmware stripped_firmware;
struct sof_ipc4_msg msg = {};
struct snd_dma_buffer dmab;
int ret, ret1;
/* IMR booting will restore the libraries as well, skip the loading */
if (reload && hda->booted_from_imr)
return 0;
/* the fw_lib has been verified during loading, so we can trust its validity here */
stripped_firmware.data = fw_lib->sof_fw.fw->data + fw_lib->sof_fw.payload_offset;
stripped_firmware.size = fw_lib->sof_fw.fw->size - fw_lib->sof_fw.payload_offset;
/* prepare DMA for code loader stream */
hext_stream = hda_cl_stream_prepare(sdev, HDA_CL_STREAM_FORMAT,
stripped_firmware.size,
&dmab, SNDRV_PCM_STREAM_PLAYBACK);
if (IS_ERR(hext_stream)) {
dev_err(sdev->dev, "%s: DMA prepare failed\n", __func__);
return PTR_ERR(hext_stream);
}
memcpy(dmab.area, stripped_firmware.data, stripped_firmware.size);
msg.primary = hext_stream->hstream.stream_tag - 1;
msg.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_LOAD_LIBRARY);
msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_FW_GEN_MSG);
msg.primary |= SOF_IPC4_GLB_LOAD_LIBRARY_LIB_ID(fw_lib->id);
ret = cl_trigger(sdev, hext_stream, SNDRV_PCM_TRIGGER_START);
if (ret < 0) {
dev_err(sdev->dev, "%s: DMA trigger start failed\n", __func__);
goto cleanup;
}
ret = sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);
ret1 = cl_trigger(sdev, hext_stream, SNDRV_PCM_TRIGGER_STOP);
if (ret1 < 0) {
dev_err(sdev->dev, "%s: DMA trigger stop failed\n", __func__);
if (!ret)
ret = ret1;
}
cleanup:
/* clean up even in case of error and return the first error */
ret1 = hda_cl_cleanup(sdev, &dmab, hext_stream);
if (ret1 < 0) {
dev_err(sdev->dev, "%s: Code loader DSP cleanup failed\n", __func__);
/* set return value to indicate cleanup failure */
if (!ret)
ret = ret1;
}
return ret;
}
/* pre fw run operations */
int hda_dsp_pre_fw_run(struct snd_sof_dev *sdev)
{
/* disable clock gating and power gating */
return hda_dsp_ctrl_clock_power_gating(sdev, false);
}
/* post fw run operations */
int hda_dsp_post_fw_run(struct snd_sof_dev *sdev)
{
int ret;
if (sdev->first_boot) {
struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
ret = hda_sdw_startup(sdev);
if (ret < 0) {
dev_err(sdev->dev,
"error: could not startup SoundWire links\n");
return ret;
}
/* Check if IMR boot is usable */
if (!sof_debug_check_flag(SOF_DBG_IGNORE_D3_PERSISTENT) &&
(sdev->fw_ready.flags & SOF_IPC_INFO_D3_PERSISTENT ||
sdev->pdata->ipc_type == SOF_INTEL_IPC4))
hdev->imrboot_supported = true;
}
hda_sdw_int_enable(sdev, true);
/* re-enable clock gating and power gating */
return hda_dsp_ctrl_clock_power_gating(sdev, true);
}
int hda_dsp_ext_man_get_cavs_config_data(struct snd_sof_dev *sdev,
const struct sof_ext_man_elem_header *hdr)
{
const struct sof_ext_man_cavs_config_data *config_data =
container_of(hdr, struct sof_ext_man_cavs_config_data, hdr);
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
int i, elem_num;
/* calculate total number of config data elements */
elem_num = (hdr->size - sizeof(struct sof_ext_man_elem_header))
/ sizeof(struct sof_config_elem);
if (elem_num <= 0) {
dev_err(sdev->dev, "cavs config data is inconsistent: %d\n", elem_num);
return -EINVAL;
}
for (i = 0; i < elem_num; i++)
switch (config_data->elems[i].token) {
case SOF_EXT_MAN_CAVS_CONFIG_EMPTY:
/* skip empty token */
break;
case SOF_EXT_MAN_CAVS_CONFIG_CAVS_LPRO:
hda->clk_config_lpro = config_data->elems[i].value;
dev_dbg(sdev->dev, "FW clock config: %s\n",
hda->clk_config_lpro ? "LPRO" : "HPRO");
break;
case SOF_EXT_MAN_CAVS_CONFIG_OUTBOX_SIZE:
case SOF_EXT_MAN_CAVS_CONFIG_INBOX_SIZE:
/* These elements are defined but not used yet; no warning is required */
break;
default:
dev_info(sdev->dev, "unsupported token type: %d\n",
config_data->elems[i].token);
}
return 0;
}
/* end of file: sound/soc/sof/intel/hda-loader.c (linux-master) */
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018-2022 Intel Corporation. All rights reserved.
//
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <sound/hdaudio_ext.h>
#include <sound/sof.h>
#include <sound/pcm_params.h>
#include "../sof-priv.h"
#include "../ops.h"
#include "hda.h"
#define HDA_SKL_WAIT_TIMEOUT 500 /* 500 msec */
#define HDA_SKL_CLDMA_MAX_BUFFER_SIZE (32 * PAGE_SIZE)
/* Stream Reset */
#define HDA_CL_SD_CTL_SRST_SHIFT 0
#define HDA_CL_SD_CTL_SRST(x) (((x) & 0x1) << \
HDA_CL_SD_CTL_SRST_SHIFT)
/* Stream Run */
#define HDA_CL_SD_CTL_RUN_SHIFT 1
#define HDA_CL_SD_CTL_RUN(x) (((x) & 0x1) << \
HDA_CL_SD_CTL_RUN_SHIFT)
/* Interrupt On Completion Enable */
#define HDA_CL_SD_CTL_IOCE_SHIFT 2
#define HDA_CL_SD_CTL_IOCE(x) (((x) & 0x1) << \
HDA_CL_SD_CTL_IOCE_SHIFT)
/* FIFO Error Interrupt Enable */
#define HDA_CL_SD_CTL_FEIE_SHIFT 3
#define HDA_CL_SD_CTL_FEIE(x) (((x) & 0x1) << \
HDA_CL_SD_CTL_FEIE_SHIFT)
/* Descriptor Error Interrupt Enable */
#define HDA_CL_SD_CTL_DEIE_SHIFT 4
#define HDA_CL_SD_CTL_DEIE(x) (((x) & 0x1) << \
HDA_CL_SD_CTL_DEIE_SHIFT)
/* FIFO Limit Change */
#define HDA_CL_SD_CTL_FIFOLC_SHIFT 5
#define HDA_CL_SD_CTL_FIFOLC(x) (((x) & 0x1) << \
HDA_CL_SD_CTL_FIFOLC_SHIFT)
/* Stripe Control */
#define HDA_CL_SD_CTL_STRIPE_SHIFT 16
#define HDA_CL_SD_CTL_STRIPE(x) (((x) & 0x3) << \
HDA_CL_SD_CTL_STRIPE_SHIFT)
/* Traffic Priority */
#define HDA_CL_SD_CTL_TP_SHIFT 18
#define HDA_CL_SD_CTL_TP(x) (((x) & 0x1) << \
HDA_CL_SD_CTL_TP_SHIFT)
/* Bidirectional Direction Control */
#define HDA_CL_SD_CTL_DIR_SHIFT 19
#define HDA_CL_SD_CTL_DIR(x) (((x) & 0x1) << \
HDA_CL_SD_CTL_DIR_SHIFT)
/* Stream Number */
#define HDA_CL_SD_CTL_STRM_SHIFT 20
#define HDA_CL_SD_CTL_STRM(x) (((x) & 0xf) << \
HDA_CL_SD_CTL_STRM_SHIFT)
#define HDA_CL_SD_CTL_INT(x) \
(HDA_CL_SD_CTL_IOCE(x) | \
HDA_CL_SD_CTL_FEIE(x) | \
HDA_CL_SD_CTL_DEIE(x))
#define HDA_CL_SD_CTL_INT_MASK \
(HDA_CL_SD_CTL_IOCE(1) | \
HDA_CL_SD_CTL_FEIE(1) | \
HDA_CL_SD_CTL_DEIE(1))
#define DMA_ADDRESS_128_BITS_ALIGNMENT 7
#define BDL_ALIGN(x) ((x) >> DMA_ADDRESS_128_BITS_ALIGNMENT)
/* Buffer Descriptor List Lower Base Address */
#define HDA_CL_SD_BDLPLBA_SHIFT 7
#define HDA_CL_SD_BDLPLBA_MASK GENMASK(31, 7)
#define HDA_CL_SD_BDLPLBA(x) \
((BDL_ALIGN(lower_32_bits(x)) << HDA_CL_SD_BDLPLBA_SHIFT) & \
HDA_CL_SD_BDLPLBA_MASK)
/* Buffer Descriptor List Upper Base Address */
#define HDA_CL_SD_BDLPUBA(x) \
(upper_32_bits(x))
/* Software Position in Buffer Enable */
#define HDA_CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT 0
#define HDA_CL_SPBFIFO_SPBFCCTL_SPIBE_MASK \
(1 << HDA_CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT)
#define HDA_CL_SPBFIFO_SPBFCCTL_SPIBE(x) \
(((x) << HDA_CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT) & \
HDA_CL_SPBFIFO_SPBFCCTL_SPIBE_MASK)
#define HDA_CL_DMA_SD_INT_COMPLETE 0x4
static int cl_skl_cldma_setup_bdle(struct snd_sof_dev *sdev,
struct snd_dma_buffer *dmab_data,
__le32 **bdlp, int size, int with_ioc)
{
phys_addr_t addr = virt_to_phys(dmab_data->area);
__le32 *bdl = *bdlp;
/*
 * This code is simplified by using one fragment of physical memory and assuming
 * all the code fits. This could be improved with scatter-gather, but the firmware
 * size is limited by DSP memory anyway.
 */
bdl[0] = cpu_to_le32(lower_32_bits(addr));
bdl[1] = cpu_to_le32(upper_32_bits(addr));
bdl[2] = cpu_to_le32(size);
bdl[3] = (!with_ioc) ? 0 : cpu_to_le32(0x01);
return 1; /* one fragment */
}
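/* Set or clear the CLDMA RUN bit and wait for the hardware to apply it */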
static void cl_skl_cldma_stream_run(struct snd_sof_dev *sdev, bool enable)
{
int sd_offset = SOF_HDA_ADSP_LOADER_BASE;
unsigned char val;
int retries;
u32 run = enable ? 0x1 : 0;
snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
sd_offset + SOF_HDA_ADSP_REG_SD_CTL,
HDA_CL_SD_CTL_RUN(1), HDA_CL_SD_CTL_RUN(run));
retries = 300;
do {
udelay(3);
/* waiting for hardware to report the stream Run bit set */
val = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
sd_offset + SOF_HDA_ADSP_REG_SD_CTL);
val &= HDA_CL_SD_CTL_RUN(1);
if (enable && val)
break;
else if (!enable && !val)
break;
} while (--retries);
if (retries == 0)
dev_err(sdev->dev, "%s: failed to set Run bit=%d enable=%d\n",
__func__, val, enable);
}
static void cl_skl_cldma_stream_clear(struct snd_sof_dev *sdev)
{
int sd_offset = SOF_HDA_ADSP_LOADER_BASE;
/* make sure Run bit is cleared before setting stream register */
cl_skl_cldma_stream_run(sdev, 0);
/* Disable the Interrupt On Completion, FIFO Error Interrupt,
* Descriptor Error Interrupt and set the cldma stream number to 0.
*/
snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
sd_offset + SOF_HDA_ADSP_REG_SD_CTL,
HDA_CL_SD_CTL_INT_MASK, HDA_CL_SD_CTL_INT(0));
snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
sd_offset + SOF_HDA_ADSP_REG_SD_CTL,
HDA_CL_SD_CTL_STRM(0xf), HDA_CL_SD_CTL_STRM(0));
snd_sof_dsp_write(sdev, HDA_DSP_BAR,
sd_offset + SOF_HDA_ADSP_REG_SD_BDLPL, HDA_CL_SD_BDLPLBA(0));
snd_sof_dsp_write(sdev, HDA_DSP_BAR,
sd_offset + SOF_HDA_ADSP_REG_SD_BDLPU, 0);
/* Set the Cyclic Buffer Length to 0. */
snd_sof_dsp_write(sdev, HDA_DSP_BAR,
sd_offset + SOF_HDA_ADSP_REG_SD_CBL, 0);
/* Set the Last Valid Index. */
snd_sof_dsp_write(sdev, HDA_DSP_BAR,
sd_offset + SOF_HDA_ADSP_REG_SD_LVI, 0);
}
static void cl_skl_cldma_setup_spb(struct snd_sof_dev *sdev,
unsigned int size, bool enable)
{
int sd_offset = SOF_DSP_REG_CL_SPBFIFO;
if (enable)
snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
sd_offset + SOF_HDA_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
HDA_CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
HDA_CL_SPBFIFO_SPBFCCTL_SPIBE(1));
snd_sof_dsp_write(sdev, HDA_DSP_BAR,
sd_offset + SOF_HDA_ADSP_REG_CL_SPBFIFO_SPIB, size);
}
static void cl_skl_cldma_set_intr(struct snd_sof_dev *sdev, bool enable)
{
u32 val = enable ? HDA_DSP_ADSPIC_CL_DMA : 0;
snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
HDA_DSP_ADSPIC_CL_DMA, val);
}
static void cl_skl_cldma_cleanup_spb(struct snd_sof_dev *sdev)
{
int sd_offset = SOF_DSP_REG_CL_SPBFIFO;
snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
sd_offset + SOF_HDA_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
HDA_CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
HDA_CL_SPBFIFO_SPBFCCTL_SPIBE(0));
snd_sof_dsp_write(sdev, HDA_DSP_BAR,
sd_offset + SOF_HDA_ADSP_REG_CL_SPBFIFO_SPIB, 0);
}
static void cl_skl_cldma_setup_controller(struct snd_sof_dev *sdev,
struct snd_dma_buffer *dmab_bdl,
unsigned int max_size, u32 count)
{
int sd_offset = SOF_HDA_ADSP_LOADER_BASE;
/* Clear the stream first and then set it. */
cl_skl_cldma_stream_clear(sdev);
/* setting the stream register */
snd_sof_dsp_write(sdev, HDA_DSP_BAR,
sd_offset + SOF_HDA_ADSP_REG_SD_BDLPL,
HDA_CL_SD_BDLPLBA(dmab_bdl->addr));
snd_sof_dsp_write(sdev, HDA_DSP_BAR,
sd_offset + SOF_HDA_ADSP_REG_SD_BDLPU,
HDA_CL_SD_BDLPUBA(dmab_bdl->addr));
/* Set the Cyclic Buffer Length. */
snd_sof_dsp_write(sdev, HDA_DSP_BAR,
sd_offset + SOF_HDA_ADSP_REG_SD_CBL, max_size);
/* Set the Last Valid Index. */
snd_sof_dsp_write(sdev, HDA_DSP_BAR,
sd_offset + SOF_HDA_ADSP_REG_SD_LVI, count - 1);
/* Set the Interrupt On Completion, FIFO Error Interrupt,
* Descriptor Error Interrupt and the cldma stream number.
*/
snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
sd_offset + SOF_HDA_ADSP_REG_SD_CTL,
HDA_CL_SD_CTL_INT_MASK, HDA_CL_SD_CTL_INT(1));
snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
sd_offset + SOF_HDA_ADSP_REG_SD_CTL,
HDA_CL_SD_CTL_STRM(0xf),
HDA_CL_SD_CTL_STRM(1));
}
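/*
 * Allocate the firmware copy buffer and the BDL buffer, build a single BDL
 * entry and program the CLDMA controller with it.
 */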
static int cl_stream_prepare_skl(struct snd_sof_dev *sdev,
struct snd_dma_buffer *dmab,
struct snd_dma_buffer *dmab_bdl)
{
unsigned int bufsize = HDA_SKL_CLDMA_MAX_BUFFER_SIZE;
__le32 *bdl;
int frags;
int ret;
ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev, bufsize, dmab);
if (ret < 0) {
dev_err(sdev->dev, "%s: failed to alloc fw buffer: %x\n", __func__, ret);
return ret;
}
ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev, bufsize, dmab_bdl);
if (ret < 0) {
dev_err(sdev->dev, "%s: failed to alloc bdle: %x\n", __func__, ret);
snd_dma_free_pages(dmab);
return ret;
}
bdl = (__le32 *)dmab_bdl->area;
frags = cl_skl_cldma_setup_bdle(sdev, dmab, &bdl, bufsize, 1);
cl_skl_cldma_setup_controller(sdev, dmab_bdl, bufsize, frags);
return ret;
}
static void cl_cleanup_skl(struct snd_sof_dev *sdev,
struct snd_dma_buffer *dmab,
struct snd_dma_buffer *dmab_bdl)
{
cl_skl_cldma_cleanup_spb(sdev);
cl_skl_cldma_stream_clear(sdev);
snd_dma_free_pages(dmab);
snd_dma_free_pages(dmab_bdl);
}
static int cl_dsp_init_skl(struct snd_sof_dev *sdev,
struct snd_dma_buffer *dmab,
struct snd_dma_buffer *dmab_bdl)
{
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
const struct sof_intel_dsp_desc *chip = hda->desc;
unsigned int status;
u32 flags;
int ret;
/* Check if the init_core is already enabled: if yes, reset it and make it
 * run; if not, power it down and then enable it again.
 */
if (hda_dsp_core_is_enabled(sdev, chip->init_core_mask)) {
/* if enabled, reset it, and run the init_core. */
ret = hda_dsp_core_stall_reset(sdev, chip->init_core_mask);
if (ret < 0)
goto err;
ret = hda_dsp_core_run(sdev, chip->init_core_mask);
if (ret < 0) {
dev_err(sdev->dev, "%s: dsp core start failed %d\n", __func__, ret);
goto err;
}
} else {
/* if not enabled, power it down first and then power it up and run
 * the init_core.
 */
ret = hda_dsp_core_reset_power_down(sdev, chip->init_core_mask);
if (ret < 0) {
dev_err(sdev->dev, "%s: dsp core0 disable fail: %d\n", __func__, ret);
goto err;
}
ret = hda_dsp_enable_core(sdev, chip->init_core_mask);
if (ret < 0) {
dev_err(sdev->dev, "%s: dsp core0 enable fail: %d\n", __func__, ret);
goto err;
}
}
/* prepare DMA for code loader stream */
ret = cl_stream_prepare_skl(sdev, dmab, dmab_bdl);
if (ret < 0) {
dev_err(sdev->dev, "%s: dma prepare fw loading err: %x\n", __func__, ret);
return ret;
}
/* enable the interrupt */
snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
/* enable IPC DONE interrupt */
snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
HDA_DSP_REG_HIPCCTL_DONE,
HDA_DSP_REG_HIPCCTL_DONE);
/* enable IPC BUSY interrupt */
snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
HDA_DSP_REG_HIPCCTL_BUSY,
HDA_DSP_REG_HIPCCTL_BUSY);
/* polling the ROM init status information. */
ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
chip->rom_status_reg, status,
(FSR_TO_STATE_CODE(status)
== FSR_STATE_INIT_DONE),
HDA_DSP_REG_POLL_INTERVAL_US,
chip->rom_init_timeout *
USEC_PER_MSEC);
if (ret < 0)
goto err;
return ret;
err:
flags = SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX;
snd_sof_dsp_dbg_dump(sdev, "Boot failed\n", flags);
cl_cleanup_skl(sdev, dmab, dmab_bdl);
hda_dsp_core_reset_power_down(sdev, chip->init_core_mask);
return ret;
}
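/*
 * Copy one chunk into the CLDMA buffer, optionally enable the CLDMA
 * interrupt, program the SPIB with the chunk size and start the stream.
 */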
static void cl_skl_cldma_fill_buffer(struct snd_sof_dev *sdev,
struct snd_dma_buffer *dmab,
unsigned int bufsize,
unsigned int copysize,
const void *curr_pos,
bool intr_enable)
{
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
/* copy the image into the buffer with the maximum buffer size. */
unsigned int size = (bufsize == copysize) ? bufsize : copysize;
memcpy(dmab->area, curr_pos, size);
/* Set the wait condition for every load. */
hda->code_loading = 1;
/* Set the interrupt. */
if (intr_enable)
cl_skl_cldma_set_intr(sdev, true);
/* Set the SPB. */
cl_skl_cldma_setup_spb(sdev, size, true);
/* Trigger the code loading stream. */
cl_skl_cldma_stream_run(sdev, true);
}
static int cl_skl_cldma_wait_interruptible(struct snd_sof_dev *sdev,
bool intr_wait)
{
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
const struct sof_intel_dsp_desc *chip = hda->desc;
int sd_offset = SOF_HDA_ADSP_LOADER_BASE;
u8 cl_dma_intr_status;
/*
* Wait for CLDMA interrupt to inform the binary segment transfer is
* complete.
*/
if (!wait_event_timeout(hda->waitq, !hda->code_loading,
msecs_to_jiffies(HDA_SKL_WAIT_TIMEOUT))) {
dev_err(sdev->dev, "cldma copy timeout\n");
dev_err(sdev->dev, "ROM code=%#x: FW status=%#x\n",
snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_ROM_ERROR),
snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg));
return -EIO;
}
/* now check DMA interrupt status */
cl_dma_intr_status = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
sd_offset + SOF_HDA_ADSP_REG_SD_STS);
if (!(cl_dma_intr_status & HDA_CL_DMA_SD_INT_COMPLETE)) {
dev_err(sdev->dev, "cldma copy failed\n");
return -EIO;
}
dev_dbg(sdev->dev, "cldma buffer copy complete\n");
return 0;
}
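/*
 * Copy the firmware image in bufsize chunks. All but the last chunk use the
 * CLDMA completion interrupt to wait for the transfer; the last chunk is
 * queued without an interrupt and the caller polls the ROM status instead.
 */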
static int
cl_skl_cldma_copy_to_buf(struct snd_sof_dev *sdev,
struct snd_dma_buffer *dmab,
const void *bin,
u32 total_size, u32 bufsize)
{
unsigned int bytes_left = total_size;
const void *curr_pos = bin;
int ret;
if (total_size <= 0)
return -EINVAL;
while (bytes_left > 0) {
if (bytes_left > bufsize) {
dev_dbg(sdev->dev, "cldma copy %#x bytes\n", bufsize);
cl_skl_cldma_fill_buffer(sdev, dmab, bufsize, bufsize, curr_pos, true);
ret = cl_skl_cldma_wait_interruptible(sdev, false);
if (ret < 0) {
dev_err(sdev->dev, "%s: fw failed to load. %#x bytes remaining\n",
__func__, bytes_left);
return ret;
}
bytes_left -= bufsize;
curr_pos += bufsize;
} else {
dev_dbg(sdev->dev, "cldma copy %#x bytes\n", bytes_left);
cl_skl_cldma_set_intr(sdev, false);
cl_skl_cldma_fill_buffer(sdev, dmab, bufsize, bytes_left, curr_pos, false);
return 0;
}
}
return bytes_left;
}
static int cl_copy_fw_skl(struct snd_sof_dev *sdev,
struct snd_dma_buffer *dmab)
{
const struct firmware *fw = sdev->basefw.fw;
struct firmware stripped_firmware;
unsigned int bufsize = HDA_SKL_CLDMA_MAX_BUFFER_SIZE;
int ret;
stripped_firmware.data = fw->data + sdev->basefw.payload_offset;
stripped_firmware.size = fw->size - sdev->basefw.payload_offset;
dev_dbg(sdev->dev, "firmware size: %#zx buffer size %#x\n", fw->size, bufsize);
ret = cl_skl_cldma_copy_to_buf(sdev, dmab, stripped_firmware.data,
stripped_firmware.size, bufsize);
if (ret < 0)
dev_err(sdev->dev, "%s: fw copy failed %d\n", __func__, ret);
return ret;
}
int hda_dsp_cl_boot_firmware_skl(struct snd_sof_dev *sdev)
{
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
const struct sof_intel_dsp_desc *chip = hda->desc;
struct snd_dma_buffer dmab_bdl;
struct snd_dma_buffer dmab;
unsigned int reg;
u32 flags;
int ret;
ret = cl_dsp_init_skl(sdev, &dmab, &dmab_bdl);
/* retry enabling core and ROM load. seemed to help */
if (ret < 0) {
ret = cl_dsp_init_skl(sdev, &dmab, &dmab_bdl);
if (ret < 0) {
dev_err(sdev->dev, "Error code=%#x: FW status=%#x\n",
snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_ROM_ERROR),
snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg));
dev_err(sdev->dev, "Core En/ROM load fail:%d\n", ret);
return ret;
}
}
dev_dbg(sdev->dev, "ROM init successful\n");
/* at this point DSP ROM has been initialized and should be ready for
* code loading and firmware boot
*/
ret = cl_copy_fw_skl(sdev, &dmab);
if (ret < 0) {
dev_err(sdev->dev, "%s: load firmware failed : %d\n", __func__, ret);
goto err;
}
ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
chip->rom_status_reg, reg,
(FSR_TO_STATE_CODE(reg)
== FSR_STATE_ROM_BASEFW_ENTERED),
HDA_DSP_REG_POLL_INTERVAL_US,
HDA_DSP_BASEFW_TIMEOUT_US);
dev_dbg(sdev->dev, "Firmware download successful, booting...\n");
cl_skl_cldma_stream_run(sdev, false);
cl_cleanup_skl(sdev, &dmab, &dmab_bdl);
if (!ret)
return chip->init_core_mask;
return ret;
err:
flags = SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX;
snd_sof_dsp_dbg_dump(sdev, "Boot failed\n", flags);
/* power down DSP */
hda_dsp_core_reset_power_down(sdev, chip->init_core_mask);
cl_skl_cldma_stream_run(sdev, false);
cl_cleanup_skl(sdev, &dmab, &dmab_bdl);
dev_err(sdev->dev, "%s: load fw failed err: %d\n", __func__, ret);
return ret;
}
/* end of file: sound/soc/sof/intel/hda-loader-skl.c (linux-master) */
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018-2022 Intel Corporation. All rights reserved.
//
#include <linux/module.h>
#include <linux/pci.h>
#include <sound/soc-acpi.h>
#include <sound/soc-acpi-intel-match.h>
#include <sound/sof.h>
#include "../ops.h"
#include "../sof-pci-dev.h"
/* platform specific devices */
#include "hda.h"
static struct sof_dev_desc skl_desc = {
.machines = snd_soc_acpi_intel_skl_machines,
.resindex_lpe_base = 0,
.resindex_pcicfg_base = -1,
.resindex_imr_base = -1,
.chip_info = &skl_chip_info,
.irqindex_host_ipc = -1,
.ipc_supported_mask = BIT(SOF_INTEL_IPC4),
.ipc_default = SOF_INTEL_IPC4,
.dspless_mode_supported = true, /* Only supported for HDaudio */
.default_fw_path = {
[SOF_INTEL_IPC4] = "intel/avs/skl",
},
.default_tplg_path = {
[SOF_INTEL_IPC4] = "intel/avs-tplg",
},
.default_fw_filename = {
[SOF_INTEL_IPC4] = "dsp_basefw.bin",
},
.nocodec_tplg_filename = "sof-skl-nocodec.tplg",
.ops = &sof_skl_ops,
.ops_init = sof_skl_ops_init,
.ops_free = hda_ops_free,
};
static struct sof_dev_desc kbl_desc = {
.machines = snd_soc_acpi_intel_kbl_machines,
.resindex_lpe_base = 0,
.resindex_pcicfg_base = -1,
.resindex_imr_base = -1,
.chip_info = &skl_chip_info,
.irqindex_host_ipc = -1,
.ipc_supported_mask = BIT(SOF_INTEL_IPC4),
.ipc_default = SOF_INTEL_IPC4,
.dspless_mode_supported = true, /* Only supported for HDaudio */
.default_fw_path = {
[SOF_INTEL_IPC4] = "intel/avs/kbl",
},
.default_tplg_path = {
[SOF_INTEL_IPC4] = "intel/avs-tplg",
},
.default_fw_filename = {
[SOF_INTEL_IPC4] = "dsp_basefw.bin",
},
.nocodec_tplg_filename = "sof-kbl-nocodec.tplg",
.ops = &sof_skl_ops,
.ops_init = sof_skl_ops_init,
.ops_free = hda_ops_free,
};
/* PCI IDs */
static const struct pci_device_id sof_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, HDA_SKL_LP, &skl_desc) },
{ PCI_DEVICE_DATA(INTEL, HDA_KBL_LP, &kbl_desc) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, sof_pci_ids);
/* pci_driver definition */
static struct pci_driver snd_sof_pci_intel_skl_driver = {
.name = "sof-audio-pci-intel-skl",
.id_table = sof_pci_ids,
.probe = hda_pci_intel_probe,
.remove = sof_pci_remove,
.shutdown = sof_pci_shutdown,
.driver = {
.pm = &sof_pci_pm,
},
};
module_pci_driver(snd_sof_pci_intel_skl_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
/* end of file: sound/soc/sof/intel/pci-skl.c (linux-master) */
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Authors: Liam Girdwood <[email protected]>
// Ranjani Sridharan <[email protected]>
// Rander Wang <[email protected]>
// Keyon Jie <[email protected]>
//
/*
* Hardware interface for generic Intel audio DSP HDA IP
*/
#include <sound/sof/ipc4/header.h>
#include <trace/events/sof_intel.h>
#include "../ops.h"
#include "hda.h"
static void hda_dsp_ipc_host_done(struct snd_sof_dev *sdev)
{
/*
* tell DSP cmd is done - clear busy
* interrupt and send reply msg to dsp
*/
snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
HDA_DSP_REG_HIPCT,
HDA_DSP_REG_HIPCT_BUSY,
HDA_DSP_REG_HIPCT_BUSY);
/* unmask BUSY interrupt */
snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
HDA_DSP_REG_HIPCCTL,
HDA_DSP_REG_HIPCCTL_BUSY,
HDA_DSP_REG_HIPCCTL_BUSY);
}
static void hda_dsp_ipc_dsp_done(struct snd_sof_dev *sdev)
{
/*
* set DONE bit - tell DSP we have received the reply msg
* from DSP, and processed it, don't send more reply to host
*/
snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
HDA_DSP_REG_HIPCIE,
HDA_DSP_REG_HIPCIE_DONE,
HDA_DSP_REG_HIPCIE_DONE);
/* unmask Done interrupt */
snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
HDA_DSP_REG_HIPCCTL,
HDA_DSP_REG_HIPCCTL_DONE,
HDA_DSP_REG_HIPCCTL_DONE);
}
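/* Copy the IPC message into the host mailbox and set the HIPCI BUSY bit to notify the DSP */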
int hda_dsp_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
{
/* send IPC message to DSP */
sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
msg->msg_size);
snd_sof_dsp_write(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCI,
HDA_DSP_REG_HIPCI_BUSY);
return 0;
}
static inline bool hda_dsp_ipc4_pm_msg(u32 primary)
{
/* pm setting is only supported by module msg */
if (SOF_IPC4_MSG_IS_MODULE_MSG(primary) != SOF_IPC4_MODULE_MSG)
return false;
if (SOF_IPC4_MSG_TYPE_GET(primary) == SOF_IPC4_MOD_SET_DX ||
SOF_IPC4_MSG_TYPE_GET(primary) == SOF_IPC4_MOD_SET_D0IX)
return true;
return false;
}
void hda_dsp_ipc4_schedule_d0i3_work(struct sof_intel_hda_dev *hdev,
struct snd_sof_ipc_msg *msg)
{
struct sof_ipc4_msg *msg_data = msg->msg_data;
/* Schedule a delayed work for d0i3 entry after sending non-pm ipc msg */
if (hda_dsp_ipc4_pm_msg(msg_data->primary))
return;
mod_delayed_work(system_wq, &hdev->d0i3_work,
msecs_to_jiffies(SOF_HDA_D0I3_WORK_DELAY_MS));
}
int hda_dsp_ipc4_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
{
struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
struct sof_ipc4_msg *msg_data = msg->msg_data;
if (hda_ipc4_tx_is_busy(sdev)) {
hdev->delayed_ipc_tx_msg = msg;
return 0;
}
hdev->delayed_ipc_tx_msg = NULL;
/* send the message via mailbox */
if (msg_data->data_size)
sof_mailbox_write(sdev, sdev->host_box.offset, msg_data->data_ptr,
msg_data->data_size);
snd_sof_dsp_write(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCIE, msg_data->extension);
snd_sof_dsp_write(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCI,
msg_data->primary | HDA_DSP_REG_HIPCI_BUSY);
hda_dsp_ipc4_schedule_d0i3_work(hdev, msg);
return 0;
}
void hda_dsp_ipc_get_reply(struct snd_sof_dev *sdev)
{
struct snd_sof_ipc_msg *msg = sdev->msg;
struct sof_ipc_reply reply;
struct sof_ipc_cmd_hdr *hdr;
/*
 * Sometimes an unexpected reply IPC arrives that does not belong to any
 * of the IPCs sent by the driver. In this case, the driver must ignore
 * the IPC.
 */
if (!msg) {
dev_warn(sdev->dev, "unexpected ipc interrupt raised!\n");
return;
}
hdr = msg->msg_data;
if (hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE) ||
hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE)) {
/*
* memory windows are powered off before sending IPC reply,
* so we can't read the mailbox for CTX_SAVE and PM_GATE
* replies.
*/
reply.error = 0;
reply.hdr.cmd = SOF_IPC_GLB_REPLY;
reply.hdr.size = sizeof(reply);
memcpy(msg->reply_data, &reply, sizeof(reply));
msg->reply_error = 0;
} else {
snd_sof_ipc_get_reply(sdev);
}
}
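/*
 * IPC4 IRQ thread: handle the DONE bit (our TX was acknowledged) and the
 * BUSY bit (a reply or notification arrived from the DSP).
 */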
irqreturn_t hda_dsp_ipc4_irq_thread(int irq, void *context)
{
struct sof_ipc4_msg notification_data = {{ 0 }};
struct snd_sof_dev *sdev = context;
bool ack_received = false;
bool ipc_irq = false;
u32 hipcie, hipct;
hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCIE);
hipct = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCT);
if (hipcie & HDA_DSP_REG_HIPCIE_DONE) {
/* DSP received the message */
snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCCTL,
HDA_DSP_REG_HIPCCTL_DONE, 0);
hda_dsp_ipc_dsp_done(sdev);
ipc_irq = true;
ack_received = true;
}
if (hipct & HDA_DSP_REG_HIPCT_BUSY) {
/* Message from DSP (reply or notification) */
u32 hipcte = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
HDA_DSP_REG_HIPCTE);
u32 primary = hipct & HDA_DSP_REG_HIPCT_MSG_MASK;
u32 extension = hipcte & HDA_DSP_REG_HIPCTE_MSG_MASK;
/* mask BUSY interrupt */
snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCCTL,
HDA_DSP_REG_HIPCCTL_BUSY, 0);
if (primary & SOF_IPC4_MSG_DIR_MASK) {
/* Reply received */
if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
struct sof_ipc4_msg *data = sdev->ipc->msg.reply_data;
data->primary = primary;
data->extension = extension;
spin_lock_irq(&sdev->ipc_lock);
snd_sof_ipc_get_reply(sdev);
hda_dsp_ipc_host_done(sdev);
snd_sof_ipc_reply(sdev, data->primary);
spin_unlock_irq(&sdev->ipc_lock);
} else {
dev_dbg_ratelimited(sdev->dev,
"IPC reply before FW_READY: %#x|%#x\n",
primary, extension);
}
} else {
/* Notification received */
notification_data.primary = primary;
notification_data.extension = extension;
sdev->ipc->msg.rx_data = ¬ification_data;
snd_sof_ipc_msgs_rx(sdev);
sdev->ipc->msg.rx_data = NULL;
/* Let DSP know that we have finished processing the message */
hda_dsp_ipc_host_done(sdev);
}
ipc_irq = true;
}
if (!ipc_irq)
/* This interrupt is not shared so no need to return IRQ_NONE. */
dev_dbg_ratelimited(sdev->dev, "nothing to do in IPC IRQ thread\n");
if (ack_received) {
struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
if (hdev->delayed_ipc_tx_msg)
hda_dsp_ipc4_send_msg(sdev, hdev->delayed_ipc_tx_msg);
}
return IRQ_HANDLED;
}
/* IPC handler thread */
irqreturn_t hda_dsp_ipc_irq_thread(int irq, void *context)
{
struct snd_sof_dev *sdev = context;
u32 hipci;
u32 hipcie;
u32 hipct;
u32 hipcte;
u32 msg;
u32 msg_ext;
bool ipc_irq = false;
/* read IPC status */
hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
HDA_DSP_REG_HIPCIE);
hipct = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCT);
hipci = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCI);
hipcte = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCTE);
/* is this a reply message from the DSP */
if (hipcie & HDA_DSP_REG_HIPCIE_DONE) {
msg = hipci & HDA_DSP_REG_HIPCI_MSG_MASK;
msg_ext = hipcie & HDA_DSP_REG_HIPCIE_MSG_MASK;
trace_sof_intel_ipc_firmware_response(sdev, msg, msg_ext);
/* mask Done interrupt */
snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
HDA_DSP_REG_HIPCCTL,
HDA_DSP_REG_HIPCCTL_DONE, 0);
/*
* Make sure the interrupt thread cannot be preempted between
* waking up the sender and re-enabling the interrupt. Also
* protect against a theoretical race with sof_ipc_tx_message():
* if the DSP is fast enough to receive an IPC message, reply to
* it, and the host interrupt processing calls this function on
* a different core from the one, where the sending is taking
* place, the message might not yet be marked as expecting a
* reply.
*/
if (likely(sdev->fw_state == SOF_FW_BOOT_COMPLETE)) {
spin_lock_irq(&sdev->ipc_lock);
/* handle immediate reply from DSP core */
hda_dsp_ipc_get_reply(sdev);
snd_sof_ipc_reply(sdev, msg);
/* set the done bit */
hda_dsp_ipc_dsp_done(sdev);
spin_unlock_irq(&sdev->ipc_lock);
} else {
dev_dbg_ratelimited(sdev->dev, "IPC reply before FW_READY: %#x\n",
msg);
}
ipc_irq = true;
}
/* is this a new message from DSP */
if (hipct & HDA_DSP_REG_HIPCT_BUSY) {
msg = hipct & HDA_DSP_REG_HIPCT_MSG_MASK;
msg_ext = hipcte & HDA_DSP_REG_HIPCTE_MSG_MASK;
trace_sof_intel_ipc_firmware_initiated(sdev, msg, msg_ext);
/* mask BUSY interrupt */
snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR,
HDA_DSP_REG_HIPCCTL,
HDA_DSP_REG_HIPCCTL_BUSY, 0);
/* handle messages from DSP */
if ((hipct & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
bool non_recoverable = true;
/*
* This is a PANIC message!
*
* If it is arriving during firmware boot and it is not
* the last boot attempt then change the non_recoverable
* to false as the DSP might be able to boot in the next
* iteration(s)
*/
if (sdev->fw_state == SOF_FW_BOOT_IN_PROGRESS &&
hda->boot_iteration < HDA_FW_BOOT_ATTEMPTS)
non_recoverable = false;
snd_sof_dsp_panic(sdev, HDA_DSP_PANIC_OFFSET(msg_ext),
non_recoverable);
} else {
/* normal message - process normally */
snd_sof_ipc_msgs_rx(sdev);
}
hda_dsp_ipc_host_done(sdev);
ipc_irq = true;
}
if (!ipc_irq) {
/*
* This interrupt is not shared so no need to return IRQ_NONE.
*/
dev_dbg_ratelimited(sdev->dev,
"nothing to do in IPC IRQ thread\n");
}
return IRQ_HANDLED;
}
/* Check if an IPC IRQ occurred */
bool hda_dsp_check_ipc_irq(struct snd_sof_dev *sdev)
{
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
bool ret = false;
u32 irq_status;
if (sdev->dspless_mode_selected)
return false;
/* store status */
irq_status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIS);
trace_sof_intel_hda_irq_ipc_check(sdev, irq_status);
/* invalid message ? */
if (irq_status == 0xffffffff)
goto out;
/* IPC message ? */
if (irq_status & HDA_DSP_ADSPIS_IPC)
ret = true;
/* CLDMA message ? */
if (irq_status & HDA_DSP_ADSPIS_CL_DMA) {
hda->code_loading = 0;
wake_up(&hda->waitq);
ret = false;
}
out:
return ret;
}
int hda_dsp_ipc_get_mailbox_offset(struct snd_sof_dev *sdev)
{
return HDA_DSP_MBOX_UPLINK_OFFSET;
}
int hda_dsp_ipc_get_window_offset(struct snd_sof_dev *sdev, u32 id)
{
return SRAM_WINDOW_OFFSET(id);
}
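/*
 * Read IPC message data either from the generic DSP mailbox or, for stream
 * messages, from the stream's position slot in the stream mailbox.
 */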
int hda_ipc_msg_data(struct snd_sof_dev *sdev,
struct snd_sof_pcm_stream *sps,
void *p, size_t sz)
{
if (!sps || !sdev->stream_box.size) {
sof_mailbox_read(sdev, sdev->dsp_box.offset, p, sz);
} else {
struct snd_pcm_substream *substream = sps->substream;
struct hdac_stream *hstream = substream->runtime->private_data;
struct sof_intel_hda_stream *hda_stream;
hda_stream = container_of(hstream,
struct sof_intel_hda_stream,
hext_stream.hstream);
/* The stream might already be closed */
if (!hstream)
return -ESTRPIPE;
sof_mailbox_read(sdev, hda_stream->sof_intel_stream.posn_offset, p, sz);
}
return 0;
}
int hda_set_stream_data_offset(struct snd_sof_dev *sdev,
struct snd_sof_pcm_stream *sps,
size_t posn_offset)
{
struct snd_pcm_substream *substream = sps->substream;
struct hdac_stream *hstream = substream->runtime->private_data;
struct sof_intel_hda_stream *hda_stream;
hda_stream = container_of(hstream, struct sof_intel_hda_stream,
hext_stream.hstream);
/* check for unaligned offset or overflow */
if (posn_offset > sdev->stream_box.size ||
posn_offset % sizeof(struct sof_ipc_stream_posn) != 0)
return -EINVAL;
hda_stream->sof_intel_stream.posn_offset = sdev->stream_box.offset + posn_offset;
dev_dbg(sdev->dev, "pcm: stream dir %d, posn mailbox offset is %zu",
substream->stream, hda_stream->sof_intel_stream.posn_offset);
return 0;
}
/* end of file: sound/soc/sof/intel/hda-ipc.c (linux-master) */
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018-2022 Intel Corporation. All rights reserved.
//
// Author: Ranjani Sridharan <[email protected]>
//
#include <linux/module.h>
#include <linux/pci.h>
#include <sound/soc-acpi.h>
#include <sound/soc-acpi-intel-match.h>
#include <sound/sof.h>
#include "../ops.h"
#include "../sof-pci-dev.h"
/* platform specific devices */
#include "hda.h"
#include "mtl.h"
static const struct sof_dev_desc mtl_desc = {
.use_acpi_target_states = true,
.machines = snd_soc_acpi_intel_mtl_machines,
.alt_machines = snd_soc_acpi_intel_mtl_sdw_machines,
.resindex_lpe_base = 0,
.resindex_pcicfg_base = -1,
.resindex_imr_base = -1,
.irqindex_host_ipc = -1,
.chip_info = &mtl_chip_info,
.ipc_supported_mask = BIT(SOF_INTEL_IPC4),
.ipc_default = SOF_INTEL_IPC4,
.dspless_mode_supported = true, /* Only supported for HDaudio */
.default_fw_path = {
[SOF_INTEL_IPC4] = "intel/sof-ipc4/mtl",
},
.default_lib_path = {
[SOF_INTEL_IPC4] = "intel/sof-ipc4-lib/mtl",
},
.default_tplg_path = {
[SOF_INTEL_IPC4] = "intel/sof-ace-tplg",
},
.default_fw_filename = {
[SOF_INTEL_IPC4] = "sof-mtl.ri",
},
.nocodec_tplg_filename = "sof-mtl-nocodec.tplg",
.ops = &sof_mtl_ops,
.ops_init = sof_mtl_ops_init,
.ops_free = hda_ops_free,
};
/* PCI IDs */
static const struct pci_device_id sof_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, HDA_MTL, &mtl_desc) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, sof_pci_ids);
/* pci_driver definition */
static struct pci_driver snd_sof_pci_intel_mtl_driver = {
.name = "sof-audio-pci-intel-mtl",
.id_table = sof_pci_ids,
.probe = hda_pci_intel_probe,
.remove = sof_pci_remove,
.shutdown = sof_pci_shutdown,
.driver = {
.pm = &sof_pci_pm,
},
};
module_pci_driver(snd_sof_pci_intel_mtl_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
/* end of file: sound/soc/sof/intel/pci-mtl.c (linux-master) */
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// Copyright(c) 2020 Intel Corporation. All rights reserved.
//
// Authors: Ranjani Sridharan <[email protected]>
//
/*
* Hardware interface for audio DSP on Tigerlake.
*/
#include <sound/sof/ext_manifest4.h>
#include "../ipc4-priv.h"
#include "../ops.h"
#include "hda.h"
#include "hda-ipc.h"
#include "../sof-audio.h"
static const struct snd_sof_debugfs_map tgl_dsp_debugfs[] = {
{"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
{"pp", HDA_DSP_PP_BAR, 0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
{"dsp", HDA_DSP_BAR, 0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
};
static int tgl_dsp_core_get(struct snd_sof_dev *sdev, int core)
{
const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
/* power up primary core if not already powered up and return */
if (core == SOF_DSP_PRIMARY_CORE)
return hda_dsp_enable_core(sdev, BIT(core));
if (pm_ops->set_core_state)
return pm_ops->set_core_state(sdev, core, true);
return 0;
}
static int tgl_dsp_core_put(struct snd_sof_dev *sdev, int core)
{
const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
int ret;
if (pm_ops->set_core_state) {
ret = pm_ops->set_core_state(sdev, core, false);
if (ret < 0)
return ret;
}
/* power down primary core and return */
if (core == SOF_DSP_PRIMARY_CORE)
return hda_dsp_core_reset_power_down(sdev, BIT(core));
return 0;
}
/* Tigerlake ops */
struct snd_sof_dsp_ops sof_tgl_ops;
EXPORT_SYMBOL_NS(sof_tgl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);
int sof_tgl_ops_init(struct snd_sof_dev *sdev)
{
/* common defaults */
memcpy(&sof_tgl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));
/* probe/remove/shutdown */
sof_tgl_ops.shutdown = hda_dsp_shutdown_dma_flush;
if (sdev->pdata->ipc_type == SOF_IPC) {
/* doorbell */
sof_tgl_ops.irq_thread = cnl_ipc_irq_thread;
/* ipc */
sof_tgl_ops.send_msg = cnl_ipc_send_msg;
/* debug */
sof_tgl_ops.ipc_dump = cnl_ipc_dump;
sof_tgl_ops.set_power_state = hda_dsp_set_power_state_ipc3;
}
if (sdev->pdata->ipc_type == SOF_INTEL_IPC4) {
struct sof_ipc4_fw_data *ipc4_data;
sdev->private = devm_kzalloc(sdev->dev, sizeof(*ipc4_data), GFP_KERNEL);
if (!sdev->private)
return -ENOMEM;
ipc4_data = sdev->private;
ipc4_data->manifest_fw_hdr_offset = SOF_MAN4_FW_HDR_OFFSET;
ipc4_data->mtrace_type = SOF_IPC4_MTRACE_INTEL_CAVS_2;
/* External library loading support */
ipc4_data->load_library = hda_dsp_ipc4_load_library;
/* doorbell */
sof_tgl_ops.irq_thread = cnl_ipc4_irq_thread;
/* ipc */
sof_tgl_ops.send_msg = cnl_ipc4_send_msg;
/* debug */
sof_tgl_ops.ipc_dump = cnl_ipc4_dump;
sof_tgl_ops.set_power_state = hda_dsp_set_power_state_ipc4;
}
/* set DAI driver ops */
hda_set_dai_drv_ops(sdev, &sof_tgl_ops);
/* debug */
sof_tgl_ops.debug_map = tgl_dsp_debugfs;
sof_tgl_ops.debug_map_count = ARRAY_SIZE(tgl_dsp_debugfs);
/* pre/post fw run */
sof_tgl_ops.post_fw_run = hda_dsp_post_fw_run;
/* firmware run */
sof_tgl_ops.run = hda_dsp_cl_boot_firmware_iccmax;
/* dsp core get/put */
sof_tgl_ops.core_get = tgl_dsp_core_get;
sof_tgl_ops.core_put = tgl_dsp_core_put;
return 0;
};
EXPORT_SYMBOL_NS(sof_tgl_ops_init, SND_SOC_SOF_INTEL_HDA_COMMON);
const struct sof_intel_dsp_desc tgl_chip_info = {
/* Tigerlake, Alderlake */
.cores_num = 4,
.init_core_mask = 1,
.host_managed_cores_mask = BIT(0),
.ipc_req = CNL_DSP_REG_HIPCIDR,
.ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
.ipc_ack = CNL_DSP_REG_HIPCIDA,
.ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
.ipc_ctl = CNL_DSP_REG_HIPCCTL,
.rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
.rom_init_timeout = 300,
.ssp_count = TGL_SSP_COUNT,
.ssp_base_offset = CNL_SSP_BASE_OFFSET,
.sdw_shim_base = SDW_SHIM_BASE,
.sdw_alh_base = SDW_ALH_BASE,
.d0i3_offset = SOF_HDA_VS_D0I3C,
.read_sdw_lcount = hda_sdw_check_lcount_common,
.enable_sdw_irq = hda_common_enable_sdw_irq,
.check_sdw_irq = hda_common_check_sdw_irq,
.check_sdw_wakeen_irq = hda_sdw_check_wakeen_irq_common,
.check_ipc_irq = hda_dsp_check_ipc_irq,
.cl_init = cl_dsp_init,
.power_down_dsp = hda_power_down_dsp,
.disable_interrupts = hda_dsp_disable_interrupts,
.hw_ip_version = SOF_INTEL_CAVS_2_5,
};
EXPORT_SYMBOL_NS(tgl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
const struct sof_intel_dsp_desc tglh_chip_info = {
/* Tigerlake-H */
.cores_num = 2,
.init_core_mask = 1,
.host_managed_cores_mask = BIT(0),
.ipc_req = CNL_DSP_REG_HIPCIDR,
.ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
.ipc_ack = CNL_DSP_REG_HIPCIDA,
.ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
.ipc_ctl = CNL_DSP_REG_HIPCCTL,
.rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
.rom_init_timeout = 300,
.ssp_count = TGL_SSP_COUNT,
.ssp_base_offset = CNL_SSP_BASE_OFFSET,
.sdw_shim_base = SDW_SHIM_BASE,
.sdw_alh_base = SDW_ALH_BASE,
.d0i3_offset = SOF_HDA_VS_D0I3C,
.read_sdw_lcount = hda_sdw_check_lcount_common,
.enable_sdw_irq = hda_common_enable_sdw_irq,
.check_sdw_irq = hda_common_check_sdw_irq,
.check_sdw_wakeen_irq = hda_sdw_check_wakeen_irq_common,
.check_ipc_irq = hda_dsp_check_ipc_irq,
.cl_init = cl_dsp_init,
.power_down_dsp = hda_power_down_dsp,
.disable_interrupts = hda_dsp_disable_interrupts,
.hw_ip_version = SOF_INTEL_CAVS_2_5,
};
EXPORT_SYMBOL_NS(tglh_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
const struct sof_intel_dsp_desc ehl_chip_info = {
/* Elkhartlake */
.cores_num = 4,
.init_core_mask = 1,
.host_managed_cores_mask = BIT(0),
.ipc_req = CNL_DSP_REG_HIPCIDR,
.ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
.ipc_ack = CNL_DSP_REG_HIPCIDA,
.ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
.ipc_ctl = CNL_DSP_REG_HIPCCTL,
.rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
.rom_init_timeout = 300,
.ssp_count = TGL_SSP_COUNT,
.ssp_base_offset = CNL_SSP_BASE_OFFSET,
.sdw_shim_base = SDW_SHIM_BASE,
.sdw_alh_base = SDW_ALH_BASE,
.d0i3_offset = SOF_HDA_VS_D0I3C,
.read_sdw_lcount = hda_sdw_check_lcount_common,
.enable_sdw_irq = hda_common_enable_sdw_irq,
.check_sdw_irq = hda_common_check_sdw_irq,
.check_sdw_wakeen_irq = hda_sdw_check_wakeen_irq_common,
.check_ipc_irq = hda_dsp_check_ipc_irq,
.cl_init = cl_dsp_init,
.power_down_dsp = hda_power_down_dsp,
.disable_interrupts = hda_dsp_disable_interrupts,
.hw_ip_version = SOF_INTEL_CAVS_2_5,
};
EXPORT_SYMBOL_NS(ehl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
const struct sof_intel_dsp_desc adls_chip_info = {
/* Alderlake-S */
.cores_num = 2,
.init_core_mask = BIT(0),
.host_managed_cores_mask = BIT(0),
.ipc_req = CNL_DSP_REG_HIPCIDR,
.ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
.ipc_ack = CNL_DSP_REG_HIPCIDA,
.ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
.ipc_ctl = CNL_DSP_REG_HIPCCTL,
.rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
.rom_init_timeout = 300,
.ssp_count = TGL_SSP_COUNT,
.ssp_base_offset = CNL_SSP_BASE_OFFSET,
.sdw_shim_base = SDW_SHIM_BASE,
.sdw_alh_base = SDW_ALH_BASE,
.d0i3_offset = SOF_HDA_VS_D0I3C,
.read_sdw_lcount = hda_sdw_check_lcount_common,
.enable_sdw_irq = hda_common_enable_sdw_irq,
.check_sdw_irq = hda_common_check_sdw_irq,
.check_sdw_wakeen_irq = hda_sdw_check_wakeen_irq_common,
.check_ipc_irq = hda_dsp_check_ipc_irq,
.cl_init = cl_dsp_init,
.power_down_dsp = hda_power_down_dsp,
.disable_interrupts = hda_dsp_disable_interrupts,
.hw_ip_version = SOF_INTEL_CAVS_2_5,
};
EXPORT_SYMBOL_NS(adls_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
| linux-master | sound/soc/sof/intel/tgl.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Authors: Liam Girdwood <[email protected]>
// Ranjani Sridharan <[email protected]>
// Rander Wang <[email protected]>
// Keyon Jie <[email protected]>
//
/*
* Hardware interface for generic Intel audio DSP HDA IP
*/
#include <linux/module.h>
#include <sound/hdaudio_ext.h>
#include <sound/hda_register.h>
#include <sound/hda_component.h>
#include <sound/hda-mlink.h>
#include "../ops.h"
#include "hda.h"
/*
* HDA Operations.
*/
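/*
 * Enter (reset == true) or exit (reset == false) controller reset by
 * updating the SOF_HDA_GCTL_RESET bit, then poll GCTL until it reflects
 * the requested state or HDA_DSP_CTRL_RESET_TIMEOUT expires.
 */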
int hda_dsp_ctrl_link_reset(struct snd_sof_dev *sdev, bool reset)
{
unsigned long timeout;
u32 gctl = 0;
u32 val;
/* 0 to enter reset and 1 to exit reset */
val = reset ? 0 : SOF_HDA_GCTL_RESET;
/* enter/exit HDA controller reset */
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_GCTL,
SOF_HDA_GCTL_RESET, val);
/* wait to enter/exit reset */
timeout = jiffies + msecs_to_jiffies(HDA_DSP_CTRL_RESET_TIMEOUT);
while (time_before(jiffies, timeout)) {
gctl = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_GCTL);
if ((gctl & SOF_HDA_GCTL_RESET) == val)
return 0;
usleep_range(500, 1000);
}
/* enter/exit reset failed */
dev_err(sdev->dev, "error: failed to %s HDA controller gctl 0x%x\n",
reset ? "reset" : "ready", gctl);
return -EIO;
}
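/*
 * Walk the extended capability list starting at SOF_HDA_LLCH and record the
 * PP, SPIB, DRSM, GTS and ML capability pointers. A reset cycle is performed
 * first since some devices only expose valid capabilities afterwards.
 */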
int hda_dsp_ctrl_get_caps(struct snd_sof_dev *sdev)
{
struct hdac_bus *bus = sof_to_bus(sdev);
u32 cap, offset, feature;
int count = 0;
int ret;
/*
* On some devices, one reset cycle is necessary before reading
* capabilities
*/
ret = hda_dsp_ctrl_link_reset(sdev, true);
if (ret < 0)
return ret;
ret = hda_dsp_ctrl_link_reset(sdev, false);
if (ret < 0)
return ret;
offset = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_LLCH);
do {
dev_dbg(sdev->dev, "checking for capabilities at offset 0x%x\n",
offset & SOF_HDA_CAP_NEXT_MASK);
cap = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, offset);
if (cap == -1) {
dev_dbg(bus->dev, "Invalid capability reg read\n");
break;
}
feature = (cap & SOF_HDA_CAP_ID_MASK) >> SOF_HDA_CAP_ID_OFF;
switch (feature) {
case SOF_HDA_PP_CAP_ID:
dev_dbg(sdev->dev, "found DSP capability at 0x%x\n",
offset);
bus->ppcap = bus->remap_addr + offset;
sdev->bar[HDA_DSP_PP_BAR] = bus->ppcap;
break;
case SOF_HDA_SPIB_CAP_ID:
dev_dbg(sdev->dev, "found SPIB capability at 0x%x\n",
offset);
bus->spbcap = bus->remap_addr + offset;
sdev->bar[HDA_DSP_SPIB_BAR] = bus->spbcap;
break;
case SOF_HDA_DRSM_CAP_ID:
dev_dbg(sdev->dev, "found DRSM capability at 0x%x\n",
offset);
bus->drsmcap = bus->remap_addr + offset;
sdev->bar[HDA_DSP_DRSM_BAR] = bus->drsmcap;
break;
case SOF_HDA_GTS_CAP_ID:
dev_dbg(sdev->dev, "found GTS capability at 0x%x\n",
offset);
bus->gtscap = bus->remap_addr + offset;
break;
case SOF_HDA_ML_CAP_ID:
dev_dbg(sdev->dev, "found ML capability at 0x%x\n",
offset);
bus->mlcap = bus->remap_addr + offset;
break;
default:
dev_dbg(sdev->dev, "found capability %d at 0x%x\n",
feature, offset);
break;
}
offset = cap & SOF_HDA_CAP_NEXT_MASK;
} while (count++ <= SOF_HDA_MAX_CAPS && offset);
return 0;
}
void hda_dsp_ctrl_ppcap_enable(struct snd_sof_dev *sdev, bool enable)
{
u32 val = enable ? SOF_HDA_PPCTL_GPROCEN : 0;
snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
SOF_HDA_PPCTL_GPROCEN, val);
}
void hda_dsp_ctrl_ppcap_int_enable(struct snd_sof_dev *sdev, bool enable)
{
u32 val = enable ? SOF_HDA_PPCTL_PIE : 0;
snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
SOF_HDA_PPCTL_PIE, val);
}
void hda_dsp_ctrl_misc_clock_gating(struct snd_sof_dev *sdev, bool enable)
{
u32 val = enable ? PCI_CGCTL_MISCBDCGE_MASK : 0;
snd_sof_pci_update_bits(sdev, PCI_CGCTL, PCI_CGCTL_MISCBDCGE_MASK, val);
}
/*
* enable/disable audio dsp clock gating and power gating bits.
* This allows the HW to opportunistically power and clock gate
* the audio dsp when it is idle
*/
int hda_dsp_ctrl_clock_power_gating(struct snd_sof_dev *sdev, bool enable)
{
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
u32 val;
/* enable/disable audio dsp clock gating */
val = enable ? PCI_CGCTL_ADSPDCGE : 0;
snd_sof_pci_update_bits(sdev, PCI_CGCTL, PCI_CGCTL_ADSPDCGE, val);
	/* disable DMI link L1 entry when requested, but re-enable it only if it wasn't disabled previously */
val = enable ? HDA_VS_INTEL_EM2_L1SEN : 0;
if (!enable || !hda->l1_disabled)
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
HDA_VS_INTEL_EM2_L1SEN, val);
/* enable/disable audio dsp power gating */
val = enable ? 0 : PCI_PGCTL_ADSPPGD;
snd_sof_pci_update_bits(sdev, PCI_PGCTL, PCI_PGCTL_ADSPPGD, val);
return 0;
}
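/*
 * Bring the controller out of reset and into an operational state: clear
 * stream, wake and interrupt status, enable controller and global interrupts,
 * program the DMA position buffer and reset the multi-link LOSIDV values.
 */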
int hda_dsp_ctrl_init_chip(struct snd_sof_dev *sdev)
{
struct hdac_bus *bus = sof_to_bus(sdev);
struct hdac_stream *stream;
int sd_offset, ret = 0;
if (bus->chip_init)
return 0;
hda_codec_set_codec_wakeup(sdev, true);
hda_dsp_ctrl_misc_clock_gating(sdev, false);
/* reset HDA controller */
ret = hda_dsp_ctrl_link_reset(sdev, true);
if (ret < 0) {
dev_err(sdev->dev, "error: failed to reset HDA controller\n");
goto err;
}
usleep_range(500, 1000);
/* exit HDA controller reset */
ret = hda_dsp_ctrl_link_reset(sdev, false);
if (ret < 0) {
dev_err(sdev->dev, "error: failed to exit HDA controller reset\n");
goto err;
}
usleep_range(1000, 1200);
hda_codec_detect_mask(sdev);
/* clear stream status */
list_for_each_entry(stream, &bus->stream_list, list) {
sd_offset = SOF_STREAM_SD_OFFSET(stream);
snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
sd_offset + SOF_HDA_ADSP_REG_SD_STS,
SOF_HDA_CL_DMA_SD_INT_MASK);
}
/* clear WAKESTS */
snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_WAKESTS,
SOF_HDA_WAKESTS_INT_MASK);
hda_codec_rirb_status_clear(sdev);
/* clear interrupt status register */
snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS,
SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_ALL_STREAM);
hda_codec_init_cmd_io(sdev);
/* enable CIE and GIE interrupts */
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN,
SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN);
/* program the position buffer */
if (bus->use_posbuf && bus->posbuf.addr) {
snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPLBASE,
(u32)bus->posbuf.addr);
snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_ADSP_DPUBASE,
upper_32_bits(bus->posbuf.addr));
}
hda_bus_ml_reset_losidv(bus);
bus->chip_init = true;
err:
hda_dsp_ctrl_misc_clock_gating(sdev, true);
hda_codec_set_codec_wakeup(sdev, false);
return ret;
}
void hda_dsp_ctrl_stop_chip(struct snd_sof_dev *sdev)
{
struct hdac_bus *bus = sof_to_bus(sdev);
struct hdac_stream *stream;
int sd_offset;
if (!bus->chip_init)
return;
/* disable interrupts in stream descriptor */
list_for_each_entry(stream, &bus->stream_list, list) {
sd_offset = SOF_STREAM_SD_OFFSET(stream);
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
sd_offset +
SOF_HDA_ADSP_REG_SD_CTL,
SOF_HDA_CL_DMA_SD_INT_MASK,
0);
}
/* disable SIE for all streams */
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
SOF_HDA_INT_ALL_STREAM, 0);
/* disable controller CIE and GIE */
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN,
0);
/* clear stream status */
list_for_each_entry(stream, &bus->stream_list, list) {
sd_offset = SOF_STREAM_SD_OFFSET(stream);
snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
sd_offset + SOF_HDA_ADSP_REG_SD_STS,
SOF_HDA_CL_DMA_SD_INT_MASK);
}
/* clear WAKESTS */
snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_WAKESTS,
SOF_HDA_WAKESTS_INT_MASK);
hda_codec_rirb_status_clear(sdev);
/* clear interrupt status register */
snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS,
SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_ALL_STREAM);
hda_codec_stop_cmd_io(sdev);
/* disable position buffer */
if (bus->use_posbuf && bus->posbuf.addr) {
snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
SOF_HDA_ADSP_DPLBASE, 0);
snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
SOF_HDA_ADSP_DPUBASE, 0);
}
bus->chip_init = false;
}
| linux-master | sound/soc/sof/intel/hda-ctrl.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Authors: Liam Girdwood <[email protected]>
// Ranjani Sridharan <[email protected]>
// Rander Wang <[email protected]>
// Keyon Jie <[email protected]>
//
/*
* Hardware interface for audio DSP on Apollolake and GeminiLake
*/
#include <sound/sof/ext_manifest4.h>
#include "../ipc4-priv.h"
#include "../sof-priv.h"
#include "hda.h"
#include "../sof-audio.h"
static const struct snd_sof_debugfs_map apl_dsp_debugfs[] = {
{"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
{"pp", HDA_DSP_PP_BAR, 0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
{"dsp", HDA_DSP_BAR, 0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
};
/* apollolake ops */
struct snd_sof_dsp_ops sof_apl_ops;
EXPORT_SYMBOL_NS(sof_apl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);
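/*
 * Populate sof_apl_ops from the common HDA ops and override the IPC- and
 * platform-specific callbacks (IPC3 vs IPC4 doorbell/message/debug handlers,
 * debugfs map, firmware boot and core_get).
 */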
int sof_apl_ops_init(struct snd_sof_dev *sdev)
{
/* common defaults */
memcpy(&sof_apl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));
/* probe/remove/shutdown */
sof_apl_ops.shutdown = hda_dsp_shutdown;
if (sdev->pdata->ipc_type == SOF_IPC) {
/* doorbell */
sof_apl_ops.irq_thread = hda_dsp_ipc_irq_thread;
/* ipc */
sof_apl_ops.send_msg = hda_dsp_ipc_send_msg;
/* debug */
sof_apl_ops.ipc_dump = hda_ipc_dump;
sof_apl_ops.set_power_state = hda_dsp_set_power_state_ipc3;
}
if (sdev->pdata->ipc_type == SOF_INTEL_IPC4) {
struct sof_ipc4_fw_data *ipc4_data;
sdev->private = devm_kzalloc(sdev->dev, sizeof(*ipc4_data), GFP_KERNEL);
if (!sdev->private)
return -ENOMEM;
ipc4_data = sdev->private;
ipc4_data->manifest_fw_hdr_offset = SOF_MAN4_FW_HDR_OFFSET;
ipc4_data->mtrace_type = SOF_IPC4_MTRACE_INTEL_CAVS_1_5;
/* External library loading support */
ipc4_data->load_library = hda_dsp_ipc4_load_library;
/* doorbell */
sof_apl_ops.irq_thread = hda_dsp_ipc4_irq_thread;
/* ipc */
sof_apl_ops.send_msg = hda_dsp_ipc4_send_msg;
/* debug */
sof_apl_ops.ipc_dump = hda_ipc4_dump;
sof_apl_ops.set_power_state = hda_dsp_set_power_state_ipc4;
}
/* set DAI driver ops */
hda_set_dai_drv_ops(sdev, &sof_apl_ops);
/* debug */
sof_apl_ops.debug_map = apl_dsp_debugfs;
sof_apl_ops.debug_map_count = ARRAY_SIZE(apl_dsp_debugfs);
/* firmware run */
sof_apl_ops.run = hda_dsp_cl_boot_firmware;
/* pre/post fw run */
sof_apl_ops.post_fw_run = hda_dsp_post_fw_run;
/* dsp core get/put */
sof_apl_ops.core_get = hda_dsp_core_get;
return 0;
};
EXPORT_SYMBOL_NS(sof_apl_ops_init, SND_SOC_SOF_INTEL_HDA_COMMON);
const struct sof_intel_dsp_desc apl_chip_info = {
/* Apollolake */
.cores_num = 2,
.init_core_mask = 1,
.host_managed_cores_mask = GENMASK(1, 0),
.ipc_req = HDA_DSP_REG_HIPCI,
.ipc_req_mask = HDA_DSP_REG_HIPCI_BUSY,
.ipc_ack = HDA_DSP_REG_HIPCIE,
.ipc_ack_mask = HDA_DSP_REG_HIPCIE_DONE,
.ipc_ctl = HDA_DSP_REG_HIPCCTL,
.rom_status_reg = HDA_DSP_SRAM_REG_ROM_STATUS,
.rom_init_timeout = 150,
.ssp_count = APL_SSP_COUNT,
.ssp_base_offset = APL_SSP_BASE_OFFSET,
.d0i3_offset = SOF_HDA_VS_D0I3C,
.quirks = SOF_INTEL_PROCEN_FMT_QUIRK,
.check_ipc_irq = hda_dsp_check_ipc_irq,
.cl_init = cl_dsp_init,
.power_down_dsp = hda_power_down_dsp,
.disable_interrupts = hda_dsp_disable_interrupts,
.hw_ip_version = SOF_INTEL_CAVS_1_5_PLUS,
};
EXPORT_SYMBOL_NS(apl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
| linux-master | sound/soc/sof/intel/apl.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Authors: Keyon Jie <[email protected]>
#include <linux/io.h>
#include <sound/hdaudio.h>
#include <sound/hda_i915.h>
#include <sound/hda_codec.h>
#include <sound/hda_register.h>
#include "../sof-priv.h"
#include "hda.h"
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
#include "../../codecs/hdac_hda.h"
#define sof_hda_ext_ops snd_soc_hdac_hda_get_ops()
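/*
 * Keep WAKEEN in sync with the link power state: clear the codec's wake
 * enable bit when its link is powered up and set it when the link is
 * powered down, so a disabled link can still wake the controller.
 */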
static void update_codec_wake_enable(struct hdac_bus *bus, unsigned int addr, bool link_power)
{
unsigned int mask = snd_hdac_chip_readw(bus, WAKEEN);
if (link_power)
mask &= ~BIT(addr);
else
mask |= BIT(addr);
snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, mask);
}
static void sof_hda_bus_link_power(struct hdac_device *codec, bool enable)
{
struct hdac_bus *bus = codec->bus;
bool oldstate = test_bit(codec->addr, &bus->codec_powered);
snd_hdac_ext_bus_link_power(codec, enable);
if (enable == oldstate)
return;
/*
* Both codec driver and controller can hold references to
* display power. To avoid unnecessary power-up/down cycles,
* controller doesn't immediately release its reference.
*
* If the codec driver powers down the link, release
* the controller reference as well.
*/
if (codec->addr == HDA_IDISP_ADDR && !enable)
snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
/* WAKEEN needs to be set for disabled links */
update_codec_wake_enable(bus, codec->addr, enable);
}
static const struct hdac_bus_ops bus_core_ops = {
.command = snd_hdac_bus_send_cmd,
.get_response = snd_hdac_bus_get_response,
.link_power = sof_hda_bus_link_power,
};
#endif
/*
 * This can be used both with and without HDA link support.
*/
void sof_hda_bus_init(struct snd_sof_dev *sdev, struct device *dev)
{
struct hdac_bus *bus = sof_to_bus(sdev);
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_LINK)
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
snd_hdac_ext_bus_init(bus, dev, &bus_core_ops, sof_hda_ext_ops);
#else
snd_hdac_ext_bus_init(bus, dev, NULL, NULL);
#endif
#else
memset(bus, 0, sizeof(*bus));
bus->dev = dev;
INIT_LIST_HEAD(&bus->stream_list);
bus->irq = -1;
/*
	 * There is only one HDA bus at the moment, so keep the index as 0.
	 * This needs to be fixed when there is more than one HDA bus.
*/
bus->idx = 0;
spin_lock_init(&bus->reg_lock);
#endif /* CONFIG_SND_SOC_SOF_HDA_LINK */
}
void sof_hda_bus_exit(struct snd_sof_dev *sdev)
{
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_LINK)
struct hdac_bus *bus = sof_to_bus(sdev);
snd_hdac_ext_bus_exit(bus);
#endif
}
| linux-master | sound/soc/sof/intel/hda-bus.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Authors: Keyon Jie <[email protected]>
//
#include <sound/pcm_params.h>
#include <sound/hdaudio_ext.h>
#include <sound/hda-mlink.h>
#include <sound/hda_register.h>
#include <sound/intel-nhlt.h>
#include <sound/sof/ipc4/header.h>
#include <uapi/sound/sof/header.h>
#include "../ipc4-priv.h"
#include "../ipc4-topology.h"
#include "../sof-priv.h"
#include "../sof-audio.h"
#include "hda.h"
/*
* The default method is to fetch NHLT from BIOS. With this parameter set
* it is possible to override that with NHLT in the SOF topology manifest.
*/
static bool hda_use_tplg_nhlt;
module_param_named(sof_use_tplg_nhlt, hda_use_tplg_nhlt, bool, 0444);
MODULE_PARM_DESC(sof_use_tplg_nhlt, "SOF topology nhlt override");
static struct snd_sof_dev *widget_to_sdev(struct snd_soc_dapm_widget *w)
{
struct snd_sof_widget *swidget = w->dobj.private;
struct snd_soc_component *component = swidget->scomp;
return snd_soc_component_get_drvdata(component);
}
int hda_dai_config(struct snd_soc_dapm_widget *w, unsigned int flags,
struct snd_sof_dai_config_data *data)
{
struct snd_sof_widget *swidget = w->dobj.private;
const struct sof_ipc_tplg_ops *tplg_ops;
struct snd_sof_dev *sdev;
int ret;
if (!swidget)
return 0;
sdev = widget_to_sdev(w);
tplg_ops = sof_ipc_get_ops(sdev, tplg);
if (tplg_ops && tplg_ops->dai_config) {
ret = tplg_ops->dai_config(sdev, swidget, flags, data);
if (ret < 0) {
dev_err(sdev->dev, "DAI config with flags %x failed for widget %s\n",
flags, w->name);
return ret;
}
}
return 0;
}
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_LINK)
static struct snd_sof_dev *dai_to_sdev(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
return widget_to_sdev(w);
}
static const struct hda_dai_widget_dma_ops *
hda_dai_get_ops(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai)
{
struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
struct snd_sof_widget *swidget = w->dobj.private;
struct snd_sof_dev *sdev;
struct snd_sof_dai *sdai;
sdev = widget_to_sdev(w);
/*
	 * The swidget parameter of hda_select_dai_widget_ops() is ignored in
	 * the case of DSPless mode.
*/
if (sdev->dspless_mode_selected)
return hda_select_dai_widget_ops(sdev, NULL);
sdai = swidget->private;
/* select and set the DAI widget ops if not set already */
if (!sdai->platform_private) {
const struct hda_dai_widget_dma_ops *ops =
hda_select_dai_widget_ops(sdev, swidget);
if (!ops)
return NULL;
/* check if mandatory ops are set */
if (!ops || !ops->get_hext_stream)
return NULL;
sdai->platform_private = ops;
}
return sdai->platform_private;
}
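/*
 * Undo the link DMA setup done in hda_link_dma_hw_params(): clear the link
 * stream ID for playback, release the hext_stream via the widget ops and
 * drop the host DMA channel reservation held by hostless streams.
 */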
int hda_link_dma_cleanup(struct snd_pcm_substream *substream, struct hdac_ext_stream *hext_stream,
struct snd_soc_dai *cpu_dai)
{
const struct hda_dai_widget_dma_ops *ops = hda_dai_get_ops(substream, cpu_dai);
struct sof_intel_hda_stream *hda_stream;
struct hdac_ext_link *hlink;
struct snd_sof_dev *sdev;
int stream_tag;
if (!ops) {
dev_err(cpu_dai->dev, "DAI widget ops not set\n");
return -EINVAL;
}
sdev = dai_to_sdev(substream, cpu_dai);
hlink = ops->get_hlink(sdev, substream);
if (!hlink)
return -EINVAL;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
stream_tag = hdac_stream(hext_stream)->stream_tag;
snd_hdac_ext_bus_link_clear_stream_id(hlink, stream_tag);
}
if (ops->release_hext_stream)
ops->release_hext_stream(sdev, cpu_dai, substream);
hext_stream->link_prepared = 0;
/* free the host DMA channel reserved by hostless streams */
hda_stream = hstream_to_sof_hda_stream(hext_stream);
hda_stream->host_reserved = 0;
return 0;
}
static int hda_link_dma_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params, struct snd_soc_dai *cpu_dai)
{
const struct hda_dai_widget_dma_ops *ops = hda_dai_get_ops(substream, cpu_dai);
struct hdac_ext_stream *hext_stream;
struct hdac_stream *hstream;
struct hdac_ext_link *hlink;
struct snd_sof_dev *sdev;
int stream_tag;
if (!ops) {
dev_err(cpu_dai->dev, "DAI widget ops not set\n");
return -EINVAL;
}
sdev = dai_to_sdev(substream, cpu_dai);
hlink = ops->get_hlink(sdev, substream);
if (!hlink)
return -EINVAL;
hext_stream = ops->get_hext_stream(sdev, cpu_dai, substream);
if (!hext_stream) {
if (ops->assign_hext_stream)
hext_stream = ops->assign_hext_stream(sdev, cpu_dai, substream);
}
if (!hext_stream)
return -EBUSY;
hstream = &hext_stream->hstream;
stream_tag = hstream->stream_tag;
if (hext_stream->hstream.direction == SNDRV_PCM_STREAM_PLAYBACK)
snd_hdac_ext_bus_link_set_stream_id(hlink, stream_tag);
/* set the hdac_stream in the codec dai */
if (ops->codec_dai_set_stream)
ops->codec_dai_set_stream(sdev, substream, hstream);
if (ops->reset_hext_stream)
ops->reset_hext_stream(sdev, hext_stream);
if (ops->calc_stream_format && ops->setup_hext_stream) {
unsigned int format_val = ops->calc_stream_format(sdev, substream, params);
ops->setup_hext_stream(sdev, hext_stream, format_val);
}
hext_stream->link_prepared = 1;
return 0;
}
static int __maybe_unused hda_dai_hw_free(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
const struct hda_dai_widget_dma_ops *ops = hda_dai_get_ops(substream, cpu_dai);
struct hdac_ext_stream *hext_stream;
struct snd_sof_dev *sdev = dai_to_sdev(substream, cpu_dai);
if (!ops) {
dev_err(cpu_dai->dev, "DAI widget ops not set\n");
return -EINVAL;
}
hext_stream = ops->get_hext_stream(sdev, cpu_dai, substream);
if (!hext_stream)
return 0;
return hda_link_dma_cleanup(substream, hext_stream, cpu_dai);
}
static int __maybe_unused hda_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(dai, substream->stream);
const struct hda_dai_widget_dma_ops *ops = hda_dai_get_ops(substream, dai);
struct hdac_ext_stream *hext_stream;
struct snd_sof_dai_config_data data = { 0 };
unsigned int flags = SOF_DAI_CONFIG_FLAGS_HW_PARAMS;
struct snd_sof_dev *sdev = widget_to_sdev(w);
int ret;
if (!ops) {
dev_err(sdev->dev, "DAI widget ops not set\n");
return -EINVAL;
}
hext_stream = ops->get_hext_stream(sdev, dai, substream);
if (hext_stream && hext_stream->link_prepared)
return 0;
ret = hda_link_dma_hw_params(substream, params, dai);
if (ret < 0)
return ret;
hext_stream = ops->get_hext_stream(sdev, dai, substream);
flags |= SOF_DAI_CONFIG_FLAGS_2_STEP_STOP << SOF_DAI_CONFIG_FLAGS_QUIRK_SHIFT;
data.dai_data = hdac_stream(hext_stream)->stream_tag - 1;
return hda_dai_config(w, flags, &data);
}
/*
 * In contrast to IPC3, the DAI trigger in IPC4 mixes pipeline state changes
 * (over the IPC channel) and DMA state changes (direct host register writes).
*/
static int __maybe_unused hda_dai_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
const struct hda_dai_widget_dma_ops *ops = hda_dai_get_ops(substream, dai);
struct hdac_ext_stream *hext_stream;
struct snd_sof_dev *sdev;
int ret;
if (!ops) {
dev_err(dai->dev, "DAI widget ops not set\n");
return -EINVAL;
}
dev_dbg(dai->dev, "cmd=%d dai %s direction %d\n", cmd,
dai->name, substream->stream);
sdev = dai_to_sdev(substream, dai);
hext_stream = ops->get_hext_stream(sdev, dai, substream);
if (!hext_stream)
return -EINVAL;
if (ops->pre_trigger) {
ret = ops->pre_trigger(sdev, dai, substream, cmd);
if (ret < 0)
return ret;
}
if (ops->trigger) {
ret = ops->trigger(sdev, dai, substream, cmd);
if (ret < 0)
return ret;
}
if (ops->post_trigger) {
ret = ops->post_trigger(sdev, dai, substream, cmd);
if (ret < 0)
return ret;
}
switch (cmd) {
case SNDRV_PCM_TRIGGER_SUSPEND:
ret = hda_link_dma_cleanup(substream, hext_stream, dai);
if (ret < 0) {
dev_err(sdev->dev, "%s: failed to clean up link DMA\n", __func__);
return ret;
}
break;
default:
break;
}
return 0;
}
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
static int hda_dai_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
int stream = substream->stream;
return hda_dai_hw_params(substream, &rtd->dpcm[stream].hw_params, dai);
}
static const struct snd_soc_dai_ops hda_dai_ops = {
.hw_params = hda_dai_hw_params,
.hw_free = hda_dai_hw_free,
.trigger = hda_dai_trigger,
.prepare = hda_dai_prepare,
};
#endif
static struct sof_ipc4_copier *widget_to_copier(struct snd_soc_dapm_widget *w)
{
struct snd_sof_widget *swidget = w->dobj.private;
struct snd_sof_dai *sdai = swidget->private;
struct sof_ipc4_copier *ipc4_copier = (struct sof_ipc4_copier *)sdai->private;
return ipc4_copier;
}
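/*
 * hw_params for non-HDA DAIs (SSP/DMIC) that still use HDaudio stream and
 * DMA handling: reuse hda_dai_hw_params() and then fill the copier's IPC4
 * DMA config TLV with the allocated stream tag.
 */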
static int non_hda_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *cpu_dai)
{
struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
struct sof_ipc4_dma_config_tlv *dma_config_tlv;
const struct hda_dai_widget_dma_ops *ops;
struct sof_ipc4_dma_config *dma_config;
struct sof_ipc4_copier *ipc4_copier;
struct hdac_ext_stream *hext_stream;
struct hdac_stream *hstream;
struct snd_sof_dev *sdev;
int stream_id;
int ret;
ops = hda_dai_get_ops(substream, cpu_dai);
if (!ops) {
dev_err(cpu_dai->dev, "DAI widget ops not set\n");
return -EINVAL;
}
/* use HDaudio stream handling */
ret = hda_dai_hw_params(substream, params, cpu_dai);
if (ret < 0) {
dev_err(cpu_dai->dev, "%s: hda_dai_hw_params failed: %d\n", __func__, ret);
return ret;
}
/* get stream_id */
sdev = widget_to_sdev(w);
hext_stream = ops->get_hext_stream(sdev, cpu_dai, substream);
if (!hext_stream) {
dev_err(cpu_dai->dev, "%s: no hext_stream found\n", __func__);
return -ENODEV;
}
hstream = &hext_stream->hstream;
stream_id = hstream->stream_tag;
if (!stream_id) {
dev_err(cpu_dai->dev, "%s: no stream_id allocated\n", __func__);
return -ENODEV;
}
/* configure TLV */
ipc4_copier = widget_to_copier(w);
dma_config_tlv = &ipc4_copier->dma_config_tlv;
dma_config_tlv->type = SOF_IPC4_GTW_DMA_CONFIG_ID;
/* dma_config_priv_size is zero */
dma_config_tlv->length = sizeof(dma_config_tlv->dma_config);
dma_config = &dma_config_tlv->dma_config;
dma_config->dma_method = SOF_IPC4_DMA_METHOD_HDA;
dma_config->pre_allocated_by_host = 1;
dma_config->dma_channel_id = stream_id - 1;
dma_config->stream_id = stream_id;
dma_config->dma_stream_channel_map.device_count = 0; /* mapping not used */
dma_config->dma_priv_config_size = 0;
return 0;
}
static int non_hda_dai_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
int stream = substream->stream;
return non_hda_dai_hw_params(substream, &rtd->dpcm[stream].hw_params, cpu_dai);
}
static const struct snd_soc_dai_ops ssp_dai_ops = {
.hw_params = non_hda_dai_hw_params,
.hw_free = hda_dai_hw_free,
.trigger = hda_dai_trigger,
.prepare = non_hda_dai_prepare,
};
static const struct snd_soc_dai_ops dmic_dai_ops = {
.hw_params = non_hda_dai_hw_params,
.hw_free = hda_dai_hw_free,
.trigger = hda_dai_trigger,
.prepare = non_hda_dai_prepare,
};
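/*
 * SoundWire DAIs also use HDaudio DMA here: run the non-HDA hw_params to set
 * up the stream and IPC4 DMA config, then program the link PCMSyCM channel
 * mapping for the allocated stream tag.
 */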
int sdw_hda_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *cpu_dai,
int link_id)
{
struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
const struct hda_dai_widget_dma_ops *ops;
struct hdac_ext_stream *hext_stream;
struct snd_sof_dev *sdev;
int ret;
ret = non_hda_dai_hw_params(substream, params, cpu_dai);
if (ret < 0) {
dev_err(cpu_dai->dev, "%s: non_hda_dai_hw_params failed %d\n", __func__, ret);
return ret;
}
ops = hda_dai_get_ops(substream, cpu_dai);
sdev = widget_to_sdev(w);
hext_stream = ops->get_hext_stream(sdev, cpu_dai, substream);
if (!hext_stream)
return -ENODEV;
/* in the case of SoundWire we need to program the PCMSyCM registers */
ret = hdac_bus_eml_sdw_map_stream_ch(sof_to_bus(sdev), link_id, cpu_dai->id,
GENMASK(params_channels(params) - 1, 0),
hdac_stream(hext_stream)->stream_tag,
substream->stream);
if (ret < 0) {
dev_err(cpu_dai->dev, "%s: hdac_bus_eml_sdw_map_stream_ch failed %d\n",
__func__, ret);
return ret;
}
return 0;
}
int sdw_hda_dai_hw_free(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai,
int link_id)
{
struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
struct snd_sof_dev *sdev;
int ret;
ret = hda_dai_hw_free(substream, cpu_dai);
if (ret < 0) {
dev_err(cpu_dai->dev, "%s: non_hda_dai_hw_free failed %d\n", __func__, ret);
return ret;
}
sdev = widget_to_sdev(w);
/* in the case of SoundWire we need to reset the PCMSyCM registers */
ret = hdac_bus_eml_sdw_map_stream_ch(sof_to_bus(sdev), link_id, cpu_dai->id,
0, 0, substream->stream);
if (ret < 0) {
dev_err(cpu_dai->dev, "%s: hdac_bus_eml_sdw_map_stream_ch failed %d\n",
__func__, ret);
return ret;
}
return 0;
}
int sdw_hda_dai_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *cpu_dai)
{
return hda_dai_trigger(substream, cmd, cpu_dai);
}
static int hda_dai_suspend(struct hdac_bus *bus)
{
struct snd_soc_pcm_runtime *rtd;
struct hdac_ext_stream *hext_stream;
struct hdac_stream *s;
int ret;
/* set internal flag for BE */
list_for_each_entry(s, &bus->stream_list, list) {
hext_stream = stream_to_hdac_ext_stream(s);
/*
		 * clear the stream. This should already be taken care of for
		 * running streams when the SUSPEND trigger is called. But paused
		 * streams do not get suspended, so this needs to be done
		 * explicitly during suspend.
*/
if (hext_stream->link_substream) {
const struct hda_dai_widget_dma_ops *ops;
struct snd_sof_widget *swidget;
struct snd_soc_dapm_widget *w;
struct snd_soc_dai *cpu_dai;
struct snd_sof_dev *sdev;
struct snd_sof_dai *sdai;
rtd = asoc_substream_to_rtd(hext_stream->link_substream);
cpu_dai = asoc_rtd_to_cpu(rtd, 0);
w = snd_soc_dai_get_widget(cpu_dai, hdac_stream(hext_stream)->direction);
swidget = w->dobj.private;
sdev = widget_to_sdev(w);
sdai = swidget->private;
ops = sdai->platform_private;
ret = hda_link_dma_cleanup(hext_stream->link_substream,
hext_stream,
cpu_dai);
if (ret < 0)
return ret;
/* for consistency with TRIGGER_SUSPEND */
if (ops->post_trigger) {
ret = ops->post_trigger(sdev, cpu_dai,
hext_stream->link_substream,
SNDRV_PCM_TRIGGER_SUSPEND);
if (ret < 0)
return ret;
}
}
}
return 0;
}
static void ssp_set_dai_drv_ops(struct snd_sof_dev *sdev, struct snd_sof_dsp_ops *ops)
{
const struct sof_intel_dsp_desc *chip;
int i;
chip = get_chip_info(sdev->pdata);
if (chip->hw_ip_version >= SOF_INTEL_ACE_2_0) {
for (i = 0; i < ops->num_drv; i++) {
if (strstr(ops->drv[i].name, "SSP"))
ops->drv[i].ops = &ssp_dai_ops;
}
}
}
static void dmic_set_dai_drv_ops(struct snd_sof_dev *sdev, struct snd_sof_dsp_ops *ops)
{
const struct sof_intel_dsp_desc *chip;
int i;
chip = get_chip_info(sdev->pdata);
if (chip->hw_ip_version >= SOF_INTEL_ACE_2_0) {
for (i = 0; i < ops->num_drv; i++) {
if (strstr(ops->drv[i].name, "DMIC"))
ops->drv[i].ops = &dmic_dai_ops;
}
}
}
#else
static inline void ssp_set_dai_drv_ops(struct snd_sof_dev *sdev, struct snd_sof_dsp_ops *ops) {}
static inline void dmic_set_dai_drv_ops(struct snd_sof_dev *sdev, struct snd_sof_dsp_ops *ops) {}
#endif /* CONFIG_SND_SOC_SOF_HDA_LINK */
void hda_set_dai_drv_ops(struct snd_sof_dev *sdev, struct snd_sof_dsp_ops *ops)
{
int i;
for (i = 0; i < ops->num_drv; i++) {
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
if (strstr(ops->drv[i].name, "iDisp") ||
strstr(ops->drv[i].name, "Analog") ||
strstr(ops->drv[i].name, "Digital"))
ops->drv[i].ops = &hda_dai_ops;
#endif
}
ssp_set_dai_drv_ops(sdev, ops);
dmic_set_dai_drv_ops(sdev, ops);
if (sdev->pdata->ipc_type == SOF_INTEL_IPC4 && !hda_use_tplg_nhlt) {
struct sof_ipc4_fw_data *ipc4_data = sdev->private;
ipc4_data->nhlt = intel_nhlt_init(sdev->dev);
}
}
void hda_ops_free(struct snd_sof_dev *sdev)
{
if (sdev->pdata->ipc_type == SOF_INTEL_IPC4) {
struct sof_ipc4_fw_data *ipc4_data = sdev->private;
if (!hda_use_tplg_nhlt)
intel_nhlt_free(ipc4_data->nhlt);
}
}
EXPORT_SYMBOL_NS(hda_ops_free, SND_SOC_SOF_INTEL_HDA_COMMON);
/*
 * Common DAI driver for SKL+ platforms.
 * Some products that use this DAI array only physically have a subset of
 * the DAIs, but no harm is done here by adding the whole set.
*/
struct snd_soc_dai_driver skl_dai[] = {
{
.name = "SSP0 Pin",
.playback = {
.channels_min = 1,
.channels_max = 8,
},
.capture = {
.channels_min = 1,
.channels_max = 8,
},
},
{
.name = "SSP1 Pin",
.playback = {
.channels_min = 1,
.channels_max = 8,
},
.capture = {
.channels_min = 1,
.channels_max = 8,
},
},
{
.name = "SSP2 Pin",
.playback = {
.channels_min = 1,
.channels_max = 8,
},
.capture = {
.channels_min = 1,
.channels_max = 8,
},
},
{
.name = "SSP3 Pin",
.playback = {
.channels_min = 1,
.channels_max = 8,
},
.capture = {
.channels_min = 1,
.channels_max = 8,
},
},
{
.name = "SSP4 Pin",
.playback = {
.channels_min = 1,
.channels_max = 8,
},
.capture = {
.channels_min = 1,
.channels_max = 8,
},
},
{
.name = "SSP5 Pin",
.playback = {
.channels_min = 1,
.channels_max = 8,
},
.capture = {
.channels_min = 1,
.channels_max = 8,
},
},
{
.name = "DMIC01 Pin",
.capture = {
.channels_min = 1,
.channels_max = 4,
},
},
{
.name = "DMIC16k Pin",
.capture = {
.channels_min = 1,
.channels_max = 4,
},
},
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
{
.name = "iDisp1 Pin",
.playback = {
.channels_min = 1,
.channels_max = 8,
},
},
{
.name = "iDisp2 Pin",
.playback = {
.channels_min = 1,
.channels_max = 8,
},
},
{
.name = "iDisp3 Pin",
.playback = {
.channels_min = 1,
.channels_max = 8,
},
},
{
.name = "iDisp4 Pin",
.playback = {
.channels_min = 1,
.channels_max = 8,
},
},
{
.name = "Analog CPU DAI",
.playback = {
.channels_min = 1,
.channels_max = 16,
},
.capture = {
.channels_min = 1,
.channels_max = 16,
},
},
{
.name = "Digital CPU DAI",
.playback = {
.channels_min = 1,
.channels_max = 16,
},
.capture = {
.channels_min = 1,
.channels_max = 16,
},
},
{
.name = "Alt Analog CPU DAI",
.playback = {
.channels_min = 1,
.channels_max = 16,
},
.capture = {
.channels_min = 1,
.channels_max = 16,
},
},
#endif
};
int hda_dsp_dais_suspend(struct snd_sof_dev *sdev)
{
/*
* In the corner case where a SUSPEND happens during a PAUSE, the ALSA core
* does not throw the TRIGGER_SUSPEND. This leaves the DAIs in an unbalanced state.
* Since the component suspend is called last, we can trap this corner case
* and force the DAIs to release their resources.
*/
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_LINK)
int ret;
ret = hda_dai_suspend(sof_to_bus(sdev));
if (ret < 0)
return ret;
#endif
return 0;
}
| linux-master | sound/soc/sof/intel/hda-dai.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2022 Intel Corporation. All rights reserved.
//
/*
* common ops for SKL+ HDAudio platforms
*/
#include "../sof-priv.h"
#include "hda.h"
#include "../sof-audio.h"
struct snd_sof_dsp_ops sof_hda_common_ops = {
/* probe/remove/shutdown */
.probe = hda_dsp_probe,
.remove = hda_dsp_remove,
/* Register IO uses direct mmio */
/* Block IO */
.block_read = sof_block_read,
.block_write = sof_block_write,
/* Mailbox IO */
.mailbox_read = sof_mailbox_read,
.mailbox_write = sof_mailbox_write,
/* ipc */
.get_mailbox_offset = hda_dsp_ipc_get_mailbox_offset,
.get_window_offset = hda_dsp_ipc_get_window_offset,
.ipc_msg_data = hda_ipc_msg_data,
.set_stream_data_offset = hda_set_stream_data_offset,
/* machine driver */
.machine_select = hda_machine_select,
.machine_register = sof_machine_register,
.machine_unregister = sof_machine_unregister,
.set_mach_params = hda_set_mach_params,
/* debug */
.dbg_dump = hda_dsp_dump,
.debugfs_add_region_item = snd_sof_debugfs_add_region_item_iomem,
/* stream callbacks */
.pcm_open = hda_dsp_pcm_open,
.pcm_close = hda_dsp_pcm_close,
.pcm_hw_params = hda_dsp_pcm_hw_params,
.pcm_hw_free = hda_dsp_stream_hw_free,
.pcm_trigger = hda_dsp_pcm_trigger,
.pcm_pointer = hda_dsp_pcm_pointer,
.pcm_ack = hda_dsp_pcm_ack,
/* firmware loading */
.load_firmware = snd_sof_load_firmware_raw,
/* pre/post fw run */
.pre_fw_run = hda_dsp_pre_fw_run,
/* firmware run */
.run = hda_dsp_cl_boot_firmware,
/* parse platform specific extended manifest */
.parse_platform_ext_manifest = hda_dsp_ext_man_get_cavs_config_data,
/* dsp core get/put */
/* trace callback */
.trace_init = hda_dsp_trace_init,
.trace_release = hda_dsp_trace_release,
.trace_trigger = hda_dsp_trace_trigger,
/* client ops */
.register_ipc_clients = hda_register_clients,
.unregister_ipc_clients = hda_unregister_clients,
/* DAI drivers */
.drv = skl_dai,
.num_drv = SOF_SKL_NUM_DAIS,
/* PM */
.suspend = hda_dsp_suspend,
.resume = hda_dsp_resume,
.runtime_suspend = hda_dsp_runtime_suspend,
.runtime_resume = hda_dsp_runtime_resume,
.runtime_idle = hda_dsp_runtime_idle,
.set_hw_params_upon_resume = hda_dsp_set_hw_params_upon_resume,
/* ALSA HW info flags */
.hw_info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE |
SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
.dsp_arch_ops = &sof_xtensa_arch_ops,
};
| linux-master | sound/soc/sof/intel/hda-common-ops.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Authors: Liam Girdwood <[email protected]>
// Ranjani Sridharan <[email protected]>
// Rander Wang <[email protected]>
// Keyon Jie <[email protected]>
//
/*
* Hardware interface for generic Intel audio DSP HDA IP
*/
#include <sound/hdaudio_ext.h>
#include <sound/hda_register.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_intel.h>
#include <sound/intel-dsp-config.h>
#include <sound/intel-nhlt.h>
#include <sound/sof.h>
#include <sound/sof/xtensa.h>
#include <sound/hda-mlink.h>
#include "../sof-audio.h"
#include "../sof-pci-dev.h"
#include "../ops.h"
#include "hda.h"
#define CREATE_TRACE_POINTS
#include <trace/events/sof_intel.h>
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
#include <sound/soc-acpi-intel-match.h>
#endif
/* platform specific devices */
#include "shim.h"
#define EXCEPT_MAX_HDR_SIZE 0x400
#define HDA_EXT_ROM_STATUS_SIZE 8
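/*
 * Return the bitmask of DAI interfaces usable on this platform; index 0 of
 * interface_mask is for normal (DSP) mode, index 1 for DSPless mode.
 */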
static u32 hda_get_interface_mask(struct snd_sof_dev *sdev)
{
const struct sof_intel_dsp_desc *chip;
u32 interface_mask[2] = { 0 };
chip = get_chip_info(sdev->pdata);
switch (chip->hw_ip_version) {
case SOF_INTEL_TANGIER:
case SOF_INTEL_BAYTRAIL:
case SOF_INTEL_BROADWELL:
interface_mask[0] = BIT(SOF_DAI_INTEL_SSP);
break;
case SOF_INTEL_CAVS_1_5:
case SOF_INTEL_CAVS_1_5_PLUS:
interface_mask[0] = BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC) |
BIT(SOF_DAI_INTEL_HDA);
interface_mask[1] = BIT(SOF_DAI_INTEL_HDA);
break;
case SOF_INTEL_CAVS_1_8:
case SOF_INTEL_CAVS_2_0:
case SOF_INTEL_CAVS_2_5:
case SOF_INTEL_ACE_1_0:
interface_mask[0] = BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC) |
BIT(SOF_DAI_INTEL_HDA) | BIT(SOF_DAI_INTEL_ALH);
interface_mask[1] = BIT(SOF_DAI_INTEL_HDA);
break;
case SOF_INTEL_ACE_2_0:
interface_mask[0] = BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC) |
BIT(SOF_DAI_INTEL_HDA) | BIT(SOF_DAI_INTEL_ALH);
interface_mask[1] = interface_mask[0]; /* all interfaces accessible without DSP */
break;
default:
break;
}
return interface_mask[sdev->dspless_mode_selected];
}
#if IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
/*
 * The default for SoundWire clock stop quirks is to power gate the IP
 * and do a Bus Reset. This will need to be modified when the DSP
 * needs to remain in D0i3 so that the Master does not lose context
 * and enumeration is not required on clock restart.
*/
static int sdw_clock_stop_quirks = SDW_INTEL_CLK_STOP_BUS_RESET;
module_param(sdw_clock_stop_quirks, int, 0444);
MODULE_PARM_DESC(sdw_clock_stop_quirks, "SOF SoundWire clock stop quirks");
static int sdw_params_stream(struct device *dev,
struct sdw_intel_stream_params_data *params_data)
{
struct snd_soc_dai *d = params_data->dai;
struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(d, params_data->substream->stream);
struct snd_sof_dai_config_data data = { 0 };
data.dai_index = (params_data->link_id << 8) | d->id;
data.dai_data = params_data->alh_stream_id;
return hda_dai_config(w, SOF_DAI_CONFIG_FLAGS_HW_PARAMS, &data);
}
struct sdw_intel_ops sdw_callback = {
.params_stream = sdw_params_stream,
};
static int sdw_ace2x_params_stream(struct device *dev,
struct sdw_intel_stream_params_data *params_data)
{
return sdw_hda_dai_hw_params(params_data->substream,
params_data->hw_params,
params_data->dai,
params_data->link_id);
}
static int sdw_ace2x_free_stream(struct device *dev,
struct sdw_intel_stream_free_data *free_data)
{
return sdw_hda_dai_hw_free(free_data->substream,
free_data->dai,
free_data->link_id);
}
static int sdw_ace2x_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
{
return sdw_hda_dai_trigger(substream, cmd, dai);
}
static struct sdw_intel_ops sdw_ace2x_callback = {
.params_stream = sdw_ace2x_params_stream,
.free_stream = sdw_ace2x_free_stream,
.trigger = sdw_ace2x_trigger,
};
void hda_common_enable_sdw_irq(struct snd_sof_dev *sdev, bool enable)
{
struct sof_intel_hda_dev *hdev;
hdev = sdev->pdata->hw_pdata;
if (!hdev->sdw)
return;
snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC2,
HDA_DSP_REG_ADSPIC2_SNDW,
enable ? HDA_DSP_REG_ADSPIC2_SNDW : 0);
}
void hda_sdw_int_enable(struct snd_sof_dev *sdev, bool enable)
{
u32 interface_mask = hda_get_interface_mask(sdev);
const struct sof_intel_dsp_desc *chip;
if (!(interface_mask & BIT(SOF_DAI_INTEL_ALH)))
return;
chip = get_chip_info(sdev->pdata);
if (chip && chip->enable_sdw_irq)
chip->enable_sdw_irq(sdev, enable);
}
static int hda_sdw_acpi_scan(struct snd_sof_dev *sdev)
{
u32 interface_mask = hda_get_interface_mask(sdev);
struct sof_intel_hda_dev *hdev;
acpi_handle handle;
int ret;
if (!(interface_mask & BIT(SOF_DAI_INTEL_ALH)))
return -EINVAL;
handle = ACPI_HANDLE(sdev->dev);
/* save ACPI info for the probe step */
hdev = sdev->pdata->hw_pdata;
ret = sdw_intel_acpi_scan(handle, &hdev->info);
if (ret < 0)
return -EINVAL;
return 0;
}
static int hda_sdw_probe(struct snd_sof_dev *sdev)
{
const struct sof_intel_dsp_desc *chip;
struct sof_intel_hda_dev *hdev;
struct sdw_intel_res res;
void *sdw;
hdev = sdev->pdata->hw_pdata;
memset(&res, 0, sizeof(res));
chip = get_chip_info(sdev->pdata);
if (chip->hw_ip_version < SOF_INTEL_ACE_2_0) {
res.mmio_base = sdev->bar[HDA_DSP_BAR];
res.hw_ops = &sdw_intel_cnl_hw_ops;
res.shim_base = hdev->desc->sdw_shim_base;
res.alh_base = hdev->desc->sdw_alh_base;
res.ext = false;
res.ops = &sdw_callback;
} else {
/*
* retrieve eml_lock needed to protect shared registers
* in the HDaudio multi-link areas
*/
res.eml_lock = hdac_bus_eml_get_mutex(sof_to_bus(sdev), true,
AZX_REG_ML_LEPTR_ID_SDW);
if (!res.eml_lock)
return -ENODEV;
res.mmio_base = sdev->bar[HDA_DSP_HDA_BAR];
/*
* the SHIM and SoundWire register offsets are link-specific
* and will be determined when adding auxiliary devices
*/
res.hw_ops = &sdw_intel_lnl_hw_ops;
res.ext = true;
res.ops = &sdw_ace2x_callback;
}
res.irq = sdev->ipc_irq;
res.handle = hdev->info.handle;
res.parent = sdev->dev;
res.dev = sdev->dev;
res.clock_stop_quirks = sdw_clock_stop_quirks;
res.hbus = sof_to_bus(sdev);
/*
	 * ops and arg fields are not populated for now;
	 * they will be needed when the DAI callbacks are
	 * provided.
*/
	/* we could filter links here if needed, e.g. for quirks */
res.count = hdev->info.count;
res.link_mask = hdev->info.link_mask;
sdw = sdw_intel_probe(&res);
if (!sdw) {
dev_err(sdev->dev, "error: SoundWire probe failed\n");
return -EINVAL;
}
/* save context */
hdev->sdw = sdw;
return 0;
}
int hda_sdw_check_lcount_common(struct snd_sof_dev *sdev)
{
struct sof_intel_hda_dev *hdev;
struct sdw_intel_ctx *ctx;
u32 caps;
hdev = sdev->pdata->hw_pdata;
ctx = hdev->sdw;
caps = snd_sof_dsp_read(sdev, HDA_DSP_BAR, ctx->shim_base + SDW_SHIM_LCAP);
caps &= SDW_SHIM_LCAP_LCOUNT_MASK;
/* Check HW supported vs property value */
if (caps < ctx->count) {
dev_err(sdev->dev,
"%s: BIOS master count %d is larger than hardware capabilities %d\n",
__func__, ctx->count, caps);
return -EINVAL;
}
return 0;
}
int hda_sdw_check_lcount_ext(struct snd_sof_dev *sdev)
{
struct sof_intel_hda_dev *hdev;
struct sdw_intel_ctx *ctx;
struct hdac_bus *bus;
u32 slcount;
bus = sof_to_bus(sdev);
hdev = sdev->pdata->hw_pdata;
ctx = hdev->sdw;
slcount = hdac_bus_eml_get_count(bus, true, AZX_REG_ML_LEPTR_ID_SDW);
/* Check HW supported vs property value */
if (slcount < ctx->count) {
dev_err(sdev->dev,
"%s: BIOS master count %d is larger than hardware capabilities %d\n",
__func__, ctx->count, slcount);
return -EINVAL;
}
return 0;
}
static int hda_sdw_check_lcount(struct snd_sof_dev *sdev)
{
const struct sof_intel_dsp_desc *chip;
chip = get_chip_info(sdev->pdata);
if (chip && chip->read_sdw_lcount)
return chip->read_sdw_lcount(sdev);
return 0;
}
int hda_sdw_startup(struct snd_sof_dev *sdev)
{
struct sof_intel_hda_dev *hdev;
struct snd_sof_pdata *pdata = sdev->pdata;
int ret;
hdev = sdev->pdata->hw_pdata;
if (!hdev->sdw)
return 0;
if (pdata->machine && !pdata->machine->mach_params.link_mask)
return 0;
ret = hda_sdw_check_lcount(sdev);
if (ret < 0)
return ret;
return sdw_intel_startup(hdev->sdw);
}
static int hda_sdw_exit(struct snd_sof_dev *sdev)
{
struct sof_intel_hda_dev *hdev;
hdev = sdev->pdata->hw_pdata;
hda_sdw_int_enable(sdev, false);
if (hdev->sdw)
sdw_intel_exit(hdev->sdw);
hdev->sdw = NULL;
return 0;
}
bool hda_common_check_sdw_irq(struct snd_sof_dev *sdev)
{
struct sof_intel_hda_dev *hdev;
bool ret = false;
u32 irq_status;
hdev = sdev->pdata->hw_pdata;
if (!hdev->sdw)
return ret;
/* store status */
irq_status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIS2);
/* invalid message ? */
if (irq_status == 0xffffffff)
goto out;
/* SDW message ? */
if (irq_status & HDA_DSP_REG_ADSPIS2_SNDW)
ret = true;
out:
return ret;
}
static bool hda_dsp_check_sdw_irq(struct snd_sof_dev *sdev)
{
u32 interface_mask = hda_get_interface_mask(sdev);
const struct sof_intel_dsp_desc *chip;
if (!(interface_mask & BIT(SOF_DAI_INTEL_ALH)))
return false;
chip = get_chip_info(sdev->pdata);
if (chip && chip->check_sdw_irq)
return chip->check_sdw_irq(sdev);
return false;
}
static irqreturn_t hda_dsp_sdw_thread(int irq, void *context)
{
return sdw_intel_thread(irq, context);
}
bool hda_sdw_check_wakeen_irq_common(struct snd_sof_dev *sdev)
{
struct sof_intel_hda_dev *hdev;
hdev = sdev->pdata->hw_pdata;
if (hdev->sdw &&
snd_sof_dsp_read(sdev, HDA_DSP_BAR,
hdev->desc->sdw_shim_base + SDW_SHIM_WAKESTS))
return true;
return false;
}
static bool hda_sdw_check_wakeen_irq(struct snd_sof_dev *sdev)
{
u32 interface_mask = hda_get_interface_mask(sdev);
const struct sof_intel_dsp_desc *chip;
if (!(interface_mask & BIT(SOF_DAI_INTEL_ALH)))
return false;
chip = get_chip_info(sdev->pdata);
if (chip && chip->check_sdw_wakeen_irq)
return chip->check_sdw_wakeen_irq(sdev);
return false;
}
void hda_sdw_process_wakeen(struct snd_sof_dev *sdev)
{
u32 interface_mask = hda_get_interface_mask(sdev);
struct sof_intel_hda_dev *hdev;
if (!(interface_mask & BIT(SOF_DAI_INTEL_ALH)))
return;
hdev = sdev->pdata->hw_pdata;
if (!hdev->sdw)
return;
sdw_intel_process_wakeen_event(hdev->sdw);
}
#else /* IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE) */
static inline int hda_sdw_acpi_scan(struct snd_sof_dev *sdev)
{
return 0;
}
static inline int hda_sdw_probe(struct snd_sof_dev *sdev)
{
return 0;
}
static inline int hda_sdw_exit(struct snd_sof_dev *sdev)
{
return 0;
}
static inline bool hda_dsp_check_sdw_irq(struct snd_sof_dev *sdev)
{
return false;
}
static inline irqreturn_t hda_dsp_sdw_thread(int irq, void *context)
{
return IRQ_HANDLED;
}
static inline bool hda_sdw_check_wakeen_irq(struct snd_sof_dev *sdev)
{
return false;
}
#endif /* IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE) */
/*
* Debug
*/
struct hda_dsp_msg_code {
u32 code;
const char *text;
};
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
static bool hda_use_msi = true;
module_param_named(use_msi, hda_use_msi, bool, 0444);
MODULE_PARM_DESC(use_msi, "SOF HDA use PCI MSI mode");
#else
#define hda_use_msi (1)
#endif
int sof_hda_position_quirk = SOF_HDA_POSITION_QUIRK_USE_DPIB_REGISTERS;
module_param_named(position_quirk, sof_hda_position_quirk, int, 0444);
MODULE_PARM_DESC(position_quirk, "SOF HDaudio position quirk");
static char *hda_model;
module_param(hda_model, charp, 0444);
MODULE_PARM_DESC(hda_model, "Use the given HDA board model.");
static int dmic_num_override = -1;
module_param_named(dmic_num, dmic_num_override, int, 0444);
MODULE_PARM_DESC(dmic_num, "SOF HDA DMIC number");
static int mclk_id_override = -1;
module_param_named(mclk_id, mclk_id_override, int, 0444);
MODULE_PARM_DESC(mclk_id, "SOF SSP mclk_id");
static const struct hda_dsp_msg_code hda_dsp_rom_fw_error_texts[] = {
{HDA_DSP_ROM_CSE_ERROR, "error: cse error"},
{HDA_DSP_ROM_CSE_WRONG_RESPONSE, "error: cse wrong response"},
{HDA_DSP_ROM_IMR_TO_SMALL, "error: IMR too small"},
{HDA_DSP_ROM_BASE_FW_NOT_FOUND, "error: base fw not found"},
{HDA_DSP_ROM_CSE_VALIDATION_FAILED, "error: signature verification failed"},
{HDA_DSP_ROM_IPC_FATAL_ERROR, "error: ipc fatal error"},
{HDA_DSP_ROM_L2_CACHE_ERROR, "error: L2 cache error"},
{HDA_DSP_ROM_LOAD_OFFSET_TO_SMALL, "error: load offset too small"},
{HDA_DSP_ROM_API_PTR_INVALID, "error: API ptr invalid"},
{HDA_DSP_ROM_BASEFW_INCOMPAT, "error: base fw incompatible"},
{HDA_DSP_ROM_UNHANDLED_INTERRUPT, "error: unhandled interrupt"},
{HDA_DSP_ROM_MEMORY_HOLE_ECC, "error: ECC memory hole"},
{HDA_DSP_ROM_KERNEL_EXCEPTION, "error: kernel exception"},
{HDA_DSP_ROM_USER_EXCEPTION, "error: user exception"},
{HDA_DSP_ROM_UNEXPECTED_RESET, "error: unexpected reset"},
{HDA_DSP_ROM_NULL_FW_ENTRY, "error: null FW entry point"},
};
#define FSR_ROM_STATE_ENTRY(state) {FSR_STATE_ROM_##state, #state}
static const struct hda_dsp_msg_code fsr_rom_state_names[] = {
FSR_ROM_STATE_ENTRY(INIT),
FSR_ROM_STATE_ENTRY(INIT_DONE),
FSR_ROM_STATE_ENTRY(CSE_MANIFEST_LOADED),
FSR_ROM_STATE_ENTRY(FW_MANIFEST_LOADED),
FSR_ROM_STATE_ENTRY(FW_FW_LOADED),
FSR_ROM_STATE_ENTRY(FW_ENTERED),
FSR_ROM_STATE_ENTRY(VERIFY_FEATURE_MASK),
FSR_ROM_STATE_ENTRY(GET_LOAD_OFFSET),
FSR_ROM_STATE_ENTRY(FETCH_ROM_EXT),
FSR_ROM_STATE_ENTRY(FETCH_ROM_EXT_DONE),
/* CSE states */
FSR_ROM_STATE_ENTRY(CSE_IMR_REQUEST),
FSR_ROM_STATE_ENTRY(CSE_IMR_GRANTED),
FSR_ROM_STATE_ENTRY(CSE_VALIDATE_IMAGE_REQUEST),
FSR_ROM_STATE_ENTRY(CSE_IMAGE_VALIDATED),
FSR_ROM_STATE_ENTRY(CSE_IPC_IFACE_INIT),
FSR_ROM_STATE_ENTRY(CSE_IPC_RESET_PHASE_1),
FSR_ROM_STATE_ENTRY(CSE_IPC_OPERATIONAL_ENTRY),
FSR_ROM_STATE_ENTRY(CSE_IPC_OPERATIONAL),
FSR_ROM_STATE_ENTRY(CSE_IPC_DOWN),
};
#define FSR_BRINGUP_STATE_ENTRY(state) {FSR_STATE_BRINGUP_##state, #state}
static const struct hda_dsp_msg_code fsr_bringup_state_names[] = {
FSR_BRINGUP_STATE_ENTRY(INIT),
FSR_BRINGUP_STATE_ENTRY(INIT_DONE),
FSR_BRINGUP_STATE_ENTRY(HPSRAM_LOAD),
FSR_BRINGUP_STATE_ENTRY(UNPACK_START),
FSR_BRINGUP_STATE_ENTRY(IMR_RESTORE),
FSR_BRINGUP_STATE_ENTRY(FW_ENTERED),
};
#define FSR_WAIT_STATE_ENTRY(state) {FSR_WAIT_FOR_##state, #state}
static const struct hda_dsp_msg_code fsr_wait_state_names[] = {
FSR_WAIT_STATE_ENTRY(IPC_BUSY),
FSR_WAIT_STATE_ENTRY(IPC_DONE),
FSR_WAIT_STATE_ENTRY(CACHE_INVALIDATION),
FSR_WAIT_STATE_ENTRY(LP_SRAM_OFF),
FSR_WAIT_STATE_ENTRY(DMA_BUFFER_FULL),
FSR_WAIT_STATE_ENTRY(CSE_CSR),
};
#define FSR_MODULE_NAME_ENTRY(mod) [FSR_MOD_##mod] = #mod
static const char * const fsr_module_names[] = {
FSR_MODULE_NAME_ENTRY(ROM),
FSR_MODULE_NAME_ENTRY(ROM_BYP),
FSR_MODULE_NAME_ENTRY(BASE_FW),
FSR_MODULE_NAME_ENTRY(LP_BOOT),
FSR_MODULE_NAME_ENTRY(BRNGUP),
FSR_MODULE_NAME_ENTRY(ROM_EXT),
};
static const char *
hda_dsp_get_state_text(u32 code, const struct hda_dsp_msg_code *msg_code,
size_t array_size)
{
int i;
for (i = 0; i < array_size; i++) {
if (code == msg_code[i].code)
return msg_code[i].text;
}
return NULL;
}
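/*
 * Decode the firmware status register (FSR): split it into module, state and
 * wait-state fields, print them at the given level, then report the
 * error/status code found at rom_status_reg + 4 if it is non-zero.
 */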
static void hda_dsp_get_state(struct snd_sof_dev *sdev, const char *level)
{
const struct sof_intel_dsp_desc *chip = get_chip_info(sdev->pdata);
const char *state_text, *error_text, *module_text;
u32 fsr, state, wait_state, module, error_code;
fsr = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg);
state = FSR_TO_STATE_CODE(fsr);
wait_state = FSR_TO_WAIT_STATE_CODE(fsr);
module = FSR_TO_MODULE_CODE(fsr);
if (module > FSR_MOD_ROM_EXT)
module_text = "unknown";
else
module_text = fsr_module_names[module];
if (module == FSR_MOD_BRNGUP)
state_text = hda_dsp_get_state_text(state, fsr_bringup_state_names,
ARRAY_SIZE(fsr_bringup_state_names));
else
state_text = hda_dsp_get_state_text(state, fsr_rom_state_names,
ARRAY_SIZE(fsr_rom_state_names));
/* not for us, must be generic sof message */
if (!state_text) {
dev_printk(level, sdev->dev, "%#010x: unknown ROM status value\n", fsr);
return;
}
if (wait_state) {
const char *wait_state_text;
wait_state_text = hda_dsp_get_state_text(wait_state, fsr_wait_state_names,
ARRAY_SIZE(fsr_wait_state_names));
if (!wait_state_text)
wait_state_text = "unknown";
dev_printk(level, sdev->dev,
"%#010x: module: %s, state: %s, waiting for: %s, %s\n",
fsr, module_text, state_text, wait_state_text,
fsr & FSR_HALTED ? "not running" : "running");
} else {
dev_printk(level, sdev->dev, "%#010x: module: %s, state: %s, %s\n",
fsr, module_text, state_text,
fsr & FSR_HALTED ? "not running" : "running");
}
error_code = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg + 4);
if (!error_code)
return;
error_text = hda_dsp_get_state_text(error_code, hda_dsp_rom_fw_error_texts,
ARRAY_SIZE(hda_dsp_rom_fw_error_texts));
if (!error_text)
error_text = "unknown";
if (state == FSR_STATE_FW_ENTERED)
dev_printk(level, sdev->dev, "status code: %#x (%s)\n", error_code,
error_text);
else
dev_printk(level, sdev->dev, "error code: %#x (%s)\n", error_code,
error_text);
}
static void hda_dsp_get_registers(struct snd_sof_dev *sdev,
struct sof_ipc_dsp_oops_xtensa *xoops,
struct sof_ipc_panic_info *panic_info,
u32 *stack, size_t stack_words)
{
u32 offset = sdev->dsp_oops_offset;
/* first read registers */
sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
/* note: variable AR register array is not read */
/* then get panic info */
if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
xoops->arch_hdr.totalsize);
return;
}
offset += xoops->arch_hdr.totalsize;
sof_block_read(sdev, sdev->mmio_bar, offset,
panic_info, sizeof(*panic_info));
/* then get the stack */
offset += sizeof(*panic_info);
sof_block_read(sdev, sdev->mmio_bar, offset, stack,
stack_words * sizeof(u32));
}
/* dump the first 8 dwords representing the extended ROM status */
static void hda_dsp_dump_ext_rom_status(struct snd_sof_dev *sdev, const char *level,
u32 flags)
{
const struct sof_intel_dsp_desc *chip;
char msg[128];
int len = 0;
u32 value;
int i;
chip = get_chip_info(sdev->pdata);
for (i = 0; i < HDA_EXT_ROM_STATUS_SIZE; i++) {
value = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->rom_status_reg + i * 0x4);
len += scnprintf(msg + len, sizeof(msg) - len, " 0x%x", value);
}
dev_printk(level, sdev->dev, "extended rom status: %s", msg);
}
void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags)
{
char *level = (flags & SOF_DBG_DUMP_OPTIONAL) ? KERN_DEBUG : KERN_ERR;
struct sof_ipc_dsp_oops_xtensa xoops;
struct sof_ipc_panic_info panic_info;
u32 stack[HDA_DSP_STACK_DUMP_SIZE];
/* print ROM/FW status */
hda_dsp_get_state(sdev, level);
	/* The firmware register dump is only available with IPC3 */
if (flags & SOF_DBG_DUMP_REGS && sdev->pdata->ipc_type == SOF_IPC) {
u32 status = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_STATUS);
u32 panic = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_SRAM_REG_FW_TRACEP);
hda_dsp_get_registers(sdev, &xoops, &panic_info, stack,
HDA_DSP_STACK_DUMP_SIZE);
sof_print_oops_and_stack(sdev, level, status, panic, &xoops,
&panic_info, stack, HDA_DSP_STACK_DUMP_SIZE);
} else {
hda_dsp_dump_ext_rom_status(sdev, level, flags);
}
}
static bool hda_check_ipc_irq(struct snd_sof_dev *sdev)
{
const struct sof_intel_dsp_desc *chip;
chip = get_chip_info(sdev->pdata);
if (chip && chip->check_ipc_irq)
return chip->check_ipc_irq(sdev);
return false;
}
void hda_ipc_irq_dump(struct snd_sof_dev *sdev)
{
u32 adspis;
u32 intsts;
u32 intctl;
u32 ppsts;
u8 rirbsts;
/* read key IRQ stats and config registers */
adspis = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIS);
intsts = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS);
intctl = snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL);
ppsts = snd_sof_dsp_read(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPSTS);
rirbsts = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, AZX_REG_RIRBSTS);
dev_err(sdev->dev, "hda irq intsts 0x%8.8x intlctl 0x%8.8x rirb %2.2x\n",
intsts, intctl, rirbsts);
dev_err(sdev->dev, "dsp irq ppsts 0x%8.8x adspis 0x%8.8x\n", ppsts, adspis);
}
void hda_ipc_dump(struct snd_sof_dev *sdev)
{
u32 hipcie;
u32 hipct;
u32 hipcctl;
hda_ipc_irq_dump(sdev);
/* read IPC status */
hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCIE);
hipct = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCT);
hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCCTL);
/* dump the IPC regs */
/* TODO: parse the raw msg */
dev_err(sdev->dev, "host status 0x%8.8x dsp status 0x%8.8x mask 0x%8.8x\n",
hipcie, hipct, hipcctl);
}
void hda_ipc4_dump(struct snd_sof_dev *sdev)
{
u32 hipci, hipcie, hipct, hipcte, hipcctl;
hda_ipc_irq_dump(sdev);
hipci = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCI);
hipcie = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCIE);
hipct = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCT);
hipcte = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCTE);
hipcctl = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_HIPCCTL);
/* dump the IPC regs */
/* TODO: parse the raw msg */
dev_err(sdev->dev, "Host IPC initiator: %#x|%#x, target: %#x|%#x, ctl: %#x\n",
hipci, hipcie, hipct, hipcte, hipcctl);
}
bool hda_ipc4_tx_is_busy(struct snd_sof_dev *sdev)
{
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
const struct sof_intel_dsp_desc *chip = hda->desc;
u32 val;
val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, chip->ipc_req);
return !!(val & chip->ipc_req_mask);
}
static int hda_init(struct snd_sof_dev *sdev)
{
struct hda_bus *hbus;
struct hdac_bus *bus;
struct pci_dev *pci = to_pci_dev(sdev->dev);
int ret;
hbus = sof_to_hbus(sdev);
bus = sof_to_bus(sdev);
/* HDA bus init */
sof_hda_bus_init(sdev, &pci->dev);
if (sof_hda_position_quirk == SOF_HDA_POSITION_QUIRK_USE_DPIB_REGISTERS)
bus->use_posbuf = 0;
else
bus->use_posbuf = 1;
bus->bdl_pos_adj = 0;
bus->sync_write = 1;
mutex_init(&hbus->prepare_mutex);
hbus->pci = pci;
hbus->mixer_assigned = -1;
hbus->modelname = hda_model;
/* initialise hdac bus */
bus->addr = pci_resource_start(pci, 0);
bus->remap_addr = pci_ioremap_bar(pci, 0);
if (!bus->remap_addr) {
dev_err(bus->dev, "error: ioremap error\n");
return -ENXIO;
}
/* HDA base */
sdev->bar[HDA_DSP_HDA_BAR] = bus->remap_addr;
/* init i915 and HDMI codecs */
ret = hda_codec_i915_init(sdev);
if (ret < 0)
dev_warn(sdev->dev, "init of i915 and HDMI codec failed\n");
/* get controller capabilities */
ret = hda_dsp_ctrl_get_caps(sdev);
if (ret < 0)
dev_err(sdev->dev, "error: get caps error\n");
return ret;
}
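/*
 * Work out the DMIC count from NHLT, allow a module-parameter override and
 * clamp invalid values to zero.
 */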
static int check_dmic_num(struct snd_sof_dev *sdev)
{
struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
struct nhlt_acpi_table *nhlt;
int dmic_num = 0;
nhlt = hdev->nhlt;
if (nhlt)
dmic_num = intel_nhlt_get_dmic_geo(sdev->dev, nhlt);
/* allow for module parameter override */
if (dmic_num_override != -1) {
dev_dbg(sdev->dev,
"overriding DMICs detected in NHLT tables %d by kernel param %d\n",
dmic_num, dmic_num_override);
dmic_num = dmic_num_override;
}
if (dmic_num < 0 || dmic_num > 4) {
dev_dbg(sdev->dev, "invalid dmic_number %d\n", dmic_num);
dmic_num = 0;
}
return dmic_num;
}
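/* Return the mask of SSP (I2S) endpoints reported by NHLT, or 0 if none */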
static int check_nhlt_ssp_mask(struct snd_sof_dev *sdev)
{
struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
struct nhlt_acpi_table *nhlt;
int ssp_mask = 0;
nhlt = hdev->nhlt;
if (!nhlt)
return ssp_mask;
if (intel_nhlt_has_endpoint_type(nhlt, NHLT_LINK_SSP)) {
ssp_mask = intel_nhlt_ssp_endpoint_mask(nhlt, NHLT_DEVICE_I2S);
if (ssp_mask)
dev_info(sdev->dev, "NHLT_DEVICE_I2S detected, ssp_mask %#x\n", ssp_mask);
}
return ssp_mask;
}
static int check_nhlt_ssp_mclk_mask(struct snd_sof_dev *sdev, int ssp_num)
{
struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
struct nhlt_acpi_table *nhlt;
nhlt = hdev->nhlt;
if (!nhlt)
return 0;
return intel_nhlt_ssp_mclk_mask(nhlt, ssp_num);
}
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC) || IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
static const char *fixup_tplg_name(struct snd_sof_dev *sdev,
const char *sof_tplg_filename,
const char *idisp_str,
const char *dmic_str)
{
const char *tplg_filename = NULL;
char *filename, *tmp;
const char *split_ext;
filename = kstrdup(sof_tplg_filename, GFP_KERNEL);
if (!filename)
return NULL;
/* this assumes a .tplg extension */
tmp = filename;
split_ext = strsep(&tmp, ".");
if (split_ext)
tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
"%s%s%s.tplg",
split_ext, idisp_str, dmic_str);
kfree(filename);
return tplg_filename;
}
static int dmic_detect_topology_fixup(struct snd_sof_dev *sdev,
const char **tplg_filename,
const char *idisp_str,
int *dmic_found,
bool tplg_fixup)
{
const char *dmic_str;
int dmic_num;
/* first check for DMICs (using NHLT or module parameter) */
dmic_num = check_dmic_num(sdev);
switch (dmic_num) {
case 1:
dmic_str = "-1ch";
break;
case 2:
dmic_str = "-2ch";
break;
case 3:
dmic_str = "-3ch";
break;
case 4:
dmic_str = "-4ch";
break;
default:
dmic_num = 0;
dmic_str = "";
break;
}
if (tplg_fixup) {
const char *default_tplg_filename = *tplg_filename;
const char *fixed_tplg_filename;
fixed_tplg_filename = fixup_tplg_name(sdev, default_tplg_filename,
idisp_str, dmic_str);
if (!fixed_tplg_filename)
return -ENOMEM;
*tplg_filename = fixed_tplg_filename;
}
dev_info(sdev->dev, "DMICs detected in NHLT tables: %d\n", dmic_num);
*dmic_found = dmic_num;
return 0;
}
#endif
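/*
 * Init controller capabilities: bring up the controller, scan SoundWire links
 * described in ACPI and create the HDA codec instances.
 */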
static int hda_init_caps(struct snd_sof_dev *sdev)
{
u32 interface_mask = hda_get_interface_mask(sdev);
struct hdac_bus *bus = sof_to_bus(sdev);
struct snd_sof_pdata *pdata = sdev->pdata;
struct sof_intel_hda_dev *hdev = pdata->hw_pdata;
u32 link_mask;
int ret = 0;
/* check if dsp is there */
if (bus->ppcap)
dev_dbg(sdev->dev, "PP capability, will probe DSP later.\n");
/* Init HDA controller after i915 init */
ret = hda_dsp_ctrl_init_chip(sdev);
if (ret < 0) {
dev_err(bus->dev, "error: init chip failed with ret: %d\n",
ret);
return ret;
}
hda_bus_ml_init(bus);
/* Skip SoundWire if it is not supported */
if (!(interface_mask & BIT(SOF_DAI_INTEL_ALH)))
goto skip_soundwire;
/* scan SoundWire capabilities exposed by DSDT */
ret = hda_sdw_acpi_scan(sdev);
if (ret < 0) {
dev_dbg(sdev->dev, "skipping SoundWire, not detected with ACPI scan\n");
goto skip_soundwire;
}
link_mask = hdev->info.link_mask;
if (!link_mask) {
dev_dbg(sdev->dev, "skipping SoundWire, no links enabled\n");
goto skip_soundwire;
}
/*
* probe/allocate SoundWire resources.
* The hardware configuration takes place in hda_sdw_startup
* after power rails are enabled.
* It's entirely possible to have a mix of I2S/DMIC/SoundWire
* devices, so we allocate the resources in all cases.
*/
ret = hda_sdw_probe(sdev);
if (ret < 0) {
dev_err(sdev->dev, "error: SoundWire probe error\n");
return ret;
}
skip_soundwire:
/* create codec instances */
hda_codec_probe_bus(sdev);
if (!HDA_IDISP_CODEC(bus->codec_mask))
hda_codec_i915_display_power(sdev, false);
hda_bus_ml_put_all(bus);
return 0;
}
static irqreturn_t hda_dsp_interrupt_handler(int irq, void *context)
{
struct snd_sof_dev *sdev = context;
/*
* Get global interrupt status. It includes all hardware interrupt
* sources in the Intel HD Audio controller.
*/
if (snd_sof_dsp_read(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTSTS) &
SOF_HDA_INTSTS_GIS) {
/* disable GIE interrupt */
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
SOF_HDA_INTCTL,
SOF_HDA_INT_GLOBAL_EN,
0);
return IRQ_WAKE_THREAD;
}
return IRQ_NONE;
}
static irqreturn_t hda_dsp_interrupt_thread(int irq, void *context)
{
struct snd_sof_dev *sdev = context;
struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
/* deal with streams and controller first */
if (hda_dsp_check_stream_irq(sdev)) {
trace_sof_intel_hda_irq(sdev, "stream");
hda_dsp_stream_threaded_handler(irq, sdev);
}
if (hda_check_ipc_irq(sdev)) {
trace_sof_intel_hda_irq(sdev, "ipc");
sof_ops(sdev)->irq_thread(irq, sdev);
}
if (hda_dsp_check_sdw_irq(sdev)) {
trace_sof_intel_hda_irq(sdev, "sdw");
hda_dsp_sdw_thread(irq, hdev->sdw);
}
if (hda_sdw_check_wakeen_irq(sdev)) {
trace_sof_intel_hda_irq(sdev, "wakeen");
hda_sdw_process_wakeen(sdev);
}
hda_codec_check_for_state_change(sdev);
/* enable GIE interrupt */
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
SOF_HDA_INTCTL,
SOF_HDA_INT_GLOBAL_EN,
SOF_HDA_INT_GLOBAL_EN);
return IRQ_HANDLED;
}
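/*
 * Main probe: detect the DSP from the PCI class code, map the BARs, init the
 * streams, register the IPC interrupt and init the controller capabilities.
 */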
int hda_dsp_probe(struct snd_sof_dev *sdev)
{
struct pci_dev *pci = to_pci_dev(sdev->dev);
struct sof_intel_hda_dev *hdev;
struct hdac_bus *bus;
const struct sof_intel_dsp_desc *chip;
int ret = 0;
if (!sdev->dspless_mode_selected) {
/*
* detect DSP by checking class/subclass/prog-id information
* class=04 subclass 03 prog-if 00: no DSP, legacy driver is required
* class=04 subclass 01 prog-if 00: DSP is present
* (and may be required e.g. for DMIC or SSP support)
* class=04 subclass 03 prog-if 80: either of DSP or legacy mode works
*/
if (pci->class == 0x040300) {
dev_err(sdev->dev, "the DSP is not enabled on this platform, aborting probe\n");
return -ENODEV;
} else if (pci->class != 0x040100 && pci->class != 0x040380) {
dev_err(sdev->dev, "unknown PCI class/subclass/prog-if 0x%06x found, aborting probe\n",
pci->class);
return -ENODEV;
}
dev_info(sdev->dev, "DSP detected with PCI class/subclass/prog-if 0x%06x\n",
pci->class);
}
chip = get_chip_info(sdev->pdata);
if (!chip) {
dev_err(sdev->dev, "error: no such device supported, chip id:%x\n",
pci->device);
ret = -EIO;
goto err;
}
sdev->num_cores = chip->cores_num;
hdev = devm_kzalloc(sdev->dev, sizeof(*hdev), GFP_KERNEL);
if (!hdev)
return -ENOMEM;
sdev->pdata->hw_pdata = hdev;
hdev->desc = chip;
hdev->dmic_dev = platform_device_register_data(sdev->dev, "dmic-codec",
PLATFORM_DEVID_NONE,
NULL, 0);
if (IS_ERR(hdev->dmic_dev)) {
dev_err(sdev->dev, "error: failed to create DMIC device\n");
return PTR_ERR(hdev->dmic_dev);
}
	/*
	 * use position update IPC if either it is forced
	 * or we have no other choice
	 */
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_FORCE_IPC_POSITION)
hdev->no_ipc_position = 0;
#else
hdev->no_ipc_position = sof_ops(sdev)->pcm_pointer ? 1 : 0;
#endif
if (sdev->dspless_mode_selected)
hdev->no_ipc_position = 1;
/* set up HDA base */
bus = sof_to_bus(sdev);
ret = hda_init(sdev);
if (ret < 0)
goto hdac_bus_unmap;
if (sdev->dspless_mode_selected)
goto skip_dsp_setup;
/* DSP base */
sdev->bar[HDA_DSP_BAR] = pci_ioremap_bar(pci, HDA_DSP_BAR);
if (!sdev->bar[HDA_DSP_BAR]) {
dev_err(sdev->dev, "error: ioremap error\n");
ret = -ENXIO;
goto hdac_bus_unmap;
}
sdev->mmio_bar = HDA_DSP_BAR;
sdev->mailbox_bar = HDA_DSP_BAR;
skip_dsp_setup:
/* allow 64bit DMA address if supported by H/W */
if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(64))) {
dev_dbg(sdev->dev, "DMA mask is 32 bit\n");
dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(32));
}
dma_set_max_seg_size(&pci->dev, UINT_MAX);
/* init streams */
ret = hda_dsp_stream_init(sdev);
if (ret < 0) {
dev_err(sdev->dev, "error: failed to init streams\n");
/*
* not all errors are due to memory issues, but trying
* to free everything does not harm
*/
goto free_streams;
}
	/*
	 * Register our IRQ: try to enable MSI first and fall back to
	 * legacy interrupt mode if that fails.
	 * TODO: support MSI multiple vectors
	 */
if (hda_use_msi && pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI) > 0) {
dev_info(sdev->dev, "use msi interrupt mode\n");
sdev->ipc_irq = pci_irq_vector(pci, 0);
/* initialised to "false" by kzalloc() */
sdev->msi_enabled = true;
}
if (!sdev->msi_enabled) {
dev_info(sdev->dev, "use legacy interrupt mode\n");
		/*
		 * in IO-APIC mode, hda->irq and ipc_irq share the same
		 * IRQ number, pci->irq
		 */
sdev->ipc_irq = pci->irq;
}
dev_dbg(sdev->dev, "using IPC IRQ %d\n", sdev->ipc_irq);
ret = request_threaded_irq(sdev->ipc_irq, hda_dsp_interrupt_handler,
hda_dsp_interrupt_thread,
IRQF_SHARED, "AudioDSP", sdev);
if (ret < 0) {
dev_err(sdev->dev, "error: failed to register IPC IRQ %d\n",
sdev->ipc_irq);
goto free_irq_vector;
}
pci_set_master(pci);
synchronize_irq(pci->irq);
/*
* clear TCSEL to clear playback on some HD Audio
* codecs. PCI TCSEL is defined in the Intel manuals.
*/
snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0);
/* init HDA capabilities */
ret = hda_init_caps(sdev);
if (ret < 0)
goto free_ipc_irq;
if (!sdev->dspless_mode_selected) {
/* enable ppcap interrupt */
hda_dsp_ctrl_ppcap_enable(sdev, true);
hda_dsp_ctrl_ppcap_int_enable(sdev, true);
/* set default mailbox offset for FW ready message */
sdev->dsp_box.offset = HDA_DSP_MBOX_UPLINK_OFFSET;
INIT_DELAYED_WORK(&hdev->d0i3_work, hda_dsp_d0i3_work);
}
init_waitqueue_head(&hdev->waitq);
hdev->nhlt = intel_nhlt_init(sdev->dev);
return 0;
free_ipc_irq:
free_irq(sdev->ipc_irq, sdev);
free_irq_vector:
if (sdev->msi_enabled)
pci_free_irq_vectors(pci);
free_streams:
hda_dsp_stream_free(sdev);
/* dsp_unmap: not currently used */
if (!sdev->dspless_mode_selected)
iounmap(sdev->bar[HDA_DSP_BAR]);
hdac_bus_unmap:
platform_device_unregister(hdev->dmic_dev);
iounmap(bus->remap_addr);
hda_codec_i915_exit(sdev);
err:
return ret;
}
int hda_dsp_remove(struct snd_sof_dev *sdev)
{
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
const struct sof_intel_dsp_desc *chip = hda->desc;
struct hdac_bus *bus = sof_to_bus(sdev);
struct pci_dev *pci = to_pci_dev(sdev->dev);
struct nhlt_acpi_table *nhlt = hda->nhlt;
if (nhlt)
intel_nhlt_free(nhlt);
if (!sdev->dspless_mode_selected)
/* cancel any attempt for DSP D0I3 */
cancel_delayed_work_sync(&hda->d0i3_work);
hda_codec_device_remove(sdev);
hda_sdw_exit(sdev);
if (!IS_ERR_OR_NULL(hda->dmic_dev))
platform_device_unregister(hda->dmic_dev);
if (!sdev->dspless_mode_selected) {
/* disable DSP IRQ */
snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
SOF_HDA_PPCTL_PIE, 0);
}
/* disable CIE and GIE interrupts */
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
SOF_HDA_INT_CTRL_EN | SOF_HDA_INT_GLOBAL_EN, 0);
if (sdev->dspless_mode_selected)
goto skip_disable_dsp;
/* no need to check for error as the DSP will be disabled anyway */
if (chip && chip->power_down_dsp)
chip->power_down_dsp(sdev);
/* disable DSP */
snd_sof_dsp_update_bits(sdev, HDA_DSP_PP_BAR, SOF_HDA_REG_PP_PPCTL,
SOF_HDA_PPCTL_GPROCEN, 0);
skip_disable_dsp:
free_irq(sdev->ipc_irq, sdev);
if (sdev->msi_enabled)
pci_free_irq_vectors(pci);
hda_dsp_stream_free(sdev);
hda_bus_ml_free(sof_to_bus(sdev));
if (!sdev->dspless_mode_selected)
iounmap(sdev->bar[HDA_DSP_BAR]);
iounmap(bus->remap_addr);
sof_hda_bus_exit(sdev);
hda_codec_i915_exit(sdev);
return 0;
}
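/* Reset and power down all host-managed DSP cores */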
int hda_power_down_dsp(struct snd_sof_dev *sdev)
{
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
const struct sof_intel_dsp_desc *chip = hda->desc;
return hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask);
}
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
static void hda_generic_machine_select(struct snd_sof_dev *sdev,
struct snd_soc_acpi_mach **mach)
{
struct hdac_bus *bus = sof_to_bus(sdev);
struct snd_soc_acpi_mach_params *mach_params;
struct snd_soc_acpi_mach *hda_mach;
struct snd_sof_pdata *pdata = sdev->pdata;
const char *tplg_filename;
const char *idisp_str;
int dmic_num = 0;
int codec_num = 0;
int ret;
int i;
/* codec detection */
if (!bus->codec_mask) {
dev_info(bus->dev, "no hda codecs found!\n");
} else {
dev_info(bus->dev, "hda codecs found, mask %lx\n",
bus->codec_mask);
for (i = 0; i < HDA_MAX_CODECS; i++) {
if (bus->codec_mask & (1 << i))
codec_num++;
}
		/*
		 * If no machine driver is found, the generic HDA machine
		 * driver can handle:
		 * - one HDMI codec, and/or
		 * - one external HDAudio codec
		 */
if (!*mach && codec_num <= 2) {
bool tplg_fixup;
hda_mach = snd_soc_acpi_intel_hda_machines;
dev_info(bus->dev, "using HDA machine driver %s now\n",
hda_mach->drv_name);
if (codec_num == 1 && HDA_IDISP_CODEC(bus->codec_mask))
idisp_str = "-idisp";
else
idisp_str = "";
/* topology: use the info from hda_machines */
if (pdata->tplg_filename) {
tplg_fixup = false;
tplg_filename = pdata->tplg_filename;
} else {
tplg_fixup = true;
tplg_filename = hda_mach->sof_tplg_filename;
}
ret = dmic_detect_topology_fixup(sdev, &tplg_filename, idisp_str, &dmic_num,
tplg_fixup);
if (ret < 0)
return;
hda_mach->mach_params.dmic_num = dmic_num;
pdata->tplg_filename = tplg_filename;
if (codec_num == 2 ||
(codec_num == 1 && !HDA_IDISP_CODEC(bus->codec_mask))) {
/*
* Prevent SoundWire links from starting when an external
* HDaudio codec is used
*/
hda_mach->mach_params.link_mask = 0;
} else {
/*
* Allow SoundWire links to start when no external HDaudio codec
* was detected. This will not create a SoundWire card but
* will help detect if any SoundWire codec reports as ATTACHED.
*/
struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
hda_mach->mach_params.link_mask = hdev->info.link_mask;
}
*mach = hda_mach;
}
}
/* used by hda machine driver to create dai links */
if (*mach) {
mach_params = &(*mach)->mach_params;
mach_params->codec_mask = bus->codec_mask;
mach_params->common_hdmi_codec_drv = true;
}
}
#else
static void hda_generic_machine_select(struct snd_sof_dev *sdev,
struct snd_soc_acpi_mach **mach)
{
}
#endif
#if IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
static struct snd_soc_acpi_mach *hda_sdw_machine_select(struct snd_sof_dev *sdev)
{
struct snd_sof_pdata *pdata = sdev->pdata;
const struct snd_soc_acpi_link_adr *link;
struct snd_soc_acpi_mach *mach;
struct sof_intel_hda_dev *hdev;
u32 link_mask;
int i;
hdev = pdata->hw_pdata;
link_mask = hdev->info.link_mask;
	/*
	 * Select a SoundWire machine driver if needed using the alternate
	 * tables. This case deals with SoundWire-only machines; for mixed
	 * cases with I2C/I2S the detection relies on the HID list.
	 */
if (link_mask) {
for (mach = pdata->desc->alt_machines;
mach && mach->link_mask; mach++) {
			/*
			 * On some platforms such as Up Extreme all links
			 * are enabled but only one link can be used by the
			 * external codec. Instead of an exact match of the two
			 * masks, first check whether the machine's link_mask is
			 * a subset of the link_mask supported by the hardware,
			 * then go on to check link_adr.
			 */
if (~link_mask & mach->link_mask)
continue;
			/* No need to match adr if no links are defined */
if (!mach->links)
break;
link = mach->links;
for (i = 0; i < hdev->info.count && link->num_adr;
i++, link++) {
/*
* Try next machine if any expected Slaves
* are not found on this link.
*/
if (!snd_soc_acpi_sdw_link_slaves_found(sdev->dev, link,
hdev->sdw->ids,
hdev->sdw->num_slaves))
break;
}
/* Found if all Slaves are checked */
if (i == hdev->info.count || !link->num_adr)
break;
}
if (mach && mach->link_mask) {
int dmic_num = 0;
bool tplg_fixup;
const char *tplg_filename;
mach->mach_params.links = mach->links;
mach->mach_params.link_mask = mach->link_mask;
mach->mach_params.platform = dev_name(sdev->dev);
if (pdata->tplg_filename) {
tplg_fixup = false;
} else {
tplg_fixup = true;
tplg_filename = mach->sof_tplg_filename;
}
			/*
			 * DMICs use up to 4 pins and are typically pin-muxed with
			 * SoundWire links 2 and 3, or links 1 and 2, so we only try
			 * to enable DMICs if both conditions are true:
			 * a) 2 or fewer links are used by SoundWire
			 * b) the NHLT table reports the presence of microphones
			 */
if (hweight_long(mach->link_mask) <= 2) {
int ret;
ret = dmic_detect_topology_fixup(sdev, &tplg_filename, "",
&dmic_num, tplg_fixup);
if (ret < 0)
return NULL;
}
if (tplg_fixup)
pdata->tplg_filename = tplg_filename;
mach->mach_params.dmic_num = dmic_num;
dev_dbg(sdev->dev,
"SoundWire machine driver %s topology %s\n",
mach->drv_name,
pdata->tplg_filename);
return mach;
}
dev_info(sdev->dev, "No SoundWire machine driver found\n");
}
return NULL;
}
#else
static struct snd_soc_acpi_mach *hda_sdw_machine_select(struct snd_sof_dev *sdev)
{
return NULL;
}
#endif
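/* Fill in the common machine params: platform name and the DAI driver list */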
void hda_set_mach_params(struct snd_soc_acpi_mach *mach,
struct snd_sof_dev *sdev)
{
struct snd_sof_pdata *pdata = sdev->pdata;
const struct sof_dev_desc *desc = pdata->desc;
struct snd_soc_acpi_mach_params *mach_params;
mach_params = &mach->mach_params;
mach_params->platform = dev_name(sdev->dev);
if (IS_ENABLED(CONFIG_SND_SOC_SOF_NOCODEC_DEBUG_SUPPORT) &&
sof_debug_check_flag(SOF_DBG_FORCE_NOCODEC))
mach_params->num_dai_drivers = SOF_SKL_NUM_DAIS_NOCODEC;
else
mach_params->num_dai_drivers = desc->ops->num_drv;
mach_params->dai_drivers = desc->ops->drv;
}
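/*
 * Machine driver selection order: ACPI-matched I2S/DMIC machines first, then
 * SoundWire, with the generic HDA machine driver as the fallback.
 */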
struct snd_soc_acpi_mach *hda_machine_select(struct snd_sof_dev *sdev)
{
u32 interface_mask = hda_get_interface_mask(sdev);
struct snd_sof_pdata *sof_pdata = sdev->pdata;
const struct sof_dev_desc *desc = sof_pdata->desc;
struct snd_soc_acpi_mach *mach = NULL;
const char *tplg_filename;
/* Try I2S or DMIC if it is supported */
if (interface_mask & (BIT(SOF_DAI_INTEL_SSP) | BIT(SOF_DAI_INTEL_DMIC)))
mach = snd_soc_acpi_find_machine(desc->machines);
if (mach) {
bool add_extension = false;
bool tplg_fixup = false;
/*
* If tplg file name is overridden, use it instead of
* the one set in mach table
*/
if (!sof_pdata->tplg_filename) {
sof_pdata->tplg_filename = mach->sof_tplg_filename;
tplg_fixup = true;
}
/* report to machine driver if any DMICs are found */
mach->mach_params.dmic_num = check_dmic_num(sdev);
if (tplg_fixup &&
mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER &&
mach->mach_params.dmic_num) {
tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
"%s%s%d%s",
sof_pdata->tplg_filename,
"-dmic",
mach->mach_params.dmic_num,
"ch");
if (!tplg_filename)
return NULL;
sof_pdata->tplg_filename = tplg_filename;
add_extension = true;
}
if (mach->link_mask) {
mach->mach_params.links = mach->links;
mach->mach_params.link_mask = mach->link_mask;
}
/* report SSP link mask to machine driver */
mach->mach_params.i2s_link_mask = check_nhlt_ssp_mask(sdev);
if (tplg_fixup &&
mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER &&
mach->mach_params.i2s_link_mask) {
const struct sof_intel_dsp_desc *chip = get_chip_info(sdev->pdata);
int ssp_num;
int mclk_mask;
if (hweight_long(mach->mach_params.i2s_link_mask) > 1 &&
!(mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_SSP_MSB))
dev_warn(sdev->dev, "More than one SSP exposed by NHLT, choosing MSB\n");
			/* fls() returns 1-based results, SSP indices are 0-based */
ssp_num = fls(mach->mach_params.i2s_link_mask) - 1;
if (ssp_num >= chip->ssp_count) {
dev_err(sdev->dev, "Invalid SSP %d, max on this platform is %d\n",
ssp_num, chip->ssp_count);
return NULL;
}
tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
"%s%s%d",
sof_pdata->tplg_filename,
"-ssp",
ssp_num);
if (!tplg_filename)
return NULL;
sof_pdata->tplg_filename = tplg_filename;
add_extension = true;
mclk_mask = check_nhlt_ssp_mclk_mask(sdev, ssp_num);
if (mclk_mask < 0) {
dev_err(sdev->dev, "Invalid MCLK configuration\n");
return NULL;
}
dev_dbg(sdev->dev, "MCLK mask %#x found in NHLT\n", mclk_mask);
if (mclk_mask) {
dev_info(sdev->dev, "Overriding topology with MCLK mask %#x from NHLT\n", mclk_mask);
sdev->mclk_id_override = true;
sdev->mclk_id_quirk = (mclk_mask & BIT(0)) ? 0 : 1;
}
}
if (tplg_fixup && add_extension) {
tplg_filename = devm_kasprintf(sdev->dev, GFP_KERNEL,
"%s%s",
sof_pdata->tplg_filename,
".tplg");
if (!tplg_filename)
return NULL;
sof_pdata->tplg_filename = tplg_filename;
}
/* check if mclk_id should be modified from topology defaults */
if (mclk_id_override >= 0) {
dev_info(sdev->dev, "Overriding topology with MCLK %d from kernel_parameter\n", mclk_id_override);
sdev->mclk_id_override = true;
sdev->mclk_id_quirk = mclk_id_override;
}
}
/* If I2S fails, try SoundWire if it is supported */
if (!mach && (interface_mask & BIT(SOF_DAI_INTEL_ALH)))
mach = hda_sdw_machine_select(sdev);
/*
* Choose HDA generic machine driver if mach is NULL.
* Otherwise, set certain mach params.
*/
hda_generic_machine_select(sdev, &mach);
if (!mach)
dev_warn(sdev->dev, "warning: No matching ASoC machine driver found\n");
return mach;
}
int hda_pci_intel_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
int ret;
ret = snd_intel_dsp_driver_probe(pci);
if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
dev_dbg(&pci->dev, "SOF PCI driver not selected, aborting probe\n");
return -ENODEV;
}
return sof_pci_probe(pci, pci_id);
}
EXPORT_SYMBOL_NS(hda_pci_intel_probe, SND_SOC_SOF_INTEL_HDA_COMMON);
int hda_register_clients(struct snd_sof_dev *sdev)
{
return hda_probes_register(sdev);
}
void hda_unregister_clients(struct snd_sof_dev *sdev)
{
hda_probes_unregister(sdev);
}
MODULE_LICENSE("Dual BSD/GPL");
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
MODULE_IMPORT_NS(SND_SOC_SOF_HDA_AUDIO_CODEC);
MODULE_IMPORT_NS(SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_IMPORT_NS(SND_INTEL_SOUNDWIRE_ACPI);
MODULE_IMPORT_NS(SOUNDWIRE_INTEL_INIT);
MODULE_IMPORT_NS(SOUNDWIRE_INTEL);
MODULE_IMPORT_NS(SND_SOC_SOF_HDA_MLINK);
| linux-master | sound/soc/sof/intel/hda.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018-2021 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <[email protected]>
//
#include <linux/module.h>
#include <linux/pci.h>
#include <sound/soc-acpi.h>
#include <sound/soc-acpi-intel-match.h>
#include <sound/sof.h>
#include "../ops.h"
#include "../sof-pci-dev.h"
/* platform specific devices */
#include "hda.h"
static const struct sof_dev_desc icl_desc = {
.machines = snd_soc_acpi_intel_icl_machines,
.alt_machines = snd_soc_acpi_intel_icl_sdw_machines,
.use_acpi_target_states = true,
.resindex_lpe_base = 0,
.resindex_pcicfg_base = -1,
.resindex_imr_base = -1,
.irqindex_host_ipc = -1,
.chip_info = &icl_chip_info,
.ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
.ipc_default = SOF_IPC,
.dspless_mode_supported = true, /* Only supported for HDaudio */
.default_fw_path = {
[SOF_IPC] = "intel/sof",
[SOF_INTEL_IPC4] = "intel/avs/icl",
},
.default_lib_path = {
[SOF_INTEL_IPC4] = "intel/avs-lib/icl",
},
.default_tplg_path = {
[SOF_IPC] = "intel/sof-tplg",
[SOF_INTEL_IPC4] = "intel/avs-tplg",
},
.default_fw_filename = {
[SOF_IPC] = "sof-icl.ri",
[SOF_INTEL_IPC4] = "dsp_basefw.bin",
},
.nocodec_tplg_filename = "sof-icl-nocodec.tplg",
.ops = &sof_icl_ops,
.ops_init = sof_icl_ops_init,
.ops_free = hda_ops_free,
};
static const struct sof_dev_desc jsl_desc = {
.machines = snd_soc_acpi_intel_jsl_machines,
.use_acpi_target_states = true,
.resindex_lpe_base = 0,
.resindex_pcicfg_base = -1,
.resindex_imr_base = -1,
.irqindex_host_ipc = -1,
.chip_info = &jsl_chip_info,
.ipc_supported_mask = BIT(SOF_IPC) | BIT(SOF_INTEL_IPC4),
.ipc_default = SOF_IPC,
.dspless_mode_supported = true, /* Only supported for HDaudio */
.default_fw_path = {
[SOF_IPC] = "intel/sof",
[SOF_INTEL_IPC4] = "intel/avs/jsl",
},
.default_lib_path = {
[SOF_INTEL_IPC4] = "intel/avs-lib/jsl",
},
.default_tplg_path = {
[SOF_IPC] = "intel/sof-tplg",
[SOF_INTEL_IPC4] = "intel/avs-tplg",
},
.default_fw_filename = {
[SOF_IPC] = "sof-jsl.ri",
[SOF_INTEL_IPC4] = "dsp_basefw.bin",
},
.nocodec_tplg_filename = "sof-jsl-nocodec.tplg",
.ops = &sof_cnl_ops,
.ops_init = sof_cnl_ops_init,
.ops_free = hda_ops_free,
};
/* PCI IDs */
static const struct pci_device_id sof_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, HDA_ICL_LP, &icl_desc) },
{ PCI_DEVICE_DATA(INTEL, HDA_ICL_H, &icl_desc) },
{ PCI_DEVICE_DATA(INTEL, HDA_ICL_N, &jsl_desc) },
{ PCI_DEVICE_DATA(INTEL, HDA_JSL_N, &jsl_desc) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, sof_pci_ids);
/* pci_driver definition */
static struct pci_driver snd_sof_pci_intel_icl_driver = {
.name = "sof-audio-pci-intel-icl",
.id_table = sof_pci_ids,
.probe = hda_pci_intel_probe,
.remove = sof_pci_remove,
.shutdown = sof_pci_shutdown,
.driver = {
.pm = &sof_pci_pm,
},
};
module_pci_driver(snd_sof_pci_intel_icl_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_IMPORT_NS(SND_SOC_SOF_INTEL_HDA_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
| linux-master | sound/soc/sof/intel/pci-icl.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2022 Intel Corporation. All rights reserved.
#include <sound/pcm_params.h>
#include <sound/hdaudio_ext.h>
#include <sound/hda-mlink.h>
#include <sound/sof/ipc4/header.h>
#include <uapi/sound/sof/header.h>
#include "../ipc4-priv.h"
#include "../ipc4-topology.h"
#include "../sof-priv.h"
#include "../sof-audio.h"
#include "hda.h"
/* These ops are only applicable for the HDA DAI's in their current form */
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_LINK)
/*
* This function checks if the host dma channel corresponding
* to the link DMA stream_tag argument is assigned to one
* of the FEs connected to the BE DAI.
*/
static bool hda_check_fes(struct snd_soc_pcm_runtime *rtd,
int dir, int stream_tag)
{
struct snd_pcm_substream *fe_substream;
struct hdac_stream *fe_hstream;
struct snd_soc_dpcm *dpcm;
for_each_dpcm_fe(rtd, dir, dpcm) {
fe_substream = snd_soc_dpcm_get_substream(dpcm->fe, dir);
fe_hstream = fe_substream->runtime->private_data;
if (fe_hstream->stream_tag == stream_tag)
return true;
}
return false;
}
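/*
 * Find a free link DMA stream for this substream. On platforms with the
 * PROCEN_FMT_QUIRK, prefer a stream whose tag matches one of the connected
 * FEs, or reserve the host DMA channel for hostless streams.
 */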
static struct hdac_ext_stream *
hda_link_stream_assign(struct hdac_bus *bus, struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct sof_intel_hda_stream *hda_stream;
const struct sof_intel_dsp_desc *chip;
struct snd_sof_dev *sdev;
struct hdac_ext_stream *res = NULL;
struct hdac_stream *hstream = NULL;
int stream_dir = substream->stream;
if (!bus->ppcap) {
dev_err(bus->dev, "stream type not supported\n");
return NULL;
}
spin_lock_irq(&bus->reg_lock);
list_for_each_entry(hstream, &bus->stream_list, list) {
struct hdac_ext_stream *hext_stream =
stream_to_hdac_ext_stream(hstream);
if (hstream->direction != substream->stream)
continue;
hda_stream = hstream_to_sof_hda_stream(hext_stream);
sdev = hda_stream->sdev;
chip = get_chip_info(sdev->pdata);
/* check if link is available */
if (!hext_stream->link_locked) {
/*
* choose the first available link for platforms that do not have the
* PROCEN_FMT_QUIRK set.
*/
if (!(chip->quirks & SOF_INTEL_PROCEN_FMT_QUIRK)) {
res = hext_stream;
break;
}
if (hstream->opened) {
/*
* check if the stream tag matches the stream
* tag of one of the connected FEs
*/
if (hda_check_fes(rtd, stream_dir,
hstream->stream_tag)) {
res = hext_stream;
break;
}
} else {
res = hext_stream;
/*
* This must be a hostless stream.
* So reserve the host DMA channel.
*/
hda_stream->host_reserved = 1;
break;
}
}
}
if (res) {
/* Make sure that host and link DMA is decoupled. */
snd_hdac_ext_stream_decouple_locked(bus, res, true);
res->link_locked = 1;
res->link_substream = substream;
}
spin_unlock_irq(&bus->reg_lock);
return res;
}
static struct hdac_ext_stream *hda_get_hext_stream(struct snd_sof_dev *sdev,
struct snd_soc_dai *cpu_dai,
struct snd_pcm_substream *substream)
{
return snd_soc_dai_get_dma_data(cpu_dai, substream);
}
static struct hdac_ext_stream *hda_ipc4_get_hext_stream(struct snd_sof_dev *sdev,
struct snd_soc_dai *cpu_dai,
struct snd_pcm_substream *substream)
{
struct snd_sof_widget *pipe_widget;
struct sof_ipc4_pipeline *pipeline;
struct snd_sof_widget *swidget;
struct snd_soc_dapm_widget *w;
w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
swidget = w->dobj.private;
pipe_widget = swidget->spipe->pipe_widget;
pipeline = pipe_widget->private;
/* mark pipeline so that it can be skipped during FE trigger */
pipeline->skip_during_fe_trigger = true;
return snd_soc_dai_get_dma_data(cpu_dai, substream);
}
static struct hdac_ext_stream *hda_assign_hext_stream(struct snd_sof_dev *sdev,
struct snd_soc_dai *cpu_dai,
struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_dai *dai;
struct hdac_ext_stream *hext_stream;
/* only allocate a stream_tag for the first DAI in the dailink */
dai = asoc_rtd_to_cpu(rtd, 0);
if (dai == cpu_dai)
hext_stream = hda_link_stream_assign(sof_to_bus(sdev), substream);
else
hext_stream = snd_soc_dai_get_dma_data(dai, substream);
if (!hext_stream)
return NULL;
snd_soc_dai_set_dma_data(cpu_dai, substream, (void *)hext_stream);
return hext_stream;
}
static void hda_release_hext_stream(struct snd_sof_dev *sdev, struct snd_soc_dai *cpu_dai,
struct snd_pcm_substream *substream)
{
struct hdac_ext_stream *hext_stream = hda_get_hext_stream(sdev, cpu_dai, substream);
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_dai *dai;
/* only release a stream_tag for the first DAI in the dailink */
dai = asoc_rtd_to_cpu(rtd, 0);
if (dai == cpu_dai)
snd_hdac_ext_stream_release(hext_stream, HDAC_EXT_STREAM_TYPE_LINK);
snd_soc_dai_set_dma_data(cpu_dai, substream, NULL);
}
static void hda_setup_hext_stream(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream,
unsigned int format_val)
{
snd_hdac_ext_stream_setup(hext_stream, format_val);
}
static void hda_reset_hext_stream(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream)
{
snd_hdac_ext_stream_reset(hext_stream);
}
static void hda_codec_dai_set_stream(struct snd_sof_dev *sdev,
struct snd_pcm_substream *substream,
struct hdac_stream *hstream)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
/* set the hdac_stream in the codec dai */
snd_soc_dai_set_stream(codec_dai, hstream, substream->stream);
}
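/*
 * Compute the HDA stream format value from hw_params, using the codec DAI
 * sig_bits as the link bit depth.
 */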
static unsigned int hda_calc_stream_format(struct snd_sof_dev *sdev,
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
unsigned int link_bps;
unsigned int format_val;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
link_bps = codec_dai->driver->playback.sig_bits;
else
link_bps = codec_dai->driver->capture.sig_bits;
format_val = snd_hdac_calc_stream_format(params_rate(params), params_channels(params),
params_format(params), link_bps, 0);
dev_dbg(sdev->dev, "format_val=%#x, rate=%d, ch=%d, format=%d\n", format_val,
params_rate(params), params_channels(params), params_format(params));
return format_val;
}
static struct hdac_ext_link *hda_get_hlink(struct snd_sof_dev *sdev,
struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct hdac_bus *bus = sof_to_bus(sdev);
return snd_hdac_ext_bus_get_hlink_by_name(bus, codec_dai->component->name);
}
static unsigned int generic_calc_stream_format(struct snd_sof_dev *sdev,
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
unsigned int format_val;
format_val = snd_hdac_calc_stream_format(params_rate(params), params_channels(params),
params_format(params),
params_physical_width(params),
0);
dev_dbg(sdev->dev, "format_val=%#x, rate=%d, ch=%d, format=%d\n", format_val,
params_rate(params), params_channels(params), params_format(params));
return format_val;
}
static unsigned int dmic_calc_stream_format(struct snd_sof_dev *sdev,
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
unsigned int format_val;
snd_pcm_format_t format;
unsigned int channels;
unsigned int width;
channels = params_channels(params);
format = params_format(params);
width = params_physical_width(params);
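	/*
	 * Pack S16 capture as two samples per 32-bit container: switch the
	 * format to S32 and halve the channel count.
	 */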
if (format == SNDRV_PCM_FORMAT_S16_LE) {
format = SNDRV_PCM_FORMAT_S32_LE;
channels /= 2;
width = 32;
}
format_val = snd_hdac_calc_stream_format(params_rate(params), channels,
format,
width,
0);
dev_dbg(sdev->dev, "format_val=%#x, rate=%d, ch=%d, format=%d\n", format_val,
params_rate(params), channels, format);
return format_val;
}
static struct hdac_ext_link *ssp_get_hlink(struct snd_sof_dev *sdev,
struct snd_pcm_substream *substream)
{
struct hdac_bus *bus = sof_to_bus(sdev);
return hdac_bus_eml_ssp_get_hlink(bus);
}
static struct hdac_ext_link *dmic_get_hlink(struct snd_sof_dev *sdev,
struct snd_pcm_substream *substream)
{
struct hdac_bus *bus = sof_to_bus(sdev);
return hdac_bus_eml_dmic_get_hlink(bus);
}
static struct hdac_ext_link *sdw_get_hlink(struct snd_sof_dev *sdev,
struct snd_pcm_substream *substream)
{
struct hdac_bus *bus = sof_to_bus(sdev);
return hdac_bus_eml_sdw_get_hlink(bus);
}
static int hda_ipc4_pre_trigger(struct snd_sof_dev *sdev, struct snd_soc_dai *cpu_dai,
struct snd_pcm_substream *substream, int cmd)
{
struct sof_ipc4_fw_data *ipc4_data = sdev->private;
struct snd_sof_widget *pipe_widget;
struct sof_ipc4_pipeline *pipeline;
struct snd_sof_widget *swidget;
struct snd_soc_dapm_widget *w;
int ret = 0;
w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
swidget = w->dobj.private;
pipe_widget = swidget->spipe->pipe_widget;
pipeline = pipe_widget->private;
if (pipe_widget->instance_id < 0)
return 0;
mutex_lock(&ipc4_data->pipeline_state_mutex);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
ret = sof_ipc4_set_pipeline_state(sdev, pipe_widget->instance_id,
SOF_IPC4_PIPE_PAUSED);
if (ret < 0)
goto out;
pipeline->state = SOF_IPC4_PIPE_PAUSED;
break;
default:
dev_err(sdev->dev, "unknown trigger command %d\n", cmd);
ret = -EINVAL;
}
out:
mutex_unlock(&ipc4_data->pipeline_state_mutex);
return ret;
}
static int hda_trigger(struct snd_sof_dev *sdev, struct snd_soc_dai *cpu_dai,
struct snd_pcm_substream *substream, int cmd)
{
struct hdac_ext_stream *hext_stream = snd_soc_dai_get_dma_data(cpu_dai, substream);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
snd_hdac_ext_stream_start(hext_stream);
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
snd_hdac_ext_stream_clear(hext_stream);
break;
default:
dev_err(sdev->dev, "unknown trigger command %d\n", cmd);
return -EINVAL;
}
return 0;
}
static int hda_ipc4_post_trigger(struct snd_sof_dev *sdev, struct snd_soc_dai *cpu_dai,
struct snd_pcm_substream *substream, int cmd)
{
struct sof_ipc4_fw_data *ipc4_data = sdev->private;
struct snd_sof_widget *pipe_widget;
struct sof_ipc4_pipeline *pipeline;
struct snd_sof_widget *swidget;
struct snd_soc_dapm_widget *w;
int ret = 0;
w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
swidget = w->dobj.private;
pipe_widget = swidget->spipe->pipe_widget;
pipeline = pipe_widget->private;
if (pipe_widget->instance_id < 0)
return 0;
mutex_lock(&ipc4_data->pipeline_state_mutex);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
if (pipeline->state != SOF_IPC4_PIPE_PAUSED) {
ret = sof_ipc4_set_pipeline_state(sdev, pipe_widget->instance_id,
SOF_IPC4_PIPE_PAUSED);
if (ret < 0)
goto out;
pipeline->state = SOF_IPC4_PIPE_PAUSED;
}
ret = sof_ipc4_set_pipeline_state(sdev, pipe_widget->instance_id,
SOF_IPC4_PIPE_RUNNING);
if (ret < 0)
goto out;
pipeline->state = SOF_IPC4_PIPE_RUNNING;
swidget->spipe->started_count++;
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
ret = sof_ipc4_set_pipeline_state(sdev, pipe_widget->instance_id,
SOF_IPC4_PIPE_RUNNING);
if (ret < 0)
goto out;
pipeline->state = SOF_IPC4_PIPE_RUNNING;
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
/*
* STOP/SUSPEND trigger is invoked only once when all users of this pipeline have
* been stopped. So, clear the started_count so that the pipeline can be reset
*/
swidget->spipe->started_count = 0;
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
break;
default:
dev_err(sdev->dev, "unknown trigger command %d\n", cmd);
ret = -EINVAL;
break;
}
out:
mutex_unlock(&ipc4_data->pipeline_state_mutex);
return ret;
}
static struct hdac_ext_stream *sdw_hda_ipc4_get_hext_stream(struct snd_sof_dev *sdev,
struct snd_soc_dai *cpu_dai,
struct snd_pcm_substream *substream)
{
struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
struct snd_sof_widget *swidget = w->dobj.private;
struct snd_sof_dai *dai = swidget->private;
struct sof_ipc4_copier *ipc4_copier = dai->private;
struct sof_ipc4_alh_configuration_blob *blob;
blob = (struct sof_ipc4_alh_configuration_blob *)ipc4_copier->copier_config;
/*
* Starting with ACE_2_0, re-setting the device_count is mandatory to avoid using
* the multi-gateway firmware configuration. The DMA hardware can take care of
* multiple links without needing any firmware assistance
*/
blob->alh_cfg.device_count = 1;
return hda_ipc4_get_hext_stream(sdev, cpu_dai, substream);
}
static const struct hda_dai_widget_dma_ops hda_ipc4_dma_ops = {
.get_hext_stream = hda_ipc4_get_hext_stream,
.assign_hext_stream = hda_assign_hext_stream,
.release_hext_stream = hda_release_hext_stream,
.setup_hext_stream = hda_setup_hext_stream,
.reset_hext_stream = hda_reset_hext_stream,
.pre_trigger = hda_ipc4_pre_trigger,
.trigger = hda_trigger,
.post_trigger = hda_ipc4_post_trigger,
.codec_dai_set_stream = hda_codec_dai_set_stream,
.calc_stream_format = hda_calc_stream_format,
.get_hlink = hda_get_hlink,
};
static const struct hda_dai_widget_dma_ops ssp_ipc4_dma_ops = {
.get_hext_stream = hda_ipc4_get_hext_stream,
.assign_hext_stream = hda_assign_hext_stream,
.release_hext_stream = hda_release_hext_stream,
.setup_hext_stream = hda_setup_hext_stream,
.reset_hext_stream = hda_reset_hext_stream,
.pre_trigger = hda_ipc4_pre_trigger,
.trigger = hda_trigger,
.post_trigger = hda_ipc4_post_trigger,
.calc_stream_format = generic_calc_stream_format,
.get_hlink = ssp_get_hlink,
};
static const struct hda_dai_widget_dma_ops dmic_ipc4_dma_ops = {
.get_hext_stream = hda_ipc4_get_hext_stream,
.assign_hext_stream = hda_assign_hext_stream,
.release_hext_stream = hda_release_hext_stream,
.setup_hext_stream = hda_setup_hext_stream,
.reset_hext_stream = hda_reset_hext_stream,
.pre_trigger = hda_ipc4_pre_trigger,
.trigger = hda_trigger,
.post_trigger = hda_ipc4_post_trigger,
.calc_stream_format = dmic_calc_stream_format,
.get_hlink = dmic_get_hlink,
};
static const struct hda_dai_widget_dma_ops sdw_ipc4_dma_ops = {
.get_hext_stream = sdw_hda_ipc4_get_hext_stream,
.assign_hext_stream = hda_assign_hext_stream,
.release_hext_stream = hda_release_hext_stream,
.setup_hext_stream = hda_setup_hext_stream,
.reset_hext_stream = hda_reset_hext_stream,
.pre_trigger = hda_ipc4_pre_trigger,
.trigger = hda_trigger,
.post_trigger = hda_ipc4_post_trigger,
.calc_stream_format = generic_calc_stream_format,
.get_hlink = sdw_get_hlink,
};
static const struct hda_dai_widget_dma_ops hda_ipc4_chain_dma_ops = {
.get_hext_stream = hda_get_hext_stream,
.assign_hext_stream = hda_assign_hext_stream,
.release_hext_stream = hda_release_hext_stream,
.setup_hext_stream = hda_setup_hext_stream,
.reset_hext_stream = hda_reset_hext_stream,
.trigger = hda_trigger,
.codec_dai_set_stream = hda_codec_dai_set_stream,
.calc_stream_format = hda_calc_stream_format,
.get_hlink = hda_get_hlink,
};
static int hda_ipc3_post_trigger(struct snd_sof_dev *sdev, struct snd_soc_dai *cpu_dai,
struct snd_pcm_substream *substream, int cmd)
{
struct hdac_ext_stream *hext_stream = hda_get_hext_stream(sdev, cpu_dai, substream);
struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
switch (cmd) {
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
{
struct snd_sof_dai_config_data data = { 0 };
int ret;
data.dai_data = DMA_CHAN_INVALID;
ret = hda_dai_config(w, SOF_DAI_CONFIG_FLAGS_HW_FREE, &data);
if (ret < 0)
return ret;
if (cmd == SNDRV_PCM_TRIGGER_STOP)
return hda_link_dma_cleanup(substream, hext_stream, cpu_dai);
break;
}
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
return hda_dai_config(w, SOF_DAI_CONFIG_FLAGS_PAUSE, NULL);
default:
break;
}
return 0;
}
static const struct hda_dai_widget_dma_ops hda_ipc3_dma_ops = {
.get_hext_stream = hda_get_hext_stream,
.assign_hext_stream = hda_assign_hext_stream,
.release_hext_stream = hda_release_hext_stream,
.setup_hext_stream = hda_setup_hext_stream,
.reset_hext_stream = hda_reset_hext_stream,
.trigger = hda_trigger,
.post_trigger = hda_ipc3_post_trigger,
.codec_dai_set_stream = hda_codec_dai_set_stream,
.calc_stream_format = hda_calc_stream_format,
.get_hlink = hda_get_hlink,
};
static struct hdac_ext_stream *
hda_dspless_get_hext_stream(struct snd_sof_dev *sdev, struct snd_soc_dai *cpu_dai,
struct snd_pcm_substream *substream)
{
struct hdac_stream *hstream = substream->runtime->private_data;
return stream_to_hdac_ext_stream(hstream);
}
static void hda_dspless_setup_hext_stream(struct snd_sof_dev *sdev,
struct hdac_ext_stream *hext_stream,
unsigned int format_val)
{
	/*
	 * Save the format_val which was adjusted by the maxbps of the codec.
	 * This information is not available on the FE side since the FE uses
	 * the dummy_codec.
	 */
hext_stream->hstream.format_val = format_val;
}
static const struct hda_dai_widget_dma_ops hda_dspless_dma_ops = {
.get_hext_stream = hda_dspless_get_hext_stream,
.setup_hext_stream = hda_dspless_setup_hext_stream,
.codec_dai_set_stream = hda_codec_dai_set_stream,
.calc_stream_format = hda_calc_stream_format,
.get_hlink = hda_get_hlink,
};
#endif
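/*
 * Select the DAI widget DMA ops: dspless ops when the DSP is bypassed, IPC3
 * ops for HDA DAIs, and IPC4 ops per DAI type (SSP/DMIC/ALH require ACE 2.0+).
 */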
const struct hda_dai_widget_dma_ops *
hda_select_dai_widget_ops(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget)
{
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_LINK)
struct snd_sof_dai *sdai;
if (sdev->dspless_mode_selected)
return &hda_dspless_dma_ops;
sdai = swidget->private;
switch (sdev->pdata->ipc_type) {
case SOF_IPC:
{
struct sof_dai_private_data *private = sdai->private;
if (private->dai_config->type == SOF_DAI_INTEL_HDA)
return &hda_ipc3_dma_ops;
break;
}
case SOF_INTEL_IPC4:
{
struct sof_ipc4_copier *ipc4_copier = sdai->private;
const struct sof_intel_dsp_desc *chip;
chip = get_chip_info(sdev->pdata);
switch (ipc4_copier->dai_type) {
case SOF_DAI_INTEL_HDA:
{
struct snd_sof_widget *pipe_widget = swidget->spipe->pipe_widget;
struct sof_ipc4_pipeline *pipeline = pipe_widget->private;
if (pipeline->use_chain_dma)
return &hda_ipc4_chain_dma_ops;
return &hda_ipc4_dma_ops;
}
case SOF_DAI_INTEL_SSP:
if (chip->hw_ip_version < SOF_INTEL_ACE_2_0)
return NULL;
return &ssp_ipc4_dma_ops;
case SOF_DAI_INTEL_DMIC:
if (chip->hw_ip_version < SOF_INTEL_ACE_2_0)
return NULL;
return &dmic_ipc4_dma_ops;
case SOF_DAI_INTEL_ALH:
if (chip->hw_ip_version < SOF_INTEL_ACE_2_0)
return NULL;
return &sdw_ipc4_dma_ops;
default:
break;
}
break;
}
default:
break;
}
#endif
return NULL;
}
| linux-master | sound/soc/sof/intel/hda-dai-ops.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// Copyright(c) 2023 Intel Corporation. All rights reserved.
/*
* Hardware interface for audio DSP on LunarLake.
*/
#include <linux/firmware.h>
#include <sound/hda_register.h>
#include <sound/sof/ipc4/header.h>
#include <trace/events/sof_intel.h>
#include "../ipc4-priv.h"
#include "../ops.h"
#include "hda.h"
#include "hda-ipc.h"
#include "../sof-audio.h"
#include "mtl.h"
#include <sound/hda-mlink.h>
/* LunarLake ops */
struct snd_sof_dsp_ops sof_lnl_ops;
EXPORT_SYMBOL_NS(sof_lnl_ops, SND_SOC_SOF_INTEL_HDA_COMMON);
static const struct snd_sof_debugfs_map lnl_dsp_debugfs[] = {
{"hda", HDA_DSP_HDA_BAR, 0, 0x4000, SOF_DEBUGFS_ACCESS_ALWAYS},
{"pp", HDA_DSP_PP_BAR, 0, 0x1000, SOF_DEBUGFS_ACCESS_ALWAYS},
{"dsp", HDA_DSP_BAR, 0, 0x10000, SOF_DEBUGFS_ACCESS_ALWAYS},
};
/* this allows the DSP to set up DMIC/SSP */
static int hdac_bus_offload_dmic_ssp(struct hdac_bus *bus)
{
int ret;
ret = hdac_bus_eml_enable_offload(bus, true, AZX_REG_ML_LEPTR_ID_INTEL_SSP, true);
if (ret < 0)
return ret;
ret = hdac_bus_eml_enable_offload(bus, true, AZX_REG_ML_LEPTR_ID_INTEL_DMIC, true);
if (ret < 0)
return ret;
return 0;
}
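/* probe/resume wrappers: run the common flow, then re-enable DMIC/SSP offload */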
static int lnl_hda_dsp_probe(struct snd_sof_dev *sdev)
{
int ret;
ret = hda_dsp_probe(sdev);
if (ret < 0)
return ret;
return hdac_bus_offload_dmic_ssp(sof_to_bus(sdev));
}
static int lnl_hda_dsp_resume(struct snd_sof_dev *sdev)
{
int ret;
ret = hda_dsp_resume(sdev);
if (ret < 0)
return ret;
return hdac_bus_offload_dmic_ssp(sof_to_bus(sdev));
}
static int lnl_hda_dsp_runtime_resume(struct snd_sof_dev *sdev)
{
int ret;
ret = hda_dsp_runtime_resume(sdev);
if (ret < 0)
return ret;
return hdac_bus_offload_dmic_ssp(sof_to_bus(sdev));
}
int sof_lnl_ops_init(struct snd_sof_dev *sdev)
{
struct sof_ipc4_fw_data *ipc4_data;
/* common defaults */
memcpy(&sof_lnl_ops, &sof_hda_common_ops, sizeof(struct snd_sof_dsp_ops));
/* probe */
sof_lnl_ops.probe = lnl_hda_dsp_probe;
/* shutdown */
sof_lnl_ops.shutdown = hda_dsp_shutdown;
/* doorbell */
sof_lnl_ops.irq_thread = mtl_ipc_irq_thread;
/* ipc */
sof_lnl_ops.send_msg = mtl_ipc_send_msg;
sof_lnl_ops.get_mailbox_offset = mtl_dsp_ipc_get_mailbox_offset;
sof_lnl_ops.get_window_offset = mtl_dsp_ipc_get_window_offset;
/* debug */
sof_lnl_ops.debug_map = lnl_dsp_debugfs;
sof_lnl_ops.debug_map_count = ARRAY_SIZE(lnl_dsp_debugfs);
sof_lnl_ops.dbg_dump = mtl_dsp_dump;
sof_lnl_ops.ipc_dump = mtl_ipc_dump;
/* pre/post fw run */
sof_lnl_ops.pre_fw_run = mtl_dsp_pre_fw_run;
sof_lnl_ops.post_fw_run = mtl_dsp_post_fw_run;
/* parse platform specific extended manifest */
sof_lnl_ops.parse_platform_ext_manifest = NULL;
/* dsp core get/put */
/* TODO: add core_get and core_put */
/* PM */
sof_lnl_ops.resume = lnl_hda_dsp_resume;
sof_lnl_ops.runtime_resume = lnl_hda_dsp_runtime_resume;
sof_lnl_ops.get_stream_position = mtl_dsp_get_stream_hda_link_position;
sdev->private = devm_kzalloc(sdev->dev, sizeof(struct sof_ipc4_fw_data), GFP_KERNEL);
if (!sdev->private)
return -ENOMEM;
ipc4_data = sdev->private;
ipc4_data->manifest_fw_hdr_offset = SOF_MAN4_FW_HDR_OFFSET;
ipc4_data->mtrace_type = SOF_IPC4_MTRACE_INTEL_CAVS_2;
/* External library loading support */
ipc4_data->load_library = hda_dsp_ipc4_load_library;
/* set DAI ops */
hda_set_dai_drv_ops(sdev, &sof_lnl_ops);
sof_lnl_ops.set_power_state = hda_dsp_set_power_state_ipc4;
return 0;
};
EXPORT_SYMBOL_NS(sof_lnl_ops_init, SND_SOC_SOF_INTEL_HDA_COMMON);
/* Check if an SDW IRQ occurred */
static bool lnl_dsp_check_sdw_irq(struct snd_sof_dev *sdev)
{
struct hdac_bus *bus = sof_to_bus(sdev);
return hdac_bus_eml_check_interrupt(bus, true, AZX_REG_ML_LEPTR_ID_SDW);
}
static void lnl_enable_sdw_irq(struct snd_sof_dev *sdev, bool enable)
{
struct hdac_bus *bus = sof_to_bus(sdev);
hdac_bus_eml_enable_interrupt(bus, true, AZX_REG_ML_LEPTR_ID_SDW, enable);
}
static int lnl_dsp_disable_interrupts(struct snd_sof_dev *sdev)
{
lnl_enable_sdw_irq(sdev, false);
mtl_disable_ipc_interrupts(sdev);
return mtl_enable_interrupts(sdev, false);
}
const struct sof_intel_dsp_desc lnl_chip_info = {
.cores_num = 5,
.init_core_mask = BIT(0),
.host_managed_cores_mask = BIT(0),
.ipc_req = MTL_DSP_REG_HFIPCXIDR,
.ipc_req_mask = MTL_DSP_REG_HFIPCXIDR_BUSY,
.ipc_ack = MTL_DSP_REG_HFIPCXIDA,
.ipc_ack_mask = MTL_DSP_REG_HFIPCXIDA_DONE,
.ipc_ctl = MTL_DSP_REG_HFIPCXCTL,
.rom_status_reg = MTL_DSP_ROM_STS,
.rom_init_timeout = 300,
.ssp_count = MTL_SSP_COUNT,
.d0i3_offset = MTL_HDA_VS_D0I3C,
.read_sdw_lcount = hda_sdw_check_lcount_ext,
.enable_sdw_irq = lnl_enable_sdw_irq,
.check_sdw_irq = lnl_dsp_check_sdw_irq,
.check_ipc_irq = mtl_dsp_check_ipc_irq,
.cl_init = mtl_dsp_cl_init,
.power_down_dsp = mtl_power_down_dsp,
.disable_interrupts = lnl_dsp_disable_interrupts,
.hw_ip_version = SOF_INTEL_ACE_2_0,
};
EXPORT_SYMBOL_NS(lnl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
| linux-master | sound/soc/sof/intel/lnl.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2019-2021 Intel Corporation. All rights reserved.
//
// Author: Cezary Rojewski <[email protected]>
// Converted to SOF client:
// Ranjani Sridharan <[email protected]>
// Peter Ujfalusi <[email protected]>
//
#include <linux/module.h>
#include <sound/hdaudio_ext.h>
#include <sound/soc.h>
#include "../sof-priv.h"
#include "../sof-client-probes.h"
#include "../sof-client.h"
#include "hda.h"
static inline struct hdac_ext_stream *
hda_compr_get_stream(struct snd_compr_stream *cstream)
{
return cstream->runtime->private_data;
}
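/* Grab a host DMA stream for the probes compress stream and report its stream tag */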
static int hda_probes_compr_startup(struct sof_client_dev *cdev,
struct snd_compr_stream *cstream,
struct snd_soc_dai *dai, u32 *stream_id)
{
struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
struct hdac_ext_stream *hext_stream;
hext_stream = hda_dsp_stream_get(sdev, cstream->direction, 0);
if (!hext_stream)
return -EBUSY;
hdac_stream(hext_stream)->curr_pos = 0;
hdac_stream(hext_stream)->cstream = cstream;
cstream->runtime->private_data = hext_stream;
*stream_id = hdac_stream(hext_stream)->stream_tag;
return 0;
}
static int hda_probes_compr_shutdown(struct sof_client_dev *cdev,
struct snd_compr_stream *cstream,
struct snd_soc_dai *dai)
{
struct hdac_ext_stream *hext_stream = hda_compr_get_stream(cstream);
struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
int ret;
ret = hda_dsp_stream_put(sdev, cstream->direction,
hdac_stream(hext_stream)->stream_tag);
if (ret < 0) {
dev_dbg(sdev->dev, "stream put failed: %d\n", ret);
return ret;
}
hdac_stream(hext_stream)->cstream = NULL;
cstream->runtime->private_data = NULL;
return 0;
}
static int hda_probes_compr_set_params(struct sof_client_dev *cdev,
struct snd_compr_stream *cstream,
struct snd_compr_params *params,
struct snd_soc_dai *dai)
{
struct hdac_ext_stream *hext_stream = hda_compr_get_stream(cstream);
struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
struct hdac_stream *hstream = hdac_stream(hext_stream);
struct snd_dma_buffer *dmab;
u32 bits, rate;
int bps, ret;
dmab = cstream->runtime->dma_buffer_p;
/* compr params do not store bit depth, default to S32_LE */
bps = snd_pcm_format_physical_width(SNDRV_PCM_FORMAT_S32_LE);
if (bps < 0)
return bps;
bits = hda_dsp_get_bits(sdev, bps);
rate = hda_dsp_get_mult_div(sdev, params->codec.sample_rate);
hstream->format_val = rate | bits | (params->codec.ch_out - 1);
hstream->bufsize = cstream->runtime->buffer_size;
hstream->period_bytes = cstream->runtime->fragment_size;
hstream->no_period_wakeup = 0;
ret = hda_dsp_stream_hw_params(sdev, hext_stream, dmab, NULL);
if (ret < 0) {
dev_err(sdev->dev, "error: hdac prepare failed: %d\n", ret);
return ret;
}
return 0;
}
static int hda_probes_compr_trigger(struct sof_client_dev *cdev,
struct snd_compr_stream *cstream,
int cmd, struct snd_soc_dai *dai)
{
struct hdac_ext_stream *hext_stream = hda_compr_get_stream(cstream);
struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
return hda_dsp_stream_trigger(sdev, hext_stream, cmd);
}
static int hda_probes_compr_pointer(struct sof_client_dev *cdev,
struct snd_compr_stream *cstream,
struct snd_compr_tstamp *tstamp,
struct snd_soc_dai *dai)
{
struct hdac_ext_stream *hext_stream = hda_compr_get_stream(cstream);
struct snd_soc_pcm_stream *pstream;
pstream = &dai->driver->capture;
tstamp->copied_total = hdac_stream(hext_stream)->curr_pos;
tstamp->sampling_rate = snd_pcm_rate_bit_to_rate(pstream->rates);
return 0;
}
/* SOF client implementation */
static const struct sof_probes_host_ops hda_probes_ops = {
.startup = hda_probes_compr_startup,
.shutdown = hda_probes_compr_shutdown,
.set_params = hda_probes_compr_set_params,
.trigger = hda_probes_compr_trigger,
.pointer = hda_probes_compr_pointer,
};
int hda_probes_register(struct snd_sof_dev *sdev)
{
return sof_client_dev_register(sdev, "hda-probes", 0, &hda_probes_ops,
sizeof(hda_probes_ops));
}
void hda_probes_unregister(struct snd_sof_dev *sdev)
{
sof_client_dev_unregister(sdev, "hda-probes", 0);
}
MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
| linux-master | sound/soc/sof/intel/hda-probes.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Author: Pan Xiuli <[email protected]>
//
#include <linux/module.h>
#include <sound/sof.h>
#include <sound/sof/xtensa.h>
#include "../sof-priv.h"
struct xtensa_exception_cause {
u32 id;
const char *msg;
const char *description;
};
/*
* From 4.4.1.5 table 4-64 Exception Causes of Xtensa
* Instruction Set Architecture (ISA) Reference Manual
*/
static const struct xtensa_exception_cause xtensa_exception_causes[] = {
{0, "IllegalInstructionCause", "Illegal instruction"},
{1, "SyscallCause", "SYSCALL instruction"},
{2, "InstructionFetchErrorCause",
"Processor internal physical address or data error during instruction fetch"},
{3, "LoadStoreErrorCause",
"Processor internal physical address or data error during load or store"},
{4, "Level1InterruptCause",
"Level-1 interrupt as indicated by set level-1 bits in the INTERRUPT register"},
{5, "AllocaCause",
"MOVSP instruction, if caller’s registers are not in the register file"},
{6, "IntegerDivideByZeroCause",
"QUOS, QUOU, REMS, or REMU divisor operand is zero"},
{8, "PrivilegedCause",
"Attempt to execute a privileged operation when CRING ? 0"},
{9, "LoadStoreAlignmentCause", "Load or store to an unaligned address"},
{12, "InstrPIFDataErrorCause",
"PIF data error during instruction fetch"},
{13, "LoadStorePIFDataErrorCause",
"Synchronous PIF data error during LoadStore access"},
{14, "InstrPIFAddrErrorCause",
"PIF address error during instruction fetch"},
{15, "LoadStorePIFAddrErrorCause",
"Synchronous PIF address error during LoadStore access"},
{16, "InstTLBMissCause", "Error during Instruction TLB refill"},
{17, "InstTLBMultiHitCause",
"Multiple instruction TLB entries matched"},
{18, "InstFetchPrivilegeCause",
"An instruction fetch referenced a virtual address at a ring level less than CRING"},
{20, "InstFetchProhibitedCause",
"An instruction fetch referenced a page mapped with an attribute that does not permit instruction fetch"},
{24, "LoadStoreTLBMissCause",
"Error during TLB refill for a load or store"},
{25, "LoadStoreTLBMultiHitCause",
"Multiple TLB entries matched for a load or store"},
{26, "LoadStorePrivilegeCause",
"A load or store referenced a virtual address at a ring level less than CRING"},
{28, "LoadProhibitedCause",
"A load referenced a page mapped with an attribute that does not permit loads"},
{32, "Coprocessor0Disabled",
"Coprocessor 0 instruction when cp0 disabled"},
{33, "Coprocessor1Disabled",
"Coprocessor 1 instruction when cp1 disabled"},
{34, "Coprocessor2Disabled",
"Coprocessor 2 instruction when cp2 disabled"},
{35, "Coprocessor3Disabled",
"Coprocessor 3 instruction when cp3 disabled"},
{36, "Coprocessor4Disabled",
"Coprocessor 4 instruction when cp4 disabled"},
{37, "Coprocessor5Disabled",
"Coprocessor 5 instruction when cp5 disabled"},
{38, "Coprocessor6Disabled",
"Coprocessor 6 instruction when cp6 disabled"},
{39, "Coprocessor7Disabled",
"Coprocessor 7 instruction when cp7 disabled"},
};
/* only need xtensa atm */
static void xtensa_dsp_oops(struct snd_sof_dev *sdev, const char *level, void *oops)
{
struct sof_ipc_dsp_oops_xtensa *xoops = oops;
int i;
dev_printk(level, sdev->dev, "error: DSP Firmware Oops\n");
for (i = 0; i < ARRAY_SIZE(xtensa_exception_causes); i++) {
if (xtensa_exception_causes[i].id == xoops->exccause) {
dev_printk(level, sdev->dev,
"error: Exception Cause: %s, %s\n",
xtensa_exception_causes[i].msg,
xtensa_exception_causes[i].description);
}
}
dev_printk(level, sdev->dev,
"EXCCAUSE 0x%8.8x EXCVADDR 0x%8.8x PS 0x%8.8x SAR 0x%8.8x\n",
xoops->exccause, xoops->excvaddr, xoops->ps, xoops->sar);
dev_printk(level, sdev->dev,
"EPC1 0x%8.8x EPC2 0x%8.8x EPC3 0x%8.8x EPC4 0x%8.8x",
xoops->epc1, xoops->epc2, xoops->epc3, xoops->epc4);
dev_printk(level, sdev->dev,
"EPC5 0x%8.8x EPC6 0x%8.8x EPC7 0x%8.8x DEPC 0x%8.8x",
xoops->epc5, xoops->epc6, xoops->epc7, xoops->depc);
dev_printk(level, sdev->dev,
"EPS2 0x%8.8x EPS3 0x%8.8x EPS4 0x%8.8x EPS5 0x%8.8x",
xoops->eps2, xoops->eps3, xoops->eps4, xoops->eps5);
dev_printk(level, sdev->dev,
"EPS6 0x%8.8x EPS7 0x%8.8x INTENABL 0x%8.8x INTERRU 0x%8.8x",
xoops->eps6, xoops->eps7, xoops->intenable, xoops->interrupt);
}
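/*
* Dump the DSP stack as a hex dump, four 32-bit words per line, starting
* from the stack pointer reported in the oops header.
*/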
static void xtensa_stack(struct snd_sof_dev *sdev, const char *level, void *oops,
u32 *stack, u32 stack_words)
{
struct sof_ipc_dsp_oops_xtensa *xoops = oops;
u32 stack_ptr = xoops->plat_hdr.stackptr;
/* 4 words * 8 hex chars + 3 spaces + 1 terminating NUL */
unsigned char buf[4 * 8 + 3 + 1];
int i;
dev_printk(level, sdev->dev, "stack dump from 0x%8.8x\n", stack_ptr);
/*
* example output:
* 0x0049fbb0: 8000f2d0 0049fc00 6f6c6c61 00632e63
*/
for (i = 0; i < stack_words; i += 4) {
hex_dump_to_buffer(stack + i, 16, 16, 4,
buf, sizeof(buf), false);
dev_printk(level, sdev->dev, "0x%08x: %s\n", stack_ptr + i * 4, buf);
}
}
const struct dsp_arch_ops sof_xtensa_arch_ops = {
.dsp_oops = xtensa_dsp_oops,
.dsp_stack = xtensa_stack,
};
EXPORT_SYMBOL_NS(sof_xtensa_arch_ops, SND_SOC_SOF_XTENSA);
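/*
* Illustrative usage (assumption, not part of this file): a platform driver
* points its snd_sof_dsp_ops at these arch ops, e.g.
*
*   .dsp_arch_ops = &sof_xtensa_arch_ops,
*
* as the MediaTek mt8195/mt8186 ops below do.
*/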
MODULE_DESCRIPTION("SOF Xtensa DSP support");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | sound/soc/sof/xtensa/core.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2022 MediaTek Inc. All rights reserved.
//
// Author: YC Hung <[email protected]>
/*
* Common helpers for the audio DSP on MediaTek platforms
*/
#include <linux/module.h>
#include <sound/sof/xtensa.h>
#include "../ops.h"
#include "mtk-adsp-common.h"
/**
* mtk_adsp_get_registers() - This function is called in case of DSP oops
* in order to gather information about the registers, the filename and
* line number, and the stack.
* @sdev: SOF device
* @xoops: Stores information about registers.
* @panic_info: Stores information about filename and line number.
* @stack: Stores the stack dump.
* @stack_words: Size of the stack dump.
*/
static void mtk_adsp_get_registers(struct snd_sof_dev *sdev,
struct sof_ipc_dsp_oops_xtensa *xoops,
struct sof_ipc_panic_info *panic_info,
u32 *stack, size_t stack_words)
{
u32 offset = sdev->dsp_oops_offset;
/* first read registers */
sof_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
/* then get panic info */
if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
dev_err(sdev->dev, "invalid header size 0x%x\n",
xoops->arch_hdr.totalsize);
return;
}
offset += xoops->arch_hdr.totalsize;
sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
/* then get the stack */
offset += sizeof(*panic_info);
sof_mailbox_read(sdev, offset, stack, stack_words * sizeof(u32));
}
/**
* mtk_adsp_dump() - This function is called when a panic message is
* received from the firmware.
* @sdev: SOF device
* @flags: parameter not used but required by ops prototype
*/
void mtk_adsp_dump(struct snd_sof_dev *sdev, u32 flags)
{
char *level = (flags & SOF_DBG_DUMP_OPTIONAL) ? KERN_DEBUG : KERN_ERR;
struct sof_ipc_dsp_oops_xtensa xoops;
struct sof_ipc_panic_info panic_info = {};
u32 stack[MTK_ADSP_STACK_DUMP_SIZE];
u32 status;
/* Get information about the panic status from the debug box area.
* Compute the trace point based on the status.
*/
sof_mailbox_read(sdev, sdev->debug_box.offset + 0x4, &status, 4);
/* Get information about the registers, the filename and line
* number and the stack.
*/
mtk_adsp_get_registers(sdev, &xoops, &panic_info, stack,
MTK_ADSP_STACK_DUMP_SIZE);
/* Print the information to the console */
sof_print_oops_and_stack(sdev, level, status, status, &xoops, &panic_info,
stack, MTK_ADSP_STACK_DUMP_SIZE);
}
EXPORT_SYMBOL(mtk_adsp_dump);
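/*
* Illustrative usage (assumption, not part of this file): platform drivers
* normally call mtk_adsp_dump() from their own .dbg_dump callback after
* printing platform-specific debug registers, as the mt8195/mt8186 drivers
* below do.
*/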
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | sound/soc/sof/mediatek/mtk-adsp-common.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// Copyright (c) 2021 Mediatek Corporation. All rights reserved.
//
// Author: YC Hung <[email protected]>
//
// Hardware interface for mt8195 DSP code loader
#include <sound/sof.h>
#include "mt8195.h"
#include "../../ops.h"
void sof_hifixdsp_boot_sequence(struct snd_sof_dev *sdev, u32 boot_addr)
{
/* ADSP bootup base */
snd_sof_dsp_write(sdev, DSP_REG_BAR, DSP_ALTRESETVEC, boot_addr);
/* pull high RunStall (set bit3 to 1) */
snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, DSP_RESET_SW,
ADSP_RUNSTALL, ADSP_RUNSTALL);
/* pull high StatVectorSel to use AltResetVec (set bit4 to 1) */
snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, DSP_RESET_SW,
STATVECTOR_SEL, STATVECTOR_SEL);
/* toggle DReset & BReset */
/* pull high DReset & BReset */
snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, DSP_RESET_SW,
ADSP_BRESET_SW | ADSP_DRESET_SW,
ADSP_BRESET_SW | ADSP_DRESET_SW);
/* delay ~1us (10 DSP cycles at 26 MHz), as suggested by the IP vendor */
udelay(1);
/* pull low DReset & BReset */
snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, DSP_RESET_SW,
ADSP_BRESET_SW | ADSP_DRESET_SW,
0);
/* Enable PDebug */
snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, DSP_PDEBUGBUS0,
PDEBUG_ENABLE,
PDEBUG_ENABLE);
/* release RunStall (set bit3 to 0) */
snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, DSP_RESET_SW,
ADSP_RUNSTALL, 0);
}
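/*
* Stall the DSP core and hold it in reset; the boot sequence above releases
* it again on the next run.
*/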
void sof_hifixdsp_shutdown(struct snd_sof_dev *sdev)
{
/* set RunStall high again to stall the core before reset */
snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, DSP_RESET_SW,
ADSP_RUNSTALL, ADSP_RUNSTALL);
/* pull high DReset & BReset */
snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, DSP_RESET_SW,
ADSP_BRESET_SW | ADSP_DRESET_SW,
ADSP_BRESET_SW | ADSP_DRESET_SW);
}
| linux-master | sound/soc/sof/mediatek/mt8195/mt8195-loader.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// Copyright(c) 2021 Mediatek Corporation. All rights reserved.
//
// Author: YC Hung <[email protected]>
//
// Hardware interface for mt8195 DSP clock
#include <linux/clk.h>
#include <linux/io.h>
#include "mt8195.h"
#include "mt8195-clk.h"
#include "../adsp_helper.h"
#include "../../sof-audio.h"
static const char *adsp_clks[ADSP_CLK_MAX] = {
[CLK_TOP_ADSP] = "adsp_sel",
[CLK_TOP_CLK26M] = "clk26m_ck",
[CLK_TOP_AUDIO_LOCAL_BUS] = "audio_local_bus",
[CLK_TOP_MAINPLL_D7_D2] = "mainpll_d7_d2",
[CLK_SCP_ADSP_AUDIODSP] = "scp_adsp_audiodsp",
[CLK_TOP_AUDIO_H] = "audio_h",
};
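/* look up all ADSP clocks by name; the clock handles are devm-managed */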
int mt8195_adsp_init_clock(struct snd_sof_dev *sdev)
{
struct device *dev = sdev->dev;
struct adsp_priv *priv = sdev->pdata->hw_pdata;
int i;
priv->clk = devm_kcalloc(dev, ADSP_CLK_MAX, sizeof(*priv->clk), GFP_KERNEL);
if (!priv->clk)
return -ENOMEM;
for (i = 0; i < ADSP_CLK_MAX; i++) {
priv->clk[i] = devm_clk_get(dev, adsp_clks[i]);
if (IS_ERR(priv->clk[i]))
return PTR_ERR(priv->clk[i]);
}
return 0;
}
static int adsp_enable_all_clock(struct snd_sof_dev *sdev)
{
struct device *dev = sdev->dev;
struct adsp_priv *priv = sdev->pdata->hw_pdata;
int ret;
ret = clk_prepare_enable(priv->clk[CLK_TOP_MAINPLL_D7_D2]);
if (ret) {
dev_err(dev, "%s clk_prepare_enable(mainpll_d7_d2) fail %d\n",
__func__, ret);
return ret;
}
ret = clk_prepare_enable(priv->clk[CLK_TOP_ADSP]);
if (ret) {
dev_err(dev, "%s clk_prepare_enable(adsp_sel) fail %d\n",
__func__, ret);
goto disable_mainpll_d7_d2_clk;
}
ret = clk_prepare_enable(priv->clk[CLK_TOP_AUDIO_LOCAL_BUS]);
if (ret) {
dev_err(dev, "%s clk_prepare_enable(audio_local_bus) fail %d\n",
__func__, ret);
goto disable_dsp_sel_clk;
}
ret = clk_prepare_enable(priv->clk[CLK_SCP_ADSP_AUDIODSP]);
if (ret) {
dev_err(dev, "%s clk_prepare_enable(scp_adsp_audiodsp) fail %d\n",
__func__, ret);
goto disable_audio_local_bus_clk;
}
ret = clk_prepare_enable(priv->clk[CLK_TOP_AUDIO_H]);
if (ret) {
dev_err(dev, "%s clk_prepare_enable(audio_h) fail %d\n",
__func__, ret);
goto disable_scp_adsp_audiodsp_clk;
}
return 0;
disable_scp_adsp_audiodsp_clk:
clk_disable_unprepare(priv->clk[CLK_SCP_ADSP_AUDIODSP]);
disable_audio_local_bus_clk:
clk_disable_unprepare(priv->clk[CLK_TOP_AUDIO_LOCAL_BUS]);
disable_dsp_sel_clk:
clk_disable_unprepare(priv->clk[CLK_TOP_ADSP]);
disable_mainpll_d7_d2_clk:
clk_disable_unprepare(priv->clk[CLK_TOP_MAINPLL_D7_D2]);
return ret;
}
static void adsp_disable_all_clock(struct snd_sof_dev *sdev)
{
struct adsp_priv *priv = sdev->pdata->hw_pdata;
clk_disable_unprepare(priv->clk[CLK_TOP_AUDIO_H]);
clk_disable_unprepare(priv->clk[CLK_SCP_ADSP_AUDIODSP]);
clk_disable_unprepare(priv->clk[CLK_TOP_AUDIO_LOCAL_BUS]);
clk_disable_unprepare(priv->clk[CLK_TOP_ADSP]);
clk_disable_unprepare(priv->clk[CLK_TOP_MAINPLL_D7_D2]);
}
static int adsp_default_clk_init(struct snd_sof_dev *sdev, bool enable)
{
struct device *dev = sdev->dev;
struct adsp_priv *priv = sdev->pdata->hw_pdata;
int ret;
dev_dbg(dev, "%s: %s\n", __func__, enable ? "on" : "off");
if (enable) {
ret = clk_set_parent(priv->clk[CLK_TOP_ADSP],
priv->clk[CLK_TOP_CLK26M]);
if (ret) {
dev_err(dev, "failed to set dsp_sel to clk26m: %d\n", ret);
return ret;
}
ret = clk_set_parent(priv->clk[CLK_TOP_AUDIO_LOCAL_BUS],
priv->clk[CLK_TOP_MAINPLL_D7_D2]);
if (ret) {
dev_err(dev, "set audio_local_bus failed %d\n", ret);
return ret;
}
ret = clk_set_parent(priv->clk[CLK_TOP_AUDIO_H],
priv->clk[CLK_TOP_CLK26M]);
if (ret) {
dev_err(dev, "set audio_h_sel failed %d\n", ret);
return ret;
}
ret = adsp_enable_all_clock(sdev);
if (ret) {
dev_err(dev, "failed to adsp_enable_clock: %d\n", ret);
return ret;
}
} else {
adsp_disable_all_clock(sdev);
}
return 0;
}
int adsp_clock_on(struct snd_sof_dev *sdev)
{
/* turn on the ADSP clocks */
return adsp_default_clk_init(sdev, true);
}
int adsp_clock_off(struct snd_sof_dev *sdev)
{
/* turn off the ADSP clocks */
return adsp_default_clk_init(sdev, false);
}
| linux-master | sound/soc/sof/mediatek/mt8195/mt8195-clk.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// Copyright(c) 2021 Mediatek Inc. All rights reserved.
//
// Author: YC Hung <[email protected]>
//
/*
* Hardware interface for audio DSP on mt8195
*/
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/module.h>
#include <sound/sof.h>
#include <sound/sof/xtensa.h>
#include "../../ops.h"
#include "../../sof-of-dev.h"
#include "../../sof-audio.h"
#include "../adsp_helper.h"
#include "../mtk-adsp-common.h"
#include "mt8195.h"
#include "mt8195-clk.h"
static int mt8195_get_mailbox_offset(struct snd_sof_dev *sdev)
{
return MBOX_OFFSET;
}
static int mt8195_get_window_offset(struct snd_sof_dev *sdev, u32 id)
{
return MBOX_OFFSET;
}
static int mt8195_send_msg(struct snd_sof_dev *sdev,
struct snd_sof_ipc_msg *msg)
{
struct adsp_priv *priv = sdev->pdata->hw_pdata;
sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
msg->msg_size);
return mtk_adsp_ipc_send(priv->dsp_ipc, MTK_ADSP_IPC_REQ, MTK_ADSP_IPC_OP_REQ);
}
static void mt8195_dsp_handle_reply(struct mtk_adsp_ipc *ipc)
{
struct adsp_priv *priv = mtk_adsp_ipc_get_data(ipc);
unsigned long flags;
spin_lock_irqsave(&priv->sdev->ipc_lock, flags);
snd_sof_ipc_process_reply(priv->sdev, 0);
spin_unlock_irqrestore(&priv->sdev->ipc_lock, flags);
}
static void mt8195_dsp_handle_request(struct mtk_adsp_ipc *ipc)
{
struct adsp_priv *priv = mtk_adsp_ipc_get_data(ipc);
u32 p; /* panic code */
int ret;
/* Read the message from the debug box. */
sof_mailbox_read(priv->sdev, priv->sdev->debug_box.offset + 4,
&p, sizeof(p));
/* Check to see if the message is a panic code 0x0dead*** */
if ((p & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
snd_sof_dsp_panic(priv->sdev, p, true);
} else {
snd_sof_ipc_msgs_rx(priv->sdev);
/* tell DSP cmd is done */
ret = mtk_adsp_ipc_send(priv->dsp_ipc, MTK_ADSP_IPC_RSP, MTK_ADSP_IPC_OP_RSP);
if (ret)
dev_err(priv->dev, "request send ipc failed");
}
}
static struct mtk_adsp_ipc_ops dsp_ops = {
.handle_reply = mt8195_dsp_handle_reply,
.handle_request = mt8195_dsp_handle_request,
};
static int platform_parse_resource(struct platform_device *pdev, void *data)
{
struct resource *mmio;
struct resource res;
struct device_node *mem_region;
struct device *dev = &pdev->dev;
struct mtk_adsp_chip_info *adsp = data;
int ret;
mem_region = of_parse_phandle(dev->of_node, "memory-region", 0);
if (!mem_region) {
dev_err(dev, "no dma memory-region phandle\n");
return -ENODEV;
}
ret = of_address_to_resource(mem_region, 0, &res);
of_node_put(mem_region);
if (ret) {
dev_err(dev, "of_address_to_resource dma failed\n");
return ret;
}
dev_dbg(dev, "DMA %pR\n", &res);
adsp->pa_shared_dram = (phys_addr_t)res.start;
adsp->shared_size = resource_size(&res);
if (adsp->pa_shared_dram & DRAM_REMAP_MASK) {
dev_err(dev, "adsp shared dma memory(%#x) is not 4K-aligned\n",
(u32)adsp->pa_shared_dram);
return -EINVAL;
}
ret = of_reserved_mem_device_init(dev);
if (ret) {
dev_err(dev, "of_reserved_mem_device_init failed\n");
return ret;
}
mem_region = of_parse_phandle(dev->of_node, "memory-region", 1);
if (!mem_region) {
dev_err(dev, "no memory-region sysmem phandle\n");
return -ENODEV;
}
ret = of_address_to_resource(mem_region, 0, &res);
of_node_put(mem_region);
if (ret) {
dev_err(dev, "of_address_to_resource sysmem failed\n");
return ret;
}
adsp->pa_dram = (phys_addr_t)res.start;
adsp->dramsize = resource_size(&res);
if (adsp->pa_dram & DRAM_REMAP_MASK) {
dev_err(dev, "adsp memory(%#x) is not 4K-aligned\n",
(u32)adsp->pa_dram);
return -EINVAL;
}
if (adsp->dramsize < TOTAL_SIZE_SHARED_DRAM_FROM_TAIL) {
dev_err(dev, "adsp memory(%#x) is not enough for share\n",
adsp->dramsize);
return -EINVAL;
}
dev_dbg(dev, "dram pbase=%pa, dramsize=%#x\n",
&adsp->pa_dram, adsp->dramsize);
/* Parse CFG base */
mmio = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
if (!mmio) {
dev_err(dev, "no ADSP-CFG register resource\n");
return -ENXIO;
}
/* remap for DSP register accessing */
adsp->va_cfgreg = devm_ioremap_resource(dev, mmio);
if (IS_ERR(adsp->va_cfgreg))
return PTR_ERR(adsp->va_cfgreg);
adsp->pa_cfgreg = (phys_addr_t)mmio->start;
adsp->cfgregsize = resource_size(mmio);
dev_dbg(dev, "cfgreg-vbase=%p, cfgregsize=%#x\n",
adsp->va_cfgreg, adsp->cfgregsize);
/* Parse SRAM */
mmio = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
if (!mmio) {
dev_err(dev, "no SRAM resource\n");
return -ENXIO;
}
adsp->pa_sram = (phys_addr_t)mmio->start;
adsp->sramsize = resource_size(mmio);
dev_dbg(dev, "sram pbase=%pa,%#x\n", &adsp->pa_sram, adsp->sramsize);
return ret;
}
static int adsp_sram_power_on(struct device *dev, bool on)
{
void __iomem *va_dspsysreg;
u32 srampool_con;
va_dspsysreg = ioremap(ADSP_SRAM_POOL_CON, 0x4);
if (!va_dspsysreg) {
dev_err(dev, "failed to ioremap sram pool base %#x\n",
ADSP_SRAM_POOL_CON);
return -ENOMEM;
}
srampool_con = readl(va_dspsysreg);
if (on)
writel(srampool_con & ~DSP_SRAM_POOL_PD_MASK, va_dspsysreg);
else
writel(srampool_con | DSP_SRAM_POOL_PD_MASK, va_dspsysreg);
iounmap(va_dspsysreg);
return 0;
}
/* Init the basic DSP DRAM address */
static int adsp_memory_remap_init(struct device *dev, struct mtk_adsp_chip_info *adsp)
{
void __iomem *vaddr_emi_map;
int offset;
if (!adsp)
return -ENXIO;
vaddr_emi_map = devm_ioremap(dev, DSP_EMI_MAP_ADDR, 0x4);
if (!vaddr_emi_map) {
dev_err(dev, "failed to ioremap emi map base %#x\n",
DSP_EMI_MAP_ADDR);
return -ENOMEM;
}
offset = adsp->pa_dram - DRAM_PHYS_BASE_FROM_DSP_VIEW;
adsp->dram_offset = offset;
offset >>= DRAM_REMAP_SHIFT;
dev_dbg(dev, "adsp->pa_dram %pa, offset %#x\n", &adsp->pa_dram, offset);
writel(offset, vaddr_emi_map);
if (offset != readl(vaddr_emi_map)) {
dev_err(dev, "write emi map fail : %#x\n", readl(vaddr_emi_map));
return -EIO;
}
return 0;
}
static int adsp_shared_base_ioremap(struct platform_device *pdev, void *data)
{
struct device *dev = &pdev->dev;
struct mtk_adsp_chip_info *adsp = data;
/* remap shared-dram base to be non-cacheable */
adsp->shared_dram = devm_ioremap(dev, adsp->pa_shared_dram,
adsp->shared_size);
if (!adsp->shared_dram) {
dev_err(dev, "failed to ioremap base %pa size %#x\n",
adsp->shared_dram, adsp->shared_size);
return -ENOMEM;
}
dev_dbg(dev, "shared-dram vbase=%p, phy addr :%pa, size=%#x\n",
adsp->shared_dram, &adsp->pa_shared_dram, adsp->shared_size);
return 0;
}
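/* start the DSP core; it boots from the SRAM base as seen from the DSP side */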
static int mt8195_run(struct snd_sof_dev *sdev)
{
u32 adsp_bootup_addr;
adsp_bootup_addr = SRAM_PHYS_BASE_FROM_DSP_VIEW;
dev_dbg(sdev->dev, "HIFIxDSP boot from base : 0x%08X\n", adsp_bootup_addr);
sof_hifixdsp_boot_sequence(sdev, adsp_bootup_addr);
return 0;
}
static int mt8195_dsp_probe(struct snd_sof_dev *sdev)
{
struct platform_device *pdev = container_of(sdev->dev, struct platform_device, dev);
struct adsp_priv *priv;
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
sdev->pdata->hw_pdata = priv;
priv->dev = sdev->dev;
priv->sdev = sdev;
priv->adsp = devm_kzalloc(&pdev->dev, sizeof(struct mtk_adsp_chip_info), GFP_KERNEL);
if (!priv->adsp)
return -ENOMEM;
ret = platform_parse_resource(pdev, priv->adsp);
if (ret)
return ret;
ret = mt8195_adsp_init_clock(sdev);
if (ret) {
dev_err(sdev->dev, "mt8195_adsp_init_clock failed\n");
return -EINVAL;
}
ret = adsp_clock_on(sdev);
if (ret) {
dev_err(sdev->dev, "adsp_clock_on fail!\n");
return -EINVAL;
}
ret = adsp_sram_power_on(sdev->dev, true);
if (ret) {
dev_err(sdev->dev, "adsp_sram_power_on fail!\n");
goto exit_clk_disable;
}
ret = adsp_memory_remap_init(&pdev->dev, priv->adsp);
if (ret) {
dev_err(sdev->dev, "adsp_memory_remap_init fail!\n");
goto err_adsp_sram_power_off;
}
sdev->bar[SOF_FW_BLK_TYPE_IRAM] = devm_ioremap(sdev->dev,
priv->adsp->pa_sram,
priv->adsp->sramsize);
if (!sdev->bar[SOF_FW_BLK_TYPE_IRAM]) {
dev_err(sdev->dev, "failed to ioremap base %pa size %#x\n",
&priv->adsp->pa_sram, priv->adsp->sramsize);
ret = -EINVAL;
goto err_adsp_sram_power_off;
}
priv->adsp->va_sram = sdev->bar[SOF_FW_BLK_TYPE_IRAM];
sdev->bar[SOF_FW_BLK_TYPE_SRAM] = devm_ioremap(sdev->dev,
priv->adsp->pa_dram,
priv->adsp->dramsize);
if (!sdev->bar[SOF_FW_BLK_TYPE_SRAM]) {
dev_err(sdev->dev, "failed to ioremap base %pa size %#x\n",
&priv->adsp->pa_dram, priv->adsp->dramsize);
ret = -EINVAL;
goto err_adsp_sram_power_off;
}
priv->adsp->va_dram = sdev->bar[SOF_FW_BLK_TYPE_SRAM];
ret = adsp_shared_base_ioremap(pdev, priv->adsp);
if (ret) {
dev_err(sdev->dev, "adsp_shared_base_ioremap fail!\n");
goto err_adsp_sram_power_off;
}
sdev->bar[DSP_REG_BAR] = priv->adsp->va_cfgreg;
sdev->mmio_bar = SOF_FW_BLK_TYPE_SRAM;
sdev->mailbox_bar = SOF_FW_BLK_TYPE_SRAM;
/* set default mailbox offset for FW ready message */
sdev->dsp_box.offset = mt8195_get_mailbox_offset(sdev);
priv->ipc_dev = platform_device_register_data(&pdev->dev, "mtk-adsp-ipc",
PLATFORM_DEVID_NONE,
pdev, sizeof(*pdev));
if (IS_ERR(priv->ipc_dev)) {
ret = PTR_ERR(priv->ipc_dev);
dev_err(sdev->dev, "failed to register mtk-adsp-ipc device\n");
goto err_adsp_sram_power_off;
}
priv->dsp_ipc = dev_get_drvdata(&priv->ipc_dev->dev);
if (!priv->dsp_ipc) {
ret = -EPROBE_DEFER;
dev_err(sdev->dev, "failed to get drvdata\n");
goto exit_pdev_unregister;
}
mtk_adsp_ipc_set_data(priv->dsp_ipc, priv);
priv->dsp_ipc->ops = &dsp_ops;
return 0;
exit_pdev_unregister:
platform_device_unregister(priv->ipc_dev);
err_adsp_sram_power_off:
adsp_sram_power_on(&pdev->dev, false);
exit_clk_disable:
adsp_clock_off(sdev);
return ret;
}
static int mt8195_dsp_shutdown(struct snd_sof_dev *sdev)
{
return snd_sof_suspend(sdev->dev);
}
static int mt8195_dsp_remove(struct snd_sof_dev *sdev)
{
struct platform_device *pdev = container_of(sdev->dev, struct platform_device, dev);
struct adsp_priv *priv = sdev->pdata->hw_pdata;
platform_device_unregister(priv->ipc_dev);
adsp_sram_power_on(&pdev->dev, false);
adsp_clock_off(sdev);
return 0;
}
static int mt8195_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
struct platform_device *pdev = container_of(sdev->dev, struct platform_device, dev);
int ret;
u32 reset_sw, dbg_pc;
/* wait dsp enter idle, timeout is 1 second */
ret = snd_sof_dsp_read_poll_timeout(sdev, DSP_REG_BAR,
DSP_RESET_SW, reset_sw,
((reset_sw & ADSP_PWAIT) == ADSP_PWAIT),
SUSPEND_DSP_IDLE_POLL_INTERVAL_US,
SUSPEND_DSP_IDLE_TIMEOUT_US);
if (ret < 0) {
dbg_pc = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGPC);
dev_warn(sdev->dev, "dsp not idle, powering off anyway : swrest %#x, pc %#x, ret %d\n",
reset_sw, dbg_pc, ret);
}
/* stall and reset dsp */
sof_hifixdsp_shutdown(sdev);
/* power down adsp sram */
ret = adsp_sram_power_on(&pdev->dev, false);
if (ret) {
dev_err(sdev->dev, "adsp_sram_power_off fail!\n");
return ret;
}
/* turn off adsp clock */
return adsp_clock_off(sdev);
}
static int mt8195_dsp_resume(struct snd_sof_dev *sdev)
{
int ret;
/* turn on adsp clock */
ret = adsp_clock_on(sdev);
if (ret) {
dev_err(sdev->dev, "adsp_clock_on fail!\n");
return ret;
}
/* power on adsp sram */
ret = adsp_sram_power_on(sdev->dev, true);
if (ret)
dev_err(sdev->dev, "adsp_sram_power_on fail!\n");
return ret;
}
/* on mt8195 there is 1 to 1 match between type and BAR idx */
static int mt8195_get_bar_index(struct snd_sof_dev *sdev, u32 type)
{
return type;
}
static int mt8195_pcm_hw_params(struct snd_sof_dev *sdev,
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_sof_platform_stream_params *platform_params)
{
platform_params->cont_update_posn = 1;
return 0;
}
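/* read the firmware-reported stream position and convert host_posn from bytes to frames */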
static snd_pcm_uframes_t mt8195_pcm_pointer(struct snd_sof_dev *sdev,
struct snd_pcm_substream *substream)
{
int ret;
snd_pcm_uframes_t pos;
struct snd_sof_pcm *spcm;
struct sof_ipc_stream_posn posn;
struct snd_sof_pcm_stream *stream;
struct snd_soc_component *scomp = sdev->component;
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
spcm = snd_sof_find_spcm_dai(scomp, rtd);
if (!spcm) {
dev_warn_ratelimited(sdev->dev, "warn: can't find PCM with DAI ID %d\n",
rtd->dai_link->id);
return 0;
}
stream = &spcm->stream[substream->stream];
ret = snd_sof_ipc_msg_data(sdev, stream, &posn, sizeof(posn));
if (ret < 0) {
dev_warn(sdev->dev, "failed to read stream position: %d\n", ret);
return 0;
}
memcpy(&stream->posn, &posn, sizeof(posn));
pos = spcm->stream[substream->stream].posn.host_posn;
pos = bytes_to_frames(substream->runtime, pos);
return pos;
}
static void mt8195_adsp_dump(struct snd_sof_dev *sdev, u32 flags)
{
u32 dbg_pc, dbg_data, dbg_bus0, dbg_bus1, dbg_inst;
u32 dbg_ls0stat, dbg_ls1stat, faultbus, faultinfo, swrest;
/* dump debug registers */
dbg_pc = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGPC);
dbg_data = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGDATA);
dbg_bus0 = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGBUS0);
dbg_bus1 = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGBUS1);
dbg_inst = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGINST);
dbg_ls0stat = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGLS0STAT);
dbg_ls1stat = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGLS1STAT);
faultbus = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PFAULTBUS);
faultinfo = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PFAULTINFO);
swrest = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_RESET_SW);
dev_info(sdev->dev, "adsp dump : pc %#x, data %#x, bus0 %#x, bus1 %#x, swrest %#x",
dbg_pc, dbg_data, dbg_bus0, dbg_bus1, swrest);
dev_info(sdev->dev, "dbg_inst %#x, ls0stat %#x, ls1stat %#x, faultbus %#x, faultinfo %#x",
dbg_inst, dbg_ls0stat, dbg_ls1stat, faultbus, faultinfo);
mtk_adsp_dump(sdev, flags);
}
static struct snd_soc_dai_driver mt8195_dai[] = {
{
.name = "SOF_DL2",
.playback = {
.channels_min = 1,
.channels_max = 2,
},
},
{
.name = "SOF_DL3",
.playback = {
.channels_min = 1,
.channels_max = 2,
},
},
{
.name = "SOF_UL4",
.capture = {
.channels_min = 1,
.channels_max = 2,
},
},
{
.name = "SOF_UL5",
.capture = {
.channels_min = 1,
.channels_max = 2,
},
},
};
/* mt8195 ops */
static struct snd_sof_dsp_ops sof_mt8195_ops = {
/* probe and remove */
.probe = mt8195_dsp_probe,
.remove = mt8195_dsp_remove,
.shutdown = mt8195_dsp_shutdown,
/* DSP core boot */
.run = mt8195_run,
/* Block IO */
.block_read = sof_block_read,
.block_write = sof_block_write,
/* Mailbox IO */
.mailbox_read = sof_mailbox_read,
.mailbox_write = sof_mailbox_write,
/* Register IO */
.write = sof_io_write,
.read = sof_io_read,
.write64 = sof_io_write64,
.read64 = sof_io_read64,
/* ipc */
.send_msg = mt8195_send_msg,
.get_mailbox_offset = mt8195_get_mailbox_offset,
.get_window_offset = mt8195_get_window_offset,
.ipc_msg_data = sof_ipc_msg_data,
.set_stream_data_offset = sof_set_stream_data_offset,
/* misc */
.get_bar_index = mt8195_get_bar_index,
/* stream callbacks */
.pcm_open = sof_stream_pcm_open,
.pcm_hw_params = mt8195_pcm_hw_params,
.pcm_pointer = mt8195_pcm_pointer,
.pcm_close = sof_stream_pcm_close,
/* firmware loading */
.load_firmware = snd_sof_load_firmware_memcpy,
/* Firmware ops */
.dsp_arch_ops = &sof_xtensa_arch_ops,
/* Debug information */
.dbg_dump = mt8195_adsp_dump,
.debugfs_add_region_item = snd_sof_debugfs_add_region_item_iomem,
/* DAI drivers */
.drv = mt8195_dai,
.num_drv = ARRAY_SIZE(mt8195_dai),
/* PM */
.suspend = mt8195_dsp_suspend,
.resume = mt8195_dsp_resume,
/* ALSA HW info flags */
.hw_info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE |
SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
};
static struct snd_sof_of_mach sof_mt8195_machs[] = {
{
.compatible = "google,tomato",
.sof_tplg_filename = "sof-mt8195-mt6359-rt1019-rt5682-dts.tplg"
}, {
.compatible = "mediatek,mt8195",
.sof_tplg_filename = "sof-mt8195.tplg"
}, {
/* sentinel */
}
};
static const struct sof_dev_desc sof_of_mt8195_desc = {
.of_machines = sof_mt8195_machs,
.ipc_supported_mask = BIT(SOF_IPC),
.ipc_default = SOF_IPC,
.default_fw_path = {
[SOF_IPC] = "mediatek/sof",
},
.default_tplg_path = {
[SOF_IPC] = "mediatek/sof-tplg",
},
.default_fw_filename = {
[SOF_IPC] = "sof-mt8195.ri",
},
.nocodec_tplg_filename = "sof-mt8195-nocodec.tplg",
.ops = &sof_mt8195_ops,
.ipc_timeout = 1000,
};
static const struct of_device_id sof_of_mt8195_ids[] = {
{ .compatible = "mediatek,mt8195-dsp", .data = &sof_of_mt8195_desc},
{ }
};
MODULE_DEVICE_TABLE(of, sof_of_mt8195_ids);
/* DT driver definition */
static struct platform_driver snd_sof_of_mt8195_driver = {
.probe = sof_of_probe,
.remove = sof_of_remove,
.shutdown = sof_of_shutdown,
.driver = {
.name = "sof-audio-of-mt8195",
.pm = &sof_of_pm,
.of_match_table = sof_of_mt8195_ids,
},
};
module_platform_driver(snd_sof_of_mt8195_driver);
MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_IMPORT_NS(SND_SOC_SOF_MTK_COMMON);
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | sound/soc/sof/mediatek/mt8195/mt8195.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// Copyright(c) 2022 Mediatek Inc. All rights reserved.
//
// Author: Allen-KH Cheng <[email protected]>
// Tinghan Shen <[email protected]>
/*
* Hardware interface for audio DSP on mt8186
*/
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/module.h>
#include <sound/sof.h>
#include <sound/sof/xtensa.h>
#include "../../ops.h"
#include "../../sof-of-dev.h"
#include "../../sof-audio.h"
#include "../adsp_helper.h"
#include "../mtk-adsp-common.h"
#include "mt8186.h"
#include "mt8186-clk.h"
static int mt8186_get_mailbox_offset(struct snd_sof_dev *sdev)
{
return MBOX_OFFSET;
}
static int mt8186_get_window_offset(struct snd_sof_dev *sdev, u32 id)
{
return MBOX_OFFSET;
}
static int mt8186_send_msg(struct snd_sof_dev *sdev,
struct snd_sof_ipc_msg *msg)
{
struct adsp_priv *priv = sdev->pdata->hw_pdata;
sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
msg->msg_size);
return mtk_adsp_ipc_send(priv->dsp_ipc, MTK_ADSP_IPC_REQ, MTK_ADSP_IPC_OP_REQ);
}
static void mt8186_dsp_handle_reply(struct mtk_adsp_ipc *ipc)
{
struct adsp_priv *priv = mtk_adsp_ipc_get_data(ipc);
unsigned long flags;
spin_lock_irqsave(&priv->sdev->ipc_lock, flags);
snd_sof_ipc_process_reply(priv->sdev, 0);
spin_unlock_irqrestore(&priv->sdev->ipc_lock, flags);
}
static void mt8186_dsp_handle_request(struct mtk_adsp_ipc *ipc)
{
struct adsp_priv *priv = mtk_adsp_ipc_get_data(ipc);
u32 p; /* panic code */
int ret;
/* Read the message from the debug box. */
sof_mailbox_read(priv->sdev, priv->sdev->debug_box.offset + 4,
&p, sizeof(p));
/* Check to see if the message is a panic code 0x0dead*** */
if ((p & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
snd_sof_dsp_panic(priv->sdev, p, true);
} else {
snd_sof_ipc_msgs_rx(priv->sdev);
/* tell DSP cmd is done */
ret = mtk_adsp_ipc_send(priv->dsp_ipc, MTK_ADSP_IPC_RSP, MTK_ADSP_IPC_OP_RSP);
if (ret)
dev_err(priv->dev, "request send ipc failed");
}
}
static struct mtk_adsp_ipc_ops dsp_ops = {
.handle_reply = mt8186_dsp_handle_reply,
.handle_request = mt8186_dsp_handle_request,
};
static int platform_parse_resource(struct platform_device *pdev, void *data)
{
struct resource *mmio;
struct resource res;
struct device_node *mem_region;
struct device *dev = &pdev->dev;
struct mtk_adsp_chip_info *adsp = data;
int ret;
mem_region = of_parse_phandle(dev->of_node, "memory-region", 0);
if (!mem_region) {
dev_err(dev, "no dma memory-region phandle\n");
return -ENODEV;
}
ret = of_address_to_resource(mem_region, 0, &res);
of_node_put(mem_region);
if (ret) {
dev_err(dev, "of_address_to_resource dma failed\n");
return ret;
}
dev_dbg(dev, "DMA %pR\n", &res);
adsp->pa_shared_dram = (phys_addr_t)res.start;
adsp->shared_size = resource_size(&res);
if (adsp->pa_shared_dram & DRAM_REMAP_MASK) {
dev_err(dev, "adsp shared dma memory(%#x) is not 4K-aligned\n",
(u32)adsp->pa_shared_dram);
return -EINVAL;
}
ret = of_reserved_mem_device_init(dev);
if (ret) {
dev_err(dev, "of_reserved_mem_device_init failed\n");
return ret;
}
mem_region = of_parse_phandle(dev->of_node, "memory-region", 1);
if (!mem_region) {
dev_err(dev, "no memory-region sysmem phandle\n");
return -ENODEV;
}
ret = of_address_to_resource(mem_region, 0, &res);
of_node_put(mem_region);
if (ret) {
dev_err(dev, "of_address_to_resource sysmem failed\n");
return ret;
}
adsp->pa_dram = (phys_addr_t)res.start;
if (adsp->pa_dram & DRAM_REMAP_MASK) {
dev_err(dev, "adsp memory(%#x) is not 4K-aligned\n",
(u32)adsp->pa_dram);
return -EINVAL;
}
adsp->dramsize = resource_size(&res);
if (adsp->dramsize < TOTAL_SIZE_SHARED_DRAM_FROM_TAIL) {
dev_err(dev, "adsp memory(%#x) is not enough for share\n",
adsp->dramsize);
return -EINVAL;
}
dev_dbg(dev, "dram pbase=%pa size=%#x\n", &adsp->pa_dram, adsp->dramsize);
mmio = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
if (!mmio) {
dev_err(dev, "no ADSP-CFG register resource\n");
return -ENXIO;
}
adsp->va_cfgreg = devm_ioremap_resource(dev, mmio);
if (IS_ERR(adsp->va_cfgreg))
return PTR_ERR(adsp->va_cfgreg);
adsp->pa_cfgreg = (phys_addr_t)mmio->start;
adsp->cfgregsize = resource_size(mmio);
dev_dbg(dev, "cfgreg pbase=%pa size=%#x\n", &adsp->pa_cfgreg, adsp->cfgregsize);
mmio = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
if (!mmio) {
dev_err(dev, "no SRAM resource\n");
return -ENXIO;
}
adsp->pa_sram = (phys_addr_t)mmio->start;
adsp->sramsize = resource_size(mmio);
dev_dbg(dev, "sram pbase=%pa size=%#x\n", &adsp->pa_sram, adsp->sramsize);
mmio = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sec");
if (!mmio) {
dev_err(dev, "no SEC register resource\n");
return -ENXIO;
}
adsp->va_secreg = devm_ioremap_resource(dev, mmio);
if (IS_ERR(adsp->va_secreg))
return PTR_ERR(adsp->va_secreg);
adsp->pa_secreg = (phys_addr_t)mmio->start;
adsp->secregsize = resource_size(mmio);
dev_dbg(dev, "secreg pbase=%pa size=%#x\n", &adsp->pa_secreg, adsp->secregsize);
mmio = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bus");
if (!mmio) {
dev_err(dev, "no BUS register resource\n");
return -ENXIO;
}
adsp->va_busreg = devm_ioremap_resource(dev, mmio);
if (IS_ERR(adsp->va_busreg))
return PTR_ERR(adsp->va_busreg);
adsp->pa_busreg = (phys_addr_t)mmio->start;
adsp->busregsize = resource_size(mmio);
dev_dbg(dev, "busreg pbase=%pa size=%#x\n", &adsp->pa_busreg, adsp->busregsize);
return 0;
}
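/* ADSP SRAM power is gated via the SRAM pool control register in the bus register block */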
static void adsp_sram_power_on(struct snd_sof_dev *sdev)
{
snd_sof_dsp_update_bits(sdev, DSP_BUSREG_BAR, ADSP_SRAM_POOL_CON,
DSP_SRAM_POOL_PD_MASK, 0);
}
static void adsp_sram_power_off(struct snd_sof_dev *sdev)
{
snd_sof_dsp_update_bits(sdev, DSP_BUSREG_BAR, ADSP_SRAM_POOL_CON,
DSP_SRAM_POOL_PD_MASK, DSP_SRAM_POOL_PD_MASK);
}
/* Init the basic DSP DRAM address */
static int adsp_memory_remap_init(struct snd_sof_dev *sdev, struct mtk_adsp_chip_info *adsp)
{
u32 offset;
offset = adsp->pa_dram - DRAM_PHYS_BASE_FROM_DSP_VIEW;
adsp->dram_offset = offset;
offset >>= DRAM_REMAP_SHIFT;
dev_dbg(sdev->dev, "adsp->pa_dram %pa, offset %#x\n", &adsp->pa_dram, offset);
snd_sof_dsp_write(sdev, DSP_BUSREG_BAR, DSP_C0_EMI_MAP_ADDR, offset);
snd_sof_dsp_write(sdev, DSP_BUSREG_BAR, DSP_C0_DMAEMI_MAP_ADDR, offset);
if (offset != snd_sof_dsp_read(sdev, DSP_BUSREG_BAR, DSP_C0_EMI_MAP_ADDR) ||
offset != snd_sof_dsp_read(sdev, DSP_BUSREG_BAR, DSP_C0_DMAEMI_MAP_ADDR)) {
dev_err(sdev->dev, "emi remap fail\n");
return -EIO;
}
return 0;
}
static int adsp_shared_base_ioremap(struct platform_device *pdev, void *data)
{
struct device *dev = &pdev->dev;
struct mtk_adsp_chip_info *adsp = data;
/* remap shared-dram base to be non-cacheable */
adsp->shared_dram = devm_ioremap(dev, adsp->pa_shared_dram,
adsp->shared_size);
if (!adsp->shared_dram) {
dev_err(dev, "failed to ioremap base %pa size %#x\n",
adsp->shared_dram, adsp->shared_size);
return -ENOMEM;
}
dev_dbg(dev, "shared-dram vbase=%p, phy addr :%pa, size=%#x\n",
adsp->shared_dram, &adsp->pa_shared_dram, adsp->shared_size);
return 0;
}
static int mt8186_run(struct snd_sof_dev *sdev)
{
u32 adsp_bootup_addr;
adsp_bootup_addr = SRAM_PHYS_BASE_FROM_DSP_VIEW;
dev_dbg(sdev->dev, "HIFIxDSP boot from base : 0x%08X\n", adsp_bootup_addr);
mt8186_sof_hifixdsp_boot_sequence(sdev, adsp_bootup_addr);
return 0;
}
static int mt8186_dsp_probe(struct snd_sof_dev *sdev)
{
struct platform_device *pdev = container_of(sdev->dev, struct platform_device, dev);
struct adsp_priv *priv;
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
sdev->pdata->hw_pdata = priv;
priv->dev = sdev->dev;
priv->sdev = sdev;
priv->adsp = devm_kzalloc(&pdev->dev, sizeof(struct mtk_adsp_chip_info), GFP_KERNEL);
if (!priv->adsp)
return -ENOMEM;
ret = platform_parse_resource(pdev, priv->adsp);
if (ret)
return ret;
sdev->bar[SOF_FW_BLK_TYPE_IRAM] = devm_ioremap(sdev->dev,
priv->adsp->pa_sram,
priv->adsp->sramsize);
if (!sdev->bar[SOF_FW_BLK_TYPE_IRAM]) {
dev_err(sdev->dev, "failed to ioremap base %pa size %#x\n",
&priv->adsp->pa_sram, priv->adsp->sramsize);
return -ENOMEM;
}
priv->adsp->va_sram = sdev->bar[SOF_FW_BLK_TYPE_IRAM];
sdev->bar[SOF_FW_BLK_TYPE_SRAM] = devm_ioremap(sdev->dev,
priv->adsp->pa_dram,
priv->adsp->dramsize);
if (!sdev->bar[SOF_FW_BLK_TYPE_SRAM]) {
dev_err(sdev->dev, "failed to ioremap base %pa size %#x\n",
&priv->adsp->pa_dram, priv->adsp->dramsize);
return -ENOMEM;
}
priv->adsp->va_dram = sdev->bar[SOF_FW_BLK_TYPE_SRAM];
ret = adsp_shared_base_ioremap(pdev, priv->adsp);
if (ret) {
dev_err(sdev->dev, "adsp_shared_base_ioremap fail!\n");
return ret;
}
sdev->bar[DSP_REG_BAR] = priv->adsp->va_cfgreg;
sdev->bar[DSP_SECREG_BAR] = priv->adsp->va_secreg;
sdev->bar[DSP_BUSREG_BAR] = priv->adsp->va_busreg;
sdev->mmio_bar = SOF_FW_BLK_TYPE_SRAM;
sdev->mailbox_bar = SOF_FW_BLK_TYPE_SRAM;
/* set default mailbox offset for FW ready message */
sdev->dsp_box.offset = mt8186_get_mailbox_offset(sdev);
ret = adsp_memory_remap_init(sdev, priv->adsp);
if (ret) {
dev_err(sdev->dev, "adsp_memory_remap_init fail!\n");
return ret;
}
/* enable adsp clock before touching registers */
ret = mt8186_adsp_init_clock(sdev);
if (ret) {
dev_err(sdev->dev, "mt8186_adsp_init_clock failed\n");
return ret;
}
ret = mt8186_adsp_clock_on(sdev);
if (ret) {
dev_err(sdev->dev, "mt8186_adsp_clock_on fail!\n");
return ret;
}
adsp_sram_power_on(sdev);
priv->ipc_dev = platform_device_register_data(&pdev->dev, "mtk-adsp-ipc",
PLATFORM_DEVID_NONE,
pdev, sizeof(*pdev));
if (IS_ERR(priv->ipc_dev)) {
ret = PTR_ERR(priv->ipc_dev);
dev_err(sdev->dev, "failed to create mtk-adsp-ipc device\n");
goto err_adsp_off;
}
priv->dsp_ipc = dev_get_drvdata(&priv->ipc_dev->dev);
if (!priv->dsp_ipc) {
ret = -EPROBE_DEFER;
dev_err(sdev->dev, "failed to get drvdata\n");
goto exit_pdev_unregister;
}
mtk_adsp_ipc_set_data(priv->dsp_ipc, priv);
priv->dsp_ipc->ops = &dsp_ops;
return 0;
exit_pdev_unregister:
platform_device_unregister(priv->ipc_dev);
err_adsp_off:
adsp_sram_power_off(sdev);
mt8186_adsp_clock_off(sdev);
return ret;
}
static int mt8186_dsp_remove(struct snd_sof_dev *sdev)
{
struct adsp_priv *priv = sdev->pdata->hw_pdata;
platform_device_unregister(priv->ipc_dev);
mt8186_sof_hifixdsp_shutdown(sdev);
adsp_sram_power_off(sdev);
mt8186_adsp_clock_off(sdev);
return 0;
}
static int mt8186_dsp_shutdown(struct snd_sof_dev *sdev)
{
return snd_sof_suspend(sdev->dev);
}
static int mt8186_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
mt8186_sof_hifixdsp_shutdown(sdev);
adsp_sram_power_off(sdev);
mt8186_adsp_clock_off(sdev);
return 0;
}
static int mt8186_dsp_resume(struct snd_sof_dev *sdev)
{
int ret;
ret = mt8186_adsp_clock_on(sdev);
if (ret) {
dev_err(sdev->dev, "mt8186_adsp_clock_on fail!\n");
return ret;
}
adsp_sram_power_on(sdev);
return ret;
}
/* on mt8186 there is 1 to 1 match between type and BAR idx */
static int mt8186_get_bar_index(struct snd_sof_dev *sdev, u32 type)
{
return type;
}
static int mt8186_pcm_hw_params(struct snd_sof_dev *sdev,
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_sof_platform_stream_params *platform_params)
{
platform_params->cont_update_posn = 1;
return 0;
}
static snd_pcm_uframes_t mt8186_pcm_pointer(struct snd_sof_dev *sdev,
struct snd_pcm_substream *substream)
{
int ret;
snd_pcm_uframes_t pos;
struct snd_sof_pcm *spcm;
struct sof_ipc_stream_posn posn;
struct snd_sof_pcm_stream *stream;
struct snd_soc_component *scomp = sdev->component;
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
spcm = snd_sof_find_spcm_dai(scomp, rtd);
if (!spcm) {
dev_warn_ratelimited(sdev->dev, "warn: can't find PCM with DAI ID %d\n",
rtd->dai_link->id);
return 0;
}
stream = &spcm->stream[substream->stream];
ret = snd_sof_ipc_msg_data(sdev, stream, &posn, sizeof(posn));
if (ret < 0) {
dev_warn(sdev->dev, "failed to read stream position: %d\n", ret);
return 0;
}
memcpy(&stream->posn, &posn, sizeof(posn));
pos = spcm->stream[substream->stream].posn.host_posn;
pos = bytes_to_frames(substream->runtime, pos);
return pos;
}
static void mt8186_adsp_dump(struct snd_sof_dev *sdev, u32 flags)
{
u32 dbg_pc, dbg_data, dbg_inst, dbg_ls0stat, dbg_status, faultinfo;
/* dump debug registers */
dbg_pc = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGPC);
dbg_data = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGDATA);
dbg_inst = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGINST);
dbg_ls0stat = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGLS0STAT);
dbg_status = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PDEBUGSTATUS);
faultinfo = snd_sof_dsp_read(sdev, DSP_REG_BAR, DSP_PFAULTINFO);
dev_info(sdev->dev, "adsp dump : pc %#x, data %#x, dbg_inst %#x,",
dbg_pc, dbg_data, dbg_inst);
dev_info(sdev->dev, "ls0stat %#x, status %#x, faultinfo %#x",
dbg_ls0stat, dbg_status, faultinfo);
mtk_adsp_dump(sdev, flags);
}
static struct snd_soc_dai_driver mt8186_dai[] = {
{
.name = "SOF_DL1",
.playback = {
.channels_min = 1,
.channels_max = 2,
},
},
{
.name = "SOF_DL2",
.playback = {
.channels_min = 1,
.channels_max = 2,
},
},
{
.name = "SOF_UL1",
.capture = {
.channels_min = 1,
.channels_max = 2,
},
},
{
.name = "SOF_UL2",
.capture = {
.channels_min = 1,
.channels_max = 2,
},
},
};
/* mt8186 ops */
static struct snd_sof_dsp_ops sof_mt8186_ops = {
/* probe and remove */
.probe = mt8186_dsp_probe,
.remove = mt8186_dsp_remove,
.shutdown = mt8186_dsp_shutdown,
/* DSP core boot */
.run = mt8186_run,
/* Block IO */
.block_read = sof_block_read,
.block_write = sof_block_write,
/* Mailbox IO */
.mailbox_read = sof_mailbox_read,
.mailbox_write = sof_mailbox_write,
/* Register IO */
.write = sof_io_write,
.read = sof_io_read,
.write64 = sof_io_write64,
.read64 = sof_io_read64,
/* ipc */
.send_msg = mt8186_send_msg,
.get_mailbox_offset = mt8186_get_mailbox_offset,
.get_window_offset = mt8186_get_window_offset,
.ipc_msg_data = sof_ipc_msg_data,
.set_stream_data_offset = sof_set_stream_data_offset,
/* misc */
.get_bar_index = mt8186_get_bar_index,
/* stream callbacks */
.pcm_open = sof_stream_pcm_open,
.pcm_hw_params = mt8186_pcm_hw_params,
.pcm_pointer = mt8186_pcm_pointer,
.pcm_close = sof_stream_pcm_close,
/* firmware loading */
.load_firmware = snd_sof_load_firmware_memcpy,
/* Firmware ops */
.dsp_arch_ops = &sof_xtensa_arch_ops,
/* DAI drivers */
.drv = mt8186_dai,
.num_drv = ARRAY_SIZE(mt8186_dai),
/* Debug information */
.dbg_dump = mt8186_adsp_dump,
.debugfs_add_region_item = snd_sof_debugfs_add_region_item_iomem,
/* PM */
.suspend = mt8186_dsp_suspend,
.resume = mt8186_dsp_resume,
/* ALSA HW info flags */
.hw_info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE |
SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
};
static struct snd_sof_of_mach sof_mt8186_machs[] = {
{
.compatible = "mediatek,mt8186",
.sof_tplg_filename = "sof-mt8186.tplg",
},
{}
};
static const struct sof_dev_desc sof_of_mt8186_desc = {
.of_machines = sof_mt8186_machs,
.ipc_supported_mask = BIT(SOF_IPC),
.ipc_default = SOF_IPC,
.default_fw_path = {
[SOF_IPC] = "mediatek/sof",
},
.default_tplg_path = {
[SOF_IPC] = "mediatek/sof-tplg",
},
.default_fw_filename = {
[SOF_IPC] = "sof-mt8186.ri",
},
.nocodec_tplg_filename = "sof-mt8186-nocodec.tplg",
.ops = &sof_mt8186_ops,
};
/*
* DL2, DL3, UL4 and UL5 are registered as SOF FEs, so create the corresponding
* SOF BEs to complete the pipeline.
*/
static struct snd_soc_dai_driver mt8188_dai[] = {
{
.name = "SOF_DL2",
.playback = {
.channels_min = 1,
.channels_max = 2,
},
},
{
.name = "SOF_DL3",
.playback = {
.channels_min = 1,
.channels_max = 2,
},
},
{
.name = "SOF_UL4",
.capture = {
.channels_min = 1,
.channels_max = 2,
},
},
{
.name = "SOF_UL5",
.capture = {
.channels_min = 1,
.channels_max = 2,
},
},
};
/* mt8188 ops */
static struct snd_sof_dsp_ops sof_mt8188_ops;
static int sof_mt8188_ops_init(struct snd_sof_dev *sdev)
{
/* common defaults */
memcpy(&sof_mt8188_ops, &sof_mt8186_ops, sizeof(sof_mt8188_ops));
sof_mt8188_ops.drv = mt8188_dai;
sof_mt8188_ops.num_drv = ARRAY_SIZE(mt8188_dai);
return 0;
}
static struct snd_sof_of_mach sof_mt8188_machs[] = {
{
.compatible = "mediatek,mt8188",
.sof_tplg_filename = "sof-mt8188.tplg",
},
{}
};
static const struct sof_dev_desc sof_of_mt8188_desc = {
.of_machines = sof_mt8188_machs,
.ipc_supported_mask = BIT(SOF_IPC),
.ipc_default = SOF_IPC,
.default_fw_path = {
[SOF_IPC] = "mediatek/sof",
},
.default_tplg_path = {
[SOF_IPC] = "mediatek/sof-tplg",
},
.default_fw_filename = {
[SOF_IPC] = "sof-mt8188.ri",
},
.nocodec_tplg_filename = "sof-mt8188-nocodec.tplg",
.ops = &sof_mt8188_ops,
.ops_init = sof_mt8188_ops_init,
};
static const struct of_device_id sof_of_mt8186_ids[] = {
{ .compatible = "mediatek,mt8186-dsp", .data = &sof_of_mt8186_desc},
{ .compatible = "mediatek,mt8188-dsp", .data = &sof_of_mt8188_desc},
{ }
};
MODULE_DEVICE_TABLE(of, sof_of_mt8186_ids);
/* DT driver definition */
static struct platform_driver snd_sof_of_mt8186_driver = {
.probe = sof_of_probe,
.remove = sof_of_remove,
.shutdown = sof_of_shutdown,
.driver = {
.name = "sof-audio-of-mt8186",
.pm = &sof_of_pm,
.of_match_table = sof_of_mt8186_ids,
},
};
module_platform_driver(snd_sof_of_mt8186_driver);
MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_IMPORT_NS(SND_SOC_SOF_MTK_COMMON);
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | sound/soc/sof/mediatek/mt8186/mt8186.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// Copyright (c) 2022 Mediatek Corporation. All rights reserved.
//
// Author: Allen-KH Cheng <[email protected]>
// Tinghan Shen <[email protected]>
//
// Hardware interface for mt8186 DSP code loader
#include <sound/sof.h>
#include "mt8186.h"
#include "../../ops.h"
void mt8186_sof_hifixdsp_boot_sequence(struct snd_sof_dev *sdev, u32 boot_addr)
{
/* set RUNSTALL to stop core */
snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, ADSP_HIFI_IO_CONFIG,
RUNSTALL, RUNSTALL);
/* enable mbox 0 & 1 IRQ */
snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, ADSP_MBOX_IRQ_EN,
DSP_MBOX0_IRQ_EN | DSP_MBOX1_IRQ_EN,
DSP_MBOX0_IRQ_EN | DSP_MBOX1_IRQ_EN);
/* set core boot address */
snd_sof_dsp_write(sdev, DSP_SECREG_BAR, ADSP_ALTVEC_C0, boot_addr);
snd_sof_dsp_write(sdev, DSP_SECREG_BAR, ADSP_ALTVECSEL, ADSP_ALTVECSEL_C0);
/* assert core reset */
snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, ADSP_CFGREG_SW_RSTN,
SW_RSTN_C0 | SW_DBG_RSTN_C0,
SW_RSTN_C0 | SW_DBG_RSTN_C0);
/* hardware requirement */
udelay(1);
/* release core reset */
snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, ADSP_CFGREG_SW_RSTN,
SW_RSTN_C0 | SW_DBG_RSTN_C0,
0);
/* clear RUNSTALL (bit31) to start core */
snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, ADSP_HIFI_IO_CONFIG,
RUNSTALL, 0);
}
void mt8186_sof_hifixdsp_shutdown(struct snd_sof_dev *sdev)
{
/* set RUNSTALL to stop core */
snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, ADSP_HIFI_IO_CONFIG,
RUNSTALL, RUNSTALL);
/* assert core reset */
snd_sof_dsp_update_bits(sdev, DSP_REG_BAR, ADSP_CFGREG_SW_RSTN,
SW_RSTN_C0 | SW_DBG_RSTN_C0,
SW_RSTN_C0 | SW_DBG_RSTN_C0);
}
| linux-master | sound/soc/sof/mediatek/mt8186/mt8186-loader.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// Copyright(c) 2022 Mediatek Corporation. All rights reserved.
//
// Author: Allen-KH Cheng <[email protected]>
// Tinghan Shen <[email protected]>
//
// Hardware interface for mt8186 DSP clock
#include <linux/clk.h>
#include <linux/io.h>
#include "../../sof-audio.h"
#include "../../ops.h"
#include "../adsp_helper.h"
#include "mt8186.h"
#include "mt8186-clk.h"
static const char *adsp_clks[ADSP_CLK_MAX] = {
[CLK_TOP_AUDIODSP] = "audiodsp",
[CLK_TOP_ADSP_BUS] = "adsp_bus",
};
int mt8186_adsp_init_clock(struct snd_sof_dev *sdev)
{
struct adsp_priv *priv = sdev->pdata->hw_pdata;
struct device *dev = sdev->dev;
int i;
priv->clk = devm_kcalloc(dev, ADSP_CLK_MAX, sizeof(*priv->clk), GFP_KERNEL);
if (!priv->clk)
return -ENOMEM;
for (i = 0; i < ADSP_CLK_MAX; i++) {
priv->clk[i] = devm_clk_get(dev, adsp_clks[i]);
if (IS_ERR(priv->clk[i]))
return PTR_ERR(priv->clk[i]);
}
return 0;
}
static int adsp_enable_all_clock(struct snd_sof_dev *sdev)
{
struct adsp_priv *priv = sdev->pdata->hw_pdata;
struct device *dev = sdev->dev;
int ret;
ret = clk_prepare_enable(priv->clk[CLK_TOP_AUDIODSP]);
if (ret) {
dev_err(dev, "%s clk_prepare_enable(audiodsp) fail %d\n",
__func__, ret);
return ret;
}
ret = clk_prepare_enable(priv->clk[CLK_TOP_ADSP_BUS]);
if (ret) {
dev_err(dev, "%s clk_prepare_enable(adsp_bus) fail %d\n",
__func__, ret);
clk_disable_unprepare(priv->clk[CLK_TOP_AUDIODSP]);
return ret;
}
return 0;
}
static void adsp_disable_all_clock(struct snd_sof_dev *sdev)
{
struct adsp_priv *priv = sdev->pdata->hw_pdata;
clk_disable_unprepare(priv->clk[CLK_TOP_ADSP_BUS]);
clk_disable_unprepare(priv->clk[CLK_TOP_AUDIODSP]);
}
int mt8186_adsp_clock_on(struct snd_sof_dev *sdev)
{
struct device *dev = sdev->dev;
int ret;
ret = adsp_enable_all_clock(sdev);
if (ret) {
dev_err(dev, "failed to adsp_enable_clock: %d\n", ret);
return ret;
}
snd_sof_dsp_write(sdev, DSP_REG_BAR, ADSP_CK_EN,
UART_EN | DMA_EN | TIMER_EN | COREDBG_EN | CORE_CLK_EN);
snd_sof_dsp_write(sdev, DSP_REG_BAR, ADSP_UART_CTRL,
UART_BCLK_CG | UART_RSTN);
return 0;
}
void mt8186_adsp_clock_off(struct snd_sof_dev *sdev)
{
snd_sof_dsp_write(sdev, DSP_REG_BAR, ADSP_CK_EN, 0);
snd_sof_dsp_write(sdev, DSP_REG_BAR, ADSP_UART_CTRL, 0);
adsp_disable_all_clock(sdev);
}
| linux-master | sound/soc/sof/mediatek/mt8186/mt8186-clk.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2021 Advanced Micro Devices, Inc.
//
// Authors: Ajit Kumar Pandey <[email protected]>
/*
* PCM interface for generic AMD audio ACP DSP block
*/
#include <sound/pcm_params.h>
#include "../ops.h"
#include "acp.h"
#include "acp-dsp-offset.h"
int acp_pcm_hw_params(struct snd_sof_dev *sdev, struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_sof_platform_stream_params *platform_params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct acp_dsp_stream *stream = runtime->private_data;
unsigned int buf_offset, index;
u32 size;
int ret;
size = runtime->dma_bytes;
stream->num_pages = PFN_UP(runtime->dma_bytes);
stream->dmab = substream->runtime->dma_buffer_p;
ret = acp_dsp_stream_config(sdev, stream);
if (ret < 0) {
dev_err(sdev->dev, "stream configuration failed\n");
return ret;
}
platform_params->use_phy_address = true;
platform_params->phy_addr = stream->reg_offset;
platform_params->stream_tag = stream->stream_tag;
platform_params->cont_update_posn = 1;
/* write buffer size of stream in scratch memory */
buf_offset = sdev->debug_box.offset +
offsetof(struct scratch_reg_conf, buf_size);
index = stream->stream_tag - 1;
buf_offset = buf_offset + index * 4;
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + buf_offset, size);
return 0;
}
EXPORT_SYMBOL_NS(acp_pcm_hw_params, SND_SOC_SOF_AMD_COMMON);
int acp_pcm_open(struct snd_sof_dev *sdev, struct snd_pcm_substream *substream)
{
struct acp_dsp_stream *stream;
stream = acp_dsp_stream_get(sdev, 0);
if (!stream)
return -ENODEV;
substream->runtime->private_data = stream;
stream->substream = substream;
return 0;
}
EXPORT_SYMBOL_NS(acp_pcm_open, SND_SOC_SOF_AMD_COMMON);
int acp_pcm_close(struct snd_sof_dev *sdev, struct snd_pcm_substream *substream)
{
struct acp_dsp_stream *stream;
stream = substream->runtime->private_data;
if (!stream) {
dev_err(sdev->dev, "No open stream\n");
return -EINVAL;
}
stream->substream = NULL;
substream->runtime->private_data = NULL;
return acp_dsp_stream_put(sdev, stream);
}
EXPORT_SYMBOL_NS(acp_pcm_close, SND_SOC_SOF_AMD_COMMON);
snd_pcm_uframes_t acp_pcm_pointer(struct snd_sof_dev *sdev,
struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_component *scomp = sdev->component;
struct snd_sof_pcm_stream *stream;
struct sof_ipc_stream_posn posn;
struct snd_sof_pcm *spcm;
snd_pcm_uframes_t pos;
int ret;
spcm = snd_sof_find_spcm_dai(scomp, rtd);
if (!spcm) {
dev_warn_ratelimited(sdev->dev, "warn: can't find PCM with DAI ID %d\n",
rtd->dai_link->id);
return 0;
}
stream = &spcm->stream[substream->stream];
ret = snd_sof_ipc_msg_data(sdev, stream, &posn, sizeof(posn));
if (ret < 0) {
dev_warn(sdev->dev, "failed to read stream position: %d\n", ret);
return 0;
}
memcpy(&stream->posn, &posn, sizeof(posn));
pos = spcm->stream[substream->stream].posn.host_posn;
pos = bytes_to_frames(substream->runtime, pos);
return pos;
}
EXPORT_SYMBOL_NS(acp_pcm_pointer, SND_SOC_SOF_AMD_COMMON);
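/*
* Illustrative usage (assumption, not part of this file): these helpers are
* meant to be wired into a platform's snd_sof_dsp_ops, e.g.
*
*   .pcm_open      = acp_pcm_open,
*   .pcm_hw_params = acp_pcm_hw_params,
*   .pcm_pointer   = acp_pcm_pointer,
*   .pcm_close     = acp_pcm_close,
*
* which is how the common AMD ACP ops structure uses them.
*/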
| linux-master | sound/soc/sof/amd/acp-pcm.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2021 Advanced Micro Devices, Inc.
//
// Authors: Ajit Kumar Pandey <[email protected]>
/*
* Hardware interface for Audio DSP on Renoir platform
*/
#include <linux/platform_device.h>
#include <linux/module.h>
#include "../ops.h"
#include "../sof-audio.h"
#include "acp.h"
#include "acp-dsp-offset.h"
#define I2S_BT_INSTANCE 0
#define I2S_SP_INSTANCE 1
#define PDM_DMIC_INSTANCE 2
#define I2S_SP_VIRTUAL_INSTANCE 3
static struct snd_soc_dai_driver renoir_sof_dai[] = {
[I2S_BT_INSTANCE] = {
.id = I2S_BT_INSTANCE,
.name = "acp-sof-bt",
.playback = {
.rates = SNDRV_PCM_RATE_8000_96000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 2,
.channels_max = 8,
.rate_min = 8000,
.rate_max = 96000,
},
.capture = {
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
/* Supporting only stereo for I2S BT controller capture */
.channels_min = 2,
.channels_max = 2,
.rate_min = 8000,
.rate_max = 48000,
},
},
[I2S_SP_INSTANCE] = {
.id = I2S_SP_INSTANCE,
.name = "acp-sof-sp",
.playback = {
.rates = SNDRV_PCM_RATE_8000_96000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 2,
.channels_max = 8,
.rate_min = 8000,
.rate_max = 96000,
},
.capture = {
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
/* Supporting only stereo for I2S SP controller capture */
.channels_min = 2,
.channels_max = 2,
.rate_min = 8000,
.rate_max = 48000,
},
},
[PDM_DMIC_INSTANCE] = {
.id = PDM_DMIC_INSTANCE,
.name = "acp-sof-dmic",
.capture = {
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 2,
.channels_max = 4,
.rate_min = 8000,
.rate_max = 48000,
},
},
[I2S_SP_VIRTUAL_INSTANCE] = {
.id = I2S_SP_VIRTUAL_INSTANCE,
.name = "acp-sof-sp-virtual",
.playback = {
.rates = SNDRV_PCM_RATE_8000_96000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 2,
.channels_max = 8,
.rate_min = 8000,
.rate_max = 96000,
},
},
};
/* Renoir ops */
struct snd_sof_dsp_ops sof_renoir_ops;
EXPORT_SYMBOL_NS(sof_renoir_ops, SND_SOC_SOF_AMD_COMMON);
int sof_renoir_ops_init(struct snd_sof_dev *sdev)
{
/* common defaults */
memcpy(&sof_renoir_ops, &sof_acp_common_ops, sizeof(struct snd_sof_dsp_ops));
sof_renoir_ops.drv = renoir_sof_dai;
sof_renoir_ops.num_drv = ARRAY_SIZE(renoir_sof_dai);
return 0;
}
MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
MODULE_DESCRIPTION("RENOIR SOF Driver");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | sound/soc/sof/amd/renoir.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2022 Advanced Micro Devices, Inc. All rights reserved.
//
// Authors: Ajit Kumar Pandey <[email protected]>
/*
* PCI interface for Rembrandt ACP device
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <sound/sof.h>
#include <sound/soc-acpi.h>
#include "../ops.h"
#include "../sof-pci-dev.h"
#include "../../amd/mach-config.h"
#include "acp.h"
#include "acp-dsp-offset.h"
#define ACP6x_REG_START 0x1240000
#define ACP6x_REG_END 0x125C000
#define ACP6X_FUTURE_REG_ACLK_0 0x1854
static const struct sof_amd_acp_desc rembrandt_chip_info = {
.rev = 6,
.host_bridge_id = HOST_BRIDGE_RMB,
.pgfsm_base = ACP6X_PGFSM_BASE,
.ext_intr_stat = ACP6X_EXT_INTR_STAT,
.dsp_intr_base = ACP6X_DSP_SW_INTR_BASE,
.sram_pte_offset = ACP6X_SRAM_PTE_OFFSET,
.hw_semaphore_offset = ACP6X_AXI2DAGB_SEM_0,
.acp_clkmux_sel = ACP6X_CLKMUX_SEL,
.fusion_dsp_offset = ACP6X_DSP_FUSION_RUNSTALL,
.probe_reg_offset = ACP6X_FUTURE_REG_ACLK_0,
};
static const struct sof_dev_desc rembrandt_desc = {
.machines = snd_soc_acpi_amd_rmb_sof_machines,
.resindex_lpe_base = 0,
.resindex_pcicfg_base = -1,
.resindex_imr_base = -1,
.irqindex_host_ipc = -1,
.chip_info = &rembrandt_chip_info,
.ipc_supported_mask = BIT(SOF_IPC),
.ipc_default = SOF_IPC,
.default_fw_path = {
[SOF_IPC] = "amd/sof",
},
.default_tplg_path = {
[SOF_IPC] = "amd/sof-tplg",
},
.default_fw_filename = {
[SOF_IPC] = "sof-rmb.ri",
},
.nocodec_tplg_filename = "sof-acp.tplg",
.ops = &sof_rembrandt_ops,
.ops_init = sof_rembrandt_ops_init,
};
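/* bind only to Rembrandt-revision ACP devices whose machine config selects SOF */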
static int acp_pci_rmb_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
unsigned int flag;
if (pci->revision != ACP_RMB_PCI_ID)
return -ENODEV;
flag = snd_amd_acp_find_config(pci);
if (flag != FLAG_AMD_SOF && flag != FLAG_AMD_SOF_ONLY_DMIC)
return -ENODEV;
return sof_pci_probe(pci, pci_id);
};
static void acp_pci_rmb_remove(struct pci_dev *pci)
{
sof_pci_remove(pci);
}
/* PCI IDs */
static const struct pci_device_id rmb_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, ACP_PCI_DEV_ID),
.driver_data = (unsigned long)&rembrandt_desc},
{ 0, }
};
MODULE_DEVICE_TABLE(pci, rmb_pci_ids);
/* pci_driver definition */
static struct pci_driver snd_sof_pci_amd_rmb_driver = {
.name = KBUILD_MODNAME,
.id_table = rmb_pci_ids,
.probe = acp_pci_rmb_probe,
.remove = acp_pci_rmb_remove,
};
module_pci_driver(snd_sof_pci_amd_rmb_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
| linux-master | sound/soc/sof/amd/pci-rmb.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2021 Advanced Micro Devices, Inc. All rights reserved.
//
// Authors: Vishnuvardhanrao Ravuapati <[email protected]>
// V Sujith Kumar Reddy <[email protected]>
/* This file supports the host trace logger driver callback for SOF firmware */
#include "acp.h"
#define ACP_LOGGER_STREAM 8
#define NUM_PAGES 16
int acp_sof_trace_release(struct snd_sof_dev *sdev)
{
struct acp_dsp_stream *stream;
struct acp_dev_data *adata;
int ret;
adata = sdev->pdata->hw_pdata;
stream = adata->dtrace_stream;
ret = acp_dsp_stream_put(sdev, stream);
if (ret < 0) {
dev_err(sdev->dev, "Failed to release trace stream\n");
return ret;
}
adata->dtrace_stream = NULL;
return 0;
}
EXPORT_SYMBOL_NS(acp_sof_trace_release, SND_SOC_SOF_AMD_COMMON);
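/*
 * DMA trace uses a dedicated ACP stream (tag ACP_LOGGER_STREAM) backed by a
 * NUM_PAGES page buffer; the stream tag and its ATU window offset are passed
 * back to the SOF core as the trace stream parameters.
 */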
int acp_sof_trace_init(struct snd_sof_dev *sdev, struct snd_dma_buffer *dmab,
struct sof_ipc_dma_trace_params_ext *dtrace_params)
{
struct acp_dsp_stream *stream;
struct acp_dev_data *adata;
int ret;
adata = sdev->pdata->hw_pdata;
stream = acp_dsp_stream_get(sdev, ACP_LOGGER_STREAM);
if (!stream)
return -ENODEV;
stream->dmab = dmab;
stream->num_pages = NUM_PAGES;
ret = acp_dsp_stream_config(sdev, stream);
if (ret < 0) {
acp_dsp_stream_put(sdev, stream);
return ret;
}
adata->dtrace_stream = stream;
dtrace_params->stream_tag = stream->stream_tag;
dtrace_params->buffer.phy_addr = stream->reg_offset;
return 0;
}
EXPORT_SYMBOL_NS(acp_sof_trace_init, SND_SOC_SOF_AMD_COMMON);
| linux-master | sound/soc/sof/amd/acp-trace.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2021 Advanced Micro Devices, Inc.
//
// Authors: Ajit Kumar Pandey <[email protected]>
/*
* Hardware interface for generic AMD audio DSP ACP IP
*/
#include "../ops.h"
#include "acp-dsp-offset.h"
#include "acp.h"
#define PTE_GRP1_OFFSET 0x00000000
#define PTE_GRP2_OFFSET 0x00800000
#define PTE_GRP3_OFFSET 0x01000000
#define PTE_GRP4_OFFSET 0x01800000
#define PTE_GRP5_OFFSET 0x02000000
#define PTE_GRP6_OFFSET 0x02800000
#define PTE_GRP7_OFFSET 0x03000000
#define PTE_GRP8_OFFSET 0x03800000
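/*
 * Each ACP stream tag (1-8) maps to a dedicated ATU group. The group's page
 * table entries live in the scratch SRAM area described by struct
 * scratch_reg_conf, one 8-byte (low/high address) entry per 4 KB page of the
 * stream's DMA buffer.
 */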
int acp_dsp_stream_config(struct snd_sof_dev *sdev, struct acp_dsp_stream *stream)
{
const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
unsigned int pte_reg, pte_size, phy_addr_offset, index;
int stream_tag = stream->stream_tag;
u32 low, high, offset, reg_val;
dma_addr_t addr;
int page_idx;
switch (stream_tag) {
case 1:
pte_reg = ACPAXI2AXI_ATU_BASE_ADDR_GRP_1;
pte_size = ACPAXI2AXI_ATU_PAGE_SIZE_GRP_1;
offset = offsetof(struct scratch_reg_conf, grp1_pte);
stream->reg_offset = PTE_GRP1_OFFSET;
break;
case 2:
pte_reg = ACPAXI2AXI_ATU_BASE_ADDR_GRP_2;
pte_size = ACPAXI2AXI_ATU_PAGE_SIZE_GRP_2;
offset = offsetof(struct scratch_reg_conf, grp2_pte);
stream->reg_offset = PTE_GRP2_OFFSET;
break;
case 3:
pte_reg = ACPAXI2AXI_ATU_BASE_ADDR_GRP_3;
pte_size = ACPAXI2AXI_ATU_PAGE_SIZE_GRP_3;
offset = offsetof(struct scratch_reg_conf, grp3_pte);
stream->reg_offset = PTE_GRP3_OFFSET;
break;
case 4:
pte_reg = ACPAXI2AXI_ATU_BASE_ADDR_GRP_4;
pte_size = ACPAXI2AXI_ATU_PAGE_SIZE_GRP_4;
offset = offsetof(struct scratch_reg_conf, grp4_pte);
stream->reg_offset = PTE_GRP4_OFFSET;
break;
case 5:
pte_reg = ACPAXI2AXI_ATU_BASE_ADDR_GRP_5;
pte_size = ACPAXI2AXI_ATU_PAGE_SIZE_GRP_5;
offset = offsetof(struct scratch_reg_conf, grp5_pte);
stream->reg_offset = PTE_GRP5_OFFSET;
break;
case 6:
pte_reg = ACPAXI2AXI_ATU_BASE_ADDR_GRP_6;
pte_size = ACPAXI2AXI_ATU_PAGE_SIZE_GRP_6;
offset = offsetof(struct scratch_reg_conf, grp6_pte);
stream->reg_offset = PTE_GRP6_OFFSET;
break;
case 7:
pte_reg = ACPAXI2AXI_ATU_BASE_ADDR_GRP_7;
pte_size = ACPAXI2AXI_ATU_PAGE_SIZE_GRP_7;
offset = offsetof(struct scratch_reg_conf, grp7_pte);
stream->reg_offset = PTE_GRP7_OFFSET;
break;
case 8:
pte_reg = ACPAXI2AXI_ATU_BASE_ADDR_GRP_8;
pte_size = ACPAXI2AXI_ATU_PAGE_SIZE_GRP_8;
offset = offsetof(struct scratch_reg_conf, grp8_pte);
stream->reg_offset = PTE_GRP8_OFFSET;
break;
default:
dev_err(sdev->dev, "Invalid stream tag %d\n", stream_tag);
return -EINVAL;
}
/* write phy_addr in scratch memory */
phy_addr_offset = sdev->debug_box.offset +
offsetof(struct scratch_reg_conf, reg_offset);
index = stream_tag - 1;
phy_addr_offset = phy_addr_offset + index * 4;
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 +
phy_addr_offset, stream->reg_offset);
/* Group Enable */
offset = offset + sdev->debug_box.offset;
reg_val = desc->sram_pte_offset + offset;
snd_sof_dsp_write(sdev, ACP_DSP_BAR, pte_reg, reg_val | BIT(31));
snd_sof_dsp_write(sdev, ACP_DSP_BAR, pte_size, PAGE_SIZE_4K_ENABLE);
for (page_idx = 0; page_idx < stream->num_pages; page_idx++) {
addr = snd_sgbuf_get_addr(stream->dmab, page_idx * PAGE_SIZE);
		/* Load the low address of the page into ACP SRAM through SRBM */
low = lower_32_bits(addr);
high = upper_32_bits(addr);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + offset, low);
high |= BIT(31);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + offset + 4, high);
/* Move to next physically contiguous page */
offset += 8;
}
/* Flush ATU Cache after PTE Update */
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACPAXI2AXI_ATU_CTRL, ACP_ATU_CACHE_INVALID);
return 0;
}
struct acp_dsp_stream *acp_dsp_stream_get(struct snd_sof_dev *sdev, int tag)
{
struct acp_dev_data *adata = sdev->pdata->hw_pdata;
struct acp_dsp_stream *stream = adata->stream_buf;
int i;
for (i = 0; i < ACP_MAX_STREAM; i++, stream++) {
if (stream->active)
continue;
		/* return the first free stream if no tag is specified */
if (!tag) {
stream->active = 1;
return stream;
}
/* check if this is the requested stream tag */
if (stream->stream_tag == tag) {
stream->active = 1;
return stream;
}
}
dev_err(sdev->dev, "stream %d active or no inactive stream\n", tag);
return NULL;
}
EXPORT_SYMBOL_NS(acp_dsp_stream_get, SND_SOC_SOF_AMD_COMMON);
int acp_dsp_stream_put(struct snd_sof_dev *sdev,
struct acp_dsp_stream *acp_stream)
{
struct acp_dev_data *adata = sdev->pdata->hw_pdata;
struct acp_dsp_stream *stream = adata->stream_buf;
int i;
/* Free an active stream */
for (i = 0; i < ACP_MAX_STREAM; i++, stream++) {
if (stream == acp_stream) {
stream->active = 0;
return 0;
}
}
dev_err(sdev->dev, "Cannot find active stream tag %d\n", acp_stream->stream_tag);
return -EINVAL;
}
EXPORT_SYMBOL_NS(acp_dsp_stream_put, SND_SOC_SOF_AMD_COMMON);
int acp_dsp_stream_init(struct snd_sof_dev *sdev)
{
struct acp_dev_data *adata = sdev->pdata->hw_pdata;
int i;
for (i = 0; i < ACP_MAX_STREAM; i++) {
adata->stream_buf[i].sdev = sdev;
adata->stream_buf[i].active = 0;
adata->stream_buf[i].stream_tag = i + 1;
}
return 0;
}
EXPORT_SYMBOL_NS(acp_dsp_stream_init, SND_SOC_SOF_AMD_COMMON);
| linux-master | sound/soc/sof/amd/acp-stream.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2021 Advanced Micro Devices, Inc. All rights reserved.
//
// Authors: Ajit Kumar Pandey <[email protected]>
/*
* PCI interface for Renoir ACP device
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <sound/sof.h>
#include <sound/soc-acpi.h>
#include "../ops.h"
#include "../sof-pci-dev.h"
#include "../../amd/mach-config.h"
#include "acp.h"
#include "acp-dsp-offset.h"
#define ACP3x_REG_START 0x1240000
#define ACP3x_REG_END 0x125C000
#define ACP3X_FUTURE_REG_ACLK_0 0x1860
static const struct sof_amd_acp_desc renoir_chip_info = {
.rev = 3,
.host_bridge_id = HOST_BRIDGE_CZN,
.pgfsm_base = ACP3X_PGFSM_BASE,
.ext_intr_stat = ACP3X_EXT_INTR_STAT,
.dsp_intr_base = ACP3X_DSP_SW_INTR_BASE,
.sram_pte_offset = ACP3X_SRAM_PTE_OFFSET,
.hw_semaphore_offset = ACP3X_AXI2DAGB_SEM_0,
.acp_clkmux_sel = ACP3X_CLKMUX_SEL,
.probe_reg_offset = ACP3X_FUTURE_REG_ACLK_0,
};
static const struct sof_dev_desc renoir_desc = {
.machines = snd_soc_acpi_amd_sof_machines,
.use_acpi_target_states = true,
.resindex_lpe_base = 0,
.resindex_pcicfg_base = -1,
.resindex_imr_base = -1,
.irqindex_host_ipc = -1,
.chip_info = &renoir_chip_info,
.ipc_supported_mask = BIT(SOF_IPC),
.ipc_default = SOF_IPC,
.default_fw_path = {
[SOF_IPC] = "amd/sof",
},
.default_tplg_path = {
[SOF_IPC] = "amd/sof-tplg",
},
.default_fw_filename = {
[SOF_IPC] = "sof-rn.ri",
},
.nocodec_tplg_filename = "sof-acp.tplg",
.ops = &sof_renoir_ops,
.ops_init = sof_renoir_ops_init,
};
static int acp_pci_rn_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
unsigned int flag;
if (pci->revision != ACP_RN_PCI_ID)
return -ENODEV;
flag = snd_amd_acp_find_config(pci);
if (flag != FLAG_AMD_SOF && flag != FLAG_AMD_SOF_ONLY_DMIC)
return -ENODEV;
return sof_pci_probe(pci, pci_id);
}
static void acp_pci_rn_remove(struct pci_dev *pci)
{
	sof_pci_remove(pci);
}
/* PCI IDs */
static const struct pci_device_id rn_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, ACP_PCI_DEV_ID),
.driver_data = (unsigned long)&renoir_desc},
{ 0, }
};
MODULE_DEVICE_TABLE(pci, rn_pci_ids);
/* pci_driver definition */
static struct pci_driver snd_sof_pci_amd_rn_driver = {
.name = KBUILD_MODNAME,
.id_table = rn_pci_ids,
.probe = acp_pci_rn_probe,
.remove = acp_pci_rn_remove,
.driver = {
.pm = &sof_pci_pm,
},
};
module_pci_driver(snd_sof_pci_amd_rn_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
| linux-master | sound/soc/sof/amd/pci-rn.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2021, 2023 Advanced Micro Devices, Inc. All rights reserved.
//
// Authors: Vijendar Mukunda <[email protected]>
// Ajit Kumar Pandey <[email protected]>
/*
* Hardware interface for generic AMD ACP processor
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "../ops.h"
#include "acp.h"
#include "acp-dsp-offset.h"
#define SECURED_FIRMWARE 1
static bool enable_fw_debug;
module_param(enable_fw_debug, bool, 0444);
MODULE_PARM_DESC(enable_fw_debug, "Enable Firmware debug");
const struct dmi_system_id acp_sof_quirk_table[] = {
{
/* Valve Jupiter device */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Valve"),
DMI_MATCH(DMI_PRODUCT_NAME, "Galileo"),
DMI_MATCH(DMI_PRODUCT_FAMILY, "Sephiroth"),
},
.driver_data = (void *)SECURED_FIRMWARE,
},
{}
};
EXPORT_SYMBOL_GPL(acp_sof_quirk_table);
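/*
 * SMN (System Management Network) registers are accessed indirectly through
 * the host bridge PCI config space: the SMN address is written to config
 * offset 0x60 and the data is read or written at config offset 0x64.
 */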
static int smn_write(struct pci_dev *dev, u32 smn_addr, u32 data)
{
pci_write_config_dword(dev, 0x60, smn_addr);
pci_write_config_dword(dev, 0x64, data);
return 0;
}
static int smn_read(struct pci_dev *dev, u32 smn_addr)
{
u32 data = 0;
pci_write_config_dword(dev, 0x60, smn_addr);
pci_read_config_dword(dev, 0x64, &data);
return data;
}
static void init_dma_descriptor(struct acp_dev_data *adata)
{
struct snd_sof_dev *sdev = adata->dev;
const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
unsigned int addr;
addr = desc->sram_pte_offset + sdev->debug_box.offset +
offsetof(struct scratch_reg_conf, dma_desc);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DESC_BASE_ADDR, addr);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DESC_MAX_NUM_DSCR, ACP_MAX_DESC_CNT);
}
static void configure_dma_descriptor(struct acp_dev_data *adata, unsigned short idx,
struct dma_descriptor *dscr_info)
{
struct snd_sof_dev *sdev = adata->dev;
unsigned int offset;
offset = ACP_SCRATCH_REG_0 + sdev->debug_box.offset +
offsetof(struct scratch_reg_conf, dma_desc) +
idx * sizeof(struct dma_descriptor);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset, dscr_info->src_addr);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset + 0x4, dscr_info->dest_addr);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, offset + 0x8, dscr_info->tx_cnt.u32_all);
}
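/*
 * Program a DMA channel: reset the channel, wait for the reset to complete,
 * then set the descriptor count and start index before setting the RUN bit.
 */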
static int config_dma_channel(struct acp_dev_data *adata, unsigned int ch,
unsigned int idx, unsigned int dscr_count)
{
struct snd_sof_dev *sdev = adata->dev;
unsigned int val, status;
int ret;
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32),
ACP_DMA_CH_RST | ACP_DMA_CH_GRACEFUL_RST_EN);
ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_DMA_CH_RST_STS, val,
val & (1 << ch), ACP_REG_POLL_INTERVAL,
ACP_REG_POLL_TIMEOUT_US);
if (ret < 0) {
status = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_ERROR_STATUS);
val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DMA_ERR_STS_0 + ch * sizeof(u32));
dev_err(sdev->dev, "ACP_DMA_ERR_STS :0x%x ACP_ERROR_STATUS :0x%x\n", val, status);
return ret;
}
snd_sof_dsp_write(sdev, ACP_DSP_BAR, (ACP_DMA_CNTL_0 + ch * sizeof(u32)), 0);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DSCR_CNT_0 + ch * sizeof(u32), dscr_count);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_DSCR_STRT_IDX_0 + ch * sizeof(u32), idx);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_PRIO_0 + ch * sizeof(u32), 0);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32), ACP_DMA_CH_RUN);
return ret;
}
static int acpbus_dma_start(struct acp_dev_data *adata, unsigned int ch,
unsigned int dscr_count, struct dma_descriptor *dscr_info)
{
struct snd_sof_dev *sdev = adata->dev;
int ret;
u16 dscr;
if (!dscr_info || !dscr_count)
return -EINVAL;
for (dscr = 0; dscr < dscr_count; dscr++)
configure_dma_descriptor(adata, dscr, dscr_info++);
ret = config_dma_channel(adata, ch, 0, dscr_count);
if (ret < 0)
dev_err(sdev->dev, "config dma ch failed:%d\n", ret);
return ret;
}
int configure_and_run_dma(struct acp_dev_data *adata, unsigned int src_addr,
unsigned int dest_addr, int dsp_data_size)
{
struct snd_sof_dev *sdev = adata->dev;
unsigned int desc_count, index;
int ret;
for (desc_count = 0; desc_count < ACP_MAX_DESC && dsp_data_size >= 0;
desc_count++, dsp_data_size -= ACP_PAGE_SIZE) {
adata->dscr_info[desc_count].src_addr = src_addr + desc_count * ACP_PAGE_SIZE;
adata->dscr_info[desc_count].dest_addr = dest_addr + desc_count * ACP_PAGE_SIZE;
adata->dscr_info[desc_count].tx_cnt.bits.count = ACP_PAGE_SIZE;
if (dsp_data_size < ACP_PAGE_SIZE)
adata->dscr_info[desc_count].tx_cnt.bits.count = dsp_data_size;
}
ret = acpbus_dma_start(adata, 0, desc_count, adata->dscr_info);
if (ret)
dev_err(sdev->dev, "acpbus_dma_start failed\n");
/* Clear descriptor array */
for (index = 0; index < desc_count; index++)
memset(&adata->dscr_info[index], 0x00, sizeof(struct dma_descriptor));
return ret;
}
/*
 * psp_mbox_ready - poll the ready bit of the PSP mailbox
 * @adata: acp device data
 * @ack: true when polling for the PSP acknowledgement, false when checking
 *	 mailbox readiness before sending a command
*/
static int psp_mbox_ready(struct acp_dev_data *adata, bool ack)
{
struct snd_sof_dev *sdev = adata->dev;
int ret;
u32 data;
ret = read_poll_timeout(smn_read, data, data & MBOX_READY_MASK, MBOX_DELAY_US,
ACP_PSP_TIMEOUT_US, false, adata->smn_dev, MP0_C2PMSG_114_REG);
if (!ret)
return 0;
dev_err(sdev->dev, "PSP error status %x\n", data & MBOX_STATUS_MASK);
if (ack)
return -ETIMEDOUT;
return -EBUSY;
}
/*
 * psp_send_cmd - send a PSP command over the mailbox
 * @adata: acp device data
 * @cmd: non-zero command value
*/
static int psp_send_cmd(struct acp_dev_data *adata, int cmd)
{
struct snd_sof_dev *sdev = adata->dev;
int ret;
u32 data;
if (!cmd)
return -EINVAL;
/* Get a non-zero Doorbell value from PSP */
ret = read_poll_timeout(smn_read, data, data, MBOX_DELAY_US, ACP_PSP_TIMEOUT_US, false,
adata->smn_dev, MP0_C2PMSG_73_REG);
if (ret) {
dev_err(sdev->dev, "Failed to get Doorbell from MBOX %x\n", MP0_C2PMSG_73_REG);
return ret;
}
/* Check if PSP is ready for new command */
ret = psp_mbox_ready(adata, 0);
if (ret)
return ret;
smn_write(adata->smn_dev, MP0_C2PMSG_114_REG, cmd);
/* Ring the Doorbell for PSP */
smn_write(adata->smn_dev, MP0_C2PMSG_73_REG, data);
/* Check MBOX ready as PSP ack */
ret = psp_mbox_ready(adata, 1);
return ret;
}
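/*
 * Stream the firmware image through the SHA DMA engine so it can be
 * authenticated: program the source, destination and length, start the
 * transfer, wait for the full image length to be consumed and then poll the
 * firmware qualifier register until the DSP is cleared to run. On Renoir
 * (rev 3) the PSP is additionally notified through the SMN mailbox.
 */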
int configure_and_run_sha_dma(struct acp_dev_data *adata, void *image_addr,
unsigned int start_addr, unsigned int dest_addr,
unsigned int image_length)
{
struct snd_sof_dev *sdev = adata->dev;
const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
unsigned int tx_count, fw_qualifier, val;
int ret;
if (!image_addr) {
dev_err(sdev->dev, "SHA DMA image address is NULL\n");
return -EINVAL;
}
val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD);
if (val & ACP_SHA_RUN) {
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD, ACP_SHA_RESET);
ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD_STS,
val, val & ACP_SHA_RESET,
ACP_REG_POLL_INTERVAL,
ACP_REG_POLL_TIMEOUT_US);
if (ret < 0) {
dev_err(sdev->dev, "SHA DMA Failed to Reset\n");
return ret;
}
}
if (adata->signed_fw_image)
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_INCLUDE_HDR, ACP_SHA_HEADER);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_STRT_ADDR, start_addr);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_DESTINATION_ADDR, dest_addr);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_MSG_LENGTH, image_length);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SHA_DMA_CMD, ACP_SHA_RUN);
ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_TRANSFER_BYTE_CNT,
tx_count, tx_count == image_length,
ACP_REG_POLL_INTERVAL, ACP_DMA_COMPLETE_TIMEOUT_US);
if (ret < 0) {
dev_err(sdev->dev, "SHA DMA Failed to Transfer Length %x\n", tx_count);
return ret;
}
	/* psp_send_cmd is only required for the Renoir platform (rev 3) */
if (desc->rev == 3) {
ret = psp_send_cmd(adata, MBOX_ACP_SHA_DMA_COMMAND);
if (ret)
return ret;
}
ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SHA_DSP_FW_QUALIFIER,
fw_qualifier, fw_qualifier & DSP_FW_RUN_ENABLE,
ACP_REG_POLL_INTERVAL, ACP_DMA_COMPLETE_TIMEOUT_US);
if (ret < 0) {
dev_err(sdev->dev, "PSP validation failed\n");
return ret;
}
return 0;
}
int acp_dma_status(struct acp_dev_data *adata, unsigned char ch)
{
struct snd_sof_dev *sdev = adata->dev;
unsigned int val;
int ret = 0;
val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32));
if (val & ACP_DMA_CH_RUN) {
ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_DMA_CH_STS, val, !val,
ACP_REG_POLL_INTERVAL,
ACP_DMA_COMPLETE_TIMEOUT_US);
if (ret < 0)
dev_err(sdev->dev, "DMA_CHANNEL %d status timeout\n", ch);
}
return ret;
}
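/*
 * Scratch memory is accessed as 32-bit MMIO registers, so copies to and from
 * it are done one dword at a time.
 */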
void memcpy_from_scratch(struct snd_sof_dev *sdev, u32 offset, unsigned int *dst, size_t bytes)
{
unsigned int reg_offset = offset + ACP_SCRATCH_REG_0;
int i, j;
for (i = 0, j = 0; i < bytes; i = i + 4, j++)
dst[j] = snd_sof_dsp_read(sdev, ACP_DSP_BAR, reg_offset + i);
}
void memcpy_to_scratch(struct snd_sof_dev *sdev, u32 offset, unsigned int *src, size_t bytes)
{
unsigned int reg_offset = offset + ACP_SCRATCH_REG_0;
int i, j;
for (i = 0, j = 0; i < bytes; i = i + 4, j++)
snd_sof_dsp_write(sdev, ACP_DSP_BAR, reg_offset + i, src[j]);
}
static int acp_memory_init(struct snd_sof_dev *sdev)
{
struct acp_dev_data *adata = sdev->pdata->hw_pdata;
const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
snd_sof_dsp_update_bits(sdev, ACP_DSP_BAR, desc->dsp_intr_base + DSP_SW_INTR_CNTL_OFFSET,
ACP_DSP_INTR_EN_MASK, ACP_DSP_INTR_EN_MASK);
init_dma_descriptor(adata);
return 0;
}
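/*
 * The host and the DSP serialize access to the shared mailbox with a hardware
 * semaphore: the IRQ thread spins on the semaphore register (bounded by
 * ACP_HW_SEM_RETRY_COUNT) before handling the message and releases it by
 * writing 0 when done.
 */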
static irqreturn_t acp_irq_thread(int irq, void *context)
{
struct snd_sof_dev *sdev = context;
const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
unsigned int count = ACP_HW_SEM_RETRY_COUNT;
while (snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset)) {
		/* Wait until the HW semaphore lock is acquired or the retry count expires */
count--;
if (!count) {
dev_err(sdev->dev, "%s: Failed to acquire HW lock\n", __func__);
return IRQ_NONE;
}
}
sof_ops(sdev)->irq_thread(irq, sdev);
/* Unlock or Release HW Semaphore */
snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset, 0x0);
return IRQ_HANDLED;
}
static irqreturn_t acp_irq_handler(int irq, void *dev_id)
{
struct snd_sof_dev *sdev = dev_id;
const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
unsigned int base = desc->dsp_intr_base;
unsigned int val;
val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET);
if (val & ACP_DSP_TO_HOST_IRQ) {
snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET,
ACP_DSP_TO_HOST_IRQ);
return IRQ_WAKE_THREAD;
}
return IRQ_NONE;
}
static int acp_power_on(struct snd_sof_dev *sdev)
{
const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
unsigned int base = desc->pgfsm_base;
unsigned int val;
int ret;
val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET);
if (val == ACP_POWERED_ON)
return 0;
if (val & ACP_PGFSM_STATUS_MASK)
snd_sof_dsp_write(sdev, ACP_DSP_BAR, base + PGFSM_CONTROL_OFFSET,
ACP_PGFSM_CNTL_POWER_ON_MASK);
ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, base + PGFSM_STATUS_OFFSET, val,
!val, ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
if (ret < 0)
dev_err(sdev->dev, "timeout in ACP_PGFSM_STATUS read\n");
return ret;
}
static int acp_reset(struct snd_sof_dev *sdev)
{
const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
unsigned int val;
int ret;
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_ASSERT_RESET);
ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val,
val & ACP_SOFT_RESET_DONE_MASK,
ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
if (ret < 0) {
dev_err(sdev->dev, "timeout asserting reset\n");
return ret;
}
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, ACP_RELEASE_RESET);
ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_SOFT_RESET, val, !val,
ACP_REG_POLL_INTERVAL, ACP_REG_POLL_TIMEOUT_US);
if (ret < 0)
dev_err(sdev->dev, "timeout in releasing reset\n");
if (desc->acp_clkmux_sel)
snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->acp_clkmux_sel, ACP_CLOCK_ACLK);
if (desc->ext_intr_enb)
snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->ext_intr_enb, 0x01);
return ret;
}
static int acp_init(struct snd_sof_dev *sdev)
{
int ret;
/* power on */
ret = acp_power_on(sdev);
if (ret) {
dev_err(sdev->dev, "ACP power on failed\n");
return ret;
}
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_CONTROL, 0x01);
/* Reset */
return acp_reset(sdev);
}
int amd_sof_acp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
int ret;
ret = acp_reset(sdev);
if (ret) {
dev_err(sdev->dev, "ACP Reset failed\n");
return ret;
}
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_CONTROL, 0x00);
return 0;
}
EXPORT_SYMBOL_NS(amd_sof_acp_suspend, SND_SOC_SOF_AMD_COMMON);
int amd_sof_acp_resume(struct snd_sof_dev *sdev)
{
int ret;
ret = acp_init(sdev);
if (ret) {
dev_err(sdev->dev, "ACP Init failed\n");
return ret;
}
return acp_memory_init(sdev);
}
EXPORT_SYMBOL_NS(amd_sof_acp_resume, SND_SOC_SOF_AMD_COMMON);
int amd_sof_acp_probe(struct snd_sof_dev *sdev)
{
struct pci_dev *pci = to_pci_dev(sdev->dev);
struct snd_sof_pdata *plat_data = sdev->pdata;
struct acp_dev_data *adata;
const struct sof_amd_acp_desc *chip;
const struct dmi_system_id *dmi_id;
unsigned int addr;
int ret;
chip = get_chip_info(sdev->pdata);
if (!chip) {
dev_err(sdev->dev, "no such device supported, chip id:%x\n", pci->device);
return -EIO;
}
adata = devm_kzalloc(sdev->dev, sizeof(struct acp_dev_data),
GFP_KERNEL);
if (!adata)
return -ENOMEM;
adata->dev = sdev;
adata->dmic_dev = platform_device_register_data(sdev->dev, "dmic-codec",
PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(adata->dmic_dev)) {
dev_err(sdev->dev, "failed to register platform for dmic codec\n");
return PTR_ERR(adata->dmic_dev);
}
addr = pci_resource_start(pci, ACP_DSP_BAR);
sdev->bar[ACP_DSP_BAR] = devm_ioremap(sdev->dev, addr, pci_resource_len(pci, ACP_DSP_BAR));
if (!sdev->bar[ACP_DSP_BAR]) {
dev_err(sdev->dev, "ioremap error\n");
ret = -ENXIO;
goto unregister_dev;
}
pci_set_master(pci);
sdev->pdata->hw_pdata = adata;
adata->smn_dev = pci_get_device(PCI_VENDOR_ID_AMD, chip->host_bridge_id, NULL);
if (!adata->smn_dev) {
dev_err(sdev->dev, "Failed to get host bridge device\n");
ret = -ENODEV;
goto unregister_dev;
}
sdev->ipc_irq = pci->irq;
ret = request_threaded_irq(sdev->ipc_irq, acp_irq_handler, acp_irq_thread,
IRQF_SHARED, "AudioDSP", sdev);
if (ret < 0) {
dev_err(sdev->dev, "failed to register IRQ %d\n",
sdev->ipc_irq);
goto free_smn_dev;
}
ret = acp_init(sdev);
if (ret < 0)
goto free_ipc_irq;
sdev->dsp_box.offset = 0;
sdev->dsp_box.size = BOX_SIZE_512;
sdev->host_box.offset = sdev->dsp_box.offset + sdev->dsp_box.size;
sdev->host_box.size = BOX_SIZE_512;
sdev->debug_box.offset = sdev->host_box.offset + sdev->host_box.size;
sdev->debug_box.size = BOX_SIZE_1024;
adata->signed_fw_image = false;
dmi_id = dmi_first_match(acp_sof_quirk_table);
if (dmi_id && dmi_id->driver_data) {
adata->fw_code_bin = kasprintf(GFP_KERNEL, "%s/sof-%s-code.bin",
plat_data->fw_filename_prefix,
chip->name);
adata->fw_data_bin = kasprintf(GFP_KERNEL, "%s/sof-%s-data.bin",
plat_data->fw_filename_prefix,
chip->name);
adata->signed_fw_image = dmi_id->driver_data;
dev_dbg(sdev->dev, "fw_code_bin:%s, fw_data_bin:%s\n", adata->fw_code_bin,
adata->fw_data_bin);
}
adata->enable_fw_debug = enable_fw_debug;
acp_memory_init(sdev);
acp_dsp_stream_init(sdev);
return 0;
free_ipc_irq:
free_irq(sdev->ipc_irq, sdev);
free_smn_dev:
pci_dev_put(adata->smn_dev);
unregister_dev:
platform_device_unregister(adata->dmic_dev);
return ret;
}
EXPORT_SYMBOL_NS(amd_sof_acp_probe, SND_SOC_SOF_AMD_COMMON);
int amd_sof_acp_remove(struct snd_sof_dev *sdev)
{
struct acp_dev_data *adata = sdev->pdata->hw_pdata;
if (adata->smn_dev)
pci_dev_put(adata->smn_dev);
if (sdev->ipc_irq)
free_irq(sdev->ipc_irq, sdev);
if (adata->dmic_dev)
platform_device_unregister(adata->dmic_dev);
return acp_reset(sdev);
}
EXPORT_SYMBOL_NS(amd_sof_acp_remove, SND_SOC_SOF_AMD_COMMON);
MODULE_DESCRIPTION("AMD ACP sof driver");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | sound/soc/sof/amd/acp.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2021, 2023 Advanced Micro Devices, Inc.
//
// Authors: Ajit Kumar Pandey <[email protected]>
/*
 * Hardware interface for the ACP DSP firmware binary loader
*/
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "../ops.h"
#include "acp-dsp-offset.h"
#include "acp.h"
#define FW_BIN 0
#define FW_DATA_BIN 1
#define FW_BIN_PTE_OFFSET 0x00
#define FW_DATA_BIN_PTE_OFFSET 0x08
#define ACP_DSP_RUN 0x00
int acp_dsp_block_read(struct snd_sof_dev *sdev, enum snd_sof_fw_blk_type blk_type,
u32 offset, void *dest, size_t size)
{
const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
switch (blk_type) {
case SOF_FW_BLK_TYPE_SRAM:
offset = offset - desc->sram_pte_offset;
memcpy_from_scratch(sdev, offset, dest, size);
break;
default:
dev_err(sdev->dev, "bad blk type 0x%x\n", blk_type);
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL_NS(acp_dsp_block_read, SND_SOC_SOF_AMD_COMMON);
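/*
 * Firmware blocks are staged according to their type: IRAM (code) and DRAM
 * (data) blocks are copied into DMA-coherent bounce buffers that are later
 * pushed to the DSP by the SHA/ACP DMA engines, while SRAM blocks are written
 * directly into the scratch register area.
 */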
int acp_dsp_block_write(struct snd_sof_dev *sdev, enum snd_sof_fw_blk_type blk_type,
u32 offset, void *src, size_t size)
{
struct pci_dev *pci = to_pci_dev(sdev->dev);
const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
struct acp_dev_data *adata;
void *dest;
u32 dma_size, page_count;
unsigned int size_fw;
adata = sdev->pdata->hw_pdata;
switch (blk_type) {
case SOF_FW_BLK_TYPE_IRAM:
if (!adata->bin_buf) {
size_fw = sdev->basefw.fw->size;
page_count = PAGE_ALIGN(size_fw) >> PAGE_SHIFT;
dma_size = page_count * ACP_PAGE_SIZE;
adata->bin_buf = dma_alloc_coherent(&pci->dev, dma_size,
&adata->sha_dma_addr,
GFP_ATOMIC);
if (!adata->bin_buf)
return -ENOMEM;
}
adata->fw_bin_size = size + offset;
dest = adata->bin_buf + offset;
break;
case SOF_FW_BLK_TYPE_DRAM:
if (!adata->data_buf) {
adata->data_buf = dma_alloc_coherent(&pci->dev,
ACP_DEFAULT_DRAM_LENGTH,
&adata->dma_addr,
GFP_ATOMIC);
if (!adata->data_buf)
return -ENOMEM;
}
dest = adata->data_buf + offset;
adata->fw_data_bin_size = size + offset;
break;
case SOF_FW_BLK_TYPE_SRAM:
offset = offset - desc->sram_pte_offset;
memcpy_to_scratch(sdev, offset, src, size);
return 0;
default:
dev_err(sdev->dev, "bad blk type 0x%x\n", blk_type);
return -EINVAL;
}
memcpy(dest, src, size);
return 0;
}
EXPORT_SYMBOL_NS(acp_dsp_block_write, SND_SOC_SOF_AMD_COMMON);
int acp_get_bar_index(struct snd_sof_dev *sdev, u32 type)
{
return type;
}
EXPORT_SYMBOL_NS(acp_get_bar_index, SND_SOC_SOF_AMD_COMMON);
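/*
 * Set up ATU group 1 page table entries for firmware loading: the code image
 * PTEs start at scratch offset 0 and the data image PTEs follow immediately
 * after them (fw_bin_page_count entries of 8 bytes each).
 */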
static void configure_pte_for_fw_loading(int type, int num_pages, struct acp_dev_data *adata)
{
struct snd_sof_dev *sdev = adata->dev;
const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
unsigned int low, high;
dma_addr_t addr;
u16 page_idx;
u32 offset;
switch (type) {
case FW_BIN:
offset = FW_BIN_PTE_OFFSET;
addr = adata->sha_dma_addr;
break;
case FW_DATA_BIN:
offset = adata->fw_bin_page_count * 8;
addr = adata->dma_addr;
break;
default:
dev_err(sdev->dev, "Invalid data type %x\n", type);
return;
}
/* Group Enable */
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACPAXI2AXI_ATU_BASE_ADDR_GRP_1,
desc->sram_pte_offset | BIT(31));
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACPAXI2AXI_ATU_PAGE_SIZE_GRP_1,
PAGE_SIZE_4K_ENABLE);
for (page_idx = 0; page_idx < num_pages; page_idx++) {
low = lower_32_bits(addr);
high = upper_32_bits(addr);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + offset, low);
high |= BIT(31);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + offset + 4, high);
offset += 8;
addr += PAGE_SIZE;
}
/* Flush ATU Cache after PTE Update */
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACPAXI2AXI_ATU_CTRL, ACP_ATU_CACHE_INVALID);
}
/* pre fw run operations */
int acp_dsp_pre_fw_run(struct snd_sof_dev *sdev)
{
struct pci_dev *pci = to_pci_dev(sdev->dev);
const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
struct acp_dev_data *adata;
unsigned int src_addr, size_fw;
u32 page_count, dma_size;
int ret;
adata = sdev->pdata->hw_pdata;
if (adata->signed_fw_image)
size_fw = adata->fw_bin_size - ACP_FIRMWARE_SIGNATURE;
else
size_fw = adata->fw_bin_size;
page_count = PAGE_ALIGN(size_fw) >> PAGE_SHIFT;
adata->fw_bin_page_count = page_count;
configure_pte_for_fw_loading(FW_BIN, page_count, adata);
ret = configure_and_run_sha_dma(adata, adata->bin_buf, ACP_SYSTEM_MEMORY_WINDOW,
ACP_IRAM_BASE_ADDRESS, size_fw);
if (ret < 0) {
dev_err(sdev->dev, "SHA DMA transfer failed status: %d\n", ret);
return ret;
}
configure_pte_for_fw_loading(FW_DATA_BIN, ACP_DRAM_PAGE_COUNT, adata);
src_addr = ACP_SYSTEM_MEMORY_WINDOW + page_count * ACP_PAGE_SIZE;
ret = configure_and_run_dma(adata, src_addr, ACP_DATA_RAM_BASE_ADDRESS,
adata->fw_data_bin_size);
if (ret < 0) {
dev_err(sdev->dev, "acp dma configuration failed: %d\n", ret);
return ret;
}
ret = acp_dma_status(adata, 0);
if (ret < 0)
dev_err(sdev->dev, "acp dma transfer status: %d\n", ret);
if (desc->rev > 3) {
/* Cache Window enable */
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DSP0_CACHE_OFFSET0, desc->sram_pte_offset);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DSP0_CACHE_SIZE0, SRAM1_SIZE | BIT(31));
}
/* Free memory once DMA is complete */
dma_size = (PAGE_ALIGN(sdev->basefw.fw->size) >> PAGE_SHIFT) * ACP_PAGE_SIZE;
dma_free_coherent(&pci->dev, dma_size, adata->bin_buf, adata->sha_dma_addr);
dma_free_coherent(&pci->dev, ACP_DEFAULT_DRAM_LENGTH, adata->data_buf, adata->dma_addr);
adata->bin_buf = NULL;
adata->data_buf = NULL;
return ret;
}
EXPORT_SYMBOL_NS(acp_dsp_pre_fw_run, SND_SOC_SOF_AMD_COMMON);
int acp_sof_dsp_run(struct snd_sof_dev *sdev)
{
struct acp_dev_data *adata = sdev->pdata->hw_pdata;
const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
int val;
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DSP0_RUNSTALL, ACP_DSP_RUN);
val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DSP0_RUNSTALL);
dev_dbg(sdev->dev, "ACP_DSP0_RUNSTALL : 0x%0x\n", val);
	/* Some platforms don't support the fusion DSP; the offset is kept zero in that case */
if (desc->fusion_dsp_offset && adata->enable_fw_debug) {
snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->fusion_dsp_offset, ACP_DSP_RUN);
val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->fusion_dsp_offset);
dev_dbg(sdev->dev, "ACP_DSP0_FUSION_RUNSTALL : 0x%0x\n", val);
}
return 0;
}
EXPORT_SYMBOL_NS(acp_sof_dsp_run, SND_SOC_SOF_AMD_COMMON);
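/*
 * On platforms that ship signed firmware (see acp_sof_quirk_table), the image
 * is split into separate code and data binaries, which are requested here and
 * written to their respective IRAM/DRAM block types.
 */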
int acp_sof_load_signed_firmware(struct snd_sof_dev *sdev)
{
struct snd_sof_pdata *plat_data = sdev->pdata;
struct acp_dev_data *adata = plat_data->hw_pdata;
int ret;
ret = request_firmware(&sdev->basefw.fw, adata->fw_code_bin, sdev->dev);
if (ret < 0) {
dev_err(sdev->dev, "sof signed firmware code bin is missing\n");
return ret;
} else {
dev_dbg(sdev->dev, "request_firmware %s successful\n", adata->fw_code_bin);
}
	ret = snd_sof_dsp_block_write(sdev, SOF_FW_BLK_TYPE_IRAM, 0,
				      (void *)sdev->basefw.fw->data, sdev->basefw.fw->size);
	if (ret < 0) {
		dev_err(sdev->dev, "sof signed firmware code bin write failed: %d\n", ret);
		return ret;
	}
ret = request_firmware(&adata->fw_dbin, adata->fw_data_bin, sdev->dev);
if (ret < 0) {
dev_err(sdev->dev, "sof signed firmware data bin is missing\n");
return ret;
} else {
dev_dbg(sdev->dev, "request_firmware %s successful\n", adata->fw_data_bin);
}
ret = snd_sof_dsp_block_write(sdev, SOF_FW_BLK_TYPE_DRAM, 0,
(void *)adata->fw_dbin->data, adata->fw_dbin->size);
return ret;
}
EXPORT_SYMBOL_NS(acp_sof_load_signed_firmware, SND_SOC_SOF_AMD_COMMON);
| linux-master | sound/soc/sof/amd/acp-loader.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2021 Advanced Micro Devices, Inc.
//
// Authors: Balakishore Pati <[email protected]>
// Ajit Kumar Pandey <[email protected]>
/* ACP-specific SOF IPC code */
#include <linux/module.h>
#include "../ops.h"
#include "acp.h"
#include "acp-dsp-offset.h"
void acp_mailbox_write(struct snd_sof_dev *sdev, u32 offset, void *message, size_t bytes)
{
memcpy_to_scratch(sdev, offset, message, bytes);
}
EXPORT_SYMBOL_NS(acp_mailbox_write, SND_SOC_SOF_AMD_COMMON);
void acp_mailbox_read(struct snd_sof_dev *sdev, u32 offset, void *message, size_t bytes)
{
memcpy_from_scratch(sdev, offset, message, bytes);
}
EXPORT_SYMBOL_NS(acp_mailbox_read, SND_SOC_SOF_AMD_COMMON);
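/*
 * IPC with the DSP uses mailboxes located in ACP scratch memory. A message is
 * copied into the host box, the "host message" flag is set in the scratch IPC
 * area and a software interrupt is triggered to notify the DSP.
 */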
static void acpbus_trigger_host_to_dsp_swintr(struct acp_dev_data *adata)
{
struct snd_sof_dev *sdev = adata->dev;
const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
u32 swintr_trigger;
swintr_trigger = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->dsp_intr_base +
DSP_SW_INTR_TRIG_OFFSET);
swintr_trigger |= 0x01;
snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->dsp_intr_base + DSP_SW_INTR_TRIG_OFFSET,
swintr_trigger);
}
static void acp_ipc_host_msg_set(struct snd_sof_dev *sdev)
{
unsigned int host_msg = sdev->debug_box.offset +
offsetof(struct scratch_ipc_conf, sof_host_msg_write);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + host_msg, 1);
}
static void acp_dsp_ipc_host_done(struct snd_sof_dev *sdev)
{
unsigned int dsp_msg = sdev->debug_box.offset +
offsetof(struct scratch_ipc_conf, sof_dsp_msg_write);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + dsp_msg, 0);
}
static void acp_dsp_ipc_dsp_done(struct snd_sof_dev *sdev)
{
unsigned int dsp_ack = sdev->debug_box.offset +
offsetof(struct scratch_ipc_conf, sof_dsp_ack_write);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + dsp_ack, 0);
}
int acp_sof_ipc_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
{
struct acp_dev_data *adata = sdev->pdata->hw_pdata;
const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
unsigned int offset = sdev->host_box.offset;
unsigned int count = ACP_HW_SEM_RETRY_COUNT;
while (snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset)) {
		/* Wait until the HW semaphore lock is acquired or the retry count expires */
count--;
if (!count) {
dev_err(sdev->dev, "%s: Failed to acquire HW lock\n", __func__);
return -EINVAL;
}
}
acp_mailbox_write(sdev, offset, msg->msg_data, msg->msg_size);
acp_ipc_host_msg_set(sdev);
/* Trigger host to dsp interrupt for the msg */
acpbus_trigger_host_to_dsp_swintr(adata);
/* Unlock or Release HW Semaphore */
snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->hw_semaphore_offset, 0x0);
return 0;
}
EXPORT_SYMBOL_NS(acp_sof_ipc_send_msg, SND_SOC_SOF_AMD_COMMON);
static void acp_dsp_ipc_get_reply(struct snd_sof_dev *sdev)
{
struct snd_sof_ipc_msg *msg = sdev->msg;
struct sof_ipc_reply reply;
struct sof_ipc_cmd_hdr *hdr;
unsigned int offset = sdev->host_box.offset;
int ret = 0;
/*
	 * Sometimes an unexpected IPC reply arrives that does not belong
	 * to any of the IPCs sent by the driver. In that case, the driver
	 * must ignore the reply.
*/
if (!msg) {
dev_warn(sdev->dev, "unexpected ipc interrupt raised!\n");
return;
}
hdr = msg->msg_data;
if (hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CTX_SAVE) ||
hdr->cmd == (SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE)) {
/*
* memory windows are powered off before sending IPC reply,
* so we can't read the mailbox for CTX_SAVE and PM_GATE
* replies.
*/
reply.error = 0;
reply.hdr.cmd = SOF_IPC_GLB_REPLY;
reply.hdr.size = sizeof(reply);
memcpy(msg->reply_data, &reply, sizeof(reply));
goto out;
}
/* get IPC reply from DSP in the mailbox */
acp_mailbox_read(sdev, offset, &reply, sizeof(reply));
if (reply.error < 0) {
memcpy(msg->reply_data, &reply, sizeof(reply));
ret = reply.error;
} else {
/*
* To support an IPC tx_message with a
* reply_size set to zero.
*/
if (!msg->reply_size)
goto out;
/* reply correct size ? */
if (reply.hdr.size != msg->reply_size &&
!(reply.hdr.cmd & SOF_IPC_GLB_PROBE)) {
dev_err(sdev->dev, "reply expected %zu got %u bytes\n",
msg->reply_size, reply.hdr.size);
ret = -EINVAL;
}
/* read the message */
if (msg->reply_size > 0)
acp_mailbox_read(sdev, offset, msg->reply_data, msg->reply_size);
}
out:
msg->reply_error = ret;
}
irqreturn_t acp_sof_ipc_irq_thread(int irq, void *context)
{
struct snd_sof_dev *sdev = context;
const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
struct acp_dev_data *adata = sdev->pdata->hw_pdata;
unsigned int dsp_msg_write = sdev->debug_box.offset +
offsetof(struct scratch_ipc_conf, sof_dsp_msg_write);
unsigned int dsp_ack_write = sdev->debug_box.offset +
offsetof(struct scratch_ipc_conf, sof_dsp_ack_write);
bool ipc_irq = false;
int dsp_msg, dsp_ack;
unsigned int status;
if (sdev->first_boot && sdev->fw_state != SOF_FW_BOOT_COMPLETE) {
acp_mailbox_read(sdev, sdev->dsp_box.offset, &status, sizeof(status));
if ((status & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
snd_sof_dsp_panic(sdev, sdev->dsp_box.offset + sizeof(status),
true);
status = 0;
acp_mailbox_write(sdev, sdev->dsp_box.offset, &status, sizeof(status));
return IRQ_HANDLED;
}
snd_sof_ipc_msgs_rx(sdev);
acp_dsp_ipc_host_done(sdev);
return IRQ_HANDLED;
}
dsp_msg = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + dsp_msg_write);
if (dsp_msg) {
snd_sof_ipc_msgs_rx(sdev);
acp_dsp_ipc_host_done(sdev);
ipc_irq = true;
}
dsp_ack = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + dsp_ack_write);
if (dsp_ack) {
spin_lock_irq(&sdev->ipc_lock);
/* handle immediate reply from DSP core */
acp_dsp_ipc_get_reply(sdev);
snd_sof_ipc_reply(sdev, 0);
/* set the done bit */
acp_dsp_ipc_dsp_done(sdev);
spin_unlock_irq(&sdev->ipc_lock);
ipc_irq = true;
}
acp_mailbox_read(sdev, sdev->debug_box.offset, &status, sizeof(u32));
if ((status & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
snd_sof_dsp_panic(sdev, sdev->dsp_oops_offset, true);
status = 0;
acp_mailbox_write(sdev, sdev->debug_box.offset, &status, sizeof(status));
return IRQ_HANDLED;
}
if (desc->probe_reg_offset) {
u32 val;
u32 posn;
		/* The probe register consists of two parts:
		 * bits 0-30 hold the cumulative position value and
		 * bit 31 is a synchronization flag between the DSP and the
		 * CPU for the position update.
*/
val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->probe_reg_offset);
if (val & PROBE_STATUS_BIT) {
posn = val & ~PROBE_STATUS_BIT;
if (adata->probe_stream) {
			/* The probe position value is 31 bits wide (limited to 2 GB);
			 * once it wraps, the DSP won't send a position interrupt.
*/
adata->probe_stream->cstream_posn = posn;
snd_compr_fragment_elapsed(adata->probe_stream->cstream);
snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->probe_reg_offset, posn);
ipc_irq = true;
}
}
}
if (!ipc_irq)
dev_dbg_ratelimited(sdev->dev, "nothing to do in IPC IRQ thread\n");
return IRQ_HANDLED;
}
EXPORT_SYMBOL_NS(acp_sof_ipc_irq_thread, SND_SOC_SOF_AMD_COMMON);
int acp_sof_ipc_msg_data(struct snd_sof_dev *sdev, struct snd_sof_pcm_stream *sps,
void *p, size_t sz)
{
unsigned int offset = sdev->dsp_box.offset;
if (!sps || !sdev->stream_box.size) {
acp_mailbox_read(sdev, offset, p, sz);
} else {
struct snd_pcm_substream *substream = sps->substream;
struct acp_dsp_stream *stream;
if (!substream || !substream->runtime)
return -ESTRPIPE;
stream = substream->runtime->private_data;
if (!stream)
return -ESTRPIPE;
acp_mailbox_read(sdev, stream->posn_offset, p, sz);
}
return 0;
}
EXPORT_SYMBOL_NS(acp_sof_ipc_msg_data, SND_SOC_SOF_AMD_COMMON);
int acp_set_stream_data_offset(struct snd_sof_dev *sdev,
struct snd_sof_pcm_stream *sps,
size_t posn_offset)
{
struct snd_pcm_substream *substream = sps->substream;
struct acp_dsp_stream *stream = substream->runtime->private_data;
/* check for unaligned offset or overflow */
if (posn_offset > sdev->stream_box.size ||
posn_offset % sizeof(struct sof_ipc_stream_posn) != 0)
return -EINVAL;
stream->posn_offset = sdev->stream_box.offset + posn_offset;
dev_dbg(sdev->dev, "pcm: stream dir %d, posn mailbox offset is %zu",
substream->stream, stream->posn_offset);
return 0;
}
EXPORT_SYMBOL_NS(acp_set_stream_data_offset, SND_SOC_SOF_AMD_COMMON);
int acp_sof_ipc_get_mailbox_offset(struct snd_sof_dev *sdev)
{
const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
return desc->sram_pte_offset;
}
EXPORT_SYMBOL_NS(acp_sof_ipc_get_mailbox_offset, SND_SOC_SOF_AMD_COMMON);
int acp_sof_ipc_get_window_offset(struct snd_sof_dev *sdev, u32 id)
{
return 0;
}
EXPORT_SYMBOL_NS(acp_sof_ipc_get_window_offset, SND_SOC_SOF_AMD_COMMON);
MODULE_DESCRIPTION("AMD ACP sof-ipc driver");
| linux-master | sound/soc/sof/amd/acp-ipc.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2022 Advanced Micro Devices, Inc.
//
// Authors: Ajit Kumar Pandey <[email protected]>
// V sujith kumar Reddy <[email protected]>
/* ACP-specific Common code */
#include "../sof-priv.h"
#include "../sof-audio.h"
#include "../ops.h"
#include "../sof-audio.h"
#include "acp.h"
#include "acp-dsp-offset.h"
#include <sound/sof/xtensa.h>
/**
* amd_sof_ipc_dump() - This function is called when IPC tx times out.
* @sdev: SOF device.
*/
void amd_sof_ipc_dump(struct snd_sof_dev *sdev)
{
const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
u32 base = desc->dsp_intr_base;
u32 dsp_msg_write = sdev->debug_box.offset +
offsetof(struct scratch_ipc_conf, sof_dsp_msg_write);
u32 dsp_ack_write = sdev->debug_box.offset +
offsetof(struct scratch_ipc_conf, sof_dsp_ack_write);
u32 host_msg_write = sdev->debug_box.offset +
offsetof(struct scratch_ipc_conf, sof_host_msg_write);
u32 host_ack_write = sdev->debug_box.offset +
offsetof(struct scratch_ipc_conf, sof_host_ack_write);
u32 dsp_msg, dsp_ack, host_msg, host_ack, irq_stat;
dsp_msg = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + dsp_msg_write);
dsp_ack = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + dsp_ack_write);
host_msg = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + host_msg_write);
host_ack = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + host_ack_write);
irq_stat = snd_sof_dsp_read(sdev, ACP_DSP_BAR, base + DSP_SW_INTR_STAT_OFFSET);
dev_err(sdev->dev,
"dsp_msg = %#x dsp_ack = %#x host_msg = %#x host_ack = %#x irq_stat = %#x\n",
dsp_msg, dsp_ack, host_msg, host_ack, irq_stat);
}
/**
* amd_get_registers() - This function is called in case of DSP oops
 * in order to gather information about the registers, the filename and
 * line number, and the stack.
* @sdev: SOF device.
* @xoops: Stores information about registers.
* @panic_info: Stores information about filename and line number.
* @stack: Stores the stack dump.
* @stack_words: Size of the stack dump.
*/
static void amd_get_registers(struct snd_sof_dev *sdev,
struct sof_ipc_dsp_oops_xtensa *xoops,
struct sof_ipc_panic_info *panic_info,
u32 *stack, size_t stack_words)
{
u32 offset = sdev->dsp_oops_offset;
/* first read registers */
acp_mailbox_read(sdev, offset, xoops, sizeof(*xoops));
/* then get panic info */
if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
xoops->arch_hdr.totalsize);
return;
}
offset += xoops->arch_hdr.totalsize;
acp_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
/* then get the stack */
offset += sizeof(*panic_info);
acp_mailbox_read(sdev, offset, stack, stack_words * sizeof(u32));
}
/**
* amd_sof_dump() - This function is called when a panic message is
* received from the firmware.
* @sdev: SOF device.
* @flags: parameter not used but required by ops prototype
*/
void amd_sof_dump(struct snd_sof_dev *sdev, u32 flags)
{
struct sof_ipc_dsp_oops_xtensa xoops;
struct sof_ipc_panic_info panic_info;
u32 stack[AMD_STACK_DUMP_SIZE];
u32 status;
/* Get information about the panic status from the debug box area.
* Compute the trace point based on the status.
*/
if (sdev->dsp_oops_offset > sdev->debug_box.offset) {
acp_mailbox_read(sdev, sdev->debug_box.offset, &status, sizeof(u32));
} else {
		/* Read the DSP panic status from the dsp_box, as the window
		 * information for the exception box offset and size is not
		 * available before FW_READY.
*/
acp_mailbox_read(sdev, sdev->dsp_box.offset, &status, sizeof(u32));
sdev->dsp_oops_offset = sdev->dsp_box.offset + sizeof(status);
}
/* Get information about the registers, the filename and line
* number and the stack.
*/
amd_get_registers(sdev, &xoops, &panic_info, stack, AMD_STACK_DUMP_SIZE);
/* Print the information to the console */
sof_print_oops_and_stack(sdev, KERN_ERR, status, status, &xoops,
&panic_info, stack, AMD_STACK_DUMP_SIZE);
}
struct snd_soc_acpi_mach *amd_sof_machine_select(struct snd_sof_dev *sdev)
{
struct snd_sof_pdata *sof_pdata = sdev->pdata;
const struct sof_dev_desc *desc = sof_pdata->desc;
struct snd_soc_acpi_mach *mach;
mach = snd_soc_acpi_find_machine(desc->machines);
if (!mach) {
dev_warn(sdev->dev, "No matching ASoC machine driver found\n");
return NULL;
}
sof_pdata->tplg_filename = mach->sof_tplg_filename;
sof_pdata->fw_filename = mach->fw_filename;
return mach;
}
/* AMD Common DSP ops */
struct snd_sof_dsp_ops sof_acp_common_ops = {
/* probe and remove */
.probe = amd_sof_acp_probe,
.remove = amd_sof_acp_remove,
/* Register IO */
.write = sof_io_write,
.read = sof_io_read,
/* Block IO */
.block_read = acp_dsp_block_read,
.block_write = acp_dsp_block_write,
	/* Firmware loading */
.load_firmware = snd_sof_load_firmware_memcpy,
.pre_fw_run = acp_dsp_pre_fw_run,
.get_bar_index = acp_get_bar_index,
/* DSP core boot */
.run = acp_sof_dsp_run,
	/* IPC */
.send_msg = acp_sof_ipc_send_msg,
.ipc_msg_data = acp_sof_ipc_msg_data,
.set_stream_data_offset = acp_set_stream_data_offset,
.get_mailbox_offset = acp_sof_ipc_get_mailbox_offset,
.get_window_offset = acp_sof_ipc_get_window_offset,
.irq_thread = acp_sof_ipc_irq_thread,
/* stream callbacks */
.pcm_open = acp_pcm_open,
.pcm_close = acp_pcm_close,
.pcm_hw_params = acp_pcm_hw_params,
.pcm_pointer = acp_pcm_pointer,
.hw_info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE |
SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
/* Machine driver callbacks */
.machine_select = amd_sof_machine_select,
.machine_register = sof_machine_register,
.machine_unregister = sof_machine_unregister,
/* Trace Logger */
.trace_init = acp_sof_trace_init,
.trace_release = acp_sof_trace_release,
/* PM */
.suspend = amd_sof_acp_suspend,
.resume = amd_sof_acp_resume,
.ipc_dump = amd_sof_ipc_dump,
.dbg_dump = amd_sof_dump,
.debugfs_add_region_item = snd_sof_debugfs_add_region_item_iomem,
.dsp_arch_ops = &sof_xtensa_arch_ops,
	/* probe client device registration */
.register_ipc_clients = acp_probes_register,
.unregister_ipc_clients = acp_probes_unregister,
};
EXPORT_SYMBOL_NS(sof_acp_common_ops, SND_SOC_SOF_AMD_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_XTENSA);
MODULE_DESCRIPTION("ACP SOF COMMON Driver");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | sound/soc/sof/amd/acp-common.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2023 Advanced Micro Devices, Inc.
//
// Authors: Venkata Prasad Potturu <[email protected]>
/*
* Hardware interface for Audio DSP on Vangogh platform
*/
#include <linux/platform_device.h>
#include <linux/module.h>
#include "../ops.h"
#include "../sof-audio.h"
#include "acp.h"
#include "acp-dsp-offset.h"
#define I2S_HS_INSTANCE 0
#define I2S_BT_INSTANCE 1
#define I2S_SP_INSTANCE 2
#define PDM_DMIC_INSTANCE 3
#define I2S_HS_VIRTUAL_INSTANCE 4
static struct snd_soc_dai_driver vangogh_sof_dai[] = {
[I2S_HS_INSTANCE] = {
.id = I2S_HS_INSTANCE,
.name = "acp-sof-hs",
.playback = {
.rates = SNDRV_PCM_RATE_8000_96000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 2,
.channels_max = 8,
.rate_min = 8000,
.rate_max = 96000,
},
.capture = {
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
/* Supporting only stereo for I2S HS controller capture */
.channels_min = 2,
.channels_max = 2,
.rate_min = 8000,
.rate_max = 48000,
},
},
[I2S_BT_INSTANCE] = {
.id = I2S_BT_INSTANCE,
.name = "acp-sof-bt",
.playback = {
.rates = SNDRV_PCM_RATE_8000_96000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 2,
.channels_max = 8,
.rate_min = 8000,
.rate_max = 96000,
},
.capture = {
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
/* Supporting only stereo for I2S BT controller capture */
.channels_min = 2,
.channels_max = 2,
.rate_min = 8000,
.rate_max = 48000,
},
},
[I2S_SP_INSTANCE] = {
.id = I2S_SP_INSTANCE,
.name = "acp-sof-sp",
.playback = {
.rates = SNDRV_PCM_RATE_8000_96000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 2,
.channels_max = 8,
.rate_min = 8000,
.rate_max = 96000,
},
.capture = {
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
/* Supporting only stereo for I2S SP controller capture */
.channels_min = 2,
.channels_max = 2,
.rate_min = 8000,
.rate_max = 48000,
},
},
[PDM_DMIC_INSTANCE] = {
.id = PDM_DMIC_INSTANCE,
.name = "acp-sof-dmic",
.capture = {
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 2,
.channels_max = 4,
.rate_min = 8000,
.rate_max = 48000,
},
},
[I2S_HS_VIRTUAL_INSTANCE] = {
.id = I2S_HS_VIRTUAL_INSTANCE,
.name = "acp-sof-hs-virtual",
.playback = {
.rates = SNDRV_PCM_RATE_8000_96000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 2,
.channels_max = 8,
.rate_min = 8000,
.rate_max = 96000,
},
.capture = {
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
/* Supporting only stereo for I2S HS-Virtual controller capture */
.channels_min = 2,
.channels_max = 2,
.rate_min = 8000,
.rate_max = 48000,
},
},
};
/* Vangogh ops */
struct snd_sof_dsp_ops sof_vangogh_ops;
EXPORT_SYMBOL_NS(sof_vangogh_ops, SND_SOC_SOF_AMD_COMMON);
int sof_vangogh_ops_init(struct snd_sof_dev *sdev)
{
const struct dmi_system_id *dmi_id;
/* common defaults */
memcpy(&sof_vangogh_ops, &sof_acp_common_ops, sizeof(struct snd_sof_dsp_ops));
sof_vangogh_ops.drv = vangogh_sof_dai;
sof_vangogh_ops.num_drv = ARRAY_SIZE(vangogh_sof_dai);
dmi_id = dmi_first_match(acp_sof_quirk_table);
if (dmi_id && dmi_id->driver_data)
sof_vangogh_ops.load_firmware = acp_sof_load_signed_firmware;
return 0;
}
MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
MODULE_DESCRIPTION("VANGOGH SOF Driver");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | sound/soc/sof/amd/vangogh.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2023 Advanced Micro Devices, Inc. All rights reserved.
//
// Authors: Venkata Prasad Potturu <[email protected]>
/*
* PCI interface for Vangogh ACP device
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <sound/sof.h>
#include <sound/soc-acpi.h>
#include "../ops.h"
#include "../sof-pci-dev.h"
#include "../../amd/mach-config.h"
#include "acp.h"
#include "acp-dsp-offset.h"
#define ACP5X_FUTURE_REG_ACLK_0 0x1864
static const struct sof_amd_acp_desc vangogh_chip_info = {
.rev = 5,
.name = "vangogh",
.host_bridge_id = HOST_BRIDGE_VGH,
.pgfsm_base = ACP5X_PGFSM_BASE,
.ext_intr_stat = ACP5X_EXT_INTR_STAT,
.dsp_intr_base = ACP5X_DSP_SW_INTR_BASE,
.sram_pte_offset = ACP5X_SRAM_PTE_OFFSET,
.hw_semaphore_offset = ACP5X_AXI2DAGB_SEM_0,
.acp_clkmux_sel = ACP5X_CLKMUX_SEL,
.probe_reg_offset = ACP5X_FUTURE_REG_ACLK_0,
};
static const struct sof_dev_desc vangogh_desc = {
.machines = snd_soc_acpi_amd_vangogh_sof_machines,
.resindex_lpe_base = 0,
.resindex_pcicfg_base = -1,
.resindex_imr_base = -1,
.irqindex_host_ipc = -1,
.chip_info = &vangogh_chip_info,
.ipc_supported_mask = BIT(SOF_IPC),
.ipc_default = SOF_IPC,
.default_fw_path = {
[SOF_IPC] = "amd/sof",
},
.default_tplg_path = {
[SOF_IPC] = "amd/sof-tplg",
},
.default_fw_filename = {
[SOF_IPC] = "sof-vangogh.ri",
},
.nocodec_tplg_filename = "sof-acp.tplg",
.ops = &sof_vangogh_ops,
.ops_init = sof_vangogh_ops_init,
};
static int acp_pci_vgh_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
unsigned int flag;
if (pci->revision != ACP_VANGOGH_PCI_ID)
return -ENODEV;
flag = snd_amd_acp_find_config(pci);
if (flag != FLAG_AMD_SOF && flag != FLAG_AMD_SOF_ONLY_DMIC)
return -ENODEV;
return sof_pci_probe(pci, pci_id);
}
static void acp_pci_vgh_remove(struct pci_dev *pci)
{
sof_pci_remove(pci);
}
/* PCI IDs */
static const struct pci_device_id vgh_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, ACP_PCI_DEV_ID),
.driver_data = (unsigned long)&vangogh_desc},
{ 0, }
};
MODULE_DEVICE_TABLE(pci, vgh_pci_ids);
/* pci_driver definition */
static struct pci_driver snd_sof_pci_amd_vgh_driver = {
.name = KBUILD_MODNAME,
.id_table = vgh_pci_ids,
.probe = acp_pci_vgh_probe,
.remove = acp_pci_vgh_remove,
.driver = {
.pm = &sof_pci_pm,
},
};
module_pci_driver(snd_sof_pci_amd_vgh_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_PCI_DEV);
| linux-master | sound/soc/sof/amd/pci-vangogh.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2023 Advanced Micro Devices, Inc.
//
// Authors: V Sujith Kumar Reddy <[email protected]>
/*
* Probe interface for generic AMD audio ACP DSP block
*/
#include <linux/module.h>
#include <sound/soc.h>
#include "../sof-priv.h"
#include "../sof-client-probes.h"
#include "../sof-client.h"
#include "../ops.h"
#include "acp.h"
#include "acp-dsp-offset.h"
static int acp_probes_compr_startup(struct sof_client_dev *cdev,
struct snd_compr_stream *cstream,
struct snd_soc_dai *dai, u32 *stream_id)
{
struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
struct acp_dsp_stream *stream;
struct acp_dev_data *adata;
adata = sdev->pdata->hw_pdata;
stream = acp_dsp_stream_get(sdev, 0);
if (!stream)
return -ENODEV;
stream->cstream = cstream;
cstream->runtime->private_data = stream;
adata->probe_stream = stream;
*stream_id = stream->stream_tag;
return 0;
}
static int acp_probes_compr_shutdown(struct sof_client_dev *cdev,
struct snd_compr_stream *cstream,
struct snd_soc_dai *dai)
{
struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
struct acp_dsp_stream *stream = cstream->runtime->private_data;
struct acp_dev_data *adata;
int ret;
ret = acp_dsp_stream_put(sdev, stream);
if (ret < 0) {
dev_err(sdev->dev, "Failed to release probe compress stream\n");
return ret;
}
adata = sdev->pdata->hw_pdata;
stream->cstream = NULL;
cstream->runtime->private_data = NULL;
adata->probe_stream = NULL;
return 0;
}
static int acp_probes_compr_set_params(struct sof_client_dev *cdev,
struct snd_compr_stream *cstream,
struct snd_compr_params *params,
struct snd_soc_dai *dai)
{
struct snd_sof_dev *sdev = sof_client_dev_to_sof_dev(cdev);
struct acp_dsp_stream *stream = cstream->runtime->private_data;
unsigned int buf_offset, index;
u32 size;
int ret;
stream->dmab = cstream->runtime->dma_buffer_p;
stream->num_pages = PFN_UP(cstream->runtime->dma_bytes);
size = cstream->runtime->buffer_size;
ret = acp_dsp_stream_config(sdev, stream);
if (ret < 0) {
acp_dsp_stream_put(sdev, stream);
return ret;
}
/* write buffer size of stream in scratch memory */
buf_offset = sdev->debug_box.offset +
offsetof(struct scratch_reg_conf, buf_size);
index = stream->stream_tag - 1;
buf_offset = buf_offset + index * 4;
snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + buf_offset, size);
return 0;
}
static int acp_probes_compr_trigger(struct sof_client_dev *cdev,
struct snd_compr_stream *cstream,
int cmd, struct snd_soc_dai *dai)
{
/* Nothing to do here; the trigger callback is mandatory, so it is defined as a no-op */
return 0;
}
static int acp_probes_compr_pointer(struct sof_client_dev *cdev,
struct snd_compr_stream *cstream,
struct snd_compr_tstamp *tstamp,
struct snd_soc_dai *dai)
{
struct acp_dsp_stream *stream = cstream->runtime->private_data;
struct snd_soc_pcm_stream *pstream;
pstream = &dai->driver->capture;
tstamp->copied_total = stream->cstream_posn;
tstamp->sampling_rate = snd_pcm_rate_bit_to_rate(pstream->rates);
return 0;
}
/* SOF client implementation */
static const struct sof_probes_host_ops acp_probes_ops = {
.startup = acp_probes_compr_startup,
.shutdown = acp_probes_compr_shutdown,
.set_params = acp_probes_compr_set_params,
.trigger = acp_probes_compr_trigger,
.pointer = acp_probes_compr_pointer,
};
int acp_probes_register(struct snd_sof_dev *sdev)
{
return sof_client_dev_register(sdev, "acp-probes", 0, &acp_probes_ops,
sizeof(acp_probes_ops));
}
EXPORT_SYMBOL_NS(acp_probes_register, SND_SOC_SOF_AMD_COMMON);
void acp_probes_unregister(struct snd_sof_dev *sdev)
{
sof_client_dev_unregister(sdev, "acp-probes", 0);
}
EXPORT_SYMBOL_NS(acp_probes_unregister, SND_SOC_SOF_AMD_COMMON);
MODULE_IMPORT_NS(SND_SOC_SOF_CLIENT);
| linux-master | sound/soc/sof/amd/acp-probes.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2022 Advanced Micro Devices, Inc.
//
// Authors: Ajit Kumar Pandey <[email protected]>
/*
* Hardware interface for Audio DSP on Rembrandt platform
*/
#include <linux/platform_device.h>
#include <linux/module.h>
#include "../ops.h"
#include "../sof-audio.h"
#include "acp.h"
#include "acp-dsp-offset.h"
#define I2S_HS_INSTANCE 0
#define I2S_BT_INSTANCE 1
#define I2S_SP_INSTANCE 2
#define PDM_DMIC_INSTANCE 3
#define I2S_HS_VIRTUAL_INSTANCE 4
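/*
* The instance numbers above are used both as indices into rembrandt_sof_dai[]
* and as the .id of the corresponding DAI below.
*/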
static struct snd_soc_dai_driver rembrandt_sof_dai[] = {
[I2S_HS_INSTANCE] = {
.id = I2S_HS_INSTANCE,
.name = "acp-sof-hs",
.playback = {
.rates = SNDRV_PCM_RATE_8000_96000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 2,
.channels_max = 8,
.rate_min = 8000,
.rate_max = 96000,
},
.capture = {
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
/* Supporting only stereo for I2S HS controller capture */
.channels_min = 2,
.channels_max = 2,
.rate_min = 8000,
.rate_max = 48000,
},
},
[I2S_BT_INSTANCE] = {
.id = I2S_BT_INSTANCE,
.name = "acp-sof-bt",
.playback = {
.rates = SNDRV_PCM_RATE_8000_96000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 2,
.channels_max = 8,
.rate_min = 8000,
.rate_max = 96000,
},
.capture = {
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
/* Supporting only stereo for I2S BT controller capture */
.channels_min = 2,
.channels_max = 2,
.rate_min = 8000,
.rate_max = 48000,
},
},
[I2S_SP_INSTANCE] = {
.id = I2S_SP_INSTANCE,
.name = "acp-sof-sp",
.playback = {
.rates = SNDRV_PCM_RATE_8000_96000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 2,
.channels_max = 8,
.rate_min = 8000,
.rate_max = 96000,
},
.capture = {
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
/* Supporting only stereo for I2S SP controller capture */
.channels_min = 2,
.channels_max = 2,
.rate_min = 8000,
.rate_max = 48000,
},
},
[PDM_DMIC_INSTANCE] = {
.id = PDM_DMIC_INSTANCE,
.name = "acp-sof-dmic",
.capture = {
.rates = SNDRV_PCM_RATE_8000_48000,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 2,
.channels_max = 4,
.rate_min = 8000,
.rate_max = 48000,
},
},
[I2S_HS_VIRTUAL_INSTANCE] = {
.id = I2S_HS_VIRTUAL_INSTANCE,
.name = "acp-sof-hs-virtual",
.playback = {
.rates = SNDRV_PCM_RATE_8000_96000,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S32_LE,
.channels_min = 2,
.channels_max = 8,
.rate_min = 8000,
.rate_max = 96000,
},
},
};
/* Rembrandt ops */
struct snd_sof_dsp_ops sof_rembrandt_ops;
EXPORT_SYMBOL_NS(sof_rembrandt_ops, SND_SOC_SOF_AMD_COMMON);
int sof_rembrandt_ops_init(struct snd_sof_dev *sdev)
{
/* common defaults */
memcpy(&sof_rembrandt_ops, &sof_acp_common_ops, sizeof(struct snd_sof_dsp_ops));
sof_rembrandt_ops.drv = rembrandt_sof_dai;
sof_rembrandt_ops.num_drv = ARRAY_SIZE(rembrandt_sof_dai);
return 0;
}
MODULE_IMPORT_NS(SND_SOC_SOF_AMD_COMMON);
MODULE_DESCRIPTION("REMBRANDT SOF Driver");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | sound/soc/sof/amd/rembrandt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
*
* Based on sound/soc/imx/imx-pcm-dma-mx2.c
*/
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>
#include "mxs-pcm.h"
static const struct snd_pcm_hardware snd_mxs_hardware = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_PAUSE |
SNDRV_PCM_INFO_RESUME |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_HALF_DUPLEX,
.period_bytes_min = 32,
.period_bytes_max = 8192,
.periods_min = 1,
.periods_max = 52,
.buffer_bytes_max = 64 * 1024,
.fifo_size = 32,
};
static const struct snd_dmaengine_pcm_config mxs_dmaengine_pcm_config = {
.pcm_hardware = &snd_mxs_hardware,
.prealloc_buffer_size = 64 * 1024,
};
int mxs_pcm_platform_register(struct device *dev)
{
return devm_snd_dmaengine_pcm_register(dev, &mxs_dmaengine_pcm_config,
SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX);
}
EXPORT_SYMBOL_GPL(mxs_pcm_platform_register);
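/*
* Usage sketch (mirroring the call in mxs-saif.c): a CPU DAI driver registers
* this generic dmaengine PCM from its own probe, e.g.
*
*	ret = mxs_pcm_platform_register(&pdev->dev);
*	if (ret)
*		dev_err(&pdev->dev, "register PCM failed: %d\n", ret);
*/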
MODULE_LICENSE("GPL");
| linux-master | sound/soc/mxs/mxs-pcm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2011 Freescale Semiconductor, Inc.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/time.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "mxs-saif.h"
#define MXS_SET_ADDR 0x4
#define MXS_CLR_ADDR 0x8
static struct mxs_saif *mxs_saif[2];
/*
* SAIF differs a little from other SoC DAIs in how its clocking is set up.
*
* For MXS, two SAIF modules are instantiated on-chip.
* Each SAIF has a set of clock pins and both can operate in master
* mode simultaneously if they are connected to different off-chip codecs.
* Also, one of the two SAIFs can master or drive the clock pins while the
* other SAIF, in slave mode, receives clocking from the master SAIF.
* This also means that both SAIFs must operate at the same sample rate.
*
* We abstract this as each saif having a master; the master can be the
* saif itself or the other saif. In the generic saif driver, a saif does
* not need to know about the different clkmux settings. It only needs to
* know who its master is and to operate its master to generate the proper
* clock rate for it.
* The master id is provided by the mach-specific layer according to the
* clkmux setting.
*/
static int mxs_saif_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
int clk_id, unsigned int freq, int dir)
{
struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai);
switch (clk_id) {
case MXS_SAIF_MCLK:
saif->mclk = freq;
break;
default:
return -EINVAL;
}
return 0;
}
/*
* Since a SAIF may work in EXTMASTER mode, i.e. its working BITCLK & LRCLK
* are provided by the other SAIF, we provide an interface here to get its
* master from its master_id.
* Note that the master could be itself.
*/
static inline struct mxs_saif *mxs_saif_get_master(struct mxs_saif *saif)
{
return mxs_saif[saif->master_id];
}
/*
* Set SAIF clock and MCLK
*/
static int mxs_saif_set_clk(struct mxs_saif *saif,
unsigned int mclk,
unsigned int rate)
{
u32 scr;
int ret;
struct mxs_saif *master_saif;
dev_dbg(saif->dev, "mclk %d rate %d\n", mclk, rate);
/* Set master saif to generate proper clock */
master_saif = mxs_saif_get_master(saif);
if (!master_saif)
return -EINVAL;
dev_dbg(saif->dev, "master saif%d\n", master_saif->id);
/* Check whether playback and capture can run simultaneously */
if (master_saif->ongoing && rate != master_saif->cur_rate) {
dev_err(saif->dev,
"can not change clock, master saif%d(rate %d) is ongoing\n",
master_saif->id, master_saif->cur_rate);
return -EINVAL;
}
scr = __raw_readl(master_saif->base + SAIF_CTRL);
scr &= ~BM_SAIF_CTRL_BITCLK_MULT_RATE;
scr &= ~BM_SAIF_CTRL_BITCLK_BASE_RATE;
/*
* Set SAIF clock
*
* The SAIF clock should be either 384*fs or 512*fs.
* If MCLK is used, the SAIF clk ratio needs to match mclk ratio.
* For 256x, 128x, 64x, and 32x sub-rates, set saif clk as 512*fs.
* For 192x, 96x, and 48x sub-rates, set saif clk as 384*fs.
*
* If MCLK is not used, we just set saif clk to 512*fs.
*/
ret = clk_prepare_enable(master_saif->clk);
if (ret)
return ret;
if (master_saif->mclk_in_use) {
switch (mclk / rate) {
case 32:
case 64:
case 128:
case 256:
case 512:
scr &= ~BM_SAIF_CTRL_BITCLK_BASE_RATE;
ret = clk_set_rate(master_saif->clk, 512 * rate);
break;
case 48:
case 96:
case 192:
case 384:
scr |= BM_SAIF_CTRL_BITCLK_BASE_RATE;
ret = clk_set_rate(master_saif->clk, 384 * rate);
break;
default:
/* SAIF MCLK should be a sub-rate of 512x or 384x */
clk_disable_unprepare(master_saif->clk);
return -EINVAL;
}
} else {
ret = clk_set_rate(master_saif->clk, 512 * rate);
scr &= ~BM_SAIF_CTRL_BITCLK_BASE_RATE;
}
clk_disable_unprepare(master_saif->clk);
if (ret)
return ret;
master_saif->cur_rate = rate;
if (!master_saif->mclk_in_use) {
__raw_writel(scr, master_saif->base + SAIF_CTRL);
return 0;
}
/*
* Program the over-sample rate for MCLK output
*
* The available MCLK ratio range is 32x, 48x ... 512x. The sample rate
* can be from 8kHz to 192kHz.
*/
switch (mclk / rate) {
case 32:
scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(4);
break;
case 64:
scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(3);
break;
case 128:
scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(2);
break;
case 256:
scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(1);
break;
case 512:
scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(0);
break;
case 48:
scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(3);
break;
case 96:
scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(2);
break;
case 192:
scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(1);
break;
case 384:
scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(0);
break;
default:
return -EINVAL;
}
__raw_writel(scr, master_saif->base + SAIF_CTRL);
return 0;
}
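/*
* Worked example (illustrative): at a 48 kHz sample rate with a 256*fs MCLK
* (12.288 MHz), mclk / rate is 256, so the SAIF clock is set to 512*fs
* (24.576 MHz) and BITCLK_MULT_RATE(1) selects the 256*fs MCLK output
* (512*fs divided by 2).
*/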
/*
* Put and disable MCLK.
*/
int mxs_saif_put_mclk(unsigned int saif_id)
{
struct mxs_saif *saif = mxs_saif[saif_id];
u32 stat;
if (!saif)
return -EINVAL;
stat = __raw_readl(saif->base + SAIF_STAT);
if (stat & BM_SAIF_STAT_BUSY) {
dev_err(saif->dev, "error: busy\n");
return -EBUSY;
}
clk_disable_unprepare(saif->clk);
/* disable MCLK output */
__raw_writel(BM_SAIF_CTRL_CLKGATE,
saif->base + SAIF_CTRL + MXS_SET_ADDR);
__raw_writel(BM_SAIF_CTRL_RUN,
saif->base + SAIF_CTRL + MXS_CLR_ADDR);
saif->mclk_in_use = 0;
return 0;
}
EXPORT_SYMBOL_GPL(mxs_saif_put_mclk);
/*
* Get MCLK and set clock rate, then enable it
*
* This interface is used for codecs who are using MCLK provided
* by saif.
*/
int mxs_saif_get_mclk(unsigned int saif_id, unsigned int mclk,
unsigned int rate)
{
struct mxs_saif *saif = mxs_saif[saif_id];
u32 stat;
int ret;
struct mxs_saif *master_saif;
if (!saif)
return -EINVAL;
/* Clear Reset */
__raw_writel(BM_SAIF_CTRL_SFTRST,
saif->base + SAIF_CTRL + MXS_CLR_ADDR);
/* FIXME: need clear clk gate for register r/w */
__raw_writel(BM_SAIF_CTRL_CLKGATE,
saif->base + SAIF_CTRL + MXS_CLR_ADDR);
master_saif = mxs_saif_get_master(saif);
if (saif != master_saif) {
dev_err(saif->dev, "can not get mclk from a non-master saif\n");
return -EINVAL;
}
stat = __raw_readl(saif->base + SAIF_STAT);
if (stat & BM_SAIF_STAT_BUSY) {
dev_err(saif->dev, "error: busy\n");
return -EBUSY;
}
saif->mclk_in_use = 1;
ret = mxs_saif_set_clk(saif, mclk, rate);
if (ret)
return ret;
ret = clk_prepare_enable(saif->clk);
if (ret)
return ret;
/* enable MCLK output */
__raw_writel(BM_SAIF_CTRL_RUN,
saif->base + SAIF_CTRL + MXS_SET_ADDR);
return 0;
}
EXPORT_SYMBOL_GPL(mxs_saif_get_mclk);
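/*
* Usage sketch (mirroring mxs-sgtl5000.c): the machine driver requests saif0's
* MCLK before registering the card and releases it again on remove, e.g.
*
*	ret = mxs_saif_get_mclk(0, 44100 * 256, 44100);
*	...
*	mxs_saif_put_mclk(0);
*/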
/*
* SAIF DAI format configuration.
* Should only be called when port is inactive.
*/
static int mxs_saif_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
{
u32 scr, stat;
u32 scr0;
struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai);
stat = __raw_readl(saif->base + SAIF_STAT);
if (stat & BM_SAIF_STAT_BUSY) {
dev_err(cpu_dai->dev, "error: busy\n");
return -EBUSY;
}
/* If SAIF1 is configured as slave, the clk gate needs to be cleared
* before the register can be written.
*/
if (saif->id != saif->master_id) {
__raw_writel(BM_SAIF_CTRL_SFTRST,
saif->base + SAIF_CTRL + MXS_CLR_ADDR);
__raw_writel(BM_SAIF_CTRL_CLKGATE,
saif->base + SAIF_CTRL + MXS_CLR_ADDR);
}
scr0 = __raw_readl(saif->base + SAIF_CTRL);
scr0 = scr0 & ~BM_SAIF_CTRL_BITCLK_EDGE & ~BM_SAIF_CTRL_LRCLK_POLARITY \
& ~BM_SAIF_CTRL_JUSTIFY & ~BM_SAIF_CTRL_DELAY;
scr = 0;
/* DAI mode */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
/* data frame low 1clk before data */
scr |= BM_SAIF_CTRL_DELAY;
scr &= ~BM_SAIF_CTRL_LRCLK_POLARITY;
break;
case SND_SOC_DAIFMT_LEFT_J:
/* data frame high with data */
scr &= ~BM_SAIF_CTRL_DELAY;
scr &= ~BM_SAIF_CTRL_LRCLK_POLARITY;
scr &= ~BM_SAIF_CTRL_JUSTIFY;
break;
default:
return -EINVAL;
}
/* DAI clock inversion */
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_IB_IF:
scr |= BM_SAIF_CTRL_BITCLK_EDGE;
scr |= BM_SAIF_CTRL_LRCLK_POLARITY;
break;
case SND_SOC_DAIFMT_IB_NF:
scr |= BM_SAIF_CTRL_BITCLK_EDGE;
scr &= ~BM_SAIF_CTRL_LRCLK_POLARITY;
break;
case SND_SOC_DAIFMT_NB_IF:
scr &= ~BM_SAIF_CTRL_BITCLK_EDGE;
scr |= BM_SAIF_CTRL_LRCLK_POLARITY;
break;
case SND_SOC_DAIFMT_NB_NF:
scr &= ~BM_SAIF_CTRL_BITCLK_EDGE;
scr &= ~BM_SAIF_CTRL_LRCLK_POLARITY;
break;
}
/*
* Note: We only support master mode since SAIF TX can only
* work as master.
* Here the master is relative to the codec side.
* Saif internally could be a slave when working in EXTMASTER mode.
* We just hide this from the machine driver.
*/
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
case SND_SOC_DAIFMT_BP_FP:
if (saif->id == saif->master_id)
scr &= ~BM_SAIF_CTRL_SLAVE_MODE;
else
scr |= BM_SAIF_CTRL_SLAVE_MODE;
__raw_writel(scr | scr0, saif->base + SAIF_CTRL);
break;
default:
return -EINVAL;
}
return 0;
}
static int mxs_saif_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai);
int ret;
/* clear error status to 0 for each re-open */
saif->fifo_underrun = 0;
saif->fifo_overrun = 0;
/* Clear Reset for normal operations */
__raw_writel(BM_SAIF_CTRL_SFTRST,
saif->base + SAIF_CTRL + MXS_CLR_ADDR);
/* clear clock gate */
__raw_writel(BM_SAIF_CTRL_CLKGATE,
saif->base + SAIF_CTRL + MXS_CLR_ADDR);
ret = clk_prepare(saif->clk);
if (ret)
return ret;
return 0;
}
static void mxs_saif_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai);
clk_unprepare(saif->clk);
}
/*
* Should only be called when the port is inactive,
* although it can be called multiple times by upper layers.
*/
static int mxs_saif_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *cpu_dai)
{
struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai);
struct mxs_saif *master_saif;
u32 scr, stat;
int ret;
master_saif = mxs_saif_get_master(saif);
if (!master_saif)
return -EINVAL;
/* mclk should already be set */
if (!saif->mclk && saif->mclk_in_use) {
dev_err(cpu_dai->dev, "set mclk first\n");
return -EINVAL;
}
stat = __raw_readl(saif->base + SAIF_STAT);
if (!saif->mclk_in_use && (stat & BM_SAIF_STAT_BUSY)) {
dev_err(cpu_dai->dev, "error: busy\n");
return -EBUSY;
}
/*
* Set saif clk based on sample rate.
* If mclk is used, we also set mclk, if not, saif->mclk is
* default 0, means not used.
*/
ret = mxs_saif_set_clk(saif, saif->mclk, params_rate(params));
if (ret) {
dev_err(cpu_dai->dev, "unable to get proper clk\n");
return ret;
}
if (saif != master_saif) {
/*
* Set an initial clock rate for the saif internal logic to work
* properly. This is important when working in EXTMASTER mode
* that uses the other saif's BITCLK&LRCLK but it still needs a
* basic clock which should be fast enough for the internal
* logic.
*/
ret = clk_enable(saif->clk);
if (ret)
return ret;
ret = clk_set_rate(saif->clk, 24000000);
clk_disable(saif->clk);
if (ret)
return ret;
ret = clk_prepare(master_saif->clk);
if (ret)
return ret;
}
scr = __raw_readl(saif->base + SAIF_CTRL);
scr &= ~BM_SAIF_CTRL_WORD_LENGTH;
scr &= ~BM_SAIF_CTRL_BITCLK_48XFS_ENABLE;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
scr |= BF_SAIF_CTRL_WORD_LENGTH(0);
break;
case SNDRV_PCM_FORMAT_S20_3LE:
scr |= BF_SAIF_CTRL_WORD_LENGTH(4);
scr |= BM_SAIF_CTRL_BITCLK_48XFS_ENABLE;
break;
case SNDRV_PCM_FORMAT_S24_LE:
scr |= BF_SAIF_CTRL_WORD_LENGTH(8);
scr |= BM_SAIF_CTRL_BITCLK_48XFS_ENABLE;
break;
default:
return -EINVAL;
}
/* Tx/Rx config */
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
/* enable TX mode */
scr &= ~BM_SAIF_CTRL_READ_MODE;
} else {
/* enable RX mode */
scr |= BM_SAIF_CTRL_READ_MODE;
}
__raw_writel(scr, saif->base + SAIF_CTRL);
return 0;
}
static int mxs_saif_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai);
/* enable FIFO error irqs */
__raw_writel(BM_SAIF_CTRL_FIFO_ERROR_IRQ_EN,
saif->base + SAIF_CTRL + MXS_SET_ADDR);
return 0;
}
static int mxs_saif_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *cpu_dai)
{
struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai);
struct mxs_saif *master_saif;
u32 delay;
int ret;
master_saif = mxs_saif_get_master(saif);
if (!master_saif)
return -EINVAL;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
if (saif->state == MXS_SAIF_STATE_RUNNING)
return 0;
dev_dbg(cpu_dai->dev, "start\n");
ret = clk_enable(master_saif->clk);
if (ret) {
dev_err(saif->dev, "Failed to enable master clock\n");
return ret;
}
/*
* If the saif's master is not itself, we also need to enable
* itself clk for its internal basic logic to work.
*/
if (saif != master_saif) {
ret = clk_enable(saif->clk);
if (ret) {
dev_err(saif->dev, "Failed to enable master clock\n");
clk_disable(master_saif->clk);
return ret;
}
__raw_writel(BM_SAIF_CTRL_RUN,
saif->base + SAIF_CTRL + MXS_SET_ADDR);
}
if (!master_saif->mclk_in_use)
__raw_writel(BM_SAIF_CTRL_RUN,
master_saif->base + SAIF_CTRL + MXS_SET_ADDR);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
/*
* write data to saif data register to trigger
* the transfer.
* For 24-bit format the 32-bit FIFO register stores
* only one channel, so we need to write twice.
* This is also safe for the other non 24-bit formats.
*/
__raw_writel(0, saif->base + SAIF_DATA);
__raw_writel(0, saif->base + SAIF_DATA);
} else {
/*
* read data from saif data register to trigger
* the receive.
* For 24-bit format the 32-bit FIFO register stores
* only one channel, so we need to read twice.
* This is also safe for the other non 24-bit formats.
*/
__raw_readl(saif->base + SAIF_DATA);
__raw_readl(saif->base + SAIF_DATA);
}
master_saif->ongoing = 1;
saif->state = MXS_SAIF_STATE_RUNNING;
dev_dbg(saif->dev, "CTRL 0x%x STAT 0x%x\n",
__raw_readl(saif->base + SAIF_CTRL),
__raw_readl(saif->base + SAIF_STAT));
dev_dbg(master_saif->dev, "CTRL 0x%x STAT 0x%x\n",
__raw_readl(master_saif->base + SAIF_CTRL),
__raw_readl(master_saif->base + SAIF_STAT));
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
if (saif->state == MXS_SAIF_STATE_STOPPED)
return 0;
dev_dbg(cpu_dai->dev, "stop\n");
/* wait a while for the current sample to complete */
delay = USEC_PER_SEC / master_saif->cur_rate;
if (!master_saif->mclk_in_use) {
__raw_writel(BM_SAIF_CTRL_RUN,
master_saif->base + SAIF_CTRL + MXS_CLR_ADDR);
udelay(delay);
}
clk_disable(master_saif->clk);
if (saif != master_saif) {
__raw_writel(BM_SAIF_CTRL_RUN,
saif->base + SAIF_CTRL + MXS_CLR_ADDR);
udelay(delay);
clk_disable(saif->clk);
}
master_saif->ongoing = 0;
saif->state = MXS_SAIF_STATE_STOPPED;
break;
default:
return -EINVAL;
}
return 0;
}
#define MXS_SAIF_RATES SNDRV_PCM_RATE_8000_192000
#define MXS_SAIF_FORMATS \
(SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
SNDRV_PCM_FMTBIT_S24_LE)
static const struct snd_soc_dai_ops mxs_saif_dai_ops = {
.startup = mxs_saif_startup,
.shutdown = mxs_saif_shutdown,
.trigger = mxs_saif_trigger,
.prepare = mxs_saif_prepare,
.hw_params = mxs_saif_hw_params,
.set_sysclk = mxs_saif_set_dai_sysclk,
.set_fmt = mxs_saif_set_dai_fmt,
};
static struct snd_soc_dai_driver mxs_saif_dai = {
.name = "mxs-saif",
.playback = {
.channels_min = 2,
.channels_max = 2,
.rates = MXS_SAIF_RATES,
.formats = MXS_SAIF_FORMATS,
},
.capture = {
.channels_min = 2,
.channels_max = 2,
.rates = MXS_SAIF_RATES,
.formats = MXS_SAIF_FORMATS,
},
.ops = &mxs_saif_dai_ops,
};
static const struct snd_soc_component_driver mxs_saif_component = {
.name = "mxs-saif",
.legacy_dai_naming = 1,
};
static irqreturn_t mxs_saif_irq(int irq, void *dev_id)
{
struct mxs_saif *saif = dev_id;
unsigned int stat;
stat = __raw_readl(saif->base + SAIF_STAT);
if (!(stat & (BM_SAIF_STAT_FIFO_UNDERFLOW_IRQ |
BM_SAIF_STAT_FIFO_OVERFLOW_IRQ)))
return IRQ_NONE;
if (stat & BM_SAIF_STAT_FIFO_UNDERFLOW_IRQ) {
dev_dbg(saif->dev, "underrun!!! %d\n", ++saif->fifo_underrun);
__raw_writel(BM_SAIF_STAT_FIFO_UNDERFLOW_IRQ,
saif->base + SAIF_STAT + MXS_CLR_ADDR);
}
if (stat & BM_SAIF_STAT_FIFO_OVERFLOW_IRQ) {
dev_dbg(saif->dev, "overrun!!! %d\n", ++saif->fifo_overrun);
__raw_writel(BM_SAIF_STAT_FIFO_OVERFLOW_IRQ,
saif->base + SAIF_STAT + MXS_CLR_ADDR);
}
dev_dbg(saif->dev, "SAIF_CTRL %x SAIF_STAT %x\n",
__raw_readl(saif->base + SAIF_CTRL),
__raw_readl(saif->base + SAIF_STAT));
return IRQ_HANDLED;
}
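/*
* Register the BITCLK_MULT_RATE field of SAIF_CTRL as a divider clock named
* "mxs_saif_mclk" and expose it through of_clk_add_provider(), so other
* device tree nodes (typically the codec) can reference the SAIF-generated
* MCLK.
*/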
static int mxs_saif_mclk_init(struct platform_device *pdev)
{
struct mxs_saif *saif = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node;
struct clk *clk;
int ret;
clk = clk_register_divider(&pdev->dev, "mxs_saif_mclk",
__clk_get_name(saif->clk), 0,
saif->base + SAIF_CTRL,
BP_SAIF_CTRL_BITCLK_MULT_RATE, 3,
0, NULL);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
if (ret == -EEXIST)
return 0;
dev_err(&pdev->dev, "failed to register mclk: %d\n", ret);
return PTR_ERR(clk);
}
ret = of_clk_add_provider(np, of_clk_src_simple_get, clk);
if (ret)
return ret;
return 0;
}
static int mxs_saif_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mxs_saif *saif;
int irq, ret;
struct device_node *master;
saif = devm_kzalloc(&pdev->dev, sizeof(*saif), GFP_KERNEL);
if (!saif)
return -ENOMEM;
ret = of_alias_get_id(np, "saif");
if (ret < 0)
return ret;
else
saif->id = ret;
if (saif->id >= ARRAY_SIZE(mxs_saif)) {
dev_err(&pdev->dev, "get wrong saif id\n");
return -EINVAL;
}
/*
* If there is no "fsl,saif-master" phandle, it's a saif
* master. Otherwise, it's a slave and its phandle points
* to the master.
*/
master = of_parse_phandle(np, "fsl,saif-master", 0);
if (!master) {
saif->master_id = saif->id;
} else {
ret = of_alias_get_id(master, "saif");
of_node_put(master);
if (ret < 0)
return ret;
else
saif->master_id = ret;
if (saif->master_id >= ARRAY_SIZE(mxs_saif)) {
dev_err(&pdev->dev, "get wrong master id\n");
return -EINVAL;
}
}
mxs_saif[saif->id] = saif;
saif->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(saif->clk)) {
ret = PTR_ERR(saif->clk);
dev_err(&pdev->dev, "Cannot get the clock: %d\n",
ret);
return ret;
}
saif->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(saif->base))
return PTR_ERR(saif->base);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
saif->dev = &pdev->dev;
ret = devm_request_irq(&pdev->dev, irq, mxs_saif_irq, 0,
dev_name(&pdev->dev), saif);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
return ret;
}
platform_set_drvdata(pdev, saif);
/* We only support saif0 being tx and clock master */
if (saif->id == 0) {
ret = mxs_saif_mclk_init(pdev);
if (ret)
dev_warn(&pdev->dev, "failed to init clocks\n");
}
ret = devm_snd_soc_register_component(&pdev->dev, &mxs_saif_component,
&mxs_saif_dai, 1);
if (ret) {
dev_err(&pdev->dev, "register DAI failed\n");
return ret;
}
ret = mxs_pcm_platform_register(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "register PCM failed: %d\n", ret);
return ret;
}
return 0;
}
static const struct of_device_id mxs_saif_dt_ids[] = {
{ .compatible = "fsl,imx28-saif", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_saif_dt_ids);
static struct platform_driver mxs_saif_driver = {
.probe = mxs_saif_probe,
.driver = {
.name = "mxs-saif",
.of_match_table = mxs_saif_dt_ids,
},
};
module_platform_driver(mxs_saif_driver);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("MXS ASoC SAIF driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-saif");
| linux-master | sound/soc/mxs/mxs-saif.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2011 Freescale Semiconductor, Inc.
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/jack.h>
#include <sound/soc-dapm.h>
#include "../codecs/sgtl5000.h"
#include "mxs-saif.h"
static int mxs_sgtl5000_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
unsigned int rate = params_rate(params);
u32 mclk;
int ret;
/* The sgtl5000 does not support a 512*fs SYSCLK at a 96000 Hz sample rate */
switch (rate) {
case 96000:
mclk = 256 * rate;
break;
default:
mclk = 512 * rate;
break;
}
/* Set SGTL5000's SYSCLK (provided by SAIF MCLK) */
ret = snd_soc_dai_set_sysclk(codec_dai, SGTL5000_SYSCLK, mclk, 0);
if (ret) {
dev_err(codec_dai->dev, "Failed to set sysclk to %u.%03uMHz\n",
mclk / 1000000, mclk / 1000 % 1000);
return ret;
}
/* The SAIF MCLK should be the same as SGTL5000_SYSCLK */
ret = snd_soc_dai_set_sysclk(cpu_dai, MXS_SAIF_MCLK, mclk, 0);
if (ret) {
dev_err(cpu_dai->dev, "Failed to set sysclk to %u.%03uMHz\n",
mclk / 1000000, mclk / 1000 % 1000);
return ret;
}
return 0;
}
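/*
* Illustrative numbers: at 44.1 kHz this requests a 512*fs SYSCLK of
* 22.5792 MHz, while at 96 kHz it drops to 256*fs, i.e. 24.576 MHz, because
* the sgtl5000 cannot take 512*fs at that rate.
*/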
static const struct snd_soc_ops mxs_sgtl5000_hifi_ops = {
.hw_params = mxs_sgtl5000_hw_params,
};
#define MXS_SGTL5000_DAI_FMT (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | \
SND_SOC_DAIFMT_CBS_CFS)
SND_SOC_DAILINK_DEFS(hifi_tx,
DAILINK_COMP_ARRAY(COMP_EMPTY()),
DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "sgtl5000")),
DAILINK_COMP_ARRAY(COMP_EMPTY()));
SND_SOC_DAILINK_DEFS(hifi_rx,
DAILINK_COMP_ARRAY(COMP_EMPTY()),
DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "sgtl5000")),
DAILINK_COMP_ARRAY(COMP_EMPTY()));
static struct snd_soc_dai_link mxs_sgtl5000_dai[] = {
{
.name = "HiFi Tx",
.stream_name = "HiFi Playback",
.dai_fmt = MXS_SGTL5000_DAI_FMT,
.ops = &mxs_sgtl5000_hifi_ops,
.playback_only = true,
SND_SOC_DAILINK_REG(hifi_tx),
}, {
.name = "HiFi Rx",
.stream_name = "HiFi Capture",
.dai_fmt = MXS_SGTL5000_DAI_FMT,
.ops = &mxs_sgtl5000_hifi_ops,
.capture_only = true,
SND_SOC_DAILINK_REG(hifi_rx),
},
};
static const struct snd_soc_dapm_widget mxs_sgtl5000_dapm_widgets[] = {
SND_SOC_DAPM_MIC("Mic Jack", NULL),
SND_SOC_DAPM_LINE("Line In Jack", NULL),
SND_SOC_DAPM_HP("Headphone Jack", NULL),
SND_SOC_DAPM_SPK("Line Out Jack", NULL),
SND_SOC_DAPM_SPK("Ext Spk", NULL),
};
static struct snd_soc_card mxs_sgtl5000 = {
.name = "mxs_sgtl5000",
.owner = THIS_MODULE,
.dai_link = mxs_sgtl5000_dai,
.num_links = ARRAY_SIZE(mxs_sgtl5000_dai),
};
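/*
* Illustrative device tree fragment showing the properties parsed by the
* probe below (the node labels &saif0, &saif1 and &sgtl5000 are assumptions,
* not mandated by this file):
*
*	sound {
*		compatible = "fsl,mxs-audio-sgtl5000";
*		saif-controllers = <&saif0 &saif1>;
*		audio-codec = <&sgtl5000>;
*	};
*/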
static int mxs_sgtl5000_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &mxs_sgtl5000;
int ret, i;
struct device_node *np = pdev->dev.of_node;
struct device_node *saif_np[2], *codec_np;
saif_np[0] = of_parse_phandle(np, "saif-controllers", 0);
saif_np[1] = of_parse_phandle(np, "saif-controllers", 1);
codec_np = of_parse_phandle(np, "audio-codec", 0);
if (!saif_np[0] || !saif_np[1] || !codec_np) {
dev_err(&pdev->dev, "phandle missing or invalid\n");
of_node_put(codec_np);
of_node_put(saif_np[0]);
of_node_put(saif_np[1]);
return -EINVAL;
}
for (i = 0; i < 2; i++) {
mxs_sgtl5000_dai[i].codecs->name = NULL;
mxs_sgtl5000_dai[i].codecs->of_node = codec_np;
mxs_sgtl5000_dai[i].cpus->dai_name = NULL;
mxs_sgtl5000_dai[i].cpus->of_node = saif_np[i];
mxs_sgtl5000_dai[i].platforms->name = NULL;
mxs_sgtl5000_dai[i].platforms->of_node = saif_np[i];
}
of_node_put(codec_np);
of_node_put(saif_np[0]);
of_node_put(saif_np[1]);
/*
* Set an initial clock (11.2896 MHz) for the sgtl5000 initialization (i2c r/w).
* The sgtl5000 SYSCLK is derived from the saif0 MCLK, and its range
* should be >= 8 MHz and <= 27 MHz.
*/
ret = mxs_saif_get_mclk(0, 44100 * 256, 44100);
if (ret) {
dev_err(&pdev->dev, "failed to get mclk\n");
return ret;
}
card->dev = &pdev->dev;
if (of_property_present(np, "audio-routing")) {
card->dapm_widgets = mxs_sgtl5000_dapm_widgets;
card->num_dapm_widgets = ARRAY_SIZE(mxs_sgtl5000_dapm_widgets);
ret = snd_soc_of_parse_audio_routing(card, "audio-routing");
if (ret) {
dev_err(&pdev->dev, "failed to parse audio-routing (%d)\n",
ret);
return ret;
}
}
ret = devm_snd_soc_register_card(&pdev->dev, card);
if (ret)
return dev_err_probe(&pdev->dev, ret, "snd_soc_register_card failed\n");
return 0;
}
static void mxs_sgtl5000_remove(struct platform_device *pdev)
{
mxs_saif_put_mclk(0);
}
static const struct of_device_id mxs_sgtl5000_dt_ids[] = {
{ .compatible = "fsl,mxs-audio-sgtl5000", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_sgtl5000_dt_ids);
static struct platform_driver mxs_sgtl5000_audio_driver = {
.driver = {
.name = "mxs-sgtl5000",
.of_match_table = mxs_sgtl5000_dt_ids,
},
.probe = mxs_sgtl5000_probe,
.remove_new = mxs_sgtl5000_remove,
};
module_platform_driver(mxs_sgtl5000_audio_driver);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("MXS ALSA SoC Machine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-sgtl5000");
| linux-master | sound/soc/mxs/mxs-sgtl5000.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Spreadtrum Communications Inc.
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dma/sprd-dma.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/compress_driver.h>
#include "sprd-pcm-dma.h"
#define SPRD_COMPR_DMA_CHANS 2
/* Default values if userspace does not set */
#define SPRD_COMPR_MIN_FRAGMENT_SIZE SZ_8K
#define SPRD_COMPR_MAX_FRAGMENT_SIZE SZ_128K
#define SPRD_COMPR_MIN_NUM_FRAGMENTS 4
#define SPRD_COMPR_MAX_NUM_FRAGMENTS 64
/* DSP FIFO size */
#define SPRD_COMPR_MCDT_EMPTY_WMK 0
#define SPRD_COMPR_MCDT_FIFO_SIZE 512
/* Stage 0 IRAM buffer size definition */
#define SPRD_COMPR_IRAM_BUF_SIZE SZ_32K
#define SPRD_COMPR_IRAM_INFO_SIZE (sizeof(struct sprd_compr_playinfo))
#define SPRD_COMPR_IRAM_LINKLIST_SIZE (1024 - SPRD_COMPR_IRAM_INFO_SIZE)
#define SPRD_COMPR_IRAM_SIZE (SPRD_COMPR_IRAM_BUF_SIZE + \
SPRD_COMPR_IRAM_INFO_SIZE + \
SPRD_COMPR_IRAM_LINKLIST_SIZE)
/* Stage 1 DDR buffer size definition */
#define SPRD_COMPR_AREA_BUF_SIZE SZ_2M
#define SPRD_COMPR_AREA_LINKLIST_SIZE 1024
#define SPRD_COMPR_AREA_SIZE (SPRD_COMPR_AREA_BUF_SIZE + \
SPRD_COMPR_AREA_LINKLIST_SIZE)
struct sprd_compr_dma {
struct dma_chan *chan;
struct dma_async_tx_descriptor *desc;
dma_cookie_t cookie;
dma_addr_t phys;
void *virt;
int trans_len;
};
/*
* The Spreadtrum audio compress offload mode uses a 2-stage DMA transfer to
* save power. That means we request 2 DMA channels: one as the source channel
* and another one as the destination channel. Once the source channel's
* transaction is done, it triggers the destination channel's transaction
* automatically through a hardware signal.
*
* For the 2-stage DMA transfer, we allocate 2 buffers: an IRAM buffer (always
* powered on) and a DDR buffer. The source channel transfers data from the
* IRAM buffer to the DSP FIFO for decoding/encoding; once the IRAM buffer has
* been drained by a completed transfer, the destination channel starts to
* refill it from the DDR buffer.
*
* Since the DSP FIFO is only 512 bytes, the IRAM buffer is allocated as 32K,
* and the DDR buffer is larger, at 2M. That means the AP system only needs to
* wake up to move data from DDR to IRAM once 32K of IRAM data has been
* consumed; the rest of the time the AP system can stay suspended to save
* power.
*/
struct sprd_compr_stream {
struct snd_compr_stream *cstream;
struct sprd_compr_ops *compr_ops;
struct sprd_compr_dma dma[SPRD_COMPR_DMA_CHANS];
/* DMA engine channel number */
int num_channels;
/* Stage 0 IRAM buffer */
struct snd_dma_buffer iram_buffer;
/* Stage 1 DDR buffer */
struct snd_dma_buffer compr_buffer;
/* DSP play information IRAM buffer */
dma_addr_t info_phys;
void *info_area;
int info_size;
/* Data size copied to IRAM buffer */
int copied_total;
/* Total received data size from userspace */
int received_total;
/* Stage 0 IRAM buffer received data size */
int received_stage0;
/* Stage 1 DDR buffer received data size */
int received_stage1;
/* Stage 1 DDR buffer pointer */
int stage1_pointer;
};
static int sprd_platform_compr_trigger(struct snd_soc_component *component,
struct snd_compr_stream *cstream,
int cmd);
static void sprd_platform_compr_drain_notify(void *arg)
{
struct snd_compr_stream *cstream = arg;
struct snd_compr_runtime *runtime = cstream->runtime;
struct sprd_compr_stream *stream = runtime->private_data;
memset(stream->info_area, 0, sizeof(struct sprd_compr_playinfo));
snd_compr_drain_notify(cstream);
}
static void sprd_platform_compr_dma_complete(void *data)
{
struct snd_compr_stream *cstream = data;
struct snd_compr_runtime *runtime = cstream->runtime;
struct sprd_compr_stream *stream = runtime->private_data;
struct sprd_compr_dma *dma = &stream->dma[1];
/* Update data size copied to IRAM buffer */
stream->copied_total += dma->trans_len;
if (stream->copied_total > stream->received_total)
stream->copied_total = stream->received_total;
snd_compr_fragment_elapsed(cstream);
}
static int sprd_platform_compr_dma_config(struct snd_soc_component *component,
struct snd_compr_stream *cstream,
struct snd_compr_params *params,
int channel)
{
struct snd_compr_runtime *runtime = cstream->runtime;
struct sprd_compr_stream *stream = runtime->private_data;
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct device *dev = component->dev;
struct sprd_compr_data *data = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
struct sprd_pcm_dma_params *dma_params = data->dma_params;
struct sprd_compr_dma *dma = &stream->dma[channel];
struct dma_slave_config config = { };
struct sprd_dma_linklist link = { };
enum dma_transfer_direction dir;
struct scatterlist *sg, *sgt;
enum dma_slave_buswidth bus_width;
int period, period_cnt, sg_num = 2;
dma_addr_t src_addr, dst_addr;
unsigned long flags;
int ret, j;
if (!dma_params) {
dev_err(dev, "no dma parameters setting\n");
return -EINVAL;
}
dma->chan = dma_request_slave_channel(dev,
dma_params->chan_name[channel]);
if (!dma->chan) {
dev_err(dev, "failed to request dma channel\n");
return -ENODEV;
}
sgt = sg = devm_kcalloc(dev, sg_num, sizeof(*sg), GFP_KERNEL);
if (!sg) {
ret = -ENOMEM;
goto sg_err;
}
switch (channel) {
case 0:
bus_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
period = (SPRD_COMPR_MCDT_FIFO_SIZE - SPRD_COMPR_MCDT_EMPTY_WMK) * 4;
period_cnt = params->buffer.fragment_size / period;
src_addr = stream->iram_buffer.addr;
dst_addr = dma_params->dev_phys[channel];
flags = SPRD_DMA_FLAGS(SPRD_DMA_SRC_CHN1,
SPRD_DMA_TRANS_DONE_TRG,
SPRD_DMA_FRAG_REQ,
SPRD_DMA_TRANS_INT);
break;
case 1:
bus_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
period = params->buffer.fragment_size;
period_cnt = params->buffer.fragments;
src_addr = stream->compr_buffer.addr;
dst_addr = stream->iram_buffer.addr;
flags = SPRD_DMA_FLAGS(SPRD_DMA_DST_CHN1,
SPRD_DMA_TRANS_DONE_TRG,
SPRD_DMA_FRAG_REQ,
SPRD_DMA_TRANS_INT);
break;
default:
ret = -EINVAL;
goto config_err;
}
dma->trans_len = period * period_cnt;
config.src_maxburst = period;
config.src_addr_width = bus_width;
config.dst_addr_width = bus_width;
if (cstream->direction == SND_COMPRESS_PLAYBACK) {
config.src_addr = src_addr;
config.dst_addr = dst_addr;
dir = DMA_MEM_TO_DEV;
} else {
config.src_addr = dst_addr;
config.dst_addr = src_addr;
dir = DMA_DEV_TO_MEM;
}
sg_init_table(sgt, sg_num);
for (j = 0; j < sg_num; j++, sgt++) {
sg_dma_len(sgt) = dma->trans_len;
sg_dma_address(sgt) = dst_addr;
}
/*
* Configure the link-list address for the DMA engine link-list
* mode.
*/
link.virt_addr = (unsigned long)dma->virt;
link.phy_addr = dma->phys;
ret = dmaengine_slave_config(dma->chan, &config);
if (ret) {
dev_err(dev,
"failed to set slave configuration: %d\n", ret);
goto config_err;
}
/*
* We configure the DMA request mode, interrupt mode, channel
* mode and channel trigger mode by the flags.
*/
dma->desc = dma->chan->device->device_prep_slave_sg(dma->chan, sg,
sg_num, dir,
flags, &link);
if (!dma->desc) {
dev_err(dev, "failed to prepare slave sg\n");
ret = -ENOMEM;
goto config_err;
}
/* Only channel 1 transfer can wake up the AP system. */
if (!params->no_wake_mode && channel == 1) {
dma->desc->callback = sprd_platform_compr_dma_complete;
dma->desc->callback_param = cstream;
}
devm_kfree(dev, sg);
return 0;
config_err:
devm_kfree(dev, sg);
sg_err:
dma_release_channel(dma->chan);
return ret;
}
static int sprd_platform_compr_set_params(struct snd_soc_component *component,
struct snd_compr_stream *cstream,
struct snd_compr_params *params)
{
struct snd_compr_runtime *runtime = cstream->runtime;
struct sprd_compr_stream *stream = runtime->private_data;
struct device *dev = component->dev;
struct sprd_compr_params compr_params = { };
int ret;
/*
* Configure the DMA engine 2-stage transfer mode. Channel 1 is set as the
* destination channel and channel 0 as the source channel, which means that
* once the source channel's transaction is done, it triggers the
* destination channel's transaction automatically.
*/
ret = sprd_platform_compr_dma_config(component, cstream, params, 1);
if (ret) {
dev_err(dev, "failed to config stage 1 DMA: %d\n", ret);
return ret;
}
ret = sprd_platform_compr_dma_config(component, cstream, params, 0);
if (ret) {
dev_err(dev, "failed to config stage 0 DMA: %d\n", ret);
goto config_err;
}
compr_params.direction = cstream->direction;
compr_params.sample_rate = params->codec.sample_rate;
compr_params.channels = stream->num_channels;
compr_params.info_phys = stream->info_phys;
compr_params.info_size = stream->info_size;
compr_params.rate = params->codec.bit_rate;
compr_params.format = params->codec.id;
ret = stream->compr_ops->set_params(cstream->direction, &compr_params);
if (ret) {
dev_err(dev, "failed to set parameters: %d\n", ret);
goto params_err;
}
return 0;
params_err:
dma_release_channel(stream->dma[0].chan);
config_err:
dma_release_channel(stream->dma[1].chan);
return ret;
}
static int sprd_platform_compr_open(struct snd_soc_component *component,
struct snd_compr_stream *cstream)
{
struct snd_compr_runtime *runtime = cstream->runtime;
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct device *dev = component->dev;
struct sprd_compr_data *data = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
struct sprd_compr_stream *stream;
struct sprd_compr_callback cb;
int stream_id = cstream->direction, ret;
ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
return ret;
stream = devm_kzalloc(dev, sizeof(*stream), GFP_KERNEL);
if (!stream)
return -ENOMEM;
stream->cstream = cstream;
stream->num_channels = 2;
stream->compr_ops = data->ops;
/*
* Allocate the stage 0 IRAM buffer, which also holds the DMA 0
* link-list configuration and the DSP play-information area.
*/
ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_IRAM, dev,
SPRD_COMPR_IRAM_SIZE, &stream->iram_buffer);
if (ret < 0)
goto err_iram;
/* Used to store the link-list configuration for DMA 0. */
stream->dma[0].virt = stream->iram_buffer.area + SPRD_COMPR_IRAM_SIZE;
stream->dma[0].phys = stream->iram_buffer.addr + SPRD_COMPR_IRAM_SIZE;
/* Used to read back the DSP's current data offset (play information). */
stream->info_phys = stream->iram_buffer.addr + SPRD_COMPR_IRAM_SIZE +
SPRD_COMPR_IRAM_LINKLIST_SIZE;
stream->info_area = stream->iram_buffer.area + SPRD_COMPR_IRAM_SIZE +
SPRD_COMPR_IRAM_LINKLIST_SIZE;
stream->info_size = SPRD_COMPR_IRAM_INFO_SIZE;
/*
* Allocate the stage 1 DDR buffer, which also holds the DMA 1 link-list
* configuration.
*/
ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev,
SPRD_COMPR_AREA_SIZE, &stream->compr_buffer);
if (ret < 0)
goto err_compr;
/* Used to store the link-list configuration for DMA 1. */
stream->dma[1].virt = stream->compr_buffer.area + SPRD_COMPR_AREA_SIZE;
stream->dma[1].phys = stream->compr_buffer.addr + SPRD_COMPR_AREA_SIZE;
cb.drain_notify = sprd_platform_compr_drain_notify;
cb.drain_data = cstream;
ret = stream->compr_ops->open(stream_id, &cb);
if (ret) {
dev_err(dev, "failed to open compress platform: %d\n", ret);
goto err_open;
}
runtime->private_data = stream;
return 0;
err_open:
snd_dma_free_pages(&stream->compr_buffer);
err_compr:
snd_dma_free_pages(&stream->iram_buffer);
err_iram:
devm_kfree(dev, stream);
return ret;
}
static int sprd_platform_compr_free(struct snd_soc_component *component,
struct snd_compr_stream *cstream)
{
struct snd_compr_runtime *runtime = cstream->runtime;
struct sprd_compr_stream *stream = runtime->private_data;
struct device *dev = component->dev;
int stream_id = cstream->direction, i;
for (i = 0; i < stream->num_channels; i++) {
struct sprd_compr_dma *dma = &stream->dma[i];
if (dma->chan) {
dma_release_channel(dma->chan);
dma->chan = NULL;
}
}
snd_dma_free_pages(&stream->compr_buffer);
snd_dma_free_pages(&stream->iram_buffer);
stream->compr_ops->close(stream_id);
devm_kfree(dev, stream);
return 0;
}
static int sprd_platform_compr_trigger(struct snd_soc_component *component,
struct snd_compr_stream *cstream,
int cmd)
{
struct snd_compr_runtime *runtime = cstream->runtime;
struct sprd_compr_stream *stream = runtime->private_data;
struct device *dev = component->dev;
int channels = stream->num_channels, ret = 0, i;
int stream_id = cstream->direction;
if (cstream->direction != SND_COMPRESS_PLAYBACK) {
dev_err(dev, "unsupported compress direction\n");
return -EINVAL;
}
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
for (i = channels - 1; i >= 0; i--) {
struct sprd_compr_dma *dma = &stream->dma[i];
if (!dma->desc)
continue;
dma->cookie = dmaengine_submit(dma->desc);
ret = dma_submit_error(dma->cookie);
if (ret) {
dev_err(dev, "failed to submit request: %d\n",
ret);
return ret;
}
}
for (i = channels - 1; i >= 0; i--) {
struct sprd_compr_dma *dma = &stream->dma[i];
if (dma->chan)
dma_async_issue_pending(dma->chan);
}
ret = stream->compr_ops->start(stream_id);
break;
case SNDRV_PCM_TRIGGER_STOP:
for (i = channels - 1; i >= 0; i--) {
struct sprd_compr_dma *dma = &stream->dma[i];
if (dma->chan)
dmaengine_terminate_async(dma->chan);
}
stream->copied_total = 0;
stream->stage1_pointer = 0;
stream->received_total = 0;
stream->received_stage0 = 0;
stream->received_stage1 = 0;
ret = stream->compr_ops->stop(stream_id);
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
for (i = channels - 1; i >= 0; i--) {
struct sprd_compr_dma *dma = &stream->dma[i];
if (dma->chan)
dmaengine_pause(dma->chan);
}
ret = stream->compr_ops->pause(stream_id);
break;
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
for (i = channels - 1; i >= 0; i--) {
struct sprd_compr_dma *dma = &stream->dma[i];
if (dma->chan)
dmaengine_resume(dma->chan);
}
ret = stream->compr_ops->pause_release(stream_id);
break;
case SND_COMPR_TRIGGER_PARTIAL_DRAIN:
case SND_COMPR_TRIGGER_DRAIN:
ret = stream->compr_ops->drain(stream->received_total);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int sprd_platform_compr_pointer(struct snd_soc_component *component,
struct snd_compr_stream *cstream,
struct snd_compr_tstamp *tstamp)
{
struct snd_compr_runtime *runtime = cstream->runtime;
struct sprd_compr_stream *stream = runtime->private_data;
struct sprd_compr_playinfo *info =
(struct sprd_compr_playinfo *)stream->info_area;
tstamp->copied_total = stream->copied_total;
tstamp->pcm_io_frames = info->current_data_offset;
return 0;
}
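/*
* Illustrative example for the copy path below: with a 32K fragment size and
* an empty stage 0 buffer, a 48K write from userspace puts the first 32K into
* the IRAM buffer (advancing received_stage0 and copied_total) and the
* remaining 16K into the DDR buffer at stage1_pointer; received_total
* advances by the full 48K.
*/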
static int sprd_platform_compr_copy(struct snd_soc_component *component,
struct snd_compr_stream *cstream,
char __user *buf, size_t count)
{
struct snd_compr_runtime *runtime = cstream->runtime;
struct sprd_compr_stream *stream = runtime->private_data;
int avail_bytes, data_count = count;
void *dst;
/*
* We usually set the fragment size to 32K, and the stage 0 IRAM buffer
* size is 32K too. So if the amount of data received into the stage 0
* IRAM buffer is still less than 32K, there is some space left in the
* stage 0 IRAM buffer.
*/
if (stream->received_stage0 < runtime->fragment_size) {
avail_bytes = runtime->fragment_size - stream->received_stage0;
dst = stream->iram_buffer.area + stream->received_stage0;
if (avail_bytes >= data_count) {
/*
* Copy data to the stage 0 IRAM buffer directly if
* there is enough space.
*/
if (copy_from_user(dst, buf, data_count))
return -EFAULT;
stream->received_stage0 += data_count;
stream->copied_total += data_count;
goto copy_done;
} else {
/*
* If the data count is larger than the space available
* in the stage 0 IRAM buffer, copy what fits into the
* stage 0 IRAM buffer and copy the rest to the stage 1
* DDR buffer.
*/
if (copy_from_user(dst, buf, avail_bytes))
return -EFAULT;
data_count -= avail_bytes;
stream->received_stage0 += avail_bytes;
stream->copied_total += avail_bytes;
buf += avail_bytes;
}
}
/*
* Copy data to the stage 1 DDR buffer if there is no space left in the
* stage 0 IRAM buffer.
*/
dst = stream->compr_buffer.area + stream->stage1_pointer;
if (data_count < stream->compr_buffer.bytes - stream->stage1_pointer) {
if (copy_from_user(dst, buf, data_count))
return -EFAULT;
stream->stage1_pointer += data_count;
} else {
avail_bytes = stream->compr_buffer.bytes - stream->stage1_pointer;
if (copy_from_user(dst, buf, avail_bytes))
return -EFAULT;
if (copy_from_user(stream->compr_buffer.area, buf + avail_bytes,
data_count - avail_bytes))
return -EFAULT;
stream->stage1_pointer = data_count - avail_bytes;
}
stream->received_stage1 += data_count;
copy_done:
/* Update the copied data size. */
stream->received_total += count;
return count;
}
static int sprd_platform_compr_get_caps(struct snd_soc_component *component,
struct snd_compr_stream *cstream,
struct snd_compr_caps *caps)
{
caps->direction = cstream->direction;
caps->min_fragment_size = SPRD_COMPR_MIN_FRAGMENT_SIZE;
caps->max_fragment_size = SPRD_COMPR_MAX_FRAGMENT_SIZE;
caps->min_fragments = SPRD_COMPR_MIN_NUM_FRAGMENTS;
caps->max_fragments = SPRD_COMPR_MAX_NUM_FRAGMENTS;
caps->num_codecs = 2;
caps->codecs[0] = SND_AUDIOCODEC_MP3;
caps->codecs[1] = SND_AUDIOCODEC_AAC;
return 0;
}
static int
sprd_platform_compr_get_codec_caps(struct snd_soc_component *component,
struct snd_compr_stream *cstream,
struct snd_compr_codec_caps *codec)
{
switch (codec->codec) {
case SND_AUDIOCODEC_MP3:
codec->num_descriptors = 2;
codec->descriptor[0].max_ch = 2;
codec->descriptor[0].bit_rate[0] = 320;
codec->descriptor[0].bit_rate[1] = 128;
codec->descriptor[0].num_bitrates = 2;
codec->descriptor[0].profiles = 0;
codec->descriptor[0].modes = SND_AUDIOCHANMODE_MP3_STEREO;
codec->descriptor[0].formats = 0;
break;
case SND_AUDIOCODEC_AAC:
codec->num_descriptors = 2;
codec->descriptor[1].max_ch = 2;
codec->descriptor[1].bit_rate[0] = 320;
codec->descriptor[1].bit_rate[1] = 128;
codec->descriptor[1].num_bitrates = 2;
codec->descriptor[1].profiles = 0;
codec->descriptor[1].modes = 0;
codec->descriptor[1].formats = 0;
break;
default:
return -EINVAL;
}
return 0;
}
const struct snd_compress_ops sprd_platform_compress_ops = {
.open = sprd_platform_compr_open,
.free = sprd_platform_compr_free,
.set_params = sprd_platform_compr_set_params,
.trigger = sprd_platform_compr_trigger,
.pointer = sprd_platform_compr_pointer,
.copy = sprd_platform_compr_copy,
.get_caps = sprd_platform_compr_get_caps,
.get_codec_caps = sprd_platform_compr_get_codec_caps,
};
MODULE_DESCRIPTION("Spreadtrum ASoC Compress Platform Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:compress-platform");
| linux-master | sound/soc/sprd/sprd-pcm-compress.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Spreadtrum Communications Inc.
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include "sprd-mcdt.h"
/* MCDT registers definition */
#define MCDT_CH0_TXD 0x0
#define MCDT_CH0_RXD 0x28
#define MCDT_DAC0_WTMK 0x60
#define MCDT_ADC0_WTMK 0x88
#define MCDT_DMA_EN 0xb0
#define MCDT_INT_EN0 0xb4
#define MCDT_INT_EN1 0xb8
#define MCDT_INT_EN2 0xbc
#define MCDT_INT_CLR0 0xc0
#define MCDT_INT_CLR1 0xc4
#define MCDT_INT_CLR2 0xc8
#define MCDT_INT_RAW1 0xcc
#define MCDT_INT_RAW2 0xd0
#define MCDT_INT_RAW3 0xd4
#define MCDT_INT_MSK1 0xd8
#define MCDT_INT_MSK2 0xdc
#define MCDT_INT_MSK3 0xe0
#define MCDT_DAC0_FIFO_ADDR_ST 0xe4
#define MCDT_ADC0_FIFO_ADDR_ST 0xe8
#define MCDT_CH_FIFO_ST0 0x134
#define MCDT_CH_FIFO_ST1 0x138
#define MCDT_CH_FIFO_ST2 0x13c
#define MCDT_INT_MSK_CFG0 0x140
#define MCDT_INT_MSK_CFG1 0x144
#define MCDT_DMA_CFG0 0x148
#define MCDT_FIFO_CLR 0x14c
#define MCDT_DMA_CFG1 0x150
#define MCDT_DMA_CFG2 0x154
#define MCDT_DMA_CFG3 0x158
#define MCDT_DMA_CFG4 0x15c
#define MCDT_DMA_CFG5 0x160
/* Channel water mark definition */
#define MCDT_CH_FIFO_AE_SHIFT 16
#define MCDT_CH_FIFO_AE_MASK GENMASK(24, 16)
#define MCDT_CH_FIFO_AF_MASK GENMASK(8, 0)
/* DMA channel select definition */
#define MCDT_DMA_CH0_SEL_MASK GENMASK(3, 0)
#define MCDT_DMA_CH0_SEL_SHIFT 0
#define MCDT_DMA_CH1_SEL_MASK GENMASK(7, 4)
#define MCDT_DMA_CH1_SEL_SHIFT 4
#define MCDT_DMA_CH2_SEL_MASK GENMASK(11, 8)
#define MCDT_DMA_CH2_SEL_SHIFT 8
#define MCDT_DMA_CH3_SEL_MASK GENMASK(15, 12)
#define MCDT_DMA_CH3_SEL_SHIFT 12
#define MCDT_DMA_CH4_SEL_MASK GENMASK(19, 16)
#define MCDT_DMA_CH4_SEL_SHIFT 16
#define MCDT_DAC_DMA_SHIFT 16
/* DMA channel ACK select definition */
#define MCDT_DMA_ACK_SEL_MASK GENMASK(3, 0)
/* Channel FIFO definition */
#define MCDT_CH_FIFO_ADDR_SHIFT 16
#define MCDT_CH_FIFO_ADDR_MASK GENMASK(9, 0)
#define MCDT_ADC_FIFO_SHIFT 16
#define MCDT_FIFO_LENGTH 512
#define MCDT_ADC_CHANNEL_NUM 10
#define MCDT_DAC_CHANNEL_NUM 10
#define MCDT_CHANNEL_NUM (MCDT_ADC_CHANNEL_NUM + MCDT_DAC_CHANNEL_NUM)
enum sprd_mcdt_fifo_int {
MCDT_ADC_FIFO_AE_INT,
MCDT_ADC_FIFO_AF_INT,
MCDT_DAC_FIFO_AE_INT,
MCDT_DAC_FIFO_AF_INT,
MCDT_ADC_FIFO_OV_INT,
MCDT_DAC_FIFO_OV_INT
};
enum sprd_mcdt_fifo_sts {
MCDT_ADC_FIFO_REAL_FULL,
MCDT_ADC_FIFO_REAL_EMPTY,
MCDT_ADC_FIFO_AF,
MCDT_ADC_FIFO_AE,
MCDT_DAC_FIFO_REAL_FULL,
MCDT_DAC_FIFO_REAL_EMPTY,
MCDT_DAC_FIFO_AF,
MCDT_DAC_FIFO_AE
};
struct sprd_mcdt_dev {
struct device *dev;
void __iomem *base;
spinlock_t lock;
struct sprd_mcdt_chan chan[MCDT_CHANNEL_NUM];
};
static LIST_HEAD(sprd_mcdt_chan_list);
static DEFINE_MUTEX(sprd_mcdt_list_mutex);
static void sprd_mcdt_update(struct sprd_mcdt_dev *mcdt, u32 reg, u32 val,
u32 mask)
{
u32 orig = readl_relaxed(mcdt->base + reg);
u32 tmp;
tmp = (orig & ~mask) | val;
writel_relaxed(tmp, mcdt->base + reg);
}
static void sprd_mcdt_dac_set_watermark(struct sprd_mcdt_dev *mcdt, u8 channel,
u32 full, u32 empty)
{
u32 reg = MCDT_DAC0_WTMK + channel * 4;
u32 water_mark =
(empty << MCDT_CH_FIFO_AE_SHIFT) & MCDT_CH_FIFO_AE_MASK;
water_mark |= full & MCDT_CH_FIFO_AF_MASK;
sprd_mcdt_update(mcdt, reg, water_mark,
MCDT_CH_FIFO_AE_MASK | MCDT_CH_FIFO_AF_MASK);
}
static void sprd_mcdt_adc_set_watermark(struct sprd_mcdt_dev *mcdt, u8 channel,
u32 full, u32 empty)
{
u32 reg = MCDT_ADC0_WTMK + channel * 4;
u32 water_mark =
(empty << MCDT_CH_FIFO_AE_SHIFT) & MCDT_CH_FIFO_AE_MASK;
water_mark |= full & MCDT_CH_FIFO_AF_MASK;
sprd_mcdt_update(mcdt, reg, water_mark,
MCDT_CH_FIFO_AE_MASK | MCDT_CH_FIFO_AF_MASK);
}
static void sprd_mcdt_dac_dma_enable(struct sprd_mcdt_dev *mcdt, u8 channel,
bool enable)
{
u32 shift = MCDT_DAC_DMA_SHIFT + channel;
if (enable)
sprd_mcdt_update(mcdt, MCDT_DMA_EN, BIT(shift), BIT(shift));
else
sprd_mcdt_update(mcdt, MCDT_DMA_EN, 0, BIT(shift));
}
static void sprd_mcdt_adc_dma_enable(struct sprd_mcdt_dev *mcdt, u8 channel,
bool enable)
{
if (enable)
sprd_mcdt_update(mcdt, MCDT_DMA_EN, BIT(channel), BIT(channel));
else
sprd_mcdt_update(mcdt, MCDT_DMA_EN, 0, BIT(channel));
}
static void sprd_mcdt_ap_int_enable(struct sprd_mcdt_dev *mcdt, u8 channel,
bool enable)
{
if (enable)
sprd_mcdt_update(mcdt, MCDT_INT_MSK_CFG0, BIT(channel),
BIT(channel));
else
sprd_mcdt_update(mcdt, MCDT_INT_MSK_CFG0, 0, BIT(channel));
}
static void sprd_mcdt_dac_write_fifo(struct sprd_mcdt_dev *mcdt, u8 channel,
u32 val)
{
u32 reg = MCDT_CH0_TXD + channel * 4;
writel_relaxed(val, mcdt->base + reg);
}
static void sprd_mcdt_adc_read_fifo(struct sprd_mcdt_dev *mcdt, u8 channel,
u32 *val)
{
u32 reg = MCDT_CH0_RXD + channel * 4;
*val = readl_relaxed(mcdt->base + reg);
}
static void sprd_mcdt_dac_dma_chn_select(struct sprd_mcdt_dev *mcdt, u8 channel,
enum sprd_mcdt_dma_chan dma_chan)
{
switch (dma_chan) {
case SPRD_MCDT_DMA_CH0:
sprd_mcdt_update(mcdt, MCDT_DMA_CFG0,
channel << MCDT_DMA_CH0_SEL_SHIFT,
MCDT_DMA_CH0_SEL_MASK);
break;
case SPRD_MCDT_DMA_CH1:
sprd_mcdt_update(mcdt, MCDT_DMA_CFG0,
channel << MCDT_DMA_CH1_SEL_SHIFT,
MCDT_DMA_CH1_SEL_MASK);
break;
case SPRD_MCDT_DMA_CH2:
sprd_mcdt_update(mcdt, MCDT_DMA_CFG0,
channel << MCDT_DMA_CH2_SEL_SHIFT,
MCDT_DMA_CH2_SEL_MASK);
break;
case SPRD_MCDT_DMA_CH3:
sprd_mcdt_update(mcdt, MCDT_DMA_CFG0,
channel << MCDT_DMA_CH3_SEL_SHIFT,
MCDT_DMA_CH3_SEL_MASK);
break;
case SPRD_MCDT_DMA_CH4:
sprd_mcdt_update(mcdt, MCDT_DMA_CFG0,
channel << MCDT_DMA_CH4_SEL_SHIFT,
MCDT_DMA_CH4_SEL_MASK);
break;
}
}
static void sprd_mcdt_adc_dma_chn_select(struct sprd_mcdt_dev *mcdt, u8 channel,
enum sprd_mcdt_dma_chan dma_chan)
{
switch (dma_chan) {
case SPRD_MCDT_DMA_CH0:
sprd_mcdt_update(mcdt, MCDT_DMA_CFG1,
channel << MCDT_DMA_CH0_SEL_SHIFT,
MCDT_DMA_CH0_SEL_MASK);
break;
case SPRD_MCDT_DMA_CH1:
sprd_mcdt_update(mcdt, MCDT_DMA_CFG1,
channel << MCDT_DMA_CH1_SEL_SHIFT,
MCDT_DMA_CH1_SEL_MASK);
break;
case SPRD_MCDT_DMA_CH2:
sprd_mcdt_update(mcdt, MCDT_DMA_CFG1,
channel << MCDT_DMA_CH2_SEL_SHIFT,
MCDT_DMA_CH2_SEL_MASK);
break;
case SPRD_MCDT_DMA_CH3:
sprd_mcdt_update(mcdt, MCDT_DMA_CFG1,
channel << MCDT_DMA_CH3_SEL_SHIFT,
MCDT_DMA_CH3_SEL_MASK);
break;
case SPRD_MCDT_DMA_CH4:
sprd_mcdt_update(mcdt, MCDT_DMA_CFG1,
channel << MCDT_DMA_CH4_SEL_SHIFT,
MCDT_DMA_CH4_SEL_MASK);
break;
}
}
static u32 sprd_mcdt_dma_ack_shift(u8 channel)
{
switch (channel) {
default:
case 0:
case 8:
return 0;
case 1:
case 9:
return 4;
case 2:
return 8;
case 3:
return 12;
case 4:
return 16;
case 5:
return 20;
case 6:
return 24;
case 7:
return 28;
}
}
static void sprd_mcdt_dac_dma_ack_select(struct sprd_mcdt_dev *mcdt, u8 channel,
enum sprd_mcdt_dma_chan dma_chan)
{
u32 reg, shift = sprd_mcdt_dma_ack_shift(channel), ack = dma_chan;
switch (channel) {
case 0 ... 7:
reg = MCDT_DMA_CFG2;
break;
case 8 ... 9:
reg = MCDT_DMA_CFG3;
break;
default:
return;
}
sprd_mcdt_update(mcdt, reg, ack << shift,
MCDT_DMA_ACK_SEL_MASK << shift);
}
static void sprd_mcdt_adc_dma_ack_select(struct sprd_mcdt_dev *mcdt, u8 channel,
enum sprd_mcdt_dma_chan dma_chan)
{
u32 reg, shift = sprd_mcdt_dma_ack_shift(channel), ack = dma_chan;
switch (channel) {
case 0 ... 7:
reg = MCDT_DMA_CFG4;
break;
case 8 ... 9:
reg = MCDT_DMA_CFG5;
break;
default:
return;
}
sprd_mcdt_update(mcdt, reg, ack << shift,
MCDT_DMA_ACK_SEL_MASK << shift);
}
static bool sprd_mcdt_chan_fifo_sts(struct sprd_mcdt_dev *mcdt, u8 channel,
enum sprd_mcdt_fifo_sts fifo_sts)
{
u32 reg, shift;
switch (channel) {
case 0 ... 3:
reg = MCDT_CH_FIFO_ST0;
break;
case 4 ... 7:
reg = MCDT_CH_FIFO_ST1;
break;
case 8 ... 9:
reg = MCDT_CH_FIFO_ST2;
break;
default:
return false;
}
switch (channel) {
case 0:
case 4:
case 8:
shift = fifo_sts;
break;
case 1:
case 5:
case 9:
shift = 8 + fifo_sts;
break;
case 2:
case 6:
shift = 16 + fifo_sts;
break;
case 3:
case 7:
shift = 24 + fifo_sts;
break;
default:
return false;
}
return !!(readl_relaxed(mcdt->base + reg) & BIT(shift));
}
static void sprd_mcdt_dac_fifo_clear(struct sprd_mcdt_dev *mcdt, u8 channel)
{
sprd_mcdt_update(mcdt, MCDT_FIFO_CLR, BIT(channel), BIT(channel));
}
static void sprd_mcdt_adc_fifo_clear(struct sprd_mcdt_dev *mcdt, u8 channel)
{
u32 shift = MCDT_ADC_FIFO_SHIFT + channel;
sprd_mcdt_update(mcdt, MCDT_FIFO_CLR, BIT(shift), BIT(shift));
}
static u32 sprd_mcdt_dac_fifo_avail(struct sprd_mcdt_dev *mcdt, u8 channel)
{
u32 reg = MCDT_DAC0_FIFO_ADDR_ST + channel * 8;
u32 r_addr = (readl_relaxed(mcdt->base + reg) >>
MCDT_CH_FIFO_ADDR_SHIFT) & MCDT_CH_FIFO_ADDR_MASK;
u32 w_addr = readl_relaxed(mcdt->base + reg) & MCDT_CH_FIFO_ADDR_MASK;
if (w_addr >= r_addr)
return 4 * (MCDT_FIFO_LENGTH - w_addr + r_addr);
else
return 4 * (r_addr - w_addr);
}
static u32 sprd_mcdt_adc_fifo_avail(struct sprd_mcdt_dev *mcdt, u8 channel)
{
u32 reg = MCDT_ADC0_FIFO_ADDR_ST + channel * 8;
u32 r_addr = (readl_relaxed(mcdt->base + reg) >>
MCDT_CH_FIFO_ADDR_SHIFT) & MCDT_CH_FIFO_ADDR_MASK;
u32 w_addr = readl_relaxed(mcdt->base + reg) & MCDT_CH_FIFO_ADDR_MASK;
if (w_addr >= r_addr)
return 4 * (w_addr - r_addr);
else
return 4 * (MCDT_FIFO_LENGTH - r_addr + w_addr);
}
static u32 sprd_mcdt_int_type_shift(u8 channel,
enum sprd_mcdt_fifo_int int_type)
{
switch (channel) {
case 0:
case 4:
case 8:
return int_type;
case 1:
case 5:
case 9:
return 8 + int_type;
case 2:
case 6:
return 16 + int_type;
case 3:
case 7:
return 24 + int_type;
default:
return 0;
}
}
static void sprd_mcdt_chan_int_en(struct sprd_mcdt_dev *mcdt, u8 channel,
enum sprd_mcdt_fifo_int int_type, bool enable)
{
u32 reg, shift = sprd_mcdt_int_type_shift(channel, int_type);
switch (channel) {
case 0 ... 3:
reg = MCDT_INT_EN0;
break;
case 4 ... 7:
reg = MCDT_INT_EN1;
break;
case 8 ... 9:
reg = MCDT_INT_EN2;
break;
default:
return;
}
if (enable)
sprd_mcdt_update(mcdt, reg, BIT(shift), BIT(shift));
else
sprd_mcdt_update(mcdt, reg, 0, BIT(shift));
}
static void sprd_mcdt_chan_int_clear(struct sprd_mcdt_dev *mcdt, u8 channel,
enum sprd_mcdt_fifo_int int_type)
{
u32 reg, shift = sprd_mcdt_int_type_shift(channel, int_type);
switch (channel) {
case 0 ... 3:
reg = MCDT_INT_CLR0;
break;
case 4 ... 7:
reg = MCDT_INT_CLR1;
break;
case 8 ... 9:
reg = MCDT_INT_CLR2;
break;
default:
return;
}
sprd_mcdt_update(mcdt, reg, BIT(shift), BIT(shift));
}
static bool sprd_mcdt_chan_int_sts(struct sprd_mcdt_dev *mcdt, u8 channel,
enum sprd_mcdt_fifo_int int_type)
{
u32 reg, shift = sprd_mcdt_int_type_shift(channel, int_type);
switch (channel) {
case 0 ... 3:
reg = MCDT_INT_MSK1;
break;
case 4 ... 7:
reg = MCDT_INT_MSK2;
break;
case 8 ... 9:
reg = MCDT_INT_MSK3;
break;
default:
return false;
}
return !!(readl_relaxed(mcdt->base + reg) & BIT(shift));
}
static irqreturn_t sprd_mcdt_irq_handler(int irq, void *dev_id)
{
struct sprd_mcdt_dev *mcdt = (struct sprd_mcdt_dev *)dev_id;
int i;
spin_lock(&mcdt->lock);
for (i = 0; i < MCDT_ADC_CHANNEL_NUM; i++) {
if (sprd_mcdt_chan_int_sts(mcdt, i, MCDT_ADC_FIFO_AF_INT)) {
struct sprd_mcdt_chan *chan = &mcdt->chan[i];
sprd_mcdt_chan_int_clear(mcdt, i, MCDT_ADC_FIFO_AF_INT);
if (chan->cb)
chan->cb->notify(chan->cb->data);
}
}
for (i = 0; i < MCDT_DAC_CHANNEL_NUM; i++) {
if (sprd_mcdt_chan_int_sts(mcdt, i, MCDT_DAC_FIFO_AE_INT)) {
struct sprd_mcdt_chan *chan =
&mcdt->chan[i + MCDT_ADC_CHANNEL_NUM];
sprd_mcdt_chan_int_clear(mcdt, i, MCDT_DAC_FIFO_AE_INT);
if (chan->cb)
chan->cb->notify(chan->cb->data);
}
}
spin_unlock(&mcdt->lock);
return IRQ_HANDLED;
}
/**
* sprd_mcdt_chan_write - write data to the MCDT channel's fifo
* @chan: the MCDT channel
* @tx_buf: send buffer
* @size: data size
*
* Note: We cannot write data to the channel fifo while DMA mode is enabled,
* otherwise the channel fifo data will be invalid.
*
* If there is not enough free space in the channel fifo, an error is
* returned to the user.
*
* Returns 0 on success, or an appropriate error code on failure.
*/
int sprd_mcdt_chan_write(struct sprd_mcdt_chan *chan, char *tx_buf, u32 size)
{
struct sprd_mcdt_dev *mcdt = chan->mcdt;
unsigned long flags;
int avail, i = 0, words = size / 4;
u32 *buf = (u32 *)tx_buf;
spin_lock_irqsave(&mcdt->lock, flags);
if (chan->dma_enable) {
dev_err(mcdt->dev,
"Can not write data when DMA mode enabled\n");
spin_unlock_irqrestore(&mcdt->lock, flags);
return -EINVAL;
}
if (sprd_mcdt_chan_fifo_sts(mcdt, chan->id, MCDT_DAC_FIFO_REAL_FULL)) {
dev_err(mcdt->dev, "Channel fifo is full now\n");
spin_unlock_irqrestore(&mcdt->lock, flags);
return -EBUSY;
}
avail = sprd_mcdt_dac_fifo_avail(mcdt, chan->id);
if (size > avail) {
dev_err(mcdt->dev,
"Data size is larger than the available fifo size\n");
spin_unlock_irqrestore(&mcdt->lock, flags);
return -EBUSY;
}
while (i++ < words)
sprd_mcdt_dac_write_fifo(mcdt, chan->id, *buf++);
spin_unlock_irqrestore(&mcdt->lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(sprd_mcdt_chan_write);
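/*
* Illustrative usage sketch (not part of the driver): pushing one block of
* PCM data into a DAC channel with sprd_mcdt_chan_write(). The buffer name,
* its size and the error handling below are assumptions made up for this
* example; the only real requirements are that the channel is not in DMA
* mode and that the data fits into the free FIFO space.
*
*	char period[512];	// hypothetical buffer, a multiple of 4 bytes
*	int ret;
*
*	// fill 'period' with PCM samples here ...
*	ret = sprd_mcdt_chan_write(dac_chan, period, sizeof(period));
*	if (ret)
*		return ret;	// FIFO full, not enough space, or DMA mode on
*/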
/**
* sprd_mcdt_chan_read - read data from the MCDT channel's fifo
* @chan: the MCDT channel
* @rx_buf: receive buffer
* @size: data size
*
* Note: We cannot read data from the channel fifo while DMA mode is enabled,
* otherwise the data read will be invalid.
*
* Usually the user should start reading data once the fifo full interrupt
* has been received.
*
* Returns the number of bytes read successfully, or an error code on failure.
*/
int sprd_mcdt_chan_read(struct sprd_mcdt_chan *chan, char *rx_buf, u32 size)
{
struct sprd_mcdt_dev *mcdt = chan->mcdt;
unsigned long flags;
int i = 0, avail, words = size / 4;
u32 *buf = (u32 *)rx_buf;
spin_lock_irqsave(&mcdt->lock, flags);
if (chan->dma_enable) {
dev_err(mcdt->dev, "Can not read data when DMA mode enabled\n");
spin_unlock_irqrestore(&mcdt->lock, flags);
return -EINVAL;
}
if (sprd_mcdt_chan_fifo_sts(mcdt, chan->id, MCDT_ADC_FIFO_REAL_EMPTY)) {
dev_err(mcdt->dev, "Channel fifo is empty\n");
spin_unlock_irqrestore(&mcdt->lock, flags);
return -EBUSY;
}
avail = sprd_mcdt_adc_fifo_avail(mcdt, chan->id);
if (size > avail)
words = avail / 4;
while (i++ < words)
sprd_mcdt_adc_read_fifo(mcdt, chan->id, buf++);
spin_unlock_irqrestore(&mcdt->lock, flags);
return words * 4;
}
EXPORT_SYMBOL_GPL(sprd_mcdt_chan_read);
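/*
* Illustrative usage sketch (not part of the driver): draining an ADC channel
* with sprd_mcdt_chan_read(). The buffer is a made-up example; the return
* value is the number of bytes actually copied, which may be smaller than the
* requested size when the FIFO holds fewer words.
*
*	char rx[512];		// hypothetical buffer
*	int got;
*
*	got = sprd_mcdt_chan_read(adc_chan, rx, sizeof(rx));
*	if (got < 0)
*		return got;	// FIFO empty or DMA mode enabled
*	// 'got' bytes of capture data are now available in rx[]
*/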
/**
* sprd_mcdt_chan_int_enable - enable the interrupt mode for the MCDT channel
* @chan: the MCDT channel
* @water_mark: water mark to trigger an interrupt
* @cb: callback invoked when an interrupt happens
*
* Currently only the fifo almost-full interrupt can be enabled for an ADC
* channel and the fifo almost-empty interrupt for a DAC channel. Moreover,
* in interrupt mode the user should use sprd_mcdt_chan_read() or
* sprd_mcdt_chan_write() to read or write data manually.
*
* For an ADC channel, the user can start reading data once a fifo full
* interrupt has been received. For a DAC channel, the user can start writing
* data once a fifo empty interrupt has been received, or just call
* sprd_mcdt_chan_write() to write data directly.
*
* Returns 0 on success, or an error code on failure.
*/
int sprd_mcdt_chan_int_enable(struct sprd_mcdt_chan *chan, u32 water_mark,
struct sprd_mcdt_chan_callback *cb)
{
struct sprd_mcdt_dev *mcdt = chan->mcdt;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&mcdt->lock, flags);
if (chan->dma_enable || chan->int_enable) {
dev_err(mcdt->dev, "Failed to set interrupt mode.\n");
spin_unlock_irqrestore(&mcdt->lock, flags);
return -EINVAL;
}
switch (chan->type) {
case SPRD_MCDT_ADC_CHAN:
sprd_mcdt_adc_fifo_clear(mcdt, chan->id);
sprd_mcdt_adc_set_watermark(mcdt, chan->id, water_mark,
MCDT_FIFO_LENGTH - 1);
sprd_mcdt_chan_int_en(mcdt, chan->id,
MCDT_ADC_FIFO_AF_INT, true);
sprd_mcdt_ap_int_enable(mcdt, chan->id, true);
break;
case SPRD_MCDT_DAC_CHAN:
sprd_mcdt_dac_fifo_clear(mcdt, chan->id);
sprd_mcdt_dac_set_watermark(mcdt, chan->id,
MCDT_FIFO_LENGTH - 1, water_mark);
sprd_mcdt_chan_int_en(mcdt, chan->id,
MCDT_DAC_FIFO_AE_INT, true);
sprd_mcdt_ap_int_enable(mcdt, chan->id, true);
break;
default:
dev_err(mcdt->dev, "Unsupported channel type\n");
ret = -EINVAL;
}
if (!ret) {
chan->cb = cb;
chan->int_enable = true;
}
spin_unlock_irqrestore(&mcdt->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(sprd_mcdt_chan_int_enable);
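/*
* Illustrative usage sketch (not part of the driver): enabling interrupt mode
* on an ADC channel. The callback, its private data and the water mark value
* are example choices, not requirements of this driver; the callback runs
* from the MCDT interrupt handler, so heavy work should be deferred.
*
*	static void example_notify(void *data)
*	{
*		// typically schedule work that calls sprd_mcdt_chan_read()
*	}
*
*	static struct sprd_mcdt_chan_callback example_cb = {
*		.notify = example_notify,
*		.data = NULL,		// hypothetical private data
*	};
*
*	ret = sprd_mcdt_chan_int_enable(adc_chan, 256, &example_cb);
*	...
*	sprd_mcdt_chan_int_disable(adc_chan);
*/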
/**
* sprd_mcdt_chan_int_disable - disable the interrupt mode for the MCDT channel
* @chan: the MCDT channel
*/
void sprd_mcdt_chan_int_disable(struct sprd_mcdt_chan *chan)
{
struct sprd_mcdt_dev *mcdt = chan->mcdt;
unsigned long flags;
spin_lock_irqsave(&mcdt->lock, flags);
if (!chan->int_enable) {
spin_unlock_irqrestore(&mcdt->lock, flags);
return;
}
switch (chan->type) {
case SPRD_MCDT_ADC_CHAN:
sprd_mcdt_chan_int_en(mcdt, chan->id,
MCDT_ADC_FIFO_AF_INT, false);
sprd_mcdt_chan_int_clear(mcdt, chan->id, MCDT_ADC_FIFO_AF_INT);
sprd_mcdt_ap_int_enable(mcdt, chan->id, false);
break;
case SPRD_MCDT_DAC_CHAN:
sprd_mcdt_chan_int_en(mcdt, chan->id,
MCDT_DAC_FIFO_AE_INT, false);
sprd_mcdt_chan_int_clear(mcdt, chan->id, MCDT_DAC_FIFO_AE_INT);
sprd_mcdt_ap_int_enable(mcdt, chan->id, false);
break;
default:
break;
}
chan->int_enable = false;
spin_unlock_irqrestore(&mcdt->lock, flags);
}
EXPORT_SYMBOL_GPL(sprd_mcdt_chan_int_disable);
/**
* sprd_mcdt_chan_dma_enable - enable the DMA mode for the MCDT channel
* @chan: the MCDT channel
* @dma_chan: specify which DMA channel will be used for this MCDT channel
* @water_mark: water mark to trigger a DMA request
*
* Enable the DMA mode for the MCDT channel, which means DMA can be used to
* transfer data to or from the channel fifo without reading or writing data
* manually.
*
* Returns 0 on success, or an error code on failure.
*/
int sprd_mcdt_chan_dma_enable(struct sprd_mcdt_chan *chan,
enum sprd_mcdt_dma_chan dma_chan,
u32 water_mark)
{
struct sprd_mcdt_dev *mcdt = chan->mcdt;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&mcdt->lock, flags);
if (chan->dma_enable || chan->int_enable ||
dma_chan > SPRD_MCDT_DMA_CH4) {
dev_err(mcdt->dev, "Failed to set DMA mode\n");
spin_unlock_irqrestore(&mcdt->lock, flags);
return -EINVAL;
}
switch (chan->type) {
case SPRD_MCDT_ADC_CHAN:
sprd_mcdt_adc_fifo_clear(mcdt, chan->id);
sprd_mcdt_adc_set_watermark(mcdt, chan->id,
water_mark, MCDT_FIFO_LENGTH - 1);
sprd_mcdt_adc_dma_enable(mcdt, chan->id, true);
sprd_mcdt_adc_dma_chn_select(mcdt, chan->id, dma_chan);
sprd_mcdt_adc_dma_ack_select(mcdt, chan->id, dma_chan);
break;
case SPRD_MCDT_DAC_CHAN:
sprd_mcdt_dac_fifo_clear(mcdt, chan->id);
sprd_mcdt_dac_set_watermark(mcdt, chan->id,
MCDT_FIFO_LENGTH - 1, water_mark);
sprd_mcdt_dac_dma_enable(mcdt, chan->id, true);
sprd_mcdt_dac_dma_chn_select(mcdt, chan->id, dma_chan);
sprd_mcdt_dac_dma_ack_select(mcdt, chan->id, dma_chan);
break;
default:
dev_err(mcdt->dev, "Unsupported channel type\n");
ret = -EINVAL;
}
if (!ret)
chan->dma_enable = true;
spin_unlock_irqrestore(&mcdt->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(sprd_mcdt_chan_dma_enable);
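/*
* Illustrative usage sketch (not part of the driver): enabling DMA mode. The
* DMA channel and water mark are arbitrary example values; once DMA mode is
* on, data is moved by the DMA engine (typically targeting chan->fifo_phys)
* and sprd_mcdt_chan_read()/sprd_mcdt_chan_write() must not be used.
*
*	ret = sprd_mcdt_chan_dma_enable(chan, SPRD_MCDT_DMA_CH0, 64);
*	if (ret)
*		return ret;
*	...
*	sprd_mcdt_chan_dma_disable(chan);
*/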
/**
* sprd_mcdt_chan_dma_disable - disable the DMA mode for the MCDT channel
* @chan: the MCDT channel
*/
void sprd_mcdt_chan_dma_disable(struct sprd_mcdt_chan *chan)
{
struct sprd_mcdt_dev *mcdt = chan->mcdt;
unsigned long flags;
spin_lock_irqsave(&mcdt->lock, flags);
if (!chan->dma_enable) {
spin_unlock_irqrestore(&mcdt->lock, flags);
return;
}
switch (chan->type) {
case SPRD_MCDT_ADC_CHAN:
sprd_mcdt_adc_dma_enable(mcdt, chan->id, false);
sprd_mcdt_adc_fifo_clear(mcdt, chan->id);
break;
case SPRD_MCDT_DAC_CHAN:
sprd_mcdt_dac_dma_enable(mcdt, chan->id, false);
sprd_mcdt_dac_fifo_clear(mcdt, chan->id);
break;
default:
break;
}
chan->dma_enable = false;
spin_unlock_irqrestore(&mcdt->lock, flags);
}
EXPORT_SYMBOL_GPL(sprd_mcdt_chan_dma_disable);
/**
* sprd_mcdt_request_chan - request one MCDT channel
* @channel: channel id
* @type: channel type, it can be one ADC channel or DAC channel
*
* Returns NULL if no channel is available.
*/
struct sprd_mcdt_chan *sprd_mcdt_request_chan(u8 channel,
enum sprd_mcdt_channel_type type)
{
struct sprd_mcdt_chan *temp;
mutex_lock(&sprd_mcdt_list_mutex);
list_for_each_entry(temp, &sprd_mcdt_chan_list, list) {
if (temp->type == type && temp->id == channel) {
list_del_init(&temp->list);
break;
}
}
if (list_entry_is_head(temp, &sprd_mcdt_chan_list, list))
temp = NULL;
mutex_unlock(&sprd_mcdt_list_mutex);
return temp;
}
EXPORT_SYMBOL_GPL(sprd_mcdt_request_chan);
/**
* sprd_mcdt_free_chan - free one MCDT channel
* @chan: the channel to be freed
*/
void sprd_mcdt_free_chan(struct sprd_mcdt_chan *chan)
{
struct sprd_mcdt_chan *temp;
sprd_mcdt_chan_dma_disable(chan);
sprd_mcdt_chan_int_disable(chan);
mutex_lock(&sprd_mcdt_list_mutex);
list_for_each_entry(temp, &sprd_mcdt_chan_list, list) {
if (temp == chan) {
mutex_unlock(&sprd_mcdt_list_mutex);
return;
}
}
list_add_tail(&chan->list, &sprd_mcdt_chan_list);
mutex_unlock(&sprd_mcdt_list_mutex);
}
EXPORT_SYMBOL_GPL(sprd_mcdt_free_chan);
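/*
* Illustrative usage sketch (not part of the driver): the expected
* request/release lifecycle. The channel id and type are example values.
*
*	struct sprd_mcdt_chan *chan;
*
*	chan = sprd_mcdt_request_chan(0, SPRD_MCDT_DAC_CHAN);
*	if (!chan)
*		return -EBUSY;	// hypothetical error handling
*	// ... use the channel in interrupt or DMA mode ...
*	sprd_mcdt_free_chan(chan);
*/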
static void sprd_mcdt_init_chans(struct sprd_mcdt_dev *mcdt,
struct resource *res)
{
int i;
for (i = 0; i < MCDT_CHANNEL_NUM; i++) {
struct sprd_mcdt_chan *chan = &mcdt->chan[i];
if (i < MCDT_ADC_CHANNEL_NUM) {
chan->id = i;
chan->type = SPRD_MCDT_ADC_CHAN;
chan->fifo_phys = res->start + MCDT_CH0_RXD + i * 4;
} else {
chan->id = i - MCDT_ADC_CHANNEL_NUM;
chan->type = SPRD_MCDT_DAC_CHAN;
chan->fifo_phys = res->start + MCDT_CH0_TXD +
(i - MCDT_ADC_CHANNEL_NUM) * 4;
}
chan->mcdt = mcdt;
INIT_LIST_HEAD(&chan->list);
mutex_lock(&sprd_mcdt_list_mutex);
list_add_tail(&chan->list, &sprd_mcdt_chan_list);
mutex_unlock(&sprd_mcdt_list_mutex);
}
}
static int sprd_mcdt_probe(struct platform_device *pdev)
{
struct sprd_mcdt_dev *mcdt;
struct resource *res;
int ret, irq;
mcdt = devm_kzalloc(&pdev->dev, sizeof(*mcdt), GFP_KERNEL);
if (!mcdt)
return -ENOMEM;
mcdt->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(mcdt->base))
return PTR_ERR(mcdt->base);
mcdt->dev = &pdev->dev;
spin_lock_init(&mcdt->lock);
platform_set_drvdata(pdev, mcdt);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ret = devm_request_irq(&pdev->dev, irq, sprd_mcdt_irq_handler,
0, "sprd-mcdt", mcdt);
if (ret) {
dev_err(&pdev->dev, "Failed to request MCDT IRQ\n");
return ret;
}
sprd_mcdt_init_chans(mcdt, res);
return 0;
}
static void sprd_mcdt_remove(struct platform_device *pdev)
{
struct sprd_mcdt_chan *chan, *temp;
mutex_lock(&sprd_mcdt_list_mutex);
list_for_each_entry_safe(chan, temp, &sprd_mcdt_chan_list, list)
list_del(&chan->list);
mutex_unlock(&sprd_mcdt_list_mutex);
}
static const struct of_device_id sprd_mcdt_of_match[] = {
{ .compatible = "sprd,sc9860-mcdt", },
{ }
};
MODULE_DEVICE_TABLE(of, sprd_mcdt_of_match);
static struct platform_driver sprd_mcdt_driver = {
.probe = sprd_mcdt_probe,
.remove_new = sprd_mcdt_remove,
.driver = {
.name = "sprd-mcdt",
.of_match_table = sprd_mcdt_of_match,
},
};
module_platform_driver(sprd_mcdt_driver);
MODULE_DESCRIPTION("Spreadtrum Multi-Channel Data Transfer Driver");
MODULE_LICENSE("GPL v2");
| linux-master | sound/soc/sprd/sprd-mcdt.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Spreadtrum Communications Inc.
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dma/sprd-dma.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "sprd-pcm-dma.h"
#define SPRD_PCM_DMA_LINKLIST_SIZE 64
#define SPRD_PCM_DMA_BRUST_LEN 640
struct sprd_pcm_dma_data {
struct dma_chan *chan;
struct dma_async_tx_descriptor *desc;
dma_cookie_t cookie;
dma_addr_t phys;
void *virt;
int pre_pointer;
};
struct sprd_pcm_dma_private {
struct snd_pcm_substream *substream;
struct sprd_pcm_dma_params *params;
struct sprd_pcm_dma_data data[SPRD_PCM_CHANNEL_MAX];
int hw_chan;
int dma_addr_offset;
};
static const struct snd_pcm_hardware sprd_pcm_hardware = {
.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE |
SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
.period_bytes_min = 1,
.period_bytes_max = 64 * 1024,
.periods_min = 1,
.periods_max = PAGE_SIZE / SPRD_PCM_DMA_LINKLIST_SIZE,
.buffer_bytes_max = 64 * 1024,
};
static int sprd_pcm_open(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct device *dev = component->dev;
struct sprd_pcm_dma_private *dma_private;
int hw_chan = SPRD_PCM_CHANNEL_MAX;
int size, ret, i;
snd_soc_set_runtime_hwparams(substream, &sprd_pcm_hardware);
ret = snd_pcm_hw_constraint_step(runtime, 0,
SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
SPRD_PCM_DMA_BRUST_LEN);
if (ret < 0)
return ret;
ret = snd_pcm_hw_constraint_step(runtime, 0,
SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
SPRD_PCM_DMA_BRUST_LEN);
if (ret < 0)
return ret;
ret = snd_pcm_hw_constraint_integer(runtime,
SNDRV_PCM_HW_PARAM_PERIODS);
if (ret < 0)
return ret;
dma_private = devm_kzalloc(dev, sizeof(*dma_private), GFP_KERNEL);
if (!dma_private)
return -ENOMEM;
size = runtime->hw.periods_max * SPRD_PCM_DMA_LINKLIST_SIZE;
for (i = 0; i < hw_chan; i++) {
struct sprd_pcm_dma_data *data = &dma_private->data[i];
data->virt = dmam_alloc_coherent(dev, size, &data->phys,
GFP_KERNEL);
if (!data->virt) {
ret = -ENOMEM;
goto error;
}
}
dma_private->hw_chan = hw_chan;
runtime->private_data = dma_private;
dma_private->substream = substream;
return 0;
error:
for (i = 0; i < hw_chan; i++) {
struct sprd_pcm_dma_data *data = &dma_private->data[i];
if (data->virt)
dmam_free_coherent(dev, size, data->virt, data->phys);
}
devm_kfree(dev, dma_private);
return ret;
}
static int sprd_pcm_close(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct sprd_pcm_dma_private *dma_private = runtime->private_data;
struct device *dev = component->dev;
int size = runtime->hw.periods_max * SPRD_PCM_DMA_LINKLIST_SIZE;
int i;
for (i = 0; i < dma_private->hw_chan; i++) {
struct sprd_pcm_dma_data *data = &dma_private->data[i];
dmam_free_coherent(dev, size, data->virt, data->phys);
}
devm_kfree(dev, dma_private);
return 0;
}
static void sprd_pcm_dma_complete(void *data)
{
struct sprd_pcm_dma_private *dma_private = data;
struct snd_pcm_substream *substream = dma_private->substream;
snd_pcm_period_elapsed(substream);
}
static void sprd_pcm_release_dma_channel(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct sprd_pcm_dma_private *dma_private = runtime->private_data;
int i;
for (i = 0; i < SPRD_PCM_CHANNEL_MAX; i++) {
struct sprd_pcm_dma_data *data = &dma_private->data[i];
if (data->chan) {
dma_release_channel(data->chan);
data->chan = NULL;
}
}
}
static int sprd_pcm_request_dma_channel(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
int channels)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct sprd_pcm_dma_private *dma_private = runtime->private_data;
struct device *dev = component->dev;
struct sprd_pcm_dma_params *dma_params = dma_private->params;
int i;
if (channels > SPRD_PCM_CHANNEL_MAX) {
dev_err(dev, "invalid dma channel number:%d\n", channels);
return -EINVAL;
}
for (i = 0; i < channels; i++) {
struct sprd_pcm_dma_data *data = &dma_private->data[i];
data->chan = dma_request_slave_channel(dev,
dma_params->chan_name[i]);
if (!data->chan) {
dev_err(dev, "failed to request dma channel:%s\n",
dma_params->chan_name[i]);
sprd_pcm_release_dma_channel(substream);
return -ENODEV;
}
}
return 0;
}
static int sprd_pcm_hw_params(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct sprd_pcm_dma_private *dma_private = runtime->private_data;
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct sprd_pcm_dma_params *dma_params;
size_t totsize = params_buffer_bytes(params);
size_t period = params_period_bytes(params);
int channels = params_channels(params);
int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
struct scatterlist *sg;
unsigned long flags;
int ret, i, j, sg_num;
dma_params = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
if (!dma_params) {
dev_warn(component->dev, "no dma parameters setting\n");
dma_private->params = NULL;
return 0;
}
if (!dma_private->params) {
dma_private->params = dma_params;
ret = sprd_pcm_request_dma_channel(component,
substream, channels);
if (ret)
return ret;
}
sg_num = totsize / period;
dma_private->dma_addr_offset = totsize / channels;
sg = devm_kcalloc(component->dev, sg_num, sizeof(*sg), GFP_KERNEL);
if (!sg) {
ret = -ENOMEM;
goto sg_err;
}
for (i = 0; i < channels; i++) {
struct sprd_pcm_dma_data *data = &dma_private->data[i];
struct dma_chan *chan = data->chan;
struct dma_slave_config config = { };
struct sprd_dma_linklist link = { };
enum dma_transfer_direction dir;
struct scatterlist *sgt = sg;
config.src_maxburst = dma_params->fragment_len[i];
config.src_addr_width = dma_params->datawidth[i];
config.dst_addr_width = dma_params->datawidth[i];
if (is_playback) {
config.src_addr = runtime->dma_addr +
i * dma_private->dma_addr_offset;
config.dst_addr = dma_params->dev_phys[i];
dir = DMA_MEM_TO_DEV;
} else {
config.src_addr = dma_params->dev_phys[i];
config.dst_addr = runtime->dma_addr +
i * dma_private->dma_addr_offset;
dir = DMA_DEV_TO_MEM;
}
sg_init_table(sgt, sg_num);
for (j = 0; j < sg_num; j++, sgt++) {
u32 sg_len = period / channels;
sg_dma_len(sgt) = sg_len;
sg_dma_address(sgt) = runtime->dma_addr +
i * dma_private->dma_addr_offset + sg_len * j;
}
/*
* Configure the link-list address for the DMA engine link-list
* mode.
*/
link.virt_addr = (unsigned long)data->virt;
link.phy_addr = data->phys;
ret = dmaengine_slave_config(chan, &config);
if (ret) {
dev_err(component->dev,
"failed to set slave configuration: %d\n", ret);
goto config_err;
}
/*
* We configure the DMA request mode, interrupt mode, channel
* mode and channel trigger mode by the flags.
*/
flags = SPRD_DMA_FLAGS(SPRD_DMA_CHN_MODE_NONE, SPRD_DMA_NO_TRG,
SPRD_DMA_FRAG_REQ, SPRD_DMA_TRANS_INT);
data->desc = chan->device->device_prep_slave_sg(chan, sg,
sg_num, dir,
flags, &link);
if (!data->desc) {
dev_err(component->dev, "failed to prepare slave sg\n");
ret = -ENOMEM;
goto config_err;
}
if (!runtime->no_period_wakeup) {
data->desc->callback = sprd_pcm_dma_complete;
data->desc->callback_param = dma_private;
}
}
devm_kfree(component->dev, sg);
return 0;
config_err:
devm_kfree(component->dev, sg);
sg_err:
sprd_pcm_release_dma_channel(substream);
return ret;
}
static int sprd_pcm_hw_free(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
sprd_pcm_release_dma_channel(substream);
return 0;
}
static int sprd_pcm_trigger(struct snd_soc_component *component,
struct snd_pcm_substream *substream, int cmd)
{
struct sprd_pcm_dma_private *dma_private =
substream->runtime->private_data;
int ret = 0, i;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
for (i = 0; i < dma_private->hw_chan; i++) {
struct sprd_pcm_dma_data *data = &dma_private->data[i];
if (!data->desc)
continue;
data->cookie = dmaengine_submit(data->desc);
ret = dma_submit_error(data->cookie);
if (ret) {
dev_err(component->dev,
"failed to submit dma request: %d\n",
ret);
return ret;
}
dma_async_issue_pending(data->chan);
}
break;
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
for (i = 0; i < dma_private->hw_chan; i++) {
struct sprd_pcm_dma_data *data = &dma_private->data[i];
if (data->chan)
dmaengine_resume(data->chan);
}
break;
case SNDRV_PCM_TRIGGER_STOP:
for (i = 0; i < dma_private->hw_chan; i++) {
struct sprd_pcm_dma_data *data = &dma_private->data[i];
if (data->chan)
dmaengine_terminate_async(data->chan);
}
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
for (i = 0; i < dma_private->hw_chan; i++) {
struct sprd_pcm_dma_data *data = &dma_private->data[i];
if (data->chan)
dmaengine_pause(data->chan);
}
break;
default:
ret = -EINVAL;
}
return ret;
}
static snd_pcm_uframes_t sprd_pcm_pointer(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct sprd_pcm_dma_private *dma_private = runtime->private_data;
int pointer[SPRD_PCM_CHANNEL_MAX];
int bytes_of_pointer = 0, sel_max = 0, i;
snd_pcm_uframes_t x;
struct dma_tx_state state;
enum dma_status status;
for (i = 0; i < dma_private->hw_chan; i++) {
struct sprd_pcm_dma_data *data = &dma_private->data[i];
if (!data->chan)
continue;
status = dmaengine_tx_status(data->chan, data->cookie, &state);
if (status == DMA_ERROR) {
dev_err(component->dev,
"failed to get dma channel %d status\n", i);
return 0;
}
/*
* We only get the current transfer address from the DMA engine, so
* we need to convert it to the current pointer.
*/
pointer[i] = state.residue - runtime->dma_addr -
i * dma_private->dma_addr_offset;
if (i == 0) {
bytes_of_pointer = pointer[i];
sel_max = pointer[i] < data->pre_pointer ? 1 : 0;
} else {
sel_max ^= pointer[i] < data->pre_pointer ? 1 : 0;
if (sel_max)
bytes_of_pointer =
max(pointer[i], pointer[i - 1]) << 1;
else
bytes_of_pointer =
min(pointer[i], pointer[i - 1]) << 1;
}
data->pre_pointer = pointer[i];
}
x = bytes_to_frames(runtime, bytes_of_pointer);
if (x == runtime->buffer_size)
x = 0;
return x;
}
static int sprd_pcm_new(struct snd_soc_component *component,
struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
struct snd_pcm *pcm = rtd->pcm;
int ret;
ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
if (ret)
return ret;
return snd_pcm_set_fixed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
card->dev,
sprd_pcm_hardware.buffer_bytes_max);
}
static const struct snd_soc_component_driver sprd_soc_component = {
.name = DRV_NAME,
.open = sprd_pcm_open,
.close = sprd_pcm_close,
.hw_params = sprd_pcm_hw_params,
.hw_free = sprd_pcm_hw_free,
.trigger = sprd_pcm_trigger,
.pointer = sprd_pcm_pointer,
.pcm_construct = sprd_pcm_new,
.compress_ops = &sprd_platform_compress_ops,
};
static int sprd_soc_platform_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
int ret;
ret = of_reserved_mem_device_init_by_idx(&pdev->dev, np, 0);
if (ret)
dev_warn(&pdev->dev,
"no reserved DMA memory for audio platform device\n");
ret = devm_snd_soc_register_component(&pdev->dev, &sprd_soc_component,
NULL, 0);
if (ret)
dev_err(&pdev->dev, "could not register platform:%d\n", ret);
return ret;
}
static const struct of_device_id sprd_pcm_of_match[] = {
{ .compatible = "sprd,pcm-platform", },
{ },
};
MODULE_DEVICE_TABLE(of, sprd_pcm_of_match);
static struct platform_driver sprd_pcm_driver = {
.driver = {
.name = "sprd-pcm-audio",
.of_match_table = sprd_pcm_of_match,
},
.probe = sprd_soc_platform_probe,
};
module_platform_driver(sprd_pcm_driver);
MODULE_DESCRIPTION("Spreadtrum ASoC PCM DMA");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sprd-audio");
| linux-master | sound/soc/sprd/sprd-pcm-dma.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SoC audio for EDB93xx
*
* Copyright (c) 2010 Alexander Sverdlin <[email protected]>
*
* This driver supports the CS4271 codec acting as master or slave, working
* in control port mode, connected either via SPI or I2C.
* The data format accepted is I2S or left-justified.
* DAPM support is not implemented.
*/
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/soc/cirrus/ep93xx.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <asm/mach-types.h>
static int edb93xx_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int err;
unsigned int mclk_rate;
unsigned int rate = params_rate(params);
/*
* According to the CS4271 datasheet, we use MCLK/LRCK = 256 for
* rates below 50 kHz and 128 for higher sample rates.
*/
if (rate < 50000)
mclk_rate = rate * 64 * 4;
else
mclk_rate = rate * 64 * 2;
err = snd_soc_dai_set_sysclk(codec_dai, 0, mclk_rate,
SND_SOC_CLOCK_IN);
if (err)
return err;
return snd_soc_dai_set_sysclk(cpu_dai, 0, mclk_rate,
SND_SOC_CLOCK_OUT);
}
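/*
* Worked example (comment only, not driver code): with the ratios chosen
* above, a 44.1 kHz stream yields mclk_rate = 44100 * 64 * 4 = 11.2896 MHz
* (MCLK/LRCK = 256), while a 96 kHz stream yields
* mclk_rate = 96000 * 64 * 2 = 12.288 MHz (MCLK/LRCK = 128).
*/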
static const struct snd_soc_ops edb93xx_ops = {
.hw_params = edb93xx_hw_params,
};
SND_SOC_DAILINK_DEFS(hifi,
DAILINK_COMP_ARRAY(COMP_CPU("ep93xx-i2s")),
DAILINK_COMP_ARRAY(COMP_CODEC("spi0.0", "cs4271-hifi")),
DAILINK_COMP_ARRAY(COMP_PLATFORM("ep93xx-i2s")));
static struct snd_soc_dai_link edb93xx_dai = {
.name = "CS4271",
.stream_name = "CS4271 HiFi",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBC_CFC,
.ops = &edb93xx_ops,
SND_SOC_DAILINK_REG(hifi),
};
static struct snd_soc_card snd_soc_edb93xx = {
.name = "EDB93XX",
.owner = THIS_MODULE,
.dai_link = &edb93xx_dai,
.num_links = 1,
};
static int edb93xx_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &snd_soc_edb93xx;
int ret;
ret = ep93xx_i2s_acquire();
if (ret)
return ret;
card->dev = &pdev->dev;
ret = snd_soc_register_card(card);
if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
ret);
ep93xx_i2s_release();
}
return ret;
}
static void edb93xx_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
snd_soc_unregister_card(card);
ep93xx_i2s_release();
}
static struct platform_driver edb93xx_driver = {
.driver = {
.name = "edb93xx-audio",
},
.probe = edb93xx_probe,
.remove_new = edb93xx_remove,
};
module_platform_driver(edb93xx_driver);
MODULE_AUTHOR("Alexander Sverdlin <[email protected]>");
MODULE_DESCRIPTION("ALSA SoC EDB93xx");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:edb93xx-audio");
| linux-master | sound/soc/cirrus/edb93xx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/sound/arm/ep93xx-pcm.c - EP93xx ALSA PCM interface
*
* Copyright (C) 2006 Lennert Buytenhek <[email protected]>
* Copyright (C) 2006 Applied Data Systems
*
* Rewritten for the SoC audio subsystem (Based on PXA2xx code):
* Copyright (c) 2008 Ryan Mallon
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>
#include <linux/platform_data/dma-ep93xx.h>
#include "ep93xx-pcm.h"
static const struct snd_pcm_hardware ep93xx_pcm_hardware = {
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER),
.buffer_bytes_max = 131072,
.period_bytes_min = 32,
.period_bytes_max = 32768,
.periods_min = 1,
.periods_max = 32,
.fifo_size = 32,
};
static bool ep93xx_pcm_dma_filter(struct dma_chan *chan, void *filter_param)
{
struct ep93xx_dma_data *data = filter_param;
if (data->direction == ep93xx_dma_chan_direction(chan)) {
chan->private = data;
return true;
}
return false;
}
static const struct snd_dmaengine_pcm_config ep93xx_dmaengine_pcm_config = {
.pcm_hardware = &ep93xx_pcm_hardware,
.compat_filter_fn = ep93xx_pcm_dma_filter,
.prealloc_buffer_size = 131072,
};
int devm_ep93xx_pcm_platform_register(struct device *dev)
{
return devm_snd_dmaengine_pcm_register(dev,
&ep93xx_dmaengine_pcm_config,
SND_DMAENGINE_PCM_FLAG_NO_DT |
SND_DMAENGINE_PCM_FLAG_COMPAT);
}
EXPORT_SYMBOL_GPL(devm_ep93xx_pcm_platform_register);
MODULE_AUTHOR("Ryan Mallon");
MODULE_DESCRIPTION("EP93xx ALSA PCM interface");
MODULE_LICENSE("GPL");
| linux-master | sound/soc/cirrus/ep93xx-pcm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/sound/soc/ep93xx-i2s.c
* EP93xx I2S driver
*
* Copyright (C) 2010 Ryan Mallon
*
* Based on the original driver by:
* Copyright (C) 2007 Chase Douglas <chasedouglas@gmail>
* Copyright (C) 2006 Lennert Buytenhek <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
#include <sound/core.h>
#include <sound/dmaengine_pcm.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include <linux/platform_data/dma-ep93xx.h>
#include <linux/soc/cirrus/ep93xx.h>
#include "ep93xx-pcm.h"
#define EP93XX_I2S_TXCLKCFG 0x00
#define EP93XX_I2S_RXCLKCFG 0x04
#define EP93XX_I2S_GLSTS 0x08
#define EP93XX_I2S_GLCTRL 0x0C
#define EP93XX_I2S_I2STX0LFT 0x10
#define EP93XX_I2S_I2STX0RT 0x14
#define EP93XX_I2S_TXLINCTRLDATA 0x28
#define EP93XX_I2S_TXCTRL 0x2C
#define EP93XX_I2S_TXWRDLEN 0x30
#define EP93XX_I2S_TX0EN 0x34
#define EP93XX_I2S_RXLINCTRLDATA 0x58
#define EP93XX_I2S_RXCTRL 0x5C
#define EP93XX_I2S_RXWRDLEN 0x60
#define EP93XX_I2S_RX0EN 0x64
#define EP93XX_I2S_WRDLEN_16 (0 << 0)
#define EP93XX_I2S_WRDLEN_24 (1 << 0)
#define EP93XX_I2S_WRDLEN_32 (2 << 0)
#define EP93XX_I2S_RXLINCTRLDATA_R_JUST BIT(1) /* Right justify */
#define EP93XX_I2S_TXLINCTRLDATA_R_JUST BIT(2) /* Right justify */
/*
* Transmit empty interrupt level select:
* 0 - Generate interrupt when FIFO is half empty
* 1 - Generate interrupt when FIFO is empty
*/
#define EP93XX_I2S_TXCTRL_TXEMPTY_LVL BIT(0)
#define EP93XX_I2S_TXCTRL_TXUFIE BIT(1) /* Transmit interrupt enable */
#define EP93XX_I2S_CLKCFG_LRS (1 << 0) /* lrclk polarity */
#define EP93XX_I2S_CLKCFG_CKP (1 << 1) /* Bit clock polarity */
#define EP93XX_I2S_CLKCFG_REL (1 << 2) /* First bit transition */
#define EP93XX_I2S_CLKCFG_MASTER (1 << 3) /* Master mode */
#define EP93XX_I2S_CLKCFG_NBCG (1 << 4) /* Not bit clock gating */
#define EP93XX_I2S_GLSTS_TX0_FIFO_FULL BIT(12)
struct ep93xx_i2s_info {
struct clk *mclk;
struct clk *sclk;
struct clk *lrclk;
void __iomem *regs;
struct snd_dmaengine_dai_dma_data dma_params_rx;
struct snd_dmaengine_dai_dma_data dma_params_tx;
};
static struct ep93xx_dma_data ep93xx_i2s_dma_data[] = {
[SNDRV_PCM_STREAM_PLAYBACK] = {
.name = "i2s-pcm-out",
.port = EP93XX_DMA_I2S1,
.direction = DMA_MEM_TO_DEV,
},
[SNDRV_PCM_STREAM_CAPTURE] = {
.name = "i2s-pcm-in",
.port = EP93XX_DMA_I2S1,
.direction = DMA_DEV_TO_MEM,
},
};
static inline void ep93xx_i2s_write_reg(struct ep93xx_i2s_info *info,
unsigned reg, unsigned val)
{
__raw_writel(val, info->regs + reg);
}
static inline unsigned ep93xx_i2s_read_reg(struct ep93xx_i2s_info *info,
unsigned reg)
{
return __raw_readl(info->regs + reg);
}
static void ep93xx_i2s_enable(struct ep93xx_i2s_info *info, int stream)
{
unsigned base_reg;
if ((ep93xx_i2s_read_reg(info, EP93XX_I2S_TX0EN) & 0x1) == 0 &&
(ep93xx_i2s_read_reg(info, EP93XX_I2S_RX0EN) & 0x1) == 0) {
/* Enable clocks */
clk_prepare_enable(info->mclk);
clk_prepare_enable(info->sclk);
clk_prepare_enable(info->lrclk);
/* Enable i2s */
ep93xx_i2s_write_reg(info, EP93XX_I2S_GLCTRL, 1);
}
/* Enable fifo */
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
base_reg = EP93XX_I2S_TX0EN;
else
base_reg = EP93XX_I2S_RX0EN;
ep93xx_i2s_write_reg(info, base_reg, 1);
/* Enable TX IRQs (FIFO empty or underflow) */
if (IS_ENABLED(CONFIG_SND_EP93XX_SOC_I2S_WATCHDOG) &&
stream == SNDRV_PCM_STREAM_PLAYBACK)
ep93xx_i2s_write_reg(info, EP93XX_I2S_TXCTRL,
EP93XX_I2S_TXCTRL_TXEMPTY_LVL |
EP93XX_I2S_TXCTRL_TXUFIE);
}
static void ep93xx_i2s_disable(struct ep93xx_i2s_info *info, int stream)
{
unsigned base_reg;
/* Disable IRQs */
if (IS_ENABLED(CONFIG_SND_EP93XX_SOC_I2S_WATCHDOG) &&
stream == SNDRV_PCM_STREAM_PLAYBACK)
ep93xx_i2s_write_reg(info, EP93XX_I2S_TXCTRL, 0);
/* Disable fifo */
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
base_reg = EP93XX_I2S_TX0EN;
else
base_reg = EP93XX_I2S_RX0EN;
ep93xx_i2s_write_reg(info, base_reg, 0);
if ((ep93xx_i2s_read_reg(info, EP93XX_I2S_TX0EN) & 0x1) == 0 &&
(ep93xx_i2s_read_reg(info, EP93XX_I2S_RX0EN) & 0x1) == 0) {
/* Disable i2s */
ep93xx_i2s_write_reg(info, EP93XX_I2S_GLCTRL, 0);
/* Disable clocks */
clk_disable_unprepare(info->lrclk);
clk_disable_unprepare(info->sclk);
clk_disable_unprepare(info->mclk);
}
}
/*
* According to documentation I2S controller can handle underflow conditions
* just fine, but in reality the state machine is sometimes confused so that
* the whole stream is shifted by one byte. The watchdog below disables the TX
* FIFO, fills the buffer with zeroes and re-enables the FIFO. State machine
* is being reset and by filling the buffer we get some time before next
* underflow happens.
*/
static irqreturn_t ep93xx_i2s_interrupt(int irq, void *dev_id)
{
struct ep93xx_i2s_info *info = dev_id;
/* Disable FIFO */
ep93xx_i2s_write_reg(info, EP93XX_I2S_TX0EN, 0);
/*
* Fill the TX FIFO with zeroes; this way we can defer the next IRQ as much as
* possible and give the DMA more time to catch up. There are actually
* only 8 samples in this FIFO, so even at 8 kHz the maximum deferral here is
* 1 ms.
*/
while (!(ep93xx_i2s_read_reg(info, EP93XX_I2S_GLSTS) &
EP93XX_I2S_GLSTS_TX0_FIFO_FULL)) {
ep93xx_i2s_write_reg(info, EP93XX_I2S_I2STX0LFT, 0);
ep93xx_i2s_write_reg(info, EP93XX_I2S_I2STX0RT, 0);
}
/* Re-enable FIFO */
ep93xx_i2s_write_reg(info, EP93XX_I2S_TX0EN, 1);
return IRQ_HANDLED;
}
static int ep93xx_i2s_dai_probe(struct snd_soc_dai *dai)
{
struct ep93xx_i2s_info *info = snd_soc_dai_get_drvdata(dai);
info->dma_params_tx.filter_data =
&ep93xx_i2s_dma_data[SNDRV_PCM_STREAM_PLAYBACK];
info->dma_params_rx.filter_data =
&ep93xx_i2s_dma_data[SNDRV_PCM_STREAM_CAPTURE];
snd_soc_dai_init_dma_data(dai, &info->dma_params_tx,
&info->dma_params_rx);
return 0;
}
static int ep93xx_i2s_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct ep93xx_i2s_info *info = snd_soc_dai_get_drvdata(dai);
ep93xx_i2s_enable(info, substream->stream);
return 0;
}
static void ep93xx_i2s_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct ep93xx_i2s_info *info = snd_soc_dai_get_drvdata(dai);
ep93xx_i2s_disable(info, substream->stream);
}
static int ep93xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
unsigned int fmt)
{
struct ep93xx_i2s_info *info = snd_soc_dai_get_drvdata(cpu_dai);
unsigned int clk_cfg;
unsigned int txlin_ctrl = 0;
unsigned int rxlin_ctrl = 0;
clk_cfg = ep93xx_i2s_read_reg(info, EP93XX_I2S_RXCLKCFG);
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
clk_cfg |= EP93XX_I2S_CLKCFG_REL;
break;
case SND_SOC_DAIFMT_LEFT_J:
clk_cfg &= ~EP93XX_I2S_CLKCFG_REL;
break;
case SND_SOC_DAIFMT_RIGHT_J:
clk_cfg &= ~EP93XX_I2S_CLKCFG_REL;
rxlin_ctrl |= EP93XX_I2S_RXLINCTRLDATA_R_JUST;
txlin_ctrl |= EP93XX_I2S_TXLINCTRLDATA_R_JUST;
break;
default:
return -EINVAL;
}
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
case SND_SOC_DAIFMT_BP_FP:
/* CPU is provider */
clk_cfg |= EP93XX_I2S_CLKCFG_MASTER;
break;
case SND_SOC_DAIFMT_BC_FC:
/* Codec is provider */
clk_cfg &= ~EP93XX_I2S_CLKCFG_MASTER;
break;
default:
return -EINVAL;
}
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
/* Negative bit clock, lrclk low on left word */
clk_cfg &= ~(EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_LRS);
break;
case SND_SOC_DAIFMT_NB_IF:
/* Negative bit clock, lrclk low on right word */
clk_cfg &= ~EP93XX_I2S_CLKCFG_CKP;
clk_cfg |= EP93XX_I2S_CLKCFG_LRS;
break;
case SND_SOC_DAIFMT_IB_NF:
/* Positive bit clock, lrclk low on left word */
clk_cfg |= EP93XX_I2S_CLKCFG_CKP;
clk_cfg &= ~EP93XX_I2S_CLKCFG_LRS;
break;
case SND_SOC_DAIFMT_IB_IF:
/* Positive bit clock, lrclk low on right word */
clk_cfg |= EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_LRS;
break;
}
/* Write new register values */
ep93xx_i2s_write_reg(info, EP93XX_I2S_RXCLKCFG, clk_cfg);
ep93xx_i2s_write_reg(info, EP93XX_I2S_TXCLKCFG, clk_cfg);
ep93xx_i2s_write_reg(info, EP93XX_I2S_RXLINCTRLDATA, rxlin_ctrl);
ep93xx_i2s_write_reg(info, EP93XX_I2S_TXLINCTRLDATA, txlin_ctrl);
return 0;
}
static int ep93xx_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct ep93xx_i2s_info *info = snd_soc_dai_get_drvdata(dai);
unsigned word_len, div, sdiv, lrdiv;
int err;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
word_len = EP93XX_I2S_WRDLEN_16;
break;
case SNDRV_PCM_FORMAT_S24_LE:
word_len = EP93XX_I2S_WRDLEN_24;
break;
case SNDRV_PCM_FORMAT_S32_LE:
word_len = EP93XX_I2S_WRDLEN_32;
break;
default:
return -EINVAL;
}
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
ep93xx_i2s_write_reg(info, EP93XX_I2S_TXWRDLEN, word_len);
else
ep93xx_i2s_write_reg(info, EP93XX_I2S_RXWRDLEN, word_len);
/*
* The EP93xx I2S module can be set up so that the SCLK / LRCLK ratio is
* 32, 64 or 128, and the MCLK / SCLK ratio is 2 or 4.
* We set LRCLK equal to `rate' and the minimum SCLK / LRCLK
* ratio is 64, because our sample size is 32 bits * 2 channels.
* The I2S standard permits us to transmit more bits than
* the codec uses.
*/
div = clk_get_rate(info->mclk) / params_rate(params);
sdiv = 4;
if (div > (256 + 512) / 2) {
lrdiv = 128;
} else {
lrdiv = 64;
if (div < (128 + 256) / 2)
sdiv = 2;
}
err = clk_set_rate(info->sclk, clk_get_rate(info->mclk) / sdiv);
if (err)
return err;
err = clk_set_rate(info->lrclk, clk_get_rate(info->sclk) / lrdiv);
if (err)
return err;
return 0;
}
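/*
* Worked example (comment only, not driver code): with mclk = 12.288 MHz and
* rate = 48 kHz, div = 256, so lrdiv = 64 and sdiv stays at 4. That gives
* sclk = 12.288 MHz / 4 = 3.072 MHz and lrclk = 3.072 MHz / 64 = 48 kHz,
* matching the requested rate.
*/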
static int ep93xx_i2s_set_sysclk(struct snd_soc_dai *cpu_dai, int clk_id,
unsigned int freq, int dir)
{
struct ep93xx_i2s_info *info = snd_soc_dai_get_drvdata(cpu_dai);
if (dir == SND_SOC_CLOCK_IN || clk_id != 0)
return -EINVAL;
if (!freq)
return 0;
return clk_set_rate(info->mclk, freq);
}
#ifdef CONFIG_PM
static int ep93xx_i2s_suspend(struct snd_soc_component *component)
{
struct ep93xx_i2s_info *info = snd_soc_component_get_drvdata(component);
if (!snd_soc_component_active(component))
return 0;
ep93xx_i2s_disable(info, SNDRV_PCM_STREAM_PLAYBACK);
ep93xx_i2s_disable(info, SNDRV_PCM_STREAM_CAPTURE);
return 0;
}
static int ep93xx_i2s_resume(struct snd_soc_component *component)
{
struct ep93xx_i2s_info *info = snd_soc_component_get_drvdata(component);
if (!snd_soc_component_active(component))
return 0;
ep93xx_i2s_enable(info, SNDRV_PCM_STREAM_PLAYBACK);
ep93xx_i2s_enable(info, SNDRV_PCM_STREAM_CAPTURE);
return 0;
}
#else
#define ep93xx_i2s_suspend NULL
#define ep93xx_i2s_resume NULL
#endif
static const struct snd_soc_dai_ops ep93xx_i2s_dai_ops = {
.probe = ep93xx_i2s_dai_probe,
.startup = ep93xx_i2s_startup,
.shutdown = ep93xx_i2s_shutdown,
.hw_params = ep93xx_i2s_hw_params,
.set_sysclk = ep93xx_i2s_set_sysclk,
.set_fmt = ep93xx_i2s_set_dai_fmt,
};
#define EP93XX_I2S_FORMATS (SNDRV_PCM_FMTBIT_S32_LE)
static struct snd_soc_dai_driver ep93xx_i2s_dai = {
.symmetric_rate = 1,
.playback = {
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_192000,
.formats = EP93XX_I2S_FORMATS,
},
.capture = {
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_192000,
.formats = EP93XX_I2S_FORMATS,
},
.ops = &ep93xx_i2s_dai_ops,
};
static const struct snd_soc_component_driver ep93xx_i2s_component = {
.name = "ep93xx-i2s",
.suspend = ep93xx_i2s_suspend,
.resume = ep93xx_i2s_resume,
.legacy_dai_naming = 1,
};
static int ep93xx_i2s_probe(struct platform_device *pdev)
{
struct ep93xx_i2s_info *info;
int err;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(info->regs))
return PTR_ERR(info->regs);
if (IS_ENABLED(CONFIG_SND_EP93XX_SOC_I2S_WATCHDOG)) {
int irq = platform_get_irq(pdev, 0);
if (irq <= 0)
return irq < 0 ? irq : -ENODEV;
err = devm_request_irq(&pdev->dev, irq, ep93xx_i2s_interrupt, 0,
pdev->name, info);
if (err)
return err;
}
info->mclk = clk_get(&pdev->dev, "mclk");
if (IS_ERR(info->mclk)) {
err = PTR_ERR(info->mclk);
goto fail;
}
info->sclk = clk_get(&pdev->dev, "sclk");
if (IS_ERR(info->sclk)) {
err = PTR_ERR(info->sclk);
goto fail_put_mclk;
}
info->lrclk = clk_get(&pdev->dev, "lrclk");
if (IS_ERR(info->lrclk)) {
err = PTR_ERR(info->lrclk);
goto fail_put_sclk;
}
dev_set_drvdata(&pdev->dev, info);
err = devm_snd_soc_register_component(&pdev->dev, &ep93xx_i2s_component,
&ep93xx_i2s_dai, 1);
if (err)
goto fail_put_lrclk;
err = devm_ep93xx_pcm_platform_register(&pdev->dev);
if (err)
goto fail_put_lrclk;
return 0;
fail_put_lrclk:
clk_put(info->lrclk);
fail_put_sclk:
clk_put(info->sclk);
fail_put_mclk:
clk_put(info->mclk);
fail:
return err;
}
static void ep93xx_i2s_remove(struct platform_device *pdev)
{
struct ep93xx_i2s_info *info = dev_get_drvdata(&pdev->dev);
clk_put(info->lrclk);
clk_put(info->sclk);
clk_put(info->mclk);
}
static const struct of_device_id ep93xx_i2s_of_ids[] = {
{ .compatible = "cirrus,ep9301-i2s" },
{}
};
MODULE_DEVICE_TABLE(of, ep93xx_i2s_of_ids);
static struct platform_driver ep93xx_i2s_driver = {
.probe = ep93xx_i2s_probe,
.remove_new = ep93xx_i2s_remove,
.driver = {
.name = "ep93xx-i2s",
.of_match_table = ep93xx_i2s_of_ids,
},
};
module_platform_driver(ep93xx_i2s_driver);
MODULE_ALIAS("platform:ep93xx-i2s");
MODULE_AUTHOR("Ryan Mallon");
MODULE_DESCRIPTION("EP93XX I2S driver");
MODULE_LICENSE("GPL");
| linux-master | sound/soc/cirrus/ep93xx-i2s.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) STMicroelectronics SA 2015
* Authors: Arnaud Pouliquen <[email protected]>
* for STMicroelectronics.
*/
#include <sound/soc.h>
#include "uniperif.h"
#define UNIPERIF_READER_I2S_IN 0 /* reader id connected to I2S/TDM TX bus */
/*
* Note: snd_pcm_hardware is linked to the DMA controller but is declared here
* to integrate the unireader capability in terms of rate and supported channels.
*/
static const struct snd_pcm_hardware uni_reader_pcm_hw = {
.info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID,
.formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_CONTINUOUS,
.rate_min = 8000,
.rate_max = 96000,
.channels_min = 2,
.channels_max = 8,
.periods_min = 2,
.periods_max = 48,
.period_bytes_min = 128,
.period_bytes_max = 64 * PAGE_SIZE,
.buffer_bytes_max = 256 * PAGE_SIZE
};
/*
* uni_reader_irq_handler
* In case of error the audio stream is stopped; the stop action is protected
* via the PCM stream lock to avoid a race condition with the trigger callback.
*/
static irqreturn_t uni_reader_irq_handler(int irq, void *dev_id)
{
irqreturn_t ret = IRQ_NONE;
struct uniperif *reader = dev_id;
unsigned int status;
spin_lock(&reader->irq_lock);
if (!reader->substream)
goto irq_spin_unlock;
snd_pcm_stream_lock(reader->substream);
if (reader->state == UNIPERIF_STATE_STOPPED) {
/* Unexpected IRQ: do nothing */
dev_warn(reader->dev, "unexpected IRQ\n");
goto stream_unlock;
}
/* Get interrupt status & clear them immediately */
status = GET_UNIPERIF_ITS(reader);
SET_UNIPERIF_ITS_BCLR(reader, status);
/* Check for fifo overflow error */
if (unlikely(status & UNIPERIF_ITS_FIFO_ERROR_MASK(reader))) {
dev_err(reader->dev, "FIFO error detected\n");
snd_pcm_stop(reader->substream, SNDRV_PCM_STATE_XRUN);
ret = IRQ_HANDLED;
}
stream_unlock:
snd_pcm_stream_unlock(reader->substream);
irq_spin_unlock:
spin_unlock(&reader->irq_lock);
return ret;
}
static int uni_reader_prepare_pcm(struct snd_pcm_runtime *runtime,
struct uniperif *reader)
{
int slot_width;
/* Force slot width to 32 in I2S mode */
if ((reader->daifmt & SND_SOC_DAIFMT_FORMAT_MASK)
== SND_SOC_DAIFMT_I2S) {
slot_width = 32;
} else {
switch (runtime->format) {
case SNDRV_PCM_FORMAT_S16_LE:
slot_width = 16;
break;
default:
slot_width = 32;
break;
}
}
/* Number of bits per subframe (i.e. one channel sample) on input. */
switch (slot_width) {
case 32:
SET_UNIPERIF_I2S_FMT_NBIT_32(reader);
SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(reader);
break;
case 16:
SET_UNIPERIF_I2S_FMT_NBIT_16(reader);
SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(reader);
break;
default:
dev_err(reader->dev, "subframe format not supported\n");
return -EINVAL;
}
/* Configure data memory format */
switch (runtime->format) {
case SNDRV_PCM_FORMAT_S16_LE:
/* One data word contains two samples */
SET_UNIPERIF_CONFIG_MEM_FMT_16_16(reader);
break;
case SNDRV_PCM_FORMAT_S32_LE:
/*
* Actually "16 bits/0 bits" means "32/28/24/20/18/16 bits
* on the MSB then zeros (if less than 32 bytes)"...
*/
SET_UNIPERIF_CONFIG_MEM_FMT_16_0(reader);
break;
default:
dev_err(reader->dev, "format not supported\n");
return -EINVAL;
}
/* Number of channels must be even */
if ((runtime->channels % 2) || (runtime->channels < 2) ||
(runtime->channels > 10)) {
dev_err(reader->dev, "%s: invalid nb of channels\n", __func__);
return -EINVAL;
}
SET_UNIPERIF_I2S_FMT_NUM_CH(reader, runtime->channels / 2);
SET_UNIPERIF_I2S_FMT_ORDER_MSB(reader);
return 0;
}
static int uni_reader_prepare_tdm(struct snd_pcm_runtime *runtime,
struct uniperif *reader)
{
int frame_size; /* user tdm frame size in bytes */
/* default unip TDM_WORD_POS_X_Y */
unsigned int word_pos[4] = {
0x04060002, 0x0C0E080A, 0x14161012, 0x1C1E181A};
frame_size = sti_uniperiph_get_user_frame_size(runtime);
/* fix 16/0 format */
SET_UNIPERIF_CONFIG_MEM_FMT_16_0(reader);
SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(reader);
/* number of words inserted on the TDM line */
SET_UNIPERIF_I2S_FMT_NUM_CH(reader, frame_size / 4 / 2);
SET_UNIPERIF_I2S_FMT_ORDER_MSB(reader);
SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(reader);
SET_UNIPERIF_TDM_ENABLE_TDM_ENABLE(reader);
/*
* Set the timeslot allocation for words in the FIFO.
*
* HW bug: (LSB word < MSB word) => this config is not possible.
* So if we want (LSB word < MSB word), it must be handled by
* the user.
*/
sti_uniperiph_get_tdm_word_pos(reader, word_pos);
SET_UNIPERIF_TDM_WORD_POS(reader, 1_2, word_pos[WORD_1_2]);
SET_UNIPERIF_TDM_WORD_POS(reader, 3_4, word_pos[WORD_3_4]);
SET_UNIPERIF_TDM_WORD_POS(reader, 5_6, word_pos[WORD_5_6]);
SET_UNIPERIF_TDM_WORD_POS(reader, 7_8, word_pos[WORD_7_8]);
return 0;
}
static int uni_reader_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
struct uniperif *reader = priv->dai_data.uni;
struct snd_pcm_runtime *runtime = substream->runtime;
int transfer_size, trigger_limit, ret;
/* The reader should be stopped */
if (reader->state != UNIPERIF_STATE_STOPPED) {
dev_err(reader->dev, "%s: invalid reader state %d\n", __func__,
reader->state);
return -EINVAL;
}
/* Calculate transfer size (in fifo cells and bytes) for frame count */
if (reader->type == SND_ST_UNIPERIF_TYPE_TDM) {
/* transfer size = unip frame size (in 32 bits FIFO cell) */
transfer_size =
sti_uniperiph_get_user_frame_size(runtime) / 4;
} else {
transfer_size = runtime->channels * UNIPERIF_FIFO_FRAMES;
}
/* Calculate number of empty cells available before asserting DREQ */
if (reader->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
trigger_limit = UNIPERIF_FIFO_SIZE - transfer_size;
else
/*
* Since SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0
* FDMA_TRIGGER_LIMIT also controls when the state switches
* from OFF or STANDBY to AUDIO DATA.
*/
trigger_limit = transfer_size;
/* Trigger limit must be an even number */
if ((!trigger_limit % 2) ||
(trigger_limit != 1 && transfer_size % 2) ||
(trigger_limit > UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(reader))) {
dev_err(reader->dev, "invalid trigger limit %d\n",
trigger_limit);
return -EINVAL;
}
SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(reader, trigger_limit);
if (UNIPERIF_TYPE_IS_TDM(reader))
ret = uni_reader_prepare_tdm(runtime, reader);
else
ret = uni_reader_prepare_pcm(runtime, reader);
if (ret)
return ret;
switch (reader->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(reader);
SET_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(reader);
break;
case SND_SOC_DAIFMT_LEFT_J:
SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(reader);
SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(reader);
break;
case SND_SOC_DAIFMT_RIGHT_J:
SET_UNIPERIF_I2S_FMT_ALIGN_RIGHT(reader);
SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(reader);
break;
default:
dev_err(reader->dev, "format not supported\n");
return -EINVAL;
}
/* Data clocking (changing) on the rising/falling edge */
switch (reader->daifmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
SET_UNIPERIF_I2S_FMT_LR_POL_LOW(reader);
SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(reader);
break;
case SND_SOC_DAIFMT_NB_IF:
SET_UNIPERIF_I2S_FMT_LR_POL_HIG(reader);
SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(reader);
break;
case SND_SOC_DAIFMT_IB_NF:
SET_UNIPERIF_I2S_FMT_LR_POL_LOW(reader);
SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(reader);
break;
case SND_SOC_DAIFMT_IB_IF:
SET_UNIPERIF_I2S_FMT_LR_POL_HIG(reader);
SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(reader);
break;
}
/* Clear any pending interrupts */
SET_UNIPERIF_ITS_BCLR(reader, GET_UNIPERIF_ITS(reader));
SET_UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ(reader, 0);
/* Set the interrupt mask */
SET_UNIPERIF_ITM_BSET_DMA_ERROR(reader);
SET_UNIPERIF_ITM_BSET_FIFO_ERROR(reader);
SET_UNIPERIF_ITM_BSET_MEM_BLK_READ(reader);
/* Enable underflow recovery interrupts */
if (reader->underflow_enabled) {
SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE(reader);
SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED(reader);
}
/* Reset uniperipheral reader */
return sti_uniperiph_reset(reader);
}
static int uni_reader_start(struct uniperif *reader)
{
/* The reader should be stopped */
if (reader->state != UNIPERIF_STATE_STOPPED) {
dev_err(reader->dev, "%s: invalid reader state\n", __func__);
return -EINVAL;
}
/* Enable reader interrupts (and clear possible stalled ones) */
SET_UNIPERIF_ITS_BCLR_FIFO_ERROR(reader);
SET_UNIPERIF_ITM_BSET_FIFO_ERROR(reader);
/* Launch the reader */
SET_UNIPERIF_CTRL_OPERATION_PCM_DATA(reader);
/* Update state to started */
reader->state = UNIPERIF_STATE_STARTED;
return 0;
}
static int uni_reader_stop(struct uniperif *reader)
{
/* The reader should not be in stopped state */
if (reader->state == UNIPERIF_STATE_STOPPED) {
dev_err(reader->dev, "%s: invalid reader state\n", __func__);
return -EINVAL;
}
/* Turn the reader off */
SET_UNIPERIF_CTRL_OPERATION_OFF(reader);
/* Disable interrupts */
SET_UNIPERIF_ITM_BCLR(reader, GET_UNIPERIF_ITM(reader));
/* Update state to stopped and return */
reader->state = UNIPERIF_STATE_STOPPED;
return 0;
}
static int uni_reader_trigger(struct snd_pcm_substream *substream,
int cmd, struct snd_soc_dai *dai)
{
struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
struct uniperif *reader = priv->dai_data.uni;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
return uni_reader_start(reader);
case SNDRV_PCM_TRIGGER_STOP:
return uni_reader_stop(reader);
default:
return -EINVAL;
}
}
static int uni_reader_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
struct uniperif *reader = priv->dai_data.uni;
unsigned long flags;
int ret;
spin_lock_irqsave(&reader->irq_lock, flags);
reader->substream = substream;
spin_unlock_irqrestore(&reader->irq_lock, flags);
if (!UNIPERIF_TYPE_IS_TDM(reader))
return 0;
/* refine hw constraint in tdm mode */
ret = snd_pcm_hw_rule_add(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_CHANNELS,
sti_uniperiph_fix_tdm_chan,
reader, SNDRV_PCM_HW_PARAM_CHANNELS,
-1);
if (ret < 0)
return ret;
return snd_pcm_hw_rule_add(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_FORMAT,
sti_uniperiph_fix_tdm_format,
reader, SNDRV_PCM_HW_PARAM_FORMAT,
-1);
}
static void uni_reader_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
struct uniperif *reader = priv->dai_data.uni;
unsigned long flags;
spin_lock_irqsave(&reader->irq_lock, flags);
if (reader->state != UNIPERIF_STATE_STOPPED) {
/* Stop the reader */
uni_reader_stop(reader);
}
reader->substream = NULL;
spin_unlock_irqrestore(&reader->irq_lock, flags);
}
static const struct snd_soc_dai_ops uni_reader_dai_ops = {
.startup = uni_reader_startup,
.shutdown = uni_reader_shutdown,
.prepare = uni_reader_prepare,
.trigger = uni_reader_trigger,
.hw_params = sti_uniperiph_dai_hw_params,
.set_fmt = sti_uniperiph_dai_set_fmt,
.set_tdm_slot = sti_uniperiph_set_tdm_slot
};
int uni_reader_init(struct platform_device *pdev,
struct uniperif *reader)
{
int ret = 0;
reader->dev = &pdev->dev;
reader->state = UNIPERIF_STATE_STOPPED;
reader->dai_ops = &uni_reader_dai_ops;
if (UNIPERIF_TYPE_IS_TDM(reader))
reader->hw = &uni_tdm_hw;
else
reader->hw = &uni_reader_pcm_hw;
ret = devm_request_irq(&pdev->dev, reader->irq,
uni_reader_irq_handler, IRQF_SHARED,
dev_name(&pdev->dev), reader);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to request IRQ\n");
return -EBUSY;
}
spin_lock_init(&reader->irq_lock);
return 0;
}
EXPORT_SYMBOL_GPL(uni_reader_init);
| linux-master | sound/soc/sti/uniperif_reader.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) STMicroelectronics SA 2015
* Authors: Arnaud Pouliquen <[email protected]>
* for STMicroelectronics.
*/
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <sound/asoundef.h>
#include <sound/soc.h>
#include "uniperif.h"
/*
* Some hardware-related definitions
*/
/* sys config registers definitions */
#define SYS_CFG_AUDIO_GLUE 0xA4
/*
* Driver specific types.
*/
#define UNIPERIF_PLAYER_CLK_ADJ_MIN -999999
#define UNIPERIF_PLAYER_CLK_ADJ_MAX 1000000
#define UNIPERIF_PLAYER_I2S_OUT 1 /* player id connected to I2S/TDM TX bus */
/*
* Note: snd_pcm_hardware is linked to DMA controller but is declared here to
* integrate DAI_CPU capability in terms of rate and supported channels
*/
static const struct snd_pcm_hardware uni_player_pcm_hw = {
.info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID,
.formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_CONTINUOUS,
.rate_min = 8000,
.rate_max = 192000,
.channels_min = 2,
.channels_max = 8,
.periods_min = 2,
.periods_max = 48,
.period_bytes_min = 128,
.period_bytes_max = 64 * PAGE_SIZE,
.buffer_bytes_max = 256 * PAGE_SIZE
};
/*
* uni_player_irq_handler
* In case of error the audio stream is stopped; the stop action is protected
* via the PCM stream lock to avoid a race condition with the trigger callback.
*/
static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
{
irqreturn_t ret = IRQ_NONE;
struct uniperif *player = dev_id;
unsigned int status;
unsigned int tmp;
spin_lock(&player->irq_lock);
if (!player->substream)
goto irq_spin_unlock;
snd_pcm_stream_lock(player->substream);
if (player->state == UNIPERIF_STATE_STOPPED)
goto stream_unlock;
/* Get interrupt status & clear them immediately */
status = GET_UNIPERIF_ITS(player);
SET_UNIPERIF_ITS_BCLR(player, status);
/* Check for fifo error (underrun) */
if (unlikely(status & UNIPERIF_ITS_FIFO_ERROR_MASK(player))) {
dev_err(player->dev, "FIFO underflow error detected\n");
/* Interrupt is informational only when underflow recovery is enabled */
if (player->underflow_enabled) {
/* Update state to underflow */
player->state = UNIPERIF_STATE_UNDERFLOW;
} else {
/* Disable interrupt so it doesn't continually fire */
SET_UNIPERIF_ITM_BCLR_FIFO_ERROR(player);
/* Stop the player */
snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
}
ret = IRQ_HANDLED;
}
/* Check for dma error (overrun) */
if (unlikely(status & UNIPERIF_ITS_DMA_ERROR_MASK(player))) {
dev_err(player->dev, "DMA error detected\n");
/* Disable interrupt so it doesn't continually fire */
SET_UNIPERIF_ITM_BCLR_DMA_ERROR(player);
/* Stop the player */
snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
ret = IRQ_HANDLED;
}
/* Check for underflow recovery done */
if (unlikely(status & UNIPERIF_ITM_UNDERFLOW_REC_DONE_MASK(player))) {
if (!player->underflow_enabled) {
dev_err(player->dev,
"unexpected Underflow recovering\n");
ret = -EPERM;
goto stream_unlock;
}
/* Read the underflow recovery duration */
tmp = GET_UNIPERIF_STATUS_1_UNDERFLOW_DURATION(player);
dev_dbg(player->dev, "Underflow recovered (%d LR clocks max)\n",
tmp);
/* Clear the underflow recovery duration */
SET_UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION(player);
/* Update state to started */
player->state = UNIPERIF_STATE_STARTED;
ret = IRQ_HANDLED;
}
/* Check if underflow recovery failed */
if (unlikely(status &
UNIPERIF_ITM_UNDERFLOW_REC_FAILED_MASK(player))) {
dev_err(player->dev, "Underflow recovery failed\n");
/* Stop the player */
snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
ret = IRQ_HANDLED;
}
stream_unlock:
snd_pcm_stream_unlock(player->substream);
irq_spin_unlock:
spin_unlock(&player->irq_lock);
return ret;
}
static int uni_player_clk_set_rate(struct uniperif *player, unsigned long rate)
{
int rate_adjusted, rate_achieved, delta, ret;
int adjustment = player->clk_adj;
/*
* a
* F = f + --------- * f = f + d
* 1000000
*
* a
* d = --------- * f
* 1000000
*
* where:
* f - nominal rate
* a - adjustment in ppm (parts per million)
* F - rate to be set in synthesizer
* d - delta (difference) between f and F
*/
if (adjustment < 0) {
/* div64_u64 operates on unsigned values... */
delta = -1;
adjustment = -adjustment;
} else {
delta = 1;
}
/* 500000 ppm is 0.5, which is used to round up values */
delta *= (int)div64_u64((uint64_t)rate *
(uint64_t)adjustment + 500000, 1000000);
rate_adjusted = rate + delta;
/* Adjusted rate should never be == 0 */
if (!rate_adjusted)
return -EINVAL;
ret = clk_set_rate(player->clk, rate_adjusted);
if (ret < 0)
return ret;
rate_achieved = clk_get_rate(player->clk);
if (!rate_achieved)
/* A value of 0 means the clock or its parent is not valid */
return -EINVAL;
/*
* Using ALSA's adjustment control, we can modify the rate to be up
* to twice as much as requested, but no more
*/
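/* Recompute the adjustment actually achieved, in ppm, and store it back */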
delta = rate_achieved - rate;
if (delta < 0) {
/* div64_u64 operates on unsigned values... */
delta = -delta;
adjustment = -1;
} else {
adjustment = 1;
}
/* Frequency/2 is added to round up result */
adjustment *= (int)div64_u64((uint64_t)delta * 1000000 + rate / 2,
rate);
player->clk_adj = adjustment;
return 0;
}
static void uni_player_set_channel_status(struct uniperif *player,
struct snd_pcm_runtime *runtime)
{
int n;
unsigned int status;
/*
* Some AVRs and TVs require the channel status to contain a correct
* sampling frequency. If no sample rate is already specified, then
* set one.
*/
if (runtime) {
switch (runtime->rate) {
case 22050:
player->stream_settings.iec958.status[3] =
IEC958_AES3_CON_FS_22050;
break;
case 44100:
player->stream_settings.iec958.status[3] =
IEC958_AES3_CON_FS_44100;
break;
case 88200:
player->stream_settings.iec958.status[3] =
IEC958_AES3_CON_FS_88200;
break;
case 176400:
player->stream_settings.iec958.status[3] =
IEC958_AES3_CON_FS_176400;
break;
case 24000:
player->stream_settings.iec958.status[3] =
IEC958_AES3_CON_FS_24000;
break;
case 48000:
player->stream_settings.iec958.status[3] =
IEC958_AES3_CON_FS_48000;
break;
case 96000:
player->stream_settings.iec958.status[3] =
IEC958_AES3_CON_FS_96000;
break;
case 192000:
player->stream_settings.iec958.status[3] =
IEC958_AES3_CON_FS_192000;
break;
case 32000:
player->stream_settings.iec958.status[3] =
IEC958_AES3_CON_FS_32000;
break;
default:
/* Mark as sampling frequency not indicated */
player->stream_settings.iec958.status[3] =
IEC958_AES3_CON_FS_NOTID;
break;
}
}
/* Audio mode:
* Use audio mode status to select PCM or encoded mode
*/
if (player->stream_settings.iec958.status[0] & IEC958_AES0_NONAUDIO)
player->stream_settings.encoding_mode =
UNIPERIF_IEC958_ENCODING_MODE_ENCODED;
else
player->stream_settings.encoding_mode =
UNIPERIF_IEC958_ENCODING_MODE_PCM;
if (player->stream_settings.encoding_mode ==
UNIPERIF_IEC958_ENCODING_MODE_PCM)
/* Clear user validity bits */
SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(player, 0);
else
/* Set user validity bits */
SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(player, 1);
/* Program the new channel status */
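/*
 * Each CHANNEL_STA register packs four consecutive IEC958 status bytes,
 * with the lowest byte in the least significant bits
 */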
for (n = 0; n < 6; ++n) {
status =
player->stream_settings.iec958.status[0 + (n * 4)] & 0xf;
status |=
player->stream_settings.iec958.status[1 + (n * 4)] << 8;
status |=
player->stream_settings.iec958.status[2 + (n * 4)] << 16;
status |=
player->stream_settings.iec958.status[3 + (n * 4)] << 24;
SET_UNIPERIF_CHANNEL_STA_REGN(player, n, status);
}
/* Update the channel status */
if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
SET_UNIPERIF_CONFIG_CHL_STS_UPDATE(player);
else
SET_UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE(player);
}
static int uni_player_prepare_iec958(struct uniperif *player,
struct snd_pcm_runtime *runtime)
{
int clk_div;
clk_div = player->mclk / runtime->rate;
/* Oversampling must be multiple of 128 as iec958 frame is 32-bits */
if ((clk_div % 128) || (clk_div <= 0)) {
dev_err(player->dev, "%s: invalid clk_div %d\n",
__func__, clk_div);
return -EINVAL;
}
switch (runtime->format) {
case SNDRV_PCM_FORMAT_S16_LE:
/* 16/16 memory format */
SET_UNIPERIF_CONFIG_MEM_FMT_16_16(player);
/* 16-bits per sub-frame */
SET_UNIPERIF_I2S_FMT_NBIT_32(player);
/* Set 16-bit sample precision */
SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(player);
break;
case SNDRV_PCM_FORMAT_S32_LE:
/* 16/0 memory format */
SET_UNIPERIF_CONFIG_MEM_FMT_16_0(player);
/* 32-bits per sub-frame */
SET_UNIPERIF_I2S_FMT_NBIT_32(player);
/* Set 24-bit sample precision */
SET_UNIPERIF_I2S_FMT_DATA_SIZE_24(player);
break;
default:
dev_err(player->dev, "format not supported\n");
return -EINVAL;
}
/* Set parity to be calculated by the hardware */
SET_UNIPERIF_CONFIG_PARITY_CNTR_BY_HW(player);
/* Set channel status bits to be inserted by the hardware */
SET_UNIPERIF_CONFIG_CHANNEL_STA_CNTR_BY_HW(player);
/* Set user data bits to be inserted by the hardware */
SET_UNIPERIF_CONFIG_USER_DAT_CNTR_BY_HW(player);
/* Set validity bits to be inserted by the hardware */
SET_UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_BY_HW(player);
/* Set full software control to disabled */
SET_UNIPERIF_CONFIG_SPDIF_SW_CTRL_DISABLE(player);
SET_UNIPERIF_CTRL_ZERO_STUFF_HW(player);
mutex_lock(&player->ctrl_lock);
/* Update the channel status */
uni_player_set_channel_status(player, runtime);
mutex_unlock(&player->ctrl_lock);
/* Clear the user validity user bits */
SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(player, 0);
/* Disable one-bit audio mode */
SET_UNIPERIF_CONFIG_ONE_BIT_AUD_DISABLE(player);
/* Enable consecutive frames repetition of Z preamble (not for HBRA) */
SET_UNIPERIF_CONFIG_REPEAT_CHL_STS_ENABLE(player);
/* Select subframe order SUBF1_SUBF0 (left/right channels swapped!) */
SET_UNIPERIF_CONFIG_SUBFRAME_SEL_SUBF1_SUBF0(player);
/* Set data output as MSB first */
SET_UNIPERIF_I2S_FMT_ORDER_MSB(player);
if (player->stream_settings.encoding_mode ==
UNIPERIF_IEC958_ENCODING_MODE_ENCODED)
SET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_ON(player);
else
SET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_OFF(player);
SET_UNIPERIF_I2S_FMT_NUM_CH(player, runtime->channels / 2);
/* Set rounding to off */
SET_UNIPERIF_CTRL_ROUNDING_OFF(player);
/* Set clock divisor */
SET_UNIPERIF_CTRL_DIVIDER(player, clk_div / 128);
/* Set the spdif latency to not wait before starting player */
SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(player);
/*
* Ensure iec958 formatting is off. It will be enabled in function
* uni_player_start() at the same time as the operation
* mode is set to work around a silicon issue.
*/
if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
SET_UNIPERIF_CTRL_SPDIF_FMT_OFF(player);
else
SET_UNIPERIF_CTRL_SPDIF_FMT_ON(player);
return 0;
}
static int uni_player_prepare_pcm(struct uniperif *player,
struct snd_pcm_runtime *runtime)
{
int output_frame_size, slot_width, clk_div;
/* Force slot width to 32 in I2S mode (HW constraint) */
if ((player->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
SND_SOC_DAIFMT_I2S)
slot_width = 32;
else
slot_width = snd_pcm_format_width(runtime->format);
output_frame_size = slot_width * runtime->channels;
clk_div = player->mclk / runtime->rate;
/*
* For 32 bits subframe clk_div must be a multiple of 128,
* for 16 bits must be a multiple of 64
*/
if ((slot_width == 32) && (clk_div % 128)) {
dev_err(player->dev, "%s: invalid clk_div\n", __func__);
return -EINVAL;
}
if ((slot_width == 16) && (clk_div % 64)) {
dev_err(player->dev, "%s: invalid clk_div\n", __func__);
return -EINVAL;
}
/*
* Number of bits per subframe (which is one channel sample)
* on output - Transfer 16 or 32 bits from FIFO
*/
switch (slot_width) {
case 32:
SET_UNIPERIF_I2S_FMT_NBIT_32(player);
SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(player);
break;
case 16:
SET_UNIPERIF_I2S_FMT_NBIT_16(player);
SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(player);
break;
default:
dev_err(player->dev, "subframe format not supported\n");
return -EINVAL;
}
/* Configure data memory format */
switch (runtime->format) {
case SNDRV_PCM_FORMAT_S16_LE:
/* One data word contains two samples */
SET_UNIPERIF_CONFIG_MEM_FMT_16_16(player);
break;
case SNDRV_PCM_FORMAT_S32_LE:
/*
* Actually "16 bits/0 bits" means "32/28/24/20/18/16 bits
* on the left than zeros (if less than 32 bytes)"... ;-)
*/
SET_UNIPERIF_CONFIG_MEM_FMT_16_0(player);
break;
default:
dev_err(player->dev, "format not supported\n");
return -EINVAL;
}
/* Set rounding to off */
SET_UNIPERIF_CTRL_ROUNDING_OFF(player);
/* Set clock divisor */
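/* divider = mclk / (2 * sclk), with sclk = rate * output_frame_size */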
SET_UNIPERIF_CTRL_DIVIDER(player, clk_div / (2 * output_frame_size));
/* Number of channels must be even */
if ((runtime->channels % 2) || (runtime->channels < 2) ||
(runtime->channels > 10)) {
dev_err(player->dev, "%s: invalid nb of channels\n", __func__);
return -EINVAL;
}
SET_UNIPERIF_I2S_FMT_NUM_CH(player, runtime->channels / 2);
/* Set 1-bit audio format to disabled */
SET_UNIPERIF_CONFIG_ONE_BIT_AUD_DISABLE(player);
SET_UNIPERIF_I2S_FMT_ORDER_MSB(player);
/* No iec958 formatting as outputting to DAC */
SET_UNIPERIF_CTRL_SPDIF_FMT_OFF(player);
return 0;
}
static int uni_player_prepare_tdm(struct uniperif *player,
struct snd_pcm_runtime *runtime)
{
int tdm_frame_size; /* unip tdm frame size in bytes */
int user_frame_size; /* user tdm frame size in bytes */
/* default unip TDM_WORD_POS_X_Y */
unsigned int word_pos[4] = {
0x04060002, 0x0C0E080A, 0x14161012, 0x1C1E181A};
int freq, ret;
tdm_frame_size =
sti_uniperiph_get_unip_tdm_frame_size(player);
user_frame_size =
sti_uniperiph_get_user_frame_size(runtime);
/* fix 16/0 format */
SET_UNIPERIF_CONFIG_MEM_FMT_16_0(player);
SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(player);
/* number of words inserted on the TDM line */
SET_UNIPERIF_I2S_FMT_NUM_CH(player, user_frame_size / 4 / 2);
SET_UNIPERIF_I2S_FMT_ORDER_MSB(player);
SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(player);
/* Enable the tdm functionality */
SET_UNIPERIF_TDM_ENABLE_TDM_ENABLE(player);
/* number of 8 bits timeslots avail in unip tdm frame */
SET_UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT(player, tdm_frame_size);
/* set the timeslot allocation for words in FIFO */
sti_uniperiph_get_tdm_word_pos(player, word_pos);
SET_UNIPERIF_TDM_WORD_POS(player, 1_2, word_pos[WORD_1_2]);
SET_UNIPERIF_TDM_WORD_POS(player, 3_4, word_pos[WORD_3_4]);
SET_UNIPERIF_TDM_WORD_POS(player, 5_6, word_pos[WORD_5_6]);
SET_UNIPERIF_TDM_WORD_POS(player, 7_8, word_pos[WORD_7_8]);
/* set unip clk rate (not done via set_sysclk ops) */
freq = runtime->rate * tdm_frame_size * 8;
mutex_lock(&player->ctrl_lock);
ret = uni_player_clk_set_rate(player, freq);
if (!ret)
player->mclk = freq;
mutex_unlock(&player->ctrl_lock);
return 0;
}
/*
* ALSA uniperipheral iec958 controls
*/
static int uni_player_ctl_iec958_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
uinfo->count = 1;
return 0;
}
static int uni_player_ctl_iec958_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
struct uniperif *player = priv->dai_data.uni;
struct snd_aes_iec958 *iec958 = &player->stream_settings.iec958;
mutex_lock(&player->ctrl_lock);
ucontrol->value.iec958.status[0] = iec958->status[0];
ucontrol->value.iec958.status[1] = iec958->status[1];
ucontrol->value.iec958.status[2] = iec958->status[2];
ucontrol->value.iec958.status[3] = iec958->status[3];
mutex_unlock(&player->ctrl_lock);
return 0;
}
static int uni_player_ctl_iec958_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
struct uniperif *player = priv->dai_data.uni;
struct snd_aes_iec958 *iec958 = &player->stream_settings.iec958;
unsigned long flags;
mutex_lock(&player->ctrl_lock);
iec958->status[0] = ucontrol->value.iec958.status[0];
iec958->status[1] = ucontrol->value.iec958.status[1];
iec958->status[2] = ucontrol->value.iec958.status[2];
iec958->status[3] = ucontrol->value.iec958.status[3];
spin_lock_irqsave(&player->irq_lock, flags);
if (player->substream && player->substream->runtime)
uni_player_set_channel_status(player,
player->substream->runtime);
else
uni_player_set_channel_status(player, NULL);
spin_unlock_irqrestore(&player->irq_lock, flags);
mutex_unlock(&player->ctrl_lock);
return 0;
}
static struct snd_kcontrol_new uni_player_iec958_ctl = {
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
.info = uni_player_ctl_iec958_info,
.get = uni_player_ctl_iec958_get,
.put = uni_player_ctl_iec958_put,
};
/*
* uniperif rate adjustment control
*/
static int snd_sti_clk_adjustment_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = UNIPERIF_PLAYER_CLK_ADJ_MIN;
uinfo->value.integer.max = UNIPERIF_PLAYER_CLK_ADJ_MAX;
uinfo->value.integer.step = 1;
return 0;
}
static int snd_sti_clk_adjustment_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
struct uniperif *player = priv->dai_data.uni;
mutex_lock(&player->ctrl_lock);
ucontrol->value.integer.value[0] = player->clk_adj;
mutex_unlock(&player->ctrl_lock);
return 0;
}
static int snd_sti_clk_adjustment_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
struct uniperif *player = priv->dai_data.uni;
int ret = 0;
if ((ucontrol->value.integer.value[0] < UNIPERIF_PLAYER_CLK_ADJ_MIN) ||
(ucontrol->value.integer.value[0] > UNIPERIF_PLAYER_CLK_ADJ_MAX))
return -EINVAL;
mutex_lock(&player->ctrl_lock);
player->clk_adj = ucontrol->value.integer.value[0];
if (player->mclk)
ret = uni_player_clk_set_rate(player, player->mclk);
mutex_unlock(&player->ctrl_lock);
return ret;
}
static struct snd_kcontrol_new uni_player_clk_adj_ctl = {
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = "PCM Playback Oversampling Freq. Adjustment",
.info = snd_sti_clk_adjustment_info,
.get = snd_sti_clk_adjustment_get,
.put = snd_sti_clk_adjustment_put,
};
static struct snd_kcontrol_new *snd_sti_pcm_ctl[] = {
&uni_player_clk_adj_ctl,
};
static struct snd_kcontrol_new *snd_sti_iec_ctl[] = {
&uni_player_iec958_ctl,
&uni_player_clk_adj_ctl,
};
static int uni_player_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
struct uniperif *player = priv->dai_data.uni;
unsigned long flags;
int ret;
spin_lock_irqsave(&player->irq_lock, flags);
player->substream = substream;
spin_unlock_irqrestore(&player->irq_lock, flags);
player->clk_adj = 0;
if (!UNIPERIF_TYPE_IS_TDM(player))
return 0;
/* refine hw constraint in tdm mode */
ret = snd_pcm_hw_rule_add(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_CHANNELS,
sti_uniperiph_fix_tdm_chan,
player, SNDRV_PCM_HW_PARAM_CHANNELS,
-1);
if (ret < 0)
return ret;
return snd_pcm_hw_rule_add(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_FORMAT,
sti_uniperiph_fix_tdm_format,
player, SNDRV_PCM_HW_PARAM_FORMAT,
-1);
}
static int uni_player_set_sysclk(struct snd_soc_dai *dai, int clk_id,
unsigned int freq, int dir)
{
struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
struct uniperif *player = priv->dai_data.uni;
int ret;
if (UNIPERIF_TYPE_IS_TDM(player) || (dir == SND_SOC_CLOCK_IN))
return 0;
if (clk_id != 0)
return -EINVAL;
mutex_lock(&player->ctrl_lock);
ret = uni_player_clk_set_rate(player, freq);
if (!ret)
player->mclk = freq;
mutex_unlock(&player->ctrl_lock);
return ret;
}
static int uni_player_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
struct uniperif *player = priv->dai_data.uni;
struct snd_pcm_runtime *runtime = substream->runtime;
int transfer_size, trigger_limit;
int ret;
/* The player should be stopped */
if (player->state != UNIPERIF_STATE_STOPPED) {
dev_err(player->dev, "%s: invalid player state %d\n", __func__,
player->state);
return -EINVAL;
}
/* Calculate transfer size (in fifo cells and bytes) for frame count */
if (player->type == SND_ST_UNIPERIF_TYPE_TDM) {
/* transfer size = user frame size (in 32-bit FIFO cells) */
transfer_size =
sti_uniperiph_get_user_frame_size(runtime) / 4;
} else {
transfer_size = runtime->channels * UNIPERIF_FIFO_FRAMES;
}
/* Calculate number of empty cells available before asserting DREQ */
if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) {
trigger_limit = UNIPERIF_FIFO_SIZE - transfer_size;
} else {
/*
* Since SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0
* FDMA_TRIGGER_LIMIT also controls when the state switches
* from OFF or STANDBY to AUDIO DATA.
*/
trigger_limit = transfer_size;
}
/* Trigger limit must be an even number */
if ((!trigger_limit % 2) || (trigger_limit != 1 && transfer_size % 2) ||
(trigger_limit > UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(player))) {
dev_err(player->dev, "invalid trigger limit %d\n",
trigger_limit);
return -EINVAL;
}
SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(player, trigger_limit);
/* Uniperipheral setup depends on player type */
switch (player->type) {
case SND_ST_UNIPERIF_TYPE_HDMI:
ret = uni_player_prepare_iec958(player, runtime);
break;
case SND_ST_UNIPERIF_TYPE_PCM:
ret = uni_player_prepare_pcm(player, runtime);
break;
case SND_ST_UNIPERIF_TYPE_SPDIF:
ret = uni_player_prepare_iec958(player, runtime);
break;
case SND_ST_UNIPERIF_TYPE_TDM:
ret = uni_player_prepare_tdm(player, runtime);
break;
default:
dev_err(player->dev, "invalid player type\n");
return -EINVAL;
}
if (ret)
return ret;
switch (player->daifmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
SET_UNIPERIF_I2S_FMT_LR_POL_LOW(player);
SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(player);
break;
case SND_SOC_DAIFMT_NB_IF:
SET_UNIPERIF_I2S_FMT_LR_POL_HIG(player);
SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(player);
break;
case SND_SOC_DAIFMT_IB_NF:
SET_UNIPERIF_I2S_FMT_LR_POL_LOW(player);
SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(player);
break;
case SND_SOC_DAIFMT_IB_IF:
SET_UNIPERIF_I2S_FMT_LR_POL_HIG(player);
SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(player);
break;
}
switch (player->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(player);
SET_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(player);
break;
case SND_SOC_DAIFMT_LEFT_J:
SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(player);
SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(player);
break;
case SND_SOC_DAIFMT_RIGHT_J:
SET_UNIPERIF_I2S_FMT_ALIGN_RIGHT(player);
SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(player);
break;
default:
dev_err(player->dev, "format not supported\n");
return -EINVAL;
}
SET_UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ(player, 0);
return sti_uniperiph_reset(player);
}
static int uni_player_start(struct uniperif *player)
{
int ret;
/* The player should be stopped */
if (player->state != UNIPERIF_STATE_STOPPED) {
dev_err(player->dev, "%s: invalid player state\n", __func__);
return -EINVAL;
}
ret = clk_prepare_enable(player->clk);
if (ret) {
dev_err(player->dev, "%s: Failed to enable clock\n", __func__);
return ret;
}
/* Clear any pending interrupts */
SET_UNIPERIF_ITS_BCLR(player, GET_UNIPERIF_ITS(player));
/* Set the interrupt mask */
SET_UNIPERIF_ITM_BSET_DMA_ERROR(player);
SET_UNIPERIF_ITM_BSET_FIFO_ERROR(player);
/* Enable underflow recovery interrupts */
if (player->underflow_enabled) {
SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE(player);
SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED(player);
}
ret = sti_uniperiph_reset(player);
if (ret < 0) {
clk_disable_unprepare(player->clk);
return ret;
}
/*
* This does not use the IEC61937 features of the uniperipheral hardware.
* Instead IEC61937 framing is performed in software and inserted directly
* into the audio data stream. As such, when encoded mode is selected,
* linear PCM mode is still used, with the difference that the channel
* status bits are set for encoded mode and the validity bits are set.
*/
SET_UNIPERIF_CTRL_OPERATION_PCM_DATA(player);
/*
* If iec958 formatting is required for hdmi or spdif, then it must be
* enabled after the operation mode is set. If set prior to this, it
* will not take effect and will hang the player.
*/
if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
if (UNIPERIF_TYPE_IS_IEC958(player))
SET_UNIPERIF_CTRL_SPDIF_FMT_ON(player);
/* Force channel status update (no update if clk disabled) */
if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
SET_UNIPERIF_CONFIG_CHL_STS_UPDATE(player);
else
SET_UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE(player);
/* Update state to started */
player->state = UNIPERIF_STATE_STARTED;
return 0;
}
static int uni_player_stop(struct uniperif *player)
{
int ret;
/* The player should not be in stopped state */
if (player->state == UNIPERIF_STATE_STOPPED) {
dev_err(player->dev, "%s: invalid player state\n", __func__);
return -EINVAL;
}
/* Turn the player off */
SET_UNIPERIF_CTRL_OPERATION_OFF(player);
ret = sti_uniperiph_reset(player);
if (ret < 0)
return ret;
/* Disable interrupts */
SET_UNIPERIF_ITM_BCLR(player, GET_UNIPERIF_ITM(player));
/* Disable clock */
clk_disable_unprepare(player->clk);
/* Update state to stopped and return */
player->state = UNIPERIF_STATE_STOPPED;
return 0;
}
int uni_player_resume(struct uniperif *player)
{
int ret;
/* Select the frequency synthesizer clock */
if (player->clk_sel) {
ret = regmap_field_write(player->clk_sel, 1);
if (ret) {
dev_err(player->dev,
"%s: Failed to select freq synth clock\n",
__func__);
return ret;
}
}
SET_UNIPERIF_CONFIG_BACK_STALL_REQ_DISABLE(player);
SET_UNIPERIF_CTRL_ROUNDING_OFF(player);
SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(player);
SET_UNIPERIF_CONFIG_IDLE_MOD_DISABLE(player);
return 0;
}
EXPORT_SYMBOL_GPL(uni_player_resume);
static int uni_player_trigger(struct snd_pcm_substream *substream,
int cmd, struct snd_soc_dai *dai)
{
struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
struct uniperif *player = priv->dai_data.uni;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
return uni_player_start(player);
case SNDRV_PCM_TRIGGER_STOP:
return uni_player_stop(player);
case SNDRV_PCM_TRIGGER_RESUME:
return uni_player_resume(player);
default:
return -EINVAL;
}
}
static void uni_player_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
struct uniperif *player = priv->dai_data.uni;
unsigned long flags;
spin_lock_irqsave(&player->irq_lock, flags);
if (player->state != UNIPERIF_STATE_STOPPED)
/* Stop the player */
uni_player_stop(player);
player->substream = NULL;
spin_unlock_irqrestore(&player->irq_lock, flags);
}
static int uni_player_parse_dt_audio_glue(struct platform_device *pdev,
struct uniperif *player)
{
struct device_node *node = pdev->dev.of_node;
struct regmap *regmap;
struct reg_field regfield[2] = {
/* PCM_CLK_SEL */
REG_FIELD(SYS_CFG_AUDIO_GLUE,
8 + player->id,
8 + player->id),
/* PCMP_VALID_SEL */
REG_FIELD(SYS_CFG_AUDIO_GLUE, 0, 1)
};
regmap = syscon_regmap_lookup_by_phandle(node, "st,syscfg");
if (IS_ERR(regmap)) {
dev_err(&pdev->dev, "sti-audio-clk-glue syscf not found\n");
return PTR_ERR(regmap);
}
player->clk_sel = regmap_field_alloc(regmap, regfield[0]);
player->valid_sel = regmap_field_alloc(regmap, regfield[1]);
return 0;
}
static const struct snd_soc_dai_ops uni_player_dai_ops = {
.startup = uni_player_startup,
.shutdown = uni_player_shutdown,
.prepare = uni_player_prepare,
.trigger = uni_player_trigger,
.hw_params = sti_uniperiph_dai_hw_params,
.set_fmt = sti_uniperiph_dai_set_fmt,
.set_sysclk = uni_player_set_sysclk,
.set_tdm_slot = sti_uniperiph_set_tdm_slot
};
int uni_player_init(struct platform_device *pdev,
struct uniperif *player)
{
int ret = 0;
player->dev = &pdev->dev;
player->state = UNIPERIF_STATE_STOPPED;
player->dai_ops = &uni_player_dai_ops;
/* Get PCM_CLK_SEL & PCMP_VALID_SEL from audio-glue-ctrl SoC reg */
ret = uni_player_parse_dt_audio_glue(pdev, player);
if (ret < 0) {
dev_err(player->dev, "Failed to parse DeviceTree\n");
return ret;
}
/* Underflow recovery is only supported on later ip revisions */
if (player->ver >= SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
player->underflow_enabled = 1;
if (UNIPERIF_TYPE_IS_TDM(player))
player->hw = &uni_tdm_hw;
else
player->hw = &uni_player_pcm_hw;
/* Get uniperif resource */
player->clk = of_clk_get(pdev->dev.of_node, 0);
if (IS_ERR(player->clk)) {
dev_err(player->dev, "Failed to get clock\n");
return PTR_ERR(player->clk);
}
/* Select the frequency synthesizer clock */
if (player->clk_sel) {
ret = regmap_field_write(player->clk_sel, 1);
if (ret) {
dev_err(player->dev,
"%s: Failed to select freq synth clock\n",
__func__);
return ret;
}
}
/* connect to I2S/TDM TX bus */
if (player->valid_sel &&
(player->id == UNIPERIF_PLAYER_I2S_OUT)) {
ret = regmap_field_write(player->valid_sel, player->id);
if (ret) {
dev_err(player->dev,
"%s: unable to connect to tdm bus\n", __func__);
return ret;
}
}
ret = devm_request_irq(&pdev->dev, player->irq,
uni_player_irq_handler, IRQF_SHARED,
dev_name(&pdev->dev), player);
if (ret < 0) {
dev_err(player->dev, "unable to request IRQ %d\n", player->irq);
return ret;
}
mutex_init(&player->ctrl_lock);
spin_lock_init(&player->irq_lock);
/* Ensure these are disabled by default */
SET_UNIPERIF_CONFIG_BACK_STALL_REQ_DISABLE(player);
SET_UNIPERIF_CTRL_ROUNDING_OFF(player);
SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(player);
SET_UNIPERIF_CONFIG_IDLE_MOD_DISABLE(player);
if (UNIPERIF_TYPE_IS_IEC958(player)) {
/* Set default iec958 status bits */
/* Consumer, PCM, copyright, 2ch, mode 0 */
player->stream_settings.iec958.status[0] = 0x00;
/* Broadcast reception category */
player->stream_settings.iec958.status[1] =
IEC958_AES1_CON_GENERAL;
/* Do not take into account source or channel number */
player->stream_settings.iec958.status[2] =
IEC958_AES2_CON_SOURCE_UNSPEC;
/* Sampling frequency not indicated */
player->stream_settings.iec958.status[3] =
IEC958_AES3_CON_FS_NOTID;
/* Max sample word 24-bit, sample word length not indicated */
player->stream_settings.iec958.status[4] =
IEC958_AES4_CON_MAX_WORDLEN_24 |
IEC958_AES4_CON_WORDLEN_24_20;
player->num_ctrls = ARRAY_SIZE(snd_sti_iec_ctl);
player->snd_ctrls = snd_sti_iec_ctl[0];
} else {
player->num_ctrls = ARRAY_SIZE(snd_sti_pcm_ctl);
player->snd_ctrls = snd_sti_pcm_ctl[0];
}
return 0;
}
EXPORT_SYMBOL_GPL(uni_player_init);
| linux-master | sound/soc/sti/uniperif_player.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) STMicroelectronics SA 2015
* Authors: Arnaud Pouliquen <[email protected]>
* for STMicroelectronics.
*/
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/delay.h>
#include "uniperif.h"
/*
* User frame size shall be 2, 4, 6 or 8 32-bits words length
* (i.e. 8, 16, 24 or 32 bytes)
* This constraint comes from allowed values for
* UNIPERIF_I2S_FMT_NUM_CH register
*/
#define UNIPERIF_MAX_FRAME_SZ 0x20
#define UNIPERIF_ALLOWED_FRAME_SZ (0x08 | 0x10 | 0x18 | UNIPERIF_MAX_FRAME_SZ)
struct sti_uniperiph_dev_data {
unsigned int id; /* instance id of the uniperipheral */
unsigned int version; /* player IP version */
unsigned int stream;
const char *dai_names;
enum uniperif_type type;
};
static const struct sti_uniperiph_dev_data sti_uniplayer_hdmi = {
.id = 0,
.version = SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0,
.stream = SNDRV_PCM_STREAM_PLAYBACK,
.dai_names = "Uni Player #0 (HDMI)",
.type = SND_ST_UNIPERIF_TYPE_HDMI
};
static const struct sti_uniperiph_dev_data sti_uniplayer_pcm_out = {
.id = 1,
.version = SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0,
.stream = SNDRV_PCM_STREAM_PLAYBACK,
.dai_names = "Uni Player #1 (PCM OUT)",
.type = SND_ST_UNIPERIF_TYPE_PCM | SND_ST_UNIPERIF_TYPE_TDM,
};
static const struct sti_uniperiph_dev_data sti_uniplayer_dac = {
.id = 2,
.version = SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0,
.stream = SNDRV_PCM_STREAM_PLAYBACK,
.dai_names = "Uni Player #2 (DAC)",
.type = SND_ST_UNIPERIF_TYPE_PCM,
};
static const struct sti_uniperiph_dev_data sti_uniplayer_spdif = {
.id = 3,
.version = SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0,
.stream = SNDRV_PCM_STREAM_PLAYBACK,
.dai_names = "Uni Player #3 (SPDIF)",
.type = SND_ST_UNIPERIF_TYPE_SPDIF
};
static const struct sti_uniperiph_dev_data sti_unireader_pcm_in = {
.id = 0,
.version = SND_ST_UNIPERIF_VERSION_UNI_RDR_1_0,
.stream = SNDRV_PCM_STREAM_CAPTURE,
.dai_names = "Uni Reader #0 (PCM IN)",
.type = SND_ST_UNIPERIF_TYPE_PCM | SND_ST_UNIPERIF_TYPE_TDM,
};
static const struct sti_uniperiph_dev_data sti_unireader_hdmi_in = {
.id = 1,
.version = SND_ST_UNIPERIF_VERSION_UNI_RDR_1_0,
.stream = SNDRV_PCM_STREAM_CAPTURE,
.dai_names = "Uni Reader #1 (HDMI IN)",
.type = SND_ST_UNIPERIF_TYPE_PCM,
};
static const struct of_device_id snd_soc_sti_match[] = {
{ .compatible = "st,stih407-uni-player-hdmi",
.data = &sti_uniplayer_hdmi
},
{ .compatible = "st,stih407-uni-player-pcm-out",
.data = &sti_uniplayer_pcm_out
},
{ .compatible = "st,stih407-uni-player-dac",
.data = &sti_uniplayer_dac
},
{ .compatible = "st,stih407-uni-player-spdif",
.data = &sti_uniplayer_spdif
},
{ .compatible = "st,stih407-uni-reader-pcm_in",
.data = &sti_unireader_pcm_in
},
{ .compatible = "st,stih407-uni-reader-hdmi",
.data = &sti_unireader_hdmi_in
},
{},
};
MODULE_DEVICE_TABLE(of, snd_soc_sti_match);
int sti_uniperiph_reset(struct uniperif *uni)
{
int count = 10;
/* Reset uniperipheral uni */
SET_UNIPERIF_SOFT_RST_SOFT_RST(uni);
if (uni->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) {
while (GET_UNIPERIF_SOFT_RST_SOFT_RST(uni) && count) {
udelay(5);
count--;
}
}
if (!count) {
dev_err(uni->dev, "Failed to reset uniperif\n");
return -EIO;
}
return 0;
}
int sti_uniperiph_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
unsigned int rx_mask, int slots,
int slot_width)
{
struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
struct uniperif *uni = priv->dai_data.uni;
int i, frame_size, avail_slots;
if (!UNIPERIF_TYPE_IS_TDM(uni)) {
dev_err(uni->dev, "cpu dai not in tdm mode\n");
return -EINVAL;
}
/* store info in unip context */
uni->tdm_slot.slots = slots;
uni->tdm_slot.slot_width = slot_width;
/* unip is unidirectional */
uni->tdm_slot.mask = (tx_mask != 0) ? tx_mask : rx_mask;
/* number of available timeslots */
for (i = 0, avail_slots = 0; i < uni->tdm_slot.slots; i++) {
if ((uni->tdm_slot.mask >> i) & 0x01)
avail_slots++;
}
uni->tdm_slot.avail_slots = avail_slots;
/* frame size in bytes */
frame_size = uni->tdm_slot.avail_slots * uni->tdm_slot.slot_width / 8;
/* check frame size is allowed */
if ((frame_size > UNIPERIF_MAX_FRAME_SZ) ||
(frame_size & ~(int)UNIPERIF_ALLOWED_FRAME_SZ)) {
dev_err(uni->dev, "frame size not allowed: %d bytes\n",
frame_size);
return -EINVAL;
}
return 0;
}
int sti_uniperiph_fix_tdm_chan(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct uniperif *uni = rule->private;
struct snd_interval t;
t.min = uni->tdm_slot.avail_slots;
t.max = uni->tdm_slot.avail_slots;
t.openmin = 0;
t.openmax = 0;
t.integer = 0;
return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}
int sti_uniperiph_fix_tdm_format(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct uniperif *uni = rule->private;
struct snd_mask *maskp = hw_param_mask(params, rule->var);
u64 format;
switch (uni->tdm_slot.slot_width) {
case 16:
format = SNDRV_PCM_FMTBIT_S16_LE;
break;
case 32:
format = SNDRV_PCM_FMTBIT_S32_LE;
break;
default:
dev_err(uni->dev, "format not supported: %d bits\n",
uni->tdm_slot.slot_width);
return -EINVAL;
}
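/* Restrict the runtime format mask to the formats allowed by the slot width */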
maskp->bits[0] &= (u_int32_t)format;
maskp->bits[1] &= (u_int32_t)(format >> 32);
/* clear remaining indexes */
memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX - 64) / 8);
if (!maskp->bits[0] && !maskp->bits[1])
return -EINVAL;
return 0;
}
int sti_uniperiph_get_tdm_word_pos(struct uniperif *uni,
unsigned int *word_pos)
{
int slot_width = uni->tdm_slot.slot_width / 8;
int slots_num = uni->tdm_slot.slots;
unsigned int slots_mask = uni->tdm_slot.mask;
int i, j, k;
unsigned int word16_pos[4];
/* word16_pos:
* word16_pos[0] = WORDX_LSB
* word16_pos[1] = WORDX_MSB,
* word16_pos[2] = WORDX+1_LSB
* word16_pos[3] = WORDX+1_MSB
*/
/* set unip word position */
for (i = 0, j = 0, k = 0; (i < slots_num) && (k < WORD_MAX); i++) {
if ((slots_mask >> i) & 0x01) {
word16_pos[j] = i * slot_width;
if (slot_width == 4) {
word16_pos[j + 1] = word16_pos[j] + 2;
j++;
}
j++;
if (j > 3) {
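/* four 16-bit word byte-offsets collected: pack them into one WORD_POS register value */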
word_pos[k] = word16_pos[1] |
(word16_pos[0] << 8) |
(word16_pos[3] << 16) |
(word16_pos[2] << 24);
j = 0;
k++;
}
}
}
return 0;
}
/*
* sti_uniperiph_dai_create_ctrl
* This function creates the controls associated with the DAI and also the PCM device.
* The request is done by the front end to associate the ctrl with the PCM device id.
*/
static int sti_uniperiph_dai_create_ctrl(struct snd_soc_dai *dai)
{
struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
struct uniperif *uni = priv->dai_data.uni;
struct snd_kcontrol_new *ctrl;
int i;
if (!uni->num_ctrls)
return 0;
for (i = 0; i < uni->num_ctrls; i++) {
/*
* Several controls can have the same name. Controls are indexed on
* the uniperipheral instance ID.
*/
ctrl = &uni->snd_ctrls[i];
ctrl->index = uni->id;
ctrl->device = uni->id;
}
return snd_soc_add_dai_controls(dai, uni->snd_ctrls, uni->num_ctrls);
}
/*
* DAI
*/
int sti_uniperiph_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
struct uniperif *uni = priv->dai_data.uni;
struct snd_dmaengine_dai_dma_data *dma_data;
int transfer_size;
if (uni->type == SND_ST_UNIPERIF_TYPE_TDM)
/* transfer size = user frame size (in 32-bit FIFO cells) */
transfer_size = snd_soc_params_to_frame_size(params) / 32;
else
transfer_size = params_channels(params) * UNIPERIF_FIFO_FRAMES;
dma_data = snd_soc_dai_get_dma_data(dai, substream);
dma_data->maxburst = transfer_size;
return 0;
}
int sti_uniperiph_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
priv->dai_data.uni->daifmt = fmt;
return 0;
}
static int sti_uniperiph_suspend(struct snd_soc_component *component)
{
struct sti_uniperiph_data *priv = snd_soc_component_get_drvdata(component);
struct uniperif *uni = priv->dai_data.uni;
int ret;
/* The uniperipheral should be in stopped state */
if (uni->state != UNIPERIF_STATE_STOPPED) {
dev_err(uni->dev, "%s: invalid uni state( %d)\n",
__func__, (int)uni->state);
return -EBUSY;
}
/* Pinctrl: switch pinstate to sleep */
ret = pinctrl_pm_select_sleep_state(uni->dev);
if (ret)
dev_err(uni->dev, "%s: failed to select pinctrl state\n",
__func__);
return ret;
}
static int sti_uniperiph_resume(struct snd_soc_component *component)
{
struct sti_uniperiph_data *priv = snd_soc_component_get_drvdata(component);
struct uniperif *uni = priv->dai_data.uni;
int ret;
if (priv->dai_data.stream == SNDRV_PCM_STREAM_PLAYBACK) {
ret = uni_player_resume(uni);
if (ret)
return ret;
}
/* pinctrl: switch pinstate to default */
ret = pinctrl_pm_select_default_state(uni->dev);
if (ret)
dev_err(uni->dev, "%s: failed to select pinctrl state\n",
__func__);
return ret;
}
static int sti_uniperiph_dai_probe(struct snd_soc_dai *dai)
{
struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
struct sti_uniperiph_dai *dai_data = &priv->dai_data;
/* DMA settings */
if (priv->dai_data.stream == SNDRV_PCM_STREAM_PLAYBACK)
snd_soc_dai_init_dma_data(dai, &dai_data->dma_data, NULL);
else
snd_soc_dai_init_dma_data(dai, NULL, &dai_data->dma_data);
dai_data->dma_data.addr = dai_data->uni->fifo_phys_address;
dai_data->dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
return sti_uniperiph_dai_create_ctrl(dai);
}
static const struct snd_soc_dai_ops sti_uniperiph_dai_ops = {
.probe = sti_uniperiph_dai_probe,
};
static const struct snd_soc_dai_driver sti_uniperiph_dai_template = {
.ops = &sti_uniperiph_dai_ops,
};
static const struct snd_soc_component_driver sti_uniperiph_dai_component = {
.name = "sti_cpu_dai",
.suspend = sti_uniperiph_suspend,
.resume = sti_uniperiph_resume,
.legacy_dai_naming = 1,
};
static int sti_uniperiph_cpu_dai_of(struct device_node *node,
struct sti_uniperiph_data *priv)
{
struct device *dev = &priv->pdev->dev;
struct sti_uniperiph_dai *dai_data = &priv->dai_data;
struct snd_soc_dai_driver *dai = priv->dai;
struct snd_soc_pcm_stream *stream;
struct uniperif *uni;
const struct of_device_id *of_id;
const struct sti_uniperiph_dev_data *dev_data;
const char *mode;
int ret;
/* Populate data structure depending on compatibility */
of_id = of_match_node(snd_soc_sti_match, node);
if (!of_id->data) {
dev_err(dev, "data associated to device is missing\n");
return -EINVAL;
}
dev_data = (struct sti_uniperiph_dev_data *)of_id->data;
uni = devm_kzalloc(dev, sizeof(*uni), GFP_KERNEL);
if (!uni)
return -ENOMEM;
uni->id = dev_data->id;
uni->ver = dev_data->version;
*dai = sti_uniperiph_dai_template;
dai->name = dev_data->dai_names;
/* Get resources and base address */
uni->base = devm_platform_get_and_ioremap_resource(priv->pdev, 0, &uni->mem_region);
if (IS_ERR(uni->base))
return PTR_ERR(uni->base);
uni->fifo_phys_address = uni->mem_region->start +
UNIPERIF_FIFO_DATA_OFFSET(uni);
uni->irq = platform_get_irq(priv->pdev, 0);
if (uni->irq < 0)
return -ENXIO;
uni->type = dev_data->type;
/* check if player should be configured for tdm */
if (dev_data->type & SND_ST_UNIPERIF_TYPE_TDM) {
if (!of_property_read_string(node, "st,tdm-mode", &mode))
uni->type = SND_ST_UNIPERIF_TYPE_TDM;
else
uni->type = SND_ST_UNIPERIF_TYPE_PCM;
}
dai_data->uni = uni;
dai_data->stream = dev_data->stream;
if (priv->dai_data.stream == SNDRV_PCM_STREAM_PLAYBACK) {
ret = uni_player_init(priv->pdev, uni);
stream = &dai->playback;
} else {
ret = uni_reader_init(priv->pdev, uni);
stream = &dai->capture;
}
if (ret < 0)
return ret;
dai->ops = uni->dai_ops;
stream->stream_name = dai->name;
stream->channels_min = uni->hw->channels_min;
stream->channels_max = uni->hw->channels_max;
stream->rates = uni->hw->rates;
stream->formats = uni->hw->formats;
return 0;
}
static const struct snd_dmaengine_pcm_config dmaengine_pcm_config = {
.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
};
static int sti_uniperiph_probe(struct platform_device *pdev)
{
struct sti_uniperiph_data *priv;
struct device_node *node = pdev->dev.of_node;
int ret;
/* Allocate the private data and the CPU_DAI array */
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dai = devm_kzalloc(&pdev->dev, sizeof(*priv->dai), GFP_KERNEL);
if (!priv->dai)
return -ENOMEM;
priv->pdev = pdev;
ret = sti_uniperiph_cpu_dai_of(node, priv);
if (ret < 0)
return ret;
dev_set_drvdata(&pdev->dev, priv);
ret = devm_snd_soc_register_component(&pdev->dev,
&sti_uniperiph_dai_component,
priv->dai, 1);
if (ret < 0)
return ret;
return devm_snd_dmaengine_pcm_register(&pdev->dev,
&dmaengine_pcm_config, 0);
}
static struct platform_driver sti_uniperiph_driver = {
.driver = {
.name = "sti-uniperiph-dai",
.of_match_table = snd_soc_sti_match,
},
.probe = sti_uniperiph_probe,
};
module_platform_driver(sti_uniperiph_driver);
MODULE_DESCRIPTION("uniperipheral DAI driver");
MODULE_AUTHOR("Arnaud Pouliquen <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | sound/soc/sti/sti_uniperif.c |
// SPDX-License-Identifier: GPL-2.0
//
// Socionext UniPhier AIO ALSA CPU DAI driver.
//
// Copyright (c) 2016-2018 Socionext Inc.
#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "aio.h"
static bool is_valid_pll(struct uniphier_aio_chip *chip, int pll_id)
{
struct device *dev = &chip->pdev->dev;
if (pll_id < 0 || chip->num_plls <= pll_id) {
dev_err(dev, "PLL(%d) is not supported\n", pll_id);
return false;
}
return chip->plls[pll_id].enable;
}
/**
* find_volume - find volume supported HW port by HW port number
* @chip: the AIO chip pointer
* @oport_hw: HW port number, one of AUD_HW_XXXX
*
* Find AIO device from device list by HW port number. Volume feature is
* available only in Output and PCM ports, this limitation comes from HW
* specifications.
*
* Return: The pointer of AIO substream if successful, otherwise NULL on error.
*/
static struct uniphier_aio_sub *find_volume(struct uniphier_aio_chip *chip,
int oport_hw)
{
int i;
for (i = 0; i < chip->num_aios; i++) {
struct uniphier_aio_sub *sub = &chip->aios[i].sub[0];
if (!sub->swm)
continue;
if (sub->swm->oport.hw == oport_hw)
return sub;
}
return NULL;
}
static bool match_spec(const struct uniphier_aio_spec *spec,
const char *name, int dir)
{
if (dir == SNDRV_PCM_STREAM_PLAYBACK &&
spec->swm.dir != PORT_DIR_OUTPUT) {
return false;
}
if (dir == SNDRV_PCM_STREAM_CAPTURE &&
spec->swm.dir != PORT_DIR_INPUT) {
return false;
}
if (spec->name && strcmp(spec->name, name) == 0)
return true;
if (spec->gname && strcmp(spec->gname, name) == 0)
return true;
return false;
}
/**
* find_spec - find HW specification info by name
* @aio: the AIO device pointer
* @name: name of device
* @direction: the direction of substream, SNDRV_PCM_STREAM_*
*
* Find hardware specification information from list by device name. This
* information is used for telling the difference of SoCs to driver.
*
* Specification list is array of 'struct uniphier_aio_spec' which is defined
* in each drivers (see: aio-i2s.c).
*
* Return: The pointer of hardware specification of AIO if successful,
* otherwise NULL on error.
*/
static const struct uniphier_aio_spec *find_spec(struct uniphier_aio *aio,
const char *name,
int direction)
{
const struct uniphier_aio_chip_spec *chip_spec = aio->chip->chip_spec;
int i;
for (i = 0; i < chip_spec->num_specs; i++) {
const struct uniphier_aio_spec *spec = &chip_spec->specs[i];
if (match_spec(spec, name, direction))
return spec;
}
return NULL;
}
/**
* find_divider - find clock divider by frequency
* @aio: the AIO device pointer
* @pll_id: PLL ID, should be AUD_PLL_XX
* @freq: required frequency
*
* Find suitable clock divider by frequency.
*
* Return: The ID of PLL if successful, otherwise negative error value.
*/
static int find_divider(struct uniphier_aio *aio, int pll_id, unsigned int freq)
{
struct uniphier_aio_pll *pll;
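/* candidate ratios applied to the PLL rate: f/2, f/3, f and 2*f/3 */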
static const int mul[] = { 1, 1, 1, 2, };
static const int div[] = { 2, 3, 1, 3, };
int i;
if (!is_valid_pll(aio->chip, pll_id))
return -EINVAL;
pll = &aio->chip->plls[pll_id];
for (i = 0; i < ARRAY_SIZE(mul); i++)
if (pll->freq * mul[i] / div[i] == freq)
return i;
return -ENOTSUPP;
}
static int uniphier_aio_set_sysclk(struct snd_soc_dai *dai, int clk_id,
unsigned int freq, int dir)
{
struct uniphier_aio *aio = uniphier_priv(dai);
struct device *dev = &aio->chip->pdev->dev;
bool pll_auto = false;
int pll_id, div_id;
switch (clk_id) {
case AUD_CLK_IO:
return -ENOTSUPP;
case AUD_CLK_A1:
pll_id = AUD_PLL_A1;
break;
case AUD_CLK_F1:
pll_id = AUD_PLL_F1;
break;
case AUD_CLK_A2:
pll_id = AUD_PLL_A2;
break;
case AUD_CLK_F2:
pll_id = AUD_PLL_F2;
break;
case AUD_CLK_A:
pll_id = AUD_PLL_A1;
pll_auto = true;
break;
case AUD_CLK_F:
pll_id = AUD_PLL_F1;
pll_auto = true;
break;
case AUD_CLK_APLL:
pll_id = AUD_PLL_APLL;
break;
case AUD_CLK_RX0:
pll_id = AUD_PLL_RX0;
break;
case AUD_CLK_USB0:
pll_id = AUD_PLL_USB0;
break;
case AUD_CLK_HSC0:
pll_id = AUD_PLL_HSC0;
break;
default:
dev_err(dev, "Sysclk(%d) is not supported\n", clk_id);
return -EINVAL;
}
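/* AUD_CLK_A/AUD_CLK_F: scan the PLL list for one whose divided rate matches freq */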
if (pll_auto) {
for (pll_id = 0; pll_id < aio->chip->num_plls; pll_id++) {
div_id = find_divider(aio, pll_id, freq);
if (div_id >= 0) {
aio->plldiv = div_id;
break;
}
}
if (pll_id == aio->chip->num_plls) {
dev_err(dev, "Sysclk frequency is not supported(%d)\n",
freq);
return -EINVAL;
}
}
if (dir == SND_SOC_CLOCK_OUT)
aio->pll_out = pll_id;
else
aio->pll_in = pll_id;
return 0;
}
static int uniphier_aio_set_pll(struct snd_soc_dai *dai, int pll_id,
int source, unsigned int freq_in,
unsigned int freq_out)
{
struct uniphier_aio *aio = uniphier_priv(dai);
int ret;
if (!is_valid_pll(aio->chip, pll_id))
return -EINVAL;
ret = aio_chip_set_pll(aio->chip, pll_id, freq_out);
if (ret < 0)
return ret;
return 0;
}
static int uniphier_aio_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct uniphier_aio *aio = uniphier_priv(dai);
struct device *dev = &aio->chip->pdev->dev;
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_LEFT_J:
case SND_SOC_DAIFMT_RIGHT_J:
case SND_SOC_DAIFMT_I2S:
aio->fmt = fmt & SND_SOC_DAIFMT_FORMAT_MASK;
break;
default:
dev_err(dev, "Format is not supported(%d)\n",
fmt & SND_SOC_DAIFMT_FORMAT_MASK);
return -EINVAL;
}
return 0;
}
static int uniphier_aio_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct uniphier_aio *aio = uniphier_priv(dai);
struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
sub->substream = substream;
sub->pass_through = 0;
sub->use_mmap = true;
return aio_init(sub);
}
static void uniphier_aio_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct uniphier_aio *aio = uniphier_priv(dai);
struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
sub->substream = NULL;
}
static int uniphier_aio_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct uniphier_aio *aio = uniphier_priv(dai);
struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
struct device *dev = &aio->chip->pdev->dev;
int freq, ret;
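/* select the master clock family: 12.288 MHz for 48 kHz rates, 11.2896 MHz for 44.1 kHz rates */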
switch (params_rate(params)) {
case 48000:
case 32000:
case 24000:
freq = 12288000;
break;
case 44100:
case 22050:
freq = 11289600;
break;
default:
dev_err(dev, "Rate is not supported(%d)\n",
params_rate(params));
return -EINVAL;
}
ret = snd_soc_dai_set_sysclk(dai, AUD_CLK_A,
freq, SND_SOC_CLOCK_OUT);
if (ret)
return ret;
sub->params = *params;
sub->setting = 1;
aio_port_reset(sub);
aio_port_set_volume(sub, sub->vol);
aio_src_reset(sub);
return 0;
}
static int uniphier_aio_hw_free(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct uniphier_aio *aio = uniphier_priv(dai);
struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
sub->setting = 0;
return 0;
}
static int uniphier_aio_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct uniphier_aio *aio = uniphier_priv(dai);
struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
int ret;
ret = aio_port_set_param(sub, sub->pass_through, &sub->params);
if (ret)
return ret;
ret = aio_src_set_param(sub, &sub->params);
if (ret)
return ret;
aio_port_set_enable(sub, 1);
ret = aio_if_set_param(sub, sub->pass_through);
if (ret)
return ret;
if (sub->swm->type == PORT_TYPE_CONV) {
ret = aio_srcif_set_param(sub);
if (ret)
return ret;
ret = aio_srcch_set_param(sub);
if (ret)
return ret;
aio_srcch_set_enable(sub, 1);
}
return 0;
}
static int uniphier_aio_dai_probe(struct snd_soc_dai *dai)
{
struct uniphier_aio *aio = uniphier_priv(dai);
int i;
for (i = 0; i < ARRAY_SIZE(aio->sub); i++) {
struct uniphier_aio_sub *sub = &aio->sub[i];
const struct uniphier_aio_spec *spec;
spec = find_spec(aio, dai->name, i);
if (!spec)
continue;
sub->swm = &spec->swm;
sub->spec = spec;
sub->vol = AUD_VOL_INIT;
}
aio_iecout_set_enable(aio->chip, true);
aio_chip_init(aio->chip);
aio->chip->active = 1;
return 0;
}
static int uniphier_aio_dai_remove(struct snd_soc_dai *dai)
{
struct uniphier_aio *aio = uniphier_priv(dai);
aio->chip->active = 0;
return 0;
}
static int uniphier_aio_ld11_probe(struct snd_soc_dai *dai)
{
int ret;
ret = uniphier_aio_dai_probe(dai);
if (ret < 0)
return ret;
ret = snd_soc_dai_set_pll(dai, AUD_PLL_A1, 0, 0, 36864000);
if (ret < 0)
return ret;
ret = snd_soc_dai_set_pll(dai, AUD_PLL_F1, 0, 0, 36864000);
if (ret < 0)
return ret;
ret = snd_soc_dai_set_pll(dai, AUD_PLL_A2, 0, 0, 33868800);
if (ret < 0)
return ret;
ret = snd_soc_dai_set_pll(dai, AUD_PLL_F2, 0, 0, 33868800);
if (ret < 0)
return ret;
return 0;
}
static int uniphier_aio_pxs2_probe(struct snd_soc_dai *dai)
{
int ret;
ret = uniphier_aio_dai_probe(dai);
if (ret < 0)
return ret;
ret = snd_soc_dai_set_pll(dai, AUD_PLL_A1, 0, 0, 36864000);
if (ret < 0)
return ret;
ret = snd_soc_dai_set_pll(dai, AUD_PLL_F1, 0, 0, 36864000);
if (ret < 0)
return ret;
ret = snd_soc_dai_set_pll(dai, AUD_PLL_A2, 0, 0, 33868800);
if (ret < 0)
return ret;
ret = snd_soc_dai_set_pll(dai, AUD_PLL_F2, 0, 0, 33868800);
if (ret < 0)
return ret;
return 0;
}
const struct snd_soc_dai_ops uniphier_aio_i2s_ld11_ops = {
.probe = uniphier_aio_ld11_probe,
.remove = uniphier_aio_dai_remove,
.set_sysclk = uniphier_aio_set_sysclk,
.set_pll = uniphier_aio_set_pll,
.set_fmt = uniphier_aio_set_fmt,
.startup = uniphier_aio_startup,
.shutdown = uniphier_aio_shutdown,
.hw_params = uniphier_aio_hw_params,
.hw_free = uniphier_aio_hw_free,
.prepare = uniphier_aio_prepare,
};
EXPORT_SYMBOL_GPL(uniphier_aio_i2s_ld11_ops);
const struct snd_soc_dai_ops uniphier_aio_spdif_ld11_ops = {
.probe = uniphier_aio_ld11_probe,
.remove = uniphier_aio_dai_remove,
.set_sysclk = uniphier_aio_set_sysclk,
.set_pll = uniphier_aio_set_pll,
.startup = uniphier_aio_startup,
.shutdown = uniphier_aio_shutdown,
.hw_params = uniphier_aio_hw_params,
.hw_free = uniphier_aio_hw_free,
.prepare = uniphier_aio_prepare,
};
EXPORT_SYMBOL_GPL(uniphier_aio_spdif_ld11_ops);
const struct snd_soc_dai_ops uniphier_aio_spdif_ld11_ops2 = {
.probe = uniphier_aio_ld11_probe,
.remove = uniphier_aio_dai_remove,
.set_sysclk = uniphier_aio_set_sysclk,
.set_pll = uniphier_aio_set_pll,
.startup = uniphier_aio_startup,
.shutdown = uniphier_aio_shutdown,
.hw_params = uniphier_aio_hw_params,
.hw_free = uniphier_aio_hw_free,
.prepare = uniphier_aio_prepare,
.compress_new = snd_soc_new_compress,
};
EXPORT_SYMBOL_GPL(uniphier_aio_spdif_ld11_ops2);
const struct snd_soc_dai_ops uniphier_aio_i2s_pxs2_ops = {
.probe = uniphier_aio_pxs2_probe,
.remove = uniphier_aio_dai_remove,
.set_sysclk = uniphier_aio_set_sysclk,
.set_pll = uniphier_aio_set_pll,
.set_fmt = uniphier_aio_set_fmt,
.startup = uniphier_aio_startup,
.shutdown = uniphier_aio_shutdown,
.hw_params = uniphier_aio_hw_params,
.hw_free = uniphier_aio_hw_free,
.prepare = uniphier_aio_prepare,
};
EXPORT_SYMBOL_GPL(uniphier_aio_i2s_pxs2_ops);
const struct snd_soc_dai_ops uniphier_aio_spdif_pxs2_ops = {
.probe = uniphier_aio_pxs2_probe,
.remove = uniphier_aio_dai_remove,
.set_sysclk = uniphier_aio_set_sysclk,
.set_pll = uniphier_aio_set_pll,
.startup = uniphier_aio_startup,
.shutdown = uniphier_aio_shutdown,
.hw_params = uniphier_aio_hw_params,
.hw_free = uniphier_aio_hw_free,
.prepare = uniphier_aio_prepare,
};
EXPORT_SYMBOL_GPL(uniphier_aio_spdif_pxs2_ops);
const struct snd_soc_dai_ops uniphier_aio_spdif_pxs2_ops2 = {
.probe = uniphier_aio_pxs2_probe,
.remove = uniphier_aio_dai_remove,
.set_sysclk = uniphier_aio_set_sysclk,
.set_pll = uniphier_aio_set_pll,
.startup = uniphier_aio_startup,
.shutdown = uniphier_aio_shutdown,
.hw_params = uniphier_aio_hw_params,
.hw_free = uniphier_aio_hw_free,
.prepare = uniphier_aio_prepare,
.compress_new = snd_soc_new_compress,
};
EXPORT_SYMBOL_GPL(uniphier_aio_spdif_pxs2_ops2);
static void uniphier_aio_dai_suspend(struct snd_soc_dai *dai)
{
struct uniphier_aio *aio = uniphier_priv(dai);
if (!snd_soc_dai_active(dai))
return;
aio->chip->num_wup_aios--;
if (!aio->chip->num_wup_aios) {
reset_control_assert(aio->chip->rst);
clk_disable_unprepare(aio->chip->clk);
}
}
static int uniphier_aio_suspend(struct snd_soc_component *component)
{
struct snd_soc_dai *dai;
for_each_component_dais(component, dai)
uniphier_aio_dai_suspend(dai);
return 0;
}
static int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
{
struct uniphier_aio *aio = uniphier_priv(dai);
int ret, i;
if (!snd_soc_dai_active(dai))
return 0;
if (!aio->chip->active)
return 0;
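/* first DAI to wake up: re-enable the chip clock and release reset */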
if (!aio->chip->num_wup_aios) {
ret = clk_prepare_enable(aio->chip->clk);
if (ret)
return ret;
ret = reset_control_deassert(aio->chip->rst);
if (ret)
goto err_out_clock;
}
aio_iecout_set_enable(aio->chip, true);
aio_chip_init(aio->chip);
for (i = 0; i < ARRAY_SIZE(aio->sub); i++) {
struct uniphier_aio_sub *sub = &aio->sub[i];
if (!sub->spec || !sub->substream)
continue;
ret = aio_init(sub);
if (ret)
goto err_out_reset;
if (!sub->setting)
continue;
aio_port_reset(sub);
aio_src_reset(sub);
}
aio->chip->num_wup_aios++;
return 0;
err_out_reset:
if (!aio->chip->num_wup_aios)
reset_control_assert(aio->chip->rst);
err_out_clock:
if (!aio->chip->num_wup_aios)
clk_disable_unprepare(aio->chip->clk);
return ret;
}
static int uniphier_aio_resume(struct snd_soc_component *component)
{
struct snd_soc_dai *dai;
int ret = 0;
for_each_component_dais(component, dai)
ret |= uniphier_aio_dai_resume(dai);
return ret;
}
static int uniphier_aio_vol_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = AUD_VOL_MAX;
return 0;
}
static int uniphier_aio_vol_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
struct uniphier_aio_chip *chip = snd_soc_component_get_drvdata(comp);
struct uniphier_aio_sub *sub;
int oport_hw = kcontrol->private_value;
sub = find_volume(chip, oport_hw);
if (!sub)
return 0;
ucontrol->value.integer.value[0] = sub->vol;
return 0;
}
static int uniphier_aio_vol_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
struct uniphier_aio_chip *chip = snd_soc_component_get_drvdata(comp);
struct uniphier_aio_sub *sub;
int oport_hw = kcontrol->private_value;
sub = find_volume(chip, oport_hw);
if (!sub)
return 0;
if (sub->vol == ucontrol->value.integer.value[0])
return 0;
sub->vol = ucontrol->value.integer.value[0];
aio_port_set_volume(sub, sub->vol);
return 0;
}
static const struct snd_kcontrol_new uniphier_aio_controls[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
.name = "HPCMOUT1 Volume",
.info = uniphier_aio_vol_info,
.get = uniphier_aio_vol_get,
.put = uniphier_aio_vol_put,
.private_value = AUD_HW_HPCMOUT1,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
.name = "PCMOUT1 Volume",
.info = uniphier_aio_vol_info,
.get = uniphier_aio_vol_get,
.put = uniphier_aio_vol_put,
.private_value = AUD_HW_PCMOUT1,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
.name = "PCMOUT2 Volume",
.info = uniphier_aio_vol_info,
.get = uniphier_aio_vol_get,
.put = uniphier_aio_vol_put,
.private_value = AUD_HW_PCMOUT2,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
.name = "PCMOUT3 Volume",
.info = uniphier_aio_vol_info,
.get = uniphier_aio_vol_get,
.put = uniphier_aio_vol_put,
.private_value = AUD_HW_PCMOUT3,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
.name = "HIECOUT1 Volume",
.info = uniphier_aio_vol_info,
.get = uniphier_aio_vol_get,
.put = uniphier_aio_vol_put,
.private_value = AUD_HW_HIECOUT1,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
.name = "IECOUT1 Volume",
.info = uniphier_aio_vol_info,
.get = uniphier_aio_vol_get,
.put = uniphier_aio_vol_put,
.private_value = AUD_HW_IECOUT1,
},
};
static const struct snd_soc_component_driver uniphier_aio_component = {
.name = "uniphier-aio",
.controls = uniphier_aio_controls,
.num_controls = ARRAY_SIZE(uniphier_aio_controls),
.suspend = uniphier_aio_suspend,
.resume = uniphier_aio_resume,
};
int uniphier_aio_probe(struct platform_device *pdev)
{
struct uniphier_aio_chip *chip;
struct device *dev = &pdev->dev;
int ret, i, j;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
chip->chip_spec = of_device_get_match_data(dev);
if (!chip->chip_spec)
return -EINVAL;
chip->regmap_sg = syscon_regmap_lookup_by_phandle(dev->of_node,
"socionext,syscon");
if (IS_ERR(chip->regmap_sg)) {
if (PTR_ERR(chip->regmap_sg) == -EPROBE_DEFER)
return -EPROBE_DEFER;
chip->regmap_sg = NULL;
}
chip->clk = devm_clk_get(dev, "aio");
if (IS_ERR(chip->clk))
return PTR_ERR(chip->clk);
chip->rst = devm_reset_control_get_shared(dev, "aio");
if (IS_ERR(chip->rst))
return PTR_ERR(chip->rst);
chip->num_aios = chip->chip_spec->num_dais;
chip->num_wup_aios = chip->num_aios;
chip->aios = devm_kcalloc(dev,
chip->num_aios, sizeof(struct uniphier_aio),
GFP_KERNEL);
if (!chip->aios)
return -ENOMEM;
chip->num_plls = chip->chip_spec->num_plls;
chip->plls = devm_kcalloc(dev,
chip->num_plls,
sizeof(struct uniphier_aio_pll),
GFP_KERNEL);
if (!chip->plls)
return -ENOMEM;
memcpy(chip->plls, chip->chip_spec->plls,
sizeof(struct uniphier_aio_pll) * chip->num_plls);
for (i = 0; i < chip->num_aios; i++) {
struct uniphier_aio *aio = &chip->aios[i];
aio->chip = chip;
aio->fmt = SND_SOC_DAIFMT_I2S;
for (j = 0; j < ARRAY_SIZE(aio->sub); j++) {
struct uniphier_aio_sub *sub = &aio->sub[j];
sub->aio = aio;
spin_lock_init(&sub->lock);
}
}
chip->pdev = pdev;
platform_set_drvdata(pdev, chip);
ret = clk_prepare_enable(chip->clk);
if (ret)
return ret;
ret = reset_control_deassert(chip->rst);
if (ret)
goto err_out_clock;
ret = devm_snd_soc_register_component(dev, &uniphier_aio_component,
chip->chip_spec->dais,
chip->chip_spec->num_dais);
if (ret) {
dev_err(dev, "Register component failed.\n");
goto err_out_reset;
}
ret = uniphier_aiodma_soc_register_platform(pdev);
if (ret) {
dev_err(dev, "Register platform failed.\n");
goto err_out_reset;
}
return 0;
err_out_reset:
reset_control_assert(chip->rst);
err_out_clock:
clk_disable_unprepare(chip->clk);
return ret;
}
EXPORT_SYMBOL_GPL(uniphier_aio_probe);
int uniphier_aio_remove(struct platform_device *pdev)
{
struct uniphier_aio_chip *chip = platform_get_drvdata(pdev);
reset_control_assert(chip->rst);
clk_disable_unprepare(chip->clk);
return 0;
}
EXPORT_SYMBOL_GPL(uniphier_aio_remove);
MODULE_AUTHOR("Katsuhiro Suzuki <[email protected]>");
MODULE_DESCRIPTION("UniPhier AIO CPU DAI driver.");
MODULE_LICENSE("GPL v2");
| linux-master | sound/soc/uniphier/aio-cpu.c |
// SPDX-License-Identifier: GPL-2.0
//
// Socionext UniPhier AIO ALSA common driver.
//
// Copyright (c) 2016-2018 Socionext Inc.
#include <linux/bitfield.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "aio.h"
#include "aio-reg.h"
static u64 rb_cnt(u64 wr, u64 rd, u64 len)
{
if (rd <= wr)
return wr - rd;
else
return len - (rd - wr);
}
static u64 rb_cnt_to_end(u64 wr, u64 rd, u64 len)
{
if (rd <= wr)
return wr - rd;
else
return len - rd;
}
static u64 rb_space(u64 wr, u64 rd, u64 len)
{
if (rd <= wr)
return len - (wr - rd) - 8;
else
return rd - wr - 8;
}
static u64 rb_space_to_end(u64 wr, u64 rd, u64 len)
{
if (rd > wr)
return rd - wr - 8;
else if (rd > 0)
return len - wr;
else
return len - wr - 8;
}
u64 aio_rb_cnt(struct uniphier_aio_sub *sub)
{
return rb_cnt(sub->wr_offs, sub->rd_offs, sub->compr_bytes);
}
u64 aio_rbt_cnt_to_end(struct uniphier_aio_sub *sub)
{
return rb_cnt_to_end(sub->wr_offs, sub->rd_offs, sub->compr_bytes);
}
u64 aio_rb_space(struct uniphier_aio_sub *sub)
{
return rb_space(sub->wr_offs, sub->rd_offs, sub->compr_bytes);
}
u64 aio_rb_space_to_end(struct uniphier_aio_sub *sub)
{
return rb_space_to_end(sub->wr_offs, sub->rd_offs, sub->compr_bytes);
}
/**
* aio_iecout_set_enable - setup IEC output via SoC glue
* @chip: the AIO chip pointer
* @enable: false to stop the output, true to start
*
* Enables or disables the S/PDIF signal output from the SoC via the AOnIEC pins.
* This function needs to be called at driver startup.
*
* The regmap of the SoC glue is specified by the optional 'socionext,syscon'
* property in DT. This function has no effect if the property is absent.
*/
void aio_iecout_set_enable(struct uniphier_aio_chip *chip, bool enable)
{
struct regmap *r = chip->regmap_sg;
if (!r)
return;
regmap_write(r, SG_AOUTEN, (enable) ? ~0 : 0);
}
/**
* aio_chip_set_pll - set frequency to audio PLL
* @chip: the AIO chip pointer
* @pll_id: PLL ID (one of the AUD_PLL_* constants)
* @freq: frequency in Hz, 0 is ignored
*
* Sets the frequency of the audio PLL. This function can be called at any
* time, but it takes some time until the PLL is locked.
*
* Return: Zero if successful, otherwise a negative value on error.
*/
int aio_chip_set_pll(struct uniphier_aio_chip *chip, int pll_id,
unsigned int freq)
{
struct device *dev = &chip->pdev->dev;
struct regmap *r = chip->regmap;
int shift;
u32 v;
/* Not change */
if (freq == 0)
return 0;
switch (pll_id) {
case AUD_PLL_A1:
shift = 0;
break;
case AUD_PLL_F1:
shift = 1;
break;
case AUD_PLL_A2:
shift = 2;
break;
case AUD_PLL_F2:
shift = 3;
break;
default:
dev_err(dev, "PLL(%d) not supported\n", pll_id);
return -EINVAL;
}
switch (freq) {
case 36864000:
v = A2APLLCTR1_APLLX_36MHZ;
break;
case 33868800:
v = A2APLLCTR1_APLLX_33MHZ;
break;
default:
dev_err(dev, "PLL frequency not supported(%d)\n", freq);
return -EINVAL;
}
chip->plls[pll_id].freq = freq;
regmap_update_bits(r, A2APLLCTR1, A2APLLCTR1_APLLX_MASK << shift,
v << shift);
return 0;
}
/**
* aio_chip_init - initialize AIO whole settings
* @chip: the AIO chip pointer
*
* Applies the fixed, chip-wide settings to the AIO block.
* This function needs to be called once at driver startup.
*
* The register area changed by this function is shared by all modules of
* the AIO. There is no race condition, however, because this function always
* writes the same initial values.
*/
void aio_chip_init(struct uniphier_aio_chip *chip)
{
struct regmap *r = chip->regmap;
regmap_update_bits(r, A2APLLCTR0,
A2APLLCTR0_APLLXPOW_MASK,
A2APLLCTR0_APLLXPOW_PWON);
regmap_update_bits(r, A2EXMCLKSEL0,
A2EXMCLKSEL0_EXMCLK_MASK,
A2EXMCLKSEL0_EXMCLK_OUTPUT);
regmap_update_bits(r, A2AIOINPUTSEL, A2AIOINPUTSEL_RXSEL_MASK,
A2AIOINPUTSEL_RXSEL_PCMI1_HDMIRX1 |
A2AIOINPUTSEL_RXSEL_PCMI2_SIF |
A2AIOINPUTSEL_RXSEL_PCMI3_EVEA |
A2AIOINPUTSEL_RXSEL_IECI1_HDMIRX1);
if (chip->chip_spec->addr_ext)
regmap_update_bits(r, CDA2D_TEST, CDA2D_TEST_DDR_MODE_MASK,
CDA2D_TEST_DDR_MODE_EXTON0);
else
regmap_update_bits(r, CDA2D_TEST, CDA2D_TEST_DDR_MODE_MASK,
CDA2D_TEST_DDR_MODE_EXTOFF1);
}
/**
* aio_init - initialize AIO substream
* @sub: the AIO substream pointer
*
* Applies the fixed settings of each AIO substream.
* This function needs to be called once at substream startup.
*
* Return: Zero if successful, otherwise a negative value on error.
*/
int aio_init(struct uniphier_aio_sub *sub)
{
struct device *dev = &sub->aio->chip->pdev->dev;
struct regmap *r = sub->aio->chip->regmap;
regmap_write(r, A2RBNMAPCTR0(sub->swm->rb.hw),
MAPCTR0_EN | sub->swm->rb.map);
regmap_write(r, A2CHNMAPCTR0(sub->swm->ch.hw),
MAPCTR0_EN | sub->swm->ch.map);
switch (sub->swm->type) {
case PORT_TYPE_I2S:
case PORT_TYPE_SPDIF:
case PORT_TYPE_EVE:
if (sub->swm->dir == PORT_DIR_INPUT) {
regmap_write(r, A2IIFNMAPCTR0(sub->swm->iif.hw),
MAPCTR0_EN | sub->swm->iif.map);
regmap_write(r, A2IPORTNMAPCTR0(sub->swm->iport.hw),
MAPCTR0_EN | sub->swm->iport.map);
} else {
regmap_write(r, A2OIFNMAPCTR0(sub->swm->oif.hw),
MAPCTR0_EN | sub->swm->oif.map);
regmap_write(r, A2OPORTNMAPCTR0(sub->swm->oport.hw),
MAPCTR0_EN | sub->swm->oport.map);
}
break;
case PORT_TYPE_CONV:
regmap_write(r, A2OIFNMAPCTR0(sub->swm->oif.hw),
MAPCTR0_EN | sub->swm->oif.map);
regmap_write(r, A2OPORTNMAPCTR0(sub->swm->oport.hw),
MAPCTR0_EN | sub->swm->oport.map);
regmap_write(r, A2CHNMAPCTR0(sub->swm->och.hw),
MAPCTR0_EN | sub->swm->och.map);
regmap_write(r, A2IIFNMAPCTR0(sub->swm->iif.hw),
MAPCTR0_EN | sub->swm->iif.map);
break;
default:
dev_err(dev, "Unknown port type %d.\n", sub->swm->type);
return -EINVAL;
}
return 0;
}
/**
* aio_port_reset - reset AIO port block
* @sub: the AIO substream pointer
*
* Resets the digital signal input/output port block of AIO.
*/
void aio_port_reset(struct uniphier_aio_sub *sub)
{
struct regmap *r = sub->aio->chip->regmap;
if (sub->swm->dir == PORT_DIR_OUTPUT) {
regmap_write(r, AOUTRSTCTR0, BIT(sub->swm->oport.map));
regmap_write(r, AOUTRSTCTR1, BIT(sub->swm->oport.map));
} else {
regmap_update_bits(r, IPORTMXRSTCTR(sub->swm->iport.map),
IPORTMXRSTCTR_RSTPI_MASK,
IPORTMXRSTCTR_RSTPI_RESET);
regmap_update_bits(r, IPORTMXRSTCTR(sub->swm->iport.map),
IPORTMXRSTCTR_RSTPI_MASK,
IPORTMXRSTCTR_RSTPI_RELEASE);
}
}
/**
* aio_port_set_ch - set channels of LPCM
* @sub: the AIO substream pointer, PCM substream only
*
* Sets suitable slot selection for the input/output port block of the AIO.
*
* This function may return an error for a non-PCM substream.
*
* Return: Zero if successful, otherwise a negative value on error.
*/
static int aio_port_set_ch(struct uniphier_aio_sub *sub)
{
struct regmap *r = sub->aio->chip->regmap;
static const u32 slotsel_2ch[] = {
0, 0, 0, 0, 0,
};
static const u32 slotsel_multi[] = {
OPORTMXTYSLOTCTR_SLOTSEL_SLOT0,
OPORTMXTYSLOTCTR_SLOTSEL_SLOT1,
OPORTMXTYSLOTCTR_SLOTSEL_SLOT2,
OPORTMXTYSLOTCTR_SLOTSEL_SLOT3,
OPORTMXTYSLOTCTR_SLOTSEL_SLOT4,
};
u32 mode;
const u32 *slotsel;
int i;
switch (params_channels(&sub->params)) {
case 8:
case 6:
mode = OPORTMXTYSLOTCTR_MODE;
slotsel = slotsel_multi;
break;
case 2:
mode = 0;
slotsel = slotsel_2ch;
break;
default:
return -EINVAL;
}
for (i = 0; i < AUD_MAX_SLOTSEL; i++) {
regmap_update_bits(r, OPORTMXTYSLOTCTR(sub->swm->oport.map, i),
OPORTMXTYSLOTCTR_MODE, mode);
regmap_update_bits(r, OPORTMXTYSLOTCTR(sub->swm->oport.map, i),
OPORTMXTYSLOTCTR_SLOTSEL_MASK, slotsel[i]);
}
return 0;
}
/**
* aio_port_set_rate - set sampling rate of LPCM
* @sub: the AIO substream pointer, PCM substream only
* @rate: Sampling rate in Hz.
*
* Sets suitable sampling rate settings for the input/output port block of
* the AIO. The rate is specified by hw_params().
*
* This function may return an error for a non-PCM substream.
*
* Return: Zero if successful, otherwise a negative value on error.
*/
static int aio_port_set_rate(struct uniphier_aio_sub *sub, int rate)
{
struct regmap *r = sub->aio->chip->regmap;
struct device *dev = &sub->aio->chip->pdev->dev;
u32 v;
if (sub->swm->dir == PORT_DIR_OUTPUT) {
switch (rate) {
case 8000:
v = OPORTMXCTR1_FSSEL_8;
break;
case 11025:
v = OPORTMXCTR1_FSSEL_11_025;
break;
case 12000:
v = OPORTMXCTR1_FSSEL_12;
break;
case 16000:
v = OPORTMXCTR1_FSSEL_16;
break;
case 22050:
v = OPORTMXCTR1_FSSEL_22_05;
break;
case 24000:
v = OPORTMXCTR1_FSSEL_24;
break;
case 32000:
v = OPORTMXCTR1_FSSEL_32;
break;
case 44100:
v = OPORTMXCTR1_FSSEL_44_1;
break;
case 48000:
v = OPORTMXCTR1_FSSEL_48;
break;
case 88200:
v = OPORTMXCTR1_FSSEL_88_2;
break;
case 96000:
v = OPORTMXCTR1_FSSEL_96;
break;
case 176400:
v = OPORTMXCTR1_FSSEL_176_4;
break;
case 192000:
v = OPORTMXCTR1_FSSEL_192;
break;
default:
dev_err(dev, "Rate not supported(%d)\n", rate);
return -EINVAL;
}
regmap_update_bits(r, OPORTMXCTR1(sub->swm->oport.map),
OPORTMXCTR1_FSSEL_MASK, v);
} else {
switch (rate) {
case 8000:
v = IPORTMXCTR1_FSSEL_8;
break;
case 11025:
v = IPORTMXCTR1_FSSEL_11_025;
break;
case 12000:
v = IPORTMXCTR1_FSSEL_12;
break;
case 16000:
v = IPORTMXCTR1_FSSEL_16;
break;
case 22050:
v = IPORTMXCTR1_FSSEL_22_05;
break;
case 24000:
v = IPORTMXCTR1_FSSEL_24;
break;
case 32000:
v = IPORTMXCTR1_FSSEL_32;
break;
case 44100:
v = IPORTMXCTR1_FSSEL_44_1;
break;
case 48000:
v = IPORTMXCTR1_FSSEL_48;
break;
case 88200:
v = IPORTMXCTR1_FSSEL_88_2;
break;
case 96000:
v = IPORTMXCTR1_FSSEL_96;
break;
case 176400:
v = IPORTMXCTR1_FSSEL_176_4;
break;
case 192000:
v = IPORTMXCTR1_FSSEL_192;
break;
default:
dev_err(dev, "Rate not supported(%d)\n", rate);
return -EINVAL;
}
regmap_update_bits(r, IPORTMXCTR1(sub->swm->iport.map),
IPORTMXCTR1_FSSEL_MASK, v);
}
return 0;
}
/**
* aio_port_set_fmt - set format of I2S data
* @sub: the AIO substream pointer, PCM substream only
*
* Sets suitable I2S format settings for the input/output port block of the
* AIO. The format is specified by set_fmt().
*
* This function may return an error for a non-PCM substream.
*
* Return: Zero if successful, otherwise a negative value on error.
*/
static int aio_port_set_fmt(struct uniphier_aio_sub *sub)
{
struct regmap *r = sub->aio->chip->regmap;
struct device *dev = &sub->aio->chip->pdev->dev;
u32 v;
if (sub->swm->dir == PORT_DIR_OUTPUT) {
switch (sub->aio->fmt) {
case SND_SOC_DAIFMT_LEFT_J:
v = OPORTMXCTR1_I2SLRSEL_LEFT;
break;
case SND_SOC_DAIFMT_RIGHT_J:
v = OPORTMXCTR1_I2SLRSEL_RIGHT;
break;
case SND_SOC_DAIFMT_I2S:
v = OPORTMXCTR1_I2SLRSEL_I2S;
break;
default:
dev_err(dev, "Format is not supported(%d)\n",
sub->aio->fmt);
return -EINVAL;
}
v |= OPORTMXCTR1_OUTBITSEL_24;
regmap_update_bits(r, OPORTMXCTR1(sub->swm->oport.map),
OPORTMXCTR1_I2SLRSEL_MASK |
OPORTMXCTR1_OUTBITSEL_MASK, v);
} else {
switch (sub->aio->fmt) {
case SND_SOC_DAIFMT_LEFT_J:
v = IPORTMXCTR1_LRSEL_LEFT;
break;
case SND_SOC_DAIFMT_RIGHT_J:
v = IPORTMXCTR1_LRSEL_RIGHT;
break;
case SND_SOC_DAIFMT_I2S:
v = IPORTMXCTR1_LRSEL_I2S;
break;
default:
dev_err(dev, "Format is not supported(%d)\n",
sub->aio->fmt);
return -EINVAL;
}
v |= IPORTMXCTR1_OUTBITSEL_24 |
IPORTMXCTR1_CHSEL_ALL;
regmap_update_bits(r, IPORTMXCTR1(sub->swm->iport.map),
IPORTMXCTR1_LRSEL_MASK |
IPORTMXCTR1_OUTBITSEL_MASK |
IPORTMXCTR1_CHSEL_MASK, v);
}
return 0;
}
/**
* aio_port_set_clk - set clock and divider of AIO port block
* @sub: the AIO substream pointer
*
* Sets a suitable PLL clock divider and related settings for the
* input/output port block of the AIO. The parameters are specified by
* set_sysclk() and set_pll().
*
* Return: Zero if successful, otherwise a negative value on error.
*/
static int aio_port_set_clk(struct uniphier_aio_sub *sub)
{
struct uniphier_aio_chip *chip = sub->aio->chip;
struct device *dev = &sub->aio->chip->pdev->dev;
struct regmap *r = sub->aio->chip->regmap;
static const u32 v_pll[] = {
OPORTMXCTR2_ACLKSEL_A1, OPORTMXCTR2_ACLKSEL_F1,
OPORTMXCTR2_ACLKSEL_A2, OPORTMXCTR2_ACLKSEL_F2,
OPORTMXCTR2_ACLKSEL_A2PLL,
OPORTMXCTR2_ACLKSEL_RX1,
};
static const u32 v_div[] = {
OPORTMXCTR2_DACCKSEL_1_2, OPORTMXCTR2_DACCKSEL_1_3,
OPORTMXCTR2_DACCKSEL_1_1, OPORTMXCTR2_DACCKSEL_2_3,
};
u32 v;
if (sub->swm->dir == PORT_DIR_OUTPUT) {
if (sub->swm->type == PORT_TYPE_I2S) {
if (sub->aio->pll_out >= ARRAY_SIZE(v_pll)) {
dev_err(dev, "PLL(%d) is invalid\n",
sub->aio->pll_out);
return -EINVAL;
}
if (sub->aio->plldiv >= ARRAY_SIZE(v_div)) {
dev_err(dev, "PLL divider(%d) is invalid\n",
sub->aio->plldiv);
return -EINVAL;
}
v = v_pll[sub->aio->pll_out] |
OPORTMXCTR2_MSSEL_MASTER |
v_div[sub->aio->plldiv];
switch (chip->plls[sub->aio->pll_out].freq) {
case 0:
case 36864000:
case 33868800:
v |= OPORTMXCTR2_EXTLSIFSSEL_36;
break;
default:
v |= OPORTMXCTR2_EXTLSIFSSEL_24;
break;
}
} else if (sub->swm->type == PORT_TYPE_EVE) {
v = OPORTMXCTR2_ACLKSEL_A2PLL |
OPORTMXCTR2_MSSEL_MASTER |
OPORTMXCTR2_EXTLSIFSSEL_36 |
OPORTMXCTR2_DACCKSEL_1_2;
} else if (sub->swm->type == PORT_TYPE_SPDIF) {
if (sub->aio->pll_out >= ARRAY_SIZE(v_pll)) {
dev_err(dev, "PLL(%d) is invalid\n",
sub->aio->pll_out);
return -EINVAL;
}
v = v_pll[sub->aio->pll_out] |
OPORTMXCTR2_MSSEL_MASTER |
OPORTMXCTR2_DACCKSEL_1_2;
switch (chip->plls[sub->aio->pll_out].freq) {
case 0:
case 36864000:
case 33868800:
v |= OPORTMXCTR2_EXTLSIFSSEL_36;
break;
default:
v |= OPORTMXCTR2_EXTLSIFSSEL_24;
break;
}
} else {
v = OPORTMXCTR2_ACLKSEL_A1 |
OPORTMXCTR2_MSSEL_MASTER |
OPORTMXCTR2_EXTLSIFSSEL_36 |
OPORTMXCTR2_DACCKSEL_1_2;
}
regmap_write(r, OPORTMXCTR2(sub->swm->oport.map), v);
} else {
v = IPORTMXCTR2_ACLKSEL_A1 |
IPORTMXCTR2_MSSEL_SLAVE |
IPORTMXCTR2_EXTLSIFSSEL_36 |
IPORTMXCTR2_DACCKSEL_1_2;
regmap_write(r, IPORTMXCTR2(sub->swm->iport.map), v);
}
return 0;
}
/**
* aio_port_set_param - set parameters of AIO port block
* @sub: the AIO substream pointer
* @pass_through: Zero if the sound data is LPCM, non-zero otherwise.
* This parameter has no effect if the substream is I2S or PCM.
* @params: ALSA hardware parameters
*
* Sets up the input/output port block of the AIO to process the stream
* described by @params.
*
* Return: Zero if successful, otherwise a negative value on error.
*/
int aio_port_set_param(struct uniphier_aio_sub *sub, int pass_through,
const struct snd_pcm_hw_params *params)
{
struct regmap *r = sub->aio->chip->regmap;
unsigned int rate;
u32 v;
int ret;
if (!pass_through) {
if (sub->swm->type == PORT_TYPE_EVE ||
sub->swm->type == PORT_TYPE_CONV) {
rate = 48000;
} else {
rate = params_rate(params);
}
ret = aio_port_set_ch(sub);
if (ret)
return ret;
ret = aio_port_set_rate(sub, rate);
if (ret)
return ret;
ret = aio_port_set_fmt(sub);
if (ret)
return ret;
}
ret = aio_port_set_clk(sub);
if (ret)
return ret;
if (sub->swm->dir == PORT_DIR_OUTPUT) {
if (pass_through)
v = OPORTMXCTR3_SRCSEL_STREAM |
OPORTMXCTR3_VALID_STREAM;
else
v = OPORTMXCTR3_SRCSEL_PCM |
OPORTMXCTR3_VALID_PCM;
v |= OPORTMXCTR3_IECTHUR_IECOUT |
OPORTMXCTR3_PMSEL_PAUSE |
OPORTMXCTR3_PMSW_MUTE_OFF;
regmap_write(r, OPORTMXCTR3(sub->swm->oport.map), v);
} else {
regmap_write(r, IPORTMXACLKSEL0EX(sub->swm->iport.map),
IPORTMXACLKSEL0EX_ACLKSEL0EX_INTERNAL);
regmap_write(r, IPORTMXEXNOE(sub->swm->iport.map),
IPORTMXEXNOE_PCMINOE_INPUT);
}
return 0;
}
/**
* aio_port_set_enable - start or stop of AIO port block
* @sub: the AIO substream pointer
* @enable: zero to stop the block, otherwise to start
*
* Start or stop the signal input/output port block of AIO.
*/
void aio_port_set_enable(struct uniphier_aio_sub *sub, int enable)
{
struct regmap *r = sub->aio->chip->regmap;
if (sub->swm->dir == PORT_DIR_OUTPUT) {
regmap_write(r, OPORTMXPATH(sub->swm->oport.map),
sub->swm->oif.map);
regmap_update_bits(r, OPORTMXMASK(sub->swm->oport.map),
OPORTMXMASK_IUDXMSK_MASK |
OPORTMXMASK_IUXCKMSK_MASK |
OPORTMXMASK_DXMSK_MASK |
OPORTMXMASK_XCKMSK_MASK,
OPORTMXMASK_IUDXMSK_OFF |
OPORTMXMASK_IUXCKMSK_OFF |
OPORTMXMASK_DXMSK_OFF |
OPORTMXMASK_XCKMSK_OFF);
if (enable)
regmap_write(r, AOUTENCTR0, BIT(sub->swm->oport.map));
else
regmap_write(r, AOUTENCTR1, BIT(sub->swm->oport.map));
} else {
regmap_update_bits(r, IPORTMXMASK(sub->swm->iport.map),
IPORTMXMASK_IUXCKMSK_MASK |
IPORTMXMASK_XCKMSK_MASK,
IPORTMXMASK_IUXCKMSK_OFF |
IPORTMXMASK_XCKMSK_OFF);
if (enable)
regmap_update_bits(r,
IPORTMXCTR2(sub->swm->iport.map),
IPORTMXCTR2_REQEN_MASK,
IPORTMXCTR2_REQEN_ENABLE);
else
regmap_update_bits(r,
IPORTMXCTR2(sub->swm->iport.map),
IPORTMXCTR2_REQEN_MASK,
IPORTMXCTR2_REQEN_DISABLE);
}
}
/**
* aio_port_get_volume - get volume of AIO port block
* @sub: the AIO substream pointer
*
* Return: current volume, range is 0x0000 - 0xffff
*/
int aio_port_get_volume(struct uniphier_aio_sub *sub)
{
struct regmap *r = sub->aio->chip->regmap;
u32 v;
regmap_read(r, OPORTMXTYVOLGAINSTATUS(sub->swm->oport.map, 0), &v);
return FIELD_GET(OPORTMXTYVOLGAINSTATUS_CUR_MASK, v);
}
/**
* aio_port_set_volume - set volume of AIO port block
* @sub: the AIO substream pointer
* @vol: target volume, range is 0x0000 - 0xffff.
*
* Changes the digital volume and performs a fade-out/fade-in effect for the
* specified output slot of the port. The gained PCM value can be calculated
* as follows:
* Gained = Original * vol / 0x4000
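* For example, vol = 0x4000 gives unity gain and vol = 0x2000 halves the
* original amplitude.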
*/
void aio_port_set_volume(struct uniphier_aio_sub *sub, int vol)
{
struct regmap *r = sub->aio->chip->regmap;
int oport_map = sub->swm->oport.map;
int cur, diff, slope = 0, fs;
if (sub->swm->dir == PORT_DIR_INPUT)
return;
cur = aio_port_get_volume(sub);
diff = abs(vol - cur);
fs = params_rate(&sub->params);
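/*
 * Fade slope in volume steps per sample; this assumes AUD_VOL_FADE_TIME is
 * expressed in milliseconds.
 */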
if (fs)
slope = diff / AUD_VOL_FADE_TIME * 1000 / fs;
slope = max(1, slope);
regmap_update_bits(r, OPORTMXTYVOLPARA1(oport_map, 0),
OPORTMXTYVOLPARA1_SLOPEU_MASK, slope << 16);
regmap_update_bits(r, OPORTMXTYVOLPARA2(oport_map, 0),
OPORTMXTYVOLPARA2_TARGET_MASK, vol);
if (cur < vol)
regmap_update_bits(r, OPORTMXTYVOLPARA2(oport_map, 0),
OPORTMXTYVOLPARA2_FADE_MASK,
OPORTMXTYVOLPARA2_FADE_FADEIN);
else
regmap_update_bits(r, OPORTMXTYVOLPARA2(oport_map, 0),
OPORTMXTYVOLPARA2_FADE_MASK,
OPORTMXTYVOLPARA2_FADE_FADEOUT);
regmap_write(r, AOUTFADECTR0, BIT(oport_map));
}
/**
* aio_if_set_param - set parameters of AIO DMA I/F block
* @sub: the AIO substream pointer
* @pass_through: Zero if the sound data is LPCM, non-zero otherwise.
* This parameter has no effect if the substream is I2S or PCM.
*
* Sets up the DMA interface block of the AIO to process the stream in the
* specified mode.
*
* Return: Zero if successful, otherwise a negative value on error.
*/
int aio_if_set_param(struct uniphier_aio_sub *sub, int pass_through)
{
struct regmap *r = sub->aio->chip->regmap;
u32 memfmt, v;
if (sub->swm->dir == PORT_DIR_OUTPUT) {
if (pass_through) {
v = PBOUTMXCTR0_ENDIAN_0123 |
PBOUTMXCTR0_MEMFMT_STREAM;
} else {
switch (params_channels(&sub->params)) {
case 2:
memfmt = PBOUTMXCTR0_MEMFMT_2CH;
break;
case 6:
memfmt = PBOUTMXCTR0_MEMFMT_6CH;
break;
case 8:
memfmt = PBOUTMXCTR0_MEMFMT_8CH;
break;
default:
return -EINVAL;
}
v = PBOUTMXCTR0_ENDIAN_3210 | memfmt;
}
regmap_write(r, PBOUTMXCTR0(sub->swm->oif.map), v);
regmap_write(r, PBOUTMXCTR1(sub->swm->oif.map), 0);
} else {
regmap_write(r, PBINMXCTR(sub->swm->iif.map),
PBINMXCTR_NCONNECT_CONNECT |
PBINMXCTR_INOUTSEL_IN |
(sub->swm->iport.map << PBINMXCTR_PBINSEL_SHIFT) |
PBINMXCTR_ENDIAN_3210 |
PBINMXCTR_MEMFMT_D0);
}
return 0;
}
/**
* aio_oport_set_stream_type - set parameters of AIO playback port block
* @sub: the AIO substream pointer
* @pc: Pc type of IEC61937
*
* Applies special settings to the output port block of the AIO so that the
* stream is output via S/PDIF.
*
* Return: Zero if successful, otherwise a negative value on error.
*/
int aio_oport_set_stream_type(struct uniphier_aio_sub *sub,
enum IEC61937_PC pc)
{
struct regmap *r = sub->aio->chip->regmap;
u32 repet = 0, pause = OPORTMXPAUDAT_PAUSEPC_CMN;
switch (pc) {
case IEC61937_PC_AC3:
repet = OPORTMXREPET_STRLENGTH_AC3 |
OPORTMXREPET_PMLENGTH_AC3;
pause |= OPORTMXPAUDAT_PAUSEPD_AC3;
break;
case IEC61937_PC_MPA:
repet = OPORTMXREPET_STRLENGTH_MPA |
OPORTMXREPET_PMLENGTH_MPA;
pause |= OPORTMXPAUDAT_PAUSEPD_MPA;
break;
case IEC61937_PC_MP3:
repet = OPORTMXREPET_STRLENGTH_MP3 |
OPORTMXREPET_PMLENGTH_MP3;
pause |= OPORTMXPAUDAT_PAUSEPD_MP3;
break;
case IEC61937_PC_DTS1:
repet = OPORTMXREPET_STRLENGTH_DTS1 |
OPORTMXREPET_PMLENGTH_DTS1;
pause |= OPORTMXPAUDAT_PAUSEPD_DTS1;
break;
case IEC61937_PC_DTS2:
repet = OPORTMXREPET_STRLENGTH_DTS2 |
OPORTMXREPET_PMLENGTH_DTS2;
pause |= OPORTMXPAUDAT_PAUSEPD_DTS2;
break;
case IEC61937_PC_DTS3:
repet = OPORTMXREPET_STRLENGTH_DTS3 |
OPORTMXREPET_PMLENGTH_DTS3;
pause |= OPORTMXPAUDAT_PAUSEPD_DTS3;
break;
case IEC61937_PC_AAC:
repet = OPORTMXREPET_STRLENGTH_AAC |
OPORTMXREPET_PMLENGTH_AAC;
pause |= OPORTMXPAUDAT_PAUSEPD_AAC;
break;
case IEC61937_PC_PAUSE:
/* Do nothing */
break;
}
regmap_write(r, OPORTMXREPET(sub->swm->oport.map), repet);
regmap_write(r, OPORTMXPAUDAT(sub->swm->oport.map), pause);
return 0;
}
/**
* aio_src_reset - reset AIO SRC block
* @sub: the AIO substream pointer
*
* Resets the digital signal input/output port with the sampling rate
* converter block of the AIO.
* This function has no effect if the substream does not support the rate
* converter.
*/
void aio_src_reset(struct uniphier_aio_sub *sub)
{
struct regmap *r = sub->aio->chip->regmap;
if (sub->swm->dir != PORT_DIR_OUTPUT)
return;
regmap_write(r, AOUTSRCRSTCTR0, BIT(sub->swm->oport.map));
regmap_write(r, AOUTSRCRSTCTR1, BIT(sub->swm->oport.map));
}
/**
* aio_src_set_param - set parameters of AIO SRC block
* @sub: the AIO substream pointer
* @params: hardware parameters of ALSA
*
* Sets up the input/output port with the sampling rate converter block of
* the AIO to process the stream described by @params.
* This function has no effect if the substream does not support the rate
* converter.
*
* Return: Zero if successful, otherwise a negative value on error.
*/
int aio_src_set_param(struct uniphier_aio_sub *sub,
const struct snd_pcm_hw_params *params)
{
struct regmap *r = sub->aio->chip->regmap;
u32 v;
if (sub->swm->dir != PORT_DIR_OUTPUT)
return 0;
regmap_write(r, OPORTMXSRC1CTR(sub->swm->oport.map),
OPORTMXSRC1CTR_THMODE_SRC |
OPORTMXSRC1CTR_SRCPATH_CALC |
OPORTMXSRC1CTR_SYNC_ASYNC |
OPORTMXSRC1CTR_FSIIPSEL_INNER |
OPORTMXSRC1CTR_FSISEL_ACLK);
switch (params_rate(params)) {
default:
case 48000:
v = OPORTMXRATE_I_ACLKSEL_APLLA1 |
OPORTMXRATE_I_MCKSEL_36 |
OPORTMXRATE_I_FSSEL_48;
break;
case 44100:
v = OPORTMXRATE_I_ACLKSEL_APLLA2 |
OPORTMXRATE_I_MCKSEL_33 |
OPORTMXRATE_I_FSSEL_44_1;
break;
case 32000:
v = OPORTMXRATE_I_ACLKSEL_APLLA1 |
OPORTMXRATE_I_MCKSEL_36 |
OPORTMXRATE_I_FSSEL_32;
break;
}
regmap_write(r, OPORTMXRATE_I(sub->swm->oport.map),
v | OPORTMXRATE_I_ACLKSRC_APLL |
OPORTMXRATE_I_LRCKSTP_STOP);
regmap_update_bits(r, OPORTMXRATE_I(sub->swm->oport.map),
OPORTMXRATE_I_LRCKSTP_MASK,
OPORTMXRATE_I_LRCKSTP_START);
return 0;
}
int aio_srcif_set_param(struct uniphier_aio_sub *sub)
{
struct regmap *r = sub->aio->chip->regmap;
regmap_write(r, PBINMXCTR(sub->swm->iif.map),
PBINMXCTR_NCONNECT_CONNECT |
PBINMXCTR_INOUTSEL_OUT |
(sub->swm->oport.map << PBINMXCTR_PBINSEL_SHIFT) |
PBINMXCTR_ENDIAN_3210 |
PBINMXCTR_MEMFMT_D0);
return 0;
}
int aio_srcch_set_param(struct uniphier_aio_sub *sub)
{
struct regmap *r = sub->aio->chip->regmap;
regmap_write(r, CDA2D_CHMXCTRL1(sub->swm->och.map),
CDA2D_CHMXCTRL1_INDSIZE_INFINITE);
regmap_write(r, CDA2D_CHMXSRCAMODE(sub->swm->och.map),
CDA2D_CHMXAMODE_ENDIAN_3210 |
CDA2D_CHMXAMODE_AUPDT_FIX |
CDA2D_CHMXAMODE_TYPE_NORMAL);
regmap_write(r, CDA2D_CHMXDSTAMODE(sub->swm->och.map),
CDA2D_CHMXAMODE_ENDIAN_3210 |
CDA2D_CHMXAMODE_AUPDT_INC |
CDA2D_CHMXAMODE_TYPE_RING |
(sub->swm->och.map << CDA2D_CHMXAMODE_RSSEL_SHIFT));
return 0;
}
void aio_srcch_set_enable(struct uniphier_aio_sub *sub, int enable)
{
struct regmap *r = sub->aio->chip->regmap;
u32 v;
if (enable)
v = CDA2D_STRT0_STOP_START;
else
v = CDA2D_STRT0_STOP_STOP;
regmap_write(r, CDA2D_STRT0,
v | BIT(sub->swm->och.map));
}
int aiodma_ch_set_param(struct uniphier_aio_sub *sub)
{
struct regmap *r = sub->aio->chip->regmap;
u32 v;
regmap_write(r, CDA2D_CHMXCTRL1(sub->swm->ch.map),
CDA2D_CHMXCTRL1_INDSIZE_INFINITE);
v = CDA2D_CHMXAMODE_ENDIAN_3210 |
CDA2D_CHMXAMODE_AUPDT_INC |
CDA2D_CHMXAMODE_TYPE_NORMAL |
(sub->swm->rb.map << CDA2D_CHMXAMODE_RSSEL_SHIFT);
if (sub->swm->dir == PORT_DIR_OUTPUT)
regmap_write(r, CDA2D_CHMXSRCAMODE(sub->swm->ch.map), v);
else
regmap_write(r, CDA2D_CHMXDSTAMODE(sub->swm->ch.map), v);
return 0;
}
void aiodma_ch_set_enable(struct uniphier_aio_sub *sub, int enable)
{
struct regmap *r = sub->aio->chip->regmap;
if (enable) {
regmap_write(r, CDA2D_STRT0,
CDA2D_STRT0_STOP_START | BIT(sub->swm->ch.map));
regmap_update_bits(r, INTRBIM(0),
BIT(sub->swm->rb.map),
BIT(sub->swm->rb.map));
} else {
regmap_write(r, CDA2D_STRT0,
CDA2D_STRT0_STOP_STOP | BIT(sub->swm->ch.map));
regmap_update_bits(r, INTRBIM(0),
BIT(sub->swm->rb.map),
0);
}
}
static u64 aiodma_rb_get_rp(struct uniphier_aio_sub *sub)
{
struct regmap *r = sub->aio->chip->regmap;
u32 pos_u, pos_l;
int i;
regmap_write(r, CDA2D_RDPTRLOAD,
CDA2D_RDPTRLOAD_LSFLAG_STORE | BIT(sub->swm->rb.map));
/* Wait for setup */
for (i = 0; i < 6; i++)
regmap_read(r, CDA2D_RBMXRDPTR(sub->swm->rb.map), &pos_l);
regmap_read(r, CDA2D_RBMXRDPTR(sub->swm->rb.map), &pos_l);
regmap_read(r, CDA2D_RBMXRDPTRU(sub->swm->rb.map), &pos_u);
pos_u = FIELD_GET(CDA2D_RBMXPTRU_PTRU_MASK, pos_u);
return ((u64)pos_u << 32) | pos_l;
}
static void aiodma_rb_set_rp(struct uniphier_aio_sub *sub, u64 pos)
{
struct regmap *r = sub->aio->chip->regmap;
u32 tmp;
int i;
regmap_write(r, CDA2D_RBMXRDPTR(sub->swm->rb.map), lower_32_bits(pos));
regmap_write(r, CDA2D_RBMXRDPTRU(sub->swm->rb.map), upper_32_bits(pos));
regmap_write(r, CDA2D_RDPTRLOAD, BIT(sub->swm->rb.map));
/* Wait for setup */
for (i = 0; i < 6; i++)
regmap_read(r, CDA2D_RBMXRDPTR(sub->swm->rb.map), &tmp);
}
static u64 aiodma_rb_get_wp(struct uniphier_aio_sub *sub)
{
struct regmap *r = sub->aio->chip->regmap;
u32 pos_u, pos_l;
int i;
regmap_write(r, CDA2D_WRPTRLOAD,
CDA2D_WRPTRLOAD_LSFLAG_STORE | BIT(sub->swm->rb.map));
/* Wait for setup */
for (i = 0; i < 6; i++)
regmap_read(r, CDA2D_RBMXWRPTR(sub->swm->rb.map), &pos_l);
regmap_read(r, CDA2D_RBMXWRPTR(sub->swm->rb.map), &pos_l);
regmap_read(r, CDA2D_RBMXWRPTRU(sub->swm->rb.map), &pos_u);
pos_u = FIELD_GET(CDA2D_RBMXPTRU_PTRU_MASK, pos_u);
return ((u64)pos_u << 32) | pos_l;
}
static void aiodma_rb_set_wp(struct uniphier_aio_sub *sub, u64 pos)
{
struct regmap *r = sub->aio->chip->regmap;
u32 tmp;
int i;
regmap_write(r, CDA2D_RBMXWRPTR(sub->swm->rb.map),
lower_32_bits(pos));
regmap_write(r, CDA2D_RBMXWRPTRU(sub->swm->rb.map),
upper_32_bits(pos));
regmap_write(r, CDA2D_WRPTRLOAD, BIT(sub->swm->rb.map));
/* Wait for setup */
for (i = 0; i < 6; i++)
regmap_read(r, CDA2D_RBMXWRPTR(sub->swm->rb.map), &tmp);
}
int aiodma_rb_set_threshold(struct uniphier_aio_sub *sub, u64 size, u32 th)
{
struct regmap *r = sub->aio->chip->regmap;
if (size <= th)
return -EINVAL;
regmap_write(r, CDA2D_RBMXBTH(sub->swm->rb.map), th);
regmap_write(r, CDA2D_RBMXRTH(sub->swm->rb.map), th);
return 0;
}
int aiodma_rb_set_buffer(struct uniphier_aio_sub *sub, u64 start, u64 end,
int period)
{
struct regmap *r = sub->aio->chip->regmap;
u64 size = end - start;
int ret;
if (end < start || period < 0)
return -EINVAL;
regmap_write(r, CDA2D_RBMXCNFG(sub->swm->rb.map), 0);
regmap_write(r, CDA2D_RBMXBGNADRS(sub->swm->rb.map),
lower_32_bits(start));
regmap_write(r, CDA2D_RBMXBGNADRSU(sub->swm->rb.map),
upper_32_bits(start));
regmap_write(r, CDA2D_RBMXENDADRS(sub->swm->rb.map),
lower_32_bits(end));
regmap_write(r, CDA2D_RBMXENDADRSU(sub->swm->rb.map),
upper_32_bits(end));
regmap_write(r, CDA2D_RBADRSLOAD, BIT(sub->swm->rb.map));
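/*
 * Use two periods as the ring-buffer threshold so the DMA interrupt fires
 * while at least one period of margin remains (rationale inferred from the
 * 2 * period value used throughout this driver).
 */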
ret = aiodma_rb_set_threshold(sub, size, 2 * period);
if (ret)
return ret;
if (sub->swm->dir == PORT_DIR_OUTPUT) {
aiodma_rb_set_rp(sub, start);
aiodma_rb_set_wp(sub, end - period);
regmap_update_bits(r, CDA2D_RBMXIE(sub->swm->rb.map),
CDA2D_RBMXIX_SPACE,
CDA2D_RBMXIX_SPACE);
} else {
aiodma_rb_set_rp(sub, end - period);
aiodma_rb_set_wp(sub, start);
regmap_update_bits(r, CDA2D_RBMXIE(sub->swm->rb.map),
CDA2D_RBMXIX_REMAIN,
CDA2D_RBMXIX_REMAIN);
}
sub->threshold = 2 * period;
sub->rd_offs = 0;
sub->wr_offs = 0;
sub->rd_org = 0;
sub->wr_org = 0;
sub->rd_total = 0;
sub->wr_total = 0;
return 0;
}
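/*
 * aiodma_rb_sync() below refreshes the cached read/write offsets from the
 * hardware pointers and accumulates rd_total/wr_total, adding one buffer
 * length whenever an offset has wrapped around since the previous call.
 */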
void aiodma_rb_sync(struct uniphier_aio_sub *sub, u64 start, u64 size,
int period)
{
if (sub->swm->dir == PORT_DIR_OUTPUT) {
sub->rd_offs = aiodma_rb_get_rp(sub) - start;
if (sub->use_mmap) {
sub->threshold = 2 * period;
aiodma_rb_set_threshold(sub, size, 2 * period);
sub->wr_offs = sub->rd_offs - period;
if (sub->rd_offs < period)
sub->wr_offs += size;
}
aiodma_rb_set_wp(sub, sub->wr_offs + start);
} else {
sub->wr_offs = aiodma_rb_get_wp(sub) - start;
if (sub->use_mmap) {
sub->threshold = 2 * period;
aiodma_rb_set_threshold(sub, size, 2 * period);
sub->rd_offs = sub->wr_offs - period;
if (sub->wr_offs < period)
sub->rd_offs += size;
}
aiodma_rb_set_rp(sub, sub->rd_offs + start);
}
sub->rd_total += sub->rd_offs - sub->rd_org;
if (sub->rd_offs < sub->rd_org)
sub->rd_total += size;
sub->wr_total += sub->wr_offs - sub->wr_org;
if (sub->wr_offs < sub->wr_org)
sub->wr_total += size;
sub->rd_org = sub->rd_offs;
sub->wr_org = sub->wr_offs;
}
bool aiodma_rb_is_irq(struct uniphier_aio_sub *sub)
{
struct regmap *r = sub->aio->chip->regmap;
u32 ir;
regmap_read(r, CDA2D_RBMXIR(sub->swm->rb.map), &ir);
if (sub->swm->dir == PORT_DIR_OUTPUT)
return !!(ir & CDA2D_RBMXIX_SPACE);
else
return !!(ir & CDA2D_RBMXIX_REMAIN);
}
void aiodma_rb_clear_irq(struct uniphier_aio_sub *sub)
{
struct regmap *r = sub->aio->chip->regmap;
if (sub->swm->dir == PORT_DIR_OUTPUT)
regmap_write(r, CDA2D_RBMXIR(sub->swm->rb.map),
CDA2D_RBMXIX_SPACE);
else
regmap_write(r, CDA2D_RBMXIR(sub->swm->rb.map),
CDA2D_RBMXIX_REMAIN);
}
| linux-master | sound/soc/uniphier/aio-core.c |
// SPDX-License-Identifier: GPL-2.0
//
// Socionext UniPhier AIO ALSA driver for PXs2.
//
// Copyright (c) 2018 Socionext Inc.
#include <linux/module.h>
#include "aio.h"
static const struct uniphier_aio_spec uniphier_aio_pxs2[] = {
/* for Line PCM In, Pin:AI1Dx */
{
.name = AUD_NAME_PCMIN1,
.gname = AUD_GNAME_LINE,
.swm = {
.type = PORT_TYPE_I2S,
.dir = PORT_DIR_INPUT,
.rb = { 16, 11, },
.ch = { 16, 11, },
.iif = { 0, 0, },
.iport = { 0, AUD_HW_PCMIN1, },
},
},
/* for Speaker/Headphone/Mic PCM In, Pin:AI2Dx */
{
.name = AUD_NAME_PCMIN2,
.gname = AUD_GNAME_AUX,
.swm = {
.type = PORT_TYPE_I2S,
.dir = PORT_DIR_INPUT,
.rb = { 17, 12, },
.ch = { 17, 12, },
.iif = { 1, 1, },
.iport = { 1, AUD_HW_PCMIN2, },
},
},
/* for HDMI PCM Out, Pin:AO1Dx (inner) */
{
.name = AUD_NAME_HPCMOUT1,
.gname = AUD_GNAME_HDMI,
.swm = {
.type = PORT_TYPE_I2S,
.dir = PORT_DIR_OUTPUT,
.rb = { 0, 0, },
.ch = { 0, 0, },
.oif = { 0, 0, },
.oport = { 3, AUD_HW_HPCMOUT1, },
},
},
/* for Line PCM Out, Pin:AO2Dx */
{
.name = AUD_NAME_PCMOUT1,
.gname = AUD_GNAME_LINE,
.swm = {
.type = PORT_TYPE_I2S,
.dir = PORT_DIR_OUTPUT,
.rb = { 1, 1, },
.ch = { 1, 1, },
.oif = { 1, 1, },
.oport = { 0, AUD_HW_PCMOUT1, },
},
},
/* for Speaker/Headphone/Mic PCM Out, Pin:AO3Dx */
{
.name = AUD_NAME_PCMOUT2,
.gname = AUD_GNAME_AUX,
.swm = {
.type = PORT_TYPE_I2S,
.dir = PORT_DIR_OUTPUT,
.rb = { 2, 2, },
.ch = { 2, 2, },
.oif = { 2, 2, },
.oport = { 1, AUD_HW_PCMOUT2, },
},
},
/* for HDMI Out, Pin:AO1IEC */
{
.name = AUD_NAME_HIECOUT1,
.swm = {
.type = PORT_TYPE_SPDIF,
.dir = PORT_DIR_OUTPUT,
.rb = { 6, 4, },
.ch = { 6, 4, },
.oif = { 6, 4, },
.oport = { 12, AUD_HW_HIECOUT1, },
},
},
/* for HDMI Out, Pin:AO1IEC, Compress */
{
.name = AUD_NAME_HIECCOMPOUT1,
.swm = {
.type = PORT_TYPE_SPDIF,
.dir = PORT_DIR_OUTPUT,
.rb = { 6, 4, },
.ch = { 6, 4, },
.oif = { 6, 4, },
.oport = { 12, AUD_HW_HIECOUT1, },
},
},
/* for S/PDIF Out, Pin:AO2IEC */
{
.name = AUD_NAME_IECOUT1,
.swm = {
.type = PORT_TYPE_SPDIF,
.dir = PORT_DIR_OUTPUT,
.rb = { 7, 5, },
.ch = { 7, 5, },
.oif = { 7, 5, },
.oport = { 13, AUD_HW_IECOUT1, },
},
},
/* for S/PDIF Out, Pin:AO2IEC */
{
.name = AUD_NAME_IECCOMPOUT1,
.swm = {
.type = PORT_TYPE_SPDIF,
.dir = PORT_DIR_OUTPUT,
.rb = { 7, 5, },
.ch = { 7, 5, },
.oif = { 7, 5, },
.oport = { 13, AUD_HW_IECOUT1, },
},
},
};
static const struct uniphier_aio_pll uniphier_aio_pll_pxs2[] = {
[AUD_PLL_A1] = { .enable = true, },
[AUD_PLL_F1] = { .enable = true, },
[AUD_PLL_A2] = { .enable = true, },
[AUD_PLL_F2] = { .enable = true, },
[AUD_PLL_APLL] = { .enable = true, },
[AUD_PLL_HSC0] = { .enable = true, },
};
static struct snd_soc_dai_driver uniphier_aio_dai_pxs2[] = {
{
.name = AUD_GNAME_HDMI,
.playback = {
.stream_name = AUD_NAME_HPCMOUT1,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.rates = SNDRV_PCM_RATE_48000,
.channels_min = 2,
.channels_max = 2,
},
.ops = &uniphier_aio_i2s_pxs2_ops,
},
{
.name = AUD_GNAME_LINE,
.playback = {
.stream_name = AUD_NAME_PCMOUT1,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.rates = SNDRV_PCM_RATE_48000,
.channels_min = 2,
.channels_max = 2,
},
.capture = {
.stream_name = AUD_NAME_PCMIN1,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.rates = SNDRV_PCM_RATE_48000,
.channels_min = 2,
.channels_max = 2,
},
.ops = &uniphier_aio_i2s_pxs2_ops,
},
{
.name = AUD_GNAME_AUX,
.playback = {
.stream_name = AUD_NAME_PCMOUT2,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.rates = SNDRV_PCM_RATE_48000,
.channels_min = 2,
.channels_max = 2,
},
.capture = {
.stream_name = AUD_NAME_PCMIN2,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.rates = SNDRV_PCM_RATE_48000,
.channels_min = 2,
.channels_max = 2,
},
.ops = &uniphier_aio_i2s_pxs2_ops,
},
{
.name = AUD_NAME_HIECOUT1,
.playback = {
.stream_name = AUD_NAME_HIECOUT1,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.rates = SNDRV_PCM_RATE_48000,
.channels_min = 2,
.channels_max = 2,
},
.ops = &uniphier_aio_spdif_pxs2_ops,
},
{
.name = AUD_NAME_IECOUT1,
.playback = {
.stream_name = AUD_NAME_IECOUT1,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.rates = SNDRV_PCM_RATE_48000,
.channels_min = 2,
.channels_max = 2,
},
.ops = &uniphier_aio_spdif_pxs2_ops,
},
{
.name = AUD_NAME_HIECCOMPOUT1,
.playback = {
.stream_name = AUD_NAME_HIECCOMPOUT1,
.channels_min = 1,
.channels_max = 1,
},
.ops = &uniphier_aio_spdif_pxs2_ops2,
},
{
.name = AUD_NAME_IECCOMPOUT1,
.playback = {
.stream_name = AUD_NAME_IECCOMPOUT1,
.channels_min = 1,
.channels_max = 1,
},
.ops = &uniphier_aio_spdif_pxs2_ops2,
},
};
static const struct uniphier_aio_chip_spec uniphier_aio_pxs2_spec = {
.specs = uniphier_aio_pxs2,
.num_specs = ARRAY_SIZE(uniphier_aio_pxs2),
.dais = uniphier_aio_dai_pxs2,
.num_dais = ARRAY_SIZE(uniphier_aio_dai_pxs2),
.plls = uniphier_aio_pll_pxs2,
.num_plls = ARRAY_SIZE(uniphier_aio_pll_pxs2),
.addr_ext = 0,
};
static const struct of_device_id uniphier_aio_of_match[] __maybe_unused = {
{
.compatible = "socionext,uniphier-pxs2-aio",
.data = &uniphier_aio_pxs2_spec,
},
{},
};
MODULE_DEVICE_TABLE(of, uniphier_aio_of_match);
static struct platform_driver uniphier_aio_driver = {
.driver = {
.name = "snd-uniphier-aio-pxs2",
.of_match_table = of_match_ptr(uniphier_aio_of_match),
},
.probe = uniphier_aio_probe,
.remove = uniphier_aio_remove,
};
module_platform_driver(uniphier_aio_driver);
MODULE_AUTHOR("Katsuhiro Suzuki <[email protected]>");
MODULE_DESCRIPTION("UniPhier PXs2 AIO driver.");
MODULE_LICENSE("GPL v2");
| linux-master | sound/soc/uniphier/aio-pxs2.c |
// SPDX-License-Identifier: GPL-2.0
//
// Socionext UniPhier EVEA ADC/DAC codec driver.
//
// Copyright (c) 2016-2017 Socionext Inc.
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#define DRV_NAME "evea"
#define EVEA_RATES SNDRV_PCM_RATE_48000
#define EVEA_FORMATS SNDRV_PCM_FMTBIT_S32_LE
#define AADCPOW(n) (0x0078 + 0x04 * (n))
#define AADCPOW_AADC_POWD BIT(0)
#define ALINSW1 0x0088
#define ALINSW1_SEL1_SHIFT 3
#define AHPOUTPOW 0x0098
#define AHPOUTPOW_HP_ON BIT(4)
#define ALINEPOW 0x009c
#define ALINEPOW_LIN2_POWD BIT(3)
#define ALINEPOW_LIN1_POWD BIT(4)
#define ALO1OUTPOW 0x00a8
#define ALO1OUTPOW_LO1_ON BIT(4)
#define ALO2OUTPOW 0x00ac
#define ALO2OUTPOW_ADAC2_MUTE BIT(0)
#define ALO2OUTPOW_LO2_ON BIT(4)
#define AANAPOW 0x00b8
#define AANAPOW_A_POWD BIT(4)
#define ADACSEQ1(n) (0x0144 + 0x40 * (n))
#define ADACSEQ1_MMUTE BIT(1)
#define ADACSEQ2(n) (0x0160 + 0x40 * (n))
#define ADACSEQ2_ADACIN_FIX BIT(0)
#define ADAC1ODC 0x0200
#define ADAC1ODC_HP_DIS_RES_MASK GENMASK(2, 1)
#define ADAC1ODC_HP_DIS_RES_OFF (0x0 << 1)
#define ADAC1ODC_HP_DIS_RES_ON (0x3 << 1)
#define ADAC1ODC_ADAC_RAMPCLT_MASK GENMASK(8, 7)
#define ADAC1ODC_ADAC_RAMPCLT_NORMAL (0x0 << 7)
#define ADAC1ODC_ADAC_RAMPCLT_REDUCE (0x1 << 7)
struct evea_priv {
struct clk *clk, *clk_exiv;
struct reset_control *rst, *rst_exiv, *rst_adamv;
struct regmap *regmap;
int switch_lin;
int switch_lo;
int switch_hp;
};
static const char * const linsw1_sel1_text[] = {
"LIN1", "LIN2", "LIN3"
};
static SOC_ENUM_SINGLE_DECL(linsw1_sel1_enum,
ALINSW1, ALINSW1_SEL1_SHIFT,
linsw1_sel1_text);
static const struct snd_kcontrol_new linesw1_mux[] = {
SOC_DAPM_ENUM("Line In 1 Source", linsw1_sel1_enum),
};
static const struct snd_soc_dapm_widget evea_widgets[] = {
SND_SOC_DAPM_ADC("ADC", NULL, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_MUX("Line In 1 Mux", SND_SOC_NOPM, 0, 0, linesw1_mux),
SND_SOC_DAPM_INPUT("LIN1_LP"),
SND_SOC_DAPM_INPUT("LIN1_RP"),
SND_SOC_DAPM_INPUT("LIN2_LP"),
SND_SOC_DAPM_INPUT("LIN2_RP"),
SND_SOC_DAPM_INPUT("LIN3_LP"),
SND_SOC_DAPM_INPUT("LIN3_RP"),
SND_SOC_DAPM_DAC("DAC HP", NULL, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_DAC("DAC LO1", NULL, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_DAC("DAC LO2", NULL, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_OUTPUT("HP1_L"),
SND_SOC_DAPM_OUTPUT("HP1_R"),
SND_SOC_DAPM_OUTPUT("LO2_L"),
SND_SOC_DAPM_OUTPUT("LO2_R"),
};
static const struct snd_soc_dapm_route evea_routes[] = {
{ "Line In 1", NULL, "ADC" },
{ "ADC", NULL, "Line In 1 Mux" },
{ "Line In 1 Mux", "LIN1", "LIN1_LP" },
{ "Line In 1 Mux", "LIN1", "LIN1_RP" },
{ "Line In 1 Mux", "LIN2", "LIN2_LP" },
{ "Line In 1 Mux", "LIN2", "LIN2_RP" },
{ "Line In 1 Mux", "LIN3", "LIN3_LP" },
{ "Line In 1 Mux", "LIN3", "LIN3_RP" },
{ "DAC HP", NULL, "Headphone 1" },
{ "DAC LO1", NULL, "Line Out 1" },
{ "DAC LO2", NULL, "Line Out 2" },
{ "HP1_L", NULL, "DAC HP" },
{ "HP1_R", NULL, "DAC HP" },
{ "LO2_L", NULL, "DAC LO2" },
{ "LO2_R", NULL, "DAC LO2" },
};
static void evea_set_power_state_on(struct evea_priv *evea)
{
struct regmap *map = evea->regmap;
regmap_update_bits(map, AANAPOW, AANAPOW_A_POWD,
AANAPOW_A_POWD);
regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
ADAC1ODC_HP_DIS_RES_ON);
regmap_update_bits(map, ADAC1ODC, ADAC1ODC_ADAC_RAMPCLT_MASK,
ADAC1ODC_ADAC_RAMPCLT_REDUCE);
regmap_update_bits(map, ADACSEQ2(0), ADACSEQ2_ADACIN_FIX, 0);
regmap_update_bits(map, ADACSEQ2(1), ADACSEQ2_ADACIN_FIX, 0);
regmap_update_bits(map, ADACSEQ2(2), ADACSEQ2_ADACIN_FIX, 0);
}
static void evea_set_power_state_off(struct evea_priv *evea)
{
struct regmap *map = evea->regmap;
regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
ADAC1ODC_HP_DIS_RES_ON);
regmap_update_bits(map, ADACSEQ1(0), ADACSEQ1_MMUTE,
ADACSEQ1_MMUTE);
regmap_update_bits(map, ADACSEQ1(1), ADACSEQ1_MMUTE,
ADACSEQ1_MMUTE);
regmap_update_bits(map, ADACSEQ1(2), ADACSEQ1_MMUTE,
ADACSEQ1_MMUTE);
regmap_update_bits(map, ALO1OUTPOW, ALO1OUTPOW_LO1_ON, 0);
regmap_update_bits(map, ALO2OUTPOW, ALO2OUTPOW_LO2_ON, 0);
regmap_update_bits(map, AHPOUTPOW, AHPOUTPOW_HP_ON, 0);
}
static int evea_update_switch_lin(struct evea_priv *evea)
{
struct regmap *map = evea->regmap;
if (evea->switch_lin) {
regmap_update_bits(map, ALINEPOW,
ALINEPOW_LIN2_POWD | ALINEPOW_LIN1_POWD,
ALINEPOW_LIN2_POWD | ALINEPOW_LIN1_POWD);
regmap_update_bits(map, AADCPOW(0), AADCPOW_AADC_POWD,
AADCPOW_AADC_POWD);
regmap_update_bits(map, AADCPOW(1), AADCPOW_AADC_POWD,
AADCPOW_AADC_POWD);
} else {
regmap_update_bits(map, AADCPOW(0), AADCPOW_AADC_POWD, 0);
regmap_update_bits(map, AADCPOW(1), AADCPOW_AADC_POWD, 0);
regmap_update_bits(map, ALINEPOW,
ALINEPOW_LIN2_POWD | ALINEPOW_LIN1_POWD, 0);
}
return 0;
}
static int evea_update_switch_lo(struct evea_priv *evea)
{
struct regmap *map = evea->regmap;
if (evea->switch_lo) {
regmap_update_bits(map, ADACSEQ1(0), ADACSEQ1_MMUTE, 0);
regmap_update_bits(map, ADACSEQ1(2), ADACSEQ1_MMUTE, 0);
regmap_update_bits(map, ALO1OUTPOW, ALO1OUTPOW_LO1_ON,
ALO1OUTPOW_LO1_ON);
regmap_update_bits(map, ALO2OUTPOW,
ALO2OUTPOW_ADAC2_MUTE | ALO2OUTPOW_LO2_ON,
ALO2OUTPOW_ADAC2_MUTE | ALO2OUTPOW_LO2_ON);
} else {
regmap_update_bits(map, ADACSEQ1(0), ADACSEQ1_MMUTE,
ADACSEQ1_MMUTE);
regmap_update_bits(map, ADACSEQ1(2), ADACSEQ1_MMUTE,
ADACSEQ1_MMUTE);
regmap_update_bits(map, ALO1OUTPOW, ALO1OUTPOW_LO1_ON, 0);
regmap_update_bits(map, ALO2OUTPOW,
ALO2OUTPOW_ADAC2_MUTE | ALO2OUTPOW_LO2_ON,
0);
}
return 0;
}
static int evea_update_switch_hp(struct evea_priv *evea)
{
struct regmap *map = evea->regmap;
if (evea->switch_hp) {
regmap_update_bits(map, ADACSEQ1(1), ADACSEQ1_MMUTE, 0);
regmap_update_bits(map, AHPOUTPOW, AHPOUTPOW_HP_ON,
AHPOUTPOW_HP_ON);
regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
ADAC1ODC_HP_DIS_RES_OFF);
} else {
regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
ADAC1ODC_HP_DIS_RES_ON);
regmap_update_bits(map, ADACSEQ1(1), ADACSEQ1_MMUTE,
ADACSEQ1_MMUTE);
regmap_update_bits(map, AHPOUTPOW, AHPOUTPOW_HP_ON, 0);
}
return 0;
}
static void evea_update_switch_all(struct evea_priv *evea)
{
evea_update_switch_lin(evea);
evea_update_switch_lo(evea);
evea_update_switch_hp(evea);
}
static int evea_get_switch_lin(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
struct evea_priv *evea = snd_soc_component_get_drvdata(component);
ucontrol->value.integer.value[0] = evea->switch_lin;
return 0;
}
static int evea_set_switch_lin(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
struct evea_priv *evea = snd_soc_component_get_drvdata(component);
if (evea->switch_lin == ucontrol->value.integer.value[0])
return 0;
evea->switch_lin = ucontrol->value.integer.value[0];
return evea_update_switch_lin(evea);
}
static int evea_get_switch_lo(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
struct evea_priv *evea = snd_soc_component_get_drvdata(component);
ucontrol->value.integer.value[0] = evea->switch_lo;
return 0;
}
static int evea_set_switch_lo(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
struct evea_priv *evea = snd_soc_component_get_drvdata(component);
if (evea->switch_lo == ucontrol->value.integer.value[0])
return 0;
evea->switch_lo = ucontrol->value.integer.value[0];
return evea_update_switch_lo(evea);
}
static int evea_get_switch_hp(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
struct evea_priv *evea = snd_soc_component_get_drvdata(component);
ucontrol->value.integer.value[0] = evea->switch_hp;
return 0;
}
static int evea_set_switch_hp(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
struct evea_priv *evea = snd_soc_component_get_drvdata(component);
if (evea->switch_hp == ucontrol->value.integer.value[0])
return 0;
evea->switch_hp = ucontrol->value.integer.value[0];
return evea_update_switch_hp(evea);
}
static const struct snd_kcontrol_new evea_controls[] = {
SOC_SINGLE_BOOL_EXT("Line Capture Switch", 0,
evea_get_switch_lin, evea_set_switch_lin),
SOC_SINGLE_BOOL_EXT("Line Playback Switch", 0,
evea_get_switch_lo, evea_set_switch_lo),
SOC_SINGLE_BOOL_EXT("Headphone Playback Switch", 0,
evea_get_switch_hp, evea_set_switch_hp),
};
static int evea_codec_probe(struct snd_soc_component *component)
{
struct evea_priv *evea = snd_soc_component_get_drvdata(component);
evea->switch_lin = 1;
evea->switch_lo = 1;
evea->switch_hp = 1;
evea_set_power_state_on(evea);
evea_update_switch_all(evea);
return 0;
}
static int evea_codec_suspend(struct snd_soc_component *component)
{
struct evea_priv *evea = snd_soc_component_get_drvdata(component);
evea_set_power_state_off(evea);
reset_control_assert(evea->rst_adamv);
reset_control_assert(evea->rst_exiv);
reset_control_assert(evea->rst);
clk_disable_unprepare(evea->clk_exiv);
clk_disable_unprepare(evea->clk);
return 0;
}
static int evea_codec_resume(struct snd_soc_component *component)
{
struct evea_priv *evea = snd_soc_component_get_drvdata(component);
int ret;
ret = clk_prepare_enable(evea->clk);
if (ret)
return ret;
ret = clk_prepare_enable(evea->clk_exiv);
if (ret)
goto err_out_clock;
ret = reset_control_deassert(evea->rst);
if (ret)
goto err_out_clock_exiv;
ret = reset_control_deassert(evea->rst_exiv);
if (ret)
goto err_out_reset;
ret = reset_control_deassert(evea->rst_adamv);
if (ret)
goto err_out_reset_exiv;
evea_set_power_state_on(evea);
evea_update_switch_all(evea);
return 0;
err_out_reset_exiv:
reset_control_assert(evea->rst_exiv);
err_out_reset:
reset_control_assert(evea->rst);
err_out_clock_exiv:
clk_disable_unprepare(evea->clk_exiv);
err_out_clock:
clk_disable_unprepare(evea->clk);
return ret;
}
static struct snd_soc_component_driver soc_codec_evea = {
.probe = evea_codec_probe,
.suspend = evea_codec_suspend,
.resume = evea_codec_resume,
.dapm_widgets = evea_widgets,
.num_dapm_widgets = ARRAY_SIZE(evea_widgets),
.dapm_routes = evea_routes,
.num_dapm_routes = ARRAY_SIZE(evea_routes),
.controls = evea_controls,
.num_controls = ARRAY_SIZE(evea_controls),
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
};
static struct snd_soc_dai_driver soc_dai_evea[] = {
{
.name = DRV_NAME "-line1",
.playback = {
.stream_name = "Line Out 1",
.formats = EVEA_FORMATS,
.rates = EVEA_RATES,
.channels_min = 2,
.channels_max = 2,
},
.capture = {
.stream_name = "Line In 1",
.formats = EVEA_FORMATS,
.rates = EVEA_RATES,
.channels_min = 2,
.channels_max = 2,
},
},
{
.name = DRV_NAME "-hp1",
.playback = {
.stream_name = "Headphone 1",
.formats = EVEA_FORMATS,
.rates = EVEA_RATES,
.channels_min = 2,
.channels_max = 2,
},
},
{
.name = DRV_NAME "-lo2",
.playback = {
.stream_name = "Line Out 2",
.formats = EVEA_FORMATS,
.rates = EVEA_RATES,
.channels_min = 2,
.channels_max = 2,
},
},
};
static const struct regmap_config evea_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0xffc,
.cache_type = REGCACHE_NONE,
};
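/*
 * Bring-up order in evea_probe() (explanatory note): the clocks are enabled
 * before the resets are deasserted, and the error paths and evea_remove()
 * undo the steps in exactly the reverse order. The ADAMV reset is only
 * acquired after the EXIV reset has been released, per the comment in the
 * probe function.
 */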
static int evea_probe(struct platform_device *pdev)
{
struct evea_priv *evea;
void __iomem *preg;
int ret;
evea = devm_kzalloc(&pdev->dev, sizeof(struct evea_priv), GFP_KERNEL);
if (!evea)
return -ENOMEM;
evea->clk = devm_clk_get(&pdev->dev, "evea");
if (IS_ERR(evea->clk))
return PTR_ERR(evea->clk);
evea->clk_exiv = devm_clk_get(&pdev->dev, "exiv");
if (IS_ERR(evea->clk_exiv))
return PTR_ERR(evea->clk_exiv);
evea->rst = devm_reset_control_get_shared(&pdev->dev, "evea");
if (IS_ERR(evea->rst))
return PTR_ERR(evea->rst);
evea->rst_exiv = devm_reset_control_get_shared(&pdev->dev, "exiv");
if (IS_ERR(evea->rst_exiv))
return PTR_ERR(evea->rst_exiv);
preg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(preg))
return PTR_ERR(preg);
evea->regmap = devm_regmap_init_mmio(&pdev->dev, preg,
&evea_regmap_config);
if (IS_ERR(evea->regmap))
return PTR_ERR(evea->regmap);
ret = clk_prepare_enable(evea->clk);
if (ret)
return ret;
ret = clk_prepare_enable(evea->clk_exiv);
if (ret)
goto err_out_clock;
ret = reset_control_deassert(evea->rst);
if (ret)
goto err_out_clock_exiv;
ret = reset_control_deassert(evea->rst_exiv);
if (ret)
goto err_out_reset;
/* ADAMV will hangup if EXIV reset is asserted */
evea->rst_adamv = devm_reset_control_get_shared(&pdev->dev, "adamv");
if (IS_ERR(evea->rst_adamv)) {
ret = PTR_ERR(evea->rst_adamv);
goto err_out_reset_exiv;
}
ret = reset_control_deassert(evea->rst_adamv);
if (ret)
goto err_out_reset_exiv;
platform_set_drvdata(pdev, evea);
ret = devm_snd_soc_register_component(&pdev->dev, &soc_codec_evea,
soc_dai_evea, ARRAY_SIZE(soc_dai_evea));
if (ret)
goto err_out_reset_adamv;
return 0;
err_out_reset_adamv:
reset_control_assert(evea->rst_adamv);
err_out_reset_exiv:
reset_control_assert(evea->rst_exiv);
err_out_reset:
reset_control_assert(evea->rst);
err_out_clock_exiv:
clk_disable_unprepare(evea->clk_exiv);
err_out_clock:
clk_disable_unprepare(evea->clk);
return ret;
}
static void evea_remove(struct platform_device *pdev)
{
struct evea_priv *evea = platform_get_drvdata(pdev);
reset_control_assert(evea->rst_adamv);
reset_control_assert(evea->rst_exiv);
reset_control_assert(evea->rst);
clk_disable_unprepare(evea->clk_exiv);
clk_disable_unprepare(evea->clk);
}
static const struct of_device_id evea_of_match[] __maybe_unused = {
{ .compatible = "socionext,uniphier-evea", },
{}
};
MODULE_DEVICE_TABLE(of, evea_of_match);
static struct platform_driver evea_codec_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(evea_of_match),
},
.probe = evea_probe,
.remove_new = evea_remove,
};
module_platform_driver(evea_codec_driver);
MODULE_AUTHOR("Katsuhiro Suzuki <[email protected]>");
MODULE_DESCRIPTION("UniPhier EVEA codec driver");
MODULE_LICENSE("GPL v2");
| linux-master | sound/soc/uniphier/evea.c |
// SPDX-License-Identifier: GPL-2.0
//
// Socionext UniPhier AIO Compress Audio driver.
//
// Copyright (c) 2017-2018 Socionext Inc.
#include <linux/bitfield.h>
#include <linux/circ_buf.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include "aio.h"
static int uniphier_aio_compr_prepare(struct snd_soc_component *component,
struct snd_compr_stream *cstream);
static int uniphier_aio_compr_hw_free(struct snd_soc_component *component,
struct snd_compr_stream *cstream);
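/*
 * Explanatory note: uniphier_aio_comprdma_new() below backs the compress
 * stream with a single AUD_RING_SIZE kernel buffer mapped for streaming DMA
 * (DMA_TO_DEVICE for playback, DMA_FROM_DEVICE for capture), after limiting
 * the device to 33-bit DMA addressing.
 */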
static int uniphier_aio_comprdma_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_compr *compr = rtd->compr;
struct device *dev = compr->card->dev;
struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[compr->direction];
size_t size = AUD_RING_SIZE;
int dma_dir = DMA_FROM_DEVICE, ret;
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(33));
if (ret)
return ret;
sub->compr_area = kzalloc(size, GFP_KERNEL);
if (!sub->compr_area)
return -ENOMEM;
if (sub->swm->dir == PORT_DIR_OUTPUT)
dma_dir = DMA_TO_DEVICE;
sub->compr_addr = dma_map_single(dev, sub->compr_area, size, dma_dir);
if (dma_mapping_error(dev, sub->compr_addr)) {
kfree(sub->compr_area);
sub->compr_area = NULL;
return -ENOMEM;
}
sub->compr_bytes = size;
return 0;
}
static int uniphier_aio_comprdma_free(struct snd_soc_pcm_runtime *rtd)
{
struct snd_compr *compr = rtd->compr;
struct device *dev = compr->card->dev;
struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[compr->direction];
int dma_dir = DMA_FROM_DEVICE;
if (sub->swm->dir == PORT_DIR_OUTPUT)
dma_dir = DMA_TO_DEVICE;
dma_unmap_single(dev, sub->compr_addr, sub->compr_bytes, dma_dir);
kfree(sub->compr_area);
sub->compr_area = NULL;
return 0;
}
static int uniphier_aio_compr_open(struct snd_soc_component *component,
struct snd_compr_stream *cstream)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
int ret;
if (sub->cstream)
return -EBUSY;
sub->cstream = cstream;
sub->pass_through = 1;
sub->use_mmap = false;
ret = uniphier_aio_comprdma_new(rtd);
if (ret)
return ret;
ret = aio_init(sub);
if (ret)
return ret;
return 0;
}
static int uniphier_aio_compr_free(struct snd_soc_component *component,
struct snd_compr_stream *cstream)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
int ret;
ret = uniphier_aio_compr_hw_free(component, cstream);
if (ret)
return ret;
ret = uniphier_aio_comprdma_free(rtd);
if (ret)
return ret;
sub->cstream = NULL;
return 0;
}
static int uniphier_aio_compr_get_params(struct snd_soc_component *component,
struct snd_compr_stream *cstream,
struct snd_codec *params)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
*params = sub->cparams.codec;
return 0;
}
static int uniphier_aio_compr_set_params(struct snd_soc_component *component,
struct snd_compr_stream *cstream,
struct snd_compr_params *params)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
struct device *dev = &aio->chip->pdev->dev;
if (params->codec.id != SND_AUDIOCODEC_IEC61937) {
dev_err(dev, "Codec ID is not supported(%d)\n",
params->codec.id);
return -EINVAL;
}
if (params->codec.profile != SND_AUDIOPROFILE_IEC61937_SPDIF) {
dev_err(dev, "Codec profile is not supported(%d)\n",
params->codec.profile);
return -EINVAL;
}
/* IEC frame type will be changed after valid data is received */
sub->iec_pc = IEC61937_PC_AAC;
sub->cparams = *params;
sub->setting = 1;
aio_port_reset(sub);
aio_src_reset(sub);
return uniphier_aio_compr_prepare(component, cstream);
}
static int uniphier_aio_compr_hw_free(struct snd_soc_component *component,
struct snd_compr_stream *cstream)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
sub->setting = 0;
return 0;
}
static int uniphier_aio_compr_prepare(struct snd_soc_component *component,
struct snd_compr_stream *cstream)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_compr_runtime *runtime = cstream->runtime;
struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
int bytes = runtime->fragment_size;
unsigned long flags;
int ret;
ret = aiodma_ch_set_param(sub);
if (ret)
return ret;
spin_lock_irqsave(&sub->lock, flags);
ret = aiodma_rb_set_buffer(sub, sub->compr_addr,
sub->compr_addr + sub->compr_bytes,
bytes);
spin_unlock_irqrestore(&sub->lock, flags);
if (ret)
return ret;
ret = aio_port_set_param(sub, sub->pass_through, &sub->params);
if (ret)
return ret;
ret = aio_oport_set_stream_type(sub, sub->iec_pc);
if (ret)
return ret;
aio_port_set_enable(sub, 1);
ret = aio_if_set_param(sub, sub->pass_through);
if (ret)
return ret;
return 0;
}
static int uniphier_aio_compr_trigger(struct snd_soc_component *component,
struct snd_compr_stream *cstream,
int cmd)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_compr_runtime *runtime = cstream->runtime;
struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
struct device *dev = &aio->chip->pdev->dev;
int bytes = runtime->fragment_size, ret = 0;
unsigned long flags;
spin_lock_irqsave(&sub->lock, flags);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
aiodma_rb_sync(sub, sub->compr_addr, sub->compr_bytes, bytes);
aiodma_ch_set_enable(sub, 1);
sub->running = 1;
break;
case SNDRV_PCM_TRIGGER_STOP:
sub->running = 0;
aiodma_ch_set_enable(sub, 0);
break;
default:
dev_warn(dev, "Unknown trigger(%d)\n", cmd);
ret = -EINVAL;
}
spin_unlock_irqrestore(&sub->lock, flags);
return ret;
}
static int uniphier_aio_compr_pointer(struct snd_soc_component *component,
struct snd_compr_stream *cstream,
struct snd_compr_tstamp *tstamp)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_compr_runtime *runtime = cstream->runtime;
struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
int bytes = runtime->fragment_size;
unsigned long flags;
u32 pos;
spin_lock_irqsave(&sub->lock, flags);
aiodma_rb_sync(sub, sub->compr_addr, sub->compr_bytes, bytes);
if (sub->swm->dir == PORT_DIR_OUTPUT) {
pos = sub->rd_offs;
/* Size of the AIO output format is double that of IEC61937 */
tstamp->copied_total = sub->rd_total / 2;
} else {
pos = sub->wr_offs;
tstamp->copied_total = sub->rd_total;
}
tstamp->byte_offset = pos;
spin_unlock_irqrestore(&sub->lock, flags);
return 0;
}
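/*
 * Expand IEC61937 data into the AIO output format: each 16-bit half of an
 * incoming 32-bit word is written out as its own 32-bit word.  When the
 * IEC61937 sync pattern is seen, the following burst-info word (Pc) is used
 * to reprogram the output stream type on the fly.
 */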
static int aio_compr_send_to_hw(struct uniphier_aio_sub *sub,
char __user *buf, size_t dstsize)
{
u32 __user *srcbuf = (u32 __user *)buf;
u32 *dstbuf = (u32 *)(sub->compr_area + sub->wr_offs);
int src = 0, dst = 0, ret;
u32 frm, frm_a, frm_b;
while (dstsize > 0) {
ret = get_user(frm, srcbuf + src);
if (ret)
return ret;
src++;
frm_a = frm & 0xffff;
frm_b = (frm >> 16) & 0xffff;
if (frm == IEC61937_HEADER_SIGN) {
frm_a |= 0x01000000;
/* Next data is Pc and Pd */
sub->iec_header = true;
} else {
u16 pc = be16_to_cpu((__be16)frm_a);
if (sub->iec_header && sub->iec_pc != pc) {
/* Force overwrite IEC frame type */
sub->iec_pc = pc;
ret = aio_oport_set_stream_type(sub, pc);
if (ret)
return ret;
}
sub->iec_header = false;
}
dstbuf[dst++] = frm_a;
dstbuf[dst++] = frm_b;
dstsize -= sizeof(u32) * 2;
}
return 0;
}
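/*
 * For playback the AIO ring holds twice as many bytes as the IEC61937
 * payload (see aio_compr_send_to_hw() above), so the amount accepted from
 * userspace is limited to half the free ring space and the write offset
 * advances by cnt * 2.
 */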
static int uniphier_aio_compr_copy(struct snd_soc_component *component,
struct snd_compr_stream *cstream,
char __user *buf, size_t count)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_compr_runtime *runtime = cstream->runtime;
struct device *carddev = rtd->compr->card->dev;
struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
size_t cnt = min_t(size_t, count, aio_rb_space_to_end(sub) / 2);
int bytes = runtime->fragment_size;
unsigned long flags;
size_t s;
int ret;
if (cnt < sizeof(u32))
return 0;
if (sub->swm->dir == PORT_DIR_OUTPUT) {
dma_addr_t dmapos = sub->compr_addr + sub->wr_offs;
/* Size of the AIO output format is double that of IEC61937 */
s = cnt * 2;
dma_sync_single_for_cpu(carddev, dmapos, s, DMA_TO_DEVICE);
ret = aio_compr_send_to_hw(sub, buf, s);
dma_sync_single_for_device(carddev, dmapos, s, DMA_TO_DEVICE);
} else {
dma_addr_t dmapos = sub->compr_addr + sub->rd_offs;
s = cnt;
dma_sync_single_for_cpu(carddev, dmapos, s, DMA_FROM_DEVICE);
ret = copy_to_user(buf, sub->compr_area + sub->rd_offs, s);
dma_sync_single_for_device(carddev, dmapos, s, DMA_FROM_DEVICE);
}
if (ret)
return -EFAULT;
spin_lock_irqsave(&sub->lock, flags);
sub->threshold = 2 * bytes;
aiodma_rb_set_threshold(sub, sub->compr_bytes, 2 * bytes);
if (sub->swm->dir == PORT_DIR_OUTPUT) {
sub->wr_offs += s;
if (sub->wr_offs >= sub->compr_bytes)
sub->wr_offs -= sub->compr_bytes;
} else {
sub->rd_offs += s;
if (sub->rd_offs >= sub->compr_bytes)
sub->rd_offs -= sub->compr_bytes;
}
aiodma_rb_sync(sub, sub->compr_addr, sub->compr_bytes, bytes);
spin_unlock_irqrestore(&sub->lock, flags);
return cnt;
}
static int uniphier_aio_compr_get_caps(struct snd_soc_component *component,
struct snd_compr_stream *cstream,
struct snd_compr_caps *caps)
{
caps->num_codecs = 1;
caps->min_fragment_size = AUD_MIN_FRAGMENT_SIZE;
caps->max_fragment_size = AUD_MAX_FRAGMENT_SIZE;
caps->min_fragments = AUD_MIN_FRAGMENT;
caps->max_fragments = AUD_MAX_FRAGMENT;
caps->codecs[0] = SND_AUDIOCODEC_IEC61937;
return 0;
}
static const struct snd_compr_codec_caps caps_iec = {
.num_descriptors = 1,
.descriptor[0].max_ch = 8,
.descriptor[0].num_sample_rates = 0,
.descriptor[0].num_bitrates = 0,
.descriptor[0].profiles = SND_AUDIOPROFILE_IEC61937_SPDIF,
.descriptor[0].modes = SND_AUDIOMODE_IEC_AC3 |
SND_AUDIOMODE_IEC_MPEG1 |
SND_AUDIOMODE_IEC_MP3 |
SND_AUDIOMODE_IEC_DTS,
.descriptor[0].formats = 0,
};
static int uniphier_aio_compr_get_codec_caps(struct snd_soc_component *component,
struct snd_compr_stream *stream,
struct snd_compr_codec_caps *codec)
{
if (codec->codec == SND_AUDIOCODEC_IEC61937)
*codec = caps_iec;
else
return -EINVAL;
return 0;
}
const struct snd_compress_ops uniphier_aio_compress_ops = {
.open = uniphier_aio_compr_open,
.free = uniphier_aio_compr_free,
.get_params = uniphier_aio_compr_get_params,
.set_params = uniphier_aio_compr_set_params,
.trigger = uniphier_aio_compr_trigger,
.pointer = uniphier_aio_compr_pointer,
.copy = uniphier_aio_compr_copy,
.get_caps = uniphier_aio_compr_get_caps,
.get_codec_caps = uniphier_aio_compr_get_codec_caps,
};
| linux-master | sound/soc/uniphier/aio-compress.c |
// SPDX-License-Identifier: GPL-2.0
//
// Socionext UniPhier AIO DMA driver.
//
// Copyright (c) 2016-2018 Socionext Inc.
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include "aio.h"
static struct snd_pcm_hardware uniphier_aiodma_hw = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED,
.period_bytes_min = 256,
.period_bytes_max = 4096,
.periods_min = 4,
.periods_max = 1024,
.buffer_bytes_max = 128 * 1024,
};
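/*
 * Buffer interrupt: advance the ring-buffer threshold by one period so the
 * next interrupt fires after the following period, resync the hardware
 * read/write pointers, then report the elapsed period to ALSA.
 */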
static void aiodma_pcm_irq(struct uniphier_aio_sub *sub)
{
struct snd_pcm_runtime *runtime = sub->substream->runtime;
int bytes = runtime->period_size *
runtime->channels * samples_to_bytes(runtime, 1);
int ret;
spin_lock(&sub->lock);
ret = aiodma_rb_set_threshold(sub, runtime->dma_bytes,
sub->threshold + bytes);
if (!ret)
sub->threshold += bytes;
aiodma_rb_sync(sub, runtime->dma_addr, runtime->dma_bytes, bytes);
aiodma_rb_clear_irq(sub);
spin_unlock(&sub->lock);
snd_pcm_period_elapsed(sub->substream);
}
static void aiodma_compr_irq(struct uniphier_aio_sub *sub)
{
struct snd_compr_runtime *runtime = sub->cstream->runtime;
int bytes = runtime->fragment_size;
int ret;
spin_lock(&sub->lock);
ret = aiodma_rb_set_threshold(sub, sub->compr_bytes,
sub->threshold + bytes);
if (!ret)
sub->threshold += bytes;
aiodma_rb_sync(sub, sub->compr_addr, sub->compr_bytes, bytes);
aiodma_rb_clear_irq(sub);
spin_unlock(&sub->lock);
snd_compr_fragment_elapsed(sub->cstream);
}
static irqreturn_t aiodma_irq(int irq, void *p)
{
struct platform_device *pdev = p;
struct uniphier_aio_chip *chip = platform_get_drvdata(pdev);
irqreturn_t ret = IRQ_NONE;
int i, j;
for (i = 0; i < chip->num_aios; i++) {
struct uniphier_aio *aio = &chip->aios[i];
for (j = 0; j < ARRAY_SIZE(aio->sub); j++) {
struct uniphier_aio_sub *sub = &aio->sub[j];
/* Skip channels that are not running or did not raise this interrupt */
if (!sub->running || !aiodma_rb_is_irq(sub))
continue;
if (sub->substream)
aiodma_pcm_irq(sub);
if (sub->cstream)
aiodma_compr_irq(sub);
ret = IRQ_HANDLED;
}
}
return ret;
}
static int uniphier_aiodma_open(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
snd_soc_set_runtime_hwparams(substream, &uniphier_aiodma_hw);
return snd_pcm_hw_constraint_step(runtime, 0,
SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 256);
}
static int uniphier_aiodma_prepare(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
int bytes = runtime->period_size *
runtime->channels * samples_to_bytes(runtime, 1);
unsigned long flags;
int ret;
ret = aiodma_ch_set_param(sub);
if (ret)
return ret;
spin_lock_irqsave(&sub->lock, flags);
ret = aiodma_rb_set_buffer(sub, runtime->dma_addr,
runtime->dma_addr + runtime->dma_bytes,
bytes);
spin_unlock_irqrestore(&sub->lock, flags);
if (ret)
return ret;
return 0;
}
static int uniphier_aiodma_trigger(struct snd_soc_component *component,
struct snd_pcm_substream *substream, int cmd)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
struct device *dev = &aio->chip->pdev->dev;
int bytes = runtime->period_size *
runtime->channels * samples_to_bytes(runtime, 1);
unsigned long flags;
spin_lock_irqsave(&sub->lock, flags);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
aiodma_rb_sync(sub, runtime->dma_addr, runtime->dma_bytes,
bytes);
aiodma_ch_set_enable(sub, 1);
sub->running = 1;
break;
case SNDRV_PCM_TRIGGER_STOP:
sub->running = 0;
aiodma_ch_set_enable(sub, 0);
break;
default:
dev_warn(dev, "Unknown trigger(%d) ignored\n", cmd);
break;
}
spin_unlock_irqrestore(&sub->lock, flags);
return 0;
}
static snd_pcm_uframes_t uniphier_aiodma_pointer(
struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[substream->stream];
int bytes = runtime->period_size *
runtime->channels * samples_to_bytes(runtime, 1);
unsigned long flags;
snd_pcm_uframes_t pos;
spin_lock_irqsave(&sub->lock, flags);
aiodma_rb_sync(sub, runtime->dma_addr, runtime->dma_bytes, bytes);
if (sub->swm->dir == PORT_DIR_OUTPUT)
pos = bytes_to_frames(runtime, sub->rd_offs);
else
pos = bytes_to_frames(runtime, sub->wr_offs);
spin_unlock_irqrestore(&sub->lock, flags);
return pos;
}
static int uniphier_aiodma_mmap(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
struct vm_area_struct *vma)
{
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
return remap_pfn_range(vma, vma->vm_start,
substream->runtime->dma_addr >> PAGE_SHIFT,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
static int uniphier_aiodma_new(struct snd_soc_component *component,
struct snd_soc_pcm_runtime *rtd)
{
struct device *dev = rtd->card->snd_card->dev;
struct snd_pcm *pcm = rtd->pcm;
int ret;
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(33));
if (ret)
return ret;
snd_pcm_set_managed_buffer_all(pcm,
SNDRV_DMA_TYPE_DEV, dev,
uniphier_aiodma_hw.buffer_bytes_max,
uniphier_aiodma_hw.buffer_bytes_max);
return 0;
}
static const struct snd_soc_component_driver uniphier_soc_platform = {
.open = uniphier_aiodma_open,
.prepare = uniphier_aiodma_prepare,
.trigger = uniphier_aiodma_trigger,
.pointer = uniphier_aiodma_pointer,
.mmap = uniphier_aiodma_mmap,
.pcm_construct = uniphier_aiodma_new,
.compress_ops = &uniphier_aio_compress_ops,
};
static const struct regmap_config aiodma_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x7fffc,
.cache_type = REGCACHE_NONE,
};
/**
* uniphier_aiodma_soc_register_platform - register the AIO DMA
* @pdev: the platform device
*
 * Register and set up the AIO DMA to transfer sound data to the device.
 * This function needs to be called once at driver startup; there is no
 * corresponding unregister function to call.
*
* Return: Zero if successful, otherwise a negative value on error.
*/
int uniphier_aiodma_soc_register_platform(struct platform_device *pdev)
{
struct uniphier_aio_chip *chip = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
void __iomem *preg;
int irq, ret;
preg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(preg))
return PTR_ERR(preg);
chip->regmap = devm_regmap_init_mmio(dev, preg,
&aiodma_regmap_config);
if (IS_ERR(chip->regmap))
return PTR_ERR(chip->regmap);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ret = devm_request_irq(dev, irq, aiodma_irq,
IRQF_SHARED, dev_name(dev), pdev);
if (ret)
return ret;
return devm_snd_soc_register_component(dev, &uniphier_soc_platform,
NULL, 0);
}
EXPORT_SYMBOL_GPL(uniphier_aiodma_soc_register_platform);
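/*
 * Usage sketch (assumption, not part of this file): a SoC-specific AIO
 * front end is expected to call this from its own platform probe after
 * platform_set_drvdata(pdev, chip), e.g.:
 *
 *	ret = uniphier_aiodma_soc_register_platform(pdev);
 *	if (ret)
 *		return ret;
 *
 * The actual call site lives in the common AIO core code, which is not
 * shown here.
 */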
| linux-master | sound/soc/uniphier/aio-dma.c |
// SPDX-License-Identifier: GPL-2.0
//
// Socionext UniPhier AIO ALSA driver for LD11/LD20.
//
// Copyright (c) 2016-2018 Socionext Inc.
#include <linux/module.h>
#include "aio.h"
static const struct uniphier_aio_spec uniphier_aio_ld11[] = {
/* for HDMI PCM In, Pin:AI1Dx */
{
.name = AUD_NAME_PCMIN1,
.gname = AUD_GNAME_HDMI,
.swm = {
.type = PORT_TYPE_I2S,
.dir = PORT_DIR_INPUT,
.rb = { 21, 14, },
.ch = { 21, 14, },
.iif = { 5, 3, },
.iport = { 0, AUD_HW_PCMIN1, },
},
},
/* for SIF In, Pin:AI2Dx */
{
.name = AUD_NAME_PCMIN2,
.swm = {
.type = PORT_TYPE_I2S,
.dir = PORT_DIR_INPUT,
.rb = { 22, 15, },
.ch = { 22, 15, },
.iif = { 6, 4, },
.iport = { 1, AUD_HW_PCMIN2, },
},
},
/* for Line In, Pin:AI3Dx */
{
.name = AUD_NAME_PCMIN3,
.gname = AUD_GNAME_LINE,
.swm = {
.type = PORT_TYPE_EVE,
.dir = PORT_DIR_INPUT,
.rb = { 23, 16, },
.ch = { 23, 16, },
.iif = { 7, 5, },
.iport = { 2, AUD_HW_PCMIN3, },
},
},
/* for S/PDIF In, Pin:AI1IEC */
{
.name = AUD_NAME_IECIN1,
.gname = AUD_GNAME_IEC,
.swm = {
.type = PORT_TYPE_SPDIF,
.dir = PORT_DIR_INPUT,
.rb = { 26, 17, },
.ch = { 26, 17, },
.iif = { 10, 6, },
.iport = { 3, AUD_HW_IECIN1, },
},
},
/* for Speaker, Pin:AO1Dx */
{
.name = AUD_NAME_HPCMOUT1,
.swm = {
.type = PORT_TYPE_I2S,
.dir = PORT_DIR_OUTPUT,
.rb = { 0, 0, },
.ch = { 0, 0, },
.oif = { 0, 0, },
.oport = { 0, AUD_HW_HPCMOUT1, },
},
},
/* for HDMI PCM, Pin:AO2Dx */
{
.name = AUD_NAME_PCMOUT1,
.gname = AUD_GNAME_HDMI,
.swm = {
.type = PORT_TYPE_I2S,
.dir = PORT_DIR_OUTPUT,
.rb = { 0, 0, },
.ch = { 0, 0, },
.oif = { 0, 0, },
.oport = { 3, AUD_HW_PCMOUT1, },
},
},
/* for Line Out, Pin:LO2_x */
{
.name = AUD_NAME_PCMOUT2,
.gname = AUD_GNAME_LINE,
.swm = {
.type = PORT_TYPE_EVE,
.dir = PORT_DIR_OUTPUT,
.rb = { 2, 2, },
.ch = { 2, 2, },
.oif = { 2, 2, },
.oport = { 1, AUD_HW_PCMOUT2, },
},
},
/* for Headphone, Pin:HP1_x */
{
.name = AUD_NAME_PCMOUT3,
.swm = {
.type = PORT_TYPE_EVE,
.dir = PORT_DIR_OUTPUT,
.rb = { 3, 3, },
.ch = { 3, 3, },
.oif = { 3, 3, },
.oport = { 2, AUD_HW_PCMOUT3, },
},
},
/* for HW Sampling Rate Converter */
{
.name = AUD_NAME_EPCMOUT2,
.swm = {
.type = PORT_TYPE_CONV,
.dir = PORT_DIR_OUTPUT,
.rb = { 7, 5, },
.ch = { 7, 5, },
.oif = { 7, 5, },
.oport = { 6, AUD_HW_EPCMOUT2, },
.och = { 17, 12, },
.iif = { 1, 1, },
},
},
/* for HW Sampling Rate Converter 2 */
{
.name = AUD_NAME_EPCMOUT3,
.swm = {
.type = PORT_TYPE_CONV,
.dir = PORT_DIR_OUTPUT,
.rb = { 8, 6, },
.ch = { 8, 6, },
.oif = { 8, 6, },
.oport = { 7, AUD_HW_EPCMOUT3, },
.och = { 18, 13, },
.iif = { 2, 2, },
},
},
/* for S/PDIF Out, Pin:AO1IEC */
{
.name = AUD_NAME_HIECOUT1,
.gname = AUD_GNAME_IEC,
.swm = {
.type = PORT_TYPE_SPDIF,
.dir = PORT_DIR_OUTPUT,
.rb = { 1, 1, },
.ch = { 1, 1, },
.oif = { 1, 1, },
.oport = { 12, AUD_HW_HIECOUT1, },
},
},
/* for S/PDIF Out, Pin:AO1IEC, Compress */
{
.name = AUD_NAME_HIECCOMPOUT1,
.gname = AUD_GNAME_IEC,
.swm = {
.type = PORT_TYPE_SPDIF,
.dir = PORT_DIR_OUTPUT,
.rb = { 1, 1, },
.ch = { 1, 1, },
.oif = { 1, 1, },
.oport = { 12, AUD_HW_HIECOUT1, },
},
},
};
static const struct uniphier_aio_pll uniphier_aio_pll_ld11[] = {
[AUD_PLL_A1] = { .enable = true, },
[AUD_PLL_F1] = { .enable = true, },
[AUD_PLL_A2] = { .enable = true, },
[AUD_PLL_F2] = { .enable = true, },
[AUD_PLL_APLL] = { .enable = true, },
[AUD_PLL_RX0] = { .enable = true, },
[AUD_PLL_USB0] = { .enable = true, },
[AUD_PLL_HSC0] = { .enable = true, },
};
static struct snd_soc_dai_driver uniphier_aio_dai_ld11[] = {
{
.name = AUD_GNAME_HDMI,
.playback = {
.stream_name = AUD_NAME_PCMOUT1,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.rates = SNDRV_PCM_RATE_48000,
.channels_min = 2,
.channels_max = 2,
},
.capture = {
.stream_name = AUD_NAME_PCMIN1,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.rates = SNDRV_PCM_RATE_48000 |
SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_32000,
.channels_min = 2,
.channels_max = 2,
},
.ops = &uniphier_aio_i2s_ld11_ops,
},
{
.name = AUD_NAME_PCMIN2,
.capture = {
.stream_name = AUD_NAME_PCMIN2,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.rates = SNDRV_PCM_RATE_48000,
.channels_min = 2,
.channels_max = 2,
},
.ops = &uniphier_aio_i2s_ld11_ops,
},
{
.name = AUD_GNAME_LINE,
.playback = {
.stream_name = AUD_NAME_PCMOUT2,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.rates = SNDRV_PCM_RATE_48000,
.channels_min = 2,
.channels_max = 2,
},
.capture = {
.stream_name = AUD_NAME_PCMIN3,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.rates = SNDRV_PCM_RATE_48000,
.channels_min = 2,
.channels_max = 2,
},
.ops = &uniphier_aio_i2s_ld11_ops,
},
{
.name = AUD_NAME_HPCMOUT1,
.playback = {
.stream_name = AUD_NAME_HPCMOUT1,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.rates = SNDRV_PCM_RATE_48000,
.channels_min = 2,
.channels_max = 8,
},
.ops = &uniphier_aio_i2s_ld11_ops,
},
{
.name = AUD_NAME_PCMOUT3,
.playback = {
.stream_name = AUD_NAME_PCMOUT3,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.rates = SNDRV_PCM_RATE_48000,
.channels_min = 2,
.channels_max = 2,
},
.ops = &uniphier_aio_i2s_ld11_ops,
},
{
.name = AUD_NAME_HIECOUT1,
.playback = {
.stream_name = AUD_NAME_HIECOUT1,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.rates = SNDRV_PCM_RATE_48000,
.channels_min = 2,
.channels_max = 2,
},
.ops = &uniphier_aio_spdif_ld11_ops,
},
{
.name = AUD_NAME_EPCMOUT2,
.playback = {
.stream_name = AUD_NAME_EPCMOUT2,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.rates = SNDRV_PCM_RATE_48000 |
SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_32000,
.channels_min = 2,
.channels_max = 2,
},
.ops = &uniphier_aio_i2s_ld11_ops,
},
{
.name = AUD_NAME_EPCMOUT3,
.playback = {
.stream_name = AUD_NAME_EPCMOUT3,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.rates = SNDRV_PCM_RATE_48000 |
SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_32000,
.channels_min = 2,
.channels_max = 2,
},
.ops = &uniphier_aio_i2s_ld11_ops,
},
{
.name = AUD_NAME_HIECCOMPOUT1,
.playback = {
.stream_name = AUD_NAME_HIECCOMPOUT1,
.channels_min = 1,
.channels_max = 1,
},
.ops = &uniphier_aio_spdif_ld11_ops2,
},
};
static const struct uniphier_aio_chip_spec uniphier_aio_ld11_spec = {
.specs = uniphier_aio_ld11,
.num_specs = ARRAY_SIZE(uniphier_aio_ld11),
.dais = uniphier_aio_dai_ld11,
.num_dais = ARRAY_SIZE(uniphier_aio_dai_ld11),
.plls = uniphier_aio_pll_ld11,
.num_plls = ARRAY_SIZE(uniphier_aio_pll_ld11),
.addr_ext = 0,
};
static const struct uniphier_aio_chip_spec uniphier_aio_ld20_spec = {
.specs = uniphier_aio_ld11,
.num_specs = ARRAY_SIZE(uniphier_aio_ld11),
.dais = uniphier_aio_dai_ld11,
.num_dais = ARRAY_SIZE(uniphier_aio_dai_ld11),
.plls = uniphier_aio_pll_ld11,
.num_plls = ARRAY_SIZE(uniphier_aio_pll_ld11),
.addr_ext = 1,
};
static const struct of_device_id uniphier_aio_of_match[] __maybe_unused = {
{
.compatible = "socionext,uniphier-ld11-aio",
.data = &uniphier_aio_ld11_spec,
},
{
.compatible = "socionext,uniphier-ld20-aio",
.data = &uniphier_aio_ld20_spec,
},
{},
};
MODULE_DEVICE_TABLE(of, uniphier_aio_of_match);
static struct platform_driver uniphier_aio_driver = {
.driver = {
.name = "snd-uniphier-aio-ld11",
.of_match_table = of_match_ptr(uniphier_aio_of_match),
},
.probe = uniphier_aio_probe,
.remove = uniphier_aio_remove,
};
module_platform_driver(uniphier_aio_driver);
MODULE_AUTHOR("Katsuhiro Suzuki <[email protected]>");
MODULE_DESCRIPTION("UniPhier LD11/LD20 AIO driver.");
MODULE_LICENSE("GPL v2");
| linux-master | sound/soc/uniphier/aio-ld11.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* omap-twl4030.c -- SoC audio for TI SoC based boards with twl4030 codec
*
* Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com
* All rights reserved.
*
* Author: Peter Ujfalusi <[email protected]>
*
* This driver replaces the following machine drivers:
* omap3beagle (Author: Steve Sakoman <[email protected]>)
* omap3evm (Author: Anuj Aggarwal <[email protected]>)
* overo (Author: Steve Sakoman <[email protected]>)
* igep0020 (Author: Enric Balletbo i Serra <[email protected]>)
* zoom2 (Author: Misael Lopez Cruz <[email protected]>)
* sdp3430 (Author: Misael Lopez Cruz <[email protected]>)
*/
#include <linux/platform_device.h>
#include <linux/platform_data/omap-twl4030.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/jack.h>
#include "omap-mcbsp.h"
struct omap_twl4030 {
int jack_detect; /* board can detect jack events */
struct snd_soc_jack hs_jack;
};
static int omap_twl4030_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
unsigned int fmt;
switch (params_channels(params)) {
case 2: /* Stereo I2S mode */
fmt = SND_SOC_DAIFMT_I2S |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBM_CFM;
break;
case 4: /* Four channel TDM mode */
fmt = SND_SOC_DAIFMT_DSP_A |
SND_SOC_DAIFMT_IB_NF |
SND_SOC_DAIFMT_CBM_CFM;
break;
default:
return -EINVAL;
}
return snd_soc_runtime_set_dai_fmt(rtd, fmt);
}
static const struct snd_soc_ops omap_twl4030_ops = {
.hw_params = omap_twl4030_hw_params,
};
static const struct snd_soc_dapm_widget dapm_widgets[] = {
SND_SOC_DAPM_SPK("Earpiece Spk", NULL),
SND_SOC_DAPM_SPK("Handsfree Spk", NULL),
SND_SOC_DAPM_HP("Headset Stereophone", NULL),
SND_SOC_DAPM_SPK("Ext Spk", NULL),
SND_SOC_DAPM_SPK("Carkit Spk", NULL),
SND_SOC_DAPM_MIC("Main Mic", NULL),
SND_SOC_DAPM_MIC("Sub Mic", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_MIC("Carkit Mic", NULL),
SND_SOC_DAPM_MIC("Digital0 Mic", NULL),
SND_SOC_DAPM_MIC("Digital1 Mic", NULL),
SND_SOC_DAPM_LINE("Line In", NULL),
};
static const struct snd_soc_dapm_route audio_map[] = {
/* Headset Stereophone: HSOL, HSOR */
{"Headset Stereophone", NULL, "HSOL"},
{"Headset Stereophone", NULL, "HSOR"},
/* External Speakers: HFL, HFR */
{"Handsfree Spk", NULL, "HFL"},
{"Handsfree Spk", NULL, "HFR"},
/* External Speakers: PredrivL, PredrivR */
{"Ext Spk", NULL, "PREDRIVEL"},
{"Ext Spk", NULL, "PREDRIVER"},
/* Carkit speakers: CARKITL, CARKITR */
{"Carkit Spk", NULL, "CARKITL"},
{"Carkit Spk", NULL, "CARKITR"},
/* Earpiece */
{"Earpiece Spk", NULL, "EARPIECE"},
/* External Mics: MAINMIC, SUBMIC with bias */
{"MAINMIC", NULL, "Main Mic"},
{"Main Mic", NULL, "Mic Bias 1"},
{"SUBMIC", NULL, "Sub Mic"},
{"Sub Mic", NULL, "Mic Bias 2"},
/* Headset Mic: HSMIC with bias */
{"HSMIC", NULL, "Headset Mic"},
{"Headset Mic", NULL, "Headset Mic Bias"},
/* Digital Mics: DIGIMIC0, DIGIMIC1 with bias */
{"DIGIMIC0", NULL, "Digital0 Mic"},
{"Digital0 Mic", NULL, "Mic Bias 1"},
{"DIGIMIC1", NULL, "Digital1 Mic"},
{"Digital1 Mic", NULL, "Mic Bias 2"},
/* Carkit In: CARKITMIC */
{"CARKITMIC", NULL, "Carkit Mic"},
/* Aux In: AUXL, AUXR */
{"AUXL", NULL, "Line In"},
{"AUXR", NULL, "Line In"},
};
/* Headset jack detection DAPM pins */
static struct snd_soc_jack_pin hs_jack_pins[] = {
{
.pin = "Headset Mic",
.mask = SND_JACK_MICROPHONE,
},
{
.pin = "Headset Stereophone",
.mask = SND_JACK_HEADPHONE,
},
};
/* Headset jack detection gpios */
static struct snd_soc_jack_gpio hs_jack_gpios[] = {
{
.name = "hsdet-gpio",
.report = SND_JACK_HEADSET,
.debounce_time = 200,
},
};
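/* The actual GPIO number is filled in from priv->jack_detect in omap_twl4030_init() */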
static inline void twl4030_disconnect_pin(struct snd_soc_dapm_context *dapm,
int connected, char *pin)
{
if (!connected)
snd_soc_dapm_disable_pin(dapm, pin);
}
static int omap_twl4030_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_card *card = rtd->card;
struct snd_soc_dapm_context *dapm = &card->dapm;
struct omap_tw4030_pdata *pdata = dev_get_platdata(card->dev);
struct omap_twl4030 *priv = snd_soc_card_get_drvdata(card);
int ret = 0;
/* Headset jack detection only if it is supported */
if (priv->jack_detect > 0) {
hs_jack_gpios[0].gpio = priv->jack_detect;
ret = snd_soc_card_jack_new_pins(rtd->card, "Headset Jack",
SND_JACK_HEADSET,
&priv->hs_jack, hs_jack_pins,
ARRAY_SIZE(hs_jack_pins));
if (ret)
return ret;
ret = snd_soc_jack_add_gpios(&priv->hs_jack,
ARRAY_SIZE(hs_jack_gpios),
hs_jack_gpios);
if (ret)
return ret;
}
/*
* NULL pdata means we booted with DT. In this case the routing is
* provided and the card is fully routed, no need to mark pins.
*/
if (!pdata || !pdata->custom_routing)
return ret;
/* Disable not connected paths if not used */
twl4030_disconnect_pin(dapm, pdata->has_ear, "Earpiece Spk");
twl4030_disconnect_pin(dapm, pdata->has_hf, "Handsfree Spk");
twl4030_disconnect_pin(dapm, pdata->has_hs, "Headset Stereophone");
twl4030_disconnect_pin(dapm, pdata->has_predriv, "Ext Spk");
twl4030_disconnect_pin(dapm, pdata->has_carkit, "Carkit Spk");
twl4030_disconnect_pin(dapm, pdata->has_mainmic, "Main Mic");
twl4030_disconnect_pin(dapm, pdata->has_submic, "Sub Mic");
twl4030_disconnect_pin(dapm, pdata->has_hsmic, "Headset Mic");
twl4030_disconnect_pin(dapm, pdata->has_carkitmic, "Carkit Mic");
twl4030_disconnect_pin(dapm, pdata->has_digimic0, "Digital0 Mic");
twl4030_disconnect_pin(dapm, pdata->has_digimic1, "Digital1 Mic");
twl4030_disconnect_pin(dapm, pdata->has_linein, "Line In");
return ret;
}
/* Digital audio interface glue - connects codec <--> CPU */
SND_SOC_DAILINK_DEFS(hifi,
DAILINK_COMP_ARRAY(COMP_CPU("omap-mcbsp.2")),
DAILINK_COMP_ARRAY(COMP_CODEC("twl4030-codec", "twl4030-hifi")),
DAILINK_COMP_ARRAY(COMP_PLATFORM("omap-mcbsp.2")));
SND_SOC_DAILINK_DEFS(voice,
DAILINK_COMP_ARRAY(COMP_CPU("omap-mcbsp.3")),
DAILINK_COMP_ARRAY(COMP_CODEC("twl4030-codec", "twl4030-voice")),
DAILINK_COMP_ARRAY(COMP_PLATFORM("omap-mcbsp.3")));
static struct snd_soc_dai_link omap_twl4030_dai_links[] = {
{
.name = "TWL4030 HiFi",
.stream_name = "TWL4030 HiFi",
.init = omap_twl4030_init,
.ops = &omap_twl4030_ops,
SND_SOC_DAILINK_REG(hifi),
},
{
.name = "TWL4030 Voice",
.stream_name = "TWL4030 Voice",
.dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_IB_NF |
SND_SOC_DAIFMT_CBM_CFM,
SND_SOC_DAILINK_REG(voice),
},
};
/* Audio machine driver */
static struct snd_soc_card omap_twl4030_card = {
.owner = THIS_MODULE,
.dai_link = omap_twl4030_dai_links,
.num_links = ARRAY_SIZE(omap_twl4030_dai_links),
.dapm_widgets = dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(dapm_widgets),
.dapm_routes = audio_map,
.num_dapm_routes = ARRAY_SIZE(audio_map),
};
static int omap_twl4030_probe(struct platform_device *pdev)
{
struct omap_tw4030_pdata *pdata = dev_get_platdata(&pdev->dev);
struct device_node *node = pdev->dev.of_node;
struct snd_soc_card *card = &omap_twl4030_card;
struct omap_twl4030 *priv;
int ret = 0;
card->dev = &pdev->dev;
priv = devm_kzalloc(&pdev->dev, sizeof(struct omap_twl4030), GFP_KERNEL);
if (priv == NULL)
return -ENOMEM;
if (node) {
struct device_node *dai_node;
struct property *prop;
if (snd_soc_of_parse_card_name(card, "ti,model")) {
dev_err(&pdev->dev, "Card name is not provided\n");
return -ENODEV;
}
dai_node = of_parse_phandle(node, "ti,mcbsp", 0);
if (!dai_node) {
dev_err(&pdev->dev, "McBSP node is not provided\n");
return -EINVAL;
}
omap_twl4030_dai_links[0].cpus->dai_name = NULL;
omap_twl4030_dai_links[0].cpus->of_node = dai_node;
omap_twl4030_dai_links[0].platforms->name = NULL;
omap_twl4030_dai_links[0].platforms->of_node = dai_node;
dai_node = of_parse_phandle(node, "ti,mcbsp-voice", 0);
if (!dai_node) {
card->num_links = 1;
} else {
omap_twl4030_dai_links[1].cpus->dai_name = NULL;
omap_twl4030_dai_links[1].cpus->of_node = dai_node;
omap_twl4030_dai_links[1].platforms->name = NULL;
omap_twl4030_dai_links[1].platforms->of_node = dai_node;
}
priv->jack_detect = of_get_named_gpio(node,
"ti,jack-det-gpio", 0);
/* Optional: audio routing can be provided */
prop = of_find_property(node, "ti,audio-routing", NULL);
if (prop) {
ret = snd_soc_of_parse_audio_routing(card,
"ti,audio-routing");
if (ret)
return ret;
card->fully_routed = 1;
}
} else if (pdata) {
if (pdata->card_name) {
card->name = pdata->card_name;
} else {
dev_err(&pdev->dev, "Card name is not provided\n");
return -ENODEV;
}
if (!pdata->voice_connected)
card->num_links = 1;
priv->jack_detect = pdata->jack_detect;
} else {
dev_err(&pdev->dev, "Missing pdata\n");
return -ENODEV;
}
snd_soc_card_set_drvdata(card, priv);
ret = devm_snd_soc_register_card(&pdev->dev, card);
if (ret) {
dev_err(&pdev->dev, "devm_snd_soc_register_card() failed: %d\n",
ret);
return ret;
}
return 0;
}
static const struct of_device_id omap_twl4030_of_match[] = {
{.compatible = "ti,omap-twl4030", },
{ },
};
MODULE_DEVICE_TABLE(of, omap_twl4030_of_match);
static struct platform_driver omap_twl4030_driver = {
.driver = {
.name = "omap-twl4030",
.pm = &snd_soc_pm_ops,
.of_match_table = omap_twl4030_of_match,
},
.probe = omap_twl4030_probe,
};
module_platform_driver(omap_twl4030_driver);
MODULE_AUTHOR("Peter Ujfalusi <[email protected]>");
MODULE_DESCRIPTION("ALSA SoC for TI SoC based boards with twl4030 codec");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:omap-twl4030");
| linux-master | sound/soc/ti/omap-twl4030.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ams-delta.c -- SoC audio for Amstrad E3 (Delta) videophone
*
* Copyright (C) 2009 Janusz Krzysztofik <[email protected]>
*
* Initially based on sound/soc/omap/osk5912.x
* Copyright (C) 2008 Mistral Solutions
*/
#include <linux/gpio/consumer.h>
#include <linux/spinlock.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <sound/soc.h>
#include <sound/jack.h>
#include <asm/mach-types.h>
#include <linux/platform_data/asoc-ti-mcbsp.h>
#include "omap-mcbsp.h"
#include "../codecs/cx20442.h"
static struct gpio_desc *handset_mute;
static struct gpio_desc *handsfree_mute;
static int ams_delta_event_handset(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
gpiod_set_value_cansleep(handset_mute, !SND_SOC_DAPM_EVENT_ON(event));
return 0;
}
static int ams_delta_event_handsfree(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
gpiod_set_value_cansleep(handsfree_mute, !SND_SOC_DAPM_EVENT_ON(event));
return 0;
}
/* Board specific DAPM widgets */
static const struct snd_soc_dapm_widget ams_delta_dapm_widgets[] = {
/* Handset */
SND_SOC_DAPM_MIC("Mouthpiece", NULL),
SND_SOC_DAPM_HP("Earpiece", ams_delta_event_handset),
/* Handsfree/Speakerphone */
SND_SOC_DAPM_MIC("Microphone", NULL),
SND_SOC_DAPM_SPK("Speaker", ams_delta_event_handsfree),
};
/* How they are connected to codec pins */
static const struct snd_soc_dapm_route ams_delta_audio_map[] = {
{"TELIN", NULL, "Mouthpiece"},
{"Earpiece", NULL, "TELOUT"},
{"MIC", NULL, "Microphone"},
{"Speaker", NULL, "SPKOUT"},
};
/*
* Controls, functional after the modem line discipline is activated.
*/
/* Virtual switch: audio input/output constellations */
static const char *ams_delta_audio_mode[] =
{"Mixed", "Handset", "Handsfree", "Speakerphone"};
/* Selection <-> pin translation */
#define AMS_DELTA_MOUTHPIECE 0
#define AMS_DELTA_EARPIECE 1
#define AMS_DELTA_MICROPHONE 2
#define AMS_DELTA_SPEAKER 3
#define AMS_DELTA_AGC 4
#define AMS_DELTA_MIXED ((1 << AMS_DELTA_EARPIECE) | \
(1 << AMS_DELTA_MICROPHONE))
#define AMS_DELTA_HANDSET ((1 << AMS_DELTA_MOUTHPIECE) | \
(1 << AMS_DELTA_EARPIECE))
#define AMS_DELTA_HANDSFREE ((1 << AMS_DELTA_MICROPHONE) | \
(1 << AMS_DELTA_SPEAKER))
#define AMS_DELTA_SPEAKERPHONE (AMS_DELTA_HANDSFREE | (1 << AMS_DELTA_AGC))
static const unsigned short ams_delta_audio_mode_pins[] = {
AMS_DELTA_MIXED,
AMS_DELTA_HANDSET,
AMS_DELTA_HANDSFREE,
AMS_DELTA_SPEAKERPHONE,
};
static unsigned short ams_delta_audio_agc;
/*
* Used for passing a codec structure pointer
* from the board initialization code to the tty line discipline.
*/
static struct snd_soc_component *cx20442_codec;
static int ams_delta_set_audio_mode(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
struct snd_soc_dapm_context *dapm = &card->dapm;
struct soc_enum *control = (struct soc_enum *)kcontrol->private_value;
unsigned short pins;
int pin, changed = 0;
/* Refuse any mode changes if we are not able to control the codec. */
if (!cx20442_codec->card->pop_time)
return -EUNATCH;
if (ucontrol->value.enumerated.item[0] >= control->items)
return -EINVAL;
snd_soc_dapm_mutex_lock(dapm);
/* Translate selection to bitmap */
pins = ams_delta_audio_mode_pins[ucontrol->value.enumerated.item[0]];
/* Setup pins after corresponding bits if changed */
pin = !!(pins & (1 << AMS_DELTA_MOUTHPIECE));
if (pin != snd_soc_dapm_get_pin_status(dapm, "Mouthpiece")) {
changed = 1;
if (pin)
snd_soc_dapm_enable_pin_unlocked(dapm, "Mouthpiece");
else
snd_soc_dapm_disable_pin_unlocked(dapm, "Mouthpiece");
}
pin = !!(pins & (1 << AMS_DELTA_EARPIECE));
if (pin != snd_soc_dapm_get_pin_status(dapm, "Earpiece")) {
changed = 1;
if (pin)
snd_soc_dapm_enable_pin_unlocked(dapm, "Earpiece");
else
snd_soc_dapm_disable_pin_unlocked(dapm, "Earpiece");
}
pin = !!(pins & (1 << AMS_DELTA_MICROPHONE));
if (pin != snd_soc_dapm_get_pin_status(dapm, "Microphone")) {
changed = 1;
if (pin)
snd_soc_dapm_enable_pin_unlocked(dapm, "Microphone");
else
snd_soc_dapm_disable_pin_unlocked(dapm, "Microphone");
}
pin = !!(pins & (1 << AMS_DELTA_SPEAKER));
if (pin != snd_soc_dapm_get_pin_status(dapm, "Speaker")) {
changed = 1;
if (pin)
snd_soc_dapm_enable_pin_unlocked(dapm, "Speaker");
else
snd_soc_dapm_disable_pin_unlocked(dapm, "Speaker");
}
pin = !!(pins & (1 << AMS_DELTA_AGC));
if (pin != ams_delta_audio_agc) {
ams_delta_audio_agc = pin;
changed = 1;
if (pin)
snd_soc_dapm_enable_pin_unlocked(dapm, "AGCIN");
else
snd_soc_dapm_disable_pin_unlocked(dapm, "AGCIN");
}
if (changed)
snd_soc_dapm_sync_unlocked(dapm);
snd_soc_dapm_mutex_unlock(dapm);
return changed;
}
static int ams_delta_get_audio_mode(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
struct snd_soc_dapm_context *dapm = &card->dapm;
unsigned short pins, mode;
pins = ((snd_soc_dapm_get_pin_status(dapm, "Mouthpiece") <<
AMS_DELTA_MOUTHPIECE) |
(snd_soc_dapm_get_pin_status(dapm, "Earpiece") <<
AMS_DELTA_EARPIECE));
if (pins)
pins |= (snd_soc_dapm_get_pin_status(dapm, "Microphone") <<
AMS_DELTA_MICROPHONE);
else
pins = ((snd_soc_dapm_get_pin_status(dapm, "Microphone") <<
AMS_DELTA_MICROPHONE) |
(snd_soc_dapm_get_pin_status(dapm, "Speaker") <<
AMS_DELTA_SPEAKER) |
(ams_delta_audio_agc << AMS_DELTA_AGC));
for (mode = 0; mode < ARRAY_SIZE(ams_delta_audio_mode); mode++)
if (pins == ams_delta_audio_mode_pins[mode])
break;
if (mode >= ARRAY_SIZE(ams_delta_audio_mode))
return -EINVAL;
ucontrol->value.enumerated.item[0] = mode;
return 0;
}
static SOC_ENUM_SINGLE_EXT_DECL(ams_delta_audio_enum,
ams_delta_audio_mode);
static const struct snd_kcontrol_new ams_delta_audio_controls[] = {
SOC_ENUM_EXT("Audio Mode", ams_delta_audio_enum,
ams_delta_get_audio_mode, ams_delta_set_audio_mode),
};
/* Hook switch */
static struct snd_soc_jack ams_delta_hook_switch;
static struct snd_soc_jack_gpio ams_delta_hook_switch_gpios[] = {
{
.name = "hook_switch",
.report = SND_JACK_HEADSET,
.invert = 1,
.debounce_time = 150,
}
};
/* After we are able to control the codec over the modem,
* the hook switch can be used for dynamic DAPM reconfiguration. */
static struct snd_soc_jack_pin ams_delta_hook_switch_pins[] = {
/* Handset */
{
.pin = "Mouthpiece",
.mask = SND_JACK_MICROPHONE,
},
{
.pin = "Earpiece",
.mask = SND_JACK_HEADPHONE,
},
/* Handsfree */
{
.pin = "Microphone",
.mask = SND_JACK_MICROPHONE,
.invert = 1,
},
{
.pin = "Speaker",
.mask = SND_JACK_HEADPHONE,
.invert = 1,
},
};
/*
* Modem line discipline, required for making above controls functional.
* Activated from userspace with ldattach, possibly invoked from udev rule.
*/
/* To actually apply any modem controlled configuration changes to the codec,
* we must connect codec DAI pins to the modem for a moment. Be careful not
* to interfere with our digital mute function that shares the same hardware. */
static struct timer_list cx81801_timer;
static bool cx81801_cmd_pending;
static bool ams_delta_muted;
static DEFINE_SPINLOCK(ams_delta_lock);
static struct gpio_desc *gpiod_modem_codec;
static void cx81801_timeout(struct timer_list *unused)
{
int muted;
spin_lock(&ams_delta_lock);
cx81801_cmd_pending = 0;
muted = ams_delta_muted;
spin_unlock(&ams_delta_lock);
/* Reconnect the codec DAI back from the modem to the CPU DAI
* only if digital mute is still off */
if (!muted)
gpiod_set_value(gpiod_modem_codec, 0);
}
/* Line discipline .open() */
static int cx81801_open(struct tty_struct *tty)
{
int ret;
if (!cx20442_codec)
return -ENODEV;
/*
* Pass the codec structure pointer for use by other ldisc callbacks,
* both the card and the codec specific parts.
*/
tty->disc_data = cx20442_codec;
ret = v253_ops.open(tty);
if (ret < 0)
tty->disc_data = NULL;
return ret;
}
/* Line discipline .close() */
static void cx81801_close(struct tty_struct *tty)
{
struct snd_soc_component *component = tty->disc_data;
struct snd_soc_dapm_context *dapm;
del_timer_sync(&cx81801_timer);
/* Prevent the hook switch from further changing the DAPM pins */
INIT_LIST_HEAD(&ams_delta_hook_switch.pins);
if (!component)
return;
/* Only dereference the codec once we know the ldisc was fully set up */
dapm = &component->card->dapm;
v253_ops.close(tty);
/* Revert back to default audio input/output constellation */
snd_soc_dapm_mutex_lock(dapm);
snd_soc_dapm_disable_pin_unlocked(dapm, "Mouthpiece");
snd_soc_dapm_enable_pin_unlocked(dapm, "Earpiece");
snd_soc_dapm_enable_pin_unlocked(dapm, "Microphone");
snd_soc_dapm_disable_pin_unlocked(dapm, "Speaker");
snd_soc_dapm_disable_pin_unlocked(dapm, "AGCIN");
snd_soc_dapm_sync_unlocked(dapm);
snd_soc_dapm_mutex_unlock(dapm);
}
/* Line discipline .hangup() */
static void cx81801_hangup(struct tty_struct *tty)
{
cx81801_close(tty);
}
/* Line discipline .receive_buf() */
static void cx81801_receive(struct tty_struct *tty, const u8 *cp,
const char *fp, int count)
{
struct snd_soc_component *component = tty->disc_data;
const unsigned char *c;
int apply, ret;
if (!component)
return;
if (!component->card->pop_time) {
/* First modem response, complete setup procedure */
/* Initialize timer used for config pulse generation */
timer_setup(&cx81801_timer, cx81801_timeout, 0);
v253_ops.receive_buf(tty, cp, fp, count);
/* Link hook switch to DAPM pins */
ret = snd_soc_jack_add_pins(&ams_delta_hook_switch,
ARRAY_SIZE(ams_delta_hook_switch_pins),
ams_delta_hook_switch_pins);
if (ret)
dev_warn(component->dev,
"Failed to link hook switch to DAPM pins, "
"will continue with hook switch unlinked.\n");
return;
}
v253_ops.receive_buf(tty, cp, fp, count);
for (c = &cp[count - 1]; c >= cp; c--) {
if (*c != '\r')
continue;
/* Complete modem response received, apply config to codec */
spin_lock_bh(&ams_delta_lock);
mod_timer(&cx81801_timer, jiffies + msecs_to_jiffies(150));
apply = !ams_delta_muted && !cx81801_cmd_pending;
cx81801_cmd_pending = 1;
spin_unlock_bh(&ams_delta_lock);
/* Apply config pulse by connecting the codec to the modem
* if not already done */
if (apply)
gpiod_set_value(gpiod_modem_codec, 1);
break;
}
}
/* Line discipline .write_wakeup() */
static void cx81801_wakeup(struct tty_struct *tty)
{
v253_ops.write_wakeup(tty);
}
static struct tty_ldisc_ops cx81801_ops = {
.name = "cx81801",
.num = N_V253,
.owner = THIS_MODULE,
.open = cx81801_open,
.close = cx81801_close,
.hangup = cx81801_hangup,
.receive_buf = cx81801_receive,
.write_wakeup = cx81801_wakeup,
};
/*
* Even if not very useful, the sound card can still work without any of the
* above functionality activated. You can still control its audio input/output
* constellation and speakerphone gain from userspace by issuing AT commands
* over the modem port.
*/
static struct snd_soc_ops ams_delta_ops;
/* Digital mute implemented using modem/CPU multiplexer.
* Shares hardware with codec config pulse generation */
static bool ams_delta_muted = 1;
static int ams_delta_mute(struct snd_soc_dai *dai, int mute, int direction)
{
int apply;
if (ams_delta_muted == mute)
return 0;
spin_lock_bh(&ams_delta_lock);
ams_delta_muted = mute;
apply = !cx81801_cmd_pending;
spin_unlock_bh(&ams_delta_lock);
if (apply)
gpiod_set_value(gpiod_modem_codec, !!mute);
return 0;
}
/* Our codec DAI probably doesn't have its own .ops structure */
static const struct snd_soc_dai_ops ams_delta_dai_ops = {
.mute_stream = ams_delta_mute,
.no_capture_mute = 1,
};
/* Will be used if the codec ever has its own digital_mute function */
static int ams_delta_startup(struct snd_pcm_substream *substream)
{
return ams_delta_mute(NULL, 0, substream->stream);
}
static void ams_delta_shutdown(struct snd_pcm_substream *substream)
{
ams_delta_mute(NULL, 1, substream->stream);
}
/*
* Card initialization
*/
static int ams_delta_cx20442_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_card *card = rtd->card;
struct snd_soc_dapm_context *dapm = &card->dapm;
int ret;
/* Codec is ready, now add/activate board specific controls */
/* Store a pointer to the codec structure for tty ldisc use */
cx20442_codec = asoc_rtd_to_codec(rtd, 0)->component;
/* Add hook switch - can be used to control the codec from userspace
* even if line discipline fails */
ret = snd_soc_card_jack_new_pins(card, "hook_switch", SND_JACK_HEADSET,
&ams_delta_hook_switch, NULL, 0);
if (ret)
dev_warn(card->dev,
"Failed to allocate resources for hook switch, "
"will continue without one.\n");
else {
ret = snd_soc_jack_add_gpiods(card->dev, &ams_delta_hook_switch,
ARRAY_SIZE(ams_delta_hook_switch_gpios),
ams_delta_hook_switch_gpios);
if (ret)
dev_warn(card->dev,
"Failed to set up hook switch GPIO line, "
"will continue with hook switch inactive.\n");
}
gpiod_modem_codec = devm_gpiod_get(card->dev, "modem_codec",
GPIOD_OUT_HIGH);
if (IS_ERR(gpiod_modem_codec)) {
dev_warn(card->dev, "Failed to obtain modem_codec GPIO\n");
return 0;
}
/* Set up digital mute if not provided by the codec */
if (!codec_dai->driver->ops) {
codec_dai->driver->ops = &ams_delta_dai_ops;
} else {
ams_delta_ops.startup = ams_delta_startup;
ams_delta_ops.shutdown = ams_delta_shutdown;
}
/* Register optional line discipline for over the modem control */
ret = tty_register_ldisc(&cx81801_ops);
if (ret) {
dev_warn(card->dev,
"Failed to register line discipline, "
"will continue without any controls.\n");
return 0;
}
/* Set up initial pin constellation */
snd_soc_dapm_disable_pin(dapm, "Mouthpiece");
snd_soc_dapm_disable_pin(dapm, "Speaker");
snd_soc_dapm_disable_pin(dapm, "AGCIN");
snd_soc_dapm_disable_pin(dapm, "AGCOUT");
return 0;
}
/* DAI glue - connects codec <--> CPU */
SND_SOC_DAILINK_DEFS(cx20442,
DAILINK_COMP_ARRAY(COMP_CPU("omap-mcbsp.1")),
DAILINK_COMP_ARRAY(COMP_CODEC("cx20442-codec", "cx20442-voice")),
DAILINK_COMP_ARRAY(COMP_PLATFORM("omap-mcbsp.1")));
static struct snd_soc_dai_link ams_delta_dai_link = {
.name = "CX20442",
.stream_name = "CX20442",
.init = ams_delta_cx20442_init,
.ops = &ams_delta_ops,
.dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBM_CFM,
SND_SOC_DAILINK_REG(cx20442),
};
/* Audio card driver */
static struct snd_soc_card ams_delta_audio_card = {
.name = "AMS_DELTA",
.owner = THIS_MODULE,
.dai_link = &ams_delta_dai_link,
.num_links = 1,
.controls = ams_delta_audio_controls,
.num_controls = ARRAY_SIZE(ams_delta_audio_controls),
.dapm_widgets = ams_delta_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(ams_delta_dapm_widgets),
.dapm_routes = ams_delta_audio_map,
.num_dapm_routes = ARRAY_SIZE(ams_delta_audio_map),
};
/* Module init/exit */
static int ams_delta_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &ams_delta_audio_card;
int ret;
card->dev = &pdev->dev;
handset_mute = devm_gpiod_get(card->dev, "handset_mute",
GPIOD_OUT_HIGH);
if (IS_ERR(handset_mute))
return PTR_ERR(handset_mute);
handsfree_mute = devm_gpiod_get(card->dev, "handsfree_mute",
GPIOD_OUT_HIGH);
if (IS_ERR(handsfree_mute))
return PTR_ERR(handsfree_mute);
ret = snd_soc_register_card(card);
if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
card->dev = NULL;
return ret;
}
return 0;
}
static void ams_delta_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
tty_unregister_ldisc(&cx81801_ops);
snd_soc_unregister_card(card);
card->dev = NULL;
}
#define DRV_NAME "ams-delta-audio"
static struct platform_driver ams_delta_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = ams_delta_probe,
.remove_new = ams_delta_remove,
};
module_platform_driver(ams_delta_driver);
MODULE_AUTHOR("Janusz Krzysztofik <[email protected]>");
MODULE_DESCRIPTION("ALSA SoC driver for Amstrad E3 (Delta) videophone");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | sound/soc/ti/ams-delta.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* omap-hdmi-audio.c -- OMAP4+ DSS HDMI audio support library
*
* Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com
*
* Author: Jyri Sarha <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <sound/soc.h>
#include <sound/pcm_params.h>
#include <sound/dmaengine_pcm.h>
#include <uapi/sound/asound.h>
#include <sound/asoundef.h>
#include <sound/omap-hdmi-audio.h>
#include "sdma-pcm.h"
#define DRV_NAME "omap-hdmi-audio"
struct hdmi_audio_data {
struct snd_soc_card *card;
const struct omap_hdmi_audio_ops *ops;
struct device *dssdev;
struct snd_dmaengine_dai_dma_data dma_data;
struct omap_dss_audio dss_audio;
struct snd_aes_iec958 iec;
struct snd_cea_861_aud_if cea;
struct mutex current_stream_lock;
struct snd_pcm_substream *current_stream;
};
static
struct hdmi_audio_data *card_drvdata_substream(struct snd_pcm_substream *ss)
{
struct snd_soc_pcm_runtime *rtd = ss->private_data;
return snd_soc_card_get_drvdata(rtd->card);
}
static void hdmi_dai_abort(struct device *dev)
{
struct hdmi_audio_data *ad = dev_get_drvdata(dev);
mutex_lock(&ad->current_stream_lock);
if (ad->current_stream && ad->current_stream->runtime &&
snd_pcm_running(ad->current_stream)) {
dev_err(dev, "HDMI display disabled, aborting playback\n");
snd_pcm_stream_lock_irq(ad->current_stream);
snd_pcm_stop(ad->current_stream, SNDRV_PCM_STATE_DISCONNECTED);
snd_pcm_stream_unlock_irq(ad->current_stream);
}
mutex_unlock(&ad->current_stream_lock);
}
static int hdmi_dai_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct hdmi_audio_data *ad = card_drvdata_substream(substream);
int ret;
/*
* Make sure that the period bytes are multiple of the DMA packet size.
* Largest packet size we use is 32 32-bit words = 128 bytes
*/
ret = snd_pcm_hw_constraint_step(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 128);
if (ret < 0) {
dev_err(dai->dev, "Could not apply period constraint: %d\n",
ret);
return ret;
}
ret = snd_pcm_hw_constraint_step(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 128);
if (ret < 0) {
dev_err(dai->dev, "Could not apply buffer constraint: %d\n",
ret);
return ret;
}
snd_soc_dai_set_dma_data(dai, substream, &ad->dma_data);
mutex_lock(&ad->current_stream_lock);
ad->current_stream = substream;
mutex_unlock(&ad->current_stream_lock);
ret = ad->ops->audio_startup(ad->dssdev, hdmi_dai_abort);
if (ret) {
mutex_lock(&ad->current_stream_lock);
ad->current_stream = NULL;
mutex_unlock(&ad->current_stream_lock);
}
return ret;
}
static int hdmi_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct hdmi_audio_data *ad = card_drvdata_substream(substream);
struct snd_aes_iec958 *iec = &ad->iec;
struct snd_cea_861_aud_if *cea = &ad->cea;
WARN_ON(ad->current_stream != substream);
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
ad->dma_data.maxburst = 16;
break;
case SNDRV_PCM_FORMAT_S24_LE:
ad->dma_data.maxburst = 32;
break;
default:
dev_err(dai->dev, "format not supported!\n");
return -EINVAL;
}
ad->dss_audio.iec = iec;
ad->dss_audio.cea = cea;
/*
* fill the IEC-60958 channel status word
*/
/* initialize the word bytes */
memset(iec->status, 0, sizeof(iec->status));
/* specify IEC-60958-3 (commercial use) */
iec->status[0] &= ~IEC958_AES0_PROFESSIONAL;
/* specify that the audio is LPCM*/
iec->status[0] &= ~IEC958_AES0_NONAUDIO;
iec->status[0] |= IEC958_AES0_CON_NOT_COPYRIGHT;
iec->status[0] |= IEC958_AES0_CON_EMPHASIS_NONE;
iec->status[1] = IEC958_AES1_CON_GENERAL;
iec->status[2] |= IEC958_AES2_CON_SOURCE_UNSPEC;
iec->status[2] |= IEC958_AES2_CON_CHANNEL_UNSPEC;
switch (params_rate(params)) {
case 32000:
iec->status[3] |= IEC958_AES3_CON_FS_32000;
break;
case 44100:
iec->status[3] |= IEC958_AES3_CON_FS_44100;
break;
case 48000:
iec->status[3] |= IEC958_AES3_CON_FS_48000;
break;
case 88200:
iec->status[3] |= IEC958_AES3_CON_FS_88200;
break;
case 96000:
iec->status[3] |= IEC958_AES3_CON_FS_96000;
break;
case 176400:
iec->status[3] |= IEC958_AES3_CON_FS_176400;
break;
case 192000:
iec->status[3] |= IEC958_AES3_CON_FS_192000;
break;
default:
dev_err(dai->dev, "rate not supported!\n");
return -EINVAL;
}
/* specify the clock accuracy */
iec->status[3] |= IEC958_AES3_CON_CLOCK_1000PPM;
/*
* specify the word length. The same word length value can mean
* two different lengths. Hence, we need to specify the maximum
* word length as well.
*/
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
iec->status[4] |= IEC958_AES4_CON_WORDLEN_20_16;
iec->status[4] &= ~IEC958_AES4_CON_MAX_WORDLEN_24;
break;
case SNDRV_PCM_FORMAT_S24_LE:
iec->status[4] |= IEC958_AES4_CON_WORDLEN_24_20;
iec->status[4] |= IEC958_AES4_CON_MAX_WORDLEN_24;
break;
default:
dev_err(dai->dev, "format not supported!\n");
return -EINVAL;
}
/*
* Fill the CEA-861 audio infoframe (see spec for details)
*/
cea->db1_ct_cc = (params_channels(params) - 1)
& CEA861_AUDIO_INFOFRAME_DB1CC;
cea->db1_ct_cc |= CEA861_AUDIO_INFOFRAME_DB1CT_FROM_STREAM;
cea->db2_sf_ss = CEA861_AUDIO_INFOFRAME_DB2SF_FROM_STREAM;
cea->db2_sf_ss |= CEA861_AUDIO_INFOFRAME_DB2SS_FROM_STREAM;
cea->db3 = 0; /* not used, all zeros */
if (params_channels(params) == 2)
cea->db4_ca = 0x0;
else if (params_channels(params) == 6)
cea->db4_ca = 0xb;
else
cea->db4_ca = 0x13;
if (cea->db4_ca == 0x00)
cea->db5_dminh_lsv = CEA861_AUDIO_INFOFRAME_DB5_DM_INH_PERMITTED;
else
cea->db5_dminh_lsv = CEA861_AUDIO_INFOFRAME_DB5_DM_INH_PROHIBITED;
/* the expression is trivial but makes clear what we are doing */
cea->db5_dminh_lsv |= (0 & CEA861_AUDIO_INFOFRAME_DB5_LSV);
return ad->ops->audio_config(ad->dssdev, &ad->dss_audio);
}
static int hdmi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
struct hdmi_audio_data *ad = card_drvdata_substream(substream);
int err = 0;
WARN_ON(ad->current_stream != substream);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
err = ad->ops->audio_start(ad->dssdev);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
ad->ops->audio_stop(ad->dssdev);
break;
default:
err = -EINVAL;
}
return err;
}
static void hdmi_dai_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct hdmi_audio_data *ad = card_drvdata_substream(substream);
WARN_ON(ad->current_stream != substream);
ad->ops->audio_shutdown(ad->dssdev);
mutex_lock(&ad->current_stream_lock);
ad->current_stream = NULL;
mutex_unlock(&ad->current_stream_lock);
}
static const struct snd_soc_dai_ops hdmi_dai_ops = {
.startup = hdmi_dai_startup,
.hw_params = hdmi_dai_hw_params,
.trigger = hdmi_dai_trigger,
.shutdown = hdmi_dai_shutdown,
};
static const struct snd_soc_component_driver omap_hdmi_component = {
.name = "omapdss_hdmi",
.legacy_dai_naming = 1,
};
static struct snd_soc_dai_driver omap5_hdmi_dai = {
.name = "omap5-hdmi-dai",
.playback = {
.channels_min = 2,
.channels_max = 8,
.rates = (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
SNDRV_PCM_RATE_192000),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
.ops = &hdmi_dai_ops,
};
static struct snd_soc_dai_driver omap4_hdmi_dai = {
.name = "omap4-hdmi-dai",
.playback = {
.channels_min = 2,
.channels_max = 8,
.rates = (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |
SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |
SNDRV_PCM_RATE_192000),
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
},
.ops = &hdmi_dai_ops,
};
static int omap_hdmi_audio_probe(struct platform_device *pdev)
{
struct omap_hdmi_audio_pdata *ha = pdev->dev.platform_data;
struct device *dev = &pdev->dev;
struct hdmi_audio_data *ad;
struct snd_soc_dai_driver *dai_drv;
struct snd_soc_card *card;
struct snd_soc_dai_link_component *component;
int ret;
if (!ha) {
dev_err(dev, "No platform data\n");
return -EINVAL;
}
ad = devm_kzalloc(dev, sizeof(*ad), GFP_KERNEL);
if (!ad)
return -ENOMEM;
ad->dssdev = ha->dev;
ad->ops = ha->ops;
ad->dma_data.addr = ha->audio_dma_addr;
ad->dma_data.filter_data = "audio_tx";
ad->dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
mutex_init(&ad->current_stream_lock);
switch (ha->version) {
case 4:
dai_drv = &omap4_hdmi_dai;
break;
case 5:
dai_drv = &omap5_hdmi_dai;
break;
default:
return -EINVAL;
}
ret = devm_snd_soc_register_component(ad->dssdev, &omap_hdmi_component,
dai_drv, 1);
if (ret)
return ret;
ret = sdma_pcm_platform_register(ad->dssdev, "audio_tx", NULL);
if (ret)
return ret;
card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
if (!card)
return -ENOMEM;
card->name = devm_kasprintf(dev, GFP_KERNEL,
"HDMI %s", dev_name(ad->dssdev));
if (!card->name)
return -ENOMEM;
card->owner = THIS_MODULE;
card->dai_link =
devm_kzalloc(dev, sizeof(*(card->dai_link)), GFP_KERNEL);
if (!card->dai_link)
return -ENOMEM;
component = devm_kzalloc(dev, sizeof(*component), GFP_KERNEL);
if (!component)
return -ENOMEM;
card->dai_link->cpus = component;
card->dai_link->num_cpus = 1;
card->dai_link->codecs = &asoc_dummy_dlc;
card->dai_link->num_codecs = 1;
card->dai_link->name = card->name;
card->dai_link->stream_name = card->name;
card->dai_link->cpus->dai_name = dev_name(ad->dssdev);
card->num_links = 1;
card->dev = dev;
ret = snd_soc_register_card(card);
if (ret) {
dev_err(dev, "snd_soc_register_card failed (%d)\n", ret);
return ret;
}
ad->card = card;
snd_soc_card_set_drvdata(card, ad);
dev_set_drvdata(dev, ad);
return 0;
}
static void omap_hdmi_audio_remove(struct platform_device *pdev)
{
struct hdmi_audio_data *ad = platform_get_drvdata(pdev);
snd_soc_unregister_card(ad->card);
}
static struct platform_driver hdmi_audio_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = omap_hdmi_audio_probe,
.remove_new = omap_hdmi_audio_remove,
};
module_platform_driver(hdmi_audio_driver);
MODULE_AUTHOR("Jyri Sarha <[email protected]>");
MODULE_DESCRIPTION("OMAP HDMI Audio Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | sound/soc/ti/omap-hdmi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ASoC driver for TI DAVINCI EVM platform
*
* Author: Vladimir Barinov, <[email protected]>
* Copyright: (C) 2007 MontaVista Software, Inc., <[email protected]>
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <asm/dma.h>
#include <asm/mach-types.h>
struct snd_soc_card_drvdata_davinci {
struct clk *mclk;
unsigned sysclk;
};
static int evm_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_card *soc_card = rtd->card;
struct snd_soc_card_drvdata_davinci *drvdata =
snd_soc_card_get_drvdata(soc_card);
if (drvdata->mclk)
return clk_prepare_enable(drvdata->mclk);
return 0;
}
static void evm_shutdown(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_card *soc_card = rtd->card;
struct snd_soc_card_drvdata_davinci *drvdata =
snd_soc_card_get_drvdata(soc_card);
clk_disable_unprepare(drvdata->mclk);
}
static int evm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct snd_soc_card *soc_card = rtd->card;
int ret = 0;
unsigned sysclk = ((struct snd_soc_card_drvdata_davinci *)
snd_soc_card_get_drvdata(soc_card))->sysclk;
/* set the codec system clock */
ret = snd_soc_dai_set_sysclk(codec_dai, 0, sysclk, SND_SOC_CLOCK_OUT);
if (ret < 0)
return ret;
/* set the CPU system clock */
ret = snd_soc_dai_set_sysclk(cpu_dai, 0, sysclk, SND_SOC_CLOCK_OUT);
if (ret < 0 && ret != -ENOTSUPP)
return ret;
return 0;
}
static const struct snd_soc_ops evm_ops = {
.startup = evm_startup,
.shutdown = evm_shutdown,
.hw_params = evm_hw_params,
};
/* davinci-evm machine dapm widgets */
static const struct snd_soc_dapm_widget aic3x_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphone Jack", NULL),
SND_SOC_DAPM_LINE("Line Out", NULL),
SND_SOC_DAPM_MIC("Mic Jack", NULL),
SND_SOC_DAPM_LINE("Line In", NULL),
};
/* davinci-evm machine audio_map: connections to the codec pins */
static const struct snd_soc_dapm_route audio_map[] = {
/* Headphone connected to HPLOUT, HPROUT */
{"Headphone Jack", NULL, "HPLOUT"},
{"Headphone Jack", NULL, "HPROUT"},
/* Line Out connected to LLOUT, RLOUT */
{"Line Out", NULL, "LLOUT"},
{"Line Out", NULL, "RLOUT"},
/* Mic connected to (MIC3L | MIC3R) */
{"MIC3L", NULL, "Mic Bias"},
{"MIC3R", NULL, "Mic Bias"},
{"Mic Bias", NULL, "Mic Jack"},
/* Line In connected to (LINE1L | LINE2L), (LINE1R | LINE2R) */
{"LINE1L", NULL, "Line In"},
{"LINE2L", NULL, "Line In"},
{"LINE1R", NULL, "Line In"},
{"LINE2R", NULL, "Line In"},
};
/* Logic for a aic3x as connected on a davinci-evm */
static int evm_aic3x_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_card *card = rtd->card;
struct device_node *np = card->dev->of_node;
int ret;
/* Add davinci-evm specific widgets */
snd_soc_dapm_new_controls(&card->dapm, aic3x_dapm_widgets,
ARRAY_SIZE(aic3x_dapm_widgets));
if (np) {
ret = snd_soc_of_parse_audio_routing(card, "ti,audio-routing");
if (ret)
return ret;
} else {
/* Set up davinci-evm specific audio path audio_map */
snd_soc_dapm_add_routes(&card->dapm, audio_map,
ARRAY_SIZE(audio_map));
}
/* not connected */
snd_soc_dapm_nc_pin(&card->dapm, "MONO_LOUT");
snd_soc_dapm_nc_pin(&card->dapm, "HPLCOM");
snd_soc_dapm_nc_pin(&card->dapm, "HPRCOM");
return 0;
}
/*
* The struct is used as a placeholder. It will be completely
* filled with data from the DT node.
*/
SND_SOC_DAILINK_DEFS(evm,
DAILINK_COMP_ARRAY(COMP_EMPTY()),
DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "tlv320aic3x-hifi")),
DAILINK_COMP_ARRAY(COMP_EMPTY()));
static struct snd_soc_dai_link evm_dai_tlv320aic3x = {
.name = "TLV320AIC3X",
.stream_name = "AIC3X",
.ops = &evm_ops,
.init = evm_aic3x_init,
.dai_fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_CBM_CFM |
SND_SOC_DAIFMT_IB_NF,
SND_SOC_DAILINK_REG(evm),
};
static const struct of_device_id davinci_evm_dt_ids[] = {
{
.compatible = "ti,da830-evm-audio",
.data = (void *) &evm_dai_tlv320aic3x,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, davinci_evm_dt_ids);
/* davinci evm audio machine driver */
static struct snd_soc_card evm_soc_card = {
.owner = THIS_MODULE,
.num_links = 1,
};
static int davinci_evm_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *match;
struct snd_soc_dai_link *dai;
struct snd_soc_card_drvdata_davinci *drvdata = NULL;
struct clk *mclk;
int ret = 0;
match = of_match_device(of_match_ptr(davinci_evm_dt_ids), &pdev->dev);
if (!match) {
dev_err(&pdev->dev, "Error: No device match found\n");
return -ENODEV;
}
dai = (struct snd_soc_dai_link *) match->data;
evm_soc_card.dai_link = dai;
dai->codecs->of_node = of_parse_phandle(np, "ti,audio-codec", 0);
if (!dai->codecs->of_node)
return -EINVAL;
dai->cpus->of_node = of_parse_phandle(np, "ti,mcasp-controller", 0);
if (!dai->cpus->of_node)
return -EINVAL;
dai->platforms->of_node = dai->cpus->of_node;
evm_soc_card.dev = &pdev->dev;
ret = snd_soc_of_parse_card_name(&evm_soc_card, "ti,model");
if (ret)
return ret;
mclk = devm_clk_get(&pdev->dev, "mclk");
if (PTR_ERR(mclk) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
} else if (IS_ERR(mclk)) {
dev_dbg(&pdev->dev, "mclk not found.\n");
mclk = NULL;
}
drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->mclk = mclk;
ret = of_property_read_u32(np, "ti,codec-clock-rate", &drvdata->sysclk);
if (ret < 0) {
if (!drvdata->mclk) {
dev_err(&pdev->dev,
"No clock or clock rate defined.\n");
return -EINVAL;
}
drvdata->sysclk = clk_get_rate(drvdata->mclk);
} else if (drvdata->mclk) {
unsigned int requested_rate = drvdata->sysclk;
clk_set_rate(drvdata->mclk, drvdata->sysclk);
drvdata->sysclk = clk_get_rate(drvdata->mclk);
if (drvdata->sysclk != requested_rate)
dev_warn(&pdev->dev,
"Could not get requested rate %u using %u.\n",
requested_rate, drvdata->sysclk);
}
snd_soc_card_set_drvdata(&evm_soc_card, drvdata);
ret = devm_snd_soc_register_card(&pdev->dev, &evm_soc_card);
if (ret)
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
return ret;
}
static struct platform_driver davinci_evm_driver = {
.probe = davinci_evm_probe,
.driver = {
.name = "davinci_evm",
.pm = &snd_soc_pm_ops,
.of_match_table = davinci_evm_dt_ids,
},
};
module_platform_driver(davinci_evm_driver);
MODULE_AUTHOR("Vladimir Barinov");
MODULE_DESCRIPTION("TI DAVINCI EVM ASoC driver");
MODULE_LICENSE("GPL");
| linux-master | sound/soc/ti/davinci-evm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* edma-pcm.c - eDMA PCM driver using dmaengine for AM3xxx, AM4xxx
*
* Copyright (C) 2014 Texas Instruments, Inc.
*
* Author: Peter Ujfalusi <[email protected]>
*
* Based on: sound/soc/tegra/tegra_pcm.c
*/
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>
#include "edma-pcm.h"
static const struct snd_pcm_hardware edma_pcm_hardware = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME |
SNDRV_PCM_INFO_NO_PERIOD_WAKEUP |
SNDRV_PCM_INFO_INTERLEAVED,
.buffer_bytes_max = 128 * 1024,
.period_bytes_min = 32,
.period_bytes_max = 64 * 1024,
.periods_min = 2,
.periods_max = 19, /* Limit by edma dmaengine driver */
};
static const struct snd_dmaengine_pcm_config edma_dmaengine_pcm_config = {
.pcm_hardware = &edma_pcm_hardware,
.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
.prealloc_buffer_size = 128 * 1024,
};
int edma_pcm_platform_register(struct device *dev)
{
struct snd_dmaengine_pcm_config *config;
if (dev->of_node)
return devm_snd_dmaengine_pcm_register(dev,
&edma_dmaengine_pcm_config, 0);
config = devm_kzalloc(dev, sizeof(*config), GFP_KERNEL);
if (!config)
return -ENOMEM;
*config = edma_dmaengine_pcm_config;
config->chan_names[0] = "tx";
config->chan_names[1] = "rx";
return devm_snd_dmaengine_pcm_register(dev, config, 0);
}
EXPORT_SYMBOL_GPL(edma_pcm_platform_register);
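/*
 * Illustrative usage sketch (not part of this file): a CPU DAI driver
 * typically registers its DAI component first and then attaches the eDMA
 * PCM back-end with this helper, as davinci-i2s.c does further below in
 * this tree. The function name and message text here are hypothetical.
 */
static inline int example_register_pcm(struct device *dev)
{
	int ret;

	/* ... the CPU DAI component for this device is registered first ... */

	ret = edma_pcm_platform_register(dev);
	if (ret)
		dev_err(dev, "eDMA PCM registration failed: %d\n", ret);

	return ret;
}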
MODULE_AUTHOR("Peter Ujfalusi <[email protected]>");
MODULE_DESCRIPTION("eDMA PCM ASoC platform driver");
MODULE_LICENSE("GPL");
| linux-master | sound/soc/ti/edma-pcm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ALSA SoC I2S (McBSP) Audio Layer for TI DAVINCI processor
*
* Author: Vladimir Barinov, <[email protected]>
* Copyright: (C) 2007 MontaVista Software, Inc., <[email protected]>
*
* DT support (c) 2016 Petr Kulhavy, Barix AG <[email protected]>
* based on davinci-mcasp.c DT support
*
* TODO:
* on DA850 implement HW FIFOs instead of DMA into DXR and DRR registers
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/platform_data/davinci_asp.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>
#include "edma-pcm.h"
#include "davinci-i2s.h"
#define DRV_NAME "davinci-i2s"
/*
* NOTE: terminology here is confusing.
*
* - This driver supports the "Audio Serial Port" (ASP),
* found on dm6446, dm355, and other DaVinci chips.
*
* - But it labels it a "Multi-channel Buffered Serial Port"
* (McBSP) as on older chips like the dm642 ... which was
* backward-compatible, possibly explaining that confusion.
*
* - OMAP chips have a controller called McBSP, which is
* incompatible with the DaVinci flavor of McBSP.
*
* - Newer DaVinci chips have a controller called McASP,
* incompatible with ASP and with either McBSP.
*
* In short: this uses ASP to implement I2S, not McBSP.
* And it won't be the only DaVinci implementation of I2S.
*/
#define DAVINCI_MCBSP_DRR_REG 0x00
#define DAVINCI_MCBSP_DXR_REG 0x04
#define DAVINCI_MCBSP_SPCR_REG 0x08
#define DAVINCI_MCBSP_RCR_REG 0x0c
#define DAVINCI_MCBSP_XCR_REG 0x10
#define DAVINCI_MCBSP_SRGR_REG 0x14
#define DAVINCI_MCBSP_PCR_REG 0x24
#define DAVINCI_MCBSP_SPCR_RRST (1 << 0)
#define DAVINCI_MCBSP_SPCR_RINTM(v) ((v) << 4)
#define DAVINCI_MCBSP_SPCR_XRST (1 << 16)
#define DAVINCI_MCBSP_SPCR_XINTM(v) ((v) << 20)
#define DAVINCI_MCBSP_SPCR_GRST (1 << 22)
#define DAVINCI_MCBSP_SPCR_FRST (1 << 23)
#define DAVINCI_MCBSP_SPCR_FREE (1 << 25)
#define DAVINCI_MCBSP_RCR_RWDLEN1(v) ((v) << 5)
#define DAVINCI_MCBSP_RCR_RFRLEN1(v) ((v) << 8)
#define DAVINCI_MCBSP_RCR_RDATDLY(v) ((v) << 16)
#define DAVINCI_MCBSP_RCR_RFIG (1 << 18)
#define DAVINCI_MCBSP_RCR_RWDLEN2(v) ((v) << 21)
#define DAVINCI_MCBSP_RCR_RFRLEN2(v) ((v) << 24)
#define DAVINCI_MCBSP_RCR_RPHASE BIT(31)
#define DAVINCI_MCBSP_XCR_XWDLEN1(v) ((v) << 5)
#define DAVINCI_MCBSP_XCR_XFRLEN1(v) ((v) << 8)
#define DAVINCI_MCBSP_XCR_XDATDLY(v) ((v) << 16)
#define DAVINCI_MCBSP_XCR_XFIG (1 << 18)
#define DAVINCI_MCBSP_XCR_XWDLEN2(v) ((v) << 21)
#define DAVINCI_MCBSP_XCR_XFRLEN2(v) ((v) << 24)
#define DAVINCI_MCBSP_XCR_XPHASE BIT(31)
#define DAVINCI_MCBSP_SRGR_FWID(v) ((v) << 8)
#define DAVINCI_MCBSP_SRGR_FPER(v) ((v) << 16)
#define DAVINCI_MCBSP_SRGR_FSGM (1 << 28)
#define DAVINCI_MCBSP_SRGR_CLKSM BIT(29)
#define DAVINCI_MCBSP_PCR_CLKRP (1 << 0)
#define DAVINCI_MCBSP_PCR_CLKXP (1 << 1)
#define DAVINCI_MCBSP_PCR_FSRP (1 << 2)
#define DAVINCI_MCBSP_PCR_FSXP (1 << 3)
#define DAVINCI_MCBSP_PCR_SCLKME (1 << 7)
#define DAVINCI_MCBSP_PCR_CLKRM (1 << 8)
#define DAVINCI_MCBSP_PCR_CLKXM (1 << 9)
#define DAVINCI_MCBSP_PCR_FSRM (1 << 10)
#define DAVINCI_MCBSP_PCR_FSXM (1 << 11)
enum {
DAVINCI_MCBSP_WORD_8 = 0,
DAVINCI_MCBSP_WORD_12,
DAVINCI_MCBSP_WORD_16,
DAVINCI_MCBSP_WORD_20,
DAVINCI_MCBSP_WORD_24,
DAVINCI_MCBSP_WORD_32,
};
static const unsigned char data_type[SNDRV_PCM_FORMAT_S32_LE + 1] = {
[SNDRV_PCM_FORMAT_S8] = 1,
[SNDRV_PCM_FORMAT_S16_LE] = 2,
[SNDRV_PCM_FORMAT_S32_LE] = 4,
};
static const unsigned char asp_word_length[SNDRV_PCM_FORMAT_S32_LE + 1] = {
[SNDRV_PCM_FORMAT_S8] = DAVINCI_MCBSP_WORD_8,
[SNDRV_PCM_FORMAT_S16_LE] = DAVINCI_MCBSP_WORD_16,
[SNDRV_PCM_FORMAT_S32_LE] = DAVINCI_MCBSP_WORD_32,
};
static const unsigned char double_fmt[SNDRV_PCM_FORMAT_S32_LE + 1] = {
[SNDRV_PCM_FORMAT_S8] = SNDRV_PCM_FORMAT_S16_LE,
[SNDRV_PCM_FORMAT_S16_LE] = SNDRV_PCM_FORMAT_S32_LE,
};
struct davinci_mcbsp_dev {
struct device *dev;
struct snd_dmaengine_dai_dma_data dma_data[2];
int dma_request[2];
void __iomem *base;
#define MOD_DSP_A 0
#define MOD_DSP_B 1
int mode;
u32 pcr;
struct clk *clk;
/*
* Combining both channels into 1 element will at least double the
* amount of time between servicing the dma channel, increase
* efficiency, and reduce the chance of overrun/underrun. But,
* it will result in the left & right channels being swapped.
*
* If relabeling the left and right channels is not possible,
* you may want to let the codec know to swap them back.
*
* It may allow up to 10x the time to service DMA requests
* if the codec is master and is using an unnecessarily fast bit clock
* (i.e. tlv320aic23b), independent of the sample rate. So, having an
* entire frame at once means it can be serviced at the sample rate
* instead of the bit clock rate.
*
* In the now unlikely case that an underrun still
* occurs, both the left and right samples will be repeated
* so that no pops are heard, and the left and right channels
* won't end up being swapped because of the underrun.
*/
unsigned enable_channel_combine:1;
unsigned int fmt;
int clk_div;
int clk_input_pin;
bool i2s_accurate_sck;
};
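/*
 * Illustrative sketch (not part of the driver): the format promotion that
 * enable_channel_combine implies, using the double_fmt[] table above. Two
 * 16-bit samples become one 32-bit DMA element per frame, mirroring what
 * davinci_i2s_hw_params() does below; the helper name is hypothetical.
 */
static inline snd_pcm_format_t example_combine_stereo(snd_pcm_format_t fmt,
						      unsigned int *element_cnt)
{
	*element_cnt = 2;		/* one DMA element per channel */
	if (double_fmt[fmt]) {
		*element_cnt = 1;	/* one wider element per frame */
		fmt = double_fmt[fmt];	/* e.g. S16_LE -> S32_LE */
	}
	return fmt;
}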
static inline void davinci_mcbsp_write_reg(struct davinci_mcbsp_dev *dev,
int reg, u32 val)
{
__raw_writel(val, dev->base + reg);
}
static inline u32 davinci_mcbsp_read_reg(struct davinci_mcbsp_dev *dev, int reg)
{
return __raw_readl(dev->base + reg);
}
static void toggle_clock(struct davinci_mcbsp_dev *dev, int playback)
{
u32 m = playback ? DAVINCI_MCBSP_PCR_CLKXP : DAVINCI_MCBSP_PCR_CLKRP;
/* The clock needs to toggle to complete reset.
* So, fake it by toggling the clk polarity.
*/
davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, dev->pcr ^ m);
davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, dev->pcr);
}
static void davinci_mcbsp_start(struct davinci_mcbsp_dev *dev,
struct snd_pcm_substream *substream)
{
int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
u32 spcr;
u32 mask = playback ? DAVINCI_MCBSP_SPCR_XRST : DAVINCI_MCBSP_SPCR_RRST;
/* Enable transmitter or receiver */
spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
spcr |= mask;
if (dev->pcr & (DAVINCI_MCBSP_PCR_FSXM | DAVINCI_MCBSP_PCR_FSRM)) {
/* Start frame sync */
spcr |= DAVINCI_MCBSP_SPCR_FRST;
}
davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
}
static void davinci_mcbsp_stop(struct davinci_mcbsp_dev *dev, int playback)
{
u32 spcr;
/* Reset transmitter/receiver and sample rate/frame sync generators */
spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
spcr &= ~(DAVINCI_MCBSP_SPCR_GRST | DAVINCI_MCBSP_SPCR_FRST);
spcr &= playback ? ~DAVINCI_MCBSP_SPCR_XRST : ~DAVINCI_MCBSP_SPCR_RRST;
davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
toggle_clock(dev, playback);
}
#define DEFAULT_BITPERSAMPLE 16
static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
unsigned int fmt)
{
struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(cpu_dai);
unsigned int pcr;
unsigned int srgr;
bool inv_fs = false;
/* Attention srgr is updated by hw_params! */
srgr = DAVINCI_MCBSP_SRGR_FSGM |
DAVINCI_MCBSP_SRGR_FPER(DEFAULT_BITPERSAMPLE * 2 - 1) |
DAVINCI_MCBSP_SRGR_FWID(DEFAULT_BITPERSAMPLE - 1);
dev->fmt = fmt;
/* set master/slave audio interface */
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
case SND_SOC_DAIFMT_BP_FP:
/* cpu is master */
pcr = DAVINCI_MCBSP_PCR_FSXM |
DAVINCI_MCBSP_PCR_FSRM |
DAVINCI_MCBSP_PCR_CLKXM |
DAVINCI_MCBSP_PCR_CLKRM;
break;
case SND_SOC_DAIFMT_BC_FP:
pcr = DAVINCI_MCBSP_PCR_FSRM | DAVINCI_MCBSP_PCR_FSXM;
/*
* Selection of the clock input pin that is the
* input for the Sample Rate Generator.
* McBSP FSR and FSX are driven by the Sample Rate
* Generator.
*/
switch (dev->clk_input_pin) {
case MCBSP_CLKS:
pcr |= DAVINCI_MCBSP_PCR_CLKXM |
DAVINCI_MCBSP_PCR_CLKRM;
break;
case MCBSP_CLKR:
pcr |= DAVINCI_MCBSP_PCR_SCLKME;
break;
default:
dev_err(dev->dev, "bad clk_input_pin\n");
return -EINVAL;
}
break;
case SND_SOC_DAIFMT_BC_FC:
/* codec is master */
pcr = 0;
break;
default:
printk(KERN_ERR "%s:bad master\n", __func__);
return -EINVAL;
}
/* interface format */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
/* Davinci doesn't support TRUE I2S, but some codecs will have
* the left and right channels contiguous. This allows
* dsp_a mode to be used with an inverted normal frame clk.
* If your codec is master and does not have contiguous
* channels, then you will have sound on only one channel.
* Try using a different mode, or codec as slave.
*
* The TLV320AIC33 is an example of a codec where this works.
* It has a variable bit clock frequency allowing it to have
* valid data on every bit clock.
*
* The TLV320AIC23 is an example of a codec where this does not
* work. It has a fixed bit clock frequency with progressively
* more empty bit clock slots between channels as the sample
* rate is lowered.
*/
inv_fs = true;
fallthrough;
case SND_SOC_DAIFMT_DSP_A:
dev->mode = MOD_DSP_A;
break;
case SND_SOC_DAIFMT_DSP_B:
dev->mode = MOD_DSP_B;
break;
default:
printk(KERN_ERR "%s:bad format\n", __func__);
return -EINVAL;
}
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
/* CLKRP Receive clock polarity,
* 1 - sampled on rising edge of CLKR
* valid on rising edge
* CLKXP Transmit clock polarity,
* 1 - clocked on falling edge of CLKX
* valid on rising edge
* FSRP Receive frame sync pol, 0 - active high
* FSXP Transmit frame sync pol, 0 - active high
*/
pcr |= (DAVINCI_MCBSP_PCR_CLKXP | DAVINCI_MCBSP_PCR_CLKRP);
break;
case SND_SOC_DAIFMT_IB_IF:
/* CLKRP Receive clock polarity,
* 0 - sampled on falling edge of CLKR
* valid on falling edge
* CLKXP Transmit clock polarity,
* 0 - clocked on rising edge of CLKX
* valid on falling edge
* FSRP Receive frame sync pol, 1 - active low
* FSXP Transmit frame sync pol, 1 - active low
*/
pcr |= (DAVINCI_MCBSP_PCR_FSXP | DAVINCI_MCBSP_PCR_FSRP);
break;
case SND_SOC_DAIFMT_NB_IF:
/* CLKRP Receive clock polarity,
* 1 - sampled on rising edge of CLKR
* valid on rising edge
* CLKXP Transmit clock polarity,
* 1 - clocked on falling edge of CLKX
* valid on rising edge
* FSRP Receive frame sync pol, 1 - active low
* FSXP Transmit frame sync pol, 1 - active low
*/
pcr |= (DAVINCI_MCBSP_PCR_CLKXP | DAVINCI_MCBSP_PCR_CLKRP |
DAVINCI_MCBSP_PCR_FSXP | DAVINCI_MCBSP_PCR_FSRP);
break;
case SND_SOC_DAIFMT_IB_NF:
/* CLKRP Receive clock polarity,
* 0 - sampled on falling edge of CLKR
* valid on falling edge
* CLKXP Transmit clock polarity,
* 0 - clocked on rising edge of CLKX
* valid on falling edge
* FSRP Receive frame sync pol, 0 - active high
* FSXP Transmit frame sync pol, 0 - active high
*/
break;
default:
return -EINVAL;
}
if (inv_fs)
pcr ^= (DAVINCI_MCBSP_PCR_FSXP | DAVINCI_MCBSP_PCR_FSRP);
davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SRGR_REG, srgr);
dev->pcr = pcr;
davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, pcr);
return 0;
}
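/*
 * Illustrative sketch (not part of the driver): how a machine driver could
 * hand one of the formats accepted above to this DAI, here DSP_B with the
 * codec providing bit and frame clocks and an inverted bit clock, matching
 * the evm_dai_tlv320aic3x link used elsewhere in this tree. The function
 * name is hypothetical.
 */
static inline int example_set_asp_format(struct snd_soc_dai *cpu_dai)
{
	return snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_DSP_B |
					    SND_SOC_DAIFMT_BC_FC |
					    SND_SOC_DAIFMT_IB_NF);
}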
static int davinci_i2s_dai_set_clkdiv(struct snd_soc_dai *cpu_dai,
int div_id, int div)
{
struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(cpu_dai);
if (div_id != DAVINCI_MCBSP_CLKGDV)
return -ENODEV;
dev->clk_div = div;
return 0;
}
static int davinci_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai);
struct snd_interval *i = NULL;
int mcbsp_word_length, master;
unsigned int rcr, xcr, srgr, clk_div, freq, framesize;
u32 spcr;
snd_pcm_format_t fmt;
unsigned element_cnt = 1;
/* general line settings */
spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
spcr |= DAVINCI_MCBSP_SPCR_RINTM(3) | DAVINCI_MCBSP_SPCR_FREE;
davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
} else {
spcr |= DAVINCI_MCBSP_SPCR_XINTM(3) | DAVINCI_MCBSP_SPCR_FREE;
davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
}
master = dev->fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK;
fmt = params_format(params);
mcbsp_word_length = asp_word_length[fmt];
switch (master) {
case SND_SOC_DAIFMT_BP_FP:
freq = clk_get_rate(dev->clk);
srgr = DAVINCI_MCBSP_SRGR_FSGM |
DAVINCI_MCBSP_SRGR_CLKSM;
srgr |= DAVINCI_MCBSP_SRGR_FWID(mcbsp_word_length *
8 - 1);
if (dev->i2s_accurate_sck) {
clk_div = 256;
do {
framesize = (freq / (--clk_div)) /
params->rate_num *
params->rate_den;
} while (((framesize < 33) || (framesize > 4095)) &&
(clk_div));
clk_div--;
srgr |= DAVINCI_MCBSP_SRGR_FPER(framesize - 1);
} else {
/* symmetric waveforms */
clk_div = freq / (mcbsp_word_length * 16) /
params->rate_num * params->rate_den;
srgr |= DAVINCI_MCBSP_SRGR_FPER(mcbsp_word_length *
16 - 1);
}
clk_div &= 0xFF;
srgr |= clk_div;
break;
case SND_SOC_DAIFMT_BC_FP:
srgr = DAVINCI_MCBSP_SRGR_FSGM;
clk_div = dev->clk_div - 1;
srgr |= DAVINCI_MCBSP_SRGR_FWID(mcbsp_word_length * 8 - 1);
srgr |= DAVINCI_MCBSP_SRGR_FPER(mcbsp_word_length * 16 - 1);
clk_div &= 0xFF;
srgr |= clk_div;
break;
case SND_SOC_DAIFMT_BC_FC:
/* Clock and frame sync given from external sources */
i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
srgr = DAVINCI_MCBSP_SRGR_FSGM;
srgr |= DAVINCI_MCBSP_SRGR_FWID(snd_interval_value(i) - 1);
pr_debug("%s - %d FWID set: re-read srgr = %X\n",
__func__, __LINE__, snd_interval_value(i) - 1);
i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_FRAME_BITS);
srgr |= DAVINCI_MCBSP_SRGR_FPER(snd_interval_value(i) - 1);
break;
default:
return -EINVAL;
}
davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SRGR_REG, srgr);
rcr = DAVINCI_MCBSP_RCR_RFIG;
xcr = DAVINCI_MCBSP_XCR_XFIG;
if (dev->mode == MOD_DSP_B) {
rcr |= DAVINCI_MCBSP_RCR_RDATDLY(0);
xcr |= DAVINCI_MCBSP_XCR_XDATDLY(0);
} else {
rcr |= DAVINCI_MCBSP_RCR_RDATDLY(1);
xcr |= DAVINCI_MCBSP_XCR_XDATDLY(1);
}
/* Determine xfer data type */
fmt = params_format(params);
if ((fmt > SNDRV_PCM_FORMAT_S32_LE) || !data_type[fmt]) {
printk(KERN_WARNING "davinci-i2s: unsupported PCM format\n");
return -EINVAL;
}
if (params_channels(params) == 2) {
element_cnt = 2;
if (double_fmt[fmt] && dev->enable_channel_combine) {
element_cnt = 1;
fmt = double_fmt[fmt];
}
switch (master) {
case SND_SOC_DAIFMT_BP_FP:
case SND_SOC_DAIFMT_BP_FC:
rcr |= DAVINCI_MCBSP_RCR_RFRLEN2(0);
xcr |= DAVINCI_MCBSP_XCR_XFRLEN2(0);
rcr |= DAVINCI_MCBSP_RCR_RPHASE;
xcr |= DAVINCI_MCBSP_XCR_XPHASE;
break;
case SND_SOC_DAIFMT_BC_FC:
case SND_SOC_DAIFMT_BC_FP:
rcr |= DAVINCI_MCBSP_RCR_RFRLEN2(element_cnt - 1);
xcr |= DAVINCI_MCBSP_XCR_XFRLEN2(element_cnt - 1);
break;
default:
return -EINVAL;
}
}
mcbsp_word_length = asp_word_length[fmt];
switch (master) {
case SND_SOC_DAIFMT_BP_FP:
case SND_SOC_DAIFMT_BP_FC:
rcr |= DAVINCI_MCBSP_RCR_RFRLEN1(0);
xcr |= DAVINCI_MCBSP_XCR_XFRLEN1(0);
break;
case SND_SOC_DAIFMT_BC_FC:
case SND_SOC_DAIFMT_BC_FP:
rcr |= DAVINCI_MCBSP_RCR_RFRLEN1(element_cnt - 1);
xcr |= DAVINCI_MCBSP_XCR_XFRLEN1(element_cnt - 1);
break;
default:
return -EINVAL;
}
rcr |= DAVINCI_MCBSP_RCR_RWDLEN1(mcbsp_word_length) |
DAVINCI_MCBSP_RCR_RWDLEN2(mcbsp_word_length);
xcr |= DAVINCI_MCBSP_XCR_XWDLEN1(mcbsp_word_length) |
DAVINCI_MCBSP_XCR_XWDLEN2(mcbsp_word_length);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_XCR_REG, xcr);
else
davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_RCR_REG, rcr);
pr_debug("%s - %d srgr=%X\n", __func__, __LINE__, srgr);
pr_debug("%s - %d xcr=%X\n", __func__, __LINE__, xcr);
pr_debug("%s - %d rcr=%X\n", __func__, __LINE__, rcr);
return 0;
}
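/*
 * Illustrative sketch (not part of the driver): the symmetric-waveform
 * bit-clock divider computed above for the clock-provider case. For 16-bit
 * samples asp_word_length[] yields DAVINCI_MCBSP_WORD_16 (value 2), so one
 * frame spans 2 * 16 = 32 bit clocks; the clock and rate values below are
 * assumptions for the example only.
 */
static inline unsigned int example_symmetric_clk_div(void)
{
	unsigned int freq = 24576000;	/* assumed functional clock rate */
	unsigned int rate = 48000;	/* assumed sample rate */
	unsigned int frame_bits = DAVINCI_MCBSP_WORD_16 * 16;	/* 32 BCLKs */

	return freq / frame_bits / rate;	/* 24576000 / 32 / 48000 == 16 */
}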
static int davinci_i2s_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai);
int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
u32 spcr;
u32 mask = playback ? DAVINCI_MCBSP_SPCR_XRST : DAVINCI_MCBSP_SPCR_RRST;
davinci_mcbsp_stop(dev, playback);
spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
if (spcr & mask) {
/* start off disabled */
davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG,
spcr & ~mask);
toggle_clock(dev, playback);
}
if (dev->pcr & (DAVINCI_MCBSP_PCR_FSXM | DAVINCI_MCBSP_PCR_FSRM |
DAVINCI_MCBSP_PCR_CLKXM | DAVINCI_MCBSP_PCR_CLKRM)) {
/* Start the sample generator */
spcr |= DAVINCI_MCBSP_SPCR_GRST;
davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
}
if (playback) {
/* Enable the transmitter */
spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
spcr |= DAVINCI_MCBSP_SPCR_XRST;
davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
/* wait for any unexpected frame sync error to occur */
udelay(100);
/* Disable the transmitter to clear any outstanding XSYNCERR */
spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
spcr &= ~DAVINCI_MCBSP_SPCR_XRST;
davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
toggle_clock(dev, playback);
}
return 0;
}
static int davinci_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai);
int ret = 0;
int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
davinci_mcbsp_start(dev, substream);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
davinci_mcbsp_stop(dev, playback);
break;
default:
ret = -EINVAL;
}
return ret;
}
static void davinci_i2s_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai);
int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
davinci_mcbsp_stop(dev, playback);
}
#define DAVINCI_I2S_RATES SNDRV_PCM_RATE_8000_96000
#define DAVINCI_I2S_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S32_LE)
static int davinci_i2s_dai_probe(struct snd_soc_dai *dai)
{
struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai);
int stream;
for_each_pcm_streams(stream)
snd_soc_dai_dma_data_set(dai, stream, &dev->dma_data[stream]);
return 0;
}
static const struct snd_soc_dai_ops davinci_i2s_dai_ops = {
.probe = davinci_i2s_dai_probe,
.shutdown = davinci_i2s_shutdown,
.prepare = davinci_i2s_prepare,
.trigger = davinci_i2s_trigger,
.hw_params = davinci_i2s_hw_params,
.set_fmt = davinci_i2s_set_dai_fmt,
.set_clkdiv = davinci_i2s_dai_set_clkdiv,
};
static struct snd_soc_dai_driver davinci_i2s_dai = {
.playback = {
.channels_min = 2,
.channels_max = 2,
.rates = DAVINCI_I2S_RATES,
.formats = DAVINCI_I2S_FORMATS,
},
.capture = {
.channels_min = 2,
.channels_max = 2,
.rates = DAVINCI_I2S_RATES,
.formats = DAVINCI_I2S_FORMATS,
},
.ops = &davinci_i2s_dai_ops,
};
static const struct snd_soc_component_driver davinci_i2s_component = {
.name = DRV_NAME,
.legacy_dai_naming = 1,
};
static int davinci_i2s_probe(struct platform_device *pdev)
{
struct snd_dmaengine_dai_dma_data *dma_data;
struct davinci_mcbsp_dev *dev;
struct resource *mem, *res;
void __iomem *io_base;
int *dma;
int ret;
mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpu");
if (!mem) {
dev_warn(&pdev->dev,
"\"mpu\" mem resource not found, using index 0\n");
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(&pdev->dev, "no mem resource?\n");
return -ENODEV;
}
}
io_base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(io_base))
return PTR_ERR(io_base);
dev = devm_kzalloc(&pdev->dev, sizeof(struct davinci_mcbsp_dev),
GFP_KERNEL);
if (!dev)
return -ENOMEM;
dev->base = io_base;
/* setup DMA, first TX, then RX */
dma_data = &dev->dma_data[SNDRV_PCM_STREAM_PLAYBACK];
dma_data->addr = (dma_addr_t)(mem->start + DAVINCI_MCBSP_DXR_REG);
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (res) {
dma = &dev->dma_request[SNDRV_PCM_STREAM_PLAYBACK];
*dma = res->start;
dma_data->filter_data = dma;
} else if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
dma_data->filter_data = "tx";
} else {
dev_err(&pdev->dev, "Missing DMA tx resource\n");
return -ENODEV;
}
dma_data = &dev->dma_data[SNDRV_PCM_STREAM_CAPTURE];
dma_data->addr = (dma_addr_t)(mem->start + DAVINCI_MCBSP_DRR_REG);
res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
if (res) {
dma = &dev->dma_request[SNDRV_PCM_STREAM_CAPTURE];
*dma = res->start;
dma_data->filter_data = dma;
} else if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
dma_data->filter_data = "rx";
} else {
dev_err(&pdev->dev, "Missing DMA rx resource\n");
return -ENODEV;
}
dev->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(dev->clk))
return -ENODEV;
ret = clk_enable(dev->clk);
if (ret)
goto err_put_clk;
dev->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, dev);
ret = snd_soc_register_component(&pdev->dev, &davinci_i2s_component,
&davinci_i2s_dai, 1);
if (ret != 0)
goto err_release_clk;
ret = edma_pcm_platform_register(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "register PCM failed: %d\n", ret);
goto err_unregister_component;
}
return 0;
err_unregister_component:
snd_soc_unregister_component(&pdev->dev);
err_release_clk:
clk_disable(dev->clk);
err_put_clk:
clk_put(dev->clk);
return ret;
}
static void davinci_i2s_remove(struct platform_device *pdev)
{
struct davinci_mcbsp_dev *dev = dev_get_drvdata(&pdev->dev);
snd_soc_unregister_component(&pdev->dev);
clk_disable(dev->clk);
clk_put(dev->clk);
dev->clk = NULL;
}
static const struct of_device_id davinci_i2s_match[] __maybe_unused = {
{ .compatible = "ti,da850-mcbsp" },
{},
};
MODULE_DEVICE_TABLE(of, davinci_i2s_match);
static struct platform_driver davinci_mcbsp_driver = {
.probe = davinci_i2s_probe,
.remove_new = davinci_i2s_remove,
.driver = {
.name = "davinci-mcbsp",
.of_match_table = of_match_ptr(davinci_i2s_match),
},
};
module_platform_driver(davinci_mcbsp_driver);
MODULE_AUTHOR("Vladimir Barinov");
MODULE_DESCRIPTION("TI DAVINCI I2S (McBSP) SoC Interface");
MODULE_LICENSE("GPL");
| linux-master | sound/soc/ti/davinci-i2s.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* omap-mcbsp.c -- OMAP ALSA SoC DAI driver using McBSP port
*
* Copyright (C) 2008 Nokia Corporation
*
* Contact: Jarkko Nikula <[email protected]>
* Peter Ujfalusi <[email protected]>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>
#include "omap-mcbsp-priv.h"
#include "omap-mcbsp.h"
#include "sdma-pcm.h"
#define OMAP_MCBSP_RATES (SNDRV_PCM_RATE_8000_96000)
enum {
OMAP_MCBSP_WORD_8 = 0,
OMAP_MCBSP_WORD_12,
OMAP_MCBSP_WORD_16,
OMAP_MCBSP_WORD_20,
OMAP_MCBSP_WORD_24,
OMAP_MCBSP_WORD_32,
};
static void omap_mcbsp_dump_reg(struct omap_mcbsp *mcbsp)
{
dev_dbg(mcbsp->dev, "**** McBSP%d regs ****\n", mcbsp->id);
dev_dbg(mcbsp->dev, "DRR2: 0x%04x\n", MCBSP_READ(mcbsp, DRR2));
dev_dbg(mcbsp->dev, "DRR1: 0x%04x\n", MCBSP_READ(mcbsp, DRR1));
dev_dbg(mcbsp->dev, "DXR2: 0x%04x\n", MCBSP_READ(mcbsp, DXR2));
dev_dbg(mcbsp->dev, "DXR1: 0x%04x\n", MCBSP_READ(mcbsp, DXR1));
dev_dbg(mcbsp->dev, "SPCR2: 0x%04x\n", MCBSP_READ(mcbsp, SPCR2));
dev_dbg(mcbsp->dev, "SPCR1: 0x%04x\n", MCBSP_READ(mcbsp, SPCR1));
dev_dbg(mcbsp->dev, "RCR2: 0x%04x\n", MCBSP_READ(mcbsp, RCR2));
dev_dbg(mcbsp->dev, "RCR1: 0x%04x\n", MCBSP_READ(mcbsp, RCR1));
dev_dbg(mcbsp->dev, "XCR2: 0x%04x\n", MCBSP_READ(mcbsp, XCR2));
dev_dbg(mcbsp->dev, "XCR1: 0x%04x\n", MCBSP_READ(mcbsp, XCR1));
dev_dbg(mcbsp->dev, "SRGR2: 0x%04x\n", MCBSP_READ(mcbsp, SRGR2));
dev_dbg(mcbsp->dev, "SRGR1: 0x%04x\n", MCBSP_READ(mcbsp, SRGR1));
dev_dbg(mcbsp->dev, "PCR0: 0x%04x\n", MCBSP_READ(mcbsp, PCR0));
dev_dbg(mcbsp->dev, "***********************\n");
}
static int omap2_mcbsp_set_clks_src(struct omap_mcbsp *mcbsp, u8 fck_src_id)
{
struct clk *fck_src;
const char *src;
int r;
if (fck_src_id == MCBSP_CLKS_PAD_SRC)
src = "pad_fck";
else if (fck_src_id == MCBSP_CLKS_PRCM_SRC)
src = "prcm_fck";
else
return -EINVAL;
fck_src = clk_get(mcbsp->dev, src);
if (IS_ERR(fck_src)) {
dev_info(mcbsp->dev, "CLKS: could not clk_get() %s\n", src);
return 0;
}
pm_runtime_put_sync(mcbsp->dev);
r = clk_set_parent(mcbsp->fclk, fck_src);
if (r)
dev_err(mcbsp->dev, "CLKS: could not clk_set_parent() to %s\n",
src);
pm_runtime_get_sync(mcbsp->dev);
clk_put(fck_src);
return r;
}
static irqreturn_t omap_mcbsp_irq_handler(int irq, void *data)
{
struct omap_mcbsp *mcbsp = data;
u16 irqst;
irqst = MCBSP_READ(mcbsp, IRQST);
dev_dbg(mcbsp->dev, "IRQ callback : 0x%x\n", irqst);
if (irqst & RSYNCERREN)
dev_err(mcbsp->dev, "RX Frame Sync Error!\n");
if (irqst & RFSREN)
dev_dbg(mcbsp->dev, "RX Frame Sync\n");
if (irqst & REOFEN)
dev_dbg(mcbsp->dev, "RX End Of Frame\n");
if (irqst & RRDYEN)
dev_dbg(mcbsp->dev, "RX Buffer Threshold Reached\n");
if (irqst & RUNDFLEN)
dev_err(mcbsp->dev, "RX Buffer Underflow!\n");
if (irqst & ROVFLEN)
dev_err(mcbsp->dev, "RX Buffer Overflow!\n");
if (irqst & XSYNCERREN)
dev_err(mcbsp->dev, "TX Frame Sync Error!\n");
if (irqst & XFSXEN)
dev_dbg(mcbsp->dev, "TX Frame Sync\n");
if (irqst & XEOFEN)
dev_dbg(mcbsp->dev, "TX End Of Frame\n");
if (irqst & XRDYEN)
dev_dbg(mcbsp->dev, "TX Buffer threshold Reached\n");
if (irqst & XUNDFLEN)
dev_err(mcbsp->dev, "TX Buffer Underflow!\n");
if (irqst & XOVFLEN)
dev_err(mcbsp->dev, "TX Buffer Overflow!\n");
if (irqst & XEMPTYEOFEN)
dev_dbg(mcbsp->dev, "TX Buffer empty at end of frame\n");
MCBSP_WRITE(mcbsp, IRQST, irqst);
return IRQ_HANDLED;
}
static irqreturn_t omap_mcbsp_tx_irq_handler(int irq, void *data)
{
struct omap_mcbsp *mcbsp = data;
u16 irqst_spcr2;
irqst_spcr2 = MCBSP_READ(mcbsp, SPCR2);
dev_dbg(mcbsp->dev, "TX IRQ callback : 0x%x\n", irqst_spcr2);
if (irqst_spcr2 & XSYNC_ERR) {
dev_err(mcbsp->dev, "TX Frame Sync Error! : 0x%x\n",
irqst_spcr2);
/* Writing zero to XSYNC_ERR clears the IRQ */
MCBSP_WRITE(mcbsp, SPCR2, MCBSP_READ_CACHE(mcbsp, SPCR2));
}
return IRQ_HANDLED;
}
static irqreturn_t omap_mcbsp_rx_irq_handler(int irq, void *data)
{
struct omap_mcbsp *mcbsp = data;
u16 irqst_spcr1;
irqst_spcr1 = MCBSP_READ(mcbsp, SPCR1);
dev_dbg(mcbsp->dev, "RX IRQ callback : 0x%x\n", irqst_spcr1);
if (irqst_spcr1 & RSYNC_ERR) {
dev_err(mcbsp->dev, "RX Frame Sync Error! : 0x%x\n",
irqst_spcr1);
/* Writing zero to RSYNC_ERR clears the IRQ */
MCBSP_WRITE(mcbsp, SPCR1, MCBSP_READ_CACHE(mcbsp, SPCR1));
}
return IRQ_HANDLED;
}
/*
* omap_mcbsp_config simply writes a config to the
* appropriate McBSP.
* You either call this function or set the McBSP registers
* by yourself before calling omap_mcbsp_start().
*/
static void omap_mcbsp_config(struct omap_mcbsp *mcbsp,
const struct omap_mcbsp_reg_cfg *config)
{
dev_dbg(mcbsp->dev, "Configuring McBSP%d phys_base: 0x%08lx\n",
mcbsp->id, mcbsp->phys_base);
/* We write the given config */
MCBSP_WRITE(mcbsp, SPCR2, config->spcr2);
MCBSP_WRITE(mcbsp, SPCR1, config->spcr1);
MCBSP_WRITE(mcbsp, RCR2, config->rcr2);
MCBSP_WRITE(mcbsp, RCR1, config->rcr1);
MCBSP_WRITE(mcbsp, XCR2, config->xcr2);
MCBSP_WRITE(mcbsp, XCR1, config->xcr1);
MCBSP_WRITE(mcbsp, SRGR2, config->srgr2);
MCBSP_WRITE(mcbsp, SRGR1, config->srgr1);
MCBSP_WRITE(mcbsp, MCR2, config->mcr2);
MCBSP_WRITE(mcbsp, MCR1, config->mcr1);
MCBSP_WRITE(mcbsp, PCR0, config->pcr0);
if (mcbsp->pdata->has_ccr) {
MCBSP_WRITE(mcbsp, XCCR, config->xccr);
MCBSP_WRITE(mcbsp, RCCR, config->rccr);
}
/* Enable wakeup behavior */
if (mcbsp->pdata->has_wakeup)
MCBSP_WRITE(mcbsp, WAKEUPEN, XRDYEN | RRDYEN);
/* Enable TX/RX sync error interrupts by default */
if (mcbsp->irq)
MCBSP_WRITE(mcbsp, IRQEN, RSYNCERREN | XSYNCERREN |
RUNDFLEN | ROVFLEN | XUNDFLEN | XOVFLEN);
}
/**
* omap_mcbsp_dma_reg_params - returns the address of mcbsp data register
* @mcbsp: omap_mcbsp struct for the McBSP instance
* @stream: Stream direction (playback/capture)
*
* Returns the address of mcbsp data transmit register or data receive register
* to be used by DMA for transferring/receiving data
*/
static int omap_mcbsp_dma_reg_params(struct omap_mcbsp *mcbsp,
unsigned int stream)
{
int data_reg;
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
if (mcbsp->pdata->reg_size == 2)
data_reg = OMAP_MCBSP_REG_DXR1;
else
data_reg = OMAP_MCBSP_REG_DXR;
} else {
if (mcbsp->pdata->reg_size == 2)
data_reg = OMAP_MCBSP_REG_DRR1;
else
data_reg = OMAP_MCBSP_REG_DRR;
}
return mcbsp->phys_dma_base + data_reg * mcbsp->pdata->reg_step;
}
/*
* omap_mcbsp_set_tx_threshold configures the transmit threshold in words.
* The threshold parameter is 1 based, and it is converted (threshold - 1)
* for the THRSH2 register.
*/
static void omap_mcbsp_set_tx_threshold(struct omap_mcbsp *mcbsp, u16 threshold)
{
if (threshold && threshold <= mcbsp->max_tx_thres)
MCBSP_WRITE(mcbsp, THRSH2, threshold - 1);
}
/*
* omap_mcbsp_set_rx_threshold configures the receive threshold in words.
* The threshold parameter is 1 based, and it is converted (threshold - 1)
* for the THRSH1 register.
*/
static void omap_mcbsp_set_rx_threshold(struct omap_mcbsp *mcbsp, u16 threshold)
{
if (threshold && threshold <= mcbsp->max_rx_thres)
MCBSP_WRITE(mcbsp, THRSH1, threshold - 1);
}
/*
* omap_mcbsp_get_tx_delay returns the number of used slots in the McBSP FIFO
*/
static u16 omap_mcbsp_get_tx_delay(struct omap_mcbsp *mcbsp)
{
u16 buffstat;
/* Returns the number of free locations in the buffer */
buffstat = MCBSP_READ(mcbsp, XBUFFSTAT);
/* The number of slots differs between McBSP ports */
return mcbsp->pdata->buffer_size - buffstat;
}
/*
* omap_mcbsp_get_rx_delay returns the number of free slots in the McBSP FIFO
* to reach the threshold value (when the DMA will be triggered to read it)
*/
static u16 omap_mcbsp_get_rx_delay(struct omap_mcbsp *mcbsp)
{
u16 buffstat, threshold;
/* Returns the number of used locations in the buffer */
buffstat = MCBSP_READ(mcbsp, RBUFFSTAT);
/* RX threshold */
threshold = MCBSP_READ(mcbsp, THRSH1);
/* Return the number of locations until we reach the threshold limit */
if (threshold <= buffstat)
return 0;
else
return threshold - buffstat;
}
static int omap_mcbsp_request(struct omap_mcbsp *mcbsp)
{
void *reg_cache;
int err;
reg_cache = kzalloc(mcbsp->reg_cache_size, GFP_KERNEL);
if (!reg_cache)
return -ENOMEM;
spin_lock(&mcbsp->lock);
if (!mcbsp->free) {
dev_err(mcbsp->dev, "McBSP%d is currently in use\n", mcbsp->id);
err = -EBUSY;
goto err_kfree;
}
mcbsp->free = false;
mcbsp->reg_cache = reg_cache;
spin_unlock(&mcbsp->lock);
if (mcbsp->pdata->ops && mcbsp->pdata->ops->request)
mcbsp->pdata->ops->request(mcbsp->id - 1);
/*
* Make sure that transmitter, receiver and sample-rate generator are
* not running before activating IRQs.
*/
MCBSP_WRITE(mcbsp, SPCR1, 0);
MCBSP_WRITE(mcbsp, SPCR2, 0);
if (mcbsp->irq) {
err = request_irq(mcbsp->irq, omap_mcbsp_irq_handler, 0,
"McBSP", (void *)mcbsp);
if (err != 0) {
dev_err(mcbsp->dev, "Unable to request IRQ\n");
goto err_clk_disable;
}
} else {
err = request_irq(mcbsp->tx_irq, omap_mcbsp_tx_irq_handler, 0,
"McBSP TX", (void *)mcbsp);
if (err != 0) {
dev_err(mcbsp->dev, "Unable to request TX IRQ\n");
goto err_clk_disable;
}
err = request_irq(mcbsp->rx_irq, omap_mcbsp_rx_irq_handler, 0,
"McBSP RX", (void *)mcbsp);
if (err != 0) {
dev_err(mcbsp->dev, "Unable to request RX IRQ\n");
goto err_free_irq;
}
}
return 0;
err_free_irq:
free_irq(mcbsp->tx_irq, (void *)mcbsp);
err_clk_disable:
if (mcbsp->pdata->ops && mcbsp->pdata->ops->free)
mcbsp->pdata->ops->free(mcbsp->id - 1);
/* Disable wakeup behavior */
if (mcbsp->pdata->has_wakeup)
MCBSP_WRITE(mcbsp, WAKEUPEN, 0);
spin_lock(&mcbsp->lock);
mcbsp->free = true;
mcbsp->reg_cache = NULL;
err_kfree:
spin_unlock(&mcbsp->lock);
kfree(reg_cache);
return err;
}
static void omap_mcbsp_free(struct omap_mcbsp *mcbsp)
{
void *reg_cache;
if (mcbsp->pdata->ops && mcbsp->pdata->ops->free)
mcbsp->pdata->ops->free(mcbsp->id - 1);
/* Disable wakeup behavior */
if (mcbsp->pdata->has_wakeup)
MCBSP_WRITE(mcbsp, WAKEUPEN, 0);
/* Disable interrupt requests */
if (mcbsp->irq) {
MCBSP_WRITE(mcbsp, IRQEN, 0);
free_irq(mcbsp->irq, (void *)mcbsp);
} else {
free_irq(mcbsp->rx_irq, (void *)mcbsp);
free_irq(mcbsp->tx_irq, (void *)mcbsp);
}
reg_cache = mcbsp->reg_cache;
/*
* Select CLKS source from internal source unconditionally before
* marking the McBSP port as free.
* If the external clock source via MCBSP_CLKS pin has been selected the
* system will refuse to enter idle if the CLKS pin source is not reset
* back to internal source.
*/
if (!mcbsp_omap1())
omap2_mcbsp_set_clks_src(mcbsp, MCBSP_CLKS_PRCM_SRC);
spin_lock(&mcbsp->lock);
if (mcbsp->free)
dev_err(mcbsp->dev, "McBSP%d was not reserved\n", mcbsp->id);
else
mcbsp->free = true;
mcbsp->reg_cache = NULL;
spin_unlock(&mcbsp->lock);
kfree(reg_cache);
}
/*
* Here we start the McBSP, by enabling transmitter, receiver or both.
* If no transmitter or receiver is active prior to calling, then the sample-rate
* generator and frame sync are started.
*/
static void omap_mcbsp_start(struct omap_mcbsp *mcbsp, int stream)
{
int tx = (stream == SNDRV_PCM_STREAM_PLAYBACK);
int rx = !tx;
int enable_srg = 0;
u16 w;
if (mcbsp->st_data)
omap_mcbsp_st_start(mcbsp);
/* Only enable SRG, if McBSP is master */
w = MCBSP_READ_CACHE(mcbsp, PCR0);
if (w & (FSXM | FSRM | CLKXM | CLKRM))
enable_srg = !((MCBSP_READ_CACHE(mcbsp, SPCR2) |
MCBSP_READ_CACHE(mcbsp, SPCR1)) & 1);
if (enable_srg) {
/* Start the sample generator */
w = MCBSP_READ_CACHE(mcbsp, SPCR2);
MCBSP_WRITE(mcbsp, SPCR2, w | (1 << 6));
}
/* Enable transmitter and receiver */
tx &= 1;
w = MCBSP_READ_CACHE(mcbsp, SPCR2);
MCBSP_WRITE(mcbsp, SPCR2, w | tx);
rx &= 1;
w = MCBSP_READ_CACHE(mcbsp, SPCR1);
MCBSP_WRITE(mcbsp, SPCR1, w | rx);
/*
* Worst case: CLKSRG*2 = 8000khz: (1/8000) * 2 * 2 usec
* REVISIT: 100us may give enough time for two CLKSRG, however
* due to some unknown PM related, clock gating etc. reason it
* is now at 500us.
*/
udelay(500);
if (enable_srg) {
/* Start frame sync */
w = MCBSP_READ_CACHE(mcbsp, SPCR2);
MCBSP_WRITE(mcbsp, SPCR2, w | (1 << 7));
}
if (mcbsp->pdata->has_ccr) {
/* Release the transmitter and receiver */
w = MCBSP_READ_CACHE(mcbsp, XCCR);
w &= ~(tx ? XDISABLE : 0);
MCBSP_WRITE(mcbsp, XCCR, w);
w = MCBSP_READ_CACHE(mcbsp, RCCR);
w &= ~(rx ? RDISABLE : 0);
MCBSP_WRITE(mcbsp, RCCR, w);
}
/* Dump McBSP Regs */
omap_mcbsp_dump_reg(mcbsp);
}
static void omap_mcbsp_stop(struct omap_mcbsp *mcbsp, int stream)
{
int tx = (stream == SNDRV_PCM_STREAM_PLAYBACK);
int rx = !tx;
int idle;
u16 w;
/* Reset transmitter */
tx &= 1;
if (mcbsp->pdata->has_ccr) {
w = MCBSP_READ_CACHE(mcbsp, XCCR);
w |= (tx ? XDISABLE : 0);
MCBSP_WRITE(mcbsp, XCCR, w);
}
w = MCBSP_READ_CACHE(mcbsp, SPCR2);
MCBSP_WRITE(mcbsp, SPCR2, w & ~tx);
/* Reset receiver */
rx &= 1;
if (mcbsp->pdata->has_ccr) {
w = MCBSP_READ_CACHE(mcbsp, RCCR);
w |= (rx ? RDISABLE : 0);
MCBSP_WRITE(mcbsp, RCCR, w);
}
w = MCBSP_READ_CACHE(mcbsp, SPCR1);
MCBSP_WRITE(mcbsp, SPCR1, w & ~rx);
idle = !((MCBSP_READ_CACHE(mcbsp, SPCR2) |
MCBSP_READ_CACHE(mcbsp, SPCR1)) & 1);
if (idle) {
/* Reset the sample rate generator */
w = MCBSP_READ_CACHE(mcbsp, SPCR2);
MCBSP_WRITE(mcbsp, SPCR2, w & ~(1 << 6));
}
if (mcbsp->st_data)
omap_mcbsp_st_stop(mcbsp);
}
#define max_thres(m) (mcbsp->pdata->buffer_size)
#define valid_threshold(m, val) ((val) <= max_thres(m))
#define THRESHOLD_PROP_BUILDER(prop) \
static ssize_t prop##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct omap_mcbsp *mcbsp = dev_get_drvdata(dev); \
\
return sysfs_emit(buf, "%u\n", mcbsp->prop); \
} \
\
static ssize_t prop##_store(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t size) \
{ \
struct omap_mcbsp *mcbsp = dev_get_drvdata(dev); \
unsigned long val; \
int status; \
\
status = kstrtoul(buf, 0, &val); \
if (status) \
return status; \
\
if (!valid_threshold(mcbsp, val)) \
return -EDOM; \
\
mcbsp->prop = val; \
return size; \
} \
\
static DEVICE_ATTR_RW(prop)
THRESHOLD_PROP_BUILDER(max_tx_thres);
THRESHOLD_PROP_BUILDER(max_rx_thres);
static const char * const dma_op_modes[] = {
"element", "threshold",
};
static ssize_t dma_op_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
int dma_op_mode, i = 0;
ssize_t len = 0;
const char * const *s;
dma_op_mode = mcbsp->dma_op_mode;
for (s = &dma_op_modes[i]; i < ARRAY_SIZE(dma_op_modes); s++, i++) {
if (dma_op_mode == i)
len += sysfs_emit_at(buf, len, "[%s] ", *s);
else
len += sysfs_emit_at(buf, len, "%s ", *s);
}
len += sysfs_emit_at(buf, len, "\n");
return len;
}
static ssize_t dma_op_mode_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t size)
{
struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
int i;
i = sysfs_match_string(dma_op_modes, buf);
if (i < 0)
return i;
spin_lock_irq(&mcbsp->lock);
if (!mcbsp->free) {
size = -EBUSY;
goto unlock;
}
mcbsp->dma_op_mode = i;
unlock:
spin_unlock_irq(&mcbsp->lock);
return size;
}
static DEVICE_ATTR_RW(dma_op_mode);
static const struct attribute *additional_attrs[] = {
&dev_attr_max_tx_thres.attr,
&dev_attr_max_rx_thres.attr,
&dev_attr_dma_op_mode.attr,
NULL,
};
static const struct attribute_group additional_attr_group = {
.attrs = (struct attribute **)additional_attrs,
};
/*
* McBSP1 and McBSP3 are directly mapped on 1610 and 1510.
* 730 has only 2 McBSP, and both of them are MPU peripherals.
*/
static int omap_mcbsp_init(struct platform_device *pdev)
{
struct omap_mcbsp *mcbsp = platform_get_drvdata(pdev);
struct resource *res;
int ret;
spin_lock_init(&mcbsp->lock);
mcbsp->free = true;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpu");
if (!res)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mcbsp->io_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mcbsp->io_base))
return PTR_ERR(mcbsp->io_base);
mcbsp->phys_base = res->start;
mcbsp->reg_cache_size = resource_size(res);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
if (!res)
mcbsp->phys_dma_base = mcbsp->phys_base;
else
mcbsp->phys_dma_base = res->start;
/*
* OMAP1 and OMAP2 use two interrupt lines: TX, RX
* OMAP2430, OMAP3 SoC have combined IRQ line as well.
* OMAP4 and newer SoC only have the combined IRQ line.
* Use the combined IRQ if available since it gives better debugging
* possibilities.
*/
mcbsp->irq = platform_get_irq_byname(pdev, "common");
if (mcbsp->irq == -ENXIO) {
mcbsp->tx_irq = platform_get_irq_byname(pdev, "tx");
if (mcbsp->tx_irq == -ENXIO) {
mcbsp->irq = platform_get_irq(pdev, 0);
mcbsp->tx_irq = 0;
} else {
mcbsp->rx_irq = platform_get_irq_byname(pdev, "rx");
mcbsp->irq = 0;
}
}
if (!pdev->dev.of_node) {
res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
if (!res) {
dev_err(&pdev->dev, "invalid tx DMA channel\n");
return -ENODEV;
}
mcbsp->dma_req[0] = res->start;
mcbsp->dma_data[0].filter_data = &mcbsp->dma_req[0];
res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
if (!res) {
dev_err(&pdev->dev, "invalid rx DMA channel\n");
return -ENODEV;
}
mcbsp->dma_req[1] = res->start;
mcbsp->dma_data[1].filter_data = &mcbsp->dma_req[1];
} else {
mcbsp->dma_data[0].filter_data = "tx";
mcbsp->dma_data[1].filter_data = "rx";
}
mcbsp->dma_data[0].addr = omap_mcbsp_dma_reg_params(mcbsp,
SNDRV_PCM_STREAM_PLAYBACK);
mcbsp->dma_data[1].addr = omap_mcbsp_dma_reg_params(mcbsp,
SNDRV_PCM_STREAM_CAPTURE);
mcbsp->fclk = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(mcbsp->fclk)) {
ret = PTR_ERR(mcbsp->fclk);
dev_err(mcbsp->dev, "unable to get fck: %d\n", ret);
return ret;
}
mcbsp->dma_op_mode = MCBSP_DMA_MODE_ELEMENT;
if (mcbsp->pdata->buffer_size) {
/*
* Initially configure the maximum thresholds to a safe value.
* The McBSP FIFO usage with these values should not go under
* 16 locations.
* If the whole FIFO without safety buffer is used, then there
* is a possibility that the DMA will not be able to push the
* new data on time, causing channel shifts at runtime.
*/
mcbsp->max_tx_thres = max_thres(mcbsp) - 0x10;
mcbsp->max_rx_thres = max_thres(mcbsp) - 0x10;
ret = devm_device_add_group(mcbsp->dev, &additional_attr_group);
if (ret) {
dev_err(mcbsp->dev,
"Unable to create additional controls\n");
return ret;
}
}
return omap_mcbsp_st_init(pdev);
}
/*
* Stream DMA parameters. DMA request line and port address are set at runtime
* since they are different between OMAP1 and later OMAPs
*/
static void omap_mcbsp_set_threshold(struct snd_pcm_substream *substream,
unsigned int packet_size)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
int words;
/* No need to proceed further if McBSP does not have FIFO */
if (mcbsp->pdata->buffer_size == 0)
return;
/*
* Configure McBSP threshold based on either:
* packet_size, when the sDMA is in packet mode, or based on the
* period size in THRESHOLD mode, otherwise use McBSP threshold = 1
* for mono streams.
*/
if (packet_size)
words = packet_size;
else
words = 1;
/* Configure McBSP internal buffer usage */
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
omap_mcbsp_set_tx_threshold(mcbsp, words);
else
omap_mcbsp_set_rx_threshold(mcbsp, words);
}
static int omap_mcbsp_hwrule_min_buffersize(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct snd_interval *buffer_size = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_BUFFER_SIZE);
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
struct omap_mcbsp *mcbsp = rule->private;
struct snd_interval frames;
int size;
snd_interval_any(&frames);
size = mcbsp->pdata->buffer_size;
frames.min = size / channels->min;
frames.integer = 1;
return snd_interval_refine(buffer_size, &frames);
}
static int omap_mcbsp_dai_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
int err = 0;
if (!snd_soc_dai_active(cpu_dai))
err = omap_mcbsp_request(mcbsp);
/*
* OMAP3 McBSP FIFO is word structured.
* McBSP2 has a 1024 + 256 = 1280 word long buffer,
* McBSP1,3,4,5 have a 128 word long buffer
* This means that the size of the FIFO depends on the sample format.
* For example on McBSP3:
* 16bit samples: size is 128 * 2 = 256 bytes
* 32bit samples: size is 128 * 4 = 512 bytes
* It is simpler to place constraint for buffer and period based on
* channels.
* McBSP3 as example again (16 or 32 bit samples):
* 1 channel (mono): size is 128 frames (128 words)
* 2 channels (stereo): size is 128 / 2 = 64 frames (2 * 64 words)
* 4 channels: size is 128 / 4 = 32 frames (4 * 32 words)
*/
if (mcbsp->pdata->buffer_size) {
/*
* Rule for the buffer size. We should not allow a buffer
* smaller than the FIFO size, to avoid underruns.
* This applies only for the playback stream.
*/
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
snd_pcm_hw_rule_add(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
omap_mcbsp_hwrule_min_buffersize,
mcbsp,
SNDRV_PCM_HW_PARAM_CHANNELS, -1);
/* Make sure that the period size is always even */
snd_pcm_hw_constraint_step(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2);
}
return err;
}
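/*
 * Illustrative sketch (not part of the driver): the per-channel minimum
 * buffer size that the hw rule above enforces. A 128-word FIFO (McBSP3)
 * allows a 64-frame minimum buffer for stereo and 32 frames for four
 * channels; the helper name is hypothetical.
 */
static inline unsigned int example_min_buffer_frames(unsigned int fifo_words,
						     unsigned int channels)
{
	return fifo_words / channels;	/* 128 / 2 == 64, 128 / 4 == 32 */
}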
static void omap_mcbsp_dai_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
if (mcbsp->latency[stream2])
cpu_latency_qos_update_request(&mcbsp->pm_qos_req,
mcbsp->latency[stream2]);
else if (mcbsp->latency[stream1])
cpu_latency_qos_remove_request(&mcbsp->pm_qos_req);
mcbsp->latency[stream1] = 0;
if (!snd_soc_dai_active(cpu_dai)) {
omap_mcbsp_free(mcbsp);
mcbsp->configured = 0;
}
}
static int omap_mcbsp_dai_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
struct pm_qos_request *pm_qos_req = &mcbsp->pm_qos_req;
int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
int latency = mcbsp->latency[stream2];
/* Prevent the OMAP hardware from hitting the OFF state between FIFO fills */
if (!latency || mcbsp->latency[stream1] < latency)
latency = mcbsp->latency[stream1];
if (cpu_latency_qos_request_active(pm_qos_req))
cpu_latency_qos_update_request(pm_qos_req, latency);
else if (latency)
cpu_latency_qos_add_request(pm_qos_req, latency);
return 0;
}
static int omap_mcbsp_dai_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *cpu_dai)
{
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
mcbsp->active++;
omap_mcbsp_start(mcbsp, substream->stream);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
omap_mcbsp_stop(mcbsp, substream->stream);
mcbsp->active--;
break;
default:
return -EINVAL;
}
return 0;
}
static snd_pcm_sframes_t omap_mcbsp_dai_delay(
struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
u16 fifo_use;
snd_pcm_sframes_t delay;
/* No need to proceed further if McBSP does not have FIFO */
if (mcbsp->pdata->buffer_size == 0)
return 0;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
fifo_use = omap_mcbsp_get_tx_delay(mcbsp);
else
fifo_use = omap_mcbsp_get_rx_delay(mcbsp);
/*
* Divide the used locations with the channel count to get the
* FIFO usage in samples (don't care about partial samples in the
* buffer).
*/
delay = fifo_use / substream->runtime->channels;
return delay;
}
static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *cpu_dai)
{
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
struct omap_mcbsp_reg_cfg *regs = &mcbsp->cfg_regs;
struct snd_dmaengine_dai_dma_data *dma_data;
int wlen, channels, wpf;
int pkt_size = 0;
unsigned int format, div, framesize, master;
unsigned int buffer_size = mcbsp->pdata->buffer_size;
dma_data = snd_soc_dai_get_dma_data(cpu_dai, substream);
channels = params_channels(params);
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
wlen = 16;
break;
case SNDRV_PCM_FORMAT_S32_LE:
wlen = 32;
break;
default:
return -EINVAL;
}
if (buffer_size) {
int latency;
if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD) {
int period_words, max_thrsh;
int divider = 0;
period_words = params_period_bytes(params) / (wlen / 8);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
max_thrsh = mcbsp->max_tx_thres;
else
max_thrsh = mcbsp->max_rx_thres;
			/*
			 * Use sDMA packet mode if McBSP is in threshold mode:
			 * if the period word count is less than the FIFO
			 * size, the packet size is set to the number of
			 * period words, otherwise look for the biggest
			 * threshold value which divides the period size
			 * evenly.
			 */
divider = period_words / max_thrsh;
if (period_words % max_thrsh)
divider++;
while (period_words % divider &&
divider < period_words)
divider++;
if (divider == period_words)
return -EINVAL;
pkt_size = period_words / divider;
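			/*
			 * Illustrative example (not from the original code):
			 * with period_words = 480 and max_thrsh = 256 the
			 * divider starts at 2, which already divides 480
			 * evenly, so pkt_size = 240 words per sDMA packet.
			 */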
} else if (channels > 1) {
			/* Use packet mode for non-mono streams */
pkt_size = channels;
}
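		/*
		 * The achievable wakeup latency is the time the remaining
		 * FIFO headroom (buffer_size - pkt_size, per channel) buys at
		 * the current sample rate, converted to microseconds below.
		 */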
latency = (buffer_size - pkt_size) / channels;
latency = latency * USEC_PER_SEC /
(params->rate_num / params->rate_den);
mcbsp->latency[substream->stream] = latency;
omap_mcbsp_set_threshold(substream, pkt_size);
}
dma_data->maxburst = pkt_size;
if (mcbsp->configured) {
/* McBSP already configured by another stream */
return 0;
}
regs->rcr2 &= ~(RPHASE | RFRLEN2(0x7f) | RWDLEN2(7));
regs->xcr2 &= ~(RPHASE | XFRLEN2(0x7f) | XWDLEN2(7));
regs->rcr1 &= ~(RFRLEN1(0x7f) | RWDLEN1(7));
regs->xcr1 &= ~(XFRLEN1(0x7f) | XWDLEN1(7));
format = mcbsp->fmt & SND_SOC_DAIFMT_FORMAT_MASK;
wpf = channels;
if (channels == 2 && (format == SND_SOC_DAIFMT_I2S ||
format == SND_SOC_DAIFMT_LEFT_J)) {
/* Use dual-phase frames */
regs->rcr2 |= RPHASE;
regs->xcr2 |= XPHASE;
/* Set 1 word per (McBSP) frame for phase1 and phase2 */
wpf--;
regs->rcr2 |= RFRLEN2(wpf - 1);
regs->xcr2 |= XFRLEN2(wpf - 1);
}
regs->rcr1 |= RFRLEN1(wpf - 1);
regs->xcr1 |= XFRLEN1(wpf - 1);
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
/* Set word lengths */
regs->rcr2 |= RWDLEN2(OMAP_MCBSP_WORD_16);
regs->rcr1 |= RWDLEN1(OMAP_MCBSP_WORD_16);
regs->xcr2 |= XWDLEN2(OMAP_MCBSP_WORD_16);
regs->xcr1 |= XWDLEN1(OMAP_MCBSP_WORD_16);
break;
case SNDRV_PCM_FORMAT_S32_LE:
/* Set word lengths */
regs->rcr2 |= RWDLEN2(OMAP_MCBSP_WORD_32);
regs->rcr1 |= RWDLEN1(OMAP_MCBSP_WORD_32);
regs->xcr2 |= XWDLEN2(OMAP_MCBSP_WORD_32);
regs->xcr1 |= XWDLEN1(OMAP_MCBSP_WORD_32);
break;
default:
/* Unsupported PCM format */
return -EINVAL;
}
	/*
	 * In McBSP master modes, FRAME (i.e. sample rate) is generated by
	 * _counting_ BCLKs. Calculate frame size in BCLKs.
	 */
master = mcbsp->fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK;
if (master == SND_SOC_DAIFMT_BP_FP) {
div = mcbsp->clk_div ? mcbsp->clk_div : 1;
framesize = (mcbsp->in_freq / div) / params_rate(params);
if (framesize < wlen * channels) {
			printk(KERN_ERR "%s: not enough bandwidth for desired rate and channels\n",
			       __func__);
return -EINVAL;
}
} else
framesize = wlen * channels;
/* Set FS period and length in terms of bit clock periods */
regs->srgr2 &= ~FPER(0xfff);
regs->srgr1 &= ~FWID(0xff);
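	/*
	 * I2S and LEFT_J use a frame sync that stays active for half of the
	 * frame (framesize / 2 bit clocks), while the DSP formats use a
	 * single bit-clock wide pulse (FWID(0)).
	 */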
switch (format) {
case SND_SOC_DAIFMT_I2S:
case SND_SOC_DAIFMT_LEFT_J:
regs->srgr2 |= FPER(framesize - 1);
regs->srgr1 |= FWID((framesize >> 1) - 1);
break;
case SND_SOC_DAIFMT_DSP_A:
case SND_SOC_DAIFMT_DSP_B:
regs->srgr2 |= FPER(framesize - 1);
regs->srgr1 |= FWID(0);
break;
}
omap_mcbsp_config(mcbsp, &mcbsp->cfg_regs);
mcbsp->wlen = wlen;
mcbsp->configured = 1;
return 0;
}
/*
* This must be called before _set_clkdiv and _set_sysclk since McBSP register
* cache is initialized here
*/
static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
unsigned int fmt)
{
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
struct omap_mcbsp_reg_cfg *regs = &mcbsp->cfg_regs;
bool inv_fs = false;
if (mcbsp->configured)
return 0;
mcbsp->fmt = fmt;
memset(regs, 0, sizeof(*regs));
/* Generic McBSP register settings */
regs->spcr2 |= XINTM(3) | FREE;
regs->spcr1 |= RINTM(3);
/* RFIG and XFIG are not defined in 2430 and on OMAP3+ */
if (!mcbsp->pdata->has_ccr) {
regs->rcr2 |= RFIG;
regs->xcr2 |= XFIG;
}
/* Configure XCCR/RCCR only for revisions which have ccr registers */
if (mcbsp->pdata->has_ccr) {
regs->xccr = DXENDLY(1) | XDMAEN | XDISABLE;
regs->rccr = RFULL_CYCLE | RDMAEN | RDISABLE;
}
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
/* 1-bit data delay */
regs->rcr2 |= RDATDLY(1);
regs->xcr2 |= XDATDLY(1);
break;
case SND_SOC_DAIFMT_LEFT_J:
/* 0-bit data delay */
regs->rcr2 |= RDATDLY(0);
regs->xcr2 |= XDATDLY(0);
regs->spcr1 |= RJUST(2);
/* Invert FS polarity configuration */
inv_fs = true;
break;
case SND_SOC_DAIFMT_DSP_A:
/* 1-bit data delay */
regs->rcr2 |= RDATDLY(1);
regs->xcr2 |= XDATDLY(1);
/* Invert FS polarity configuration */
inv_fs = true;
break;
case SND_SOC_DAIFMT_DSP_B:
/* 0-bit data delay */
regs->rcr2 |= RDATDLY(0);
regs->xcr2 |= XDATDLY(0);
/* Invert FS polarity configuration */
inv_fs = true;
break;
default:
/* Unsupported data format */
return -EINVAL;
}
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
case SND_SOC_DAIFMT_BP_FP:
/* McBSP master. Set FS and bit clocks as outputs */
regs->pcr0 |= FSXM | FSRM |
CLKXM | CLKRM;
/* Sample rate generator drives the FS */
regs->srgr2 |= FSGM;
break;
case SND_SOC_DAIFMT_BC_FP:
/* McBSP slave. FS clock as output */
regs->srgr2 |= FSGM;
regs->pcr0 |= FSXM | FSRM;
break;
case SND_SOC_DAIFMT_BC_FC:
/* McBSP slave */
break;
default:
/* Unsupported master/slave configuration */
return -EINVAL;
}
/* Set bit clock (CLKX/CLKR) and FS polarities */
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
/*
* Normal BCLK + FS.
* FS active low. TX data driven on falling edge of bit clock
* and RX data sampled on rising edge of bit clock.
*/
regs->pcr0 |= FSXP | FSRP |
CLKXP | CLKRP;
break;
case SND_SOC_DAIFMT_NB_IF:
regs->pcr0 |= CLKXP | CLKRP;
break;
case SND_SOC_DAIFMT_IB_NF:
regs->pcr0 |= FSXP | FSRP;
break;
case SND_SOC_DAIFMT_IB_IF:
break;
default:
return -EINVAL;
}
if (inv_fs)
regs->pcr0 ^= FSXP | FSRP;
return 0;
}
static int omap_mcbsp_dai_set_clkdiv(struct snd_soc_dai *cpu_dai,
int div_id, int div)
{
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
struct omap_mcbsp_reg_cfg *regs = &mcbsp->cfg_regs;
if (div_id != OMAP_MCBSP_CLKGDV)
return -ENODEV;
mcbsp->clk_div = div;
regs->srgr1 &= ~CLKGDV(0xff);
regs->srgr1 |= CLKGDV(div - 1);
return 0;
}
static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
int clk_id, unsigned int freq,
int dir)
{
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
struct omap_mcbsp_reg_cfg *regs = &mcbsp->cfg_regs;
int err = 0;
if (mcbsp->active) {
if (freq == mcbsp->in_freq)
return 0;
else
return -EBUSY;
}
mcbsp->in_freq = freq;
regs->srgr2 &= ~CLKSM;
regs->pcr0 &= ~SCLKME;
switch (clk_id) {
case OMAP_MCBSP_SYSCLK_CLK:
regs->srgr2 |= CLKSM;
break;
case OMAP_MCBSP_SYSCLK_CLKS_FCLK:
if (mcbsp_omap1()) {
err = -EINVAL;
break;
}
err = omap2_mcbsp_set_clks_src(mcbsp,
MCBSP_CLKS_PRCM_SRC);
break;
case OMAP_MCBSP_SYSCLK_CLKS_EXT:
if (mcbsp_omap1()) {
err = 0;
break;
}
err = omap2_mcbsp_set_clks_src(mcbsp,
MCBSP_CLKS_PAD_SRC);
break;
case OMAP_MCBSP_SYSCLK_CLKX_EXT:
regs->srgr2 |= CLKSM;
regs->pcr0 |= SCLKME;
/*
		 * If McBSP is master and yet the CLKX/CLKR pin drives the SRG,
		 * disable output on those pins. This makes it possible to
		 * inject the reference clock through CLKX/CLKR. For this to
		 * work, set_dai_sysclk() _needs_ to be called after
		 * set_dai_fmt().
*/
regs->pcr0 &= ~CLKXM;
break;
case OMAP_MCBSP_SYSCLK_CLKR_EXT:
regs->pcr0 |= SCLKME;
		/* Disable output on the CLKR pin in master mode */
regs->pcr0 &= ~CLKRM;
break;
default:
err = -ENODEV;
}
return err;
}
static int omap_mcbsp_probe(struct snd_soc_dai *dai)
{
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(dai);
pm_runtime_enable(mcbsp->dev);
snd_soc_dai_init_dma_data(dai,
&mcbsp->dma_data[SNDRV_PCM_STREAM_PLAYBACK],
&mcbsp->dma_data[SNDRV_PCM_STREAM_CAPTURE]);
return 0;
}
static int omap_mcbsp_remove(struct snd_soc_dai *dai)
{
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(dai);
pm_runtime_disable(mcbsp->dev);
return 0;
}
static const struct snd_soc_dai_ops mcbsp_dai_ops = {
.probe = omap_mcbsp_probe,
.remove = omap_mcbsp_remove,
.startup = omap_mcbsp_dai_startup,
.shutdown = omap_mcbsp_dai_shutdown,
.prepare = omap_mcbsp_dai_prepare,
.trigger = omap_mcbsp_dai_trigger,
.delay = omap_mcbsp_dai_delay,
.hw_params = omap_mcbsp_dai_hw_params,
.set_fmt = omap_mcbsp_dai_set_dai_fmt,
.set_clkdiv = omap_mcbsp_dai_set_clkdiv,
.set_sysclk = omap_mcbsp_dai_set_dai_sysclk,
};
static struct snd_soc_dai_driver omap_mcbsp_dai = {
.playback = {
.channels_min = 1,
.channels_max = 16,
.rates = OMAP_MCBSP_RATES,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE,
},
.capture = {
.channels_min = 1,
.channels_max = 16,
.rates = OMAP_MCBSP_RATES,
.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE,
},
.ops = &mcbsp_dai_ops,
};
static const struct snd_soc_component_driver omap_mcbsp_component = {
.name = "omap-mcbsp",
.legacy_dai_naming = 1,
};
static struct omap_mcbsp_platform_data omap2420_pdata = {
.reg_step = 4,
.reg_size = 2,
};
static struct omap_mcbsp_platform_data omap2430_pdata = {
.reg_step = 4,
.reg_size = 4,
.has_ccr = true,
};
static struct omap_mcbsp_platform_data omap3_pdata = {
.reg_step = 4,
.reg_size = 4,
.has_ccr = true,
.has_wakeup = true,
};
static struct omap_mcbsp_platform_data omap4_pdata = {
.reg_step = 4,
.reg_size = 4,
.has_ccr = true,
.has_wakeup = true,
};
static const struct of_device_id omap_mcbsp_of_match[] = {
{
.compatible = "ti,omap2420-mcbsp",
.data = &omap2420_pdata,
},
{
.compatible = "ti,omap2430-mcbsp",
.data = &omap2430_pdata,
},
{
.compatible = "ti,omap3-mcbsp",
.data = &omap3_pdata,
},
{
.compatible = "ti,omap4-mcbsp",
.data = &omap4_pdata,
},
{ },
};
MODULE_DEVICE_TABLE(of, omap_mcbsp_of_match);
static int asoc_mcbsp_probe(struct platform_device *pdev)
{
struct omap_mcbsp_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct omap_mcbsp *mcbsp;
const struct of_device_id *match;
int ret;
match = of_match_device(omap_mcbsp_of_match, &pdev->dev);
if (match) {
struct device_node *node = pdev->dev.of_node;
struct omap_mcbsp_platform_data *pdata_quirk = pdata;
int buffer_size;
pdata = devm_kzalloc(&pdev->dev,
sizeof(struct omap_mcbsp_platform_data),
GFP_KERNEL);
if (!pdata)
return -ENOMEM;
memcpy(pdata, match->data, sizeof(*pdata));
if (!of_property_read_u32(node, "ti,buffer-size", &buffer_size))
pdata->buffer_size = buffer_size;
if (pdata_quirk)
pdata->force_ick_on = pdata_quirk->force_ick_on;
} else if (!pdata) {
dev_err(&pdev->dev, "missing platform data.\n");
return -EINVAL;
}
mcbsp = devm_kzalloc(&pdev->dev, sizeof(struct omap_mcbsp), GFP_KERNEL);
if (!mcbsp)
return -ENOMEM;
mcbsp->id = pdev->id;
mcbsp->pdata = pdata;
mcbsp->dev = &pdev->dev;
platform_set_drvdata(pdev, mcbsp);
ret = omap_mcbsp_init(pdev);
if (ret)
return ret;
if (mcbsp->pdata->reg_size == 2) {
omap_mcbsp_dai.playback.formats = SNDRV_PCM_FMTBIT_S16_LE;
omap_mcbsp_dai.capture.formats = SNDRV_PCM_FMTBIT_S16_LE;
}
ret = devm_snd_soc_register_component(&pdev->dev,
&omap_mcbsp_component,
&omap_mcbsp_dai, 1);
if (ret)
return ret;
return sdma_pcm_platform_register(&pdev->dev, "tx", "rx");
}
static void asoc_mcbsp_remove(struct platform_device *pdev)
{
struct omap_mcbsp *mcbsp = platform_get_drvdata(pdev);
if (mcbsp->pdata->ops && mcbsp->pdata->ops->free)
mcbsp->pdata->ops->free(mcbsp->id);
if (cpu_latency_qos_request_active(&mcbsp->pm_qos_req))
cpu_latency_qos_remove_request(&mcbsp->pm_qos_req);
}
static struct platform_driver asoc_mcbsp_driver = {
.driver = {
.name = "omap-mcbsp",
.of_match_table = omap_mcbsp_of_match,
},
.probe = asoc_mcbsp_probe,
.remove_new = asoc_mcbsp_remove,
};
module_platform_driver(asoc_mcbsp_driver);
MODULE_AUTHOR("Jarkko Nikula <[email protected]>");
MODULE_DESCRIPTION("OMAP I2S SoC Interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:omap-mcbsp");
| linux-master | sound/soc/ti/omap-mcbsp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* McBSP Sidetone support
*
* Copyright (C) 2004 Nokia Corporation
* Author: Samuel Ortiz <[email protected]>
*
* Contact: Jarkko Nikula <[email protected]>
* Peter Ujfalusi <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include "omap-mcbsp.h"
#include "omap-mcbsp-priv.h"
/* OMAP3 sidetone control registers */
#define OMAP_ST_REG_REV 0x00
#define OMAP_ST_REG_SYSCONFIG 0x10
#define OMAP_ST_REG_IRQSTATUS 0x18
#define OMAP_ST_REG_IRQENABLE 0x1C
#define OMAP_ST_REG_SGAINCR 0x24
#define OMAP_ST_REG_SFIRCR 0x28
#define OMAP_ST_REG_SSELCR 0x2C
/********************** McBSP SSELCR bit definitions ***********************/
#define SIDETONEEN BIT(10)
/********************** McBSP Sidetone SYSCONFIG bit definitions ***********/
#define ST_AUTOIDLE BIT(0)
/********************** McBSP Sidetone SGAINCR bit definitions *************/
#define ST_CH0GAIN(value) ((value) & 0xffff) /* Bits 0:15 */
#define ST_CH1GAIN(value) (((value) & 0xffff) << 16) /* Bits 16:31 */
/********************** McBSP Sidetone SFIRCR bit definitions **************/
#define ST_FIRCOEFF(value) ((value) & 0xffff) /* Bits 0:15 */
/********************** McBSP Sidetone SSELCR bit definitions **************/
#define ST_SIDETONEEN BIT(0)
#define ST_COEFFWREN BIT(1)
#define ST_COEFFWRDONE BIT(2)
struct omap_mcbsp_st_data {
void __iomem *io_base_st;
struct clk *mcbsp_iclk;
bool running;
bool enabled;
s16 taps[128]; /* Sidetone filter coefficients */
int nr_taps; /* Number of filter coefficients in use */
s16 ch0gain;
s16 ch1gain;
};
static void omap_mcbsp_st_write(struct omap_mcbsp *mcbsp, u16 reg, u32 val)
{
writel_relaxed(val, mcbsp->st_data->io_base_st + reg);
}
static int omap_mcbsp_st_read(struct omap_mcbsp *mcbsp, u16 reg)
{
return readl_relaxed(mcbsp->st_data->io_base_st + reg);
}
#define MCBSP_ST_READ(mcbsp, reg) omap_mcbsp_st_read(mcbsp, OMAP_ST_REG_##reg)
#define MCBSP_ST_WRITE(mcbsp, reg, val) \
omap_mcbsp_st_write(mcbsp, OMAP_ST_REG_##reg, val)
static void omap_mcbsp_st_on(struct omap_mcbsp *mcbsp)
{
unsigned int w;
if (mcbsp->pdata->force_ick_on)
mcbsp->pdata->force_ick_on(mcbsp->st_data->mcbsp_iclk, true);
/* Disable Sidetone clock auto-gating for normal operation */
w = MCBSP_ST_READ(mcbsp, SYSCONFIG);
MCBSP_ST_WRITE(mcbsp, SYSCONFIG, w & ~(ST_AUTOIDLE));
/* Enable McBSP Sidetone */
w = MCBSP_READ(mcbsp, SSELCR);
MCBSP_WRITE(mcbsp, SSELCR, w | SIDETONEEN);
/* Enable Sidetone from Sidetone Core */
w = MCBSP_ST_READ(mcbsp, SSELCR);
MCBSP_ST_WRITE(mcbsp, SSELCR, w | ST_SIDETONEEN);
}
static void omap_mcbsp_st_off(struct omap_mcbsp *mcbsp)
{
unsigned int w;
w = MCBSP_ST_READ(mcbsp, SSELCR);
MCBSP_ST_WRITE(mcbsp, SSELCR, w & ~(ST_SIDETONEEN));
w = MCBSP_READ(mcbsp, SSELCR);
MCBSP_WRITE(mcbsp, SSELCR, w & ~(SIDETONEEN));
/* Enable Sidetone clock auto-gating to reduce power consumption */
w = MCBSP_ST_READ(mcbsp, SYSCONFIG);
MCBSP_ST_WRITE(mcbsp, SYSCONFIG, w | ST_AUTOIDLE);
if (mcbsp->pdata->force_ick_on)
mcbsp->pdata->force_ick_on(mcbsp->st_data->mcbsp_iclk, false);
}
static void omap_mcbsp_st_fir_write(struct omap_mcbsp *mcbsp, s16 *fir)
{
u16 val, i;
val = MCBSP_ST_READ(mcbsp, SSELCR);
if (val & ST_COEFFWREN)
MCBSP_ST_WRITE(mcbsp, SSELCR, val & ~(ST_COEFFWREN));
MCBSP_ST_WRITE(mcbsp, SSELCR, val | ST_COEFFWREN);
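	/*
	 * With coefficient write enabled, push all 128 FIR taps into SFIRCR,
	 * then poll SSELCR (bounded to 1000 reads) for the write-done flag
	 * before clearing the write enable again.
	 */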
for (i = 0; i < 128; i++)
MCBSP_ST_WRITE(mcbsp, SFIRCR, fir[i]);
i = 0;
val = MCBSP_ST_READ(mcbsp, SSELCR);
while (!(val & ST_COEFFWRDONE) && (++i < 1000))
val = MCBSP_ST_READ(mcbsp, SSELCR);
MCBSP_ST_WRITE(mcbsp, SSELCR, val & ~(ST_COEFFWREN));
if (i == 1000)
dev_err(mcbsp->dev, "McBSP FIR load error!\n");
}
static void omap_mcbsp_st_chgain(struct omap_mcbsp *mcbsp)
{
struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
MCBSP_ST_WRITE(mcbsp, SGAINCR, ST_CH0GAIN(st_data->ch0gain) |
ST_CH1GAIN(st_data->ch1gain));
}
static int omap_mcbsp_st_set_chgain(struct omap_mcbsp *mcbsp, int channel,
s16 chgain)
{
struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
int ret = 0;
if (!st_data)
return -ENOENT;
spin_lock_irq(&mcbsp->lock);
if (channel == 0)
st_data->ch0gain = chgain;
else if (channel == 1)
st_data->ch1gain = chgain;
else
ret = -EINVAL;
if (st_data->enabled)
omap_mcbsp_st_chgain(mcbsp);
spin_unlock_irq(&mcbsp->lock);
return ret;
}
static int omap_mcbsp_st_get_chgain(struct omap_mcbsp *mcbsp, int channel,
s16 *chgain)
{
struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
int ret = 0;
if (!st_data)
return -ENOENT;
spin_lock_irq(&mcbsp->lock);
if (channel == 0)
*chgain = st_data->ch0gain;
else if (channel == 1)
*chgain = st_data->ch1gain;
else
ret = -EINVAL;
spin_unlock_irq(&mcbsp->lock);
return ret;
}
static int omap_mcbsp_st_enable(struct omap_mcbsp *mcbsp)
{
struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
if (!st_data)
return -ENODEV;
spin_lock_irq(&mcbsp->lock);
st_data->enabled = 1;
omap_mcbsp_st_start(mcbsp);
spin_unlock_irq(&mcbsp->lock);
return 0;
}
static int omap_mcbsp_st_disable(struct omap_mcbsp *mcbsp)
{
struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
int ret = 0;
if (!st_data)
return -ENODEV;
spin_lock_irq(&mcbsp->lock);
omap_mcbsp_st_stop(mcbsp);
st_data->enabled = 0;
spin_unlock_irq(&mcbsp->lock);
return ret;
}
static int omap_mcbsp_st_is_enabled(struct omap_mcbsp *mcbsp)
{
struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
if (!st_data)
return -ENODEV;
return st_data->enabled;
}
static ssize_t st_taps_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
ssize_t status = 0;
int i;
spin_lock_irq(&mcbsp->lock);
for (i = 0; i < st_data->nr_taps; i++)
status += sysfs_emit_at(buf, status, (i ? ", %d" : "%d"),
st_data->taps[i]);
if (i)
status += sysfs_emit_at(buf, status, "\n");
spin_unlock_irq(&mcbsp->lock);
return status;
}
static ssize_t st_taps_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
int val, tmp, status, i = 0;
spin_lock_irq(&mcbsp->lock);
memset(st_data->taps, 0, sizeof(st_data->taps));
st_data->nr_taps = 0;
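	/*
	 * Parse a comma-separated list of signed 16-bit coefficients from the
	 * sysfs buffer into the (up to 128 entry) taps array; a malformed
	 * token or a value outside the s16 range aborts the store with
	 * -EINVAL.
	 */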
do {
status = sscanf(buf, "%d%n", &val, &tmp);
if (status < 0 || status == 0) {
size = -EINVAL;
goto out;
}
if (val < -32768 || val > 32767) {
size = -EINVAL;
goto out;
}
st_data->taps[i++] = val;
buf += tmp;
if (*buf != ',')
break;
buf++;
} while (1);
st_data->nr_taps = i;
out:
spin_unlock_irq(&mcbsp->lock);
return size;
}
static DEVICE_ATTR_RW(st_taps);
static const struct attribute *sidetone_attrs[] = {
&dev_attr_st_taps.attr,
NULL,
};
static const struct attribute_group sidetone_attr_group = {
.attrs = (struct attribute **)sidetone_attrs,
};
int omap_mcbsp_st_start(struct omap_mcbsp *mcbsp)
{
struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
if (st_data->enabled && !st_data->running) {
omap_mcbsp_st_fir_write(mcbsp, st_data->taps);
omap_mcbsp_st_chgain(mcbsp);
if (!mcbsp->free) {
omap_mcbsp_st_on(mcbsp);
st_data->running = 1;
}
}
return 0;
}
int omap_mcbsp_st_stop(struct omap_mcbsp *mcbsp)
{
struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
if (st_data->running) {
if (!mcbsp->free) {
omap_mcbsp_st_off(mcbsp);
st_data->running = 0;
}
}
return 0;
}
int omap_mcbsp_st_init(struct platform_device *pdev)
{
struct omap_mcbsp *mcbsp = platform_get_drvdata(pdev);
struct omap_mcbsp_st_data *st_data;
struct resource *res;
int ret;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sidetone");
if (!res)
return 0;
st_data = devm_kzalloc(mcbsp->dev, sizeof(*mcbsp->st_data), GFP_KERNEL);
if (!st_data)
return -ENOMEM;
st_data->mcbsp_iclk = devm_clk_get(mcbsp->dev, "ick");
if (IS_ERR(st_data->mcbsp_iclk)) {
dev_warn(mcbsp->dev,
"Failed to get ick, sidetone might be broken\n");
st_data->mcbsp_iclk = NULL;
}
st_data->io_base_st = devm_ioremap(mcbsp->dev, res->start,
resource_size(res));
if (!st_data->io_base_st)
return -ENOMEM;
ret = devm_device_add_group(mcbsp->dev, &sidetone_attr_group);
if (ret)
return ret;
mcbsp->st_data = st_data;
return 0;
}
static int omap_mcbsp_st_info_volsw(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
int max = mc->max;
int min = mc->min;
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = min;
uinfo->value.integer.max = max;
return 0;
}
#define OMAP_MCBSP_ST_CHANNEL_VOLUME(channel) \
static int \
omap_mcbsp_set_st_ch##channel##_volume(struct snd_kcontrol *kc, \
struct snd_ctl_elem_value *uc) \
{ \
struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kc); \
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai); \
struct soc_mixer_control *mc = \
(struct soc_mixer_control *)kc->private_value; \
int max = mc->max; \
int min = mc->min; \
int val = uc->value.integer.value[0]; \
\
if (val < min || val > max) \
return -EINVAL; \
\
/* OMAP McBSP implementation uses index values 0..4 */ \
return omap_mcbsp_st_set_chgain(mcbsp, channel, val); \
} \
\
static int \
omap_mcbsp_get_st_ch##channel##_volume(struct snd_kcontrol *kc, \
struct snd_ctl_elem_value *uc) \
{ \
struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kc); \
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai); \
s16 chgain; \
\
if (omap_mcbsp_st_get_chgain(mcbsp, channel, &chgain)) \
return -EAGAIN; \
\
uc->value.integer.value[0] = chgain; \
return 0; \
}
OMAP_MCBSP_ST_CHANNEL_VOLUME(0)
OMAP_MCBSP_ST_CHANNEL_VOLUME(1)
static int omap_mcbsp_st_put_mode(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
u8 value = ucontrol->value.integer.value[0];
if (value == omap_mcbsp_st_is_enabled(mcbsp))
return 0;
if (value)
omap_mcbsp_st_enable(mcbsp);
else
omap_mcbsp_st_disable(mcbsp);
return 1;
}
static int omap_mcbsp_st_get_mode(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
ucontrol->value.integer.value[0] = omap_mcbsp_st_is_enabled(mcbsp);
return 0;
}
#define OMAP_MCBSP_SOC_SINGLE_S16_EXT(xname, xmin, xmax, \
xhandler_get, xhandler_put) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = omap_mcbsp_st_info_volsw, \
.get = xhandler_get, .put = xhandler_put, \
.private_value = (unsigned long)&(struct soc_mixer_control) \
{.min = xmin, .max = xmax} }
#define OMAP_MCBSP_ST_CONTROLS(port) \
static const struct snd_kcontrol_new omap_mcbsp##port##_st_controls[] = { \
SOC_SINGLE_EXT("McBSP" #port " Sidetone Switch", 1, 0, 1, 0, \
omap_mcbsp_st_get_mode, omap_mcbsp_st_put_mode), \
OMAP_MCBSP_SOC_SINGLE_S16_EXT("McBSP" #port " Sidetone Channel 0 Volume", \
-32768, 32767, \
omap_mcbsp_get_st_ch0_volume, \
omap_mcbsp_set_st_ch0_volume), \
OMAP_MCBSP_SOC_SINGLE_S16_EXT("McBSP" #port " Sidetone Channel 1 Volume", \
-32768, 32767, \
omap_mcbsp_get_st_ch1_volume, \
omap_mcbsp_set_st_ch1_volume), \
}
OMAP_MCBSP_ST_CONTROLS(2);
OMAP_MCBSP_ST_CONTROLS(3);
int omap_mcbsp_st_add_controls(struct snd_soc_pcm_runtime *rtd, int port_id)
{
struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
if (!mcbsp->st_data) {
dev_warn(mcbsp->dev, "No sidetone data for port\n");
return 0;
}
switch (port_id) {
case 2: /* McBSP 2 */
return snd_soc_add_dai_controls(cpu_dai,
omap_mcbsp2_st_controls,
ARRAY_SIZE(omap_mcbsp2_st_controls));
case 3: /* McBSP 3 */
return snd_soc_add_dai_controls(cpu_dai,
omap_mcbsp3_st_controls,
ARRAY_SIZE(omap_mcbsp3_st_controls));
default:
dev_err(mcbsp->dev, "Port %d not supported\n", port_id);
break;
}
return -EINVAL;
}
EXPORT_SYMBOL_GPL(omap_mcbsp_st_add_controls);
| linux-master | sound/soc/ti/omap-mcbsp-st.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* rx51.c -- SoC audio for Nokia RX-51
*
* Copyright (C) 2008 - 2009 Nokia Corporation
*
* Contact: Peter Ujfalusi <[email protected]>
* Eduardo Valentin <[email protected]>
* Jarkko Nikula <[email protected]>
*/
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/jack.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <linux/platform_data/asoc-ti-mcbsp.h>
#include <asm/mach-types.h>
#include "omap-mcbsp.h"
enum {
RX51_JACK_DISABLED,
RX51_JACK_TVOUT, /* tv-out with stereo output */
RX51_JACK_HP, /* headphone: stereo output, no mic */
RX51_JACK_HS, /* headset: stereo output with mic */
};
struct rx51_audio_pdata {
struct gpio_desc *tvout_selection_gpio;
struct gpio_desc *jack_detection_gpio;
struct gpio_desc *eci_sw_gpio;
struct gpio_desc *speaker_amp_gpio;
};
static int rx51_spk_func;
static int rx51_dmic_func;
static int rx51_jack_func;
static void rx51_ext_control(struct snd_soc_dapm_context *dapm)
{
struct snd_soc_card *card = dapm->card;
struct rx51_audio_pdata *pdata = snd_soc_card_get_drvdata(card);
int hp = 0, hs = 0, tvout = 0;
switch (rx51_jack_func) {
case RX51_JACK_TVOUT:
tvout = 1;
hp = 1;
break;
case RX51_JACK_HS:
hs = 1;
fallthrough;
case RX51_JACK_HP:
hp = 1;
break;
}
snd_soc_dapm_mutex_lock(dapm);
if (rx51_spk_func)
snd_soc_dapm_enable_pin_unlocked(dapm, "Ext Spk");
else
snd_soc_dapm_disable_pin_unlocked(dapm, "Ext Spk");
if (rx51_dmic_func)
snd_soc_dapm_enable_pin_unlocked(dapm, "DMic");
else
snd_soc_dapm_disable_pin_unlocked(dapm, "DMic");
if (hp)
snd_soc_dapm_enable_pin_unlocked(dapm, "Headphone Jack");
else
snd_soc_dapm_disable_pin_unlocked(dapm, "Headphone Jack");
if (hs)
snd_soc_dapm_enable_pin_unlocked(dapm, "HS Mic");
else
snd_soc_dapm_disable_pin_unlocked(dapm, "HS Mic");
gpiod_set_value(pdata->tvout_selection_gpio, tvout);
snd_soc_dapm_sync_unlocked(dapm);
snd_soc_dapm_mutex_unlock(dapm);
}
static int rx51_startup(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_card *card = rtd->card;
snd_pcm_hw_constraint_single(runtime, SNDRV_PCM_HW_PARAM_CHANNELS, 2);
rx51_ext_control(&card->dapm);
return 0;
}
static int rx51_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
/* Set the codec system clock for DAC and ADC */
return snd_soc_dai_set_sysclk(codec_dai, 0, 19200000,
SND_SOC_CLOCK_IN);
}
static const struct snd_soc_ops rx51_ops = {
.startup = rx51_startup,
.hw_params = rx51_hw_params,
};
static int rx51_get_spk(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.enumerated.item[0] = rx51_spk_func;
return 0;
}
static int rx51_set_spk(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
if (rx51_spk_func == ucontrol->value.enumerated.item[0])
return 0;
rx51_spk_func = ucontrol->value.enumerated.item[0];
rx51_ext_control(&card->dapm);
return 1;
}
static int rx51_spk_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
struct snd_soc_dapm_context *dapm = w->dapm;
struct snd_soc_card *card = dapm->card;
struct rx51_audio_pdata *pdata = snd_soc_card_get_drvdata(card);
gpiod_set_raw_value_cansleep(pdata->speaker_amp_gpio,
!!SND_SOC_DAPM_EVENT_ON(event));
return 0;
}
static int rx51_get_input(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.enumerated.item[0] = rx51_dmic_func;
return 0;
}
static int rx51_set_input(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
if (rx51_dmic_func == ucontrol->value.enumerated.item[0])
return 0;
rx51_dmic_func = ucontrol->value.enumerated.item[0];
rx51_ext_control(&card->dapm);
return 1;
}
static int rx51_get_jack(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.enumerated.item[0] = rx51_jack_func;
return 0;
}
static int rx51_set_jack(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
if (rx51_jack_func == ucontrol->value.enumerated.item[0])
return 0;
rx51_jack_func = ucontrol->value.enumerated.item[0];
rx51_ext_control(&card->dapm);
return 1;
}
static struct snd_soc_jack rx51_av_jack;
static struct snd_soc_jack_gpio rx51_av_jack_gpios[] = {
{
.name = "avdet-gpio",
.report = SND_JACK_HEADSET,
.invert = 1,
.debounce_time = 200,
},
};
static const struct snd_soc_dapm_widget aic34_dapm_widgets[] = {
SND_SOC_DAPM_SPK("Ext Spk", rx51_spk_event),
SND_SOC_DAPM_MIC("DMic", NULL),
SND_SOC_DAPM_HP("Headphone Jack", NULL),
SND_SOC_DAPM_MIC("HS Mic", NULL),
SND_SOC_DAPM_LINE("FM Transmitter", NULL),
SND_SOC_DAPM_SPK("Earphone", NULL),
};
static const struct snd_soc_dapm_route audio_map[] = {
{"Ext Spk", NULL, "HPLOUT"},
{"Ext Spk", NULL, "HPROUT"},
{"Ext Spk", NULL, "HPLCOM"},
{"Ext Spk", NULL, "HPRCOM"},
{"FM Transmitter", NULL, "LLOUT"},
{"FM Transmitter", NULL, "RLOUT"},
{"Headphone Jack", NULL, "TPA6130A2 HPLEFT"},
{"Headphone Jack", NULL, "TPA6130A2 HPRIGHT"},
{"TPA6130A2 LEFTIN", NULL, "LLOUT"},
{"TPA6130A2 RIGHTIN", NULL, "RLOUT"},
{"DMic Rate 64", NULL, "DMic"},
{"DMic", NULL, "Mic Bias"},
{"b LINE2R", NULL, "MONO_LOUT"},
{"Earphone", NULL, "b HPLOUT"},
{"LINE1L", NULL, "HS Mic"},
{"HS Mic", NULL, "b Mic Bias"},
};
static const char * const spk_function[] = {"Off", "On"};
static const char * const input_function[] = {"ADC", "Digital Mic"};
static const char * const jack_function[] = {
"Off", "TV-OUT", "Headphone", "Headset"
};
static const struct soc_enum rx51_enum[] = {
SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(spk_function), spk_function),
SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(input_function), input_function),
SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(jack_function), jack_function),
};
static const struct snd_kcontrol_new aic34_rx51_controls[] = {
SOC_ENUM_EXT("Speaker Function", rx51_enum[0],
rx51_get_spk, rx51_set_spk),
SOC_ENUM_EXT("Input Select", rx51_enum[1],
rx51_get_input, rx51_set_input),
SOC_ENUM_EXT("Jack Function", rx51_enum[2],
rx51_get_jack, rx51_set_jack),
SOC_DAPM_PIN_SWITCH("FM Transmitter"),
SOC_DAPM_PIN_SWITCH("Earphone"),
};
static int rx51_aic34_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_card *card = rtd->card;
struct rx51_audio_pdata *pdata = snd_soc_card_get_drvdata(card);
int err;
snd_soc_limit_volume(card, "TPA6130A2 Headphone Playback Volume", 42);
err = omap_mcbsp_st_add_controls(rtd, 2);
if (err < 0) {
dev_err(card->dev, "Failed to add MCBSP controls\n");
return err;
}
/* AV jack detection */
err = snd_soc_card_jack_new(rtd->card, "AV Jack",
SND_JACK_HEADSET | SND_JACK_VIDEOOUT,
&rx51_av_jack);
if (err) {
dev_err(card->dev, "Failed to add AV Jack\n");
return err;
}
/* prepare gpio for snd_soc_jack_add_gpios */
rx51_av_jack_gpios[0].gpio = desc_to_gpio(pdata->jack_detection_gpio);
devm_gpiod_put(card->dev, pdata->jack_detection_gpio);
err = snd_soc_jack_add_gpios(&rx51_av_jack,
ARRAY_SIZE(rx51_av_jack_gpios),
rx51_av_jack_gpios);
if (err) {
dev_err(card->dev, "Failed to add GPIOs\n");
return err;
}
return err;
}
/* Digital audio interface glue - connects codec <--> CPU */
SND_SOC_DAILINK_DEFS(aic34,
DAILINK_COMP_ARRAY(COMP_CPU("omap-mcbsp.2")),
DAILINK_COMP_ARRAY(COMP_CODEC("tlv320aic3x-codec.2-0018",
"tlv320aic3x-hifi")),
DAILINK_COMP_ARRAY(COMP_PLATFORM("omap-mcbsp.2")));
static struct snd_soc_dai_link rx51_dai[] = {
{
.name = "TLV320AIC34",
.stream_name = "AIC34",
.dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_IB_NF |
SND_SOC_DAIFMT_CBM_CFM,
.init = rx51_aic34_init,
.ops = &rx51_ops,
SND_SOC_DAILINK_REG(aic34),
},
};
static struct snd_soc_aux_dev rx51_aux_dev[] = {
{
.dlc = COMP_AUX("tlv320aic3x-codec.2-0019"),
},
{
.dlc = COMP_AUX("tpa6130a2.2-0060"),
},
};
static struct snd_soc_codec_conf rx51_codec_conf[] = {
{
.dlc = COMP_CODEC_CONF("tlv320aic3x-codec.2-0019"),
.name_prefix = "b",
},
{
.dlc = COMP_CODEC_CONF("tpa6130a2.2-0060"),
.name_prefix = "TPA6130A2",
},
};
/* Audio card */
static struct snd_soc_card rx51_sound_card = {
.name = "RX-51",
.owner = THIS_MODULE,
.dai_link = rx51_dai,
.num_links = ARRAY_SIZE(rx51_dai),
.aux_dev = rx51_aux_dev,
.num_aux_devs = ARRAY_SIZE(rx51_aux_dev),
.codec_conf = rx51_codec_conf,
.num_configs = ARRAY_SIZE(rx51_codec_conf),
.fully_routed = true,
.controls = aic34_rx51_controls,
.num_controls = ARRAY_SIZE(aic34_rx51_controls),
.dapm_widgets = aic34_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(aic34_dapm_widgets),
.dapm_routes = audio_map,
.num_dapm_routes = ARRAY_SIZE(audio_map),
};
static int rx51_soc_probe(struct platform_device *pdev)
{
struct rx51_audio_pdata *pdata;
struct device_node *np = pdev->dev.of_node;
struct snd_soc_card *card = &rx51_sound_card;
int err;
if (!machine_is_nokia_rx51() && !of_machine_is_compatible("nokia,omap3-n900"))
return -ENODEV;
card->dev = &pdev->dev;
if (np) {
struct device_node *dai_node;
dai_node = of_parse_phandle(np, "nokia,cpu-dai", 0);
if (!dai_node) {
dev_err(&pdev->dev, "McBSP node is not provided\n");
return -EINVAL;
}
rx51_dai[0].cpus->dai_name = NULL;
rx51_dai[0].platforms->name = NULL;
rx51_dai[0].cpus->of_node = dai_node;
rx51_dai[0].platforms->of_node = dai_node;
dai_node = of_parse_phandle(np, "nokia,audio-codec", 0);
if (!dai_node) {
dev_err(&pdev->dev, "Codec node is not provided\n");
return -EINVAL;
}
rx51_dai[0].codecs->name = NULL;
rx51_dai[0].codecs->of_node = dai_node;
dai_node = of_parse_phandle(np, "nokia,audio-codec", 1);
if (!dai_node) {
dev_err(&pdev->dev, "Auxiliary Codec node is not provided\n");
return -EINVAL;
}
rx51_aux_dev[0].dlc.name = NULL;
rx51_aux_dev[0].dlc.of_node = dai_node;
rx51_codec_conf[0].dlc.name = NULL;
rx51_codec_conf[0].dlc.of_node = dai_node;
dai_node = of_parse_phandle(np, "nokia,headphone-amplifier", 0);
if (!dai_node) {
dev_err(&pdev->dev, "Headphone amplifier node is not provided\n");
return -EINVAL;
}
rx51_aux_dev[1].dlc.name = NULL;
rx51_aux_dev[1].dlc.of_node = dai_node;
rx51_codec_conf[1].dlc.name = NULL;
rx51_codec_conf[1].dlc.of_node = dai_node;
}
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (pdata == NULL)
return -ENOMEM;
snd_soc_card_set_drvdata(card, pdata);
pdata->tvout_selection_gpio = devm_gpiod_get(card->dev,
"tvout-selection",
GPIOD_OUT_LOW);
if (IS_ERR(pdata->tvout_selection_gpio)) {
dev_err(card->dev, "could not get tvout selection gpio\n");
return PTR_ERR(pdata->tvout_selection_gpio);
}
pdata->jack_detection_gpio = devm_gpiod_get(card->dev,
"jack-detection",
GPIOD_ASIS);
if (IS_ERR(pdata->jack_detection_gpio)) {
dev_err(card->dev, "could not get jack detection gpio\n");
return PTR_ERR(pdata->jack_detection_gpio);
}
pdata->eci_sw_gpio = devm_gpiod_get(card->dev, "eci-switch",
GPIOD_OUT_HIGH);
if (IS_ERR(pdata->eci_sw_gpio)) {
dev_err(card->dev, "could not get eci switch gpio\n");
return PTR_ERR(pdata->eci_sw_gpio);
}
pdata->speaker_amp_gpio = devm_gpiod_get(card->dev,
"speaker-amplifier",
GPIOD_OUT_LOW);
if (IS_ERR(pdata->speaker_amp_gpio)) {
dev_err(card->dev, "could not get speaker enable gpio\n");
return PTR_ERR(pdata->speaker_amp_gpio);
}
err = devm_snd_soc_register_card(card->dev, card);
if (err) {
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", err);
return err;
}
return 0;
}
#if defined(CONFIG_OF)
static const struct of_device_id rx51_audio_of_match[] = {
{ .compatible = "nokia,n900-audio", },
{},
};
MODULE_DEVICE_TABLE(of, rx51_audio_of_match);
#endif
static struct platform_driver rx51_soc_driver = {
.driver = {
.name = "rx51-audio",
.of_match_table = of_match_ptr(rx51_audio_of_match),
},
.probe = rx51_soc_probe,
};
module_platform_driver(rx51_soc_driver);
MODULE_AUTHOR("Nokia Corporation");
MODULE_DESCRIPTION("ALSA SoC Nokia RX-51");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:rx51-audio");
| linux-master | sound/soc/ti/rx51.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* omap-abe-twl6040.c -- SoC audio for TI OMAP based boards with ABE and
* twl6040 codec
*
* Author: Misael Lopez Cruz <[email protected]>
*/
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/mfd/twl6040.h>
#include <linux/module.h>
#include <linux/of.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/jack.h>
#include "omap-dmic.h"
#include "omap-mcpdm.h"
#include "../codecs/twl6040.h"
SND_SOC_DAILINK_DEFS(link0,
DAILINK_COMP_ARRAY(COMP_EMPTY()),
DAILINK_COMP_ARRAY(COMP_CODEC("twl6040-codec",
"twl6040-legacy")),
DAILINK_COMP_ARRAY(COMP_EMPTY()));
SND_SOC_DAILINK_DEFS(link1,
DAILINK_COMP_ARRAY(COMP_EMPTY()),
DAILINK_COMP_ARRAY(COMP_CODEC("dmic-codec",
"dmic-hifi")),
DAILINK_COMP_ARRAY(COMP_EMPTY()));
struct abe_twl6040 {
struct snd_soc_card card;
struct snd_soc_dai_link dai_links[2];
int jack_detection; /* board can detect jack events */
int mclk_freq; /* MCLK frequency speed for twl6040 */
};
static struct platform_device *dmic_codec_dev;
static int omap_abe_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_card *card = rtd->card;
struct abe_twl6040 *priv = snd_soc_card_get_drvdata(card);
int clk_id, freq;
int ret;
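	/*
	 * The codec reports which PLL it is running from: the high
	 * performance PLL is fed by the board MCLK, while the low power PLL
	 * runs from the 32768 Hz clock.
	 */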
clk_id = twl6040_get_clk_id(codec_dai->component);
if (clk_id == TWL6040_SYSCLK_SEL_HPPLL)
freq = priv->mclk_freq;
else if (clk_id == TWL6040_SYSCLK_SEL_LPPLL)
freq = 32768;
else
return -EINVAL;
/* set the codec mclk */
ret = snd_soc_dai_set_sysclk(codec_dai, clk_id, freq,
SND_SOC_CLOCK_IN);
if (ret) {
printk(KERN_ERR "can't set codec system clock\n");
return ret;
}
return ret;
}
static const struct snd_soc_ops omap_abe_ops = {
.hw_params = omap_abe_hw_params,
};
static int omap_abe_dmic_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int ret = 0;
ret = snd_soc_dai_set_sysclk(cpu_dai, OMAP_DMIC_SYSCLK_PAD_CLKS,
19200000, SND_SOC_CLOCK_IN);
if (ret < 0) {
printk(KERN_ERR "can't set DMIC cpu system clock\n");
return ret;
}
ret = snd_soc_dai_set_sysclk(cpu_dai, OMAP_DMIC_ABE_DMIC_CLK, 2400000,
SND_SOC_CLOCK_OUT);
if (ret < 0) {
printk(KERN_ERR "can't set DMIC output clock\n");
return ret;
}
return 0;
}
static const struct snd_soc_ops omap_abe_dmic_ops = {
.hw_params = omap_abe_dmic_hw_params,
};
/* Headset jack */
static struct snd_soc_jack hs_jack;
/* Headset jack detection DAPM pins */
static struct snd_soc_jack_pin hs_jack_pins[] = {
{
.pin = "Headset Mic",
.mask = SND_JACK_MICROPHONE,
},
{
.pin = "Headset Stereophone",
.mask = SND_JACK_HEADPHONE,
},
};
/* SDP4430 machine DAPM */
static const struct snd_soc_dapm_widget twl6040_dapm_widgets[] = {
/* Outputs */
SND_SOC_DAPM_HP("Headset Stereophone", NULL),
SND_SOC_DAPM_SPK("Earphone Spk", NULL),
SND_SOC_DAPM_SPK("Ext Spk", NULL),
SND_SOC_DAPM_LINE("Line Out", NULL),
SND_SOC_DAPM_SPK("Vibrator", NULL),
/* Inputs */
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_MIC("Main Handset Mic", NULL),
SND_SOC_DAPM_MIC("Sub Handset Mic", NULL),
SND_SOC_DAPM_LINE("Line In", NULL),
/* Digital microphones */
SND_SOC_DAPM_MIC("Digital Mic", NULL),
};
static const struct snd_soc_dapm_route audio_map[] = {
/* Routings for outputs */
{"Headset Stereophone", NULL, "HSOL"},
{"Headset Stereophone", NULL, "HSOR"},
{"Earphone Spk", NULL, "EP"},
{"Ext Spk", NULL, "HFL"},
{"Ext Spk", NULL, "HFR"},
{"Line Out", NULL, "AUXL"},
{"Line Out", NULL, "AUXR"},
{"Vibrator", NULL, "VIBRAL"},
{"Vibrator", NULL, "VIBRAR"},
/* Routings for inputs */
{"HSMIC", NULL, "Headset Mic"},
{"Headset Mic", NULL, "Headset Mic Bias"},
{"MAINMIC", NULL, "Main Handset Mic"},
{"Main Handset Mic", NULL, "Main Mic Bias"},
{"SUBMIC", NULL, "Sub Handset Mic"},
{"Sub Handset Mic", NULL, "Main Mic Bias"},
{"AFML", NULL, "Line In"},
{"AFMR", NULL, "Line In"},
};
static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_component *component = asoc_rtd_to_codec(rtd, 0)->component;
struct snd_soc_card *card = rtd->card;
struct abe_twl6040 *priv = snd_soc_card_get_drvdata(card);
int hs_trim;
int ret;
/*
* Configure McPDM offset cancellation based on the HSOTRIM value from
* twl6040.
*/
hs_trim = twl6040_get_trim_value(component, TWL6040_TRIM_HSOTRIM);
omap_mcpdm_configure_dn_offsets(rtd, TWL6040_HSF_TRIM_LEFT(hs_trim),
TWL6040_HSF_TRIM_RIGHT(hs_trim));
/* Headset jack detection only if it is supported */
if (priv->jack_detection) {
ret = snd_soc_card_jack_new_pins(rtd->card, "Headset Jack",
SND_JACK_HEADSET, &hs_jack,
hs_jack_pins,
ARRAY_SIZE(hs_jack_pins));
if (ret)
return ret;
twl6040_hs_jack_detect(component, &hs_jack, SND_JACK_HEADSET);
}
return 0;
}
static const struct snd_soc_dapm_route dmic_audio_map[] = {
{"DMic", NULL, "Digital Mic"},
{"Digital Mic", NULL, "Digital Mic1 Bias"},
};
static int omap_abe_dmic_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dapm_context *dapm = &rtd->card->dapm;
return snd_soc_dapm_add_routes(dapm, dmic_audio_map,
ARRAY_SIZE(dmic_audio_map));
}
static int omap_abe_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct snd_soc_card *card;
struct device_node *dai_node;
struct abe_twl6040 *priv;
int num_links = 0;
int ret = 0;
if (!node) {
dev_err(&pdev->dev, "of node is missing.\n");
return -ENODEV;
}
priv = devm_kzalloc(&pdev->dev, sizeof(struct abe_twl6040), GFP_KERNEL);
if (priv == NULL)
return -ENOMEM;
card = &priv->card;
card->dev = &pdev->dev;
card->owner = THIS_MODULE;
card->dapm_widgets = twl6040_dapm_widgets;
card->num_dapm_widgets = ARRAY_SIZE(twl6040_dapm_widgets);
card->dapm_routes = audio_map;
card->num_dapm_routes = ARRAY_SIZE(audio_map);
if (snd_soc_of_parse_card_name(card, "ti,model")) {
dev_err(&pdev->dev, "Card name is not provided\n");
return -ENODEV;
}
ret = snd_soc_of_parse_audio_routing(card, "ti,audio-routing");
if (ret) {
dev_err(&pdev->dev, "Error while parsing DAPM routing\n");
return ret;
}
dai_node = of_parse_phandle(node, "ti,mcpdm", 0);
if (!dai_node) {
dev_err(&pdev->dev, "McPDM node is not provided\n");
return -EINVAL;
}
priv->dai_links[0].name = "DMIC";
priv->dai_links[0].stream_name = "TWL6040";
priv->dai_links[0].cpus = link0_cpus;
priv->dai_links[0].num_cpus = 1;
priv->dai_links[0].cpus->of_node = dai_node;
priv->dai_links[0].platforms = link0_platforms;
priv->dai_links[0].num_platforms = 1;
priv->dai_links[0].platforms->of_node = dai_node;
priv->dai_links[0].codecs = link0_codecs;
priv->dai_links[0].num_codecs = 1;
priv->dai_links[0].init = omap_abe_twl6040_init;
priv->dai_links[0].ops = &omap_abe_ops;
dai_node = of_parse_phandle(node, "ti,dmic", 0);
if (dai_node) {
num_links = 2;
priv->dai_links[1].name = "TWL6040";
priv->dai_links[1].stream_name = "DMIC Capture";
priv->dai_links[1].cpus = link1_cpus;
priv->dai_links[1].num_cpus = 1;
priv->dai_links[1].cpus->of_node = dai_node;
priv->dai_links[1].platforms = link1_platforms;
priv->dai_links[1].num_platforms = 1;
priv->dai_links[1].platforms->of_node = dai_node;
priv->dai_links[1].codecs = link1_codecs;
priv->dai_links[1].num_codecs = 1;
priv->dai_links[1].init = omap_abe_dmic_init;
priv->dai_links[1].ops = &omap_abe_dmic_ops;
} else {
num_links = 1;
}
priv->jack_detection = of_property_read_bool(node, "ti,jack-detection");
of_property_read_u32(node, "ti,mclk-freq", &priv->mclk_freq);
if (!priv->mclk_freq) {
dev_err(&pdev->dev, "MCLK frequency not provided\n");
return -EINVAL;
}
card->fully_routed = 1;
card->dai_link = priv->dai_links;
card->num_links = num_links;
snd_soc_card_set_drvdata(card, priv);
ret = devm_snd_soc_register_card(&pdev->dev, card);
if (ret)
dev_err(&pdev->dev, "devm_snd_soc_register_card() failed: %d\n",
ret);
return ret;
}
static const struct of_device_id omap_abe_of_match[] = {
{.compatible = "ti,abe-twl6040", },
{ },
};
MODULE_DEVICE_TABLE(of, omap_abe_of_match);
static struct platform_driver omap_abe_driver = {
.driver = {
.name = "omap-abe-twl6040",
.pm = &snd_soc_pm_ops,
.of_match_table = omap_abe_of_match,
},
.probe = omap_abe_probe,
};
static int __init omap_abe_init(void)
{
int ret;
dmic_codec_dev = platform_device_register_simple("dmic-codec", -1, NULL,
0);
if (IS_ERR(dmic_codec_dev)) {
pr_err("%s: dmic-codec device registration failed\n", __func__);
return PTR_ERR(dmic_codec_dev);
}
ret = platform_driver_register(&omap_abe_driver);
if (ret) {
pr_err("%s: platform driver registration failed\n", __func__);
platform_device_unregister(dmic_codec_dev);
}
return ret;
}
module_init(omap_abe_init);
static void __exit omap_abe_exit(void)
{
platform_driver_unregister(&omap_abe_driver);
platform_device_unregister(dmic_codec_dev);
}
module_exit(omap_abe_exit);
MODULE_AUTHOR("Misael Lopez Cruz <[email protected]>");
MODULE_DESCRIPTION("ALSA SoC for OMAP boards with ABE and twl6040 codec");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:omap-abe-twl6040");
| linux-master | sound/soc/ti/omap-abe-twl6040.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com
* Author: Peter Ujfalusi <[email protected]>
*/
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>
#include "udma-pcm.h"
static const struct snd_pcm_hardware udma_pcm_hardware = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME |
SNDRV_PCM_INFO_NO_PERIOD_WAKEUP |
SNDRV_PCM_INFO_INTERLEAVED,
.buffer_bytes_max = SIZE_MAX,
.period_bytes_min = 32,
.period_bytes_max = SZ_64K,
.periods_min = 2,
.periods_max = UINT_MAX,
};
static const struct snd_dmaengine_pcm_config udma_dmaengine_pcm_config = {
.pcm_hardware = &udma_pcm_hardware,
.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
};
int udma_pcm_platform_register(struct device *dev)
{
return devm_snd_dmaengine_pcm_register(dev, &udma_dmaengine_pcm_config,
0);
}
EXPORT_SYMBOL_GPL(udma_pcm_platform_register);
MODULE_AUTHOR("Peter Ujfalusi <[email protected]>");
MODULE_DESCRIPTION("UDMA PCM ASoC platform driver");
MODULE_LICENSE("GPL v2");
| linux-master | sound/soc/ti/udma-pcm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ALSA SoC McASP Audio Layer for TI DAVINCI processor
*
* Multi-channel Audio Serial Port Driver
*
* Author: Nirmal Pandey <[email protected]>,
* Suresh Rajashekara <[email protected]>
* Steve Chen <[email protected]>
*
* Copyright: (C) 2009 MontaVista Software, Inc., <[email protected]>
* Copyright: (C) 2009 Texas Instruments, India
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/platform_data/davinci_asp.h>
#include <linux/math64.h>
#include <linux/bitmap.h>
#include <linux/gpio/driver.h>
#include <sound/asoundef.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>
#include "edma-pcm.h"
#include "sdma-pcm.h"
#include "udma-pcm.h"
#include "davinci-mcasp.h"
#define MCASP_MAX_AFIFO_DEPTH 64
#ifdef CONFIG_PM
static u32 context_regs[] = {
DAVINCI_MCASP_TXFMCTL_REG,
DAVINCI_MCASP_RXFMCTL_REG,
DAVINCI_MCASP_TXFMT_REG,
DAVINCI_MCASP_RXFMT_REG,
DAVINCI_MCASP_ACLKXCTL_REG,
DAVINCI_MCASP_ACLKRCTL_REG,
DAVINCI_MCASP_AHCLKXCTL_REG,
DAVINCI_MCASP_AHCLKRCTL_REG,
DAVINCI_MCASP_PDIR_REG,
DAVINCI_MCASP_PFUNC_REG,
DAVINCI_MCASP_RXMASK_REG,
DAVINCI_MCASP_TXMASK_REG,
DAVINCI_MCASP_RXTDM_REG,
DAVINCI_MCASP_TXTDM_REG,
};
struct davinci_mcasp_context {
u32 config_regs[ARRAY_SIZE(context_regs)];
u32 afifo_regs[2]; /* for read/write fifo control registers */
u32 *xrsr_regs; /* for serializer configuration */
bool pm_state;
};
#endif
struct davinci_mcasp_ruledata {
struct davinci_mcasp *mcasp;
int serializers;
};
struct davinci_mcasp {
struct snd_dmaengine_dai_dma_data dma_data[2];
struct davinci_mcasp_pdata *pdata;
void __iomem *base;
u32 fifo_base;
struct device *dev;
struct snd_pcm_substream *substreams[2];
unsigned int dai_fmt;
u32 iec958_status;
	/* Audio cannot be enabled due to missing parameter(s) */
bool missing_audio_param;
/* McASP specific data */
int tdm_slots;
u32 tdm_mask[2];
int slot_width;
u8 op_mode;
u8 dismod;
u8 num_serializer;
u8 *serial_dir;
u8 version;
u8 bclk_div;
int streams;
u32 irq_request[2];
int sysclk_freq;
bool bclk_master;
u32 auxclk_fs_ratio;
unsigned long pdir; /* Pin direction bitfield */
/* McASP FIFO related */
u8 txnumevt;
u8 rxnumevt;
bool dat_port;
	/* Used for constraint setting on the second stream */
u32 channels;
int max_format_width;
u8 active_serializers[2];
#ifdef CONFIG_GPIOLIB
struct gpio_chip gpio_chip;
#endif
#ifdef CONFIG_PM
struct davinci_mcasp_context context;
#endif
struct davinci_mcasp_ruledata ruledata[2];
struct snd_pcm_hw_constraint_list chconstr[2];
};
static inline void mcasp_set_bits(struct davinci_mcasp *mcasp, u32 offset,
u32 val)
{
void __iomem *reg = mcasp->base + offset;
__raw_writel(__raw_readl(reg) | val, reg);
}
static inline void mcasp_clr_bits(struct davinci_mcasp *mcasp, u32 offset,
u32 val)
{
void __iomem *reg = mcasp->base + offset;
__raw_writel((__raw_readl(reg) & ~(val)), reg);
}
static inline void mcasp_mod_bits(struct davinci_mcasp *mcasp, u32 offset,
u32 val, u32 mask)
{
void __iomem *reg = mcasp->base + offset;
__raw_writel((__raw_readl(reg) & ~mask) | val, reg);
}
static inline void mcasp_set_reg(struct davinci_mcasp *mcasp, u32 offset,
u32 val)
{
__raw_writel(val, mcasp->base + offset);
}
static inline u32 mcasp_get_reg(struct davinci_mcasp *mcasp, u32 offset)
{
return (u32)__raw_readl(mcasp->base + offset);
}
static void mcasp_set_ctl_reg(struct davinci_mcasp *mcasp, u32 ctl_reg, u32 val)
{
int i = 0;
mcasp_set_bits(mcasp, ctl_reg, val);
	/* programming GBLCTL needs to read back from GBLCTL and verify */
/* loop count is to avoid the lock-up */
for (i = 0; i < 1000; i++) {
if ((mcasp_get_reg(mcasp, ctl_reg) & val) == val)
break;
}
if (i == 1000 && ((mcasp_get_reg(mcasp, ctl_reg) & val) != val))
printk(KERN_ERR "GBLCTL write error\n");
}
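/*
 * The McASP operates synchronously when the receive section is clocked from
 * the transmit clock generator (the ASYNC bit in ACLKXCTL is cleared) and the
 * receive frame sync is internally generated (AFSRE set in RXFMCTL).
 */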
static bool mcasp_is_synchronous(struct davinci_mcasp *mcasp)
{
u32 rxfmctl = mcasp_get_reg(mcasp, DAVINCI_MCASP_RXFMCTL_REG);
u32 aclkxctl = mcasp_get_reg(mcasp, DAVINCI_MCASP_ACLKXCTL_REG);
return !(aclkxctl & TX_ASYNC) && rxfmctl & AFSRE;
}
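/*
 * mcasp_set_clk_pdir() drives or releases the clock and frame-sync pins
 * (PDIR bits AMUTE..AFSR that are marked in mcasp->pdir), while
 * mcasp_set_axr_pdir() does the same for the AXR serializer pins (bits below
 * AMUTE).
 */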
static inline void mcasp_set_clk_pdir(struct davinci_mcasp *mcasp, bool enable)
{
u32 bit = PIN_BIT_AMUTE;
for_each_set_bit_from(bit, &mcasp->pdir, PIN_BIT_AFSR + 1) {
if (enable)
mcasp_set_bits(mcasp, DAVINCI_MCASP_PDIR_REG, BIT(bit));
else
mcasp_clr_bits(mcasp, DAVINCI_MCASP_PDIR_REG, BIT(bit));
}
}
static inline void mcasp_set_axr_pdir(struct davinci_mcasp *mcasp, bool enable)
{
u32 bit;
for_each_set_bit(bit, &mcasp->pdir, PIN_BIT_AMUTE) {
if (enable)
mcasp_set_bits(mcasp, DAVINCI_MCASP_PDIR_REG, BIT(bit));
else
mcasp_clr_bits(mcasp, DAVINCI_MCASP_PDIR_REG, BIT(bit));
}
}
static void mcasp_start_rx(struct davinci_mcasp *mcasp)
{
if (mcasp->rxnumevt) { /* enable FIFO */
u32 reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
mcasp_set_bits(mcasp, reg, FIFO_ENABLE);
}
/* Start clocks */
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXHCLKRST);
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXCLKRST);
/*
* When ASYNC == 0 the transmit and receive sections operate
* synchronously from the transmit clock and frame sync. We need to make
	 * sure that the TX signals are enabled when starting reception.
*/
if (mcasp_is_synchronous(mcasp)) {
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXHCLKRST);
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXCLKRST);
mcasp_set_clk_pdir(mcasp, true);
}
/* Activate serializer(s) */
mcasp_set_reg(mcasp, DAVINCI_MCASP_RXSTAT_REG, 0xFFFFFFFF);
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXSERCLR);
/* Release RX state machine */
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXSMRST);
/* Release Frame Sync generator */
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXFSRST);
if (mcasp_is_synchronous(mcasp))
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXFSRST);
/* enable receive IRQs */
mcasp_set_bits(mcasp, DAVINCI_MCASP_EVTCTLR_REG,
mcasp->irq_request[SNDRV_PCM_STREAM_CAPTURE]);
}
static void mcasp_start_tx(struct davinci_mcasp *mcasp)
{
u32 cnt;
if (mcasp->txnumevt) { /* enable FIFO */
u32 reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
mcasp_set_bits(mcasp, reg, FIFO_ENABLE);
}
/* Start clocks */
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXHCLKRST);
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXCLKRST);
mcasp_set_clk_pdir(mcasp, true);
/* Activate serializer(s) */
mcasp_set_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG, 0xFFFFFFFF);
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXSERCLR);
/* wait for XDATA to be cleared */
cnt = 0;
while ((mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG) & XRDATA) &&
(cnt < 100000))
cnt++;
mcasp_set_axr_pdir(mcasp, true);
/* Release TX state machine */
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXSMRST);
/* Release Frame Sync generator */
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXFSRST);
/* enable transmit IRQs */
mcasp_set_bits(mcasp, DAVINCI_MCASP_EVTCTLX_REG,
mcasp->irq_request[SNDRV_PCM_STREAM_PLAYBACK]);
}
static void davinci_mcasp_start(struct davinci_mcasp *mcasp, int stream)
{
mcasp->streams++;
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
mcasp_start_tx(mcasp);
else
mcasp_start_rx(mcasp);
}
static void mcasp_stop_rx(struct davinci_mcasp *mcasp)
{
/* disable IRQ sources */
mcasp_clr_bits(mcasp, DAVINCI_MCASP_EVTCTLR_REG,
mcasp->irq_request[SNDRV_PCM_STREAM_CAPTURE]);
/*
* In synchronous mode stop the TX clocks if no other stream is
* running
*/
if (mcasp_is_synchronous(mcasp) && !mcasp->streams) {
mcasp_set_clk_pdir(mcasp, false);
mcasp_set_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, 0);
}
mcasp_set_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, 0);
mcasp_set_reg(mcasp, DAVINCI_MCASP_RXSTAT_REG, 0xFFFFFFFF);
if (mcasp->rxnumevt) { /* disable FIFO */
u32 reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
}
}
static void mcasp_stop_tx(struct davinci_mcasp *mcasp)
{
u32 val = 0;
/* disable IRQ sources */
mcasp_clr_bits(mcasp, DAVINCI_MCASP_EVTCTLX_REG,
mcasp->irq_request[SNDRV_PCM_STREAM_PLAYBACK]);
/*
* In synchronous mode keep TX clocks running if the capture stream is
* still running.
*/
if (mcasp_is_synchronous(mcasp) && mcasp->streams)
val = TXHCLKRST | TXCLKRST | TXFSRST;
else
mcasp_set_clk_pdir(mcasp, false);
mcasp_set_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, val);
mcasp_set_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG, 0xFFFFFFFF);
if (mcasp->txnumevt) { /* disable FIFO */
u32 reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
}
mcasp_set_axr_pdir(mcasp, false);
}
static void davinci_mcasp_stop(struct davinci_mcasp *mcasp, int stream)
{
mcasp->streams--;
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
mcasp_stop_tx(mcasp);
else
mcasp_stop_rx(mcasp);
}
static irqreturn_t davinci_mcasp_tx_irq_handler(int irq, void *data)
{
struct davinci_mcasp *mcasp = (struct davinci_mcasp *)data;
struct snd_pcm_substream *substream;
u32 irq_mask = mcasp->irq_request[SNDRV_PCM_STREAM_PLAYBACK];
u32 handled_mask = 0;
u32 stat;
stat = mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG);
if (stat & XUNDRN & irq_mask) {
dev_warn(mcasp->dev, "Transmit buffer underflow\n");
handled_mask |= XUNDRN;
substream = mcasp->substreams[SNDRV_PCM_STREAM_PLAYBACK];
if (substream)
snd_pcm_stop_xrun(substream);
}
if (!handled_mask)
dev_warn(mcasp->dev, "unhandled tx event. txstat: 0x%08x\n",
stat);
if (stat & XRERR)
handled_mask |= XRERR;
/* Ack the handled event only */
mcasp_set_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG, handled_mask);
return IRQ_RETVAL(handled_mask);
}
static irqreturn_t davinci_mcasp_rx_irq_handler(int irq, void *data)
{
struct davinci_mcasp *mcasp = (struct davinci_mcasp *)data;
struct snd_pcm_substream *substream;
u32 irq_mask = mcasp->irq_request[SNDRV_PCM_STREAM_CAPTURE];
u32 handled_mask = 0;
u32 stat;
stat = mcasp_get_reg(mcasp, DAVINCI_MCASP_RXSTAT_REG);
if (stat & ROVRN & irq_mask) {
dev_warn(mcasp->dev, "Receive buffer overflow\n");
handled_mask |= ROVRN;
substream = mcasp->substreams[SNDRV_PCM_STREAM_CAPTURE];
if (substream)
snd_pcm_stop_xrun(substream);
}
if (!handled_mask)
dev_warn(mcasp->dev, "unhandled rx event. rxstat: 0x%08x\n",
stat);
if (stat & XRERR)
handled_mask |= XRERR;
/* Ack the handled event only */
mcasp_set_reg(mcasp, DAVINCI_MCASP_RXSTAT_REG, handled_mask);
return IRQ_RETVAL(handled_mask);
}
static irqreturn_t davinci_mcasp_common_irq_handler(int irq, void *data)
{
struct davinci_mcasp *mcasp = (struct davinci_mcasp *)data;
irqreturn_t ret = IRQ_NONE;
if (mcasp->substreams[SNDRV_PCM_STREAM_PLAYBACK])
ret = davinci_mcasp_tx_irq_handler(irq, data);
if (mcasp->substreams[SNDRV_PCM_STREAM_CAPTURE])
ret |= davinci_mcasp_rx_irq_handler(irq, data);
return ret;
}
static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
unsigned int fmt)
{
struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
int ret = 0;
u32 data_delay;
bool fs_pol_rising;
bool inv_fs = false;
if (!fmt)
return 0;
pm_runtime_get_sync(mcasp->dev);
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_A:
mcasp_clr_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, FSXDUR);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, FSRDUR);
		/* 1st data bit occurs one ACLK cycle after the frame sync */
data_delay = 1;
break;
case SND_SOC_DAIFMT_DSP_B:
case SND_SOC_DAIFMT_AC97:
mcasp_clr_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, FSXDUR);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, FSRDUR);
/* No delay after FS */
data_delay = 0;
break;
case SND_SOC_DAIFMT_I2S:
/* configure a full-word SYNC pulse (LRCLK) */
mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, FSXDUR);
mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, FSRDUR);
		/* 1st data bit occurs one ACLK cycle after the frame sync */
data_delay = 1;
		/* FS needs to be inverted */
inv_fs = true;
break;
case SND_SOC_DAIFMT_RIGHT_J:
case SND_SOC_DAIFMT_LEFT_J:
/* configure a full-word SYNC pulse (LRCLK) */
mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, FSXDUR);
mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, FSRDUR);
/* No delay after FS */
data_delay = 0;
break;
default:
ret = -EINVAL;
goto out;
}
mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, FSXDLY(data_delay),
FSXDLY(3));
mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, FSRDLY(data_delay),
FSRDLY(3));
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
case SND_SOC_DAIFMT_BP_FP:
/* codec is clock and frame slave */
mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
/* BCLK */
set_bit(PIN_BIT_ACLKX, &mcasp->pdir);
set_bit(PIN_BIT_ACLKR, &mcasp->pdir);
/* Frame Sync */
set_bit(PIN_BIT_AFSX, &mcasp->pdir);
set_bit(PIN_BIT_AFSR, &mcasp->pdir);
mcasp->bclk_master = 1;
break;
case SND_SOC_DAIFMT_BP_FC:
/* codec is clock slave and frame master */
mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
/* BCLK */
set_bit(PIN_BIT_ACLKX, &mcasp->pdir);
set_bit(PIN_BIT_ACLKR, &mcasp->pdir);
/* Frame Sync */
clear_bit(PIN_BIT_AFSX, &mcasp->pdir);
clear_bit(PIN_BIT_AFSR, &mcasp->pdir);
mcasp->bclk_master = 1;
break;
case SND_SOC_DAIFMT_BC_FP:
/* codec is clock master and frame slave */
mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
/* BCLK */
clear_bit(PIN_BIT_ACLKX, &mcasp->pdir);
clear_bit(PIN_BIT_ACLKR, &mcasp->pdir);
/* Frame Sync */
set_bit(PIN_BIT_AFSX, &mcasp->pdir);
set_bit(PIN_BIT_AFSR, &mcasp->pdir);
mcasp->bclk_master = 0;
break;
case SND_SOC_DAIFMT_BC_FC:
/* codec is clock and frame master */
mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
/* BCLK */
clear_bit(PIN_BIT_ACLKX, &mcasp->pdir);
clear_bit(PIN_BIT_ACLKR, &mcasp->pdir);
/* Frame Sync */
clear_bit(PIN_BIT_AFSX, &mcasp->pdir);
clear_bit(PIN_BIT_AFSR, &mcasp->pdir);
mcasp->bclk_master = 0;
break;
default:
ret = -EINVAL;
goto out;
}
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_IB_NF:
mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
fs_pol_rising = true;
break;
case SND_SOC_DAIFMT_NB_IF:
mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
fs_pol_rising = false;
break;
case SND_SOC_DAIFMT_IB_IF:
mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
fs_pol_rising = false;
break;
case SND_SOC_DAIFMT_NB_NF:
mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
fs_pol_rising = true;
break;
default:
ret = -EINVAL;
goto out;
}
if (inv_fs)
fs_pol_rising = !fs_pol_rising;
if (fs_pol_rising) {
mcasp_clr_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
} else {
mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
}
mcasp->dai_fmt = fmt;
out:
pm_runtime_put(mcasp->dev);
return ret;
}
static int __davinci_mcasp_set_clkdiv(struct davinci_mcasp *mcasp, int div_id,
int div, bool explicit)
{
pm_runtime_get_sync(mcasp->dev);
switch (div_id) {
case MCASP_CLKDIV_AUXCLK: /* MCLK divider */
mcasp_mod_bits(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG,
AHCLKXDIV(div - 1), AHCLKXDIV_MASK);
mcasp_mod_bits(mcasp, DAVINCI_MCASP_AHCLKRCTL_REG,
AHCLKRDIV(div - 1), AHCLKRDIV_MASK);
break;
case MCASP_CLKDIV_BCLK: /* BCLK divider */
mcasp_mod_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG,
ACLKXDIV(div - 1), ACLKXDIV_MASK);
mcasp_mod_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG,
ACLKRDIV(div - 1), ACLKRDIV_MASK);
if (explicit)
mcasp->bclk_div = div;
break;
case MCASP_CLKDIV_BCLK_FS_RATIO:
/*
		 * The BCLK/LRCLK ratio describes how many bit-clock cycles
		 * fit into one frame. The clock ratio is given for a
		 * full period of data (for I2S format both left and
		 * right channels), so it has to be divided by the number
		 * of tdm slots (for I2S - divided by 2).
		 * Instead of storing this ratio, we calculate a new
		 * tdm_slot width by dividing the ratio by the
		 * number of configured tdm slots.
*/
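		/*
		 * Illustrative example (hypothetical values, not from a
		 * specific board): a BCLK/LRCLK ratio of 64 with 2 configured
		 * tdm slots gives a slot_width of 64 / 2 = 32 bits; a ratio
		 * of 48 with 2 slots gives 24-bit slots.
		 */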
mcasp->slot_width = div / mcasp->tdm_slots;
if (div % mcasp->tdm_slots)
dev_warn(mcasp->dev,
"%s(): BCLK/LRCLK %d is not divisible by %d tdm slots",
__func__, div, mcasp->tdm_slots);
break;
default:
return -EINVAL;
}
pm_runtime_put(mcasp->dev);
return 0;
}
static int davinci_mcasp_set_clkdiv(struct snd_soc_dai *dai, int div_id,
int div)
{
struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
	return __davinci_mcasp_set_clkdiv(mcasp, div_id, div, true);
}
static int davinci_mcasp_set_sysclk(struct snd_soc_dai *dai, int clk_id,
unsigned int freq, int dir)
{
struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
pm_runtime_get_sync(mcasp->dev);
if (dir == SND_SOC_CLOCK_IN) {
switch (clk_id) {
case MCASP_CLK_HCLK_AHCLK:
mcasp_clr_bits(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG,
AHCLKXE);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_AHCLKRCTL_REG,
AHCLKRE);
clear_bit(PIN_BIT_AHCLKX, &mcasp->pdir);
break;
case MCASP_CLK_HCLK_AUXCLK:
mcasp_set_bits(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG,
AHCLKXE);
mcasp_set_bits(mcasp, DAVINCI_MCASP_AHCLKRCTL_REG,
AHCLKRE);
set_bit(PIN_BIT_AHCLKX, &mcasp->pdir);
break;
default:
dev_err(mcasp->dev, "Invalid clk id: %d\n", clk_id);
goto out;
}
} else {
/* Select AUXCLK as HCLK */
mcasp_set_bits(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG, AHCLKXE);
mcasp_set_bits(mcasp, DAVINCI_MCASP_AHCLKRCTL_REG, AHCLKRE);
set_bit(PIN_BIT_AHCLKX, &mcasp->pdir);
}
/*
	 * When AHCLKX/R is selected to be an output, the HCLK is the same
	 * clock, coming via AUXCLK.
*/
mcasp->sysclk_freq = freq;
out:
pm_runtime_put(mcasp->dev);
return 0;
}
/* All serializers must have an equal number of channels */
static int davinci_mcasp_ch_constraint(struct davinci_mcasp *mcasp, int stream,
int serializers)
{
struct snd_pcm_hw_constraint_list *cl = &mcasp->chconstr[stream];
unsigned int *list = (unsigned int *) cl->list;
int slots = mcasp->tdm_slots;
int i, count = 0;
if (mcasp->tdm_mask[stream])
slots = hweight32(mcasp->tdm_mask[stream]);
for (i = 1; i <= slots; i++)
list[count++] = i;
for (i = 2; i <= serializers; i++)
list[count++] = i*slots;
cl->count = count;
return 0;
}
static int davinci_mcasp_set_ch_constraints(struct davinci_mcasp *mcasp)
{
int rx_serializers = 0, tx_serializers = 0, ret, i;
for (i = 0; i < mcasp->num_serializer; i++)
if (mcasp->serial_dir[i] == TX_MODE)
tx_serializers++;
else if (mcasp->serial_dir[i] == RX_MODE)
rx_serializers++;
ret = davinci_mcasp_ch_constraint(mcasp, SNDRV_PCM_STREAM_PLAYBACK,
tx_serializers);
if (ret)
return ret;
ret = davinci_mcasp_ch_constraint(mcasp, SNDRV_PCM_STREAM_CAPTURE,
rx_serializers);
return ret;
}
static int davinci_mcasp_set_tdm_slot(struct snd_soc_dai *dai,
unsigned int tx_mask,
unsigned int rx_mask,
int slots, int slot_width)
{
struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
return 0;
dev_dbg(mcasp->dev,
"%s() tx_mask 0x%08x rx_mask 0x%08x slots %d width %d\n",
__func__, tx_mask, rx_mask, slots, slot_width);
if (tx_mask >= (1<<slots) || rx_mask >= (1<<slots)) {
dev_err(mcasp->dev,
"Bad tdm mask tx: 0x%08x rx: 0x%08x slots %d\n",
tx_mask, rx_mask, slots);
return -EINVAL;
}
if (slot_width &&
(slot_width < 8 || slot_width > 32 || slot_width % 4 != 0)) {
dev_err(mcasp->dev, "%s: Unsupported slot_width %d\n",
__func__, slot_width);
return -EINVAL;
}
mcasp->tdm_slots = slots;
mcasp->tdm_mask[SNDRV_PCM_STREAM_PLAYBACK] = tx_mask;
mcasp->tdm_mask[SNDRV_PCM_STREAM_CAPTURE] = rx_mask;
mcasp->slot_width = slot_width;
return davinci_mcasp_set_ch_constraints(mcasp);
}
static int davinci_config_channel_size(struct davinci_mcasp *mcasp,
int sample_width)
{
u32 fmt;
u32 tx_rotate, rx_rotate, slot_width;
u32 mask = (1ULL << sample_width) - 1;
if (mcasp->slot_width)
slot_width = mcasp->slot_width;
else if (mcasp->max_format_width)
slot_width = mcasp->max_format_width;
else
slot_width = sample_width;
/*
* TX rotation:
* right aligned formats: rotate w/ slot_width
* left aligned formats: rotate w/ sample_width
*
* RX rotation:
* right aligned formats: no rotation needed
* left aligned formats: rotate w/ (slot_width - sample_width)
*/
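	/*
	 * Worked example (illustrative only): 16-bit samples in 32-bit slots
	 * with a left aligned format give tx_rotate = 16 / 4 = 4 and
	 * rx_rotate = (32 - 16) / 4 = 4, since rotation is programmed in
	 * 4-bit units.
	 */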
if ((mcasp->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
SND_SOC_DAIFMT_RIGHT_J) {
tx_rotate = (slot_width / 4) & 0x7;
rx_rotate = 0;
} else {
tx_rotate = (sample_width / 4) & 0x7;
rx_rotate = (slot_width - sample_width) / 4;
}
/* mapping of the XSSZ bit-field as described in the datasheet */
fmt = (slot_width >> 1) - 1;
if (mcasp->op_mode != DAVINCI_MCASP_DIT_MODE) {
mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, RXSSZ(fmt),
RXSSZ(0x0F));
mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, TXSSZ(fmt),
TXSSZ(0x0F));
mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, TXROT(tx_rotate),
TXROT(7));
mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, RXROT(rx_rotate),
RXROT(7));
mcasp_set_reg(mcasp, DAVINCI_MCASP_RXMASK_REG, mask);
} else {
/*
		 * According to the TRM it should be TXROT=0, but the following works:
* 16 bit to 23-8 (TXROT=6, rotate 24 bits)
* 24 bit to 23-0 (TXROT=0, rotate 0 bits)
*
* TXROT = 0 only works with 24bit samples
*/
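		/*
		 * Illustrative values: 16-bit samples give
		 * tx_rotate = (16 / 4 + 2) & 0x7 = 6 (rotate 24 bits), while
		 * 24-bit samples give (24 / 4 + 2) & 0x7 = 0 (no rotation),
		 * matching the cases listed above.
		 */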
tx_rotate = (sample_width / 4 + 2) & 0x7;
mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, TXROT(tx_rotate),
TXROT(7));
mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, TXSSZ(15),
TXSSZ(0x0F));
}
mcasp_set_reg(mcasp, DAVINCI_MCASP_TXMASK_REG, mask);
return 0;
}
static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream,
int period_words, int channels)
{
struct snd_dmaengine_dai_dma_data *dma_data = &mcasp->dma_data[stream];
int i;
u8 tx_ser = 0;
u8 rx_ser = 0;
u8 slots = mcasp->tdm_slots;
u8 max_active_serializers, max_rx_serializers, max_tx_serializers;
int active_serializers, numevt;
u32 reg;
	/* In DIT mode we only allow a maximum of one serializer for now */
if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
max_active_serializers = 1;
else
max_active_serializers = DIV_ROUND_UP(channels, slots);
/* Default configuration */
if (mcasp->version < MCASP_VERSION_3)
mcasp_set_bits(mcasp, DAVINCI_MCASP_PWREMUMGT_REG, MCASP_SOFT);
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
mcasp_set_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG, 0xFFFFFFFF);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_XEVTCTL_REG, TXDATADMADIS);
max_tx_serializers = max_active_serializers;
max_rx_serializers =
mcasp->active_serializers[SNDRV_PCM_STREAM_CAPTURE];
} else {
mcasp_set_reg(mcasp, DAVINCI_MCASP_RXSTAT_REG, 0xFFFFFFFF);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_REVTCTL_REG, RXDATADMADIS);
max_tx_serializers =
mcasp->active_serializers[SNDRV_PCM_STREAM_PLAYBACK];
max_rx_serializers = max_active_serializers;
}
for (i = 0; i < mcasp->num_serializer; i++) {
mcasp_set_bits(mcasp, DAVINCI_MCASP_XRSRCTL_REG(i),
mcasp->serial_dir[i]);
if (mcasp->serial_dir[i] == TX_MODE &&
tx_ser < max_tx_serializers) {
mcasp_mod_bits(mcasp, DAVINCI_MCASP_XRSRCTL_REG(i),
mcasp->dismod, DISMOD_MASK);
set_bit(PIN_BIT_AXR(i), &mcasp->pdir);
tx_ser++;
} else if (mcasp->serial_dir[i] == RX_MODE &&
rx_ser < max_rx_serializers) {
clear_bit(PIN_BIT_AXR(i), &mcasp->pdir);
rx_ser++;
} else {
/* Inactive or unused pin, set it to inactive */
mcasp_mod_bits(mcasp, DAVINCI_MCASP_XRSRCTL_REG(i),
SRMOD_INACTIVE, SRMOD_MASK);
/* If unused, set DISMOD for the pin */
if (mcasp->serial_dir[i] != INACTIVE_MODE)
mcasp_mod_bits(mcasp,
DAVINCI_MCASP_XRSRCTL_REG(i),
mcasp->dismod, DISMOD_MASK);
clear_bit(PIN_BIT_AXR(i), &mcasp->pdir);
}
}
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
active_serializers = tx_ser;
numevt = mcasp->txnumevt;
reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
} else {
active_serializers = rx_ser;
numevt = mcasp->rxnumevt;
reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
}
if (active_serializers < max_active_serializers) {
dev_warn(mcasp->dev, "stream has more channels (%d) than are "
"enabled in mcasp (%d)\n", channels,
active_serializers * slots);
return -EINVAL;
}
/* AFIFO is not in use */
if (!numevt) {
/* Configure the burst size for platform drivers */
if (active_serializers > 1) {
/*
			 * If more than one serializer is in use, we have one
			 * DMA request to provide data for all serializers.
			 * For example, if three serializers are enabled, the
			 * DMA needs to transfer three words per DMA request.
*/
dma_data->maxburst = active_serializers;
} else {
dma_data->maxburst = 0;
}
goto out;
}
if (period_words % active_serializers) {
dev_err(mcasp->dev, "Invalid combination of period words and "
"active serializers: %d, %d\n", period_words,
active_serializers);
return -EINVAL;
}
/*
	 * Calculate the optimal AFIFO depth for the platform side:
	 * the number of words for numevt needs to be a multiple of the
	 * number of active serializers.
*/
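	/*
	 * Worked example (illustrative only): with txnumevt = 16, 2 active
	 * serializers and period_words = 96, numevt is first rounded down to
	 * a multiple of 2 (16), and since 96 % 16 == 0 that value is used
	 * for NUMEVT and as the DMA maxburst.
	 */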
numevt = (numevt / active_serializers) * active_serializers;
while (period_words % numevt && numevt > 0)
numevt -= active_serializers;
if (numevt <= 0)
numevt = active_serializers;
mcasp_mod_bits(mcasp, reg, active_serializers, NUMDMA_MASK);
mcasp_mod_bits(mcasp, reg, NUMEVT(numevt), NUMEVT_MASK);
/* Configure the burst size for platform drivers */
if (numevt == 1)
numevt = 0;
dma_data->maxburst = numevt;
out:
mcasp->active_serializers[stream] = active_serializers;
return 0;
}
static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream,
int channels)
{
int i, active_slots;
int total_slots;
int active_serializers;
u32 mask = 0;
u32 busel = 0;
total_slots = mcasp->tdm_slots;
/*
* If more than one serializer is needed, then use them with
* all the specified tdm_slots. Otherwise, one serializer can
* cope with the transaction using just as many slots as there
* are channels in the stream.
*/
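	/*
	 * Illustrative example: an 8 channel stream with tdm_slots = 4 and
	 * no tdm_mask needs DIV_ROUND_UP(8, 4) = 2 serializers, so all 4
	 * slots are active (mask = 0xf); a 2 channel stream on the same
	 * setup fits one serializer and activates only 2 slots (mask = 0x3).
	 */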
if (mcasp->tdm_mask[stream]) {
active_slots = hweight32(mcasp->tdm_mask[stream]);
active_serializers = DIV_ROUND_UP(channels, active_slots);
if (active_serializers == 1)
active_slots = channels;
for (i = 0; i < total_slots; i++) {
if ((1 << i) & mcasp->tdm_mask[stream]) {
mask |= (1 << i);
if (--active_slots <= 0)
break;
}
}
} else {
active_serializers = DIV_ROUND_UP(channels, total_slots);
if (active_serializers == 1)
active_slots = channels;
else
active_slots = total_slots;
for (i = 0; i < active_slots; i++)
mask |= (1 << i);
}
mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, TX_ASYNC);
if (!mcasp->dat_port)
busel = TXSEL;
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
mcasp_set_reg(mcasp, DAVINCI_MCASP_TXTDM_REG, mask);
mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, busel | TXORD);
mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG,
FSXMOD(total_slots), FSXMOD(0x1FF));
} else if (stream == SNDRV_PCM_STREAM_CAPTURE) {
mcasp_set_reg(mcasp, DAVINCI_MCASP_RXTDM_REG, mask);
mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD);
mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG,
FSRMOD(total_slots), FSRMOD(0x1FF));
/*
		 * If McASP is set to be TX/RX synchronous and playback is
		 * not already running, we need to configure the TX slots in
		 * order to have the correct FSX on the bus.
*/
if (mcasp_is_synchronous(mcasp) && !mcasp->channels)
mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG,
FSXMOD(total_slots), FSXMOD(0x1FF));
}
return 0;
}
/* S/PDIF */
static int mcasp_dit_hw_param(struct davinci_mcasp *mcasp,
unsigned int rate)
{
u8 *cs_bytes = (u8 *)&mcasp->iec958_status;
if (!mcasp->dat_port)
mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, TXSEL);
else
mcasp_clr_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, TXSEL);
	/* Set TX frame sync: DIT mode, 1 bit width, internal, rising edge */
mcasp_set_reg(mcasp, DAVINCI_MCASP_TXFMCTL_REG, AFSXE | FSXMOD(0x180));
mcasp_set_reg(mcasp, DAVINCI_MCASP_TXMASK_REG, 0xFFFF);
/* Set the TX tdm : for all the slots */
mcasp_set_reg(mcasp, DAVINCI_MCASP_TXTDM_REG, 0xFFFFFFFF);
/* Set the TX clock controls : div = 1 and internal */
mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE | TX_ASYNC);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_XEVTCTL_REG, TXDATADMADIS);
/* Set S/PDIF channel status bits */
cs_bytes[3] &= ~IEC958_AES3_CON_FS;
switch (rate) {
case 22050:
cs_bytes[3] |= IEC958_AES3_CON_FS_22050;
break;
case 24000:
cs_bytes[3] |= IEC958_AES3_CON_FS_24000;
break;
case 32000:
cs_bytes[3] |= IEC958_AES3_CON_FS_32000;
break;
case 44100:
cs_bytes[3] |= IEC958_AES3_CON_FS_44100;
break;
case 48000:
cs_bytes[3] |= IEC958_AES3_CON_FS_48000;
break;
case 88200:
cs_bytes[3] |= IEC958_AES3_CON_FS_88200;
break;
case 96000:
cs_bytes[3] |= IEC958_AES3_CON_FS_96000;
break;
case 176400:
cs_bytes[3] |= IEC958_AES3_CON_FS_176400;
break;
case 192000:
cs_bytes[3] |= IEC958_AES3_CON_FS_192000;
break;
default:
dev_err(mcasp->dev, "unsupported sampling rate: %d\n", rate);
return -EINVAL;
}
mcasp_set_reg(mcasp, DAVINCI_MCASP_DITCSRA_REG, mcasp->iec958_status);
mcasp_set_reg(mcasp, DAVINCI_MCASP_DITCSRB_REG, mcasp->iec958_status);
/* Enable the DIT */
mcasp_set_bits(mcasp, DAVINCI_MCASP_TXDITCTL_REG, DITEN);
return 0;
}
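/*
 * Pick the BCLK (and, when the AUXCLK divider is in use, AHCLKX) divider for
 * the requested bit clock and return the resulting sample-rate error in PPM.
 * Illustrative example (hypothetical clocks): a 24.576 MHz reference with a
 * 3.072 MHz bit clock gives div = 8 and 0 PPM error; a reference that is not
 * an integer multiple of the bit clock results in a nonzero PPM error being
 * returned to the caller.
 */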
static int davinci_mcasp_calc_clk_div(struct davinci_mcasp *mcasp,
unsigned int sysclk_freq,
unsigned int bclk_freq, bool set)
{
u32 reg = mcasp_get_reg(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG);
int div = sysclk_freq / bclk_freq;
int rem = sysclk_freq % bclk_freq;
int error_ppm;
int aux_div = 1;
if (div > (ACLKXDIV_MASK + 1)) {
if (reg & AHCLKXE) {
aux_div = div / (ACLKXDIV_MASK + 1);
if (div % (ACLKXDIV_MASK + 1))
aux_div++;
sysclk_freq /= aux_div;
div = sysclk_freq / bclk_freq;
rem = sysclk_freq % bclk_freq;
} else if (set) {
dev_warn(mcasp->dev, "Too fast reference clock (%u)\n",
sysclk_freq);
}
}
if (rem != 0) {
if (div == 0 ||
((sysclk_freq / div) - bclk_freq) >
(bclk_freq - (sysclk_freq / (div+1)))) {
div++;
rem = rem - bclk_freq;
}
}
error_ppm = (div*1000000 + (int)div64_long(1000000LL*rem,
(int)bclk_freq)) / div - 1000000;
if (set) {
if (error_ppm)
dev_info(mcasp->dev, "Sample-rate is off by %d PPM\n",
error_ppm);
__davinci_mcasp_set_clkdiv(mcasp, MCASP_CLKDIV_BCLK, div, 0);
if (reg & AHCLKXE)
__davinci_mcasp_set_clkdiv(mcasp, MCASP_CLKDIV_AUXCLK,
aux_div, 0);
}
return error_ppm;
}
static inline u32 davinci_mcasp_tx_delay(struct davinci_mcasp *mcasp)
{
if (!mcasp->txnumevt)
return 0;
return mcasp_get_reg(mcasp, mcasp->fifo_base + MCASP_WFIFOSTS_OFFSET);
}
static inline u32 davinci_mcasp_rx_delay(struct davinci_mcasp *mcasp)
{
if (!mcasp->rxnumevt)
return 0;
return mcasp_get_reg(mcasp, mcasp->fifo_base + MCASP_RFIFOSTS_OFFSET);
}
static snd_pcm_sframes_t davinci_mcasp_delay(
struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
u32 fifo_use;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
fifo_use = davinci_mcasp_tx_delay(mcasp);
else
fifo_use = davinci_mcasp_rx_delay(mcasp);
/*
	 * Divide the number of used FIFO locations by the channel count to
	 * get the FIFO usage in samples (partial samples in the buffer are
	 * ignored).
*/
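	/*
	 * For example (illustrative), 48 words queued in the AFIFO for a
	 * stereo stream translate to a reported delay of 48 / 2 = 24 frames.
	 */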
return fifo_use / substream->runtime->channels;
}
static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *cpu_dai)
{
struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
int word_length;
int channels = params_channels(params);
int period_size = params_period_size(params);
int ret;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_U8:
case SNDRV_PCM_FORMAT_S8:
word_length = 8;
break;
case SNDRV_PCM_FORMAT_U16_LE:
case SNDRV_PCM_FORMAT_S16_LE:
word_length = 16;
break;
case SNDRV_PCM_FORMAT_U24_3LE:
case SNDRV_PCM_FORMAT_S24_3LE:
word_length = 24;
break;
case SNDRV_PCM_FORMAT_U24_LE:
case SNDRV_PCM_FORMAT_S24_LE:
word_length = 24;
break;
case SNDRV_PCM_FORMAT_U32_LE:
case SNDRV_PCM_FORMAT_S32_LE:
word_length = 32;
break;
default:
printk(KERN_WARNING "davinci-mcasp: unsupported PCM format");
return -EINVAL;
}
ret = davinci_mcasp_set_dai_fmt(cpu_dai, mcasp->dai_fmt);
if (ret)
return ret;
/*
* If mcasp is BCLK master, and a BCLK divider was not provided by
* the machine driver, we need to calculate the ratio.
*/
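	/*
	 * Example (illustrative values, not from a specific board): a 48 kHz
	 * I2S stream with 32-bit slots and 2 tdm slots needs
	 * bclk_target = 48000 * 32 * 2 = 3.072 MHz; with a 24.576 MHz sysclk
	 * the calculated divider is 8.
	 */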
if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) {
int slots = mcasp->tdm_slots;
int rate = params_rate(params);
int sbits = params_width(params);
unsigned int bclk_target;
if (mcasp->slot_width)
sbits = mcasp->slot_width;
if (mcasp->op_mode == DAVINCI_MCASP_IIS_MODE)
bclk_target = rate * sbits * slots;
else
bclk_target = rate * 128;
davinci_mcasp_calc_clk_div(mcasp, mcasp->sysclk_freq,
bclk_target, true);
}
ret = mcasp_common_hw_param(mcasp, substream->stream,
period_size * channels, channels);
if (ret)
return ret;
if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
ret = mcasp_dit_hw_param(mcasp, params_rate(params));
else
ret = mcasp_i2s_hw_param(mcasp, substream->stream,
channels);
if (ret)
return ret;
davinci_config_channel_size(mcasp, word_length);
if (mcasp->op_mode == DAVINCI_MCASP_IIS_MODE) {
mcasp->channels = channels;
if (!mcasp->max_format_width)
mcasp->max_format_width = word_length;
}
return 0;
}
static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
int cmd, struct snd_soc_dai *cpu_dai)
{
struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
int ret = 0;
switch (cmd) {
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
davinci_mcasp_start(mcasp, substream->stream);
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
davinci_mcasp_stop(mcasp, substream->stream);
break;
default:
ret = -EINVAL;
}
return ret;
}
static int davinci_mcasp_hw_rule_slot_width(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct davinci_mcasp_ruledata *rd = rule->private;
struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
struct snd_mask nfmt;
int slot_width;
snd_pcm_format_t i;
snd_mask_none(&nfmt);
slot_width = rd->mcasp->slot_width;
pcm_for_each_format(i) {
if (snd_mask_test_format(fmt, i)) {
if (snd_pcm_format_width(i) <= slot_width) {
snd_mask_set_format(&nfmt, i);
}
}
}
return snd_mask_refine(fmt, &nfmt);
}
static int davinci_mcasp_hw_rule_format_width(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct davinci_mcasp_ruledata *rd = rule->private;
struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
struct snd_mask nfmt;
int format_width;
snd_pcm_format_t i;
snd_mask_none(&nfmt);
format_width = rd->mcasp->max_format_width;
pcm_for_each_format(i) {
if (snd_mask_test_format(fmt, i)) {
if (snd_pcm_format_width(i) == format_width) {
snd_mask_set_format(&nfmt, i);
}
}
}
return snd_mask_refine(fmt, &nfmt);
}
static const unsigned int davinci_mcasp_dai_rates[] = {
8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000,
88200, 96000, 176400, 192000,
};
#define DAVINCI_MAX_RATE_ERROR_PPM 1000
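/*
 * Rate constraint rule: keep only the standard rates for which a BCLK
 * divider with less than DAVINCI_MAX_RATE_ERROR_PPM error can be derived
 * from the reference clock (see davinci_mcasp_calc_clk_div()).
 */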
static int davinci_mcasp_hw_rule_rate(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct davinci_mcasp_ruledata *rd = rule->private;
struct snd_interval *ri =
hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
int sbits = params_width(params);
int slots = rd->mcasp->tdm_slots;
struct snd_interval range;
int i;
if (rd->mcasp->slot_width)
sbits = rd->mcasp->slot_width;
snd_interval_any(&range);
range.empty = 1;
for (i = 0; i < ARRAY_SIZE(davinci_mcasp_dai_rates); i++) {
if (snd_interval_test(ri, davinci_mcasp_dai_rates[i])) {
uint bclk_freq = sbits * slots *
davinci_mcasp_dai_rates[i];
unsigned int sysclk_freq;
int ppm;
if (rd->mcasp->auxclk_fs_ratio)
sysclk_freq = davinci_mcasp_dai_rates[i] *
rd->mcasp->auxclk_fs_ratio;
else
sysclk_freq = rd->mcasp->sysclk_freq;
ppm = davinci_mcasp_calc_clk_div(rd->mcasp, sysclk_freq,
bclk_freq, false);
if (abs(ppm) < DAVINCI_MAX_RATE_ERROR_PPM) {
if (range.empty) {
range.min = davinci_mcasp_dai_rates[i];
range.empty = 0;
}
range.max = davinci_mcasp_dai_rates[i];
}
}
}
dev_dbg(rd->mcasp->dev,
"Frequencies %d-%d -> %d-%d for %d sbits and %d tdm slots\n",
ri->min, ri->max, range.min, range.max, sbits, slots);
return snd_interval_refine(hw_param_interval(params, rule->var),
&range);
}
static int davinci_mcasp_hw_rule_format(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct davinci_mcasp_ruledata *rd = rule->private;
struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
struct snd_mask nfmt;
int rate = params_rate(params);
int slots = rd->mcasp->tdm_slots;
int count = 0;
snd_pcm_format_t i;
snd_mask_none(&nfmt);
pcm_for_each_format(i) {
if (snd_mask_test_format(fmt, i)) {
uint sbits = snd_pcm_format_width(i);
unsigned int sysclk_freq;
int ppm;
if (rd->mcasp->auxclk_fs_ratio)
sysclk_freq = rate *
rd->mcasp->auxclk_fs_ratio;
else
sysclk_freq = rd->mcasp->sysclk_freq;
if (rd->mcasp->slot_width)
sbits = rd->mcasp->slot_width;
ppm = davinci_mcasp_calc_clk_div(rd->mcasp, sysclk_freq,
sbits * slots * rate,
false);
if (abs(ppm) < DAVINCI_MAX_RATE_ERROR_PPM) {
snd_mask_set_format(&nfmt, i);
count++;
}
}
}
dev_dbg(rd->mcasp->dev,
"%d possible sample format for %d Hz and %d tdm slots\n",
count, rate, slots);
return snd_mask_refine(fmt, &nfmt);
}
static int davinci_mcasp_hw_rule_min_periodsize(
struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
{
struct snd_interval *period_size = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
struct snd_interval frames;
snd_interval_any(&frames);
frames.min = 64;
frames.integer = 1;
return snd_interval_refine(period_size, &frames);
}
static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
struct davinci_mcasp_ruledata *ruledata =
&mcasp->ruledata[substream->stream];
u32 max_channels = 0;
int i, dir, ret;
int tdm_slots = mcasp->tdm_slots;
	/* Do not allow more than one stream per direction */
if (mcasp->substreams[substream->stream])
return -EBUSY;
mcasp->substreams[substream->stream] = substream;
if (mcasp->tdm_mask[substream->stream])
tdm_slots = hweight32(mcasp->tdm_mask[substream->stream]);
if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
return 0;
/*
* Limit the maximum allowed channels for the first stream:
* number of serializers for the direction * tdm slots per serializer
*/
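	/*
	 * For instance (illustrative), 3 serializers in the stream direction
	 * with 8 tdm slots allow up to 3 * 8 = 24 channels for the first
	 * stream.
	 */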
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
dir = TX_MODE;
else
dir = RX_MODE;
for (i = 0; i < mcasp->num_serializer; i++) {
if (mcasp->serial_dir[i] == dir)
max_channels++;
}
ruledata->serializers = max_channels;
ruledata->mcasp = mcasp;
max_channels *= tdm_slots;
/*
	 * If the already active stream has fewer channels than the limit
	 * calculated from serializers * tdm_slots, and only one serializer
	 * is in use, we need to use that channel count as the constraint for
	 * the second stream. Otherwise (first stream, fewer allowed channels
	 * or more than one serializer in use) we use the calculated
	 * constraint.
*/
if (mcasp->channels && mcasp->channels < max_channels &&
ruledata->serializers == 1)
max_channels = mcasp->channels;
/*
	 * But we can always allow channels up to the number of
	 * available tdm_slots.
*/
if (max_channels < tdm_slots)
max_channels = tdm_slots;
snd_pcm_hw_constraint_minmax(substream->runtime,
SNDRV_PCM_HW_PARAM_CHANNELS,
0, max_channels);
snd_pcm_hw_constraint_list(substream->runtime,
0, SNDRV_PCM_HW_PARAM_CHANNELS,
&mcasp->chconstr[substream->stream]);
if (mcasp->max_format_width) {
/*
		 * Only allow formats which require the same number of bits
		 * on the bus as the currently running stream.
*/
ret = snd_pcm_hw_rule_add(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_FORMAT,
davinci_mcasp_hw_rule_format_width,
ruledata,
SNDRV_PCM_HW_PARAM_FORMAT, -1);
if (ret)
return ret;
}
else if (mcasp->slot_width) {
		/* Only allow formats that require <= slot_width bits on the bus */
ret = snd_pcm_hw_rule_add(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_FORMAT,
davinci_mcasp_hw_rule_slot_width,
ruledata,
SNDRV_PCM_HW_PARAM_FORMAT, -1);
if (ret)
return ret;
}
/*
* If we rely on implicit BCLK divider setting we should
* set constraints based on what we can provide.
*/
if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) {
ret = snd_pcm_hw_rule_add(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
davinci_mcasp_hw_rule_rate,
ruledata,
SNDRV_PCM_HW_PARAM_FORMAT, -1);
if (ret)
return ret;
ret = snd_pcm_hw_rule_add(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_FORMAT,
davinci_mcasp_hw_rule_format,
ruledata,
SNDRV_PCM_HW_PARAM_RATE, -1);
if (ret)
return ret;
}
snd_pcm_hw_rule_add(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
davinci_mcasp_hw_rule_min_periodsize, NULL,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
return 0;
}
static void davinci_mcasp_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
mcasp->substreams[substream->stream] = NULL;
mcasp->active_serializers[substream->stream] = 0;
if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
return;
if (!snd_soc_dai_active(cpu_dai)) {
mcasp->channels = 0;
mcasp->max_format_width = 0;
}
}
static int davinci_mcasp_iec958_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
uinfo->count = 1;
return 0;
}
static int davinci_mcasp_iec958_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *uctl)
{
struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
memcpy(uctl->value.iec958.status, &mcasp->iec958_status,
sizeof(mcasp->iec958_status));
return 0;
}
static int davinci_mcasp_iec958_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *uctl)
{
struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
memcpy(&mcasp->iec958_status, uctl->value.iec958.status,
sizeof(mcasp->iec958_status));
return 0;
}
static int davinci_mcasp_iec958_con_mask_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
memset(ucontrol->value.iec958.status, 0xff, sizeof(mcasp->iec958_status));
return 0;
}
static const struct snd_kcontrol_new davinci_mcasp_iec958_ctls[] = {
{
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_VOLATILE),
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
.info = davinci_mcasp_iec958_info,
.get = davinci_mcasp_iec958_get,
.put = davinci_mcasp_iec958_put,
}, {
.access = SNDRV_CTL_ELEM_ACCESS_READ,
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, CON_MASK),
.info = davinci_mcasp_iec958_info,
.get = davinci_mcasp_iec958_con_mask_get,
},
};
static void davinci_mcasp_init_iec958_status(struct davinci_mcasp *mcasp)
{
unsigned char *cs = (u8 *)&mcasp->iec958_status;
cs[0] = IEC958_AES0_CON_NOT_COPYRIGHT | IEC958_AES0_CON_EMPHASIS_NONE;
cs[1] = IEC958_AES1_CON_PCM_CODER;
cs[2] = IEC958_AES2_CON_SOURCE_UNSPEC | IEC958_AES2_CON_CHANNEL_UNSPEC;
cs[3] = IEC958_AES3_CON_CLOCK_1000PPM;
}
static int davinci_mcasp_dai_probe(struct snd_soc_dai *dai)
{
struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
int stream;
for_each_pcm_streams(stream)
snd_soc_dai_dma_data_set(dai, stream, &mcasp->dma_data[stream]);
if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE) {
davinci_mcasp_init_iec958_status(mcasp);
snd_soc_add_dai_controls(dai, davinci_mcasp_iec958_ctls,
ARRAY_SIZE(davinci_mcasp_iec958_ctls));
}
return 0;
}
static const struct snd_soc_dai_ops davinci_mcasp_dai_ops = {
.probe = davinci_mcasp_dai_probe,
.startup = davinci_mcasp_startup,
.shutdown = davinci_mcasp_shutdown,
.trigger = davinci_mcasp_trigger,
.delay = davinci_mcasp_delay,
.hw_params = davinci_mcasp_hw_params,
.set_fmt = davinci_mcasp_set_dai_fmt,
.set_clkdiv = davinci_mcasp_set_clkdiv,
.set_sysclk = davinci_mcasp_set_sysclk,
.set_tdm_slot = davinci_mcasp_set_tdm_slot,
};
#define DAVINCI_MCASP_RATES SNDRV_PCM_RATE_8000_192000
#define DAVINCI_MCASP_PCM_FMTS (SNDRV_PCM_FMTBIT_S8 | \
SNDRV_PCM_FMTBIT_U8 | \
SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_U16_LE | \
SNDRV_PCM_FMTBIT_S24_LE | \
SNDRV_PCM_FMTBIT_U24_LE | \
SNDRV_PCM_FMTBIT_S24_3LE | \
SNDRV_PCM_FMTBIT_U24_3LE | \
SNDRV_PCM_FMTBIT_S32_LE | \
SNDRV_PCM_FMTBIT_U32_LE)
static struct snd_soc_dai_driver davinci_mcasp_dai[] = {
{
.name = "davinci-mcasp.0",
.playback = {
.stream_name = "IIS Playback",
.channels_min = 1,
.channels_max = 32 * 16,
.rates = DAVINCI_MCASP_RATES,
.formats = DAVINCI_MCASP_PCM_FMTS,
},
.capture = {
.stream_name = "IIS Capture",
.channels_min = 1,
.channels_max = 32 * 16,
.rates = DAVINCI_MCASP_RATES,
.formats = DAVINCI_MCASP_PCM_FMTS,
},
.ops = &davinci_mcasp_dai_ops,
.symmetric_rate = 1,
},
{
.name = "davinci-mcasp.1",
.playback = {
.stream_name = "DIT Playback",
.channels_min = 1,
.channels_max = 384,
.rates = DAVINCI_MCASP_RATES,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE,
},
.ops = &davinci_mcasp_dai_ops,
},
};
static const struct snd_soc_component_driver davinci_mcasp_component = {
.name = "davinci-mcasp",
.legacy_dai_naming = 1,
};
/* Some HW specific values and defaults. The rest is filled in from DT. */
static struct davinci_mcasp_pdata dm646x_mcasp_pdata = {
.tx_dma_offset = 0x400,
.rx_dma_offset = 0x400,
.version = MCASP_VERSION_1,
};
static struct davinci_mcasp_pdata da830_mcasp_pdata = {
.tx_dma_offset = 0x2000,
.rx_dma_offset = 0x2000,
.version = MCASP_VERSION_2,
};
static struct davinci_mcasp_pdata am33xx_mcasp_pdata = {
.tx_dma_offset = 0,
.rx_dma_offset = 0,
.version = MCASP_VERSION_3,
};
static struct davinci_mcasp_pdata dra7_mcasp_pdata = {
/* The CFG port offset will be calculated if it is needed */
.tx_dma_offset = 0,
.rx_dma_offset = 0,
.version = MCASP_VERSION_4,
};
static struct davinci_mcasp_pdata omap_mcasp_pdata = {
.tx_dma_offset = 0x200,
.rx_dma_offset = 0,
.version = MCASP_VERSION_OMAP,
};
static const struct of_device_id mcasp_dt_ids[] = {
{
.compatible = "ti,dm646x-mcasp-audio",
.data = &dm646x_mcasp_pdata,
},
{
.compatible = "ti,da830-mcasp-audio",
.data = &da830_mcasp_pdata,
},
{
.compatible = "ti,am33xx-mcasp-audio",
.data = &am33xx_mcasp_pdata,
},
{
.compatible = "ti,dra7-mcasp-audio",
.data = &dra7_mcasp_pdata,
},
{
.compatible = "ti,omap4-mcasp-audio",
.data = &omap_mcasp_pdata,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mcasp_dt_ids);
static int mcasp_reparent_fck(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct clk *gfclk, *parent_clk;
const char *parent_name;
int ret;
if (!node)
return 0;
parent_name = of_get_property(node, "fck_parent", NULL);
if (!parent_name)
return 0;
dev_warn(&pdev->dev, "Update the bindings to use assigned-clocks!\n");
gfclk = clk_get(&pdev->dev, "fck");
if (IS_ERR(gfclk)) {
dev_err(&pdev->dev, "failed to get fck\n");
return PTR_ERR(gfclk);
}
parent_clk = clk_get(NULL, parent_name);
if (IS_ERR(parent_clk)) {
dev_err(&pdev->dev, "failed to get parent clock\n");
ret = PTR_ERR(parent_clk);
goto err1;
}
ret = clk_set_parent(gfclk, parent_clk);
if (ret) {
dev_err(&pdev->dev, "failed to reparent fck\n");
goto err2;
}
err2:
clk_put(parent_clk);
err1:
clk_put(gfclk);
return ret;
}
static bool davinci_mcasp_have_gpiochip(struct davinci_mcasp *mcasp)
{
#ifdef CONFIG_OF_GPIO
return of_property_read_bool(mcasp->dev->of_node, "gpio-controller");
#else
return false;
#endif
}
static int davinci_mcasp_get_config(struct davinci_mcasp *mcasp,
struct platform_device *pdev)
{
const struct of_device_id *match = of_match_device(mcasp_dt_ids, &pdev->dev);
struct device_node *np = pdev->dev.of_node;
struct davinci_mcasp_pdata *pdata = NULL;
const u32 *of_serial_dir32;
u32 val;
int i;
if (pdev->dev.platform_data) {
pdata = pdev->dev.platform_data;
pdata->dismod = DISMOD_LOW;
goto out;
} else if (match) {
pdata = devm_kmemdup(&pdev->dev, match->data, sizeof(*pdata),
GFP_KERNEL);
if (!pdata)
return -ENOMEM;
} else {
dev_err(&pdev->dev, "No compatible match found\n");
return -EINVAL;
}
if (of_property_read_u32(np, "op-mode", &val) == 0) {
pdata->op_mode = val;
} else {
mcasp->missing_audio_param = true;
goto out;
}
if (of_property_read_u32(np, "tdm-slots", &val) == 0) {
if (val < 2 || val > 32) {
dev_err(&pdev->dev, "tdm-slots must be in rage [2-32]\n");
return -EINVAL;
}
pdata->tdm_slots = val;
} else if (pdata->op_mode == DAVINCI_MCASP_IIS_MODE) {
mcasp->missing_audio_param = true;
goto out;
}
of_serial_dir32 = of_get_property(np, "serial-dir", &val);
val /= sizeof(u32);
if (of_serial_dir32) {
u8 *of_serial_dir = devm_kzalloc(&pdev->dev,
(sizeof(*of_serial_dir) * val),
GFP_KERNEL);
if (!of_serial_dir)
return -ENOMEM;
for (i = 0; i < val; i++)
of_serial_dir[i] = be32_to_cpup(&of_serial_dir32[i]);
pdata->num_serializer = val;
pdata->serial_dir = of_serial_dir;
} else {
mcasp->missing_audio_param = true;
goto out;
}
if (of_property_read_u32(np, "tx-num-evt", &val) == 0)
pdata->txnumevt = val;
if (of_property_read_u32(np, "rx-num-evt", &val) == 0)
pdata->rxnumevt = val;
if (of_property_read_u32(np, "auxclk-fs-ratio", &val) == 0)
mcasp->auxclk_fs_ratio = val;
if (of_property_read_u32(np, "dismod", &val) == 0) {
if (val == 0 || val == 2 || val == 3) {
pdata->dismod = DISMOD_VAL(val);
} else {
dev_warn(&pdev->dev, "Invalid dismod value: %u\n", val);
pdata->dismod = DISMOD_LOW;
}
} else {
pdata->dismod = DISMOD_LOW;
}
out:
mcasp->pdata = pdata;
if (mcasp->missing_audio_param) {
if (davinci_mcasp_have_gpiochip(mcasp)) {
dev_dbg(&pdev->dev, "Missing DT parameter(s) for audio\n");
return 0;
}
dev_err(&pdev->dev, "Insufficient DT parameter(s)\n");
return -ENODEV;
}
mcasp->op_mode = pdata->op_mode;
/* sanity check for tdm slots parameter */
if (mcasp->op_mode == DAVINCI_MCASP_IIS_MODE) {
if (pdata->tdm_slots < 2) {
dev_warn(&pdev->dev, "invalid tdm slots: %d\n",
pdata->tdm_slots);
mcasp->tdm_slots = 2;
} else if (pdata->tdm_slots > 32) {
dev_warn(&pdev->dev, "invalid tdm slots: %d\n",
pdata->tdm_slots);
mcasp->tdm_slots = 32;
} else {
mcasp->tdm_slots = pdata->tdm_slots;
}
} else {
mcasp->tdm_slots = 32;
}
mcasp->num_serializer = pdata->num_serializer;
#ifdef CONFIG_PM
mcasp->context.xrsr_regs = devm_kcalloc(&pdev->dev,
mcasp->num_serializer, sizeof(u32),
GFP_KERNEL);
if (!mcasp->context.xrsr_regs)
return -ENOMEM;
#endif
mcasp->serial_dir = pdata->serial_dir;
mcasp->version = pdata->version;
mcasp->txnumevt = pdata->txnumevt;
mcasp->rxnumevt = pdata->rxnumevt;
mcasp->dismod = pdata->dismod;
return 0;
}
enum {
PCM_EDMA,
PCM_SDMA,
PCM_UDMA,
};
static const char *sdma_prefix = "ti,omap";
static int davinci_mcasp_get_dma_type(struct davinci_mcasp *mcasp)
{
struct dma_chan *chan;
const char *tmp;
int ret = PCM_EDMA;
if (!mcasp->dev->of_node)
return PCM_EDMA;
tmp = mcasp->dma_data[SNDRV_PCM_STREAM_PLAYBACK].filter_data;
chan = dma_request_chan(mcasp->dev, tmp);
if (IS_ERR(chan))
return dev_err_probe(mcasp->dev, PTR_ERR(chan),
"Can't verify DMA configuration\n");
if (WARN_ON(!chan->device || !chan->device->dev)) {
dma_release_channel(chan);
return -EINVAL;
}
if (chan->device->dev->of_node)
ret = of_property_read_string(chan->device->dev->of_node,
"compatible", &tmp);
else
dev_dbg(mcasp->dev, "DMA controller has no of-node\n");
dma_release_channel(chan);
if (ret)
return ret;
dev_dbg(mcasp->dev, "DMA controller compatible = \"%s\"\n", tmp);
if (!strncmp(tmp, sdma_prefix, strlen(sdma_prefix)))
return PCM_SDMA;
else if (strstr(tmp, "udmap"))
return PCM_UDMA;
else if (strstr(tmp, "bcdma"))
return PCM_UDMA;
return PCM_EDMA;
}
static u32 davinci_mcasp_txdma_offset(struct davinci_mcasp_pdata *pdata)
{
int i;
u32 offset = 0;
if (pdata->version != MCASP_VERSION_4)
return pdata->tx_dma_offset;
for (i = 0; i < pdata->num_serializer; i++) {
if (pdata->serial_dir[i] == TX_MODE) {
if (!offset) {
offset = DAVINCI_MCASP_TXBUF_REG(i);
} else {
pr_err("%s: Only one serializer allowed!\n",
__func__);
break;
}
}
}
return offset;
}
static u32 davinci_mcasp_rxdma_offset(struct davinci_mcasp_pdata *pdata)
{
int i;
u32 offset = 0;
if (pdata->version != MCASP_VERSION_4)
return pdata->rx_dma_offset;
for (i = 0; i < pdata->num_serializer; i++) {
if (pdata->serial_dir[i] == RX_MODE) {
if (!offset) {
offset = DAVINCI_MCASP_RXBUF_REG(i);
} else {
pr_err("%s: Only one serializer allowed!\n",
__func__);
break;
}
}
}
return offset;
}
#ifdef CONFIG_GPIOLIB
static int davinci_mcasp_gpio_request(struct gpio_chip *chip, unsigned offset)
{
struct davinci_mcasp *mcasp = gpiochip_get_data(chip);
if (mcasp->num_serializer && offset < mcasp->num_serializer &&
mcasp->serial_dir[offset] != INACTIVE_MODE) {
dev_err(mcasp->dev, "AXR%u pin is used for audio\n", offset);
return -EBUSY;
}
/* Do not change the PIN yet */
return pm_runtime_resume_and_get(mcasp->dev);
}
static void davinci_mcasp_gpio_free(struct gpio_chip *chip, unsigned offset)
{
struct davinci_mcasp *mcasp = gpiochip_get_data(chip);
/* Set the direction to input */
mcasp_clr_bits(mcasp, DAVINCI_MCASP_PDIR_REG, BIT(offset));
/* Set the pin as McASP pin */
mcasp_clr_bits(mcasp, DAVINCI_MCASP_PFUNC_REG, BIT(offset));
pm_runtime_put_sync(mcasp->dev);
}
static int davinci_mcasp_gpio_direction_out(struct gpio_chip *chip,
unsigned offset, int value)
{
struct davinci_mcasp *mcasp = gpiochip_get_data(chip);
u32 val;
if (value)
mcasp_set_bits(mcasp, DAVINCI_MCASP_PDOUT_REG, BIT(offset));
else
mcasp_clr_bits(mcasp, DAVINCI_MCASP_PDOUT_REG, BIT(offset));
val = mcasp_get_reg(mcasp, DAVINCI_MCASP_PFUNC_REG);
if (!(val & BIT(offset))) {
/* Set the pin as GPIO pin */
mcasp_set_bits(mcasp, DAVINCI_MCASP_PFUNC_REG, BIT(offset));
/* Set the direction to output */
mcasp_set_bits(mcasp, DAVINCI_MCASP_PDIR_REG, BIT(offset));
}
return 0;
}
static void davinci_mcasp_gpio_set(struct gpio_chip *chip, unsigned offset,
int value)
{
struct davinci_mcasp *mcasp = gpiochip_get_data(chip);
if (value)
mcasp_set_bits(mcasp, DAVINCI_MCASP_PDOUT_REG, BIT(offset));
else
mcasp_clr_bits(mcasp, DAVINCI_MCASP_PDOUT_REG, BIT(offset));
}
static int davinci_mcasp_gpio_direction_in(struct gpio_chip *chip,
unsigned offset)
{
struct davinci_mcasp *mcasp = gpiochip_get_data(chip);
u32 val;
val = mcasp_get_reg(mcasp, DAVINCI_MCASP_PFUNC_REG);
if (!(val & BIT(offset))) {
/* Set the direction to input */
mcasp_clr_bits(mcasp, DAVINCI_MCASP_PDIR_REG, BIT(offset));
/* Set the pin as GPIO pin */
mcasp_set_bits(mcasp, DAVINCI_MCASP_PFUNC_REG, BIT(offset));
}
return 0;
}
static int davinci_mcasp_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct davinci_mcasp *mcasp = gpiochip_get_data(chip);
u32 val;
val = mcasp_get_reg(mcasp, DAVINCI_MCASP_PDSET_REG);
if (val & BIT(offset))
return 1;
return 0;
}
static int davinci_mcasp_gpio_get_direction(struct gpio_chip *chip,
unsigned offset)
{
struct davinci_mcasp *mcasp = gpiochip_get_data(chip);
u32 val;
val = mcasp_get_reg(mcasp, DAVINCI_MCASP_PDIR_REG);
if (val & BIT(offset))
return 0;
return 1;
}
static const struct gpio_chip davinci_mcasp_template_chip = {
.owner = THIS_MODULE,
.request = davinci_mcasp_gpio_request,
.free = davinci_mcasp_gpio_free,
.direction_output = davinci_mcasp_gpio_direction_out,
.set = davinci_mcasp_gpio_set,
.direction_input = davinci_mcasp_gpio_direction_in,
.get = davinci_mcasp_gpio_get,
.get_direction = davinci_mcasp_gpio_get_direction,
.base = -1,
.ngpio = 32,
};
static int davinci_mcasp_init_gpiochip(struct davinci_mcasp *mcasp)
{
if (!davinci_mcasp_have_gpiochip(mcasp))
return 0;
mcasp->gpio_chip = davinci_mcasp_template_chip;
mcasp->gpio_chip.label = dev_name(mcasp->dev);
mcasp->gpio_chip.parent = mcasp->dev;
return devm_gpiochip_add_data(mcasp->dev, &mcasp->gpio_chip, mcasp);
}
#else /* CONFIG_GPIOLIB */
static inline int davinci_mcasp_init_gpiochip(struct davinci_mcasp *mcasp)
{
return 0;
}
#endif /* CONFIG_GPIOLIB */
static int davinci_mcasp_probe(struct platform_device *pdev)
{
struct snd_dmaengine_dai_dma_data *dma_data;
struct resource *mem, *dat;
struct davinci_mcasp *mcasp;
char *irq_name;
int irq;
int ret;
if (!pdev->dev.platform_data && !pdev->dev.of_node) {
dev_err(&pdev->dev, "No platform data supplied\n");
return -EINVAL;
}
mcasp = devm_kzalloc(&pdev->dev, sizeof(struct davinci_mcasp),
GFP_KERNEL);
if (!mcasp)
return -ENOMEM;
mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpu");
if (!mem) {
dev_warn(&pdev->dev,
"\"mpu\" mem resource not found, using index 0\n");
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(&pdev->dev, "no mem resource?\n");
return -ENODEV;
}
}
mcasp->base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(mcasp->base))
return PTR_ERR(mcasp->base);
dev_set_drvdata(&pdev->dev, mcasp);
pm_runtime_enable(&pdev->dev);
mcasp->dev = &pdev->dev;
ret = davinci_mcasp_get_config(mcasp, pdev);
if (ret)
goto err;
/* All PINS as McASP */
pm_runtime_get_sync(mcasp->dev);
mcasp_set_reg(mcasp, DAVINCI_MCASP_PFUNC_REG, 0x00000000);
pm_runtime_put(mcasp->dev);
	/* Skip audio related setup code if the configuration is not adequate */
if (mcasp->missing_audio_param)
goto no_audio;
irq = platform_get_irq_byname_optional(pdev, "common");
if (irq > 0) {
irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_common",
dev_name(&pdev->dev));
if (!irq_name) {
ret = -ENOMEM;
goto err;
}
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
davinci_mcasp_common_irq_handler,
IRQF_ONESHOT | IRQF_SHARED,
irq_name, mcasp);
if (ret) {
dev_err(&pdev->dev, "common IRQ request failed\n");
goto err;
}
mcasp->irq_request[SNDRV_PCM_STREAM_PLAYBACK] = XUNDRN;
mcasp->irq_request[SNDRV_PCM_STREAM_CAPTURE] = ROVRN;
}
irq = platform_get_irq_byname_optional(pdev, "rx");
if (irq > 0) {
irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_rx",
dev_name(&pdev->dev));
if (!irq_name) {
ret = -ENOMEM;
goto err;
}
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
davinci_mcasp_rx_irq_handler,
IRQF_ONESHOT, irq_name, mcasp);
if (ret) {
dev_err(&pdev->dev, "RX IRQ request failed\n");
goto err;
}
mcasp->irq_request[SNDRV_PCM_STREAM_CAPTURE] = ROVRN;
}
irq = platform_get_irq_byname_optional(pdev, "tx");
if (irq > 0) {
irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_tx",
dev_name(&pdev->dev));
if (!irq_name) {
ret = -ENOMEM;
goto err;
}
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
davinci_mcasp_tx_irq_handler,
IRQF_ONESHOT, irq_name, mcasp);
if (ret) {
dev_err(&pdev->dev, "TX IRQ request failed\n");
goto err;
}
mcasp->irq_request[SNDRV_PCM_STREAM_PLAYBACK] = XUNDRN;
}
dat = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat");
if (dat)
mcasp->dat_port = true;
dma_data = &mcasp->dma_data[SNDRV_PCM_STREAM_PLAYBACK];
dma_data->filter_data = "tx";
if (dat) {
dma_data->addr = dat->start;
/*
		 * According to the TRM there should be a 0x200 offset added
		 * to the DAT port address.
*/
if (mcasp->version == MCASP_VERSION_OMAP)
dma_data->addr += davinci_mcasp_txdma_offset(mcasp->pdata);
} else {
dma_data->addr = mem->start + davinci_mcasp_txdma_offset(mcasp->pdata);
}
/* RX is not valid in DIT mode */
if (mcasp->op_mode != DAVINCI_MCASP_DIT_MODE) {
dma_data = &mcasp->dma_data[SNDRV_PCM_STREAM_CAPTURE];
dma_data->filter_data = "rx";
if (dat)
dma_data->addr = dat->start;
else
dma_data->addr =
mem->start + davinci_mcasp_rxdma_offset(mcasp->pdata);
}
if (mcasp->version < MCASP_VERSION_3) {
mcasp->fifo_base = DAVINCI_MCASP_V2_AFIFO_BASE;
/* dma_params->dma_addr is pointing to the data port address */
mcasp->dat_port = true;
} else {
mcasp->fifo_base = DAVINCI_MCASP_V3_AFIFO_BASE;
}
	/* Allocate memory for a list long enough for all possible
	 * scenarios. The maximum number of tdm slots is 32 and there cannot
	 * be more serializers than given in the configuration. The
	 * serializer directions could be taken into account, but it
	 * would make the code much more complex and save only a couple of
	 * bytes.
*/
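	/*
	 * Illustrative sizing: with 3 serializers the list can hold at most
	 * 32 + 3 - 1 = 34 entries (channel counts 1..32 plus 2*slots and
	 * 3*slots as built by davinci_mcasp_ch_constraint()).
	 */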
mcasp->chconstr[SNDRV_PCM_STREAM_PLAYBACK].list =
devm_kcalloc(mcasp->dev,
32 + mcasp->num_serializer - 1,
sizeof(unsigned int),
GFP_KERNEL);
mcasp->chconstr[SNDRV_PCM_STREAM_CAPTURE].list =
devm_kcalloc(mcasp->dev,
32 + mcasp->num_serializer - 1,
sizeof(unsigned int),
GFP_KERNEL);
if (!mcasp->chconstr[SNDRV_PCM_STREAM_PLAYBACK].list ||
!mcasp->chconstr[SNDRV_PCM_STREAM_CAPTURE].list) {
ret = -ENOMEM;
goto err;
}
ret = davinci_mcasp_set_ch_constraints(mcasp);
if (ret)
goto err;
mcasp_reparent_fck(pdev);
ret = devm_snd_soc_register_component(&pdev->dev, &davinci_mcasp_component,
&davinci_mcasp_dai[mcasp->op_mode], 1);
if (ret != 0)
goto err;
ret = davinci_mcasp_get_dma_type(mcasp);
switch (ret) {
case PCM_EDMA:
ret = edma_pcm_platform_register(&pdev->dev);
break;
case PCM_SDMA:
if (mcasp->op_mode == DAVINCI_MCASP_IIS_MODE)
ret = sdma_pcm_platform_register(&pdev->dev, "tx", "rx");
else
ret = sdma_pcm_platform_register(&pdev->dev, "tx", NULL);
break;
case PCM_UDMA:
ret = udma_pcm_platform_register(&pdev->dev);
break;
default:
dev_err(&pdev->dev, "No DMA controller found (%d)\n", ret);
fallthrough;
case -EPROBE_DEFER:
goto err;
}
if (ret) {
dev_err(&pdev->dev, "register PCM failed: %d\n", ret);
goto err;
}
no_audio:
ret = davinci_mcasp_init_gpiochip(mcasp);
if (ret) {
dev_err(&pdev->dev, "gpiochip registration failed: %d\n", ret);
goto err;
}
return 0;
err:
pm_runtime_disable(&pdev->dev);
return ret;
}
static void davinci_mcasp_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
}
#ifdef CONFIG_PM
static int davinci_mcasp_runtime_suspend(struct device *dev)
{
struct davinci_mcasp *mcasp = dev_get_drvdata(dev);
struct davinci_mcasp_context *context = &mcasp->context;
u32 reg;
int i;
for (i = 0; i < ARRAY_SIZE(context_regs); i++)
context->config_regs[i] = mcasp_get_reg(mcasp, context_regs[i]);
if (mcasp->txnumevt) {
reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
context->afifo_regs[0] = mcasp_get_reg(mcasp, reg);
}
if (mcasp->rxnumevt) {
reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
context->afifo_regs[1] = mcasp_get_reg(mcasp, reg);
}
for (i = 0; i < mcasp->num_serializer; i++)
context->xrsr_regs[i] = mcasp_get_reg(mcasp,
DAVINCI_MCASP_XRSRCTL_REG(i));
return 0;
}
static int davinci_mcasp_runtime_resume(struct device *dev)
{
struct davinci_mcasp *mcasp = dev_get_drvdata(dev);
struct davinci_mcasp_context *context = &mcasp->context;
u32 reg;
int i;
for (i = 0; i < ARRAY_SIZE(context_regs); i++)
mcasp_set_reg(mcasp, context_regs[i], context->config_regs[i]);
if (mcasp->txnumevt) {
reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
mcasp_set_reg(mcasp, reg, context->afifo_regs[0]);
}
if (mcasp->rxnumevt) {
reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
mcasp_set_reg(mcasp, reg, context->afifo_regs[1]);
}
for (i = 0; i < mcasp->num_serializer; i++)
mcasp_set_reg(mcasp, DAVINCI_MCASP_XRSRCTL_REG(i),
context->xrsr_regs[i]);
return 0;
}
#endif
static const struct dev_pm_ops davinci_mcasp_pm_ops = {
SET_RUNTIME_PM_OPS(davinci_mcasp_runtime_suspend,
davinci_mcasp_runtime_resume,
NULL)
};
static struct platform_driver davinci_mcasp_driver = {
.probe = davinci_mcasp_probe,
.remove_new = davinci_mcasp_remove,
.driver = {
.name = "davinci-mcasp",
.pm = &davinci_mcasp_pm_ops,
.of_match_table = mcasp_dt_ids,
},
};
module_platform_driver(davinci_mcasp_driver);
MODULE_AUTHOR("Steve Chen");
MODULE_DESCRIPTION("TI DAVINCI McASP SoC Interface");
MODULE_LICENSE("GPL");
| linux-master | sound/soc/ti/davinci-mcasp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* omap-mcpdm.c -- OMAP ALSA SoC DAI driver using McPDM port
*
* Copyright (C) 2009 - 2011 Texas Instruments
*
* Author: Misael Lopez Cruz <[email protected]>
* Contact: Jorge Eduardo Candelaria <[email protected]>
* Margarita Olaya <[email protected]>
* Peter Ujfalusi <[email protected]>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/of_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>
#include "omap-mcpdm.h"
#include "sdma-pcm.h"
struct mcpdm_link_config {
u32 link_mask; /* channel mask for the direction */
u32 threshold; /* FIFO threshold */
};
struct omap_mcpdm {
struct device *dev;
unsigned long phys_base;
void __iomem *io_base;
int irq;
struct pm_qos_request pm_qos_req;
int latency[2];
struct mutex mutex;
/* Playback/Capture configuration */
struct mcpdm_link_config config[2];
	/* McPDM DN offsets for RX1 and RX2 channels */
u32 dn_rx_offset;
/* McPDM needs to be restarted due to runtime reconfiguration */
bool restart;
/* pm state for suspend/resume handling */
int pm_active_count;
struct snd_dmaengine_dai_dma_data dma_data[2];
};
/*
* Stream DMA parameters
*/
static inline void omap_mcpdm_write(struct omap_mcpdm *mcpdm, u16 reg, u32 val)
{
writel_relaxed(val, mcpdm->io_base + reg);
}
static inline int omap_mcpdm_read(struct omap_mcpdm *mcpdm, u16 reg)
{
return readl_relaxed(mcpdm->io_base + reg);
}
#ifdef DEBUG
static void omap_mcpdm_reg_dump(struct omap_mcpdm *mcpdm)
{
dev_dbg(mcpdm->dev, "***********************\n");
dev_dbg(mcpdm->dev, "IRQSTATUS_RAW: 0x%04x\n",
omap_mcpdm_read(mcpdm, MCPDM_REG_IRQSTATUS_RAW));
dev_dbg(mcpdm->dev, "IRQSTATUS: 0x%04x\n",
omap_mcpdm_read(mcpdm, MCPDM_REG_IRQSTATUS));
dev_dbg(mcpdm->dev, "IRQENABLE_SET: 0x%04x\n",
omap_mcpdm_read(mcpdm, MCPDM_REG_IRQENABLE_SET));
dev_dbg(mcpdm->dev, "IRQENABLE_CLR: 0x%04x\n",
omap_mcpdm_read(mcpdm, MCPDM_REG_IRQENABLE_CLR));
dev_dbg(mcpdm->dev, "IRQWAKE_EN: 0x%04x\n",
omap_mcpdm_read(mcpdm, MCPDM_REG_IRQWAKE_EN));
dev_dbg(mcpdm->dev, "DMAENABLE_SET: 0x%04x\n",
omap_mcpdm_read(mcpdm, MCPDM_REG_DMAENABLE_SET));
dev_dbg(mcpdm->dev, "DMAENABLE_CLR: 0x%04x\n",
omap_mcpdm_read(mcpdm, MCPDM_REG_DMAENABLE_CLR));
dev_dbg(mcpdm->dev, "DMAWAKEEN: 0x%04x\n",
omap_mcpdm_read(mcpdm, MCPDM_REG_DMAWAKEEN));
dev_dbg(mcpdm->dev, "CTRL: 0x%04x\n",
omap_mcpdm_read(mcpdm, MCPDM_REG_CTRL));
dev_dbg(mcpdm->dev, "DN_DATA: 0x%04x\n",
omap_mcpdm_read(mcpdm, MCPDM_REG_DN_DATA));
dev_dbg(mcpdm->dev, "UP_DATA: 0x%04x\n",
omap_mcpdm_read(mcpdm, MCPDM_REG_UP_DATA));
dev_dbg(mcpdm->dev, "FIFO_CTRL_DN: 0x%04x\n",
omap_mcpdm_read(mcpdm, MCPDM_REG_FIFO_CTRL_DN));
dev_dbg(mcpdm->dev, "FIFO_CTRL_UP: 0x%04x\n",
omap_mcpdm_read(mcpdm, MCPDM_REG_FIFO_CTRL_UP));
dev_dbg(mcpdm->dev, "***********************\n");
}
#else
static void omap_mcpdm_reg_dump(struct omap_mcpdm *mcpdm) {}
#endif
/*
* Enables the transfer through the PDM interface to/from the Phoenix
* codec by enabling the corresponding UP or DN channels.
*/
static void omap_mcpdm_start(struct omap_mcpdm *mcpdm)
{
u32 ctrl = omap_mcpdm_read(mcpdm, MCPDM_REG_CTRL);
u32 link_mask = mcpdm->config[0].link_mask | mcpdm->config[1].link_mask;
ctrl |= (MCPDM_SW_DN_RST | MCPDM_SW_UP_RST);
omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, ctrl);
ctrl |= link_mask;
omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, ctrl);
ctrl &= ~(MCPDM_SW_DN_RST | MCPDM_SW_UP_RST);
omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, ctrl);
}
/*
* Disables the transfer through the PDM interface to/from the Phoenix
* codec by disabling the corresponding UP or DN channels.
*/
static void omap_mcpdm_stop(struct omap_mcpdm *mcpdm)
{
u32 ctrl = omap_mcpdm_read(mcpdm, MCPDM_REG_CTRL);
u32 link_mask = MCPDM_PDM_DN_MASK | MCPDM_PDM_UP_MASK;
ctrl |= (MCPDM_SW_DN_RST | MCPDM_SW_UP_RST);
omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, ctrl);
ctrl &= ~(link_mask);
omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, ctrl);
ctrl &= ~(MCPDM_SW_DN_RST | MCPDM_SW_UP_RST);
omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, ctrl);
}
/*
 * Is the physical McPDM interface active, i.e. is any UP or DN channel
 * currently enabled?
 */
static inline int omap_mcpdm_active(struct omap_mcpdm *mcpdm)
{
return omap_mcpdm_read(mcpdm, MCPDM_REG_CTRL) &
(MCPDM_PDM_DN_MASK | MCPDM_PDM_UP_MASK);
}
/*
 * Configures the McPDM uplink and downlink for audio.
 * This function should be called before omap_mcpdm_start().
 */
static void omap_mcpdm_open_streams(struct omap_mcpdm *mcpdm)
{
u32 ctrl = omap_mcpdm_read(mcpdm, MCPDM_REG_CTRL);
omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, ctrl | MCPDM_WD_EN);
omap_mcpdm_write(mcpdm, MCPDM_REG_IRQENABLE_SET,
MCPDM_DN_IRQ_EMPTY | MCPDM_DN_IRQ_FULL |
MCPDM_UP_IRQ_EMPTY | MCPDM_UP_IRQ_FULL);
/* Enable DN RX1/2 offset cancellation feature, if configured */
if (mcpdm->dn_rx_offset) {
u32 dn_offset = mcpdm->dn_rx_offset;
omap_mcpdm_write(mcpdm, MCPDM_REG_DN_OFFSET, dn_offset);
dn_offset |= (MCPDM_DN_OFST_RX1_EN | MCPDM_DN_OFST_RX2_EN);
omap_mcpdm_write(mcpdm, MCPDM_REG_DN_OFFSET, dn_offset);
}
omap_mcpdm_write(mcpdm, MCPDM_REG_FIFO_CTRL_DN,
mcpdm->config[SNDRV_PCM_STREAM_PLAYBACK].threshold);
omap_mcpdm_write(mcpdm, MCPDM_REG_FIFO_CTRL_UP,
mcpdm->config[SNDRV_PCM_STREAM_CAPTURE].threshold);
omap_mcpdm_write(mcpdm, MCPDM_REG_DMAENABLE_SET,
MCPDM_DMA_DN_ENABLE | MCPDM_DMA_UP_ENABLE);
}
/*
 * Cleans up the McPDM uplink and downlink configuration.
 * This function should be called when the stream is closed.
 */
static void omap_mcpdm_close_streams(struct omap_mcpdm *mcpdm)
{
/* Disable irq request generation for downlink */
omap_mcpdm_write(mcpdm, MCPDM_REG_IRQENABLE_CLR,
MCPDM_DN_IRQ_EMPTY | MCPDM_DN_IRQ_FULL);
/* Disable DMA request generation for downlink */
omap_mcpdm_write(mcpdm, MCPDM_REG_DMAENABLE_CLR, MCPDM_DMA_DN_ENABLE);
/* Disable irq request generation for uplink */
omap_mcpdm_write(mcpdm, MCPDM_REG_IRQENABLE_CLR,
MCPDM_UP_IRQ_EMPTY | MCPDM_UP_IRQ_FULL);
/* Disable DMA request generation for uplink */
omap_mcpdm_write(mcpdm, MCPDM_REG_DMAENABLE_CLR, MCPDM_DMA_UP_ENABLE);
/* Disable RX1/2 offset cancellation */
if (mcpdm->dn_rx_offset)
omap_mcpdm_write(mcpdm, MCPDM_REG_DN_OFFSET, 0);
}
static irqreturn_t omap_mcpdm_irq_handler(int irq, void *dev_id)
{
struct omap_mcpdm *mcpdm = dev_id;
int irq_status;
irq_status = omap_mcpdm_read(mcpdm, MCPDM_REG_IRQSTATUS);
/* Acknowledge irq event */
omap_mcpdm_write(mcpdm, MCPDM_REG_IRQSTATUS, irq_status);
if (irq_status & MCPDM_DN_IRQ_FULL)
dev_dbg(mcpdm->dev, "DN (playback) FIFO Full\n");
if (irq_status & MCPDM_DN_IRQ_EMPTY)
dev_dbg(mcpdm->dev, "DN (playback) FIFO Empty\n");
if (irq_status & MCPDM_DN_IRQ)
dev_dbg(mcpdm->dev, "DN (playback) write request\n");
if (irq_status & MCPDM_UP_IRQ_FULL)
dev_dbg(mcpdm->dev, "UP (capture) FIFO Full\n");
if (irq_status & MCPDM_UP_IRQ_EMPTY)
dev_dbg(mcpdm->dev, "UP (capture) FIFO Empty\n");
if (irq_status & MCPDM_UP_IRQ)
dev_dbg(mcpdm->dev, "UP (capture) write request\n");
return IRQ_HANDLED;
}
static int omap_mcpdm_dai_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
mutex_lock(&mcpdm->mutex);
if (!snd_soc_dai_active(dai))
omap_mcpdm_open_streams(mcpdm);
mutex_unlock(&mcpdm->mutex);
return 0;
}
static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
mutex_lock(&mcpdm->mutex);
if (!snd_soc_dai_active(dai)) {
if (omap_mcpdm_active(mcpdm)) {
omap_mcpdm_stop(mcpdm);
omap_mcpdm_close_streams(mcpdm);
mcpdm->config[0].link_mask = 0;
mcpdm->config[1].link_mask = 0;
}
}
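	/*
	 * Rebalance the CPU latency QoS request: if the other stream is still
	 * active, keep its latency requirement; otherwise remove the request
	 * this stream added. This stream's recorded latency is cleared below.
	 */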
if (mcpdm->latency[stream2])
cpu_latency_qos_update_request(&mcpdm->pm_qos_req,
mcpdm->latency[stream2]);
else if (mcpdm->latency[stream1])
cpu_latency_qos_remove_request(&mcpdm->pm_qos_req);
mcpdm->latency[stream1] = 0;
mutex_unlock(&mcpdm->mutex);
}
static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
int stream = substream->stream;
struct snd_dmaengine_dai_dma_data *dma_data;
u32 threshold;
int channels, latency;
int link_mask = 0;
channels = params_channels(params);
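	/*
	 * Build the channel enable mask: each case deliberately falls through
	 * so that requesting N channels also enables channels N-1..1. Capture
	 * supports at most 3 channels.
	 */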
switch (channels) {
case 5:
if (stream == SNDRV_PCM_STREAM_CAPTURE)
/* up to 3 channels for capture */
return -EINVAL;
link_mask |= 1 << 4;
fallthrough;
case 4:
if (stream == SNDRV_PCM_STREAM_CAPTURE)
/* up to 3 channels for capture */
return -EINVAL;
link_mask |= 1 << 3;
fallthrough;
case 3:
link_mask |= 1 << 2;
fallthrough;
case 2:
link_mask |= 1 << 1;
fallthrough;
case 1:
link_mask |= 1 << 0;
break;
default:
/* unsupported number of channels */
return -EINVAL;
}
dma_data = snd_soc_dai_get_dma_data(dai, substream);
threshold = mcpdm->config[stream].threshold;
/* Configure McPDM channels, and DMA packet size */
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
link_mask <<= 3;
/* If capture is not running assume a stereo stream to come */
if (!mcpdm->config[!stream].link_mask)
mcpdm->config[!stream].link_mask = 0x3;
dma_data->maxburst =
(MCPDM_DN_THRES_MAX - threshold) * channels;
latency = threshold;
} else {
/* If playback is not running assume a stereo stream to come */
if (!mcpdm->config[!stream].link_mask)
mcpdm->config[!stream].link_mask = (0x3 << 3);
dma_data->maxburst = threshold * channels;
latency = (MCPDM_DN_THRES_MAX - threshold);
}
	/*
	 * The DMA controller must service a DMA request within the latency
	 * time (in usec) to avoid FIFO under/overflow.
	 */
mcpdm->latency[stream] = latency * USEC_PER_SEC / params_rate(params);
if (!mcpdm->latency[stream])
mcpdm->latency[stream] = 10;
/* Check if we need to restart McPDM with this stream */
if (mcpdm->config[stream].link_mask &&
mcpdm->config[stream].link_mask != link_mask)
mcpdm->restart = true;
mcpdm->config[stream].link_mask = link_mask;
return 0;
}
static int omap_mcpdm_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
struct pm_qos_request *pm_qos_req = &mcpdm->pm_qos_req;
int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
int latency = mcpdm->latency[stream2];
	/* Prevent the OMAP hardware from entering the OFF power state between FIFO fills */
if (!latency || mcpdm->latency[stream1] < latency)
latency = mcpdm->latency[stream1];
if (cpu_latency_qos_request_active(pm_qos_req))
cpu_latency_qos_update_request(pm_qos_req, latency);
else if (latency)
cpu_latency_qos_add_request(pm_qos_req, latency);
if (!omap_mcpdm_active(mcpdm)) {
omap_mcpdm_start(mcpdm);
omap_mcpdm_reg_dump(mcpdm);
} else if (mcpdm->restart) {
omap_mcpdm_stop(mcpdm);
omap_mcpdm_start(mcpdm);
mcpdm->restart = false;
omap_mcpdm_reg_dump(mcpdm);
}
return 0;
}
static int omap_mcpdm_probe(struct snd_soc_dai *dai)
{
struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
int ret;
pm_runtime_enable(mcpdm->dev);
	/* Keep all McPDM lines disabled while the IRQ request is ongoing */
pm_runtime_get_sync(mcpdm->dev);
omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, 0x00);
ret = request_irq(mcpdm->irq, omap_mcpdm_irq_handler, 0, "McPDM",
(void *)mcpdm);
pm_runtime_put_sync(mcpdm->dev);
if (ret) {
dev_err(mcpdm->dev, "Request for IRQ failed\n");
pm_runtime_disable(mcpdm->dev);
}
/* Configure McPDM threshold values */
mcpdm->config[SNDRV_PCM_STREAM_PLAYBACK].threshold = 2;
mcpdm->config[SNDRV_PCM_STREAM_CAPTURE].threshold =
MCPDM_UP_THRES_MAX - 3;
snd_soc_dai_init_dma_data(dai,
&mcpdm->dma_data[SNDRV_PCM_STREAM_PLAYBACK],
&mcpdm->dma_data[SNDRV_PCM_STREAM_CAPTURE]);
return ret;
}
static int omap_mcpdm_remove(struct snd_soc_dai *dai)
{
struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
free_irq(mcpdm->irq, (void *)mcpdm);
pm_runtime_disable(mcpdm->dev);
if (cpu_latency_qos_request_active(&mcpdm->pm_qos_req))
cpu_latency_qos_remove_request(&mcpdm->pm_qos_req);
return 0;
}
static const struct snd_soc_dai_ops omap_mcpdm_dai_ops = {
.probe = omap_mcpdm_probe,
.remove = omap_mcpdm_remove,
.startup = omap_mcpdm_dai_startup,
.shutdown = omap_mcpdm_dai_shutdown,
.hw_params = omap_mcpdm_dai_hw_params,
.prepare = omap_mcpdm_prepare,
.probe_order = SND_SOC_COMP_ORDER_LATE,
.remove_order = SND_SOC_COMP_ORDER_EARLY,
};
#ifdef CONFIG_PM_SLEEP
static int omap_mcpdm_suspend(struct snd_soc_component *component)
{
struct omap_mcpdm *mcpdm = snd_soc_component_get_drvdata(component);
if (snd_soc_component_active(component)) {
omap_mcpdm_stop(mcpdm);
omap_mcpdm_close_streams(mcpdm);
}
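	/*
	 * Drop every runtime PM reference that is still held so the device can
	 * reach its low power state, and remember how many were dropped so
	 * that resume can re-acquire them.
	 */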
mcpdm->pm_active_count = 0;
while (pm_runtime_active(mcpdm->dev)) {
pm_runtime_put_sync(mcpdm->dev);
mcpdm->pm_active_count++;
}
return 0;
}
static int omap_mcpdm_resume(struct snd_soc_component *component)
{
struct omap_mcpdm *mcpdm = snd_soc_component_get_drvdata(component);
if (mcpdm->pm_active_count) {
while (mcpdm->pm_active_count--)
pm_runtime_get_sync(mcpdm->dev);
if (snd_soc_component_active(component)) {
omap_mcpdm_open_streams(mcpdm);
omap_mcpdm_start(mcpdm);
}
}
return 0;
}
#else
#define omap_mcpdm_suspend NULL
#define omap_mcpdm_resume NULL
#endif
#define OMAP_MCPDM_RATES (SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
#define OMAP_MCPDM_FORMATS SNDRV_PCM_FMTBIT_S32_LE
static struct snd_soc_dai_driver omap_mcpdm_dai = {
.playback = {
.channels_min = 1,
.channels_max = 5,
.rates = OMAP_MCPDM_RATES,
.formats = OMAP_MCPDM_FORMATS,
.sig_bits = 24,
},
.capture = {
.channels_min = 1,
.channels_max = 3,
.rates = OMAP_MCPDM_RATES,
.formats = OMAP_MCPDM_FORMATS,
.sig_bits = 24,
},
.ops = &omap_mcpdm_dai_ops,
};
static const struct snd_soc_component_driver omap_mcpdm_component = {
.name = "omap-mcpdm",
.suspend = omap_mcpdm_suspend,
.resume = omap_mcpdm_resume,
.legacy_dai_naming = 1,
};
void omap_mcpdm_configure_dn_offsets(struct snd_soc_pcm_runtime *rtd,
u8 rx1, u8 rx2)
{
struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
mcpdm->dn_rx_offset = MCPDM_DNOFST_RX1(rx1) | MCPDM_DNOFST_RX2(rx2);
}
EXPORT_SYMBOL_GPL(omap_mcpdm_configure_dn_offsets);
static int asoc_mcpdm_probe(struct platform_device *pdev)
{
struct omap_mcpdm *mcpdm;
struct resource *res;
int ret;
mcpdm = devm_kzalloc(&pdev->dev, sizeof(struct omap_mcpdm), GFP_KERNEL);
if (!mcpdm)
return -ENOMEM;
platform_set_drvdata(pdev, mcpdm);
mutex_init(&mcpdm->mutex);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
if (res == NULL)
return -ENOMEM;
mcpdm->dma_data[0].addr = res->start + MCPDM_REG_DN_DATA;
mcpdm->dma_data[1].addr = res->start + MCPDM_REG_UP_DATA;
mcpdm->dma_data[0].filter_data = "dn_link";
mcpdm->dma_data[1].filter_data = "up_link";
mcpdm->io_base = devm_platform_ioremap_resource_byname(pdev, "mpu");
if (IS_ERR(mcpdm->io_base))
return PTR_ERR(mcpdm->io_base);
mcpdm->irq = platform_get_irq(pdev, 0);
if (mcpdm->irq < 0)
return mcpdm->irq;
mcpdm->dev = &pdev->dev;
ret = devm_snd_soc_register_component(&pdev->dev,
&omap_mcpdm_component,
&omap_mcpdm_dai, 1);
if (ret)
return ret;
return sdma_pcm_platform_register(&pdev->dev, "dn_link", "up_link");
}
static const struct of_device_id omap_mcpdm_of_match[] = {
{ .compatible = "ti,omap4-mcpdm", },
{ }
};
MODULE_DEVICE_TABLE(of, omap_mcpdm_of_match);
static struct platform_driver asoc_mcpdm_driver = {
.driver = {
.name = "omap-mcpdm",
.of_match_table = omap_mcpdm_of_match,
},
.probe = asoc_mcpdm_probe,
};
module_platform_driver(asoc_mcpdm_driver);
MODULE_ALIAS("platform:omap-mcpdm");
MODULE_AUTHOR("Misael Lopez Cruz <[email protected]>");
MODULE_DESCRIPTION("OMAP PDM SoC Interface");
MODULE_LICENSE("GPL");
| linux-master | sound/soc/ti/omap-mcpdm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* n810.c -- SoC audio for Nokia N810
*
* Copyright (C) 2008 Nokia Corporation
*
* Contact: Jarkko Nikula <[email protected]>
*/
#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <asm/mach-types.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/platform_data/asoc-ti-mcbsp.h>
#include "omap-mcbsp.h"
#define N810_HEADSET_AMP_GPIO 10
#define N810_SPEAKER_AMP_GPIO 101
enum {
N810_JACK_DISABLED,
N810_JACK_HP,
N810_JACK_HS,
N810_JACK_MIC,
};
static struct clk *sys_clkout2;
static struct clk *sys_clkout2_src;
static struct clk *func96m_clk;
static int n810_spk_func;
static int n810_jack_func;
static int n810_dmic_func;
static void n810_ext_control(struct snd_soc_dapm_context *dapm)
{
int hp = 0, line1l = 0;
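	/*
	 * Map the selected jack function to the pins that must be powered:
	 * a headset enables both the headphone path and the LINE1L mic input,
	 * a headphone only the headphone path, and a mic only LINE1L.
	 */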
switch (n810_jack_func) {
case N810_JACK_HS:
line1l = 1;
fallthrough;
case N810_JACK_HP:
hp = 1;
break;
case N810_JACK_MIC:
line1l = 1;
break;
}
snd_soc_dapm_mutex_lock(dapm);
if (n810_spk_func)
snd_soc_dapm_enable_pin_unlocked(dapm, "Ext Spk");
else
snd_soc_dapm_disable_pin_unlocked(dapm, "Ext Spk");
if (hp)
snd_soc_dapm_enable_pin_unlocked(dapm, "Headphone Jack");
else
snd_soc_dapm_disable_pin_unlocked(dapm, "Headphone Jack");
if (line1l)
snd_soc_dapm_enable_pin_unlocked(dapm, "HS Mic");
else
snd_soc_dapm_disable_pin_unlocked(dapm, "HS Mic");
if (n810_dmic_func)
snd_soc_dapm_enable_pin_unlocked(dapm, "DMic");
else
snd_soc_dapm_disable_pin_unlocked(dapm, "DMic");
snd_soc_dapm_sync_unlocked(dapm);
snd_soc_dapm_mutex_unlock(dapm);
}
static int n810_startup(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
snd_pcm_hw_constraint_single(runtime, SNDRV_PCM_HW_PARAM_CHANNELS, 2);
n810_ext_control(&rtd->card->dapm);
return clk_prepare_enable(sys_clkout2);
}
static void n810_shutdown(struct snd_pcm_substream *substream)
{
clk_disable_unprepare(sys_clkout2);
}
static int n810_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int err;
/* Set the codec system clock for DAC and ADC */
err = snd_soc_dai_set_sysclk(codec_dai, 0, 12000000,
SND_SOC_CLOCK_IN);
return err;
}
static const struct snd_soc_ops n810_ops = {
.startup = n810_startup,
.hw_params = n810_hw_params,
.shutdown = n810_shutdown,
};
static int n810_get_spk(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.enumerated.item[0] = n810_spk_func;
return 0;
}
static int n810_set_spk(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
if (n810_spk_func == ucontrol->value.enumerated.item[0])
return 0;
n810_spk_func = ucontrol->value.enumerated.item[0];
n810_ext_control(&card->dapm);
return 1;
}
static int n810_get_jack(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.enumerated.item[0] = n810_jack_func;
return 0;
}
static int n810_set_jack(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
if (n810_jack_func == ucontrol->value.enumerated.item[0])
return 0;
n810_jack_func = ucontrol->value.enumerated.item[0];
n810_ext_control(&card->dapm);
return 1;
}
static int n810_get_input(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.enumerated.item[0] = n810_dmic_func;
return 0;
}
static int n810_set_input(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
if (n810_dmic_func == ucontrol->value.enumerated.item[0])
return 0;
n810_dmic_func = ucontrol->value.enumerated.item[0];
n810_ext_control(&card->dapm);
return 1;
}
static int n810_spk_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
if (SND_SOC_DAPM_EVENT_ON(event))
gpio_set_value(N810_SPEAKER_AMP_GPIO, 1);
else
gpio_set_value(N810_SPEAKER_AMP_GPIO, 0);
return 0;
}
static int n810_jack_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
if (SND_SOC_DAPM_EVENT_ON(event))
gpio_set_value(N810_HEADSET_AMP_GPIO, 1);
else
gpio_set_value(N810_HEADSET_AMP_GPIO, 0);
return 0;
}
static const struct snd_soc_dapm_widget aic33_dapm_widgets[] = {
SND_SOC_DAPM_SPK("Ext Spk", n810_spk_event),
SND_SOC_DAPM_HP("Headphone Jack", n810_jack_event),
SND_SOC_DAPM_MIC("DMic", NULL),
SND_SOC_DAPM_MIC("HS Mic", NULL),
};
static const struct snd_soc_dapm_route audio_map[] = {
{"Headphone Jack", NULL, "HPLOUT"},
{"Headphone Jack", NULL, "HPROUT"},
{"Ext Spk", NULL, "LLOUT"},
{"Ext Spk", NULL, "RLOUT"},
{"DMic Rate 64", NULL, "DMic"},
{"DMic", NULL, "Mic Bias"},
	/*
	 * Note that the mic bias is coming from Retu/Vilma and we don't have
	 * control over it at the moment. The analog HS mic is not working. <- TODO
	 */
{"LINE1L", NULL, "HS Mic"},
};
static const char *spk_function[] = {"Off", "On"};
static const char *jack_function[] = {"Off", "Headphone", "Headset", "Mic"};
static const char *input_function[] = {"ADC", "Digital Mic"};
static const struct soc_enum n810_enum[] = {
SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(spk_function), spk_function),
SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(jack_function), jack_function),
SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(input_function), input_function),
};
static const struct snd_kcontrol_new aic33_n810_controls[] = {
SOC_ENUM_EXT("Speaker Function", n810_enum[0],
n810_get_spk, n810_set_spk),
SOC_ENUM_EXT("Jack Function", n810_enum[1],
n810_get_jack, n810_set_jack),
SOC_ENUM_EXT("Input Select", n810_enum[2],
n810_get_input, n810_set_input),
};
/* Digital audio interface glue - connects codec <--> CPU */
SND_SOC_DAILINK_DEFS(aic33,
DAILINK_COMP_ARRAY(COMP_CPU("48076000.mcbsp")),
DAILINK_COMP_ARRAY(COMP_CODEC("tlv320aic3x-codec.1-0018",
"tlv320aic3x-hifi")),
DAILINK_COMP_ARRAY(COMP_PLATFORM("48076000.mcbsp")));
static struct snd_soc_dai_link n810_dai = {
.name = "TLV320AIC33",
.stream_name = "AIC33",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBM_CFM,
.ops = &n810_ops,
SND_SOC_DAILINK_REG(aic33),
};
/* Audio machine driver */
static struct snd_soc_card snd_soc_n810 = {
.name = "N810",
.owner = THIS_MODULE,
.dai_link = &n810_dai,
.num_links = 1,
.controls = aic33_n810_controls,
.num_controls = ARRAY_SIZE(aic33_n810_controls),
.dapm_widgets = aic33_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(aic33_dapm_widgets),
.dapm_routes = audio_map,
.num_dapm_routes = ARRAY_SIZE(audio_map),
.fully_routed = true,
};
static struct platform_device *n810_snd_device;
static int __init n810_soc_init(void)
{
int err;
struct device *dev;
if (!of_have_populated_dt() ||
(!of_machine_is_compatible("nokia,n810") &&
!of_machine_is_compatible("nokia,n810-wimax")))
return -ENODEV;
n810_snd_device = platform_device_alloc("soc-audio", -1);
if (!n810_snd_device)
return -ENOMEM;
platform_set_drvdata(n810_snd_device, &snd_soc_n810);
err = platform_device_add(n810_snd_device);
if (err)
goto err1;
dev = &n810_snd_device->dev;
sys_clkout2_src = clk_get(dev, "sys_clkout2_src");
if (IS_ERR(sys_clkout2_src)) {
dev_err(dev, "Could not get sys_clkout2_src clock\n");
err = PTR_ERR(sys_clkout2_src);
goto err2;
}
sys_clkout2 = clk_get(dev, "sys_clkout2");
if (IS_ERR(sys_clkout2)) {
dev_err(dev, "Could not get sys_clkout2\n");
err = PTR_ERR(sys_clkout2);
goto err3;
}
	/*
	 * Configure a 12 MHz output on SYS_CLKOUT2. We must use the 96 MHz
	 * clock as its parent in order to derive 12 MHz.
	 */
func96m_clk = clk_get(dev, "func_96m_ck");
if (IS_ERR(func96m_clk)) {
dev_err(dev, "Could not get func 96M clock\n");
err = PTR_ERR(func96m_clk);
goto err4;
}
clk_set_parent(sys_clkout2_src, func96m_clk);
clk_set_rate(sys_clkout2, 12000000);
if (WARN_ON((gpio_request(N810_HEADSET_AMP_GPIO, "hs_amp") < 0) ||
(gpio_request(N810_SPEAKER_AMP_GPIO, "spk_amp") < 0))) {
err = -EINVAL;
goto err4;
}
gpio_direction_output(N810_HEADSET_AMP_GPIO, 0);
gpio_direction_output(N810_SPEAKER_AMP_GPIO, 0);
return 0;
err4:
clk_put(sys_clkout2);
err3:
clk_put(sys_clkout2_src);
err2:
platform_device_del(n810_snd_device);
err1:
platform_device_put(n810_snd_device);
return err;
}
static void __exit n810_soc_exit(void)
{
gpio_free(N810_SPEAKER_AMP_GPIO);
gpio_free(N810_HEADSET_AMP_GPIO);
clk_put(sys_clkout2_src);
clk_put(sys_clkout2);
clk_put(func96m_clk);
platform_device_unregister(n810_snd_device);
}
module_init(n810_soc_init);
module_exit(n810_soc_exit);
MODULE_AUTHOR("Jarkko Nikula <[email protected]>");
MODULE_DESCRIPTION("ALSA SoC Nokia N810");
MODULE_LICENSE("GPL");
| linux-master | sound/soc/ti/n810.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com
* Author: Peter Ujfalusi <[email protected]>
*/
#include <linux/device.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>
#include "sdma-pcm.h"
static const struct snd_pcm_hardware sdma_pcm_hardware = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME |
SNDRV_PCM_INFO_NO_PERIOD_WAKEUP |
SNDRV_PCM_INFO_INTERLEAVED,
.period_bytes_min = 32,
.period_bytes_max = 64 * 1024,
.buffer_bytes_max = 128 * 1024,
.periods_min = 2,
.periods_max = 255,
};
static const struct snd_dmaengine_pcm_config sdma_dmaengine_pcm_config = {
.pcm_hardware = &sdma_pcm_hardware,
.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
.prealloc_buffer_size = 128 * 1024,
};
int sdma_pcm_platform_register(struct device *dev,
char *txdmachan, char *rxdmachan)
{
struct snd_dmaengine_pcm_config *config;
unsigned int flags = 0;
	/* If no channel names are given, use the standard 'tx' and 'rx' names */
if (!txdmachan && !rxdmachan)
return devm_snd_dmaengine_pcm_register(dev,
&sdma_dmaengine_pcm_config, 0);
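	/*
	 * Custom DMA channel names were given: copy the default config and
	 * override the channel names. If only one direction is wired up,
	 * register the PCM as half duplex.
	 */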
config = devm_kzalloc(dev, sizeof(*config), GFP_KERNEL);
if (!config)
return -ENOMEM;
*config = sdma_dmaengine_pcm_config;
if (!txdmachan || !rxdmachan) {
/* One direction only PCM */
flags |= SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX;
if (!txdmachan) {
txdmachan = rxdmachan;
rxdmachan = NULL;
}
}
config->chan_names[0] = txdmachan;
config->chan_names[1] = rxdmachan;
return devm_snd_dmaengine_pcm_register(dev, config, flags);
}
EXPORT_SYMBOL_GPL(sdma_pcm_platform_register);
MODULE_AUTHOR("Peter Ujfalusi <[email protected]>");
MODULE_DESCRIPTION("sDMA PCM ASoC platform driver");
MODULE_LICENSE("GPL v2");
| linux-master | sound/soc/ti/sdma-pcm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* osk5912.c -- SoC audio for OSK 5912
*
* Copyright (C) 2008 Mistral Solutions
*
* Contact: Arun KS <[email protected]>
*/
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <asm/mach-types.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/platform_data/asoc-ti-mcbsp.h>
#include "omap-mcbsp.h"
#include "../codecs/tlv320aic23.h"
#define CODEC_CLOCK 12000000
static struct clk *tlv320aic23_mclk;
static int osk_startup(struct snd_pcm_substream *substream)
{
return clk_prepare_enable(tlv320aic23_mclk);
}
static void osk_shutdown(struct snd_pcm_substream *substream)
{
clk_disable_unprepare(tlv320aic23_mclk);
}
static int osk_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int err;
/* Set the codec system clock for DAC and ADC */
err =
snd_soc_dai_set_sysclk(codec_dai, 0, CODEC_CLOCK, SND_SOC_CLOCK_IN);
if (err < 0) {
printk(KERN_ERR "can't set codec system clock\n");
return err;
}
return err;
}
static const struct snd_soc_ops osk_ops = {
.startup = osk_startup,
.hw_params = osk_hw_params,
.shutdown = osk_shutdown,
};
static const struct snd_soc_dapm_widget tlv320aic23_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphone Jack", NULL),
SND_SOC_DAPM_LINE("Line In", NULL),
SND_SOC_DAPM_MIC("Mic Jack", NULL),
};
static const struct snd_soc_dapm_route audio_map[] = {
{"Headphone Jack", NULL, "LHPOUT"},
{"Headphone Jack", NULL, "RHPOUT"},
{"LLINEIN", NULL, "Line In"},
{"RLINEIN", NULL, "Line In"},
{"MICIN", NULL, "Mic Jack"},
};
/* Digital audio interface glue - connects codec <--> CPU */
SND_SOC_DAILINK_DEFS(aic23,
DAILINK_COMP_ARRAY(COMP_CPU("omap-mcbsp.1")),
DAILINK_COMP_ARRAY(COMP_CODEC("tlv320aic23-codec",
"tlv320aic23-hifi")),
DAILINK_COMP_ARRAY(COMP_PLATFORM("omap-mcbsp.1")));
static struct snd_soc_dai_link osk_dai = {
.name = "TLV320AIC23",
.stream_name = "AIC23",
.dai_fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBM_CFM,
.ops = &osk_ops,
SND_SOC_DAILINK_REG(aic23),
};
/* Audio machine driver */
static struct snd_soc_card snd_soc_card_osk = {
.name = "OSK5912",
.owner = THIS_MODULE,
.dai_link = &osk_dai,
.num_links = 1,
.dapm_widgets = tlv320aic23_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(tlv320aic23_dapm_widgets),
.dapm_routes = audio_map,
.num_dapm_routes = ARRAY_SIZE(audio_map),
};
static struct platform_device *osk_snd_device;
static int __init osk_soc_init(void)
{
int err;
u32 curRate;
struct device *dev;
if (!(machine_is_omap_osk()))
return -ENODEV;
osk_snd_device = platform_device_alloc("soc-audio", -1);
if (!osk_snd_device)
return -ENOMEM;
platform_set_drvdata(osk_snd_device, &snd_soc_card_osk);
err = platform_device_add(osk_snd_device);
if (err)
goto err1;
dev = &osk_snd_device->dev;
tlv320aic23_mclk = clk_get(dev, "mclk");
if (IS_ERR(tlv320aic23_mclk)) {
printk(KERN_ERR "Could not get mclk clock\n");
err = PTR_ERR(tlv320aic23_mclk);
goto err2;
}
/*
* Configure 12 MHz output on MCLK.
*/
curRate = (uint) clk_get_rate(tlv320aic23_mclk);
if (curRate != CODEC_CLOCK) {
if (clk_set_rate(tlv320aic23_mclk, CODEC_CLOCK)) {
printk(KERN_ERR "Cannot set MCLK for AIC23 CODEC\n");
err = -ECANCELED;
goto err3;
}
}
printk(KERN_INFO "MCLK = %d [%d]\n",
(uint) clk_get_rate(tlv320aic23_mclk), CODEC_CLOCK);
return 0;
err3:
clk_put(tlv320aic23_mclk);
err2:
platform_device_del(osk_snd_device);
err1:
platform_device_put(osk_snd_device);
return err;
}
static void __exit osk_soc_exit(void)
{
clk_put(tlv320aic23_mclk);
platform_device_unregister(osk_snd_device);
}
module_init(osk_soc_init);
module_exit(osk_soc_exit);
MODULE_AUTHOR("Arun KS <[email protected]>");
MODULE_DESCRIPTION("ALSA SoC OSK 5912");
MODULE_LICENSE("GPL");
| linux-master | sound/soc/ti/osk5912.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
* Author: Peter Ujfalusi <[email protected]>
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "davinci-mcasp.h"
/*
* Maximum number of configuration entries for prefixes:
* CPB: 2 (mcasp10 + codec)
* IVI: 3 (mcasp0 + 2x codec)
*/
#define J721E_CODEC_CONF_COUNT 5
enum j721e_audio_domain_id {
J721E_AUDIO_DOMAIN_CPB = 0,
J721E_AUDIO_DOMAIN_IVI,
J721E_AUDIO_DOMAIN_LAST,
};
#define J721E_CLK_PARENT_48000 0
#define J721E_CLK_PARENT_44100 1
#define J721E_MAX_CLK_HSDIV 128
#define PCM1368A_MAX_SYSCLK 36864000
#define J721E_DAI_FMT (SND_SOC_DAIFMT_RIGHT_J | \
SND_SOC_DAIFMT_NB_NF | \
SND_SOC_DAIFMT_CBS_CFS)
enum j721e_board_type {
J721E_BOARD_CPB = 1,
J721E_BOARD_CPB_IVI,
};
struct j721e_audio_match_data {
enum j721e_board_type board_type;
int num_links;
unsigned int pll_rates[2];
};
static unsigned int ratios_for_pcm3168a[] = {
256,
512,
768,
};
struct j721e_audio_clocks {
struct clk *target;
struct clk *parent[2];
};
struct j721e_audio_domain {
struct j721e_audio_clocks codec;
struct j721e_audio_clocks mcasp;
int parent_clk_id;
int active;
unsigned int active_link;
unsigned int rate;
};
struct j721e_priv {
struct device *dev;
struct snd_soc_card card;
struct snd_soc_dai_link *dai_links;
struct snd_soc_codec_conf codec_conf[J721E_CODEC_CONF_COUNT];
struct snd_interval rate_range;
const struct j721e_audio_match_data *match_data;
u32 pll_rates[2];
unsigned int hsdiv_rates[2];
struct j721e_audio_domain audio_domains[J721E_AUDIO_DOMAIN_LAST];
struct mutex mutex;
};
static const struct snd_soc_dapm_widget j721e_cpb_dapm_widgets[] = {
SND_SOC_DAPM_HP("CPB Stereo HP 1", NULL),
SND_SOC_DAPM_HP("CPB Stereo HP 2", NULL),
SND_SOC_DAPM_HP("CPB Stereo HP 3", NULL),
SND_SOC_DAPM_LINE("CPB Line Out", NULL),
SND_SOC_DAPM_MIC("CPB Stereo Mic 1", NULL),
SND_SOC_DAPM_MIC("CPB Stereo Mic 2", NULL),
SND_SOC_DAPM_LINE("CPB Line In", NULL),
};
static const struct snd_soc_dapm_route j721e_cpb_dapm_routes[] = {
{"CPB Stereo HP 1", NULL, "codec-1 AOUT1L"},
{"CPB Stereo HP 1", NULL, "codec-1 AOUT1R"},
{"CPB Stereo HP 2", NULL, "codec-1 AOUT2L"},
{"CPB Stereo HP 2", NULL, "codec-1 AOUT2R"},
{"CPB Stereo HP 3", NULL, "codec-1 AOUT3L"},
{"CPB Stereo HP 3", NULL, "codec-1 AOUT3R"},
{"CPB Line Out", NULL, "codec-1 AOUT4L"},
{"CPB Line Out", NULL, "codec-1 AOUT4R"},
{"codec-1 AIN1L", NULL, "CPB Stereo Mic 1"},
{"codec-1 AIN1R", NULL, "CPB Stereo Mic 1"},
{"codec-1 AIN2L", NULL, "CPB Stereo Mic 2"},
{"codec-1 AIN2R", NULL, "CPB Stereo Mic 2"},
{"codec-1 AIN3L", NULL, "CPB Line In"},
{"codec-1 AIN3R", NULL, "CPB Line In"},
};
static const struct snd_soc_dapm_widget j721e_ivi_codec_a_dapm_widgets[] = {
SND_SOC_DAPM_LINE("IVI A Line Out 1", NULL),
SND_SOC_DAPM_LINE("IVI A Line Out 2", NULL),
SND_SOC_DAPM_LINE("IVI A Line Out 3", NULL),
SND_SOC_DAPM_LINE("IVI A Line Out 4", NULL),
SND_SOC_DAPM_MIC("IVI A Stereo Mic 1", NULL),
SND_SOC_DAPM_MIC("IVI A Stereo Mic 2", NULL),
SND_SOC_DAPM_LINE("IVI A Line In", NULL),
};
static const struct snd_soc_dapm_route j721e_codec_a_dapm_routes[] = {
{"IVI A Line Out 1", NULL, "codec-a AOUT1L"},
{"IVI A Line Out 1", NULL, "codec-a AOUT1R"},
{"IVI A Line Out 2", NULL, "codec-a AOUT2L"},
{"IVI A Line Out 2", NULL, "codec-a AOUT2R"},
{"IVI A Line Out 3", NULL, "codec-a AOUT3L"},
{"IVI A Line Out 3", NULL, "codec-a AOUT3R"},
{"IVI A Line Out 4", NULL, "codec-a AOUT4L"},
{"IVI A Line Out 4", NULL, "codec-a AOUT4R"},
{"codec-a AIN1L", NULL, "IVI A Stereo Mic 1"},
{"codec-a AIN1R", NULL, "IVI A Stereo Mic 1"},
{"codec-a AIN2L", NULL, "IVI A Stereo Mic 2"},
{"codec-a AIN2R", NULL, "IVI A Stereo Mic 2"},
{"codec-a AIN3L", NULL, "IVI A Line In"},
{"codec-a AIN3R", NULL, "IVI A Line In"},
};
static const struct snd_soc_dapm_widget j721e_ivi_codec_b_dapm_widgets[] = {
SND_SOC_DAPM_LINE("IVI B Line Out 1", NULL),
SND_SOC_DAPM_LINE("IVI B Line Out 2", NULL),
SND_SOC_DAPM_LINE("IVI B Line Out 3", NULL),
SND_SOC_DAPM_LINE("IVI B Line Out 4", NULL),
SND_SOC_DAPM_MIC("IVI B Stereo Mic 1", NULL),
SND_SOC_DAPM_MIC("IVI B Stereo Mic 2", NULL),
SND_SOC_DAPM_LINE("IVI B Line In", NULL),
};
static const struct snd_soc_dapm_route j721e_codec_b_dapm_routes[] = {
{"IVI B Line Out 1", NULL, "codec-b AOUT1L"},
{"IVI B Line Out 1", NULL, "codec-b AOUT1R"},
{"IVI B Line Out 2", NULL, "codec-b AOUT2L"},
{"IVI B Line Out 2", NULL, "codec-b AOUT2R"},
{"IVI B Line Out 3", NULL, "codec-b AOUT3L"},
{"IVI B Line Out 3", NULL, "codec-b AOUT3R"},
{"IVI B Line Out 4", NULL, "codec-b AOUT4L"},
{"IVI B Line Out 4", NULL, "codec-b AOUT4R"},
{"codec-b AIN1L", NULL, "IVI B Stereo Mic 1"},
{"codec-b AIN1R", NULL, "IVI B Stereo Mic 1"},
{"codec-b AIN2L", NULL, "IVI B Stereo Mic 2"},
{"codec-b AIN2R", NULL, "IVI B Stereo Mic 2"},
{"codec-b AIN3L", NULL, "IVI B Line In"},
{"codec-b AIN3R", NULL, "IVI B Line In"},
};
static int j721e_configure_refclk(struct j721e_priv *priv,
unsigned int audio_domain, unsigned int rate)
{
struct j721e_audio_domain *domain = &priv->audio_domains[audio_domain];
unsigned int scki;
int ret = -EINVAL;
int i, clk_id;
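	/*
	 * Pick the parent PLL from the sample rate family (multiples of 8 kHz
	 * use the 48 kHz PLL, multiples of 11.025 kHz the 44.1 kHz PLL), then
	 * select the lowest pcm3168a SCKI ratio for which the required divider
	 * does not exceed J721E_MAX_CLK_HSDIV.
	 */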
if (!(rate % 8000) && priv->pll_rates[J721E_CLK_PARENT_48000])
clk_id = J721E_CLK_PARENT_48000;
else if (!(rate % 11025) && priv->pll_rates[J721E_CLK_PARENT_44100])
clk_id = J721E_CLK_PARENT_44100;
else
return ret;
for (i = 0; i < ARRAY_SIZE(ratios_for_pcm3168a); i++) {
scki = ratios_for_pcm3168a[i] * rate;
if (priv->pll_rates[clk_id] / scki <= J721E_MAX_CLK_HSDIV) {
ret = 0;
break;
}
}
if (ret) {
dev_err(priv->dev, "No valid clock configuration for %u Hz\n",
rate);
return ret;
}
if (domain->parent_clk_id == -1 || priv->hsdiv_rates[domain->parent_clk_id] != scki) {
dev_dbg(priv->dev,
"domain%u configuration for %u Hz: %s, %dxFS (SCKI: %u Hz)\n",
audio_domain, rate,
clk_id == J721E_CLK_PARENT_48000 ? "PLL4" : "PLL15",
ratios_for_pcm3168a[i], scki);
if (domain->parent_clk_id != clk_id) {
ret = clk_set_parent(domain->codec.target,
domain->codec.parent[clk_id]);
if (ret)
return ret;
ret = clk_set_parent(domain->mcasp.target,
domain->mcasp.parent[clk_id]);
if (ret)
return ret;
domain->parent_clk_id = clk_id;
}
ret = clk_set_rate(domain->codec.target, scki);
if (ret) {
dev_err(priv->dev, "codec set rate failed for %u Hz\n",
scki);
return ret;
}
ret = clk_set_rate(domain->mcasp.target, scki);
if (!ret) {
priv->hsdiv_rates[domain->parent_clk_id] = scki;
} else {
dev_err(priv->dev, "mcasp set rate failed for %u Hz\n",
scki);
return ret;
}
}
return ret;
}
static int j721e_rule_rate(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct snd_interval *t = rule->private;
return snd_interval_refine(hw_param_interval(params, rule->var), t);
}
static int j721e_audio_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct j721e_priv *priv = snd_soc_card_get_drvdata(rtd->card);
unsigned int domain_id = rtd->dai_link->id;
struct j721e_audio_domain *domain = &priv->audio_domains[domain_id];
struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct snd_soc_dai *codec_dai;
unsigned int active_rate;
int ret = 0;
int i;
mutex_lock(&priv->mutex);
domain->active++;
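	/*
	 * If any audio domain already has a running stream, constrain new
	 * streams to the same sample rate; otherwise allow the full rate range
	 * derived from the clock tree.
	 */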
for (i = 0; i < J721E_AUDIO_DOMAIN_LAST; i++) {
active_rate = priv->audio_domains[i].rate;
if (active_rate)
break;
}
if (active_rate)
ret = snd_pcm_hw_constraint_single(substream->runtime,
SNDRV_PCM_HW_PARAM_RATE,
active_rate);
else
ret = snd_pcm_hw_rule_add(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
j721e_rule_rate, &priv->rate_range,
SNDRV_PCM_HW_PARAM_RATE, -1);
if (ret)
goto out;
	/* Reset the TDM configuration to 2 slots of 32 bits */
ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0x3, 0x3, 2, 32);
if (ret && ret != -ENOTSUPP)
goto out;
for_each_rtd_codec_dais(rtd, i, codec_dai) {
ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x3, 0x3, 2, 32);
if (ret && ret != -ENOTSUPP)
goto out;
}
if (ret == -ENOTSUPP)
ret = 0;
out:
if (ret)
domain->active--;
mutex_unlock(&priv->mutex);
return ret;
}
static int j721e_audio_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_card *card = rtd->card;
struct j721e_priv *priv = snd_soc_card_get_drvdata(card);
unsigned int domain_id = rtd->dai_link->id;
struct j721e_audio_domain *domain = &priv->audio_domains[domain_id];
struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct snd_soc_dai *codec_dai;
unsigned int sysclk_rate;
int slot_width = 32;
int ret;
int i;
mutex_lock(&priv->mutex);
if (domain->rate && domain->rate != params_rate(params)) {
ret = -EINVAL;
goto out;
}
if (params_width(params) == 16)
slot_width = 16;
ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0x3, 0x3, 2, slot_width);
if (ret && ret != -ENOTSUPP)
goto out;
for_each_rtd_codec_dais(rtd, i, codec_dai) {
ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x3, 0x3, 2,
slot_width);
if (ret && ret != -ENOTSUPP)
goto out;
}
ret = j721e_configure_refclk(priv, domain_id, params_rate(params));
if (ret)
goto out;
sysclk_rate = priv->hsdiv_rates[domain->parent_clk_id];
for_each_rtd_codec_dais(rtd, i, codec_dai) {
ret = snd_soc_dai_set_sysclk(codec_dai, 0, sysclk_rate,
SND_SOC_CLOCK_IN);
if (ret && ret != -ENOTSUPP) {
dev_err(priv->dev,
"codec set_sysclk failed for %u Hz\n",
sysclk_rate);
goto out;
}
}
ret = snd_soc_dai_set_sysclk(cpu_dai, MCASP_CLK_HCLK_AUXCLK,
sysclk_rate, SND_SOC_CLOCK_IN);
if (ret && ret != -ENOTSUPP) {
dev_err(priv->dev, "mcasp set_sysclk failed for %u Hz\n",
sysclk_rate);
} else {
domain->rate = params_rate(params);
ret = 0;
}
out:
mutex_unlock(&priv->mutex);
return ret;
}
static void j721e_audio_shutdown(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct j721e_priv *priv = snd_soc_card_get_drvdata(rtd->card);
unsigned int domain_id = rtd->dai_link->id;
struct j721e_audio_domain *domain = &priv->audio_domains[domain_id];
mutex_lock(&priv->mutex);
domain->active--;
if (!domain->active) {
domain->rate = 0;
domain->active_link = 0;
}
mutex_unlock(&priv->mutex);
}
static const struct snd_soc_ops j721e_audio_ops = {
.startup = j721e_audio_startup,
.hw_params = j721e_audio_hw_params,
.shutdown = j721e_audio_shutdown,
};
static int j721e_audio_init(struct snd_soc_pcm_runtime *rtd)
{
struct j721e_priv *priv = snd_soc_card_get_drvdata(rtd->card);
unsigned int domain_id = rtd->dai_link->id;
struct j721e_audio_domain *domain = &priv->audio_domains[domain_id];
struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct snd_soc_dai *codec_dai;
unsigned int sysclk_rate;
int i, ret;
/* Set up initial clock configuration */
ret = j721e_configure_refclk(priv, domain_id, 48000);
if (ret)
return ret;
sysclk_rate = priv->hsdiv_rates[domain->parent_clk_id];
for_each_rtd_codec_dais(rtd, i, codec_dai) {
ret = snd_soc_dai_set_sysclk(codec_dai, 0, sysclk_rate,
SND_SOC_CLOCK_IN);
if (ret && ret != -ENOTSUPP)
return ret;
}
ret = snd_soc_dai_set_sysclk(cpu_dai, MCASP_CLK_HCLK_AUXCLK,
sysclk_rate, SND_SOC_CLOCK_IN);
if (ret && ret != -ENOTSUPP)
return ret;
/* Set initial tdm slots */
ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0x3, 0x3, 2, 32);
if (ret && ret != -ENOTSUPP)
return ret;
for_each_rtd_codec_dais(rtd, i, codec_dai) {
ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x3, 0x3, 2, 32);
if (ret && ret != -ENOTSUPP)
return ret;
}
return 0;
}
static int j721e_audio_init_ivi(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dapm_context *dapm = &rtd->card->dapm;
snd_soc_dapm_new_controls(dapm, j721e_ivi_codec_a_dapm_widgets,
ARRAY_SIZE(j721e_ivi_codec_a_dapm_widgets));
snd_soc_dapm_add_routes(dapm, j721e_codec_a_dapm_routes,
ARRAY_SIZE(j721e_codec_a_dapm_routes));
snd_soc_dapm_new_controls(dapm, j721e_ivi_codec_b_dapm_widgets,
ARRAY_SIZE(j721e_ivi_codec_b_dapm_widgets));
snd_soc_dapm_add_routes(dapm, j721e_codec_b_dapm_routes,
ARRAY_SIZE(j721e_codec_b_dapm_routes));
return j721e_audio_init(rtd);
}
static int j721e_get_clocks(struct device *dev,
struct j721e_audio_clocks *clocks, char *prefix)
{
struct clk *parent;
char *clk_name;
int ret;
clocks->target = devm_clk_get(dev, prefix);
if (IS_ERR(clocks->target))
return dev_err_probe(dev, PTR_ERR(clocks->target),
"failed to acquire %s\n", prefix);
clk_name = kasprintf(GFP_KERNEL, "%s-48000", prefix);
if (clk_name) {
parent = devm_clk_get(dev, clk_name);
kfree(clk_name);
if (IS_ERR(parent)) {
ret = PTR_ERR(parent);
if (ret == -EPROBE_DEFER)
return ret;
dev_dbg(dev, "no 48KHz parent for %s: %d\n", prefix, ret);
parent = NULL;
}
clocks->parent[J721E_CLK_PARENT_48000] = parent;
} else {
return -ENOMEM;
}
clk_name = kasprintf(GFP_KERNEL, "%s-44100", prefix);
if (clk_name) {
parent = devm_clk_get(dev, clk_name);
kfree(clk_name);
if (IS_ERR(parent)) {
ret = PTR_ERR(parent);
if (ret == -EPROBE_DEFER)
return ret;
dev_dbg(dev, "no 44.1KHz parent for %s: %d\n", prefix, ret);
parent = NULL;
}
clocks->parent[J721E_CLK_PARENT_44100] = parent;
} else {
return -ENOMEM;
}
if (!clocks->parent[J721E_CLK_PARENT_44100] &&
!clocks->parent[J721E_CLK_PARENT_48000]) {
dev_err(dev, "At least one parent clock is needed for %s\n",
prefix);
return -EINVAL;
}
return 0;
}
static const struct j721e_audio_match_data j721e_cpb_data = {
.board_type = J721E_BOARD_CPB,
.num_links = 2, /* CPB pcm3168a */
.pll_rates = {
[J721E_CLK_PARENT_44100] = 1083801600, /* PLL15 */
[J721E_CLK_PARENT_48000] = 1179648000, /* PLL4 */
},
};
static const struct j721e_audio_match_data j721e_cpb_ivi_data = {
.board_type = J721E_BOARD_CPB_IVI,
.num_links = 4, /* CPB pcm3168a + 2x pcm3168a on IVI */
.pll_rates = {
[J721E_CLK_PARENT_44100] = 1083801600, /* PLL15 */
[J721E_CLK_PARENT_48000] = 1179648000, /* PLL4 */
},
};
static const struct j721e_audio_match_data j7200_cpb_data = {
.board_type = J721E_BOARD_CPB,
.num_links = 2, /* CPB pcm3168a */
.pll_rates = {
[J721E_CLK_PARENT_48000] = 2359296000u, /* PLL4 */
},
};
static const struct of_device_id j721e_audio_of_match[] = {
{
.compatible = "ti,j721e-cpb-audio",
.data = &j721e_cpb_data,
}, {
.compatible = "ti,j721e-cpb-ivi-audio",
.data = &j721e_cpb_ivi_data,
}, {
.compatible = "ti,j7200-cpb-audio",
.data = &j7200_cpb_data,
},
{ },
};
MODULE_DEVICE_TABLE(of, j721e_audio_of_match);
static int j721e_calculate_rate_range(struct j721e_priv *priv)
{
const struct j721e_audio_match_data *match_data = priv->match_data;
struct j721e_audio_clocks *domain_clocks;
unsigned int min_rate, max_rate, pll_rate;
struct clk *pll;
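	/*
	 * Use the rate of the actual parent PLLs when they can be looked up,
	 * falling back to the rates from the match data, then derive the
	 * supported min/max sample rates from the HSDIV divider limit and the
	 * pcm3168a SCKI ratios.
	 */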
domain_clocks = &priv->audio_domains[J721E_AUDIO_DOMAIN_CPB].mcasp;
pll = clk_get_parent(domain_clocks->parent[J721E_CLK_PARENT_44100]);
if (IS_ERR_OR_NULL(pll)) {
priv->pll_rates[J721E_CLK_PARENT_44100] =
match_data->pll_rates[J721E_CLK_PARENT_44100];
} else {
priv->pll_rates[J721E_CLK_PARENT_44100] = clk_get_rate(pll);
clk_put(pll);
}
pll = clk_get_parent(domain_clocks->parent[J721E_CLK_PARENT_48000]);
if (IS_ERR_OR_NULL(pll)) {
priv->pll_rates[J721E_CLK_PARENT_48000] =
match_data->pll_rates[J721E_CLK_PARENT_48000];
} else {
priv->pll_rates[J721E_CLK_PARENT_48000] = clk_get_rate(pll);
clk_put(pll);
}
if (!priv->pll_rates[J721E_CLK_PARENT_44100] &&
!priv->pll_rates[J721E_CLK_PARENT_48000]) {
dev_err(priv->dev, "At least one PLL is needed\n");
return -EINVAL;
}
if (priv->pll_rates[J721E_CLK_PARENT_44100])
pll_rate = priv->pll_rates[J721E_CLK_PARENT_44100];
else
pll_rate = priv->pll_rates[J721E_CLK_PARENT_48000];
min_rate = pll_rate / J721E_MAX_CLK_HSDIV;
min_rate /= ratios_for_pcm3168a[ARRAY_SIZE(ratios_for_pcm3168a) - 1];
if (priv->pll_rates[J721E_CLK_PARENT_48000])
pll_rate = priv->pll_rates[J721E_CLK_PARENT_48000];
else
pll_rate = priv->pll_rates[J721E_CLK_PARENT_44100];
if (pll_rate > PCM1368A_MAX_SYSCLK)
pll_rate = PCM1368A_MAX_SYSCLK;
max_rate = pll_rate / ratios_for_pcm3168a[0];
snd_interval_any(&priv->rate_range);
priv->rate_range.min = min_rate;
priv->rate_range.max = max_rate;
return 0;
}
static int j721e_soc_probe_cpb(struct j721e_priv *priv, int *link_idx,
int *conf_idx)
{
struct device_node *node = priv->dev->of_node;
struct snd_soc_dai_link_component *compnent;
struct device_node *dai_node, *codec_node;
struct j721e_audio_domain *domain;
int comp_count, comp_idx;
int ret;
dai_node = of_parse_phandle(node, "ti,cpb-mcasp", 0);
if (!dai_node) {
dev_err(priv->dev, "CPB McASP node is not provided\n");
return -EINVAL;
}
codec_node = of_parse_phandle(node, "ti,cpb-codec", 0);
if (!codec_node) {
dev_err(priv->dev, "CPB codec node is not provided\n");
ret = -EINVAL;
goto put_dai_node;
}
domain = &priv->audio_domains[J721E_AUDIO_DOMAIN_CPB];
ret = j721e_get_clocks(priv->dev, &domain->codec, "cpb-codec-scki");
if (ret)
goto put_codec_node;
ret = j721e_get_clocks(priv->dev, &domain->mcasp, "cpb-mcasp-auxclk");
if (ret)
goto put_codec_node;
/*
* Common Processor Board, two links
* Link 1: McASP10 -> pcm3168a_1 DAC
* Link 2: McASP10 <- pcm3168a_1 ADC
*/
comp_count = 6;
compnent = devm_kzalloc(priv->dev, comp_count * sizeof(*compnent),
GFP_KERNEL);
if (!compnent) {
ret = -ENOMEM;
goto put_codec_node;
}
comp_idx = 0;
priv->dai_links[*link_idx].cpus = &compnent[comp_idx++];
priv->dai_links[*link_idx].num_cpus = 1;
priv->dai_links[*link_idx].codecs = &compnent[comp_idx++];
priv->dai_links[*link_idx].num_codecs = 1;
priv->dai_links[*link_idx].platforms = &compnent[comp_idx++];
priv->dai_links[*link_idx].num_platforms = 1;
priv->dai_links[*link_idx].name = "CPB PCM3168A Playback";
priv->dai_links[*link_idx].stream_name = "CPB PCM3168A Analog";
priv->dai_links[*link_idx].cpus->of_node = dai_node;
priv->dai_links[*link_idx].platforms->of_node = dai_node;
priv->dai_links[*link_idx].codecs->of_node = codec_node;
priv->dai_links[*link_idx].codecs->dai_name = "pcm3168a-dac";
priv->dai_links[*link_idx].playback_only = 1;
priv->dai_links[*link_idx].id = J721E_AUDIO_DOMAIN_CPB;
priv->dai_links[*link_idx].dai_fmt = J721E_DAI_FMT;
priv->dai_links[*link_idx].init = j721e_audio_init;
priv->dai_links[*link_idx].ops = &j721e_audio_ops;
(*link_idx)++;
priv->dai_links[*link_idx].cpus = &compnent[comp_idx++];
priv->dai_links[*link_idx].num_cpus = 1;
priv->dai_links[*link_idx].codecs = &compnent[comp_idx++];
priv->dai_links[*link_idx].num_codecs = 1;
priv->dai_links[*link_idx].platforms = &compnent[comp_idx++];
priv->dai_links[*link_idx].num_platforms = 1;
priv->dai_links[*link_idx].name = "CPB PCM3168A Capture";
priv->dai_links[*link_idx].stream_name = "CPB PCM3168A Analog";
priv->dai_links[*link_idx].cpus->of_node = dai_node;
priv->dai_links[*link_idx].platforms->of_node = dai_node;
priv->dai_links[*link_idx].codecs->of_node = codec_node;
priv->dai_links[*link_idx].codecs->dai_name = "pcm3168a-adc";
priv->dai_links[*link_idx].capture_only = 1;
priv->dai_links[*link_idx].id = J721E_AUDIO_DOMAIN_CPB;
priv->dai_links[*link_idx].dai_fmt = J721E_DAI_FMT;
priv->dai_links[*link_idx].init = j721e_audio_init;
priv->dai_links[*link_idx].ops = &j721e_audio_ops;
(*link_idx)++;
priv->codec_conf[*conf_idx].dlc.of_node = codec_node;
priv->codec_conf[*conf_idx].name_prefix = "codec-1";
(*conf_idx)++;
priv->codec_conf[*conf_idx].dlc.of_node = dai_node;
priv->codec_conf[*conf_idx].name_prefix = "McASP10";
(*conf_idx)++;
return 0;
put_codec_node:
of_node_put(codec_node);
put_dai_node:
of_node_put(dai_node);
return ret;
}
static int j721e_soc_probe_ivi(struct j721e_priv *priv, int *link_idx,
int *conf_idx)
{
struct device_node *node = priv->dev->of_node;
struct snd_soc_dai_link_component *compnent;
struct device_node *dai_node, *codeca_node, *codecb_node;
struct j721e_audio_domain *domain;
int comp_count, comp_idx;
int ret;
if (priv->match_data->board_type != J721E_BOARD_CPB_IVI)
return 0;
dai_node = of_parse_phandle(node, "ti,ivi-mcasp", 0);
if (!dai_node) {
dev_err(priv->dev, "IVI McASP node is not provided\n");
return -EINVAL;
}
codeca_node = of_parse_phandle(node, "ti,ivi-codec-a", 0);
if (!codeca_node) {
dev_err(priv->dev, "IVI codec-a node is not provided\n");
ret = -EINVAL;
goto put_dai_node;
}
codecb_node = of_parse_phandle(node, "ti,ivi-codec-b", 0);
if (!codecb_node) {
dev_warn(priv->dev, "IVI codec-b node is not provided\n");
ret = 0;
goto put_codeca_node;
}
domain = &priv->audio_domains[J721E_AUDIO_DOMAIN_IVI];
ret = j721e_get_clocks(priv->dev, &domain->codec, "ivi-codec-scki");
if (ret)
goto put_codecb_node;
ret = j721e_get_clocks(priv->dev, &domain->mcasp, "ivi-mcasp-auxclk");
if (ret)
goto put_codecb_node;
/*
* IVI extension, two links
* Link 1: McASP0 -> pcm3168a_a DAC
* \> pcm3168a_b DAC
* Link 2: McASP0 <- pcm3168a_a ADC
* \ pcm3168a_b ADC
*/
comp_count = 8;
compnent = devm_kzalloc(priv->dev, comp_count * sizeof(*compnent),
GFP_KERNEL);
if (!compnent) {
ret = -ENOMEM;
goto put_codecb_node;
}
comp_idx = 0;
priv->dai_links[*link_idx].cpus = &compnent[comp_idx++];
priv->dai_links[*link_idx].num_cpus = 1;
priv->dai_links[*link_idx].platforms = &compnent[comp_idx++];
priv->dai_links[*link_idx].num_platforms = 1;
priv->dai_links[*link_idx].codecs = &compnent[comp_idx];
priv->dai_links[*link_idx].num_codecs = 2;
comp_idx += 2;
priv->dai_links[*link_idx].name = "IVI 2xPCM3168A Playback";
priv->dai_links[*link_idx].stream_name = "IVI 2xPCM3168A Analog";
priv->dai_links[*link_idx].cpus->of_node = dai_node;
priv->dai_links[*link_idx].platforms->of_node = dai_node;
priv->dai_links[*link_idx].codecs[0].of_node = codeca_node;
priv->dai_links[*link_idx].codecs[0].dai_name = "pcm3168a-dac";
priv->dai_links[*link_idx].codecs[1].of_node = codecb_node;
priv->dai_links[*link_idx].codecs[1].dai_name = "pcm3168a-dac";
priv->dai_links[*link_idx].playback_only = 1;
priv->dai_links[*link_idx].id = J721E_AUDIO_DOMAIN_IVI;
priv->dai_links[*link_idx].dai_fmt = J721E_DAI_FMT;
priv->dai_links[*link_idx].init = j721e_audio_init_ivi;
priv->dai_links[*link_idx].ops = &j721e_audio_ops;
(*link_idx)++;
priv->dai_links[*link_idx].cpus = &compnent[comp_idx++];
priv->dai_links[*link_idx].num_cpus = 1;
priv->dai_links[*link_idx].platforms = &compnent[comp_idx++];
priv->dai_links[*link_idx].num_platforms = 1;
priv->dai_links[*link_idx].codecs = &compnent[comp_idx];
priv->dai_links[*link_idx].num_codecs = 2;
priv->dai_links[*link_idx].name = "IVI 2xPCM3168A Capture";
priv->dai_links[*link_idx].stream_name = "IVI 2xPCM3168A Analog";
priv->dai_links[*link_idx].cpus->of_node = dai_node;
priv->dai_links[*link_idx].platforms->of_node = dai_node;
priv->dai_links[*link_idx].codecs[0].of_node = codeca_node;
priv->dai_links[*link_idx].codecs[0].dai_name = "pcm3168a-adc";
priv->dai_links[*link_idx].codecs[1].of_node = codecb_node;
priv->dai_links[*link_idx].codecs[1].dai_name = "pcm3168a-adc";
priv->dai_links[*link_idx].capture_only = 1;
priv->dai_links[*link_idx].id = J721E_AUDIO_DOMAIN_IVI;
priv->dai_links[*link_idx].dai_fmt = J721E_DAI_FMT;
priv->dai_links[*link_idx].init = j721e_audio_init;
priv->dai_links[*link_idx].ops = &j721e_audio_ops;
(*link_idx)++;
priv->codec_conf[*conf_idx].dlc.of_node = codeca_node;
priv->codec_conf[*conf_idx].name_prefix = "codec-a";
(*conf_idx)++;
priv->codec_conf[*conf_idx].dlc.of_node = codecb_node;
priv->codec_conf[*conf_idx].name_prefix = "codec-b";
(*conf_idx)++;
priv->codec_conf[*conf_idx].dlc.of_node = dai_node;
priv->codec_conf[*conf_idx].name_prefix = "McASP0";
(*conf_idx)++;
return 0;
put_codecb_node:
of_node_put(codecb_node);
put_codeca_node:
of_node_put(codeca_node);
put_dai_node:
of_node_put(dai_node);
return ret;
}
static int j721e_soc_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct snd_soc_card *card;
const struct of_device_id *match;
struct j721e_priv *priv;
int link_cnt, conf_cnt, ret, i;
if (!node) {
dev_err(&pdev->dev, "of node is missing.\n");
return -ENODEV;
}
match = of_match_node(j721e_audio_of_match, node);
if (!match) {
dev_err(&pdev->dev, "No compatible match found\n");
return -ENODEV;
}
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->match_data = match->data;
priv->dai_links = devm_kcalloc(&pdev->dev, priv->match_data->num_links,
sizeof(*priv->dai_links), GFP_KERNEL);
if (!priv->dai_links)
return -ENOMEM;
for (i = 0; i < J721E_AUDIO_DOMAIN_LAST; i++)
priv->audio_domains[i].parent_clk_id = -1;
priv->dev = &pdev->dev;
card = &priv->card;
card->dev = &pdev->dev;
card->owner = THIS_MODULE;
card->dapm_widgets = j721e_cpb_dapm_widgets;
card->num_dapm_widgets = ARRAY_SIZE(j721e_cpb_dapm_widgets);
card->dapm_routes = j721e_cpb_dapm_routes;
card->num_dapm_routes = ARRAY_SIZE(j721e_cpb_dapm_routes);
card->fully_routed = 1;
if (snd_soc_of_parse_card_name(card, "model")) {
dev_err(&pdev->dev, "Card name is not provided\n");
return -ENODEV;
}
link_cnt = 0;
conf_cnt = 0;
ret = j721e_soc_probe_cpb(priv, &link_cnt, &conf_cnt);
if (ret)
return ret;
ret = j721e_soc_probe_ivi(priv, &link_cnt, &conf_cnt);
if (ret)
return ret;
card->dai_link = priv->dai_links;
card->num_links = link_cnt;
card->codec_conf = priv->codec_conf;
card->num_configs = conf_cnt;
ret = j721e_calculate_rate_range(priv);
if (ret)
return ret;
snd_soc_card_set_drvdata(card, priv);
mutex_init(&priv->mutex);
ret = devm_snd_soc_register_card(&pdev->dev, card);
if (ret)
dev_err(&pdev->dev, "devm_snd_soc_register_card() failed: %d\n",
ret);
return ret;
}
static struct platform_driver j721e_soc_driver = {
.driver = {
.name = "j721e-audio",
.pm = &snd_soc_pm_ops,
.of_match_table = j721e_audio_of_match,
},
.probe = j721e_soc_probe,
};
module_platform_driver(j721e_soc_driver);
MODULE_AUTHOR("Peter Ujfalusi <[email protected]>");
MODULE_DESCRIPTION("ASoC machine driver for j721e Common Processor Board");
MODULE_LICENSE("GPL v2");
| linux-master | sound/soc/ti/j721e-evm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* omap3pandora.c -- SoC audio for Pandora Handheld Console
*
* Author: Gražvydas Ignotas <[email protected]>
*/
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <asm/mach-types.h>
#include <linux/platform_data/asoc-ti-mcbsp.h>
#include "omap-mcbsp.h"
#define OMAP3_PANDORA_DAC_POWER_GPIO 118
#define OMAP3_PANDORA_AMP_POWER_GPIO 14
#define PREFIX "ASoC omap3pandora: "
static struct regulator *omap3pandora_dac_reg;
static int omap3pandora_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int ret;
/* Set the codec system clock for DAC and ADC */
ret = snd_soc_dai_set_sysclk(codec_dai, 0, 26000000,
SND_SOC_CLOCK_IN);
if (ret < 0) {
pr_err(PREFIX "can't set codec system clock\n");
return ret;
}
/* Set McBSP clock to external */
ret = snd_soc_dai_set_sysclk(cpu_dai, OMAP_MCBSP_SYSCLK_CLKS_EXT,
256 * params_rate(params),
SND_SOC_CLOCK_IN);
if (ret < 0) {
pr_err(PREFIX "can't set cpu system clock\n");
return ret;
}
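	/* Divide the 256*fs reference clock by 8 to generate the 32*fs bit clock */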
ret = snd_soc_dai_set_clkdiv(cpu_dai, OMAP_MCBSP_CLKGDV, 8);
if (ret < 0) {
pr_err(PREFIX "can't set SRG clock divider\n");
return ret;
}
return 0;
}
static int omap3pandora_dac_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
int ret;
/*
* The PCM1773 DAC datasheet requires 1ms delay between switching
* VCC power on/off and /PD pin high/low
*/
if (SND_SOC_DAPM_EVENT_ON(event)) {
ret = regulator_enable(omap3pandora_dac_reg);
if (ret) {
dev_err(w->dapm->dev, "Failed to power DAC: %d\n", ret);
return ret;
}
mdelay(1);
gpio_set_value(OMAP3_PANDORA_DAC_POWER_GPIO, 1);
} else {
gpio_set_value(OMAP3_PANDORA_DAC_POWER_GPIO, 0);
mdelay(1);
regulator_disable(omap3pandora_dac_reg);
}
return 0;
}
static int omap3pandora_hp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
if (SND_SOC_DAPM_EVENT_ON(event))
gpio_set_value(OMAP3_PANDORA_AMP_POWER_GPIO, 1);
else
gpio_set_value(OMAP3_PANDORA_AMP_POWER_GPIO, 0);
return 0;
}
/*
* Audio paths on Pandora board:
*
* |O| ---> PCM DAC +-> AMP -> Headphone Jack
* |M| A +--------> Line Out
* |A| <~~clk~~+
* |P| <--- TWL4030 <--------- Line In and MICs
*/
static const struct snd_soc_dapm_widget omap3pandora_dapm_widgets[] = {
SND_SOC_DAPM_DAC_E("PCM DAC", "HiFi Playback", SND_SOC_NOPM,
0, 0, omap3pandora_dac_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_PGA_E("Headphone Amplifier", SND_SOC_NOPM,
0, 0, NULL, 0, omap3pandora_hp_event,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_HP("Headphone Jack", NULL),
SND_SOC_DAPM_LINE("Line Out", NULL),
SND_SOC_DAPM_MIC("Mic (internal)", NULL),
SND_SOC_DAPM_MIC("Mic (external)", NULL),
SND_SOC_DAPM_LINE("Line In", NULL),
};
static const struct snd_soc_dapm_route omap3pandora_map[] = {
{"PCM DAC", NULL, "APLL Enable"},
{"Headphone Amplifier", NULL, "PCM DAC"},
{"Line Out", NULL, "PCM DAC"},
{"Headphone Jack", NULL, "Headphone Amplifier"},
{"AUXL", NULL, "Line In"},
{"AUXR", NULL, "Line In"},
{"MAINMIC", NULL, "Mic (internal)"},
{"Mic (internal)", NULL, "Mic Bias 1"},
{"SUBMIC", NULL, "Mic (external)"},
{"Mic (external)", NULL, "Mic Bias 2"},
};
static int omap3pandora_out_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dapm_context *dapm = &rtd->card->dapm;
/* All TWL4030 output pins are floating */
snd_soc_dapm_nc_pin(dapm, "EARPIECE");
snd_soc_dapm_nc_pin(dapm, "PREDRIVEL");
snd_soc_dapm_nc_pin(dapm, "PREDRIVER");
snd_soc_dapm_nc_pin(dapm, "HSOL");
snd_soc_dapm_nc_pin(dapm, "HSOR");
snd_soc_dapm_nc_pin(dapm, "CARKITL");
snd_soc_dapm_nc_pin(dapm, "CARKITR");
snd_soc_dapm_nc_pin(dapm, "HFL");
snd_soc_dapm_nc_pin(dapm, "HFR");
snd_soc_dapm_nc_pin(dapm, "VIBRA");
return 0;
}
static int omap3pandora_in_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dapm_context *dapm = &rtd->card->dapm;
	/* Not connected */
snd_soc_dapm_nc_pin(dapm, "HSMIC");
snd_soc_dapm_nc_pin(dapm, "CARKITMIC");
snd_soc_dapm_nc_pin(dapm, "DIGIMIC0");
snd_soc_dapm_nc_pin(dapm, "DIGIMIC1");
return 0;
}
static const struct snd_soc_ops omap3pandora_ops = {
.hw_params = omap3pandora_hw_params,
};
/* Digital audio interface glue - connects codec <--> CPU */
SND_SOC_DAILINK_DEFS(out,
DAILINK_COMP_ARRAY(COMP_CPU("omap-mcbsp.2")),
DAILINK_COMP_ARRAY(COMP_CODEC("twl4030-codec", "twl4030-hifi")),
DAILINK_COMP_ARRAY(COMP_PLATFORM("omap-mcbsp.2")));
SND_SOC_DAILINK_DEFS(in,
DAILINK_COMP_ARRAY(COMP_CPU("omap-mcbsp.4")),
DAILINK_COMP_ARRAY(COMP_CODEC("twl4030-codec", "twl4030-hifi")),
DAILINK_COMP_ARRAY(COMP_PLATFORM("omap-mcbsp.4")));
static struct snd_soc_dai_link omap3pandora_dai[] = {
{
.name = "PCM1773",
.stream_name = "HiFi Out",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS,
.ops = &omap3pandora_ops,
.init = omap3pandora_out_init,
SND_SOC_DAILINK_REG(out),
}, {
.name = "TWL4030",
.stream_name = "Line/Mic In",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS,
.ops = &omap3pandora_ops,
.init = omap3pandora_in_init,
SND_SOC_DAILINK_REG(in),
}
};
/* SoC card */
static struct snd_soc_card snd_soc_card_omap3pandora = {
.name = "omap3pandora",
.owner = THIS_MODULE,
.dai_link = omap3pandora_dai,
.num_links = ARRAY_SIZE(omap3pandora_dai),
.dapm_widgets = omap3pandora_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(omap3pandora_dapm_widgets),
.dapm_routes = omap3pandora_map,
.num_dapm_routes = ARRAY_SIZE(omap3pandora_map),
};
static struct platform_device *omap3pandora_snd_device;
static int __init omap3pandora_soc_init(void)
{
int ret;
if (!machine_is_omap3_pandora())
return -ENODEV;
pr_info("OMAP3 Pandora SoC init\n");
ret = gpio_request(OMAP3_PANDORA_DAC_POWER_GPIO, "dac_power");
if (ret) {
pr_err(PREFIX "Failed to get DAC power GPIO\n");
return ret;
}
ret = gpio_direction_output(OMAP3_PANDORA_DAC_POWER_GPIO, 0);
if (ret) {
pr_err(PREFIX "Failed to set DAC power GPIO direction\n");
goto fail0;
}
ret = gpio_request(OMAP3_PANDORA_AMP_POWER_GPIO, "amp_power");
if (ret) {
pr_err(PREFIX "Failed to get amp power GPIO\n");
goto fail0;
}
ret = gpio_direction_output(OMAP3_PANDORA_AMP_POWER_GPIO, 0);
if (ret) {
pr_err(PREFIX "Failed to set amp power GPIO direction\n");
goto fail1;
}
omap3pandora_snd_device = platform_device_alloc("soc-audio", -1);
if (omap3pandora_snd_device == NULL) {
pr_err(PREFIX "Platform device allocation failed\n");
ret = -ENOMEM;
goto fail1;
}
platform_set_drvdata(omap3pandora_snd_device, &snd_soc_card_omap3pandora);
ret = platform_device_add(omap3pandora_snd_device);
if (ret) {
pr_err(PREFIX "Unable to add platform device\n");
goto fail2;
}
omap3pandora_dac_reg = regulator_get(&omap3pandora_snd_device->dev, "vcc");
if (IS_ERR(omap3pandora_dac_reg)) {
pr_err(PREFIX "Failed to get DAC regulator from %s: %ld\n",
dev_name(&omap3pandora_snd_device->dev),
PTR_ERR(omap3pandora_dac_reg));
ret = PTR_ERR(omap3pandora_dac_reg);
goto fail3;
}
return 0;
fail3:
platform_device_del(omap3pandora_snd_device);
fail2:
platform_device_put(omap3pandora_snd_device);
fail1:
gpio_free(OMAP3_PANDORA_AMP_POWER_GPIO);
fail0:
gpio_free(OMAP3_PANDORA_DAC_POWER_GPIO);
return ret;
}
module_init(omap3pandora_soc_init);
static void __exit omap3pandora_soc_exit(void)
{
regulator_put(omap3pandora_dac_reg);
platform_device_unregister(omap3pandora_snd_device);
gpio_free(OMAP3_PANDORA_AMP_POWER_GPIO);
gpio_free(OMAP3_PANDORA_DAC_POWER_GPIO);
}
module_exit(omap3pandora_soc_exit);
MODULE_AUTHOR("Grazvydas Ignotas <[email protected]>");
MODULE_DESCRIPTION("ALSA SoC OMAP3 Pandora");
MODULE_LICENSE("GPL");
| linux-master | sound/soc/ti/omap3pandora.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* omap-dmic.c -- OMAP ASoC DMIC DAI driver
*
* Copyright (C) 2010 - 2011 Texas Instruments
*
* Author: David Lambert <[email protected]>
* Misael Lopez Cruz <[email protected]>
* Liam Girdwood <[email protected]>
* Peter Ujfalusi <[email protected]>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/of_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>
#include "omap-dmic.h"
#include "sdma-pcm.h"
struct omap_dmic {
struct device *dev;
void __iomem *io_base;
struct clk *fclk;
struct pm_qos_request pm_qos_req;
int latency;
int fclk_freq;
int out_freq;
int clk_div;
int sysclk;
int threshold;
u32 ch_enabled;
bool active;
struct mutex mutex;
struct snd_dmaengine_dai_dma_data dma_data;
};
static inline void omap_dmic_write(struct omap_dmic *dmic, u16 reg, u32 val)
{
writel_relaxed(val, dmic->io_base + reg);
}
static inline int omap_dmic_read(struct omap_dmic *dmic, u16 reg)
{
return readl_relaxed(dmic->io_base + reg);
}
static inline void omap_dmic_start(struct omap_dmic *dmic)
{
u32 ctrl = omap_dmic_read(dmic, OMAP_DMIC_CTRL_REG);
/* Configure DMA controller */
omap_dmic_write(dmic, OMAP_DMIC_DMAENABLE_SET_REG,
OMAP_DMIC_DMA_ENABLE);
omap_dmic_write(dmic, OMAP_DMIC_CTRL_REG, ctrl | dmic->ch_enabled);
}
static inline void omap_dmic_stop(struct omap_dmic *dmic)
{
u32 ctrl = omap_dmic_read(dmic, OMAP_DMIC_CTRL_REG);
omap_dmic_write(dmic, OMAP_DMIC_CTRL_REG,
ctrl & ~OMAP_DMIC_UP_ENABLE_MASK);
/* Disable DMA request generation */
omap_dmic_write(dmic, OMAP_DMIC_DMAENABLE_CLR_REG,
OMAP_DMIC_DMA_ENABLE);
}
static inline int dmic_is_enabled(struct omap_dmic *dmic)
{
return omap_dmic_read(dmic, OMAP_DMIC_CTRL_REG) &
OMAP_DMIC_UP_ENABLE_MASK;
}
static int omap_dmic_dai_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct omap_dmic *dmic = snd_soc_dai_get_drvdata(dai);
int ret = 0;
mutex_lock(&dmic->mutex);
if (!snd_soc_dai_active(dai))
dmic->active = 1;
else
ret = -EBUSY;
mutex_unlock(&dmic->mutex);
return ret;
}
static void omap_dmic_dai_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct omap_dmic *dmic = snd_soc_dai_get_drvdata(dai);
mutex_lock(&dmic->mutex);
cpu_latency_qos_remove_request(&dmic->pm_qos_req);
if (!snd_soc_dai_active(dai))
dmic->active = 0;
mutex_unlock(&dmic->mutex);
}
static int omap_dmic_select_divider(struct omap_dmic *dmic, int sample_rate)
{
int divider = -EINVAL;
/*
* 192KHz rate is only supported with 19.2MHz/3.84MHz clock
* configuration.
*/
if (sample_rate == 192000) {
if (dmic->fclk_freq == 19200000 && dmic->out_freq == 3840000)
divider = 0x6; /* Divider: 5 (192KHz sampling rate) */
else
dev_err(dmic->dev,
"invalid clock configuration for 192KHz\n");
return divider;
}
switch (dmic->out_freq) {
case 1536000:
if (dmic->fclk_freq != 24576000)
goto div_err;
divider = 0x4; /* Divider: 16 */
break;
case 2400000:
switch (dmic->fclk_freq) {
case 12000000:
divider = 0x5; /* Divider: 5 */
break;
case 19200000:
divider = 0x0; /* Divider: 8 */
break;
case 24000000:
divider = 0x2; /* Divider: 10 */
break;
default:
goto div_err;
}
break;
case 3072000:
if (dmic->fclk_freq != 24576000)
goto div_err;
divider = 0x3; /* Divider: 8 */
break;
case 3840000:
if (dmic->fclk_freq != 19200000)
goto div_err;
divider = 0x1; /* Divider: 5 (96KHz sampling rate) */
break;
default:
dev_err(dmic->dev, "invalid out frequency: %dHz\n",
dmic->out_freq);
break;
}
return divider;
div_err:
dev_err(dmic->dev, "invalid out frequency %dHz for %dHz input\n",
dmic->out_freq, dmic->fclk_freq);
return -EINVAL;
}
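/*
* Illustrative note (not part of the original source): the divider
* comments above correspond to fclk / out_freq, e.g. 19.2 MHz / 2.4 MHz
* = 8 (register value 0x0) and 24 MHz / 2.4 MHz = 10 (register value
* 0x2).
*/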
static int omap_dmic_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct omap_dmic *dmic = snd_soc_dai_get_drvdata(dai);
struct snd_dmaengine_dai_dma_data *dma_data;
int channels;
dmic->clk_div = omap_dmic_select_divider(dmic, params_rate(params));
if (dmic->clk_div < 0) {
dev_err(dmic->dev, "no valid divider for %dHz from %dHz\n",
dmic->out_freq, dmic->fclk_freq);
return -EINVAL;
}
dmic->ch_enabled = 0;
channels = params_channels(params);
switch (channels) {
case 6:
dmic->ch_enabled |= OMAP_DMIC_UP3_ENABLE;
fallthrough;
case 4:
dmic->ch_enabled |= OMAP_DMIC_UP2_ENABLE;
fallthrough;
case 2:
dmic->ch_enabled |= OMAP_DMIC_UP1_ENABLE;
break;
default:
dev_err(dmic->dev, "invalid number of legacy channels\n");
return -EINVAL;
}
/* packet size is threshold * channels */
dma_data = snd_soc_dai_get_dma_data(dai, substream);
dma_data->maxburst = dmic->threshold * channels;
dmic->latency = (OMAP_DMIC_THRES_MAX - dmic->threshold) * USEC_PER_SEC /
params_rate(params);
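/*
* Illustrative numbers, assuming the default threshold programmed in
* omap_dmic_probe() (OMAP_DMIC_THRES_MAX - 3): the difference is 3
* samples, so latency = 3 * USEC_PER_SEC / rate, i.e. roughly 31 us at
* 96 kHz and 15 us at 192 kHz.
*/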
return 0;
}
static int omap_dmic_dai_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct omap_dmic *dmic = snd_soc_dai_get_drvdata(dai);
u32 ctrl;
if (cpu_latency_qos_request_active(&dmic->pm_qos_req))
cpu_latency_qos_update_request(&dmic->pm_qos_req,
dmic->latency);
/* Configure uplink threshold */
omap_dmic_write(dmic, OMAP_DMIC_FIFO_CTRL_REG, dmic->threshold);
ctrl = omap_dmic_read(dmic, OMAP_DMIC_CTRL_REG);
/* Set dmic out format */
ctrl &= ~(OMAP_DMIC_FORMAT | OMAP_DMIC_POLAR_MASK);
ctrl |= (OMAP_DMICOUTFORMAT_LJUST | OMAP_DMIC_POLAR1 |
OMAP_DMIC_POLAR2 | OMAP_DMIC_POLAR3);
/* Configure dmic clock divider */
ctrl &= ~OMAP_DMIC_CLK_DIV_MASK;
ctrl |= OMAP_DMIC_CLK_DIV(dmic->clk_div);
omap_dmic_write(dmic, OMAP_DMIC_CTRL_REG, ctrl);
omap_dmic_write(dmic, OMAP_DMIC_CTRL_REG,
ctrl | OMAP_DMICOUTFORMAT_LJUST | OMAP_DMIC_POLAR1 |
OMAP_DMIC_POLAR2 | OMAP_DMIC_POLAR3);
return 0;
}
static int omap_dmic_dai_trigger(struct snd_pcm_substream *substream,
int cmd, struct snd_soc_dai *dai)
{
struct omap_dmic *dmic = snd_soc_dai_get_drvdata(dai);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
omap_dmic_start(dmic);
break;
case SNDRV_PCM_TRIGGER_STOP:
omap_dmic_stop(dmic);
break;
default:
break;
}
return 0;
}
static int omap_dmic_select_fclk(struct omap_dmic *dmic, int clk_id,
unsigned int freq)
{
struct clk *parent_clk, *mux;
char *parent_clk_name;
int ret = 0;
switch (freq) {
case 12000000:
case 19200000:
case 24000000:
case 24576000:
break;
default:
dev_err(dmic->dev, "invalid input frequency: %dHz\n", freq);
dmic->fclk_freq = 0;
return -EINVAL;
}
if (dmic->sysclk == clk_id) {
dmic->fclk_freq = freq;
return 0;
}
/* re-parent not allowed if a stream is ongoing */
if (dmic->active && dmic_is_enabled(dmic)) {
dev_err(dmic->dev, "can't re-parent when DMIC active\n");
return -EBUSY;
}
switch (clk_id) {
case OMAP_DMIC_SYSCLK_PAD_CLKS:
parent_clk_name = "pad_clks_ck";
break;
case OMAP_DMIC_SYSCLK_SLIMBLUS_CLKS:
parent_clk_name = "slimbus_clk";
break;
case OMAP_DMIC_SYSCLK_SYNC_MUX_CLKS:
parent_clk_name = "dmic_sync_mux_ck";
break;
default:
dev_err(dmic->dev, "fclk clk_id (%d) not supported\n", clk_id);
return -EINVAL;
}
parent_clk = clk_get(dmic->dev, parent_clk_name);
if (IS_ERR(parent_clk)) {
dev_err(dmic->dev, "can't get %s\n", parent_clk_name);
return -ENODEV;
}
mux = clk_get_parent(dmic->fclk);
if (IS_ERR(mux)) {
dev_err(dmic->dev, "can't get fck mux parent\n");
clk_put(parent_clk);
return -ENODEV;
}
mutex_lock(&dmic->mutex);
if (dmic->active) {
/* disable clock while reparenting */
pm_runtime_put_sync(dmic->dev);
ret = clk_set_parent(mux, parent_clk);
pm_runtime_get_sync(dmic->dev);
} else {
ret = clk_set_parent(mux, parent_clk);
}
mutex_unlock(&dmic->mutex);
if (ret < 0) {
dev_err(dmic->dev, "re-parent failed\n");
goto err_busy;
}
dmic->sysclk = clk_id;
dmic->fclk_freq = freq;
err_busy:
clk_put(mux);
clk_put(parent_clk);
return ret;
}
static int omap_dmic_select_outclk(struct omap_dmic *dmic, int clk_id,
unsigned int freq)
{
int ret = 0;
if (clk_id != OMAP_DMIC_ABE_DMIC_CLK) {
dev_err(dmic->dev, "output clk_id (%d) not supported\n",
clk_id);
return -EINVAL;
}
switch (freq) {
case 1536000:
case 2400000:
case 3072000:
case 3840000:
dmic->out_freq = freq;
break;
default:
dev_err(dmic->dev, "invalid out frequency: %dHz\n", freq);
dmic->out_freq = 0;
ret = -EINVAL;
}
return ret;
}
static int omap_dmic_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
unsigned int freq, int dir)
{
struct omap_dmic *dmic = snd_soc_dai_get_drvdata(dai);
if (dir == SND_SOC_CLOCK_IN)
return omap_dmic_select_fclk(dmic, clk_id, freq);
else if (dir == SND_SOC_CLOCK_OUT)
return omap_dmic_select_outclk(dmic, clk_id, freq);
dev_err(dmic->dev, "invalid clock direction (%d)\n", dir);
return -EINVAL;
}
static int omap_dmic_probe(struct snd_soc_dai *dai)
{
struct omap_dmic *dmic = snd_soc_dai_get_drvdata(dai);
pm_runtime_enable(dmic->dev);
/* Disable lines while request is ongoing */
pm_runtime_get_sync(dmic->dev);
omap_dmic_write(dmic, OMAP_DMIC_CTRL_REG, 0x00);
pm_runtime_put_sync(dmic->dev);
/* Configure DMIC threshold value */
dmic->threshold = OMAP_DMIC_THRES_MAX - 3;
snd_soc_dai_init_dma_data(dai, NULL, &dmic->dma_data);
return 0;
}
static int omap_dmic_remove(struct snd_soc_dai *dai)
{
struct omap_dmic *dmic = snd_soc_dai_get_drvdata(dai);
pm_runtime_disable(dmic->dev);
return 0;
}
static const struct snd_soc_dai_ops omap_dmic_dai_ops = {
.probe = omap_dmic_probe,
.remove = omap_dmic_remove,
.startup = omap_dmic_dai_startup,
.shutdown = omap_dmic_dai_shutdown,
.hw_params = omap_dmic_dai_hw_params,
.prepare = omap_dmic_dai_prepare,
.trigger = omap_dmic_dai_trigger,
.set_sysclk = omap_dmic_set_dai_sysclk,
};
static struct snd_soc_dai_driver omap_dmic_dai = {
.name = "omap-dmic",
.capture = {
.channels_min = 2,
.channels_max = 6,
.rates = SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.sig_bits = 24,
},
.ops = &omap_dmic_dai_ops,
};
static const struct snd_soc_component_driver omap_dmic_component = {
.name = "omap-dmic",
.legacy_dai_naming = 1,
};
static int asoc_dmic_probe(struct platform_device *pdev)
{
struct omap_dmic *dmic;
struct resource *res;
int ret;
dmic = devm_kzalloc(&pdev->dev, sizeof(struct omap_dmic), GFP_KERNEL);
if (!dmic)
return -ENOMEM;
platform_set_drvdata(pdev, dmic);
dmic->dev = &pdev->dev;
dmic->sysclk = OMAP_DMIC_SYSCLK_SYNC_MUX_CLKS;
mutex_init(&dmic->mutex);
dmic->fclk = devm_clk_get(dmic->dev, "fck");
if (IS_ERR(dmic->fclk)) {
dev_err(dmic->dev, "can't get fck\n");
return -ENODEV;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
if (!res) {
dev_err(dmic->dev, "invalid dma memory resource\n");
return -ENODEV;
}
dmic->dma_data.addr = res->start + OMAP_DMIC_DATA_REG;
dmic->dma_data.filter_data = "up_link";
dmic->io_base = devm_platform_ioremap_resource_byname(pdev, "mpu");
if (IS_ERR(dmic->io_base))
return PTR_ERR(dmic->io_base);
ret = devm_snd_soc_register_component(&pdev->dev,
&omap_dmic_component,
&omap_dmic_dai, 1);
if (ret)
return ret;
ret = sdma_pcm_platform_register(&pdev->dev, NULL, "up_link");
if (ret)
return ret;
return 0;
}
static const struct of_device_id omap_dmic_of_match[] = {
{ .compatible = "ti,omap4-dmic", },
{ }
};
MODULE_DEVICE_TABLE(of, omap_dmic_of_match);
static struct platform_driver asoc_dmic_driver = {
.driver = {
.name = "omap-dmic",
.of_match_table = omap_dmic_of_match,
},
.probe = asoc_dmic_probe,
};
module_platform_driver(asoc_dmic_driver);
MODULE_ALIAS("platform:omap-dmic");
MODULE_AUTHOR("Peter Ujfalusi <[email protected]>");
MODULE_DESCRIPTION("OMAP DMIC ASoC Interface");
MODULE_LICENSE("GPL");
| linux-master | sound/soc/ti/omap-dmic.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for Atmel I2S controller
*
* Copyright (C) 2015 Atmel Corporation
*
* Author: Cyrille Pitchen <[email protected]>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>
#define ATMEL_I2SC_MAX_TDM_CHANNELS 8
/*
* ---- I2S Controller Register map ----
*/
#define ATMEL_I2SC_CR 0x0000 /* Control Register */
#define ATMEL_I2SC_MR 0x0004 /* Mode Register */
#define ATMEL_I2SC_SR 0x0008 /* Status Register */
#define ATMEL_I2SC_SCR 0x000c /* Status Clear Register */
#define ATMEL_I2SC_SSR 0x0010 /* Status Set Register */
#define ATMEL_I2SC_IER 0x0014 /* Interrupt Enable Register */
#define ATMEL_I2SC_IDR 0x0018 /* Interrupt Disable Register */
#define ATMEL_I2SC_IMR 0x001c /* Interrupt Mask Register */
#define ATMEL_I2SC_RHR 0x0020 /* Receiver Holding Register */
#define ATMEL_I2SC_THR 0x0024 /* Transmitter Holding Register */
#define ATMEL_I2SC_VERSION 0x0028 /* Version Register */
/*
* ---- Control Register (Write-only) ----
*/
#define ATMEL_I2SC_CR_RXEN BIT(0) /* Receiver Enable */
#define ATMEL_I2SC_CR_RXDIS BIT(1) /* Receiver Disable */
#define ATMEL_I2SC_CR_CKEN BIT(2) /* Clock Enable */
#define ATMEL_I2SC_CR_CKDIS BIT(3) /* Clock Disable */
#define ATMEL_I2SC_CR_TXEN BIT(4) /* Transmitter Enable */
#define ATMEL_I2SC_CR_TXDIS BIT(5) /* Transmitter Disable */
#define ATMEL_I2SC_CR_SWRST BIT(7) /* Software Reset */
/*
* ---- Mode Register (Read/Write) ----
*/
#define ATMEL_I2SC_MR_MODE_MASK GENMASK(0, 0)
#define ATMEL_I2SC_MR_MODE_SLAVE (0 << 0)
#define ATMEL_I2SC_MR_MODE_MASTER (1 << 0)
#define ATMEL_I2SC_MR_DATALENGTH_MASK GENMASK(4, 2)
#define ATMEL_I2SC_MR_DATALENGTH_32_BITS (0 << 2)
#define ATMEL_I2SC_MR_DATALENGTH_24_BITS (1 << 2)
#define ATMEL_I2SC_MR_DATALENGTH_20_BITS (2 << 2)
#define ATMEL_I2SC_MR_DATALENGTH_18_BITS (3 << 2)
#define ATMEL_I2SC_MR_DATALENGTH_16_BITS (4 << 2)
#define ATMEL_I2SC_MR_DATALENGTH_16_BITS_COMPACT (5 << 2)
#define ATMEL_I2SC_MR_DATALENGTH_8_BITS (6 << 2)
#define ATMEL_I2SC_MR_DATALENGTH_8_BITS_COMPACT (7 << 2)
#define ATMEL_I2SC_MR_FORMAT_MASK GENMASK(7, 6)
#define ATMEL_I2SC_MR_FORMAT_I2S (0 << 6)
#define ATMEL_I2SC_MR_FORMAT_LJ (1 << 6) /* Left Justified */
#define ATMEL_I2SC_MR_FORMAT_TDM (2 << 6)
#define ATMEL_I2SC_MR_FORMAT_TDMLJ (3 << 6)
/* Left audio samples duplicated to right audio channel */
#define ATMEL_I2SC_MR_RXMONO BIT(8)
/* Receiver uses one DMA channel ... */
#define ATMEL_I2SC_MR_RXDMA_MASK GENMASK(9, 9)
#define ATMEL_I2SC_MR_RXDMA_SINGLE (0 << 9) /* for all audio channels */
#define ATMEL_I2SC_MR_RXDMA_MULTIPLE (1 << 9) /* per audio channel */
/* I2SDO output of I2SC is internally connected to I2SDI input */
#define ATMEL_I2SC_MR_RXLOOP BIT(10)
/* Left audio samples duplicated to right audio channel */
#define ATMEL_I2SC_MR_TXMONO BIT(12)
/* Transmitter uses one DMA channel ... */
#define ATMEL_I2SC_MR_TXDMA_MASK GENMASK(13, 13)
#define ATMEL_I2SC_MR_TXDMA_SINGLE (0 << 13) /* for all audio channels */
#define ATMEL_I2SC_MR_TXDMA_MULTIPLE (1 << 13) /* per audio channel */
/* x sample transmitted when underrun */
#define ATMEL_I2SC_MR_TXSAME_MASK GENMASK(14, 14)
#define ATMEL_I2SC_MR_TXSAME_ZERO (0 << 14) /* Zero sample */
#define ATMEL_I2SC_MR_TXSAME_PREVIOUS (1 << 14) /* Previous sample */
/* Audio Clock to I2SC Master Clock ratio */
#define ATMEL_I2SC_MR_IMCKDIV_MASK GENMASK(21, 16)
#define ATMEL_I2SC_MR_IMCKDIV(div) \
(((div) << 16) & ATMEL_I2SC_MR_IMCKDIV_MASK)
/* Master Clock to fs ratio */
#define ATMEL_I2SC_MR_IMCKFS_MASK GENMASK(29, 24)
#define ATMEL_I2SC_MR_IMCKFS(fs) \
(((fs) << 24) & ATMEL_I2SC_MR_IMCKFS_MASK)
/* Master Clock mode */
#define ATMEL_I2SC_MR_IMCKMODE_MASK GENMASK(30, 30)
/* 0: No master clock generated (selected clock drives I2SCK pin) */
#define ATMEL_I2SC_MR_IMCKMODE_I2SCK (0 << 30)
/* 1: master clock generated (internally generated clock drives I2SMCK pin) */
#define ATMEL_I2SC_MR_IMCKMODE_I2SMCK (1 << 30)
/* Slot Width */
/* 0: slot is 32 bits wide for DATALENGTH = 18/20/24 bits. */
/* 1: slot is 24 bits wide for DATALENGTH = 18/20/24 bits. */
#define ATMEL_I2SC_MR_IWS BIT(31)
/*
* ---- Status Registers ----
*/
#define ATMEL_I2SC_SR_RXEN BIT(0) /* Receiver Enabled */
#define ATMEL_I2SC_SR_RXRDY BIT(1) /* Receive Ready */
#define ATMEL_I2SC_SR_RXOR BIT(2) /* Receive Overrun */
#define ATMEL_I2SC_SR_TXEN BIT(4) /* Transmitter Enabled */
#define ATMEL_I2SC_SR_TXRDY BIT(5) /* Transmit Ready */
#define ATMEL_I2SC_SR_TXUR BIT(6) /* Transmit Underrun */
/* Receive Overrun Channel */
#define ATMEL_I2SC_SR_RXORCH_MASK GENMASK(15, 8)
#define ATMEL_I2SC_SR_RXORCH(ch) (1 << (((ch) & 0x7) + 8))
/* Transmit Underrun Channel */
#define ATMEL_I2SC_SR_TXURCH_MASK GENMASK(27, 20)
#define ATMEL_I2SC_SR_TXURCH(ch) (1 << (((ch) & 0x7) + 20))
/*
* ---- Interrupt Enable/Disable/Mask Registers ----
*/
#define ATMEL_I2SC_INT_RXRDY ATMEL_I2SC_SR_RXRDY
#define ATMEL_I2SC_INT_RXOR ATMEL_I2SC_SR_RXOR
#define ATMEL_I2SC_INT_TXRDY ATMEL_I2SC_SR_TXRDY
#define ATMEL_I2SC_INT_TXUR ATMEL_I2SC_SR_TXUR
static const struct regmap_config atmel_i2s_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = ATMEL_I2SC_VERSION,
};
struct atmel_i2s_gck_param {
int fs;
unsigned long mck;
int imckdiv;
int imckfs;
};
#define I2S_MCK_12M288 12288000UL
#define I2S_MCK_11M2896 11289600UL
#define I2S_MCK_6M144 6144000UL
/* mck = (32 * (imckfs+1) / (imckdiv+1)) * fs */
static const struct atmel_i2s_gck_param gck_params[] = {
/* mck = 6.144Mhz */
{ 8000, I2S_MCK_6M144, 1, 47}, /* mck = 768 fs */
/* mck = 12.288MHz */
{ 16000, I2S_MCK_12M288, 1, 47}, /* mck = 768 fs */
{ 24000, I2S_MCK_12M288, 3, 63}, /* mck = 512 fs */
{ 32000, I2S_MCK_12M288, 3, 47}, /* mck = 384 fs */
{ 48000, I2S_MCK_12M288, 7, 63}, /* mck = 256 fs */
{ 64000, I2S_MCK_12M288, 7, 47}, /* mck = 192 fs */
{ 96000, I2S_MCK_12M288, 7, 31}, /* mck = 128 fs */
{192000, I2S_MCK_12M288, 7, 15}, /* mck = 64 fs */
/* mck = 11.2896MHz */
{ 11025, I2S_MCK_11M2896, 1, 63}, /* mck = 1024 fs */
{ 22050, I2S_MCK_11M2896, 3, 63}, /* mck = 512 fs */
{ 44100, I2S_MCK_11M2896, 7, 63}, /* mck = 256 fs */
{ 88200, I2S_MCK_11M2896, 7, 31}, /* mck = 128 fs */
{176400, I2S_MCK_11M2896, 7, 15}, /* mck = 64 fs */
};
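/*
* Worked example for the formula above (illustrative note, not part of
* the original source): the 48 kHz entry uses imckdiv = 7 and
* imckfs = 63, so mck = (32 * (63 + 1) / (7 + 1)) * 48000 =
* 256 * 48000 = 12288000 Hz, which matches I2S_MCK_12M288 (256 fs).
*/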
struct atmel_i2s_dev;
struct atmel_i2s_caps {
int (*mck_init)(struct atmel_i2s_dev *, struct device_node *np);
};
struct atmel_i2s_dev {
struct device *dev;
struct regmap *regmap;
struct clk *pclk;
struct clk *gclk;
struct snd_dmaengine_dai_dma_data playback;
struct snd_dmaengine_dai_dma_data capture;
unsigned int fmt;
const struct atmel_i2s_gck_param *gck_param;
const struct atmel_i2s_caps *caps;
int clk_use_no;
};
static irqreturn_t atmel_i2s_interrupt(int irq, void *dev_id)
{
struct atmel_i2s_dev *dev = dev_id;
unsigned int sr, imr, pending, ch, mask;
irqreturn_t ret = IRQ_NONE;
regmap_read(dev->regmap, ATMEL_I2SC_SR, &sr);
regmap_read(dev->regmap, ATMEL_I2SC_IMR, &imr);
pending = sr & imr;
if (!pending)
return IRQ_NONE;
if (pending & ATMEL_I2SC_INT_RXOR) {
mask = ATMEL_I2SC_SR_RXOR;
for (ch = 0; ch < ATMEL_I2SC_MAX_TDM_CHANNELS; ++ch) {
if (sr & ATMEL_I2SC_SR_RXORCH(ch)) {
mask |= ATMEL_I2SC_SR_RXORCH(ch);
dev_err(dev->dev,
"RX overrun on channel %d\n", ch);
}
}
regmap_write(dev->regmap, ATMEL_I2SC_SCR, mask);
ret = IRQ_HANDLED;
}
if (pending & ATMEL_I2SC_INT_TXUR) {
mask = ATMEL_I2SC_SR_TXUR;
for (ch = 0; ch < ATMEL_I2SC_MAX_TDM_CHANNELS; ++ch) {
if (sr & ATMEL_I2SC_SR_TXURCH(ch)) {
mask |= ATMEL_I2SC_SR_TXURCH(ch);
dev_err(dev->dev,
"TX underrun on channel %d\n", ch);
}
}
regmap_write(dev->regmap, ATMEL_I2SC_SCR, mask);
ret = IRQ_HANDLED;
}
return ret;
}
#define ATMEL_I2S_RATES SNDRV_PCM_RATE_8000_192000
#define ATMEL_I2S_FORMATS (SNDRV_PCM_FMTBIT_S8 | \
SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S18_3LE | \
SNDRV_PCM_FMTBIT_S20_3LE | \
SNDRV_PCM_FMTBIT_S24_3LE | \
SNDRV_PCM_FMTBIT_S24_LE | \
SNDRV_PCM_FMTBIT_S32_LE)
static int atmel_i2s_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct atmel_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
dev->fmt = fmt;
return 0;
}
static int atmel_i2s_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct atmel_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
bool is_playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
unsigned int rhr, sr = 0;
if (is_playback) {
regmap_read(dev->regmap, ATMEL_I2SC_SR, &sr);
if (sr & ATMEL_I2SC_SR_RXRDY) {
/*
* The RX Ready flag should not be set. However, if it is, flush
* (read) the Receive Holding Register so that we start from a
* clean state.
*/
dev_dbg(dev->dev, "RXRDY is set\n");
regmap_read(dev->regmap, ATMEL_I2SC_RHR, &rhr);
}
}
return 0;
}
static int atmel_i2s_get_gck_param(struct atmel_i2s_dev *dev, int fs)
{
int i, best;
if (!dev->gclk) {
dev_err(dev->dev, "cannot generate the I2S Master Clock\n");
return -EINVAL;
}
/*
* Find the best possible settings to generate the I2S Master Clock
* from the PLL Audio.
*/
dev->gck_param = NULL;
best = INT_MAX;
for (i = 0; i < ARRAY_SIZE(gck_params); ++i) {
const struct atmel_i2s_gck_param *gck_param = &gck_params[i];
int val = abs(fs - gck_param->fs);
if (val < best) {
best = val;
dev->gck_param = gck_param;
}
}
return 0;
}
static int atmel_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct atmel_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
bool is_playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
unsigned int mr = 0, mr_mask;
int ret;
mr_mask = ATMEL_I2SC_MR_FORMAT_MASK | ATMEL_I2SC_MR_MODE_MASK |
ATMEL_I2SC_MR_DATALENGTH_MASK;
if (is_playback)
mr_mask |= ATMEL_I2SC_MR_TXMONO;
else
mr_mask |= ATMEL_I2SC_MR_RXMONO;
switch (dev->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
mr |= ATMEL_I2SC_MR_FORMAT_I2S;
break;
default:
dev_err(dev->dev, "unsupported bus format\n");
return -EINVAL;
}
switch (dev->fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
case SND_SOC_DAIFMT_BP_FP:
/* codec is slave, so cpu is master */
mr |= ATMEL_I2SC_MR_MODE_MASTER;
ret = atmel_i2s_get_gck_param(dev, params_rate(params));
if (ret)
return ret;
break;
case SND_SOC_DAIFMT_BC_FC:
/* codec is master, so cpu is slave */
mr |= ATMEL_I2SC_MR_MODE_SLAVE;
dev->gck_param = NULL;
break;
default:
dev_err(dev->dev, "unsupported master/slave mode\n");
return -EINVAL;
}
switch (params_channels(params)) {
case 1:
if (is_playback)
mr |= ATMEL_I2SC_MR_TXMONO;
else
mr |= ATMEL_I2SC_MR_RXMONO;
break;
case 2:
break;
default:
dev_err(dev->dev, "unsupported number of audio channels\n");
return -EINVAL;
}
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S8:
mr |= ATMEL_I2SC_MR_DATALENGTH_8_BITS;
break;
case SNDRV_PCM_FORMAT_S16_LE:
mr |= ATMEL_I2SC_MR_DATALENGTH_16_BITS;
break;
case SNDRV_PCM_FORMAT_S18_3LE:
mr |= ATMEL_I2SC_MR_DATALENGTH_18_BITS | ATMEL_I2SC_MR_IWS;
break;
case SNDRV_PCM_FORMAT_S20_3LE:
mr |= ATMEL_I2SC_MR_DATALENGTH_20_BITS | ATMEL_I2SC_MR_IWS;
break;
case SNDRV_PCM_FORMAT_S24_3LE:
mr |= ATMEL_I2SC_MR_DATALENGTH_24_BITS | ATMEL_I2SC_MR_IWS;
break;
case SNDRV_PCM_FORMAT_S24_LE:
mr |= ATMEL_I2SC_MR_DATALENGTH_24_BITS;
break;
case SNDRV_PCM_FORMAT_S32_LE:
mr |= ATMEL_I2SC_MR_DATALENGTH_32_BITS;
break;
default:
dev_err(dev->dev, "unsupported size/endianness for audio samples\n");
return -EINVAL;
}
return regmap_update_bits(dev->regmap, ATMEL_I2SC_MR, mr_mask, mr);
}
static int atmel_i2s_switch_mck_generator(struct atmel_i2s_dev *dev,
bool enabled)
{
unsigned int mr, mr_mask;
unsigned long gclk_rate;
int ret;
mr = 0;
mr_mask = (ATMEL_I2SC_MR_IMCKDIV_MASK |
ATMEL_I2SC_MR_IMCKFS_MASK |
ATMEL_I2SC_MR_IMCKMODE_MASK);
if (!enabled) {
/* Disable the I2S Master Clock generator. */
ret = regmap_write(dev->regmap, ATMEL_I2SC_CR,
ATMEL_I2SC_CR_CKDIS);
if (ret)
return ret;
/* Reset the I2S Master Clock generator settings. */
ret = regmap_update_bits(dev->regmap, ATMEL_I2SC_MR,
mr_mask, mr);
if (ret)
return ret;
/* Disable/unprepare the PMC generated clock. */
clk_disable_unprepare(dev->gclk);
return 0;
}
if (!dev->gck_param)
return -EINVAL;
gclk_rate = dev->gck_param->mck * (dev->gck_param->imckdiv + 1);
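/*
* Illustrative value (not part of the original source), using the
* 48 kHz entry of gck_params[]: gclk_rate = 12288000 * (7 + 1) =
* 98304000 Hz; the controller then divides by (imckdiv + 1) to
* recover the 12.288 MHz I2S master clock.
*/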
ret = clk_set_rate(dev->gclk, gclk_rate);
if (ret)
return ret;
ret = clk_prepare_enable(dev->gclk);
if (ret)
return ret;
/* Update the Mode Register to generate the I2S Master Clock. */
mr |= ATMEL_I2SC_MR_IMCKDIV(dev->gck_param->imckdiv);
mr |= ATMEL_I2SC_MR_IMCKFS(dev->gck_param->imckfs);
mr |= ATMEL_I2SC_MR_IMCKMODE_I2SMCK;
ret = regmap_update_bits(dev->regmap, ATMEL_I2SC_MR, mr_mask, mr);
if (ret)
return ret;
/* Finally enable the I2S Master Clock generator. */
return regmap_write(dev->regmap, ATMEL_I2SC_CR,
ATMEL_I2SC_CR_CKEN);
}
static int atmel_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
struct atmel_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
bool is_playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
bool is_master, mck_enabled;
unsigned int cr, mr;
int err;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
cr = is_playback ? ATMEL_I2SC_CR_TXEN : ATMEL_I2SC_CR_RXEN;
mck_enabled = true;
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
cr = is_playback ? ATMEL_I2SC_CR_TXDIS : ATMEL_I2SC_CR_RXDIS;
mck_enabled = false;
break;
default:
return -EINVAL;
}
/* Read the Mode Register to retrieve the master/slave state. */
err = regmap_read(dev->regmap, ATMEL_I2SC_MR, &mr);
if (err)
return err;
is_master = (mr & ATMEL_I2SC_MR_MODE_MASK) == ATMEL_I2SC_MR_MODE_MASTER;
/* If master starts, enable the audio clock. */
if (is_master && mck_enabled) {
if (!dev->clk_use_no) {
err = atmel_i2s_switch_mck_generator(dev, true);
if (err)
return err;
}
dev->clk_use_no++;
}
err = regmap_write(dev->regmap, ATMEL_I2SC_CR, cr);
if (err)
return err;
/* If master stops, disable the audio clock. */
if (is_master && !mck_enabled) {
if (dev->clk_use_no == 1) {
err = atmel_i2s_switch_mck_generator(dev, false);
if (err)
return err;
}
dev->clk_use_no--;
}
return err;
}
static int atmel_i2s_dai_probe(struct snd_soc_dai *dai)
{
struct atmel_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
snd_soc_dai_init_dma_data(dai, &dev->playback, &dev->capture);
return 0;
}
static const struct snd_soc_dai_ops atmel_i2s_dai_ops = {
.probe = atmel_i2s_dai_probe,
.prepare = atmel_i2s_prepare,
.trigger = atmel_i2s_trigger,
.hw_params = atmel_i2s_hw_params,
.set_fmt = atmel_i2s_set_dai_fmt,
};
static struct snd_soc_dai_driver atmel_i2s_dai = {
.playback = {
.channels_min = 1,
.channels_max = 2,
.rates = ATMEL_I2S_RATES,
.formats = ATMEL_I2S_FORMATS,
},
.capture = {
.channels_min = 1,
.channels_max = 2,
.rates = ATMEL_I2S_RATES,
.formats = ATMEL_I2S_FORMATS,
},
.ops = &atmel_i2s_dai_ops,
.symmetric_rate = 1,
.symmetric_sample_bits = 1,
};
static const struct snd_soc_component_driver atmel_i2s_component = {
.name = "atmel-i2s",
.legacy_dai_naming = 1,
};
static int atmel_i2s_sama5d2_mck_init(struct atmel_i2s_dev *dev,
struct device_node *np)
{
struct clk *muxclk;
int err;
if (!dev->gclk)
return 0;
/* muxclk is optional, so we return error for probe defer only */
muxclk = devm_clk_get(dev->dev, "muxclk");
if (IS_ERR(muxclk)) {
err = PTR_ERR(muxclk);
if (err == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_dbg(dev->dev,
"failed to get the I2S clock control: %d\n", err);
return 0;
}
return clk_set_parent(muxclk, dev->gclk);
}
static const struct atmel_i2s_caps atmel_i2s_sama5d2_caps = {
.mck_init = atmel_i2s_sama5d2_mck_init,
};
static const struct of_device_id atmel_i2s_dt_ids[] = {
{
.compatible = "atmel,sama5d2-i2s",
.data = (void *)&atmel_i2s_sama5d2_caps,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_i2s_dt_ids);
static int atmel_i2s_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *match;
struct atmel_i2s_dev *dev;
struct resource *mem;
struct regmap *regmap;
void __iomem *base;
int irq;
int err;
unsigned int pcm_flags = 0;
unsigned int version;
/* Get memory for driver data. */
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
/* Get hardware capabilities. */
match = of_match_node(atmel_i2s_dt_ids, np);
if (match)
dev->caps = match->data;
/* Map I/O registers. */
base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(base))
return PTR_ERR(base);
regmap = devm_regmap_init_mmio(&pdev->dev, base,
&atmel_i2s_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
/* Request IRQ. */
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
err = devm_request_irq(&pdev->dev, irq, atmel_i2s_interrupt, 0,
dev_name(&pdev->dev), dev);
if (err)
return err;
/* Get the peripheral clock. */
dev->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(dev->pclk)) {
err = PTR_ERR(dev->pclk);
dev_err(&pdev->dev,
"failed to get the peripheral clock: %d\n", err);
return err;
}
/* Get audio clock to generate the I2S Master Clock (I2S_MCK) */
dev->gclk = devm_clk_get(&pdev->dev, "gclk");
if (IS_ERR(dev->gclk)) {
if (PTR_ERR(dev->gclk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
/* Master Mode not supported */
dev->gclk = NULL;
}
dev->dev = &pdev->dev;
dev->regmap = regmap;
platform_set_drvdata(pdev, dev);
/* Do hardware specific settings to initialize I2S_MCK generator */
if (dev->caps && dev->caps->mck_init) {
err = dev->caps->mck_init(dev, np);
if (err)
return err;
}
/* Enable the peripheral clock. */
err = clk_prepare_enable(dev->pclk);
if (err)
return err;
/* Get IP version. */
regmap_read(dev->regmap, ATMEL_I2SC_VERSION, &version);
dev_info(&pdev->dev, "hw version: %#x\n", version);
/* Enable error interrupts. */
regmap_write(dev->regmap, ATMEL_I2SC_IER,
ATMEL_I2SC_INT_RXOR | ATMEL_I2SC_INT_TXUR);
err = devm_snd_soc_register_component(&pdev->dev,
&atmel_i2s_component,
&atmel_i2s_dai, 1);
if (err) {
dev_err(&pdev->dev, "failed to register DAI: %d\n", err);
clk_disable_unprepare(dev->pclk);
return err;
}
/* Prepare DMA config. */
dev->playback.addr = (dma_addr_t)mem->start + ATMEL_I2SC_THR;
dev->playback.maxburst = 1;
dev->capture.addr = (dma_addr_t)mem->start + ATMEL_I2SC_RHR;
dev->capture.maxburst = 1;
if (of_property_match_string(np, "dma-names", "rx-tx") == 0)
pcm_flags |= SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX;
err = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, pcm_flags);
if (err) {
dev_err(&pdev->dev, "failed to register PCM: %d\n", err);
clk_disable_unprepare(dev->pclk);
return err;
}
return 0;
}
static void atmel_i2s_remove(struct platform_device *pdev)
{
struct atmel_i2s_dev *dev = platform_get_drvdata(pdev);
clk_disable_unprepare(dev->pclk);
}
static struct platform_driver atmel_i2s_driver = {
.driver = {
.name = "atmel_i2s",
.of_match_table = atmel_i2s_dt_ids,
},
.probe = atmel_i2s_probe,
.remove_new = atmel_i2s_remove,
};
module_platform_driver(atmel_i2s_driver);
MODULE_DESCRIPTION("Atmel I2S Controller driver");
MODULE_AUTHOR("Cyrille Pitchen <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | sound/soc/atmel/atmel-i2s.c |
// SPDX-License-Identifier: GPL-2.0
//
// Driver for Microchip S/PDIF RX Controller
//
// Copyright (C) 2020 Microchip Technology Inc. and its subsidiaries
//
// Author: Codrin Ciubotariu <[email protected]>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>
#include <sound/dmaengine_pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
/*
* ---- S/PDIF Receiver Controller Register map ----
*/
#define SPDIFRX_CR 0x00 /* Control Register */
#define SPDIFRX_MR 0x04 /* Mode Register */
#define SPDIFRX_IER 0x10 /* Interrupt Enable Register */
#define SPDIFRX_IDR 0x14 /* Interrupt Disable Register */
#define SPDIFRX_IMR 0x18 /* Interrupt Mask Register */
#define SPDIFRX_ISR 0x1c /* Interrupt Status Register */
#define SPDIFRX_RSR 0x20 /* Status Register */
#define SPDIFRX_RHR 0x24 /* Holding Register */
#define SPDIFRX_CHSR(channel, reg) \
(0x30 + (channel) * 0x30 + (reg) * 4) /* Channel x Status Registers */
#define SPDIFRX_CHUD(channel, reg) \
(0x48 + (channel) * 0x30 + (reg) * 4) /* Channel x User Data Registers */
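/*
* Example offsets produced by the two macros above (illustrative):
* SPDIFRX_CHSR(0, 0) = 0x30, SPDIFRX_CHSR(1, 3) = 0x6c,
* SPDIFRX_CHUD(0, 0) = 0x48, SPDIFRX_CHUD(1, 5) = 0x8c.
*/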
#define SPDIFRX_WPMR 0xE4 /* Write Protection Mode Register */
#define SPDIFRX_WPSR 0xE8 /* Write Protection Status Register */
#define SPDIFRX_VERSION 0xFC /* Version Register */
/*
* ---- Control Register (Write-only) ----
*/
#define SPDIFRX_CR_SWRST BIT(0) /* Software Reset */
/*
* ---- Mode Register (Read/Write) ----
*/
/* Receive Enable */
#define SPDIFRX_MR_RXEN_MASK GENMASK(0, 0)
#define SPDIFRX_MR_RXEN_DISABLE (0 << 0) /* SPDIF Receiver Disabled */
#define SPDIFRX_MR_RXEN_ENABLE (1 << 0) /* SPDIF Receiver Enabled */
/* Validity Bit Mode */
#define SPDIFRX_MR_VBMODE_MASK GENMASK(1, 1)
#define SPDIFRX_MR_VBMODE_ALWAYS_LOAD \
(0 << 1) /* Load sample regardless of validity bit value */
#define SPDIFRX_MR_VBMODE_DISCARD_IF_VB1 \
(1 << 1) /* Load sample only if validity bit is 0 */
/* Data Word Endian Mode */
#define SPDIFRX_MR_ENDIAN_MASK GENMASK(2, 2)
#define SPDIFRX_MR_ENDIAN_LITTLE (0 << 2) /* Little Endian Mode */
#define SPDIFRX_MR_ENDIAN_BIG (1 << 2) /* Big Endian Mode */
/* Parity Bit Mode */
#define SPDIFRX_MR_PBMODE_MASK GENMASK(3, 3)
#define SPDIFRX_MR_PBMODE_PARCHECK (0 << 3) /* Parity Check Enabled */
#define SPDIFRX_MR_PBMODE_NOPARCHECK (1 << 3) /* Parity Check Disabled */
/* Sample Data Width */
#define SPDIFRX_MR_DATAWIDTH_MASK GENMASK(5, 4)
#define SPDIFRX_MR_DATAWIDTH(width) \
(((6 - (width) / 4) << 4) & SPDIFRX_MR_DATAWIDTH_MASK)
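/*
* Illustrative expansions of the macro above (not part of the original
* source): SPDIFRX_MR_DATAWIDTH(16) = (6 - 16/4) << 4 = 0x20,
* SPDIFRX_MR_DATAWIDTH(20) = 0x10, SPDIFRX_MR_DATAWIDTH(24) = 0x00,
* all within the GENMASK(5, 4) field.
*/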
/* Packed Data Mode in Receive Holding Register */
#define SPDIFRX_MR_PACK_MASK GENMASK(7, 7)
#define SPDIFRX_MR_PACK_DISABLED (0 << 7)
#define SPDIFRX_MR_PACK_ENABLED (1 << 7)
/* Start of Block Bit Mode */
#define SPDIFRX_MR_SBMODE_MASK GENMASK(8, 8)
#define SPDIFRX_MR_SBMODE_ALWAYS_LOAD (0 << 8)
#define SPDIFRX_MR_SBMODE_DISCARD (1 << 8)
/* Consecutive Preamble Error Threshold Automatic Restart */
#define SPDIFRX_MR_AUTORST_MASK GENMASK(24, 24)
#define SPDIFRX_MR_AUTORST_NOACTION (0 << 24)
#define SPDIFRX_MR_AUTORST_UNLOCK_ON_PRE_ERR (1 << 24)
/*
* ---- Interrupt Enable/Disable/Mask/Status Register (Write/Read-only) ----
*/
#define SPDIFRX_IR_RXRDY BIT(0)
#define SPDIFRX_IR_LOCKED BIT(1)
#define SPDIFRX_IR_LOSS BIT(2)
#define SPDIFRX_IR_BLOCKEND BIT(3)
#define SPDIFRX_IR_SFE BIT(4)
#define SPDIFRX_IR_PAR_ERR BIT(5)
#define SPDIFRX_IR_OVERRUN BIT(6)
#define SPDIFRX_IR_RXFULL BIT(7)
#define SPDIFRX_IR_CSC(ch) BIT((ch) + 8)
#define SPDIFRX_IR_SECE BIT(10)
#define SPDIFRX_IR_BLOCKST BIT(11)
#define SPDIFRX_IR_NRZ_ERR BIT(12)
#define SPDIFRX_IR_PRE_ERR BIT(13)
#define SPDIFRX_IR_CP_ERR BIT(14)
/*
* ---- Receiver Status Register (Read/Write) ----
*/
/* Enable Status */
#define SPDIFRX_RSR_ULOCK BIT(0)
#define SPDIFRX_RSR_BADF BIT(1)
#define SPDIFRX_RSR_LOWF BIT(2)
#define SPDIFRX_RSR_NOSIGNAL BIT(3)
#define SPDIFRX_RSR_IFS_MASK GENMASK(27, 16)
#define SPDIFRX_RSR_IFS(reg) \
(((reg) & SPDIFRX_RSR_IFS_MASK) >> 16)
/*
* ---- Version Register (Read-only) ----
*/
#define SPDIFRX_VERSION_MASK GENMASK(11, 0)
#define SPDIFRX_VERSION_MFN_MASK GENMASK(18, 16)
#define SPDIFRX_VERSION_MFN(reg) (((reg) & SPDIFRX_VERSION_MFN_MASK) >> 16)
static bool mchp_spdifrx_readable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case SPDIFRX_MR:
case SPDIFRX_IMR:
case SPDIFRX_ISR:
case SPDIFRX_RSR:
case SPDIFRX_CHSR(0, 0):
case SPDIFRX_CHSR(0, 1):
case SPDIFRX_CHSR(0, 2):
case SPDIFRX_CHSR(0, 3):
case SPDIFRX_CHSR(0, 4):
case SPDIFRX_CHSR(0, 5):
case SPDIFRX_CHUD(0, 0):
case SPDIFRX_CHUD(0, 1):
case SPDIFRX_CHUD(0, 2):
case SPDIFRX_CHUD(0, 3):
case SPDIFRX_CHUD(0, 4):
case SPDIFRX_CHUD(0, 5):
case SPDIFRX_CHSR(1, 0):
case SPDIFRX_CHSR(1, 1):
case SPDIFRX_CHSR(1, 2):
case SPDIFRX_CHSR(1, 3):
case SPDIFRX_CHSR(1, 4):
case SPDIFRX_CHSR(1, 5):
case SPDIFRX_CHUD(1, 0):
case SPDIFRX_CHUD(1, 1):
case SPDIFRX_CHUD(1, 2):
case SPDIFRX_CHUD(1, 3):
case SPDIFRX_CHUD(1, 4):
case SPDIFRX_CHUD(1, 5):
case SPDIFRX_WPMR:
case SPDIFRX_WPSR:
case SPDIFRX_VERSION:
return true;
default:
return false;
}
}
static bool mchp_spdifrx_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case SPDIFRX_CR:
case SPDIFRX_MR:
case SPDIFRX_IER:
case SPDIFRX_IDR:
case SPDIFRX_WPMR:
return true;
default:
return false;
}
}
static bool mchp_spdifrx_precious_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case SPDIFRX_ISR:
case SPDIFRX_RHR:
return true;
default:
return false;
}
}
static bool mchp_spdifrx_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case SPDIFRX_IMR:
case SPDIFRX_ISR:
case SPDIFRX_RSR:
case SPDIFRX_CHSR(0, 0):
case SPDIFRX_CHSR(0, 1):
case SPDIFRX_CHSR(0, 2):
case SPDIFRX_CHSR(0, 3):
case SPDIFRX_CHSR(0, 4):
case SPDIFRX_CHSR(0, 5):
case SPDIFRX_CHUD(0, 0):
case SPDIFRX_CHUD(0, 1):
case SPDIFRX_CHUD(0, 2):
case SPDIFRX_CHUD(0, 3):
case SPDIFRX_CHUD(0, 4):
case SPDIFRX_CHUD(0, 5):
case SPDIFRX_CHSR(1, 0):
case SPDIFRX_CHSR(1, 1):
case SPDIFRX_CHSR(1, 2):
case SPDIFRX_CHSR(1, 3):
case SPDIFRX_CHSR(1, 4):
case SPDIFRX_CHSR(1, 5):
case SPDIFRX_CHUD(1, 0):
case SPDIFRX_CHUD(1, 1):
case SPDIFRX_CHUD(1, 2):
case SPDIFRX_CHUD(1, 3):
case SPDIFRX_CHUD(1, 4):
case SPDIFRX_CHUD(1, 5):
case SPDIFRX_VERSION:
return true;
default:
return false;
}
}
static const struct regmap_config mchp_spdifrx_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = SPDIFRX_VERSION,
.readable_reg = mchp_spdifrx_readable_reg,
.writeable_reg = mchp_spdifrx_writeable_reg,
.precious_reg = mchp_spdifrx_precious_reg,
.volatile_reg = mchp_spdifrx_volatile_reg,
.cache_type = REGCACHE_FLAT,
};
#define SPDIFRX_GCLK_RATIO_MIN (12 * 64)
#define SPDIFRX_CS_BITS 192
#define SPDIFRX_UD_BITS 192
#define SPDIFRX_CHANNELS 2
/**
* struct mchp_spdifrx_ch_stat: MCHP SPDIFRX channel status
* @data: channel status bits
* @done: completion to signal channel status bits acquisition done
*/
struct mchp_spdifrx_ch_stat {
unsigned char data[SPDIFRX_CS_BITS / 8];
struct completion done;
};
/**
* struct mchp_spdifrx_user_data: MCHP SPDIFRX user data
* @data: user data bits
* @done: completion to signal user data bits acquisition done
*/
struct mchp_spdifrx_user_data {
unsigned char data[SPDIFRX_UD_BITS / 8];
struct completion done;
};
/**
* struct mchp_spdifrx_mixer_control: MCHP SPDIFRX mixer control data structure
* @ch_stat: array of channel statuses
* @user_data: array of user data
* @ulock: ulock bit status
* @badf: badf bit status
* @signal: signal bit status
*/
struct mchp_spdifrx_mixer_control {
struct mchp_spdifrx_ch_stat ch_stat[SPDIFRX_CHANNELS];
struct mchp_spdifrx_user_data user_data[SPDIFRX_CHANNELS];
bool ulock;
bool badf;
bool signal;
};
/**
* struct mchp_spdifrx_dev: MCHP SPDIFRX device data structure
* @capture: DAI DMA configuration data
* @control: mixer controls
* @mlock: mutex to protect concurrency between configuration and control APIs
* @dev: struct device
* @regmap: regmap for this device
* @pclk: peripheral clock
* @gclk: generic clock
* @trigger_enabled: true if enabled through trigger() ops
*/
struct mchp_spdifrx_dev {
struct snd_dmaengine_dai_dma_data capture;
struct mchp_spdifrx_mixer_control control;
struct mutex mlock;
struct device *dev;
struct regmap *regmap;
struct clk *pclk;
struct clk *gclk;
unsigned int trigger_enabled;
};
static void mchp_spdifrx_channel_status_read(struct mchp_spdifrx_dev *dev,
int channel)
{
struct mchp_spdifrx_mixer_control *ctrl = &dev->control;
u8 *ch_stat = &ctrl->ch_stat[channel].data[0];
u32 val;
int i;
for (i = 0; i < ARRAY_SIZE(ctrl->ch_stat[channel].data) / 4; i++) {
regmap_read(dev->regmap, SPDIFRX_CHSR(channel, i), &val);
*ch_stat++ = val & 0xFF;
*ch_stat++ = (val >> 8) & 0xFF;
*ch_stat++ = (val >> 16) & 0xFF;
*ch_stat++ = (val >> 24) & 0xFF;
}
}
static void mchp_spdifrx_channel_user_data_read(struct mchp_spdifrx_dev *dev,
int channel)
{
struct mchp_spdifrx_mixer_control *ctrl = &dev->control;
u8 *user_data = &ctrl->user_data[channel].data[0];
u32 val;
int i;
for (i = 0; i < ARRAY_SIZE(ctrl->user_data[channel].data) / 4; i++) {
regmap_read(dev->regmap, SPDIFRX_CHUD(channel, i), &val);
*user_data++ = val & 0xFF;
*user_data++ = (val >> 8) & 0xFF;
*user_data++ = (val >> 16) & 0xFF;
*user_data++ = (val >> 24) & 0xFF;
}
}
static irqreturn_t mchp_spdif_interrupt(int irq, void *dev_id)
{
struct mchp_spdifrx_dev *dev = dev_id;
struct mchp_spdifrx_mixer_control *ctrl = &dev->control;
u32 sr, imr, pending;
irqreturn_t ret = IRQ_NONE;
int ch;
regmap_read(dev->regmap, SPDIFRX_ISR, &sr);
regmap_read(dev->regmap, SPDIFRX_IMR, &imr);
pending = sr & imr;
dev_dbg(dev->dev, "ISR: %#x, IMR: %#x, pending: %#x\n", sr, imr,
pending);
if (!pending)
return IRQ_NONE;
if (pending & SPDIFRX_IR_BLOCKEND) {
for (ch = 0; ch < SPDIFRX_CHANNELS; ch++) {
mchp_spdifrx_channel_user_data_read(dev, ch);
complete(&ctrl->user_data[ch].done);
}
regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_BLOCKEND);
ret = IRQ_HANDLED;
}
for (ch = 0; ch < SPDIFRX_CHANNELS; ch++) {
if (pending & SPDIFRX_IR_CSC(ch)) {
mchp_spdifrx_channel_status_read(dev, ch);
complete(&ctrl->ch_stat[ch].done);
regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_CSC(ch));
ret = IRQ_HANDLED;
}
}
if (pending & SPDIFRX_IR_OVERRUN) {
dev_warn(dev->dev, "Overrun detected\n");
ret = IRQ_HANDLED;
}
return ret;
}
static int mchp_spdifrx_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai);
int ret = 0;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
mutex_lock(&dev->mlock);
/* Enable overrun interrupts */
regmap_write(dev->regmap, SPDIFRX_IER, SPDIFRX_IR_OVERRUN);
/* Enable receiver. */
regmap_update_bits(dev->regmap, SPDIFRX_MR, SPDIFRX_MR_RXEN_MASK,
SPDIFRX_MR_RXEN_ENABLE);
dev->trigger_enabled = true;
mutex_unlock(&dev->mlock);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
mutex_lock(&dev->mlock);
/* Disable overrun interrupts */
regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_OVERRUN);
/* Disable receiver. */
regmap_update_bits(dev->regmap, SPDIFRX_MR, SPDIFRX_MR_RXEN_MASK,
SPDIFRX_MR_RXEN_DISABLE);
dev->trigger_enabled = false;
mutex_unlock(&dev->mlock);
break;
default:
ret = -EINVAL;
}
return ret;
}
static int mchp_spdifrx_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai);
u32 mr = 0;
int ret;
dev_dbg(dev->dev, "%s() rate=%u format=%#x width=%u channels=%u\n",
__func__, params_rate(params), params_format(params),
params_width(params), params_channels(params));
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
dev_err(dev->dev, "Playback is not supported\n");
return -EINVAL;
}
if (params_channels(params) != SPDIFRX_CHANNELS) {
dev_err(dev->dev, "unsupported number of channels: %d\n",
params_channels(params));
return -EINVAL;
}
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_BE:
case SNDRV_PCM_FORMAT_S20_3BE:
case SNDRV_PCM_FORMAT_S24_3BE:
case SNDRV_PCM_FORMAT_S24_BE:
mr |= SPDIFRX_MR_ENDIAN_BIG;
fallthrough;
case SNDRV_PCM_FORMAT_S16_LE:
case SNDRV_PCM_FORMAT_S20_3LE:
case SNDRV_PCM_FORMAT_S24_3LE:
case SNDRV_PCM_FORMAT_S24_LE:
mr |= SPDIFRX_MR_DATAWIDTH(params_width(params));
break;
default:
dev_err(dev->dev, "unsupported PCM format: %d\n",
params_format(params));
return -EINVAL;
}
mutex_lock(&dev->mlock);
if (dev->trigger_enabled) {
dev_err(dev->dev, "PCM already running\n");
ret = -EBUSY;
goto unlock;
}
/* GCLK is enabled by runtime PM. */
clk_disable_unprepare(dev->gclk);
ret = clk_set_min_rate(dev->gclk, params_rate(params) *
SPDIFRX_GCLK_RATIO_MIN + 1);
if (ret) {
dev_err(dev->dev,
"unable to set gclk min rate: rate %u * ratio %u + 1\n",
params_rate(params), SPDIFRX_GCLK_RATIO_MIN);
/* Restore runtime PM state. */
clk_prepare_enable(dev->gclk);
goto unlock;
}
ret = clk_prepare_enable(dev->gclk);
if (ret) {
dev_err(dev->dev, "unable to enable gclk: %d\n", ret);
goto unlock;
}
dev_dbg(dev->dev, "GCLK range min set to %d\n",
params_rate(params) * SPDIFRX_GCLK_RATIO_MIN + 1);
ret = regmap_write(dev->regmap, SPDIFRX_MR, mr);
unlock:
mutex_unlock(&dev->mlock);
return ret;
}
#define MCHP_SPDIF_RATES SNDRV_PCM_RATE_8000_192000
#define MCHP_SPDIF_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S16_BE | \
SNDRV_PCM_FMTBIT_S20_3LE | \
SNDRV_PCM_FMTBIT_S20_3BE | \
SNDRV_PCM_FMTBIT_S24_3LE | \
SNDRV_PCM_FMTBIT_S24_3BE | \
SNDRV_PCM_FMTBIT_S24_LE | \
SNDRV_PCM_FMTBIT_S24_BE \
)
static int mchp_spdifrx_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
uinfo->count = 1;
return 0;
}
static int mchp_spdifrx_cs_get(struct mchp_spdifrx_dev *dev,
int channel,
struct snd_ctl_elem_value *uvalue)
{
struct mchp_spdifrx_mixer_control *ctrl = &dev->control;
struct mchp_spdifrx_ch_stat *ch_stat = &ctrl->ch_stat[channel];
int ret = 0;
mutex_lock(&dev->mlock);
ret = pm_runtime_resume_and_get(dev->dev);
if (ret < 0)
goto unlock;
/*
* We may reach this point with both clocks enabled but the receiver
* still disabled. To avoid waiting for a completion that can only
* time out, check dev->trigger_enabled first.
*
* To retrieve the data:
* - if the receiver is enabled, the CSC IRQ updates the software
* cache (ch_stat->data) for us
* - otherwise we simply refresh the software cache here with the
* latest available information and return it; in this case no spin
* locking is needed because the IRQ is disabled and will not be
* raised from anywhere else.
*/
if (dev->trigger_enabled) {
reinit_completion(&ch_stat->done);
regmap_write(dev->regmap, SPDIFRX_IER, SPDIFRX_IR_CSC(channel));
/* Check for new data available */
ret = wait_for_completion_interruptible_timeout(&ch_stat->done,
msecs_to_jiffies(100));
/* Valid stream might not be present */
if (ret <= 0) {
dev_dbg(dev->dev, "channel status for channel %d timeout\n",
channel);
regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_CSC(channel));
ret = ret ? : -ETIMEDOUT;
goto pm_runtime_put;
} else {
ret = 0;
}
} else {
/* Update software cache with latest channel status. */
mchp_spdifrx_channel_status_read(dev, channel);
}
memcpy(uvalue->value.iec958.status, ch_stat->data,
sizeof(ch_stat->data));
pm_runtime_put:
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
unlock:
mutex_unlock(&dev->mlock);
return ret;
}
static int mchp_spdifrx_cs1_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *uvalue)
{
struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai);
return mchp_spdifrx_cs_get(dev, 0, uvalue);
}
static int mchp_spdifrx_cs2_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *uvalue)
{
struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai);
return mchp_spdifrx_cs_get(dev, 1, uvalue);
}
static int mchp_spdifrx_cs_mask(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *uvalue)
{
memset(uvalue->value.iec958.status, 0xff,
sizeof(uvalue->value.iec958.status));
return 0;
}
static int mchp_spdifrx_subcode_ch_get(struct mchp_spdifrx_dev *dev,
int channel,
struct snd_ctl_elem_value *uvalue)
{
struct mchp_spdifrx_mixer_control *ctrl = &dev->control;
struct mchp_spdifrx_user_data *user_data = &ctrl->user_data[channel];
int ret = 0;
mutex_lock(&dev->mlock);
ret = pm_runtime_resume_and_get(dev->dev);
if (ret < 0)
goto unlock;
/*
* We may reach this point with both clocks enabled but the receiver
* still disabled. To avoid waiting for a completion that can only
* time out, check the dev->trigger_enabled flag first.
*
* To retrieve the data:
* - if the receiver is enabled, wait for the blockend IRQ to read the
* data and update the software caches for us
* - otherwise reading the SPDIFRX_CHUD() registers is enough.
*/
if (dev->trigger_enabled) {
reinit_completion(&user_data->done);
regmap_write(dev->regmap, SPDIFRX_IER, SPDIFRX_IR_BLOCKEND);
ret = wait_for_completion_interruptible_timeout(&user_data->done,
msecs_to_jiffies(100));
/* Valid stream might not be present. */
if (ret <= 0) {
dev_dbg(dev->dev, "user data for channel %d timeout\n",
channel);
regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_BLOCKEND);
ret = ret ? : -ETIMEDOUT;
goto pm_runtime_put;
} else {
ret = 0;
}
} else {
/* Update software cache with last available data. */
mchp_spdifrx_channel_user_data_read(dev, channel);
}
memcpy(uvalue->value.iec958.subcode, user_data->data,
sizeof(user_data->data));
pm_runtime_put:
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
unlock:
mutex_unlock(&dev->mlock);
return ret;
}
static int mchp_spdifrx_subcode_ch1_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *uvalue)
{
struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai);
return mchp_spdifrx_subcode_ch_get(dev, 0, uvalue);
}
static int mchp_spdifrx_subcode_ch2_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *uvalue)
{
struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai);
return mchp_spdifrx_subcode_ch_get(dev, 1, uvalue);
}
static int mchp_spdifrx_boolean_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 1;
return 0;
}
static int mchp_spdifrx_ulock_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *uvalue)
{
struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai);
struct mchp_spdifrx_mixer_control *ctrl = &dev->control;
u32 val;
int ret;
bool ulock_old = ctrl->ulock;
mutex_lock(&dev->mlock);
ret = pm_runtime_resume_and_get(dev->dev);
if (ret < 0)
goto unlock;
/*
* RSR.ULOCK reports a wrong value when both pclk and gclk are enabled
* but the receiver is disabled. Take dev->trigger_enabled into account
* here so that the real status is reported.
*/
if (dev->trigger_enabled) {
regmap_read(dev->regmap, SPDIFRX_RSR, &val);
ctrl->ulock = !(val & SPDIFRX_RSR_ULOCK);
} else {
ctrl->ulock = 0;
}
uvalue->value.integer.value[0] = ctrl->ulock;
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
unlock:
mutex_unlock(&dev->mlock);
return ulock_old != ctrl->ulock;
}
static int mchp_spdifrx_badf_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *uvalue)
{
struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai);
struct mchp_spdifrx_mixer_control *ctrl = &dev->control;
u32 val;
int ret;
bool badf_old = ctrl->badf;
mutex_lock(&dev->mlock);
ret = pm_runtime_resume_and_get(dev->dev);
if (ret < 0)
goto unlock;
/*
* RSR.ULOCK reports a wrong value when both pclk and gclk are enabled
* but the receiver is disabled. Take dev->trigger_enabled into account
* here so that the real status is reported.
*/
if (dev->trigger_enabled) {
regmap_read(dev->regmap, SPDIFRX_RSR, &val);
ctrl->badf = !!(val & SPDIFRX_RSR_BADF);
} else {
ctrl->badf = 0;
}
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
unlock:
mutex_unlock(&dev->mlock);
uvalue->value.integer.value[0] = ctrl->badf;
return badf_old != ctrl->badf;
}
static int mchp_spdifrx_signal_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *uvalue)
{
struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai);
struct mchp_spdifrx_mixer_control *ctrl = &dev->control;
u32 val = ~0U, loops = 10;
int ret;
bool signal_old = ctrl->signal;
mutex_lock(&dev->mlock);
ret = pm_runtime_resume_and_get(dev->dev);
if (ret < 0)
goto unlock;
/*
* To read the signal status the receiver must be enabled. It may
* already have been enabled from the trigger() callback, so take care
* not to disable a receiver that is currently running.
*/
if (!dev->trigger_enabled) {
regmap_update_bits(dev->regmap, SPDIFRX_MR, SPDIFRX_MR_RXEN_MASK,
SPDIFRX_MR_RXEN_ENABLE);
/* Wait for RSR.ULOCK bit. */
while (--loops) {
regmap_read(dev->regmap, SPDIFRX_RSR, &val);
if (!(val & SPDIFRX_RSR_ULOCK))
break;
usleep_range(100, 150);
}
regmap_update_bits(dev->regmap, SPDIFRX_MR, SPDIFRX_MR_RXEN_MASK,
SPDIFRX_MR_RXEN_DISABLE);
} else {
regmap_read(dev->regmap, SPDIFRX_RSR, &val);
}
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
unlock:
mutex_unlock(&dev->mlock);
if (!(val & SPDIFRX_RSR_ULOCK))
ctrl->signal = !(val & SPDIFRX_RSR_NOSIGNAL);
else
ctrl->signal = 0;
uvalue->value.integer.value[0] = ctrl->signal;
return signal_old != ctrl->signal;
}
static int mchp_spdifrx_rate_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 192000;
return 0;
}
static int mchp_spdifrx_rate_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai);
unsigned long rate;
u32 val;
int ret;
mutex_lock(&dev->mlock);
ret = pm_runtime_resume_and_get(dev->dev);
if (ret < 0)
goto unlock;
/*
* RSR.ULOCK reports a wrong value when both pclk and gclk are enabled
* but the receiver is disabled. Take dev->trigger_enabled into account
* here so that the real status is reported.
*/
if (dev->trigger_enabled) {
regmap_read(dev->regmap, SPDIFRX_RSR, &val);
/* If the receiver is not locked, IFS data is invalid. */
if (val & SPDIFRX_RSR_ULOCK || !(val & SPDIFRX_RSR_IFS_MASK)) {
ucontrol->value.integer.value[0] = 0;
goto pm_runtime_put;
}
} else {
/* Receiver is not locked, IFS data is invalid. */
ucontrol->value.integer.value[0] = 0;
goto pm_runtime_put;
}
rate = clk_get_rate(dev->gclk);
ucontrol->value.integer.value[0] = rate / (32 * SPDIFRX_RSR_IFS(val));
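/*
* Illustrative numbers for the formula above: with gclk running at
* 12288000 Hz and RSR.IFS = 8, the reported rate is
* 12288000 / (32 * 8) = 48000 Hz.
*/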
pm_runtime_put:
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
unlock:
mutex_unlock(&dev->mlock);
return ret;
}
static struct snd_kcontrol_new mchp_spdifrx_ctrls[] = {
/* Channel status controller */
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("", CAPTURE, DEFAULT)
" Channel 1",
.access = SNDRV_CTL_ELEM_ACCESS_READ |
SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = mchp_spdifrx_info,
.get = mchp_spdifrx_cs1_get,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("", CAPTURE, DEFAULT)
" Channel 2",
.access = SNDRV_CTL_ELEM_ACCESS_READ |
SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = mchp_spdifrx_info,
.get = mchp_spdifrx_cs2_get,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("", CAPTURE, MASK),
.access = SNDRV_CTL_ELEM_ACCESS_READ,
.info = mchp_spdifrx_info,
.get = mchp_spdifrx_cs_mask,
},
/* User bits controller */
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = "IEC958 Subcode Capture Default Channel 1",
.access = SNDRV_CTL_ELEM_ACCESS_READ |
SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = mchp_spdifrx_info,
.get = mchp_spdifrx_subcode_ch1_get,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = "IEC958 Subcode Capture Default Channel 2",
.access = SNDRV_CTL_ELEM_ACCESS_READ |
SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = mchp_spdifrx_info,
.get = mchp_spdifrx_subcode_ch2_get,
},
/* Lock status */
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("", CAPTURE, NONE) "Unlocked",
.access = SNDRV_CTL_ELEM_ACCESS_READ |
SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = mchp_spdifrx_boolean_info,
.get = mchp_spdifrx_ulock_get,
},
/* Bad format */
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("", CAPTURE, NONE)"Bad Format",
.access = SNDRV_CTL_ELEM_ACCESS_READ |
SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = mchp_spdifrx_boolean_info,
.get = mchp_spdifrx_badf_get,
},
/* Signal */
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("", CAPTURE, NONE) "Signal",
.access = SNDRV_CTL_ELEM_ACCESS_READ |
SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = mchp_spdifrx_boolean_info,
.get = mchp_spdifrx_signal_get,
},
/* Sampling rate */
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("", CAPTURE, NONE) "Rate",
.access = SNDRV_CTL_ELEM_ACCESS_READ |
SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = mchp_spdifrx_rate_info,
.get = mchp_spdifrx_rate_get,
},
};
static int mchp_spdifrx_dai_probe(struct snd_soc_dai *dai)
{
struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai);
struct mchp_spdifrx_mixer_control *ctrl = &dev->control;
int ch;
snd_soc_dai_init_dma_data(dai, NULL, &dev->capture);
/* Software reset the IP */
regmap_write(dev->regmap, SPDIFRX_CR, SPDIFRX_CR_SWRST);
/* Default configuration */
regmap_write(dev->regmap, SPDIFRX_MR,
SPDIFRX_MR_VBMODE_DISCARD_IF_VB1 |
SPDIFRX_MR_SBMODE_DISCARD |
SPDIFRX_MR_AUTORST_NOACTION |
SPDIFRX_MR_PACK_DISABLED);
for (ch = 0; ch < SPDIFRX_CHANNELS; ch++) {
init_completion(&ctrl->ch_stat[ch].done);
init_completion(&ctrl->user_data[ch].done);
}
/* Add controls */
snd_soc_add_dai_controls(dai, mchp_spdifrx_ctrls,
ARRAY_SIZE(mchp_spdifrx_ctrls));
return 0;
}
static int mchp_spdifrx_dai_remove(struct snd_soc_dai *dai)
{
struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai);
/* Disable interrupts */
regmap_write(dev->regmap, SPDIFRX_IDR, GENMASK(14, 0));
return 0;
}
static const struct snd_soc_dai_ops mchp_spdifrx_dai_ops = {
.probe = mchp_spdifrx_dai_probe,
.remove = mchp_spdifrx_dai_remove,
.trigger = mchp_spdifrx_trigger,
.hw_params = mchp_spdifrx_hw_params,
};
static struct snd_soc_dai_driver mchp_spdifrx_dai = {
.name = "mchp-spdifrx",
.capture = {
.stream_name = "S/PDIF Capture",
.channels_min = SPDIFRX_CHANNELS,
.channels_max = SPDIFRX_CHANNELS,
.rates = MCHP_SPDIF_RATES,
.formats = MCHP_SPDIF_FORMATS,
},
.ops = &mchp_spdifrx_dai_ops,
};
static const struct snd_soc_component_driver mchp_spdifrx_component = {
.name = "mchp-spdifrx",
.legacy_dai_naming = 1,
};
static const struct of_device_id mchp_spdifrx_dt_ids[] = {
{
.compatible = "microchip,sama7g5-spdifrx",
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mchp_spdifrx_dt_ids);
static int mchp_spdifrx_runtime_suspend(struct device *dev)
{
struct mchp_spdifrx_dev *spdifrx = dev_get_drvdata(dev);
regcache_cache_only(spdifrx->regmap, true);
clk_disable_unprepare(spdifrx->gclk);
clk_disable_unprepare(spdifrx->pclk);
return 0;
}
static int mchp_spdifrx_runtime_resume(struct device *dev)
{
struct mchp_spdifrx_dev *spdifrx = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(spdifrx->pclk);
if (ret)
return ret;
ret = clk_prepare_enable(spdifrx->gclk);
if (ret)
goto disable_pclk;
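/* Clocks are on again: leave cache-only mode and resync the register cache. */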
regcache_cache_only(spdifrx->regmap, false);
regcache_mark_dirty(spdifrx->regmap);
ret = regcache_sync(spdifrx->regmap);
if (ret) {
regcache_cache_only(spdifrx->regmap, true);
clk_disable_unprepare(spdifrx->gclk);
disable_pclk:
clk_disable_unprepare(spdifrx->pclk);
}
return ret;
}
static const struct dev_pm_ops mchp_spdifrx_pm_ops = {
RUNTIME_PM_OPS(mchp_spdifrx_runtime_suspend, mchp_spdifrx_runtime_resume,
NULL)
};
static int mchp_spdifrx_probe(struct platform_device *pdev)
{
struct mchp_spdifrx_dev *dev;
struct resource *mem;
struct regmap *regmap;
void __iomem *base;
int irq;
int err;
u32 vers;
/* Get memory for driver data. */
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
/* Map I/O registers. */
base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(base))
return PTR_ERR(base);
regmap = devm_regmap_init_mmio(&pdev->dev, base,
&mchp_spdifrx_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
/* Request IRQ. */
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
err = devm_request_irq(&pdev->dev, irq, mchp_spdif_interrupt, 0,
dev_name(&pdev->dev), dev);
if (err)
return err;
/* Get the peripheral clock */
dev->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(dev->pclk)) {
err = PTR_ERR(dev->pclk);
dev_err(&pdev->dev, "failed to get the peripheral clock: %d\n",
err);
return err;
}
/* Get the generated clock */
dev->gclk = devm_clk_get(&pdev->dev, "gclk");
if (IS_ERR(dev->gclk)) {
err = PTR_ERR(dev->gclk);
dev_err(&pdev->dev,
"failed to get the PMC generated clock: %d\n", err);
return err;
}
/*
* The signal control needs a valid rate on gclk. hw_params() configures
* it properly, but requesting the signal before any hw_params() has been
* called leads to an invalid value being returned. Thus, configure gclk
* at a valid rate here, during initialization, to simplify the control
* path.
*/
clk_set_min_rate(dev->gclk, 48000 * SPDIFRX_GCLK_RATIO_MIN + 1);
mutex_init(&dev->mlock);
dev->dev = &pdev->dev;
dev->regmap = regmap;
platform_set_drvdata(pdev, dev);
pm_runtime_enable(dev->dev);
if (!pm_runtime_enabled(dev->dev)) {
err = mchp_spdifrx_runtime_resume(dev->dev);
if (err)
goto pm_runtime_disable;
}
dev->capture.addr = (dma_addr_t)mem->start + SPDIFRX_RHR;
dev->capture.maxburst = 1;
err = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
if (err) {
dev_err(&pdev->dev, "failed to register PCM: %d\n", err);
goto pm_runtime_suspend;
}
err = devm_snd_soc_register_component(&pdev->dev,
&mchp_spdifrx_component,
&mchp_spdifrx_dai, 1);
if (err) {
dev_err(&pdev->dev, "fail to register dai\n");
goto pm_runtime_suspend;
}
regmap_read(regmap, SPDIFRX_VERSION, &vers);
dev_info(&pdev->dev, "hw version: %#lx\n", vers & SPDIFRX_VERSION_MASK);
return 0;
pm_runtime_suspend:
if (!pm_runtime_status_suspended(dev->dev))
mchp_spdifrx_runtime_suspend(dev->dev);
pm_runtime_disable:
pm_runtime_disable(dev->dev);
return err;
}
static void mchp_spdifrx_remove(struct platform_device *pdev)
{
struct mchp_spdifrx_dev *dev = platform_get_drvdata(pdev);
pm_runtime_disable(dev->dev);
if (!pm_runtime_status_suspended(dev->dev))
mchp_spdifrx_runtime_suspend(dev->dev);
}
static struct platform_driver mchp_spdifrx_driver = {
.probe = mchp_spdifrx_probe,
.remove_new = mchp_spdifrx_remove,
.driver = {
.name = "mchp_spdifrx",
.of_match_table = mchp_spdifrx_dt_ids,
.pm = pm_ptr(&mchp_spdifrx_pm_ops),
},
};
module_platform_driver(mchp_spdifrx_driver);
MODULE_AUTHOR("Codrin Ciubotariu <[email protected]>");
MODULE_DESCRIPTION("Microchip S/PDIF RX Controller Driver");
MODULE_LICENSE("GPL v2");
| linux-master | sound/soc/atmel/mchp-spdifrx.c |
// SPDX-License-Identifier: GPL-2.0
//
// TSE-850 audio - ASoC driver for the Axentia TSE-850 with a PCM5142 codec
//
// Copyright (C) 2016 Axentia Technologies AB
//
// Author: Peter Rosin <[email protected]>
//
// loop1 relays
// IN1 +---o +------------+ o---+ OUT1
// \ /
// + +
// | / |
// +--o +--. |
// | add | |
// | V |
// | .---. |
// DAC +----------->|Sum|---+
// | '---' |
// | |
// + +
//
// IN2 +---o--+------------+--o---+ OUT2
// loop2 relays
//
// The 'loop1' gpio pin controls two relays, which are either in loop
// position, meaning that input and output are directly connected, or
// they are in mixer position, meaning that the signal is passed through
// the 'Sum' mixer. Similarly for 'loop2'.
//
// In the above, the 'loop1' relays are inactive, thus feeding IN1 to the
// mixer (if 'add' is active) and feeding the mixer output to OUT1. The
// 'loop2' relays are active, cutting the TSE-850 out of the channel 2 path.
// IN1, IN2, OUT1 and OUT2 are TSE-850 connectors and DAC is the PCB name
// of the (filtered) output from the PCM5142 codec.
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <sound/soc.h>
#include <sound/pcm_params.h>
struct tse850_priv {
struct gpio_desc *add;
struct gpio_desc *loop1;
struct gpio_desc *loop2;
struct regulator *ana;
int add_cache;
int loop1_cache;
int loop2_cache;
};
static int tse850_get_mux1(struct snd_kcontrol *kctrl,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
struct snd_soc_card *card = dapm->card;
struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
ucontrol->value.enumerated.item[0] = tse850->loop1_cache;
return 0;
}
static int tse850_put_mux1(struct snd_kcontrol *kctrl,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
struct snd_soc_card *card = dapm->card;
struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
struct soc_enum *e = (struct soc_enum *)kctrl->private_value;
unsigned int val = ucontrol->value.enumerated.item[0];
if (val >= e->items)
return -EINVAL;
gpiod_set_value_cansleep(tse850->loop1, val);
tse850->loop1_cache = val;
return snd_soc_dapm_put_enum_double(kctrl, ucontrol);
}
static int tse850_get_mux2(struct snd_kcontrol *kctrl,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
struct snd_soc_card *card = dapm->card;
struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
ucontrol->value.enumerated.item[0] = tse850->loop2_cache;
return 0;
}
static int tse850_put_mux2(struct snd_kcontrol *kctrl,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
struct snd_soc_card *card = dapm->card;
struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
struct soc_enum *e = (struct soc_enum *)kctrl->private_value;
unsigned int val = ucontrol->value.enumerated.item[0];
if (val >= e->items)
return -EINVAL;
gpiod_set_value_cansleep(tse850->loop2, val);
tse850->loop2_cache = val;
return snd_soc_dapm_put_enum_double(kctrl, ucontrol);
}
static int tse850_get_mix(struct snd_kcontrol *kctrl,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
struct snd_soc_card *card = dapm->card;
struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
ucontrol->value.enumerated.item[0] = tse850->add_cache;
return 0;
}
static int tse850_put_mix(struct snd_kcontrol *kctrl,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
struct snd_soc_card *card = dapm->card;
struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
int connect = !!ucontrol->value.integer.value[0];
if (tse850->add_cache == connect)
return 0;
/*
* Hmmm, this gpiod_set_value_cansleep call should probably happen
* inside snd_soc_dapm_mixer_update_power in the loop.
*/
gpiod_set_value_cansleep(tse850->add, connect);
tse850->add_cache = connect;
snd_soc_dapm_mixer_update_power(dapm, kctrl, connect, NULL);
return 1;
}
static int tse850_get_ana(struct snd_kcontrol *kctrl,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
struct snd_soc_card *card = dapm->card;
struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
int ret;
ret = regulator_get_voltage(tse850->ana);
if (ret < 0)
return ret;
/*
* Map regulator output values like so:
* -11.5V to "Low" (enum 0)
* 11.5V-12.5V to "12V" (enum 1)
* 12.5V-13.5V to "13V" (enum 2)
* ...
* 18.5V-19.5V to "19V" (enum 8)
* 19.5V- to "20V" (enum 9)
*/
if (ret < 11000000)
ret = 11000000;
else if (ret > 20000000)
ret = 20000000;
ret -= 11000000;
ret = (ret + 500000) / 1000000;
ucontrol->value.enumerated.item[0] = ret;
return 0;
}
static int tse850_put_ana(struct snd_kcontrol *kctrl,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
struct snd_soc_card *card = dapm->card;
struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
struct soc_enum *e = (struct soc_enum *)kctrl->private_value;
unsigned int uV = ucontrol->value.enumerated.item[0];
int ret;
if (uV >= e->items)
return -EINVAL;
/*
* Map enum zero (Low) to 2 volts on the regulator, do this since
* the ana regulator is supplied by the system 12V voltage and
* requesting anything below the system voltage causes the system
* voltage to be passed through the regulator. Also, the ana
* regulator induces noise when requesting voltages near the
* system voltage. So, by mapping Low to 2V, that noise is
* eliminated when all that is needed is 12V (the system voltage).
*/
if (uV)
uV = 11000000 + (1000000 * uV);
else
uV = 2000000;
ret = regulator_set_voltage(tse850->ana, uV, uV);
if (ret < 0)
return ret;
return snd_soc_dapm_put_enum_double(kctrl, ucontrol);
}
static const char * const mux_text[] = { "Mixer", "Loop" };
static const struct soc_enum mux_enum =
SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(mux_text), mux_text);
static const struct snd_kcontrol_new mux1 =
SOC_DAPM_ENUM_EXT("MUX1", mux_enum, tse850_get_mux1, tse850_put_mux1);
static const struct snd_kcontrol_new mux2 =
SOC_DAPM_ENUM_EXT("MUX2", mux_enum, tse850_get_mux2, tse850_put_mux2);
#define TSE850_DAPM_SINGLE_EXT(xname, reg, shift, max, invert, xget, xput) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_volsw, \
.get = xget, \
.put = xput, \
.private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
static const struct snd_kcontrol_new mix[] = {
TSE850_DAPM_SINGLE_EXT("IN Switch", SND_SOC_NOPM, 0, 1, 0,
tse850_get_mix, tse850_put_mix),
};
static const char * const ana_text[] = {
"Low", "12V", "13V", "14V", "15V", "16V", "17V", "18V", "19V", "20V"
};
static const struct soc_enum ana_enum =
SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(ana_text), ana_text);
static const struct snd_kcontrol_new out =
SOC_DAPM_ENUM_EXT("ANA", ana_enum, tse850_get_ana, tse850_put_ana);
static const struct snd_soc_dapm_widget tse850_dapm_widgets[] = {
SND_SOC_DAPM_LINE("OUT1", NULL),
SND_SOC_DAPM_LINE("OUT2", NULL),
SND_SOC_DAPM_LINE("IN1", NULL),
SND_SOC_DAPM_LINE("IN2", NULL),
SND_SOC_DAPM_INPUT("DAC"),
SND_SOC_DAPM_AIF_IN("AIFINL", "Playback", 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("AIFINR", "Playback", 1, SND_SOC_NOPM, 0, 0),
SOC_MIXER_ARRAY("MIX", SND_SOC_NOPM, 0, 0, mix),
SND_SOC_DAPM_MUX("MUX1", SND_SOC_NOPM, 0, 0, &mux1),
SND_SOC_DAPM_MUX("MUX2", SND_SOC_NOPM, 0, 0, &mux2),
SND_SOC_DAPM_OUT_DRV("OUT", SND_SOC_NOPM, 0, 0, &out, 1),
};
/*
* These connections are not entirely correct, since both IN1 and IN2
* are always fed to MIX (if the "IN switch" is set so), i.e. without
* regard to the loop1 and loop2 relays that according to this only
* control MUX1 and MUX2 but in fact also control how the input signals
* are routed.
* But, 1) I don't know how to do it right, and 2) it doesn't seem to
* matter in practice since nothing is powered in those sections anyway.
*/
static const struct snd_soc_dapm_route tse850_intercon[] = {
{ "OUT1", NULL, "MUX1" },
{ "OUT2", NULL, "MUX2" },
{ "MUX1", "Loop", "IN1" },
{ "MUX1", "Mixer", "OUT" },
{ "MUX2", "Loop", "IN2" },
{ "MUX2", "Mixer", "OUT" },
{ "OUT", NULL, "MIX" },
{ "MIX", NULL, "DAC" },
{ "MIX", "IN Switch", "IN1" },
{ "MIX", "IN Switch", "IN2" },
/* connect board input to the codec left channel output pin */
{ "DAC", NULL, "OUTL" },
};
SND_SOC_DAILINK_DEFS(pcm,
DAILINK_COMP_ARRAY(COMP_EMPTY()),
DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "pcm512x-hifi")),
DAILINK_COMP_ARRAY(COMP_EMPTY()));
static struct snd_soc_dai_link tse850_dailink = {
.name = "TSE-850",
.stream_name = "TSE-850-PCM",
.dai_fmt = SND_SOC_DAIFMT_I2S
| SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBP_CFC,
SND_SOC_DAILINK_REG(pcm),
};
static struct snd_soc_card tse850_card = {
.name = "TSE-850-ASoC",
.owner = THIS_MODULE,
.dai_link = &tse850_dailink,
.num_links = 1,
.dapm_widgets = tse850_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(tse850_dapm_widgets),
.dapm_routes = tse850_intercon,
.num_dapm_routes = ARRAY_SIZE(tse850_intercon),
.fully_routed = true,
};
static int tse850_dt_init(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *codec_np, *cpu_np;
struct snd_soc_dai_link *dailink = &tse850_dailink;
if (!np) {
dev_err(&pdev->dev, "only device tree supported\n");
return -EINVAL;
}
cpu_np = of_parse_phandle(np, "axentia,cpu-dai", 0);
if (!cpu_np) {
dev_err(&pdev->dev, "failed to get cpu dai\n");
return -EINVAL;
}
dailink->cpus->of_node = cpu_np;
dailink->platforms->of_node = cpu_np;
of_node_put(cpu_np);
codec_np = of_parse_phandle(np, "axentia,audio-codec", 0);
if (!codec_np) {
dev_err(&pdev->dev, "failed to get codec info\n");
return -EINVAL;
}
dailink->codecs->of_node = codec_np;
of_node_put(codec_np);
return 0;
}
static int tse850_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &tse850_card;
struct device *dev = card->dev = &pdev->dev;
struct tse850_priv *tse850;
int ret;
tse850 = devm_kzalloc(dev, sizeof(*tse850), GFP_KERNEL);
if (!tse850)
return -ENOMEM;
snd_soc_card_set_drvdata(card, tse850);
ret = tse850_dt_init(pdev);
if (ret) {
dev_err(dev, "failed to init dt info\n");
return ret;
}
tse850->add = devm_gpiod_get(dev, "axentia,add", GPIOD_OUT_HIGH);
if (IS_ERR(tse850->add))
return dev_err_probe(dev, PTR_ERR(tse850->add),
"failed to get 'add' gpio\n");
tse850->add_cache = 1;
tse850->loop1 = devm_gpiod_get(dev, "axentia,loop1", GPIOD_OUT_HIGH);
if (IS_ERR(tse850->loop1))
return dev_err_probe(dev, PTR_ERR(tse850->loop1),
"failed to get 'loop1' gpio\n");
tse850->loop1_cache = 1;
tse850->loop2 = devm_gpiod_get(dev, "axentia,loop2", GPIOD_OUT_HIGH);
if (IS_ERR(tse850->loop2))
return dev_err_probe(dev, PTR_ERR(tse850->loop2),
"failed to get 'loop2' gpio\n");
tse850->loop2_cache = 1;
tse850->ana = devm_regulator_get(dev, "axentia,ana");
if (IS_ERR(tse850->ana))
return dev_err_probe(dev, PTR_ERR(tse850->ana),
"failed to get 'ana' regulator\n");
ret = regulator_enable(tse850->ana);
if (ret < 0) {
dev_err(dev, "failed to enable the 'ana' regulator\n");
return ret;
}
ret = snd_soc_register_card(card);
if (ret) {
dev_err(dev, "snd_soc_register_card failed\n");
goto err_disable_ana;
}
return 0;
err_disable_ana:
regulator_disable(tse850->ana);
return ret;
}
static void tse850_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
snd_soc_unregister_card(card);
regulator_disable(tse850->ana);
}
static const struct of_device_id tse850_dt_ids[] = {
{ .compatible = "axentia,tse850-pcm5142", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tse850_dt_ids);
static struct platform_driver tse850_driver = {
.driver = {
.name = "axentia-tse850-pcm5142",
.of_match_table = tse850_dt_ids,
},
.probe = tse850_probe,
.remove_new = tse850_remove,
};
module_platform_driver(tse850_driver);
/* Module information */
MODULE_AUTHOR("Peter Rosin <[email protected]>");
MODULE_DESCRIPTION("ALSA SoC driver for TSE-850 with PCM5142 codec");
MODULE_LICENSE("GPL v2");
| linux-master | sound/soc/atmel/tse850-pcm5142.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* atmel-pcm.c -- ALSA PCM interface for the Atmel atmel SoC.
*
* Copyright (C) 2005 SAN People
* Copyright (C) 2008 Atmel
*
* Authors: Sedji Gaouaou <[email protected]>
*
* Based on at91-pcm. by:
* Frank Mandarino <[email protected]>
* Copyright 2006 Endrelia Technologies Inc.
*
* Based on pxa2xx-pcm.c by:
*
* Author: Nicolas Pitre
* Created: Nov 30, 2004
* Copyright: (C) 2004 MontaVista Software, Inc.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/atmel_pdc.h>
#include <linux/atmel-ssc.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "atmel-pcm.h"
static int atmel_pcm_new(struct snd_soc_component *component,
struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
int ret;
ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
if (ret)
return ret;
snd_pcm_set_managed_buffer_all(rtd->pcm, SNDRV_DMA_TYPE_DEV,
card->dev, ATMEL_SSC_DMABUF_SIZE,
ATMEL_SSC_DMABUF_SIZE);
return 0;
}
/*--------------------------------------------------------------------------*\
* Hardware definition
\*--------------------------------------------------------------------------*/
/* TODO: These values were taken from the AT91 platform driver, check
* them against real values for AT32
*/
static const struct snd_pcm_hardware atmel_pcm_hardware = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE,
.period_bytes_min = 32,
.period_bytes_max = 8192,
.periods_min = 2,
.periods_max = 1024,
.buffer_bytes_max = ATMEL_SSC_DMABUF_SIZE,
};
/*--------------------------------------------------------------------------*\
* Data types
\*--------------------------------------------------------------------------*/
struct atmel_runtime_data {
struct atmel_pcm_dma_params *params;
dma_addr_t dma_buffer; /* physical address of dma buffer */
dma_addr_t dma_buffer_end; /* first address beyond DMA buffer */
size_t period_size;
dma_addr_t period_ptr; /* physical address of next period */
};
/*--------------------------------------------------------------------------*\
* ISR
\*--------------------------------------------------------------------------*/
static void atmel_pcm_dma_irq(u32 ssc_sr,
struct snd_pcm_substream *substream)
{
struct atmel_runtime_data *prtd = substream->runtime->private_data;
struct atmel_pcm_dma_params *params = prtd->params;
static int count;
count++;
if (ssc_sr & params->mask->ssc_endbuf) {
pr_warn("atmel-pcm: buffer %s on %s (SSC_SR=%#x, count=%d)\n",
substream->stream == SNDRV_PCM_STREAM_PLAYBACK
? "underrun" : "overrun",
params->name, ssc_sr, count);
/* re-start the PDC */
ssc_writex(params->ssc->regs, ATMEL_PDC_PTCR,
params->mask->pdc_disable);
prtd->period_ptr += prtd->period_size;
if (prtd->period_ptr >= prtd->dma_buffer_end)
prtd->period_ptr = prtd->dma_buffer;
ssc_writex(params->ssc->regs, params->pdc->xpr,
prtd->period_ptr);
ssc_writex(params->ssc->regs, params->pdc->xcr,
prtd->period_size / params->pdc_xfer_size);
ssc_writex(params->ssc->regs, ATMEL_PDC_PTCR,
params->mask->pdc_enable);
}
if (ssc_sr & params->mask->ssc_endx) {
/* Load the PDC next pointer and counter registers */
prtd->period_ptr += prtd->period_size;
if (prtd->period_ptr >= prtd->dma_buffer_end)
prtd->period_ptr = prtd->dma_buffer;
ssc_writex(params->ssc->regs, params->pdc->xnpr,
prtd->period_ptr);
ssc_writex(params->ssc->regs, params->pdc->xncr,
prtd->period_size / params->pdc_xfer_size);
}
snd_pcm_period_elapsed(substream);
}
/*--------------------------------------------------------------------------*\
* PCM operations
\*--------------------------------------------------------------------------*/
static int atmel_pcm_hw_params(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct atmel_runtime_data *prtd = runtime->private_data;
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
/* This may get called several times by OSS emulation
* with different params. */
prtd->params = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
prtd->params->dma_intr_handler = atmel_pcm_dma_irq;
prtd->dma_buffer = runtime->dma_addr;
prtd->dma_buffer_end = runtime->dma_addr + runtime->dma_bytes;
prtd->period_size = params_period_bytes(params);
pr_debug("atmel-pcm: "
"hw_params: DMA for %s initialized "
"(dma_bytes=%zu, period_size=%zu)\n",
prtd->params->name,
runtime->dma_bytes,
prtd->period_size);
return 0;
}
static int atmel_pcm_hw_free(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct atmel_runtime_data *prtd = substream->runtime->private_data;
struct atmel_pcm_dma_params *params = prtd->params;
if (params != NULL) {
ssc_writex(params->ssc->regs, SSC_PDC_PTCR,
params->mask->pdc_disable);
prtd->params->dma_intr_handler = NULL;
}
return 0;
}
static int atmel_pcm_prepare(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct atmel_runtime_data *prtd = substream->runtime->private_data;
struct atmel_pcm_dma_params *params = prtd->params;
ssc_writex(params->ssc->regs, SSC_IDR,
params->mask->ssc_endx | params->mask->ssc_endbuf);
ssc_writex(params->ssc->regs, ATMEL_PDC_PTCR,
params->mask->pdc_disable);
return 0;
}
static int atmel_pcm_trigger(struct snd_soc_component *component,
struct snd_pcm_substream *substream, int cmd)
{
struct snd_pcm_runtime *rtd = substream->runtime;
struct atmel_runtime_data *prtd = rtd->private_data;
struct atmel_pcm_dma_params *params = prtd->params;
int ret = 0;
pr_debug("atmel-pcm:buffer_size = %ld,"
"dma_area = %p, dma_bytes = %zu\n",
rtd->buffer_size, rtd->dma_area, rtd->dma_bytes);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
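/*
* Prime the PDC: program the current pointer/counter and the next
* pointer/counter with the first two periods before enabling transfers.
*/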
prtd->period_ptr = prtd->dma_buffer;
ssc_writex(params->ssc->regs, params->pdc->xpr,
prtd->period_ptr);
ssc_writex(params->ssc->regs, params->pdc->xcr,
prtd->period_size / params->pdc_xfer_size);
prtd->period_ptr += prtd->period_size;
ssc_writex(params->ssc->regs, params->pdc->xnpr,
prtd->period_ptr);
ssc_writex(params->ssc->regs, params->pdc->xncr,
prtd->period_size / params->pdc_xfer_size);
pr_debug("atmel-pcm: trigger: "
"period_ptr=%lx, xpr=%u, "
"xcr=%u, xnpr=%u, xncr=%u\n",
(unsigned long)prtd->period_ptr,
ssc_readx(params->ssc->regs, params->pdc->xpr),
ssc_readx(params->ssc->regs, params->pdc->xcr),
ssc_readx(params->ssc->regs, params->pdc->xnpr),
ssc_readx(params->ssc->regs, params->pdc->xncr));
ssc_writex(params->ssc->regs, SSC_IER,
params->mask->ssc_endx | params->mask->ssc_endbuf);
ssc_writex(params->ssc->regs, SSC_PDC_PTCR,
params->mask->pdc_enable);
pr_debug("sr=%u imr=%u\n",
ssc_readx(params->ssc->regs, SSC_SR),
ssc_readx(params->ssc->regs, SSC_IER));
break; /* SNDRV_PCM_TRIGGER_START */
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
ssc_writex(params->ssc->regs, ATMEL_PDC_PTCR,
params->mask->pdc_disable);
break;
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
ssc_writex(params->ssc->regs, ATMEL_PDC_PTCR,
params->mask->pdc_enable);
break;
default:
ret = -EINVAL;
}
return ret;
}
static snd_pcm_uframes_t atmel_pcm_pointer(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct atmel_runtime_data *prtd = runtime->private_data;
struct atmel_pcm_dma_params *params = prtd->params;
dma_addr_t ptr;
snd_pcm_uframes_t x;
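/*
* The PDC pointer register tracks the current DMA position; convert its
* offset from the start of the DMA buffer into frames.
*/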
ptr = (dma_addr_t) ssc_readx(params->ssc->regs, params->pdc->xpr);
x = bytes_to_frames(runtime, ptr - prtd->dma_buffer);
if (x == runtime->buffer_size)
x = 0;
return x;
}
static int atmel_pcm_open(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct atmel_runtime_data *prtd;
int ret = 0;
snd_soc_set_runtime_hwparams(substream, &atmel_pcm_hardware);
/* ensure that buffer size is a multiple of period size */
ret = snd_pcm_hw_constraint_integer(runtime,
SNDRV_PCM_HW_PARAM_PERIODS);
if (ret < 0)
goto out;
prtd = kzalloc(sizeof(struct atmel_runtime_data), GFP_KERNEL);
if (prtd == NULL) {
ret = -ENOMEM;
goto out;
}
runtime->private_data = prtd;
out:
return ret;
}
static int atmel_pcm_close(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
struct atmel_runtime_data *prtd = substream->runtime->private_data;
kfree(prtd);
return 0;
}
static const struct snd_soc_component_driver atmel_soc_platform = {
.open = atmel_pcm_open,
.close = atmel_pcm_close,
.hw_params = atmel_pcm_hw_params,
.hw_free = atmel_pcm_hw_free,
.prepare = atmel_pcm_prepare,
.trigger = atmel_pcm_trigger,
.pointer = atmel_pcm_pointer,
.pcm_construct = atmel_pcm_new,
};
int atmel_pcm_pdc_platform_register(struct device *dev)
{
return devm_snd_soc_register_component(dev, &atmel_soc_platform,
NULL, 0);
}
EXPORT_SYMBOL(atmel_pcm_pdc_platform_register);
MODULE_AUTHOR("Sedji Gaouaou <[email protected]>");
MODULE_DESCRIPTION("Atmel PCM module");
MODULE_LICENSE("GPL");
| linux-master | sound/soc/atmel/atmel-pcm-pdc.c |
// SPDX-License-Identifier: GPL-2.0
//
// Driver for Microchip Pulse Density Microphone Controller (PDMC) interfaces
//
// Copyright (C) 2019-2022 Microchip Technology Inc. and its subsidiaries
//
// Author: Codrin Ciubotariu <[email protected]>
#include <dt-bindings/sound/microchip,pdmc.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <sound/core.h>
#include <sound/dmaengine_pcm.h>
#include <sound/pcm_params.h>
#include <sound/tlv.h>
/*
* ---- PDMC Register map ----
*/
#define MCHP_PDMC_CR 0x00 /* Control Register */
#define MCHP_PDMC_MR 0x04 /* Mode Register */
#define MCHP_PDMC_CFGR 0x08 /* Configuration Register */
#define MCHP_PDMC_RHR 0x0C /* Receive Holding Register */
#define MCHP_PDMC_IER 0x14 /* Interrupt Enable Register */
#define MCHP_PDMC_IDR 0x18 /* Interrupt Disable Register */
#define MCHP_PDMC_IMR 0x1C /* Interrupt Mask Register */
#define MCHP_PDMC_ISR 0x20 /* Interrupt Status Register */
#define MCHP_PDMC_VER 0x50 /* Version Register */
/*
* ---- Control Register (Write-only) ----
*/
#define MCHP_PDMC_CR_SWRST BIT(0) /* Software Reset */
/*
* ---- Mode Register (Read/Write) ----
*/
#define MCHP_PDMC_MR_PDMCEN_MASK GENMASK(3, 0)
#define MCHP_PDMC_MR_PDMCEN(ch) (BIT(ch) & MCHP_PDMC_MR_PDMCEN_MASK)
#define MCHP_PDMC_MR_OSR_MASK GENMASK(17, 16)
#define MCHP_PDMC_MR_OSR64 (1 << 16)
#define MCHP_PDMC_MR_OSR128 (2 << 16)
#define MCHP_PDMC_MR_OSR256 (3 << 16)
#define MCHP_PDMC_MR_SINCORDER_MASK GENMASK(23, 20)
#define MCHP_PDMC_MR_SINC_OSR_MASK GENMASK(27, 24)
#define MCHP_PDMC_MR_SINC_OSR_DIS (0 << 24)
#define MCHP_PDMC_MR_SINC_OSR_8 (1 << 24)
#define MCHP_PDMC_MR_SINC_OSR_16 (2 << 24)
#define MCHP_PDMC_MR_SINC_OSR_32 (3 << 24)
#define MCHP_PDMC_MR_SINC_OSR_64 (4 << 24)
#define MCHP_PDMC_MR_SINC_OSR_128 (5 << 24)
#define MCHP_PDMC_MR_SINC_OSR_256 (6 << 24)
#define MCHP_PDMC_MR_CHUNK_MASK GENMASK(31, 28)
/*
* ---- Configuration Register (Read/Write) ----
*/
#define MCHP_PDMC_CFGR_BSSEL_MASK (BIT(0) | BIT(2) | BIT(4) | BIT(6))
#define MCHP_PDMC_CFGR_BSSEL(ch) BIT((ch) * 2)
#define MCHP_PDMC_CFGR_PDMSEL_MASK (BIT(16) | BIT(18) | BIT(20) | BIT(22))
#define MCHP_PDMC_CFGR_PDMSEL(ch) BIT((ch) * 2 + 16)
/*
* ---- Interrupt Enable/Disable/Mask/Status Registers ----
*/
#define MCHP_PDMC_IR_RXRDY BIT(0)
#define MCHP_PDMC_IR_RXEMPTY BIT(1)
#define MCHP_PDMC_IR_RXFULL BIT(2)
#define MCHP_PDMC_IR_RXCHUNK BIT(3)
#define MCHP_PDMC_IR_RXUDR BIT(4)
#define MCHP_PDMC_IR_RXOVR BIT(5)
/*
* ---- Version Register (Read-only) ----
*/
#define MCHP_PDMC_VER_VERSION GENMASK(11, 0)
#define MCHP_PDMC_MAX_CHANNELS 4
#define MCHP_PDMC_DS_NO 2
#define MCHP_PDMC_EDGE_NO 2
struct mic_map {
int ds_pos;
int clk_edge;
};
struct mchp_pdmc_chmap {
struct snd_pcm_chmap_elem *chmap;
struct mchp_pdmc *dd;
struct snd_pcm *pcm;
struct snd_kcontrol *kctl;
};
struct mchp_pdmc {
struct mic_map channel_mic_map[MCHP_PDMC_MAX_CHANNELS];
struct device *dev;
struct snd_dmaengine_dai_dma_data addr;
struct regmap *regmap;
struct clk *pclk;
struct clk *gclk;
u32 pdmcen;
u32 suspend_irq;
u32 startup_delay_us;
int mic_no;
int sinc_order;
bool audio_filter_en;
};
static const char *const mchp_pdmc_sinc_filter_order_text[] = {
"1", "2", "3", "4", "5"
};
static const unsigned int mchp_pdmc_sinc_filter_order_values[] = {
1, 2, 3, 4, 5,
};
static const struct soc_enum mchp_pdmc_sinc_filter_order_enum = {
.items = ARRAY_SIZE(mchp_pdmc_sinc_filter_order_text),
.texts = mchp_pdmc_sinc_filter_order_text,
.values = mchp_pdmc_sinc_filter_order_values,
};
static int mchp_pdmc_sinc_order_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *uvalue)
{
struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
struct mchp_pdmc *dd = snd_soc_component_get_drvdata(component);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned int item;
item = snd_soc_enum_val_to_item(e, dd->sinc_order);
uvalue->value.enumerated.item[0] = item;
return 0;
}
static int mchp_pdmc_sinc_order_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *uvalue)
{
struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
struct mchp_pdmc *dd = snd_soc_component_get_drvdata(component);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned int *item = uvalue->value.enumerated.item;
unsigned int val;
if (item[0] >= e->items)
return -EINVAL;
val = snd_soc_enum_item_to_val(e, item[0]) << e->shift_l;
if (val == dd->sinc_order)
return 0;
dd->sinc_order = val;
return 1;
}
static int mchp_pdmc_af_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *uvalue)
{
struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
struct mchp_pdmc *dd = snd_soc_component_get_drvdata(component);
uvalue->value.integer.value[0] = !!dd->audio_filter_en;
return 0;
}
static int mchp_pdmc_af_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *uvalue)
{
struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
struct mchp_pdmc *dd = snd_soc_component_get_drvdata(component);
bool af = uvalue->value.integer.value[0] ? true : false;
if (dd->audio_filter_en == af)
return 0;
dd->audio_filter_en = af;
return 1;
}
static int mchp_pdmc_chmap_ctl_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
struct mchp_pdmc_chmap *info = snd_kcontrol_chip(kcontrol);
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = info->dd->mic_no;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = SNDRV_CHMAP_RR; /* maximum of 4 channels */
return 0;
}
static inline struct snd_pcm_substream *
mchp_pdmc_chmap_substream(struct mchp_pdmc_chmap *info, unsigned int idx)
{
struct snd_pcm_substream *s;
for (s = info->pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream; s; s = s->next)
if (s->number == idx)
return s;
return NULL;
}
static struct snd_pcm_chmap_elem *mchp_pdmc_chmap_get(struct snd_pcm_substream *substream,
struct mchp_pdmc_chmap *ch_info)
{
struct snd_pcm_chmap_elem *map;
for (map = ch_info->chmap; map->channels; map++) {
if (map->channels == substream->runtime->channels)
return map;
}
return NULL;
}
static int mchp_pdmc_chmap_ctl_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct mchp_pdmc_chmap *info = snd_kcontrol_chip(kcontrol);
struct mchp_pdmc *dd = info->dd;
unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
struct snd_pcm_substream *substream;
const struct snd_pcm_chmap_elem *map;
int i;
u32 cfgr_val = 0;
if (!info->chmap)
return -EINVAL;
substream = mchp_pdmc_chmap_substream(info, idx);
if (!substream)
return -ENODEV;
memset(ucontrol->value.integer.value, 0, sizeof(long) * info->dd->mic_no);
if (!substream->runtime)
return 0; /* no channels set */
map = mchp_pdmc_chmap_get(substream, info);
if (!map)
return -EINVAL;
for (i = 0; i < map->channels; i++) {
int map_idx = map->channels == 1 ? map->map[i] - SNDRV_CHMAP_MONO :
map->map[i] - SNDRV_CHMAP_FL;
/* make sure the reported channel map is the real one, so write the map */
if (dd->channel_mic_map[map_idx].ds_pos)
cfgr_val |= MCHP_PDMC_CFGR_PDMSEL(i);
if (dd->channel_mic_map[map_idx].clk_edge)
cfgr_val |= MCHP_PDMC_CFGR_BSSEL(i);
ucontrol->value.integer.value[i] = map->map[i];
}
regmap_write(dd->regmap, MCHP_PDMC_CFGR, cfgr_val);
return 0;
}
static int mchp_pdmc_chmap_ctl_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct mchp_pdmc_chmap *info = snd_kcontrol_chip(kcontrol);
struct mchp_pdmc *dd = info->dd;
unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
struct snd_pcm_substream *substream;
struct snd_pcm_chmap_elem *map;
u32 cfgr_val = 0;
int i;
if (!info->chmap)
return -EINVAL;
substream = mchp_pdmc_chmap_substream(info, idx);
if (!substream)
return -ENODEV;
map = mchp_pdmc_chmap_get(substream, info);
if (!map)
return -EINVAL;
for (i = 0; i < map->channels; i++) {
int map_idx;
map->map[i] = ucontrol->value.integer.value[i];
map_idx = map->channels == 1 ? map->map[i] - SNDRV_CHMAP_MONO :
map->map[i] - SNDRV_CHMAP_FL;
/* configure IP for the desired channel map */
if (dd->channel_mic_map[map_idx].ds_pos)
cfgr_val |= MCHP_PDMC_CFGR_PDMSEL(i);
if (dd->channel_mic_map[map_idx].clk_edge)
cfgr_val |= MCHP_PDMC_CFGR_BSSEL(i);
}
regmap_write(dd->regmap, MCHP_PDMC_CFGR, cfgr_val);
return 0;
}
static void mchp_pdmc_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
{
struct mchp_pdmc_chmap *info = snd_kcontrol_chip(kcontrol);
info->pcm->streams[SNDRV_PCM_STREAM_CAPTURE].chmap_kctl = NULL;
kfree(info);
}
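/*
* Build a TLV container holding one CHMAP_VAR entry for each supported
* channel map, as expected by user space for a channel-map control.
*/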
static int mchp_pdmc_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
unsigned int size, unsigned int __user *tlv)
{
struct mchp_pdmc_chmap *info = snd_kcontrol_chip(kcontrol);
const struct snd_pcm_chmap_elem *map;
unsigned int __user *dst;
int c, count = 0;
if (!info->chmap)
return -EINVAL;
if (size < 8)
return -ENOMEM;
if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
return -EFAULT;
size -= 8;
dst = tlv + 2;
for (map = info->chmap; map->channels; map++) {
int chs_bytes = map->channels * 4;
if (size < 8)
return -ENOMEM;
if (put_user(SNDRV_CTL_TLVT_CHMAP_VAR, dst) ||
put_user(chs_bytes, dst + 1))
return -EFAULT;
dst += 2;
size -= 8;
count += 8;
if (size < chs_bytes)
return -ENOMEM;
size -= chs_bytes;
count += chs_bytes;
for (c = 0; c < map->channels; c++) {
if (put_user(map->map[c], dst))
return -EFAULT;
dst++;
}
}
if (put_user(count, tlv + 1))
return -EFAULT;
return 0;
}
static const struct snd_kcontrol_new mchp_pdmc_snd_controls[] = {
SOC_SINGLE_BOOL_EXT("Audio Filter", 0, &mchp_pdmc_af_get, &mchp_pdmc_af_put),
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "SINC Filter Order",
.info = snd_soc_info_enum_double,
.get = mchp_pdmc_sinc_order_get,
.put = mchp_pdmc_sinc_order_put,
.private_value = (unsigned long)&mchp_pdmc_sinc_filter_order_enum,
},
};
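/* Add back the controls that were removed in mchp_pdmc_open(). */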
static int mchp_pdmc_close(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
return snd_soc_add_component_controls(component, mchp_pdmc_snd_controls,
ARRAY_SIZE(mchp_pdmc_snd_controls));
}
static int mchp_pdmc_open(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
int i;
/* remove controls that can't be changed at runtime */
for (i = 0; i < ARRAY_SIZE(mchp_pdmc_snd_controls); i++) {
const struct snd_kcontrol_new *control = &mchp_pdmc_snd_controls[i];
struct snd_ctl_elem_id id;
int err;
if (component->name_prefix)
snprintf(id.name, sizeof(id.name), "%s %s", component->name_prefix,
control->name);
else
strscpy(id.name, control->name, sizeof(id.name));
id.numid = 0;
id.iface = control->iface;
id.device = control->device;
id.subdevice = control->subdevice;
id.index = control->index;
err = snd_ctl_remove_id(component->card->snd_card, &id);
if (err < 0)
dev_err(component->dev, "%d: Failed to remove %s\n", err,
control->name);
}
return 0;
}
static const struct snd_soc_component_driver mchp_pdmc_dai_component = {
.name = "mchp-pdmc",
.controls = mchp_pdmc_snd_controls,
.num_controls = ARRAY_SIZE(mchp_pdmc_snd_controls),
.open = &mchp_pdmc_open,
.close = &mchp_pdmc_close,
.legacy_dai_naming = 1,
.trigger_start = SND_SOC_TRIGGER_ORDER_LDC,
};
static const unsigned int mchp_pdmc_1mic[] = {1};
static const unsigned int mchp_pdmc_2mic[] = {1, 2};
static const unsigned int mchp_pdmc_3mic[] = {1, 2, 3};
static const unsigned int mchp_pdmc_4mic[] = {1, 2, 3, 4};
static const struct snd_pcm_hw_constraint_list mchp_pdmc_chan_constr[] = {
{
.list = mchp_pdmc_1mic,
.count = ARRAY_SIZE(mchp_pdmc_1mic),
},
{
.list = mchp_pdmc_2mic,
.count = ARRAY_SIZE(mchp_pdmc_2mic),
},
{
.list = mchp_pdmc_3mic,
.count = ARRAY_SIZE(mchp_pdmc_3mic),
},
{
.list = mchp_pdmc_4mic,
.count = ARRAY_SIZE(mchp_pdmc_4mic),
},
};
static int mchp_pdmc_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct mchp_pdmc *dd = snd_soc_dai_get_drvdata(dai);
regmap_write(dd->regmap, MCHP_PDMC_CR, MCHP_PDMC_CR_SWRST);
snd_pcm_hw_constraint_list(substream->runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
&mchp_pdmc_chan_constr[dd->mic_no - 1]);
return 0;
}
static int mchp_pdmc_dai_probe(struct snd_soc_dai *dai)
{
struct mchp_pdmc *dd = snd_soc_dai_get_drvdata(dai);
snd_soc_dai_init_dma_data(dai, NULL, &dd->addr);
return 0;
}
static int mchp_pdmc_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
unsigned int fmt_master = fmt & SND_SOC_DAIFMT_MASTER_MASK;
unsigned int fmt_format = fmt & SND_SOC_DAIFMT_FORMAT_MASK;
/* IP needs to be bitclock master */
if (fmt_master != SND_SOC_DAIFMT_BP_FP &&
fmt_master != SND_SOC_DAIFMT_BP_FC)
return -EINVAL;
/* IP supports only PDM interface */
if (fmt_format != SND_SOC_DAIFMT_PDM)
return -EINVAL;
return 0;
}
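/*
* Translate the oversampling ratio into the matching MR field: OSR when
* the audio filter is enabled, SINC_OSR otherwise.
*/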
static u32 mchp_pdmc_mr_set_osr(int audio_filter_en, unsigned int osr)
{
if (audio_filter_en) {
switch (osr) {
case 64:
return MCHP_PDMC_MR_OSR64;
case 128:
return MCHP_PDMC_MR_OSR128;
case 256:
return MCHP_PDMC_MR_OSR256;
}
} else {
switch (osr) {
case 8:
return MCHP_PDMC_MR_SINC_OSR_8;
case 16:
return MCHP_PDMC_MR_SINC_OSR_16;
case 32:
return MCHP_PDMC_MR_SINC_OSR_32;
case 64:
return MCHP_PDMC_MR_SINC_OSR_64;
case 128:
return MCHP_PDMC_MR_SINC_OSR_128;
case 256:
return MCHP_PDMC_MR_SINC_OSR_256;
}
}
return 0;
}
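/* Pick the largest DMA burst length that evenly divides the period size. */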
static inline int mchp_pdmc_period_to_maxburst(int period_size)
{
if (!(period_size % 8))
return 8;
if (!(period_size % 4))
return 4;
if (!(period_size % 2))
return 2;
return 1;
}
static struct snd_pcm_chmap_elem mchp_pdmc_std_chmaps[] = {
{ .channels = 1,
.map = { SNDRV_CHMAP_MONO } },
{ .channels = 2,
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
{ .channels = 3,
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
SNDRV_CHMAP_RL } },
{ .channels = 4,
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
{ }
};
static int mchp_pdmc_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct mchp_pdmc *dd = snd_soc_dai_get_drvdata(dai);
struct snd_soc_component *comp = dai->component;
unsigned long gclk_rate = 0;
unsigned long best_diff_rate = ~0UL;
unsigned int channels = params_channels(params);
unsigned int osr = 0, osr_start;
unsigned int fs = params_rate(params);
u32 mr_val = 0;
u32 cfgr_val = 0;
int i;
int ret;
dev_dbg(comp->dev, "%s() rate=%u format=%#x width=%u channels=%u\n",
__func__, params_rate(params), params_format(params),
params_width(params), params_channels(params));
if (channels > dd->mic_no) {
dev_err(comp->dev, "more channels %u than microphones %d\n",
channels, dd->mic_no);
return -EINVAL;
}
dd->pdmcen = 0;
for (i = 0; i < channels; i++) {
dd->pdmcen |= MCHP_PDMC_MR_PDMCEN(i);
if (dd->channel_mic_map[i].ds_pos)
cfgr_val |= MCHP_PDMC_CFGR_PDMSEL(i);
if (dd->channel_mic_map[i].clk_edge)
cfgr_val |= MCHP_PDMC_CFGR_BSSEL(i);
}
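/*
* Search the supported oversampling ratios for the one whose required
* gclk rate (fs * 16 * osr) the generic clock can approximate best.
*/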
for (osr_start = dd->audio_filter_en ? 64 : 8;
osr_start <= 256 && best_diff_rate; osr_start *= 2) {
long round_rate;
unsigned long diff_rate;
round_rate = clk_round_rate(dd->gclk,
(unsigned long)fs * 16 * osr_start);
if (round_rate < 0)
continue;
diff_rate = abs((fs * 16 * osr_start) - round_rate);
if (diff_rate < best_diff_rate) {
best_diff_rate = diff_rate;
osr = osr_start;
gclk_rate = fs * 16 * osr;
}
}
if (!gclk_rate) {
dev_err(comp->dev, "invalid sampling rate: %u\n", fs);
return -EINVAL;
}
/* CLK is enabled by runtime PM. */
clk_disable_unprepare(dd->gclk);
/* set the rate */
ret = clk_set_rate(dd->gclk, gclk_rate);
clk_prepare_enable(dd->gclk);
if (ret) {
dev_err(comp->dev, "unable to set rate %lu to GCLK: %d\n",
gclk_rate, ret);
return ret;
}
mr_val |= mchp_pdmc_mr_set_osr(dd->audio_filter_en, osr);
mr_val |= FIELD_PREP(MCHP_PDMC_MR_SINCORDER_MASK, dd->sinc_order);
dd->addr.maxburst = mchp_pdmc_period_to_maxburst(snd_pcm_lib_period_bytes(substream));
mr_val |= FIELD_PREP(MCHP_PDMC_MR_CHUNK_MASK, dd->addr.maxburst);
dev_dbg(comp->dev, "maxburst set to %d\n", dd->addr.maxburst);
snd_soc_component_update_bits(comp, MCHP_PDMC_MR,
MCHP_PDMC_MR_OSR_MASK |
MCHP_PDMC_MR_SINCORDER_MASK |
MCHP_PDMC_MR_SINC_OSR_MASK |
MCHP_PDMC_MR_CHUNK_MASK, mr_val);
snd_soc_component_write(comp, MCHP_PDMC_CFGR, cfgr_val);
return 0;
}
static void mchp_pdmc_noise_filter_workaround(struct mchp_pdmc *dd)
{
u32 tmp, steps = 16;
/*
* The PDMC doesn't wait for the microphones' startup time, so the
* acquisition may start before the microphones are ready, leading to pop
* noises at the beginning of capture. To avoid this, wait 50 ms (normal
* startup procedure) or 150 ms (worst case after resume from sleep
* states) after the microphones are enabled, then clear the FIFOs (by
* reading the RHR 16 times) and any pending interrupts before continuing.
* Also, for this to work the DMA needs to be started after interrupts
* are enabled.
*/
usleep_range(dd->startup_delay_us, dd->startup_delay_us + 5);
while (steps--)
regmap_read(dd->regmap, MCHP_PDMC_RHR, &tmp);
/* Clear interrupts. */
regmap_read(dd->regmap, MCHP_PDMC_ISR, &tmp);
}
static int mchp_pdmc_trigger(struct snd_pcm_substream *substream,
int cmd, struct snd_soc_dai *dai)
{
struct mchp_pdmc *dd = snd_soc_dai_get_drvdata(dai);
struct snd_soc_component *cpu = dai->component;
#ifdef DEBUG
u32 val;
#endif
switch (cmd) {
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
snd_soc_component_update_bits(cpu, MCHP_PDMC_MR,
MCHP_PDMC_MR_PDMCEN_MASK,
dd->pdmcen);
mchp_pdmc_noise_filter_workaround(dd);
/* Enable interrupts. */
regmap_write(dd->regmap, MCHP_PDMC_IER, dd->suspend_irq |
MCHP_PDMC_IR_RXOVR | MCHP_PDMC_IR_RXUDR);
dd->suspend_irq = 0;
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
regmap_read(dd->regmap, MCHP_PDMC_IMR, &dd->suspend_irq);
fallthrough;
case SNDRV_PCM_TRIGGER_STOP:
/* Disable overrun and underrun error interrupts */
regmap_write(dd->regmap, MCHP_PDMC_IDR, dd->suspend_irq |
MCHP_PDMC_IR_RXOVR | MCHP_PDMC_IR_RXUDR);
fallthrough;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
snd_soc_component_update_bits(cpu, MCHP_PDMC_MR,
MCHP_PDMC_MR_PDMCEN_MASK, 0);
break;
default:
return -EINVAL;
}
#ifdef DEBUG
regmap_read(dd->regmap, MCHP_PDMC_MR, &val);
dev_dbg(dd->dev, "MR (0x%02x): 0x%08x\n", MCHP_PDMC_MR, val);
regmap_read(dd->regmap, MCHP_PDMC_CFGR, &val);
dev_dbg(dd->dev, "CFGR (0x%02x): 0x%08x\n", MCHP_PDMC_CFGR, val);
regmap_read(dd->regmap, MCHP_PDMC_IMR, &val);
dev_dbg(dd->dev, "IMR (0x%02x): 0x%08x\n", MCHP_PDMC_IMR, val);
#endif
return 0;
}
static int mchp_pdmc_add_chmap_ctls(struct snd_pcm *pcm, struct mchp_pdmc *dd)
{
struct mchp_pdmc_chmap *info;
struct snd_kcontrol_new knew = {
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ |
SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
.info = mchp_pdmc_chmap_ctl_info,
.get = mchp_pdmc_chmap_ctl_get,
.put = mchp_pdmc_chmap_ctl_put,
.tlv.c = mchp_pdmc_chmap_ctl_tlv,
};
int err;
if (WARN_ON(pcm->streams[SNDRV_PCM_STREAM_CAPTURE].chmap_kctl))
return -EBUSY;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->pcm = pcm;
info->dd = dd;
info->chmap = mchp_pdmc_std_chmaps;
knew.name = "Capture Channel Map";
knew.device = pcm->device;
knew.count = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream_count;
info->kctl = snd_ctl_new1(&knew, info);
if (!info->kctl) {
kfree(info);
return -ENOMEM;
}
info->kctl->private_free = mchp_pdmc_chmap_ctl_private_free;
err = snd_ctl_add(pcm->card, info->kctl);
if (err < 0)
return err;
pcm->streams[SNDRV_PCM_STREAM_CAPTURE].chmap_kctl = info->kctl;
return 0;
}
static int mchp_pdmc_pcm_new(struct snd_soc_pcm_runtime *rtd,
struct snd_soc_dai *dai)
{
struct mchp_pdmc *dd = snd_soc_dai_get_drvdata(dai);
int ret;
ret = mchp_pdmc_add_chmap_ctls(rtd->pcm, dd);
if (ret < 0)
dev_err(dd->dev, "failed to add channel map controls: %d\n", ret);
return ret;
}
static const struct snd_soc_dai_ops mchp_pdmc_dai_ops = {
.probe = mchp_pdmc_dai_probe,
.set_fmt = mchp_pdmc_set_fmt,
.startup = mchp_pdmc_startup,
.hw_params = mchp_pdmc_hw_params,
.trigger = mchp_pdmc_trigger,
.pcm_new = &mchp_pdmc_pcm_new,
};
static struct snd_soc_dai_driver mchp_pdmc_dai = {
.capture = {
.stream_name = "Capture",
.channels_min = 1,
.channels_max = 4,
.rate_min = 8000,
.rate_max = 192000,
.rates = SNDRV_PCM_RATE_KNOT,
.formats = SNDRV_PCM_FMTBIT_S24_LE,
},
.ops = &mchp_pdmc_dai_ops,
};
/* PDMC interrupt handler */
static irqreturn_t mchp_pdmc_interrupt(int irq, void *dev_id)
{
struct mchp_pdmc *dd = dev_id;
u32 isr, msr, pending;
irqreturn_t ret = IRQ_NONE;
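/* Handle only sources that are both flagged in ISR and enabled in IMR. */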
regmap_read(dd->regmap, MCHP_PDMC_ISR, &isr);
regmap_read(dd->regmap, MCHP_PDMC_IMR, &msr);
pending = isr & msr;
dev_dbg(dd->dev, "ISR (0x%02x): 0x%08x, IMR (0x%02x): 0x%08x, pending: 0x%08x\n",
MCHP_PDMC_ISR, isr, MCHP_PDMC_IMR, msr, pending);
if (!pending)
return IRQ_NONE;
if (pending & MCHP_PDMC_IR_RXUDR) {
dev_warn(dd->dev, "underrun detected\n");
regmap_write(dd->regmap, MCHP_PDMC_IDR, MCHP_PDMC_IR_RXUDR);
ret = IRQ_HANDLED;
}
if (pending & MCHP_PDMC_IR_RXOVR) {
dev_warn(dd->dev, "overrun detected\n");
regmap_write(dd->regmap, MCHP_PDMC_IDR, MCHP_PDMC_IR_RXOVR);
ret = IRQ_HANDLED;
}
return ret;
}
/* regmap configuration */
static bool mchp_pdmc_readable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case MCHP_PDMC_MR:
case MCHP_PDMC_CFGR:
case MCHP_PDMC_IMR:
case MCHP_PDMC_ISR:
case MCHP_PDMC_RHR:
case MCHP_PDMC_VER:
return true;
default:
return false;
}
}
static bool mchp_pdmc_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case MCHP_PDMC_CR:
case MCHP_PDMC_MR:
case MCHP_PDMC_CFGR:
case MCHP_PDMC_IER:
case MCHP_PDMC_IDR:
return true;
default:
return false;
}
}
static bool mchp_pdmc_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case MCHP_PDMC_ISR:
case MCHP_PDMC_RHR:
return true;
default:
return false;
}
}
static bool mchp_pdmc_precious_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case MCHP_PDMC_RHR:
case MCHP_PDMC_ISR:
return true;
default:
return false;
}
}
static const struct regmap_config mchp_pdmc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = MCHP_PDMC_VER,
.readable_reg = mchp_pdmc_readable_reg,
.writeable_reg = mchp_pdmc_writeable_reg,
.precious_reg = mchp_pdmc_precious_reg,
.volatile_reg = mchp_pdmc_volatile_reg,
.cache_type = REGCACHE_FLAT,
};
static int mchp_pdmc_dt_init(struct mchp_pdmc *dd)
{
struct device_node *np = dd->dev->of_node;
bool mic_ch[MCHP_PDMC_DS_NO][MCHP_PDMC_EDGE_NO] = {0};
int i;
int ret;
if (!np) {
dev_err(dd->dev, "device node not found\n");
return -EINVAL;
}
dd->mic_no = of_property_count_u32_elems(np, "microchip,mic-pos");
if (dd->mic_no < 0) {
dev_err(dd->dev, "failed to get microchip,mic-pos: %d",
dd->mic_no);
return dd->mic_no;
}
if (!dd->mic_no || dd->mic_no % 2 ||
dd->mic_no / 2 > MCHP_PDMC_MAX_CHANNELS) {
dev_err(dd->dev, "invalid array length for microchip,mic-pos: %d",
dd->mic_no);
return -EINVAL;
}
dd->mic_no /= 2;
dev_info(dd->dev, "%d PDM microphones declared\n", dd->mic_no);
/*
* By default, the order of microphones in microchip,mic-pos is
* considered the same as the channel mapping:
* 1st microphone on channel 0, 2nd microphone on channel 1, etc.
*/
for (i = 0; i < dd->mic_no; i++) {
int ds;
int edge;
ret = of_property_read_u32_index(np, "microchip,mic-pos", i * 2,
&ds);
if (ret) {
dev_err(dd->dev,
"failed to get value no %d value from microchip,mic-pos: %d",
i * 2, ret);
return ret;
}
if (ds >= MCHP_PDMC_DS_NO) {
dev_err(dd->dev,
"invalid DS index in microchip,mic-pos array: %d",
ds);
return -EINVAL;
}
ret = of_property_read_u32_index(np, "microchip,mic-pos", i * 2 + 1,
&edge);
if (ret) {
dev_err(dd->dev,
"failed to get value no %d value from microchip,mic-pos: %d",
i * 2 + 1, ret);
return ret;
}
if (edge != MCHP_PDMC_CLK_POSITIVE &&
edge != MCHP_PDMC_CLK_NEGATIVE) {
dev_err(dd->dev,
"invalid edge in microchip,mic-pos array: %d", edge);
return -EINVAL;
}
if (mic_ch[ds][edge]) {
dev_err(dd->dev,
"duplicated mic (DS %d, edge %d) in microchip,mic-pos array",
ds, edge);
return -EINVAL;
}
mic_ch[ds][edge] = true;
dd->channel_mic_map[i].ds_pos = ds;
dd->channel_mic_map[i].clk_edge = edge;
}
dd->startup_delay_us = 150000;
of_property_read_u32(np, "microchip,startup-delay-us", &dd->startup_delay_us);
return 0;
}
/* Used to clear the channel index found in the RHR's MSB. */
static int mchp_pdmc_process(struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
unsigned long bytes)
{
struct snd_pcm_runtime *runtime = substream->runtime;
u8 *dma_ptr = runtime->dma_area + hwoff +
channel * (runtime->dma_bytes / runtime->channels);
u8 *dma_ptr_end = dma_ptr + bytes;
unsigned int sample_size = samples_to_bytes(runtime, 1);
for (; dma_ptr < dma_ptr_end; dma_ptr += sample_size)
*dma_ptr = 0;
return 0;
}
static struct snd_dmaengine_pcm_config mchp_pdmc_config = {
.process = mchp_pdmc_process,
.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
};
static int mchp_pdmc_runtime_suspend(struct device *dev)
{
struct mchp_pdmc *dd = dev_get_drvdata(dev);
regcache_cache_only(dd->regmap, true);
clk_disable_unprepare(dd->gclk);
clk_disable_unprepare(dd->pclk);
return 0;
}
static int mchp_pdmc_runtime_resume(struct device *dev)
{
struct mchp_pdmc *dd = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(dd->pclk);
if (ret) {
dev_err(dd->dev,
"failed to enable the peripheral clock: %d\n", ret);
return ret;
}
ret = clk_prepare_enable(dd->gclk);
if (ret) {
dev_err(dd->dev,
"failed to enable generic clock: %d\n", ret);
goto disable_pclk;
}
regcache_cache_only(dd->regmap, false);
regcache_mark_dirty(dd->regmap);
ret = regcache_sync(dd->regmap);
if (ret) {
regcache_cache_only(dd->regmap, true);
clk_disable_unprepare(dd->gclk);
disable_pclk:
clk_disable_unprepare(dd->pclk);
}
return ret;
}
static int mchp_pdmc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mchp_pdmc *dd;
struct resource *res;
void __iomem *io_base;
u32 version;
int irq;
int ret;
dd = devm_kzalloc(dev, sizeof(*dd), GFP_KERNEL);
if (!dd)
return -ENOMEM;
dd->dev = &pdev->dev;
ret = mchp_pdmc_dt_init(dd);
if (ret < 0)
return ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
dd->pclk = devm_clk_get(dev, "pclk");
if (IS_ERR(dd->pclk)) {
ret = PTR_ERR(dd->pclk);
dev_err(dev, "failed to get peripheral clock: %d\n", ret);
return ret;
}
dd->gclk = devm_clk_get(dev, "gclk");
if (IS_ERR(dd->gclk)) {
ret = PTR_ERR(dd->gclk);
dev_err(dev, "failed to get GCK: %d\n", ret);
return ret;
}
io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(io_base)) {
ret = PTR_ERR(io_base);
dev_err(dev, "failed to remap register memory: %d\n", ret);
return ret;
}
dd->regmap = devm_regmap_init_mmio(dev, io_base,
&mchp_pdmc_regmap_config);
if (IS_ERR(dd->regmap)) {
ret = PTR_ERR(dd->regmap);
dev_err(dev, "failed to init register map: %d\n", ret);
return ret;
}
ret = devm_request_irq(dev, irq, mchp_pdmc_interrupt, 0,
dev_name(&pdev->dev), dd);
if (ret < 0) {
dev_err(dev, "can't register ISR for IRQ %u (ret=%i)\n",
irq, ret);
return ret;
}
/* By default the audio filter is enabled and the SINC filter order
* is set to the recommended value, 3.
*/
dd->audio_filter_en = true;
dd->sinc_order = 3;
dd->addr.addr = (dma_addr_t)res->start + MCHP_PDMC_RHR;
platform_set_drvdata(pdev, dd);
pm_runtime_enable(dd->dev);
if (!pm_runtime_enabled(dd->dev)) {
ret = mchp_pdmc_runtime_resume(dd->dev);
if (ret)
return ret;
}
/* register platform */
ret = devm_snd_dmaengine_pcm_register(dev, &mchp_pdmc_config, 0);
if (ret) {
dev_err(dev, "could not register platform: %d\n", ret);
goto pm_runtime_suspend;
}
ret = devm_snd_soc_register_component(dev, &mchp_pdmc_dai_component,
&mchp_pdmc_dai, 1);
if (ret) {
dev_err(dev, "could not register CPU DAI: %d\n", ret);
goto pm_runtime_suspend;
}
/* print IP version */
regmap_read(dd->regmap, MCHP_PDMC_VER, &version);
dev_info(dd->dev, "hw version: %#lx\n",
version & MCHP_PDMC_VER_VERSION);
return 0;
pm_runtime_suspend:
if (!pm_runtime_status_suspended(dd->dev))
mchp_pdmc_runtime_suspend(dd->dev);
pm_runtime_disable(dd->dev);
return ret;
}
static void mchp_pdmc_remove(struct platform_device *pdev)
{
struct mchp_pdmc *dd = platform_get_drvdata(pdev);
if (!pm_runtime_status_suspended(dd->dev))
mchp_pdmc_runtime_suspend(dd->dev);
pm_runtime_disable(dd->dev);
}
static const struct of_device_id mchp_pdmc_of_match[] = {
{
.compatible = "microchip,sama7g5-pdmc",
}, {
/* sentinel */
}
};
MODULE_DEVICE_TABLE(of, mchp_pdmc_of_match);
static const struct dev_pm_ops mchp_pdmc_pm_ops = {
SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
RUNTIME_PM_OPS(mchp_pdmc_runtime_suspend, mchp_pdmc_runtime_resume,
NULL)
};
static struct platform_driver mchp_pdmc_driver = {
.driver = {
.name = "mchp-pdmc",
.of_match_table = of_match_ptr(mchp_pdmc_of_match),
.pm = pm_ptr(&mchp_pdmc_pm_ops),
},
.probe = mchp_pdmc_probe,
.remove_new = mchp_pdmc_remove,
};
module_platform_driver(mchp_pdmc_driver);
MODULE_DESCRIPTION("Microchip PDMC driver under ALSA SoC architecture");
MODULE_AUTHOR("Codrin Ciubotariu <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | sound/soc/atmel/mchp-pdmc.c |
// SPDX-License-Identifier: GPL-2.0
//
// Driver for Microchip I2S Multi-channel controller
//
// Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries
//
// Author: Codrin Ciubotariu <[email protected]>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/lcm.h>
#include <linux/of_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>
/*
* ---- I2S Controller Register map ----
*/
#define MCHP_I2SMCC_CR 0x0000 /* Control Register */
#define MCHP_I2SMCC_MRA 0x0004 /* Mode Register A */
#define MCHP_I2SMCC_MRB 0x0008 /* Mode Register B */
#define MCHP_I2SMCC_SR 0x000C /* Status Register */
#define MCHP_I2SMCC_IERA 0x0010 /* Interrupt Enable Register A */
#define MCHP_I2SMCC_IDRA 0x0014 /* Interrupt Disable Register A */
#define MCHP_I2SMCC_IMRA 0x0018 /* Interrupt Mask Register A */
#define MCHP_I2SMCC_ISRA 0x001C /* Interrupt Status Register A */
#define MCHP_I2SMCC_IERB 0x0020 /* Interrupt Enable Register B */
#define MCHP_I2SMCC_IDRB 0x0024 /* Interrupt Disable Register B */
#define MCHP_I2SMCC_IMRB 0x0028 /* Interrupt Mask Register B */
#define MCHP_I2SMCC_ISRB 0x002C /* Interrupt Status Register B */
#define MCHP_I2SMCC_RHR 0x0030 /* Receiver Holding Register */
#define MCHP_I2SMCC_THR 0x0034 /* Transmitter Holding Register */
#define MCHP_I2SMCC_RHL0R 0x0040 /* Receiver Holding Left 0 Register */
#define MCHP_I2SMCC_RHR0R 0x0044 /* Receiver Holding Right 0 Register */
#define MCHP_I2SMCC_RHL1R 0x0048 /* Receiver Holding Left 1 Register */
#define MCHP_I2SMCC_RHR1R 0x004C /* Receiver Holding Right 1 Register */
#define MCHP_I2SMCC_RHL2R 0x0050 /* Receiver Holding Left 2 Register */
#define MCHP_I2SMCC_RHR2R 0x0054 /* Receiver Holding Right 2 Register */
#define MCHP_I2SMCC_RHL3R 0x0058 /* Receiver Holding Left 3 Register */
#define MCHP_I2SMCC_RHR3R 0x005C /* Receiver Holding Right 3 Register */
#define MCHP_I2SMCC_THL0R 0x0060 /* Transmitter Holding Left 0 Register */
#define MCHP_I2SMCC_THR0R 0x0064 /* Transmitter Holding Right 0 Register */
#define MCHP_I2SMCC_THL1R 0x0068 /* Transmitter Holding Left 1 Register */
#define MCHP_I2SMCC_THR1R 0x006C /* Transmitter Holding Right 1 Register */
#define MCHP_I2SMCC_THL2R 0x0070 /* Transmitter Holding Left 2 Register */
#define MCHP_I2SMCC_THR2R 0x0074 /* Transmitter Holding Right 2 Register */
#define MCHP_I2SMCC_THL3R 0x0078 /* Transmitter Holding Left 3 Register */
#define MCHP_I2SMCC_THR3R 0x007C /* Transmitter Holding Right 3 Register */
#define MCHP_I2SMCC_VERSION 0x00FC /* Version Register */
/*
* ---- Control Register (Write-only) ----
*/
#define MCHP_I2SMCC_CR_RXEN BIT(0) /* Receiver Enable */
#define MCHP_I2SMCC_CR_RXDIS BIT(1) /* Receiver Disable */
#define MCHP_I2SMCC_CR_CKEN BIT(2) /* Clock Enable */
#define MCHP_I2SMCC_CR_CKDIS BIT(3) /* Clock Disable */
#define MCHP_I2SMCC_CR_TXEN BIT(4) /* Transmitter Enable */
#define MCHP_I2SMCC_CR_TXDIS BIT(5) /* Transmitter Disable */
#define MCHP_I2SMCC_CR_SWRST BIT(7) /* Software Reset */
/*
* ---- Mode Register A (Read/Write) ----
*/
#define MCHP_I2SMCC_MRA_MODE_MASK GENMASK(0, 0)
#define MCHP_I2SMCC_MRA_MODE_SLAVE (0 << 0)
#define MCHP_I2SMCC_MRA_MODE_MASTER (1 << 0)
#define MCHP_I2SMCC_MRA_DATALENGTH_MASK GENMASK(3, 1)
#define MCHP_I2SMCC_MRA_DATALENGTH_32_BITS (0 << 1)
#define MCHP_I2SMCC_MRA_DATALENGTH_24_BITS (1 << 1)
#define MCHP_I2SMCC_MRA_DATALENGTH_20_BITS (2 << 1)
#define MCHP_I2SMCC_MRA_DATALENGTH_18_BITS (3 << 1)
#define MCHP_I2SMCC_MRA_DATALENGTH_16_BITS (4 << 1)
#define MCHP_I2SMCC_MRA_DATALENGTH_16_BITS_COMPACT (5 << 1)
#define MCHP_I2SMCC_MRA_DATALENGTH_8_BITS (6 << 1)
#define MCHP_I2SMCC_MRA_DATALENGTH_8_BITS_COMPACT (7 << 1)
#define MCHP_I2SMCC_MRA_WIRECFG_MASK GENMASK(5, 4)
#define MCHP_I2SMCC_MRA_WIRECFG_TDM(pin) (((pin) << 4) & \
MCHP_I2SMCC_MRA_WIRECFG_MASK)
#define MCHP_I2SMCC_MRA_WIRECFG_I2S_1_TDM_0 (0 << 4)
#define MCHP_I2SMCC_MRA_WIRECFG_I2S_2_TDM_1 (1 << 4)
#define MCHP_I2SMCC_MRA_WIRECFG_I2S_4_TDM_2 (2 << 4)
#define MCHP_I2SMCC_MRA_WIRECFG_TDM_3 (3 << 4)
#define MCHP_I2SMCC_MRA_FORMAT_MASK GENMASK(7, 6)
#define MCHP_I2SMCC_MRA_FORMAT_I2S (0 << 6)
#define MCHP_I2SMCC_MRA_FORMAT_LJ (1 << 6) /* Left Justified */
#define MCHP_I2SMCC_MRA_FORMAT_TDM (2 << 6)
#define MCHP_I2SMCC_MRA_FORMAT_TDMLJ (3 << 6)
/* Receiver uses one DMA channel ... */
/* Left audio samples duplicated to right audio channel */
#define MCHP_I2SMCC_MRA_RXMONO BIT(8)
/* I2SDO output of I2SC is internally connected to I2SDI input */
#define MCHP_I2SMCC_MRA_RXLOOP BIT(9)
/* Transmitter uses one DMA channel ... */
/* Left audio samples duplicated to right audio channel */
#define MCHP_I2SMCC_MRA_TXMONO BIT(10)
/* x sample transmitted when underrun */
#define MCHP_I2SMCC_MRA_TXSAME_ZERO (0 << 11) /* Zero sample */
#define MCHP_I2SMCC_MRA_TXSAME_PREVIOUS (1 << 11) /* Previous sample */
/* select between peripheral clock and generated clock */
#define MCHP_I2SMCC_MRA_SRCCLK_PCLK (0 << 12)
#define MCHP_I2SMCC_MRA_SRCCLK_GCLK (1 << 12)
/* Number of TDM Channels - 1 */
#define MCHP_I2SMCC_MRA_NBCHAN_MASK GENMASK(15, 13)
#define MCHP_I2SMCC_MRA_NBCHAN(ch) \
((((ch) - 1) << 13) & MCHP_I2SMCC_MRA_NBCHAN_MASK)
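/* e.g. MCHP_I2SMCC_MRA_NBCHAN(8) encodes 8 - 1 = 7 into bits 15:13 */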
/* Selected Clock to I2SMCC Master Clock ratio */
#define MCHP_I2SMCC_MRA_IMCKDIV_MASK GENMASK(21, 16)
#define MCHP_I2SMCC_MRA_IMCKDIV(div) \
(((div) << 16) & MCHP_I2SMCC_MRA_IMCKDIV_MASK)
/* TDM Frame Synchronization */
#define MCHP_I2SMCC_MRA_TDMFS_MASK GENMASK(23, 22)
#define MCHP_I2SMCC_MRA_TDMFS_SLOT (0 << 22)
#define MCHP_I2SMCC_MRA_TDMFS_HALF (1 << 22)
#define MCHP_I2SMCC_MRA_TDMFS_BIT (2 << 22)
/* Selected Clock to I2SMC Serial Clock ratio */
#define MCHP_I2SMCC_MRA_ISCKDIV_MASK GENMASK(29, 24)
#define MCHP_I2SMCC_MRA_ISCKDIV(div) \
(((div) << 24) & MCHP_I2SMCC_MRA_ISCKDIV_MASK)
/* Master Clock mode */
#define MCHP_I2SMCC_MRA_IMCKMODE_MASK GENMASK(30, 30)
/* 0: No master clock generated*/
#define MCHP_I2SMCC_MRA_IMCKMODE_NONE (0 << 30)
/* 1: master clock generated (internally generated clock drives I2SMCK pin) */
#define MCHP_I2SMCC_MRA_IMCKMODE_GEN (1 << 30)
/* Slot Width */
/* 0: slot is 32 bits wide for DATALENGTH = 18/20/24 bits. */
/* 1: slot is 24 bits wide for DATALENGTH = 18/20/24 bits. */
#define MCHP_I2SMCC_MRA_IWS BIT(31)
/*
* ---- Mode Register B (Read/Write) ----
*/
/* all enabled I2S left channels are filled first, then I2S right channels */
#define MCHP_I2SMCC_MRB_CRAMODE_LEFT_FIRST (0 << 0)
/*
* an enabled I2S left channel is filled, then the corresponding right
* channel, until all channels are filled
*/
#define MCHP_I2SMCC_MRB_CRAMODE_REGULAR (1 << 0)
#define MCHP_I2SMCC_MRB_FIFOEN BIT(4)
#define MCHP_I2SMCC_MRB_DMACHUNK_MASK GENMASK(9, 8)
#define MCHP_I2SMCC_MRB_DMACHUNK(no_words) \
(((fls(no_words) - 1) << 8) & MCHP_I2SMCC_MRB_DMACHUNK_MASK)
#define MCHP_I2SMCC_MRB_CLKSEL_MASK GENMASK(16, 16)
#define MCHP_I2SMCC_MRB_CLKSEL_EXT (0 << 16)
#define MCHP_I2SMCC_MRB_CLKSEL_INT (1 << 16)
/*
* ---- Status Registers (Read-only) ----
*/
#define MCHP_I2SMCC_SR_RXEN BIT(0) /* Receiver Enabled */
#define MCHP_I2SMCC_SR_TXEN BIT(4) /* Transmitter Enabled */
/*
* ---- Interrupt Enable/Disable/Mask/Status Registers A ----
*/
#define MCHP_I2SMCC_INT_TXRDY_MASK(ch) GENMASK((ch) - 1, 0)
#define MCHP_I2SMCC_INT_TXRDYCH(ch) BIT(ch)
#define MCHP_I2SMCC_INT_TXUNF_MASK(ch) GENMASK((ch) + 7, 8)
#define MCHP_I2SMCC_INT_TXUNFCH(ch) BIT((ch) + 8)
#define MCHP_I2SMCC_INT_RXRDY_MASK(ch) GENMASK((ch) + 15, 16)
#define MCHP_I2SMCC_INT_RXRDYCH(ch) BIT((ch) + 16)
#define MCHP_I2SMCC_INT_RXOVF_MASK(ch) GENMASK((ch) + 23, 24)
#define MCHP_I2SMCC_INT_RXOVFCH(ch) BIT((ch) + 24)
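/* e.g. with 2 channels: TX ready mask = GENMASK(1, 0), RX ready mask = GENMASK(17, 16) */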
/*
* ---- Interrupt Enable/Disable/Mask/Status Registers B ----
*/
#define MCHP_I2SMCC_INT_WERR BIT(0)
#define MCHP_I2SMCC_INT_TXFFRDY BIT(8)
#define MCHP_I2SMCC_INT_TXFFEMP BIT(9)
#define MCHP_I2SMCC_INT_RXFFRDY BIT(12)
#define MCHP_I2SMCC_INT_RXFFFUL BIT(13)
/*
* ---- Version Register (Read-only) ----
*/
#define MCHP_I2SMCC_VERSION_MASK GENMASK(11, 0)
#define MCHP_I2SMCC_MAX_CHANNELS 8
#define MCHP_I2MCC_TDM_SLOT_WIDTH 32
static const struct regmap_config mchp_i2s_mcc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = MCHP_I2SMCC_VERSION,
};
struct mchp_i2s_mcc_soc_data {
unsigned int data_pin_pair_num;
bool has_fifo;
};
struct mchp_i2s_mcc_dev {
struct wait_queue_head wq_txrdy;
struct wait_queue_head wq_rxrdy;
struct device *dev;
struct regmap *regmap;
struct clk *pclk;
struct clk *gclk;
const struct mchp_i2s_mcc_soc_data *soc;
struct snd_dmaengine_dai_dma_data playback;
struct snd_dmaengine_dai_dma_data capture;
unsigned int fmt;
unsigned int sysclk;
unsigned int frame_length;
int tdm_slots;
int channels;
u8 tdm_data_pair;
unsigned int gclk_use:1;
unsigned int gclk_running:1;
unsigned int tx_rdy:1;
unsigned int rx_rdy:1;
};
static irqreturn_t mchp_i2s_mcc_interrupt(int irq, void *dev_id)
{
struct mchp_i2s_mcc_dev *dev = dev_id;
u32 sra, imra, srb, imrb, pendinga, pendingb, idra = 0, idrb = 0;
irqreturn_t ret = IRQ_NONE;
regmap_read(dev->regmap, MCHP_I2SMCC_IMRA, &imra);
regmap_read(dev->regmap, MCHP_I2SMCC_ISRA, &sra);
pendinga = imra & sra;
regmap_read(dev->regmap, MCHP_I2SMCC_IMRB, &imrb);
regmap_read(dev->regmap, MCHP_I2SMCC_ISRB, &srb);
pendingb = imrb & srb;
if (!pendinga && !pendingb)
return IRQ_NONE;
/*
* Tx/Rx ready interrupts are enabled when stopping only, to assure
* availability and to disable clocks if necessary
*/
if (dev->soc->has_fifo) {
idrb |= pendingb & (MCHP_I2SMCC_INT_TXFFRDY |
MCHP_I2SMCC_INT_RXFFRDY);
} else {
idra |= pendinga & (MCHP_I2SMCC_INT_TXRDY_MASK(dev->channels) |
MCHP_I2SMCC_INT_RXRDY_MASK(dev->channels));
}
if (idra || idrb)
ret = IRQ_HANDLED;
if ((!dev->soc->has_fifo &&
(imra & MCHP_I2SMCC_INT_TXRDY_MASK(dev->channels)) &&
(imra & MCHP_I2SMCC_INT_TXRDY_MASK(dev->channels)) ==
(idra & MCHP_I2SMCC_INT_TXRDY_MASK(dev->channels))) ||
(dev->soc->has_fifo && imrb & MCHP_I2SMCC_INT_TXFFRDY)) {
dev->tx_rdy = 1;
wake_up_interruptible(&dev->wq_txrdy);
}
if ((!dev->soc->has_fifo &&
(imra & MCHP_I2SMCC_INT_RXRDY_MASK(dev->channels)) &&
(imra & MCHP_I2SMCC_INT_RXRDY_MASK(dev->channels)) ==
(idra & MCHP_I2SMCC_INT_RXRDY_MASK(dev->channels))) ||
(dev->soc->has_fifo && imrb & MCHP_I2SMCC_INT_RXFFRDY)) {
dev->rx_rdy = 1;
wake_up_interruptible(&dev->wq_rxrdy);
}
if (dev->soc->has_fifo)
regmap_write(dev->regmap, MCHP_I2SMCC_IDRB, idrb);
else
regmap_write(dev->regmap, MCHP_I2SMCC_IDRA, idra);
return ret;
}
static int mchp_i2s_mcc_set_sysclk(struct snd_soc_dai *dai,
int clk_id, unsigned int freq, int dir)
{
struct mchp_i2s_mcc_dev *dev = snd_soc_dai_get_drvdata(dai);
dev_dbg(dev->dev, "%s() clk_id=%d freq=%u dir=%d\n",
__func__, clk_id, freq, dir);
/* We do not need SYSCLK */
if (dir == SND_SOC_CLOCK_IN)
return 0;
dev->sysclk = freq;
return 0;
}
static int mchp_i2s_mcc_set_bclk_ratio(struct snd_soc_dai *dai,
unsigned int ratio)
{
struct mchp_i2s_mcc_dev *dev = snd_soc_dai_get_drvdata(dai);
dev_dbg(dev->dev, "%s() ratio=%u\n", __func__, ratio);
dev->frame_length = ratio;
return 0;
}
static int mchp_i2s_mcc_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct mchp_i2s_mcc_dev *dev = snd_soc_dai_get_drvdata(dai);
dev_dbg(dev->dev, "%s() fmt=%#x\n", __func__, fmt);
/* We don't support any kind of clock inversion */
if ((fmt & SND_SOC_DAIFMT_INV_MASK) != SND_SOC_DAIFMT_NB_NF)
return -EINVAL;
/* We can't generate only FSYNC */
if ((fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) == SND_SOC_DAIFMT_BC_FP)
return -EINVAL;
/* We can only reconfigure the IP when it's stopped */
if (fmt & SND_SOC_DAIFMT_CONT)
return -EINVAL;
dev->fmt = fmt;
return 0;
}
static int mchp_i2s_mcc_set_dai_tdm_slot(struct snd_soc_dai *dai,
unsigned int tx_mask,
unsigned int rx_mask,
int slots, int slot_width)
{
struct mchp_i2s_mcc_dev *dev = snd_soc_dai_get_drvdata(dai);
dev_dbg(dev->dev,
"%s() tx_mask=0x%08x rx_mask=0x%08x slots=%d width=%d\n",
__func__, tx_mask, rx_mask, slots, slot_width);
if (slots < 0 || slots > MCHP_I2SMCC_MAX_CHANNELS ||
slot_width != MCHP_I2MCC_TDM_SLOT_WIDTH)
return -EINVAL;
if (slots) {
/* We do not support daisy chain */
if (rx_mask != GENMASK(slots - 1, 0) ||
rx_mask != tx_mask)
return -EINVAL;
}
dev->tdm_slots = slots;
dev->frame_length = slots * MCHP_I2MCC_TDM_SLOT_WIDTH;
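/* e.g. 8 slots * 32-bit slot width = 256-bit TDM frame */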
return 0;
}
static int mchp_i2s_mcc_clk_get_rate_diff(struct clk *clk,
unsigned long rate,
struct clk **best_clk,
unsigned long *best_rate,
unsigned long *best_diff_rate)
{
long round_rate;
unsigned int diff_rate;
round_rate = clk_round_rate(clk, rate);
if (round_rate < 0)
return (int)round_rate;
diff_rate = abs(rate - round_rate);
if (diff_rate < *best_diff_rate) {
*best_clk = clk;
*best_diff_rate = diff_rate;
*best_rate = rate;
}
return 0;
}
static int mchp_i2s_mcc_config_divs(struct mchp_i2s_mcc_dev *dev,
unsigned int bclk, unsigned int *mra,
unsigned long *best_rate)
{
unsigned long clk_rate;
unsigned long lcm_rate;
unsigned long best_diff_rate = ~0;
unsigned int sysclk;
struct clk *best_clk = NULL;
int ret;
/* For code simplification */
if (!dev->sysclk)
sysclk = bclk;
else
sysclk = dev->sysclk;
/*
* MCLK is Selected CLK / (2 * IMCKDIV),
* BCLK is Selected CLK / (2 * ISCKDIV);
* if IMCKDIV or ISCKDIV are 0, MCLK or BCLK = Selected CLK
*/
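/*
* Worked example (hypothetical rates): with sysclk (MCLK) = 12.288 MHz and
* bclk = 3.072 MHz, a 24.576 MHz source clock gives
* IMCKDIV = 24576000 / (2 * 12288000) = 1 and ISCKDIV = 24576000 / (2 * 3072000) = 4
*/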
lcm_rate = lcm(sysclk, bclk);
if ((lcm_rate / sysclk % 2 == 1 && lcm_rate / sysclk > 2) ||
(lcm_rate / bclk % 2 == 1 && lcm_rate / bclk > 2))
lcm_rate *= 2;
for (clk_rate = lcm_rate;
(clk_rate == sysclk || clk_rate / (sysclk * 2) <= GENMASK(5, 0)) &&
(clk_rate == bclk || clk_rate / (bclk * 2) <= GENMASK(5, 0));
clk_rate += lcm_rate) {
ret = mchp_i2s_mcc_clk_get_rate_diff(dev->gclk, clk_rate,
&best_clk, best_rate,
&best_diff_rate);
if (ret) {
dev_err(dev->dev, "gclk error for rate %lu: %d",
clk_rate, ret);
} else {
if (!best_diff_rate) {
dev_dbg(dev->dev, "found perfect rate on gclk: %lu\n",
clk_rate);
break;
}
}
ret = mchp_i2s_mcc_clk_get_rate_diff(dev->pclk, clk_rate,
&best_clk, best_rate,
&best_diff_rate);
if (ret) {
dev_err(dev->dev, "pclk error for rate %lu: %d",
clk_rate, ret);
} else {
if (!best_diff_rate) {
dev_dbg(dev->dev, "found perfect rate on pclk: %lu\n",
clk_rate);
break;
}
}
}
/* check if clocks returned only errors */
if (!best_clk) {
dev_err(dev->dev, "unable to change rate to clocks\n");
return -EINVAL;
}
dev_dbg(dev->dev, "source CLK is %s with rate %lu, diff %lu\n",
best_clk == dev->pclk ? "pclk" : "gclk",
*best_rate, best_diff_rate);
/* Configure divisors */
if (dev->sysclk)
*mra |= MCHP_I2SMCC_MRA_IMCKDIV(*best_rate / (2 * sysclk));
*mra |= MCHP_I2SMCC_MRA_ISCKDIV(*best_rate / (2 * bclk));
if (best_clk == dev->gclk)
*mra |= MCHP_I2SMCC_MRA_SRCCLK_GCLK;
else
*mra |= MCHP_I2SMCC_MRA_SRCCLK_PCLK;
return 0;
}
static int mchp_i2s_mcc_is_running(struct mchp_i2s_mcc_dev *dev)
{
u32 sr;
regmap_read(dev->regmap, MCHP_I2SMCC_SR, &sr);
return !!(sr & (MCHP_I2SMCC_SR_TXEN | MCHP_I2SMCC_SR_RXEN));
}
static int mchp_i2s_mcc_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
unsigned long rate = 0;
struct mchp_i2s_mcc_dev *dev = snd_soc_dai_get_drvdata(dai);
u32 mra = 0;
u32 mrb = 0;
unsigned int channels = params_channels(params);
unsigned int frame_length = dev->frame_length;
unsigned int bclk_rate;
int set_divs = 0;
int ret;
bool is_playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
dev_dbg(dev->dev, "%s() rate=%u format=%#x width=%u channels=%u\n",
__func__, params_rate(params), params_format(params),
params_width(params), params_channels(params));
switch (dev->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
if (dev->tdm_slots) {
dev_err(dev->dev, "I2S with TDM is not supported\n");
return -EINVAL;
}
mra |= MCHP_I2SMCC_MRA_FORMAT_I2S;
break;
case SND_SOC_DAIFMT_LEFT_J:
if (dev->tdm_slots) {
dev_err(dev->dev, "Left-Justified with TDM is not supported\n");
return -EINVAL;
}
mra |= MCHP_I2SMCC_MRA_FORMAT_LJ;
break;
case SND_SOC_DAIFMT_DSP_A:
mra |= MCHP_I2SMCC_MRA_FORMAT_TDM;
break;
default:
dev_err(dev->dev, "unsupported bus format\n");
return -EINVAL;
}
switch (dev->fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
case SND_SOC_DAIFMT_BP_FP:
/* cpu is BCLK and LRC master */
mra |= MCHP_I2SMCC_MRA_MODE_MASTER;
if (dev->sysclk)
mra |= MCHP_I2SMCC_MRA_IMCKMODE_GEN;
set_divs = 1;
break;
case SND_SOC_DAIFMT_BP_FC:
/* cpu is BCLK master */
mrb |= MCHP_I2SMCC_MRB_CLKSEL_INT;
set_divs = 1;
fallthrough;
case SND_SOC_DAIFMT_BC_FC:
/* cpu is slave */
mra |= MCHP_I2SMCC_MRA_MODE_SLAVE;
if (dev->sysclk)
dev_warn(dev->dev, "Unable to generate MCLK in Slave mode\n");
break;
default:
dev_err(dev->dev, "unsupported master/slave mode\n");
return -EINVAL;
}
if (dev->fmt & (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_LEFT_J)) {
/* for I2S and LEFT_J one pin is needed for every 2 channels */
if (channels > dev->soc->data_pin_pair_num * 2) {
dev_err(dev->dev,
"unsupported number of audio channels: %d\n",
channels);
return -EINVAL;
}
/* enable for interleaved format */
mrb |= MCHP_I2SMCC_MRB_CRAMODE_REGULAR;
switch (channels) {
case 1:
if (is_playback)
mra |= MCHP_I2SMCC_MRA_TXMONO;
else
mra |= MCHP_I2SMCC_MRA_RXMONO;
break;
case 2:
break;
case 4:
mra |= MCHP_I2SMCC_MRA_WIRECFG_I2S_2_TDM_1;
break;
case 8:
mra |= MCHP_I2SMCC_MRA_WIRECFG_I2S_4_TDM_2;
break;
default:
dev_err(dev->dev, "unsupported number of audio channels\n");
return -EINVAL;
}
if (!frame_length)
frame_length = 2 * params_physical_width(params);
} else if (dev->fmt & SND_SOC_DAIFMT_DSP_A) {
mra |= MCHP_I2SMCC_MRA_WIRECFG_TDM(dev->tdm_data_pair);
if (dev->tdm_slots) {
if (channels % 2 && channels * 2 <= dev->tdm_slots) {
/*
* Duplicate data for even-numbered channels
* to odd-numbered channels
*/
if (is_playback)
mra |= MCHP_I2SMCC_MRA_TXMONO;
else
mra |= MCHP_I2SMCC_MRA_RXMONO;
}
channels = dev->tdm_slots;
}
mra |= MCHP_I2SMCC_MRA_NBCHAN(channels);
if (!frame_length)
frame_length = channels * MCHP_I2MCC_TDM_SLOT_WIDTH;
}
/*
* We must have the same burst size configured
* in the DMA transfer and in our IP
*/
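/* e.g. channels = 8: fls(8) - 1 = 3, so the DMACHUNK field is 3 and maxburst below is 1 << 3 = 8 words */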
mrb |= MCHP_I2SMCC_MRB_DMACHUNK(channels);
if (is_playback)
dev->playback.maxburst = 1 << (fls(channels) - 1);
else
dev->capture.maxburst = 1 << (fls(channels) - 1);
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S8:
mra |= MCHP_I2SMCC_MRA_DATALENGTH_8_BITS;
break;
case SNDRV_PCM_FORMAT_S16_LE:
mra |= MCHP_I2SMCC_MRA_DATALENGTH_16_BITS;
break;
case SNDRV_PCM_FORMAT_S18_3LE:
mra |= MCHP_I2SMCC_MRA_DATALENGTH_18_BITS |
MCHP_I2SMCC_MRA_IWS;
break;
case SNDRV_PCM_FORMAT_S20_3LE:
mra |= MCHP_I2SMCC_MRA_DATALENGTH_20_BITS |
MCHP_I2SMCC_MRA_IWS;
break;
case SNDRV_PCM_FORMAT_S24_3LE:
mra |= MCHP_I2SMCC_MRA_DATALENGTH_24_BITS |
MCHP_I2SMCC_MRA_IWS;
break;
case SNDRV_PCM_FORMAT_S24_LE:
mra |= MCHP_I2SMCC_MRA_DATALENGTH_24_BITS;
break;
case SNDRV_PCM_FORMAT_S32_LE:
mra |= MCHP_I2SMCC_MRA_DATALENGTH_32_BITS;
break;
default:
dev_err(dev->dev, "unsupported size/endianness for audio samples\n");
return -EINVAL;
}
if (set_divs) {
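/* e.g. stereo S16_LE at 48 kHz: frame_length = 2 * 16 = 32, so bclk_rate = 32 * 48000 = 1.536 MHz */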
bclk_rate = frame_length * params_rate(params);
ret = mchp_i2s_mcc_config_divs(dev, bclk_rate, &mra,
&rate);
if (ret) {
dev_err(dev->dev,
"unable to configure the divisors: %d\n", ret);
return ret;
}
}
/* enable FIFO if available */
if (dev->soc->has_fifo)
mrb |= MCHP_I2SMCC_MRB_FIFOEN;
/*
* If we are already running, the requested setup must be
* the same as the one that's currently ongoing
*/
if (mchp_i2s_mcc_is_running(dev)) {
u32 mra_cur;
u32 mrb_cur;
regmap_read(dev->regmap, MCHP_I2SMCC_MRA, &mra_cur);
regmap_read(dev->regmap, MCHP_I2SMCC_MRB, &mrb_cur);
if (mra != mra_cur || mrb != mrb_cur)
return -EINVAL;
return 0;
}
if (mra & MCHP_I2SMCC_MRA_SRCCLK_GCLK && !dev->gclk_use) {
/* set the rate */
ret = clk_set_rate(dev->gclk, rate);
if (ret) {
dev_err(dev->dev,
"unable to set rate %lu to GCLK: %d\n",
rate, ret);
return ret;
}
ret = clk_prepare(dev->gclk);
if (ret < 0) {
dev_err(dev->dev, "unable to prepare GCLK: %d\n", ret);
return ret;
}
dev->gclk_use = 1;
}
/* Save the number of channels to know what interrupts to enable */
dev->channels = channels;
ret = regmap_write(dev->regmap, MCHP_I2SMCC_MRA, mra);
if (ret < 0) {
if (dev->gclk_use) {
clk_unprepare(dev->gclk);
dev->gclk_use = 0;
}
return ret;
}
return regmap_write(dev->regmap, MCHP_I2SMCC_MRB, mrb);
}
static int mchp_i2s_mcc_hw_free(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct mchp_i2s_mcc_dev *dev = snd_soc_dai_get_drvdata(dai);
bool is_playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
long err;
if (is_playback) {
err = wait_event_interruptible_timeout(dev->wq_txrdy,
dev->tx_rdy,
msecs_to_jiffies(500));
if (err == 0) {
dev_warn_once(dev->dev,
"Timeout waiting for Tx ready\n");
if (dev->soc->has_fifo)
regmap_write(dev->regmap, MCHP_I2SMCC_IDRB,
MCHP_I2SMCC_INT_TXFFRDY);
else
regmap_write(dev->regmap, MCHP_I2SMCC_IDRA,
MCHP_I2SMCC_INT_TXRDY_MASK(dev->channels));
dev->tx_rdy = 1;
}
} else {
err = wait_event_interruptible_timeout(dev->wq_rxrdy,
dev->rx_rdy,
msecs_to_jiffies(500));
if (err == 0) {
dev_warn_once(dev->dev,
"Timeout waiting for Rx ready\n");
if (dev->soc->has_fifo)
regmap_write(dev->regmap, MCHP_I2SMCC_IDRB,
MCHP_I2SMCC_INT_RXFFRDY);
else
regmap_write(dev->regmap, MCHP_I2SMCC_IDRA,
MCHP_I2SMCC_INT_RXRDY_MASK(dev->channels));
dev->rx_rdy = 1;
}
}
if (!mchp_i2s_mcc_is_running(dev)) {
regmap_write(dev->regmap, MCHP_I2SMCC_CR, MCHP_I2SMCC_CR_CKDIS);
if (dev->gclk_running) {
clk_disable(dev->gclk);
dev->gclk_running = 0;
}
if (dev->gclk_use) {
clk_unprepare(dev->gclk);
dev->gclk_use = 0;
}
}
return 0;
}
static int mchp_i2s_mcc_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
struct mchp_i2s_mcc_dev *dev = snd_soc_dai_get_drvdata(dai);
bool is_playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
u32 cr = 0;
u32 iera = 0, ierb = 0;
u32 sr;
int err;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
if (is_playback)
cr = MCHP_I2SMCC_CR_TXEN | MCHP_I2SMCC_CR_CKEN;
else
cr = MCHP_I2SMCC_CR_RXEN | MCHP_I2SMCC_CR_CKEN;
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
regmap_read(dev->regmap, MCHP_I2SMCC_SR, &sr);
if (is_playback && (sr & MCHP_I2SMCC_SR_TXEN)) {
cr = MCHP_I2SMCC_CR_TXDIS;
dev->tx_rdy = 0;
/*
* Enable Tx Ready interrupts on all channels
* to assure all data is sent
*/
if (dev->soc->has_fifo)
ierb = MCHP_I2SMCC_INT_TXFFRDY;
else
iera = MCHP_I2SMCC_INT_TXRDY_MASK(dev->channels);
} else if (!is_playback && (sr & MCHP_I2SMCC_SR_RXEN)) {
cr = MCHP_I2SMCC_CR_RXDIS;
dev->rx_rdy = 0;
/*
* Enable Rx Ready interrupts on all channels
* to assure all data is received
*/
if (dev->soc->has_fifo)
ierb = MCHP_I2SMCC_INT_RXFFRDY;
else
iera = MCHP_I2SMCC_INT_RXRDY_MASK(dev->channels);
}
break;
default:
return -EINVAL;
}
if ((cr & MCHP_I2SMCC_CR_CKEN) && dev->gclk_use &&
!dev->gclk_running) {
err = clk_enable(dev->gclk);
if (err) {
dev_err_once(dev->dev, "failed to enable GCLK: %d\n",
err);
} else {
dev->gclk_running = 1;
}
}
if (dev->soc->has_fifo)
regmap_write(dev->regmap, MCHP_I2SMCC_IERB, ierb);
else
regmap_write(dev->regmap, MCHP_I2SMCC_IERA, iera);
regmap_write(dev->regmap, MCHP_I2SMCC_CR, cr);
return 0;
}
static int mchp_i2s_mcc_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct mchp_i2s_mcc_dev *dev = snd_soc_dai_get_drvdata(dai);
/* Software reset the IP if it's not running */
if (!mchp_i2s_mcc_is_running(dev)) {
return regmap_write(dev->regmap, MCHP_I2SMCC_CR,
MCHP_I2SMCC_CR_SWRST);
}
return 0;
}
static int mchp_i2s_mcc_dai_probe(struct snd_soc_dai *dai)
{
struct mchp_i2s_mcc_dev *dev = snd_soc_dai_get_drvdata(dai);
init_waitqueue_head(&dev->wq_txrdy);
init_waitqueue_head(&dev->wq_rxrdy);
dev->tx_rdy = 1;
dev->rx_rdy = 1;
snd_soc_dai_init_dma_data(dai, &dev->playback, &dev->capture);
return 0;
}
static const struct snd_soc_dai_ops mchp_i2s_mcc_dai_ops = {
.probe = mchp_i2s_mcc_dai_probe,
.set_sysclk = mchp_i2s_mcc_set_sysclk,
.set_bclk_ratio = mchp_i2s_mcc_set_bclk_ratio,
.startup = mchp_i2s_mcc_startup,
.trigger = mchp_i2s_mcc_trigger,
.hw_params = mchp_i2s_mcc_hw_params,
.hw_free = mchp_i2s_mcc_hw_free,
.set_fmt = mchp_i2s_mcc_set_dai_fmt,
.set_tdm_slot = mchp_i2s_mcc_set_dai_tdm_slot,
};
#define MCHP_I2SMCC_RATES SNDRV_PCM_RATE_8000_192000
#define MCHP_I2SMCC_FORMATS (SNDRV_PCM_FMTBIT_S8 | \
SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S18_3LE | \
SNDRV_PCM_FMTBIT_S20_3LE | \
SNDRV_PCM_FMTBIT_S24_3LE | \
SNDRV_PCM_FMTBIT_S24_LE | \
SNDRV_PCM_FMTBIT_S32_LE)
static struct snd_soc_dai_driver mchp_i2s_mcc_dai = {
.playback = {
.stream_name = "I2SMCC-Playback",
.channels_min = 1,
.channels_max = 8,
.rates = MCHP_I2SMCC_RATES,
.formats = MCHP_I2SMCC_FORMATS,
},
.capture = {
.stream_name = "I2SMCC-Capture",
.channels_min = 1,
.channels_max = 8,
.rates = MCHP_I2SMCC_RATES,
.formats = MCHP_I2SMCC_FORMATS,
},
.ops = &mchp_i2s_mcc_dai_ops,
.symmetric_rate = 1,
.symmetric_sample_bits = 1,
.symmetric_channels = 1,
};
static const struct snd_soc_component_driver mchp_i2s_mcc_component = {
.name = "mchp-i2s-mcc",
.legacy_dai_naming = 1,
};
#ifdef CONFIG_OF
static struct mchp_i2s_mcc_soc_data mchp_i2s_mcc_sam9x60 = {
.data_pin_pair_num = 1,
};
static struct mchp_i2s_mcc_soc_data mchp_i2s_mcc_sama7g5 = {
.data_pin_pair_num = 4,
.has_fifo = true,
};
static const struct of_device_id mchp_i2s_mcc_dt_ids[] = {
{
.compatible = "microchip,sam9x60-i2smcc",
.data = &mchp_i2s_mcc_sam9x60,
},
{
.compatible = "microchip,sama7g5-i2smcc",
.data = &mchp_i2s_mcc_sama7g5,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mchp_i2s_mcc_dt_ids);
#endif
static int mchp_i2s_mcc_soc_data_parse(struct platform_device *pdev,
struct mchp_i2s_mcc_dev *dev)
{
int err;
if (!dev->soc) {
dev_err(&pdev->dev, "failed to get soc data\n");
return -ENODEV;
}
if (dev->soc->data_pin_pair_num == 1)
return 0;
err = of_property_read_u8(pdev->dev.of_node, "microchip,tdm-data-pair",
&dev->tdm_data_pair);
if (err < 0 && err != -EINVAL) {
dev_err(&pdev->dev,
"bad property data for 'microchip,tdm-data-pair': %d",
err);
return err;
}
if (err == -EINVAL) {
dev_info(&pdev->dev,
"'microchip,tdm-data-pair' not found; assuming DIN/DOUT 0 for TDM\n");
dev->tdm_data_pair = 0;
} else {
if (dev->tdm_data_pair > dev->soc->data_pin_pair_num - 1) {
dev_err(&pdev->dev,
"invalid value for 'microchip,tdm-data-pair': %d\n",
dev->tdm_data_pair);
return -EINVAL;
}
dev_dbg(&pdev->dev, "TMD format on DIN/DOUT %d pins\n",
dev->tdm_data_pair);
}
return 0;
}
static int mchp_i2s_mcc_probe(struct platform_device *pdev)
{
struct mchp_i2s_mcc_dev *dev;
struct resource *mem;
struct regmap *regmap;
void __iomem *base;
u32 version;
int irq;
int err;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(base))
return PTR_ERR(base);
regmap = devm_regmap_init_mmio(&pdev->dev, base,
&mchp_i2s_mcc_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
err = devm_request_irq(&pdev->dev, irq, mchp_i2s_mcc_interrupt, 0,
dev_name(&pdev->dev), dev);
if (err)
return err;
dev->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(dev->pclk)) {
err = PTR_ERR(dev->pclk);
dev_err(&pdev->dev,
"failed to get the peripheral clock: %d\n", err);
return err;
}
/* Get the optional generated clock */
dev->gclk = devm_clk_get(&pdev->dev, "gclk");
if (IS_ERR(dev->gclk)) {
if (PTR_ERR(dev->gclk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_warn(&pdev->dev,
"generated clock not found: %d\n", err);
dev->gclk = NULL;
}
dev->soc = of_device_get_match_data(&pdev->dev);
err = mchp_i2s_mcc_soc_data_parse(pdev, dev);
if (err < 0)
return err;
dev->dev = &pdev->dev;
dev->regmap = regmap;
platform_set_drvdata(pdev, dev);
err = clk_prepare_enable(dev->pclk);
if (err) {
dev_err(&pdev->dev,
"failed to enable the peripheral clock: %d\n", err);
return err;
}
err = devm_snd_soc_register_component(&pdev->dev,
&mchp_i2s_mcc_component,
&mchp_i2s_mcc_dai, 1);
if (err) {
dev_err(&pdev->dev, "failed to register DAI: %d\n", err);
clk_disable_unprepare(dev->pclk);
return err;
}
dev->playback.addr = (dma_addr_t)mem->start + MCHP_I2SMCC_THR;
dev->capture.addr = (dma_addr_t)mem->start + MCHP_I2SMCC_RHR;
err = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
if (err) {
dev_err(&pdev->dev, "failed to register PCM: %d\n", err);
clk_disable_unprepare(dev->pclk);
return err;
}
/* Get IP version. */
regmap_read(dev->regmap, MCHP_I2SMCC_VERSION, &version);
dev_info(&pdev->dev, "hw version: %#lx\n",
version & MCHP_I2SMCC_VERSION_MASK);
return 0;
}
static void mchp_i2s_mcc_remove(struct platform_device *pdev)
{
struct mchp_i2s_mcc_dev *dev = platform_get_drvdata(pdev);
clk_disable_unprepare(dev->pclk);
}
static struct platform_driver mchp_i2s_mcc_driver = {
.driver = {
.name = "mchp_i2s_mcc",
.of_match_table = mchp_i2s_mcc_dt_ids,
},
.probe = mchp_i2s_mcc_probe,
.remove_new = mchp_i2s_mcc_remove,
};
module_platform_driver(mchp_i2s_mcc_driver);
MODULE_DESCRIPTION("Microchip I2S Multi-Channel Controller driver");
MODULE_AUTHOR("Codrin Ciubotariu <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | sound/soc/atmel/mchp-i2s-mcc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Atmel PDMIC driver
*
* Copyright (C) 2015 Atmel
*
* Author: Songjun Wu <[email protected]>
*/
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <sound/core.h>
#include <sound/dmaengine_pcm.h>
#include <sound/pcm_params.h>
#include <sound/tlv.h>
#include "atmel-pdmic.h"
struct atmel_pdmic_pdata {
u32 mic_min_freq;
u32 mic_max_freq;
s32 mic_offset;
const char *card_name;
};
struct atmel_pdmic {
dma_addr_t phy_base;
struct regmap *regmap;
struct clk *pclk;
struct clk *gclk;
struct device *dev;
int irq;
struct snd_pcm_substream *substream;
const struct atmel_pdmic_pdata *pdata;
};
static const struct of_device_id atmel_pdmic_of_match[] = {
{
.compatible = "atmel,sama5d2-pdmic",
}, {
/* sentinel */
}
};
MODULE_DEVICE_TABLE(of, atmel_pdmic_of_match);
#define PDMIC_OFFSET_MAX_VAL S16_MAX
#define PDMIC_OFFSET_MIN_VAL S16_MIN
static struct atmel_pdmic_pdata *atmel_pdmic_dt_init(struct device *dev)
{
struct device_node *np = dev->of_node;
struct atmel_pdmic_pdata *pdata;
if (!np) {
dev_err(dev, "device node not found\n");
return ERR_PTR(-EINVAL);
}
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM);
if (of_property_read_string(np, "atmel,model", &pdata->card_name))
pdata->card_name = "PDMIC";
if (of_property_read_u32(np, "atmel,mic-min-freq",
&pdata->mic_min_freq)) {
dev_err(dev, "failed to get mic-min-freq\n");
return ERR_PTR(-EINVAL);
}
if (of_property_read_u32(np, "atmel,mic-max-freq",
&pdata->mic_max_freq)) {
dev_err(dev, "failed to get mic-max-freq\n");
return ERR_PTR(-EINVAL);
}
if (pdata->mic_max_freq < pdata->mic_min_freq) {
dev_err(dev,
"mic-max-freq should not be less than mic-min-freq\n");
return ERR_PTR(-EINVAL);
}
if (of_property_read_s32(np, "atmel,mic-offset", &pdata->mic_offset))
pdata->mic_offset = 0;
if (pdata->mic_offset > PDMIC_OFFSET_MAX_VAL) {
dev_warn(dev,
"mic-offset value %d is larger than the max value %d, the max value is specified\n",
pdata->mic_offset, PDMIC_OFFSET_MAX_VAL);
pdata->mic_offset = PDMIC_OFFSET_MAX_VAL;
} else if (pdata->mic_offset < PDMIC_OFFSET_MIN_VAL) {
dev_warn(dev,
"mic-offset value %d is less than the min value %d, the min value is specified\n",
pdata->mic_offset, PDMIC_OFFSET_MIN_VAL);
pdata->mic_offset = PDMIC_OFFSET_MIN_VAL;
}
return pdata;
}
/* cpu dai component */
static int atmel_pdmic_cpu_dai_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct atmel_pdmic *dd = snd_soc_card_get_drvdata(rtd->card);
int ret;
ret = clk_prepare_enable(dd->gclk);
if (ret)
return ret;
ret = clk_prepare_enable(dd->pclk);
if (ret) {
clk_disable_unprepare(dd->gclk);
return ret;
}
/* Clear all bits in the Control Register(PDMIC_CR) */
regmap_write(dd->regmap, PDMIC_CR, 0);
dd->substream = substream;
/* Enable the overrun error interrupt */
regmap_write(dd->regmap, PDMIC_IER, PDMIC_IER_OVRE);
return 0;
}
static void atmel_pdmic_cpu_dai_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct atmel_pdmic *dd = snd_soc_card_get_drvdata(rtd->card);
/* Disable the overrun error interrupt */
regmap_write(dd->regmap, PDMIC_IDR, PDMIC_IDR_OVRE);
clk_disable_unprepare(dd->gclk);
clk_disable_unprepare(dd->pclk);
}
static int atmel_pdmic_cpu_dai_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct atmel_pdmic *dd = snd_soc_card_get_drvdata(rtd->card);
struct snd_soc_component *component = cpu_dai->component;
u32 val;
int ret;
/* Clean the PDMIC Converted Data Register */
ret = regmap_read(dd->regmap, PDMIC_CDR, &val);
if (ret < 0)
return 0;
ret = snd_soc_component_update_bits(component, PDMIC_CR,
PDMIC_CR_ENPDM_MASK,
PDMIC_CR_ENPDM_DIS <<
PDMIC_CR_ENPDM_SHIFT);
if (ret < 0)
return ret;
return 0;
}
#define ATMEL_PDMIC_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
/* platform */
#define ATMEL_PDMIC_MAX_BUF_SIZE (64 * 1024)
#define ATMEL_PDMIC_PREALLOC_BUF_SIZE ATMEL_PDMIC_MAX_BUF_SIZE
static const struct snd_pcm_hardware atmel_pdmic_hw = {
.info = SNDRV_PCM_INFO_MMAP
| SNDRV_PCM_INFO_MMAP_VALID
| SNDRV_PCM_INFO_INTERLEAVED
| SNDRV_PCM_INFO_RESUME
| SNDRV_PCM_INFO_PAUSE,
.formats = ATMEL_PDMIC_FORMATS,
.buffer_bytes_max = ATMEL_PDMIC_MAX_BUF_SIZE,
.period_bytes_min = 256,
.period_bytes_max = 32 * 1024,
.periods_min = 2,
.periods_max = 256,
};
static int
atmel_pdmic_platform_configure_dma(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct dma_slave_config *slave_config)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct atmel_pdmic *dd = snd_soc_card_get_drvdata(rtd->card);
int ret;
ret = snd_hwparams_to_dma_slave_config(substream, params,
slave_config);
if (ret) {
dev_err(dd->dev,
"hw params to dma slave configure failed\n");
return ret;
}
slave_config->src_addr = dd->phy_base + PDMIC_CDR;
slave_config->src_maxburst = 1;
slave_config->dst_maxburst = 1;
return 0;
}
static const struct snd_dmaengine_pcm_config
atmel_pdmic_dmaengine_pcm_config = {
.prepare_slave_config = atmel_pdmic_platform_configure_dma,
.pcm_hardware = &atmel_pdmic_hw,
.prealloc_buffer_size = ATMEL_PDMIC_PREALLOC_BUF_SIZE,
};
/* codec */
/* Mic Gain = dgain * 2^(-scale) */
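/* e.g. dgain = 1, scale = 15: gain = 2^(-15), i.e. 20 * log10(2^-15) ~= -90 dB (first table entry) */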
struct mic_gain {
unsigned int dgain;
unsigned int scale;
};
/* range from -90 dB to 90 dB */
static const struct mic_gain mic_gain_table[] = {
{ 1, 15}, { 1, 14}, /* -90, -84 dB */
{ 3, 15}, { 1, 13}, { 3, 14}, { 1, 12}, /* -81, -78, -75, -72 dB */
{ 5, 14}, { 13, 15}, /* -70, -68 dB */
{ 9, 14}, { 21, 15}, { 23, 15}, { 13, 14}, /* -65 ~ -62 dB */
{ 29, 15}, { 33, 15}, { 37, 15}, { 41, 15}, /* -61 ~ -58 dB */
{ 23, 14}, { 13, 13}, { 58, 15}, { 65, 15}, /* -57 ~ -54 dB */
{ 73, 15}, { 41, 14}, { 23, 13}, { 13, 12}, /* -53 ~ -50 dB */
{ 29, 13}, { 65, 14}, { 73, 14}, { 41, 13}, /* -49 ~ -46 dB */
{ 23, 12}, { 207, 15}, { 29, 12}, { 65, 13}, /* -45 ~ -42 dB */
{ 73, 13}, { 41, 12}, { 23, 11}, { 413, 15}, /* -41 ~ -38 dB */
{ 463, 15}, { 519, 15}, { 583, 15}, { 327, 14}, /* -37 ~ -34 dB */
{ 367, 14}, { 823, 15}, { 231, 13}, { 1036, 15}, /* -33 ~ -30 dB */
{ 1163, 15}, { 1305, 15}, { 183, 12}, { 1642, 15}, /* -29 ~ -26 dB */
{ 1843, 15}, { 2068, 15}, { 145, 11}, { 2603, 15}, /* -25 ~ -22 dB */
{ 365, 12}, { 3277, 15}, { 3677, 15}, { 4125, 15}, /* -21 ~ -18 dB */
{ 4629, 15}, { 5193, 15}, { 5827, 15}, { 3269, 14}, /* -17 ~ -14 dB */
{ 917, 12}, { 8231, 15}, { 9235, 15}, { 5181, 14}, /* -13 ~ -10 dB */
{11627, 15}, {13045, 15}, {14637, 15}, {16423, 15}, /* -9 ~ -6 dB */
{18427, 15}, {20675, 15}, { 5799, 13}, {26029, 15}, /* -5 ~ -2 dB */
{ 7301, 13}, { 1, 0}, {18383, 14}, {10313, 13}, /* -1 ~ 2 dB */
{23143, 14}, {25967, 14}, {29135, 14}, {16345, 13}, /* 3 ~ 6 dB */
{ 4585, 11}, {20577, 13}, { 1443, 9}, {25905, 13}, /* 7 ~ 10 dB */
{14533, 12}, { 8153, 11}, { 2287, 9}, {20529, 12}, /* 11 ~ 14 dB */
{11517, 11}, { 6461, 10}, {28997, 12}, { 4067, 9}, /* 15 ~ 18 dB */
{18253, 11}, { 10, 0}, {22979, 11}, {25783, 11}, /* 19 ~ 22 dB */
{28929, 11}, {32459, 11}, { 9105, 9}, {20431, 10}, /* 23 ~ 26 dB */
{22925, 10}, {12861, 9}, { 7215, 8}, {16191, 9}, /* 27 ~ 30 dB */
{ 9083, 8}, {20383, 9}, {11435, 8}, { 6145, 7}, /* 31 ~ 34 dB */
{ 3599, 6}, {32305, 9}, {18123, 8}, {20335, 8}, /* 35 ~ 38 dB */
{ 713, 3}, { 100, 0}, { 7181, 6}, { 8057, 6}, /* 39 ~ 42 dB */
{ 565, 2}, {20287, 7}, {11381, 6}, {25539, 7}, /* 43 ~ 46 dB */
{ 1791, 3}, { 4019, 4}, { 9019, 5}, {20239, 6}, /* 47 ~ 50 dB */
{ 5677, 4}, {25479, 6}, { 7147, 4}, { 8019, 4}, /* 51 ~ 54 dB */
{17995, 5}, {20191, 5}, {11327, 4}, {12709, 4}, /* 55 ~ 58 dB */
{ 3565, 2}, { 1000, 0}, { 1122, 0}, { 1259, 0}, /* 59 ~ 62 dB */
{ 2825, 1}, {12679, 3}, { 7113, 2}, { 7981, 2}, /* 63 ~ 66 dB */
{ 8955, 2}, {20095, 3}, {22547, 3}, {12649, 2}, /* 67 ~ 70 dB */
{28385, 3}, { 3981, 0}, {17867, 2}, {20047, 2}, /* 71 ~ 74 dB */
{11247, 1}, {12619, 1}, {14159, 1}, {31773, 2}, /* 75 ~ 78 dB */
{17825, 1}, {10000, 0}, {11220, 0}, {12589, 0}, /* 79 ~ 82 dB */
{28251, 1}, {15849, 0}, {17783, 0}, {19953, 0}, /* 83 ~ 86 dB */
{22387, 0}, {25119, 0}, {28184, 0}, {31623, 0}, /* 87 ~ 90 dB */
};
static const DECLARE_TLV_DB_RANGE(mic_gain_tlv,
0, 1, TLV_DB_SCALE_ITEM(-9000, 600, 0),
2, 5, TLV_DB_SCALE_ITEM(-8100, 300, 0),
6, 7, TLV_DB_SCALE_ITEM(-7000, 200, 0),
8, ARRAY_SIZE(mic_gain_table)-1, TLV_DB_SCALE_ITEM(-6500, 100, 0),
);
static int pdmic_get_mic_volsw(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
unsigned int dgain_val, scale_val;
int i;
dgain_val = (snd_soc_component_read(component, PDMIC_DSPR1) & PDMIC_DSPR1_DGAIN_MASK)
>> PDMIC_DSPR1_DGAIN_SHIFT;
scale_val = (snd_soc_component_read(component, PDMIC_DSPR0) & PDMIC_DSPR0_SCALE_MASK)
>> PDMIC_DSPR0_SCALE_SHIFT;
for (i = 0; i < ARRAY_SIZE(mic_gain_table); i++) {
if ((mic_gain_table[i].dgain == dgain_val) &&
(mic_gain_table[i].scale == scale_val))
ucontrol->value.integer.value[0] = i;
}
return 0;
}
static int pdmic_put_mic_volsw(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
int max = mc->max;
unsigned int val;
int ret;
val = ucontrol->value.integer.value[0];
if (val > max)
return -EINVAL;
ret = snd_soc_component_update_bits(component, PDMIC_DSPR1, PDMIC_DSPR1_DGAIN_MASK,
mic_gain_table[val].dgain << PDMIC_DSPR1_DGAIN_SHIFT);
if (ret < 0)
return ret;
ret = snd_soc_component_update_bits(component, PDMIC_DSPR0, PDMIC_DSPR0_SCALE_MASK,
mic_gain_table[val].scale << PDMIC_DSPR0_SCALE_SHIFT);
if (ret < 0)
return ret;
return 0;
}
static const struct snd_kcontrol_new atmel_pdmic_snd_controls[] = {
SOC_SINGLE_EXT_TLV("Mic Capture Volume", PDMIC_DSPR1, PDMIC_DSPR1_DGAIN_SHIFT,
ARRAY_SIZE(mic_gain_table)-1, 0,
pdmic_get_mic_volsw, pdmic_put_mic_volsw, mic_gain_tlv),
SOC_SINGLE("High Pass Filter Switch", PDMIC_DSPR0,
PDMIC_DSPR0_HPFBYP_SHIFT, 1, 1),
SOC_SINGLE("SINCC Filter Switch", PDMIC_DSPR0, PDMIC_DSPR0_SINBYP_SHIFT, 1, 1),
};
static int atmel_pdmic_component_probe(struct snd_soc_component *component)
{
struct snd_soc_card *card = snd_soc_component_get_drvdata(component);
struct atmel_pdmic *dd = snd_soc_card_get_drvdata(card);
snd_soc_component_update_bits(component, PDMIC_DSPR1, PDMIC_DSPR1_OFFSET_MASK,
(u32)(dd->pdata->mic_offset << PDMIC_DSPR1_OFFSET_SHIFT));
return 0;
}
#define PDMIC_MR_PRESCAL_MAX_VAL 127
static int
atmel_pdmic_cpu_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *cpu_dai)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct atmel_pdmic *dd = snd_soc_card_get_drvdata(rtd->card);
struct snd_soc_component *component = cpu_dai->component;
unsigned int rate_min = substream->runtime->hw.rate_min;
unsigned int rate_max = substream->runtime->hw.rate_max;
int fs = params_rate(params);
int bits = params_width(params);
unsigned long pclk_rate, gclk_rate;
unsigned int f_pdmic;
u32 mr_val, dspr0_val, pclk_prescal, gclk_prescal;
if (params_channels(params) != 1) {
dev_err(component->dev,
"only supports one channel\n");
return -EINVAL;
}
if ((fs < rate_min) || (fs > rate_max)) {
dev_err(component->dev,
"sample rate is %dHz, min rate is %dHz, max rate is %dHz\n",
fs, rate_min, rate_max);
return -EINVAL;
}
switch (bits) {
case 16:
dspr0_val = (PDMIC_DSPR0_SIZE_16_BITS
<< PDMIC_DSPR0_SIZE_SHIFT);
break;
case 32:
dspr0_val = (PDMIC_DSPR0_SIZE_32_BITS
<< PDMIC_DSPR0_SIZE_SHIFT);
break;
default:
return -EINVAL;
}
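/*
* Pick the oversampling ratio: fs * 128 (OSR 128) is preferred, but if that
* would exceed the microphone's maximum clock (~rate_max * 64), fall back
* to fs * 64 (OSR 64).
*/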
if ((fs << 7) > (rate_max << 6)) {
f_pdmic = fs << 6;
dspr0_val |= PDMIC_DSPR0_OSR_64 << PDMIC_DSPR0_OSR_SHIFT;
} else {
f_pdmic = fs << 7;
dspr0_val |= PDMIC_DSPR0_OSR_128 << PDMIC_DSPR0_OSR_SHIFT;
}
pclk_rate = clk_get_rate(dd->pclk);
gclk_rate = clk_get_rate(dd->gclk);
/* PRESCAL = SELCK / (2 * f_pdmic) - 1 */
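/* e.g. (hypothetical rates) a 12.288 MHz selected clock with f_pdmic = 3.072 MHz gives PRESCAL = 12288000 / (2 * 3072000) - 1 = 1 */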
pclk_prescal = (u32)(pclk_rate/(f_pdmic << 1)) - 1;
gclk_prescal = (u32)(gclk_rate/(f_pdmic << 1)) - 1;
if ((pclk_prescal > PDMIC_MR_PRESCAL_MAX_VAL) ||
(gclk_rate/((gclk_prescal + 1) << 1) <
pclk_rate/((pclk_prescal + 1) << 1))) {
mr_val = gclk_prescal << PDMIC_MR_PRESCAL_SHIFT;
mr_val |= PDMIC_MR_CLKS_GCK << PDMIC_MR_CLKS_SHIFT;
} else {
mr_val = pclk_prescal << PDMIC_MR_PRESCAL_SHIFT;
mr_val |= PDMIC_MR_CLKS_PCK << PDMIC_MR_CLKS_SHIFT;
}
snd_soc_component_update_bits(component, PDMIC_MR,
PDMIC_MR_PRESCAL_MASK | PDMIC_MR_CLKS_MASK, mr_val);
snd_soc_component_update_bits(component, PDMIC_DSPR0,
PDMIC_DSPR0_OSR_MASK | PDMIC_DSPR0_SIZE_MASK, dspr0_val);
return 0;
}
static int atmel_pdmic_cpu_dai_trigger(struct snd_pcm_substream *substream,
int cmd, struct snd_soc_dai *cpu_dai)
{
struct snd_soc_component *component = cpu_dai->component;
u32 val;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
val = PDMIC_CR_ENPDM_EN << PDMIC_CR_ENPDM_SHIFT;
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
val = PDMIC_CR_ENPDM_DIS << PDMIC_CR_ENPDM_SHIFT;
break;
default:
return -EINVAL;
}
snd_soc_component_update_bits(component, PDMIC_CR, PDMIC_CR_ENPDM_MASK, val);
return 0;
}
static const struct snd_soc_dai_ops atmel_pdmic_cpu_dai_ops = {
.startup = atmel_pdmic_cpu_dai_startup,
.shutdown = atmel_pdmic_cpu_dai_shutdown,
.prepare = atmel_pdmic_cpu_dai_prepare,
.hw_params = atmel_pdmic_cpu_dai_hw_params,
.trigger = atmel_pdmic_cpu_dai_trigger,
};
static struct snd_soc_dai_driver atmel_pdmic_cpu_dai = {
.capture = {
.stream_name = "Capture",
.channels_min = 1,
.channels_max = 1,
.rates = SNDRV_PCM_RATE_KNOT,
.formats = ATMEL_PDMIC_FORMATS,
},
.ops = &atmel_pdmic_cpu_dai_ops,
};
static const struct snd_soc_component_driver atmel_pdmic_cpu_dai_component = {
.name = "atmel-pdmic",
.probe = atmel_pdmic_component_probe,
.controls = atmel_pdmic_snd_controls,
.num_controls = ARRAY_SIZE(atmel_pdmic_snd_controls),
.idle_bias_on = 1,
.use_pmdown_time = 1,
.legacy_dai_naming = 1,
};
/* ASoC sound card */
static int atmel_pdmic_asoc_card_init(struct device *dev,
struct snd_soc_card *card)
{
struct snd_soc_dai_link *dai_link;
struct atmel_pdmic *dd = snd_soc_card_get_drvdata(card);
struct snd_soc_dai_link_component *comp;
dai_link = devm_kzalloc(dev, sizeof(*dai_link), GFP_KERNEL);
if (!dai_link)
return -ENOMEM;
comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
if (!comp)
return -ENOMEM;
dai_link->cpus = comp;
dai_link->codecs = &asoc_dummy_dlc;
dai_link->num_cpus = 1;
dai_link->num_codecs = 1;
dai_link->name = "PDMIC";
dai_link->stream_name = "PDMIC PCM";
dai_link->cpus->dai_name = dev_name(dev);
card->dai_link = dai_link;
card->num_links = 1;
card->name = dd->pdata->card_name;
card->dev = dev;
return 0;
}
static void atmel_pdmic_get_sample_rate(struct atmel_pdmic *dd,
unsigned int *rate_min, unsigned int *rate_max)
{
u32 mic_min_freq = dd->pdata->mic_min_freq;
u32 mic_max_freq = dd->pdata->mic_max_freq;
u32 clk_max_rate = (u32)(clk_get_rate(dd->pclk) >> 1);
u32 clk_min_rate = (u32)(clk_get_rate(dd->gclk) >> 8);
if (mic_max_freq > clk_max_rate)
mic_max_freq = clk_max_rate;
if (mic_min_freq < clk_min_rate)
mic_min_freq = clk_min_rate;
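/*
* The PDMIC output sample rate is the microphone clock divided by the
* oversampling ratio: OSR 128 at the low end, OSR 64 at the high end
* (matching the OSR selection in hw_params).
*/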
*rate_min = DIV_ROUND_CLOSEST(mic_min_freq, 128);
*rate_max = mic_max_freq >> 6;
}
/* PDMIC interrupt handler */
static irqreturn_t atmel_pdmic_interrupt(int irq, void *dev_id)
{
struct atmel_pdmic *dd = (struct atmel_pdmic *)dev_id;
u32 pdmic_isr;
irqreturn_t ret = IRQ_NONE;
regmap_read(dd->regmap, PDMIC_ISR, &pdmic_isr);
if (pdmic_isr & PDMIC_ISR_OVRE) {
regmap_update_bits(dd->regmap, PDMIC_CR, PDMIC_CR_ENPDM_MASK,
PDMIC_CR_ENPDM_DIS << PDMIC_CR_ENPDM_SHIFT);
snd_pcm_stop_xrun(dd->substream);
ret = IRQ_HANDLED;
}
return ret;
}
/* regmap configuration */
#define ATMEL_PDMIC_REG_MAX 0x124
static const struct regmap_config atmel_pdmic_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = ATMEL_PDMIC_REG_MAX,
};
static int atmel_pdmic_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct atmel_pdmic *dd;
struct resource *res;
void __iomem *io_base;
const struct atmel_pdmic_pdata *pdata;
struct snd_soc_card *card;
unsigned int rate_min, rate_max;
int ret;
pdata = atmel_pdmic_dt_init(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
dd = devm_kzalloc(dev, sizeof(*dd), GFP_KERNEL);
if (!dd)
return -ENOMEM;
dd->pdata = pdata;
dd->dev = dev;
dd->irq = platform_get_irq(pdev, 0);
if (dd->irq < 0)
return dd->irq;
dd->pclk = devm_clk_get(dev, "pclk");
if (IS_ERR(dd->pclk)) {
ret = PTR_ERR(dd->pclk);
dev_err(dev, "failed to get peripheral clock: %d\n", ret);
return ret;
}
dd->gclk = devm_clk_get(dev, "gclk");
if (IS_ERR(dd->gclk)) {
ret = PTR_ERR(dd->gclk);
dev_err(dev, "failed to get GCK: %d\n", ret);
return ret;
}
/*
* The gclk clock frequency must always be three times
* lower than the pclk clock frequency.
*/
ret = clk_set_rate(dd->gclk, clk_get_rate(dd->pclk)/3);
if (ret) {
dev_err(dev, "failed to set GCK clock rate: %d\n", ret);
return ret;
}
io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(io_base))
return PTR_ERR(io_base);
dd->phy_base = res->start;
dd->regmap = devm_regmap_init_mmio(dev, io_base,
&atmel_pdmic_regmap_config);
if (IS_ERR(dd->regmap)) {
ret = PTR_ERR(dd->regmap);
dev_err(dev, "failed to init register map: %d\n", ret);
return ret;
}
ret = devm_request_irq(dev, dd->irq, atmel_pdmic_interrupt, 0,
"PDMIC", (void *)dd);
if (ret < 0) {
dev_err(dev, "can't register ISR for IRQ %u (ret=%i)\n",
dd->irq, ret);
return ret;
}
/* Get the minimal and maximal sample rate that the microphone supports */
atmel_pdmic_get_sample_rate(dd, &rate_min, &rate_max);
/* register cpu dai */
atmel_pdmic_cpu_dai.capture.rate_min = rate_min;
atmel_pdmic_cpu_dai.capture.rate_max = rate_max;
ret = devm_snd_soc_register_component(dev,
&atmel_pdmic_cpu_dai_component,
&atmel_pdmic_cpu_dai, 1);
if (ret) {
dev_err(dev, "could not register CPU DAI: %d\n", ret);
return ret;
}
/* register platform */
ret = devm_snd_dmaengine_pcm_register(dev,
&atmel_pdmic_dmaengine_pcm_config,
0);
if (ret) {
dev_err(dev, "could not register platform: %d\n", ret);
return ret;
}
/* register sound card */
card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
if (!card) {
ret = -ENOMEM;
goto unregister_codec;
}
snd_soc_card_set_drvdata(card, dd);
ret = atmel_pdmic_asoc_card_init(dev, card);
if (ret) {
dev_err(dev, "failed to init sound card: %d\n", ret);
goto unregister_codec;
}
ret = devm_snd_soc_register_card(dev, card);
if (ret) {
dev_err(dev, "failed to register sound card: %d\n", ret);
goto unregister_codec;
}
return 0;
unregister_codec:
return ret;
}
static struct platform_driver atmel_pdmic_driver = {
.driver = {
.name = "atmel-pdmic",
.of_match_table = atmel_pdmic_of_match,
.pm = &snd_soc_pm_ops,
},
.probe = atmel_pdmic_probe,
};
module_platform_driver(atmel_pdmic_driver);
MODULE_DESCRIPTION("Atmel PDMIC driver under ALSA SoC architecture");
MODULE_AUTHOR("Songjun Wu <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | sound/soc/atmel/atmel-pdmic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* sam9g20_wm8731 -- SoC audio for AT91SAM9G20-based
* ATMEL AT91SAM9G20ek board.
*
* Copyright (C) 2005 SAN People
* Copyright (C) 2008 Atmel
*
* Authors: Sedji Gaouaou <[email protected]>
*
* Based on ati_b1_wm8731.c by:
* Frank Mandarino <[email protected]>
* Copyright 2006 Endrelia Technologies Inc.
* Based on corgi.c by:
* Copyright 2005 Wolfson Microelectronics PLC.
* Copyright 2005 Openedhand Ltd.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/atmel-ssc.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "../codecs/wm8731.h"
#include "atmel-pcm.h"
#include "atmel_ssc_dai.h"
#define MCLK_RATE 12000000
/*
* As shipped the board does not have inputs. However, it is relatively
* straightforward to modify the board to hook them up so support is left
* in the driver.
*/
#undef ENABLE_MIC_INPUT
static const struct snd_soc_dapm_widget at91sam9g20ek_dapm_widgets[] = {
SND_SOC_DAPM_MIC("Int Mic", NULL),
SND_SOC_DAPM_SPK("Ext Spk", NULL),
};
static const struct snd_soc_dapm_route intercon[] = {
/* speaker connected to LHPOUT/RHPOUT */
{"Ext Spk", NULL, "LHPOUT"},
{"Ext Spk", NULL, "RHPOUT"},
/* mic is connected to Mic Jack, with WM8731 Mic Bias */
{"MICIN", NULL, "Mic Bias"},
{"Mic Bias", NULL, "Int Mic"},
};
/*
* Logic for a wm8731 as connected on a at91sam9g20ek board.
*/
static int at91sam9g20ek_wm8731_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct device *dev = rtd->dev;
int ret;
dev_dbg(dev, "%s called\n", __func__);
ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_MCLK,
MCLK_RATE, SND_SOC_CLOCK_IN);
if (ret < 0) {
dev_err(dev, "Failed to set WM8731 SYSCLK: %d\n", ret);
return ret;
}
#ifndef ENABLE_MIC_INPUT
snd_soc_dapm_nc_pin(&rtd->card->dapm, "Int Mic");
#endif
return 0;
}
SND_SOC_DAILINK_DEFS(pcm,
DAILINK_COMP_ARRAY(COMP_CPU("at91rm9200_ssc.0")),
DAILINK_COMP_ARRAY(COMP_CODEC("wm8731.0-001b", "wm8731-hifi")),
DAILINK_COMP_ARRAY(COMP_PLATFORM("at91rm9200_ssc.0")));
static struct snd_soc_dai_link at91sam9g20ek_dai = {
.name = "WM8731",
.stream_name = "WM8731 PCM",
.init = at91sam9g20ek_wm8731_init,
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBP_CFP,
#ifndef ENABLE_MIC_INPUT
.playback_only = true,
#endif
SND_SOC_DAILINK_REG(pcm),
};
static struct snd_soc_card snd_soc_at91sam9g20ek = {
.name = "AT91SAMG20-EK",
.owner = THIS_MODULE,
.dai_link = &at91sam9g20ek_dai,
.num_links = 1,
.dapm_widgets = at91sam9g20ek_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(at91sam9g20ek_dapm_widgets),
.dapm_routes = intercon,
.num_dapm_routes = ARRAY_SIZE(intercon),
.fully_routed = true,
};
static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *codec_np, *cpu_np;
struct snd_soc_card *card = &snd_soc_at91sam9g20ek;
int ret;
if (!np) {
return -ENODEV;
}
ret = atmel_ssc_set_audio(0);
if (ret) {
dev_err(&pdev->dev, "ssc channel is not valid: %d\n", ret);
return ret;
}
card->dev = &pdev->dev;
/* Parse device node info */
ret = snd_soc_of_parse_card_name(card, "atmel,model");
if (ret)
goto err;
ret = snd_soc_of_parse_audio_routing(card,
"atmel,audio-routing");
if (ret)
goto err;
/* Parse codec info */
at91sam9g20ek_dai.codecs->name = NULL;
codec_np = of_parse_phandle(np, "atmel,audio-codec", 0);
if (!codec_np) {
dev_err(&pdev->dev, "codec info missing\n");
ret = -EINVAL;
goto err;
}
at91sam9g20ek_dai.codecs->of_node = codec_np;
/* Parse dai and platform info */
at91sam9g20ek_dai.cpus->dai_name = NULL;
at91sam9g20ek_dai.platforms->name = NULL;
cpu_np = of_parse_phandle(np, "atmel,ssc-controller", 0);
if (!cpu_np) {
dev_err(&pdev->dev, "dai and pcm info missing\n");
of_node_put(codec_np);
ret = -EINVAL;
goto err;
}
at91sam9g20ek_dai.cpus->of_node = cpu_np;
at91sam9g20ek_dai.platforms->of_node = cpu_np;
of_node_put(codec_np);
of_node_put(cpu_np);
ret = snd_soc_register_card(card);
if (ret) {
dev_err_probe(&pdev->dev, ret,
"snd_soc_register_card() failed\n");
goto err;
}
return 0;
err:
atmel_ssc_put_audio(0);
return ret;
}
static void at91sam9g20ek_audio_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
snd_soc_unregister_card(card);
atmel_ssc_put_audio(0);
}
#ifdef CONFIG_OF
static const struct of_device_id at91sam9g20ek_wm8731_dt_ids[] = {
{ .compatible = "atmel,at91sam9g20ek-wm8731-audio", },
{ }
};
MODULE_DEVICE_TABLE(of, at91sam9g20ek_wm8731_dt_ids);
#endif
static struct platform_driver at91sam9g20ek_audio_driver = {
.driver = {
.name = "at91sam9g20ek-audio",
.of_match_table = of_match_ptr(at91sam9g20ek_wm8731_dt_ids),
},
.probe = at91sam9g20ek_audio_probe,
.remove_new = at91sam9g20ek_audio_remove,
};
module_platform_driver(at91sam9g20ek_audio_driver);
/* Module information */
MODULE_AUTHOR("Sedji Gaouaou <[email protected]>");
MODULE_DESCRIPTION("ALSA SoC AT91SAM9G20EK_WM8731");
MODULE_ALIAS("platform:at91sam9g20ek-audio");
MODULE_LICENSE("GPL");
| linux-master | sound/soc/atmel/sam9g20_wm8731.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* atmel-pcm-dma.c -- ALSA PCM DMA support for the Atmel SoC.
*
* Copyright (C) 2012 Atmel
*
* Author: Bo Shen <[email protected]>
*
* Based on atmel-pcm by:
* Sedji Gaouaou <[email protected]>
* Copyright 2008 Atmel
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/atmel-ssc.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>
#include "atmel-pcm.h"
/*--------------------------------------------------------------------------*\
* Hardware definition
\*--------------------------------------------------------------------------*/
static const struct snd_pcm_hardware atmel_pcm_dma_hardware = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_RESUME |
SNDRV_PCM_INFO_PAUSE,
.period_bytes_min = 256, /* lighten DMA overhead */
.period_bytes_max = 2 * 0xffff, /* if 2 bytes format */
.periods_min = 8,
.periods_max = 1024, /* no limit */
.buffer_bytes_max = 512 * 1024,
};
/*
* atmel_pcm_dma_irq: SSC interrupt handler for DMAENGINE enabled SSC
*
* We use DMAENGINE to send/receive data to/from SSC so this ISR is only to
* check if any overrun occured.
*/
static void atmel_pcm_dma_irq(u32 ssc_sr,
struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct atmel_pcm_dma_params *prtd;
prtd = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
if (ssc_sr & prtd->mask->ssc_error) {
if (snd_pcm_running(substream))
pr_warn("atmel-pcm: buffer %s on %s (SSC_SR=%#x)\n",
substream->stream == SNDRV_PCM_STREAM_PLAYBACK
? "underrun" : "overrun", prtd->name,
ssc_sr);
/* stop RX and capture: will be enabled again at restart */
ssc_writex(prtd->ssc->regs, SSC_CR, prtd->mask->ssc_disable);
snd_pcm_stop_xrun(substream);
/* now drain RHR and read status to remove xrun condition */
ssc_readx(prtd->ssc->regs, SSC_RHR);
ssc_readx(prtd->ssc->regs, SSC_SR);
}
}
static int atmel_pcm_configure_dma(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct atmel_pcm_dma_params *prtd;
struct ssc_device *ssc;
int ret;
prtd = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
ssc = prtd->ssc;
ret = snd_hwparams_to_dma_slave_config(substream, params, slave_config);
if (ret) {
pr_err("atmel-pcm: hwparams to dma slave configure failed\n");
return ret;
}
slave_config->dst_addr = ssc->phybase + SSC_THR;
slave_config->dst_maxburst = 1;
slave_config->src_addr = ssc->phybase + SSC_RHR;
slave_config->src_maxburst = 1;
prtd->dma_intr_handler = atmel_pcm_dma_irq;
return 0;
}
static const struct snd_dmaengine_pcm_config atmel_dmaengine_pcm_config = {
.prepare_slave_config = atmel_pcm_configure_dma,
.pcm_hardware = &atmel_pcm_dma_hardware,
.prealloc_buffer_size = 64 * 1024,
};
int atmel_pcm_dma_platform_register(struct device *dev)
{
return devm_snd_dmaengine_pcm_register(dev,
&atmel_dmaengine_pcm_config, 0);
}
EXPORT_SYMBOL(atmel_pcm_dma_platform_register);
MODULE_AUTHOR("Bo Shen <[email protected]>");
MODULE_DESCRIPTION("Atmel DMA based PCM module");
MODULE_LICENSE("GPL");
| linux-master | sound/soc/atmel/atmel-pcm-dma.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* sam9x5_wm8731 -- SoC audio for AT91SAM9X5-based boards
* that are using WM8731 as codec.
*
* Copyright (C) 2011 Atmel,
* Nicolas Ferre <[email protected]>
*
* Copyright (C) 2013 Paratronic,
* Richard Genoud <[email protected]>
*
* Based on sam9g20_wm8731.c by:
* Sedji Gaouaou <[email protected]>
*/
#include <linux/of.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <sound/soc.h>
#include <sound/soc-dai.h>
#include <sound/soc-dapm.h>
#include "../codecs/wm8731.h"
#include "atmel_ssc_dai.h"
#define MCLK_RATE 12288000
#define DRV_NAME "sam9x5-snd-wm8731"
struct sam9x5_drvdata {
int ssc_id;
};
/*
* Logic for a wm8731 as connected on a at91sam9x5ek based board.
*/
static int sam9x5_wm8731_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
struct device *dev = rtd->dev;
int ret;
dev_dbg(dev, "%s called\n", __func__);
/* set the codec system clock for DAC and ADC */
ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_XTAL,
MCLK_RATE, SND_SOC_CLOCK_IN);
if (ret < 0) {
dev_err(dev, "Failed to set WM8731 SYSCLK: %d\n", ret);
return ret;
}
return 0;
}
/*
* Audio paths on at91sam9x5ek board:
*
* |A| ------------> | | ---R----> Headphone Jack
* |T| <----\ | WM | ---L--/
* |9| ---> CLK <--> | 8731 | <--R----- Line In Jack
* |1| <------------ | | <--L--/
*/
static const struct snd_soc_dapm_widget sam9x5_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphone Jack", NULL),
SND_SOC_DAPM_LINE("Line In Jack", NULL),
};
static int sam9x5_wm8731_driver_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *codec_np, *cpu_np;
struct snd_soc_card *card;
struct snd_soc_dai_link *dai;
struct sam9x5_drvdata *priv;
struct snd_soc_dai_link_component *comp;
int ret;
if (!np) {
dev_err(&pdev->dev, "No device node supplied\n");
return -EINVAL;
}
card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL);
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
dai = devm_kzalloc(&pdev->dev, sizeof(*dai), GFP_KERNEL);
comp = devm_kzalloc(&pdev->dev, 3 * sizeof(*comp), GFP_KERNEL);
if (!dai || !card || !priv || !comp) {
ret = -ENOMEM;
goto out;
}
snd_soc_card_set_drvdata(card, priv);
card->dev = &pdev->dev;
card->owner = THIS_MODULE;
card->dai_link = dai;
card->num_links = 1;
card->dapm_widgets = sam9x5_dapm_widgets;
card->num_dapm_widgets = ARRAY_SIZE(sam9x5_dapm_widgets);
dai->cpus = &comp[0];
dai->num_cpus = 1;
dai->codecs = &comp[1];
dai->num_codecs = 1;
dai->platforms = &comp[2];
dai->num_platforms = 1;
dai->name = "WM8731";
dai->stream_name = "WM8731 PCM";
dai->codecs->dai_name = "wm8731-hifi";
dai->init = sam9x5_wm8731_init;
dai->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBP_CFP;
ret = snd_soc_of_parse_card_name(card, "atmel,model");
if (ret) {
dev_err(&pdev->dev, "atmel,model node missing\n");
goto out;
}
ret = snd_soc_of_parse_audio_routing(card, "atmel,audio-routing");
if (ret) {
dev_err(&pdev->dev, "atmel,audio-routing node missing\n");
goto out;
}
codec_np = of_parse_phandle(np, "atmel,audio-codec", 0);
if (!codec_np) {
dev_err(&pdev->dev, "atmel,audio-codec node missing\n");
ret = -EINVAL;
goto out;
}
dai->codecs->of_node = codec_np;
cpu_np = of_parse_phandle(np, "atmel,ssc-controller", 0);
if (!cpu_np) {
dev_err(&pdev->dev, "atmel,ssc-controller node missing\n");
ret = -EINVAL;
goto out_put_codec_np;
}
dai->cpus->of_node = cpu_np;
dai->platforms->of_node = cpu_np;
priv->ssc_id = of_alias_get_id(cpu_np, "ssc");
ret = atmel_ssc_set_audio(priv->ssc_id);
if (ret != 0) {
dev_err(&pdev->dev, "Failed to set SSC %d for audio: %d\n",
ret, priv->ssc_id);
goto out_put_cpu_np;
}
ret = devm_snd_soc_register_card(&pdev->dev, card);
if (ret) {
dev_err(&pdev->dev, "Platform device allocation failed\n");
goto out_put_audio;
}
dev_dbg(&pdev->dev, "%s ok\n", __func__);
goto out_put_cpu_np;
out_put_audio:
atmel_ssc_put_audio(priv->ssc_id);
out_put_cpu_np:
of_node_put(cpu_np);
out_put_codec_np:
of_node_put(codec_np);
out:
return ret;
}
static void sam9x5_wm8731_driver_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
struct sam9x5_drvdata *priv = card->drvdata;
atmel_ssc_put_audio(priv->ssc_id);
}
static const struct of_device_id sam9x5_wm8731_of_match[] = {
{ .compatible = "atmel,sam9x5-wm8731-audio", },
{},
};
MODULE_DEVICE_TABLE(of, sam9x5_wm8731_of_match);
static struct platform_driver sam9x5_wm8731_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(sam9x5_wm8731_of_match),
},
.probe = sam9x5_wm8731_driver_probe,
.remove_new = sam9x5_wm8731_driver_remove,
};
module_platform_driver(sam9x5_wm8731_driver);
/* Module information */
MODULE_AUTHOR("Nicolas Ferre <[email protected]>");
MODULE_AUTHOR("Richard Genoud <[email protected]>");
MODULE_DESCRIPTION("ALSA SoC machine driver for AT91SAM9x5 - WM8731");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | sound/soc/atmel/sam9x5_wm8731.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* atmel_wm8904 - Atmel ASoC driver for boards with WM8904 codec.
*
* Copyright (C) 2012 Atmel
*
* Author: Bo Shen <[email protected]>
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <sound/soc.h>
#include "../codecs/wm8904.h"
#include "atmel_ssc_dai.h"
static const struct snd_soc_dapm_widget atmel_asoc_wm8904_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphone Jack", NULL),
SND_SOC_DAPM_MIC("Mic", NULL),
SND_SOC_DAPM_LINE("Line In Jack", NULL),
};
static int atmel_asoc_wm8904_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int ret;
ret = snd_soc_dai_set_pll(codec_dai, WM8904_FLL_MCLK, WM8904_FLL_MCLK,
32768, params_rate(params) * 256);
if (ret < 0) {
pr_err("%s - failed to set wm8904 codec PLL.", __func__);
return ret;
}
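/*
 * Worked example: with the fixed 32.768 kHz FLL reference used above,
 * a 48 kHz stream requests 48000 * 256 = 12.288 MHz from the FLL and
 * a 44.1 kHz stream requests 11.2896 MHz.
 */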
/*
 * As the wm8904 uses the FLL output as its system clock,
 * set_sysclk() ignores the freq parameter, so we pass 0.
*/
ret = snd_soc_dai_set_sysclk(codec_dai, WM8904_CLK_FLL,
0, SND_SOC_CLOCK_IN);
if (ret < 0) {
pr_err("%s -failed to set wm8904 SYSCLK\n", __func__);
return ret;
}
return 0;
}
static const struct snd_soc_ops atmel_asoc_wm8904_ops = {
.hw_params = atmel_asoc_wm8904_hw_params,
};
SND_SOC_DAILINK_DEFS(pcm,
DAILINK_COMP_ARRAY(COMP_EMPTY()),
DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "wm8904-hifi")),
DAILINK_COMP_ARRAY(COMP_EMPTY()));
static struct snd_soc_dai_link atmel_asoc_wm8904_dailink = {
.name = "WM8904",
.stream_name = "WM8904 PCM",
.dai_fmt = SND_SOC_DAIFMT_I2S
| SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBP_CFP,
.ops = &atmel_asoc_wm8904_ops,
SND_SOC_DAILINK_REG(pcm),
};
static struct snd_soc_card atmel_asoc_wm8904_card = {
.name = "atmel_asoc_wm8904",
.owner = THIS_MODULE,
.dai_link = &atmel_asoc_wm8904_dailink,
.num_links = 1,
.dapm_widgets = atmel_asoc_wm8904_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(atmel_asoc_wm8904_dapm_widgets),
.fully_routed = true,
};
static int atmel_asoc_wm8904_dt_init(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *codec_np, *cpu_np;
struct snd_soc_card *card = &atmel_asoc_wm8904_card;
struct snd_soc_dai_link *dailink = &atmel_asoc_wm8904_dailink;
int ret;
if (!np) {
dev_err(&pdev->dev, "only device tree supported\n");
return -EINVAL;
}
ret = snd_soc_of_parse_card_name(card, "atmel,model");
if (ret) {
dev_err(&pdev->dev, "failed to parse card name\n");
return ret;
}
ret = snd_soc_of_parse_audio_routing(card, "atmel,audio-routing");
if (ret) {
dev_err(&pdev->dev, "failed to parse audio routing\n");
return ret;
}
cpu_np = of_parse_phandle(np, "atmel,ssc-controller", 0);
if (!cpu_np) {
dev_err(&pdev->dev, "failed to get dai and pcm info\n");
ret = -EINVAL;
return ret;
}
dailink->cpus->of_node = cpu_np;
dailink->platforms->of_node = cpu_np;
of_node_put(cpu_np);
codec_np = of_parse_phandle(np, "atmel,audio-codec", 0);
if (!codec_np) {
dev_err(&pdev->dev, "failed to get codec info\n");
ret = -EINVAL;
return ret;
}
dailink->codecs->of_node = codec_np;
of_node_put(codec_np);
return 0;
}
static int atmel_asoc_wm8904_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &atmel_asoc_wm8904_card;
struct snd_soc_dai_link *dailink = &atmel_asoc_wm8904_dailink;
int id, ret;
card->dev = &pdev->dev;
ret = atmel_asoc_wm8904_dt_init(pdev);
if (ret) {
dev_err(&pdev->dev, "failed to init dt info\n");
return ret;
}
id = of_alias_get_id((struct device_node *)dailink->cpus->of_node, "ssc");
ret = atmel_ssc_set_audio(id);
if (ret != 0) {
dev_err(&pdev->dev, "failed to set SSC %d for audio\n", id);
return ret;
}
ret = snd_soc_register_card(card);
if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card failed\n");
goto err_set_audio;
}
return 0;
err_set_audio:
atmel_ssc_put_audio(id);
return ret;
}
static void atmel_asoc_wm8904_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
struct snd_soc_dai_link *dailink = &atmel_asoc_wm8904_dailink;
int id;
id = of_alias_get_id((struct device_node *)dailink->cpus->of_node, "ssc");
snd_soc_unregister_card(card);
atmel_ssc_put_audio(id);
}
#ifdef CONFIG_OF
static const struct of_device_id atmel_asoc_wm8904_dt_ids[] = {
{ .compatible = "atmel,asoc-wm8904", },
{ }
};
MODULE_DEVICE_TABLE(of, atmel_asoc_wm8904_dt_ids);
#endif
static struct platform_driver atmel_asoc_wm8904_driver = {
.driver = {
.name = "atmel-wm8904-audio",
.of_match_table = of_match_ptr(atmel_asoc_wm8904_dt_ids),
.pm = &snd_soc_pm_ops,
},
.probe = atmel_asoc_wm8904_probe,
.remove_new = atmel_asoc_wm8904_remove,
};
module_platform_driver(atmel_asoc_wm8904_driver);
/* Module information */
MODULE_AUTHOR("Bo Shen <[email protected]>");
MODULE_DESCRIPTION("ALSA SoC machine driver for Atmel EK with WM8904 codec");
MODULE_LICENSE("GPL");
| linux-master | sound/soc/atmel/atmel_wm8904.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ASoC driver for PROTO AudioCODEC (with a WM8731)
*
* Author: Florian Meier, <[email protected]>
* Copyright 2013
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/jack.h>
#include "../codecs/wm8731.h"
#define XTAL_RATE 12288000 /* This is fixed on this board */
static int snd_proto_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_card *card = rtd->card;
struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
/* Set proto sysclk */
int ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_XTAL,
XTAL_RATE, SND_SOC_CLOCK_IN);
if (ret < 0) {
dev_err(card->dev, "Failed to set WM8731 SYSCLK: %d\n",
ret);
return ret;
}
return 0;
}
static const struct snd_soc_dapm_widget snd_proto_widget[] = {
SND_SOC_DAPM_MIC("Microphone Jack", NULL),
SND_SOC_DAPM_HP("Headphone Jack", NULL),
};
static const struct snd_soc_dapm_route snd_proto_route[] = {
/* speaker connected to LHPOUT/RHPOUT */
{"Headphone Jack", NULL, "LHPOUT"},
{"Headphone Jack", NULL, "RHPOUT"},
/* mic is connected to Mic Jack, with WM8731 Mic Bias */
{"MICIN", NULL, "Mic Bias"},
{"Mic Bias", NULL, "Microphone Jack"},
};
/* audio machine driver */
static struct snd_soc_card snd_proto = {
.name = "snd_mikroe_proto",
.owner = THIS_MODULE,
.dapm_widgets = snd_proto_widget,
.num_dapm_widgets = ARRAY_SIZE(snd_proto_widget),
.dapm_routes = snd_proto_route,
.num_dapm_routes = ARRAY_SIZE(snd_proto_route),
};
static int snd_proto_probe(struct platform_device *pdev)
{
struct snd_soc_dai_link *dai;
struct snd_soc_dai_link_component *comp;
struct device_node *np = pdev->dev.of_node;
struct device_node *codec_np, *cpu_np;
struct device_node *bitclkmaster = NULL;
struct device_node *framemaster = NULL;
unsigned int dai_fmt;
int ret = 0;
if (!np) {
dev_err(&pdev->dev, "No device node supplied\n");
return -EINVAL;
}
snd_proto.dev = &pdev->dev;
ret = snd_soc_of_parse_card_name(&snd_proto, "model");
if (ret)
return ret;
dai = devm_kzalloc(&pdev->dev, sizeof(*dai), GFP_KERNEL);
if (!dai)
return -ENOMEM;
/* for cpus/codecs/platforms */
comp = devm_kzalloc(&pdev->dev, 3 * sizeof(*comp), GFP_KERNEL);
if (!comp)
return -ENOMEM;
snd_proto.dai_link = dai;
snd_proto.num_links = 1;
dai->cpus = &comp[0];
dai->num_cpus = 1;
dai->codecs = &comp[1];
dai->num_codecs = 1;
dai->platforms = &comp[2];
dai->num_platforms = 1;
dai->name = "WM8731";
dai->stream_name = "WM8731 HiFi";
dai->codecs->dai_name = "wm8731-hifi";
dai->init = &snd_proto_init;
codec_np = of_parse_phandle(np, "audio-codec", 0);
if (!codec_np) {
dev_err(&pdev->dev, "audio-codec node missing\n");
return -EINVAL;
}
dai->codecs->of_node = codec_np;
cpu_np = of_parse_phandle(np, "i2s-controller", 0);
if (!cpu_np) {
dev_err(&pdev->dev, "i2s-controller missing\n");
ret = -EINVAL;
goto put_codec_node;
}
dai->cpus->of_node = cpu_np;
dai->platforms->of_node = cpu_np;
dai_fmt = snd_soc_daifmt_parse_format(np, NULL);
snd_soc_daifmt_parse_clock_provider_as_phandle(np, NULL,
&bitclkmaster, &framemaster);
if (bitclkmaster != framemaster) {
dev_err(&pdev->dev, "Must be the same bitclock and frame master\n");
ret = -EINVAL;
goto put_cpu_node;
}
if (bitclkmaster) {
if (codec_np == bitclkmaster)
dai_fmt |= SND_SOC_DAIFMT_CBP_CFP;
else
dai_fmt |= SND_SOC_DAIFMT_CBC_CFC;
} else {
dai_fmt |= snd_soc_daifmt_parse_clock_provider_as_flag(np, NULL);
}
dai->dai_fmt = dai_fmt;
ret = snd_soc_register_card(&snd_proto);
if (ret)
dev_err_probe(&pdev->dev, ret,
"snd_soc_register_card() failed\n");
put_cpu_node:
of_node_put(bitclkmaster);
of_node_put(framemaster);
of_node_put(cpu_np);
put_codec_node:
of_node_put(codec_np);
return ret;
}
static void snd_proto_remove(struct platform_device *pdev)
{
snd_soc_unregister_card(&snd_proto);
}
static const struct of_device_id snd_proto_of_match[] = {
{ .compatible = "mikroe,mikroe-proto", },
{},
};
MODULE_DEVICE_TABLE(of, snd_proto_of_match);
static struct platform_driver snd_proto_driver = {
.driver = {
.name = "snd-mikroe-proto",
.of_match_table = snd_proto_of_match,
},
.probe = snd_proto_probe,
.remove_new = snd_proto_remove,
};
module_platform_driver(snd_proto_driver);
MODULE_AUTHOR("Florian Meier");
MODULE_DESCRIPTION("ASoC Driver for PROTO board (WM8731)");
MODULE_LICENSE("GPL");
| linux-master | sound/soc/atmel/mikroe-proto.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Atmel ALSA SoC Audio Class D Amplifier (CLASSD) driver
*
* Copyright (C) 2015 Atmel
*
* Author: Songjun Wu <[email protected]>
*/
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <sound/core.h>
#include <sound/dmaengine_pcm.h>
#include <sound/pcm_params.h>
#include <sound/tlv.h>
#include "atmel-classd.h"
struct atmel_classd_pdata {
bool non_overlap_enable;
int non_overlap_time;
int pwm_type;
const char *card_name;
};
struct atmel_classd {
dma_addr_t phy_base;
struct regmap *regmap;
struct clk *pclk;
struct clk *gclk;
struct device *dev;
int irq;
const struct atmel_classd_pdata *pdata;
};
#ifdef CONFIG_OF
static const struct of_device_id atmel_classd_of_match[] = {
{
.compatible = "atmel,sama5d2-classd",
}, {
/* sentinel */
}
};
MODULE_DEVICE_TABLE(of, atmel_classd_of_match);
static struct atmel_classd_pdata *atmel_classd_dt_init(struct device *dev)
{
struct device_node *np = dev->of_node;
struct atmel_classd_pdata *pdata;
const char *pwm_type_s;
int ret;
if (!np) {
dev_err(dev, "device node not found\n");
return ERR_PTR(-EINVAL);
}
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM);
ret = of_property_read_string(np, "atmel,pwm-type", &pwm_type_s);
if ((ret == 0) && (strcmp(pwm_type_s, "diff") == 0))
pdata->pwm_type = CLASSD_MR_PWMTYP_DIFF;
else
pdata->pwm_type = CLASSD_MR_PWMTYP_SINGLE;
ret = of_property_read_u32(np,
"atmel,non-overlap-time", &pdata->non_overlap_time);
if (ret)
pdata->non_overlap_enable = false;
else
pdata->non_overlap_enable = true;
ret = of_property_read_string(np, "atmel,model", &pdata->card_name);
if (ret)
pdata->card_name = "CLASSD";
return pdata;
}
#else
static inline struct atmel_classd_pdata *
atmel_classd_dt_init(struct device *dev)
{
return ERR_PTR(-EINVAL);
}
#endif
#define ATMEL_CLASSD_RATES (SNDRV_PCM_RATE_8000 \
| SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 \
| SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 \
| SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 \
| SNDRV_PCM_RATE_96000)
static const struct snd_pcm_hardware atmel_classd_hw = {
.info = SNDRV_PCM_INFO_MMAP
| SNDRV_PCM_INFO_MMAP_VALID
| SNDRV_PCM_INFO_INTERLEAVED
| SNDRV_PCM_INFO_RESUME
| SNDRV_PCM_INFO_PAUSE,
.formats = (SNDRV_PCM_FMTBIT_S16_LE),
.rates = ATMEL_CLASSD_RATES,
.rate_min = 8000,
.rate_max = 96000,
.channels_min = 1,
.channels_max = 2,
.buffer_bytes_max = 64 * 1024,
.period_bytes_min = 256,
.period_bytes_max = 32 * 1024,
.periods_min = 2,
.periods_max = 256,
};
#define ATMEL_CLASSD_PREALLOC_BUF_SIZE (64 * 1024)
/* cpu dai component */
static int atmel_classd_cpu_dai_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct atmel_classd *dd = snd_soc_card_get_drvdata(rtd->card);
int err;
regmap_write(dd->regmap, CLASSD_THR, 0x0);
err = clk_prepare_enable(dd->pclk);
if (err)
return err;
err = clk_prepare_enable(dd->gclk);
if (err) {
clk_disable_unprepare(dd->pclk);
return err;
}
return 0;
}
/* platform */
static int
atmel_classd_platform_configure_dma(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct dma_slave_config *slave_config)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct atmel_classd *dd = snd_soc_card_get_drvdata(rtd->card);
if (params_physical_width(params) != 16) {
dev_err(dd->dev,
"only supports 16-bit audio data\n");
return -EINVAL;
}
if (params_channels(params) == 1)
slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
else
slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
slave_config->direction = DMA_MEM_TO_DEV;
slave_config->dst_addr = dd->phy_base + CLASSD_THR;
slave_config->dst_maxburst = 1;
slave_config->src_maxburst = 1;
slave_config->device_fc = false;
return 0;
}
static const struct snd_dmaengine_pcm_config
atmel_classd_dmaengine_pcm_config = {
.prepare_slave_config = atmel_classd_platform_configure_dma,
.pcm_hardware = &atmel_classd_hw,
.prealloc_buffer_size = ATMEL_CLASSD_PREALLOC_BUF_SIZE,
};
/* codec */
static const char * const mono_mode_text[] = {
"mix", "sat", "left", "right"
};
static SOC_ENUM_SINGLE_DECL(classd_mono_mode_enum,
CLASSD_INTPMR, CLASSD_INTPMR_MONO_MODE_SHIFT,
mono_mode_text);
static const char * const eqcfg_text[] = {
"Treble-12dB", "Treble-6dB",
"Medium-8dB", "Medium-3dB",
"Bass-12dB", "Bass-6dB",
"0 dB",
"Bass+6dB", "Bass+12dB",
"Medium+3dB", "Medium+8dB",
"Treble+6dB", "Treble+12dB",
};
static const unsigned int eqcfg_value[] = {
CLASSD_INTPMR_EQCFG_T_CUT_12, CLASSD_INTPMR_EQCFG_T_CUT_6,
CLASSD_INTPMR_EQCFG_M_CUT_8, CLASSD_INTPMR_EQCFG_M_CUT_3,
CLASSD_INTPMR_EQCFG_B_CUT_12, CLASSD_INTPMR_EQCFG_B_CUT_6,
CLASSD_INTPMR_EQCFG_FLAT,
CLASSD_INTPMR_EQCFG_B_BOOST_6, CLASSD_INTPMR_EQCFG_B_BOOST_12,
CLASSD_INTPMR_EQCFG_M_BOOST_3, CLASSD_INTPMR_EQCFG_M_BOOST_8,
CLASSD_INTPMR_EQCFG_T_BOOST_6, CLASSD_INTPMR_EQCFG_T_BOOST_12,
};
static SOC_VALUE_ENUM_SINGLE_DECL(classd_eqcfg_enum,
CLASSD_INTPMR, CLASSD_INTPMR_EQCFG_SHIFT, 0xf,
eqcfg_text, eqcfg_value);
static const DECLARE_TLV_DB_SCALE(classd_digital_tlv, -7800, 100, 1);
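/*
 * For reference, DECLARE_TLV_DB_SCALE(-7800, 100, 1) exposes the
 * attenuators as 1 dB (100 * 0.01 dB) steps starting at -78 dB, with
 * the lowest step reported as mute; combined with the 78-step
 * "Playback Volume" control below this spans -78 dB to 0 dB.
 */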
static const struct snd_kcontrol_new atmel_classd_snd_controls[] = {
SOC_DOUBLE_TLV("Playback Volume", CLASSD_INTPMR,
CLASSD_INTPMR_ATTL_SHIFT, CLASSD_INTPMR_ATTR_SHIFT,
78, 1, classd_digital_tlv),
SOC_SINGLE("Deemphasis Switch", CLASSD_INTPMR,
CLASSD_INTPMR_DEEMP_SHIFT, 1, 0),
SOC_SINGLE("Mono Switch", CLASSD_INTPMR, CLASSD_INTPMR_MONO_SHIFT, 1, 0),
SOC_SINGLE("Swap Switch", CLASSD_INTPMR, CLASSD_INTPMR_SWAP_SHIFT, 1, 0),
SOC_ENUM("Mono Mode", classd_mono_mode_enum),
SOC_ENUM("EQ", classd_eqcfg_enum),
};
static const char * const pwm_type[] = {
"Single ended", "Differential"
};
static int atmel_classd_component_probe(struct snd_soc_component *component)
{
struct snd_soc_card *card = snd_soc_component_get_drvdata(component);
struct atmel_classd *dd = snd_soc_card_get_drvdata(card);
const struct atmel_classd_pdata *pdata = dd->pdata;
u32 mask, val;
mask = CLASSD_MR_PWMTYP_MASK;
val = pdata->pwm_type << CLASSD_MR_PWMTYP_SHIFT;
mask |= CLASSD_MR_NON_OVERLAP_MASK;
if (pdata->non_overlap_enable) {
val |= (CLASSD_MR_NON_OVERLAP_EN
<< CLASSD_MR_NON_OVERLAP_SHIFT);
mask |= CLASSD_MR_NOVR_VAL_MASK;
switch (pdata->non_overlap_time) {
case 5:
val |= (CLASSD_MR_NOVR_VAL_5NS
<< CLASSD_MR_NOVR_VAL_SHIFT);
break;
case 10:
val |= (CLASSD_MR_NOVR_VAL_10NS
<< CLASSD_MR_NOVR_VAL_SHIFT);
break;
case 15:
val |= (CLASSD_MR_NOVR_VAL_15NS
<< CLASSD_MR_NOVR_VAL_SHIFT);
break;
case 20:
val |= (CLASSD_MR_NOVR_VAL_20NS
<< CLASSD_MR_NOVR_VAL_SHIFT);
break;
default:
val |= (CLASSD_MR_NOVR_VAL_10NS
<< CLASSD_MR_NOVR_VAL_SHIFT);
dev_warn(component->dev,
"non-overlapping value %d is invalid, the default value 10 is specified\n",
pdata->non_overlap_time);
break;
}
}
snd_soc_component_update_bits(component, CLASSD_MR, mask, val);
dev_info(component->dev,
"PWM modulation type is %s, non-overlapping is %s\n",
pwm_type[pdata->pwm_type],
pdata->non_overlap_enable ? "enabled" : "disabled");
return 0;
}
static int atmel_classd_component_resume(struct snd_soc_component *component)
{
struct snd_soc_card *card = snd_soc_component_get_drvdata(component);
struct atmel_classd *dd = snd_soc_card_get_drvdata(card);
return regcache_sync(dd->regmap);
}
static int atmel_classd_cpu_dai_mute_stream(struct snd_soc_dai *cpu_dai,
int mute, int direction)
{
struct snd_soc_component *component = cpu_dai->component;
u32 mask, val;
mask = CLASSD_MR_LMUTE_MASK | CLASSD_MR_RMUTE_MASK;
if (mute)
val = mask;
else
val = 0;
snd_soc_component_update_bits(component, CLASSD_MR, mask, val);
return 0;
}
#define CLASSD_GCLK_RATE_11M2896_MPY_8 (112896 * 100 * 8)
#define CLASSD_GCLK_RATE_12M288_MPY_8 (12288 * 1000 * 8)
static struct {
int rate;
int sample_rate;
int dsp_clk;
unsigned long gclk_rate;
} const sample_rates[] = {
{ 8000, CLASSD_INTPMR_FRAME_8K,
CLASSD_INTPMR_DSP_CLK_FREQ_12M288, CLASSD_GCLK_RATE_12M288_MPY_8 },
{ 16000, CLASSD_INTPMR_FRAME_16K,
CLASSD_INTPMR_DSP_CLK_FREQ_12M288, CLASSD_GCLK_RATE_12M288_MPY_8 },
{ 32000, CLASSD_INTPMR_FRAME_32K,
CLASSD_INTPMR_DSP_CLK_FREQ_12M288, CLASSD_GCLK_RATE_12M288_MPY_8 },
{ 48000, CLASSD_INTPMR_FRAME_48K,
CLASSD_INTPMR_DSP_CLK_FREQ_12M288, CLASSD_GCLK_RATE_12M288_MPY_8 },
{ 96000, CLASSD_INTPMR_FRAME_96K,
CLASSD_INTPMR_DSP_CLK_FREQ_12M288, CLASSD_GCLK_RATE_12M288_MPY_8 },
{ 22050, CLASSD_INTPMR_FRAME_22K,
CLASSD_INTPMR_DSP_CLK_FREQ_11M2896, CLASSD_GCLK_RATE_11M2896_MPY_8 },
{ 44100, CLASSD_INTPMR_FRAME_44K,
CLASSD_INTPMR_DSP_CLK_FREQ_11M2896, CLASSD_GCLK_RATE_11M2896_MPY_8 },
{ 88200, CLASSD_INTPMR_FRAME_88K,
CLASSD_INTPMR_DSP_CLK_FREQ_11M2896, CLASSD_GCLK_RATE_11M2896_MPY_8 },
};
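/*
 * For reference, the gclk_rate column is eight times the selected DSP
 * clock: the 12.288 MHz entries request 98.304 MHz and the 11.2896 MHz
 * entries request 90.3168 MHz; hw_params() below picks the closest
 * sample-rate entry and reprograms GCLK to the matching rate.
 */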
static int
atmel_classd_cpu_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *cpu_dai)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct atmel_classd *dd = snd_soc_card_get_drvdata(rtd->card);
struct snd_soc_component *component = cpu_dai->component;
int fs;
int i, best, best_val, cur_val, ret;
u32 mask, val;
fs = params_rate(params);
best = 0;
best_val = abs(fs - sample_rates[0].rate);
for (i = 1; i < ARRAY_SIZE(sample_rates); i++) {
/* Closest match */
cur_val = abs(fs - sample_rates[i].rate);
if (cur_val < best_val) {
best = i;
best_val = cur_val;
}
}
dev_dbg(component->dev,
"Selected SAMPLE_RATE of %dHz, GCLK_RATE of %ldHz\n",
sample_rates[best].rate, sample_rates[best].gclk_rate);
clk_disable_unprepare(dd->gclk);
ret = clk_set_rate(dd->gclk, sample_rates[best].gclk_rate);
if (ret)
return ret;
mask = CLASSD_INTPMR_DSP_CLK_FREQ_MASK | CLASSD_INTPMR_FRAME_MASK;
val = (sample_rates[best].dsp_clk << CLASSD_INTPMR_DSP_CLK_FREQ_SHIFT)
| (sample_rates[best].sample_rate << CLASSD_INTPMR_FRAME_SHIFT);
snd_soc_component_update_bits(component, CLASSD_INTPMR, mask, val);
return clk_prepare_enable(dd->gclk);
}
static void
atmel_classd_cpu_dai_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct atmel_classd *dd = snd_soc_card_get_drvdata(rtd->card);
clk_disable_unprepare(dd->gclk);
}
static int atmel_classd_cpu_dai_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct snd_soc_component *component = cpu_dai->component;
snd_soc_component_update_bits(component, CLASSD_MR,
CLASSD_MR_LEN_MASK | CLASSD_MR_REN_MASK,
(CLASSD_MR_LEN_DIS << CLASSD_MR_LEN_SHIFT)
|(CLASSD_MR_REN_DIS << CLASSD_MR_REN_SHIFT));
return 0;
}
static int atmel_classd_cpu_dai_trigger(struct snd_pcm_substream *substream,
int cmd, struct snd_soc_dai *cpu_dai)
{
struct snd_soc_component *component = cpu_dai->component;
u32 mask, val;
mask = CLASSD_MR_LEN_MASK | CLASSD_MR_REN_MASK;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
val = mask;
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
val = (CLASSD_MR_LEN_DIS << CLASSD_MR_LEN_SHIFT)
| (CLASSD_MR_REN_DIS << CLASSD_MR_REN_SHIFT);
break;
default:
return -EINVAL;
}
snd_soc_component_update_bits(component, CLASSD_MR, mask, val);
return 0;
}
static const struct snd_soc_dai_ops atmel_classd_cpu_dai_ops = {
.startup = atmel_classd_cpu_dai_startup,
.shutdown = atmel_classd_cpu_dai_shutdown,
.mute_stream = atmel_classd_cpu_dai_mute_stream,
.hw_params = atmel_classd_cpu_dai_hw_params,
.prepare = atmel_classd_cpu_dai_prepare,
.trigger = atmel_classd_cpu_dai_trigger,
.no_capture_mute = 1,
};
static struct snd_soc_dai_driver atmel_classd_cpu_dai = {
.playback = {
.stream_name = "Playback",
.channels_min = 1,
.channels_max = 2,
.rates = ATMEL_CLASSD_RATES,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
.ops = &atmel_classd_cpu_dai_ops,
};
static const struct snd_soc_component_driver atmel_classd_cpu_dai_component = {
.name = "atmel-classd",
.probe = atmel_classd_component_probe,
.resume = atmel_classd_component_resume,
.controls = atmel_classd_snd_controls,
.num_controls = ARRAY_SIZE(atmel_classd_snd_controls),
.idle_bias_on = 1,
.use_pmdown_time = 1,
.legacy_dai_naming = 1,
};
/* ASoC sound card */
static int atmel_classd_asoc_card_init(struct device *dev,
struct snd_soc_card *card)
{
struct snd_soc_dai_link *dai_link;
struct atmel_classd *dd = snd_soc_card_get_drvdata(card);
struct snd_soc_dai_link_component *comp;
dai_link = devm_kzalloc(dev, sizeof(*dai_link), GFP_KERNEL);
if (!dai_link)
return -ENOMEM;
comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
if (!comp)
return -ENOMEM;
dai_link->cpus = comp;
dai_link->codecs = &asoc_dummy_dlc;
dai_link->num_cpus = 1;
dai_link->num_codecs = 1;
dai_link->name = "CLASSD";
dai_link->stream_name = "CLASSD PCM";
dai_link->cpus->dai_name = dev_name(dev);
card->dai_link = dai_link;
card->num_links = 1;
card->name = dd->pdata->card_name;
card->dev = dev;
return 0;
};
/* regmap configuration */
static const struct reg_default atmel_classd_reg_defaults[] = {
{ CLASSD_INTPMR, 0x00301212 },
};
#define ATMEL_CLASSD_REG_MAX 0xE4
static const struct regmap_config atmel_classd_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = ATMEL_CLASSD_REG_MAX,
.cache_type = REGCACHE_FLAT,
.reg_defaults = atmel_classd_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(atmel_classd_reg_defaults),
};
static int atmel_classd_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct atmel_classd *dd;
struct resource *res;
void __iomem *io_base;
const struct atmel_classd_pdata *pdata;
struct snd_soc_card *card;
int ret;
pdata = dev_get_platdata(dev);
if (!pdata) {
pdata = atmel_classd_dt_init(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
}
dd = devm_kzalloc(dev, sizeof(*dd), GFP_KERNEL);
if (!dd)
return -ENOMEM;
dd->pdata = pdata;
dd->irq = platform_get_irq(pdev, 0);
if (dd->irq < 0)
return dd->irq;
dd->pclk = devm_clk_get(dev, "pclk");
if (IS_ERR(dd->pclk)) {
ret = PTR_ERR(dd->pclk);
dev_err(dev, "failed to get peripheral clock: %d\n", ret);
return ret;
}
dd->gclk = devm_clk_get(dev, "gclk");
if (IS_ERR(dd->gclk)) {
ret = PTR_ERR(dd->gclk);
dev_err(dev, "failed to get GCK clock: %d\n", ret);
return ret;
}
io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(io_base))
return PTR_ERR(io_base);
dd->phy_base = res->start;
dd->dev = dev;
dd->regmap = devm_regmap_init_mmio(dev, io_base,
&atmel_classd_regmap_config);
if (IS_ERR(dd->regmap)) {
ret = PTR_ERR(dd->regmap);
dev_err(dev, "failed to init register map: %d\n", ret);
return ret;
}
ret = devm_snd_soc_register_component(dev,
&atmel_classd_cpu_dai_component,
&atmel_classd_cpu_dai, 1);
if (ret) {
dev_err(dev, "could not register CPU DAI: %d\n", ret);
return ret;
}
ret = devm_snd_dmaengine_pcm_register(dev,
&atmel_classd_dmaengine_pcm_config,
0);
if (ret) {
dev_err(dev, "could not register platform: %d\n", ret);
return ret;
}
/* register sound card */
card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
if (!card) {
ret = -ENOMEM;
goto unregister_codec;
}
snd_soc_card_set_drvdata(card, dd);
ret = atmel_classd_asoc_card_init(dev, card);
if (ret) {
dev_err(dev, "failed to init sound card\n");
goto unregister_codec;
}
ret = devm_snd_soc_register_card(dev, card);
if (ret) {
dev_err(dev, "failed to register sound card: %d\n", ret);
goto unregister_codec;
}
return 0;
unregister_codec:
return ret;
}
static struct platform_driver atmel_classd_driver = {
.driver = {
.name = "atmel-classd",
.of_match_table = of_match_ptr(atmel_classd_of_match),
.pm = &snd_soc_pm_ops,
},
.probe = atmel_classd_probe,
};
module_platform_driver(atmel_classd_driver);
MODULE_DESCRIPTION("Atmel ClassD driver under ALSA SoC architecture");
MODULE_AUTHOR("Songjun Wu <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | sound/soc/atmel/atmel-classd.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* atmel_ssc_dai.c -- ALSA SoC ATMEL SSC Audio Layer Platform driver
*
* Copyright (C) 2005 SAN People
* Copyright (C) 2008 Atmel
*
* Author: Sedji Gaouaou <[email protected]>
* ATMEL CORP.
*
* Based on at91-ssc.c by
* Frank Mandarino <[email protected]>
* Based on pxa2xx Platform drivers by
* Liam Girdwood <[email protected]>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/atmel_pdc.h>
#include <linux/atmel-ssc.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include "atmel-pcm.h"
#include "atmel_ssc_dai.h"
#define NUM_SSC_DEVICES 3
/*
* SSC PDC registers required by the PCM DMA engine.
*/
static struct atmel_pdc_regs pdc_tx_reg = {
.xpr = ATMEL_PDC_TPR,
.xcr = ATMEL_PDC_TCR,
.xnpr = ATMEL_PDC_TNPR,
.xncr = ATMEL_PDC_TNCR,
};
static struct atmel_pdc_regs pdc_rx_reg = {
.xpr = ATMEL_PDC_RPR,
.xcr = ATMEL_PDC_RCR,
.xnpr = ATMEL_PDC_RNPR,
.xncr = ATMEL_PDC_RNCR,
};
/*
* SSC & PDC status bits for transmit and receive.
*/
static struct atmel_ssc_mask ssc_tx_mask = {
.ssc_enable = SSC_BIT(CR_TXEN),
.ssc_disable = SSC_BIT(CR_TXDIS),
.ssc_endx = SSC_BIT(SR_ENDTX),
.ssc_endbuf = SSC_BIT(SR_TXBUFE),
.ssc_error = SSC_BIT(SR_OVRUN),
.pdc_enable = ATMEL_PDC_TXTEN,
.pdc_disable = ATMEL_PDC_TXTDIS,
};
static struct atmel_ssc_mask ssc_rx_mask = {
.ssc_enable = SSC_BIT(CR_RXEN),
.ssc_disable = SSC_BIT(CR_RXDIS),
.ssc_endx = SSC_BIT(SR_ENDRX),
.ssc_endbuf = SSC_BIT(SR_RXBUFF),
.ssc_error = SSC_BIT(SR_OVRUN),
.pdc_enable = ATMEL_PDC_RXTEN,
.pdc_disable = ATMEL_PDC_RXTDIS,
};
/*
* DMA parameters.
*/
static struct atmel_pcm_dma_params ssc_dma_params[NUM_SSC_DEVICES][2] = {
{{
.name = "SSC0 PCM out",
.pdc = &pdc_tx_reg,
.mask = &ssc_tx_mask,
},
{
.name = "SSC0 PCM in",
.pdc = &pdc_rx_reg,
.mask = &ssc_rx_mask,
} },
{{
.name = "SSC1 PCM out",
.pdc = &pdc_tx_reg,
.mask = &ssc_tx_mask,
},
{
.name = "SSC1 PCM in",
.pdc = &pdc_rx_reg,
.mask = &ssc_rx_mask,
} },
{{
.name = "SSC2 PCM out",
.pdc = &pdc_tx_reg,
.mask = &ssc_tx_mask,
},
{
.name = "SSC2 PCM in",
.pdc = &pdc_rx_reg,
.mask = &ssc_rx_mask,
} },
};
static struct atmel_ssc_info ssc_info[NUM_SSC_DEVICES] = {
{
.name = "ssc0",
.dir_mask = SSC_DIR_MASK_UNUSED,
.initialized = 0,
},
{
.name = "ssc1",
.dir_mask = SSC_DIR_MASK_UNUSED,
.initialized = 0,
},
{
.name = "ssc2",
.dir_mask = SSC_DIR_MASK_UNUSED,
.initialized = 0,
},
};
/*
* SSC interrupt handler. Passes PDC interrupts to the DMA
* interrupt handler in the PCM driver.
*/
static irqreturn_t atmel_ssc_interrupt(int irq, void *dev_id)
{
struct atmel_ssc_info *ssc_p = dev_id;
struct atmel_pcm_dma_params *dma_params;
u32 ssc_sr;
u32 ssc_substream_mask;
int i;
ssc_sr = (unsigned long)ssc_readl(ssc_p->ssc->regs, SR)
& (unsigned long)ssc_readl(ssc_p->ssc->regs, IMR);
/*
* Loop through the substreams attached to this SSC. If
* a DMA-related interrupt occurred on that substream, call
* the DMA interrupt handler function, if one has been
* registered in the dma_params structure by the PCM driver.
*/
for (i = 0; i < ARRAY_SIZE(ssc_p->dma_params); i++) {
dma_params = ssc_p->dma_params[i];
if ((dma_params != NULL) &&
(dma_params->dma_intr_handler != NULL)) {
ssc_substream_mask = (dma_params->mask->ssc_endx |
dma_params->mask->ssc_endbuf);
if (ssc_sr & ssc_substream_mask) {
dma_params->dma_intr_handler(ssc_sr,
dma_params->
substream);
}
}
}
return IRQ_HANDLED;
}
/*
* When the bit clock is input, limit the maximum rate according to the
* Serial Clock Ratio Considerations section from the SSC documentation:
*
* The Transmitter and the Receiver can be programmed to operate
* with the clock signals provided on either the TK or RK pins.
* This allows the SSC to support many slave-mode data transfers.
* In this case, the maximum clock speed allowed on the RK pin is:
* - Peripheral clock divided by 2 if Receiver Frame Synchro is input
* - Peripheral clock divided by 3 if Receiver Frame Synchro is output
* In addition, the maximum clock speed allowed on the TK pin is:
* - Peripheral clock divided by 6 if Transmit Frame Synchro is input
* - Peripheral clock divided by 2 if Transmit Frame Synchro is output
*
* When the bit clock is output, limit the rate according to the
* SSC divider restrictions.
*/
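/*
 * Worked example (hypothetical 132 MHz peripheral clock, 64-bit frame
 * of 2 x 32-bit slots): the ceiling computed below is
 * mck / mck_div / frame_size, i.e. 132 MHz / 2 / 64 ~= 1.03 MHz in the
 * default case, ~687 kHz when the /3 RK-pin limit applies, and
 * ~344 kHz when the /6 TK-pin limit applies.
 */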
static int atmel_ssc_hw_rule_rate(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct atmel_ssc_info *ssc_p = rule->private;
struct ssc_device *ssc = ssc_p->ssc;
struct snd_interval *i = hw_param_interval(params, rule->var);
struct snd_interval t;
struct snd_ratnum r = {
.den_min = 1,
.den_max = 4095,
.den_step = 1,
};
unsigned int num = 0, den = 0;
int frame_size;
int mck_div = 2;
int ret;
frame_size = snd_soc_params_to_frame_size(params);
if (frame_size < 0)
return frame_size;
switch (ssc_p->daifmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
case SND_SOC_DAIFMT_BC_FP:
if ((ssc_p->dir_mask & SSC_DIR_MASK_CAPTURE)
&& ssc->clk_from_rk_pin)
/* Receiver Frame Synchro (i.e. capture)
* is output (format is _CFS) and the RK pin
* is used for input (format is _CBM_).
*/
mck_div = 3;
break;
case SND_SOC_DAIFMT_BC_FC:
if ((ssc_p->dir_mask & SSC_DIR_MASK_PLAYBACK)
&& !ssc->clk_from_rk_pin)
/* Transmit Frame Synchro (i.e. playback)
* is input (format is _CFM) and the TK pin
* is used for input (format _CBM_ but not
* using the RK pin).
*/
mck_div = 6;
break;
}
switch (ssc_p->daifmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
case SND_SOC_DAIFMT_BP_FP:
r.num = ssc_p->mck_rate / mck_div / frame_size;
ret = snd_interval_ratnum(i, 1, &r, &num, &den);
if (ret >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
params->rate_num = num;
params->rate_den = den;
}
break;
case SND_SOC_DAIFMT_BC_FP:
case SND_SOC_DAIFMT_BC_FC:
t.min = 8000;
t.max = ssc_p->mck_rate / mck_div / frame_size;
t.openmin = t.openmax = 0;
t.integer = 0;
ret = snd_interval_refine(i, &t);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
/*-------------------------------------------------------------------------*\
* DAI functions
\*-------------------------------------------------------------------------*/
/*
 * Startup. Only one substream is allowed in each direction.
*/
static int atmel_ssc_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct platform_device *pdev = to_platform_device(dai->dev);
struct atmel_ssc_info *ssc_p = &ssc_info[pdev->id];
struct atmel_pcm_dma_params *dma_params;
int dir, dir_mask;
int ret;
pr_debug("atmel_ssc_startup: SSC_SR=0x%x\n",
ssc_readl(ssc_p->ssc->regs, SR));
/* Enable PMC peripheral clock for this SSC */
pr_debug("atmel_ssc_dai: Starting clock\n");
ret = clk_enable(ssc_p->ssc->clk);
if (ret)
return ret;
ssc_p->mck_rate = clk_get_rate(ssc_p->ssc->clk);
/* Reset the SSC unless initialized to keep it in a clean state */
if (!ssc_p->initialized)
ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
dir = 0;
dir_mask = SSC_DIR_MASK_PLAYBACK;
} else {
dir = 1;
dir_mask = SSC_DIR_MASK_CAPTURE;
}
ret = snd_pcm_hw_rule_add(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
atmel_ssc_hw_rule_rate,
ssc_p,
SNDRV_PCM_HW_PARAM_FRAME_BITS,
SNDRV_PCM_HW_PARAM_CHANNELS, -1);
if (ret < 0) {
dev_err(dai->dev, "Failed to specify rate rule: %d\n", ret);
return ret;
}
dma_params = &ssc_dma_params[pdev->id][dir];
dma_params->ssc = ssc_p->ssc;
dma_params->substream = substream;
ssc_p->dma_params[dir] = dma_params;
snd_soc_dai_set_dma_data(dai, substream, dma_params);
if (ssc_p->dir_mask & dir_mask)
return -EBUSY;
ssc_p->dir_mask |= dir_mask;
return 0;
}
/*
* Shutdown. Clear DMA parameters and shutdown the SSC if there
* are no other substreams open.
*/
static void atmel_ssc_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct platform_device *pdev = to_platform_device(dai->dev);
struct atmel_ssc_info *ssc_p = &ssc_info[pdev->id];
struct atmel_pcm_dma_params *dma_params;
int dir, dir_mask;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
dir = 0;
else
dir = 1;
dma_params = ssc_p->dma_params[dir];
if (dma_params != NULL) {
dma_params->ssc = NULL;
dma_params->substream = NULL;
ssc_p->dma_params[dir] = NULL;
}
dir_mask = 1 << dir;
ssc_p->dir_mask &= ~dir_mask;
if (!ssc_p->dir_mask) {
if (ssc_p->initialized) {
free_irq(ssc_p->ssc->irq, ssc_p);
ssc_p->initialized = 0;
}
/* Reset the SSC */
ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
/* Clear the SSC dividers */
ssc_p->cmr_div = ssc_p->tcmr_period = ssc_p->rcmr_period = 0;
ssc_p->forced_divider = 0;
}
/* Shutdown the SSC clock. */
pr_debug("atmel_ssc_dai: Stopping clock\n");
clk_disable(ssc_p->ssc->clk);
}
/*
* Record the DAI format for use in hw_params().
*/
static int atmel_ssc_set_dai_fmt(struct snd_soc_dai *cpu_dai,
unsigned int fmt)
{
struct platform_device *pdev = to_platform_device(cpu_dai->dev);
struct atmel_ssc_info *ssc_p = &ssc_info[pdev->id];
ssc_p->daifmt = fmt;
return 0;
}
/*
* Record SSC clock dividers for use in hw_params().
*/
static int atmel_ssc_set_dai_clkdiv(struct snd_soc_dai *cpu_dai,
int div_id, int div)
{
struct platform_device *pdev = to_platform_device(cpu_dai->dev);
struct atmel_ssc_info *ssc_p = &ssc_info[pdev->id];
switch (div_id) {
case ATMEL_SSC_CMR_DIV:
/*
* The same master clock divider is used for both
* transmit and receive, so if a value has already
* been set, it must match this value.
*/
if (ssc_p->dir_mask !=
(SSC_DIR_MASK_PLAYBACK | SSC_DIR_MASK_CAPTURE))
ssc_p->cmr_div = div;
else if (ssc_p->cmr_div == 0)
ssc_p->cmr_div = div;
else
if (div != ssc_p->cmr_div)
return -EBUSY;
ssc_p->forced_divider |= BIT(ATMEL_SSC_CMR_DIV);
break;
case ATMEL_SSC_TCMR_PERIOD:
ssc_p->tcmr_period = div;
ssc_p->forced_divider |= BIT(ATMEL_SSC_TCMR_PERIOD);
break;
case ATMEL_SSC_RCMR_PERIOD:
ssc_p->rcmr_period = div;
ssc_p->forced_divider |= BIT(ATMEL_SSC_RCMR_PERIOD);
break;
default:
return -EINVAL;
}
return 0;
}
/* Is the cpu-dai master of the frame clock? */
static int atmel_ssc_cfs(struct atmel_ssc_info *ssc_p)
{
switch (ssc_p->daifmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
case SND_SOC_DAIFMT_BC_FP:
case SND_SOC_DAIFMT_BP_FP:
return 1;
}
return 0;
}
/* Is the cpu-dai master of the bit clock? */
static int atmel_ssc_cbs(struct atmel_ssc_info *ssc_p)
{
switch (ssc_p->daifmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
case SND_SOC_DAIFMT_BP_FC:
case SND_SOC_DAIFMT_BP_FP:
return 1;
}
return 0;
}
/*
* Configure the SSC.
*/
static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct platform_device *pdev = to_platform_device(dai->dev);
int id = pdev->id;
struct atmel_ssc_info *ssc_p = &ssc_info[id];
struct ssc_device *ssc = ssc_p->ssc;
struct atmel_pcm_dma_params *dma_params;
int dir, channels, bits;
u32 tfmr, rfmr, tcmr, rcmr;
int ret;
int fslen, fslen_ext, fs_osync, fs_edge;
u32 cmr_div;
u32 tcmr_period;
u32 rcmr_period;
/*
* Currently, there is only one set of dma params for
* each direction. If more are added, this code will
* have to be changed to select the proper set.
*/
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
dir = 0;
else
dir = 1;
/*
 * If the cpu dai should provide BCLK, but no one has provided the
* divider needed for that to work, fall back to something sensible.
*/
cmr_div = ssc_p->cmr_div;
if (!(ssc_p->forced_divider & BIT(ATMEL_SSC_CMR_DIV)) &&
atmel_ssc_cbs(ssc_p)) {
int bclk_rate = snd_soc_params_to_bclk(params);
if (bclk_rate < 0) {
dev_err(dai->dev, "unable to calculate cmr_div: %d\n",
bclk_rate);
return bclk_rate;
}
cmr_div = DIV_ROUND_CLOSEST(ssc_p->mck_rate, 2 * bclk_rate);
}
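/*
 * Worked example (hypothetical clock): with a 100 MHz peripheral clock
 * and a 16-bit stereo 48 kHz stream (BCLK = 48000 * 32 = 1.536 MHz),
 * cmr_div = DIV_ROUND_CLOSEST(100000000, 2 * 1536000) = 33; the SSC
 * divided clock being MCK / (2 * DIV), the actual bit clock is then
 * 100 MHz / 66 ~= 1.515 MHz.
 */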
/*
 * If the cpu dai should provide LRCLK, but no one has provided the
* dividers needed for that to work, fall back to something sensible.
*/
tcmr_period = ssc_p->tcmr_period;
rcmr_period = ssc_p->rcmr_period;
if (atmel_ssc_cfs(ssc_p)) {
int frame_size = snd_soc_params_to_frame_size(params);
if (frame_size < 0) {
dev_err(dai->dev,
"unable to calculate tx/rx cmr_period: %d\n",
frame_size);
return frame_size;
}
if (!(ssc_p->forced_divider & BIT(ATMEL_SSC_TCMR_PERIOD)))
tcmr_period = frame_size / 2 - 1;
if (!(ssc_p->forced_divider & BIT(ATMEL_SSC_RCMR_PERIOD)))
rcmr_period = frame_size / 2 - 1;
}
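/*
 * Worked example: a 16-bit stereo stream has a 32-bit frame, so the
 * fallback programs tcmr_period = rcmr_period = 32 / 2 - 1 = 15,
 * giving a full frame of 2 * (15 + 1) = 32 bit clocks per the SSC
 * PERIOD encoding.
 */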
dma_params = ssc_p->dma_params[dir];
channels = params_channels(params);
/*
* Determine sample size in bits and the PDC increment.
*/
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S8:
bits = 8;
dma_params->pdc_xfer_size = 1;
break;
case SNDRV_PCM_FORMAT_S16_LE:
bits = 16;
dma_params->pdc_xfer_size = 2;
break;
case SNDRV_PCM_FORMAT_S24_LE:
bits = 24;
dma_params->pdc_xfer_size = 4;
break;
case SNDRV_PCM_FORMAT_S32_LE:
bits = 32;
dma_params->pdc_xfer_size = 4;
break;
default:
printk(KERN_WARNING "atmel_ssc_dai: unsupported PCM format");
return -EINVAL;
}
/*
* Compute SSC register settings.
*/
fslen_ext = (bits - 1) / 16;
fslen = (bits - 1) % 16;
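/*
 * Worked example: the frame-sync length is encoded as (length - 1)
 * split across FSLEN and FSLEN_EXT in 16-bit chunks, so 16-bit samples
 * give fslen = 15, fslen_ext = 0, while 24-bit samples give fslen = 7,
 * fslen_ext = 1 (16 + 7 + 1 = 24 bit clocks). Devices without
 * has_fslen_ext cannot express the latter, which the check further
 * down rejects.
 */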
switch (ssc_p->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_LEFT_J:
fs_osync = SSC_FSOS_POSITIVE;
fs_edge = SSC_START_RISING_RF;
rcmr = SSC_BF(RCMR_STTDLY, 0);
tcmr = SSC_BF(TCMR_STTDLY, 0);
break;
case SND_SOC_DAIFMT_I2S:
fs_osync = SSC_FSOS_NEGATIVE;
fs_edge = SSC_START_FALLING_RF;
rcmr = SSC_BF(RCMR_STTDLY, 1);
tcmr = SSC_BF(TCMR_STTDLY, 1);
break;
case SND_SOC_DAIFMT_DSP_A:
/*
* DSP/PCM Mode A format
*
* Data is transferred on first BCLK after LRC pulse rising
 * edge. If stereo, the right channel data is contiguous with
* the left channel data.
*/
fs_osync = SSC_FSOS_POSITIVE;
fs_edge = SSC_START_RISING_RF;
fslen = fslen_ext = 0;
rcmr = SSC_BF(RCMR_STTDLY, 1);
tcmr = SSC_BF(TCMR_STTDLY, 1);
break;
default:
printk(KERN_WARNING "atmel_ssc_dai: unsupported DAI format 0x%x\n",
ssc_p->daifmt);
return -EINVAL;
}
if (!atmel_ssc_cfs(ssc_p)) {
fslen = fslen_ext = 0;
rcmr_period = tcmr_period = 0;
fs_osync = SSC_FSOS_NONE;
}
rcmr |= SSC_BF(RCMR_START, fs_edge);
tcmr |= SSC_BF(TCMR_START, fs_edge);
if (atmel_ssc_cbs(ssc_p)) {
/*
* SSC provides BCLK
*
* The SSC transmit and receive clocks are generated from the
* MCK divider, and the BCLK signal is output
* on the SSC TK line.
*/
rcmr |= SSC_BF(RCMR_CKS, SSC_CKS_DIV)
| SSC_BF(RCMR_CKO, SSC_CKO_NONE);
tcmr |= SSC_BF(TCMR_CKS, SSC_CKS_DIV)
| SSC_BF(TCMR_CKO, SSC_CKO_CONTINUOUS);
} else {
rcmr |= SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ?
SSC_CKS_PIN : SSC_CKS_CLOCK)
| SSC_BF(RCMR_CKO, SSC_CKO_NONE);
tcmr |= SSC_BF(TCMR_CKS, ssc->clk_from_rk_pin ?
SSC_CKS_CLOCK : SSC_CKS_PIN)
| SSC_BF(TCMR_CKO, SSC_CKO_NONE);
}
rcmr |= SSC_BF(RCMR_PERIOD, rcmr_period)
| SSC_BF(RCMR_CKI, SSC_CKI_RISING);
tcmr |= SSC_BF(TCMR_PERIOD, tcmr_period)
| SSC_BF(TCMR_CKI, SSC_CKI_FALLING);
rfmr = SSC_BF(RFMR_FSLEN_EXT, fslen_ext)
| SSC_BF(RFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
| SSC_BF(RFMR_FSOS, fs_osync)
| SSC_BF(RFMR_FSLEN, fslen)
| SSC_BF(RFMR_DATNB, (channels - 1))
| SSC_BIT(RFMR_MSBF)
| SSC_BF(RFMR_LOOP, 0)
| SSC_BF(RFMR_DATLEN, (bits - 1));
tfmr = SSC_BF(TFMR_FSLEN_EXT, fslen_ext)
| SSC_BF(TFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
| SSC_BF(TFMR_FSDEN, 0)
| SSC_BF(TFMR_FSOS, fs_osync)
| SSC_BF(TFMR_FSLEN, fslen)
| SSC_BF(TFMR_DATNB, (channels - 1))
| SSC_BIT(TFMR_MSBF)
| SSC_BF(TFMR_DATDEF, 0)
| SSC_BF(TFMR_DATLEN, (bits - 1));
if (fslen_ext && !ssc->pdata->has_fslen_ext) {
dev_err(dai->dev, "sample size %d is too large for SSC device\n",
bits);
return -EINVAL;
}
pr_debug("atmel_ssc_hw_params: "
"RCMR=%08x RFMR=%08x TCMR=%08x TFMR=%08x\n",
rcmr, rfmr, tcmr, tfmr);
if (!ssc_p->initialized) {
if (!ssc_p->ssc->pdata->use_dma) {
ssc_writel(ssc_p->ssc->regs, PDC_RPR, 0);
ssc_writel(ssc_p->ssc->regs, PDC_RCR, 0);
ssc_writel(ssc_p->ssc->regs, PDC_RNPR, 0);
ssc_writel(ssc_p->ssc->regs, PDC_RNCR, 0);
ssc_writel(ssc_p->ssc->regs, PDC_TPR, 0);
ssc_writel(ssc_p->ssc->regs, PDC_TCR, 0);
ssc_writel(ssc_p->ssc->regs, PDC_TNPR, 0);
ssc_writel(ssc_p->ssc->regs, PDC_TNCR, 0);
}
ret = request_irq(ssc_p->ssc->irq, atmel_ssc_interrupt, 0,
ssc_p->name, ssc_p);
if (ret < 0) {
printk(KERN_WARNING
"atmel_ssc_dai: request_irq failure\n");
pr_debug("Atmel_ssc_dai: Stopping clock\n");
clk_disable(ssc_p->ssc->clk);
return ret;
}
ssc_p->initialized = 1;
}
/* set SSC clock mode register */
ssc_writel(ssc_p->ssc->regs, CMR, cmr_div);
/* set receive clock mode and format */
ssc_writel(ssc_p->ssc->regs, RCMR, rcmr);
ssc_writel(ssc_p->ssc->regs, RFMR, rfmr);
/* set transmit clock mode and format */
ssc_writel(ssc_p->ssc->regs, TCMR, tcmr);
ssc_writel(ssc_p->ssc->regs, TFMR, tfmr);
pr_debug("atmel_ssc_dai,hw_params: SSC initialized\n");
return 0;
}
static int atmel_ssc_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct platform_device *pdev = to_platform_device(dai->dev);
struct atmel_ssc_info *ssc_p = &ssc_info[pdev->id];
struct atmel_pcm_dma_params *dma_params;
int dir;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
dir = 0;
else
dir = 1;
dma_params = ssc_p->dma_params[dir];
ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable);
ssc_writel(ssc_p->ssc->regs, IDR, dma_params->mask->ssc_error);
pr_debug("%s enabled SSC_SR=0x%08x\n",
dir ? "receive" : "transmit",
ssc_readl(ssc_p->ssc->regs, SR));
return 0;
}
static int atmel_ssc_trigger(struct snd_pcm_substream *substream,
int cmd, struct snd_soc_dai *dai)
{
struct platform_device *pdev = to_platform_device(dai->dev);
struct atmel_ssc_info *ssc_p = &ssc_info[pdev->id];
struct atmel_pcm_dma_params *dma_params;
int dir;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
dir = 0;
else
dir = 1;
dma_params = ssc_p->dma_params[dir];
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable);
break;
default:
ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable);
break;
}
return 0;
}
static int atmel_ssc_suspend(struct snd_soc_component *component)
{
struct atmel_ssc_info *ssc_p;
struct platform_device *pdev = to_platform_device(component->dev);
if (!snd_soc_component_active(component))
return 0;
ssc_p = &ssc_info[pdev->id];
/* Save the status register before disabling transmit and receive */
ssc_p->ssc_state.ssc_sr = ssc_readl(ssc_p->ssc->regs, SR);
ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_TXDIS) | SSC_BIT(CR_RXDIS));
/* Save the current interrupt mask, then disable unmasked interrupts */
ssc_p->ssc_state.ssc_imr = ssc_readl(ssc_p->ssc->regs, IMR);
ssc_writel(ssc_p->ssc->regs, IDR, ssc_p->ssc_state.ssc_imr);
ssc_p->ssc_state.ssc_cmr = ssc_readl(ssc_p->ssc->regs, CMR);
ssc_p->ssc_state.ssc_rcmr = ssc_readl(ssc_p->ssc->regs, RCMR);
ssc_p->ssc_state.ssc_rfmr = ssc_readl(ssc_p->ssc->regs, RFMR);
ssc_p->ssc_state.ssc_tcmr = ssc_readl(ssc_p->ssc->regs, TCMR);
ssc_p->ssc_state.ssc_tfmr = ssc_readl(ssc_p->ssc->regs, TFMR);
return 0;
}
static int atmel_ssc_resume(struct snd_soc_component *component)
{
struct atmel_ssc_info *ssc_p;
struct platform_device *pdev = to_platform_device(component->dev);
u32 cr;
if (!snd_soc_component_active(component))
return 0;
ssc_p = &ssc_info[pdev->id];
/* restore SSC register settings */
ssc_writel(ssc_p->ssc->regs, TFMR, ssc_p->ssc_state.ssc_tfmr);
ssc_writel(ssc_p->ssc->regs, TCMR, ssc_p->ssc_state.ssc_tcmr);
ssc_writel(ssc_p->ssc->regs, RFMR, ssc_p->ssc_state.ssc_rfmr);
ssc_writel(ssc_p->ssc->regs, RCMR, ssc_p->ssc_state.ssc_rcmr);
ssc_writel(ssc_p->ssc->regs, CMR, ssc_p->ssc_state.ssc_cmr);
/* re-enable interrupts */
ssc_writel(ssc_p->ssc->regs, IER, ssc_p->ssc_state.ssc_imr);
/* Re-enable receive and transmit as appropriate */
cr = 0;
cr |=
(ssc_p->ssc_state.ssc_sr & SSC_BIT(SR_RXEN)) ? SSC_BIT(CR_RXEN) : 0;
cr |=
(ssc_p->ssc_state.ssc_sr & SSC_BIT(SR_TXEN)) ? SSC_BIT(CR_TXEN) : 0;
ssc_writel(ssc_p->ssc->regs, CR, cr);
return 0;
}
#define ATMEL_SSC_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |\
SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
static const struct snd_soc_dai_ops atmel_ssc_dai_ops = {
.startup = atmel_ssc_startup,
.shutdown = atmel_ssc_shutdown,
.prepare = atmel_ssc_prepare,
.trigger = atmel_ssc_trigger,
.hw_params = atmel_ssc_hw_params,
.set_fmt = atmel_ssc_set_dai_fmt,
.set_clkdiv = atmel_ssc_set_dai_clkdiv,
};
static struct snd_soc_dai_driver atmel_ssc_dai = {
.playback = {
.channels_min = 1,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_CONTINUOUS,
.rate_min = 8000,
.rate_max = 384000,
.formats = ATMEL_SSC_FORMATS,},
.capture = {
.channels_min = 1,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_CONTINUOUS,
.rate_min = 8000,
.rate_max = 384000,
.formats = ATMEL_SSC_FORMATS,},
.ops = &atmel_ssc_dai_ops,
};
static const struct snd_soc_component_driver atmel_ssc_component = {
.name = "atmel-ssc",
.suspend = pm_ptr(atmel_ssc_suspend),
.resume = pm_ptr(atmel_ssc_resume),
.legacy_dai_naming = 1,
};
static int asoc_ssc_init(struct device *dev)
{
struct ssc_device *ssc = dev_get_drvdata(dev);
int ret;
ret = devm_snd_soc_register_component(dev, &atmel_ssc_component,
&atmel_ssc_dai, 1);
if (ret) {
dev_err(dev, "Could not register DAI: %d\n", ret);
return ret;
}
if (ssc->pdata->use_dma)
ret = atmel_pcm_dma_platform_register(dev);
else
ret = atmel_pcm_pdc_platform_register(dev);
if (ret) {
dev_err(dev, "Could not register PCM: %d\n", ret);
return ret;
}
return 0;
}
/**
* atmel_ssc_set_audio - Allocate the specified SSC for audio use.
 * @ssc_id: SSC ID in the range [0, NUM_SSC_DEVICES)
*/
int atmel_ssc_set_audio(int ssc_id)
{
struct ssc_device *ssc;
/* Request the SSC so the ASoC DAI device can be parented off it */
ssc = ssc_request(ssc_id);
if (IS_ERR(ssc)) {
pr_err("Unable to parent ASoC SSC DAI on SSC: %ld\n",
PTR_ERR(ssc));
return PTR_ERR(ssc);
} else {
ssc_info[ssc_id].ssc = ssc;
}
return asoc_ssc_init(&ssc->pdev->dev);
}
EXPORT_SYMBOL_GPL(atmel_ssc_set_audio);
void atmel_ssc_put_audio(int ssc_id)
{
struct ssc_device *ssc = ssc_info[ssc_id].ssc;
ssc_free(ssc);
}
EXPORT_SYMBOL_GPL(atmel_ssc_put_audio);
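/*
 * Typical machine-driver pairing (a minimal sketch; see the WM8731 and
 * WM8904 card drivers earlier in this file set):
 *
 *	ret = atmel_ssc_set_audio(ssc_id);
 *	if (ret)
 *		return ret;
 *	ret = snd_soc_register_card(card);
 *	if (ret)
 *		atmel_ssc_put_audio(ssc_id);
 */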
/* Module information */
MODULE_AUTHOR("Sedji Gaouaou, [email protected], www.atmel.com");
MODULE_DESCRIPTION("ATMEL SSC ASoC Interface");
MODULE_LICENSE("GPL");
| linux-master | sound/soc/atmel/atmel_ssc_dai.c |
// SPDX-License-Identifier: GPL-2.0
//
// Driver for Microchip S/PDIF TX Controller
//
// Copyright (C) 2020 Microchip Technology Inc. and its subsidiaries
//
// Author: Codrin Ciubotariu <[email protected]>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <sound/asoundef.h>
#include <sound/dmaengine_pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
/*
* ---- S/PDIF Transmitter Controller Register map ----
*/
#define SPDIFTX_CR 0x00 /* Control Register */
#define SPDIFTX_MR 0x04 /* Mode Register */
#define SPDIFTX_CDR 0x0C /* Common Data Register */
#define SPDIFTX_IER 0x14 /* Interrupt Enable Register */
#define SPDIFTX_IDR 0x18 /* Interrupt Disable Register */
#define SPDIFTX_IMR 0x1C /* Interrupt Mask Register */
#define SPDIFTX_ISR 0x20 /* Interrupt Status Register */
#define SPDIFTX_CH1UD(reg) (0x50 + (reg) * 4) /* User Data 1 Register x */
#define SPDIFTX_CH1S(reg) (0x80 + (reg) * 4) /* Channel Status 1 Register x */
#define SPDIFTX_VERSION 0xF0
/*
* ---- Control Register (Write-only) ----
*/
#define SPDIFTX_CR_SWRST BIT(0) /* Software Reset */
#define SPDIFTX_CR_FCLR BIT(1) /* FIFO clear */
/*
* ---- Mode Register (Read/Write) ----
*/
/* Transmit Enable */
#define SPDIFTX_MR_TXEN_MASK GENMASK(0, 0)
#define SPDIFTX_MR_TXEN_DISABLE (0 << 0)
#define SPDIFTX_MR_TXEN_ENABLE (1 << 0)
/* Multichannel Transfer */
#define SPDIFTX_MR_MULTICH_MASK GENMASK(1, 1)
#define SPDIFTX_MR_MULTICH_MONO (0 << 1)
#define SPDIFTX_MR_MULTICH_DUAL (1 << 1)
/* Data Word Endian Mode */
#define SPDIFTX_MR_ENDIAN_MASK GENMASK(2, 2)
#define SPDIFTX_MR_ENDIAN_LITTLE (0 << 2)
#define SPDIFTX_MR_ENDIAN_BIG (1 << 2)
/* Data Justification */
#define SPDIFTX_MR_JUSTIFY_MASK GENMASK(3, 3)
#define SPDIFTX_MR_JUSTIFY_LSB (0 << 3)
#define SPDIFTX_MR_JUSTIFY_MSB (1 << 3)
/* Common Audio Register Transfer Mode */
#define SPDIFTX_MR_CMODE_MASK GENMASK(5, 4)
#define SPDIFTX_MR_CMODE_INDEX_ACCESS (0 << 4)
#define SPDIFTX_MR_CMODE_TOGGLE_ACCESS (1 << 4)
#define SPDIFTX_MR_CMODE_INTERLVD_ACCESS (2 << 4)
/* Valid Bits per Sample */
#define SPDIFTX_MR_VBPS_MASK GENMASK(13, 8)
/* Chunk Size */
#define SPDIFTX_MR_CHUNK_MASK GENMASK(19, 16)
/* Validity Bits for Channels 1 and 2 */
#define SPDIFTX_MR_VALID1 BIT(24)
#define SPDIFTX_MR_VALID2 BIT(25)
/* Disable Null Frame on underrun */
#define SPDIFTX_MR_DNFR_MASK GENMASK(27, 27)
#define SPDIFTX_MR_DNFR_INVALID (0 << 27)
#define SPDIFTX_MR_DNFR_VALID (1 << 27)
/* Bytes per Sample */
#define SPDIFTX_MR_BPS_MASK GENMASK(29, 28)
/*
* ---- Interrupt Enable/Disable/Mask/Status Register (Write/Read-only) ----
*/
#define SPDIFTX_IR_TXRDY BIT(0)
#define SPDIFTX_IR_TXEMPTY BIT(1)
#define SPDIFTX_IR_TXFULL BIT(2)
#define SPDIFTX_IR_TXCHUNK BIT(3)
#define SPDIFTX_IR_TXUDR BIT(4)
#define SPDIFTX_IR_TXOVR BIT(5)
#define SPDIFTX_IR_CSRDY BIT(6)
#define SPDIFTX_IR_UDRDY BIT(7)
#define SPDIFTX_IR_TXRDYCH(ch) BIT((ch) + 8)
#define SPDIFTX_IR_SECE BIT(10)
#define SPDIFTX_IR_TXUDRCH(ch) BIT((ch) + 11)
#define SPDIFTX_IR_BEND BIT(13)
static bool mchp_spdiftx_readable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case SPDIFTX_MR:
case SPDIFTX_IMR:
case SPDIFTX_ISR:
case SPDIFTX_CH1UD(0):
case SPDIFTX_CH1UD(1):
case SPDIFTX_CH1UD(2):
case SPDIFTX_CH1UD(3):
case SPDIFTX_CH1UD(4):
case SPDIFTX_CH1UD(5):
case SPDIFTX_CH1S(0):
case SPDIFTX_CH1S(1):
case SPDIFTX_CH1S(2):
case SPDIFTX_CH1S(3):
case SPDIFTX_CH1S(4):
case SPDIFTX_CH1S(5):
return true;
default:
return false;
}
}
static bool mchp_spdiftx_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case SPDIFTX_CR:
case SPDIFTX_MR:
case SPDIFTX_CDR:
case SPDIFTX_IER:
case SPDIFTX_IDR:
case SPDIFTX_CH1UD(0):
case SPDIFTX_CH1UD(1):
case SPDIFTX_CH1UD(2):
case SPDIFTX_CH1UD(3):
case SPDIFTX_CH1UD(4):
case SPDIFTX_CH1UD(5):
case SPDIFTX_CH1S(0):
case SPDIFTX_CH1S(1):
case SPDIFTX_CH1S(2):
case SPDIFTX_CH1S(3):
case SPDIFTX_CH1S(4):
case SPDIFTX_CH1S(5):
return true;
default:
return false;
}
}
static bool mchp_spdiftx_precious_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case SPDIFTX_CDR:
case SPDIFTX_ISR:
return true;
default:
return false;
}
}
static const struct regmap_config mchp_spdiftx_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = SPDIFTX_VERSION,
.readable_reg = mchp_spdiftx_readable_reg,
.writeable_reg = mchp_spdiftx_writeable_reg,
.precious_reg = mchp_spdiftx_precious_reg,
.cache_type = REGCACHE_FLAT,
};
#define SPDIFTX_GCLK_RATIO 128
#define SPDIFTX_CS_BITS 192
#define SPDIFTX_UD_BITS 192
struct mchp_spdiftx_mixer_control {
unsigned char ch_stat[SPDIFTX_CS_BITS / 8];
unsigned char user_data[SPDIFTX_UD_BITS / 8];
spinlock_t lock; /* exclusive access to control data */
};
struct mchp_spdiftx_dev {
struct mchp_spdiftx_mixer_control control;
struct snd_dmaengine_dai_dma_data playback;
struct device *dev;
struct regmap *regmap;
struct clk *pclk;
struct clk *gclk;
unsigned int fmt;
unsigned int suspend_irq;
};
static inline int mchp_spdiftx_is_running(struct mchp_spdiftx_dev *dev)
{
u32 mr;
regmap_read(dev->regmap, SPDIFTX_MR, &mr);
return !!(mr & SPDIFTX_MR_TXEN_ENABLE);
}
static void mchp_spdiftx_channel_status_write(struct mchp_spdiftx_dev *dev)
{
struct mchp_spdiftx_mixer_control *ctrl = &dev->control;
u32 val;
int i;
for (i = 0; i < ARRAY_SIZE(ctrl->ch_stat) / 4; i++) {
val = (ctrl->ch_stat[(i * 4) + 0] << 0) |
(ctrl->ch_stat[(i * 4) + 1] << 8) |
(ctrl->ch_stat[(i * 4) + 2] << 16) |
(ctrl->ch_stat[(i * 4) + 3] << 24);
regmap_write(dev->regmap, SPDIFTX_CH1S(i), val);
}
}
static void mchp_spdiftx_user_data_write(struct mchp_spdiftx_dev *dev)
{
struct mchp_spdiftx_mixer_control *ctrl = &dev->control;
u32 val;
int i;
for (i = 0; i < ARRAY_SIZE(ctrl->user_data) / 4; i++) {
val = (ctrl->user_data[(i * 4) + 0] << 0) |
(ctrl->user_data[(i * 4) + 1] << 8) |
(ctrl->user_data[(i * 4) + 2] << 16) |
(ctrl->user_data[(i * 4) + 3] << 24);
regmap_write(dev->regmap, SPDIFTX_CH1UD(i), val);
}
}
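/*
 * For illustration (the byte values below are hypothetical): the two helpers
 * above keep the 192-bit channel status and user data blocks as 24-byte
 * arrays and pack them little-endian, four bytes per 32-bit word, into the
 * six SPDIFTX_CH1S() and SPDIFTX_CH1UD() registers. If ch_stat[0..3] were
 * 0x04 0x00 0x00 0x02, the first channel status write would be:
 *
 *	val = 0x04 | (0x00 << 8) | (0x00 << 16) | (0x02 << 24);
 *	regmap_write(dev->regmap, SPDIFTX_CH1S(0), val);	// val == 0x02000004
 */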
static irqreturn_t mchp_spdiftx_interrupt(int irq, void *dev_id)
{
struct mchp_spdiftx_dev *dev = dev_id;
struct mchp_spdiftx_mixer_control *ctrl = &dev->control;
u32 sr, imr, pending, idr = 0;
regmap_read(dev->regmap, SPDIFTX_ISR, &sr);
regmap_read(dev->regmap, SPDIFTX_IMR, &imr);
pending = sr & imr;
if (!pending)
return IRQ_NONE;
if (pending & SPDIFTX_IR_TXUDR) {
dev_warn(dev->dev, "underflow detected\n");
idr |= SPDIFTX_IR_TXUDR;
}
if (pending & SPDIFTX_IR_TXOVR) {
dev_warn(dev->dev, "overflow detected\n");
idr |= SPDIFTX_IR_TXOVR;
}
if (pending & SPDIFTX_IR_UDRDY) {
spin_lock(&ctrl->lock);
mchp_spdiftx_user_data_write(dev);
spin_unlock(&ctrl->lock);
idr |= SPDIFTX_IR_UDRDY;
}
if (pending & SPDIFTX_IR_CSRDY) {
spin_lock(&ctrl->lock);
mchp_spdiftx_channel_status_write(dev);
spin_unlock(&ctrl->lock);
idr |= SPDIFTX_IR_CSRDY;
}
regmap_write(dev->regmap, SPDIFTX_IDR, idr);
return IRQ_HANDLED;
}
static int mchp_spdiftx_dai_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct mchp_spdiftx_dev *dev = snd_soc_dai_get_drvdata(dai);
/* Software reset the IP */
regmap_write(dev->regmap, SPDIFTX_CR,
SPDIFTX_CR_SWRST | SPDIFTX_CR_FCLR);
return 0;
}
static void mchp_spdiftx_dai_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct mchp_spdiftx_dev *dev = snd_soc_dai_get_drvdata(dai);
/* Disable interrupts */
regmap_write(dev->regmap, SPDIFTX_IDR, 0xffffffff);
}
static int mchp_spdiftx_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
struct mchp_spdiftx_dev *dev = snd_soc_dai_get_drvdata(dai);
struct mchp_spdiftx_mixer_control *ctrl = &dev->control;
int ret;
/* do not start/stop while channel status or user data is updated */
spin_lock(&ctrl->lock);
switch (cmd) {
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_START:
regmap_write(dev->regmap, SPDIFTX_IER, dev->suspend_irq |
SPDIFTX_IR_TXUDR | SPDIFTX_IR_TXOVR);
dev->suspend_irq = 0;
fallthrough;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
ret = regmap_update_bits(dev->regmap, SPDIFTX_MR, SPDIFTX_MR_TXEN_MASK,
SPDIFTX_MR_TXEN_ENABLE);
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
regmap_read(dev->regmap, SPDIFTX_IMR, &dev->suspend_irq);
fallthrough;
case SNDRV_PCM_TRIGGER_STOP:
regmap_write(dev->regmap, SPDIFTX_IDR, dev->suspend_irq |
SPDIFTX_IR_TXUDR | SPDIFTX_IR_TXOVR);
fallthrough;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
ret = regmap_update_bits(dev->regmap, SPDIFTX_MR, SPDIFTX_MR_TXEN_MASK,
SPDIFTX_MR_TXEN_DISABLE);
break;
default:
ret = -EINVAL;
}
spin_unlock(&ctrl->lock);
if (ret)
dev_err(dev->dev, "unable to start/stop TX: %d\n", ret);
return ret;
}
static int mchp_spdiftx_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
unsigned long flags;
struct mchp_spdiftx_dev *dev = snd_soc_dai_get_drvdata(dai);
struct mchp_spdiftx_mixer_control *ctrl = &dev->control;
u32 mr;
unsigned int bps = params_physical_width(params) / 8;
unsigned char aes3;
int ret;
dev_dbg(dev->dev, "%s() rate=%u format=%#x width=%u channels=%u\n",
__func__, params_rate(params), params_format(params),
params_width(params), params_channels(params));
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
dev_err(dev->dev, "Capture is not supported\n");
return -EINVAL;
}
regmap_read(dev->regmap, SPDIFTX_MR, &mr);
if (mr & SPDIFTX_MR_TXEN_ENABLE) {
dev_err(dev->dev, "PCM already running\n");
return -EBUSY;
}
/* Defaults: Toggle mode, justify to LSB, chunksize 1 */
mr = SPDIFTX_MR_CMODE_TOGGLE_ACCESS | SPDIFTX_MR_JUSTIFY_LSB;
dev->playback.maxburst = 1;
switch (params_channels(params)) {
case 1:
mr |= SPDIFTX_MR_MULTICH_MONO;
break;
case 2:
mr |= SPDIFTX_MR_MULTICH_DUAL;
if (bps > 2)
dev->playback.maxburst = 2;
break;
default:
dev_err(dev->dev, "unsupported number of channels: %d\n",
params_channels(params));
return -EINVAL;
}
mr |= FIELD_PREP(SPDIFTX_MR_CHUNK_MASK, dev->playback.maxburst);
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S8:
mr |= FIELD_PREP(SPDIFTX_MR_VBPS_MASK, 8);
break;
case SNDRV_PCM_FORMAT_S16_BE:
mr |= SPDIFTX_MR_ENDIAN_BIG;
fallthrough;
case SNDRV_PCM_FORMAT_S16_LE:
mr |= FIELD_PREP(SPDIFTX_MR_VBPS_MASK, 16);
break;
case SNDRV_PCM_FORMAT_S18_3BE:
mr |= SPDIFTX_MR_ENDIAN_BIG;
fallthrough;
case SNDRV_PCM_FORMAT_S18_3LE:
mr |= FIELD_PREP(SPDIFTX_MR_VBPS_MASK, 18);
break;
case SNDRV_PCM_FORMAT_S20_3BE:
mr |= SPDIFTX_MR_ENDIAN_BIG;
fallthrough;
case SNDRV_PCM_FORMAT_S20_3LE:
mr |= FIELD_PREP(SPDIFTX_MR_VBPS_MASK, 20);
break;
case SNDRV_PCM_FORMAT_S24_3BE:
mr |= SPDIFTX_MR_ENDIAN_BIG;
fallthrough;
case SNDRV_PCM_FORMAT_S24_3LE:
mr |= FIELD_PREP(SPDIFTX_MR_VBPS_MASK, 24);
break;
case SNDRV_PCM_FORMAT_S24_BE:
mr |= SPDIFTX_MR_ENDIAN_BIG;
fallthrough;
case SNDRV_PCM_FORMAT_S24_LE:
mr |= FIELD_PREP(SPDIFTX_MR_VBPS_MASK, 24);
break;
case SNDRV_PCM_FORMAT_S32_BE:
mr |= SPDIFTX_MR_ENDIAN_BIG;
fallthrough;
case SNDRV_PCM_FORMAT_S32_LE:
mr |= FIELD_PREP(SPDIFTX_MR_VBPS_MASK, 32);
break;
default:
dev_err(dev->dev, "unsupported PCM format: %d\n",
params_format(params));
return -EINVAL;
}
mr |= FIELD_PREP(SPDIFTX_MR_BPS_MASK, bps - 1);
switch (params_rate(params)) {
case 22050:
aes3 = IEC958_AES3_CON_FS_22050;
break;
case 24000:
aes3 = IEC958_AES3_CON_FS_24000;
break;
case 32000:
aes3 = IEC958_AES3_CON_FS_32000;
break;
case 44100:
aes3 = IEC958_AES3_CON_FS_44100;
break;
case 48000:
aes3 = IEC958_AES3_CON_FS_48000;
break;
case 88200:
aes3 = IEC958_AES3_CON_FS_88200;
break;
case 96000:
aes3 = IEC958_AES3_CON_FS_96000;
break;
case 176400:
aes3 = IEC958_AES3_CON_FS_176400;
break;
case 192000:
aes3 = IEC958_AES3_CON_FS_192000;
break;
case 8000:
case 11025:
case 16000:
case 64000:
aes3 = IEC958_AES3_CON_FS_NOTID;
break;
default:
dev_err(dev->dev, "unsupported sample frequency: %u\n",
params_rate(params));
return -EINVAL;
}
spin_lock_irqsave(&ctrl->lock, flags);
ctrl->ch_stat[3] &= ~IEC958_AES3_CON_FS;
ctrl->ch_stat[3] |= aes3;
mchp_spdiftx_channel_status_write(dev);
spin_unlock_irqrestore(&ctrl->lock, flags);
/* GCLK is enabled by runtime PM. */
clk_disable_unprepare(dev->gclk);
ret = clk_set_rate(dev->gclk, params_rate(params) *
SPDIFTX_GCLK_RATIO);
if (ret) {
dev_err(dev->dev,
"unable to change gclk rate to: rate %u * ratio %u\n",
params_rate(params), SPDIFTX_GCLK_RATIO);
return ret;
}
ret = clk_prepare_enable(dev->gclk);
if (ret) {
dev_err(dev->dev, "unable to enable gclk: %d\n", ret);
return ret;
}
dev_dbg(dev->dev, "%s(): GCLK set to %d\n", __func__,
params_rate(params) * SPDIFTX_GCLK_RATIO);
regmap_write(dev->regmap, SPDIFTX_MR, mr);
return 0;
}
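/*
 * Worked example for the function above, assuming a 2-channel S16_LE stream
 * at 48 kHz (these parameters are illustrative, not taken from the source):
 *
 *	mr = SPDIFTX_MR_CMODE_TOGGLE_ACCESS		// 0x00000010
 *	   | SPDIFTX_MR_JUSTIFY_LSB			// 0x00000000
 *	   | SPDIFTX_MR_MULTICH_DUAL			// 0x00000002
 *	   | FIELD_PREP(SPDIFTX_MR_CHUNK_MASK, 1)	// 0x00010000
 *	   | FIELD_PREP(SPDIFTX_MR_VBPS_MASK, 16)	// 0x00001000
 *	   | FIELD_PREP(SPDIFTX_MR_BPS_MASK, 2 - 1);	// 0x10000000
 *
 * i.e. mr == 0x10011012; ch_stat[3] carries IEC958_AES3_CON_FS_48000 and the
 * generic clock is reprogrammed to 48000 * SPDIFTX_GCLK_RATIO = 6144000 Hz.
 */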
static int mchp_spdiftx_hw_free(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct mchp_spdiftx_dev *dev = snd_soc_dai_get_drvdata(dai);
return regmap_write(dev->regmap, SPDIFTX_CR,
SPDIFTX_CR_SWRST | SPDIFTX_CR_FCLR);
}
#define MCHP_SPDIFTX_RATES SNDRV_PCM_RATE_8000_192000
#define MCHP_SPDIFTX_FORMATS (SNDRV_PCM_FMTBIT_S8 | \
SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S16_BE | \
SNDRV_PCM_FMTBIT_S18_3LE | \
SNDRV_PCM_FMTBIT_S18_3BE | \
SNDRV_PCM_FMTBIT_S20_3LE | \
SNDRV_PCM_FMTBIT_S20_3BE | \
SNDRV_PCM_FMTBIT_S24_3LE | \
SNDRV_PCM_FMTBIT_S24_3BE | \
SNDRV_PCM_FMTBIT_S24_LE | \
SNDRV_PCM_FMTBIT_S24_BE | \
SNDRV_PCM_FMTBIT_S32_LE | \
SNDRV_PCM_FMTBIT_S32_BE \
)
static int mchp_spdiftx_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
uinfo->count = 1;
return 0;
}
static int mchp_spdiftx_cs_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *uvalue)
{
unsigned long flags;
struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
struct mchp_spdiftx_dev *dev = snd_soc_dai_get_drvdata(dai);
struct mchp_spdiftx_mixer_control *ctrl = &dev->control;
spin_lock_irqsave(&ctrl->lock, flags);
memcpy(uvalue->value.iec958.status, ctrl->ch_stat,
sizeof(ctrl->ch_stat));
spin_unlock_irqrestore(&ctrl->lock, flags);
return 0;
}
static int mchp_spdiftx_cs_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *uvalue)
{
unsigned long flags;
struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
struct mchp_spdiftx_dev *dev = snd_soc_dai_get_drvdata(dai);
struct mchp_spdiftx_mixer_control *ctrl = &dev->control;
int changed = 0;
int i;
spin_lock_irqsave(&ctrl->lock, flags);
for (i = 0; i < ARRAY_SIZE(ctrl->ch_stat); i++) {
if (ctrl->ch_stat[i] != uvalue->value.iec958.status[i])
changed = 1;
ctrl->ch_stat[i] = uvalue->value.iec958.status[i];
}
if (changed) {
/* don't enable IP while we copy the channel status */
if (mchp_spdiftx_is_running(dev)) {
/*
* if SPDIF is running, wait for interrupt to write
* channel status
*/
regmap_write(dev->regmap, SPDIFTX_IER,
SPDIFTX_IR_CSRDY);
} else {
mchp_spdiftx_channel_status_write(dev);
}
}
spin_unlock_irqrestore(&ctrl->lock, flags);
return changed;
}
static int mchp_spdiftx_cs_mask(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *uvalue)
{
memset(uvalue->value.iec958.status, 0xff,
sizeof(uvalue->value.iec958.status));
return 0;
}
static int mchp_spdiftx_subcode_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *uvalue)
{
struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
struct mchp_spdiftx_dev *dev = snd_soc_dai_get_drvdata(dai);
struct mchp_spdiftx_mixer_control *ctrl = &dev->control;
unsigned long flags;
spin_lock_irqsave(&ctrl->lock, flags);
memcpy(uvalue->value.iec958.subcode, ctrl->user_data,
sizeof(ctrl->user_data));
spin_unlock_irqrestore(&ctrl->lock, flags);
return 0;
}
static int mchp_spdiftx_subcode_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *uvalue)
{
unsigned long flags;
struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
struct mchp_spdiftx_dev *dev = snd_soc_dai_get_drvdata(dai);
struct mchp_spdiftx_mixer_control *ctrl = &dev->control;
int changed = 0;
int i;
spin_lock_irqsave(&ctrl->lock, flags);
for (i = 0; i < ARRAY_SIZE(ctrl->user_data); i++) {
if (ctrl->user_data[i] != uvalue->value.iec958.subcode[i])
changed = 1;
ctrl->user_data[i] = uvalue->value.iec958.subcode[i];
}
if (changed) {
if (mchp_spdiftx_is_running(dev)) {
/*
* if SPDIF is running, wait for interrupt to write
* user data
*/
regmap_write(dev->regmap, SPDIFTX_IER,
SPDIFTX_IR_UDRDY);
} else {
mchp_spdiftx_user_data_write(dev);
}
}
spin_unlock_irqrestore(&ctrl->lock, flags);
return changed;
}
static struct snd_kcontrol_new mchp_spdiftx_ctrls[] = {
/* Channel status controller */
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = mchp_spdiftx_info,
.get = mchp_spdiftx_cs_get,
.put = mchp_spdiftx_cs_put,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, MASK),
.access = SNDRV_CTL_ELEM_ACCESS_READ |
SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = mchp_spdiftx_info,
.get = mchp_spdiftx_cs_mask,
},
/* User bits controller */
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = "IEC958 Subcode Playback Default",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
.info = mchp_spdiftx_info,
.get = mchp_spdiftx_subcode_get,
.put = mchp_spdiftx_subcode_put,
},
};
static int mchp_spdiftx_dai_probe(struct snd_soc_dai *dai)
{
struct mchp_spdiftx_dev *dev = snd_soc_dai_get_drvdata(dai);
snd_soc_dai_init_dma_data(dai, &dev->playback, NULL);
/* Add controls */
snd_soc_add_dai_controls(dai, mchp_spdiftx_ctrls,
ARRAY_SIZE(mchp_spdiftx_ctrls));
return 0;
}
static const struct snd_soc_dai_ops mchp_spdiftx_dai_ops = {
.probe = mchp_spdiftx_dai_probe,
.startup = mchp_spdiftx_dai_startup,
.shutdown = mchp_spdiftx_dai_shutdown,
.trigger = mchp_spdiftx_trigger,
.hw_params = mchp_spdiftx_hw_params,
.hw_free = mchp_spdiftx_hw_free,
};
static struct snd_soc_dai_driver mchp_spdiftx_dai = {
.name = "mchp-spdiftx",
.playback = {
.stream_name = "S/PDIF Playback",
.channels_min = 1,
.channels_max = 2,
.rates = MCHP_SPDIFTX_RATES,
.formats = MCHP_SPDIFTX_FORMATS,
},
.ops = &mchp_spdiftx_dai_ops,
};
static const struct snd_soc_component_driver mchp_spdiftx_component = {
.name = "mchp-spdiftx",
.legacy_dai_naming = 1,
};
static const struct of_device_id mchp_spdiftx_dt_ids[] = {
{
.compatible = "microchip,sama7g5-spdiftx",
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mchp_spdiftx_dt_ids);
static int mchp_spdiftx_runtime_suspend(struct device *dev)
{
struct mchp_spdiftx_dev *spdiftx = dev_get_drvdata(dev);
regcache_cache_only(spdiftx->regmap, true);
clk_disable_unprepare(spdiftx->gclk);
clk_disable_unprepare(spdiftx->pclk);
return 0;
}
static int mchp_spdiftx_runtime_resume(struct device *dev)
{
struct mchp_spdiftx_dev *spdiftx = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(spdiftx->pclk);
if (ret) {
dev_err(spdiftx->dev,
"failed to enable the peripheral clock: %d\n", ret);
return ret;
}
ret = clk_prepare_enable(spdiftx->gclk);
if (ret) {
dev_err(spdiftx->dev,
"failed to enable generic clock: %d\n", ret);
goto disable_pclk;
}
regcache_cache_only(spdiftx->regmap, false);
regcache_mark_dirty(spdiftx->regmap);
ret = regcache_sync(spdiftx->regmap);
if (ret) {
regcache_cache_only(spdiftx->regmap, true);
clk_disable_unprepare(spdiftx->gclk);
disable_pclk:
clk_disable_unprepare(spdiftx->pclk);
}
return ret;
}
static const struct dev_pm_ops mchp_spdiftx_pm_ops = {
SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
RUNTIME_PM_OPS(mchp_spdiftx_runtime_suspend, mchp_spdiftx_runtime_resume,
NULL)
};
static int mchp_spdiftx_probe(struct platform_device *pdev)
{
struct mchp_spdiftx_dev *dev;
struct resource *mem;
struct regmap *regmap;
void __iomem *base;
struct mchp_spdiftx_mixer_control *ctrl;
int irq;
int err;
/* Get memory for driver data. */
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
/* Map I/O registers. */
base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(base))
return PTR_ERR(base);
regmap = devm_regmap_init_mmio(&pdev->dev, base,
&mchp_spdiftx_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
/* Request IRQ */
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
err = devm_request_irq(&pdev->dev, irq, mchp_spdiftx_interrupt, 0,
dev_name(&pdev->dev), dev);
if (err)
return err;
/* Get the peripheral clock */
dev->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(dev->pclk)) {
err = PTR_ERR(dev->pclk);
dev_err(&pdev->dev,
"failed to get the peripheral clock: %d\n", err);
return err;
}
/* Get the generic clock */
dev->gclk = devm_clk_get(&pdev->dev, "gclk");
if (IS_ERR(dev->gclk)) {
err = PTR_ERR(dev->gclk);
dev_err(&pdev->dev,
"failed to get the PMC generic clock: %d\n", err);
return err;
}
ctrl = &dev->control;
spin_lock_init(&ctrl->lock);
/* Init channel status */
ctrl->ch_stat[0] = IEC958_AES0_CON_NOT_COPYRIGHT |
IEC958_AES0_CON_EMPHASIS_NONE;
dev->dev = &pdev->dev;
dev->regmap = regmap;
platform_set_drvdata(pdev, dev);
pm_runtime_enable(dev->dev);
if (!pm_runtime_enabled(dev->dev)) {
err = mchp_spdiftx_runtime_resume(dev->dev);
if (err)
return err;
}
dev->playback.addr = (dma_addr_t)mem->start + SPDIFTX_CDR;
dev->playback.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
err = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
if (err) {
dev_err(&pdev->dev, "failed to register PMC: %d\n", err);
goto pm_runtime_suspend;
}
err = devm_snd_soc_register_component(&pdev->dev,
&mchp_spdiftx_component,
&mchp_spdiftx_dai, 1);
if (err) {
dev_err(&pdev->dev, "failed to register component: %d\n", err);
goto pm_runtime_suspend;
}
return 0;
pm_runtime_suspend:
if (!pm_runtime_status_suspended(dev->dev))
mchp_spdiftx_runtime_suspend(dev->dev);
pm_runtime_disable(dev->dev);
return err;
}
static void mchp_spdiftx_remove(struct platform_device *pdev)
{
struct mchp_spdiftx_dev *dev = platform_get_drvdata(pdev);
if (!pm_runtime_status_suspended(dev->dev))
mchp_spdiftx_runtime_suspend(dev->dev);
pm_runtime_disable(dev->dev);
}
static struct platform_driver mchp_spdiftx_driver = {
.probe = mchp_spdiftx_probe,
.remove_new = mchp_spdiftx_remove,
.driver = {
.name = "mchp_spdiftx",
.of_match_table = mchp_spdiftx_dt_ids,
.pm = pm_ptr(&mchp_spdiftx_pm_ops)
},
};
module_platform_driver(mchp_spdiftx_driver);
MODULE_AUTHOR("Codrin Ciubotariu <[email protected]>");
MODULE_DESCRIPTION("Microchip S/PDIF TX Controller Driver");
MODULE_LICENSE("GPL v2");
| linux-master | sound/soc/atmel/mchp-spdiftx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* miscellaneous helper functions
*
* Copyright (c) Clemens Ladisch <[email protected]>
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firewire.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "lib.h"
#define ERROR_RETRY_DELAY_MS 20
/**
* snd_fw_transaction - send a request and wait for its completion
* @unit: the driver's unit on the target device
* @tcode: the transaction code
* @offset: the address in the target's address space
* @buffer: input/output data
* @length: length of @buffer
* @flags: use %FW_FIXED_GENERATION and add the generation value to attempt the
* request only in that generation; use %FW_QUIET to suppress error
* messages
*
* Submits an asynchronous request to the target device, and waits for the
* response. The node ID and the current generation are derived from @unit.
* On a bus reset or an error, the transaction is retried a few times.
* Returns zero on success, or a negative error code.
*/
int snd_fw_transaction(struct fw_unit *unit, int tcode,
u64 offset, void *buffer, size_t length,
unsigned int flags)
{
struct fw_device *device = fw_parent_device(unit);
int generation, rcode, tries = 0;
generation = flags & FW_GENERATION_MASK;
for (;;) {
if (!(flags & FW_FIXED_GENERATION)) {
generation = device->generation;
smp_rmb(); /* node_id vs. generation */
}
rcode = fw_run_transaction(device->card, tcode,
device->node_id, generation,
device->max_speed, offset,
buffer, length);
if (rcode == RCODE_COMPLETE)
return 0;
if (rcode == RCODE_GENERATION && (flags & FW_FIXED_GENERATION))
return -EAGAIN;
if (rcode_is_permanent_error(rcode) || ++tries >= 3) {
if (!(flags & FW_QUIET))
dev_err(&unit->device,
"transaction failed: %s\n",
fw_rcode_string(rcode));
return -EIO;
}
msleep(ERROR_RETRY_DELAY_MS);
}
}
EXPORT_SYMBOL(snd_fw_transaction);
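/*
 * Minimal usage sketch for snd_fw_transaction() (the register offset here is
 * hypothetical and only illustrates the calling convention):
 *
 *	__be32 value;
 *	int err;
 *
 *	err = snd_fw_transaction(unit, TCODE_READ_QUADLET_REQUEST,
 *				 CSR_REGISTER_BASE + 0x400, &value, 4, 0);
 *	if (err < 0)
 *		return err;	// retried internally; -EIO after repeated failures
 */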
MODULE_DESCRIPTION("FireWire audio helper functions");
MODULE_AUTHOR("Clemens Ladisch <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | sound/firewire/lib.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Apple iSight audio driver
*
* Copyright (c) Clemens Ladisch <[email protected]>
*/
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <sound/control.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/pcm.h>
#include <sound/tlv.h>
#include "lib.h"
#include "iso-resources.h"
#include "packets-buffer.h"
#define OUI_APPLE 0x000a27
#define MODEL_APPLE_ISIGHT 0x000008
#define SW_ISIGHT_AUDIO 0x000010
#define REG_AUDIO_ENABLE 0x000
#define AUDIO_ENABLE 0x80000000
#define REG_DEF_AUDIO_GAIN 0x204
#define REG_GAIN_RAW_START 0x210
#define REG_GAIN_RAW_END 0x214
#define REG_GAIN_DB_START 0x218
#define REG_GAIN_DB_END 0x21c
#define REG_SAMPLE_RATE_INQUIRY 0x280
#define REG_ISO_TX_CONFIG 0x300
#define SPEED_SHIFT 16
#define REG_SAMPLE_RATE 0x400
#define RATE_48000 0x80000000
#define REG_GAIN 0x500
#define REG_MUTE 0x504
#define MAX_FRAMES_PER_PACKET 475
#define QUEUE_LENGTH 20
struct isight {
struct snd_card *card;
struct fw_unit *unit;
struct fw_device *device;
u64 audio_base;
struct snd_pcm_substream *pcm;
struct mutex mutex;
struct iso_packets_buffer buffer;
struct fw_iso_resources resources;
struct fw_iso_context *context;
bool pcm_active;
bool pcm_running;
bool first_packet;
int packet_index;
u32 total_samples;
unsigned int buffer_pointer;
unsigned int period_counter;
s32 gain_min, gain_max;
unsigned int gain_tlv[4];
};
struct audio_payload {
__be32 sample_count;
__be32 signature;
__be32 sample_total;
__be32 reserved;
__be16 samples[2 * MAX_FRAMES_PER_PACKET];
};
MODULE_DESCRIPTION("iSight audio driver");
MODULE_AUTHOR("Clemens Ladisch <[email protected]>");
MODULE_LICENSE("GPL");
static struct fw_iso_packet audio_packet = {
.payload_length = sizeof(struct audio_payload),
.interrupt = 1,
.header_length = 4,
};
static void isight_update_pointers(struct isight *isight, unsigned int count)
{
struct snd_pcm_runtime *runtime = isight->pcm->runtime;
unsigned int ptr;
smp_wmb(); /* update buffer data before buffer pointer */
ptr = isight->buffer_pointer;
ptr += count;
if (ptr >= runtime->buffer_size)
ptr -= runtime->buffer_size;
WRITE_ONCE(isight->buffer_pointer, ptr);
isight->period_counter += count;
if (isight->period_counter >= runtime->period_size) {
isight->period_counter -= runtime->period_size;
snd_pcm_period_elapsed(isight->pcm);
}
}
static void isight_samples(struct isight *isight,
const __be16 *samples, unsigned int count)
{
struct snd_pcm_runtime *runtime;
unsigned int count1;
if (!READ_ONCE(isight->pcm_running))
return;
runtime = isight->pcm->runtime;
if (isight->buffer_pointer + count <= runtime->buffer_size) {
memcpy(runtime->dma_area + isight->buffer_pointer * 4,
samples, count * 4);
} else {
count1 = runtime->buffer_size - isight->buffer_pointer;
memcpy(runtime->dma_area + isight->buffer_pointer * 4,
samples, count1 * 4);
samples += count1 * 2;
memcpy(runtime->dma_area, samples, (count - count1) * 4);
}
isight_update_pointers(isight, count);
}
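/*
 * For illustration (hypothetical numbers): with runtime->buffer_size == 1024
 * frames, buffer_pointer == 1000 and count == 40, the copy above wraps:
 * count1 = 24 frames (96 bytes) are written at dma_area + 1000 * 4, then the
 * remaining 16 frames (64 bytes) go to the start of dma_area. Each frame is
 * 4 bytes: two channels of S16_BE samples.
 */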
static void isight_pcm_abort(struct isight *isight)
{
if (READ_ONCE(isight->pcm_active))
snd_pcm_stop_xrun(isight->pcm);
}
static void isight_dropped_samples(struct isight *isight, unsigned int total)
{
struct snd_pcm_runtime *runtime;
u32 dropped;
unsigned int count1;
if (!READ_ONCE(isight->pcm_running))
return;
runtime = isight->pcm->runtime;
dropped = total - isight->total_samples;
if (dropped < runtime->buffer_size) {
if (isight->buffer_pointer + dropped <= runtime->buffer_size) {
memset(runtime->dma_area + isight->buffer_pointer * 4,
0, dropped * 4);
} else {
count1 = runtime->buffer_size - isight->buffer_pointer;
memset(runtime->dma_area + isight->buffer_pointer * 4,
0, count1 * 4);
memset(runtime->dma_area, 0, (dropped - count1) * 4);
}
isight_update_pointers(isight, dropped);
} else {
isight_pcm_abort(isight);
}
}
static void isight_packet(struct fw_iso_context *context, u32 cycle,
size_t header_length, void *header, void *data)
{
struct isight *isight = data;
const struct audio_payload *payload;
unsigned int index, length, count, total;
int err;
if (isight->packet_index < 0)
return;
index = isight->packet_index;
payload = isight->buffer.packets[index].buffer;
length = be32_to_cpup(header) >> 16;
if (likely(length >= 16 &&
payload->signature == cpu_to_be32(0x73676874/*"sght"*/))) {
count = be32_to_cpu(payload->sample_count);
if (likely(count <= (length - 16) / 4)) {
total = be32_to_cpu(payload->sample_total);
if (unlikely(total != isight->total_samples)) {
if (!isight->first_packet)
isight_dropped_samples(isight, total);
isight->first_packet = false;
isight->total_samples = total;
}
isight_samples(isight, payload->samples, count);
isight->total_samples += count;
}
}
err = fw_iso_context_queue(isight->context, &audio_packet,
&isight->buffer.iso_buffer,
isight->buffer.packets[index].offset);
if (err < 0) {
dev_err(&isight->unit->device, "queueing error: %d\n", err);
isight_pcm_abort(isight);
isight->packet_index = -1;
return;
}
fw_iso_context_queue_flush(isight->context);
if (++index >= QUEUE_LENGTH)
index = 0;
isight->packet_index = index;
}
static int isight_connect(struct isight *isight)
{
int ch, err;
__be32 value;
retry_after_bus_reset:
ch = fw_iso_resources_allocate(&isight->resources,
sizeof(struct audio_payload),
isight->device->max_speed);
if (ch < 0) {
err = ch;
goto error;
}
value = cpu_to_be32(ch | (isight->device->max_speed << SPEED_SHIFT));
err = snd_fw_transaction(isight->unit, TCODE_WRITE_QUADLET_REQUEST,
isight->audio_base + REG_ISO_TX_CONFIG,
&value, 4, FW_FIXED_GENERATION |
isight->resources.generation);
if (err == -EAGAIN) {
fw_iso_resources_free(&isight->resources);
goto retry_after_bus_reset;
} else if (err < 0) {
goto err_resources;
}
return 0;
err_resources:
fw_iso_resources_free(&isight->resources);
error:
return err;
}
static int isight_open(struct snd_pcm_substream *substream)
{
static const struct snd_pcm_hardware hardware = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_BATCH |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER,
.formats = SNDRV_PCM_FMTBIT_S16_BE,
.rates = SNDRV_PCM_RATE_48000,
.rate_min = 48000,
.rate_max = 48000,
.channels_min = 2,
.channels_max = 2,
.buffer_bytes_max = 4 * 1024 * 1024,
.period_bytes_min = MAX_FRAMES_PER_PACKET * 4,
.period_bytes_max = 1024 * 1024,
.periods_min = 2,
.periods_max = UINT_MAX,
};
struct isight *isight = substream->private_data;
substream->runtime->hw = hardware;
return iso_packets_buffer_init(&isight->buffer, isight->unit,
QUEUE_LENGTH,
sizeof(struct audio_payload),
DMA_FROM_DEVICE);
}
static int isight_close(struct snd_pcm_substream *substream)
{
struct isight *isight = substream->private_data;
iso_packets_buffer_destroy(&isight->buffer, isight->unit);
return 0;
}
static int isight_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct isight *isight = substream->private_data;
WRITE_ONCE(isight->pcm_active, true);
return 0;
}
static int reg_read(struct isight *isight, int offset, __be32 *value)
{
return snd_fw_transaction(isight->unit, TCODE_READ_QUADLET_REQUEST,
isight->audio_base + offset, value, 4, 0);
}
static int reg_write(struct isight *isight, int offset, __be32 value)
{
return snd_fw_transaction(isight->unit, TCODE_WRITE_QUADLET_REQUEST,
isight->audio_base + offset, &value, 4, 0);
}
static void isight_stop_streaming(struct isight *isight)
{
__be32 value;
if (!isight->context)
return;
fw_iso_context_stop(isight->context);
fw_iso_context_destroy(isight->context);
isight->context = NULL;
fw_iso_resources_free(&isight->resources);
value = 0;
snd_fw_transaction(isight->unit, TCODE_WRITE_QUADLET_REQUEST,
isight->audio_base + REG_AUDIO_ENABLE,
&value, 4, FW_QUIET);
}
static int isight_hw_free(struct snd_pcm_substream *substream)
{
struct isight *isight = substream->private_data;
WRITE_ONCE(isight->pcm_active, false);
mutex_lock(&isight->mutex);
isight_stop_streaming(isight);
mutex_unlock(&isight->mutex);
return 0;
}
static int isight_start_streaming(struct isight *isight)
{
unsigned int i;
int err;
if (isight->context) {
if (isight->packet_index < 0)
isight_stop_streaming(isight);
else
return 0;
}
err = reg_write(isight, REG_SAMPLE_RATE, cpu_to_be32(RATE_48000));
if (err < 0)
goto error;
err = isight_connect(isight);
if (err < 0)
goto error;
err = reg_write(isight, REG_AUDIO_ENABLE, cpu_to_be32(AUDIO_ENABLE));
if (err < 0)
goto err_resources;
isight->context = fw_iso_context_create(isight->device->card,
FW_ISO_CONTEXT_RECEIVE,
isight->resources.channel,
isight->device->max_speed,
4, isight_packet, isight);
if (IS_ERR(isight->context)) {
err = PTR_ERR(isight->context);
isight->context = NULL;
goto err_resources;
}
for (i = 0; i < QUEUE_LENGTH; ++i) {
err = fw_iso_context_queue(isight->context, &audio_packet,
&isight->buffer.iso_buffer,
isight->buffer.packets[i].offset);
if (err < 0)
goto err_context;
}
isight->first_packet = true;
isight->packet_index = 0;
err = fw_iso_context_start(isight->context, -1, 0,
FW_ISO_CONTEXT_MATCH_ALL_TAGS/*?*/);
if (err < 0)
goto err_context;
return 0;
err_context:
fw_iso_context_destroy(isight->context);
isight->context = NULL;
err_resources:
fw_iso_resources_free(&isight->resources);
reg_write(isight, REG_AUDIO_ENABLE, 0);
error:
return err;
}
static int isight_prepare(struct snd_pcm_substream *substream)
{
struct isight *isight = substream->private_data;
int err;
isight->buffer_pointer = 0;
isight->period_counter = 0;
mutex_lock(&isight->mutex);
err = isight_start_streaming(isight);
mutex_unlock(&isight->mutex);
return err;
}
static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct isight *isight = substream->private_data;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
WRITE_ONCE(isight->pcm_running, true);
break;
case SNDRV_PCM_TRIGGER_STOP:
WRITE_ONCE(isight->pcm_running, false);
break;
default:
return -EINVAL;
}
return 0;
}
static snd_pcm_uframes_t isight_pointer(struct snd_pcm_substream *substream)
{
struct isight *isight = substream->private_data;
return READ_ONCE(isight->buffer_pointer);
}
static int isight_create_pcm(struct isight *isight)
{
static const struct snd_pcm_ops ops = {
.open = isight_open,
.close = isight_close,
.hw_params = isight_hw_params,
.hw_free = isight_hw_free,
.prepare = isight_prepare,
.trigger = isight_trigger,
.pointer = isight_pointer,
};
struct snd_pcm *pcm;
int err;
err = snd_pcm_new(isight->card, "iSight", 0, 0, 1, &pcm);
if (err < 0)
return err;
pcm->private_data = isight;
strcpy(pcm->name, "iSight");
isight->pcm = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
isight->pcm->ops = &ops;
snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0);
return 0;
}
static int isight_gain_info(struct snd_kcontrol *ctl,
struct snd_ctl_elem_info *info)
{
struct isight *isight = ctl->private_data;
info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
info->count = 1;
info->value.integer.min = isight->gain_min;
info->value.integer.max = isight->gain_max;
return 0;
}
static int isight_gain_get(struct snd_kcontrol *ctl,
struct snd_ctl_elem_value *value)
{
struct isight *isight = ctl->private_data;
__be32 gain;
int err;
err = reg_read(isight, REG_GAIN, &gain);
if (err < 0)
return err;
value->value.integer.value[0] = (s32)be32_to_cpu(gain);
return 0;
}
static int isight_gain_put(struct snd_kcontrol *ctl,
struct snd_ctl_elem_value *value)
{
struct isight *isight = ctl->private_data;
if (value->value.integer.value[0] < isight->gain_min ||
value->value.integer.value[0] > isight->gain_max)
return -EINVAL;
return reg_write(isight, REG_GAIN,
cpu_to_be32(value->value.integer.value[0]));
}
static int isight_mute_get(struct snd_kcontrol *ctl,
struct snd_ctl_elem_value *value)
{
struct isight *isight = ctl->private_data;
__be32 mute;
int err;
err = reg_read(isight, REG_MUTE, &mute);
if (err < 0)
return err;
value->value.integer.value[0] = !mute;
return 0;
}
static int isight_mute_put(struct snd_kcontrol *ctl,
struct snd_ctl_elem_value *value)
{
struct isight *isight = ctl->private_data;
return reg_write(isight, REG_MUTE,
(__force __be32)!value->value.integer.value[0]);
}
static int isight_create_mixer(struct isight *isight)
{
static const struct snd_kcontrol_new gain_control = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Mic Capture Volume",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ,
.info = isight_gain_info,
.get = isight_gain_get,
.put = isight_gain_put,
};
static const struct snd_kcontrol_new mute_control = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Mic Capture Switch",
.info = snd_ctl_boolean_mono_info,
.get = isight_mute_get,
.put = isight_mute_put,
};
__be32 value;
struct snd_kcontrol *ctl;
int err;
err = reg_read(isight, REG_GAIN_RAW_START, &value);
if (err < 0)
return err;
isight->gain_min = be32_to_cpu(value);
err = reg_read(isight, REG_GAIN_RAW_END, &value);
if (err < 0)
return err;
isight->gain_max = be32_to_cpu(value);
isight->gain_tlv[SNDRV_CTL_TLVO_TYPE] = SNDRV_CTL_TLVT_DB_MINMAX;
isight->gain_tlv[SNDRV_CTL_TLVO_LEN] = 2 * sizeof(unsigned int);
err = reg_read(isight, REG_GAIN_DB_START, &value);
if (err < 0)
return err;
isight->gain_tlv[SNDRV_CTL_TLVO_DB_MINMAX_MIN] =
(s32)be32_to_cpu(value) * 100;
err = reg_read(isight, REG_GAIN_DB_END, &value);
if (err < 0)
return err;
isight->gain_tlv[SNDRV_CTL_TLVO_DB_MINMAX_MAX] =
(s32)be32_to_cpu(value) * 100;
ctl = snd_ctl_new1(&gain_control, isight);
if (ctl)
ctl->tlv.p = isight->gain_tlv;
err = snd_ctl_add(isight->card, ctl);
if (err < 0)
return err;
err = snd_ctl_add(isight->card, snd_ctl_new1(&mute_control, isight));
if (err < 0)
return err;
return 0;
}
static void isight_card_free(struct snd_card *card)
{
struct isight *isight = card->private_data;
fw_iso_resources_destroy(&isight->resources);
}
static u64 get_unit_base(struct fw_unit *unit)
{
struct fw_csr_iterator i;
int key, value;
fw_csr_iterator_init(&i, unit->directory);
while (fw_csr_iterator_next(&i, &key, &value))
if (key == CSR_OFFSET)
return CSR_REGISTER_BASE + value * 4;
return 0;
}
static int isight_probe(struct fw_unit *unit,
const struct ieee1394_device_id *id)
{
struct fw_device *fw_dev = fw_parent_device(unit);
struct snd_card *card;
struct isight *isight;
int err;
err = snd_card_new(&unit->device, -1, NULL, THIS_MODULE,
sizeof(*isight), &card);
if (err < 0)
return err;
isight = card->private_data;
isight->card = card;
mutex_init(&isight->mutex);
isight->unit = fw_unit_get(unit);
isight->device = fw_dev;
isight->audio_base = get_unit_base(unit);
if (!isight->audio_base) {
dev_err(&unit->device, "audio unit base not found\n");
err = -ENXIO;
goto error;
}
fw_iso_resources_init(&isight->resources, unit);
card->private_free = isight_card_free;
strcpy(card->driver, "iSight");
strcpy(card->shortname, "Apple iSight");
snprintf(card->longname, sizeof(card->longname),
"Apple iSight (GUID %08x%08x) at %s, S%d",
fw_dev->config_rom[3], fw_dev->config_rom[4],
dev_name(&unit->device), 100 << fw_dev->max_speed);
strcpy(card->mixername, "iSight");
err = isight_create_pcm(isight);
if (err < 0)
goto error;
err = isight_create_mixer(isight);
if (err < 0)
goto error;
err = snd_card_register(card);
if (err < 0)
goto error;
dev_set_drvdata(&unit->device, isight);
return 0;
error:
snd_card_free(card);
mutex_destroy(&isight->mutex);
fw_unit_put(isight->unit);
return err;
}
static void isight_bus_reset(struct fw_unit *unit)
{
struct isight *isight = dev_get_drvdata(&unit->device);
if (fw_iso_resources_update(&isight->resources) < 0) {
isight_pcm_abort(isight);
mutex_lock(&isight->mutex);
isight_stop_streaming(isight);
mutex_unlock(&isight->mutex);
}
}
static void isight_remove(struct fw_unit *unit)
{
struct isight *isight = dev_get_drvdata(&unit->device);
isight_pcm_abort(isight);
snd_card_disconnect(isight->card);
mutex_lock(&isight->mutex);
isight_stop_streaming(isight);
mutex_unlock(&isight->mutex);
// Block until all of the ALSA character devices are released.
snd_card_free(isight->card);
mutex_destroy(&isight->mutex);
fw_unit_put(isight->unit);
}
static const struct ieee1394_device_id isight_id_table[] = {
{
.match_flags = IEEE1394_MATCH_SPECIFIER_ID |
IEEE1394_MATCH_VERSION,
.specifier_id = OUI_APPLE,
.version = SW_ISIGHT_AUDIO,
},
{ }
};
MODULE_DEVICE_TABLE(ieee1394, isight_id_table);
static struct fw_driver isight_driver = {
.driver = {
.owner = THIS_MODULE,
.name = KBUILD_MODNAME,
.bus = &fw_bus_type,
},
.probe = isight_probe,
.update = isight_bus_reset,
.remove = isight_remove,
.id_table = isight_id_table,
};
static int __init alsa_isight_init(void)
{
return driver_register(&isight_driver.driver);
}
static void __exit alsa_isight_exit(void)
{
driver_unregister(&isight_driver.driver);
}
module_init(alsa_isight_init);
module_exit(alsa_isight_exit);
| linux-master | sound/firewire/isight.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Audio and Music Data Transmission Protocol (IEC 61883-6) streams
* with Common Isochronous Packet (IEC 61883-1) headers
*
* Copyright (c) Clemens Ladisch <[email protected]>
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "amdtp-stream.h"
#define TICKS_PER_CYCLE 3072
#define CYCLES_PER_SECOND 8000
#define TICKS_PER_SECOND (TICKS_PER_CYCLE * CYCLES_PER_SECOND)
#define OHCI_SECOND_MODULUS 8
/* Always support Linux tracing subsystem. */
#define CREATE_TRACE_POINTS
#include "amdtp-stream-trace.h"
#define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 microseconds */
/* isochronous header parameters */
#define ISO_DATA_LENGTH_SHIFT 16
#define TAG_NO_CIP_HEADER 0
#define TAG_CIP 1
// Common Isochronous Packet (CIP) header parameters. Use two quadlets CIP header when supported.
#define CIP_HEADER_QUADLETS 2
#define CIP_EOH_SHIFT 31
#define CIP_EOH (1u << CIP_EOH_SHIFT)
#define CIP_EOH_MASK 0x80000000
#define CIP_SID_SHIFT 24
#define CIP_SID_MASK 0x3f000000
#define CIP_DBS_MASK 0x00ff0000
#define CIP_DBS_SHIFT 16
#define CIP_SPH_MASK 0x00000400
#define CIP_SPH_SHIFT 10
#define CIP_DBC_MASK 0x000000ff
#define CIP_FMT_SHIFT 24
#define CIP_FMT_MASK 0x3f000000
#define CIP_FDF_MASK 0x00ff0000
#define CIP_FDF_SHIFT 16
#define CIP_FDF_NO_DATA 0xff
#define CIP_SYT_MASK 0x0000ffff
#define CIP_SYT_NO_INFO 0xffff
#define CIP_SYT_CYCLE_MODULUS 16
#define CIP_NO_DATA ((CIP_FDF_NO_DATA << CIP_FDF_SHIFT) | CIP_SYT_NO_INFO)
#define CIP_HEADER_SIZE (sizeof(__be32) * CIP_HEADER_QUADLETS)
/* Audio and Music transfer protocol specific parameters */
#define CIP_FMT_AM 0x10
#define AMDTP_FDF_NO_DATA 0xff
// For iso header and tstamp.
#define IR_CTX_HEADER_DEFAULT_QUADLETS 2
// Add nothing.
#define IR_CTX_HEADER_SIZE_NO_CIP (sizeof(__be32) * IR_CTX_HEADER_DEFAULT_QUADLETS)
// Add two quadlets CIP header.
#define IR_CTX_HEADER_SIZE_CIP (IR_CTX_HEADER_SIZE_NO_CIP + CIP_HEADER_SIZE)
#define HEADER_TSTAMP_MASK 0x0000ffff
#define IT_PKT_HEADER_SIZE_CIP CIP_HEADER_SIZE
#define IT_PKT_HEADER_SIZE_NO_CIP 0 // Nothing.
// The initial firmware of OXFW970 can postpone packet transmission while finishing an
// asynchronous transaction. This module tolerates at most 5 skipped cycles to avoid buffer
// overrun. If the actual device skips more than that, this module stops the packet streaming.
#define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES 5
/**
* amdtp_stream_init - initialize an AMDTP stream structure
* @s: the AMDTP stream to initialize
* @unit: the target of the stream
* @dir: the direction of stream
* @flags: the details of the streaming protocol consist of cip_flags enumeration-constants.
* @fmt: the value of fmt field in CIP header
* @process_ctx_payloads: callback handler to process payloads of isoc context
* @protocol_size: the size to allocate newly for protocol
*/
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
enum amdtp_stream_direction dir, unsigned int flags,
unsigned int fmt,
amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
unsigned int protocol_size)
{
if (process_ctx_payloads == NULL)
return -EINVAL;
s->protocol = kzalloc(protocol_size, GFP_KERNEL);
if (!s->protocol)
return -ENOMEM;
s->unit = unit;
s->direction = dir;
s->flags = flags;
s->context = ERR_PTR(-1);
mutex_init(&s->mutex);
s->packet_index = 0;
init_waitqueue_head(&s->ready_wait);
s->fmt = fmt;
s->process_ctx_payloads = process_ctx_payloads;
return 0;
}
EXPORT_SYMBOL(amdtp_stream_init);
/**
* amdtp_stream_destroy - free stream resources
* @s: the AMDTP stream to destroy
*/
void amdtp_stream_destroy(struct amdtp_stream *s)
{
/* Not initialized. */
if (s->protocol == NULL)
return;
WARN_ON(amdtp_stream_running(s));
kfree(s->protocol);
mutex_destroy(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_destroy);
const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
[CIP_SFC_32000] = 8,
[CIP_SFC_44100] = 8,
[CIP_SFC_48000] = 8,
[CIP_SFC_88200] = 16,
[CIP_SFC_96000] = 16,
[CIP_SFC_176400] = 32,
[CIP_SFC_192000] = 32,
};
EXPORT_SYMBOL(amdtp_syt_intervals);
const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
[CIP_SFC_32000] = 32000,
[CIP_SFC_44100] = 44100,
[CIP_SFC_48000] = 48000,
[CIP_SFC_88200] = 88200,
[CIP_SFC_96000] = 96000,
[CIP_SFC_176400] = 176400,
[CIP_SFC_192000] = 192000,
};
EXPORT_SYMBOL(amdtp_rate_table);
static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct snd_interval *s = hw_param_interval(params, rule->var);
const struct snd_interval *r =
hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
struct snd_interval t = {0};
unsigned int step = 0;
int i;
for (i = 0; i < CIP_SFC_COUNT; ++i) {
if (snd_interval_test(r, amdtp_rate_table[i]))
step = max(step, amdtp_syt_intervals[i]);
}
t.min = roundup(s->min, step);
t.max = rounddown(s->max, step);
t.integer = 1;
return snd_interval_refine(s, &t);
}
/**
* amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
* @s: the AMDTP stream, which must be initialized.
* @runtime: the PCM substream runtime
*/
int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
struct snd_pcm_runtime *runtime)
{
struct snd_pcm_hardware *hw = &runtime->hw;
unsigned int ctx_header_size;
unsigned int maximum_usec_per_period;
int err;
hw->info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_JOINT_DUPLEX |
SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_NO_PERIOD_WAKEUP;
hw->periods_min = 2;
hw->periods_max = UINT_MAX;
/* bytes for a frame */
hw->period_bytes_min = 4 * hw->channels_max;
/* Just to prevent allocating too many pages. */
hw->period_bytes_max = hw->period_bytes_min * 2048;
hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
// Linux driver for 1394 OHCI controller voluntarily flushes isoc
// context when total size of accumulated context header reaches
// PAGE_SIZE. This kicks work for the isoc context and brings
// callback in the middle of scheduled interrupts.
// Although AMDTP streams in the same domain use the same events per
// IRQ, use the largest context header size between IT/IR contexts.
// Here, the context header size of the IR context is used for both
// contexts.
if (!(s->flags & CIP_NO_HEADER))
ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
else
ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
CYCLES_PER_SECOND / ctx_header_size;
// In IEC 61883-6, one isoc packet can transfer events up to the value
// of syt interval. This comes from the interval of isoc cycle. As 1394
// OHCI controller can generate hardware IRQ per isoc packet, the
// interval is 125 usec.
// However, there are two ways of transmission in IEC 61883-6; blocking
// and non-blocking modes. In blocking mode, the sequence of isoc packet
// includes 'empty' or 'NODATA' packets which include no event. In
// non-blocking mode, the number of events per packet is variable up to
// the syt interval.
// Due to the above protocol design, the minimum PCM frames per
// interrupt should be double the syt interval, thus the minimum period
// is 250 usec.
err = snd_pcm_hw_constraint_minmax(runtime,
SNDRV_PCM_HW_PARAM_PERIOD_TIME,
250, maximum_usec_per_period);
if (err < 0)
goto end;
/* Non-Blocking stream has no more constraints */
if (!(s->flags & CIP_BLOCKING))
goto end;
/*
* One AMDTP packet can include several frames. In blocking mode, the
* number equals SYT_INTERVAL. So the number is 8, 16 or 32,
* depending on the sampling rate. For an accurate period interrupt, it's
* preferable to align period/buffer sizes to the current SYT_INTERVAL.
*/
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
apply_constraint_to_size, NULL,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
SNDRV_PCM_HW_PARAM_RATE, -1);
if (err < 0)
goto end;
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
apply_constraint_to_size, NULL,
SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
SNDRV_PCM_HW_PARAM_RATE, -1);
if (err < 0)
goto end;
end:
return err;
}
EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);
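/*
 * Worked example for the period-time limit above, assuming a 4 KiB PAGE_SIZE
 * (an assumption for illustration): with CIP headers the IR context header is
 * IR_CTX_HEADER_SIZE_CIP = 16 bytes per packet, so
 *
 *	maximum_usec_per_period = 1000000 * 4096 / 8000 / 16 = 32000 usec
 *
 * i.e. at most 32 ms per period; without CIP headers (8 bytes per packet) the
 * limit doubles to 64 ms.
 */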
/**
* amdtp_stream_set_parameters - set stream parameters
* @s: the AMDTP stream to configure
* @rate: the sample rate
* @data_block_quadlets: the size of a data block in quadlet unit
* @pcm_frame_multiplier: the multiplier to compute the number of PCM frames by the number of AMDTP
* events.
*
* The parameters must be set before the stream is started, and must not be
* changed while the stream is running.
*/
int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
unsigned int data_block_quadlets, unsigned int pcm_frame_multiplier)
{
unsigned int sfc;
for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
if (amdtp_rate_table[sfc] == rate)
break;
}
if (sfc == ARRAY_SIZE(amdtp_rate_table))
return -EINVAL;
s->sfc = sfc;
s->data_block_quadlets = data_block_quadlets;
s->syt_interval = amdtp_syt_intervals[sfc];
// default buffering in the device.
s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
// additional buffering needed to adjust for no-data packets.
if (s->flags & CIP_BLOCKING)
s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;
s->pcm_frame_multiplier = pcm_frame_multiplier;
return 0;
}
EXPORT_SYMBOL(amdtp_stream_set_parameters);
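/*
 * Worked example for the transfer delay above (48 kHz chosen for
 * illustration): the default is 0x2e00 - 3072 = 8704 ticks, about 354 usec.
 * In blocking mode one extra syt interval of buffering is added,
 * 24576000 * 8 / 48000 = 4096 ticks, for a total of 12800 ticks.
 */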
// The CIP header is processed in context header apart from context payload.
static int amdtp_stream_get_max_ctx_payload_size(struct amdtp_stream *s)
{
unsigned int multiplier;
if (s->flags & CIP_JUMBO_PAYLOAD)
multiplier = IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES;
else
multiplier = 1;
return s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
}
/**
* amdtp_stream_get_max_payload - get the stream's packet size
* @s: the AMDTP stream
*
* This function must not be called before the stream has been configured
* with amdtp_stream_set_parameters().
*/
unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
{
unsigned int cip_header_size;
if (!(s->flags & CIP_NO_HEADER))
cip_header_size = CIP_HEADER_SIZE;
else
cip_header_size = 0;
return cip_header_size + amdtp_stream_get_max_ctx_payload_size(s);
}
EXPORT_SYMBOL(amdtp_stream_get_max_payload);
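/*
 * For illustration (hypothetical stream parameters): at 48 kHz the syt
 * interval is 8 events per packet, so a stream with 10 quadlets per data
 * block has a maximum context payload of 8 * 10 * 4 = 320 bytes, and
 * amdtp_stream_get_max_payload() returns 320 + CIP_HEADER_SIZE = 328 bytes
 * (without CIP_JUMBO_PAYLOAD).
 */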
/**
* amdtp_stream_pcm_prepare - prepare PCM device for running
* @s: the AMDTP stream
*
* This function should be called from the PCM device's .prepare callback.
*/
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
s->pcm_buffer_pointer = 0;
s->pcm_period_pointer = 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);
#define prev_packet_desc(s, desc) \
list_prev_entry_circular(desc, &s->packet_descs_list, link)
static void pool_blocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
unsigned int size, unsigned int pos, unsigned int count)
{
const unsigned int syt_interval = s->syt_interval;
int i;
for (i = 0; i < count; ++i) {
struct seq_desc *desc = descs + pos;
if (desc->syt_offset != CIP_SYT_NO_INFO)
desc->data_blocks = syt_interval;
else
desc->data_blocks = 0;
pos = (pos + 1) % size;
}
}
static void pool_ideal_nonblocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
unsigned int size, unsigned int pos,
unsigned int count)
{
const enum cip_sfc sfc = s->sfc;
unsigned int state = s->ctx_data.rx.data_block_state;
int i;
for (i = 0; i < count; ++i) {
struct seq_desc *desc = descs + pos;
if (!cip_sfc_is_base_44100(sfc)) {
// Sample_rate / 8000 is an integer, and precomputed.
desc->data_blocks = state;
} else {
unsigned int phase = state;
/*
* This calculates the number of data blocks per packet so that
* 1) the overall rate is correct and exactly synchronized to
* the bus clock, and
* 2) packets with a rounded-up number of blocks occur as early
* as possible in the sequence (to prevent underruns of the
* device's buffer).
*/
if (sfc == CIP_SFC_44100)
/* 6 6 5 6 5 6 5 ... */
desc->data_blocks = 5 + ((phase & 1) ^ (phase == 0 || phase >= 40));
else
/* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
desc->data_blocks = 11 * (sfc >> 1) + (phase == 0);
if (++phase >= (80 >> (sfc >> 1)))
phase = 0;
state = phase;
}
pos = (pos + 1) % size;
}
s->ctx_data.rx.data_block_state = state;
}
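/*
 * Sanity check of the 44.1 kHz case above: the phase counter runs over
 * 80 packets (80 >> (sfc >> 1) with sfc == CIP_SFC_44100), yielding 41
 * packets of 6 data blocks and 39 packets of 5, i.e. exactly 441 events per
 * 80 isochronous cycles (10 ms), which is precisely 44100 events per second,
 * with the larger packets placed as early as possible in the sequence.
 */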
static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
unsigned int *syt_offset_state, enum cip_sfc sfc)
{
unsigned int syt_offset;
if (*last_syt_offset < TICKS_PER_CYCLE) {
if (!cip_sfc_is_base_44100(sfc))
syt_offset = *last_syt_offset + *syt_offset_state;
else {
/*
* The time, in ticks, of the n'th SYT_INTERVAL sample is:
* n * SYT_INTERVAL * 24576000 / sample_rate
* Modulo TICKS_PER_CYCLE, the difference between successive
* elements is about 1386.23. Rounding the results of this
* formula to the SYT precision results in a sequence of
* differences that begins with:
* 1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
* This code generates _exactly_ the same sequence.
*/
unsigned int phase = *syt_offset_state;
unsigned int index = phase % 13;
syt_offset = *last_syt_offset;
syt_offset += 1386 + ((index && !(index & 3)) ||
phase == 146);
if (++phase >= 147)
phase = 0;
*syt_offset_state = phase;
}
} else
syt_offset = *last_syt_offset - TICKS_PER_CYCLE;
*last_syt_offset = syt_offset;
if (syt_offset >= TICKS_PER_CYCLE)
syt_offset = CIP_SYT_NO_INFO;
return syt_offset;
}
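/*
 * Sanity check of the 44.1 kHz sequence above: one full period is 147
 * packets; the code adds 1386 ticks each step plus a single extra tick on
 * 34 of the 147 steps, giving 147 * 1386 + 34 = 203776 ticks. This equals
 * 147 * (8 * 24576000 / 44100 - 3072) exactly, so the generated SYT offsets
 * never drift from the nominal sample clock.
 */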
static void pool_ideal_syt_offsets(struct amdtp_stream *s, struct seq_desc *descs,
unsigned int size, unsigned int pos, unsigned int count)
{
const enum cip_sfc sfc = s->sfc;
unsigned int last = s->ctx_data.rx.last_syt_offset;
unsigned int state = s->ctx_data.rx.syt_offset_state;
int i;
for (i = 0; i < count; ++i) {
struct seq_desc *desc = descs + pos;
desc->syt_offset = calculate_syt_offset(&last, &state, sfc);
pos = (pos + 1) % size;
}
s->ctx_data.rx.last_syt_offset = last;
s->ctx_data.rx.syt_offset_state = state;
}
static unsigned int compute_syt_offset(unsigned int syt, unsigned int cycle,
unsigned int transfer_delay)
{
unsigned int cycle_lo = (cycle % CYCLES_PER_SECOND) & 0x0f;
unsigned int syt_cycle_lo = (syt & 0xf000) >> 12;
unsigned int syt_offset;
// Round up.
if (syt_cycle_lo < cycle_lo)
syt_cycle_lo += CIP_SYT_CYCLE_MODULUS;
syt_cycle_lo -= cycle_lo;
// Subtract transfer delay so that the synchronization offset is not so large
// at transmission.
syt_offset = syt_cycle_lo * TICKS_PER_CYCLE + (syt & 0x0fff);
if (syt_offset < transfer_delay)
syt_offset += CIP_SYT_CYCLE_MODULUS * TICKS_PER_CYCLE;
return syt_offset - transfer_delay;
}
// Both the producer and the consumer of the queue run on the same IEEE 1394 bus clock.
// Additionally, the sequence of tx packets is strictly checked for any discontinuity
// before entries are filled into the queue. The calculation is therefore safe against
// overrun, even if it looks fragile.
static unsigned int calculate_cached_cycle_count(struct amdtp_stream *s, unsigned int head)
{
const unsigned int cache_size = s->ctx_data.tx.cache.size;
unsigned int cycles = s->ctx_data.tx.cache.pos;
if (cycles < head)
cycles += cache_size;
cycles -= head;
return cycles;
}
static void cache_seq(struct amdtp_stream *s, const struct pkt_desc *src, unsigned int desc_count)
{
const unsigned int transfer_delay = s->transfer_delay;
const unsigned int cache_size = s->ctx_data.tx.cache.size;
struct seq_desc *cache = s->ctx_data.tx.cache.descs;
unsigned int cache_pos = s->ctx_data.tx.cache.pos;
bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
int i;
for (i = 0; i < desc_count; ++i) {
struct seq_desc *dst = cache + cache_pos;
if (aware_syt && src->syt != CIP_SYT_NO_INFO)
dst->syt_offset = compute_syt_offset(src->syt, src->cycle, transfer_delay);
else
dst->syt_offset = CIP_SYT_NO_INFO;
dst->data_blocks = src->data_blocks;
cache_pos = (cache_pos + 1) % cache_size;
src = amdtp_stream_next_packet_desc(s, src);
}
s->ctx_data.tx.cache.pos = cache_pos;
}
static void pool_ideal_seq_descs(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
unsigned int pos, unsigned int count)
{
pool_ideal_syt_offsets(s, descs, size, pos, count);
if (s->flags & CIP_BLOCKING)
pool_blocking_data_blocks(s, descs, size, pos, count);
else
pool_ideal_nonblocking_data_blocks(s, descs, size, pos, count);
}
static void pool_replayed_seq(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
unsigned int pos, unsigned int count)
{
struct amdtp_stream *target = s->ctx_data.rx.replay_target;
const struct seq_desc *cache = target->ctx_data.tx.cache.descs;
const unsigned int cache_size = target->ctx_data.tx.cache.size;
unsigned int cache_pos = s->ctx_data.rx.cache_pos;
int i;
for (i = 0; i < count; ++i) {
descs[pos] = cache[cache_pos];
cache_pos = (cache_pos + 1) % cache_size;
pos = (pos + 1) % size;
}
s->ctx_data.rx.cache_pos = cache_pos;
}
static void pool_seq_descs(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
unsigned int pos, unsigned int count)
{
struct amdtp_domain *d = s->domain;
void (*pool_seq_descs)(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
unsigned int pos, unsigned int count);
if (!d->replay.enable || !s->ctx_data.rx.replay_target) {
pool_seq_descs = pool_ideal_seq_descs;
} else {
if (!d->replay.on_the_fly) {
pool_seq_descs = pool_replayed_seq;
} else {
struct amdtp_stream *tx = s->ctx_data.rx.replay_target;
const unsigned int cache_size = tx->ctx_data.tx.cache.size;
const unsigned int cache_pos = s->ctx_data.rx.cache_pos;
unsigned int cached_cycles = calculate_cached_cycle_count(tx, cache_pos);
if (cached_cycles > count && cached_cycles > cache_size / 2)
pool_seq_descs = pool_replayed_seq;
else
pool_seq_descs = pool_ideal_seq_descs;
}
}
pool_seq_descs(s, descs, size, pos, count);
}
static void update_pcm_pointers(struct amdtp_stream *s,
struct snd_pcm_substream *pcm,
unsigned int frames)
{
unsigned int ptr;
ptr = s->pcm_buffer_pointer + frames;
if (ptr >= pcm->runtime->buffer_size)
ptr -= pcm->runtime->buffer_size;
WRITE_ONCE(s->pcm_buffer_pointer, ptr);
s->pcm_period_pointer += frames;
if (s->pcm_period_pointer >= pcm->runtime->period_size) {
s->pcm_period_pointer -= pcm->runtime->period_size;
// The program in the user process should periodically check the status of the
// intermediate buffer associated with the PCM substream to process PCM frames in the
// buffer, instead of receiving a period-elapsed notification through poll wait.
if (!pcm->runtime->no_period_wakeup) {
if (in_softirq()) {
// In software IRQ context for 1394 OHCI.
snd_pcm_period_elapsed(pcm);
} else {
// In process context of ALSA PCM application under acquired lock of
// PCM substream.
snd_pcm_period_elapsed_under_stream_lock(pcm);
}
}
}
}
static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
bool sched_irq)
{
int err;
params->interrupt = sched_irq;
params->tag = s->tag;
params->sy = 0;
err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
s->buffer.packets[s->packet_index].offset);
if (err < 0) {
dev_err(&s->unit->device, "queueing error: %d\n", err);
goto end;
}
if (++s->packet_index >= s->queue_size)
s->packet_index = 0;
end:
return err;
}
static inline int queue_out_packet(struct amdtp_stream *s,
struct fw_iso_packet *params, bool sched_irq)
{
params->skip =
!!(params->header_length == 0 && params->payload_length == 0);
return queue_packet(s, params, sched_irq);
}
static inline int queue_in_packet(struct amdtp_stream *s,
struct fw_iso_packet *params)
{
// Queue one packet for IR context.
params->header_length = s->ctx_data.tx.ctx_header_size;
params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
params->skip = false;
return queue_packet(s, params, false);
}
static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
unsigned int data_block_counter, unsigned int syt)
{
cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
(s->data_block_quadlets << CIP_DBS_SHIFT) |
((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
data_block_counter);
cip_header[1] = cpu_to_be32(CIP_EOH |
((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
(syt & CIP_SYT_MASK));
}
static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
struct fw_iso_packet *params, unsigned int header_length,
unsigned int data_blocks,
unsigned int data_block_counter,
unsigned int syt, unsigned int index, u32 curr_cycle_time)
{
unsigned int payload_length;
__be32 *cip_header;
payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
params->payload_length = payload_length;
if (header_length > 0) {
cip_header = (__be32 *)params->header;
generate_cip_header(s, cip_header, data_block_counter, syt);
params->header_length = header_length;
} else {
cip_header = NULL;
}
trace_amdtp_packet(s, cycle, cip_header, payload_length + header_length, data_blocks,
data_block_counter, s->packet_index, index, curr_cycle_time);
}
static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
unsigned int payload_length,
unsigned int *data_blocks,
unsigned int *data_block_counter, unsigned int *syt)
{
u32 cip_header[2];
unsigned int sph;
unsigned int fmt;
unsigned int fdf;
unsigned int dbc;
bool lost;
cip_header[0] = be32_to_cpu(buf[0]);
cip_header[1] = be32_to_cpu(buf[1]);
/*
* This module supports 'Two-quadlet CIP header with SYT field'.
* For convenience, also check FMT field is AM824 or not.
*/
if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
(!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
dev_info_ratelimited(&s->unit->device,
"Invalid CIP header for AMDTP: %08X:%08X\n",
cip_header[0], cip_header[1]);
return -EAGAIN;
}
/* Check valid protocol or not. */
sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
if (sph != s->sph || fmt != s->fmt) {
dev_info_ratelimited(&s->unit->device,
"Detect unexpected protocol: %08x %08x\n",
cip_header[0], cip_header[1]);
return -EAGAIN;
}
/* Calculate data blocks */
fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
if (payload_length == 0 || (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
*data_blocks = 0;
} else {
unsigned int data_block_quadlets =
(cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
/* avoid division by zero */
if (data_block_quadlets == 0) {
dev_err(&s->unit->device,
"Detect invalid value in dbs field: %08X\n",
cip_header[0]);
return -EPROTO;
}
if (s->flags & CIP_WRONG_DBS)
data_block_quadlets = s->data_block_quadlets;
*data_blocks = payload_length / sizeof(__be32) / data_block_quadlets;
}
/* Check data block counter continuity */
dbc = cip_header[0] & CIP_DBC_MASK;
if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
*data_block_counter != UINT_MAX)
dbc = *data_block_counter;
if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
*data_block_counter == UINT_MAX) {
lost = false;
} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
lost = dbc != *data_block_counter;
} else {
unsigned int dbc_interval;
if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
dbc_interval = s->ctx_data.tx.dbc_interval;
else
dbc_interval = *data_blocks;
lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
}
if (lost) {
dev_err(&s->unit->device,
"Detect discontinuity of CIP: %02X %02X\n",
*data_block_counter, dbc);
return -EIO;
}
*data_block_counter = dbc;
if (!(s->flags & CIP_UNAWARE_SYT))
*syt = cip_header[1] & CIP_SYT_MASK;
return 0;
}
static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
const __be32 *ctx_header,
unsigned int *data_blocks,
unsigned int *data_block_counter,
unsigned int *syt, unsigned int packet_index, unsigned int index,
u32 curr_cycle_time)
{
unsigned int payload_length;
const __be32 *cip_header;
unsigned int cip_header_size;
payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
if (!(s->flags & CIP_NO_HEADER))
cip_header_size = CIP_HEADER_SIZE;
else
cip_header_size = 0;
if (payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
dev_err(&s->unit->device,
"Detect jumbo payload: %04x %04x\n",
payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
return -EIO;
}
if (cip_header_size > 0) {
if (payload_length >= cip_header_size) {
int err;
cip_header = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;
err = check_cip_header(s, cip_header, payload_length - cip_header_size,
data_blocks, data_block_counter, syt);
if (err < 0)
return err;
} else {
// Handle the cycle so that empty packet arrives.
cip_header = NULL;
*data_blocks = 0;
*syt = 0;
}
} else {
cip_header = NULL;
*data_blocks = payload_length / sizeof(__be32) / s->data_block_quadlets;
*syt = 0;
if (*data_block_counter == UINT_MAX)
*data_block_counter = 0;
}
trace_amdtp_packet(s, cycle, cip_header, payload_length, *data_blocks,
*data_block_counter, packet_index, index, curr_cycle_time);
return 0;
}
// In the CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent the second
// field. On the other hand, in the DMA descriptors of 1394 OHCI, only 3 bits are used
// to represent it. Thus, via the Linux firewire subsystem, we get the 3-bit second field.
static inline u32 compute_ohci_iso_ctx_cycle_count(u32 tstamp)
{
return (((tstamp >> 13) & 0x07) * CYCLES_PER_SECOND) + (tstamp & 0x1fff);
}
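/*
 * Illustrative sketch (not part of the original driver): how a 16-bit OHCI isochronous
 * context timestamp maps to a cycle count with the helper above. The numeric values are
 * made-up examples; CYCLES_PER_SECOND is 8000 on IEEE 1394.
 *
 *	// tstamp = 0x6e45: cycleSeconds = (0x6e45 >> 13) & 0x07 = 3,
 *	//                  cycleCount   =  0x6e45 & 0x1fff      = 3653
 *	u32 tstamp = 0x6e45;
 *	u32 cycle = compute_ohci_iso_ctx_cycle_count(tstamp);
 *	// cycle == 3 * 8000 + 3653 == 27653, wrapping at
 *	// OHCI_SECOND_MODULUS * CYCLES_PER_SECOND via the helpers below.
 */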
static inline u32 compute_ohci_cycle_count(__be32 ctx_header_tstamp)
{
u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK;
return compute_ohci_iso_ctx_cycle_count(tstamp);
}
static inline u32 increment_ohci_cycle_count(u32 cycle, unsigned int addend)
{
cycle += addend;
if (cycle >= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND)
cycle -= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;
return cycle;
}
static inline u32 decrement_ohci_cycle_count(u32 minuend, u32 subtrahend)
{
if (minuend < subtrahend)
minuend += OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;
return minuend - subtrahend;
}
static int compare_ohci_cycle_count(u32 lval, u32 rval)
{
if (lval == rval)
return 0;
else if (lval < rval && rval - lval < OHCI_SECOND_MODULUS * CYCLES_PER_SECOND / 2)
return -1;
else
return 1;
}
// Align to actual cycle count for the packet which is going to be scheduled.
// This module queues the same number of isochronous packets as the size of the queue
// to skip isochronous cycles, therefore it's OK to just increment the cycle by the
// size of the queue to get the scheduled cycle.
static inline u32 compute_ohci_it_cycle(const __be32 ctx_header_tstamp,
unsigned int queue_size)
{
u32 cycle = compute_ohci_cycle_count(ctx_header_tstamp);
return increment_ohci_cycle_count(cycle, queue_size);
}
static int generate_tx_packet_descs(struct amdtp_stream *s, struct pkt_desc *desc,
const __be32 *ctx_header, unsigned int packet_count,
unsigned int *desc_count)
{
unsigned int next_cycle = s->next_cycle;
unsigned int dbc = s->data_block_counter;
unsigned int packet_index = s->packet_index;
unsigned int queue_size = s->queue_size;
u32 curr_cycle_time = 0;
int i;
int err;
if (trace_amdtp_packet_enabled())
(void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time);
*desc_count = 0;
for (i = 0; i < packet_count; ++i) {
unsigned int cycle;
bool lost;
unsigned int data_blocks;
unsigned int syt;
cycle = compute_ohci_cycle_count(ctx_header[1]);
lost = (next_cycle != cycle);
if (lost) {
if (s->flags & CIP_NO_HEADER) {
// Fireface skips transmission just for an isoc cycle corresponding
// to empty packet.
unsigned int prev_cycle = next_cycle;
next_cycle = increment_ohci_cycle_count(next_cycle, 1);
lost = (next_cycle != cycle);
if (!lost) {
// Prepare a description for the skipped cycle for
// sequence replay.
desc->cycle = prev_cycle;
desc->syt = 0;
desc->data_blocks = 0;
desc->data_block_counter = dbc;
desc->ctx_payload = NULL;
desc = amdtp_stream_next_packet_desc(s, desc);
++(*desc_count);
}
} else if (s->flags & CIP_JUMBO_PAYLOAD) {
// OXFW970 skips transmission for several isoc cycles during an
// asynchronous transaction. Sequence replay is impossible for
// this reason.
unsigned int safe_cycle = increment_ohci_cycle_count(next_cycle,
IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES);
lost = (compare_ohci_cycle_count(safe_cycle, cycle) > 0);
}
if (lost) {
dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n",
next_cycle, cycle);
return -EIO;
}
}
err = parse_ir_ctx_header(s, cycle, ctx_header, &data_blocks, &dbc, &syt,
packet_index, i, curr_cycle_time);
if (err < 0)
return err;
desc->cycle = cycle;
desc->syt = syt;
desc->data_blocks = data_blocks;
desc->data_block_counter = dbc;
desc->ctx_payload = s->buffer.packets[packet_index].buffer;
if (!(s->flags & CIP_DBC_IS_END_EVENT))
dbc = (dbc + desc->data_blocks) & 0xff;
next_cycle = increment_ohci_cycle_count(next_cycle, 1);
desc = amdtp_stream_next_packet_desc(s, desc);
++(*desc_count);
ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
packet_index = (packet_index + 1) % queue_size;
}
s->next_cycle = next_cycle;
s->data_block_counter = dbc;
return 0;
}
static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle,
unsigned int transfer_delay)
{
unsigned int syt;
syt_offset += transfer_delay;
syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) |
(syt_offset % TICKS_PER_CYCLE);
return syt & CIP_SYT_MASK;
}
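/*
 * Illustrative sketch (not part of the original driver): a worked example of the SYT
 * packing performed by compute_syt(). The values are arbitrary and chosen only to show
 * how the cycle and the tick offset end up in the 16-bit SYT field.
 *
 *	// syt_offset = 1000 ticks, cycle = 100, transfer_delay = 6144 ticks (2 cycles)
 *	// 1000 + 6144 = 7144 = 2 * TICKS_PER_CYCLE + 1000   (TICKS_PER_CYCLE == 3072)
 *	// syt = ((100 + 2) << 12) | 1000, masked to 16 bits:
 *	//   low 4 bits of cycle 102 (0x6) in bits 15-12, tick offset 0x3e8 in bits 11-0
 *	unsigned int syt = compute_syt(1000, 100, 6144);	// == 0x63e8
 */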
static void generate_rx_packet_descs(struct amdtp_stream *s, struct pkt_desc *desc,
const __be32 *ctx_header, unsigned int packet_count)
{
struct seq_desc *seq_descs = s->ctx_data.rx.seq.descs;
unsigned int seq_size = s->ctx_data.rx.seq.size;
unsigned int seq_pos = s->ctx_data.rx.seq.pos;
unsigned int dbc = s->data_block_counter;
bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
int i;
pool_seq_descs(s, seq_descs, seq_size, seq_pos, packet_count);
for (i = 0; i < packet_count; ++i) {
unsigned int index = (s->packet_index + i) % s->queue_size;
const struct seq_desc *seq = seq_descs + seq_pos;
desc->cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size);
if (aware_syt && seq->syt_offset != CIP_SYT_NO_INFO)
desc->syt = compute_syt(seq->syt_offset, desc->cycle, s->transfer_delay);
else
desc->syt = CIP_SYT_NO_INFO;
desc->data_blocks = seq->data_blocks;
if (s->flags & CIP_DBC_IS_END_EVENT)
dbc = (dbc + desc->data_blocks) & 0xff;
desc->data_block_counter = dbc;
if (!(s->flags & CIP_DBC_IS_END_EVENT))
dbc = (dbc + desc->data_blocks) & 0xff;
desc->ctx_payload = s->buffer.packets[index].buffer;
seq_pos = (seq_pos + 1) % seq_size;
desc = amdtp_stream_next_packet_desc(s, desc);
++ctx_header;
}
s->data_block_counter = dbc;
s->ctx_data.rx.seq.pos = seq_pos;
}
static inline void cancel_stream(struct amdtp_stream *s)
{
s->packet_index = -1;
if (in_softirq())
amdtp_stream_pcm_abort(s);
WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}
static snd_pcm_sframes_t compute_pcm_extra_delay(struct amdtp_stream *s,
const struct pkt_desc *desc, unsigned int count)
{
unsigned int data_block_count = 0;
u32 latest_cycle;
u32 cycle_time;
u32 curr_cycle;
u32 cycle_gap;
int i, err;
if (count == 0)
goto end;
// Forward to the latest record.
for (i = 0; i < count - 1; ++i)
desc = amdtp_stream_next_packet_desc(s, desc);
latest_cycle = desc->cycle;
err = fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &cycle_time);
if (err < 0)
goto end;
// Compute cycle count with lower 3 bits of second field and cycle field like timestamp
// format of 1394 OHCI isochronous context.
curr_cycle = compute_ohci_iso_ctx_cycle_count((cycle_time >> 12) & 0x0000ffff);
if (s->direction == AMDTP_IN_STREAM) {
// NOTE: The AMDTP packet descriptor should be for the past isochronous cycle since
// it corresponds to arrived isochronous packet.
if (compare_ohci_cycle_count(latest_cycle, curr_cycle) > 0)
goto end;
cycle_gap = decrement_ohci_cycle_count(curr_cycle, latest_cycle);
// NOTE: estimate the delay from the recent history of arrived AMDTP packets. The
// estimated value is expected to correspond to a few packets (0-2) since the packet
// which arrived at the most recent isochronous cycle has already been processed.
for (i = 0; i < cycle_gap; ++i) {
desc = amdtp_stream_next_packet_desc(s, desc);
data_block_count += desc->data_blocks;
}
} else {
// NOTE: The AMDTP packet descriptor should be for the future isochronous cycle
// since it was already scheduled.
if (compare_ohci_cycle_count(latest_cycle, curr_cycle) < 0)
goto end;
cycle_gap = decrement_ohci_cycle_count(latest_cycle, curr_cycle);
// NOTE: use history of scheduled packets.
for (i = 0; i < cycle_gap; ++i) {
data_block_count += desc->data_blocks;
desc = prev_packet_desc(s, desc);
}
}
end:
return data_block_count * s->pcm_frame_multiplier;
}
static void process_ctx_payloads(struct amdtp_stream *s,
const struct pkt_desc *desc,
unsigned int count)
{
struct snd_pcm_substream *pcm;
int i;
pcm = READ_ONCE(s->pcm);
s->process_ctx_payloads(s, desc, count, pcm);
if (pcm) {
unsigned int data_block_count = 0;
pcm->runtime->delay = compute_pcm_extra_delay(s, desc, count);
for (i = 0; i < count; ++i) {
data_block_count += desc->data_blocks;
desc = amdtp_stream_next_packet_desc(s, desc);
}
update_pcm_pointers(s, pcm, data_block_count * s->pcm_frame_multiplier);
}
}
static void process_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
void *header, void *private_data)
{
struct amdtp_stream *s = private_data;
const struct amdtp_domain *d = s->domain;
const __be32 *ctx_header = header;
const unsigned int events_per_period = d->events_per_period;
unsigned int event_count = s->ctx_data.rx.event_count;
struct pkt_desc *desc = s->packet_descs_cursor;
unsigned int pkt_header_length;
unsigned int packets;
u32 curr_cycle_time;
bool need_hw_irq;
int i;
if (s->packet_index < 0)
return;
// Calculate the number of packets in buffer and check XRUN.
packets = header_length / sizeof(*ctx_header);
generate_rx_packet_descs(s, desc, ctx_header, packets);
process_ctx_payloads(s, desc, packets);
if (!(s->flags & CIP_NO_HEADER))
pkt_header_length = IT_PKT_HEADER_SIZE_CIP;
else
pkt_header_length = 0;
if (s == d->irq_target) {
// In NO_PERIOD_WAKEUP mode, the packets for all IT/IR contexts are processed in the
// context of the user process operating the ALSA PCM character device via ioctl(2)
// requests, instead of in the scheduled hardware IRQ of an IT context.
struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
need_hw_irq = !pcm || !pcm->runtime->no_period_wakeup;
} else {
need_hw_irq = false;
}
if (trace_amdtp_packet_enabled())
(void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time);
for (i = 0; i < packets; ++i) {
struct {
struct fw_iso_packet params;
__be32 header[CIP_HEADER_QUADLETS];
} template = { {0}, {0} };
bool sched_irq = false;
build_it_pkt_header(s, desc->cycle, &template.params, pkt_header_length,
desc->data_blocks, desc->data_block_counter,
desc->syt, i, curr_cycle_time);
if (s == s->domain->irq_target) {
event_count += desc->data_blocks;
if (event_count >= events_per_period) {
event_count -= events_per_period;
sched_irq = need_hw_irq;
}
}
if (queue_out_packet(s, &template.params, sched_irq) < 0) {
cancel_stream(s);
return;
}
desc = amdtp_stream_next_packet_desc(s, desc);
}
s->ctx_data.rx.event_count = event_count;
s->packet_descs_cursor = desc;
}
static void skip_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
void *header, void *private_data)
{
struct amdtp_stream *s = private_data;
struct amdtp_domain *d = s->domain;
const __be32 *ctx_header = header;
unsigned int packets;
unsigned int cycle;
int i;
if (s->packet_index < 0)
return;
packets = header_length / sizeof(*ctx_header);
cycle = compute_ohci_it_cycle(ctx_header[packets - 1], s->queue_size);
s->next_cycle = increment_ohci_cycle_count(cycle, 1);
for (i = 0; i < packets; ++i) {
struct fw_iso_packet params = {
.header_length = 0,
.payload_length = 0,
};
bool sched_irq = (s == d->irq_target && i == packets - 1);
if (queue_out_packet(s, &params, sched_irq) < 0) {
cancel_stream(s);
return;
}
}
}
static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
void *header, void *private_data);
static void process_rx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
size_t header_length, void *header, void *private_data)
{
struct amdtp_stream *s = private_data;
struct amdtp_domain *d = s->domain;
__be32 *ctx_header = header;
const unsigned int queue_size = s->queue_size;
unsigned int packets;
unsigned int offset;
if (s->packet_index < 0)
return;
packets = header_length / sizeof(*ctx_header);
offset = 0;
while (offset < packets) {
unsigned int cycle = compute_ohci_it_cycle(ctx_header[offset], queue_size);
if (compare_ohci_cycle_count(cycle, d->processing_cycle.rx_start) >= 0)
break;
++offset;
}
if (offset > 0) {
unsigned int length = sizeof(*ctx_header) * offset;
skip_rx_packets(context, tstamp, length, ctx_header, private_data);
if (amdtp_streaming_error(s))
return;
ctx_header += offset;
header_length -= length;
}
if (offset < packets) {
s->ready_processing = true;
wake_up(&s->ready_wait);
if (d->replay.enable)
s->ctx_data.rx.cache_pos = 0;
process_rx_packets(context, tstamp, header_length, ctx_header, private_data);
if (amdtp_streaming_error(s))
return;
if (s == d->irq_target)
s->context->callback.sc = irq_target_callback;
else
s->context->callback.sc = process_rx_packets;
}
}
static void process_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
void *header, void *private_data)
{
struct amdtp_stream *s = private_data;
__be32 *ctx_header = header;
struct pkt_desc *desc = s->packet_descs_cursor;
unsigned int packet_count;
unsigned int desc_count;
int i;
int err;
if (s->packet_index < 0)
return;
// Calculate the number of packets in buffer and check XRUN.
packet_count = header_length / s->ctx_data.tx.ctx_header_size;
desc_count = 0;
err = generate_tx_packet_descs(s, desc, ctx_header, packet_count, &desc_count);
if (err < 0) {
if (err != -EAGAIN) {
cancel_stream(s);
return;
}
} else {
struct amdtp_domain *d = s->domain;
process_ctx_payloads(s, desc, desc_count);
if (d->replay.enable)
cache_seq(s, desc, desc_count);
for (i = 0; i < desc_count; ++i)
desc = amdtp_stream_next_packet_desc(s, desc);
s->packet_descs_cursor = desc;
}
for (i = 0; i < packet_count; ++i) {
struct fw_iso_packet params = {0};
if (queue_in_packet(s, &params) < 0) {
cancel_stream(s);
return;
}
}
}
static void drop_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
void *header, void *private_data)
{
struct amdtp_stream *s = private_data;
const __be32 *ctx_header = header;
unsigned int packets;
unsigned int cycle;
int i;
if (s->packet_index < 0)
return;
packets = header_length / s->ctx_data.tx.ctx_header_size;
ctx_header += (packets - 1) * s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
cycle = compute_ohci_cycle_count(ctx_header[1]);
s->next_cycle = increment_ohci_cycle_count(cycle, 1);
for (i = 0; i < packets; ++i) {
struct fw_iso_packet params = {0};
if (queue_in_packet(s, &params) < 0) {
cancel_stream(s);
return;
}
}
}
static void process_tx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
size_t header_length, void *header, void *private_data)
{
struct amdtp_stream *s = private_data;
struct amdtp_domain *d = s->domain;
__be32 *ctx_header;
unsigned int packets;
unsigned int offset;
if (s->packet_index < 0)
return;
packets = header_length / s->ctx_data.tx.ctx_header_size;
offset = 0;
ctx_header = header;
while (offset < packets) {
unsigned int cycle = compute_ohci_cycle_count(ctx_header[1]);
if (compare_ohci_cycle_count(cycle, d->processing_cycle.tx_start) >= 0)
break;
ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
++offset;
}
ctx_header = header;
if (offset > 0) {
size_t length = s->ctx_data.tx.ctx_header_size * offset;
drop_tx_packets(context, tstamp, length, ctx_header, s);
if (amdtp_streaming_error(s))
return;
ctx_header += length / sizeof(*ctx_header);
header_length -= length;
}
if (offset < packets) {
s->ready_processing = true;
wake_up(&s->ready_wait);
process_tx_packets(context, tstamp, header_length, ctx_header, s);
if (amdtp_streaming_error(s))
return;
context->callback.sc = process_tx_packets;
}
}
static void drop_tx_packets_initially(struct fw_iso_context *context, u32 tstamp,
size_t header_length, void *header, void *private_data)
{
struct amdtp_stream *s = private_data;
struct amdtp_domain *d = s->domain;
__be32 *ctx_header;
unsigned int count;
unsigned int events;
int i;
if (s->packet_index < 0)
return;
count = header_length / s->ctx_data.tx.ctx_header_size;
// Attempt to detect any event in the batch of packets.
events = 0;
ctx_header = header;
for (i = 0; i < count; ++i) {
unsigned int payload_quads =
(be32_to_cpu(*ctx_header) >> ISO_DATA_LENGTH_SHIFT) / sizeof(__be32);
unsigned int data_blocks;
if (s->flags & CIP_NO_HEADER) {
data_blocks = payload_quads / s->data_block_quadlets;
} else {
__be32 *cip_headers = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;
if (payload_quads < CIP_HEADER_QUADLETS) {
data_blocks = 0;
} else {
payload_quads -= CIP_HEADER_QUADLETS;
if (s->flags & CIP_UNAWARE_SYT) {
data_blocks = payload_quads / s->data_block_quadlets;
} else {
u32 cip1 = be32_to_cpu(cip_headers[1]);
// A NO_DATA packet can include data blocks, but they are
// not available as events.
if ((cip1 & CIP_NO_DATA) == CIP_NO_DATA)
data_blocks = 0;
else
data_blocks = payload_quads / s->data_block_quadlets;
}
}
}
events += data_blocks;
ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
}
drop_tx_packets(context, tstamp, header_length, header, s);
if (events > 0)
s->ctx_data.tx.event_starts = true;
// Decide the cycle count at which to begin processing packet content in IR contexts.
{
unsigned int stream_count = 0;
unsigned int event_starts_count = 0;
unsigned int cycle = UINT_MAX;
list_for_each_entry(s, &d->streams, list) {
if (s->direction == AMDTP_IN_STREAM) {
++stream_count;
if (s->ctx_data.tx.event_starts)
++event_starts_count;
}
}
if (stream_count == event_starts_count) {
unsigned int next_cycle;
list_for_each_entry(s, &d->streams, list) {
if (s->direction != AMDTP_IN_STREAM)
continue;
next_cycle = increment_ohci_cycle_count(s->next_cycle,
d->processing_cycle.tx_init_skip);
if (cycle == UINT_MAX ||
compare_ohci_cycle_count(next_cycle, cycle) > 0)
cycle = next_cycle;
s->context->callback.sc = process_tx_packets_intermediately;
}
d->processing_cycle.tx_start = cycle;
}
}
}
static void process_ctxs_in_domain(struct amdtp_domain *d)
{
struct amdtp_stream *s;
list_for_each_entry(s, &d->streams, list) {
if (s != d->irq_target && amdtp_stream_running(s))
fw_iso_context_flush_completions(s->context);
if (amdtp_streaming_error(s))
goto error;
}
return;
error:
if (amdtp_stream_running(d->irq_target))
cancel_stream(d->irq_target);
list_for_each_entry(s, &d->streams, list) {
if (amdtp_stream_running(s))
cancel_stream(s);
}
}
static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
void *header, void *private_data)
{
struct amdtp_stream *s = private_data;
struct amdtp_domain *d = s->domain;
process_rx_packets(context, tstamp, header_length, header, private_data);
process_ctxs_in_domain(d);
}
static void irq_target_callback_intermediately(struct fw_iso_context *context, u32 tstamp,
size_t header_length, void *header, void *private_data)
{
struct amdtp_stream *s = private_data;
struct amdtp_domain *d = s->domain;
process_rx_packets_intermediately(context, tstamp, header_length, header, private_data);
process_ctxs_in_domain(d);
}
static void irq_target_callback_skip(struct fw_iso_context *context, u32 tstamp,
size_t header_length, void *header, void *private_data)
{
struct amdtp_stream *s = private_data;
struct amdtp_domain *d = s->domain;
bool ready_to_start;
skip_rx_packets(context, tstamp, header_length, header, private_data);
process_ctxs_in_domain(d);
if (d->replay.enable && !d->replay.on_the_fly) {
unsigned int rx_count = 0;
unsigned int rx_ready_count = 0;
struct amdtp_stream *rx;
list_for_each_entry(rx, &d->streams, list) {
struct amdtp_stream *tx;
unsigned int cached_cycles;
if (rx->direction != AMDTP_OUT_STREAM)
continue;
++rx_count;
tx = rx->ctx_data.rx.replay_target;
cached_cycles = calculate_cached_cycle_count(tx, 0);
if (cached_cycles > tx->ctx_data.tx.cache.size / 2)
++rx_ready_count;
}
ready_to_start = (rx_count == rx_ready_count);
} else {
ready_to_start = true;
}
// Decide the cycle count at which to begin processing packet content in IT contexts. All
// IT contexts are expected to have started and received a callback by the time we get here.
if (ready_to_start) {
unsigned int cycle = s->next_cycle;
list_for_each_entry(s, &d->streams, list) {
if (s->direction != AMDTP_OUT_STREAM)
continue;
if (compare_ohci_cycle_count(s->next_cycle, cycle) > 0)
cycle = s->next_cycle;
if (s == d->irq_target)
s->context->callback.sc = irq_target_callback_intermediately;
else
s->context->callback.sc = process_rx_packets_intermediately;
}
d->processing_cycle.rx_start = cycle;
}
}
// This is executed one time only. For an in-stream, the first packet has arrived. For an
// out-stream, the context is prepared to transmit the first packet.
static void amdtp_stream_first_callback(struct fw_iso_context *context,
u32 tstamp, size_t header_length,
void *header, void *private_data)
{
struct amdtp_stream *s = private_data;
struct amdtp_domain *d = s->domain;
if (s->direction == AMDTP_IN_STREAM) {
context->callback.sc = drop_tx_packets_initially;
} else {
if (s == d->irq_target)
context->callback.sc = irq_target_callback_skip;
else
context->callback.sc = skip_rx_packets;
}
context->callback.sc(context, tstamp, header_length, header, s);
}
/**
* amdtp_stream_start - start transferring packets
* @s: the AMDTP stream to start
* @channel: the isochronous channel on the bus
* @speed: firewire speed code
* @queue_size: The number of packets in the queue.
* @idle_irq_interval: the interval to queue packet during initial state.
*
* The stream cannot be started until it has been configured with
* amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
* device can be started.
*/
static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
unsigned int queue_size, unsigned int idle_irq_interval)
{
bool is_irq_target = (s == s->domain->irq_target);
unsigned int ctx_header_size;
unsigned int max_ctx_payload_size;
enum dma_data_direction dir;
struct pkt_desc *descs;
int i, type, tag, err;
mutex_lock(&s->mutex);
if (WARN_ON(amdtp_stream_running(s) ||
(s->data_block_quadlets < 1))) {
err = -EBADFD;
goto err_unlock;
}
if (s->direction == AMDTP_IN_STREAM) {
// NOTE: IT context should be used for constant IRQ.
if (is_irq_target) {
err = -EINVAL;
goto err_unlock;
}
s->data_block_counter = UINT_MAX;
} else {
s->data_block_counter = 0;
}
// initialize packet buffer.
if (s->direction == AMDTP_IN_STREAM) {
dir = DMA_FROM_DEVICE;
type = FW_ISO_CONTEXT_RECEIVE;
if (!(s->flags & CIP_NO_HEADER))
ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
else
ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
} else {
dir = DMA_TO_DEVICE;
type = FW_ISO_CONTEXT_TRANSMIT;
ctx_header_size = 0; // No effect for IT context.
}
max_ctx_payload_size = amdtp_stream_get_max_ctx_payload_size(s);
err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size, max_ctx_payload_size, dir);
if (err < 0)
goto err_unlock;
s->queue_size = queue_size;
s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
type, channel, speed, ctx_header_size,
amdtp_stream_first_callback, s);
if (IS_ERR(s->context)) {
err = PTR_ERR(s->context);
if (err == -EBUSY)
dev_err(&s->unit->device,
"no free stream on this controller\n");
goto err_buffer;
}
amdtp_stream_update(s);
if (s->direction == AMDTP_IN_STREAM) {
s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
s->ctx_data.tx.ctx_header_size = ctx_header_size;
s->ctx_data.tx.event_starts = false;
if (s->domain->replay.enable) {
// struct fw_iso_context.drop_overflow_headers is false, therefore it's
// possible to cache unexpectedly many descriptors.
s->ctx_data.tx.cache.size = max_t(unsigned int, s->syt_interval * 2,
queue_size * 3 / 2);
s->ctx_data.tx.cache.pos = 0;
s->ctx_data.tx.cache.descs = kcalloc(s->ctx_data.tx.cache.size,
sizeof(*s->ctx_data.tx.cache.descs), GFP_KERNEL);
if (!s->ctx_data.tx.cache.descs) {
err = -ENOMEM;
goto err_context;
}
}
} else {
static const struct {
unsigned int data_block;
unsigned int syt_offset;
} *entry, initial_state[] = {
[CIP_SFC_32000] = { 4, 3072 },
[CIP_SFC_48000] = { 6, 1024 },
[CIP_SFC_96000] = { 12, 1024 },
[CIP_SFC_192000] = { 24, 1024 },
[CIP_SFC_44100] = { 0, 67 },
[CIP_SFC_88200] = { 0, 67 },
[CIP_SFC_176400] = { 0, 67 },
};
s->ctx_data.rx.seq.descs = kcalloc(queue_size, sizeof(*s->ctx_data.rx.seq.descs), GFP_KERNEL);
if (!s->ctx_data.rx.seq.descs) {
err = -ENOMEM;
goto err_context;
}
s->ctx_data.rx.seq.size = queue_size;
s->ctx_data.rx.seq.pos = 0;
entry = &initial_state[s->sfc];
s->ctx_data.rx.data_block_state = entry->data_block;
s->ctx_data.rx.syt_offset_state = entry->syt_offset;
s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE;
s->ctx_data.rx.event_count = 0;
}
if (s->flags & CIP_NO_HEADER)
s->tag = TAG_NO_CIP_HEADER;
else
s->tag = TAG_CIP;
// NOTE: When operating without hardIRQ/softIRQ, applications tend to issue the ioctl
// request for the runtime of the PCM substream at an interval equivalent to the size of
// the PCM buffer. This can wrap around the queue of AMDTP packet descriptors and lose a
// small amount of history. To be safe, keep 8 extra elements in the queue, equivalent to 1 ms.
descs = kcalloc(s->queue_size + 8, sizeof(*descs), GFP_KERNEL);
if (!descs) {
err = -ENOMEM;
goto err_context;
}
s->packet_descs = descs;
INIT_LIST_HEAD(&s->packet_descs_list);
for (i = 0; i < s->queue_size; ++i) {
INIT_LIST_HEAD(&descs->link);
list_add_tail(&descs->link, &s->packet_descs_list);
++descs;
}
s->packet_descs_cursor = list_first_entry(&s->packet_descs_list, struct pkt_desc, link);
s->packet_index = 0;
do {
struct fw_iso_packet params;
if (s->direction == AMDTP_IN_STREAM) {
err = queue_in_packet(s, &params);
} else {
bool sched_irq = false;
params.header_length = 0;
params.payload_length = 0;
if (is_irq_target) {
sched_irq = !((s->packet_index + 1) %
idle_irq_interval);
}
err = queue_out_packet(s, &params, sched_irq);
}
if (err < 0)
goto err_pkt_descs;
} while (s->packet_index > 0);
/* NOTE: TAG1 matches CIP. This just affects in stream. */
tag = FW_ISO_CONTEXT_MATCH_TAG1;
if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
tag |= FW_ISO_CONTEXT_MATCH_TAG0;
s->ready_processing = false;
err = fw_iso_context_start(s->context, -1, 0, tag);
if (err < 0)
goto err_pkt_descs;
mutex_unlock(&s->mutex);
return 0;
err_pkt_descs:
kfree(s->packet_descs);
s->packet_descs = NULL;
err_context:
if (s->direction == AMDTP_OUT_STREAM) {
kfree(s->ctx_data.rx.seq.descs);
} else {
if (s->domain->replay.enable)
kfree(s->ctx_data.tx.cache.descs);
}
fw_iso_context_destroy(s->context);
s->context = ERR_PTR(-1);
err_buffer:
iso_packets_buffer_destroy(&s->buffer, s->unit);
err_unlock:
mutex_unlock(&s->mutex);
return err;
}
/**
* amdtp_domain_stream_pcm_pointer - get the PCM buffer position
* @d: the AMDTP domain.
* @s: the AMDTP stream that transports the PCM data
*
* Returns the current buffer position, in frames.
*/
unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
struct amdtp_stream *s)
{
struct amdtp_stream *irq_target = d->irq_target;
// Process isochronous packets queued till recent isochronous cycle to handle PCM frames.
if (irq_target && amdtp_stream_running(irq_target)) {
// In software IRQ context, the call would dead-lock because it disables the
// tasklet synchronously.
if (!in_softirq())
fw_iso_context_flush_completions(irq_target->context);
}
return READ_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);
/**
* amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
* @d: the AMDTP domain.
* @s: the AMDTP stream that transfers the PCM frames
*
* Returns zero always.
*/
int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
{
struct amdtp_stream *irq_target = d->irq_target;
// Process isochronous packets for recent isochronous cycle to handle
// queued PCM frames.
if (irq_target && amdtp_stream_running(irq_target))
fw_iso_context_flush_completions(irq_target->context);
return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack);
/**
* amdtp_stream_update - update the stream after a bus reset
* @s: the AMDTP stream
*/
void amdtp_stream_update(struct amdtp_stream *s)
{
/* Precomputing. */
WRITE_ONCE(s->source_node_id_field,
(fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
}
EXPORT_SYMBOL(amdtp_stream_update);
/**
* amdtp_stream_stop - stop sending packets
* @s: the AMDTP stream to stop
*
* All PCM and MIDI devices of the stream must be stopped before the stream
* itself can be stopped.
*/
static void amdtp_stream_stop(struct amdtp_stream *s)
{
mutex_lock(&s->mutex);
if (!amdtp_stream_running(s)) {
mutex_unlock(&s->mutex);
return;
}
fw_iso_context_stop(s->context);
fw_iso_context_destroy(s->context);
s->context = ERR_PTR(-1);
iso_packets_buffer_destroy(&s->buffer, s->unit);
kfree(s->packet_descs);
s->packet_descs = NULL;
if (s->direction == AMDTP_OUT_STREAM) {
kfree(s->ctx_data.rx.seq.descs);
} else {
if (s->domain->replay.enable)
kfree(s->ctx_data.tx.cache.descs);
}
mutex_unlock(&s->mutex);
}
/**
* amdtp_stream_pcm_abort - abort the running PCM device
* @s: the AMDTP stream about to be stopped
*
* If the isochronous stream needs to be stopped asynchronously, call this
* function first to stop the PCM device.
*/
void amdtp_stream_pcm_abort(struct amdtp_stream *s)
{
struct snd_pcm_substream *pcm;
pcm = READ_ONCE(s->pcm);
if (pcm)
snd_pcm_stop_xrun(pcm);
}
EXPORT_SYMBOL(amdtp_stream_pcm_abort);
/**
* amdtp_domain_init - initialize an AMDTP domain structure
* @d: the AMDTP domain to initialize.
*/
int amdtp_domain_init(struct amdtp_domain *d)
{
INIT_LIST_HEAD(&d->streams);
d->events_per_period = 0;
return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_init);
/**
* amdtp_domain_destroy - destroy an AMDTP domain structure
* @d: the AMDTP domain to destroy.
*/
void amdtp_domain_destroy(struct amdtp_domain *d)
{
// At present nothing to do.
return;
}
EXPORT_SYMBOL_GPL(amdtp_domain_destroy);
/**
* amdtp_domain_add_stream - register isoc context into the domain.
* @d: the AMDTP domain.
* @s: the AMDTP stream.
* @channel: the isochronous channel on the bus.
* @speed: firewire speed code.
*/
int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
int channel, int speed)
{
struct amdtp_stream *tmp;
list_for_each_entry(tmp, &d->streams, list) {
if (s == tmp)
return -EBUSY;
}
list_add(&s->list, &d->streams);
s->channel = channel;
s->speed = speed;
s->domain = d;
return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);
// Make the reference from rx stream to tx stream for sequence replay. When the number of tx streams
// is less than the number of rx streams, the first tx stream is selected.
static int make_association(struct amdtp_domain *d)
{
unsigned int dst_index = 0;
struct amdtp_stream *rx;
// Make association to replay target.
list_for_each_entry(rx, &d->streams, list) {
if (rx->direction == AMDTP_OUT_STREAM) {
unsigned int src_index = 0;
struct amdtp_stream *tx = NULL;
struct amdtp_stream *s;
list_for_each_entry(s, &d->streams, list) {
if (s->direction == AMDTP_IN_STREAM) {
if (dst_index == src_index) {
tx = s;
break;
}
++src_index;
}
}
if (!tx) {
// Select the first entry.
list_for_each_entry(s, &d->streams, list) {
if (s->direction == AMDTP_IN_STREAM) {
tx = s;
break;
}
}
// No target is available to replay sequence.
if (!tx)
return -EINVAL;
}
rx->ctx_data.rx.replay_target = tx;
++dst_index;
}
}
return 0;
}
/**
* amdtp_domain_start - start sending packets for isoc context in the domain.
* @d: the AMDTP domain.
* @tx_init_skip_cycles: the number of cycles to skip processing packets at initial stage of IR
* contexts.
* @replay_seq: whether to replay the sequence of packets in IR contexts as the sequence of packets
* in IT contexts.
* @replay_on_the_fly: transfer rx packets according to the nominal frequency at first, then begin to
* replay according to the arrival of events in tx packets.
*/
int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles, bool replay_seq,
bool replay_on_the_fly)
{
unsigned int events_per_buffer = d->events_per_buffer;
unsigned int events_per_period = d->events_per_period;
unsigned int queue_size;
struct amdtp_stream *s;
bool found = false;
int err;
if (replay_seq) {
err = make_association(d);
if (err < 0)
return err;
}
d->replay.enable = replay_seq;
d->replay.on_the_fly = replay_on_the_fly;
// Select an IT context as IRQ target.
list_for_each_entry(s, &d->streams, list) {
if (s->direction == AMDTP_OUT_STREAM) {
found = true;
break;
}
}
if (!found)
return -ENXIO;
d->irq_target = s;
d->processing_cycle.tx_init_skip = tx_init_skip_cycles;
// This is the case in which the AMDTP streams in the domain run just for a MIDI
// substream. Use the number of events equivalent to 10 msec as the
// interval of hardware IRQ.
if (events_per_period == 0)
events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100;
if (events_per_buffer == 0)
events_per_buffer = events_per_period * 3;
queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
amdtp_rate_table[d->irq_target->sfc]);
list_for_each_entry(s, &d->streams, list) {
unsigned int idle_irq_interval = 0;
if (s->direction == AMDTP_OUT_STREAM && s == d->irq_target) {
idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
amdtp_rate_table[d->irq_target->sfc]);
}
// Starts immediately, but the DMA context actually starts several hundred cycles later.
err = amdtp_stream_start(s, s->channel, s->speed, queue_size, idle_irq_interval);
if (err < 0)
goto error;
}
return 0;
error:
list_for_each_entry(s, &d->streams, list)
amdtp_stream_stop(s);
return err;
}
EXPORT_SYMBOL_GPL(amdtp_domain_start);
/**
* amdtp_domain_stop - stop sending packets for isoc context in the same domain.
* @d: the AMDTP domain to which the isoc contexts belong.
*/
void amdtp_domain_stop(struct amdtp_domain *d)
{
struct amdtp_stream *s, *next;
if (d->irq_target)
amdtp_stream_stop(d->irq_target);
list_for_each_entry_safe(s, next, &d->streams, list) {
list_del(&s->list);
if (s != d->irq_target)
amdtp_stream_stop(s);
}
d->events_per_period = 0;
d->irq_target = NULL;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stop);
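/*
 * Illustrative sketch (not part of the original driver): the typical lifecycle of an
 * AMDTP domain as seen from a unit driver. The stream objects, channel numbers and the
 * speed code are placeholders; configuring the streams themselves (for example via
 * amdtp_stream_set_parameters()) is not shown here.
 *
 *	struct amdtp_domain d;
 *	int err;
 *
 *	err = amdtp_domain_init(&d);
 *	if (err >= 0)
 *		err = amdtp_domain_add_stream(&d, &tx_stream, tx_channel, SCODE_400);
 *	if (err >= 0)
 *		err = amdtp_domain_add_stream(&d, &rx_stream, rx_channel, SCODE_400);
 *	// Start all contexts; enable sequence replay without on-the-fly mode.
 *	if (err >= 0)
 *		err = amdtp_domain_start(&d, 0, true, false);
 *
 *	// PCM .pointer and .ack callbacks then delegate to:
 *	//	amdtp_domain_stream_pcm_pointer(&d, &rx_stream);
 *	//	amdtp_domain_stream_pcm_ack(&d, &rx_stream);
 *
 *	amdtp_domain_stop(&d);
 *	amdtp_domain_destroy(&d);
 */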
| linux-master | sound/firewire/amdtp-stream.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* helpers for managing a buffer for many packets
*
* Copyright (c) Clemens Ladisch <[email protected]>
*/
#include <linux/firewire.h>
#include <linux/export.h>
#include <linux/slab.h>
#include "packets-buffer.h"
/**
* iso_packets_buffer_init - allocates the memory for packets
* @b: the buffer structure to initialize
* @unit: the device at the other end of the stream
* @count: the number of packets
* @packet_size: the (maximum) size of a packet, in bytes
* @direction: %DMA_TO_DEVICE or %DMA_FROM_DEVICE
*/
int iso_packets_buffer_init(struct iso_packets_buffer *b, struct fw_unit *unit,
unsigned int count, unsigned int packet_size,
enum dma_data_direction direction)
{
unsigned int packets_per_page, pages;
unsigned int i, page_index, offset_in_page;
void *p;
int err;
b->packets = kmalloc_array(count, sizeof(*b->packets), GFP_KERNEL);
if (!b->packets) {
err = -ENOMEM;
goto error;
}
packet_size = L1_CACHE_ALIGN(packet_size);
packets_per_page = PAGE_SIZE / packet_size;
if (WARN_ON(!packets_per_page)) {
err = -EINVAL;
goto err_packets;
}
pages = DIV_ROUND_UP(count, packets_per_page);
err = fw_iso_buffer_init(&b->iso_buffer, fw_parent_device(unit)->card,
pages, direction);
if (err < 0)
goto err_packets;
for (i = 0; i < count; ++i) {
page_index = i / packets_per_page;
p = page_address(b->iso_buffer.pages[page_index]);
offset_in_page = (i % packets_per_page) * packet_size;
b->packets[i].buffer = p + offset_in_page;
b->packets[i].offset = page_index * PAGE_SIZE + offset_in_page;
}
return 0;
err_packets:
kfree(b->packets);
error:
return err;
}
EXPORT_SYMBOL(iso_packets_buffer_init);
/**
* iso_packets_buffer_destroy - frees packet buffer resources
* @b: the buffer structure to free
* @unit: the device at the other end of the stream
*/
void iso_packets_buffer_destroy(struct iso_packets_buffer *b,
struct fw_unit *unit)
{
fw_iso_buffer_destroy(&b->iso_buffer, fw_parent_device(unit)->card);
kfree(b->packets);
}
EXPORT_SYMBOL(iso_packets_buffer_destroy);
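/*
 * Illustrative sketch (not part of the original code): how a stream implementation might
 * allocate and use this buffer. The packet count and packet size are placeholder values.
 *
 *	struct iso_packets_buffer b;
 *	int err;
 *
 *	err = iso_packets_buffer_init(&b, unit, 48, 1024, DMA_FROM_DEVICE);
 *	if (err < 0)
 *		return err;
 *	// b.packets[i].buffer is the CPU address of packet i's payload area, and
 *	// b.packets[i].offset is its offset within b.iso_buffer for queueing.
 *	...
 *	iso_packets_buffer_destroy(&b, unit);
 */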
| linux-master | sound/firewire/packets-buffer.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* isochronous resources helper functions
*
* Copyright (c) Clemens Ladisch <[email protected]>
*/
#include <linux/device.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include "iso-resources.h"
/**
* fw_iso_resources_init - initializes a &struct fw_iso_resources
* @r: the resource manager to initialize
* @unit: the device unit for which the resources will be needed
*
* If the device does not support all channel numbers, change @r->channels_mask
* after calling this function.
*/
int fw_iso_resources_init(struct fw_iso_resources *r, struct fw_unit *unit)
{
r->channels_mask = ~0uLL;
r->unit = unit;
mutex_init(&r->mutex);
r->allocated = false;
return 0;
}
EXPORT_SYMBOL(fw_iso_resources_init);
/**
* fw_iso_resources_destroy - destroy a resource manager
* @r: the resource manager that is no longer needed
*/
void fw_iso_resources_destroy(struct fw_iso_resources *r)
{
WARN_ON(r->allocated);
mutex_destroy(&r->mutex);
}
EXPORT_SYMBOL(fw_iso_resources_destroy);
static unsigned int packet_bandwidth(unsigned int max_payload_bytes, int speed)
{
unsigned int bytes, s400_bytes;
/* iso packets have three header quadlets and quadlet-aligned payload */
bytes = 3 * 4 + ALIGN(max_payload_bytes, 4);
/* convert to bandwidth units (quadlets at S1600 = bytes at S400) */
if (speed <= SCODE_400)
s400_bytes = bytes * (1 << (SCODE_400 - speed));
else
s400_bytes = DIV_ROUND_UP(bytes, 1 << (speed - SCODE_400));
return s400_bytes;
}
static int current_bandwidth_overhead(struct fw_card *card)
{
/*
* Under the usual pessimistic assumption (cable length 4.5 m), the
* isochronous overhead for N cables is 1.797 µs + N * 0.494 µs, or
* 88.3 + N * 24.3 in bandwidth units.
*
* The calculation below tries to deduce N from the current gap count.
* If the gap count has been optimized by measuring the actual packet
* transmission time, this derived overhead should be near the actual
* overhead as well.
*/
return card->gap_count < 63 ? card->gap_count * 97 / 10 + 89 : 512;
}
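/*
 * Illustrative worked example (not part of the original code), using the formulas above:
 * a 580-byte payload costs 3 * 4 + ALIGN(580, 4) = 592 bandwidth units at S400, twice
 * that (1184) at S200, and half of it rounded up (296) at S800. With a gap count of 5,
 * current_bandwidth_overhead() yields 5 * 97 / 10 + 89 = 137 additional units; with an
 * unoptimized gap count (>= 63) it falls back to 512.
 */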
static int wait_isoch_resource_delay_after_bus_reset(struct fw_card *card)
{
for (;;) {
s64 delay = (card->reset_jiffies + HZ) - get_jiffies_64();
if (delay <= 0)
return 0;
if (schedule_timeout_interruptible(delay) > 0)
return -ERESTARTSYS;
}
}
/**
* fw_iso_resources_allocate - allocate isochronous channel and bandwidth
* @r: the resource manager
* @max_payload_bytes: the amount of data (including CIP headers) per packet
* @speed: the speed (e.g., SCODE_400) at which the packets will be sent
*
* This function allocates one isochronous channel and enough bandwidth for the
* specified packet size.
*
* Returns the channel number that the caller must use for streaming, or
* a negative error code. Due to potentially long delays, this function is
* interruptible and can return -ERESTARTSYS. On success, the caller is
* responsible for calling fw_iso_resources_update() on bus resets, and
* fw_iso_resources_free() when the resources are no longer needed.
*/
int fw_iso_resources_allocate(struct fw_iso_resources *r,
unsigned int max_payload_bytes, int speed)
{
struct fw_card *card = fw_parent_device(r->unit)->card;
int bandwidth, channel, err;
if (WARN_ON(r->allocated))
return -EBADFD;
r->bandwidth = packet_bandwidth(max_payload_bytes, speed);
retry_after_bus_reset:
spin_lock_irq(&card->lock);
r->generation = card->generation;
r->bandwidth_overhead = current_bandwidth_overhead(card);
spin_unlock_irq(&card->lock);
err = wait_isoch_resource_delay_after_bus_reset(card);
if (err < 0)
return err;
mutex_lock(&r->mutex);
bandwidth = r->bandwidth + r->bandwidth_overhead;
fw_iso_resource_manage(card, r->generation, r->channels_mask,
&channel, &bandwidth, true);
if (channel == -EAGAIN) {
mutex_unlock(&r->mutex);
goto retry_after_bus_reset;
}
if (channel >= 0) {
r->channel = channel;
r->allocated = true;
} else {
if (channel == -EBUSY)
dev_err(&r->unit->device,
"isochronous resources exhausted\n");
else
dev_err(&r->unit->device,
"isochronous resource allocation failed\n");
}
mutex_unlock(&r->mutex);
return channel;
}
EXPORT_SYMBOL(fw_iso_resources_allocate);
/**
* fw_iso_resources_update - update resource allocations after a bus reset
* @r: the resource manager
*
* This function must be called from the driver's .update handler to reallocate
* any resources that were allocated before the bus reset. It is safe to call
* this function if no resources are currently allocated.
*
* Returns a negative error code on failure. If this happens, the caller must
* stop streaming.
*/
int fw_iso_resources_update(struct fw_iso_resources *r)
{
struct fw_card *card = fw_parent_device(r->unit)->card;
int bandwidth, channel;
mutex_lock(&r->mutex);
if (!r->allocated) {
mutex_unlock(&r->mutex);
return 0;
}
spin_lock_irq(&card->lock);
r->generation = card->generation;
r->bandwidth_overhead = current_bandwidth_overhead(card);
spin_unlock_irq(&card->lock);
bandwidth = r->bandwidth + r->bandwidth_overhead;
fw_iso_resource_manage(card, r->generation, 1uLL << r->channel,
&channel, &bandwidth, true);
/*
* When another bus reset happens, pretend that the allocation
* succeeded; we will try again for the new generation later.
*/
if (channel < 0 && channel != -EAGAIN) {
r->allocated = false;
if (channel == -EBUSY)
dev_err(&r->unit->device,
"isochronous resources exhausted\n");
else
dev_err(&r->unit->device,
"isochronous resource allocation failed\n");
}
mutex_unlock(&r->mutex);
return channel;
}
EXPORT_SYMBOL(fw_iso_resources_update);
/**
* fw_iso_resources_free - frees allocated resources
* @r: the resource manager
*
* This function deallocates the channel and bandwidth, if allocated.
*/
void fw_iso_resources_free(struct fw_iso_resources *r)
{
struct fw_card *card;
int bandwidth, channel;
/* Not initialized. */
if (r->unit == NULL)
return;
card = fw_parent_device(r->unit)->card;
mutex_lock(&r->mutex);
if (r->allocated) {
bandwidth = r->bandwidth + r->bandwidth_overhead;
fw_iso_resource_manage(card, r->generation, 1uLL << r->channel,
&channel, &bandwidth, false);
if (channel < 0)
dev_err(&r->unit->device,
"isochronous resource deallocation failed\n");
r->allocated = false;
}
mutex_unlock(&r->mutex);
}
EXPORT_SYMBOL(fw_iso_resources_free);
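/*
 * Illustrative sketch (not part of the original code): the expected call sequence in a
 * unit driver. The payload size is a placeholder and error handling is abbreviated.
 *
 *	struct fw_iso_resources r;
 *	int channel;
 *
 *	fw_iso_resources_init(&r, unit);
 *	channel = fw_iso_resources_allocate(&r, 1024, SCODE_400);
 *	if (channel < 0)
 *		goto error;
 *	// ... start streaming on 'channel' ...
 *	// From the driver's .update handler after a bus reset:
 *	//	if (fw_iso_resources_update(&r) < 0)
 *	//		stop_streaming();
 *	fw_iso_resources_free(&r);
 *	fw_iso_resources_destroy(&r);
 */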
| linux-master | sound/firewire/iso-resources.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Function Control Protocol (IEC 61883-1) helper functions
*
* Copyright (c) Clemens Ladisch <[email protected]>
*/
#include <linux/device.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include "fcp.h"
#include "lib.h"
#include "amdtp-stream.h"
#define CTS_AVC 0x00
#define ERROR_RETRIES 3
#define ERROR_DELAY_MS 5
#define FCP_TIMEOUT_MS 125
int avc_general_set_sig_fmt(struct fw_unit *unit, unsigned int rate,
enum avc_general_plug_dir dir,
unsigned short pid)
{
unsigned int sfc;
u8 *buf;
bool flag;
int err;
flag = false;
for (sfc = 0; sfc < CIP_SFC_COUNT; sfc++) {
if (amdtp_rate_table[sfc] == rate) {
flag = true;
break;
}
}
if (!flag)
return -EINVAL;
buf = kzalloc(8, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
buf[0] = 0x00; /* AV/C CONTROL */
buf[1] = 0xff; /* UNIT */
if (dir == AVC_GENERAL_PLUG_DIR_IN)
buf[2] = 0x19; /* INPUT PLUG SIGNAL FORMAT */
else
buf[2] = 0x18; /* OUTPUT PLUG SIGNAL FORMAT */
buf[3] = 0xff & pid; /* plug id */
buf[4] = 0x90; /* EOH_1, Form_1, FMT. AM824 */
buf[5] = 0x07 & sfc; /* FDF-hi. AM824, frequency */
buf[6] = 0xff; /* FDF-mid. AM824, SYT hi (not used)*/
buf[7] = 0xff; /* FDF-low. AM824, SYT lo (not used) */
/* do transaction and check buf[1-5] are the same against command */
err = fcp_avc_transaction(unit, buf, 8, buf, 8,
BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5));
if (err < 0)
;
else if (err < 8)
err = -EIO;
else if (buf[0] == 0x08) /* NOT IMPLEMENTED */
err = -ENOSYS;
else if (buf[0] == 0x0a) /* REJECTED */
err = -EINVAL;
if (err < 0)
goto end;
err = 0;
end:
kfree(buf);
return err;
}
EXPORT_SYMBOL(avc_general_set_sig_fmt);
int avc_general_get_sig_fmt(struct fw_unit *unit, unsigned int *rate,
enum avc_general_plug_dir dir,
unsigned short pid)
{
unsigned int sfc;
u8 *buf;
int err;
buf = kzalloc(8, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
buf[0] = 0x01; /* AV/C STATUS */
buf[1] = 0xff; /* Unit */
if (dir == AVC_GENERAL_PLUG_DIR_IN)
buf[2] = 0x19; /* INPUT PLUG SIGNAL FORMAT */
else
buf[2] = 0x18; /* OUTPUT PLUG SIGNAL FORMAT */
buf[3] = 0xff & pid; /* plug id */
buf[4] = 0x90; /* EOH_1, Form_1, FMT. AM824 */
buf[5] = 0xff; /* FDF-hi. AM824, frequency */
buf[6] = 0xff; /* FDF-mid. AM824, SYT hi (not used) */
buf[7] = 0xff; /* FDF-low. AM824, SYT lo (not used) */
/* do transaction and check buf[1-4] are the same against command */
err = fcp_avc_transaction(unit, buf, 8, buf, 8,
BIT(1) | BIT(2) | BIT(3) | BIT(4));
if (err < 0)
;
else if (err < 8)
err = -EIO;
else if (buf[0] == 0x08) /* NOT IMPLEMENTED */
err = -ENOSYS;
else if (buf[0] == 0x0a) /* REJECTED */
err = -EINVAL;
else if (buf[0] == 0x0b) /* IN TRANSITION */
err = -EAGAIN;
if (err < 0)
goto end;
/* check sfc field and pick up rate */
sfc = 0x07 & buf[5];
if (sfc >= CIP_SFC_COUNT) {
err = -EAGAIN; /* also in transition */
goto end;
}
*rate = amdtp_rate_table[sfc];
err = 0;
end:
kfree(buf);
return err;
}
EXPORT_SYMBOL(avc_general_get_sig_fmt);
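/*
 * Illustrative sketch (not part of the original code): negotiating the sampling rate of
 * plug 0 with the helpers above. The rate and the plug id are placeholder values.
 *
 *	unsigned int rate;
 *	int err;
 *
 *	err = avc_general_set_sig_fmt(unit, 48000, AVC_GENERAL_PLUG_DIR_IN, 0);
 *	if (err < 0)
 *		return err;
 *	// Read the rate back; -EAGAIN means the device reports IN TRANSITION.
 *	err = avc_general_get_sig_fmt(unit, &rate, AVC_GENERAL_PLUG_DIR_IN, 0);
 */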
int avc_general_get_plug_info(struct fw_unit *unit, unsigned int subunit_type,
unsigned int subunit_id, unsigned int subfunction,
u8 info[AVC_PLUG_INFO_BUF_BYTES])
{
u8 *buf;
int err;
/* extended subunit in spec.4.2 is not supported */
if ((subunit_type == 0x1E) || (subunit_id == 5))
return -EINVAL;
buf = kzalloc(8, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
buf[0] = 0x01; /* AV/C STATUS */
/* UNIT or Subunit, Functionblock */
buf[1] = ((subunit_type & 0x1f) << 3) | (subunit_id & 0x7);
buf[2] = 0x02; /* PLUG INFO */
buf[3] = 0xff & subfunction;
err = fcp_avc_transaction(unit, buf, 8, buf, 8, BIT(1) | BIT(2));
if (err < 0)
;
else if (err < 8)
err = -EIO;
else if (buf[0] == 0x08) /* NOT IMPLEMENTED */
err = -ENOSYS;
else if (buf[0] == 0x0a) /* REJECTED */
err = -EINVAL;
else if (buf[0] == 0x0b) /* IN TRANSITION */
err = -EAGAIN;
if (err < 0)
goto end;
info[0] = buf[4];
info[1] = buf[5];
info[2] = buf[6];
info[3] = buf[7];
err = 0;
end:
kfree(buf);
return err;
}
EXPORT_SYMBOL(avc_general_get_plug_info);
static DEFINE_SPINLOCK(transactions_lock);
static LIST_HEAD(transactions);
enum fcp_state {
STATE_PENDING,
STATE_BUS_RESET,
STATE_COMPLETE,
STATE_DEFERRED,
};
struct fcp_transaction {
struct list_head list;
struct fw_unit *unit;
void *response_buffer;
unsigned int response_size;
unsigned int response_match_bytes;
enum fcp_state state;
wait_queue_head_t wait;
bool deferrable;
};
/**
* fcp_avc_transaction - send an AV/C command and wait for its response
* @unit: a unit on the target device
* @command: a buffer containing the command frame; must be DMA-able
* @command_size: the size of @command
* @response: a buffer for the response frame
* @response_size: the maximum size of @response
* @response_match_bytes: a bitmap specifying the bytes used to detect the
* correct response frame
*
* This function sends a FCP command frame to the target and waits for the
* corresponding response frame to be returned.
*
* Because it is possible for multiple FCP transactions to be active at the
* same time, the correct response frame is detected by the value of certain
* bytes. These bytes must be set in @response before calling this function,
* and the corresponding bits must be set in @response_match_bytes.
*
* @command and @response can point to the same buffer.
*
* Returns the actual size of the response frame, or a negative error code.
*/
int fcp_avc_transaction(struct fw_unit *unit,
const void *command, unsigned int command_size,
void *response, unsigned int response_size,
unsigned int response_match_bytes)
{
struct fcp_transaction t;
int tcode, ret, tries = 0;
t.unit = unit;
t.response_buffer = response;
t.response_size = response_size;
t.response_match_bytes = response_match_bytes;
t.state = STATE_PENDING;
init_waitqueue_head(&t.wait);
t.deferrable = (*(const u8 *)command == 0x00 || *(const u8 *)command == 0x03);
spin_lock_irq(&transactions_lock);
list_add_tail(&t.list, &transactions);
spin_unlock_irq(&transactions_lock);
for (;;) {
tcode = command_size == 4 ? TCODE_WRITE_QUADLET_REQUEST
: TCODE_WRITE_BLOCK_REQUEST;
ret = snd_fw_transaction(t.unit, tcode,
CSR_REGISTER_BASE + CSR_FCP_COMMAND,
(void *)command, command_size, 0);
if (ret < 0)
break;
deferred:
wait_event_timeout(t.wait, t.state != STATE_PENDING,
msecs_to_jiffies(FCP_TIMEOUT_MS));
if (t.state == STATE_DEFERRED) {
/*
* The 'AV/C General Specification' defines no time limit
* on command completion once an INTERIM response has
* been sent, but we promise to finish this function
* for the caller. Here we use FCP_TIMEOUT_MS as the next
* interval. This is not in the specification.
*/
t.state = STATE_PENDING;
goto deferred;
} else if (t.state == STATE_COMPLETE) {
ret = t.response_size;
break;
} else if (t.state == STATE_BUS_RESET) {
msleep(ERROR_DELAY_MS);
} else if (++tries >= ERROR_RETRIES) {
dev_err(&t.unit->device, "FCP command timed out\n");
ret = -EIO;
break;
}
}
spin_lock_irq(&transactions_lock);
list_del(&t.list);
spin_unlock_irq(&transactions_lock);
return ret;
}
EXPORT_SYMBOL(fcp_avc_transaction);
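/*
 * Illustrative sketch (not part of the original code): a minimal STATUS command sent with
 * fcp_avc_transaction(), in the style of the helpers above. The opcode and operands are
 * placeholders; a real command must follow the AV/C specification for the target subunit.
 *
 *	u8 *buf = kzalloc(8, GFP_KERNEL);	// must be DMA-able, so no stack buffer
 *	int err;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	buf[0] = 0x01;		// AV/C STATUS
 *	buf[1] = 0xff;		// unit
 *	buf[2] = 0x02;		// PLUG INFO, as an example opcode
 *	buf[3] = 0x00;
 *	// Bytes 1 and 2 already hold the values expected in the response frame, so use
 *	// them to match the response against concurrent FCP transactions.
 *	err = fcp_avc_transaction(unit, buf, 8, buf, 8, BIT(1) | BIT(2));
 *	kfree(buf);
 */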
/**
* fcp_bus_reset - inform the target handler about a bus reset
* @unit: the unit that might be used by fcp_avc_transaction()
*
* This function must be called from the driver's .update handler to inform
* the FCP transaction handler that a bus reset has happened. Any pending FCP
* transactions are retried.
*/
void fcp_bus_reset(struct fw_unit *unit)
{
struct fcp_transaction *t;
spin_lock_irq(&transactions_lock);
list_for_each_entry(t, &transactions, list) {
if (t->unit == unit &&
(t->state == STATE_PENDING ||
t->state == STATE_DEFERRED)) {
t->state = STATE_BUS_RESET;
wake_up(&t->wait);
}
}
spin_unlock_irq(&transactions_lock);
}
EXPORT_SYMBOL(fcp_bus_reset);
/* checks whether the response matches the masked bytes in response_buffer */
static bool is_matching_response(struct fcp_transaction *transaction,
const void *response, size_t length)
{
const u8 *p1, *p2;
unsigned int mask, i;
p1 = response;
p2 = transaction->response_buffer;
mask = transaction->response_match_bytes;
for (i = 0; ; ++i) {
if ((mask & 1) && p1[i] != p2[i])
return false;
mask >>= 1;
if (!mask)
return true;
if (--length == 0)
return false;
}
}
static void fcp_response(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source,
int generation, unsigned long long offset,
void *data, size_t length, void *callback_data)
{
struct fcp_transaction *t;
unsigned long flags;
if (length < 1 || (*(const u8 *)data & 0xf0) != CTS_AVC)
return;
spin_lock_irqsave(&transactions_lock, flags);
list_for_each_entry(t, &transactions, list) {
struct fw_device *device = fw_parent_device(t->unit);
if (device->card != card ||
device->generation != generation)
continue;
smp_rmb(); /* node_id vs. generation */
if (device->node_id != source)
continue;
if (t->state == STATE_PENDING &&
is_matching_response(t, data, length)) {
if (t->deferrable && *(const u8 *)data == 0x0f) {
t->state = STATE_DEFERRED;
} else {
t->state = STATE_COMPLETE;
t->response_size = min_t(unsigned int, length,
t->response_size);
memcpy(t->response_buffer, data,
t->response_size);
}
wake_up(&t->wait);
}
}
spin_unlock_irqrestore(&transactions_lock, flags);
}
static struct fw_address_handler response_register_handler = {
.length = 0x200,
.address_callback = fcp_response,
};
static int __init fcp_module_init(void)
{
static const struct fw_address_region response_register_region = {
.start = CSR_REGISTER_BASE + CSR_FCP_RESPONSE,
.end = CSR_REGISTER_BASE + CSR_FCP_END,
};
fw_core_add_address_handler(&response_register_handler,
&response_register_region);
return 0;
}
static void __exit fcp_module_exit(void)
{
WARN_ON(!list_empty(&transactions));
fw_core_remove_address_handler(&response_register_handler);
}
module_init(fcp_module_init);
module_exit(fcp_module_exit);
| linux-master | sound/firewire/fcp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Connection Management Procedures (IEC 61883-1) helper functions
*
* Copyright (c) Clemens Ladisch <[email protected]>
*/
#include <linux/device.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/module.h>
#include <linux/sched.h>
#include "lib.h"
#include "iso-resources.h"
#include "cmp.h"
/* MPR common fields */
#define MPR_SPEED_MASK 0xc0000000
#define MPR_SPEED_SHIFT 30
#define MPR_XSPEED_MASK 0x00000060
#define MPR_XSPEED_SHIFT 5
#define MPR_PLUGS_MASK 0x0000001f
/* PCR common fields */
#define PCR_ONLINE 0x80000000
#define PCR_BCAST_CONN 0x40000000
#define PCR_P2P_CONN_MASK 0x3f000000
#define PCR_P2P_CONN_SHIFT 24
#define PCR_CHANNEL_MASK 0x003f0000
#define PCR_CHANNEL_SHIFT 16
/* oPCR specific fields */
#define OPCR_XSPEED_MASK 0x00C00000
#define OPCR_XSPEED_SHIFT 22
#define OPCR_SPEED_MASK 0x0000C000
#define OPCR_SPEED_SHIFT 14
#define OPCR_OVERHEAD_ID_MASK 0x00003C00
#define OPCR_OVERHEAD_ID_SHIFT 10
enum bus_reset_handling {
ABORT_ON_BUS_RESET,
SUCCEED_ON_BUS_RESET,
};
static __printf(2, 3)
void cmp_error(struct cmp_connection *c, const char *fmt, ...)
{
va_list va;
va_start(va, fmt);
dev_err(&c->resources.unit->device, "%cPCR%u: %pV",
(c->direction == CMP_INPUT) ? 'i' : 'o',
c->pcr_index, &(struct va_format){ fmt, &va });
va_end(va);
}
static u64 mpr_address(struct cmp_connection *c)
{
if (c->direction == CMP_INPUT)
return CSR_REGISTER_BASE + CSR_IMPR;
else
return CSR_REGISTER_BASE + CSR_OMPR;
}
static u64 pcr_address(struct cmp_connection *c)
{
if (c->direction == CMP_INPUT)
return CSR_REGISTER_BASE + CSR_IPCR(c->pcr_index);
else
return CSR_REGISTER_BASE + CSR_OPCR(c->pcr_index);
}
static int pcr_modify(struct cmp_connection *c,
__be32 (*modify)(struct cmp_connection *c, __be32 old),
int (*check)(struct cmp_connection *c, __be32 pcr),
enum bus_reset_handling bus_reset_handling)
{
__be32 old_arg, buffer[2];
int err;
buffer[0] = c->last_pcr_value;
for (;;) {
old_arg = buffer[0];
buffer[1] = modify(c, buffer[0]);
err = snd_fw_transaction(
c->resources.unit, TCODE_LOCK_COMPARE_SWAP,
pcr_address(c), buffer, 8,
FW_FIXED_GENERATION | c->resources.generation);
if (err < 0) {
if (err == -EAGAIN &&
bus_reset_handling == SUCCEED_ON_BUS_RESET)
err = 0;
return err;
}
if (buffer[0] == old_arg) /* success? */
break;
if (check) {
err = check(c, buffer[0]);
if (err < 0)
return err;
}
}
c->last_pcr_value = buffer[1];
return 0;
}
/**
* cmp_connection_init - initializes a connection manager
* @c: the connection manager to initialize
* @unit: a unit of the target device
* @direction: input or output
* @pcr_index: the index of the iPCR/oPCR on the target device
*/
int cmp_connection_init(struct cmp_connection *c,
struct fw_unit *unit,
enum cmp_direction direction,
unsigned int pcr_index)
{
__be32 mpr_be;
u32 mpr;
int err;
c->direction = direction;
err = snd_fw_transaction(unit, TCODE_READ_QUADLET_REQUEST,
mpr_address(c), &mpr_be, 4, 0);
if (err < 0)
return err;
mpr = be32_to_cpu(mpr_be);
if (pcr_index >= (mpr & MPR_PLUGS_MASK))
return -EINVAL;
err = fw_iso_resources_init(&c->resources, unit);
if (err < 0)
return err;
c->connected = false;
mutex_init(&c->mutex);
c->last_pcr_value = cpu_to_be32(0x80000000);
c->pcr_index = pcr_index;
c->max_speed = (mpr & MPR_SPEED_MASK) >> MPR_SPEED_SHIFT;
if (c->max_speed == SCODE_BETA)
c->max_speed += (mpr & MPR_XSPEED_MASK) >> MPR_XSPEED_SHIFT;
return 0;
}
EXPORT_SYMBOL(cmp_connection_init);
/**
 * cmp_connection_check_used - check whether the connection is already established
* @c: the connection manager to be checked
* @used: the pointer to store the result of checking the connection
*/
int cmp_connection_check_used(struct cmp_connection *c, bool *used)
{
__be32 pcr;
int err;
err = snd_fw_transaction(
c->resources.unit, TCODE_READ_QUADLET_REQUEST,
pcr_address(c), &pcr, 4, 0);
if (err >= 0)
*used = !!(pcr & cpu_to_be32(PCR_BCAST_CONN |
PCR_P2P_CONN_MASK));
return err;
}
EXPORT_SYMBOL(cmp_connection_check_used);
/**
* cmp_connection_destroy - free connection manager resources
* @c: the connection manager
*/
void cmp_connection_destroy(struct cmp_connection *c)
{
WARN_ON(c->connected);
mutex_destroy(&c->mutex);
fw_iso_resources_destroy(&c->resources);
}
EXPORT_SYMBOL(cmp_connection_destroy);
int cmp_connection_reserve(struct cmp_connection *c,
unsigned int max_payload_bytes)
{
int err;
mutex_lock(&c->mutex);
if (WARN_ON(c->resources.allocated)) {
err = -EBUSY;
goto end;
}
c->speed = min(c->max_speed,
fw_parent_device(c->resources.unit)->max_speed);
err = fw_iso_resources_allocate(&c->resources, max_payload_bytes,
c->speed);
end:
mutex_unlock(&c->mutex);
return err;
}
EXPORT_SYMBOL(cmp_connection_reserve);
void cmp_connection_release(struct cmp_connection *c)
{
mutex_lock(&c->mutex);
fw_iso_resources_free(&c->resources);
mutex_unlock(&c->mutex);
}
EXPORT_SYMBOL(cmp_connection_release);
static __be32 ipcr_set_modify(struct cmp_connection *c, __be32 ipcr)
{
ipcr &= ~cpu_to_be32(PCR_BCAST_CONN |
PCR_P2P_CONN_MASK |
PCR_CHANNEL_MASK);
ipcr |= cpu_to_be32(1 << PCR_P2P_CONN_SHIFT);
ipcr |= cpu_to_be32(c->resources.channel << PCR_CHANNEL_SHIFT);
return ipcr;
}
static int get_overhead_id(struct cmp_connection *c)
{
int id;
	/*
	 * Apply the "oPCR overhead ID encoding". The encoding table can
	 * express values up to 512; any value above 512 is encoded in the
	 * same way as 512.
	 */
for (id = 1; id < 16; id++) {
if (c->resources.bandwidth_overhead < (id << 5))
break;
}
if (id == 16)
id = 0;
return id;
}
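/*
 * Worked example (added for clarity): a bandwidth overhead of 100 allocation
 * units selects id = 4, the smallest id with 100 < id * 32; any overhead of
 * 480 or more falls through the loop and is encoded as id = 0, which the
 * oPCR encoding defines as 512.
 */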
static __be32 opcr_set_modify(struct cmp_connection *c, __be32 opcr)
{
unsigned int spd, xspd;
/* generate speed and extended speed field value */
if (c->speed > SCODE_400) {
spd = SCODE_800;
xspd = c->speed - SCODE_800;
} else {
spd = c->speed;
xspd = 0;
}
opcr &= ~cpu_to_be32(PCR_BCAST_CONN |
PCR_P2P_CONN_MASK |
OPCR_XSPEED_MASK |
PCR_CHANNEL_MASK |
OPCR_SPEED_MASK |
OPCR_OVERHEAD_ID_MASK);
opcr |= cpu_to_be32(1 << PCR_P2P_CONN_SHIFT);
opcr |= cpu_to_be32(xspd << OPCR_XSPEED_SHIFT);
opcr |= cpu_to_be32(c->resources.channel << PCR_CHANNEL_SHIFT);
opcr |= cpu_to_be32(spd << OPCR_SPEED_SHIFT);
opcr |= cpu_to_be32(get_overhead_id(c) << OPCR_OVERHEAD_ID_SHIFT);
return opcr;
}
static int pcr_set_check(struct cmp_connection *c, __be32 pcr)
{
if (pcr & cpu_to_be32(PCR_BCAST_CONN |
PCR_P2P_CONN_MASK)) {
cmp_error(c, "plug is already in use\n");
return -EBUSY;
}
if (!(pcr & cpu_to_be32(PCR_ONLINE))) {
cmp_error(c, "plug is not on-line\n");
return -ECONNREFUSED;
}
return 0;
}
/**
* cmp_connection_establish - establish a connection to the target
* @c: the connection manager
*
* This function establishes a point-to-point connection from the local
* computer to the target by allocating isochronous resources (channel and
* bandwidth) and setting the target's input/output plug control register.
* When this function succeeds, the caller is responsible for starting
* transmitting packets.
*/
int cmp_connection_establish(struct cmp_connection *c)
{
int err;
mutex_lock(&c->mutex);
if (WARN_ON(c->connected)) {
mutex_unlock(&c->mutex);
return -EISCONN;
}
retry_after_bus_reset:
if (c->direction == CMP_OUTPUT)
err = pcr_modify(c, opcr_set_modify, pcr_set_check,
ABORT_ON_BUS_RESET);
else
err = pcr_modify(c, ipcr_set_modify, pcr_set_check,
ABORT_ON_BUS_RESET);
if (err == -EAGAIN) {
err = fw_iso_resources_update(&c->resources);
if (err >= 0)
goto retry_after_bus_reset;
}
if (err >= 0)
c->connected = true;
mutex_unlock(&c->mutex);
return err;
}
EXPORT_SYMBOL(cmp_connection_establish);
/**
* cmp_connection_update - update the connection after a bus reset
* @c: the connection manager
*
* This function must be called from the driver's .update handler to
* reestablish any connection that might have been active.
*
* Returns zero on success, or a negative error code. On an error, the
* connection is broken and the caller must stop transmitting iso packets.
*/
int cmp_connection_update(struct cmp_connection *c)
{
int err;
mutex_lock(&c->mutex);
if (!c->connected) {
mutex_unlock(&c->mutex);
return 0;
}
err = fw_iso_resources_update(&c->resources);
if (err < 0)
goto err_unconnect;
if (c->direction == CMP_OUTPUT)
err = pcr_modify(c, opcr_set_modify, pcr_set_check,
SUCCEED_ON_BUS_RESET);
else
err = pcr_modify(c, ipcr_set_modify, pcr_set_check,
SUCCEED_ON_BUS_RESET);
if (err < 0)
goto err_unconnect;
mutex_unlock(&c->mutex);
return 0;
err_unconnect:
c->connected = false;
mutex_unlock(&c->mutex);
return err;
}
EXPORT_SYMBOL(cmp_connection_update);
static __be32 pcr_break_modify(struct cmp_connection *c, __be32 pcr)
{
return pcr & ~cpu_to_be32(PCR_BCAST_CONN | PCR_P2P_CONN_MASK);
}
/**
* cmp_connection_break - break the connection to the target
* @c: the connection manager
*
 * This function deactivates the connection in the target's input/output plug
* control register, and frees the isochronous resources of the connection.
* Before calling this function, the caller should cease transmitting packets.
*/
void cmp_connection_break(struct cmp_connection *c)
{
int err;
mutex_lock(&c->mutex);
if (!c->connected) {
mutex_unlock(&c->mutex);
return;
}
err = pcr_modify(c, pcr_break_modify, NULL, SUCCEED_ON_BUS_RESET);
if (err < 0)
cmp_error(c, "plug is still connected\n");
c->connected = false;
mutex_unlock(&c->mutex);
}
EXPORT_SYMBOL(cmp_connection_break);
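/*
 * Illustrative sketch (added for clarity; not part of the original driver):
 * the typical call sequence a unit driver follows around the CMP helpers
 * above. The pCR index and the payload size are placeholders; real drivers
 * derive the payload size from the configured stream format.
 */
static int __maybe_unused example_start_input_connection(struct cmp_connection *c,
							 struct fw_unit *unit)
{
	int err;

	err = cmp_connection_init(c, unit, CMP_INPUT, 0);
	if (err < 0)
		return err;

	err = cmp_connection_reserve(c, 1024);	/* placeholder payload size */
	if (err < 0)
		goto err_destroy;

	err = cmp_connection_establish(c);
	if (err < 0)
		goto err_release;

	/* ... start transmitting isochronous packets here ... */
	return 0;

err_release:
	cmp_connection_release(c);
err_destroy:
	cmp_connection_destroy(c);
	return err;
}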
| linux-master | sound/firewire/cmp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AM824 format in Audio and Music Data Transmission Protocol (IEC 61883-6)
*
* Copyright (c) Clemens Ladisch <[email protected]>
* Copyright (c) 2015 Takashi Sakamoto <[email protected]>
*/
#include <linux/slab.h>
#include "amdtp-am824.h"
#define CIP_FMT_AM 0x10
/* "Clock-based rate control mode" is just supported. */
#define AMDTP_FDF_AM824 0x00
/*
* Nominally 3125 bytes/second, but the MIDI port's clock might be
* 1% too slow, and the bus clock 100 ppm too fast.
*/
#define MIDI_BYTES_PER_SECOND 3093
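/*
 * Worked derivation of the value above (added for clarity): 31250 bits/s at
 * 10 bits per MIDI byte gives the nominal 3125 bytes/s; with the MIDI clock
 * 1% slow and the bus clock 100 ppm fast, 3125 * 0.99 / 1.0001 ~= 3093.4,
 * rounded down to 3093.
 */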
/*
* Several devices look only at the first eight data blocks.
* In any case, this is more than enough for the MIDI data rate.
*/
#define MAX_MIDI_RX_BLOCKS 8
struct amdtp_am824 {
struct snd_rawmidi_substream *midi[AM824_MAX_CHANNELS_FOR_MIDI * 8];
int midi_fifo_limit;
int midi_fifo_used[AM824_MAX_CHANNELS_FOR_MIDI * 8];
unsigned int pcm_channels;
unsigned int midi_ports;
u8 pcm_positions[AM824_MAX_CHANNELS_FOR_PCM];
u8 midi_position;
};
/**
* amdtp_am824_set_parameters - set stream parameters
* @s: the AMDTP stream to configure
* @rate: the sample rate
* @pcm_channels: the number of PCM samples in each data block, to be encoded
* as AM824 multi-bit linear audio
* @midi_ports: the number of MIDI ports (i.e., MPX-MIDI Data Channels)
* @double_pcm_frames: one data block transfers two PCM frames
*
* The parameters must be set before the stream is started, and must not be
* changed while the stream is running.
*/
int amdtp_am824_set_parameters(struct amdtp_stream *s, unsigned int rate,
unsigned int pcm_channels,
unsigned int midi_ports,
bool double_pcm_frames)
{
struct amdtp_am824 *p = s->protocol;
unsigned int midi_channels;
unsigned int pcm_frame_multiplier;
int i, err;
if (amdtp_stream_running(s))
return -EINVAL;
if (pcm_channels > AM824_MAX_CHANNELS_FOR_PCM)
return -EINVAL;
midi_channels = DIV_ROUND_UP(midi_ports, 8);
if (midi_channels > AM824_MAX_CHANNELS_FOR_MIDI)
return -EINVAL;
if (WARN_ON(amdtp_stream_running(s)) ||
WARN_ON(pcm_channels > AM824_MAX_CHANNELS_FOR_PCM) ||
WARN_ON(midi_channels > AM824_MAX_CHANNELS_FOR_MIDI))
return -EINVAL;
	/*
	 * In IEC 61883-6, one data block represents one event. In ALSA, one
	 * event equals one PCM frame. But Dice has a quirk at higher
	 * sampling rates: it transfers two PCM frames in one data block.
	 */
if (double_pcm_frames)
pcm_frame_multiplier = 2;
else
pcm_frame_multiplier = 1;
err = amdtp_stream_set_parameters(s, rate, pcm_channels + midi_channels,
pcm_frame_multiplier);
if (err < 0)
return err;
if (s->direction == AMDTP_OUT_STREAM)
s->ctx_data.rx.fdf = AMDTP_FDF_AM824 | s->sfc;
p->pcm_channels = pcm_channels;
p->midi_ports = midi_ports;
/* init the position map for PCM and MIDI channels */
for (i = 0; i < pcm_channels; i++)
p->pcm_positions[i] = i;
p->midi_position = p->pcm_channels;
/*
* We do not know the actual MIDI FIFO size of most devices. Just
* assume two bytes, i.e., one byte can be received over the bus while
* the previous one is transmitted over MIDI.
* (The value here is adjusted for midi_ratelimit_per_packet().)
*/
p->midi_fifo_limit = rate - MIDI_BYTES_PER_SECOND * s->syt_interval + 1;
return 0;
}
EXPORT_SYMBOL_GPL(amdtp_am824_set_parameters);
/**
 * amdtp_am824_set_pcm_position - set the index of the data channel for a
 * channel of the PCM frame
 * @s: the AMDTP stream
 * @index: the index of the data channel in a data block
 * @position: the channel of the PCM frame
*/
void amdtp_am824_set_pcm_position(struct amdtp_stream *s, unsigned int index,
unsigned int position)
{
struct amdtp_am824 *p = s->protocol;
if (index < p->pcm_channels)
p->pcm_positions[index] = position;
}
EXPORT_SYMBOL_GPL(amdtp_am824_set_pcm_position);
/**
 * amdtp_am824_set_midi_position - set the index of the data channel for the
 * MIDI conformant data channel
 * @s: the AMDTP stream
 * @position: the index of the data channel in a data block
*/
void amdtp_am824_set_midi_position(struct amdtp_stream *s,
unsigned int position)
{
struct amdtp_am824 *p = s->protocol;
p->midi_position = position;
}
EXPORT_SYMBOL_GPL(amdtp_am824_set_midi_position);
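/*
 * Illustrative sketch (added for clarity; not part of the original driver):
 * how a unit driver might configure an AM824 stream before starting it. The
 * rate, channel counts, and position layout below are placeholders.
 */
static int __maybe_unused example_configure_am824_stream(struct amdtp_stream *s)
{
	unsigned int i;
	int err;

	/* 8 PCM channels and 1 MIDI port at 48 kHz, one PCM frame per data block */
	err = amdtp_am824_set_parameters(s, 48000, 8, 1, false);
	if (err < 0)
		return err;

	/* place PCM samples in the first eight data channels ... */
	for (i = 0; i < 8; i++)
		amdtp_am824_set_pcm_position(s, i, i);
	/* ... and the MIDI conformant data channel right after them */
	amdtp_am824_set_midi_position(s, 8);

	return 0;
}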
static void write_pcm_s32(struct amdtp_stream *s, struct snd_pcm_substream *pcm,
__be32 *buffer, unsigned int frames,
unsigned int pcm_frames)
{
struct amdtp_am824 *p = s->protocol;
unsigned int channels = p->pcm_channels;
struct snd_pcm_runtime *runtime = pcm->runtime;
unsigned int pcm_buffer_pointer;
int remaining_frames;
const u32 *src;
int i, c;
pcm_buffer_pointer = s->pcm_buffer_pointer + pcm_frames;
pcm_buffer_pointer %= runtime->buffer_size;
src = (void *)runtime->dma_area +
frames_to_bytes(runtime, pcm_buffer_pointer);
remaining_frames = runtime->buffer_size - pcm_buffer_pointer;
for (i = 0; i < frames; ++i) {
for (c = 0; c < channels; ++c) {
buffer[p->pcm_positions[c]] =
cpu_to_be32((*src >> 8) | 0x40000000);
src++;
}
buffer += s->data_block_quadlets;
if (--remaining_frames == 0)
src = (void *)runtime->dma_area;
}
}
static void read_pcm_s32(struct amdtp_stream *s, struct snd_pcm_substream *pcm,
__be32 *buffer, unsigned int frames,
unsigned int pcm_frames)
{
struct amdtp_am824 *p = s->protocol;
unsigned int channels = p->pcm_channels;
struct snd_pcm_runtime *runtime = pcm->runtime;
unsigned int pcm_buffer_pointer;
int remaining_frames;
u32 *dst;
int i, c;
pcm_buffer_pointer = s->pcm_buffer_pointer + pcm_frames;
pcm_buffer_pointer %= runtime->buffer_size;
dst = (void *)runtime->dma_area +
frames_to_bytes(runtime, pcm_buffer_pointer);
remaining_frames = runtime->buffer_size - pcm_buffer_pointer;
for (i = 0; i < frames; ++i) {
for (c = 0; c < channels; ++c) {
*dst = be32_to_cpu(buffer[p->pcm_positions[c]]) << 8;
dst++;
}
buffer += s->data_block_quadlets;
if (--remaining_frames == 0)
dst = (void *)runtime->dma_area;
}
}
static void write_pcm_silence(struct amdtp_stream *s,
__be32 *buffer, unsigned int frames)
{
struct amdtp_am824 *p = s->protocol;
unsigned int i, c, channels = p->pcm_channels;
for (i = 0; i < frames; ++i) {
for (c = 0; c < channels; ++c)
buffer[p->pcm_positions[c]] = cpu_to_be32(0x40000000);
buffer += s->data_block_quadlets;
}
}
/**
* amdtp_am824_add_pcm_hw_constraints - add hw constraints for PCM substream
* @s: the AMDTP stream for AM824 data block, must be initialized.
* @runtime: the PCM substream runtime
 */
int amdtp_am824_add_pcm_hw_constraints(struct amdtp_stream *s,
struct snd_pcm_runtime *runtime)
{
int err;
err = amdtp_stream_add_pcm_hw_constraints(s, runtime);
if (err < 0)
return err;
/* AM824 in IEC 61883-6 can deliver 24bit data. */
return snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
}
EXPORT_SYMBOL_GPL(amdtp_am824_add_pcm_hw_constraints);
/**
* amdtp_am824_midi_trigger - start/stop playback/capture with a MIDI device
* @s: the AMDTP stream
* @port: index of MIDI port
* @midi: the MIDI device to be started, or %NULL to stop the current device
*
* Call this function on a running isochronous stream to enable the actual
* transmission of MIDI data. This function should be called from the MIDI
* device's .trigger callback.
*/
void amdtp_am824_midi_trigger(struct amdtp_stream *s, unsigned int port,
struct snd_rawmidi_substream *midi)
{
struct amdtp_am824 *p = s->protocol;
if (port < p->midi_ports)
WRITE_ONCE(p->midi[port], midi);
}
EXPORT_SYMBOL_GPL(amdtp_am824_midi_trigger);
/*
* To avoid sending MIDI bytes at too high a rate, assume that the receiving
 * device has a FIFO, and track how much it is filled. This value increases
* by one whenever we send one byte in a packet, but the FIFO empties at
* a constant rate independent of our packet rate. One packet has syt_interval
* samples, so the number of bytes that empty out of the FIFO, per packet(!),
* is MIDI_BYTES_PER_SECOND * syt_interval / sample_rate. To avoid storing
* fractional values, the values in midi_fifo_used[] are measured in bytes
* multiplied by the sample rate.
*/
static bool midi_ratelimit_per_packet(struct amdtp_stream *s, unsigned int port)
{
struct amdtp_am824 *p = s->protocol;
int used;
used = p->midi_fifo_used[port];
if (used == 0) /* common shortcut */
return true;
used -= MIDI_BYTES_PER_SECOND * s->syt_interval;
used = max(used, 0);
p->midi_fifo_used[port] = used;
return used < p->midi_fifo_limit;
}
static void midi_rate_use_one_byte(struct amdtp_stream *s, unsigned int port)
{
struct amdtp_am824 *p = s->protocol;
p->midi_fifo_used[port] += amdtp_rate_table[s->sfc];
}
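/*
 * Worked example (added for clarity), assuming 48000 Hz with a syt_interval
 * of 8: sending one byte adds amdtp_rate_table[sfc] = 48000 to the counter,
 * each packet drains MIDI_BYTES_PER_SECOND * 8 = 24744, and the limit set in
 * amdtp_am824_set_parameters() is 48000 - 24744 + 1 = 23257, so a sustained
 * rate of about one byte per packet stays just under the limit.
 */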
static void write_midi_messages(struct amdtp_stream *s, __be32 *buffer,
unsigned int frames, unsigned int data_block_counter)
{
struct amdtp_am824 *p = s->protocol;
unsigned int f, port;
u8 *b;
for (f = 0; f < frames; f++) {
b = (u8 *)&buffer[p->midi_position];
port = (data_block_counter + f) % 8;
if (f < MAX_MIDI_RX_BLOCKS &&
midi_ratelimit_per_packet(s, port) &&
p->midi[port] != NULL &&
snd_rawmidi_transmit(p->midi[port], &b[1], 1) == 1) {
midi_rate_use_one_byte(s, port);
b[0] = 0x81;
} else {
b[0] = 0x80;
b[1] = 0;
}
b[2] = 0;
b[3] = 0;
buffer += s->data_block_quadlets;
}
}
static void read_midi_messages(struct amdtp_stream *s, __be32 *buffer,
unsigned int frames, unsigned int data_block_counter)
{
struct amdtp_am824 *p = s->protocol;
int len;
u8 *b;
int f;
for (f = 0; f < frames; f++) {
unsigned int port = f;
if (!(s->flags & CIP_UNALIGHED_DBC))
port += data_block_counter;
port %= 8;
b = (u8 *)&buffer[p->midi_position];
len = b[0] - 0x80;
if ((1 <= len) && (len <= 3) && (p->midi[port]))
snd_rawmidi_receive(p->midi[port], b + 1, len);
buffer += s->data_block_quadlets;
}
}
static void process_it_ctx_payloads(struct amdtp_stream *s, const struct pkt_desc *desc,
unsigned int count, struct snd_pcm_substream *pcm)
{
struct amdtp_am824 *p = s->protocol;
unsigned int pcm_frames = 0;
int i;
for (i = 0; i < count; ++i) {
__be32 *buf = desc->ctx_payload;
unsigned int data_blocks = desc->data_blocks;
if (pcm) {
write_pcm_s32(s, pcm, buf, data_blocks, pcm_frames);
pcm_frames += data_blocks * s->pcm_frame_multiplier;
} else {
write_pcm_silence(s, buf, data_blocks);
}
if (p->midi_ports) {
write_midi_messages(s, buf, data_blocks,
desc->data_block_counter);
}
desc = amdtp_stream_next_packet_desc(s, desc);
}
}
static void process_ir_ctx_payloads(struct amdtp_stream *s, const struct pkt_desc *desc,
unsigned int count, struct snd_pcm_substream *pcm)
{
struct amdtp_am824 *p = s->protocol;
unsigned int pcm_frames = 0;
int i;
for (i = 0; i < count; ++i) {
__be32 *buf = desc->ctx_payload;
unsigned int data_blocks = desc->data_blocks;
if (pcm) {
read_pcm_s32(s, pcm, buf, data_blocks, pcm_frames);
pcm_frames += data_blocks * s->pcm_frame_multiplier;
}
if (p->midi_ports) {
read_midi_messages(s, buf, data_blocks,
desc->data_block_counter);
}
desc = amdtp_stream_next_packet_desc(s, desc);
}
}
/**
* amdtp_am824_init - initialize an AMDTP stream structure to handle AM824
* data block
* @s: the AMDTP stream to initialize
* @unit: the target of the stream
* @dir: the direction of stream
 * @flags: the details of the streaming protocol, consisting of cip_flags enumeration constants
*/
int amdtp_am824_init(struct amdtp_stream *s, struct fw_unit *unit,
enum amdtp_stream_direction dir, unsigned int flags)
{
amdtp_stream_process_ctx_payloads_t process_ctx_payloads;
if (dir == AMDTP_IN_STREAM)
process_ctx_payloads = process_ir_ctx_payloads;
else
process_ctx_payloads = process_it_ctx_payloads;
return amdtp_stream_init(s, unit, dir, flags, CIP_FMT_AM,
process_ctx_payloads, sizeof(struct amdtp_am824));
}
EXPORT_SYMBOL_GPL(amdtp_am824_init);
| linux-master | sound/firewire/amdtp-am824.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ff-transaction.c - a part of driver for RME Fireface series
*
* Copyright (c) 2015-2017 Takashi Sakamoto
*/
#include "ff.h"
static void finish_transmit_midi_msg(struct snd_ff *ff, unsigned int port,
int rcode)
{
struct snd_rawmidi_substream *substream =
READ_ONCE(ff->rx_midi_substreams[port]);
if (rcode_is_permanent_error(rcode)) {
ff->rx_midi_error[port] = true;
return;
}
if (rcode != RCODE_COMPLETE) {
/* Transfer the message again, immediately. */
ff->next_ktime[port] = 0;
schedule_work(&ff->rx_midi_work[port]);
return;
}
snd_rawmidi_transmit_ack(substream, ff->rx_bytes[port]);
ff->rx_bytes[port] = 0;
if (!snd_rawmidi_transmit_empty(substream))
schedule_work(&ff->rx_midi_work[port]);
}
static void finish_transmit_midi0_msg(struct fw_card *card, int rcode,
void *data, size_t length,
void *callback_data)
{
struct snd_ff *ff =
container_of(callback_data, struct snd_ff, transactions[0]);
finish_transmit_midi_msg(ff, 0, rcode);
}
static void finish_transmit_midi1_msg(struct fw_card *card, int rcode,
void *data, size_t length,
void *callback_data)
{
struct snd_ff *ff =
container_of(callback_data, struct snd_ff, transactions[1]);
finish_transmit_midi_msg(ff, 1, rcode);
}
static void transmit_midi_msg(struct snd_ff *ff, unsigned int port)
{
struct snd_rawmidi_substream *substream =
READ_ONCE(ff->rx_midi_substreams[port]);
int quad_count;
struct fw_device *fw_dev = fw_parent_device(ff->unit);
unsigned long long addr;
int generation;
fw_transaction_callback_t callback;
int tcode;
if (substream == NULL || snd_rawmidi_transmit_empty(substream))
return;
if (ff->rx_bytes[port] > 0 || ff->rx_midi_error[port])
return;
	/* Postpone it until the next chance. */
if (ktime_after(ff->next_ktime[port], ktime_get())) {
schedule_work(&ff->rx_midi_work[port]);
return;
}
quad_count = ff->spec->protocol->fill_midi_msg(ff, substream, port);
if (quad_count <= 0)
return;
if (port == 0) {
addr = ff->spec->midi_rx_addrs[0];
callback = finish_transmit_midi0_msg;
} else {
addr = ff->spec->midi_rx_addrs[1];
callback = finish_transmit_midi1_msg;
}
/* Set interval to next transaction. */
ff->next_ktime[port] = ktime_add_ns(ktime_get(),
ff->rx_bytes[port] * 8 * (NSEC_PER_SEC / 31250));
if (quad_count == 1)
tcode = TCODE_WRITE_QUADLET_REQUEST;
else
tcode = TCODE_WRITE_BLOCK_REQUEST;
	/*
	 * In the Linux FireWire core, the node ID has already been updated by
	 * the time the generation is updated with a memory barrier. In this
	 * module, loads and stores to memory are completed after this
	 * smp_rmb(), so both the generation and the node ID are read with
	 * recent values. This is a lightweight serialization to handle bus
	 * reset events on the IEEE 1394 bus.
	 */
generation = fw_dev->generation;
smp_rmb();
fw_send_request(fw_dev->card, &ff->transactions[port], tcode,
fw_dev->node_id, generation, fw_dev->max_speed,
addr, &ff->msg_buf[port], quad_count * 4,
callback, &ff->transactions[port]);
}
static void transmit_midi0_msg(struct work_struct *work)
{
struct snd_ff *ff = container_of(work, struct snd_ff, rx_midi_work[0]);
transmit_midi_msg(ff, 0);
}
static void transmit_midi1_msg(struct work_struct *work)
{
struct snd_ff *ff = container_of(work, struct snd_ff, rx_midi_work[1]);
transmit_midi_msg(ff, 1);
}
static void handle_msg(struct fw_card *card, struct fw_request *request, int tcode,
int destination, int source, int generation, unsigned long long offset,
void *data, size_t length, void *callback_data)
{
struct snd_ff *ff = callback_data;
__le32 *buf = data;
u32 tstamp = fw_request_get_timestamp(request);
unsigned long flag;
fw_send_response(card, request, RCODE_COMPLETE);
offset -= ff->async_handler.offset;
spin_lock_irqsave(&ff->lock, flag);
ff->spec->protocol->handle_msg(ff, (unsigned int)offset, buf, length, tstamp);
spin_unlock_irqrestore(&ff->lock, flag);
}
static int allocate_own_address(struct snd_ff *ff, int i)
{
struct fw_address_region midi_msg_region;
int err;
ff->async_handler.length = ff->spec->midi_addr_range;
ff->async_handler.address_callback = handle_msg;
ff->async_handler.callback_data = ff;
midi_msg_region.start = 0x000100000000ull * i;
midi_msg_region.end = midi_msg_region.start + ff->async_handler.length;
err = fw_core_add_address_handler(&ff->async_handler, &midi_msg_region);
if (err >= 0) {
/* Controllers are allowed to register this region. */
if (ff->async_handler.offset & 0x0000ffffffff) {
fw_core_remove_address_handler(&ff->async_handler);
err = -EAGAIN;
}
}
return err;
}
// Controllers are allowed to register the upper 4 bytes of the destination
// address to receive asynchronous transactions for MIDI messages, while the
// way to register the lower 4 bytes of the address differs between protocols.
// For details, please refer to the comments in the protocol implementations.
//
// This driver expects userspace applications to configure the registers for
// the lower address because in most cases such registers also hold other
// settings.
int snd_ff_transaction_reregister(struct snd_ff *ff)
{
struct fw_card *fw_card = fw_parent_device(ff->unit)->card;
u32 addr;
__le32 reg;
	/*
	 * Controllers are allowed to register their node ID and the upper
	 * 2 bytes of the local address to listen for asynchronous
	 * transactions.
	 */
addr = (fw_card->node_id << 16) | (ff->async_handler.offset >> 32);
reg = cpu_to_le32(addr);
return snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
ff->spec->midi_high_addr,
				  &reg, sizeof(reg), 0);
}
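/*
 * Worked example (added for clarity): if allocate_own_address() succeeded
 * with i = 2, the handler covers offset 0x000200000000, so with a local node
 * ID of, say, 0xffc0 the quadlet written to midi_high_addr above is
 * (0xffc0 << 16) | 0x0002 = 0xffc00002.
 */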
int snd_ff_transaction_register(struct snd_ff *ff)
{
int i, err;
	/*
	 * Allocate within the memory space of IEC 13213, but the lower
	 * 4 bytes must be zero due to the device specification.
	 */
for (i = 0; i < 0xffff; i++) {
err = allocate_own_address(ff, i);
if (err != -EBUSY && err != -EAGAIN)
break;
}
if (err < 0)
return err;
err = snd_ff_transaction_reregister(ff);
if (err < 0)
return err;
INIT_WORK(&ff->rx_midi_work[0], transmit_midi0_msg);
INIT_WORK(&ff->rx_midi_work[1], transmit_midi1_msg);
return 0;
}
void snd_ff_transaction_unregister(struct snd_ff *ff)
{
__le32 reg;
if (ff->async_handler.callback_data == NULL)
return;
ff->async_handler.callback_data = NULL;
/* Release higher 4 bytes of address. */
reg = cpu_to_le32(0x00000000);
snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
ff->spec->midi_high_addr,
			   &reg, sizeof(reg), 0);
fw_core_remove_address_handler(&ff->async_handler);
}
| linux-master | sound/firewire/fireface/ff-transaction.c |