AudioRecord Usage Example

See app/audio_record_sample_app

1. Sample Usage Notes

Audio capture example.

Intended for customer application development: it demonstrates how to obtain the raw microphone audio and the post-algorithm audio, and how to transfer it over UART DMA with a custom protocol.

Customers can also encode/compress the audio at this point to make external transfer easier, as sketched below.
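For example, a trivial way to halve the UART bandwidth is to keep only the top 8 bits of each 16-bit sample. The helper below is purely illustrative and not part of the SDK (a real design would use a proper codec such as ADPCM or Opus); compress_16to8() is a hypothetical name.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical helper: keep only the high byte of each 16-bit sample,
   converted to offset-binary (unsigned) form. Halves the data rate. */
static size_t compress_16to8(const int16_t *in, size_t samples, uint8_t *out)
{
    for (size_t i = 0; i < samples; i++)
        out[i] = (uint8_t)(((uint16_t)in[i] >> 8) ^ 0x80);
    return samples; /* output length in bytes */
}

In the AudioSendTask() of the sample code, this would replace the plain memcpy() into send_buf, and the length passed to gx_uart_async_send_buffer() would shrink from FRAM+5 to FRAM/2+5.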

tools.zip contains the source code of the matching Windows and Linux serial receivers, which makes it easy to verify the transferred audio when defining a custom protocol.
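As a reference for what those receivers do, below is a minimal Linux-side sketch that parses the frame format used by AudioSendTask() in the sample code (4-byte head 0x11 0x22 0x33 0x44, 1-byte type, 1280 bytes of PCM) and appends the payload to a raw file. It is only an illustration under those assumptions; the device path and file name are examples, and the complete programs are the ones shipped in tools.zip.

/* receiver_sketch.c - hypothetical minimal receiver, for illustration only.
 * Build: gcc receiver_sketch.c -o receiver_sketch
 * Usage: ./receiver_sketch /dev/ttyUSB0 out.pcm
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <termios.h>

#define PAYLOAD_LEN 1280 /* must match FRAM in the sample */

static int read_full(int fd, uint8_t *buf, size_t len)
{
    size_t got = 0;
    while (got < len) {
        ssize_t n = read(fd, buf + got, len - got);
        if (n <= 0) return -1;
        got += (size_t)n;
    }
    return 0;
}

int main(int argc, char **argv)
{
    if (argc != 3) {
        fprintf(stderr, "usage: %s <serial-dev> <out.pcm>\n", argv[0]);
        return 1;
    }

    int fd = open(argv[1], O_RDONLY | O_NOCTTY);
    if (fd < 0) { perror("open serial"); return 1; }

    struct termios tio;
    tcgetattr(fd, &tio);
    cfmakeraw(&tio);                /* raw mode: no echo, no line editing */
    cfsetispeed(&tio, B921600);     /* must match gx_uart_init(0, 921600) */
    cfsetospeed(&tio, B921600);
    tcsetattr(fd, TCSANOW, &tio);

    FILE *out = fopen(argv[2], "wb");
    if (!out) { perror("fopen"); return 1; }

    const uint8_t head[4] = {0x11, 0x22, 0x33, 0x44};
    uint8_t win[4] = {0};
    uint8_t payload[PAYLOAD_LEN];

    for (;;) {
        uint8_t c;
        if (read_full(fd, &c, 1) < 0) break;     /* slide a 4-byte window ... */
        memmove(win, win + 1, 3);
        win[3] = c;
        if (memcmp(win, head, 4) != 0) continue; /* ... until the head matches */

        uint8_t type;
        if (read_full(fd, &type, 1) < 0) break;
        if (type != 0x01) continue;              /* 0x01 == audio in the sample protocol */

        if (read_full(fd, payload, PAYLOAD_LEN) < 0) break;
        fwrite(payload, 1, PAYLOAD_LEN, out);
        fflush(out);
    }

    fclose(out);
    close(fd);
    return 0;
}

Assuming the 16 kHz single-channel configuration used in the sample, the resulting file is headerless 16 kHz / 16-bit / mono PCM and can be imported into any audio editor that accepts raw PCM.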

2. Sample Code

#include <autoconf.h>
#include <stdint.h>
#include <math.h>
#include <system_init.h>
#include <gx_clock.h>
#include <gx_uart.h>
#include <gx_pmu.h>
#include <ovp_app.h>
#include <ovp_buffer.h>
#include <vpa_context.h>
#include <vpa_helper.h>
#include <gx_aout.h>
#include <gx_sdm.h>
#include <gx_cache.h>
#include <gx_dcache.h>
#include <gx_padmux.h>
#include "test_wav.h"

/*
Audio capture example.
For customer application development: demonstrates how to obtain the raw microphone
audio and the post-algorithm audio, and how to transfer it over UART DMA with a
custom protocol.
tools.zip contains the source of the matching Windows/Linux serial receivers, which
helps verify the transferred audio when defining a custom protocol.
*/

#define LOG_TAG "[AUDIO_OUT_SAMPLE_APP]"
#define AUDIO_OUT_BUFFER_LEN    (8 * 1024) // Note: the driver's audio buffer must be 64-byte aligned
//=================================================================================================

static uint8_t *g_player_buffer;
static int g_offset = 0;
static int g_all_len = 174080; // total bytes of the local test clip input_pcm[] (test_wav.h); playback wraps around at this offset

static int32_t _vol_to_db(int32_t volume)
{
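    // Map a 0-100 volume to a linear gain in Q10 fixed point (1024 == 0 dB) using the dB table below.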
    const int32_t vol_to_db[] = {-16, -12, -8, -6, -4, -2, 0, 2, 4, 5, 6};
    if (volume > 100) volume = 100;
    if (volume < 0)   volume = 0;
    volume = (volume + 5) / 10; // map 0-100 onto the 11-entry dB table (index 0-10)

    uint32_t level = (int)(1024 * pow(10.f, vol_to_db[volume] / 20.f)); // Note: <math.h> must be included when calling pow(); otherwise another library's pow() may be linked in and the result will be wrong
    // printf("cur vol[%d] dB[%d] l[%d]\n", volume, vol_to_db[volume], level);

    return level;
}

static int app_suspend(void *priv)
{
    printf(LOG_TAG " ---- %s ----\n", __func__);
    return 0;
}

static int app_resume(void *priv)
{
    printf(LOG_TAG " ---- %s ----\n", __func__);
    return 0;
}

// Note: this callback runs in interrupt context, so do not do anything time-consuming here
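// Ping-pong refill: the 8 KB buffer is split into two 4 KB halves; when the hardware finishes
// one half (identified by cur_frame->saddr), the other half is refilled from input_pcm and pushed back to the driver.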
static int _aout_new_frame_callback(GX_AOUT_FRAME *cur_frame, void *priv)
{
    g_offset += AUDIO_OUT_BUFFER_LEN/2;
    if ((g_offset + AUDIO_OUT_BUFFER_LEN/2) > g_all_len) g_offset = 0; // the local clip has reached its end, restart playback from the beginning

    if (cur_frame->saddr == 0)
    {
        memcpy(g_player_buffer+AUDIO_OUT_BUFFER_LEN/2, input_pcm+g_offset, AUDIO_OUT_BUFFER_LEN/2);
        gx_dcache_clean_range((uint32_t *)(g_player_buffer + AUDIO_OUT_BUFFER_LEN/2), AUDIO_OUT_BUFFER_LEN/2); // offset in bytes, so add before casting

        GX_AOUT_FRAME frame;
        frame.saddr = AUDIO_OUT_BUFFER_LEN/2;
        frame.eaddr = frame.saddr + AUDIO_OUT_BUFFER_LEN/2 - 1;
        gx_aout_route_push_frame(&frame);
    }
    else
    {
        memcpy(g_player_buffer, input_pcm+g_offset, AUDIO_OUT_BUFFER_LEN/2);
        gx_dcache_clean_range((uint32_t *)g_player_buffer, AUDIO_OUT_BUFFER_LEN/2);

        GX_AOUT_FRAME frame;
        frame.saddr = 0;
        frame.eaddr = frame.saddr + AUDIO_OUT_BUFFER_LEN/2 - 1;
        gx_aout_route_push_frame(&frame);
    }

    return 0;
}

static int audio_out_init(void)
{
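    /*
     * Local playback bring-up, step by step:
     *   1. pinmux the DAC pads;
     *   2. ungate the AUDPCLK/AOUT clocks and start the SDM (16 kHz, fed from AOUT);
     *   3. init the AOUT route: stereo channel mode, SDM output sourced after EQ/DRC;
     *   4. describe the PCM buffer (16 kHz, 16-bit, single channel, interleaved);
     *   5. register the new-frame callback, enable interrupts and the route;
     *   6. set the volume, preload the first half of the buffer and push it.
     */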
    printf(LOG_TAG " ---- %s ----\n", __func__);

    padmux_set(0, 1); // DAC_P
    padmux_set(1, 1); // DAC_N

    gx_clk_mod_set_gate(GX_CLK_MOD_AUDPCLK, 1);
    gx_clk_mod_set_gate(GX_CLK_MOD_AOUT, 1);
    unsigned int freq = (gx_clk_mod_get_freq(GX_CLK_MOD_AOUT) + 5) / 10 * 10;
    printf("Aout Frequence:%d\n", freq);
    freq = gx_clk_mod_get_freq(GX_CLK_MOD_SDM);
    printf("Aout SDM Frequence:%d\n", freq);
    gx_sdm_init();

    GX_SDM_CONFIG config;
    config.sample_rate = GX_SDM_INPUT_16KHZ;
    config.source      = GX_SDM_SRC_AOUT;
    config.mode        = GX_SDM_SAWTOOTH_MODE_TD;

    gx_sdm_config(&config);
    gx_sdm_start();
    gx_sdm_set_lowpower_mode_on();

    gx_aout_init();
    gx_aout_route_set_channel_mode(GX_AOUT_CH_MODE_STEREO);
    gx_aout_output_set_source(GX_AOUT_OUT_PORT_SDM, GX_AOUT_OUT_SRC_AFTER_EQ_DRC);

    GX_AOUT_PCM pcm = {0};
    pcm.sample_rate = GX_AOUT_SAMPLE_RATE_16KHZ;
    pcm.channels    = GX_AOUT_SIGNAL_CHANNEL;
    pcm.bits        = GX_AOUT_BIT_16;
    pcm.storage     = GX_AOUT_INTERLACE;
    pcm.endian      = GX_AOUT_LITTLE_ENDIAN;

    g_player_buffer = ovp_malloc(AUDIO_OUT_BUFFER_LEN);
    if (g_player_buffer == NULL){
        printf("voice play malloc failed!\n");
        return -1;
    }

    pcm.left_buffer  = (uint32_t)g_player_buffer & 0x0fffffff;
    pcm.right_buffer = 0;
    pcm.size         = AUDIO_OUT_BUFFER_LEN;
    gx_aout_route_set_pcm(&pcm);

    gx_aout_route_set_fade_in_speed(3);
    gx_aout_route_set_fade_out_speed(3);

    GX_AOUT_CB aout_cb = {0};
    aout_cb.newframe_cb_func   = _aout_new_frame_callback;
    aout_cb.fifo_empty_cb_func = NULL;
    aout_cb.fade_done_cb_func  = NULL;
    gx_aout_set_cb(&aout_cb);
    gx_aout_interrupt_enable(GX_AOUT_INT_ALL);
    gx_aout_route_enable(1);

    gx_aout_route_set_volume_level(_vol_to_db(10)); // volume control, range 0-100

    // Audio playback goes through DMA, which accesses memory directly and bypasses the cache, so the cache must be cleaned to write the buffered data back to memory
    memcpy(g_player_buffer, input_pcm, AUDIO_OUT_BUFFER_LEN/2);
    gx_dcache_clean_range((uint32_t *)g_player_buffer, AUDIO_OUT_BUFFER_LEN/2);

    // Play the audio: with this frame, bytes 0-4K of the audio buffer are played
    GX_AOUT_FRAME frame;
    frame.saddr = 0;
    frame.eaddr = frame.saddr + AUDIO_OUT_BUFFER_LEN/2 - 1;
    gx_aout_route_push_frame(&frame);

    return 0;
}

static int app_init(void)
{
    gx_uart_init(0, 921600); // record over UART0; use as high a baud rate as possible
    audio_out_init(); // enable local playback to help capture the hardware echo-reference (loopback) audio so hardware engineers can check whether the loopback is working; to record raw microphone audio instead, comment out this line or unplug the speaker
    return 0;
}

#define FRAM (1280)
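// (1280 bytes = 640 16-bit samples, i.e. 40 ms per VPA context frame, assuming the 16 kHz single-channel pipeline configured above)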
static unsigned char send_buf[FRAM+5] __attribute__((aligned(DCACHE_LINE_SIZE)));
static volatile uint8_t tx_done = 1;

static int uart_tx_complete(int port, void *priv)
{
    tx_done = 1;
    return 0;
}

static void AudioSendTask(VPA_CONTEXT *context)
{
    VPA_CONTEXT_HEADER *ctx_header = context->ctx_header;
    unsigned int  mic_buffer_len_per_context = vpa_get_context_length(ctx_header);
    unsigned char *cur_mic_buffer = vpa_get_mic_frame(context, 0, 0); // record single-channel raw microphone audio; if the AEC algorithm is enabled, this is the post-AEC audio
    //unsigned char *cur_mic_buffer = vpa_get_ref_frame(context, 0, 0); // record single-channel raw echo-reference (loopback) audio
    //printf("mic_buffer_len_per_context = %d\n", mic_buffer_len_per_context); // FRAM大小不清楚的话,可以通过打印看mic_buffer_len_per_context,确保它们是一样大小的

    if (mic_buffer_len_per_context != FRAM) printf("error! mic_buffer_len_per_context does not match FRAM\n");

    while (tx_done == 0); // busy-wait until the previous asynchronous UART transfer has completed
    gx_dcache_clean_range((uint32_t *)cur_mic_buffer, mic_buffer_len_per_context);
    unsigned char* p_tmp = send_buf + 5;
    send_buf[0] = 0x11; // package head
    send_buf[1] = 0x22; // package head
    send_buf[2] = 0x33; // package head
    send_buf[3] = 0x44; // package head
    send_buf[4] = 0x01; // package type, 0x01 audio
    memcpy(p_tmp, cur_mic_buffer, FRAM);
    tx_done = 0;
    gx_uart_async_send_buffer(0, send_buf, FRAM+5, uart_tx_complete, NULL);
}

// App Event Process
static int app_event_response(APP_EVENT *app_event)
{
    // for audio send
    if (app_event->event_id == EVENT_AUDIO_IN_RECORD_DONE)
    {
        VPA_CONTEXT *context;
        vpa_get_context(app_event->ctx_index, &context);
        AudioSendTask(context);
    }

    return 0;
}

// APP Main Loop
static int app_task_loop(void)
{
    return 0;
}

OVP_APP ovp_app = {
    .app_name = "audio record app",
    .AppInit = app_init,
    .AppEventResponse = app_event_response,
    .AppTaskLoop = app_task_loop,
    .AppSuspend = app_suspend,
    .suspend_priv = "app_suspend",
    .AppResume = app_resume,
    .resume_priv = "app_resume",
};

OVP_REGISTER_APP(ovp_app);