前言
因笔者的项目中需要在android系统中实现虚拟声卡,实现android本地声音播放到远程终端、远程终端MIC录音送至android系统来;
验证过使用libmedia库实现AudioRecord和AudioTrack的方案,此方案由于音频路由策略和声音焦点问题,无法实现项目目标。
只能重构sound-hal层、通过libtinyalsa库直接控制声卡方式来实现此部分功能;因此就有了这篇记录文章,我们一起梳理如何实现。
因此hal层驱动不用向android层提供api接口,也不用实现sound-hal层的所有接口。
首先、先贴上sound-hal层框架代码
/*
 * Open the virtual-sound HAL device.
 *
 * Allocates a zero-initialized audio_device, fills in the hw_device_t
 * header plus the stream open/close entry points, and returns the device
 * through *device.
 *
 * @param module  HAL module descriptor (stored, may be NULL in this service)
 * @param name    device name, logged only
 * @param device  out: receives &adev->device.common
 * @return 0 on success, -ENOMEM if allocation fails.
 */
int
virt_sound_adev_open(const hw_module_t* module, const char* name, hw_device_t** device)
{
    ALOGV(" %s (name=%s)", __func__, name);

    /* No cast on calloc; sizeof *adev ties the size to the variable's type. */
    struct audio_device *adev = calloc(1, sizeof(*adev));
    if (!adev)
        return -ENOMEM;

    adev->device.common.tag = HARDWARE_DEVICE_TAG;
    adev->device.common.version = AUDIO_DEVICE_API_VERSION_2_0;
    adev->device.common.module = (struct hw_module_t *)module;
    adev->device.common.close = virt_sound_adev_close;

    adev->device.open_output_stream = adev_open_output_stream;
    adev->device.close_output_stream = adev_close_output_stream;
    adev->device.open_input_stream = adev_open_input_stream;
    adev->device.close_input_stream = adev_close_input_stream;

    *device = &adev->device.common;

    /* calloc() already zeroed outputs[]; keep the explicit reset so the
     * "no streams open yet" invariant is obvious to readers. */
    for (int i = 0; i < OUTPUT_TOTAL; i++)
        adev->outputs[i] = NULL;

    /* BUGFIX: a pointer must be printed with %p, not %x -- %x truncates on
     * 64-bit ABIs and a mismatched specifier is undefined behavior. */
    ALOGV(" %s() %d, device=%p", __func__, __LINE__, (void *)*device);
    return 0;
}
/*
 * Release the HAL device previously created by virt_sound_adev_open().
 * The hw_device_t header is the first member of struct audio_device, so
 * freeing `device` frees the whole allocation. Always returns 0.
 */
int virt_sound_adev_close(hw_device_t *device)
{
    ALOGV("%s() %d \n", __func__, __LINE__);
    free(device);   /* free(NULL) is a no-op, so no guard is needed */
    return 0;
}
这个代码您是否似曾相识呢?这是sound-hal层的通用代码,都是通过构造 audio_device 对象,
来实现对声卡管理的.
打开音频输出流方法,如下:
/*
 * Create and register an output stream on the virtual sound device.
 *
 * Allocates a stream_out, seeds it with the fixed low-latency PCM
 * configuration for the virtual card, and registers it in
 * adev->outputs[OUTPUT_LOW_LATENCY]. The device is NOT opened here;
 * out_write() opens the PCM lazily on the first write.
 *
 * @return 0 on success; -ENOMEM on allocation failure; -EBUSY if a stream
 *         of this type is already registered (then *stream_out is NULL).
 */
int adev_open_output_stream(struct audio_hw_device *dev,
                            audio_io_handle_t handle,
                            audio_devices_t devices,
                            audio_output_flags_t flags,
                            struct audio_config *config,
                            struct audio_stream_out **stream_out,
                            const char *address __unused)
{
    int ret;
    struct audio_device *adev = (struct audio_device *)dev;
    struct stream_out *out;
    enum output_type type = OUTPUT_LOW_LATENCY;

    /* BUGFIX: the original logged *stream_out here, dereferencing an
     * uninitialized out-parameter (undefined behavior), and printed
     * pointers with %x. Log the pointer values themselves with %p. */
    ALOGD("%s():%d adev=%p, dev=%p, stream_out=%p", __func__, __LINE__,
          (void *)adev, (void *)dev, (void *)stream_out);

    out = calloc(1, sizeof(*out));
    if (!out)
        return -ENOMEM;

    /* Advertise stereo first, mono second; calloc left the terminating 0. */
    out->supported_channel_masks[0] = AUDIO_CHANNEL_OUT_STEREO;
    out->supported_channel_masks[1] = AUDIO_CHANNEL_OUT_MONO;
    if (config != NULL)
        memcpy(&(out->aud_config), config, sizeof(struct audio_config));
    out->channel_mask = AUDIO_CHANNEL_OUT_STEREO;
    out->flags = flags;
    out->output_direct = false;
    out->channel_buffer = NULL;
    out->bitstream_buffer = NULL;

    /* Fixed configuration for the virtual card, regardless of `config`. */
    out->config = pcm_config;            /* defined in audio_hw.hpp */
    out->pcm_device = PCM_DEVICE;
    type = OUTPUT_LOW_LATENCY;
    out->output_direct = false;
    out->output_direct_mode = VIRTUAL_SOUND;
    out->config.format = PCM_FORMAT_S16_LE;

    out->stream.write = out_write;       /* entry point that feeds PCM to the card */
    out->dev = adev;
    out->dev->pre_output_device_id = OUT_DEVICE_SPEAKER;
    out->dev->pre_input_source_id = IN_SOURCE_MIC;
    out->standby = true;                 /* PCM opened lazily by out_write() */
    out->nframes = 0;
    out->slice_time_up = 0;
    out->slice_time_down = 0;

    /* Only one stream per output type may be registered at a time. */
    pthread_mutex_lock(&adev->lock_outputs);
    if (adev->outputs[type]) {
        pthread_mutex_unlock(&adev->lock_outputs);
        ret = -EBUSY;
        goto err_open;
    }
    adev->outputs[type] = out;
    pthread_mutex_unlock(&adev->lock_outputs);

    *stream_out = &out->stream;
    ALOGD(" %s():%d okay", __func__, __LINE__);
    return 0;

err_open:
    free(out);
    *stream_out = NULL;
    ALOGW(" %s() %d dev open errors", __func__, __LINE__);
    return ret;
}
/*
 * Tear down an output stream: close any open PCM handles, unregister the
 * stream from the device's outputs[] table, and free it.
 *
 * BUGFIX: in the original listing the `if (!out->standby)` block was never
 * closed (unbalanced braces) and unlock_all_outputs() sat inside it, so a
 * stream that was already in standby would leave every output lock held.
 * The unlock now runs unconditionally after the conditional PCM close.
 */
void adev_close_output_stream(struct audio_hw_device *dev,
                              struct audio_stream_out *stream)
{
    int type;
    /* stream.common is the first member, so this cast is well-defined. */
    struct stream_out *out = (struct stream_out *)stream;
    struct audio_device *adev = out->dev;
    int i;

    lock_all_outputs(adev);
    if (!out->standby) {
        for (i = 0; i < PCM_TOTAL; i++) {
            if (out->pcm[i]) {
                pcm_close(out->pcm[i]);
                out->pcm[i] = NULL;
            }
        }
        out->standby = true;
        out->nframes = 0;
    }
    unlock_all_outputs(adev, NULL);

    /* Remove the stream from the device's bookkeeping table. */
    adev = (struct audio_device *)dev;
    pthread_mutex_lock(&adev->lock_outputs);
    for (type = 0; type < (int)OUTPUT_TOTAL; ++type) {
        if (adev->outputs[type] == (struct stream_out *)stream) {
            adev->outputs[type] = NULL;
            break;
        }
    }
    pthread_mutex_unlock(&adev->lock_outputs);

    free(stream);
    ALOGD(" %s() okay %d run over", __func__, __LINE__);
}
上面是输出流方法,就是audioTrack播放音频时、打开的音频输出流;当然我们编写此hal驱动
不向audioTrack模块开放,供我们自己封装使用(可以理解为构成播放器)。
音频输出流建立好,我们如何向声卡中送音频输入呢?
向声卡中送入音频数据
/*
 * Write PCM data to the virtual sound card.
 *
 * Opens the PCM device lazily on the first write after standby, then pushes
 * `bytes` of audio through tinyalsa. On any error the call sleeps for the
 * time the buffer would have taken to play, preserving the caller's pacing,
 * and still reports `bytes` consumed -- the audio-HAL convention of
 * swallowing transient errors.
 */
ssize_t out_write(struct audio_stream_out *stream, const void* buffer,
                  size_t bytes)
{
    int ret = 0;
    struct stream_out *out = (struct stream_out *)stream;
    struct audio_device *adev = out->dev;

    out->out_data_size = bytes;
    out_dump(out, 0);

    pthread_mutex_lock(&out->lock);
    if (out->standby) {
        /* Drop the stream lock and take the device-wide locks in the
         * canonical order, then re-check standby under them. */
        pthread_mutex_unlock(&out->lock);
        lock_all_outputs(adev);
        if (!out->standby) {
            unlock_all_outputs(adev, out);
            goto false_alarm;
        }
        out->pcm[PCM_CARD] = pcm_open(PCM_CARD, PCM_DEVICE_SCO,
                                      PCM_OUT | PCM_MONOTONIC, &out->config);
        ALOGD("%s()%d pcm_open(card=%d, device=%d,PCM_OUT,config.channels=0x%x) \n",
              __func__, __LINE__, PCM_CARD, PCM_DEVICE_SCO, out->config.channels);
        if (out->pcm[PCM_CARD] && !pcm_is_ready(out->pcm[PCM_CARD])) {
            ALOGE("pcm_open(PCM_CARD) failed: %s",
                  pcm_get_error(out->pcm[PCM_CARD]));
            pcm_close(out->pcm[PCM_CARD]);
            out->pcm[PCM_CARD] = NULL;  /* BUGFIX: don't keep the closed handle */
            /* BUGFIX: record the failure; the original left ret == 0 here,
             * so the pacing sleep below was skipped on open failure. */
            ret = -ENODEV;
            unlock_all_outputs(adev, NULL);
            goto final_exit;
        }
        out->standby = false;
        unlock_all_outputs(adev, out);
    }

false_alarm:
    if (out->disabled) {
        ret = -EPIPE;
        goto exit;
    }

    ret = pcm_write(out->pcm[PCM_CARD], (void *)buffer, bytes);
    if (ret == 0) {
        /* Account in frames: bytes / (channels * bytes-per-sample). */
        out->written += bytes / (out->config.channels * sizeof(short));
        out->nframes = out->written;
    }

exit:
    pthread_mutex_unlock(&out->lock);
final_exit:
    if (ret != 0) {
        ALOGD("AudioData write error , keep slience! ret = %d", ret);
        /* Sleep as long as the lost buffer would have taken to play. */
        usleep(bytes * 1000000 / audio_stream_out_frame_size(stream) /
               out_get_sample_rate(&stream->common));
    }
    /* BUGFIX: written is uint64_t and bytes is size_t; the original logged
     * both with %d, which is undefined behavior on a 64-bit ABI. */
    ALOGD("%s(),%d written=%llu, bytes=%zu \n", __func__, __LINE__,
          (unsigned long long)out->written, bytes);
    return bytes;
}
此方法是向声卡中写数据具体实现,前面声卡设备和输出流只是构造内存中管理对象,
检测 out->standby 标识为如果没有打开声卡,就通过 pcm_open() 函数直接打开声卡,
打开后通过 pcm_write() 函数向声卡中写入音频数据,此时声卡就能够发声了。
声卡的关闭是在 adev_close_output_stream() 方法中实现的,sound-hal驱动
是不是也很简单呢?不用向android用户空间提供api接口就这么简单;那我们如何使用
这个sound-hal呢?
sound-hal使用方法
下面是创建音频播放线程,在socket上读取音频数据、把音频数据写入声卡中。
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/un.h>
#include <signal.h>
#include <cutils/log.h>
#include <cutils/sockets.h>
#include <cutils/properties.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include "virtAudioHal/virt_sound_hal.h"
#include "virt_sound_output.h"
#define LOG_TAG "virt_sound_output"
#define LOG_NDEBUG 0
//> audio raw data sample rate = 44100, AUDIO_CHANNEL_OUT_STEREO, PCM_FORMAT_S16_LE
/* Fixed stream configuration handed to adev_open_output_stream().
 * NOTE(review): .format casts tinyalsa's PCM_FORMAT_S16_LE (value 0) into
 * audio_format_t, where 0 means AUDIO_FORMAT_DEFAULT, not 16-bit PCM.
 * Harmless here because out_write() forces out->config.format itself, but
 * AUDIO_FORMAT_PCM_16_BIT would be the accurate constant -- confirm. */
static const audio_config_t VIRTUAL_SOUND_OUTPUT_INITIALIZER = {
/* .sample_rate = */ 44100,
/* .channel_mask = */ AUDIO_CHANNEL_OUT_STEREO,
/* .format = */ (audio_format_t)PCM_FORMAT_S16_LE,
/* .offload_info = */ {
/* .version = */ AUDIO_OFFLOAD_INFO_VERSION_CURRENT,
/* .size = */ sizeof(audio_offload_info_t),
/* .sample_rate = */ 0,
/* .channel_mask = */ 0,
/* .format = */ AUDIO_FORMAT_DEFAULT,
/* .stream_type = */ AUDIO_STREAM_VOICE_CALL,
/* .bit_rate = */ 0,
/* .duration_us = */ 0,
/* .has_video = */ false,
/* .is_streaming = */ false,
/* .bit_width = */ 16,
/* .offload_buffer_size = */ 0,
/* .usage = */ AUDIO_USAGE_UNKNOWN
},
/* .frame_count = */ 4,
};
/* Device handle and output stream shared between the service and the
 * playback thread; set by create_sound_output_thread(). */
static struct audio_device *g_Dev = NULL;
static struct stream_out* g_stream_out;
/* Bytes read from the socket (and written to the card) per iteration. */
static int OUT_BUFFER_SIZE = 1024 *2;
static int virt_snd_output_fd = -1;          /* connected client socket fd */
static bool virt_snd_output_running = false; /* outer keep-alive loop flag */
static bool virt_snd_start_running = false;  /* inner read/write pump flag */
/* Signal both loops of the playback thread to stop; the thread notices the
 * flags on its next pass and winds down on its own. */
void exit_sound_output_process(){
    virt_snd_output_running = false;
    virt_snd_start_running = false;
}
/* SIGPIPE handler: a write to a socket whose peer vanished lands here;
 * request a clean shutdown of the playback thread.
 * NOTE(review): ALOGV (and anything that may allocate or lock) is not
 * async-signal-safe; the flags set by exit_sound_output_process() are plain
 * bools, not sig_atomic_t -- acceptable for a debug build, confirm for
 * production. */
static void IPC_disconnect_handle_pipe(int signo){
ALOGV("[%s: %d] pipe socket disconnect, sig:%d \n", __FILE__, __LINE__, signo);
exit_sound_output_process();
}
static void* sound_output_thread(void* argv)
{
(void)argv;
int readLen,writeLen;
char empty = 'c';
struct sigaction sa;
sa.sa_handler = IPC_disconnect_handle_pipe;
sigemptyset(&sa.sa_mask);
sa.sa_flags = 0;
sigaction(SIGPIPE, &sa, NULL);
void * out_buffer = NULL;
int ret = adev_open_output_stream((struct audio_hw_device*)g_Dev,
0,
0,
0,
(struct audio_config *)&VIRTUAL_SOUND_OUTPUT_INITIALIZER,
&g_stream_out,
&empty);
if(ret < 0){
ALOGE(" %s() open device failed, exit thread %d \n", __func__, __LINE__);
return ((void*)0);
}
out_buffer = malloc(OUT_BUFFER_SIZE);
if(out_buffer == NULL){
ALOGE(" %s() malloc failed, exit thread %d \n", __func__, __LINE__);
goto exit_thread;
}
ALOGD(" %s() : %d open output_stream success ", __func__, __LINE__);
int count = 0;
while(virt_snd_output_running)
{
while(virt_snd_start_running){
readLen = read(virt_snd_output_fd, out_buffer, OUT_BUFFER_SIZE);
if(readLen <= 0){
ALOGE("%s:%d read socket data ERROR,len = %d", __func__, __LINE__, readLen);
exit_sound_output_process();
break;
}
writeLen = out_write((struct audio_stream_out*)g_stream_out, out_buffer, readLen);
if(writeLen < 0){
ALOGE("%s:%d write pcm data ERROR,len = %d", __func__, __LINE__, writeLen);
break;
}
ALOGD(" %s() pcm write bytes=%d, count= %d \n", __func__, writeLen, count++);
}
usleep(100000);
}
exit_thread:
adev_close_output_stream((struct audio_hw_device*)g_Dev, (struct audio_stream_out*)g_stream_out);
close(virt_snd_output_fd);
free(out_buffer);
ALOGV(" %s():%d thread exited ... \n", __func__, __LINE__);
return ((void*)0);
}
/*
 * Start the playback thread for an authorized client socket.
 *
 * @param c_socket  connected client fd; ownership passes to the thread
 * @param dev       audio device created by virt_sound_adev_open()
 * @return 0 on success, -1 on bad arguments or thread-creation failure,
 *         1 if a playback session is already running.
 */
int create_sound_output_thread(int c_socket, struct audio_device* dev)
{
    pthread_t id;

    if (dev == NULL || c_socket < 0) {
        ALOGE(" %s() thread input parament ERROR %d \n", __func__, __LINE__);
        return -1;
    }

    if (virt_snd_output_running == false) {
        /* Publish the session state before the thread starts reading it. */
        virt_snd_output_running = true;
        virt_snd_start_running = true;
        virt_snd_output_fd = c_socket;
        g_Dev = dev;
        if (pthread_create(&id, NULL, sound_output_thread, NULL) != 0) {
            ALOGE("native thread create fail");
            /* BUGFIX: roll the flags back; the original left them set, so
             * every later call reported "thread exists" although none ran. */
            virt_snd_output_running = false;
            virt_snd_start_running = false;
            virt_snd_output_fd = -1;
            return -1;
        }
        /* The thread is never joined; detach it so its resources are
         * reclaimed automatically when it exits. */
        pthread_detach(id);
        ALOGD(" %s():%d created thread success \n", __func__, __LINE__);
        return 0;
    } else {
        ALOGD(" %s():%d Thread has exsited, please destroy it \n", __func__, __LINE__);
        return 1;
    }
}
此 create_sound_output_thread() 是创建音频输出线程接口,需要传递socket fd和audio_device*给线程,音频播放线程会从
socket中读取pcm数据、并写入声卡,音频格式 AUDIO_CHANNEL_OUT_STEREO、44100、PCM_FORMAT_S16_LE的源数据。
入口中 audio_device 如何创建呢?我们在看下面创建代码。
创建 audio_device 设备
下面代码是调用 virt_sound_adev_open() 来创建 audio_device 设备。
#include <fcntl.h>
#include <sys/stat.h>
#include <cutils/log.h>
#include <cutils/sockets.h>
#include <cutils/properties.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include "virtAudioHal/virt_sound_hal.h"
#include "virtual_sound_driver.h"
#include "virt_sound_output.h"
#include "virt_sound_input.h"
#define LOG_TAG "virt_sound_driver"
#define LOG_NDEBUG 0
/* Service-wide state for the accept loop. */
static struct audio_device *g_Dev = NULL;    /* HAL device, lives as long as main() */
const static hw_module_t *module = NULL;     /* passed to adev_open; stored, never dereferenced here */
static int socket_count = 0;
static int bufferSizeInBytes = 1024*4;       /* authorization-message read buffer */
static int server_sockfd = -1;               /* listening abstract UNIX socket */
static bool g_bQuitThread = false;           /* true => leave the outer (re)bind loop */
static bool g_ThreadAction = true;           /* false => stop accepting clients */
/* SIGPIPE handler for the server process: only logs, so a client dropping
 * its socket mid-write does not kill the service.
 * NOTE(review): ALOGW is not async-signal-safe; tolerable for debug
 * logging, but should not be relied on in production. */
static void IPC_disconnect_handle_pipe(int signo){
ALOGW("[%s: %d] server socket pipe disconnected. sig:%d \n", __FILE__, __LINE__, signo);
}
/*
 * Service entry point. Creates the virtual-sound HAL device, then runs an
 * accept loop on an abstract-namespace UNIX socket (SOCKETNAME). Each new
 * client must first send an authorization string: OUT_AUTHORIZE clients get
 * a playback thread, IN_AUTHORIZE clients a capture thread; anything else
 * is closed immediately.
 */
int main(int argc, char const *argv[])
{
int count = 0;        /* NOTE(review): unused in this function */
int rc = 0;
int readLen = 0;
void* inBuffer = NULL;
int ret;
int client_sockfd;
struct sockaddr_un clientAddr;
socklen_t len = sizeof(clientAddr);
struct sigaction sa;
/* Log-and-continue SIGPIPE handler: a dying client must not kill us. */
sa.sa_handler = IPC_disconnect_handle_pipe;
sigemptyset(&sa.sa_mask);
sa.sa_flags = 0;
sigaction(SIGPIPE, &sa, NULL);
/* Create the HAL device once; it lives for the whole service lifetime.
 * NOTE(review): &g_Dev is struct audio_device** where the API expects
 * hw_device_t**; this relies on the hw_device_t header being the first
 * member -- a cast would make the intent explicit. */
ret = virt_sound_adev_open(module, "mmm.virt.sound", &g_Dev);
if(ret < 0){
ALOGE(" %s():%d virt_sound_adev_open() failed ret=%d, exit thread \n", __func__, __LINE__, ret);
return -1;
}
g_bQuitThread = false;
ALOGD(" %s() %d wait for connecting... \n", __func__, __LINE__);
/* Outer loop: (re)create the listening socket after any fatal error. */
while(!g_bQuitThread){
server_sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
if(server_sockfd < 0){
usleep(50000);
ALOGV("[%s: %d] socket: %s create error !\n", __func__, __LINE__, SOCKETNAME);
close(server_sockfd);
continue;
}
struct sockaddr_un serverAddr;
memset(&serverAddr, 0, sizeof(serverAddr));
serverAddr.sun_family = AF_UNIX;
strcpy(serverAddr.sun_path, SOCKETNAME);
serverAddr.sun_path[0] = 0; //> abstract address first byte must be '0' .
/* NOTE(review): zeroing sun_path[0] AFTER the strcpy erases the first
 * character of SOCKETNAME, so the effective abstract name is SOCKETNAME
 * minus its first byte, with length strlen(SOCKETNAME)-1 plus the
 * leading NUL. Clients must build the address identically or they will
 * never connect -- confirm against the client-side code. */
len = strlen(SOCKETNAME)+offsetof(struct sockaddr_un,sun_path);
rc = bind(server_sockfd, (struct sockaddr*)&serverAddr, len);
if(rc < 0){
usleep(50000);
ALOGW("[%s: %d] socket: %s bind error !\n", __func__, __LINE__, SOCKETNAME);
close(server_sockfd);
continue;
}
rc = listen(server_sockfd, 2);
if(rc < 0){
usleep(50000);
ALOGW("[%s: %d] socket: %s listen error !\n", __func__, __LINE__, SOCKETNAME);
close(server_sockfd);
continue;
}
/* Buffer for the one authorization message read from each client;
 * allocated per outer-loop pass and freed below after the inner loop. */
inBuffer = malloc(bufferSizeInBytes);
if(inBuffer == NULL){
ALOGE("[%s: %d] malloc size=%d failed \n", __func__, __LINE__, bufferSizeInBytes);
break;
}
/* Inner loop: accept clients and dispatch them by authorization string. */
while(g_ThreadAction){
memset(&clientAddr, 0 , sizeof(clientAddr));
client_sockfd = accept(server_sockfd, (struct sockaddr*)&clientAddr, &len);
if(client_sockfd < 0){
usleep(50000);
ALOGE("[%s: %d] socket: %s listen error !\n", __func__, __LINE__, SOCKETNAME);
close(client_sockfd);
continue;
}
/* Keep the client fd out of any exec'd children. */
fcntl(client_sockfd, F_SETFD, FD_CLOEXEC);
ALOGD("[%s: %d] socket: '%d' connected VirtualSoundThread server !\n", __func__, __LINE__, client_sockfd);
memset(inBuffer, 0, bufferSizeInBytes);
readLen = read(client_sockfd, inBuffer, bufferSizeInBytes);
if(readLen <= 0){
ALOGE("Error:[%s: %d] read data stream error \n", __func__, __LINE__);
break;
} else {
char *string = (char*)inBuffer;
ALOGD("[%s: %d] read data='%s' , length: %d \n", __func__, __LINE__, string, readLen);
rc = strcmp(string,OUT_AUTHORIZE); //> authorize client
if(rc == 0){
/* Playback client: fd ownership passes to the output thread. */
create_sound_output_thread(client_sockfd, (struct audio_device *)g_Dev);
} else {
rc = strcmp(string,IN_AUTHORIZE); //> authorize client
if(rc == 0){
/* Capture client: fd ownership passes to the input thread. */
create_sound_input_thread(client_sockfd, (struct audio_device *)g_Dev);
} else {
ALOGD("%s() : %d socket: '%d' authorize failed by closed!\n", __func__, __LINE__, client_sockfd);
close(client_sockfd);
}
}
}
}
free(inBuffer);
/* NOTE(review): if the last accepted fd was handed to a worker thread,
 * this close() races with the thread's own close() on the same fd --
 * confirm the intended ownership before relying on this path. */
close(client_sockfd);
close(server_sockfd);
ALOGV("[%s: %d] network disconnect, restart ... \n", __func__, __LINE__);
}
virt_sound_adev_close((hw_device_t*)g_Dev);
ALOGV("[%s: %d] VirtualSoundThread exit ... \n", __func__, __LINE__);
return 0;
}
/* Ask the accept loop and both worker threads to shut down; each side
 * observes its flag and exits on its own schedule. */
void exit_virt_sound_service()
{
    g_ThreadAction = false;
    g_bQuitThread = true;
    exit_sound_input_thread();
    exit_sound_output_thread();
}
此service程序通过 virt_sound_adev_open() 创建音频设备,此设备生命周期与此service相同,至此我们就具有
audio-device来管理此声卡。
通过 socket(AF_UNIX, SOCK_STREAM, 0) 创建的UNIX SOCKET 套接字,来实现音频的输入与输出数据传输,
unix socket 不是本文介绍内容,看官如需了解此部分内容、请在我博客中搜索关键字。
当客户端链接到server-socket、并通过 OUT_AUTHORIZE 鉴权验证,service就创建 create_sound_output_thread() 输出流线程;
此输出流线程就把网络上的pcm数据源源不断地写入声卡中,也就实现了播放器功能。
如果看到此处、恭喜你已经get到本篇文章的重点了。我们通过sound-hal以及用例,系统地串联了起来;聪明的你可能会感觉sound-hal层驱动
设计逻辑与linux驱动框架如此相似呢。创新一直都不是凭空而来的。
上面内容作为框架认知够了,如果深入此部分内容的话,我们还需要了解接下来内容,您就可以写自己的sound-hal层程序了。
android系统sound结构相关的数据结构
@hardware/libhardware/include/hardware.h
/* Quoted from hardware/libhardware/include/hardware/hardware.h: every HAL
 * module exports a hw_module_t whose methods->open() yields a hw_device_t. */
typedef struct hw_module_methods_t {
/** Open a specific device */
int (*open)(const struct hw_module_t* module, const char* id,
struct hw_device_t** device);
} hw_module_methods_t;
/* Module descriptor placed at HAL_MODULE_INFO_SYM in every HAL library. */
typedef struct hw_module_t {
uint32_t tag;                       /* must be HARDWARE_MODULE_TAG */
uint16_t module_api_version;        /* module API version (major) */
#define version_major module_api_version
uint16_t hal_api_version;           /* HAL framework API version (minor) */
#define version_minor hal_api_version
const char *id;                     /* module identifier, e.g. "audio" */
const char *name;                   /* human-readable module name */
const char *author;                 /* module author/owner */
struct hw_module_methods_t* methods; /* module entry points (open) */
void* dso;                          /* filled by the loader (dlopen handle) */
#ifdef __LP64__
uint64_t reserved[32-7];
#else
/** padding to 128 bytes, reserved for future use */
uint32_t reserved[32-7];
#endif
} hw_module_t;
/* Base header of every HAL device; embedded as the first member of
 * device-specific structs (e.g. audio_hw_device). */
typedef struct hw_device_t {
uint32_t tag;                       /* must be HARDWARE_DEVICE_TAG */
uint32_t version;                   /* device API version */
struct hw_module_t* module;         /* module this device belongs to */
/** padding reserved for future use */
#ifdef __LP64__
uint64_t reserved[12];
#else
uint32_t reserved[12];
#endif
int (*close)(struct hw_device_t* device); /* destroys the device */
} hw_device_t;
以上hw_module_methods_t、hw_module_t和hw_device_t是framework框架HAL的接口定义,
此部分我们只需了解,因我们不向android用户空间提供接口,不用实现。
sound config 相关
@system/media/audio/include/system/audio.h
/* Quoted from system/media/audio/include/system/audio.h: metadata for
 * compressed-offload playback, embedded in audio_config. */
typedef struct {
uint16_t version; // version of the info structure
uint16_t size; // total size of the structure including version and size
uint32_t sample_rate; // sample rate in Hz
audio_channel_mask_t channel_mask; // channel mask
audio_format_t format; // audio format
audio_stream_type_t stream_type; // stream type
uint32_t bit_rate; // bit rate in bits per second
int64_t duration_us; // duration in microseconds, -1 if unknown
bool has_video; // true if stream is tied to a video stream
bool is_streaming; // true if streaming, false if local playback
uint32_t bit_width;
uint32_t offload_buffer_size; // offload fragment size
audio_usage_t usage;
} audio_offload_info_t;
/* Stream format negotiated when opening an input/output stream. */
struct audio_config {
uint32_t sample_rate;               // sample rate in Hz
audio_channel_mask_t channel_mask;  // channel layout
audio_format_t format;              // sample format
audio_offload_info_t offload_info;  // only meaningful for offload streams
size_t frame_count;                 // requested buffer size in frames
};
typedef struct audio_config audio_config_t;
audio stream 相关
@hardware/libhardware/include/hardware/audio.h
/* Quoted from hardware/libhardware/include/hardware/audio.h: methods common
 * to both input and output streams (subset shown by the article). */
struct audio_stream {
/**
* Put the audio hardware input/output into standby mode.
* Driver should exit from standby mode at the next I/O operation.
* Returns 0 on success and <0 on failure.
*/
int (*standby)(struct audio_stream *stream);
/** dump the state of the audio input/output device */
int (*dump)(const struct audio_stream *stream, int fd);
/** Return the set of device(s) which this stream is connected to */
audio_devices_t (*get_device)(const struct audio_stream *stream);
/**
* Currently unused - set_device() corresponds to set_parameters() with key
* AUDIO_PARAMETER_STREAM_ROUTING for both input and output.
* AUDIO_PARAMETER_STREAM_INPUT_SOURCE is an additional information used by
* input streams only.
*/
int (*set_device)(struct audio_stream *stream, audio_devices_t device);
};
typedef struct audio_stream audio_stream_t;
/* Quoted from hardware/libhardware/include/hardware/audio.h. The virtual
 * HAL in this article only implements the write() member (out_write). */
struct audio_stream_out {
/**
* Common methods of the audio stream out. This *must* be the first member of audio_stream_out
* as users of this structure will cast a audio_stream to audio_stream_out pointer in contexts
* where it's known the audio_stream references an audio_stream_out.
*/
struct audio_stream common;
/**
* Write audio buffer to driver. Returns number of bytes written, or a
* negative status_t. If at least one frame was written successfully prior to the error,
* it is suggested that the driver return that successful (short) byte count
* and then return an error in the subsequent call.
*
* If set_callback() has previously been called to enable non-blocking mode
* the write() is not allowed to block. It must write only the number of
* bytes that currently fit in the driver/hardware buffer and then return
* this byte count. If this is less than the requested write size the
* callback function must be called when more space is available in the
* driver/hardware buffer.
*/
ssize_t (*write)(struct audio_stream_out *stream, const void* buffer,
size_t bytes);
/**
* Notifies to the audio driver to stop playback however the queued buffers are
* retained by the hardware. Useful for implementing pause/resume. Empty implementation
* if not supported however should be implemented for hardware with non-trivial
* latency. In the pause state audio hardware could still be using power. User may
* consider calling suspend after a timeout.
*
* Implementation of this function is mandatory for offloaded playback.
*/
int (*pause)(struct audio_stream_out* stream);
/**
* Notifies to the audio driver to flush the queued data. Stream must already
* be paused before calling flush().
*
* Implementation of this function is mandatory for offloaded playback.
*/
int (*flush)(struct audio_stream_out* stream);
/**
* Called by the framework to start a stream operating in mmap mode.
* create_mmap_buffer must be called before calling start()
*
* \note Function only implemented by streams operating in mmap mode.
*
* \param[in] stream the stream object.
* \return 0 in case of success.
* -ENOSYS if called out of sequence or on non mmap stream
*/
int (*start)(const struct audio_stream_out* stream);
/**
* Called by the framework to stop a stream operating in mmap mode.
* Must be called after start()
*
* \note Function only implemented by streams operating in mmap mode.
*
* \param[in] stream the stream object.
* \return 0 in case of success.
* -ENOSYS if called out of sequence or on non mmap stream
*/
int (*stop)(const struct audio_stream_out* stream);
};
typedef struct audio_stream_out audio_stream_out_t;
/* Quoted from hardware/libhardware/include/hardware/audio.h: capture-side
 * counterpart of audio_stream_out. */
struct audio_stream_in {
/**
* Common methods of the audio stream in. This *must* be the first member of audio_stream_in
* as users of this structure will cast a audio_stream to audio_stream_in pointer in contexts
* where it's known the audio_stream references an audio_stream_in.
*/
struct audio_stream common;
/** Read audio buffer in from audio driver. Returns number of bytes read, or a
* negative status_t. If at least one frame was read prior to the error,
* read should return that byte count and then return an error in the subsequent call.
*/
ssize_t (*read)(struct audio_stream_in *stream, void* buffer,
size_t bytes);
/**
* Called by the framework to start a stream operating in mmap mode.
* create_mmap_buffer must be called before calling start()
*
* \note Function only implemented by streams operating in mmap mode.
*
* \param[in] stream the stream object.
* \return 0 in case off success.
* -ENOSYS if called out of sequence or on non mmap stream
*/
int (*start)(const struct audio_stream_in* stream);
/**
* Called by the framework to stop a stream operating in mmap mode.
*
* \note Function only implemented by streams operating in mmap mode.
*
* \param[in] stream the stream object.
* \return 0 in case of success.
* -ENOSYS if called out of sequence or on non mmap stream
*/
int (*stop)(const struct audio_stream_in* stream);
};
typedef struct audio_stream_in audio_stream_in_t;
/* An audio HAL module is just a plain hw_module_t. */
struct audio_module {
struct hw_module_t common;
};
/* Base audio device object (subset quoted by the article; the full AOSP
 * definition also carries the open/close stream function pointers used by
 * adev_open_output_stream etc.). */
struct audio_hw_device {
/**
* Common methods of the audio device. This *must* be the first member of audio_hw_device
* as users of this structure will cast a hw_device_t to audio_hw_device pointer in contexts
* where it's known the hw_device_t references an audio_hw_device.
*/
struct hw_device_t common;
};
typedef struct audio_hw_device audio_hw_device_t;
此sound-hal 定义的数据结构
@vendor/metis/virt_sound_hal/include/virtAudioHal/virt_sound_hw.h
/* Per-service device object: wraps the HAL audio_hw_device (must stay the
 * first member -- callers cast hw_device_t* back to audio_device*) and
 * tracks the registered output streams. */
struct audio_device {
struct audio_hw_device device;      /* HAL header; must be first member */
pthread_mutex_t lock; /* see note below on mutex acquisition order */
audio_devices_t out_device; /* "or" of stream_out.device for all active output streams */
audio_devices_t in_device;          /* currently selected input device(s) */
struct pcm *pcm_voice_out;          /* tinyalsa handle, voice downlink */
struct pcm *pcm_voice_in;           /* tinyalsa handle, voice uplink */
struct stream_out *outputs[OUTPUT_TOTAL]; /* one slot per output type */
pthread_mutex_t lock_outputs; /* see note below on mutex acquisition order */
};
/* Output-stream state; `stream` must stay the first member so an
 * audio_stream_out* can be cast back to stream_out* (see out_write). */
struct stream_out {
struct audio_stream_out stream;     /* HAL vtable; must be first member */
pthread_mutex_t lock; /* see note below on mutex acquisition order */
struct pcm *pcm[PCM_TOTAL];         /* tinyalsa handles, one per card slot */
struct pcm_config config;           /* tinyalsa config actually used */
struct audio_config aud_config;     /* config requested by the caller */
unsigned int pcm_device;            /* ALSA device number to open */
bool standby; /* true if all PCMs are inactive */
bool disabled;                      /* true => out_write returns -EPIPE */
audio_channel_mask_t channel_mask;  /* active channel layout */
/* Array of supported channel mask configurations. +1 so that the last entry is always 0 */
audio_channel_mask_t supported_channel_masks[MAX_SUPPORTED_CHANNEL_MASKS + 1];
bool muted;
uint64_t written; /* total frames written, not cleared when entering standby */
uint64_t nframes;                   /* mirror of `written` (see out_write) */
bool output_direct;
int slice_time_up;
int slice_time_down;
int out_data_size;                  /* byte count of the last write */
struct audio_device *dev;           /* back-pointer to the owning device */
struct resampler_itfe *resampler;
};
/* Input-stream state; `stream` must stay the first member so an
 * audio_stream_in* can be cast back to stream_in*. */
struct stream_in {
struct audio_stream_in stream;      /* HAL vtable; must be first member */
pthread_mutex_t lock; /* see note below on mutex acquisition order */
struct pcm *pcm;                    /* tinyalsa capture handle */
bool standby;                       /* true while the PCM is closed */
unsigned int requested_rate;        /* client rate; resampled if != HW rate */
struct resampler_itfe *resampler;   /* rate converter (speex-backed) */
struct resampler_buffer_provider buf_provider; /* feeds the resampler */
int16_t *buffer;                    /* staging buffer of captured frames */
size_t frames_in;                   /* frames currently held in `buffer` */
int read_status;                    /* last pcm_read() result */
struct pcm_config *config;          /* tinyalsa capture configuration */
struct audio_device *dev;           /* back-pointer to the owning device */
};
上面是sound-hal例程中使用的、用户定义 stream_out 和 stream_in 的内容。
把service编译到android系统
下面贴处Android.mk 内容,供有需求的朋友参考。
# Android.mk: builds the virtual_sound_engine service executable that hosts
# the virtual sound HAL (accept loop + playback/capture pump threads).
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
LOCAL_MODULE := virtual_sound_engine
LOCAL_MODULE_TAGS := optional
#LOCAL_MULTILIB := both
#LOCAL_MODULE_PATH_32 := $(TARGET_OUT)/lib
#LOCAL_MODULE_PATH_64 := $(TARGET_OUT)/lib64
# Service sources: driver (main/accept loop), output pump, input pump.
LOCAL_SRC_FILES := \
virt_sound_driver.c \
virt_sound_output.c \
virt_sound_input.c
LOCAL_C_INCLUDES += \
$(LOCAL_PATH) \
$(KERNEL_HEADERS) \
external/speex/include \
external/tinyalsa/include \
frameworks/av/include \
frameworks/av/include/media \
frameworks/native/include \
\
frameworks/av/services/audioflinger \
frameworks/av/services/audiopolicy \
frameworks/av/services/audiopolicy/common/managerdefinitions/include \
frameworks/av/services/audiopolicy/common/include \
frameworks/av/services/audiopolicy/engine/interface \
frameworks/av/services/audiopolicy/service \
frameworks/av/services/medialog \
frameworks/av/services/soundtrigger \
system/media/audio_route/include \
vendor/metis/r_submix/include \
# libvirt_sound_hal provides the adev_* / out_write implementation.
LOCAL_SHARED_LIBRARIES:= \
liblog libutils libcutils \
libbinder \
libmedia \
libaudioutils \
libaudiomanager \
libaudioclient \
libmediametrics \
libvirt_sound_hal \
LOCAL_CFLAGS += -g -DBUILD_FOR_ANDROID
# NOTE(review): the build system reads LOCAL_LDFLAGS, not LOCAL_LD_FLAGS,
# so -nostartfiles is silently ignored here. Confirm whether it was ever
# intended -- an executable with main() normally needs the startup files.
LOCAL_LD_FLAGS += -nostartfiles
LOCAL_PRELINK_MODULE := false
# Starts the service at boot via init.
LOCAL_INIT_RC := VirtualSoundEngine.rc
$(info "LOCAL_MODULE = $(LOCAL_MODULE)")
include $(BUILD_EXECUTABLE)
include $(call all-makefiles-under,$(LOCAL_PATH))
把 sound-hal 编译到系统中 Android.bp 文件
// Copyright (C) 2012 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Header-only module: lets other targets include the HAL's public headers
// without linking the implementation.
cc_library_headers {
name: "libvirt_sound_hw_headers",
vendor_available: true,
export_include_dirs: ["include"],
}
// The virtual sound HAL implementation (sound-hal layer) as a shared
// library; drives the card directly through libtinyalsa.
cc_library_shared {
name: "libvirt_sound_hw",
srcs: ["virt_sound_hw.c"],
include_dirs: [
"external/speex/include",
"external/tinyalsa/include",
"frameworks/av/include/",
"frameworks/native/include/",
"frameworks/av/include/media",
"system/media/audio_route/include"
],
shared_libs: [
"liblog",
"libcutils",
"libutils",
"libtinyalsa",
"libaudioutils",
"libhardware_legacy",
"libspeexresampler",
],
local_include_dirs: ["include"],
export_include_dirs: ["include"],
header_libs: ["libvirt_sound_hw_headers"],
export_header_lib_headers: ["libvirt_sound_hw_headers"],
static_libs: [
"libmedia_helper",
"libspeex",
],
// NOTE(review): -Wno-error hides real warnings (e.g. the pointer-type
// mismatches discussed in the article code) -- consider removing it once
// the warnings are fixed.
cflags: [
"-Wno-unused-parameter",
"-Wno-error",
"-fPIC",
"-DANDROID",
],
}
由于音频输入流与输出流类似,未贴出代码,有需求的可以私信我。
文章未做过多文字解释,关键流程和关系笔者已尽力描述,感谢大家阅读;此文如对您有启发或帮助,
您的点赞或关注,是笔者孜孜不倦的动力,感谢。