Android Audio Driver Study (1): Audio HAL
HAL loading process
Loading the audio HAL takes three steps:
1. hw_get_module_by_class: load the HAL module
2. audio_hw_device_open: open the audio device
3. open_output_stream: open an output
DevicesFactory::loadAudioInterface(const char *if_name, audio_hw_device_t **dev)
rc = hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, if_name, &mod);
if (rc) {
ALOGE("%s couldn't load audio hw module %s.%s (%s)", __func__,
AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
goto out;
}
This ends up in adev_open() in audio_hw.c, the open() function pointer that was assigned inside the hardware module, and it is called only once.
rc = audio_hw_device_open(mod, dev);
if (rc) {
ALOGE("%s couldn't open audio hw device in %s.%s (%s)", __func__,
AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
goto out;
}
if ((*dev)->common.version < AUDIO_DEVICE_API_VERSION_MIN) {
ALOGE("%s wrong audio hw device version %04x", __func__, (*dev)->common.version);
rc = -EINVAL;
audio_hw_device_close(*dev);
goto out;
}
Once the dev device has been obtained, openOutputStream is called to open every supported output, which finally lands in Device.cpp:
Return<void> Device::openOutputStream(int32_t ioHandle, const DeviceAddress &device,
                                      const AudioConfig &config, AudioOutputFlagBitfield flags,
                                      const SourceMetadata& /* sourceMetadata */,
                                      openOutputStream_cb _hidl_cb) {
    audio_config_t halConfig;
    HidlUtils::audioConfigToHal(config, &halConfig);
    audio_stream_out_t *halStream;
    ALOGV(
        "open_output_stream handle: %d devices: %x flags: %#x "
        "srate: %d format %#x channels %x address %s",
        ioHandle, static_cast<audio_devices_t>(device.device),
        static_cast<audio_output_flags_t>(flags), halConfig.sample_rate, halConfig.format,
        halConfig.channel_mask, deviceAddressToHal(device).c_str());
    int status =
        mDevice->open_output_stream(mDevice, ioHandle, static_cast<audio_devices_t>(device.device),
                                    static_cast<audio_output_flags_t>(flags), &halConfig,
                                    &halStream, deviceAddressToHal(device).c_str());
    ALOGV("open_output_stream status %d stream %p", status, halStream);
    sp<IStreamOut> streamOut;
    if (status == OK) {
        streamOut = new StreamOut(this, halStream);
    }
    AudioConfig suggestedConfig;
    HidlUtils::audioConfigFromHal(halConfig, &suggestedConfig);
    _hidl_cb(analyzeStatus("open_output_stream", status), streamOut, suggestedConfig);
    return Void();
}
At this point the HAL is basically loaded. In the end we hold two objects: dev and stream. Every operation on the HAL layer goes through these two handles, which you can cross-check against the place where the audio HAL interface is defined: audio.h
static inline int audio_hw_device_open(const struct hw_module_t* module,
struct audio_hw_device** device)
{
return module->methods->open(module, AUDIO_HARDWARE_INTERFACE,
TO_HW_DEVICE_T_OPEN(device));
}
The audio_hw_device_open called earlier is exactly this spot in audio.h. The concrete implementation lives wherever the audio_module struct is defined, which differs per platform, but it generally ends up in something like the audio HAL's adev_open. Note the parameter struct audio_hw_device** device: this struct is the device that ultimately gets opened. Vendors usually wrap audio_hw_device in another layer, because audio_hw_device only exposes the stock interfaces and vendors need to add some of their own.
The interfaces provided by the audio_hw_device struct mostly operate directly on the device, e.g. get_supported_devices, set_mode, set_mic_mute, setParameter. Two of them are especially important: open_output_stream (playback output) and open_input_stream (capture input).
This is the third step mentioned earlier: the vendor implements these two functions, which ultimately return the structs audio_stream_in and audio_stream_out.
The interfaces these two structs provide operate on the stream, e.g. read, write, start, stop. When a Flinger thread touches the HAL layer, it generally ends up calling into these two structs.
So for the two objects mentioned before, dev and stream: dev is audio_hw_device, and stream is audio_stream_in / audio_stream_out.
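To make the relationship between the two handles concrete, here is a minimal sketch (not framework code; the "primary" instance name, the io handle value, the config and the silent buffer are illustrative assumptions) that walks the same three steps against the interfaces declared in a recent hardware/audio.h:

#include <hardware/hardware.h>
#include <system/audio.h>
#include <hardware/audio.h>

static int play_silence_once(void) {
    const hw_module_t *mod = NULL;
    audio_hw_device_t *dev = NULL;
    audio_stream_out_t *stream = NULL;
    struct audio_config config = {
        .sample_rate = 48000,
        .channel_mask = AUDIO_CHANNEL_OUT_STEREO,
        .format = AUDIO_FORMAT_PCM_16_BIT,
    };
    char buffer[4096] = {0};

    // Step 1: load the HAL module
    if (hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, "primary", &mod)) return -1;
    // Step 2: open the device; calls module->methods->open(), i.e. adev_open()
    if (audio_hw_device_open(mod, &dev)) return -1;
    // Step 3: open an output stream; calls the vendor's adev_open_output_stream()
    if (dev->open_output_stream(dev, 0 /* io handle */, AUDIO_DEVICE_OUT_SPEAKER,
                                AUDIO_OUTPUT_FLAG_PRIMARY, &config, &stream, NULL)) {
        audio_hw_device_close(dev);
        return -1;
    }
    // All stream operations go through the stream handle...
    stream->write(stream, buffer, sizeof(buffer));
    // ...and all device-level operations go through the dev handle.
    dev->close_output_stream(dev, stream);
    return audio_hw_device_close(dev);
}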
MTK Audio HAL
Initialization
- Module definition
As described above, the platform defines an audio_module, and the policy then loads the module according to the XML.
The audio_module definition on MTK:
struct legacy_audio_module HAL_MODULE_INFO_SYM = {
    .module = {
        .common = {
            .tag = HARDWARE_MODULE_TAG,
            .module_api_version = AUDIO_MODULE_API_VERSION_0_1,
            .hal_api_version = HARDWARE_HAL_API_VERSION,
            .id = AUDIO_HARDWARE_MODULE_ID,
            .name = "MTK Audio HW HAL",
            .author = "MTK",
            .methods = &legacy_audio_module_methods,
            .dso = NULL,
            .reserved = {0},
        },
    },
};
static struct hw_module_methods_t legacy_audio_module_methods = {
    .open = legacy_adev_open
};
So the module->methods->open mentioned in 1.1 above resolves to legacy_adev_open.
- legacy_adev_open
Its main job, as you can see, is to create a legacy_audio_device.
Look at this struct definition first: legacy_audio_device is the MTK wrapper around audio_hw_device mentioned earlier.
struct legacy_audio_device {
    struct audio_hw_device_mtk device;

    AudioMTKHardwareInterface *hwif;
};
struct audio_hw_device_mtk : audio_hw_device {

    int (*xway_play_start)(struct audio_hw_device *dev, int sample_rate);
    int (*xway_play_stop)(struct audio_hw_device *dev);
    int (*xway_play_write)(struct audio_hw_device *dev, void *buffer, int size_bytes);
    int (*xway_getfreebuffercount)(struct audio_hw_device *dev);
    int (*xway_rec_start)(struct audio_hw_device *dev, int smple_rate);
    int (*xway_rec_stop)(struct audio_hw_device *dev);
    int (*xway_rec_read)(struct audio_hw_device *dev, void *buffer, int size_bytes);

    int (*setup_parameters_callback)(struct audio_hw_device *dev, device_parameters_callback_t callback, void *cookie);
    int (*set_audio_parameter_changed_callback)(struct audio_hw_device *dev, device_audio_parameter_changed_callback_t callback, void *cookie);
    int (*clear_audio_parameter_changed_callback)(struct audio_hw_device *dev, void *cookie);
};
legacy_adev_open assigns each of the function-pointer interfaces.
These functions are the main interfaces for operating on the audio device; virtually every one of them matters, and their uses will be covered one by one later.
static int legacy_adev_open(const hw_module_t *module, const char *name,
......
    struct legacy_audio_device *ladev;
    ladev = (struct legacy_audio_device *)calloc(1, sizeof(*ladev));
    ladev->device.common.tag = HARDWARE_DEVICE_TAG;
#ifdef MTK_SUPPORT_AUDIO_DEVICE_API3
    ladev->device.common.version = AUDIO_DEVICE_API_VERSION_3_0;
#else
    ladev->device.common.version = AUDIO_DEVICE_API_VERSION_2_0;
#endif
    ladev->device.common.module = const_cast<hw_module_t *>(module);
    ladev->device.common.close = legacy_adev_close;

    ladev->device.get_supported_devices = adev_get_supported_devices;
    ladev->device.init_check = adev_init_check;
    ladev->device.set_voice_volume = adev_set_voice_volume;
    ladev->device.set_master_volume = adev_set_master_volume;
    ladev->device.get_master_volume = adev_get_master_volume;
    ladev->device.set_mode = adev_set_mode;
    ladev->device.set_mic_mute = adev_set_mic_mute;
    ladev->device.get_mic_mute = adev_get_mic_mute;
    ladev->device.set_parameters = adev_set_parameters;
    ladev->device.get_parameters = adev_get_parameters;
    ladev->device.get_input_buffer_size = adev_get_input_buffer_size;
    ladev->device.open_output_stream = adev_open_output_stream;
    ladev->device.close_output_stream = adev_close_output_stream;
    ladev->device.open_input_stream = adev_open_input_stream;
    ladev->device.close_input_stream = adev_close_input_stream;

    ladev->device.get_microphones = adev_get_microphones;

    ladev->device.dump = adev_dump;

    ladev->device.create_audio_patch = adev_create_audio_patch;
    ladev->device.release_audio_patch = adev_release_audio_patch;
    ladev->device.get_audio_port = adev_get_audio_port;
    ladev->device.set_audio_port_config = adev_set_audio_port_config;
......
    ladev->device.xway_play_start = adev_xway_play_start;
    ladev->device.xway_play_stop = adev_xway_play_stop;
    ladev->device.xway_play_write = adev_xway_play_write;
    ladev->device.xway_getfreebuffercount = adev_xway_getfreebuffercount;
    ladev->device.xway_rec_start = adev_xway_rec_start;
    ladev->device.xway_rec_stop = adev_xway_rec_stop;
    ladev->device.xway_rec_read = adev_xway_rec_read;

    // added for HIDL extend
    ladev->device.setup_parameters_callback = adev_setup_parameters_callback;
    ladev->device.set_audio_parameter_changed_callback = adev_set_audio_parameters_changed_callback;
    ladev->device.clear_audio_parameter_changed_callback = adev_clear_audio_parameters_changed_callback;
    pthread_mutex_lock(&gHwInstanceLock);
    ladev->hwif = createMTKAudioHardware();
Later in the function you can see ladev->hwif = createMTKAudioHardware(); hwif is the other member of legacy_audio_device, an AudioMTKHardwareInterface, which, as the name says, is the interface to the MTK lower layers. Tracing this call, what it returns is the AudioALSAHardware singleton. Looking back at the framework diagram,
AudioALSAHardware is the entry point of the MTK V3 audio HAL logic.
AudioMTKHardwareInterface *AudioMTKHardwareInterface::create() {
    /*
     * FIXME: This code needs to instantiate the correct audio device
     * interface. For now - we use compile-time switches.
     */
    AudioMTKHardwareInterface *hw = 0;
    char value[PROPERTY_VALUE_MAX];

    ALOGV("Creating MTK AudioHardware");
    //hw = new android::AudioALSAHardware();
    hw = android::AudioALSAHardware::GetInstance();

    return hw;

}

extern "C" AudioMTKHardwareInterface *createMTKAudioHardware() {
    /*
     * FIXME: This code needs to instantiate the correct audio device
     * interface. For now - we use compile-time switches.
     */
    return AudioMTKHardwareInterface::create();

}
- Important classes in V3
As shown above, this leads into AudioALSAHardware under the V3 directory. The main logic of the MTK audio HAL lives in the V3 directory; the legacy flow above is mostly a wrapper around V3 that matches the upper-layer interface definitions. The important V3 classes:
AudioALSAStreamManager is the entry point and manages the AudioALSAStreamIn and AudioALSAStreamOut instances below it.
AudioALSAStreamOut manages the AudioALSAPlaybackXXXX handlers.
AudioALSAStreamIn manages the AudioALSACaptureXXXX handlers.
The main functions of AudioALSAPlaybackXXXX and AudioALSACaptureXXXX are open(), read() and write(); they are responsible for reading/writing PCM buffers to ALSA in Linux.
The AudioALSASpeechXXXX classes handle the audio (speech) algorithm processing.
AudioALSAHardwareResourceManager is mainly used to open and close hardware devices such as the mic, the speaker, and so on.
AudioALSAVolumeController does not appear in the framework diagram below but is also frequently used; it handles volume control and volume compensation for the audio system, and the audio tuning parameters are applied here as well.
- V3 framework diagram
- V3 initialization
Next, let's use AudioALSAHardware as the entry point to see how the MTK HAL actually implements things.
Constructor:
AudioALSAHardware::AudioALSAHardware() :
    mAudioMessengerIPI(AudioMessengerIPI::getInstance()),
    mAudioSpeechEnhanceInfoInstance(AudioSpeechEnhanceInfo::getInstance()),
    mAudioAlsaDeviceInstance(AudioALSADeviceParser::getInstance()),
    mANCController(AudioALSAANCController::getInstance()),
......
    mStreamManager = AudioALSAStreamManager::getInstance();
    mSpeechPhoneCallController = AudioALSASpeechPhoneCallController::getInstance();
    mAudioALSAParamTunerInstance = AudioALSAParamTuner::getInstance();
......
    mAudioHalBtscoWB = (bool)get_uint32_from_mixctrl(PROPERTY_KEY_BTSCO_WB_ON);
    ALOGD("%s(), mAudioHalBtscoWB = %d", __FUNCTION__, mAudioHalBtscoWB);
    if (mAudioHalBtscoWB == true) {
        WCNChipController::GetInstance()->SetBTCurrentSamplingRateNumber(16000);
        AudioBTCVSDControl::getInstance()->BT_SCO_SetMode(true);
        mSpeechPhoneCallController->setBTMode(true);
    } else {
        WCNChipController::GetInstance()->SetBTCurrentSamplingRateNumber(8000);
        AudioBTCVSDControl::getInstance()->BT_SCO_SetMode(false);
        mSpeechPhoneCallController->setBTMode(false);
    }
......
    if (mixer_ctl_set_value(mixer_get_ctl_by_name(AudioALSADriverUtility::getInstance()->getMixer(),
                            "aaudio_ion"), 0, 1)) {
        ALOGW("%s(), aaudio_ion enable fail", __FUNCTION__);
    }
The constructor initializes a lot of things here. The abbreviated names don't reveal much by themselves, so let's skip them for now and look at each one when it is used. As mentioned before, the flow after adev_open is to open the output/input. Taking playback as the example, let's walk the flow: adev_open_output_stream
out->legacy_out = ladev->hwif->openOutputStreamWithFlags(devices, flags,
                                                         (int *) &config->format,
                                                         &config->channel_mask,
                                                         &config->sample_rate, &status);
We saw earlier that hwif was assigned this AudioALSAHardware, so the openOutputStreamWithFlags here calls AudioALSAHardware's function:
AudioMTKStreamOutInterface *AudioALSAHardware::openOutputStreamWithFlags(uint32_t devices,
                                                                         audio_output_flags_t flags,
                                                                         int *format,
                                                                         uint32_t *channels,
                                                                         uint32_t *sampleRate,
                                                                         status_t *status) {
    return mStreamManager->openOutputStream(devices, format, channels, sampleRate, status, flags);
}
This mStreamManager is the AudioALSAStreamManager initialized in the constructor.
AudioMTKStreamOutInterface *AudioALSAStreamManager::openOutputStream(
    uint32_t devices,
    int *format,
    uint32_t *channels,
    uint32_t *sampleRate,
    status_t *status,
    uint32_t output_flag) {
......
    AudioALSAStreamOut *pAudioALSAStreamOut = new AudioALSAStreamOut();
    pAudioALSAStreamOut->set(devices, format, channels, sampleRate, status, output_flag);
......
    pAudioALSAStreamOut->setIdentity(mStreamOutIndex);
    mStreamOutVector.add(mStreamOutIndex, pAudioALSAStreamOut);
The main work, as you can see, is new-ing an AudioALSAStreamOut and passing in these parameters:
uint32_t devices,      // the device
int *format,           // incoming data format, e.g. PCM_32BIT/PCM_16BIT/AAC/MP3; normally PCM, or an MP3/AAC source in offload mode
uint32_t *channels,    // channel count
uint32_t *sampleRate,  // sample rate
status_t *status,
uint32_t output_flag   // the output type: DIRECT, fast, deep_buffer, compress_offload, etc.
Flag table:
| AUDIO_OUTPUT_FLAG | Description |
|---|---|
| AUDIO_OUTPUT_FLAG_DIRECT | The stream is output directly to the audio device without software mixing; typically used for HDMI audio output |
| AUDIO_OUTPUT_FLAG_PRIMARY | The stream must go to the primary output device; typically used for ringtone-class sounds |
| AUDIO_OUTPUT_FLAG_FAST | The stream needs to reach the device with low latency; typically used for key tones, game sound effects and other latency-sensitive cases |
| AUDIO_OUTPUT_FLAG_DEEP_BUFFER | The stream tolerates larger latency; typically used for music, video playback and other latency-insensitive cases |
| AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | The stream has not been decoded in software and must be sent to a hardware decoder, which performs the decoding |
AudioALSAStreamOut.cpp:
status_t AudioALSAStreamOut::set(
......
    // device
    mStreamAttributeSource.output_devices = static_cast<audio_devices_t>(devices);
    mStreamAttributeSource.policyDevice = mStreamAttributeSource.output_devices;

    // check format
    if (*format == AUDIO_FORMAT_PCM_16_BIT ||
        *format == AUDIO_FORMAT_PCM_8_24_BIT ||
        *format == AUDIO_FORMAT_PCM_32_BIT) {
        mStreamAttributeSource.audio_format = static_cast<audio_format_t>(*format);
    } else if (*format == AUDIO_FORMAT_MP3) {
        ALOGD("%s(), format mp3", __FUNCTION__);
        mStreamAttributeSource.audio_format = static_cast<audio_format_t>(*format);
        mStreamAttributeSource.audio_offload_format = *format;
    } else if (*format == AUDIO_FORMAT_AAC_LC) {
        ALOGD("%s(), format aac", __FUNCTION__);
        mStreamAttributeSource.audio_format = static_cast<audio_format_t>(*format);
        mStreamAttributeSource.audio_offload_format = *format;
    } else {
        ALOGE("%s(), wrong format 0x%x, use 0x%x instead.", __FUNCTION__, *format, kDefaultOutputSourceFormat);

        *format = kDefaultOutputSourceFormat;
        *status = BAD_VALUE;
    }

    // check channel mask
    if (mStreamAttributeSource.output_devices == AUDIO_DEVICE_OUT_AUX_DIGITAL) { // HDMI
        if (*channels == AUDIO_CHANNEL_OUT_STEREO) {
            mStreamOutType = STREAM_OUT_HDMI_STEREO;

            mStreamAttributeSource.audio_channel_mask = *channels;
            mStreamAttributeSource.num_channels = popcount(*channels);

            mStreamOutHDMIStereo = this;
            mStreamOutHDMIStereoCount++;
            ALOGD("%s(), mStreamOutHDMIStereoCount =%d", __FUNCTION__, mStreamOutHDMIStereoCount);
        } else if (*channels == AUDIO_CHANNEL_OUT_5POINT1 ||
                   *channels == AUDIO_CHANNEL_OUT_7POINT1) {
            mStreamOutType = STREAM_OUT_HDMI_MULTI_CHANNEL;

            mStreamAttributeSource.audio_channel_mask = *channels;
            mStreamAttributeSource.num_channels = popcount(*channels);
        } else {
            ALOGE("%s(), wrong channels 0x%x, use 0x%x instead.", __FUNCTION__, *channels, kDefaultOutputSourceChannelMask);

            *channels = kDefaultOutputSourceChannelMask;
            *status = BAD_VALUE;
        }
    } else if (devices == AUDIO_DEVICE_OUT_SPEAKER_SAFE) { // Primary
        mStreamOutType = STREAM_OUT_VOICE_DL;
        mStreamAttributeSource.audio_channel_mask = *channels;
        mStreamAttributeSource.num_channels = popcount(*channels);
    } else if (*channels == kDefaultOutputSourceChannelMask || *channels == AUDIO_CHANNEL_OUT_MONO) { // Primary
        mStreamAttributeSource.audio_channel_mask = *channels;
        mStreamAttributeSource.num_channels = popcount(*channels);
    } else {
        ALOGE("%s(), wrong channels 0x%x, use 0x%x instead.", __FUNCTION__, *channels, kDefaultOutputSourceChannelMask);

        *channels = kDefaultOutputSourceChannelMask;
        *status = BAD_VALUE;
    }

    // check sample rate
    if (SampleRateSupport(*sampleRate) == true) {
        if ((mStreamAttributeSource.num_channels == 2) && (mStreamAttributeSource.output_devices == AUDIO_DEVICE_OUT_AUX_DIGITAL)) {
            mStreamAttributeSource.sample_rate = 44100;
        } else {
            mStreamAttributeSource.sample_rate = *sampleRate;
        }
        if ((mStreamOutType == STREAM_OUT_PRIMARY || mStreamOutType == STREAM_OUT_VOICE_DL) && ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0)) {
            AudioALSASampleRateController::getInstance()->setPrimaryStreamOutSampleRate(*sampleRate);
        }
    } else {
        ALOGE("%s(), wrong sampleRate %d, use %d instead.", __FUNCTION__, *sampleRate, kDefaultOutputSourceSampleRate);

        *sampleRate = kDefaultOutputSourceSampleRate;
        *status = BAD_VALUE;
    }

    mStreamAttributeSource.mAudioOutputFlags = (audio_output_flags_t)flags;
    collectPlatformOutputFlags(mStreamAttributeSource.mAudioOutputFlags);

    if (mStreamAttributeSource.mAudioOutputFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        mStreamAttributeSource.usePolicyDevice = true;
        char result[PROPERTY_VALUE_MAX];
        property_get(allow_offload_propty, result, "1");
        offloadflag = atoi(result);
        mStreamAttributeSource.offload_codec_info.disable_codec = offloadflag ? 0 : 1;
        ALOGD("%s(),mStreamAttributeSource.offload_codec_info.disable_codec =%d ", __FUNCTION__, mStreamAttributeSource.offload_codec_info.disable_codec);
    }
AudioALSAStreamOut's initialization, as you can see, mostly processes the incoming parameters and stores them into the mStreamAttributeSource object. That completes the HAL side of AudioPolicy's loadModule-and-openOutput flow. This corrected a notion I used to hold: it looks as though all outputs are loaded and opened at boot, and outputs map to devices, so wouldn't that cost power? Having read the corresponding HAL flow, it really is just initialization; actually operating the device requires calling the tinyalsa interfaces, which the flow above never does. So openOutput at the framework level only initializes the HAL and does not touch the hardware.
Playback
- Flow diagram
- Code walkthrough
From the framework's point of view, playback is generally:
AudioTrack::start
AudioFlinger->addTrack
AudioPolicyManager->startOutput
Threads: NormalSink->write
Policy's startOutput does not really do anything concrete, so for the HAL, starting playback comes down to that final NormalSink->write. If you have traced the framework flow, you know NormalSink here is the HIDL StreamOutHal, and the HIDL StreamOutHal is the audio_stream_out struct returned earlier by adev_open_output_stream. Recall the earlier assignment:
out->stream.write = out_write;
static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
                         size_t bytes) {
#ifdef AUDIO_HAL_PROFILE_ENTRY_FUNCTION
    AudioAutoTimeProfile _p(__func__, AUDIO_HAL_FUNCTION_WRITE_NS);
#endif
    struct legacy_stream_out *out =
        reinterpret_cast<struct legacy_stream_out *>(stream);
    return out->legacy_out->write(buffer, bytes);
}
We traced earlier that legacy_out is the return value of ladev->hwif->openOutputStreamWithFlags, i.e. the AudioALSAStreamOut above. So what gets called in the end is AudioALSAStreamOut's write function:
ssize_t AudioALSAStreamOut::write(const void *buffer, size_t bytes) {
......
    if (mStandby == true) {
        status = open();
        mPlaybackHandler->setFirstDataWriteFlag(true);
......
    mPlaybackHandler->preWriteOperation(buffer, bytes);
    outputSize = mPlaybackHandler->write(buffer, bytes);
AudioALSAStreamOut::open
status_t AudioALSAStreamOut::open() {
......
    mPlaybackHandler = mStreamManager->createPlaybackHandler(&mStreamAttributeSource);
......
    if (mPlaybackHandler) {
        // open audio hardware
        status = mPlaybackHandler->open();
AudioALSAStreamManager::createPlaybackHandler
if (isPhoneCallOpen() == true) {
    case AUDIO_DEVICE_OUT_SPEAKER_SAFE: {
        pPlaybackHandler = new AudioALSAPlaybackHandlerSpeakerProtection(stream_attribute_source);
......
    case AUDIO_DEVICE_OUT_AUX_DIGITAL:
        pPlaybackHandler = new AudioALSAPlaybackHandlerHDMI(stream_attribute_source);
} else {
    switch (stream_attribute_source->output_devices) {
    case AUDIO_DEVICE_OUT_BLUETOOTH_SCO:
    case AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET:
    case AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT: {
        if (!stream_attribute_source->isMixerOut) {
            pPlaybackHandler = new AudioALSAPlaybackHandlerMixer(stream_attribute_source);
        } else {
            if (WCNChipController::GetInstance()->IsBTMergeInterfaceSupported() == true) {
                pPlaybackHandler = new AudioALSAPlaybackHandlerBTSCO(stream_attribute_source);
            } else {
                pPlaybackHandler = new AudioALSAPlaybackHandlerBTCVSD(stream_attribute_source);
            }
        }
        break;
    }
    case AUDIO_DEVICE_OUT_AUX_DIGITAL: {
        pPlaybackHandler = new AudioALSAPlaybackHandlerHDMI(stream_attribute_source);
        break;
    }
    case AUDIO_DEVICE_OUT_FM: {
        pPlaybackHandler = new AudioALSAPlaybackHandlerFMTransmitter(stream_attribute_source);
        break;
    }
    case AUDIO_DEVICE_OUT_EARPIECE:
    case AUDIO_DEVICE_OUT_WIRED_HEADSET:
    case AUDIO_DEVICE_OUT_WIRED_HEADPHONE:
    case AUDIO_DEVICE_OUT_SPEAKER:
    default: {
        if (isBtSpkDevice(stream_attribute_source->output_devices)) {
            if (!stream_attribute_source->isMixerOut) {
                pPlaybackHandler = new AudioALSAPlaybackHandlerMixer(stream_attribute_source);
                break;
            }
        }

#if !defined(MTK_BASIC_PACKAGE)
        if (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD & stream_attribute_source->mAudioOutputFlags) {
            pPlaybackHandler = new AudioALSAPlaybackHandlerOffload(stream_attribute_source);
            break;
        } else
#endif
        {
#if defined(MTK_MAXIM_SPEAKER_SUPPORT) || (MTK_AUDIO_SMARTPASCP_SUPPORT)
            if (AudioSmartPaController::getInstance()->isSwDspSpkProtect(stream_attribute_source->output_devices)) {
                if (!stream_attribute_source->isMixerOut) {
                    pPlaybackHandler = new AudioALSAPlaybackHandlerMixer(stream_attribute_source);
                    break;
                }
#if defined(MTK_MAXIM_SPEAKER_SUPPORT)
                if (AudioSmartPaController::getInstance()->getSpkProtectType() == SPK_AP_DSP) {
                    pPlaybackHandler = new AudioALSAPlaybackHandlerSpeakerProtection(stream_attribute_source);
                    break;
                }
#elif defined(MTK_AUDIO_SMARTPASCP_SUPPORT)
                if (AudioSmartPaController::getInstance()->getSpkProtectType() == SPK_APSCP_DSP) {
                    pPlaybackHandler = new AudioALSAPlaybackHandlerSpeakerProtectionDsp(stream_attribute_source);
                    break;
                }
#endif
            } else
#endif // end of #if defined(MTK_MAXIM_SPEAKER_SUPPORT) || (MTK_AUDIO_SMARTPASCP_SUPPORT)
            {
#ifdef DOWNLINK_LOW_LATENCY
                if (AUDIO_OUTPUT_FLAG_FAST & stream_attribute_source->mAudioOutputFlags &&
                    !(AUDIO_OUTPUT_FLAG_PRIMARY & stream_attribute_source->mAudioOutputFlags)) {
                    pPlaybackHandler = new AudioALSAPlaybackHandlerFast(stream_attribute_source);
                    break;
                }
#if defined(MTK_AUDIO_AAUDIO_SUPPORT)
                else if (AUDIO_OUTPUT_FLAG_MMAP_NOIRQ & stream_attribute_source->mAudioOutputFlags) {
                    pPlaybackHandler = new AudioALSAPlaybackHandlerAAudio(stream_attribute_source);
                    break;
                }
#endif
                else
#endif
                {
                    if (AudioSmartPaController::getInstance()->isInCalibration()) {
                        pPlaybackHandler = new AudioALSAPlaybackHandlerNormal(stream_attribute_source);
                        break;
                    }
#if defined(MTK_AUDIODSP_SUPPORT)
                    if (AudioDspStreamManager::getInstance()->getDspOutHandlerEnable(stream_attribute_source->mAudioOutputFlags)) {
                        pPlaybackHandler = new AudioALSAPlaybackHandlerDsp(stream_attribute_source);
                    } else {
                        pPlaybackHandler = new AudioALSAPlaybackHandlerNormal(stream_attribute_source);
                    }
                    break;
#else
                    pPlaybackHandler = new AudioALSAPlaybackHandlerNormal(stream_attribute_source);
                    break;
#endif
This function creates a different AudioALSAPlaybackHandler depending on the device. Continuing with the normal case: the AudioALSAPlaybackHandlerNormal constructor just initializes a series of parameters and is not worth dwelling on, so we can go straight to the mPlaybackHandler->open() that AudioALSAStreamOut called earlier.
status_t AudioALSAPlaybackHandlerNormal::open() {
    // Depending on the flag, a different pcmindex, cardindex and playbackSeq are chosen
......
    if (isIsolatedDeepBuffer(mStreamAttributeSource->mAudioOutputFlags)) {

        ALOGD("%s(), isolated deep buffer keypcmDeepBuffer = %s", __FUNCTION__, keypcmDeepBuffer.string());

        pcmindex = AudioALSADeviceParser::getInstance()->GetPcmIndexByString(keypcmDeepBuffer);
        cardindex = AudioALSADeviceParser::getInstance()->GetCardIndexByString(keypcmDeepBuffer);

        // use playback 2
        if (keypcmDeepBuffer.compare(keypcmPlayback2) == 0) {
            playbackSeq = String8(AUDIO_CTL_PLAYBACK2);
        } else {
            playbackSeq = String8(AUDIO_CTL_PLAYBACK3);
        }

        if (mixer_ctl_set_value(mixer_get_ctl_by_name(mMixer, "deep_buffer_scenario"), 0, 1)) {
            ALOGW("%s(), deep_buffer_scenario enable fail", __FUNCTION__);
        }
    } else if (mStreamAttributeSource->mAudioOutputFlags & AUDIO_OUTPUT_FLAG_VOIP_RX) {
        pcmindex = AudioALSADeviceParser::getInstance()->GetPcmIndexByString(keypcmPlayback12);
        cardindex = AudioALSADeviceParser::getInstance()->GetCardIndexByString(keypcmPlayback12);
        playbackSeq = String8(AUDIO_CTL_PLAYBACK12);
    } else {
        pcmindex = AudioALSADeviceParser::getInstance()->GetPcmIndexByString(keypcmPlayback1);
        cardindex = AudioALSADeviceParser::getInstance()->GetCardIndexByString(keypcmPlayback1);
        playbackSeq = String8(AUDIO_CTL_PLAYBACK1);
    }

    mApTurnOnSequence = getPlaybackTurnOnSequence(TURN_ON_SEQUENCE_1, playbackSeq);
    mApTurnOnSequence2 = getPlaybackTurnOnSequence(TURN_ON_SEQUENCE_2, playbackSeq);
#if defined(MTK_AUDIODSP_SUPPORT)
    mApTurnOnSequence3 = getPlaybackTurnOnSequence(TURN_ON_SEQUENCE_3, playbackSeq);
    mApTurnOnSequenceDsp = getPlaybackTurnOnSequence(TURN_ON_SEQUENCE_DSP, playbackSeq);
#endif
    mHardwareResourceManager->setCustOutputDevTurnOnSeq(mStreamAttributeSource->output_devices,
                                                        mTurnOnSeqCustDev1, mTurnOnSeqCustDev2);

    mHardwareResourceManager->enableTurnOnSequence(mApTurnOnSequence);
    mHardwareResourceManager->enableTurnOnSequence(mApTurnOnSequence2);
    mHardwareResourceManager->enableTurnOnSequence(mApTurnOnSequenceDsp);
    mHardwareResourceManager->enableTurnOnSequence(mApTurnOnSequence3);
    mHardwareResourceManager->enableTurnOnSequence(mTurnOnSeqCustDev1);
    mHardwareResourceManager->enableTurnOnSequence(mTurnOnSeqCustDev2);
......
    //ListPcmDriver(cardindex, pcmindex);

    struct pcm_params *params;
    params = pcm_params_get(cardindex, pcmindex, PCM_OUT);
As far as I know, one flag corresponds to one path from the upper layer, and here the flag selects the PCM id and the sound-card id. Take keypcmPlayback1 as an example; it is defined in AudioALSADeviceString.h:
static String8 keypcmPlayback1 = String8("Playback_1");
The driver has a dai_link struct array that defines a PCM named Playback_1, so this is the name of an FE PCM. In other words, one flag maps to one FE PCM.
/* Front End DAI links */
{
    .name = "Playback_1",
    .stream_name = "Playback_1",
    .cpu_dai_name = "DL1",
    .codec_name = "snd-soc-dummy",
    .codec_dai_name = "snd-soc-dummy-dai",
    .trigger = {SND_SOC_DPCM_TRIGGER_PRE,
                SND_SOC_DPCM_TRIGGER_PRE},
    .dynamic = 1,
    .dpcm_playback = 1,
},
The two functions GetCardIndexByString and GetPcmIndexByString resolve this string to the pcm id and card id. The mapping is built when AudioALSADeviceParser initializes: it reads /proc/asound/pcm and stores the entries in an array. Finally, pcm_params_get, a tinyalsa interface, fetches the pcm_params for that card id and pcm id.
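For illustration, a rough sketch of that lookup (a simplification of what AudioALSADeviceParser does: the real parser caches all entries in a vector at init time instead of scanning per call; the line format in the comment is the usual ALSA one):

#include <stdio.h>
#include <string.h>

// /proc/asound/pcm lines look like:
//   00-00: Playback_1 : Playback_1 : playback 1
static int find_pcm_index_by_name(const char *name, unsigned *card, unsigned *pcm) {
    char line[256];
    FILE *fp = fopen("/proc/asound/pcm", "r");
    if (!fp) return -1;
    while (fgets(line, sizeof(line), fp)) {
        unsigned c, d;
        // each line starts with "<card>-<device>:" followed by the stream name
        if (sscanf(line, "%u-%u:", &c, &d) == 2 && strstr(line, name)) {
            *card = c;
            *pcm = d;
            fclose(fp);
            return 0;
        }
    }
    fclose(fp);
    return -1;
}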
Each flag branch also assigns the turn-on sequence, for example:
1. playbackSeq = String8(AUDIO_CTL_PLAYBACK1);
2. mApTurnOnSequence1 = getPlaybackTurnOnSequence(TURN_ON_SEQUENCE_1, playbackSeq);
3. mHardwareResourceManager->enableTurnOnSequence(mApTurnOnSequence1);
enableTurnOnSequence:
AudioALSAHardwareResourceManager::enableTurnOnSequence
    ret = mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(sequence);
AudioALSADeviceConfigManager::ApplyDeviceTurnonSequenceByName
    DeviceCtlDescriptor *descriptor = GetDeviceDescriptorbyname(DeviceName);
    String8 cltname = descriptor->mDeviceCltonVector.itemAt(count);
    String8 cltvalue = descriptor->mDeviceCltonVector.itemAt(count + 1);
    if (setMixerCtl(cltname, cltvalue)) {
setMixerCtl wraps a series of mixer operations (mixer_get_ctl_by_name, mixer_ctl_get_type, mixer_get_ctl, mixer_ctl_set_value, and so on), and ultimately the path is switched on through mixer_ctl_set_value. All of these are TinyMix interfaces, used mainly for operating path devices. Running tinymix in the device shell lists every ctl, i.e. all the devices.
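As a reference, what a single kctl write boils down to in tinyalsa terms (a minimal sketch; the card number and control name are examples from the XML above, and enum-typed ctls such as On/Off switches would use mixer_ctl_set_enum_by_string instead of an integer value):

#include <tinyalsa/asoundlib.h>

static int set_kctl(unsigned int card, const char *name, int value) {
    struct mixer *mixer = mixer_open(card);
    if (!mixer) return -1;
    struct mixer_ctl *ctl = mixer_get_ctl_by_name(mixer, name);
    // id 0 = first value of the control; boolean/int ctls take an integer
    int ret = ctl ? mixer_ctl_set_value(ctl, 0, value) : -1;
    mixer_close(mixer);
    return ret;
}

// e.g. set_kctl(0, "Audio_ADC_1_Switch", 1); is equivalent to: tinymix "Audio_ADC_1_Switch" 1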
Next comes the key part later in open():
unsigned int flag = PCM_MMAP | PCM_OUT | PCM_MONOTONIC;
openPcmDriverWithFlag(pcmindex, flag);
status_t AudioALSAPlaybackHandlerBase::openPcmDriverWithFlag(const unsigned int device, unsigned int flag) {
    mPcmflag = flag;
    mPcm = pcm_open(AudioALSADeviceParser::getInstance()->GetCardIndex(),
                    device, flag, &mConfig
......
    if (mPcmflag & PCM_MMAP) {
        audio_pcm_write_wrapper_fp = pcm_mmap_write;
    } else {
        audio_pcm_write_wrapper_fp = pcm_write;
    }
This is the crux of this step: pcm_open opens the PCM device, and the pcm_write-related function pointer is set up. With that, the open flow is complete.
Back to the earlier AudioALSAStreamOut write flow:
outputSize = mPlaybackHandler->write(buffer, bytes);
ssize_t AudioALSAPlaybackHandlerNormal::write(const void *buffer, size_t bytes) {
......
    void *pBufferAfterDcRemoval = NULL;
    uint32_t bytesAfterDcRemoval = 0;
    // DC removal before DRC
    doDcRemoval(pBuffer, bytes, &pBufferAfterDcRemoval, &bytesAfterDcRemoval);


    // stereo to mono for speaker
    doStereoToMonoConversionIfNeed(pBufferAfterDcRemoval, bytesAfterDcRemoval);

......
    // post processing (can handle both Q1P16 and Q1P31 by audio_format_t)
    void *pBufferAfterPostProcessing = NULL;
    uint32_t bytesAfterPostProcessing = 0;
    doPostProcessing(pBufferAfterDcRemoval, bytesAfterDcRemoval, &pBufferAfterPostProcessing, &bytesAfterPostProcessing);

    // SRC
    void *pBufferAfterBliSrc = NULL;
    uint32_t bytesAfterBliSrc = 0;
    doBliSrc(pBufferAfterPostProcessing, bytesAfterPostProcessing, &pBufferAfterBliSrc, &bytesAfterBliSrc);

    // bit conversion
    void *pBufferAfterBitConvertion = NULL;
    uint32_t bytesAfterBitConvertion = 0;
    doBitConversion(pBufferAfterBliSrc, bytesAfterBliSrc, &pBufferAfterBitConvertion, &bytesAfterBitConvertion);

    // data pending
    pBufferAfterPending = NULL;
    bytesAfterpending = 0;
    dodataPending(pBufferAfterBitConvertion, bytesAfterBitConvertion, &pBufferAfterPending, &bytesAfterpending);
......
    // pcm dump
    WritePcmDumpData(pBufferAfterPending, bytesAfterpending);
......
    // write data to pcm driver
    int retval = pcmWrite(mPcm, pBufferAfterPending, bytesAfterpending);
As seen above, the buffer goes through a series of processing stages before pcmWrite:
doDcRemoval: removes the DC component of the signal
doStereoToMonoConversionIfNeed: converts stereo to mono (for speaker)
doPostProcessing
doBliSrc: resampling
doBitConversion: bit-width conversion
dodataPending
doDcRemoval, doBliSrc and doBitConversion are defined in AudioALSAPlaybackHandlerBase.cpp. When AudioALSAPlaybackHandlerBase initializes, it loads /vendor/lib64/libaudiocomponentengine_vendor.so and resolves the corresponding handles via dlsym: createMtkDcRemove, createMtkAudioSrc, createMtkAudioBitConverter. When doDcRemoval, doBliSrc or doBitConversion is called, the corresponding object's process() performs the actual work.
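The loading pattern it describes looks roughly like this sketch (only the library path and symbol name come from the text above; the extern "C" export, the function-pointer type and the error handling are assumptions):

#include <dlfcn.h>
#include <stddef.h>

typedef void *(*create_fn_t)(void); // assumed factory signature

static void *load_bit_converter(void) {
    void *handle = dlopen("/vendor/lib64/libaudiocomponentengine_vendor.so", RTLD_NOW);
    if (!handle) return NULL;
    // resolve the factory symbol; the real code keeps the returned object
    // around and calls its process() inside doBitConversion()
    create_fn_t create = (create_fn_t)dlsym(handle, "createMtkAudioBitConverter");
    return create ? create() : NULL;
}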
doPostProcessing appears to do sound-effect processing, and doStereoToMonoConversionIfNeed converts stereo to mono; both deserve a proper debug session when they come up later. Since SRC may break alignment, dodataPending performs a 64-bit alignment. If the music becomes corrupted somewhere in this chain, you can manually add a dump between each stage to pinpoint where things go wrong and then analyze the cause.
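A dump helper of the kind suggested here can be as simple as the following sketch (the output path is arbitrary and must be writable by the HAL process):

#include <stdio.h>

static void dump_pcm(const char *tag, const void *buf, size_t bytes) {
    char path[128];
    snprintf(path, sizeof(path), "/data/vendor/audiohal/dump_%s.pcm", tag);
    FILE *fp = fopen(path, "ab");  // append so the whole stream accumulates
    if (!fp) return;
    fwrite(buf, 1, bytes, fp);
    fclose(fp);
}

// e.g. dump_pcm("after_src", pBufferAfterBliSrc, bytesAfterBliSrc);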
After this series of operations, pcmWrite writes the buffer to the driver. pcmWrite invokes the previously assigned audio_pcm_write_wrapper_fp, i.e. the tinyalsa interface pcm_write or pcm_mmap_write; mmap is the low-latency variant.
Device paths
The playback flow touched on opening the device and only skimmed the enableTurnOnSequence flow, which deserves a closer look. First, when AudioALSADeviceConfigManager initializes, it calls
int ret = LoadAudioConfig(AUDIO_DEVICE_EXT_CONFIG_FILE);
#define AUDIO_DEVICE_EXT_CONFIG_FILE "/vendor/etc/audio_device.xml"
A quick look at a few nodes of this file:
<path name="speaker_output" value="turnoff">
<kctl name="Speaker_Amp_Switch" value="Off" />
</path>
<path name="builtin_Mic_Mic2" value="turnon">
<kctl name="Audio_MicSource1_Setting" value="ADC1" />
<kctl name="Audio_ADC_1_Switch" value="On" />
<kctl name="Audio_ADC_2_Switch" value="On" />
<kctl name="Audio_Preamp1_Switch" value="IN_ADC3" />
<kctl name="Audio_Preamp2_Switch" value="IN_ADC3" />
</path>
A kctl is a kcontrol that tinymix can operate on directly, and a path is an actual device path. Back to LoadAudioConfig: this function parses the XML, stores every path into mDeviceVector, and stores each path's kctl children into that path's mDeviceCltonVector, mDeviceCltoffVector or mDeviceCltsettingVector, which literally mean on, off and value-setting.
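Conceptually the parse looks like the following sketch (tinyxml2 and the container types here are used purely for illustration; MTK's actual parser and DeviceCtlDescriptor layout differ):

#include <tinyxml2.h>
#include <string>
#include <vector>

struct DeviceCtl { std::string name, value; };
struct DevicePath { std::string name; std::vector<DeviceCtl> on, off; };

static bool load_audio_device_xml(const char *file, std::vector<DevicePath> &paths) {
    auto s = [](const char *c) { return std::string(c ? c : ""); };
    tinyxml2::XMLDocument doc;
    if (doc.LoadFile(file) != tinyxml2::XML_SUCCESS) return false;
    auto *root = doc.RootElement();
    if (!root) return false;
    for (auto *p = root->FirstChildElement("path"); p; p = p->NextSiblingElement("path")) {
        DevicePath path;
        path.name = s(p->Attribute("name"));
        bool turnon = s(p->Attribute("value")) == "turnon";  // "turnon" / "turnoff"
        for (auto *k = p->FirstChildElement("kctl"); k; k = k->NextSiblingElement("kctl")) {
            // each kctl is a kcontrol name/value pair, kept in the on or off list
            (turnon ? path.on : path.off).push_back({s(k->Attribute("name")), s(k->Attribute("value"))});
        }
        paths.push_back(path);
    }
    return true;
}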
Looking back, enableTurnOnSequence calls ApplyDeviceTurnonSequenceByName:
status_t AudioALSADeviceConfigManager::ApplyDeviceTurnonSequenceByName(const char *DeviceName) {
    // GetDeviceDescriptorbyname picks the path in mDeviceVector whose name matches DeviceName
    DeviceCtlDescriptor *descriptor = GetDeviceDescriptorbyname(DeviceName);
    if (descriptor == NULL) {
        ALOGE("%s DeviceName = %s descriptor == NULL", __FUNCTION__, DeviceName);
        return INVALID_OPERATION;
    }
    ALOGD("%s() DeviceName = %s descriptor->DeviceStatusCounte = %d", __FUNCTION__, DeviceName, descriptor->DeviceStatusCounter);
    if (descriptor->DeviceStatusCounter == 0) {
        for (size_t count = 0; count < descriptor->mDeviceCltonVector.size(); count += 2) {
            // the descriptor is the path; since we are turning on, mDeviceCltonVector holds the kctls from the XML
            String8 cltname = descriptor->mDeviceCltonVector.itemAt(count);
            String8 cltvalue = descriptor->mDeviceCltonVector.itemAt(count + 1);
            ALOGV("cltname = %s cltvalue = %s", cltname.string(), cltvalue.string());
#if defined(CUSTOM_AUDIO_SPEAKER_SEQ_SUPPORT)
            if ((strcmp(cltname.c_str(), "Receiver_Speaker_Switch") == 0)
                && (strcmp(DeviceName, AUDIO_DEVICE_EXT_SPEAKER) == 0)
                && (INTERVAL_EXTSPEAKER_AMP_SW > 0)) {
                ALOGD_IF(mLogEnable, "%s(), ext speaker on, AMP to Analog SW interval[%d]",
                         __FUNCTION__, INTERVAL_EXTSPEAKER_AMP_SW);
                usleep(INTERVAL_EXTSPEAKER_AMP_SW);
            }
#endif//CUSTOM_AUDIO_SPEAKER_SEQ_SUPPORT
            // pass the kctl name/value pair to setMixerCtl, i.e. mixer_ctl_set_value
            if (setMixerCtl(cltname, cltvalue)) {
                ALOGE("Error: %s cltname.string () = %s cltvalue.string () = %s", __FUNCTION__, cltname.string(), cltvalue.string());
                ASSERT(false);
            }
        }
    }
So calling ApplyDeviceTurnonSequenceByName with a path name turns on all of that path's kctl children; likewise, ApplyDeviceTurnoffSequenceByName turns them all off.
Now let's see where ApplyDeviceTurnonSequenceByName gets used the most:
AudioALSAHardwareResourceManager.cpp
668 ret = mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(sequence); in enableTurnOnSequence()
944 … mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_BUILTIN_MIC_MIC1_INVERSE); in startInputDevice()
946 … mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_BUILTIN_MIC_MIC1); in startInputDevice()
950 … mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_BUILTIN_MIC_MIC2_INVERSE); in startInputDevice()
952 … mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_BUILTIN_MIC_MIC2); in startInputDevice()
957 mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_BUILTIN_MIC_MIC3); in startInputDevice()
961 mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_BUILTIN_MIC_MIC4); in startInputDevice()
965 mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_BUILTIN_MIC_MIC5); in startInputDevice()
969 … mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_BUILTIN_TRIPLE_MIC); in startInputDevice()
972 … mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(getStartInputDeviceForDualMic()); in startInputDevice()
975 … mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_BUILTIN_SINGLE_MIC); in startInputDevice()
992 … mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_BUILTIN_BACK_MIC_INVERSE); in startInputDevice()
994 … mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_BUILTIN_BACK_MIC); in startInputDevice()
1002 mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_HEADSET_MIC); in startInputDevice()
1327 mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_SIDETONE); in EnableSideToneFilter()
1644 mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_2IN1_SPEAKER); in OpenReceiverPath()
1646 mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_RECEIVER); in OpenReceiverPath()
1691 mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_HIFI_DAC); in OpenHeadphonePath()
1693 mDeviceConfigManager->ApplyDeviceTurnonSequenceByName(AUDIO_DEVICE_HEADPHONE); in OpenHeadphonePath()
..... (omitted)
As you can see, most call sites are in AudioALSAHardwareResourceManager.cpp, which is exactly the class that controls opening and closing device paths (the turnoff calls are not listed here).
Some audio-leak or noise problems can be solved by adjusting the on/off ordering of these paths.
For pop noise during path switching, you can also consider fading the volume in/out after calling these functions.
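The fade idea can be sketched as a linear ramp applied to the first buffer written after the switch (a sketch only: 16-bit interleaved PCM is assumed, and in practice this would live in the playback handler's gain handling):

#include <stdint.h>
#include <stddef.h>

static void fade_in_s16(int16_t *pcm, size_t frames, size_t channels) {
    // ramp gain linearly from 0 to 1 across the buffer
    for (size_t f = 0; f < frames; f++) {
        for (size_t c = 0; c < channels; c++) {
            pcm[f * channels + c] =
                (int16_t)((int32_t)pcm[f * channels + c] * (int32_t)f / (int32_t)frames);
        }
    }
}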
Volume gain
On the MTK platform, volume gain control lives in AudioALSAVolumeController.cpp, where you will find the familiar setMasterVolume.
status_t AudioALSAVolumeController::setMasterVolume(float v, audio_mode_t mode, uint32_t devices)
    int MapVolume = AudioALSAVolumeController::logToLinear(v);
setMasterVolume selects a different gain configuration according to mode and devices.
case (AUDIO_DEVICE_OUT_EARPIECE): {
    ApplyAudioGain(MapVolume, mode, Audio_Earpiece);
    break;
}
void AudioALSAVolumeController::ApplyAudioGain(int Gain, uint32_t mode, uint32_t device) {
    int DegradedBGain = mVolumeRange[device];
    DegradedBGain = DegradedBGain + (DEVICE_VOLUME_RANGE - DegradedBGain) * ((VOLUME_MAPPING_STEP - Gain) / VOLUME_MAPPING_STEP);
    ALOGD("ApplyAudioGain DegradedBGain = %d mVolumeRange[mode] = %d ", DegradedBGain, mVolumeRange[device]);
    if (device == Audio_Earpiece || device == Audio_DualMode_Earpiece || device == Sipcall_Earpiece) {
        SetReceiverGain(DegradedBGain);
    } else if ((device == Audio_Headset) || (device == Audio_Headphone) || (device == Sipcall_Headset) || (device == Sipcall_Headphone)) {
        ALOGD("ApplyAudioGain Audio_Headset\n");
#ifdef USE_PREV_DESINGED //no headphone impedance
        if (GetHeadPhoneImpedanceEnable() == true) {
            DegradedBGain += MapHeadPhoneImpedance();
            ALOGD("GetHeadPhoneImpedanceEnable DegradedBGain = %d ", DegradedBGain);

            SetHeadPhoneLGain(DegradedBGain);
            SetHeadPhoneRGain(DegradedBGain);
        } else
#endif
        {
            SetHeadPhoneLGain(DegradedBGain);
            SetHeadPhoneRGain(DegradedBGain);
        }
    } else if ((device == Audio_DualMode_Headset) || (device == Audio_DualMode_Headphone)) {
        SetHeadPhoneLGain(DegradedBGain);
        SetHeadPhoneRGain(DegradedBGain);
    } else if (device == Audio_Speaker) {
        ALOGD("ApplyAudioGain Audio_Speaker\n");
        if (DegradedBGain >= (_countof(DL_PGA_LINEOUT_GAIN) - 1)) {
            DegradedBGain = _countof(DL_PGA_LINEOUT_GAIN) - 1;
        }
        SetLinoutLGain(DegradedBGain);
        SetLinoutRGain(DegradedBGain);
    }
After the gain is computed, it is applied via SetHeadPhoneLGain, SetReceiverGain, SetLinoutRGain, etc.
void AudioALSAVolumeController::SetReceiverGain(int DegradedBGain) {
    enum mixer_ctl_type type;
    ctl = mixer_get_ctl_by_name(mMixer, "Handset_PGA_GAIN");
    type = mixer_ctl_get_type(ctl);
    if (mixer_ctl_set_enum_by_string(ctl, DL_PGA_Handset_GAIN[index])) {
As you can see, the value is pushed down through tinymix. The speaker uses ApplyExtAmpHeadPhoneGain, but in the end that is also set through tinymix.
void AudioALSAVolumeController::ApplyExtAmpHeadPhoneGain(int Gain, uint32_t mode, uint32_t device) {
......
    SetLinoutLGain(DegradedBGain);
    SetLinoutRGain(DegradedBGain);
QCOM Audio HAL
Audio block diagram
Concepts
- Front End PCMs: the audio front end; one front end corresponds to one PCM device
FE PCMs:
deep_buffer
low_latency
multi_channel
compress_offload
audio_record
usb_audio
a2dp_audio
voice_call
- Back End DAIs: the audio back end; one back end corresponds to one DAI interface, and one FE PCM can connect to one or more BE DAIs
BE DAI:
SLIM_BUS
Aux_PCM
Primary_MI2S
Secondary_MI2S
Tertiary_MI2S
Quaternary_MI2S
- Audio Device: headset, speaker, earpiece, mic, bt, modem, etc. Different devices may connect to different DAI interfaces, or to the same one (in the figure above, Speaker and Earpiece both connect to DAI1)
- Usecase: loosely speaking, an audio scenario, corresponding to an audio front end, for example:
  - low_latency: key tones, touch sounds, game background sounds and other low-latency playback
  - deep_buffer: music, video and other latency-tolerant playback
  - compress_offload: playback of mp3, flac, aac and similar sources that need no software decoding; the data is sent straight to the hardware decoder (aDSP), which does the decoding
  - record: normal recording
  - record_low_latency: low-latency recording
  - voice_call: voice-call scenario
  - voip_call: VoIP-call scenario
Audio path connection
Path connection flow:
FE_PCMs <=> BE_DAIs <=> Devices
- Opening the FE PCM
int start_output_stream(struct stream_out *out)
{
    int ret = 0;
    struct audio_usecase *uc_info;
    struct audio_device *adev = out->dev;
    // find the FE PCM id for this usecase
    out->pcm_device_id = platform_get_pcm_device_id(out->usecase, PCM_PLAYBACK);
    if (out->pcm_device_id < 0) {
        ALOGE("%s: Invalid PCM device id(%d) for the usecase(%d)",
              __func__, out->pcm_device_id, out->usecase);
        ret = -EINVAL;
        goto error_open;
    }
    // create a new usecase instance for this audio stream
    uc_info = (struct audio_usecase *)calloc(1, sizeof(struct audio_usecase));
    if (!uc_info) {
        ret = -ENOMEM;
        goto error_config;
    }
    uc_info->id = out->usecase;      // the stream's usecase
    uc_info->type = PCM_PLAYBACK;    // the stream's direction
    uc_info->stream.out = out;
    uc_info->devices = out->devices; // the stream's initial devices
    uc_info->in_snd_device = SND_DEVICE_NONE;
    uc_info->out_snd_device = SND_DEVICE_NONE;
    list_add_tail(&adev->usecase_list, &uc_info->list); // add the new usecase instance to the list
    // select the audio device for this stream according to usecase and out->devices
    select_devices(adev, out->usecase);
    ALOGV("%s: Opening PCM device card_id(%d) device_id(%d) format(%#x)",
          __func__, adev->snd_card, out->pcm_device_id, out->config.format);
    if (!is_offload_usecase(out->usecase)) {
        unsigned int flags = PCM_OUT;
        unsigned int pcm_open_retry_count = 0;
        if (out->usecase == USECASE_AUDIO_PLAYBACK_AFE_PROXY) {
            flags |= PCM_MMAP | PCM_NOIRQ;
            pcm_open_retry_count = PROXY_OPEN_RETRY_COUNT;
        } else if (out->realtime) {
            flags |= PCM_MMAP | PCM_NOIRQ;
        } else
            flags |= PCM_MONOTONIC;
        while (1) {
            // open the FE PCM
            out->pcm = pcm_open(adev->snd_card, out->pcm_device_id,
                                flags, &out->config);
            if (out->pcm == NULL || !pcm_is_ready(out->pcm)) {
                ALOGE("%s: %s", __func__, pcm_get_error(out->pcm));
                if (out->pcm != NULL) {
                    pcm_close(out->pcm);
                    out->pcm = NULL;
                }
                if (pcm_open_retry_count-- == 0) {
                    ret = -EIO;
                    goto error_open;
                }
                usleep(PROXY_OPEN_WAIT_TIME * 1000);
                continue;
            }
            break;
        }
- BE_DAIs
In mixer_paths.xml you can see the usecase-related paths:
<path name="deep-buffer-playback speaker">
<ctl name="QUAT_MI2S_RX Audio Mixer MultiMedia1" value="1" />
</path>
<path name="deep-buffer-playback headphones">
<ctl name="TERT_MI2S_RX Audio Mixer MultiMedia1" value="1" />
</path>
<path name="deep-buffer-playback earphones">
<ctl name="QUAT_MI2S_RX Audio Mixer MultiMedia1" value="1" />
</path>
These paths are really the routes connecting a usecase and a device. For example, "deep-buffer-playback speaker" is the route between the deep-buffer-playback FE PCM and the speaker device: enabling "deep-buffer-playback speaker" connects the deep-buffer-playback FE PCM to the speaker device, and disabling it breaks that connection.
As noted before, "a device connects to exactly one BE DAI, so once the device is determined, its BE DAI is determined too." These route paths therefore implicitly contain the BE DAI connection: an FE PCM does not go straight to the device; it first connects to a BE DAI, and the BE DAI then connects to the device. This helps in understanding the routing controls, which address the connection between an FE PCM and a BE DAI. Playback routing controls are generally named "$BE_DAI Audio Mixer $FE_PCM", and capture routing controls are generally named "$FE_PCM Audio Mixer $BE_DAI", which makes them easy to tell apart.
例如 “deep-buffer-playback speaker” 通路中的路由控制元件:
<ctl name="QUAT_MI2S_RX Audio Mixer MultiMedia1" value="1" />
MultiMedia1:deep_buffer usacase 對應的 FE PCM
QUAT_MI2S_RX:speaker device 所連線的 BE DAI
Audio Mixer:表示 DSP 路由功能
value:1 表示連線,0 表示斷開連線
這個ctl的意思是:把 MultiMedia1 PCM 與 QUAT_MI2S_RX DAI 連線起來。並沒有指明 QUAT_MI2S_RX DAI 與 speaker device 之間的連線,因為 BE DAIs 與 Devices 之間並不需要路由控制元件,如之前所強調”device 連線著唯一的 BE DAI,確定了 device 也就能確定所連線的 BE DAI“。
路由操作函式是 enable_audio_route()/disable_audio_route(),這兩個函式名稱很貼合,控制 FE PCMs 與 BE DAIs 的連線或斷開。
程式碼流程很簡單,把 usecase 和 device 拼接起來就是路由的 path name 了,然後再呼叫 audio_route_apply_and_update_path() 來設定路由通路:
const char * const use_case_table[AUDIO_USECASE_MAX] = {
    [USECASE_AUDIO_PLAYBACK_DEEP_BUFFER] = "deep-buffer-playback",
    [USECASE_AUDIO_PLAYBACK_LOW_LATENCY] = "low-latency-playback",
    //...
};
const char * const backend_tag_table[SND_DEVICE_MAX] = {
    [SND_DEVICE_OUT_HANDSET] = "earphones",
    [SND_DEVICE_OUT_SPEAKER] = "speaker",
    [SND_DEVICE_OUT_HEADPHONES] = "headphones",
    //...
};
void platform_add_backend_name(char *mixer_path, snd_device_t snd_device,
                               struct audio_usecase *usecase)
{
    if ((snd_device < SND_DEVICE_MIN) || (snd_device >= SND_DEVICE_MAX)) {
        ALOGE("%s: Invalid snd_device = %d", __func__, snd_device);
        return;
    }
    const char * suffix = backend_tag_table[snd_device];
    if (suffix != NULL) {
        strlcat(mixer_path, " ", MIXER_PATH_MAX_LENGTH);
        strlcat(mixer_path, suffix, MIXER_PATH_MAX_LENGTH);
    }
}
int enable_audio_route(struct audio_device *adev,
                       struct audio_usecase *usecase)
{
    snd_device_t snd_device;
    char mixer_path[MIXER_PATH_MAX_LENGTH];
    if (usecase == NULL)
        return -EINVAL;
    ALOGV("%s: enter: usecase(%d)", __func__, usecase->id);
    if (usecase->type == PCM_CAPTURE)
        snd_device = usecase->in_snd_device;
    else
        snd_device = usecase->out_snd_device;
    strlcpy(mixer_path, use_case_table[usecase->id], MIXER_PATH_MAX_LENGTH);
    platform_add_backend_name(mixer_path, snd_device, usecase);
    ALOGD("%s: apply mixer and update path: %s", __func__, mixer_path);
    audio_route_apply_and_update_path(adev->audio_route, mixer_path);
    return 0;
}
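enable_audio_route() ultimately leans on the audio_route library (system/media/audio_route in AOSP), which parses mixer_paths.xml and applies a named path. A minimal standalone sketch (the card number and file path are examples):

#include <audio_route/audio_route.h>

static int apply_path_example(void) {
    struct audio_route *ar = audio_route_init(0 /* card */, "/vendor/etc/mixer_paths.xml");
    if (!ar) return -1;
    // sets every ctl listed under <path name="deep-buffer-playback speaker">
    int ret = audio_route_apply_and_update_path(ar, "deep-buffer-playback speaker");
    audio_route_free(ar);
    return ret;
}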
TinyAlsa interface usage
The HAL talks to ALSA through the TinyALSA library, which is important. TinyALSA is a lightweight wrapper library that re-packages the ALSA interfaces and simplifies working with ALSA; the source lives in /external/tinyalsa. This library bridges the HAL and Linux and is the key link to the driver.
Building the tinyalsa companion tools
Code path: external/tinyalsa/
Building it produces tools such as tinyplay, tinymix and tinycap.
tinymix: inspect and configure the mixer
tinyplay: play audio
tinycap: record audio
tinyalsa commands
- Tinymix: view and change ctls
tinymix with no arguments shows the current configuration
tinymix [ctl] [value] sets a ctl value
- Tinyplay: play music
tinyplay /sdcard/0_16.wav
- Tinycap: record
tinycap /sdcard/test.wav
API
Main APIs:
Pcm:
struct pcm *pcm_open(unsigned int card, unsigned int device, unsigned int flags, struct pcm_config *config);
int pcm_write(struct pcm *pcm, const void *data, unsigned int count); // returns 0 on success
int pcm_read(struct pcm *pcm, void *data, unsigned int count);        // returns 0 on success
int pcm_close(struct pcm *pcm);
Mixer (signatures as in external/tinyalsa's mixer interface):
struct mixer *mixer_open(unsigned int card);
void mixer_close(struct mixer *mixer);
struct mixer_ctl *mixer_get_ctl_by_name(struct mixer *mixer, const char *name);
int mixer_ctl_get_value(struct mixer_ctl *ctl, unsigned int id);
int mixer_ctl_set_value(struct mixer_ctl *ctl, unsigned int id, int value);
int mixer_ctl_set_enum_by_string(struct mixer_ctl *ctl, const char *string);
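Tying these together, a minimal tinyplay-style sketch (card/device numbers, the file path and the PCM config are example assumptions; a real tool would parse the WAV header instead of assuming raw 16-bit stereo PCM):

#include <stdio.h>
#include <tinyalsa/asoundlib.h>

int main(void) {
    struct pcm_config config = {
        .channels = 2,
        .rate = 48000,
        .period_size = 1024,
        .period_count = 4,
        .format = PCM_FORMAT_S16_LE,
    };
    struct pcm *pcm = pcm_open(0 /* card */, 0 /* device */, PCM_OUT, &config);
    if (!pcm || !pcm_is_ready(pcm)) return 1;

    char buffer[4096];
    FILE *fp = fopen("/sdcard/test.raw", "rb");
    size_t n;
    while (fp && (n = fread(buffer, 1, sizeof(buffer), fp)) > 0) {
        if (pcm_write(pcm, buffer, n))  // returns 0 on success
            break;
    }
    if (fp) fclose(fp);
    pcm_close(pcm);
    return 0;
}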