
Audio Notes: AudioTrack


        Sound can be played back with either MediaPlayer or AudioTrack; both expose Java APIs to application developers. Although both play audio, they differ in important ways. The biggest difference is that MediaPlayer can play audio files in many formats, such as MP3, AAC, WAV, OGG and MIDI, because it creates the corresponding audio decoder in the framework layer. AudioTrack, by contrast, only accepts already-decoded PCM streams; as far as files are concerned it essentially only handles WAV, since most WAV files simply wrap a PCM stream. AudioTrack creates no decoder, so it can only play WAV files that need no decoding. The two are nevertheless closely related: in the framework layer MediaPlayer still creates an AudioTrack, hands the decoded PCM stream to it, and the AudioTrack passes the data on to AudioFlinger for mixing before it reaches the hardware. In that sense MediaPlayer contains an AudioTrack. An example of playing audio with AudioTrack:

/*cts/tests/tests/media/src/android/media/cts*/

    public void testSetStereoVolumeMax() throws Exception {
        final String TEST_NAME = "testSetStereoVolumeMax";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;

        // -------- initialization --------------

        /* Step 1. */
        // Compute the buffer size to allocate for the AudioTrack from the HAL buffer size;
        // this buffer is later handed to the track's mCblk for data transfer.
        int minBuffSize = AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT);

        /* Step 2. */
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF,
                TEST_FORMAT, 2 * minBuffSize, TEST_MODE);
        byte data[] = new byte[minBuffSize];

        // -------- test --------------

        track.write(data, OFFSET_DEFAULT, data.length);
        track.write(data, OFFSET_DEFAULT, data.length);
        track.play();
        float maxVol = AudioTrack.getMaxVolume();
        assertTrue(TEST_NAME, track.setStereoVolume(maxVol, maxVol) == AudioTrack.SUCCESS);

        // -------- tear down --------------

        track.release();
    }
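
The test above uses MODE_STREAM. For contrast, here is a minimal MODE_STATIC sketch (hypothetical parameters, error handling omitted, method name is mine); as the native code analysed below shows, a static track is backed by shared memory allocated at construction time, and the whole clip is written once before play():

    // Minimal MODE_STATIC sketch: write the entire PCM clip, then play it.
    void playStaticClip(byte[] pcmData) {
        final int sampleRate = 22050;
        AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate,
                AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT,
                pcmData.length, AudioTrack.MODE_STATIC);
        track.write(pcmData, 0, pcmData.length); // fills the shared-memory buffer once
        track.play();
        // ... when the clip has finished:
        // track.stop();
        // track.release();
    }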

  // The audio session ID defaults to 0
   public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode)
    throws IllegalArgumentException {
        this(streamType, sampleRateInHz, channelConfig, audioFormat,
                bufferSizeInBytes, mode, 0 /*session*/);
    }

public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
            int bufferSizeInBytes, int mode, int sessionId)
    throws IllegalArgumentException {
        // mState already == STATE_UNINITIALIZED

        // remember which looper is associated with the AudioTrack instantiation
        Looper looper;
        if ((looper = Looper.myLooper()) == null) {
            looper = Looper.getMainLooper();
        }
        // Keep the Looper of the thread that created this AudioTrack so that messages can be posted to it later.
        mInitializationLooper = looper;
        /*
         * Parameter checks:
         * 1. verify that streamType is legal and store it in mStreamType;
         * 2. verify that sampleRateInHz is between 4000 and 48000 and store it in mSampleRate;
         * 3. set mChannels, normalizing the channel config to CHANNEL_OUT_MONO (mono)
         *    or CHANNEL_OUT_STEREO (stereo);
         * 4. set mAudioFormat:
         *        ENCODING_PCM_16BIT, ENCODING_DEFAULT ---> ENCODING_PCM_16BIT
         *        ENCODING_PCM_8BIT                    ---> ENCODING_PCM_8BIT
         * 5. set mDataLoadMode to MODE_STREAM or MODE_STATIC.
         */
        audioParamCheck(streamType, sampleRateInHz, channelConfig, audioFormat, mode);

        /*
         * Compute the frame size in bytes from the sample format and derive the number of frames
         * in the buffer (bufferSizeInBytes / frameSize); the buffer size must be an integer
         * multiple of the frame size.
         */
        audioBuffSizeCheck(bufferSizeInBytes);

        if (sessionId < 0) {
            throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
        }

        int[] session = new int[1];
        session[0] = sessionId;
        // native initialization
        int initResult = native_setup(new WeakReference<AudioTrack>(this),
                mStreamType, mSampleRate, mChannels, mAudioFormat,
                mNativeBufferSizeInBytes, mDataLoadMode, session);
        if (initResult != SUCCESS) {
            loge("Error code "+initResult+" when initializing AudioTrack.");
            return; // with mState == STATE_UNINITIALIZED
        }
        // Update with the audio session ID returned by the native layer
        mSessionId = session[0];

        if (mDataLoadMode == MODE_STATIC) {
            mState = STATE_NO_STATIC_DATA;
        } else {
            mState = STATE_INITIALIZED;
        }
    }
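
At the application level, the outcome of these checks and of native_setup() is visible through getState(); a small sketch (illustrative only, method name is mine):

    // Sketch: verify that construction succeeded before calling play().
    // MODE_STREAM tracks go straight to STATE_INITIALIZED; MODE_STATIC tracks stay in
    // STATE_NO_STATIC_DATA until write() has filled the shared buffer.
    boolean isUsable(AudioTrack track) {
        int state = track.getState();
        return state == AudioTrack.STATE_INITIALIZED
                || state == AudioTrack.STATE_NO_STATIC_DATA;
    }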

static jint
android_media_AudioTrack_native_setup(JNIEnv *env, jobject thiz, jobject weak_this,
        jint streamType, jint sampleRateInHertz, jint javaChannelMask,
        jint audioFormat, jint buffSizeInBytes, jint memoryMode, jintArray jSession)
{
    ALOGV("sampleRate=%d, audioFormat(from Java)=%d, channel mask=%x, buffSize=%d",
        sampleRateInHertz, audioFormat, javaChannelMask, buffSizeInBytes);
    uint32_t afSampleRate;
    size_t afFrameCount;
    // Query, via AudioSystem, the frame count and sample rate that AudioPolicyService reports for
    // this stream type. Note that the getOutputXxx() helpers are fairly involved; taking
    // getOutputFrameCount() as an example, the steps are roughly:
    // 1. map the streamType to a strategy,
    // 2. map the strategy to a device,
    // 3. map the device to an output,
    // 4. return that output's frame count (the number of frames in one hardware buffer).
    if (AudioSystem::getOutputFrameCount(&afFrameCount, (audio_stream_type_t) streamType) != NO_ERROR) {
        ALOGE("Error creating AudioTrack: Could not get AudioSystem frame count.");
        return (jint) AUDIOTRACK_ERROR_SETUP_AUDIOSYSTEM;
    }
    if (AudioSystem::getOutputSamplingRate(&afSampleRate, (audio_stream_type_t) streamType) != NO_ERROR) {
        ALOGE("Error creating AudioTrack: Could not get AudioSystem sampling rate.");
        return (jint) AUDIOTRACK_ERROR_SETUP_AUDIOSYSTEM;
    }

    // Java channel masks don't map directly to the native definition, but it's a simple shift
    // to skip the two deprecated channel configurations "default" and "mono".
    // The native layer has no "default" and "mono" channel-configuration bits, so the Java mask must be shifted.
    uint32_t nativeChannelMask = ((uint32_t)javaChannelMask) >> 2;

    if (!audio_is_output_channel(nativeChannelMask)) {
        ALOGE("Error creating AudioTrack: invalid channel mask %#x.", javaChannelMask);
        return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;
    }
    // Count the bits that are set; each bit represents one channel.
    int nbChannels = popcount(nativeChannelMask);

    // check the stream type
    audio_stream_type_t atStreamType;
    switch (streamType) {
    case AUDIO_STREAM_VOICE_CALL:
    case AUDIO_STREAM_SYSTEM:
    case AUDIO_STREAM_RING:
    case AUDIO_STREAM_MUSIC:
    case AUDIO_STREAM_ALARM:
    case AUDIO_STREAM_NOTIFICATION:
    case AUDIO_STREAM_BLUETOOTH_SCO:
    case AUDIO_STREAM_DTMF:
        atStreamType = (audio_stream_type_t) streamType;
        break;
    default:
        ALOGE("Error creating AudioTrack: unknown stream type.");
        return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDSTREAMTYPE;
    }

    // check the format.
    // This function was called from Java, so we compare the format against the Java constants
    if ((audioFormat != ENCODING_PCM_16BIT) && (audioFormat != ENCODING_PCM_8BIT)) {
        ALOGE("Error creating AudioTrack: unsupported audio format.");
        return (jint) AUDIOTRACK_ERROR_SETUP_INVALIDFORMAT;
    }

    // for the moment 8bitPCM in MODE_STATIC is not supported natively in the AudioTrack C++ class
    // so we declare everything as 16bitPCM, the 8->16bit conversion for MODE_STATIC will be handled
    // in android_media_AudioTrack_native_write_byte()
    // In MODE_STATIC the native AudioTrack only supports 16-bit PCM, so promote 8-bit data to
    // 16-bit here (the format becomes 16-bit and the buffer size doubles); the write path has to
    // perform the matching 8->16-bit conversion later.
    if ((audioFormat == ENCODING_PCM_8BIT)
        && (memoryMode == MODE_STATIC)) {
        ALOGV("android_media_AudioTrack_native_setup(): requesting MODE_STATIC for 8bit \
            buff size of %dbytes, switching to 16bit, buff size of %dbytes",
            buffSizeInBytes, 2*buffSizeInBytes);
        audioFormat = ENCODING_PCM_16BIT;
        // we will need twice the memory to store the data
        buffSizeInBytes *= 2;
    }
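    // (Illustration, not in the original source: the later 8->16-bit expansion in
    // android_media_AudioTrack_native_write_byte() maps each unsigned 8-bit sample u to a signed
    // 16-bit sample roughly as ((u - 0x80) << 8), which is why twice the memory is reserved here.)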

    // Derive the bytes per sample from the sample format, then compute the number of frames in the buffer.
    int bytesPerSample = audioFormat == ENCODING_PCM_16BIT ? 2 : 1;
    audio_format_t format = audioFormat == ENCODING_PCM_16BIT ?
            AUDIO_FORMAT_PCM_16_BIT : AUDIO_FORMAT_PCM_8_BIT;
    int frameCount = buffSizeInBytes / (nbChannels * bytesPerSample);

    jclass clazz = env->GetObjectClass(thiz);
    if (clazz == NULL) {
        ALOGE("Can't find %s when setting up callback.", kClassPathName);
        return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
    }

    if (jSession == NULL) {
        ALOGE("Error creating AudioTrack: invalid session ID pointer");
        return (jint) AUDIOTRACK_ERROR;
    }

    jint* nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
        ALOGE("Error creating AudioTrack: Error retrieving session id pointer");
        return (jint) AUDIOTRACK_ERROR;
    }
    int sessionId = nSession[0];
    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    nSession = NULL;

    // create the native AudioTrack object
    sp<AudioTrack> lpTrack = new AudioTrack();

    // initialize the callback information:
    // this data will be passed with every AudioTrack callback
    AudioTrackJniStorage* lpJniStorage = new AudioTrackJniStorage();
    lpJniStorage->mStreamType = atStreamType;
    lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
    // we use a weak reference so the AudioTrack object can be garbage collected.
    lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
    lpJniStorage->mCallbackData.busy = false;

    // initialize the native AudioTrack object
    switch (memoryMode) {
    case MODE_STREAM:
        // MODE_STREAM: no shared memory is requested here; later the Track object obtains heap
        // space for mCblk from the Client object inside AudioFlinger and uses it for data transfer.
        lpTrack->set(
            atStreamType,// stream type
            sampleRateInHertz,
            format,// word length, PCM
            nativeChannelMask,
            frameCount,
            AUDIO_OUTPUT_FLAG_NONE,
            audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)
            0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
            0,// shared mem
            true,// thread can call Java
            sessionId);// audio session ID
        break;

    case MODE_STATIC:
        // MODE_STATIC: allocate a shared-memory region of buffSizeInBytes for this AudioTrack;
        // the buffer is later attached to the track's mCblk for data transfer.
        if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
            ALOGE("Error creating AudioTrack in static mode: error creating mem heap base");
            goto native_init_failure;
        }

        lpTrack->set(
            atStreamType,// stream type
            sampleRateInHertz,
            format,// word length, PCM
            nativeChannelMask,
            frameCount,
            AUDIO_OUTPUT_FLAG_NONE,
            audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)
            0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
            lpJniStorage->mMemBase,// shared mem
            true,// thread can call Java
            sessionId);// audio session ID
        break;

    default:
        ALOGE("Unknown mode %d", memoryMode);
        goto native_init_failure;
    }

    if (lpTrack->initCheck() != NO_ERROR) {
        ALOGE("Error initializing AudioTrack");
        goto native_init_failure;
    }

    nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
    if (nSession == NULL) {
        ALOGE("Error creating AudioTrack: Error retrieving session id pointer");
        goto native_init_failure;
    }
    // read the audio session ID back from AudioTrack in case we create a new session
    // i.e. refresh sessionId from the native AudioTrack
    nSession[0] = lpTrack->getSessionId();
    env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    nSession = NULL;

    {   // scope for the lock
        Mutex::Autolock l(sLock);
        sAudioTrackCallBackCookies.add(&lpJniStorage->mCallbackData);
    }
    // save our newly created C++ AudioTrack in the "nativeTrackInJavaObj" field
    // of the Java object (in mNativeTrackInJavaObj)
    setAudioTrack(env, thiz, lpTrack);

    // save the JNI resources so we can free them later
    //ALOGV("storing lpJniStorage: %x\n", (long)lpJniStorage);
    env->SetLongField(thiz, javaAudioTrackFields.jniData, (jlong)lpJniStorage);

    return (jint) AUDIOTRACK_SUCCESS;

    // failures:
native_init_failure:
    if (nSession != NULL) {
        env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
    }
    env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_class);
    env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_ref);
    delete lpJniStorage;
    env->SetLongField(thiz, javaAudioTrackFields.jniData, 0);

    return (jint) AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
}
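
To make the two conversions in native_setup() concrete, the Java-to-native channel-mask shift and the frame-count computation, here is a small worked example in plain Java (0x0C is the value of AudioFormat.CHANNEL_OUT_STEREO; the buffer size is a hypothetical value):

public class ChannelMaskDemo {
    public static void main(String[] args) {
        int javaMask = 0x0C;                              // AudioFormat.CHANNEL_OUT_STEREO (FRONT_LEFT | FRONT_RIGHT)
        int nativeMask = javaMask >> 2;                   // 0x03, i.e. AUDIO_CHANNEL_OUT_STEREO
        int nbChannels = Integer.bitCount(nativeMask);    // 2, the popcount() above
        int bytesPerSample = 2;                           // ENCODING_PCM_16BIT
        int buffSizeInBytes = 8192;                       // hypothetical buffer size
        int frameCount = buffSizeInBytes / (nbChannels * bytesPerSample);
        System.out.printf("nativeMask=0x%x channels=%d frameCount=%d%n",
                nativeMask, nbChannels, frameCount);      // prints 0x3, 2, 2048
    }
}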

static void audioCallback(int event, void* user, void *info) {

    audiotrack_callback_cookie *callbackInfo = (audiotrack_callback_cookie *)user;
    {
        Mutex::Autolock l(sLock);
        if (sAudioTrackCallBackCookies.indexOf(callbackInfo) < 0) {
            return;
        }
        callbackInfo->busy = true;
    }

    switch (event) {
    case AudioTrack::EVENT_MARKER: {
        JNIEnv *env = AndroidRuntime::getJNIEnv();
        if (user != NULL && env != NULL) {
            env->CallStaticVoidMethod(
                callbackInfo->audioTrack_class,
                javaAudioTrackFields.postNativeEventInJava,
                callbackInfo->audioTrack_ref, event, 0,0, NULL);
            if (env->ExceptionCheck()) {
                env->ExceptionDescribe();
                env->ExceptionClear();
            }
        }
        } break;

    case AudioTrack::EVENT_NEW_POS: {
        JNIEnv *env = AndroidRuntime::getJNIEnv();
        if (user != NULL && env != NULL) {
            env->CallStaticVoidMethod(
                callbackInfo->audioTrack_class,
                javaAudioTrackFields.postNativeEventInJava,
                callbackInfo->audioTrack_ref, event, 0,0, NULL);
            if (env->ExceptionCheck()) {
                env->ExceptionDescribe();
                env->ExceptionClear();
            }
        }
        } break;
    }

    {
        Mutex::Autolock l(sLock);
        callbackInfo->busy = false;
        callbackInfo->cond.broadcast();
    }
}
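
On the application side, the EVENT_MARKER and EVENT_NEW_POS events forwarded by this callback surface through AudioTrack.OnPlaybackPositionUpdateListener; a hedged usage sketch (frame counts chosen for illustration):

    // Sketch: receiving marker / periodic position events in application code.
    void installPositionListener(AudioTrack track, int sampleRate) {
        track.setPositionNotificationPeriod(sampleRate / 2);   // EVENT_NEW_POS roughly every half second of frames
        track.setNotificationMarkerPosition(sampleRate * 5);   // EVENT_MARKER after five seconds' worth of frames
        track.setPlaybackPositionUpdateListener(new AudioTrack.OnPlaybackPositionUpdateListener() {
            @Override
            public void onMarkerReached(AudioTrack t) {
                // corresponds to EVENT_MARKER
            }
            @Override
            public void onPeriodicNotification(AudioTrack t) {
                // corresponds to EVENT_NEW_POS
            }
        });
    }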

            status_t    set(audio_stream_type_t streamType,
                            uint32_t sampleRate,
                            audio_format_t format,
                            audio_channel_mask_t channelMask,
                            int frameCount      = 0,
                            audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                            callback_t cbf      = NULL,
                            void* user          = NULL,
                            int notificationFrames = 0,
                            const sp<IMemory>& sharedBuffer = 0,
                            bool threadCanCallJava = false,
                            int sessionId       = 0,
                            transfer_type transferType = TRANSFER_DEFAULT,
                            const audio_offload_info_t *offloadInfo = NULL,
                            int uid = -1);

status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        int frameCountInt,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid)
{
    // Decide the audio data transfer type from transferType, the shared buffer, the callback and
    // threadCanCallJava. There are four concrete transfer types, but in practice only
    // TRANSFER_CALLBACK and TRANSFER_SHARED are commonly used.
    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
            return BAD_VALUE;
        }
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mTransfer = transferType;

    // FIXME "int" here is legacy and will be replaced by size_t later
    if (frameCountInt < 0) {
        ALOGE("Invalid frame count %d", frameCountInt);
        return BAD_VALUE;
    }
    size_t frameCount = frameCountInt;

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
            sharedBuffer->size());

    ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags);

    AutoMutex lock(mLock);

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    mOutput = 0;

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }

    if (sampleRate == 0) {
        uint32_t afSampleRate;
        if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
            return NO_INIT;
        }
        sampleRate = afSampleRate;
    }
    mSampleRate = sampleRate;

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }
    if (channelMask == 0) {
        channelMask = AUDIO_CHANNEL_OUT_STEREO;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %d", format);
        return BAD_VALUE;
    }

    // AudioFlinger does not currently support 8-bit data in shared memory
    // MODE_STATIC does not support 8-bit samples; the JNI layer has already converted them to 16-bit.
    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
        ALOGE("8-bit data in shared memory is not supported");
        return BAD_VALUE;
    }

    // force direct flag if format is not linear PCM
    // or offload was requested
    // For offloaded playback or non-PCM data, force the direct flag: the data is sent straight to
    // the output and decoded/rendered by the hardware.
    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
                    ? "Offload request, forcing to Direct Output"
                    : "Not linear PCM, forcing to Direct Output");
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }
    // only allow deep buffering for music stream type
    if (streamType != AUDIO_STREAM_MUSIC) {
        flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
    }

    if (!audio_is_output_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = popcount(channelMask);
    mChannelCount = channelCount;

    if (audio_is_linear_pcm(format)) {
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        mFrameSizeAF = channelCount * sizeof(int16_t);
    } else {
        mFrameSize = sizeof(uint8_t);
        mFrameSizeAF = sizeof(uint8_t);
    }

    audio_io_handle_t output = AudioSystem::getOutput(
                                    streamType,
                                    sampleRate, format, channelMask,
                                    flags,
                                    offloadInfo);

    if (output == 0) {
        ALOGE("Could not get audio output for stream type %d", streamType);
        return BAD_VALUE;
    }

    mVolume[LEFT] = 1.0f;
    mVolume[RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    mFrameCount = frameCount;
    mReqFrameCount = frameCount;
    mNotificationFramesReq = notificationFrames;
    mNotificationFramesAct = 0;
    mSessionId = sessionId;
    if (uid == -1 || (IPCThreadState::self()->getCallingPid() != getpid())) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    mAuxEffectId = 0;
    mFlags = flags;
    mCbf = cbf;
    // cbf is the callback installed by the JNI layer (audioCallback), so it is non-NULL here and an
    // AudioTrackThread is started. This thread carries event notifications between the native
    // AudioTrack and the Java AudioTrack, giving the upper layers an entry point for handling:
    //   EVENT_MORE_DATA  = 0   request to write more data
    //   EVENT_UNDERRUN   = 1   the PCM buffer underran
    //   EVENT_LOOP_END   = 2   loop end reached; if the loop count != 0, playback restarts from the loop start
    //   EVENT_MARKER     = 3   playback head is at the specified marker (see setMarkerPosition())
    //   EVENT_NEW_POS    = 4   playback head is at a new position (see setPositionUpdatePeriod())
    //   EVENT_BUFFER_END = 5   playback head is at the end of the buffer
    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
    }

    // create the IAudioTrack
    status_t status = createTrack_l(streamType,
                                  sampleRate,
                                  format,
                                  frameCount,
                                  flags,
                                  sharedBuffer,
                                  output,
                                  0 /*epoch*/);

    if (status != NO_ERROR) {
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        //Use of direct and offloaded output streams is ref counted by audio policy manager.
        // As getOutput was called above and resulted in an output stream to be opened,
        // we need to release it.
        AudioSystem::releaseOutput(output);
        return status;
    }

    mStatus = NO_ERROR;
    mStreamType = streamType;
    mFormat = format;
    mSharedBuffer = sharedBuffer;
    mState = STATE_STOPPED;
    mUserData = user;
    mLoopPeriod = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    AudioSystem::acquireAudioSessionId(mSessionId);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInUnderrun = false;
    mOutput = output;

    return NO_ERROR;
}
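
The frame-size bookkeeping in set() (mFrameSize and mFrameSizeAF) is easy to verify by hand; a small sketch assuming stereo 16-bit PCM (values are illustrative, helper name is mine):

    // Sketch: frame-size math for stereo 16-bit PCM.
    static int clientBufferFrames(int bufferSizeInBytes) {
        int channelCount = 2;                            // AUDIO_CHANNEL_OUT_STEREO
        int bytesPerSample = 2;                          // AUDIO_FORMAT_PCM_16_BIT
        int frameSize = channelCount * bytesPerSample;   // mFrameSize = 4 bytes per frame
        return bufferSizeInBytes / frameSize;            // e.g. 8192 bytes -> 2048 frames
    }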

// must be called with mLock held
status_t AudioTrack::createTrack_l(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        size_t frameCount,
        audio_output_flags_t flags,
        const sp<IMemory>& sharedBuffer,
        audio_io_handle_t output,
        size_t epoch)
{
    status_t status;
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        ALOGE("Could not get audioflinger");
        return NO_INIT;
    }

    // Not all of these values are needed under all conditions, but it is easier to get them all
    // (i.e. read the latency, frame count and sample rate of the HAL output)
    uint32_t afLatency;
    status = AudioSystem::getLatency(output, streamType, &afLatency);
    if (status != NO_ERROR) {
        ALOGE("getLatency(%d) failed status %d", output, status);
        return NO_INIT;
    }

    size_t afFrameCount;
    status = AudioSystem::getFrameCount(output, streamType, &afFrameCount);
    if (status != NO_ERROR) {
        ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, streamType, status);
        return NO_INIT;
    }

    uint32_t afSampleRate;
    status = AudioSystem::getSamplingRate(output, streamType, &afSampleRate);
    if (status != NO_ERROR) {
        ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, streamType, status);
        return NO_INIT;
    }

    // Client decides whether the track is TIMED (see below), but can only express a preference
    // for FAST.  Server will perform additional tests.
    if ((flags & AUDIO_OUTPUT_FLAG_FAST) && !(
            // either of these use cases:
            // use case 1: shared buffer
            (sharedBuffer != 0) ||
            // use case 2: callback handler
            (mCbf != NULL))) {
        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
        // once denied, do not request again if IAudioTrack is re-created
        flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
        mFlags = flags;
    }
    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);

    if ((flags & AUDIO_OUTPUT_FLAG_FAST) && sampleRate != afSampleRate) {
        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client due to mismatching sample rate (%d vs %d)",
              sampleRate, afSampleRate);
        flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
    }

    // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
    //  n = 1   fast track with single buffering; nBuffering is ignored
    //  n = 2   fast track with double buffering
    //  n = 2   normal track, no sample rate conversion
    //  n = 3   normal track, with sample rate conversion
    //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
    //  n > 3   very high latency or very small notification interval; nBuffering is ignored
    // Size the client buffer relative to the HAL buffer: at least double buffering, triple when resampling.
    const uint32_t nBuffering = (sampleRate == afSampleRate) ? 2 : 3;

    mNotificationFramesAct = mNotificationFramesReq;
    // frameCount is updated in the following cases:
    // 1. non-PCM data: for a shared buffer (STATIC) the frame count is taken from the buffer size,
    //    otherwise it falls back to the HAL frame count;
    // 2. PCM data with a shared buffer (STATIC): check the buffer alignment, then derive the frame
    //    count from the buffer size;
    // 3. PCM data in STREAM mode, not a fast track: compute the minimum buffer from the hardware
    //    latency and, if the requested frame count is smaller than that minimum, raise it;
    // 4. fast tracks: the server performs the frame-count calculation and validation.
    if (!audio_is_linear_pcm(format)) {

        if (sharedBuffer != 0) { // STATIC (shared-buffer) mode
            // Same comment as below about ignoring frameCount parameter for set()
            frameCount = sharedBuffer->size();
        } else if (frameCount == 0) {
            frameCount = afFrameCount;
        }
        if (mNotificationFramesAct != frameCount) {
            mNotificationFramesAct = frameCount;
        }
    } else if (sharedBuffer != 0) { // STATIC (shared-buffer) mode

        // Ensure that buffer alignment matches channel count
        // 8-bit data in shared memory is not currently supported by AudioFlinger
        // Alignment check on the shared buffer.
        size_t alignment = /* format == AUDIO_FORMAT_PCM_8_BIT ? 1 : */ 2;
        if (mChannelCount > 1) {
            // More than 2 channels does not require stronger alignment than stereo
            alignment <<= 1;
        }
        if (((uintptr_t)sharedBuffer->pointer() & (alignment - 1)) != 0) {
            ALOGE("Invalid buffer alignment: address %p, channel count %u",
                    sharedBuffer->pointer(), mChannelCount);
            return BAD_VALUE;
        }

        // When initializing a shared buffer AudioTrack via constructors,
        // there's no frameCount parameter.
        // But when initializing a shared buffer AudioTrack via set(),
        // there _is_ a frameCount parameter.  We silently ignore it.
        // The shared-buffer constructors do not pass a frameCount, so derive it from the buffer size:
        frameCount = sharedBuffer->size()/mChannelCount/sizeof(int16_t);

    } else if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {

        // FIXME move these calculations and associated checks to server

        // Ensure that buffer depth covers at least audio hardware latency
        // From the hardware latency, compute the minimum number of HAL-buffer-sized blocks the client buffer must cover.
        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
        ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d",
                afFrameCount, minBufCount, afSampleRate, afLatency);
        if (minBufCount <= nBuffering) {
            minBufCount = nBuffering;
        }
        // Equivalent to minFrameCount / sampleRate = (afFrameCount / afSampleRate) * minBufCount;
        // substituting minBufCount gives roughly minFrameCount = (afLatency * sampleRate) / 1000.
        // The client-side buffer has to cover the hardware latency so that no data is lost;
        // ideally the client buffer and the hardware buffering represent the same amount of time.
        size_t minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
        ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
                ", afLatency=%d",
                minFrameCount, afFrameCount, minBufCount, sampleRate, afSampleRate, afLatency);
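        // (Illustration, hypothetical HAL values: afFrameCount = 512, afSampleRate = 44100,
        // afLatency = 50 ms, sampleRate = 44100. One HAL buffer lasts (1000*512)/44100 = 11 ms,
        // so minBufCount = 50/11 = 4, which is kept since it exceeds nBuffering == 2, and
        // minFrameCount = (512*44100*4)/44100 = 2048 frames.)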

        if (frameCount == 0) {
            frameCount = minFrameCount;
        } else if (frameCount < minFrameCount) {
            // not ALOGW because it happens all the time when playing key clicks over A2DP
            ALOGV("Minimum buffer size corrected from %d to %d",
                     frameCount, minFrameCount);
            frameCount = minFrameCount;
        }
        // Make sure that application is notified with sufficient margin before underrun
        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
            mNotificationFramesAct = frameCount/nBuffering;
        }

    } else {
        // For fast tracks, the frame count calculations and checks are done by server
    }

    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
    if (mIsTimed) {
        trackFlags |= IAudioFlinger::TRACK_TIMED;
    }

    pid_t tid = -1;
    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
        trackFlags |= IAudioFlinger::TRACK_FAST;
        if (mAudioTrackThread != 0) {
            tid = mAudioTrackThread->getTid();
        }
    }

    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
    }
    // Ask AudioFlinger to construct a Track object; what is returned is a TrackHandle, analysed further below.
    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
                                                      sampleRate,
                                                      // AudioFlinger only sees 16-bit PCM
                                                      format == AUDIO_FORMAT_PCM_8_BIT ?
                                                              AUDIO_FORMAT_PCM_16_BIT : format,
                                                      mChannelMask,
                                                      frameCount,
                                                      &trackFlags,
                                                      sharedBuffer,
                                                      output,
                                                      tid,
                                                      &mSessionId,
                                                      mName,
                                                      mClientUid,
                                                      &status);

    if (track == 0) {
        ALOGE("AudioFlinger could not create track, status: %d", status);
        return status;
    }
    // Save the track object created on the AudioFlinger side (actually a TrackHandle) and the memory managed by its control block (cblk).
    sp<IMemory> iMem = track->getCblk();
    if (iMem == 0) {
        ALOGE("Could not get control block");
        return NO_INIT;
    }
    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
        mDeathNotifier.clear();
    }
    mAudioTrack = track;
    mCblkMemory = iMem;
    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer());
    mCblk = cblk;
    size_t temp = cblk->frameCount_;

    // Compare the requested frame count (frameCount) with the actually allocated one (temp), then adopt the allocated value.
    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
        // In current design, AudioTrack client checks and ensures frame count validity before
        // passing it to AudioFlinger so AudioFlinger should not return a different value except
        // for fast track as it uses a special method of assigning frame count.
        ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
    }
    frameCount = temp;
    mAwaitBoost = false;
    // The flags requested a fast track.
    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
        // trackFlags as returned by AudioFlinger: if this bit is still set the fast track was granted, otherwise it was denied.
        if (trackFlags & IAudioFlinger::TRACK_FAST) {  
            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);  
            mAwaitBoost = true;  
            if (sharedBuffer == 0) {  
                // Theoretically double-buffering is not required for fast tracks,  
                // due to tighter scheduling.  But in practice, to accommodate kernels with  
                // scheduling jitter, and apps with computation jitter, we use double-buffering.  
                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {  
                    mNotificationFramesAct = frameCount/nBuffering;  
                }  
            }  
        } else {  
            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);  
            // once denied, do not request again if IAudioTrack is re-created  
            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);  
            mFlags = flags;  
            if (sharedBuffer == 0) {  
                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {  
                    mNotificationFramesAct = frameCount/nBuffering;  
                }  
            }  
        }  
    }  
    // The flags requested an offloaded track.
    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {  
        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {  
            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");  
        } else {  
            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");  
            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);  
            mFlags = flags;  
            return NO_INIT;  
        }  
    }  
 
    mRefreshRemaining = true;  
 
    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers  
    // is the value of pointer() for the shared buffer, otherwise buffers points  
    // immediately after the control block.  This address is for the mapping within client  
    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
    // In STREAM mode the data buffer starts right after the control block (cblk); in STATIC mode it is simply the start of the shared buffer.
    void* buffers;  
    if (sharedBuffer == 0) {  
        buffers = (char*)cblk + sizeof(audio_track_cblk_t);  
    } else {  
        buffers = sharedBuffer->pointer();  
    }  
    // Tell the AudioFlinger-side track which aux effect it is attached to; since mAuxEffectId == 0
    // here, this effectively sets the track's mAuxBuffer to NULL, i.e. it just initializes the
    // track (the details are analysed later).
    mAudioTrack->attachAuxEffect(mAuxEffectId);

    // FIXME don't believe this lie  
    mLatency = afLatency + (1000*frameCount) / sampleRate;  
    mFrameCount = frameCount;  
    // If IAudioTrack is re-created, don't let the requested frameCount  
    // decrease.  This can confuse clients that cache frameCount().  
    if (frameCount > mReqFrameCount) {  
        mReqFrameCount = frameCount;  
    }  
 
    // update proxy
    // Build the client-side proxy object depending on STREAM vs STATIC mode; it records the control
    // block, the start of the data buffer, the allocated frame count and the AudioFlinger-side frame size.
    if (sharedBuffer == 0) {  
        mStaticProxy.clear();  
        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);  
    } else {  
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);  
        mProxy = mStaticProxy;  
    }  
    mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) |  
            uint16_t(mVolume[LEFT] * 0x1000));  
    mProxy->setSendLevel(mSendLevel);  
    mProxy->setSampleRate(mSampleRate);  
    mProxy->setEpoch(epoch);  
    mProxy->setMinimum(mNotificationFramesAct);  
 
    mDeathNotifier = new DeathNotifier(this);  
    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);  
 
    return NO_ERROR;  
}
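
The setVolumeLR() call above packs both channel gains into one 32-bit word in 4.12 fixed point; a small illustrative sketch of that packing (method name is mine):

    // Sketch: 4.12 fixed-point packing used for setVolumeLR().
    static int packStereoVolume(float left, float right) {
        int l = (int) (left * 0x1000) & 0xFFFF;    // 1.0f -> 0x1000
        int r = (int) (right * 0x1000) & 0xFFFF;   // 0.5f -> 0x0800
        return (r << 16) | l;                      // packStereoVolume(1.0f, 0.5f) == 0x08001000
    }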
