分享

转载:awesomeplayer 读取音视频解码数据流

 昵称11302113 2012-12-14
这里分析Android4.0.1本地视频数据读取的流程,其他过程类似

当播放条件准备妥当之后,就要循环进行读取视频的原始数据放到MediaBuffer,将MediaBuffer中的数据输送到解码器中解码,解码后的数据放到MediaBuffer中,再将这个MediaBuffer中的数据进行render显示。

本文主要侧重读取原始数据的流程,主要是代码跟踪,不夹杂个人分析,有些mpeg4的算法不懂。

1:onVideoEvent中开始读取数据,具体代码如下:
// Excerpt from AwesomePlayer: entry point of the video event loop.
// When no decoded frame is pending (mVideoBuffer == NULL), optionally
// latches a seek request into ReadOptions, then asks the decoder
// (mVideoSource, an OMXCodec) for the next decoded frame.
// NOTE(review): abridged excerpt — the full source breaks out of the
// for(;;) on success/EOS; error handling is elided here.
void AwesomePlayer::onVideoEvent() {
    if (!mVideoBuffer) {
        MediaSource::ReadOptions options;
        if (mSeeking != NO_SEEK) {
            LOGV("seeking to %lld us (%.2f secs)", mSeekTimeUs, mSeekTimeUs / 1E6);

            // SEEK_VIDEO_ONLY snaps forward to the next sync (key) frame;
            // an ordinary seek targets the closest sync frame.
            // (fixed) the original excerpt dropped the '?' of this
            // conditional expression.
            options.setSeekTo(
                    mSeekTimeUs,
                    mSeeking == SEEK_VIDEO_ONLY
                            ? MediaSource::ReadOptions::SEEK_NEXT_SYNC
                            : MediaSource::ReadOptions::SEEK_CLOSEST_SYNC);
        }
        for (;;) {
            // Core call: fills mVideoBuffer with one decoded frame.
            status_t err = mVideoSource->read(&mVideoBuffer, &options);
        }
    }
}
蓝色为核心代码,如果是正常读取,options为null,否则这个结构体中包含了seek到的时间和seek的模式,用于具体从文件中哪里开始读取,传入的mVideoBuffer引用用来装解码后的数据

2:蓝色部分调用的是OMXCodec::read函数,这个函数中核心的代码如下:
// Excerpt from OMXCodec::read(): hands one decoded output buffer to the
// caller. Records any pending seek request, kicks the input side via
// drainInputBuffers(), then dequeues the first filled output buffer and
// transfers ownership of its MediaBuffer to the caller in *buffer.
status_t OMXCodec::read(
MediaBuffer **buffer, const ReadOptions *options) {
status_t err = OK;
*buffer = NULL;
bool seeking = false;
int64_t seekTimeUs;
ReadOptions::SeekMode seekMode;
// A non-null options carrying a seek target means the caller wants to jump.
if (options && options->getSeekTo(&seekTimeUs, &seekMode)) {
seeking = true;
}

if (seeking) {
CODEC_LOGV("seeking to %lld us (%.2f secs)", seekTimeUs, seekTimeUs / 1E6);
CHECK(seekTimeUs >= 0);
// Latch the request in member state; drainInputBuffers() forwards it
// to the extractor on its next source read.
mSeekTimeUs = seekTimeUs;
mSeekMode = seekMode;
}

// Feed compressed input to the OMX component; decoded output indices
// end up in mFilledBuffers.
// NOTE(review): abridged excerpt — the full implementation waits until
// mFilledBuffers is non-empty before dequeuing below.
drainInputBuffers();
size_t index = *mFilledBuffers.begin(); // A list of indices into mPortStatus[kPortIndexOutput] filled with data.
mFilledBuffers.erase(mFilledBuffers.begin());

BufferInfo *info = &mPortBuffers[kPortIndexOutput].editItemAt(index);
CHECK_EQ((int)info->mStatus, (int)OWNED_BY_US);
// Ownership passes to the client until it returns the buffer.
info->mStatus = OWNED_BY_CLIENT;

// Extra reference so the MediaBuffer survives until the client releases it.
info->mMediaBuffer->add_ref();
*buffer = info->mMediaBuffer;

return OK;
}
两点:
a,drainInputBuffers开始了数据的读取;
b,mFilledBuffers从这个队列中读取已经解码后的数据放入到传入的MediaBuffer中,mFilledBuffers队列中的MediaBuffer就是drainInputBuffers中写进去的

3:跟到drainInputBuffer中看看
// Excerpt from OMXCodec::drainInputBuffer(): fills one OMX input buffer.
// First submits codec-specific config data via emptyBuffer, then reads
// compressed samples from the extractor (mSource) and submits them to the
// OMX component for decoding; decoded output later appears in mFilledBuffers.
// NOTE(review): abridged excerpt — `size`, `flags` and the exit conditions
// of the for(;;) loop are defined in the full AOSP source.
bool OMXCodec::drainInputBuffer(BufferInfo *info) {
    CODEC_LOGV("calling emptyBuffer with codec specific data");
    // Push codec-specific data (e.g. SPS/PPS) marked CODECCONFIG first.
    status_t err = mOMX->emptyBuffer(
            mNode, info->mBuffer, 0, size,
            OMX_BUFFERFLAG_ENDOFFRAME | OMX_BUFFERFLAG_CODECCONFIG,
            0);
    CHECK_EQ(err, (status_t)OK);
    info->mStatus = OWNED_BY_COMPONENT;

    // (fixed) the original excerpt redeclared `status_t err;` here, which is
    // a redefinition error in the same scope; reuse the declaration above.
    bool signalEOS = false;
    int64_t timestampUs = 0;
    size_t offset = 0;
    int32_t n = 0;

    for (;;) {
        MediaBuffer *srcBuffer;
        // A pending seek (latched in read()) is consumed by exactly one
        // extractor read, then reset to "no seek pending".
        if (mSeekTimeUs >= 0) {
            MediaSource::ReadOptions options;
            options.setSeekTo(mSeekTimeUs, mSeekMode);

            mSeekTimeUs = -1;
            mSeekMode = ReadOptions::SEEK_CLOSEST_SYNC;
            // Core call: pull one compressed sample from the extractor.
            err = mSource->read(&srcBuffer, &options);

            if (err == OK) {
                // For SEEK_CLOSEST the extractor lands on the preceding
                // sync frame and reports the真targeted presentation time in
                // kKeyTargetTime so frames before it can be dropped.
                int64_t targetTimeUs;
                if (srcBuffer->meta_data()->findInt64(
                            kKeyTargetTime, &targetTimeUs)
                        && targetTimeUs >= 0) {
                    CODEC_LOGV("targetTimeUs = %lld us", targetTimeUs);
                    mTargetTimeUs = targetTimeUs;
                } else {
                    mTargetTimeUs = -1;
                }
            }
        }
    }
    CODEC_LOGV("Calling emptyBuffer on buffer %p (length %d), "
               "timestamp %lld us (%.2f secs)",
               info->mBuffer, offset,
               timestampUs, timestampUs / 1E6);
    // Submit the accumulated compressed data to the component for decoding.
    err = mOMX->emptyBuffer(
            mNode, info->mBuffer, 0, offset,
            flags, timestampUs);
    info->mStatus = OWNED_BY_COMPONENT;
    return true;
}
两点:
a,调用err = mSource->read(&srcBuffer, &options);从原始文件中读取原始数据,
b,往srcBuffer中读取数据前后,都调用omx转移已经读取到该info中的数据,目的是解码,解码后的数据就放在了mFilledBuffers这个队列中;

4:针对mpeg4类型的视频,上面的read函数调用的是MPEG4Source的read函数,核心代码如下:
// Excerpt from MPEG4Source::read(): returns one sample from the MP4 file
// in *out — a whole sample for non-AVC tracks, or one NAL fragment per
// call for AVC tracks.
// NOTE(review): abridged excerpt — the non-(mIsAVC||mWantsNALFragments)
// tail of the full function is elided, so the final brace closes the
// function without a trailing return here.
status_t MPEG4Source::read(
        MediaBuffer **out, const ReadOptions *options) {
    *out = NULL;

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    // (fixed) declared here; the original excerpt used this variable
    // below without any declaration.
    int64_t targetSampleTimeUs = -1;
    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
        // Seek path:
        //  1) find the sample index nearest the requested seek time;
        //  2) find the sync (key frame) sample near that index;
        //  3) map that back to a sample time — the position playback
        //     actually resumes from;
        //  4) getMetaDataForSample() then yields that sample's file
        //     offset and size;
        //  5) with offset/size known, mDataSource->readAt(offset,
        //     (uint8_t *)mBuffer->data(), size) pulls the raw bytes.
        uint32_t findFlags = 0;
        switch (mode) {
            case ReadOptions::SEEK_PREVIOUS_SYNC:
                findFlags = SampleTable::kFlagBefore;
                break;
            case ReadOptions::SEEK_NEXT_SYNC:
                findFlags = SampleTable::kFlagAfter;
                break;
            case ReadOptions::SEEK_CLOSEST_SYNC:
            case ReadOptions::SEEK_CLOSEST:
                findFlags = SampleTable::kFlagClosest;
                break;
            default:
                CHECK(!"Should not be here.");
                break;
        }

        // Step 1: seek time (us) -> media timescale -> sample index.
        uint32_t sampleIndex;
        status_t err = mSampleTable->findSampleAtTime(
                seekTimeUs * mTimescale / 1000000,
                &sampleIndex, findFlags);

        // Step 2: nearest sync sample to that index (stss box).
        uint32_t syncSampleIndex;
        if (err == OK) {
            err = mSampleTable->findSyncSampleNear(
                    sampleIndex, &syncSampleIndex, findFlags);
        }

        // Step 3: sample index -> sample time.
        uint32_t sampleTime;
        if (err == OK) {
            err = mSampleTable->getMetaDataForSample(
                    sampleIndex, NULL, NULL, &sampleTime);
        }

        // SEEK_CLOSEST: remember the exact requested time so the decoder
        // can drop frames between the sync frame and the target.
        if (mode == ReadOptions::SEEK_CLOSEST) {
            targetSampleTimeUs = (sampleTime * 1000000ll) / mTimescale;
        }

        // Decoding must start at the sync frame.
        mCurrentSampleIndex = syncSampleIndex;
    }

    off64_t offset;
    size_t size;
    uint32_t cts;
    bool isSyncSample;
    bool newBuffer = false;
    // mBuffer == NULL means the previous sample was fully consumed;
    // fetch metadata for the next sample and acquire a fresh buffer.
    if (mBuffer == NULL) {
        newBuffer = true;

        // Step 4: sample index -> file offset, byte size, composition time.
        status_t err =
            mSampleTable->getMetaDataForSample(
                    mCurrentSampleIndex, &offset, &size, &cts, &isSyncSample);

        if (err != OK) {
            return err;
        }

        err = mGroup->acquire_buffer(&mBuffer);

        if (err != OK) {
            CHECK(mBuffer == NULL);
            return err;
        }
    }

    if (!mIsAVC || mWantsNALFragments) {
        if (newBuffer) {
            // Step 5: read the raw sample bytes straight from the file.
            ssize_t num_bytes_read =
                mDataSource->readAt(offset, (uint8_t *)mBuffer->data(), size);

            CHECK(mBuffer != NULL);
            mBuffer->set_range(0, size);
            mBuffer->meta_data()->clear();
            // Timestamp: composition time converted to microseconds.
            mBuffer->meta_data()->setInt64(
                    kKeyTime, ((int64_t)cts * 1000000) / mTimescale);

            if (isSyncSample) {
                mBuffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
            }

            ++mCurrentSampleIndex;
        }

        // Non-AVC: hand the whole sample over in one buffer.
        if (!mIsAVC) {
            *out = mBuffer;
            mBuffer = NULL;

            return OK;
        }

        // Each NAL unit is split up into its constituent fragments and
        // each one of them returned in its own buffer.

        CHECK(mBuffer->range_length() >= mNALLengthSize);

        const uint8_t *src =
            (const uint8_t *)mBuffer->data() + mBuffer->range_offset();

        size_t nal_size = parseNALSize(src);

        // Clone shares the underlying data; the clone's range covers just
        // this NAL unit (skipping its length prefix).
        MediaBuffer *clone = mBuffer->clone();
        CHECK(clone != NULL);
        clone->set_range(mBuffer->range_offset() + mNALLengthSize, nal_size);

        CHECK(mBuffer != NULL);
        // Advance the remaining range past this NAL; keep mBuffer for the
        // next call if anything is left.
        mBuffer->set_range(
                mBuffer->range_offset() + mNALLengthSize + nal_size,
                mBuffer->range_length() - mNALLengthSize - nal_size);

        if (mBuffer->range_length() == 0) {
            mBuffer->release();
            mBuffer = NULL;
        }

        *out = clone;

        return OK;
    }
}
蓝色部分为主要的流程

5:后续就是开始调用SampleTable.cpp和SampleIterator.cpp这两个类的相关函数解析文件和读取数据,最主要的函数是通过sampleIndex获取offset和size信息了,代码如下:
// Maps a sample index to its file offset, byte size, composition time and
// sync-sample (key frame) status via the cached SampleIterator.
// Any out-parameter may be NULL if the caller does not need that field.
// Thread-safe: serialized on mLock.
status_t SampleTable::getMetaDataForSample(
        uint32_t sampleIndex,
        off64_t *offset,
        size_t *size,
        uint32_t *compositionTime,
        bool *isSyncSample) {
    Mutex::Autolock autoLock(mLock);

    status_t err;
    if ((err = mSampleIterator->seekTo(sampleIndex)) != OK) {
        return err;
    }

    if (offset) {
        *offset = mSampleIterator->getSampleOffset();
    }

    if (size) {
        *size = mSampleIterator->getSampleSize();
    }

    if (compositionTime) {
        *compositionTime = mSampleIterator->getSampleTime();
    }

    if (isSyncSample) {
        *isSyncSample = false;
        if (mSyncSampleOffset < 0) {
            // Every sample is a sync sample.
            *isSyncSample = true;
        } else {
            // Resume the scan from the last hit when queries are monotonically
            // increasing; otherwise restart from 0.
            // (fixed) the original excerpt dropped the '?' of this
            // conditional expression.
            size_t i = (mLastSyncSampleIndex < mNumSyncSamples)
                            && (mSyncSamples[mLastSyncSampleIndex] <= sampleIndex)
                    ? mLastSyncSampleIndex : 0;

            while (i < mNumSyncSamples && mSyncSamples[i] < sampleIndex) {
                ++i;
            }

            if (i < mNumSyncSamples && mSyncSamples[i] == sampleIndex) {
                *isSyncSample = true;
            }

            mLastSyncSampleIndex = i;
        }
    }

    return OK;
}
下面这个函数没有看懂,对具体的mpeg4压缩协议没有进行深入了解
// Converts a sample index to its timestamp (in media timescale units) by
// walking the MP4 time-to-sample table, stored flat as (count, duration)
// pairs, then adding the per-sample composition offset.
// Stateful: mTTS* members cache the current table entry so consecutive,
// increasing queries resume where the previous one stopped.
status_t SampleIterator::findSampleTime(
uint32_t sampleIndex, uint32_t *time) {
if (sampleIndex >= mTable->mNumSampleSizes) {
return ERROR_OUT_OF_RANGE;
}

// Advance through table entries until the one covering sampleIndex.
// mTTSSampleIndex/mTTSSampleTime accumulate the first sample index and
// start time of the current entry — the update order below matters.
while (sampleIndex >= mTTSSampleIndex + mTTSCount) {
if (mTimeToSampleIndex == mTable->mTimeToSampleCount) {
return ERROR_OUT_OF_RANGE;
}

mTTSSampleIndex += mTTSCount;
mTTSSampleTime += mTTSCount * mTTSDuration;

// Each entry is stored as two consecutive uint32s: [count, duration].
mTTSCount = mTable->mTimeToSample[2 * mTimeToSampleIndex];
mTTSDuration = mTable->mTimeToSample[2 * mTimeToSampleIndex + 1];

++mTimeToSampleIndex;
}

// Decode time = entry start time + whole durations within the entry.
*time = mTTSSampleTime + mTTSDuration * (sampleIndex - mTTSSampleIndex);

// Add the composition (ctts) offset to get presentation time.
*time += mTable->getCompositionTimeOffset(sampleIndex);

return OK;
}

    本站是提供个人知识管理的网络存储空间,所有内容均由用户发布,不代表本站观点。请注意甄别内容中的联系方式、诱导购买等信息,谨防诈骗。如发现有害或侵权内容,请点击一键举报。
    转藏 分享 献花(0

    0条评论

    发表

    请遵守用户 评论公约

    类似文章 更多