I am trying to mux H.264 encoded data and G.711 PCM data into a MOV multimedia container. I am creating an AVPacket from the encoded data, and initially the PTS and DTS values of each video/audio frame are equal to AV_NOPTS_VALUE. So I calculated the DTS using the current time information. My code:
bool AudioVideoRecorder::WriteVideo(const unsigned char *pData, size_t iDataSize, bool const bIFrame) {
    .....................................
    .....................................
    .....................................
    AVPacket pkt = {0};
    av_init_packet(&pkt);
    int64_t dts = av_gettime();
    dts = av_rescale_q(dts, (AVRational){1, 1000000}, m_pVideoStream->time_base);
    int duration = 90000 / VIDEO_FRAME_RATE;
    if(m_prevVideoDts > 0LL) {
        duration = dts - m_prevVideoDts;
    }
    m_prevVideoDts = dts;

    pkt.pts = AV_NOPTS_VALUE;
    pkt.dts = m_currVideoDts;
    m_currVideoDts += duration;
    pkt.duration = duration;
    if(bIFrame) {
        pkt.flags |= AV_PKT_FLAG_KEY;
    }
    pkt.stream_index = m_pVideoStream->index;
    pkt.data = (uint8_t*) pData;
    pkt.size = iDataSize;

    int ret = av_interleaved_write_frame(m_pFormatCtx, &pkt);
    if(ret < 0) {
        LogErr("Writing video frame failed.");
        return false;
    }
    Log("Writing video frame done.");

    av_free_packet(&pkt);
    return true;
}
bool AudioVideoRecorder::WriteAudio(const unsigned char *pEncodedData, size_t iDataSize) {
    .................................
    .................................
    .................................
    AVPacket pkt = {0};
    av_init_packet(&pkt);
    int64_t dts = av_gettime();
    dts = av_rescale_q(dts, (AVRational){1, 1000000}, (AVRational){1, 90000});
    int duration = AUDIO_STREAM_DURATION; // 20
    if(m_prevAudioDts > 0LL) {
        duration = dts - m_prevAudioDts;
    }
    m_prevAudioDts = dts;

    pkt.pts = AV_NOPTS_VALUE;
    pkt.dts = m_currAudioDts;
    m_currAudioDts += duration;
    pkt.duration = duration;
    pkt.stream_index = m_pAudioStream->index;
    pkt.flags |= AV_PKT_FLAG_KEY;
    pkt.data = (uint8_t*) pEncodedData;
    pkt.size = iDataSize;

    int ret = av_interleaved_write_frame(m_pFormatCtx, &pkt);
    if(ret < 0) {
        LogErr("Writing audio frame failed: %d", ret);
        return false;
    }
    Log("Writing audio frame done.");

    av_free_packet(&pkt);
    return true;
}
I added the streams like this:
AVStream* AudioVideoRecorder::AddMediaStream(enum AVCodecID codecID) {
    ................................
    .................................
    pStream = avformat_new_stream(m_pFormatCtx, codec);
    if (!pStream) {
        LogErr("Could not allocate stream.");
        return NULL;
    }
    pStream->id = m_pFormatCtx->nb_streams - 1;
    pCodecCtx = pStream->codec;
    pCodecCtx->codec_id = codecID;

    switch(codec->type) {
    case AVMEDIA_TYPE_VIDEO:
        pCodecCtx->bit_rate = VIDEO_BIT_RATE;
        pCodecCtx->width = PICTURE_WIDTH;
        pCodecCtx->height = PICTURE_HEIGHT;
        pStream->time_base = (AVRational){1, 90000};
        pStream->avg_frame_rate = (AVRational){90000, 1};
        pStream->r_frame_rate = (AVRational){90000, 1}; // though the frame rate is variable and around 15 fps
        pCodecCtx->pix_fmt = STREAM_PIX_FMT;
        m_pVideoStream = pStream;
        break;

    case AVMEDIA_TYPE_AUDIO:
        pCodecCtx->sample_fmt = AV_SAMPLE_FMT_S16;
        pCodecCtx->bit_rate = AUDIO_BIT_RATE;
        pCodecCtx->sample_rate = AUDIO_SAMPLE_RATE;
        pCodecCtx->channels = 1;
        m_pAudioStream = pStream;
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    if (m_pOutputFmt->flags & AVFMT_GLOBALHEADER)
        pCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER; // CODEC_FLAG_GLOBAL_HEADER is a codec-context flag
    return pStream;
}
There are several problems with this calculation: if a call to WriteAudio(..) arrives late, say by about 3 seconds, the late frame should start playing with a 3-second delay, but it doesn't. The delayed frame is played back-to-back with the previous frame.

EDIT:
Based on Ronald S. Bultje's suggestion, what I understood is:
m_pAudioStream->time_base = (AVRational){1, 9000}; // actually no need to set as 9000 is already default value for audio as you said
m_pVideoStream->time_base = (AVRational){1, 9000};
should be set like this, so that the audio and video streams are now in the same time base units.
And for the video:
...................
...................
int64_t dts = av_gettime(); // get current time in microseconds
dts *= 9000;
dts /= 1000000; // 1 second = 10^6 microseconds
pkt.pts = AV_NOPTS_VALUE; // is it okay?
pkt.dts = dts;
// and no need to set pkt.duration, right?
Audio: (exactly the same as the video, right?)
...................
...................
int64_t dts = av_gettime(); // get current time in microseconds
dts *= 9000;
dts /= 1000000; // 1 second = 10^6 microseconds
pkt.pts = AV_NOPTS_VALUE; // is it okay?
pkt.dts = dts;
// and no need to set pkt.duration, right?
I think they are now like sharing the same currDts, am I right? Please correct me if I am wrong anywhere or missing something.
Also, if I want to use the video stream time base as (AVRational){1, frameRate} and the audio stream time base as (AVRational){1, sampleRate}, what should the correct code look like?
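(For reference, a sketch of that variant, not from the original post, assuming a hypothetical member m_iStartTime that holds av_gettime() captured once at recording start; the only change is the target rational passed to av_rescale_q:)

// Video, with m_pVideoStream->time_base == (AVRational){1, frameRate}:
int64_t elapsedUs = av_gettime() - m_iStartTime;  // microseconds since start
pkt.dts = av_rescale_q(elapsedUs, (AVRational){1, 1000000},
                       (AVRational){1, frameRate});   // ticks count frames

// Audio, with m_pAudioStream->time_base == (AVRational){1, sampleRate}:
pkt.dts = av_rescale_q(elapsedUs, (AVRational){1, 1000000},
                       (AVRational){1, sampleRate});  // ticks count samples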
EDIT 2.0:
m_pAudioStream->time_base = (AVRational){1, VIDEO_FRAME_RATE};
m_pVideoStream->time_base = (AVRational){1, VIDEO_FRAME_RATE};
and
bool AudioVideoRecorder::WriteAudio(const unsigned char *pEncodedData, size_t iDataSize) {
    ...........................
    ......................
    AVPacket pkt = {0};
    av_init_packet(&pkt);
    int64_t dts = av_gettime() / 1000; // convert into millisecond
    dts = dts * VIDEO_FRAME_RATE;
    if(m_dtsOffset < 0) {
        m_dtsOffset = dts;
    }

    pkt.pts = AV_NOPTS_VALUE;
    pkt.dts = (dts - m_dtsOffset);
    pkt.stream_index = m_pAudioStream->index;
    pkt.flags |= AV_PKT_FLAG_KEY;
    pkt.data = (uint8_t*) pEncodedData;
    pkt.size = iDataSize;

    int ret = av_interleaved_write_frame(m_pFormatCtx, &pkt);
    if(ret < 0) {
        LogErr("Writing audio frame failed: %d", ret);
        return false;
    }
    Log("Writing audio frame done.");

    av_free_packet(&pkt);
    return true;
}
bool AudioVideoRecorder::WriteVideo(const unsigned char *pData, size_t iDataSize, bool const bIFrame) {
    ........................................
    .................................
    AVPacket pkt = {0};
    av_init_packet(&pkt);
    int64_t dts = av_gettime() / 1000;
    dts = dts * VIDEO_FRAME_RATE;
    if(m_dtsOffset < 0) {
        m_dtsOffset = dts;
    }

    pkt.pts = AV_NOPTS_VALUE;
    pkt.dts = (dts - m_dtsOffset);
    if(bIFrame) {
        pkt.flags |= AV_PKT_FLAG_KEY;
    }
    pkt.stream_index = m_pVideoStream->index;
    pkt.data = (uint8_t*) pData;
    pkt.size = iDataSize;

    int ret = av_interleaved_write_frame(m_pFormatCtx, &pkt);
    if(ret < 0) {
        LogErr("Writing video frame failed.");
        return false;
    }
    Log("Writing video frame done.");

    av_free_packet(&pkt);
    return true;
}
Is this last change okay? Video and audio seem to be synchronized now. The only problem is that the audio plays without any delay, no matter how late its packets arrive. Like:

Packet arrival: 1 2 3 4 ... (the next frame arrives after 3 seconds) ... 5
Audio playback: 1 2 3 4 (no delay) 5
EDIT 3.0:
Zeroed audio sample data:
AVFrame* pSilentData;
pSilentData = av_frame_alloc();
memset(&pSilentData->data[0], 0, iDataSize);
pkt.data = (uint8_t*) pSilentData;
pkt.size = iDataSize;
av_freep(&pSilentData->data[0]);
av_frame_free(&pSilentData);
Is that okay? But after writing it into the file container, there is a clicking noise while playing the media. What is the problem?
EDIT 4.0:
Well, it turns out that the zero (silence) value for G.711 µ-law is 0xff, not 0x00. So
memset(&pSilentData->data[0], 0xff, iDataSize);
solved my problem.
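(A side note, not from the original post: the AVFrame detour in EDIT 3.0 is also suspect. memset(&pSilentData->data[0], 0xff, iDataSize) overwrites the frame's pointer table rather than sample bytes, and pkt.data ends up pointing at the AVFrame struct itself. For already-encoded G.711 a plain buffer is enough; a minimal sketch, assuming µ-law, where 0xff encodes silence (A-law would use 0xd5):)

// Build a silent G.711 mu-law packet from a plain buffer; no AVFrame is
// needed, since the packet carries encoded bytes, not raw samples.
AVPacket pkt = {0};
av_init_packet(&pkt);
uint8_t *pSilence = (uint8_t*) av_malloc(iDataSize);
memset(pSilence, 0xff, iDataSize);           // 0xff = mu-law zero amplitude
pkt.data = pSilence;
pkt.size = iDataSize;
pkt.stream_index = m_pAudioStream->index;
pkt.flags |= AV_PKT_FLAG_KEY;
// pts/dts are set exactly as for a normal audio packet
int ret = av_interleaved_write_frame(m_pFormatCtx, &pkt);
av_free(pSilence); // the muxer duplicates any packet it buffers, so this is safe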
Answered on 2015-08-12 20:36:32
Timestamps (such as dts) should be in AVStream.time_base units. You are requesting a video time base of 1/90000 and the default audio time base (1/9000), but you are using a time base of 1/1000000 to write your dts values. I am also not sure that the requested time bases are guaranteed to be maintained during header writing; your muxer may change the values and expect you to deal with the new ones.
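(A small illustration of that last point, not from the original answer: avformat_write_header() is allowed to replace the requested stream time bases, so it is safer to read them back after writing the header and rescale against those values.)

// The MOV muxer may adjust the stream time bases during header writing;
// re-read them afterwards instead of assuming the requested values survived.
if (avformat_write_header(m_pFormatCtx, NULL) < 0)
    return false;
AVRational videoTb = m_pVideoStream->time_base; // what the muxer actually uses
AVRational audioTb = m_pAudioStream->time_base;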
So, coming back to the code in the question, something like this:
int64_t dts = av_gettime();
dts = av_rescale_q(dts, (AVRational){1, 1000000}, (AVRational){1, 90000});
int duration = AUDIO_STREAM_DURATION; // 20
if(m_prevAudioDts > 0LL) {
    duration = dts - m_prevAudioDts;
}
does not work. Change it to something that uses the audio stream's time base, and do not set the duration unless you know what you are doing. (The same goes for the video.)
m_prevAudioDts = dts;
pkt.pts = AV_NOPTS_VALUE;
pkt.dts = m_currAudioDts;
m_currAudioDts += duration;
pkt.duration = duration;
This looks scary, especially combined with the video code. The problem here is that the first packet in both streams will have a timestamp of zero, regardless of the inter-packet delay between the streams. You need one parent currDts shared between all streams, otherwise your streams will perpetually be out of sync.
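(A minimal sketch of that shared parent clock, with hypothetical names: both writers derive timestamps from one origin, so if WriteAudio() first fires 3 seconds after WriteVideo(), its first dts already lands around 3 seconds in instead of at zero.)

// One clock origin shared by all streams; m_iClockStartUs is a member
// initialized to -1 before any packet is written.
int64_t AudioVideoRecorder::SharedDts(const AVStream *pStream) {
    int64_t nowUs = av_gettime();
    if (m_iClockStartUs < 0)
        m_iClockStartUs = nowUs;   // first packet of either stream starts the clock
    return av_rescale_q(nowUs - m_iClockStartUs,
                        (AVRational){1, 1000000}, pStream->time_base);
}
// In WriteVideo(): pkt.dts = SharedDts(m_pVideoStream);
// In WriteAudio(): pkt.dts = SharedDts(m_pAudioStream);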
EDIT
So, regarding your edit: if you have audio gaps, I think you need to insert silence (zeroed audio sample data) for the duration of the gap.
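(A hedged sketch of that gap filling, not from the original answer: with G.711, one byte encodes one sample, so with an audio time base of (AVRational){1, sampleRate} a gap of N ticks needs exactly N silence bytes. m_lastAudioEndDts is a hypothetical member tracking where the previous audio packet ended.)

// Before writing an audio packet that starts at newDts: if a hole opened
// up since the previous packet ended, fill it with mu-law silence first.
int64_t gap = newDts - m_lastAudioEndDts;    // audio time-base ticks == samples
if (gap > 0) {
    AVPacket silencePkt = {0};
    av_init_packet(&silencePkt);
    uint8_t *pSilence = (uint8_t*) av_malloc((size_t) gap);
    memset(pSilence, 0xff, (size_t) gap);    // 0xff = G.711 mu-law silence
    silencePkt.data = pSilence;
    silencePkt.size = (int) gap;
    silencePkt.dts = m_lastAudioEndDts;
    silencePkt.stream_index = m_pAudioStream->index;
    silencePkt.flags |= AV_PKT_FLAG_KEY;
    av_interleaved_write_frame(m_pFormatCtx, &silencePkt);
    av_free(pSilence);
}
m_lastAudioEndDts = newDts + iDataSize;      // this packet covers iDataSize samples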
https://stackoverflow.com/questions/31973107