ffmpeg音视频合成

原理 : 主要是拿到视频文件的视频流,然后拿到音频文件的音频流,根据时间戳一帧一帧地封装成一个新的视频文件

效果:音频文件和视频文件合成一个文件,合成的文件时间就是两个文件中短的时间。 源代码如下:具体看注释


#include <jni.h>
#include <android/log.h>

extern "C" {
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
};


#define  LOG_TAG    "JNI_TAG"
#define  LOGD(...)  __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)


extern "C"
JNIEXPORT void JNICALL
Java_com_zzw_ffmpegdemo_FFmpegHelper_megre(JNIEnv *env, jobject instance, jstring musicPath_,
                                           jstring videoPath_,jstring outPath_) {


    AVOutputFormat *ofmt = NULL;
    //Input AVFormatContext and Output AVFormatContext
    AVFormatContext *ifmt_ctx_v = NULL, *ifmt_ctx_a = NULL,*ofmt_ctx = NULL;
    int ret, i;
    int videoindex_v=-1,videoindex_out=-1;
    int audioindex_a=-1,audioindex_out=-1;
    int frame_index=0;
    int64_t cur_pts_v=0,cur_pts_a=0;


    const char *musicPath = env->GetStringUTFChars(musicPath_, 0);
    const char *videoPath = env->GetStringUTFChars(videoPath_, 0);
    const char *outPath = env->GetStringUTFChars(outPath_, 0);

    av_register_all();
    //--------------------------------input init start---------------------------------------------
    if ((ret = avformat_open_input(&ifmt_ctx_v, videoPath, 0, 0)) < 0) {//打开输入的视频文件
        LOGD( "Could not open input video file.");
        goto end;
    }
   if ((ret = avformat_find_stream_info(ifmt_ctx_v, 0)) < 0) {//获取视频文件信息
        LOGD( "Failed to retrieve input video stream information");
        goto end;
    }
    if ((ret = avformat_open_input(&ifmt_ctx_a, musicPath, 0, 0)) < 0) {//打开输入的音频文件
        LOGD( "Could not open input audio file.");
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx_a, 0)) < 0) {//获取音频文件信息
        LOGD( "Failed to retrieve input audio stream information");
        goto end;
    }

//    LOGD("===========Input Information==========\n");
//    av_dump_format(ifmt_ctx_v, 0, videoPath, 0);
//    av_dump_format(ifmt_ctx_a, 0, musicPath, 0);
//    LOGD("======================================\n");

    //--------------------------------input init end---------------------------------------------

    //--------------------------------out init start---------------------------------------------
    //初始化输出码流的AVFormatContext
    avformat_alloc_output_context2(&ofmt_ctx,NULL,NULL, outPath);
    if(!ofmt_ctx){
        LOGD( "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    ofmt = ofmt_ctx->oformat;
    //--------------------------------out init end-----------------------------------------------


    //--------------------------------相关值获取-----------------------------------------------
    //从输入video的AVStream中获取一个video输出的out_stream
    for (i = 0; i < ifmt_ctx_v->nb_streams; i++) {
        if(ifmt_ctx_v->streams[i]->codecpar->codec_type==AVMEDIA_TYPE_VIDEO){
            AVStream* in_stream = ifmt_ctx_v->streams[i];
            AVCodec *dec = avcodec_find_decoder(in_stream->codecpar->codec_id);
            if(!dec){
                LOGD( "Could not find decoder\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }
            AVStream* out_stream = avformat_new_stream(ofmt_ctx,dec);
            videoindex_v =i;
            if(!out_stream){
                LOGD( "Failed allocating output stream\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }
            videoindex_out=out_stream->index;

            AVCodecContext* avCodecContext = avcodec_alloc_context3(dec);
            if ((ret =avcodec_parameters_to_context(avCodecContext, in_stream->codecpar)) < 0) {
                avcodec_free_context(&avCodecContext);
                avCodecContext = NULL;
                LOGD("can not fill decodecctx");
                goto end;
            }
            avCodecContext->codec_tag = 0;
            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {
                avCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;
            }
            ret = avcodec_parameters_from_context(out_stream->codecpar, avCodecContext);
            if (ret < 0) {
                printf("Failed to copy context input to output stream codec context\n");
                goto end;
            }
            break;
        }
    }
    //从输入audio的AVStream中获取一个audio输出的out_stream
    for (i = 0; i < ifmt_ctx_a->nb_streams; i++) {
        if(ifmt_ctx_a->streams[i]->codecpar->codec_type==AVMEDIA_TYPE_AUDIO){
            AVStream* in_stream = ifmt_ctx_a->streams[i];
            AVCodec *dec = avcodec_find_decoder(in_stream->codecpar->codec_id);
            if(!dec){
                LOGD( "Could not find decoder\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }
            AVStream* out_stream = avformat_new_stream(ofmt_ctx,dec);
            audioindex_a =i;
            if(!out_stream){
                LOGD( "Failed allocating output stream\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }
            audioindex_out=out_stream->index;

            AVCodecContext* avCodecContext = avcodec_alloc_context3(dec);
            if ((ret =avcodec_parameters_to_context(avCodecContext, in_stream->codecpar)) < 0) {
                avcodec_free_context(&avCodecContext);
                avCodecContext = NULL;
                LOGD("can not fill decodecctx");
                goto end;
            }
            avCodecContext->codec_tag = 0;
            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {
                avCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;
            }
            ret = avcodec_parameters_from_context(out_stream->codecpar, avCodecContext);
            if (ret < 0) {
                printf("Failed to copy context input to output stream codec context\n");
                goto end;
            }
            break;
        }
    }

//    LOGD("==========Output Information==========\n");
//    av_dump_format(ofmt_ctx, 0, outPath, 1);
//    LOGD("======================================\n");


//    -------------------------------合成文件-------------------------------------------

    // Open output file
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, outPath, AVIO_FLAG_WRITE);
        if (ret < 0) {
            LOGD("Could not open output file %s ", outPath);
            goto end;
        }
    }

    // Write file header
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        LOGD("Error occurred when opening output file\n");
        goto end;
    }


    while (1) {
        AVFormatContext *ifmt_ctx;
        int stream_index=0;
        AVStream *in_stream, *out_stream;
        AVPacket *pkt = av_packet_alloc();

        //Get an AVPacket .   av_compare_ts是比较时间戳用的。通过该函数可以决定该写入视频还是音频。
        //video 在 audio之前
        if(av_compare_ts(cur_pts_v,
                         ifmt_ctx_v->streams[videoindex_v]->time_base,
                         cur_pts_a,
                         ifmt_ctx_a->streams[audioindex_a]->time_base) <= 0){
            ifmt_ctx=ifmt_ctx_v;
            stream_index=videoindex_out;
        } else{
            ifmt_ctx=ifmt_ctx_a;
            stream_index=audioindex_out;
        }

        //如果video在audio之后
        if(av_compare_ts(cur_pts_v,
                         ifmt_ctx_v->streams[videoindex_v]->time_base,
                         cur_pts_a,
                         ifmt_ctx_a->streams[audioindex_a]->time_base) <= 0){
            ifmt_ctx=ifmt_ctx_v;
            stream_index=videoindex_out;

            if(av_read_frame(ifmt_ctx, pkt) >= 0){
                do{
                    if(pkt->stream_index==videoindex_v){
                        in_stream  = ifmt_ctx->streams[pkt->stream_index];
                        out_stream = ofmt_ctx->streams[stream_index];
                        //FIX:No PTS (Example: Raw H.264) H.264裸流没有PTS,因此必须手动写入PTS
                        //Simple Write PTS
                        if(pkt->pts==AV_NOPTS_VALUE){
                            //Write PTS
                            AVRational time_base1=in_stream->time_base;
                            //Duration between 2 frames (us)
                            int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(in_stream->r_frame_rate);
                            //Parameters
                            pkt->pts=(double)(frame_index*calc_duration)/(double)(av_q2d(time_base1)*AV_TIME_BASE);
                            pkt->dts=pkt->pts;
                            pkt->duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);
                            frame_index++;
                        }

                        cur_pts_v=pkt->pts;
                        break;
                    }
                }while(av_read_frame(ifmt_ctx, pkt) >= 0);
            }else{
                av_packet_free(&pkt);
                av_free(pkt);
                break;
            }
        }else{
            ifmt_ctx=ifmt_ctx_a;
            stream_index=audioindex_out;
            if(av_read_frame(ifmt_ctx, pkt) >= 0){
                do{
                    if(pkt->stream_index==audioindex_a){
                        in_stream  = ifmt_ctx->streams[pkt->stream_index];
                        out_stream = ofmt_ctx->streams[stream_index];
                        //FIX:No PTS
                        //Simple Write PTS
                        if(pkt->pts==AV_NOPTS_VALUE){
                            //Write PTS
                            AVRational time_base1=in_stream->time_base;
                            //Duration between 2 frames (us)
                            int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(in_stream->r_frame_rate);
                            //Parameters
                            pkt->pts=(double)(frame_index*calc_duration)/(double)(av_q2d(time_base1)*AV_TIME_BASE);
                            pkt->dts=pkt->pts;
                            pkt->duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);
                            frame_index++;
                        }
                        cur_pts_a=pkt->pts;
                        break;
                    }
                }while(av_read_frame(ifmt_ctx, pkt) >= 0);
            }else{
                av_packet_free(&pkt);
                av_free(pkt);
                break;
            }
        }

        //Convert PTS/DTS
        pkt->pts = av_rescale_q_rnd(pkt->pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        pkt->dts = av_rescale_q_rnd(pkt->dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        pkt->duration = av_rescale_q(pkt->duration, in_stream->time_base, out_stream->time_base);
        pkt->pos = -1;
        pkt->stream_index=stream_index;

        LOGD("Write 1 Packet. size:%5d\tpts:%lld\n",pkt->size,pkt->pts);
        //Write AVPacket 音频或视频裸流
        if (av_interleaved_write_frame(ofmt_ctx, pkt) < 0) {
            LOGD( "Error muxing packet\n");
            av_packet_free(&pkt);
            av_free(pkt);
            break;
        }
        av_packet_free(&pkt);
        av_free(pkt);
    }
    //Write file trailer
    av_write_trailer(ofmt_ctx);

end:
    avformat_close_input(&ifmt_ctx_v);
    avformat_close_input(&ifmt_ctx_a);
    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    env->ReleaseStringUTFChars(musicPath_, musicPath);
    env->ReleaseStringUTFChars(videoPath_, videoPath);
    env->ReleaseStringUTFChars(outPath_, outPath);
}

本文参与腾讯云自媒体分享计划,欢迎正在阅读的你也加入,一起分享。

发表于

我来说两句

0 条评论
登录 后参与评论

相关文章

来自专栏游戏杂谈

cocos2d-x 2.x版本接入bugly的总结

最开始项目使用的是自己DIY的很简陋的上报系统,后来改成google breakpad来上报,发现其实都做的不太理想,游戏引擎因为版本历史问题存在一些崩溃问题。...

1550
来自专栏Golang语言社区

Go中的依赖注入

I have written a small utility package to handle dependency injection in Go (it'...

2564
来自专栏菩提树下的杨过

Flash在线拍摄用户头象

很多网站在上传用户头象时,除了传统方式上传外,都支持在线摄像头拍照并做简单编辑,完成之后再将图象数据提交到服务端(比如ASP.Net),这几天正好需要这个功能,...

3278
来自专栏王磊的博客

JSON.stringify转换Date不正确的解決方法

JSON.stringify转换Date不正确的原因:国际时区(UTC)和中国时区(GMT)的原因,东八区+8等于国际时区。 解决方法,重新Es5的Date.p...

3014
来自专栏Java成神之路

Java微信公众平台开发_04_自定义菜单

自定义菜单中请求包的数据是Json字符串格式的,请参见:  Java_数据交换_fastJSON_01_用法入门

1433
来自专栏AhDung

js日期计算及快速获取周、月、季度起止日

机缘巧合,这段接触了一下js,刚开始各种磕碰各种不顺手,一个日期计算都折腾我半天,积累了一些,赶紧码下:  

3422
来自专栏Golang语言社区

在Go中使用服务对象模式

NOTE: Most of the code and ideas in this post are things I have been experimenti...

1052
来自专栏WindCoder

Best Programming Editors? A Never Ending Battle With No Clear Winner

原文:Best Programming Editors? A Never Ending Battle With No Clear Winner

741
来自专栏逸鹏说道

逆天通用水印扩展篇~新增剪贴板系列的功能和手动配置,卸除原基础不常用的功能

常用技能:http://www.cnblogs.com/dunitian/p/4822808.html#skill 逆天博客:http://dnt.dkil.n...

28510
来自专栏Jerry的SAP技术分享

如何在SAP CRM里创建和消费Web service

The following steps demonstrates how to expose a function module as a web servic...

1261

扫码关注云+社区

领取腾讯云代金券