Principle

Open the input -> open the output -> create output streams based on the input -> copy the stream settings -> read frames in a loop -> check whether the packet's time has reached the cut point and handle it -> set pts and dts -> write the packet -> clean up.

The key point is how pts and dts are set. See 《ffmpeg学习日记25-pts,dts概念的理解》 (FFmpeg study diary 25: understanding the concepts of pts and dts).

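Condensed, that step looks like the sketch below (placeholder names only: in_stream, out_stream, pts_start and dts_start stand for the per-stream variables that main.cpp sets up, and the FFmpeg headers included there are assumed). The pts/dts of the first packet kept after the seek are subtracted so the clip starts at time zero, and the result is rescaled from the input stream's time_base to the output stream's time_base:

// Minimal sketch of the pts/dts handling; the names are placeholders for the
// per-stream variables set up in main.cpp below.
pkt.pts = av_rescale_q_rnd(pkt.pts - pts_start,
                           in_stream->time_base, out_stream->time_base,
                           (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.dts = av_rescale_q_rnd(pkt.dts - dts_start,
                           in_stream->time_base, out_stream->time_base,
                           (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
if (pkt.pts < 0) pkt.pts = 0;   // guard against a slightly negative first timestamp
if (pkt.dts < 0) pkt.dts = 0;
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
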
Example code

CMakeLists.txt

cmake_minimum_required(VERSION 3.14)
project(clipVideo)

#set(CMAKE_CXX_STANDARD 11)
#set(CMAKE_CXX_STANDARD_REQUIRED YES)

set(CMAKE_AUTOMOC ON) # Meta-Object Compiler
set(CMAKE_AUTORCC ON) # Resource Compiler
set(CMAKE_AUTOUIC ON) # User Interface Compiler
set(CMAKE_BUILD_TYPE Debug)

#find_package(PkgConfig REQUIRED)
#pkg_check_modules(AVLIB REQUIRED IMPORTED_TARGET libavcodec libavformat libavutil libswresample libswscale)

if(CMAKE_SYSTEM_NAME MATCHES "Linux")
    set(CURRENT_SYSTEM Linux)
elseif(CMAKE_SYSTEM_NAME MATCHES "Windows")
    set(CURRENT_SYSTEM Windows)
endif()

set(FFMPEG_LIB_DIR ${PROJECT_SOURCE_DIR}/../../lib)
set(FFMPEG_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/../../include)

include_directories(
    ${PROJECT_SOURCE_DIR}
    #${PROJECT_SOURCE_DIR}/../../include
    ${FFMPEG_INCLUDE_DIR}
)
link_directories(
    #${PROJECT_SOURCE_DIR}/../../lib
    ${FFMPEG_LIB_DIR}
)

# External dependencies that find_package cannot locate can be added with add_library.
# SHARED means the library being added is a shared library.
# IMPORTED means an already existing, prebuilt library is being imported.
add_library( avcodec STATIC IMPORTED )
add_library( avfilter STATIC IMPORTED )
add_library( swresample STATIC IMPORTED )
add_library( swscale STATIC IMPORTED )
add_library( avformat STATIC IMPORTED )
add_library( avutil STATIC IMPORTED )

# Set the import path of each imported library
set_target_properties( avcodec PROPERTIES IMPORTED_LOCATION ${FFMPEG_LIB_DIR}/avcodec.lib )
set_target_properties( avfilter PROPERTIES IMPORTED_LOCATION ${FFMPEG_LIB_DIR}/avfilter.lib )
set_target_properties( swresample PROPERTIES IMPORTED_LOCATION ${FFMPEG_LIB_DIR}/swresample.lib )
set_target_properties( swscale PROPERTIES IMPORTED_LOCATION ${FFMPEG_LIB_DIR}/swscale.lib )
set_target_properties( avformat PROPERTIES IMPORTED_LOCATION ${FFMPEG_LIB_DIR}/avformat.lib )
set_target_properties( avutil PROPERTIES IMPORTED_LOCATION ${FFMPEG_LIB_DIR}/avutil.lib )

add_executable(${PROJECT_NAME} main.cpp)

target_link_libraries(${PROJECT_NAME}
    avcodec
    avfilter
    avformat
    avutil
    #postproc
    swresample
    swscale
)

main.cpp

#include <iostream>
#include <string>
#include <cstdio>
#include <cstdlib>
#include <cstring>

#ifdef __cplusplus
extern "C" {
#endif
// FFmpeg C headers
#include "libavutil/avutil.h"
#include "libavutil/mathematics.h"
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#ifdef __cplusplus
}
#endif

#ifdef _WIN32
#include <direct.h>
#elif __APPLE__ || __linux__
#include <unistd.h>
#endif

using namespace std;

int main()
{
    cout << "Hello World!" << endl;
    printf("ffmpeg version:%s\n", av_version_info());
    std::cout << "current path:" << getcwd(NULL, 0) << std::endl;

    int ret = 0;
    float fromSeconds = 3.0, endSeconds = 8.0;     // cut the segment from 3 s to 8 s
    AVFormatContext *ifmt_ctx = NULL, *ofmt1_ctx = NULL;
    std::string ifileName = "../../../15s.mp4", ofileName1 = "1.mp4";
    AVPacket pkt;

    // open the input and read its stream information
    if ((ret = avformat_open_input(&ifmt_ctx, ifileName.c_str(), NULL, NULL)) < 0) {
        std::cout << "can not open the input file format context!" << std::endl;
        return 0;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
        std::cout << "can not find the input stream info" << std::endl;
        return 0;
    }

    // create the output context; the muxer is guessed from the file name
    avformat_alloc_output_context2(&ofmt1_ctx, NULL, NULL, ofileName1.c_str());
    if (!ofmt1_ctx) {
        std::cout << "could not create output1 context" << std::endl;
        return 0;
    }

    // copy the stream parameters to the output context
    for (unsigned int i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *instream = ifmt_ctx->streams[i];
        const AVCodec *codec = avcodec_find_decoder(instream->codecpar->codec_id);
        AVStream *outStream = avformat_new_stream(ofmt1_ctx, codec);
        if (!outStream) {
            std::cout << "failed allocating output stream" << std::endl;
            return 0;
        }
        AVCodecContext *codecCtx = avcodec_alloc_context3(codec);
        ret = avcodec_parameters_to_context(codecCtx, instream->codecpar);
        if (ret < 0) {
            std::cout << "failed to copy instream codecpar to codec context" << std::endl;
            return 0;
        }
        // codec_tag = 0 lets the muxer pick a tag that fits the output container
        codecCtx->codec_tag = 0;
        if (ofmt1_ctx->oformat->flags & AVFMT_GLOBALHEADER) {
            codecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
        }
        ret = avcodec_parameters_from_context(outStream->codecpar, codecCtx);
        if (ret < 0) {
            std::cout << "failed to copy codec context to outstream codecpar" << std::endl;
            return 0;
        }
        avcodec_free_context(&codecCtx);
    }

    // open the output file if the muxer needs one
    if (!(ofmt1_ctx->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt1_ctx->pb, ofileName1.c_str(), AVIO_FLAG_WRITE);
        if (ret < 0) {
            std::cout << "could not open output 1.mp4" << std::endl;
            return 0;
        }
    }

    // write the file header
    ret = avformat_write_header(ofmt1_ctx, NULL);
    if (ret < 0) {
        std::cout << "error writing header of 1.mp4" << std::endl;
        return 0;
    }

    // seek to the start of the cut; AVSEEK_FLAG_ANY may land on a non-key frame
    ret = av_seek_frame(ifmt_ctx, -1, (int64_t)(fromSeconds * AV_TIME_BASE), AVSEEK_FLAG_ANY);
    std::cout << "ret:" << ret << std::endl;

    // per-stream dts/pts of the first packet kept after the seek
    int64_t *dstStartFrom = (int64_t *)malloc(sizeof(int64_t) * ifmt_ctx->nb_streams);
    memset(dstStartFrom, 0, sizeof(int64_t) * ifmt_ctx->nb_streams);
    int64_t *ptsStartFrom = (int64_t *)malloc(sizeof(int64_t) * ifmt_ctx->nb_streams);
    memset(ptsStartFrom, 0, sizeof(int64_t) * ifmt_ctx->nb_streams);

    while (1) {
        AVStream *instream, *outstream;
        ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0)
            break;
        std::cout << "pkt.pts:" << pkt.pts << std::endl;
        instream  = ifmt_ctx->streams[pkt.stream_index];
        outstream = ofmt1_ctx->streams[pkt.stream_index];

        // stop once the packet time passes the end of the cut
        if (av_q2d(instream->time_base) * pkt.pts > endSeconds) {
            av_packet_unref(&pkt);
            break;
        }
        if (dstStartFrom[pkt.stream_index] == 0) {
            dstStartFrom[pkt.stream_index] = pkt.dts;
        }
        if (ptsStartFrom[pkt.stream_index] == 0) {
            ptsStartFrom[pkt.stream_index] = pkt.dts;
        }

        // re-base the timestamps to the first kept packet and rescale to the output time_base
        pkt.pts = av_rescale_q_rnd(pkt.pts - ptsStartFrom[pkt.stream_index], instream->time_base, outstream->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts - dstStartFrom[pkt.stream_index], instream->time_base, outstream->time_base, (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        if (pkt.pts < 0) {
            pkt.pts = 0;
        }
        if (pkt.dts < 0) {
            pkt.dts = 0;
        }
        pkt.duration = av_rescale_q(pkt.duration, instream->time_base, outstream->time_base);
        pkt.pos = -1;

        ret = av_interleaved_write_frame(ofmt1_ctx, &pkt);
        if (ret) {
            std::cout << "error muxing packet" << std::endl;
            break;
        }
        av_packet_unref(&pkt);
    }

    free(dstStartFrom);
    free(ptsStartFrom);

    // write the trailer and release everything
    av_write_trailer(ofmt1_ctx);
    avformat_close_input(&ifmt_ctx);
    if (ofmt1_ctx && !(ofmt1_ctx->oformat->flags & AVFMT_NOFILE)) {
        avio_closep(&ofmt1_ctx->pb);
    }
    avformat_free_context(ofmt1_ctx);
    return 0;
}
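
As written, the program does a pure stream copy (no decoding or re-encoding) of the packets between roughly the 3-second and 8-second marks of ../../../15s.mp4 and muxes them into 1.mp4. One consequence of seeking with AVSEEK_FLAG_ANY is that the cut may start on a non-key frame, so the first frames of the clip may not decode cleanly in some players; AVSEEK_FLAG_BACKWARD, which seeks to the preceding keyframe, is the usual choice when a clean start matters more than an exact cut point.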

Summary

  1. The operation here cuts a video and writes the result in the same format, so copying the stream parameters directly is enough. To produce the output in a different format, the output format's codec parameters have to be set up separately, as shown in the sketch below.

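To illustrate that point, below is a minimal sketch (not taken from the article) of what setting the output parameters separately could look like when the target container and codec differ from the input. FLV/H.264 and all numeric values are assumptions chosen for the example, the FFmpeg headers from main.cpp are assumed, and the decode/convert/encode loop that a real transcode needs is omitted:

// Sketch only: configure an encoder by hand instead of copying the input's codecpar.
AVFormatContext *ofmt_ctx = NULL;
avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", "out.flv"); // force a specific muxer

const AVCodec *enc = avcodec_find_encoder(AV_CODEC_ID_H264);       // assumed target codec
AVStream *outStream = avformat_new_stream(ofmt_ctx, enc);
AVCodecContext *encCtx = avcodec_alloc_context3(enc);

encCtx->width     = 1280;                    // placeholder values
encCtx->height    = 720;
encCtx->pix_fmt   = AV_PIX_FMT_YUV420P;
encCtx->time_base = av_make_q(1, 25);
encCtx->bit_rate  = 2000000;
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
    encCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

avcodec_open2(encCtx, enc, NULL);                                  // error checks omitted
avcodec_parameters_from_context(outStream->codecpar, encCtx);
outStream->time_base = encCtx->time_base;
// ...a real transcode would then decode input packets, convert frames if
// necessary, encode them with encCtx and mux the encoded packets...
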
References

  • Implementing video cutting with ffmpeg

  • FFmpeg 'avcodec_copy_context' deprecated (video trimming)

  • A detailed guide to av_seek_frame

  • The time base (time_base) and AV_TIME_BASE in FFmpeg

  • FFmpeg timestamps explained