QT 关于ffmpeg的保存图片的例子
我在程序里面使用这个函数释放空间的时候,老是找不到,没搞清楚是我编译的问题还是版本的问题。
我打开avformat.h 里面是有这个函数的。我查看了一下,调用的头文件有没有错。才发现我有两个文件,内容有些不一样。于是我把av_free_packet()复制到没有的那个,编译提示定义了两次,在avcodec.h里面有个定义。暂时没搞明白,我直接把那个定义函数复制到avcodec里面,编译通过。但是保存的图片没有东西,源码上传了。
/**
 * Write one decoded video frame to disk as a binary PPM (P6) image.
 *
 * @param pFrame  RGB24 frame; only data[0] (packed pixel rows) and
 *                linesize[0] (bytes per row, may include padding) are read.
 * @param width   Frame width in pixels.
 * @param height  Frame height in pixels (number of rows written).
 * @param iFrame  Frame number, used to build "frame<N>.ppm".
 *
 * Silently returns if the output file cannot be opened.
 */
void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame)
{
FILE *pFile;
char szFilename[32];
int y;
// Build the output filename. snprintf bounds the write so a huge
// frame number cannot overflow the 32-byte buffer (sprintf could).
snprintf(szFilename, sizeof szFilename, "frame%d.ppm", iFrame);
pFile=fopen(szFilename, "wb");
if(pFile==NULL)
return;
// PPM header: magic "P6", dimensions, max channel value.
fprintf(pFile, "P6\n%d %d\n255\n", width, height);
// BUG FIX: the original wrote data[0] (row 0) on every iteration, so the
// file contained the first scanline repeated height times — this is why
// the saved pictures looked empty/garbled. Row y starts at
// data[0] + y*linesize[0]; linesize[0] can exceed width*3 due to padding,
// so we must step by it rather than by width*3.
for(y=0; y<height; y++)
fwrite(pFrame->data[0]+y*pFrame->linesize[0], 1, width*3, pFile);
// Close (and flush) the file.
fclose(pFile);
}
/* Demux/decode driver (fragment — not a complete function). All variables
   (pFormatCtx, pCodecCtx, packet, buffer, i, ...) are declared outside this
   span. Uses pre-0.8 FFmpeg API names (av_open_input_file, av_find_stream_info,
   avcodec_decode_video, av_close_input_file, ...) that were removed in later
   releases — NOTE(review): likely the source of the poster's "function not
   found" linker/header confusion; verify against the installed FFmpeg version. */
av_register_all();
const char *filename="img/test.avi";
/* Open the container and read stream headers; on failure only log and skip. */
if(av_open_input_file(&pFormatCtx,filename,NULL,0,0)!=0) printf("av_open_input_file ERR!\n");
else{
if(av_find_stream_info(pFormatCtx)<0) printf("av_find_stream_info ERR!\n");
else{
/* Print container/stream info for debugging. */
dump_format(pFormatCtx, 0, filename, false);
/* Locate the first video stream in the container. */
videoStream=-1;
for(i=0; i<pFormatCtx->nb_streams; i++)
if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO)
{
videoStream=i;
break;
}
if(videoStream==-1) printf("videoStream ERR!\n");
else{
/* Grab the codec context for that stream and look up its decoder. */
pCodecCtx=pFormatCtx->streams[videoStream]->codec;
pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL) printf("Unsupported codec!\n");
else{
/* Tell the codec we can handle truncated bitstreams (frame boundaries
   may fall in the middle of packets). */
if(pCodec->capabilities & CODEC_CAP_TRUNCATED)
pCodecCtx->flags|=CODEC_FLAG_TRUNCATED;
if(avcodec_open(pCodecCtx, pCodec)<0) printf("avcodec_open ERR!\n");
else{
// if(pCodecCtx->frame_rate>1000 && pCodecCtx->frame_rate_base==1)
// pCodecCtx->frame_rate_base=1000;
// Hack to correct wrong frame rates that seem to be generated by some codecs
if(pCodecCtx->time_base.num>1000 && pCodecCtx->time_base.den==1)
pCodecCtx->time_base.den=1000;
/* Allocate one frame for decoded native-format data, one for RGB. */
pFrame=avcodec_alloc_frame();
pFrameRGB=avcodec_alloc_frame();
if(pFrameRGB==NULL) printf("pFrameRGB==NULL ERR!\n");
else{
// Determine required buffer size and allocate buffer
numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
pCodecCtx->height);
/* NOTE(review): av_malloc result is not checked before use. */
buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
// Assign appropriate parts of buffer to image planes in pFrameRGB
// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
pCodecCtx->width, pCodecCtx->height);
// Read frames and save first five frames to disk
i=0;
while(av_read_frame(pFormatCtx, &packet)>=0) {
// Is this a packet from the video stream?
if(packet.stream_index==videoStream) {
// Decode video frame
avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,packet.data, packet.size);
// Did we get a video frame?
if(frameFinished) {
// Convert the image from its native format to RGB
/* Conversion context is created lazily on the first decoded frame
   and reused; 'static' keeps it alive across loop iterations. */
static struct SwsContext *img_convert_ctx;
#if 0
img_convert((AVPicture *)pFrameRGB, PIX_FMT_RGB24,(AVPicture*)pFrame, pCodecCtx->pix_fmt, pCodecCtx->width,pCodecCtx->height);
#endif
// Convert the image into YUV format that SDL uses
if(img_convert_ctx == NULL) {
int w = pCodecCtx->width;
int h = pCodecCtx->height;
img_convert_ctx = sws_getContext(w, h,
pCodecCtx->pix_fmt,
w, h, PIX_FMT_RGB24, SWS_BICUBIC,
NULL, NULL, NULL);
if(img_convert_ctx == NULL) {
fprintf(stderr, "Cannot initialize the conversion context!\n");
exit(1);
}
}
/* Scale/convert native-format pFrame into RGB24 pFrameRGB. */
if(sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize)<=0)
printf("sws_scale ERR\n");
else{
// Save the frame to disk
i++;
/* Only frames 81..100 are written to disk. */
if(i<=100 &&i>80)
SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height,i);
}
}
}
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
/* memory leak (translated from the original Chinese note). NOTE(review):
   av_free_packet above runs once per loop iteration, so each packet IS
   freed — the suspected leak is unconfirmed from this fragment. */
}
}
}
}
}
}
}
/* Teardown. NOTE(review): these run even when an earlier step failed, in
   which case buffer/pFrameRGB/pFrame/pCodecCtx/pFormatCtx may never have
   been assigned — freeing them is then undefined behavior unless they are
   initialized to NULL where declared (not visible here); verify. */
av_free(buffer);
av_free(pFrameRGB);
// Free the YUV frame
av_free(pFrame);
// Close the codec
avcodec_close(pCodecCtx);
// Close the video file
av_close_input_file(pFormatCtx);
[解决办法]
估计你这个问题在这里是得不到答案了
我接个分吧