
Getting ffmpeg tutorial01.c to run under the new SDK


Today I downloaded ffmpeg-full-sdk-v3.2, set it up with VC6, and got tutorials 1 through 6 running. The original tutorials need a few changes to build against the newer SDK; the changes are recorded below.

Building tutorial01 as-is fails at link time with:

tutorial01.obj : error LNK2001: unresolved external symbol _img_convert

Debug/tutorial01.exe : fatal error LNK1120: 1 unresolved externals

Error executing link.exe.

Creating browse info file...

tutorial01.exe - 2 error(s), 0 warning(s)

The linker cannot find img_convert because the newer FFmpeg SDK removes it in favor of the more capable sws_scale from libswscale.

To fix this, add the header:

#include "libswscale/swscale.h"

and add the following in main():

struct SwsContext *ctx;

ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                     pCodecCtx->pix_fmt,
                     pCodecCtx->width, pCodecCtx->height,
                     PIX_FMT_YUV420P,
                     SWS_BICUBIC, NULL, NULL, NULL);

sws_scale(ctx, pFrame->data, pFrame->linesize,
          0, pCodecCtx->height, pict.data, pict.linesize);
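For reference, here is a minimal sketch of a drop-in replacement for the removed img_convert(), built only on sws_getContext, sws_scale and sws_freeContext. The helper name convert_picture and its parameter order (mirroring the old img_convert signature) are my own, and it assumes source and destination have the same dimensions. Unlike the snippet above, it creates and frees a context on every call, which is simpler but slower; the full listing below instead creates the context once before the decode loop.

#include "libavcodec/avcodec.h"
#include "libswscale/swscale.h"

/* Sketch of an img_convert() replacement: pure pixel-format conversion,
 * no scaling. Creates and destroys a throwaway SwsContext per call. */
static int convert_picture(AVPicture *dst, int dst_pix_fmt,
                           AVPicture *src, int src_pix_fmt,
                           int width, int height)
{
  struct SwsContext *ctx;

  ctx = sws_getContext(width, height, src_pix_fmt,
                       width, height, dst_pix_fmt,
                       SWS_BICUBIC, NULL, NULL, NULL);
  if(ctx==NULL)
    return -1; /* conversion not supported */

  sws_scale(ctx, src->data, src->linesize, 0, height,
            dst->data, dst->linesize);

  sws_freeContext(ctx);
  return 0;
}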

Note that pict in the snippet above stands for the destination picture; in tutorial01, which writes PPM files, the destination format is PIX_FMT_RGB24 and the destination frame is pFrameRGB. Here is the modified tutorial01.c that runs under the new SDK:

// tutorial01.c

// Code based on a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de)

// Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1

// A small sample program that shows how to use libavformat and libavcodec to

// read video from a file.

//

// Use

//

// gcc -o tutorial01 tutorial01.c -lavformat -lavcodec -lz

//

// to build (assuming libavformat and libavcodec are correctly installed

// your system).

//

// Run using

//

// tutorial01 myvideofile.mpg

//

// to write the first five frames from "myvideofile.mpg" to disk in PPM

// format.

#include "libavcodec/avcodec.h"

#include "libavformat/avformat.h"

#include "libswscale/swscale.h"

#include <stdio.h>

void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame) {

  FILE *pFile;

  char szFilename[32];

  int  y;

 

  // Open file

  sprintf(szFilename, "frame%d.ppm", iFrame);

  pFile=fopen(szFilename, "wb");

  if(pFile==NULL)

    return;

 

  // Write header

  fprintf(pFile, "P6\n%d %d\n255\n", width, height);

 

  // Write pixel data

  for(y=0; y<height; y++)

    fwrite(pFrame->data[0]+y*pFrame->linesize[0], 1, width*3, pFile);

 

  // Close file

  fclose(pFile);

}

int main(int argc, char *argv[]) {

  AVFormatContext *pFormatCtx;

  int             i, videoStream;

  AVCodecContext  *pCodecCtx;

  AVCodec         *pCodec;

  AVFrame         *pFrame;

  AVFrame         *pFrameRGB;

  AVPacket        packet;

  int             frameFinished;

  int             numBytes;

  uint8_t         *buffer;

  struct SwsContext      *ctx;

 

  if(argc < 2) {

    printf("Please provide a movie file\n");

    return -1;

  }

  // Register all formats and codecs

  av_register_all();

 

  // Open video file

  if(av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL)!=0)

    return -1; // Couldn't open file

 

  // Retrieve stream information

  if(av_find_stream_info(pFormatCtx)<0)

    return -1; // Couldn't find stream information

 

  // Dump information about file onto standard error

  dump_format(pFormatCtx, 0, argv[1], 0);

 

  // Find the first video stream

  videoStream=-1;

  for(i=0; i<pFormatCtx->nb_streams; i++)

    if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) {

      videoStream=i;

      break;

    }

  if(videoStream==-1)

    return -1; // Didn't find a video stream

 

  // Get a pointer to the codec context for the video stream

  pCodecCtx=pFormatCtx->streams[videoStream]->codec;

 

  // Find the decoder for the video stream

  pCodec=avcodec_find_decoder(pCodecCtx->codec_id);

  if(pCodec==NULL) {

    fprintf(stderr, "Unsupported codec!\n");

    return -1; // Codec not found

  }

  // Open codec

  if(avcodec_open(pCodecCtx, pCodec)<0)

    return -1; // Could not open codec

 

  // Allocate video frame

  pFrame=avcodec_alloc_frame();

 

  // Allocate an AVFrame structure

  pFrameRGB=avcodec_alloc_frame();

  if(pFrameRGB==NULL)

    return -1;

 

  // Determine required buffer size and allocate buffer

  numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,

         pCodecCtx->height);

  buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

 

  // Assign appropriate parts of buffer to image planes in pFrameRGB

  // Note that pFrameRGB is an AVFrame, but AVFrame is a superset

  // of AVPicture

  avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,

   pCodecCtx->width, pCodecCtx->height);

  // Note: PIX_FMT_RGB24 here determines the output pixel format
  // (the PPM writer below expects packed RGB, 3 bytes per pixel)

  ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                       pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
                       PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);
  if(ctx==NULL)
    return -1; // Could not initialize the conversion context

 

  // Read frames and save first five frames to disk

  i=0;

  while(av_read_frame(pFormatCtx, &packet)>=0) {

    // Is this a packet from the video stream?

    if(packet.stream_index==videoStream) {

      // Decode video frame

      avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,

      packet.data, packet.size);

     

      // Did we get a video frame?

      if(frameFinished) {

    // Convert the image from its native format to RGB

    // img_convert((AVPicture *)pFrameRGB, PIX_FMT_RGB24,

    //                   (AVPicture*)pFrame, pCodecCtx->pix_fmt, pCodecCtx->width,

    //                  pCodecCtx->height);

   

    sws_scale(ctx, pFrame->data, pFrame->linesize,
              0, pCodecCtx->height,
              pFrameRGB->data, pFrameRGB->linesize);

   

    // Save the frame to disk

    if(++i<=5)

     SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height,

      i);

      }

    }

   

    // Free the packet that was allocated by av_read_frame

    av_free_packet(&packet);

  }

 

  // Free the RGB image

  av_free(buffer);

  av_free(pFrameRGB);

 

  // Free the YUV frame

  av_free(pFrame);

 

  // Free the scaler context and close the codec
  sws_freeContext(ctx);
  avcodec_close(pCodecCtx);

 

  // Close the video file

  av_close_input_file(pFormatCtx);

 

  return 0;

}
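Under VC6 the project also has to be linked against the SDK's import libraries. The exact .lib names depend on how ffmpeg-full-sdk-v3.2 was packaged; assuming the standard FFmpeg library names, the linker input can be declared directly in the source:

// Library names assumed from the standard FFmpeg layout; adjust them to the
// .lib files actually shipped with ffmpeg-full-sdk-v3.2.
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "swscale.lib")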


