How to get duration information (and more) from ffmpeg
I messed around with ffmpeg a while ago and found the learning curve to be pretty steep. So even though the OP asked this question months ago, I'll post some code in case others here on SO are looking to do something similar. The Open() function below is complete, but it relies on many asserts and lacks proper error handling.
One immediate difference I see is that I used av_open_input_file instead of avformat_open_input. I also didn't use av_dump_format.
Calculating the duration can be tricky, especially with H.264 and MPEG-2; see how durationSec is calculated below.
Note: This example also uses the JUCE C++ Utility Library.
Note2: This code is a modified version of the ffmpeg tutorial.
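To preview the arithmetic with made-up numbers: for an H.264 stream with time_base = 1/50 and ticks_per_frame = 2, the frame rate works out to 50 / (1 × 2) = 25 fps, and a stream duration of 2500 time_base ticks works out to 2500 × 2 / 50 = 100 seconds, using the calculations in the code below.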
void VideoCanvas::Open(const char* videoFileName)
{
    Logger::writeToLog(String(L"Opening video file ") + videoFileName);
    Close();

    AVCodec *pCodec;

    // register all formats and codecs
    av_register_all();

    // open video file
    int ret = av_open_input_file(&pFormatCtx, videoFileName, NULL, 0, NULL);
    if (ret != 0) {
        Logger::writeToLog("Unable to open video file: " + String(videoFileName));
        Close();
        return;
    }

    // Retrieve stream information
    ret = av_find_stream_info(pFormatCtx);
    jassert(ret >= 0);
    // Find the first video and audio streams
    videoStream = -1;
    audioStream = -1;
    for (int i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && videoStream < 0) {
            videoStream = i;
        }
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audioStream < 0) {
            audioStream = i;
        }
    } // end for i
    jassert(videoStream != -1);
    jassert(audioStream != -1);
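    // (Aside, my addition: newer ffmpeg versions also provide
    // av_find_best_stream() to do this stream selection in one call.)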
    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;
    jassert(pCodecCtx != nullptr);

    /**
     * This is the fundamental unit of time (in seconds) in terms
     * of which frame timestamps are represented. For fixed-fps content,
     * timebase should be 1/framerate and timestamp increments should be
     * identically 1.
     * - encoding: MUST be set by user.
     * - decoding: Set by libavcodec.
     */
    AVRational avr = pCodecCtx->time_base;
    Logger::writeToLog("time_base = " + String(avr.num) + "/" + String(avr.den));
    /**
     * For some codecs, the time base is closer to the field rate than the frame rate.
     * Most notably, H.264 and MPEG-2 specify time_base as half of frame duration
     * if no telecine is used ...
     *
     * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2.
     */
    ticksPerFrame = pCodecCtx->ticks_per_frame;
    Logger::writeToLog("ticks_per_frame = " + String(pCodecCtx->ticks_per_frame));
    // calculate the video stream duration in seconds, then break it into H/M/S
    durationSec = static_cast<double>(pFormatCtx->streams[videoStream]->duration)
                * static_cast<double>(ticksPerFrame) / static_cast<double>(avr.den);
    double fH = durationSec / 3600.;
    int H = static_cast<int>(fH);
    double fM = (fH - H) * 60.;
    int M = static_cast<int>(fM);
    double fS = (fM - M) * 60.;
    int S = static_cast<int>(fS);
    Logger::writeToLog("Video stream duration = " + String(H) + "H " + String(M) + "M " + String(fS, 3) + "S");
    // calculate frame rate based on time_base and ticks_per_frame
    frameRate = static_cast<double>(avr.den) / static_cast<double>(avr.num * pCodecCtx->ticks_per_frame);
    Logger::writeToLog("Frame rate = " + String(frameRate));
    // audio codec context
    if (audioStream != -1) {
        aCodecCtx = pFormatCtx->streams[audioStream]->codec;
        Logger::writeToLog("Audio sample rate = " + String(aCodecCtx->sample_rate));
        Logger::writeToLog("Audio channels = " + String(aCodecCtx->channels));
    }
    jassert(aCodecCtx != nullptr);

    // format:
    // The "S" in "S16SYS" stands for "signed", the 16 says that each sample is 16 bits long,
    // and "SYS" means that the endian-order will depend on the system you are on. This is the
    // format that avcodec_decode_audio2 will give us the audio in.

    // open the audio codec
    if (audioStream != -1) {
        aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
        if (!aCodec) {
            Logger::writeToLog(L"Unsupported codec ID = " + String(aCodecCtx->codec_id));
            Close();
            return; // TODO: should we just play video if audio codec doesn't work?
        }
        ret = avcodec_open(aCodecCtx, aCodec);
        jassert(ret >= 0);
    }
    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == nullptr) {
        jassert(false);
        Logger::writeToLog("Unsupported video codec!");
        Close();
        return;
    }

    // Open the video codec
    ret = avcodec_open(pCodecCtx, pCodec);
    jassert(ret >= 0);
    // Allocate video frame
    pFrame = avcodec_alloc_frame();
    jassert(pFrame != nullptr);

    // Allocate an AVFrame structure for the RGB-converted frame
    pFrameRGB = avcodec_alloc_frame();
    jassert(pFrameRGB != nullptr);

    // Determine the required buffer size and allocate it
    int numBytes = avpicture_get_size(PIX_FMT_RGB32, pCodecCtx->width, pCodecCtx->height);
    jassert(numBytes != 0);
    buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
    jassert(buffer != nullptr);
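    // Note (my addition): the ffmpeg tutorial attaches the buffer to pFrameRGB
    // at this point; if that isn't done elsewhere in your code, do it here:
    //     avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB32,
    //                    pCodecCtx->width, pCodecCtx->height);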
    // note: the pixel format here is RGB, but sws_getContext() may need PIX_FMT_BGR24
    // to match (BGR); this might have to do with endianness...
    // make sure this is platform independent
    if (m_image != nullptr) delete m_image;
    m_image = new Image(Image::ARGB, pCodecCtx->width, pCodecCtx->height, true);

    int dstW = pCodecCtx->width; // don't rescale
    int dstH = pCodecCtx->height;
    Logger::writeToLog(L"Video width = " + String(dstW));
    Logger::writeToLog(L"Video height = " + String(dstH));

    // this should only have to be done once
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                     dstW, dstH, PIX_FMT_RGB32, SWS_FAST_BILINEAR,
                                     NULL, NULL, NULL);
    jassert(img_convert_ctx != nullptr);

    setSize(pCodecCtx->width, pCodecCtx->height);
} // Open()
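On the duration question specifically: if all you need is the overall duration, there is a shorter route via the container-level field, which (as far as I know) works with both the old and new APIs. A minimal sketch, assuming pFormatCtx has been opened and its stream info read as in Open() above:

    // Container-level duration is in AV_TIME_BASE units (microseconds).
    if (pFormatCtx->duration != AV_NOPTS_VALUE) {
        double containerSec = static_cast<double>(pFormatCtx->duration) / AV_TIME_BASE;
        Logger::writeToLog("Container duration = " + String(containerSec, 3) + "s");
    }
    // Per-stream duration is in that stream's own time_base units.
    AVStream* vs = pFormatCtx->streams[videoStream];
    if (vs->duration != AV_NOPTS_VALUE) {
        double streamSec = vs->duration * av_q2d(vs->time_base);
        Logger::writeToLog("Stream duration = " + String(streamSec, 3) + "s");
    }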
Look at what av_dump_format does before it reads pFormatCtx->duration; that is what makes the field valid. In other words, there is additional code that must be executed before the duration becomes valid. Trace through some code that works and you should find the missing piece. BTW, are you still interested in an answer for this? – Jovian
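To illustrate Jovian's point: in the code above, the piece that populates the duration fields is the av_find_stream_info() call; av_dump_format() only prints a meaningful duration after stream info has been read. A minimal sketch with the newer API names (my translation, not tested here):

    AVFormatContext* ctx = NULL;
    if (avformat_open_input(&ctx, videoFileName, NULL, NULL) != 0)
        return;
    // Before this call, ctx->duration and the per-stream durations may still
    // be unset (AV_NOPTS_VALUE) for many container formats.
    if (avformat_find_stream_info(ctx, NULL) >= 0)
        av_dump_format(ctx, 0, videoFileName, 0);
    avformat_close_input(&ctx);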