1. What are muxer and demuxer respectively?

Muxer means to encapsulate (e.g. packaging audio and video streams into container formats such as FLV or MP4), and demuxer means to unencapsulate (e.g. extracting raw YUV and PCM data from an MP4 file).

2. How do you play an encapsulated MP4 file? (You must be able to name the four steps.)

3. How to extract YUV and PCM from MP4 using command line? (As a reference for subsequent code extraction)

ffmpeg -c:v h264 -c:a aac -i out_dog.mp4 out_dog_cmd.yuv -f f32le out_dog_cmd.pcm
Copy the code

4. Key codes for unsealing are as follows

  • ffmpegs.h
#ifndef FFMPEGS_H
#define FFMPEGS_H

#include <QFile>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
}

// Output spec for decoded audio: demux() fills in the parameters
// of the PCM data it writes to `filename`.
typedef struct {
 const char *filename;      // path of the output PCM file
 int sampleRate;            // samples per second
 AVSampleFormat sampleFmt;  // sample format (packed or planar)
 int chLayout;              // channel layout (NOTE(review): FFmpeg's channel_layout is uint64_t — an int may truncate; confirm)
} AudioDecodeSpec;

// Output spec for decoded video: demux() fills in the parameters
// of the raw YUV data it writes to `filename`.
typedef struct {
 const char *filename;  // path of the output YUV file
 int width;             // frame width in pixels
 int height;            // frame height in pixels
 AVPixelFormat pixFmt;  // pixel format of the raw frames
 int fps;               // frame rate guessed from the stream
} VideoDecodeSpec;

class FFmpegs {
public:
 FFmpegs();

 void demux(const char *inFilename,
            AudioDecodeSpec &aOut,
            VideoDecodeSpec &vOut);
 
 /// unpack the context
 AVFormatContext *_fmtCtx = nullptr;
 
 /// decode the context
 AVCodecContext *_aDecodeCtx = nullptr, *_vDecodeCtx = nullptr;
 
 / / / flow index
 int _aStreamIdx = 0, _vStreamIdx = 0;
 / / file
 QFile _aOutFile, _vOutFile;
 /// function arguments
 AudioDecodeSpec *_aOut = nullptr;
 VideoDecodeSpec *_vOut = nullptr;
 // store the data before decoding
// AVPacket *_pkt = nullptr;
 // store the decoded data
 AVFrame *_frame = nullptr;
 // A buffer to store a frame of decoded images
 uint8_t *_imgBuf[4] = {nullptr};
 int _imgLinesizes[4] = {0};
 int _imgSize = 0;
 /// Size of each audio sample frame (including all tracks)
 int _sampleFrameSize = 0;
 // The size of each audio sample (mono)
 int _sampleSize = 0;
 
 int initVideoInfo(a);
 int initAudioInfo(a);
 int initDecoder(AVCodecContext **decodeCtx,
                 int *streamIdx,
                 AVMediaType type);
 int decode(AVCodecContext *decodeCtx,
            AVPacket *pkt,
            void (FFmpegs::*func)());
 void writeVideoFrame(a);
 void writeAudioFrame(a);
 
 
};

#endif // FFMPEGS_H

Copy the code
  • ffmpegs.cpp
#include "ffmpegs.h"
#include <QDebug>
#include <QFile>

extern "C" {
#include <libavutil/imgutils.h>
}

// Turn the FFmpeg error code in `ret` into a readable string in `errbuf`.
#define ERROR_BUF \
    char errbuf[1024]; \
    av_strerror(ret, errbuf, sizeof(errbuf));

// On error: log the failing function and jump to the `end` cleanup label.
#define END(func) \
    if (ret < 0) { \
        ERROR_BUF; \
        qDebug() << #func << "error" << errbuf; \
        goto end; \
    }

// On error: log the failing function and return the error code.
#define RET(func) \
    if (ret < 0) { \
        ERROR_BUF; \
        qDebug() << #func << "error" << errbuf; \
        return ret; \
    }


// Default constructor — all members are initialized by their in-class initializers.
FFmpegs::FFmpegs() {

}

void FFmpegs::demux(const char *inFilename,
                 AudioDecodeSpec &aOut,
                 VideoDecodeSpec &vOut) {
 /// Keep the parameters
 _aOut = &aOut;
 _vOut = &vOut;
 
 AVPacket *pkt = nullptr;
 
 // return the result
 int ret = 0;
 
 // create an unpack context and open the file
 ret = avformat_open_input(&_fmtCtx, inFilename, nullptr.nullptr);
 END(avformat_open_input);
 
 // retrieve the stream information
 ret = avformat_find_stream_info(_fmtCtx, nullptr);
 END(avformat_find_stream_info);
 
 /// Prints the stream information to the console
 av_dump_format(_fmtCtx, 0, inFilename, 0);
 fflush(stderr);
 
 // initialize the audio message
 ret = initAudioInfo();
 if (ret < 0) {
     goto end;
 }
 
 // initialize the video message
 ret = initVideoInfo();
 if (ret < 0) {
     goto end;
 }
 
 // initialize frame
 _frame = av_frame_alloc();
 if(! _frame) { qDebug() <<"av_frame_alloc error";
     goto end;
 }
 
 // initialize PKT
 pkt = av_packet_alloc();
 pkt->data = nullptr;
 pkt->size = 0;
 
 /// read data from the input file
 while (av_read_frame(_fmtCtx, pkt) == 0) {
     if (pkt->stream_index == _aStreamIdx) { /// read audio data
         ret = decode(_aDecodeCtx, pkt, &FFmpegs::writeAudioFrame);
     } else if (pkt->stream_index == _vStreamIdx) {/// read the video data
         ret = decode(_vDecodeCtx, pkt, &FFmpegs::writeVideoFrame);
     }
     /// Free some extra memory that the PKT internal pointer points to
     av_packet_unref(pkt);
     if (ret < 0) {
         gotoend; }}/// flush the buffer
 decode(_aDecodeCtx, nullptr, &FFmpegs::writeAudioFrame);
 decode(_vDecodeCtx, nullptr, &FFmpegs::writeVideoFrame);
 
end:
 _aOutFile.close();
 _vOutFile.close();
 avcodec_free_context(&_aDecodeCtx);
 avcodec_free_context(&_vDecodeCtx);
 avformat_close_input(&_fmtCtx);
 av_frame_free(&_frame);
 av_packet_free(&pkt);
 av_freep(&_imgBuf[0]);
}

// initialize the audio message
int FFmpegs::initAudioInfo(a) {
 // initialize the decoder
 int ret = initDecoder(&_aDecodeCtx, &_aStreamIdx, AVMEDIA_TYPE_AUDIO);
 RET(initDecoder);
 
 /// open the file
 _aOutFile.setFileName(_aOut->filename);
 if(! _aOutFile.open(QFile::WriteOnly)) { qDebug() <<"file open error" << _aOut->filename;
     return - 1;
 }
 
 /// Save the audio parameters
 _aOut->sampleRate = _aDecodeCtx->sample_rate;
 _aOut->sampleFmt = _aDecodeCtx->sample_fmt;
 _aOut->chLayout = _aDecodeCtx->channel_layout;
 
 // The size of the audio sample frame
 _sampleSize = av_get_bytes_per_sample(_aOut->sampleFmt);
 _sampleFrameSize = _sampleSize * _aDecodeCtx->channels;
 
 return 0;
 
}

// initialize the video message
int FFmpegs::initVideoInfo(a) {
 // initialize the decoder
 int ret = initDecoder(&_vDecodeCtx, &_vStreamIdx, AVMEDIA_TYPE_VIDEO);
 RET(initDecoder);
 
 /// open the file
 _vOutFile.setFileName(_vOut->filename);
 if(! _vOutFile.open(QFile::WriteOnly)) { qDebug() <<"file open error" << _vOut->filename;
     return - 1;
 }
 
 /// Save the video parameters
 _vOut->width = _vDecodeCtx->width;
 _vOut->height = _vDecodeCtx->height;
 _vOut->pixFmt = _vDecodeCtx->pix_fmt;
 
 / / / frame rate
 AVRational framerate = av_guess_frame_rate(_fmtCtx,
                                            _fmtCtx->streams[_vStreamIdx],
                                            nullptr);
 
 _vOut->fps = framerate.num / framerate.den;
 
 // create a buffer to store a decoded frame
 ret = av_image_alloc(_imgBuf, _imgLinesizes,
                      _vOut->width, _vOut->height,
                      _vOut->pixFmt, 1);
 RET(av_image_alloc);
 _imgSize = ret;
 
 return 0;
}

// initialize the decoder
// Find the best stream of the given media type, create and open a
// decoder context for it.
// decodeCtx: receives the newly allocated decode context (caller frees).
// streamIdx: receives the index of the chosen stream.
// Returns 0 on success, a negative value on failure.
int FFmpegs::initDecoder(AVCodecContext **decodeCtx,
                         int *streamIdx,
                         AVMediaType type) {
    // Find the most appropriate stream for this media type;
    // the return value is the stream index
    int ret = av_find_best_stream(_fmtCtx, type, -1, -1, nullptr, 0);
    RET(av_find_best_stream);

    // Validate the stream
    *streamIdx = ret;
    AVStream *stream = _fmtCtx->streams[*streamIdx];
    if (!stream) {
        qDebug() << "stream is empty";
        return -1;
    }

    // Find the appropriate decoder for the current stream
    AVCodec *decoder = avcodec_find_decoder(stream->codecpar->codec_id);
    if (!decoder) {
        qDebug() << "decoder not found" << stream->codecpar->codec_id;
        return -1;
    }

    // Initialize the decode context
    // (fixed: the original checked `decodeCtx`, the never-null
    // out-parameter, instead of the allocation result `*decodeCtx`)
    *decodeCtx = avcodec_alloc_context3(decoder);
    if (!*decodeCtx) {
        qDebug() << "avcodec_alloc_context3 error";
        return -1;
    }

    // Copy the parameters from the stream into the decode context
    ret = avcodec_parameters_to_context(*decodeCtx, stream->codecpar);
    RET(avcodec_parameters_to_context);

    // Open the decoder
    ret = avcodec_open2(*decodeCtx, decoder, nullptr);
    RET(avcodec_open2);

    return 0;
}

int FFmpegs::decode(AVCodecContext *decodeCtx,
                 AVPacket *pkt,
                 void (FFmpegs::*func)()) {
 /// send compressed data to the decoder
 int ret = avcodec_send_packet(decodeCtx, pkt);
 RET(avcodec_send_packet);
 
 while (true) {
     // get the decoded data
     ret = avcodec_receive_frame(decodeCtx, _frame);
     if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
         return 0;
     }
     RET(avcodec_receive_frame)
     
     // execute the code that writes to the file
     (this->*func)(); }}void FFmpegs::writeVideoFrame(a) {
 // copy the frame data to the _imgBuf buffer
 av_image_copy(_imgBuf, _imgLinesizes,
               (const uint8_t **)_frame->data, _frame->linesize,
               _vOut->pixFmt, _vOut->width, _vOut->height);
 // write buffer data to file
 _vOutFile.write((char *)_imgBuf[0], _imgSize);
}

void FFmpegs::writeAudioFrame(a) {
 // libfdk_aAC decoder, decoded PCM format: S16
 /// aAC decoder, decoded PCM format: FTLP
 
 ///LLLL RRRR DDDD FFFF
 
 if (av_sample_fmt_is_planar(_aOut->sampleFmt)) { ///planar
     /// Outer loop: number of samples per channel
     ///si = sample index
     for (int si = 0; si < _frame->nb_samples; si++) {
         /// Inner loop: how many channels are there
         ///ci = channel index
         for (int ci = 0; ci < _aDecodeCtx->channels; ci++) {
             char *begin = (char*)(_frame->data[ci] + si * _sampleSize); _aOutFile.write(begin, _sampleSize); }}}else { / / / not planar
     _aOutFile.write((char *)_frame->data[0], _frame->nb_samples * _sampleFrameSize); }}Copy the code