[TOC]
開始前的BB
開始準備搞播放器了,還不知道怎麼跟大佬們講,頭疼
想來想去,我感覺先實現一個簡單的視訊播放器,視訊和音訊各自同步,來讓各位大佬們先體驗一下,有個大體的脈絡。老夫擼碼就是一把梭!
我們先粗暴的分為兩個執行緒,一個負責音訊的播放,一個負責視訊的播放,根據之前的我們寫過的東西,我們來改一改
在chapter_09/
中新建兩個類VideoThread
和AudioThread
,一個負責視訊的解碼,一個負責音訊的解碼,渲染的話我們新建一個AVRender
,專門負責渲染以及視窗事件的管理
千言萬語註釋中
AVRender 渲染以及事件處理
AVRender.h
//
// Created by MirsFang on 2019-03-25.
//
#ifndef LEARNFFMPEG_AVRENDER_H
#define LEARNFFMPEG_AVRENDER_H
#define WINDOW_WIDTH 1080
#define WINDOW_HEIGHT 720
#include <iostream>
extern "C" {
#include <SDL2/SDL.h>
#include <libavcodec/avcodec.h>
}
/** 音視訊渲染器 **/
/** Audio/video renderer: owns the SDL window, renderer, texture and the audio device. **/
class AVRender {
public:
AVRender();
~AVRender();
/**
* Open the SDL audio device and start playback.
*
* @param sample_rate sample rate in Hz
* @param channel number of output channels
* @param samples samples per callback (size of one audio frame)
* @param userdata opaque pointer handed back to the callback
* @param fillaudio callback SDL invokes whenever the device needs PCM data
*/
void openAudio(int sample_rate, Uint8 channel, Uint16 samples, void *userdata,
void (*fill_audio)(void *codecContext, Uint8 *stream, int len));
/** Pump SDL events on the calling thread until the window is closed. **/
void loopEvent();
/** Render one video frame
*
* @param frame decoded video frame (YUV planes are uploaded to the texture)
* @param duration how long the frame stays on screen, in milliseconds
*/
void renderVideo(AVFrame *frame,Uint32 duration);
private:
/** SDL window **/
SDL_Window *window;
/** SDL renderer **/
SDL_Renderer *render;
/** SDL texture the YUV planes are streamed into **/
SDL_Texture *texture;
/** Destination rectangle for rendering **/
SDL_Rect rect;
/** Audio format we request; SDL may adjust it on open **/
SDL_AudioSpec wantSpec;
};
#endif //LEARNFFMPEG_AVRENDER_H
複製程式碼
AVRender.cpp
//
// Created by MirsFang on 2019-03-25.
//
#include "AVRender.h"
AVRender::AVRender() {
    // Null-init members first: the destructor's null checks (and renderVideo's
    // guards) rely on these being well-defined even when an early return below
    // leaves construction half-finished.
    window = nullptr;
    render = nullptr;
    texture = nullptr;
    // Bring up SDL (video + audio + event subsystems).
    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_EVENTS)) {
        std::cout << "[error] SDL Init error !" << std::endl;
        return;
    }
    // Create the window.
    window = SDL_CreateWindow("LearnFFmpeg", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
                              WINDOW_WIDTH, WINDOW_HEIGHT, SDL_WINDOW_OPENGL);
    if (!window) {
        std::cout << "[error] SDL Create window error!" << std::endl;
        return;
    }
    // Create the renderer — previously unchecked; a null renderer crashed later calls.
    render = SDL_CreateRenderer(window, -1, 0);
    if (!render) {
        std::cout << "[error] SDL Create renderer error!" << std::endl;
        return;
    }
    // Streaming IYUV texture the decoded planes are uploaded into — previously unchecked.
    texture = SDL_CreateTexture(render, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
                                WINDOW_WIDTH, WINDOW_HEIGHT);
    if (!texture) {
        std::cout << "[error] SDL Create texture error!" << std::endl;
        return;
    }
    // Full-window destination rectangle.
    rect.x = 0;
    rect.y = 0;
    rect.w = WINDOW_WIDTH;
    rect.h = WINDOW_HEIGHT;
}
AVRender::~AVRender() {
    // Teardown order fixed: the original called SDL_Quit() FIRST and destroyed
    // the renderer/texture/window afterwards, i.e. after the SDL subsystems were
    // already shut down. Destroy SDL objects while SDL is still alive, then quit.
    SDL_CloseAudio();
    if (texture) SDL_DestroyTexture(texture);
    if (render) SDL_DestroyRenderer(render);
    if (window) SDL_DestroyWindow(window);
    SDL_Quit();
}
void AVRender::loopEvent() {
SDL_Event event;
for (;;) {
SDL_PollEvent(&event);
switch (event.type) {
case SDL_KEYDOWN:
switch (event.key.keysym.sym) {
}
break;
case SDL_QUIT:
return;
default:
break;
}
}
}
void AVRender::renderVideo(AVFrame *frame, Uint32 duration) {
    // Upload one decoded YUV frame, present it, then block for `duration` ms.
    if (frame == nullptr) return;
    // Guard against a half-constructed renderer (ctor can fail and return early);
    // previously a failed SDL init led to calls on null render/texture here.
    if (!render || !texture) return;
    // Upload the three planes (Y, U, V) into the streaming texture.
    SDL_UpdateYUVTexture(texture, &rect,
                         frame->data[0], frame->linesize[0],
                         frame->data[1], frame->linesize[1],
                         frame->data[2], frame->linesize[2]);
    SDL_RenderClear(render);
    SDL_RenderCopy(render, texture, NULL, &rect);
    SDL_RenderPresent(render);
    // Crude pacing: sleep the video thread for the frame's display duration.
    SDL_Delay(duration);
}
void AVRender::openAudio(int sample_rate, Uint8 channel, Uint16 samples, void *userdata,
                         void (*fill_audio)(void *, Uint8 *, int)) {
    // Describe the format we would like; SDL_OpenAudio may adjust wantSpec to
    // reflect what the hardware actually provides.
    wantSpec.freq = sample_rate;
    wantSpec.channels = channel;
    wantSpec.format = AUDIO_S16SYS;
    wantSpec.samples = samples;
    wantSpec.silence = 0;
    wantSpec.userdata = userdata;
    wantSpec.callback = fill_audio;

    const int opened = SDL_OpenAudio(&wantSpec, NULL);
    if (opened < 0) {
        std::cout << "[error] open audio error" << std::endl;
        return;
    }
    // 0 = un-pause: the device starts pulling data through the callback.
    SDL_PauseAudio(0);
}
複製程式碼
VideoThread 視訊解碼
視訊解碼類VideoThread.h
//
// Created by MirsFang on 2019-03-25.
//
#ifndef LEARNFFMPEG_VIDEOTHREAD_H
#define LEARNFFMPEG_VIDEOTHREAD_H
#include <pthread.h>
#include <iostream>
#include "AVRender.h"
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
};
/** 視訊執行緒 **/
/** Video thread: opens the input, decodes video frames and hands them to AVRender. **/
class VideoThread {
public:
VideoThread();
~VideoThread();
/** Set the media URL/path (string must outlive this object). **/
void setUrl(const char *url);
/** Set the renderer (borrowed, not owned). **/
void setRender(AVRender *render);
/** Start the decode thread. **/
void start();
private:
AVFormatContext *format_context;
AVCodecContext *codec_context;
AVCodec *codec;
AVPacket *packet;
AVFrame *frame;
const char *url;
int video_index;
pthread_t pid;
pthread_mutex_t mutex;
AVRender *avRender;
double last_pts = 0;
/** True = pace frames by PTS interval; false = pace by average frame rate. **/
bool is_interval_sync = true;
static void *start_thread(void *arg);
void run();
/** Open input and set up the video decoder. **/
void prepare_codec();
/** Read packets, decode frames and render them. **/
void decodec_frame();
/**
* Display duration derived from the stream frame rate.
* @param frame_rate frames per second
* @return display duration in milliseconds
*/
Uint32 sync_frame_rate(double frame_rate);
/**
* Display duration derived from the PTS gap to the previous frame.
* @param timebase stream time base
* @param pts frame timestamp — NOTE(review): declared int but AVFrame::pts
*            is int64_t; large timestamps will truncate — confirm and widen.
* @return display duration in seconds
*/
double sync_frame_interval(AVRational timebase, int pts);
};
#endif //LEARNFFMPEG_VIDEOTHREAD_H
複製程式碼
VideoThread.cpp
//
// Created by MirsFang on 2019-03-25.
//
#include "VideoThread.h"
VideoThread::VideoThread() {
    // Null-init every pointer member: the destructor's null checks were
    // previously reading uninitialized pointers (UB) whenever start()/
    // prepare_codec() never ran or bailed out early.
    format_context = nullptr;
    codec_context = nullptr;
    codec = nullptr;
    packet = nullptr;
    frame = nullptr;
    url = nullptr;
    avRender = nullptr;
    video_index = -1;
}
VideoThread::~VideoThread() {
    // Release FFmpeg decode state owned by this object.
    // NOTE(review): the worker thread is not joined here — confirm it has
    // finished before destruction.
    if (frame) av_frame_free(&frame);
    if (packet) av_packet_free(&packet);
    if (codec_context) avcodec_free_context(&codec_context);
    if (format_context) avformat_close_input(&format_context);
}
void VideoThread::start() {
    // Set up the decoder on the calling thread, then spawn the worker.
    prepare_codec();
    const int rc = pthread_create(&pid, NULL, start_thread, (void *) this);
    if (rc != 0) {
        std::cout << "初始化視訊執行緒失敗!" << std::endl;
        return;
    }
}
void *VideoThread::start_thread(void *arg) {
VideoThread *audioThread = (VideoThread *) arg;
audioThread->run();
return nullptr;
}
void VideoThread::run() {
    // Worker body: log the start, then decode until the stream ends.
    std::cout << "視訊執行緒執行中..." << std::endl;
    decodec_frame();
}
void VideoThread::setRender(AVRender *render) {
    // The renderer is borrowed, not owned; the caller keeps ownership.
    avRender = render;
}
void VideoThread::setUrl(const char *url) {
    // Store the media location. The string is not copied, so it must
    // outlive this object.
    this->url = url;
}
void VideoThread::prepare_codec() {
    // Open the input, locate the video stream and set up its decoder.
    int retcode;
    // Allocate the format context.
    format_context = avformat_alloc_context();
    if (!format_context) {
        std::cout << "[error] alloc format context error!" << std::endl;
        return;
    }
    // Open the input stream.
    retcode = avformat_open_input(&format_context, url, nullptr, nullptr);
    if (retcode != 0) {
        std::cout << "[error] open input error!" << std::endl;
        return;
    }
    // Probe stream info. avformat_find_stream_info returns >= 0 on success,
    // so only a negative value is an error (the old `!= 0` check rejected
    // successful positive returns).
    retcode = avformat_find_stream_info(format_context, NULL);
    if (retcode < 0) {
        std::cout << "[error] find stream error!" << std::endl;
        return;
    }
    // Allocate the codec context.
    codec_context = avcodec_alloc_context3(NULL);
    if (!codec_context) {
        std::cout << "[error] alloc codec context error!" << std::endl;
        return;
    }
    // Locate the best video stream. A negative result means none was found;
    // previously unchecked, which indexed streams[] out of bounds.
    video_index = av_find_best_stream(format_context, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (video_index < 0) {
        std::cout << "[error] no video stream found!" << std::endl;
        return;
    }
    // Copy the stream's codec parameters into the decoding context.
    retcode = avcodec_parameters_to_context(codec_context, format_context->streams[video_index]->codecpar);
    if (retcode < 0) {
        std::cout << "[error] parameters to context error!" << std::endl;
        return;
    }
    // Find the decoder for this codec id.
    codec = avcodec_find_decoder(codec_context->codec_id);
    if (codec == nullptr) {
        std::cout << "[error] find decoder error!" << std::endl;
        return;
    }
    // Open the decoder.
    retcode = avcodec_open2(codec_context, codec, nullptr);
    if (retcode != 0) {
        std::cout << "[error] open decodec error!" << std::endl;
        return;
    }
    // Reusable packet and frame buffers.
    packet = av_packet_alloc();
    frame = av_frame_alloc();
}
void VideoThread::decodec_frame() {
    // Main decode loop: read packets, decode frames, pace and render them.
    int sendcode = 0;
    // Average stream frame rate (used only in fixed-rate pacing mode).
    double frameRate = av_q2d(format_context->streams[video_index]->avg_frame_rate);
    Uint32 display_time_ms = 0;
    if (!is_interval_sync) {
        display_time_ms = sync_frame_rate(frameRate);
    }
    // Wall-clock bracketing of per-frame decode cost.
    clock_t start = 0, finish = 0;
    while (av_read_frame(format_context, packet) == 0) {
        if (packet->stream_index != video_index) {
            // Fix: the old code `continue`d without unreffing, leaking every
            // non-video packet's buffers.
            av_packet_unref(packet);
            continue;
        }
        // Drain all frames the decoder currently has ready.
        while (avcodec_receive_frame(codec_context, frame) == 0) {
            /*
             * Interval-sync mode: the time a frame stays on screen is
             *   display = (pts_current - pts_previous) - decode_cost
             * which yields the real wall-clock display duration.
             */
            if (is_interval_sync) {
                display_time_ms = (Uint32) (
                        sync_frame_interval(format_context->streams[video_index]->time_base, frame->pts) * 1000);
                finish = clock();
                // Fix: convert clock ticks to ms via CLOCKS_PER_SEC; the old
                // `/1000` silently assumed a specific tick frequency.
                double diff_time = (double) (finish - start) * 1000.0 / CLOCKS_PER_SEC;
                // Subtract the decode cost from the display duration.
                if (display_time_ms > diff_time) display_time_ms = display_time_ms - (Uint32) diff_time;
            }
            // Paint the frame (renderVideo blocks for display_time_ms).
            if (avRender) avRender->renderVideo(frame, display_time_ms);
            av_frame_unref(frame);
            // Mark the start of the next frame's decode window.
            start = clock();
        }
        // Feed the compressed packet to the decoder.
        sendcode = avcodec_send_packet(codec_context, packet);
        if (sendcode == 0) {
            // success — stay quiet
        } else if (sendcode == AVERROR_EOF) {
            std::cout << "[debug] " << "EOF" << std::endl;
        } else if (sendcode == AVERROR(EAGAIN)) {
            std::cout << "[debug] " << "EAGAIN" << std::endl;
        } else {
            // Fix: sendcode is already a negative AVERROR code; wrapping it in
            // AVERROR() again negated it back and printed the wrong message.
            std::cout << "[debug] " << av_err2str(sendcode) << std::endl;
        }
        av_packet_unref(packet);
    }
}
Uint32 VideoThread::sync_frame_rate(double frame_rate) {
    // Per-frame display time in ms at a constant frame rate.
    // Guard: avg_frame_rate can be 0/unknown for some streams; the old code
    // divided by it unconditionally (undefined result cast to Uint32).
    if (frame_rate <= 0) return 0;
    return (Uint32) (1000.0 / frame_rate);
}
// Display duration of the current frame in SECONDS: the PTS delta to the
// previous frame scaled by the stream time base. Also updates last_pts.
// NOTE(review): `pts` is declared int but AVFrame::pts is int64_t — large
// timestamps will truncate here; confirm and widen (header must change too).
// NOTE(review): logs every frame to stdout — presumably debug output; remove
// for production use.
double VideoThread::sync_frame_interval(AVRational timebase, int pts) {
double display = (pts - last_pts) * av_q2d(timebase);
last_pts = pts;
std::cout << "pts : " << pts * av_q2d(timebase) << " -- display :" << display << std::endl;
return display;
}
複製程式碼
AudioThread 音訊解碼
AudioThread
//
// Created by MirsFang on 2019-03-25.
//
#ifndef LEARNFFMPEG_AUDIOTHREAD_H
#define LEARNFFMPEG_AUDIOTHREAD_H
#include <pthread.h>
#include <iostream>
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswresample/swresample.h>
};
#include "AVRender.h"
/**
* 音訊執行緒
*/
class AudioThread {
public:
AudioThread();
~AudioThread();
void setUrl(const char *url);
/** 開啟執行緒 **/
void start();
/** 設定渲染器 **/
void setRender(AVRender *render);
private:
/** 重取樣上下文 **/
SwrContext *convert_context;
AVFormatContext *format_context;
AVCodecContext *codec_context;
AVCodec *codec;
AVPacket *packet;
AVFrame *frame;
int audioIndex = -1;
uint64_t out_chn_layout = AV_CH_LAYOUT_STEREO; //輸出的通道佈局 雙聲道
enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16; //輸出的聲音格式
int out_sample_rate = 44100; //輸出的取樣率
int out_nb_samples = -1; //輸出的音訊取樣
int out_channels = -1; //輸出的通道數
int out_buffer_size = -1; //輸出buff大小
unsigned char *outBuff = NULL;//輸出的Buffer資料
uint64_t in_chn_layout = -1; //輸入的通道佈局
pthread_t pid;
pthread_mutex_t mutex;
AVRender *av_render;
const char *url;
static void *start_thread(void *arg);
void run();
/** 初始化解碼器 **/
void prepare_codec();
};
#endif //LEARNFFMPEG_AUDIOTHREAD_H
複製程式碼
AudioThread.cpp
//
// Created by MirsFang on 2019-03-25.
//
#include "AudioThread.h"
#define MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio 48000 * (32/8)
// Bytes still pending in the current decoded PCM chunk.
// NOTE(review): shared between the decode thread and the SDL audio callback
// thread without synchronization — presumably tolerated for this demo; confirm.
unsigned int audioLen = 0;
unsigned char *audioChunk = nullptr;
// Current read position inside the chunk.
unsigned char *audioPos = nullptr;
/** SDL2 audio callback: invoked whenever the device needs `len` bytes of PCM. **/
void fill_audio(void *codecContext, Uint8 *stream, int len) {
// SDL2 requires the stream buffer to be zeroed before mixing into it.
SDL_memset(stream, 0, len);
if (audioLen == 0)
return;
len = (len > audioLen ? audioLen : len);
// Mix our PCM into the (zeroed) stream at full volume.
SDL_MixAudio(stream, audioPos, len, SDL_MIX_MAXVOLUME);
// Advance the read cursor past what was consumed.
audioPos += len;
audioLen -= len;
}
AudioThread::AudioThread() {
    // Null-init every pointer member: the destructor's null checks were
    // previously reading uninitialized pointers (UB) whenever start()/
    // prepare_codec() never ran or bailed out early.
    convert_context = nullptr;
    format_context = nullptr;
    codec_context = nullptr;
    codec = nullptr;
    packet = nullptr;
    frame = nullptr;
    url = nullptr;
    av_render = nullptr;
}
AudioThread::~AudioThread() {
    // Tear down the resampler and FFmpeg decode state owned by this object.
    // NOTE(review): the worker thread is not joined here — confirm it has
    // finished before destruction.
    if (convert_context) swr_free(&convert_context);
    if (frame) av_frame_free(&frame);
    if (packet) av_packet_free(&packet);
    if (codec_context) avcodec_free_context(&codec_context);
    if (format_context) avformat_close_input(&format_context);
}
void AudioThread::start() {
    // Set up the decoder on the calling thread, then spawn the worker.
    prepare_codec();
    const int rc = pthread_create(&pid, NULL, start_thread, (void *) this);
    if (rc != 0) {
        std::cout << "初始化音訊執行緒失敗!" << std::endl;
        return;
    }
}
void *AudioThread::start_thread(void *arg) {
AudioThread *audioThread = (AudioThread *) arg;
audioThread->run();
return nullptr;
}
void AudioThread::run() {
std::cout << "音訊執行緒已啟動" << std::endl;
//迴圈讀取packet並且解碼
int sendcode = 0;
while (av_read_frame(format_context, packet) >= 0) {
if (packet->stream_index != audioIndex)continue;
//接受解碼後的音訊資料
while (avcodec_receive_frame(codec_context, frame) == 0) {
swr_convert(convert_context, &outBuff, MAX_AUDIO_FRAME_SIZE, (const uint8_t **) frame->data,
frame->nb_samples);
//如果沒有播放完就等待1ms
while (audioLen > 0)
SDL_Delay(1);
//同步資料
audioChunk = (unsigned char *) outBuff;
audioPos = audioChunk;
audioLen = out_buffer_size;
av_frame_unref(frame);
}
//傳送解碼前的包資料
sendcode = avcodec_send_packet(codec_context, packet);
//根據傳送的返回值判斷狀態
if (sendcode == 0) {
// std::cout << "[debug] " << "SUCCESS" << std::endl;
} else if (sendcode == AVERROR_EOF) {
std::cout << "[debug] " << "EOF" << std::endl;
} else if (sendcode == AVERROR(EAGAIN)) {
std::cout << "[debug] " << "EAGAIN" << std::endl;
} else {
std::cout << "[debug] " << av_err2str(AVERROR(sendcode)) << std::endl;
}
av_packet_unref(packet);
}
}
void AudioThread::setRender(AVRender *render) {
    // The renderer is borrowed, not owned; the caller keeps ownership.
    av_render = render;
}
void AudioThread::prepare_codec() {
    // Open the input, locate the audio stream, set up decoder + resampler,
    // and open the SDL audio device.
    int retcode;
    // Allocate the format context.
    format_context = avformat_alloc_context();
    if (!format_context) {
        std::cout << "[error] alloc format context error!" << std::endl;
        return;
    }
    // Open the input stream.
    retcode = avformat_open_input(&format_context, url, nullptr, nullptr);
    if (retcode != 0) {
        std::cout << "[error] open input error!" << std::endl;
        return;
    }
    // Probe stream info. avformat_find_stream_info returns >= 0 on success,
    // so only a negative value is an error (the old `!= 0` check rejected
    // successful positive returns).
    retcode = avformat_find_stream_info(format_context, NULL);
    if (retcode < 0) {
        std::cout << "[error] find stream error!" << std::endl;
        return;
    }
    // Allocate the codec context.
    codec_context = avcodec_alloc_context3(NULL);
    if (!codec_context) {
        std::cout << "[error] alloc codec context error!" << std::endl;
        return;
    }
    // Locate the best audio stream. A negative result means none was found;
    // previously unchecked, which indexed streams[] out of bounds.
    audioIndex = av_find_best_stream(format_context, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
    if (audioIndex < 0) {
        std::cout << "[error] no audio stream found!" << std::endl;
        return;
    }
    // Copy the stream's codec parameters into the decoding context.
    retcode = avcodec_parameters_to_context(codec_context, format_context->streams[audioIndex]->codecpar);
    if (retcode < 0) {
        std::cout << "[error] parameters to context error!" << std::endl;
        return;
    }
    // Find the decoder for this codec id.
    codec = avcodec_find_decoder(codec_context->codec_id);
    if (codec == nullptr) {
        std::cout << "[error] find decoder error!" << std::endl;
        return;
    }
    // Open the decoder.
    retcode = avcodec_open2(codec_context, codec, nullptr);
    if (retcode != 0) {
        std::cout << "[error] open decodec error!" << std::endl;
        return;
    }
    // Reusable packet and frame buffers.
    packet = av_packet_alloc();
    frame = av_frame_alloc();
    /** ########## Derive the actual audio parameters ########## **/
    // Samples per channel in one decoded frame.
    out_nb_samples = codec_context->frame_size;
    // Output channel count derived from the output layout.
    out_channels = av_get_channel_layout_nb_channels(out_chn_layout);
    // Input channel layout derived from the source channel count.
    in_chn_layout = av_get_default_channel_layout(codec_context->channels);
    /** Compute the resampled data size and allocate the buffer. **/
    out_buffer_size = av_samples_get_buffer_size(NULL, out_channels, out_nb_samples, out_sample_fmt, 1);
    // Over-allocate: worst case one second of 48k/32-bit audio, stereo.
    outBuff = (unsigned char *) av_malloc(MAX_AUDIO_FRAME_SIZE * 2);
    // Open the SDL audio device with the desired parameters.
    if (av_render) av_render->openAudio(out_sample_rate, out_channels, out_nb_samples, codec_context, fill_audio);
    // Configure the resampler: input stream format -> desired output format.
    convert_context = swr_alloc_set_opts(NULL, out_chn_layout, out_sample_fmt, out_sample_rate,
                                         in_chn_layout, codec_context->sample_fmt, codec_context->sample_rate, 0,
                                         NULL);
    // Fix: swr_init result was previously ignored; a failed init made every
    // later swr_convert call invalid.
    if (!convert_context || swr_init(convert_context) < 0) {
        std::cout << "[error] swr init error!" << std::endl;
        return;
    }
}
void AudioThread::setUrl(const char *url) {
    // Store the media location. The string is not copied, so it must
    // outlive this object.
    this->url = url;
}
複製程式碼
我們在Main
方法中
#ifdef chapter_09
// Create the shared renderer (owns the SDL window, audio device and event loop).
AVRender* render = new AVRender();
// Video worker: decodes frames and paints them through the renderer.
VideoThread *videoThread = new VideoThread();
videoThread->setRender(render);
videoThread->setUrl(url);
// Audio worker: decodes, resamples and feeds the SDL audio callback.
AudioThread *audioThread = new AudioThread();
audioThread->setRender(render);
audioThread->setUrl(url);
// Start both workers; each opens the input file independently.
videoThread->start();
audioThread->start();
// Block the main thread pumping SDL events until SDL_QUIT.
// NOTE(review): the three `new`ed objects are never deleted — presumably
// acceptable for a demo that exits right after; confirm before reuse.
render->loopEvent();
#endif
複製程式碼
如果沒錯,那麼就應該正常的播放視訊了。。。
祝各位大佬們好運
未完持續 ...