关于ffmpeg:ffplay音视频解码线程

44次阅读

共计 7546 个字符,预计需要花费 19 分钟才能阅读完成。

前面我们介绍了 ffplay 的调试环境集成、ffplay 总体架构、ffplay 的读取线程等相关内容,今天介绍一下 ffplay 解码线程的工作流程。

因为视频解码和音频解码的过程大体一致,因此本文主要介绍视频解码线程的内容,字幕的解码暂且忽略……

我们还是从这张图开始:

图导出的可能有点模糊,再加上上传图床后不知道有没有变得更模糊,想要高清大图的可以后台留言,加微信索取。

从图中可以看出,解码线程的主要工作是将数据包从待解码队列中取出,然后送进解码器,最后将解码出的数据帧放入帧队列中,等待 SDL 获取并播放。

解码过程

解码线程是在打开流的时候创建的,也就是在函数 stream_component_open 中创建的,视频解码线程的工作函数是 video_thread。

以下是函数 video_thread 的内容,可以看到去掉 filter 的相关处理后,这个函数是非常精简的:就是在 for 循环中通过函数 get_video_frame 获取到一帧图像数据后,调整数据帧的 pts,然后将数据帧放入帧队列中:

/**
 * Video decoding thread (started from stream_component_open).
 * Loop: pull one decoded frame via get_video_frame(), optionally run it
 * through the video filter graph (CONFIG_AVFILTER builds), compute its
 * pts and display duration, then push it onto the picture queue for the
 * SDL display loop to consume.
 * @param arg the player's VideoState
 * @return 0 on thread exit; AVERROR(ENOMEM) if the frame could not be allocated
 */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    // allocate the reusable frame that receives decoded pictures
    AVFrame *frame = av_frame_alloc();
    double pts;
    double duration;
    int ret;
    AVRational tb = is->video_st->time_base;
    AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);

#if CONFIG_AVFILTER
    AVFilterGraph *graph = NULL;
    AVFilterContext *filt_out = NULL, *filt_in = NULL;
    // last-seen frame properties; the filter graph is rebuilt when any changes
    int last_w = 0;
    int last_h = 0;
    enum AVPixelFormat last_format = -2;
    int last_serial = -1;
    int last_vfilter_idx = 0;
#endif

    if (!frame)
        return AVERROR(ENOMEM);

    for (;;) {
        // fetch one decoded picture: a negative return exits the thread
        // (abort or fatal error), 0 means the frame was dropped -- retry
        ret = get_video_frame(is, frame);
        if (ret < 0)
            goto the_end;
        if (!ret)
            continue;

#if CONFIG_AVFILTER
        // (re)build the filter graph when the frame geometry/format, the
        // packet serial (i.e. after a seek) or the selected filter changed
        if (   last_w != frame->width
            || last_h != frame->height
            || last_format != frame->format
            || last_serial != is->viddec.pkt_serial
            || last_vfilter_idx != is->vfilter_idx) {
            av_log(NULL, AV_LOG_DEBUG,
                   "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
                   last_w, last_h,
                   (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
                   frame->width, frame->height,
                   (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
            avfilter_graph_free(&graph);
            graph = avfilter_graph_alloc();
            if (!graph) {ret = AVERROR(ENOMEM);
                goto the_end;
            }
            graph->nb_threads = filter_nbthreads;
            if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
                // filter setup failed: ask the main loop to quit
                SDL_Event event;
                event.type = FF_QUIT_EVENT;
                event.user.data1 = is;
                SDL_PushEvent(&event);
                goto the_end;
            }
            filt_in  = is->in_video_filter;
            filt_out = is->out_video_filter;
            last_w = frame->width;
            last_h = frame->height;
            last_format = frame->format;
            last_serial = is->viddec.pkt_serial;
            last_vfilter_idx = is->vfilter_idx;
            frame_rate = av_buffersink_get_frame_rate(filt_out);
        }

        // feed the decoded frame into the filter graph
        ret = av_buffersrc_add_frame(filt_in, frame);
        if (ret < 0)
            goto the_end;

        // drain every filtered frame the graph can produce
        while (ret >= 0) {is->frame_last_returned_time = av_gettime_relative() / 1000000.0;

            ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
            if (ret < 0) {if (ret == AVERROR_EOF)
                    is->viddec.finished = is->viddec.pkt_serial;
                ret = 0;
                break;
            }

            // measure the filtering delay; discard implausibly large values
            is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
            if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
                is->frame_last_filter_delay = 0;
            tb = av_buffersink_get_time_base(filt_out);
#endif
            // display duration of one frame = 1 / frame_rate (0 if unknown)
            duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
            pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
            // push the decoded frame onto the picture queue
            ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
            av_frame_unref(frame);
#if CONFIG_AVFILTER
            // a serial change (seek) invalidates any remaining filtered frames
            if (is->videoq.serial != is->viddec.pkt_serial)
                break;
        }
#endif

        if (ret < 0)
            goto the_end;
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_frame_free(&frame);
    return 0;
}

我们来看看函数 get_video_frame 做了什么:

/**
 * Pull one decoded video frame from the video decoder.
 * Applies the early frame-drop policy: when video is not the master clock
 * (or framedrop is forced) and the frame is already behind the master
 * clock, the frame is discarded before it ever reaches the frame queue.
 * @param is    the player's VideoState
 * @param frame receives the decoded picture
 * @return 1 when a frame was produced, 0 when it was dropped, -1 on abort
 */
static int get_video_frame(VideoState *is, AVFrame *frame)
{
    int got_picture = decoder_decode_frame(&is->viddec, frame, NULL);

    if (got_picture < 0)
        return -1;
    if (!got_picture)
        return 0;

    // pts of this frame in seconds (NAN when the decoder gave no pts)
    double frame_pts = NAN;
    if (frame->pts != AV_NOPTS_VALUE)
        frame_pts = av_q2d(is->video_st->time_base) * frame->pts;

    frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);

    // drop early only when framedrop is forced, or enabled while the
    // master clock is not the video clock
    int drop_enabled = framedrop > 0 ||
                       (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER);
    if (drop_enabled && frame->pts != AV_NOPTS_VALUE) {
        // lag < 0 means the frame is already behind the master clock;
        // in theory frame_pts + diff == get_master_clock(is) for seamless play
        double lag = frame_pts - get_master_clock(is);
        if (!isnan(lag) && fabs(lag) < AV_NOSYNC_THRESHOLD &&
            lag - is->frame_last_filter_delay < 0 &&
            is->viddec.pkt_serial == is->vidclk.serial &&
            is->videoq.nb_packets) {
            is->frame_drops_early++;
            av_frame_unref(frame);
            got_picture = 0;
        }
    }

    return got_picture;
}

这个函数内部又调用了函数 decoder_decode_frame 获取解码帧,然后根据同步时钟判断是否需要进行丢帧处理。那么我们再看看函数 decoder_decode_frame:


/**
 * Core decode loop shared by the video, audio and subtitle decoders.
 *
 * Pulls packets from the decoder's packet queue and runs them through the
 * codec. Frames are received BEFORE new packets are sent, so the codec's
 * internal buffers are drained first and avcodec_send_packet rarely stalls.
 *
 * @param d     decoder state (codec context, packet queue, serial tracking)
 * @param frame receives the decoded audio/video frame (NULL for subtitles)
 * @param sub   receives the decoded subtitle (NULL for audio/video)
 * @return 1 when a frame was produced, 0 on EOF / no frame, -1 on abort
 */
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
    int ret = AVERROR(EAGAIN);

    for (;;) {
        // Only decode while the decoder's serial matches the packet queue's
        // serial; a mismatch means a seek/flush happened in between.
        if (d->queue->serial == d->pkt_serial) {
            do {
                // bail out immediately if an abort was requested
                if (d->queue->abort_request)
                    return -1;

                switch (d->avctx->codec_type) {
                    case AVMEDIA_TYPE_VIDEO:
                        // try to receive one decoded picture
                        ret = avcodec_receive_frame(d->avctx, frame);
                        if (ret >= 0) {
                            // pick the frame pts according to -decoder_reorder_pts:
                            // -1 = best effort, 0 = packet dts, 1 = leave as-is
                            if (decoder_reorder_pts == -1) {
                                frame->pts = frame->best_effort_timestamp;
                            } else if (!decoder_reorder_pts) {
                                frame->pts = frame->pkt_dts;
                            }
                        }
                        break;
                    case AVMEDIA_TYPE_AUDIO:
                        ret = avcodec_receive_frame(d->avctx, frame);
                        if (ret >= 0) {
                            // rescale pts to the 1/sample_rate time base; when the
                            // frame has no pts, extrapolate from the previous one
                            AVRational tb = (AVRational){1, frame->sample_rate};
                            if (frame->pts != AV_NOPTS_VALUE)
                                frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
                            else if (d->next_pts != AV_NOPTS_VALUE)
                                frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
                            if (frame->pts != AV_NOPTS_VALUE) {
                                d->next_pts = frame->pts + frame->nb_samples;
                                d->next_pts_tb = tb;
                            }
                        }
                        break;
                }
                if (ret == AVERROR_EOF) {
                    d->finished = d->pkt_serial;
                    // flush the decoder so it can be reused after a seek
                    avcodec_flush_buffers(d->avctx);
                    return 0;
                }
                if (ret >= 0)
                    return 1;
            } while (ret != AVERROR(EAGAIN));
        }

        // fetch the next packet to feed the decoder
        do {
            if (d->queue->nb_packets == 0)
                // queue is empty: wake the read thread so it refills it
                SDL_CondSignal(d->empty_queue_cond);
            if (d->packet_pending) {
                // re-send the packet that previously failed with EAGAIN
                d->packet_pending = 0;
            } else {
                int old_serial = d->pkt_serial;
                if (packet_queue_get(d->queue, d->pkt, 1, &d->pkt_serial) < 0)
                    return -1;
                if (old_serial != d->pkt_serial) {
                    // serial changed (seek): flush and reset pts prediction
                    avcodec_flush_buffers(d->avctx);
                    d->finished = 0;
                    d->next_pts = d->start_pts;
                    d->next_pts_tb = d->start_pts_tb;
                }
            }
            if (d->queue->serial == d->pkt_serial)
                break;
            // stale packet from before a seek: drop it and fetch another
            av_packet_unref(d->pkt);
        } while (1);

        if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
            int got_frame = 0;
            ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, d->pkt);
            if (ret < 0) {
                ret = AVERROR(EAGAIN);
            } else {
                if (got_frame && !d->pkt->data) {
                    // keep draining the subtitle decoder with NULL packets
                    d->packet_pending = 1;
                }
                ret = got_frame ? 0 : (d->pkt->data ? AVERROR(EAGAIN) : AVERROR_EOF);
            }
            av_packet_unref(d->pkt);
        } else {
            // BUGFIX: in the article's listing this call was swallowed into the
            // preceding line comment, leaving a dangling `else` -- restored here.
            // If send_packet reports EAGAIN even though receive_frame also
            // returned EAGAIN, that is an API violation; keep the packet and
            // retry it on the next iteration instead of dropping it.
            if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) {
                av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
                d->packet_pending = 1;
            } else {
                av_packet_unref(d->pkt);
            }
        }
    }
}

原来函数 decoder_decode_frame 才是真正的解码核心,可以看到视频、音频和字幕都是通过调用这个函数进行解码的。这个函数的主要工作就是在 for 循环中不断取出数据包,然后调用 FFmpeg 的 API 进行解码。

1、首先判断待解码队列的播放序列和解码器的播放序列是否一致:

if (d->queue->serial == d->pkt_serial) 

如果一致,则通过 FFmpeg 函数 avcodec_receive_frame 获取数据帧,然后更新数据帧的 pts。

在这里有个疑问:我们之前不是说解码过程是先 avcodec_send_packet,然后多次 avcodec_receive_frame 获取数据帧吗?这里怎么直接就先 avcodec_receive_frame 获取数据帧了呢?

这是因为函数 decoder_decode_frame 是一个不断被调用的 for 循环,优先调用 avcodec_receive_frame 可以避免解码器内部缓冲过多数据而导致数据包无法送入解码器的现象出现。

2、判断待解码队列中是否有数据,如果没有则唤醒读取线程:

 if (d->queue->nb_packets == 0)
                // 队列空了,唤醒读取线程,连忙读取数据
                SDL_CondSignal(d->empty_queue_cond);

3、判断是否有送入解码器失败的包,如果有则优先处理:

 if (d->packet_pending) {
                // 解决有缓冲的数据
                d->packet_pending = 0;
            }

这个包是在调用 avcodec_send_packet 返回 AVERROR(EAGAIN) 时缓存下来的。

4、如果没有缓存的包,则通过 packet_queue_get 从队列中获取:

  int old_serial = d->pkt_serial;
                if (packet_queue_get(d->queue, d->pkt, 1, &d->pkt_serial) < 0)
                    return -1;
                if (old_serial != d->pkt_serial) {
                    // 刷新解码器
                    avcodec_flush_buffers(d->avctx);
                    d->finished = 0;
                    d->next_pts = d->start_pts;
                    d->next_pts_tb = d->start_pts_tb;
                }

5、将包送进解码器,如果返回值是 AVERROR(EAGAIN),则将包缓存起来,下次再优先送入解码器处理。

同时这里也解释了第一步中优先调用 avcodec_receive_frame 的原因:就是为了让数据可以顺利地送进解码器。

 // 数据送进解码器失败,遇到 EAGAIN 怎么办?
 if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) {
     av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
     d->packet_pending = 1;
 } else {
     av_packet_unref(d->pkt);
 }

举荐浏览

FFmpeg 连载 1 - 开发环境搭建
FFmpeg 连载 2 - 拆散视频和音频
FFmpeg 连载 3 - 视频解码
FFmpeg 连载 4 - 音频解码
FFmpeg 连载 5 - 音视频编码
FFmpeg 连载 6 - 音频重采样
FFmpeg 连载 8 - 视频合并以及替换视频背景音乐实战
ffplay 调试环境搭建
ffplay 整体框架
ffplay 数据读取线程

关注我,一起进步,人生不止 coding!!!

正文完
 0