if (dst == src)
return 0;
- // We can't fail if SPS isn't set at it breaks current skip_frame code
- //if (!h1->ps.sps)
- // return AVERROR_INVALIDDATA;
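+ // Failing only once inited preserves the first update, which may run
+ // before any SPS has been parsed (the skip_frame case noted above).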
+ if (inited && !h1->ps.sps)
+ return AVERROR_INVALIDDATA;
if (inited &&
(h->width != h1->width ||
// SPS/PPS
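+ // av_buffer_replace() unrefs the old entry and refs the new one,
+ // treating a NULL source as a plain unref.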
for (i = 0; i < FF_ARRAY_ELEMS(h->ps.sps_list); i++) {
- av_buffer_unref(&h->ps.sps_list[i]);
- if (h1->ps.sps_list[i]) {
- h->ps.sps_list[i] = av_buffer_ref(h1->ps.sps_list[i]);
- if (!h->ps.sps_list[i])
- return AVERROR(ENOMEM);
- }
+ ret = av_buffer_replace(&h->ps.sps_list[i], h1->ps.sps_list[i]);
+ if (ret < 0)
+ return ret;
}
for (i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++) {
- av_buffer_unref(&h->ps.pps_list[i]);
- if (h1->ps.pps_list[i]) {
- h->ps.pps_list[i] = av_buffer_ref(h1->ps.pps_list[i]);
- if (!h->ps.pps_list[i])
- return AVERROR(ENOMEM);
- }
+ ret = av_buffer_replace(&h->ps.pps_list[i], h1->ps.pps_list[i]);
+ if (ret < 0)
+ return ret;
}
- av_buffer_unref(&h->ps.pps_ref);
+ ret = av_buffer_replace(&h->ps.pps_ref, h1->ps.pps_ref);
+ if (ret < 0)
+ return ret;
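+ // h->ps.pps points into pps_ref->data (and sps is taken from it), so
+ // reset both and re-derive them from the replaced ref below.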
h->ps.pps = NULL;
h->ps.sps = NULL;
if (h1->ps.pps_ref) {
- h->ps.pps_ref = av_buffer_ref(h1->ps.pps_ref);
- if (!h->ps.pps_ref)
- return AVERROR(ENOMEM);
h->ps.pps = (const PPS*)h->ps.pps_ref->data;
h->ps.sps = h->ps.pps->sps;
}
h->frame_recovered = h1->frame_recovered;
- av_buffer_unref(&h->sei.a53_caption.buf_ref);
- if (h1->sei.a53_caption.buf_ref) {
- h->sei.a53_caption.buf_ref = av_buffer_ref(h1->sei.a53_caption.buf_ref);
- if (!h->sei.a53_caption.buf_ref)
- return AVERROR(ENOMEM);
+ ret = av_buffer_replace(&h->sei.a53_caption.buf_ref, h1->sei.a53_caption.buf_ref);
+ if (ret < 0)
+ return ret;
+
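+ // Drop any unregistered-SEI refs still held by the destination before
+ // copying the source's set.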
+ for (i = 0; i < h->sei.unregistered.nb_buf_ref; i++)
+ av_buffer_unref(&h->sei.unregistered.buf_ref[i]);
+ h->sei.unregistered.nb_buf_ref = 0;
+
+ if (h1->sei.unregistered.nb_buf_ref) {
+ ret = av_reallocp_array(&h->sei.unregistered.buf_ref,
+ h1->sei.unregistered.nb_buf_ref,
+ sizeof(*h->sei.unregistered.buf_ref));
+ if (ret < 0)
+ return ret;
+
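+ // Count each ref as it is taken so a mid-loop ENOMEM leaves
+ // nb_buf_ref consistent with the refs actually held.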
+ for (i = 0; i < h1->sei.unregistered.nb_buf_ref; i++) {
+ h->sei.unregistered.buf_ref[i] = av_buffer_ref(h1->sei.unregistered.buf_ref[i]);
+ if (!h->sei.unregistered.buf_ref[i])
+ return AVERROR(ENOMEM);
+ h->sei.unregistered.nb_buf_ref++;
+ }
}
+ h->sei.unregistered.x264_build = h1->sei.unregistered.x264_build;
if (!h->cur_pic_ptr)
return 0;
return err;
}
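+
+/* Light sync for the user-facing thread context: copy only the
+ * extradata-derived NAL framing fields it needs. */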
+int ff_h264_update_thread_context_for_user(AVCodecContext *dst,
+ const AVCodecContext *src)
+{
+ H264Context *h = dst->priv_data;
+ const H264Context *h1 = src->priv_data;
+
+ h->is_avc = h1->is_avc;
+ h->nal_length_size = h1->nal_length_size;
+
+ return 0;
+}
+
static int h264_frame_start(H264Context *h)
{
H264Picture *pic;
const SPS *sps = h->ps.sps;
int i, ret;
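+
+ // sps is dereferenced unconditionally below (ff_set_sar()), so fail
+ // early when no SPS is active.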
+ if (!sps) {
+ ret = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+
ff_set_sar(h->avctx, sps->sar);
av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt,
&h->chroma_x_shift, &h->chroma_y_shift);
{
const SPS *sps = h->ps.sps;
H264Picture *cur = h->cur_pic_ptr;
+ AVFrame *out = cur->f;

- cur->f->interlaced_frame = 0;
- cur->f->repeat_pict = 0;
+ out->interlaced_frame = 0;
+ out->repeat_pict = 0;
/* Signal interlacing information externally. */
/* Prioritize picture timing SEI information over used
break;
case H264_SEI_PIC_STRUCT_TOP_FIELD:
case H264_SEI_PIC_STRUCT_BOTTOM_FIELD:
- cur->f->interlaced_frame = 1;
+ out->interlaced_frame = 1;
break;
case H264_SEI_PIC_STRUCT_TOP_BOTTOM:
case H264_SEI_PIC_STRUCT_BOTTOM_TOP:
if (FIELD_OR_MBAFF_PICTURE(h))
- cur->f->interlaced_frame = 1;
+ out->interlaced_frame = 1;
else
// try to flag soft telecine progressive
- cur->f->interlaced_frame = h->prev_interlaced_frame;
+ out->interlaced_frame = h->prev_interlaced_frame;
break;
case H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
case H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
/* Signal the possibility of telecined film externally
* (pic_struct 5,6). From these hints, let the applications
* decide if they apply deinterlacing. */
- cur->f->repeat_pict = 1;
+ out->repeat_pict = 1;
break;
case H264_SEI_PIC_STRUCT_FRAME_DOUBLING:
- cur->f->repeat_pict = 2;
+ out->repeat_pict = 2;
break;
case H264_SEI_PIC_STRUCT_FRAME_TRIPLING:
- cur->f->repeat_pict = 4;
+ out->repeat_pict = 4;
break;
}
if ((pt->ct_type & 3) &&
pt->pic_struct <= H264_SEI_PIC_STRUCT_BOTTOM_TOP)
- cur->f->interlaced_frame = (pt->ct_type & (1 << 1)) != 0;
+ out->interlaced_frame = (pt->ct_type & (1 << 1)) != 0;
} else {
/* Derive interlacing flag from used decoding process. */
- cur->f->interlaced_frame = FIELD_OR_MBAFF_PICTURE(h);
+ out->interlaced_frame = FIELD_OR_MBAFF_PICTURE(h);
}
- h->prev_interlaced_frame = cur->f->interlaced_frame;
+ h->prev_interlaced_frame = out->interlaced_frame;
if (cur->field_poc[0] != cur->field_poc[1]) {
/* Derive top_field_first from field pocs. */
- cur->f->top_field_first = cur->field_poc[0] < cur->field_poc[1];
+ out->top_field_first = cur->field_poc[0] < cur->field_poc[1];
} else {
if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
/* Use picture timing SEI information. Even if it is a
* information of a past frame, better than nothing. */
if (h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM ||
h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
- cur->f->top_field_first = 1;
+ out->top_field_first = 1;
else
- cur->f->top_field_first = 0;
- } else if (cur->f->interlaced_frame) {
+ out->top_field_first = 0;
+ } else if (out->interlaced_frame) {
/* Default to top field first when pic_struct_present_flag
* is not set but interlaced frame detected */
- cur->f->top_field_first = 1;
+ out->top_field_first = 1;
} else {
/* Most likely progressive */
- cur->f->top_field_first = 0;
+ out->top_field_first = 0;
}
}
h->sei.frame_packing.content_interpretation_type > 0 &&
h->sei.frame_packing.content_interpretation_type < 3) {
H264SEIFramePacking *fp = &h->sei.frame_packing;
- AVStereo3D *stereo = av_stereo3d_create_side_data(cur->f);
+ AVStereo3D *stereo = av_stereo3d_create_side_data(out);
if (stereo) {
switch (fp->arrangement_type) {
case H264_SEI_FPA_TYPE_CHECKERBOARD:
h->sei.display_orientation.vflip)) {
H264SEIDisplayOrientation *o = &h->sei.display_orientation;
double angle = o->anticlockwise_rotation * 360 / (double) (1 << 16);
- AVFrameSideData *rotation = av_frame_new_side_data(cur->f,
+ AVFrameSideData *rotation = av_frame_new_side_data(out,
AV_FRAME_DATA_DISPLAYMATRIX,
sizeof(int32_t) * 9);
if (rotation) {
}
if (h->sei.afd.present) {
- AVFrameSideData *sd = av_frame_new_side_data(cur->f, AV_FRAME_DATA_AFD,
+ AVFrameSideData *sd = av_frame_new_side_data(out, AV_FRAME_DATA_AFD,
sizeof(uint8_t));
if (sd) {
if (h->sei.a53_caption.buf_ref) {
H264SEIA53Caption *a53 = &h->sei.a53_caption;
- AVFrameSideData *sd = av_frame_new_side_data_from_buf(cur->f, AV_FRAME_DATA_A53_CC, a53->buf_ref);
+ AVFrameSideData *sd = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_A53_CC, a53->buf_ref);
if (!sd)
av_buffer_unref(&a53->buf_ref);
a53->buf_ref = NULL;
H264SEIUnregistered *unreg = &h->sei.unregistered;
if (unreg->buf_ref[i]) {
- AVFrameSideData *sd = av_frame_new_side_data_from_buf(cur->f,
+ AVFrameSideData *sd = av_frame_new_side_data_from_buf(out,
AV_FRAME_DATA_SEI_UNREGISTERED,
unreg->buf_ref[i]);
if (!sd)
h->sei.unregistered.nb_buf_ref = 0;
if (h->sei.picture_timing.timecode_cnt > 0) {
- uint32_t tc = 0;
uint32_t *tc_sd;
+ char tcbuf[AV_TIMECODE_STR_SIZE];
- AVFrameSideData *tcside = av_frame_new_side_data(cur->f,
+ AVFrameSideData *tcside = av_frame_new_side_data(out,
AV_FRAME_DATA_S12M_TIMECODE,
sizeof(uint32_t)*4);
if (!tcside)
tc_sd[0] = h->sei.picture_timing.timecode_cnt;
for (int i = 0; i < tc_sd[0]; i++) {
- uint32_t frames;
-
- /* For SMPTE 12-M timecodes, frame count is a special case if > 30 FPS.
- See SMPTE ST 12-1:2014 Sec 12.1 for more info. */
- if (av_cmp_q(h->avctx->framerate, (AVRational) {30, 1}) == 1) {
- frames = h->sei.picture_timing.timecode[i].frame / 2;
- if (h->sei.picture_timing.timecode[i].frame % 2 == 1) {
- if (av_cmp_q(h->avctx->framerate, (AVRational) {50, 1}) == 0)
- tc |= (1 << 7);
- else
- tc |= (1 << 23);
- }
- } else {
- frames = h->sei.picture_timing.timecode[i].frame;
- }
-
- tc |= h->sei.picture_timing.timecode[i].dropframe << 30;
- tc |= (frames / 10) << 28;
- tc |= (frames % 10) << 24;
- tc |= (h->sei.picture_timing.timecode[i].seconds / 10) << 20;
- tc |= (h->sei.picture_timing.timecode[i].seconds % 10) << 16;
- tc |= (h->sei.picture_timing.timecode[i].minutes / 10) << 12;
- tc |= (h->sei.picture_timing.timecode[i].minutes % 10) << 8;
- tc |= (h->sei.picture_timing.timecode[i].hours / 10) << 4;
- tc |= (h->sei.picture_timing.timecode[i].hours % 10);
+ int drop = h->sei.picture_timing.timecode[i].dropframe;
+ int hh = h->sei.picture_timing.timecode[i].hours;
+ int mm = h->sei.picture_timing.timecode[i].minutes;
+ int ss = h->sei.picture_timing.timecode[i].seconds;
+ int ff = h->sei.picture_timing.timecode[i].frame;
- tc_sd[i + 1] = tc;
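+ // av_timecode_get_smpte() packs the fields into the SMPTE ST 12-1
+ // binary layout, including the >30 fps frame/field handling that was
+ // open-coded above.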
+ tc_sd[i + 1] = av_timecode_get_smpte(h->avctx->framerate, drop, hh, mm, ss, ff);
+ av_timecode_make_smpte_tc_string2(tcbuf, h->avctx->framerate, tc_sd[i + 1], 0, 0);
+ av_dict_set(&out->metadata, "timecode", tcbuf, 0);
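+ // av_dict_set() without flags replaces an existing entry, so the last
+ // timecode of the set is what remains in the frame metadata.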
}
h->sei.picture_timing.timecode_cnt = 0;
}
ff_thread_await_progress(&prev->tf, INT_MAX, 0);
if (prev->field_picture)
ff_thread_await_progress(&prev->tf, INT_MAX, 1);
- av_image_copy(h->short_ref[0]->f->data,
- h->short_ref[0]->f->linesize,
- (const uint8_t **)prev->f->data,
- prev->f->linesize,
- prev->f->format,
- prev->f->width,
- prev->f->height);
- h->short_ref[0]->poc = prev->poc + 2;
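+ // Reference the previous frame's buffers instead of copying its
+ // pixels into the gap frame.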
+ ff_thread_release_buffer(h->avctx, &h->short_ref[0]->tf);
+ h->short_ref[0]->tf.f = h->short_ref[0]->f;
+ ret = ff_thread_ref_frame(&h->short_ref[0]->tf, &prev->tf);
+ if (ret < 0)
+ return ret;
+ h->short_ref[0]->poc = prev->poc + 2U;
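+ // Report the re-referenced frame as complete so other threads waiting
+ // on it can proceed.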
+ ff_thread_report_progress(&h->short_ref[0]->tf, INT_MAX, 0);
+ if (h->short_ref[0]->field_picture)
+ ff_thread_report_progress(&h->short_ref[0]->tf, INT_MAX, 1);
} else if (!h->frame_recovered && !h->avctx->hwaccel)
ff_color_frame(h->short_ref[0]->f, c);
h->short_ref[0]->frame_num = h->poc.prev_frame_num;