X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fpthread.c;h=b646f66ab1c3c349dd93042df2cb1d30cc068f99;hb=530cd2893e020206262a146d567c707ef93baf31;hp=0688d9d8f04e5129a0578e79de07203d76420184;hpb=3dc99a18d4ae2b9bcc96e00b7f589128717aec64;p=ffmpeg

diff --git a/libavcodec/pthread.c b/libavcodec/pthread.c
index 0688d9d8f04..b646f66ab1c 100644
--- a/libavcodec/pthread.c
+++ b/libavcodec/pthread.c
@@ -31,32 +31,18 @@
 
 #include "config.h"
 
-#if HAVE_SCHED_GETAFFINITY
-#define _GNU_SOURCE
-#include <sched.h>
-#endif
-#if HAVE_GETPROCESSAFFINITYMASK
-#include <windows.h>
-#endif
-#if HAVE_SYSCTL
-#if HAVE_SYS_PARAM_H
-#include <sys/param.h>
-#endif
-#include <sys/types.h>
-#include <sys/sysctl.h>
-#endif
-#if HAVE_SYSCONF
-#include <unistd.h>
-#endif
-
 #include "avcodec.h"
 #include "internal.h"
 #include "thread.h"
+#include "libavutil/avassert.h"
+#include "libavutil/common.h"
+#include "libavutil/cpu.h"
+#include "libavutil/internal.h"
 
 #if HAVE_PTHREADS
 #include <pthread.h>
 #elif HAVE_W32THREADS
-#include "w32pthreads.h"
+#include "compat/w32pthreads.h"
 #endif
 
 typedef int (action_func)(AVCodecContext *c, void *arg);
@@ -75,13 +61,11 @@ typedef struct ThreadContext {
     pthread_cond_t last_job_cond;
     pthread_cond_t current_job_cond;
     pthread_mutex_t current_job_lock;
+    unsigned current_execute;
     int current_job;
     int done;
 } ThreadContext;
 
-/// Max number of frame buffers that can be allocated when using frame threads.
-#define MAX_BUFFERS (32+1)
-
 /**
  * Context used by codec threads and stored in their AVCodecContext thread_opaque.
  */
@@ -100,7 +84,8 @@ typedef struct PerThreadContext {
     AVCodecContext *avctx;          ///< Context used to decode packets passed to this thread.
 
     AVPacket       avpkt;           ///< Input packet (for decoding) or output (for encoding).
-    int            allocated_buf_size; ///< Size allocated for avpkt.data
+    uint8_t       *buf;             ///< backup storage for packet data when the input packet is not refcounted
+    int            allocated_buf_size; ///< Size allocated for buf
 
     AVFrame frame;                  ///< Output frame (for decoding) or input (for encoding).
     int     got_frame;              ///< The output of got_picture_ptr from the last avcodec_decode_video() call.
@@ -120,16 +105,12 @@ typedef struct PerThreadContext {
      * Array of frames passed to ff_thread_release_buffer().
      * Frames are released after all threads referencing them are finished.
      */
-    AVFrame released_buffers[MAX_BUFFERS];
-    int     num_released_buffers;
-
-    /**
-     * Array of progress values used by ff_thread_get_buffer().
- */ - int progress[MAX_BUFFERS][2]; - uint8_t progress_used[MAX_BUFFERS]; + AVFrame *released_buffers; + int num_released_buffers; + int released_buffers_allocated; AVFrame *requested_frame; ///< AVFrame the codec passed to get_buffer() + int requested_flags; ///< flags passed to get_buffer() for requested_frame } PerThreadContext; /** @@ -157,44 +138,11 @@ typedef struct FrameThreadContext { * limit the number of threads to 16 for automatic detection */ #define MAX_AUTO_THREADS 16 -static int get_logical_cpus(AVCodecContext *avctx) -{ - int ret, nb_cpus = 1; -#if HAVE_SCHED_GETAFFINITY && defined(CPU_COUNT) - cpu_set_t cpuset; - - CPU_ZERO(&cpuset); - - ret = sched_getaffinity(0, sizeof(cpuset), &cpuset); - if (!ret) { - nb_cpus = CPU_COUNT(&cpuset); - } -#elif HAVE_GETPROCESSAFFINITYMASK - DWORD_PTR proc_aff, sys_aff; - ret = GetProcessAffinityMask(GetCurrentProcess(), &proc_aff, &sys_aff); - if (ret) - nb_cpus = av_popcount64(proc_aff); -#elif HAVE_SYSCTL && defined(HW_NCPU) - int mib[2] = { CTL_HW, HW_NCPU }; - size_t len = sizeof(nb_cpus); - - ret = sysctl(mib, 2, &nb_cpus, &len, NULL, 0); - if (ret == -1) - nb_cpus = 0; -#elif HAVE_SYSCONF && defined(_SC_NPROC_ONLN) - nb_cpus = sysconf(_SC_NPROC_ONLN); -#elif HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN) - nb_cpus = sysconf(_SC_NPROCESSORS_ONLN); -#endif - av_log(avctx, AV_LOG_DEBUG, "detected %d logical cores\n", nb_cpus); - return nb_cpus; -} - - static void* attribute_align_arg worker(void *v) { AVCodecContext *avctx = v; ThreadContext *c = avctx->thread_opaque; + unsigned last_execute = 0; int our_job = c->job_count; int thread_count = avctx->thread_count; int self_id; @@ -206,7 +154,9 @@ static void* attribute_align_arg worker(void *v) if (c->current_job == thread_count + c->job_count) pthread_cond_signal(&c->last_job_cond); - pthread_cond_wait(&c->current_job_cond, &c->current_job_lock); + while (last_execute == c->current_execute && !c->done) + pthread_cond_wait(&c->current_job_cond, &c->current_job_lock); + last_execute = c->current_execute; our_job = self_id; if (c->done) { @@ -226,7 +176,8 @@ static void* attribute_align_arg worker(void *v) static av_always_inline void avcodec_thread_park_workers(ThreadContext *c, int thread_count) { - pthread_cond_wait(&c->last_job_cond, &c->current_job_lock); + while (c->current_job != thread_count + c->job_count) + pthread_cond_wait(&c->last_job_cond, &c->current_job_lock); pthread_mutex_unlock(&c->current_job_lock); } @@ -275,6 +226,7 @@ static int avcodec_thread_execute(AVCodecContext *avctx, action_func* func, void c->rets = &dummy_ret; c->rets_count = 1; } + c->current_execute++; pthread_cond_broadcast(&c->current_job_cond); avcodec_thread_park_workers(c, avctx->thread_count); @@ -289,14 +241,15 @@ static int avcodec_thread_execute2(AVCodecContext *avctx, action_func2* func2, v return avcodec_thread_execute(avctx, NULL, arg, ret, job_count, 0); } -static int thread_init(AVCodecContext *avctx) +static int thread_init_internal(AVCodecContext *avctx) { int i; ThreadContext *c; int thread_count = avctx->thread_count; if (!thread_count) { - int nb_cpus = get_logical_cpus(avctx); + int nb_cpus = av_cpu_count(); + av_log(avctx, AV_LOG_DEBUG, "detected %d logical cores\n", nb_cpus); // use number of cores + 1 as thread count if there is more than one if (nb_cpus > 1) thread_count = avctx->thread_count = FFMIN(nb_cpus + 1, MAX_AUTO_THREADS); @@ -356,7 +309,7 @@ static attribute_align_arg void *frame_worker_thread(void *arg) PerThreadContext *p = arg; FrameThreadContext *fctx = p->parent; 
AVCodecContext *avctx = p->avctx; - AVCodec *codec = avctx->codec; + const AVCodec *codec = avctx->codec; while (1) { if (p->state == STATE_INPUT_READY && !fctx->die) { @@ -376,6 +329,10 @@ static attribute_align_arg void *frame_worker_thread(void *arg) p->got_frame = 0; p->result = codec->decode(avctx, &p->frame, &p->got_frame, &p->avpkt); + /* many decoders assign whole AVFrames, thus overwriting extended_data; + * make sure it's set correctly */ + p->frame.extended_data = p->frame.data; + if (p->state == STATE_SETTING_UP) ff_thread_finish_setup(avctx); p->state = STATE_INPUT_READY; @@ -402,7 +359,6 @@ static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src, int err = 0; if (dst != src) { - dst->sub_id = src->sub_id; dst->time_base = src->time_base; dst->width = src->width; dst->height = src->height; @@ -413,7 +369,6 @@ static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src, dst->has_b_frames = src->has_b_frames; dst->idct_algo = src->idct_algo; - dst->slice_count = src->slice_count; dst->bits_per_coded_sample = src->bits_per_coded_sample; dst->sample_aspect_ratio = src->sample_aspect_ratio; @@ -430,6 +385,9 @@ static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src, dst->colorspace = src->colorspace; dst->color_range = src->color_range; dst->chroma_sample_location = src->chroma_sample_location; + + dst->hwaccel = src->hwaccel; + dst->hwaccel_context = src->hwaccel_context; } if (for_user) { @@ -447,37 +405,50 @@ static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src, * * @param dst The destination context. * @param src The source context. + * @return 0 on success, negative error code on failure */ -static void update_context_from_user(AVCodecContext *dst, AVCodecContext *src) +static int update_context_from_user(AVCodecContext *dst, AVCodecContext *src) { #define copy_fields(s, e) memcpy(&dst->s, &src->s, (char*)&dst->e - (char*)&dst->s); dst->flags = src->flags; dst->draw_horiz_band= src->draw_horiz_band; + dst->get_buffer2 = src->get_buffer2; +#if FF_API_GET_BUFFER +FF_DISABLE_DEPRECATION_WARNINGS dst->get_buffer = src->get_buffer; dst->release_buffer = src->release_buffer; +FF_ENABLE_DEPRECATION_WARNINGS +#endif dst->opaque = src->opaque; - dst->dsp_mask = src->dsp_mask; dst->debug = src->debug; dst->debug_mv = src->debug_mv; dst->slice_flags = src->slice_flags; dst->flags2 = src->flags2; - copy_fields(skip_loop_filter, bidir_refine); + copy_fields(skip_loop_filter, subtitle_header); dst->frame_number = src->frame_number; dst->reordered_opaque = src->reordered_opaque; -#undef copy_fields -} -static void free_progress(AVFrame *f) -{ - PerThreadContext *p = f->owner->thread_opaque; - int *progress = f->thread_opaque; - - p->progress_used[(progress - p->progress[0]) / 2] = 0; + if (src->slice_count && src->slice_offset) { + if (dst->slice_count < src->slice_count) { + int *tmp = av_realloc(dst->slice_offset, src->slice_count * + sizeof(*dst->slice_offset)); + if (!tmp) { + av_free(dst->slice_offset); + return AVERROR(ENOMEM); + } + dst->slice_offset = tmp; + } + memcpy(dst->slice_offset, src->slice_offset, + src->slice_count * sizeof(*dst->slice_offset)); + } + dst->slice_count = src->slice_count; + return 0; +#undef copy_fields } /// Releases the buffers that this decoding thread was the last user of. 
@@ -489,11 +460,13 @@ static void release_delayed_buffers(PerThreadContext *p) AVFrame *f; pthread_mutex_lock(&fctx->buffer_mutex); + + // fix extended data in case the caller screwed it up + av_assert0(p->avctx->codec_type == AVMEDIA_TYPE_VIDEO); f = &p->released_buffers[--p->num_released_buffers]; - free_progress(f); - f->thread_opaque = NULL; + f->extended_data = f->data; + av_frame_unref(f); - f->owner->release_buffer(f->owner, f); pthread_mutex_unlock(&fctx->buffer_mutex); } } @@ -502,8 +475,7 @@ static int submit_packet(PerThreadContext *p, AVPacket *avpkt) { FrameThreadContext *fctx = p->parent; PerThreadContext *prev_thread = fctx->prev_thread; - AVCodec *codec = p->avctx->codec; - uint8_t *buf = p->avpkt.data; + const AVCodec *codec = p->avctx->codec; if (!avpkt->size && !(codec->capabilities & CODEC_CAP_DELAY)) return 0; @@ -527,11 +499,16 @@ static int submit_packet(PerThreadContext *p, AVPacket *avpkt) } } - av_fast_malloc(&buf, &p->allocated_buf_size, avpkt->size + FF_INPUT_BUFFER_PADDING_SIZE); + av_buffer_unref(&p->avpkt.buf); p->avpkt = *avpkt; - p->avpkt.data = buf; - memcpy(buf, avpkt->data, avpkt->size); - memset(buf + avpkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE); + if (avpkt->buf) + p->avpkt.buf = av_buffer_ref(avpkt->buf); + else { + av_fast_malloc(&p->buf, &p->allocated_buf_size, avpkt->size + FF_INPUT_BUFFER_PADDING_SIZE); + p->avpkt.data = p->buf; + memcpy(p->buf, avpkt->data, avpkt->size); + memset(p->buf + avpkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE); + } p->state = STATE_SETTING_UP; pthread_cond_signal(&p->input_cond); @@ -543,15 +520,20 @@ static int submit_packet(PerThreadContext *p, AVPacket *avpkt) * and it calls back to the client here. */ - if (!p->avctx->thread_safe_callbacks && - p->avctx->get_buffer != avcodec_default_get_buffer) { +FF_DISABLE_DEPRECATION_WARNINGS + if (!p->avctx->thread_safe_callbacks && ( +#if FF_API_GET_BUFFER + p->avctx->get_buffer || +#endif + p->avctx->get_buffer2 != avcodec_default_get_buffer2)) { +FF_ENABLE_DEPRECATION_WARNINGS while (p->state != STATE_SETUP_FINISHED && p->state != STATE_INPUT_READY) { pthread_mutex_lock(&p->progress_mutex); while (p->state == STATE_SETTING_UP) pthread_cond_wait(&p->progress_cond, &p->progress_mutex); if (p->state == STATE_GET_BUFFER) { - p->result = p->avctx->get_buffer(p->avctx, p->requested_frame); + p->result = ff_get_buffer(p->avctx, p->requested_frame, p->requested_flags); p->state = STATE_SETTING_UP; pthread_cond_signal(&p->progress_cond); } @@ -579,7 +561,8 @@ int ff_thread_decode_frame(AVCodecContext *avctx, */ p = &fctx->threads[fctx->next_decoding]; - update_context_from_user(p->avctx, avctx); + err = update_context_from_user(p->avctx, avctx); + if (err) return err; err = submit_packet(p, avpkt); if (err) return err; @@ -587,11 +570,12 @@ int ff_thread_decode_frame(AVCodecContext *avctx, * If we're still receiving the initial packets, don't return a frame. 
*/ - if (fctx->delaying && avpkt->size) { + if (fctx->delaying) { if (fctx->next_decoding >= (avctx->thread_count-1)) fctx->delaying = 0; *got_picture_ptr=0; - return avpkt->size; + if (avpkt->size) + return avpkt->size; } /* @@ -611,13 +595,9 @@ int ff_thread_decode_frame(AVCodecContext *avctx, pthread_mutex_unlock(&p->progress_mutex); } - *picture = p->frame; + av_frame_move_ref(picture, &p->frame); *got_picture_ptr = p->got_frame; picture->pkt_dts = p->avpkt.dts; - picture->sample_aspect_ratio = avctx->sample_aspect_ratio; - picture->width = avctx->width; - picture->height = avctx->height; - picture->format = avctx->pix_fmt; /* * A later call with avkpt->size == 0 may loop over all threads, @@ -640,10 +620,10 @@ int ff_thread_decode_frame(AVCodecContext *avctx, return (p->result >= 0) ? avpkt->size : p->result; } -void ff_thread_report_progress(AVFrame *f, int n, int field) +void ff_thread_report_progress(ThreadFrame *f, int n, int field) { PerThreadContext *p; - int *progress = f->thread_opaque; + int *progress = f->progress ? (int*)f->progress->data : NULL; if (!progress || progress[field] >= n) return; @@ -658,10 +638,10 @@ void ff_thread_report_progress(AVFrame *f, int n, int field) pthread_mutex_unlock(&p->progress_mutex); } -void ff_thread_await_progress(AVFrame *f, int n, int field) +void ff_thread_await_progress(ThreadFrame *f, int n, int field) { PerThreadContext *p; - int *progress = f->thread_opaque; + int *progress = f->progress ? (int*)f->progress->data : NULL; if (!progress || progress[field] >= n) return; @@ -707,7 +687,7 @@ static void park_frame_worker_threads(FrameThreadContext *fctx, int thread_count static void frame_thread_free(AVCodecContext *avctx, int thread_count) { FrameThreadContext *fctx = avctx->thread_opaque; - AVCodec *codec = avctx->codec; + const AVCodec *codec = avctx->codec; int i; park_frame_worker_threads(fctx, thread_count); @@ -733,23 +713,25 @@ static void frame_thread_free(AVCodecContext *avctx, int thread_count) avctx->codec = NULL; release_delayed_buffers(p); + av_frame_unref(&p->frame); } for (i = 0; i < thread_count; i++) { PerThreadContext *p = &fctx->threads[i]; - avcodec_default_free_buffers(p->avctx); - pthread_mutex_destroy(&p->mutex); pthread_mutex_destroy(&p->progress_mutex); pthread_cond_destroy(&p->input_cond); pthread_cond_destroy(&p->progress_cond); pthread_cond_destroy(&p->output_cond); - av_freep(&p->avpkt.data); + av_buffer_unref(&p->avpkt.buf); + av_freep(&p->buf); + av_freep(&p->released_buffers); if (i) { av_freep(&p->avctx->priv_data); av_freep(&p->avctx->internal); + av_freep(&p->avctx->slice_offset); } av_freep(&p->avctx); @@ -763,13 +745,14 @@ static void frame_thread_free(AVCodecContext *avctx, int thread_count) static int frame_thread_init(AVCodecContext *avctx) { int thread_count = avctx->thread_count; - AVCodec *codec = avctx->codec; + const AVCodec *codec = avctx->codec; AVCodecContext *src = avctx; FrameThreadContext *fctx; int i, err = 0; if (!thread_count) { - int nb_cpus = get_logical_cpus(avctx); + int nb_cpus = av_cpu_count(); + av_log(avctx, AV_LOG_DEBUG, "detected %d logical cores\n", nb_cpus); // use number of cores + 1 as thread count if there is more than one if (nb_cpus > 1) thread_count = avctx->thread_count = FFMIN(nb_cpus + 1, MAX_AUTO_THREADS); @@ -852,6 +835,7 @@ error: void ff_thread_flush(AVCodecContext *avctx) { + int i; FrameThreadContext *fctx = avctx->thread_opaque; if (!avctx->thread_opaque) return; @@ -867,36 +851,25 @@ void ff_thread_flush(AVCodecContext *avctx) fctx->next_decoding = 
fctx->next_finished = 0; fctx->delaying = 1; fctx->prev_thread = NULL; -} - -static int *allocate_progress(PerThreadContext *p) -{ - int i; - - for (i = 0; i < MAX_BUFFERS; i++) - if (!p->progress_used[i]) break; + for (i = 0; i < avctx->thread_count; i++) { + PerThreadContext *p = &fctx->threads[i]; + // Make sure decode flush calls with size=0 won't return old frames + p->got_frame = 0; + av_frame_unref(&p->frame); - if (i == MAX_BUFFERS) { - av_log(p->avctx, AV_LOG_ERROR, "allocate_progress() overflow\n"); - return NULL; + release_delayed_buffers(p); } - - p->progress_used[i] = 1; - - return p->progress[i]; } -int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f) +int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags) { PerThreadContext *p = avctx->thread_opaque; - int *progress, err; + int err; f->owner = avctx; - if (!(avctx->active_thread_type&FF_THREAD_FRAME)) { - f->thread_opaque = NULL; - return avctx->get_buffer(avctx, f); - } + if (!(avctx->active_thread_type & FF_THREAD_FRAME)) + return ff_get_buffer(avctx, f->f, flags); if (p->state != STATE_SETTING_UP && (avctx->codec->update_thread_context || !avctx->thread_safe_callbacks)) { @@ -904,22 +877,29 @@ int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f) return -1; } - pthread_mutex_lock(&p->parent->buffer_mutex); - f->thread_opaque = progress = allocate_progress(p); + if (avctx->internal->allocate_progress) { + int *progress; + f->progress = av_buffer_alloc(2 * sizeof(int)); + if (!f->progress) { + return AVERROR(ENOMEM); + } + progress = (int*)f->progress->data; - if (!progress) { - pthread_mutex_unlock(&p->parent->buffer_mutex); - return -1; + progress[0] = progress[1] = -1; } - progress[0] = - progress[1] = -1; - - if (avctx->thread_safe_callbacks || - avctx->get_buffer == avcodec_default_get_buffer) { - err = avctx->get_buffer(avctx, f); + pthread_mutex_lock(&p->parent->buffer_mutex); +FF_DISABLE_DEPRECATION_WARNINGS + if (avctx->thread_safe_callbacks || ( +#if FF_API_GET_BUFFER + !avctx->get_buffer && +#endif + avctx->get_buffer2 == avcodec_default_get_buffer2)) { +FF_ENABLE_DEPRECATION_WARNINGS + err = ff_get_buffer(avctx, f->f, flags); } else { - p->requested_frame = f; + p->requested_frame = f->f; + p->requested_flags = flags; p->state = STATE_GET_BUFFER; pthread_mutex_lock(&p->progress_mutex); pthread_cond_signal(&p->progress_cond); @@ -931,38 +911,66 @@ int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f) pthread_mutex_unlock(&p->progress_mutex); - if (!avctx->codec->update_thread_context) - ff_thread_finish_setup(avctx); } + if (!avctx->thread_safe_callbacks && !avctx->codec->update_thread_context) + ff_thread_finish_setup(avctx); + + if (err) + av_buffer_unref(&f->progress); pthread_mutex_unlock(&p->parent->buffer_mutex); return err; } -void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f) +void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f) { PerThreadContext *p = avctx->thread_opaque; FrameThreadContext *fctx; + AVFrame *dst, *tmp; +FF_DISABLE_DEPRECATION_WARNINGS + int can_direct_free = !(avctx->active_thread_type & FF_THREAD_FRAME) || + avctx->thread_safe_callbacks || + ( +#if FF_API_GET_BUFFER + !avctx->get_buffer && +#endif + avctx->get_buffer2 == avcodec_default_get_buffer2); +FF_ENABLE_DEPRECATION_WARNINGS - if (!(avctx->active_thread_type&FF_THREAD_FRAME)) { - avctx->release_buffer(avctx, f); + if (!f->f->data[0]) return; - } - if (p->num_released_buffers >= MAX_BUFFERS) { - av_log(p->avctx, AV_LOG_ERROR, "too many thread_release_buffer 
calls!\n"); + if (avctx->debug & FF_DEBUG_BUFFERS) + av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p\n", f); + + av_buffer_unref(&f->progress); + f->owner = NULL; + + if (can_direct_free) { + av_frame_unref(f->f); return; } - if(avctx->debug & FF_DEBUG_BUFFERS) - av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p\n", f); - fctx = p->parent; pthread_mutex_lock(&fctx->buffer_mutex); - p->released_buffers[p->num_released_buffers++] = *f; + + if (p->num_released_buffers + 1 >= INT_MAX / sizeof(*p->released_buffers)) + goto fail; + tmp = av_fast_realloc(p->released_buffers, &p->released_buffers_allocated, + (p->num_released_buffers + 1) * + sizeof(*p->released_buffers)); + if (!tmp) + goto fail; + p->released_buffers = tmp; + + dst = &p->released_buffers[p->num_released_buffers]; + av_frame_move_ref(dst, f->f); + + p->num_released_buffers++; + +fail: pthread_mutex_unlock(&fctx->buffer_mutex); - memset(f->data, 0, sizeof(f->data)); } /** @@ -991,27 +999,25 @@ static void validate_thread_parameters(AVCodecContext *avctx) avctx->thread_count = 1; avctx->active_thread_type = 0; } + + if (avctx->thread_count > MAX_AUTO_THREADS) + av_log(avctx, AV_LOG_WARNING, + "Application has requested %d threads. Using a thread count greater than %d is not recommended.\n", + avctx->thread_count, MAX_AUTO_THREADS); } int ff_thread_init(AVCodecContext *avctx) { - if (avctx->thread_opaque) { - av_log(avctx, AV_LOG_ERROR, "avcodec_thread_init is ignored after avcodec_open\n"); - return -1; - } - #if HAVE_W32THREADS w32thread_init(); #endif - if (avctx->codec) { - validate_thread_parameters(avctx); + validate_thread_parameters(avctx); - if (avctx->active_thread_type&FF_THREAD_SLICE) - return thread_init(avctx); - else if (avctx->active_thread_type&FF_THREAD_FRAME) - return frame_thread_init(avctx); - } + if (avctx->active_thread_type&FF_THREAD_SLICE) + return thread_init_internal(avctx); + else if (avctx->active_thread_type&FF_THREAD_FRAME) + return frame_thread_init(avctx); return 0; }