- support for WavPack muxing (raw and in Matroska)
- XVideo output device
- vignette filter
+- True Audio (TTA) encoder
+- Go2Webinar decoder
version 1.2:
- vf_hqdn3d.c
- vf_hue.c
- vf_kerndeint.c
+ - vf_mcdeint.c
- vf_mp.c
- vf_noise.c
- vf_owdenoise.c
truespeech.c Kostya Shishkov
tscc.c Kostya Shishkov
tta.c Alex Beregszaszi, Jaikrishnan Menon
+ ttaenc.c Paul B Mahol
txd.c Ivo van Poorten
ulti* Kostya Shishkov
v410*.c Derek Buitenhuis
flv_encoder_select="h263_encoder"
fourxm_decoder_select="dsputil"
fraps_decoder_select="dsputil huffman"
+g2m_decoder_select="dsputil zlib"
g729_decoder_select="dsputil"
h261_decoder_select="error_resilience mpegvideo"
h261_encoder_select="aandcttables mpegvideoenc"
hue_filter_deps="gpl"
interlace_filter_deps="gpl"
kerndeint_filter_deps="gpl"
+mcdeint_filter_deps="avcodec gpl"
movie_filter_deps="avcodec avformat"
mp_filter_deps="gpl avcodec swscale inline_asm"
mpdecimate_filter_deps="gpl avcodec"
enabled_any sndio_indev sndio_outdev && check_lib2 sndio.h sio_open -lsndio
if enabled libcdio; then
- check_lib2 "cdio/cdda.h cdio/paranoia.h" cdio_cddap_open -lcdio_paranoia -lcdio_cdda -lcdio || check_lib2 "cdio/paranoia/cdda.h cdio/paranoia/paranoia.h" cdio_cddap_open -lcdio_paranoia -lcdio_cdda -lcdio
+ check_lib2 "cdio/cdda.h cdio/paranoia.h" cdio_cddap_open -lcdio_paranoia -lcdio_cdda -lcdio ||
+ check_lib2 "cdio/paranoia/cdda.h cdio/paranoia/paranoia.h" cdio_cddap_open -lcdio_paranoia -lcdio_cdda -lcdio ||
+ die "ERROR: libcdio-paranoia not found"
fi
enabled x11grab &&
@end example
@end itemize
+@section mcdeint
+
+Apply motion-compensation deinterlacing.
+
+It needs one field per frame as input and must thus be used together
+with @code{yadif=1} or @code{yadif=3} (one output frame per field) or
+an equivalent filter.
+
+This filter accepts the following options:
+@table @option
+@item mode
+Set the deinterlacing mode.
+
+It accepts one of the following values:
+@table @samp
+@item fast
+@item medium
+@item slow
+use iterative motion estimation
+@item extra_slow
+like @samp{slow}, but use multiple reference frames.
+@end table
+Default value is @samp{fast}.
+
+@item parity
+Set the picture field parity assumed for the input video. It must be
+one of the following values:
+
+@table @samp
+@item 0, tff
+assume top field first
+@item 1, bff
+assume bottom field first
+@end table
+
+Default value is @samp{bff}.
+
+@item qp
+Set per-block quantization parameter (QP) used by the internal
+encoder.
+
+Higher values should result in a smoother motion vector field but less
+optimal individual vectors. Default value is 1.
+@end table
+
@section mp
Apply an MPlayer filter to the input video.
@item fil
@item fspp
@item ilpack
-@item mcdeint
@item perspective
@item phase
@item pp7
@tab Sorenson H.263 used in Flash
@item Forward Uncompressed @tab @tab X
@item Fraps @tab @tab X
+@item Go2Webinar @tab @tab X
+ @tab fourcc: G2M4
@item H.261 @tab X @tab X
@item H.263 / H.263-1996 @tab X @tab X
@item H.263+ / H.263-1998 / H.263 version 2 @tab X @tab X
@item Speex @tab E @tab E
@tab supported through external library libspeex
@item TAK (Tom's lossless Audio Kompressor) @tab @tab X
-@item True Audio (TTA) @tab @tab X
+@item True Audio (TTA) @tab X @tab X
@item TrueHD @tab @tab X
@tab Used in HD-DVD and Blu-Ray discs.
@item TwinVQ (VQF flavor) @tab @tab X
f = decoded_frame;
err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
AV_BUFFERSRC_FLAG_PUSH);
+ if (err == AVERROR_EOF)
+ err = 0; /* ignore */
if (err < 0)
break;
}
} else
f = decoded_frame;
ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
- if (ret < 0) {
+ if (ret == AVERROR_EOF) {
+ ret = 0; /* ignore */
+ } else if (ret < 0) {
av_log(NULL, AV_LOG_FATAL,
"Failed to inject frame into filter network: %s\n", av_err2str(ret));
exit(1);
A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024
-/* no AV sync correction is done if below the AV sync threshold */
-#define AV_SYNC_THRESHOLD 0.01
+/* no AV sync correction is done if below the minimum AV sync threshold */
+#define AV_SYNC_THRESHOLD_MIN 0.01
+/* AV sync correction is done if above the maximum AV sync threshold */
+#define AV_SYNC_THRESHOLD_MAX 0.1
+/* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
+#define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0
SDL_cond *cond;
} PacketQueue;
-#define VIDEO_PICTURE_QUEUE_SIZE 4
+#define VIDEO_PICTURE_QUEUE_SIZE 3
#define SUBPICTURE_QUEUE_SIZE 4
typedef struct VideoPicture {
enum AVSampleFormat fmt;
} AudioParams;
+typedef struct Clock {
+ double pts; /* clock base */
+ double pts_drift; /* clock base minus time at which we updated the clock */
+ double last_updated;
+ double speed;
+ int serial; /* clock is based on a packet with this serial */
+ int paused;
+ int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
+} Clock;
+
enum {
AV_SYNC_AUDIO_MASTER, /* default choice */
AV_SYNC_VIDEO_MASTER,
AVFormatContext *ic;
int realtime;
+ Clock audclk;
+ Clock vidclk;
+ Clock extclk;
+
int audio_stream;
int av_sync_type;
- double external_clock; ///< external clock base
- double external_clock_drift; ///< external clock base - time (av_gettime) at which we updated external_clock
- int64_t external_clock_time; ///< last reference time
- double external_clock_speed; ///< speed of the external clock
double audio_clock;
int audio_clock_serial;
double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
int64_t video_current_pos; // current displayed file pos
double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
- int video_clock_serial;
VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
int pictq_size, pictq_rindex, pictq_windex;
SDL_mutex *pictq_mutex;
w = default_width;
h = default_height;
}
+ w = FFMIN(16383, w);
if (screen && is->width == screen->w && screen->w == w
&& is->height== screen->h && screen->h == h && !force_set_video_mode)
return 0;
video_image_display(is);
}
-/* get the current audio clock value */
-static double get_audio_clock(VideoState *is)
+/* Get the current value of a clock, or NAN when the clock is obsolete
+ * (its serial no longer matches that of the packet queue it tracks). */
+static double get_clock(Clock *c)
 {
-    if (is->audio_clock_serial != is->audioq.serial)
+    if (*c->queue_serial != c->serial)
         return NAN;
-    if (is->paused) {
-        return is->audio_current_pts;
+    if (c->paused) {
+        return c->pts;
     } else {
-        return is->audio_current_pts_drift + av_gettime() / 1000000.0;
+        double time = av_gettime() / 1000000.0;
+        /* extrapolate from the last update, scaled by the clock speed */
+        return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
     }
 }
-/* get the current video clock value */
-static double get_video_clock(VideoState *is)
+/* Set a clock to pts/serial using an externally supplied time, so the
+ * audio path can anchor to the time of the SDL audio callback. */
+static void set_clock_at(Clock *c, double pts, int serial, double time)
 {
-    if (is->video_clock_serial != is->videoq.serial)
-        return NAN;
-    if (is->paused) {
-        return is->video_current_pts;
-    } else {
-        return is->video_current_pts_drift + av_gettime() / 1000000.0;
-    }
+    c->pts = pts;
+    c->last_updated = time;
+    c->pts_drift = c->pts - time;
+    c->serial = serial;
 }
-/* get the current external clock value */
-static double get_external_clock(VideoState *is)
+/* Set a clock to pts/serial at the current system time. */
+static void set_clock(Clock *c, double pts, int serial)
 {
-    if (is->paused) {
-        return is->external_clock;
-    } else {
-        double time = av_gettime() / 1000000.0;
-        return is->external_clock_drift + time - (time - is->external_clock_time / 1000000.0) * (1.0 - is->external_clock_speed);
-    }
+    double time = av_gettime() / 1000000.0;
+    set_clock_at(c, pts, serial, time);
+}
+
+/* Change the clock speed; re-anchor it first so its value is continuous
+ * across the speed change. */
+static void set_clock_speed(Clock *c, double speed)
+{
+    set_clock(c, get_clock(c), c->serial);
+    c->speed = speed;
+}
+
+/* Initialize a clock: normal speed, unpaused, bound to the given packet
+ * queue serial, and starting out invalid (NAN pts, serial -1). */
+static void init_clock(Clock *c, int *queue_serial)
+{
+    c->speed = 1.0;
+    c->paused = 0;
+    c->queue_serial = queue_serial;
+    set_clock(c, NAN, -1);
+}
+
+/* Slave clock c to another clock: adopt the slave's value whenever our
+ * own is invalid or has drifted beyond the no-sync threshold. */
+static void sync_clock_to_slave(Clock *c, Clock *slave)
+{
+    double clock = get_clock(c);
+    double slave_clock = get_clock(slave);
+    if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
+        set_clock(c, slave_clock, slave->serial);
+}
static int get_master_sync_type(VideoState *is) {
switch (get_master_sync_type(is)) {
case AV_SYNC_VIDEO_MASTER:
- val = get_video_clock(is);
+ val = get_clock(&is->vidclk);
break;
case AV_SYNC_AUDIO_MASTER:
- val = get_audio_clock(is);
+ val = get_clock(&is->audclk);
break;
default:
- val = get_external_clock(is);
+ val = get_clock(&is->extclk);
break;
}
return val;
}
-static void update_external_clock_pts(VideoState *is, double pts)
-{
- is->external_clock_time = av_gettime();
- is->external_clock = pts;
- is->external_clock_drift = pts - is->external_clock_time / 1000000.0;
-}
-
-static void check_external_clock_sync(VideoState *is, double pts) {
- double ext_clock = get_external_clock(is);
- if (isnan(ext_clock) || fabs(ext_clock - pts) > AV_NOSYNC_THRESHOLD) {
- update_external_clock_pts(is, pts);
- }
-}
-
-static void update_external_clock_speed(VideoState *is, double speed) {
- update_external_clock_pts(is, get_external_clock(is));
- is->external_clock_speed = speed;
-}
-
static void check_external_clock_speed(VideoState *is) {
if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
- update_external_clock_speed(is, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->external_clock_speed - EXTERNAL_CLOCK_SPEED_STEP));
+ set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
} else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
(is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
- update_external_clock_speed(is, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->external_clock_speed + EXTERNAL_CLOCK_SPEED_STEP));
+ set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
} else {
- double speed = is->external_clock_speed;
+ double speed = is->extclk.speed;
if (speed != 1.0)
- update_external_clock_speed(is, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
+ set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
}
}
static void stream_toggle_pause(VideoState *is)
{
if (is->paused) {
- is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
+ is->frame_timer += av_gettime() / 1000000.0 + is->vidclk.pts_drift - is->vidclk.pts;
if (is->read_pause_return != AVERROR(ENOSYS)) {
- is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
+ is->vidclk.paused = 0;
}
- is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
+ set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
}
- update_external_clock_pts(is, get_external_clock(is));
- is->paused = !is->paused;
+ set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
+ is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
}
static void toggle_pause(VideoState *is)
if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
/* if video is slave, we try to correct big delays by
duplicating or deleting a frame */
- diff = get_video_clock(is) - get_master_clock(is);
+ diff = get_clock(&is->vidclk) - get_master_clock(is);
/* skip or repeat frame. We take into account the
delay to compute the threshold. I still don't know
if it is the best guess */
- sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
- if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
+ sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
+ if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
if (diff <= -sync_threshold)
- delay = 0;
+ delay = FFMAX(0, delay + diff);
+ else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
+ delay = delay + diff;
else if (diff >= sync_threshold)
delay = 2 * delay;
}
prevvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
if (prevvp->allocated && prevvp->serial == is->videoq.serial) {
SDL_LockMutex(is->pictq_mutex);
- if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE - 1) {
+ if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE) {
if (--is->pictq_rindex == -1)
is->pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE - 1;
is->pictq_size++;
}
static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
- double time = av_gettime() / 1000000.0;
/* update current video pts */
- is->video_current_pts = pts;
- is->video_current_pts_drift = is->video_current_pts - time;
+ set_clock(&is->vidclk, pts, serial);
+ sync_clock_to_slave(&is->extclk, &is->vidclk);
is->video_current_pos = pos;
is->frame_last_pts = pts;
- is->video_clock_serial = serial;
- if (is->videoq.serial == serial)
- check_external_clock_sync(is, is->video_current_pts);
}
/* called to display each frame */
/* if duration of the last frame was sane, update last_duration in video state */
is->frame_last_duration = last_duration;
}
- delay = compute_target_delay(is->frame_last_duration, is);
+ if (redisplay)
+ delay = 0.0;
+ else
+ delay = compute_target_delay(is->frame_last_duration, is);
time= av_gettime()/1000000.0;
- if (time < is->frame_timer + delay) {
+ if (time < is->frame_timer + delay && !redisplay) {
*remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
return;
}
- if (delay > 0)
- is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
+ is->frame_timer += delay;
+ if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
+ is->frame_timer = time;
SDL_LockMutex(is->pictq_mutex);
- if (!isnan(vp->pts))
+ if (!redisplay && !isnan(vp->pts))
update_video_pts(is, vp->pts, vp->pos, vp->serial);
SDL_UnlockMutex(is->pictq_mutex);
sqsize = is->subtitleq.size;
av_diff = 0;
if (is->audio_st && is->video_st)
- av_diff = get_audio_clock(is) - get_video_clock(is);
- printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
+ av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
+ else if (is->video_st)
+ av_diff = get_master_clock(is) - get_clock(&is->vidclk);
+ else if (is->audio_st)
+ av_diff = get_master_clock(is) - get_clock(&is->audclk);
+ printf("%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
get_master_clock(is),
+ (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
av_diff,
is->frame_drops_early + is->frame_drops_late,
aqsize / 1024,
static void alloc_picture(VideoState *is)
{
VideoPicture *vp;
+ int64_t bufferdiff;
vp = &is->pictq[is->pictq_windex];
vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
SDL_YV12_OVERLAY,
screen);
- if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
+ bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0;
+ if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < vp->height * vp->bmp->pitches[0]) {
/* SDL allocates a buffer smaller than requested if the video
* overlay hardware is unable to support the requested size. */
fprintf(stderr, "Error: the video system does not support an image\n"
SDL_LockMutex(is->pictq_mutex);
/* keep the last already displayed picture in the queue */
- while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 2 &&
+ while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 1 &&
!is->videoq.abort_request) {
SDL_CondWait(is->pictq_cond, is->pictq_mutex);
}
if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
SDL_LockMutex(is->pictq_mutex);
if (is->frame_last_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE) {
- double clockdiff = get_video_clock(is) - get_master_clock(is);
+ double clockdiff = get_clock(&is->vidclk) - get_master_clock(is);
double ptsdiff = dpts - is->frame_last_pts;
if (!isnan(clockdiff) && fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
!isnan(ptsdiff) && ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
double diff, avg_diff;
int min_nb_samples, max_nb_samples;
- diff = get_audio_clock(is) - get_master_clock(is);
+ diff = get_clock(&is->audclk) - get_master_clock(is);
if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
/* Let's assume the audio driver that is used by SDL has two periods. */
- is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
- is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
- if (is->audioq.serial == is->audio_clock_serial)
- check_external_clock_sync(is, is->audio_current_pts);
+ set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
+ sync_clock_to_slave(&is->extclk, &is->audclk);
}
static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
packet_queue_put(&is->videoq, &flush_pkt);
}
if (is->seek_flags & AVSEEK_FLAG_BYTE) {
- update_external_clock_pts(is, NAN);
+ set_clock(&is->extclk, NAN, 0);
} else {
- update_external_clock_pts(is, seek_target / (double)AV_TIME_BASE);
+ set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
}
}
is->seek_req = 0;
is->continue_read_thread = SDL_CreateCond();
- update_external_clock_pts(is, NAN);
- update_external_clock_speed(is, 1.0);
- is->audio_current_pts_drift = -av_gettime() / 1000000.0;
- is->video_current_pts_drift = is->audio_current_pts_drift;
+ init_clock(&is->vidclk, &is->videoq.serial);
+ init_clock(&is->audclk, &is->audioq.serial);
+ init_clock(&is->extclk, &is->extclk.serial);
is->audio_clock_serial = -1;
- is->video_clock_serial = -1;
is->audio_last_serial = -1;
is->av_sync_type = av_sync_type;
is->read_tid = SDL_CreateThread(read_thread, is);
}
break;
case SDL_VIDEORESIZE:
- screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
+ screen = SDL_SetVideoMode(FFMIN(16383, event.resize.w), event.resize.h, 0,
SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
- screen_width = cur_stream->width = event.resize.w;
- screen_height = cur_stream->height = event.resize.h;
+ if (!screen) {
+ fprintf(stderr, "Failed to set video mode\n");
+ do_exit(cur_stream);
+ }
+ screen_width = cur_stream->width = screen->w;
+ screen_height = cur_stream->height = screen->h;
cur_stream->force_refresh = 1;
break;
case SDL_QUIT:
OBJS-$(CONFIG_FOURXM_DECODER) += 4xm.o
OBJS-$(CONFIG_FRAPS_DECODER) += fraps.o
OBJS-$(CONFIG_FRWU_DECODER) += frwu.o
+OBJS-$(CONFIG_G2M_DECODER) += g2meet.o mjpeg.o
OBJS-$(CONFIG_G723_1_DECODER) += g723_1.o acelp_vectors.o \
celp_filters.o celp_math.o
OBJS-$(CONFIG_G723_1_ENCODER) += g723_1.o acelp_vectors.o celp_math.o
OBJS-$(CONFIG_TRUESPEECH_DECODER) += truespeech.o
OBJS-$(CONFIG_TSCC_DECODER) += tscc.o msrledec.o
OBJS-$(CONFIG_TSCC2_DECODER) += tscc2.o
-OBJS-$(CONFIG_TTA_DECODER) += tta.o
+OBJS-$(CONFIG_TTA_DECODER) += tta.o ttadata.o
+OBJS-$(CONFIG_TTA_ENCODER) += ttaenc.o ttadata.o
OBJS-$(CONFIG_TWINVQ_DECODER) += twinvq.o
OBJS-$(CONFIG_TXD_DECODER) += txd.o s3tc.o
OBJS-$(CONFIG_ULTI_DECODER) += ulti.o
REGISTER_DECODER(FOURXM, fourxm);
REGISTER_DECODER(FRAPS, fraps);
REGISTER_DECODER(FRWU, frwu);
+ REGISTER_DECODER(G2M, g2m);
REGISTER_ENCDEC (GIF, gif);
REGISTER_ENCDEC (H261, h261);
REGISTER_ENCDEC (H263, h263);
REGISTER_DECODER(TAK, tak);
REGISTER_DECODER(TRUEHD, truehd);
REGISTER_DECODER(TRUESPEECH, truespeech);
- REGISTER_DECODER(TTA, tta);
+ REGISTER_ENCDEC (TTA, tta);
REGISTER_DECODER(TWINVQ, twinvq);
REGISTER_DECODER(VMDAUDIO, vmdaudio);
REGISTER_ENCDEC (VORBIS, vorbis);
/*
* ARM NEON optimised integer operations
- * Copyright (c) 2009 Kostya Shishkov
+ * Copyright (c) 2009 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
AV_CODEC_ID_VP9,
AV_CODEC_ID_AIC,
AV_CODEC_ID_ESCAPE130_DEPRECATED,
+ AV_CODEC_ID_G2M_DEPRECATED,
AV_CODEC_ID_BRENDER_PIX= MKBETAG('B','P','I','X'),
AV_CODEC_ID_Y41P = MKBETAG('Y','4','1','P'),
* by data.
*/
AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL,
+
+ /**
+ * The optional first identifier line of a WebVTT cue.
+ */
+ AV_PKT_DATA_WEBVTT_IDENTIFIER,
+
+ /**
+ * The optional settings (rendering instructions) that immediately
+ * follow the timestamp specifier of a WebVTT cue.
+ */
+ AV_PKT_DATA_WEBVTT_SETTINGS,
};
/**
* Code outside libavcodec should access this field using:
* av_codec_{get,set}_pkt_timebase(avctx)
* - encoding unused.
- * - decodimg set by user
+ * - decoding set by user.
*/
AVRational pkt_timebase;
int av_packet_split_side_data(AVPacket *pkt){
if (!pkt->side_data_elems && pkt->size >12 && AV_RB64(pkt->data + pkt->size - 8) == FF_MERGE_MARKER){
int i;
- unsigned int size;
+ unsigned int size, orig_pktsize = pkt->size;
uint8_t *p;
p = pkt->data + pkt->size - 8 - 5;
p-= size+5;
}
pkt->size -= 8;
+ /* FFMIN() prevents overflow in case the packet wasn't allocated with
+ * proper padding.
+ * If the side data is smaller than the buffer padding size, the
+ * remaining bytes should have already been filled with zeros by the
+ * original packet allocation anyway. */
+ memset(pkt->data + pkt->size, 0,
+ FFMIN(orig_pktsize - pkt->size, FF_INPUT_BUFFER_PADDING_SIZE));
pkt->side_data_elems = i+1;
return 1;
}
/*
* Bink video decoder
- * Copyright (C) 2009 Kostya Shishkov
+ * Copyright (C) 2009 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
/*
* Bink DSP routines
- * Copyright (c) 2009 Kostya Shishkov
+ * Copyright (c) 2009 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
/*
* Bink DSP routines
- * Copyright (c) 2009 Kostya Shishkov
+ * Copyright (c) 2009 Konstantin Shishkov
*
* This file is part of Libav.
*
.long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"),
.props = AV_CODEC_PROP_INTRA_ONLY,
},
- {
- .id = AV_CODEC_ID_G2M,
- .type = AVMEDIA_TYPE_VIDEO,
- .name = "g2m",
- .long_name = NULL_IF_CONFIG_SMALL("GoToMeeting"),
- },
{
.id = AV_CODEC_ID_AVUI,
.type = AVMEDIA_TYPE_VIDEO,
.long_name = NULL_IF_CONFIG_SMALL("Sigmatel Motion Video"),
},
+ {
+ .id = AV_CODEC_ID_G2M,
+ .type = AVMEDIA_TYPE_VIDEO,
+ .name = "g2m",
+ .long_name = NULL_IF_CONFIG_SMALL("Go2Meeting"),
+ .props = AV_CODEC_PROP_LOSSY,
+ },
/* various PCM "codecs" */
{
ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
av_frame_unref(p);
- av_frame_ref(p, pict);
+ if ((ret = av_frame_ref(p, pict)) < 0)
+ return ret;
p->pict_type = AV_PICTURE_TYPE_I;
if (avctx->gop_size == 0 || f->picture_number % avctx->gop_size == 0) {
--- /dev/null
+/*
+ * Go2Webinar decoder
+ * Copyright (c) 2012 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Go2Webinar decoder
+ */
+
+#include <zlib.h>
+
+#include "libavutil/intreadwrite.h"
+#include "avcodec.h"
+#include "bytestream.h"
+#include "dsputil.h"
+#include "get_bits.h"
+#include "internal.h"
+#include "mjpeg.h"
+
+enum ChunkType {
+ FRAME_INFO = 0xC8,
+ TILE_DATA,
+ CURSOR_POS,
+ CURSOR_SHAPE,
+ CHUNK_CC,
+ CHUNK_CD
+};
+
+enum Compression {
+ COMPR_EPIC_J_B = 2,
+ COMPR_KEMPF_J_B,
+};
+
+static const uint8_t luma_quant[64] = {
+ 8, 6, 5, 8, 12, 20, 26, 31,
+ 6, 6, 7, 10, 13, 29, 30, 28,
+ 7, 7, 8, 12, 20, 29, 35, 28,
+ 7, 9, 11, 15, 26, 44, 40, 31,
+ 9, 11, 19, 28, 34, 55, 52, 39,
+ 12, 18, 28, 32, 41, 52, 57, 46,
+ 25, 32, 39, 44, 52, 61, 60, 51,
+ 36, 46, 48, 49, 56, 50, 52, 50
+};
+
+static const uint8_t chroma_quant[64] = {
+ 9, 9, 12, 24, 50, 50, 50, 50,
+ 9, 11, 13, 33, 50, 50, 50, 50,
+ 12, 13, 28, 50, 50, 50, 50, 50,
+ 24, 33, 50, 50, 50, 50, 50, 50,
+ 50, 50, 50, 50, 50, 50, 50, 50,
+ 50, 50, 50, 50, 50, 50, 50, 50,
+ 50, 50, 50, 50, 50, 50, 50, 50,
+ 50, 50, 50, 50, 50, 50, 50, 50,
+};
+
+typedef struct JPGContext {
+ DSPContext dsp;
+ ScanTable scantable;
+
+ VLC dc_vlc[2], ac_vlc[2];
+ int prev_dc[3];
+ DECLARE_ALIGNED(16, int16_t, block)[6][64];
+
+ uint8_t *buf;
+} JPGContext;
+
+typedef struct G2MContext {
+ JPGContext jc;
+ int version;
+
+ int compression;
+ int width, height, bpp;
+ int tile_width, tile_height;
+ int tiles_x, tiles_y, tile_x, tile_y;
+
+ int got_header;
+
+ uint8_t *framebuf;
+ int framebuf_stride, old_width, old_height;
+
+ uint8_t *synth_tile, *jpeg_tile;
+ int tile_stride, old_tile_w, old_tile_h;
+
+ uint8_t *kempf_buf, *kempf_flags;
+
+ uint8_t *cursor;
+ int cursor_stride;
+ int cursor_fmt;
+ int cursor_w, cursor_h, cursor_x, cursor_y;
+ int cursor_hot_x, cursor_hot_y;
+} G2MContext;
+
+/* Build a decoding VLC from MJPEG-style bits/value tables.  AC symbols
+ * are biased by +16 so that (sym >> 4) in jpg_decode_block() directly
+ * yields run + 1; the AC end-of-block code is remapped to 16 * 256 so
+ * it pushes the coefficient index past 63 and terminates the block. */
+static av_cold int build_vlc(VLC *vlc, const uint8_t *bits_table,
+                             const uint8_t *val_table, int nb_codes,
+                             int is_ac)
+{
+    uint8_t huff_size[256] = { 0 };
+    uint16_t huff_code[256];
+    uint16_t huff_sym[256];
+    int i;
+
+    ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table);
+
+    for (i = 0; i < 256; i++)
+        huff_sym[i] = i + 16 * is_ac;
+
+    if (is_ac)
+        huff_sym[0] = 16 * 256;
+
+    return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1,
+                              huff_code, 2, 2, huff_sym, 2, 2, 0);
+}
+
+/* Initialize the JPEG decoding state: the four standard MJPEG Huffman
+ * tables (luma/chroma x DC/AC), the DSP functions and the zigzag scan
+ * table.  Returns 0 on success or a negative error from VLC building. */
+static av_cold int jpg_init(AVCodecContext *avctx, JPGContext *c)
+{
+    int ret;
+
+    ret = build_vlc(&c->dc_vlc[0], avpriv_mjpeg_bits_dc_luminance,
+                    avpriv_mjpeg_val_dc, 12, 0);
+    if (ret)
+        return ret;
+    ret = build_vlc(&c->dc_vlc[1], avpriv_mjpeg_bits_dc_chrominance,
+                    avpriv_mjpeg_val_dc, 12, 0);
+    if (ret)
+        return ret;
+    ret = build_vlc(&c->ac_vlc[0], avpriv_mjpeg_bits_ac_luminance,
+                    avpriv_mjpeg_val_ac_luminance, 251, 1);
+    if (ret)
+        return ret;
+    ret = build_vlc(&c->ac_vlc[1], avpriv_mjpeg_bits_ac_chrominance,
+                    avpriv_mjpeg_val_ac_chrominance, 251, 1);
+    if (ret)
+        return ret;
+
+    ff_dsputil_init(&c->dsp, avctx);
+    ff_init_scantable(c->dsp.idct_permutation, &c->scantable,
+                      ff_zigzag_direct);
+
+    return 0;
+}
+
+/* Release everything owned by the JPEG context: the DC/AC VLC tables
+ * built in jpg_init() and the unescape buffer grown in
+ * jpg_decode_data(). */
+static av_cold void jpg_free_context(JPGContext *ctx)
+{
+    int i;
+
+    for (i = 0; i < 2; i++) {
+        ff_free_vlc(&ctx->dc_vlc[i]);
+        ff_free_vlc(&ctx->ac_vlc[i]);
+    }
+
+    av_freep(&ctx->buf);
+}
+
+/* Undo JPEG byte stuffing: copy the bitstream to dst, dropping the 0x00
+ * byte inserted after every 0xFF.  *dst_size receives the unescaped
+ * length; dst must hold at least src_size bytes. */
+static void jpg_unescape(const uint8_t *src, int src_size,
+                         uint8_t *dst, int *dst_size)
+{
+    const uint8_t *src_end = src + src_size;
+    uint8_t *dst_start = dst;
+
+    while (src < src_end) {
+        uint8_t x = *src++;
+
+        *dst++ = x;
+
+        /* NOTE(review): when the input ends in 0xFF this reads one byte
+         * past src_end — presumably callers pass padded packets; verify */
+        if (x == 0xFF && !*src)
+            src++;
+    }
+    *dst_size = dst - dst_start;
+}
+
+/* Decode and dequantize one 8x8 coefficient block into IDCT-permuted
+ * scan order.  AC symbols arrive biased by +16 (see build_vlc()), so
+ * "val >> 4" advances pos by run + 1 and the end-of-block symbol
+ * (16 * 256) pushes pos past 63, terminating the block.
+ * Returns 0 on success, AVERROR_INVALIDDATA on bad codes. */
+static int jpg_decode_block(JPGContext *c, GetBitContext *gb,
+                            int plane, int16_t *block)
+{
+    int dc, val, pos;
+    const int is_chroma = !!plane;
+    const uint8_t *qmat = is_chroma ? chroma_quant : luma_quant;
+
+    c->dsp.clear_block(block);
+    dc = get_vlc2(gb, c->dc_vlc[is_chroma].table, 9, 3);
+    if (dc < 0)
+        return AVERROR_INVALIDDATA;
+    if (dc)
+        dc = get_xbits(gb, dc);
+    /* DC is a delta against the previous block of the same plane */
+    dc = dc * qmat[0] + c->prev_dc[plane];
+    block[0] = dc;
+    c->prev_dc[plane] = dc;
+
+    pos = 0;
+    while (pos < 63) {
+        val = get_vlc2(gb, c->ac_vlc[is_chroma].table, 9, 3);
+        if (val < 0)
+            return AVERROR_INVALIDDATA;
+        pos += val >> 4;
+        val &= 0xF;
+        if (pos > 63)
+            return val ? AVERROR_INVALIDDATA : 0;
+        if (val) {
+            int nbits = val;
+
+            val = get_xbits(gb, nbits);
+            val *= qmat[ff_zigzag_direct[pos]];
+            block[c->scantable.permutated[pos]] = val;
+        }
+    }
+    return 0;
+}
+
+/* Convert one YUV triple to packed RGB24 using 16.16 fixed-point
+ * coefficients (they match the usual JPEG full-range YCbCr matrix). */
+static inline void yuv2rgb(uint8_t *out, int Y, int U, int V)
+{
+    out[0] = av_clip_uint8(Y + ( 91881 * V + 32768 >> 16));
+    out[1] = av_clip_uint8(Y + (-22554 * U - 46802 * V + 32768 >> 16));
+    out[2] = av_clip_uint8(Y + (116130 * U + 32768 >> 16));
+}
+
+/* Decode JPEG-coded 4:2:0 macroblock data into a packed RGB24 buffer.
+ * mask (optional, one byte per MB, rows mask_stride apart) selects
+ * which macroblocks are present; num_mbs == 0 means "all of them".
+ * swapuv exchanges the chroma planes.  Returns 0 or a negative error. */
+static int jpg_decode_data(JPGContext *c, int width, int height,
+                           const uint8_t *src, int src_size,
+                           uint8_t *dst, int dst_stride,
+                           const uint8_t *mask, int mask_stride, int num_mbs,
+                           int swapuv)
+{
+    GetBitContext gb;
+    uint8_t *tmp;
+    int mb_w, mb_h, mb_x, mb_y, i, j;
+    int bx, by;
+    int unesc_size;
+    int ret;
+
+    /* unescape into a lazily grown, reusable buffer; zeroed padding
+     * keeps the bitreader from running off the end */
+    tmp = av_realloc(c->buf, src_size + FF_INPUT_BUFFER_PADDING_SIZE);
+    if (!tmp)
+        return AVERROR(ENOMEM);
+    c->buf = tmp;
+    jpg_unescape(src, src_size, c->buf, &unesc_size);
+    memset(c->buf + unesc_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
+    init_get_bits(&gb, c->buf, unesc_size * 8);
+
+    width = FFALIGN(width, 16);
+    mb_w = width >> 4;
+    mb_h = (height + 15) >> 4;
+
+    if (!num_mbs)
+        num_mbs = mb_w * mb_h;
+
+    for (i = 0; i < 3; i++)
+        c->prev_dc[i] = 1024;
+    bx = by = 0;
+    for (mb_y = 0; mb_y < mb_h; mb_y++) {
+        for (mb_x = 0; mb_x < mb_w; mb_x++) {
+            if (mask && !mask[mb_x]) {
+                /* no data coded for this macroblock */
+                bx += 16;
+                continue;
+            }
+            /* four 8x8 luma blocks ... */
+            for (j = 0; j < 2; j++) {
+                for (i = 0; i < 2; i++) {
+                    if ((ret = jpg_decode_block(c, &gb, 0,
+                                                c->block[i + j * 2])) != 0)
+                        return ret;
+                    c->dsp.idct(c->block[i + j * 2]);
+                }
+            }
+            /* ... then one U and one V block (4:2:0 subsampling) */
+            for (i = 1; i < 3; i++) {
+                if ((ret = jpg_decode_block(c, &gb, i, c->block[i + 3])) != 0)
+                    return ret;
+                c->dsp.idct(c->block[i + 3]);
+            }
+
+            /* upsample chroma and convert the 16x16 MB to RGB24 */
+            for (j = 0; j < 16; j++) {
+                uint8_t *out = dst + bx * 3 + (by + j) * dst_stride;
+                for (i = 0; i < 16; i++) {
+                    int Y, U, V;
+
+                    Y = c->block[(j >> 3) * 2 + (i >> 3)][(i & 7) + (j & 7) * 8];
+                    U = c->block[4 ^ swapuv][(i >> 1) + (j >> 1) * 8] - 128;
+                    V = c->block[5 ^ swapuv][(i >> 1) + (j >> 1) * 8] - 128;
+                    yuv2rgb(out + i * 3, Y, U, V);
+                }
+            }
+
+            if (!--num_mbs)
+                return 0;
+            bx += 16;
+        }
+        bx = 0;
+        by += 16;
+        if (mask)
+            mask += mask_stride;
+    }
+
+    return 0;
+}
+
+/* Repaint a tile from the zlib-decompressed palette map in src: each
+ * row starts with a marker byte (non-zero = row unchanged, keep the
+ * current contents), followed by one packed palette index per pixel.
+ * Index tidx means "take the pixel from the decoded JPEG tile". */
+static void kempf_restore_buf(const uint8_t *src, int len,
+                              uint8_t *dst, int stride,
+                              const uint8_t *jpeg_tile, int tile_stride,
+                              int width, int height,
+                              const uint8_t *pal, int npal, int tidx)
+{
+    GetBitContext gb;
+    int i, j, nb, col;
+
+    init_get_bits(&gb, src, len * 8);
+
+    /* index width depends on palette size: 1, 2, 4 or 8 bits */
+    if (npal <= 2) nb = 1;
+    else if (npal <= 4) nb = 2;
+    else if (npal <= 16) nb = 4;
+    else nb = 8;
+
+    for (j = 0; j < height; j++, dst += stride, jpeg_tile += tile_stride) {
+        if (get_bits(&gb, 8))
+            continue;
+        for (i = 0; i < width; i++) {
+            col = get_bits(&gb, nb);
+            if (col != tidx)
+                memcpy(dst + i * 3, pal + col * 3, 3);
+            else
+                memcpy(dst + i * 3, jpeg_tile + i * 3, 3);
+        }
+    }
+}
+
+/* Decode one tile compressed with the "Kempf" method: sub_type 0 is a
+ * solid single-colour fill, sub_type 1 a plain JPEG tile, otherwise a
+ * palette plus a zlib-compressed pixel map, combined with JPEG data for
+ * the blocks flagged in a coded bitmap.  The tile is written into the
+ * frame buffer at (tile_x, tile_y).  Returns 0 on success or a negative
+ * error code.
+ * All reads from src are bounds-checked against src_end: the original
+ * revision only validated the first two bytes, so a truncated packet
+ * could read past the end of the input buffer. */
+static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y,
+                             const uint8_t *src, int src_size)
+{
+    int width, height;
+    int hdr, zsize, npal, tidx = -1, ret;
+    int i, j;
+    const uint8_t *src_end = src + src_size;
+    uint8_t pal[768], transp[3];
+    uLongf dlen = (c->tile_width + 1) * c->tile_height;
+    int sub_type;
+    int nblocks, cblocks, bstride;
+    int bits, bitbuf, coded;
+    uint8_t *dst = c->framebuf + tile_x * c->tile_width * 3 +
+                   tile_y * c->tile_height * c->framebuf_stride;
+
+    if (src_size < 2)
+        return AVERROR_INVALIDDATA;
+
+    width = FFMIN(c->width - tile_x * c->tile_width, c->tile_width);
+    height = FFMIN(c->height - tile_y * c->tile_height, c->tile_height);
+
+    hdr = *src++;
+    sub_type = hdr >> 5;
+    if (sub_type == 0) {
+        /* solid fill with a single colour */
+        int j;
+        if (src_end - src < 3)
+            return AVERROR_INVALIDDATA;
+        memcpy(transp, src, 3);
+        src += 3;
+        for (j = 0; j < height; j++, dst += c->framebuf_stride)
+            for (i = 0; i < width; i++)
+                memcpy(dst + i * 3, transp, 3);
+        return 0;
+    } else if (sub_type == 1) {
+        /* the whole tile is plain JPEG data */
+        return jpg_decode_data(&c->jc, width, height, src, src_end - src,
+                               dst, c->framebuf_stride, NULL, 0, 0, 0);
+    }
+
+    if (sub_type != 2) {
+        if (src_end - src < 3)
+            return AVERROR_INVALIDDATA;
+        memcpy(transp, src, 3);
+        src += 3;
+    }
+    if (src >= src_end)
+        return AVERROR_INVALIDDATA;
+    npal = *src++ + 1;
+    if (src_end - src < npal * 3)
+        return AVERROR_INVALIDDATA;
+    memcpy(pal, src, npal * 3);
+    src += npal * 3;
+    if (sub_type != 2) {
+        /* find the palette entry standing in for "JPEG pixel here" */
+        for (i = 0; i < npal; i++) {
+            if (!memcmp(pal + i * 3, transp, 3)) {
+                tidx = i;
+                break;
+            }
+        }
+    }
+
+    if (src_end - src < 2)
+        return 0;
+    zsize = (src[0] << 8) | src[1];
+    src += 2;
+
+    if (src_end - src < zsize)
+        return AVERROR_INVALIDDATA;
+
+    ret = uncompress(c->kempf_buf, &dlen, src, zsize);
+    if (ret)
+        return AVERROR_INVALIDDATA;
+    src += zsize;
+
+    if (sub_type == 2) {
+        /* palette-only tile, no JPEG blocks */
+        kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
+                          NULL, 0, width, height, pal, npal, tidx);
+        return 0;
+    }
+
+    if (src >= src_end)
+        return AVERROR_INVALIDDATA;
+    nblocks = *src++ + 1;
+    cblocks = 0;
+    bstride = FFALIGN(width, 16) >> 4;
+    // blocks are coded LSB and we need normal bitreader for JPEG data
+    bits = 0;
+    for (i = 0; i < (FFALIGN(height, 16) >> 4); i++) {
+        for (j = 0; j < (FFALIGN(width, 16) >> 4); j++) {
+            if (!bits) {
+                if (src >= src_end)
+                    return AVERROR_INVALIDDATA;
+                bitbuf = *src++;
+                bits = 8;
+            }
+            coded = bitbuf & 1;
+            bits--;
+            bitbuf >>= 1;
+            cblocks += coded;
+            if (cblocks > nblocks)
+                return AVERROR_INVALIDDATA;
+            c->kempf_flags[j + i * bstride] = coded;
+        }
+    }
+
+    memset(c->jpeg_tile, 0, c->tile_stride * height);
+    jpg_decode_data(&c->jc, width, height, src, src_end - src,
+                    c->jpeg_tile, c->tile_stride,
+                    c->kempf_flags, bstride, nblocks, 0);
+
+    kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
+                      c->jpeg_tile, c->tile_stride,
+                      width, height, pal, npal, tidx);
+
+    return 0;
+}
+
+/* (Re)allocate the frame buffer and the per-tile scratch buffers when
+ * the stream geometry grows.  Returns 0 or AVERROR(ENOMEM).
+ * Fixes: the old code compared c->height against itself (always
+ * false), so the framebuffer was never regrown when only the height
+ * increased; it also overwrote kempf_buf/kempf_flags without freeing
+ * the previous allocations, leaking them on every tile-size change. */
+static int g2m_init_buffers(G2MContext *c)
+{
+    int aligned_height;
+
+    if (!c->framebuf || c->old_width < c->width ||
+        c->old_height < c->height) {
+        c->framebuf_stride = FFALIGN(c->width * 3, 16);
+        aligned_height     = FFALIGN(c->height, 16);
+        av_free(c->framebuf);
+        c->framebuf = av_mallocz(c->framebuf_stride * aligned_height);
+        if (!c->framebuf)
+            return AVERROR(ENOMEM);
+    }
+    if (!c->synth_tile || !c->jpeg_tile ||
+        c->old_tile_w < c->tile_width ||
+        c->old_tile_h < c->tile_height) {
+        c->tile_stride = FFALIGN(c->tile_width * 3, 16);
+        aligned_height = FFALIGN(c->tile_height, 16);
+        av_free(c->synth_tile);
+        av_free(c->jpeg_tile);
+        av_free(c->kempf_buf);
+        av_free(c->kempf_flags);
+        c->synth_tile  = av_mallocz(c->tile_stride * aligned_height);
+        c->jpeg_tile   = av_mallocz(c->tile_stride * aligned_height);
+        /* kempf_buf must hold the worst-case uncompress() output, plus
+         * padding so the bit reader can safely over-read */
+        c->kempf_buf   = av_mallocz((c->tile_width + 1) * aligned_height
+                                    + FF_INPUT_BUFFER_PADDING_SIZE);
+        c->kempf_flags = av_mallocz( c->tile_width * aligned_height);
+        if (!c->synth_tile || !c->jpeg_tile ||
+            !c->kempf_buf || !c->kempf_flags)
+            return AVERROR(ENOMEM);
+    }
+
+    return 0;
+}
+
+/* Decode the cursor image from a CURSOR_SHAPE chunk into c->cursor
+ * (4 bytes per pixel: byte 0 = alpha, bytes 1-3 = colour, as consumed
+ * by g2m_paint_cursor).  Returns 0 or a negative AVERROR code.
+ * Fixes: the monochrome path reads and writes 32 pixels per group, so
+ * the stride must be padded to a multiple of 32 pixels or the last
+ * group of each row overflows the allocation; av_realloc() must not
+ * overwrite c->cursor directly or the old buffer leaks on OOM. */
+static int g2m_load_cursor(G2MContext *c, GetByteContext *gb)
+{
+    int i, j, k;
+    uint8_t *dst, *tmp;
+    uint32_t bits;
+
+    c->cursor_stride = FFALIGN(c->cursor_w, 32) * 4;
+    tmp = av_realloc(c->cursor, c->cursor_stride * c->cursor_h);
+    if (!tmp)
+        return AVERROR(ENOMEM);
+    c->cursor = tmp;
+
+    dst = c->cursor;
+    switch (c->cursor_fmt) {
+    case 1: // old monochrome
+        /* first pass: store the image plane bit in each pixel's byte 0 */
+        for (j = 0; j < c->cursor_h; j++) {
+            for (i = 0; i < c->cursor_w; i += 32) {
+                bits = bytestream2_get_be32(gb);
+                for (k = 0; k < 32; k++) {
+                    dst[0] = !!(bits & 0x80000000);
+                    dst   += 4;
+                    bits <<= 1;
+                }
+            }
+        }
+
+        /* second pass: combine with the mask plane into alpha + colour */
+        dst = c->cursor;
+        for (j = 0; j < c->cursor_h; j++) {
+            for (i = 0; i < c->cursor_w; i += 32) {
+                bits = bytestream2_get_be32(gb);
+                for (k = 0; k < 32; k++) {
+                    int mask_bit = !!(bits & 0x80000000);
+                    switch (dst[0] * 2 + mask_bit) {
+                    case 0: /* opaque black */
+                        dst[0] = 0xFF; dst[1] = 0x00;
+                        dst[2] = 0x00; dst[3] = 0x00;
+                        break;
+                    case 1: /* opaque white */
+                        dst[0] = 0xFF; dst[1] = 0xFF;
+                        dst[2] = 0xFF; dst[3] = 0xFF;
+                        break;
+                    default: /* transparent */
+                        dst[0] = 0x00; dst[1] = 0x00;
+                        dst[2] = 0x00; dst[3] = 0x00;
+                    }
+                    dst   += 4;
+                    bits <<= 1;
+                }
+            }
+        }
+        break;
+    case 32: // full colour
+        /* skip monochrome version of the cursor and decode RGBA instead */
+        bytestream2_skip(gb, c->cursor_h * (FFALIGN(c->cursor_w, 32) >> 3));
+        for (j = 0; j < c->cursor_h; j++) {
+            for (i = 0; i < c->cursor_w; i++) {
+                int val = bytestream2_get_be32(gb);
+                *dst++ = val >>  0;
+                *dst++ = val >>  8;
+                *dst++ = val >> 16;
+                *dst++ = val >> 24;
+            }
+            dst += c->cursor_stride - c->cursor_w * 4; /* row padding */
+        }
+        break;
+    default:
+        return AVERROR_PATCHWELCOME;
+    }
+    return 0;
+}
+
+/* Blend `new` into `src` with the given 0..255 alpha weight. */
+#define APPLY_ALPHA(src, new, alpha) \
+    src = (src * (256 - alpha) + new * alpha) >> 8
+
+/* Alpha-blend the cursor image into the RGB24 frame at the current
+ * cursor position.  Cursor pixels are 4 bytes: byte 0 = alpha, bytes
+ * 1-3 = colour, blended onto the three RGB bytes in order.  The cursor
+ * rectangle is clipped against all four frame borders before blending. */
+static void g2m_paint_cursor(G2MContext *c, uint8_t *dst, int stride)
+{
+    int i, j;
+    int x, y, w, h;
+    const uint8_t *cursor;
+
+    if (!c->cursor)
+        return;
+
+    /* top-left corner of the cursor image relative to the frame;
+     * (hot_x, hot_y) is the hotspot offset inside the cursor */
+    x = c->cursor_x - c->cursor_hot_x;
+    y = c->cursor_y - c->cursor_hot_y;
+
+    cursor = c->cursor;
+    w = c->cursor_w;
+    h = c->cursor_h;
+
+    /* clip right/bottom against the frame size */
+    if (x + w > c->width)
+        w = c->width - x;
+    if (y + h > c->height)
+        h = c->height - y;
+    /* clip left/top: shrink the rectangle and advance into the cursor,
+     * otherwise advance into the destination frame */
+    if (x < 0) {
+        w += x;
+        cursor += -x * 4;
+    } else {
+        dst += x * 3;
+    }
+    if (y < 0) {
+        h += y;
+        cursor += -y * c->cursor_stride;
+    } else {
+        dst += y * stride;
+    }
+    if (w < 0 || h < 0)
+        return; /* fully outside the frame */
+
+    for (j = 0; j < h; j++) {
+        for (i = 0; i < w; i++) {
+            uint8_t alpha = cursor[i * 4];
+            APPLY_ALPHA(dst[i * 3 + 0], cursor[i * 4 + 1], alpha);
+            APPLY_ALPHA(dst[i * 3 + 1], cursor[i * 4 + 2], alpha);
+            APPLY_ALPHA(dst[i * 3 + 2], cursor[i * 4 + 3], alpha);
+        }
+        dst += stride;
+        cursor += c->cursor_stride;
+    }
+}
+
+/* Decode one G2M4 frame.  A frame is a sequence of chunks, each
+ * [LE32 size][byte type][payload]; FRAME_INFO (re)configures the
+ * geometry, TILE_DATA carries compressed tiles, CURSOR_POS and
+ * CURSOR_SHAPE update the mouse cursor overlay.  The decoded image is
+ * accumulated in c->framebuf across frames and copied out with the
+ * cursor painted on top.
+ * Fix: invalid FRAME_INFO dimensions now abort decoding instead of
+ * falling through and calling avcodec_set_dimensions(avctx, 0, 0). */
+static int g2m_decode_frame(AVCodecContext *avctx, void *data,
+                            int *got_picture_ptr, AVPacket *avpkt)
+{
+    const uint8_t *buf = avpkt->data;
+    int buf_size = avpkt->size;
+    G2MContext *c = avctx->priv_data;
+    AVFrame *pic = data;
+    GetByteContext bc, tbc;
+    int magic;
+    int got_header = 0;
+    uint32_t chunk_size, cur_size;
+    int chunk_type;
+    int i;
+    int ret;
+
+    if (buf_size < 12) {
+        av_log(avctx, AV_LOG_ERROR,
+               "Frame should have at least 12 bytes, got %d instead\n",
+               buf_size);
+        return AVERROR_INVALIDDATA;
+    }
+
+    bytestream2_init(&bc, buf, buf_size);
+
+    /* "G2M2".."G2M4" magic; only protocol version 4 is implemented */
+    magic = bytestream2_get_be32(&bc);
+    if ((magic & ~0xF) != MKBETAG('G', '2', 'M', '0') ||
+        (magic & 0xF) < 2 || (magic & 0xF) > 4) {
+        av_log(avctx, AV_LOG_ERROR, "Wrong magic %08X\n", magic);
+        return AVERROR_INVALIDDATA;
+    }
+
+    if ((magic & 0xF) != 4) {
+        av_log(avctx, AV_LOG_ERROR, "G2M2 and G2M3 are not yet supported\n");
+        return AVERROR(ENOSYS);
+    }
+
+    while (bytestream2_get_bytes_left(&bc) > 5) {
+        chunk_size = bytestream2_get_le32(&bc) - 1;
+        chunk_type = bytestream2_get_byte(&bc);
+        if (chunk_size > bytestream2_get_bytes_left(&bc)) {
+            av_log(avctx, AV_LOG_ERROR, "Invalid chunk size %d type %02X\n",
+                   chunk_size, chunk_type);
+            break;
+        }
+        switch (chunk_type) {
+        case FRAME_INFO:
+            c->got_header = 0;
+            if (chunk_size < 21) {
+                av_log(avctx, AV_LOG_ERROR, "Invalid frame info size %d\n",
+                       chunk_size);
+                break;
+            }
+            c->width  = bytestream2_get_be32(&bc);
+            c->height = bytestream2_get_be32(&bc);
+            if (c->width < 16 || c->width > avctx->width ||
+                c->height < 16 || c->height > avctx->height) {
+                av_log(avctx, AV_LOG_ERROR,
+                       "Invalid frame dimensions %dx%d\n",
+                       c->width, c->height);
+                c->width = c->height = 0;
+                return AVERROR_INVALIDDATA;
+            }
+            if (c->width != avctx->width || c->height != avctx->height)
+                avcodec_set_dimensions(avctx, c->width, c->height);
+            c->compression = bytestream2_get_be32(&bc);
+            if (c->compression != 2 && c->compression != 3) {
+                av_log(avctx, AV_LOG_ERROR,
+                       "Unknown compression method %d\n",
+                       c->compression);
+                return AVERROR_PATCHWELCOME;
+            }
+            c->tile_width  = bytestream2_get_be32(&bc);
+            c->tile_height = bytestream2_get_be32(&bc);
+            if (!c->tile_width || !c->tile_height) {
+                av_log(avctx, AV_LOG_ERROR,
+                       "Invalid tile dimensions %dx%d\n",
+                       c->tile_width, c->tile_height);
+                return AVERROR_INVALIDDATA;
+            }
+            c->tiles_x = (c->width  + c->tile_width  - 1) / c->tile_width;
+            c->tiles_y = (c->height + c->tile_height - 1) / c->tile_height;
+            c->bpp = bytestream2_get_byte(&bc);
+            chunk_size -= 21;
+            bytestream2_skip(&bc, chunk_size);
+            if (g2m_init_buffers(c))
+                return AVERROR(ENOMEM);
+            got_header = 1;
+            break;
+        case TILE_DATA:
+            if (!c->tiles_x || !c->tiles_y) {
+                av_log(avctx, AV_LOG_WARNING,
+                       "No frame header - skipping tile\n");
+                bytestream2_skip(&bc, bytestream2_get_bytes_left(&bc));
+                break;
+            }
+            if (chunk_size < 2) {
+                av_log(avctx, AV_LOG_ERROR, "Invalid tile data size %d\n",
+                       chunk_size);
+                break;
+            }
+            c->tile_x = bytestream2_get_byte(&bc);
+            c->tile_y = bytestream2_get_byte(&bc);
+            if (c->tile_x >= c->tiles_x || c->tile_y >= c->tiles_y) {
+                av_log(avctx, AV_LOG_ERROR,
+                       "Invalid tile pos %d,%d (in %dx%d grid)\n",
+                       c->tile_x, c->tile_y, c->tiles_x, c->tiles_y);
+                break;
+            }
+            chunk_size -= 2;
+            ret = 0;
+            switch (c->compression) {
+            case COMPR_EPIC_J_B:
+                av_log(avctx, AV_LOG_ERROR,
+                       "ePIC j-b compression is not implemented yet\n");
+                return AVERROR(ENOSYS);
+            case COMPR_KEMPF_J_B:
+                ret = kempf_decode_tile(c, c->tile_x, c->tile_y,
+                                        buf + bytestream2_tell(&bc),
+                                        chunk_size);
+                break;
+            }
+            /* a broken tile is logged but does not abort the frame */
+            if (ret && c->framebuf)
+                av_log(avctx, AV_LOG_ERROR, "Error decoding tile %d,%d\n",
+                       c->tile_x, c->tile_y);
+            bytestream2_skip(&bc, chunk_size);
+            break;
+        case CURSOR_POS:
+            if (chunk_size < 5) {
+                av_log(avctx, AV_LOG_ERROR, "Invalid cursor pos size %d\n",
+                       chunk_size);
+                break;
+            }
+            c->cursor_x = bytestream2_get_be16(&bc);
+            c->cursor_y = bytestream2_get_be16(&bc);
+            bytestream2_skip(&bc, chunk_size - 4);
+            break;
+        case CURSOR_SHAPE:
+            if (chunk_size < 8) {
+                av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %d\n",
+                       chunk_size);
+                break;
+            }
+            bytestream2_init(&tbc, buf + bytestream2_tell(&bc),
+                             chunk_size - 4);
+            cur_size        = bytestream2_get_be32(&tbc);
+            c->cursor_w     = bytestream2_get_byte(&tbc);
+            c->cursor_h     = bytestream2_get_byte(&tbc);
+            c->cursor_hot_x = bytestream2_get_byte(&tbc);
+            c->cursor_hot_y = bytestream2_get_byte(&tbc);
+            c->cursor_fmt   = bytestream2_get_byte(&tbc);
+            if (cur_size >= chunk_size ||
+                c->cursor_w * c->cursor_h / 4 > cur_size) {
+                av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %d\n",
+                       chunk_size);
+                break;
+            }
+            /* cursor decode failure is non-fatal, the old shape stays */
+            g2m_load_cursor(c, &tbc);
+            bytestream2_skip(&bc, chunk_size);
+            break;
+        case CHUNK_CC:
+        case CHUNK_CD:
+            bytestream2_skip(&bc, chunk_size);
+            break;
+        default:
+            av_log(avctx, AV_LOG_WARNING, "Skipping chunk type %02X\n",
+                   chunk_type);
+            bytestream2_skip(&bc, chunk_size);
+        }
+    }
+    if (got_header)
+        c->got_header = 1;
+
+    if (c->width && c->height) {
+        if ((ret = ff_get_buffer(avctx, pic, 0)) < 0) {
+            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+            return ret;
+        }
+
+        pic->key_frame = got_header;
+        pic->pict_type = got_header ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
+
+        for (i = 0; i < avctx->height; i++)
+            memcpy(pic->data[0] + i * pic->linesize[0],
+                   c->framebuf + i * c->framebuf_stride,
+                   c->width * 3);
+        g2m_paint_cursor(c, pic->data[0], pic->linesize[0]);
+
+        *got_picture_ptr = 1;
+    }
+
+    return buf_size;
+}
+
+/* Decoder init: build the shared JPEG decoder state and select the
+ * RGB24 output format.
+ * Fixes: propagate jpg_init()'s actual error code instead of always
+ * returning ENOMEM; use the AV_PIX_FMT_* name like the rest of the
+ * codebase instead of the deprecated PIX_FMT_RGB24. */
+static av_cold int g2m_decode_init(AVCodecContext *avctx)
+{
+    G2MContext * const c = avctx->priv_data;
+    int ret;
+
+    if ((ret = jpg_init(avctx, &c->jc)) != 0) {
+        av_log(avctx, AV_LOG_ERROR, "Cannot initialise VLCs\n");
+        jpg_free_context(&c->jc);
+        return ret;
+    }
+
+    avctx->pix_fmt = AV_PIX_FMT_RGB24;
+
+    return 0;
+}
+
+/* Decoder teardown: release the JPEG decoder state and every buffer
+ * owned by the context. */
+static av_cold int g2m_decode_end(AVCodecContext *avctx)
+{
+    G2MContext * const c = avctx->priv_data;
+
+    jpg_free_context(&c->jc);
+
+    av_freep(&c->framebuf);
+    av_freep(&c->synth_tile);
+    av_freep(&c->jpeg_tile);
+    av_freep(&c->kempf_buf);
+    av_freep(&c->kempf_flags);
+    av_freep(&c->cursor);
+
+    return 0;
+}
+
+/* Go2Meeting / Go2Webinar screen-sharing video decoder. */
+AVCodec ff_g2m_decoder = {
+    .name           = "g2m",
+    .long_name      = NULL_IF_CONFIG_SMALL("Go2Meeting"),
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = AV_CODEC_ID_G2M,
+    .priv_data_size = sizeof(G2MContext),
+    .init           = g2m_decode_init,
+    .close          = g2m_decode_end,
+    .decode         = g2m_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
+};
qp_bd_offset = 6*(sps->bit_depth_luma-8);
if (sps->bit_depth_luma > 14) {
av_log(h->avctx, AV_LOG_ERROR, "Invalid luma bit depth=%d\n", sps->bit_depth_luma);
- return AVERROR_INVALIDDATA;
+ goto fail;
} else if (sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13) {
av_log(h->avctx, AV_LOG_ERROR, "Unimplemented luma bit depth=%d\n", sps->bit_depth_luma);
- return AVERROR_PATCHWELCOME;
+ goto fail;
}
pps->cabac= get_bits1(&h->gb);
band->coord[1][j] = ff_jpeg2000_ceildiv(band->coord[1][j], dy);
band->prec = av_malloc_array(reslevel->num_precincts_x *
- reslevel->num_precincts_y,
+ (uint64_t)reslevel->num_precincts_y,
sizeof(*band->prec));
if (!band->prec)
return AVERROR(ENOMEM);
return AVERROR(ENOMEM);
prec->cblk = av_malloc_array(prec->nb_codeblocks_width *
- prec->nb_codeblocks_height,
+ (uint64_t)prec->nb_codeblocks_height,
sizeof(*prec->cblk));
if (!prec->cblk)
return AVERROR(ENOMEM);
* JPEG 2000 image decoder
*/
+#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "avcodec.h"
Jpeg2000Component *comp = tile->comp + compno;
float *datap = comp->f_data;
int32_t *i_datap = comp->i_data;
+ int cbps = s->cbps[compno];
+ int w = tile->comp[compno].coord[0][1] - s->image_offset_x;
y = tile->comp[compno].coord[1][0] - s->image_offset_y;
line = picture->data[0] + y * picture->linesize[0];
x = tile->comp[compno].coord[0][0] - s->image_offset_x;
dst = line + x * s->ncomponents + compno;
- for (; x < tile->comp[compno].coord[0][1] - s->image_offset_x; x += s->cdx[compno]) {
- int val;
- /* DC level shift and clip see ISO 15444-1:2002 G.1.2 */
- if (tile->codsty->transform == FF_DWT97)
- val = lrintf(*datap) + (1 << (s->cbps[compno] - 1));
- else
- val = *i_datap + (1 << (s->cbps[compno] - 1));
- val = av_clip(val, 0, (1 << s->cbps[compno]) - 1);
- *dst = val << (8 - s->cbps[compno]);
- datap++;
- i_datap++;
- dst += s->ncomponents;
+ if (tile->codsty->transform == FF_DWT97) {
+ for (; x < w; x += s->cdx[compno]) {
+ int val = lrintf(*datap) + (1 << (cbps - 1));
+ /* DC level shift and clip see ISO 15444-1:2002 G.1.2 */
+ val = av_clip(val, 0, (1 << cbps) - 1);
+ *dst = val << (8 - cbps);
+ datap++;
+ dst += s->ncomponents;
+ }
+ } else {
+ for (; x < w; x += s->cdx[compno]) {
+ int val = *i_datap + (1 << (cbps - 1));
+ /* DC level shift and clip see ISO 15444-1:2002 G.1.2 */
+ val = av_clip(val, 0, (1 << cbps) - 1);
+ *dst = val << (8 - cbps);
+ i_datap++;
+ dst += s->ncomponents;
+ }
}
line += picture->linesize[0];
}
float *datap = comp->f_data;
int32_t *i_datap = comp->i_data;
uint16_t *linel;
+ int cbps = s->cbps[compno];
+ int w = tile->comp[compno].coord[0][1] - s->image_offset_x;
y = tile->comp[compno].coord[1][0] - s->image_offset_y;
linel = (uint16_t *)picture->data[0] + y * (picture->linesize[0] >> 1);
x = tile->comp[compno].coord[0][0] - s->image_offset_x;
dst = linel + (x * s->ncomponents + compno);
- for (; x < tile->comp[compno].coord[0][1] - s->image_offset_x; x += s-> cdx[compno]) {
- int val;
- /* DC level shift and clip see ISO 15444-1:2002 G.1.2 */
- if (tile->codsty->transform == FF_DWT97)
- val = lrintf(*datap) + (1 << (s->cbps[compno] - 1));
- else
- val = *i_datap + (1 << (s->cbps[compno] - 1));
- val = av_clip(val, 0, (1 << s->cbps[compno]) - 1);
- /* align 12 bit values in little-endian mode */
- *dst = val << (16 - s->cbps[compno]);
- datap++;
- i_datap++;
- dst += s->ncomponents;
+ if (tile->codsty->transform == FF_DWT97) {
+ for (; x < w; x += s-> cdx[compno]) {
+ int val = lrintf(*datap) + (1 << (cbps - 1));
+ /* DC level shift and clip see ISO 15444-1:2002 G.1.2 */
+ val = av_clip(val, 0, (1 << cbps) - 1);
+ /* align 12 bit values in little-endian mode */
+ *dst = val << (16 - cbps);
+ datap++;
+ dst += s->ncomponents;
+ }
+ } else {
+ for (; x < w; x += s-> cdx[compno]) {
+ int val = *i_datap + (1 << (cbps - 1));
+ /* DC level shift and clip see ISO 15444-1:2002 G.1.2 */
+ val = av_clip(val, 0, (1 << cbps) - 1);
+ /* align 12 bit values in little-endian mode */
+ *dst = val << (16 - cbps);
+ i_datap++;
+ dst += s->ncomponents;
+ }
}
linel += picture->linesize[0] >> 1;
}
}
}
+
return 0;
}
av_freep(&s->tile[tileno].comp);
}
av_freep(&s->tile);
+ s->numXtiles = s->numYtiles = 0;
}
static int jpeg2000_read_main_headers(Jpeg2000DecoderContext *s)
break;
case JPEG2000_SOT:
if (!(ret = get_sot(s, len))) {
+ av_assert1(s->curtileno >= 0);
codsty = s->tile[s->curtileno].codsty;
qntsty = s->tile[s->curtileno].qntsty;
properties = s->tile[s->curtileno].properties;
goto end;
/* get picture buffer */
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0) {
- av_log(avctx, AV_LOG_ERROR, "ff_thread_get_buffer() failed.\n");
+ if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
goto end;
- }
picture->pict_type = AV_PICTURE_TYPE_I;
picture->key_frame = 1;
extend97_float(p, i0, i1);
- /*step 1*/
- for (i = i0 / 2 - 1; i < i1 / 2 + 2; i++)
- p[2 * i] *= F_LFTG_K;
- /* step 2*/
- for (i = i0 / 2 - 2; i < i1 / 2 + 2; i++)
- p[2 * i + 1] *= F_LFTG_X;
- /* step 3*/
for (i = i0 / 2 - 1; i < i1 / 2 + 2; i++)
p[2 * i] -= F_LFTG_DELTA * (p[2 * i - 1] + p[2 * i + 1]);
/* step 4 */
int i, j = 0;
// copy with interleaving
for (i = mh; i < lh; i += 2, j++)
- l[i] = data[w * lp + j];
+ l[i] = data[w * lp + j] * F_LFTG_K;
for (i = 1 - mh; i < lh; i += 2, j++)
- l[i] = data[w * lp + j];
+ l[i] = data[w * lp + j] * F_LFTG_X;
sr_1d97_float(line, mh, mh + lh);
int i, j = 0;
// copy with interleaving
for (i = mv; i < lv; i += 2, j++)
- l[i] = data[w * j + lp];
+ l[i] = data[w * j + lp] * F_LFTG_K;
for (i = 1 - mv; i < lv; i += 2, j++)
- l[i] = data[w * j + lp];
+ l[i] = data[w * j + lp] * F_LFTG_X;
sr_1d97_float(line, mv, mv + lv);
extend97_int(p, i0, i1);
- /*step 1*/
- for (i = i0 / 2 - 1; i < i1 / 2 + 2; i++)
- p[2 * i] = ((p[2 * i] * I_LFTG_K) + (1 << 15)) >> 16;
- /* step 2*/
- for (i = i0 / 2 - 2; i < i1 / 2 + 2; i++)
- p[2 * i + 1] = ((p[2 * i + 1] * I_LFTG_X) + (1 << 15)) >> 16;
- /* step 3*/
for (i = i0 / 2 - 1; i < i1 / 2 + 2; i++)
p[2 * i] -= (I_LFTG_DELTA * (p[2 * i - 1] + p[2 * i + 1]) + (1 << 15)) >> 16;
/* step 4 */
l = line + mh;
for (lp = 0; lp < lv; lp++) {
int i, j = 0;
- // copy with interleaving
+ // rescale with interleaving
for (i = mh; i < lh; i += 2, j++)
- l[i] = data[w * lp + j];
+ l[i] = ((data[w * lp + j] * I_LFTG_K) + (1 << 15)) >> 16;
for (i = 1 - mh; i < lh; i += 2, j++)
- l[i] = data[w * lp + j];
+ l[i] = ((data[w * lp + j] * I_LFTG_X) + (1 << 15)) >> 16;
sr_1d97_int(line, mh, mh + lh);
l = line + mv;
for (lp = 0; lp < lh; lp++) {
int i, j = 0;
- // copy with interleaving
+ // rescale with interleaving
for (i = mv; i < lv; i += 2, j++)
- l[i] = data[w * j + lp];
+ l[i] = ((data[w * j + lp] * I_LFTG_K) + (1 << 15)) >> 16;
for (i = 1 - mv; i < lv; i += 2, j++)
- l[i] = data[w * j + lp];
+ l[i] = ((data[w * j + lp] * I_LFTG_X) + (1 << 15)) >> 16;
sr_1d97_int(line, mv, mv + lv);
* @author Konstantin Shishkov
*/
-#include "avcodec.h"
-#include "bytestream.h"
#include "config.h"
#if CONFIG_ZLIB
#include <zlib.h>
#endif
-#include "lzw.h"
-#include "tiff.h"
-#include "tiff_data.h"
-#include "faxcompr.h"
-#include "internal.h"
-#include "mathops.h"
+
#include "libavutil/attributes.h"
+#include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/imgutils.h"
-#include "libavutil/avstring.h"
+#include "avcodec.h"
+#include "bytestream.h"
+#include "faxcompr.h"
+#include "internal.h"
+#include "lzw.h"
+#include "mathops.h"
+#include "tiff.h"
+#include "tiff_data.h"
typedef struct TiffContext {
AVCodecContext *avctx;
int le;
enum TiffCompr compr;
int invert;
+ int planar;
int fax_opts;
int predictor;
int fill_order;
static unsigned tget(GetByteContext *gb, int type, int le)
{
switch (type) {
- case TIFF_BYTE : return bytestream2_get_byte(gb);
- case TIFF_SHORT: return tget_short(gb, le);
- case TIFF_LONG : return tget_long(gb, le);
- default : return UINT_MAX;
+ case TIFF_BYTE:
+ return bytestream2_get_byte(gb);
+ case TIFF_SHORT:
+ return tget_short(gb, le);
+ case TIFF_LONG:
+ return tget_long(gb, le);
+ default:
+ return UINT_MAX;
}
}
z_stream zstream = { 0 };
int zret;
- zstream.next_in = (uint8_t *)src;
- zstream.avail_in = size;
- zstream.next_out = dst;
+ zstream.next_in = (uint8_t *)src;
+ zstream.avail_in = size;
+ zstream.next_out = dst;
zstream.avail_out = *len;
- zret = inflateInit(&zstream);
+ zret = inflateInit(&zstream);
if (zret != Z_OK) {
av_log(NULL, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
return zret;
{
int c, line, pixels, code, ret;
const uint8_t *ssrc = src;
- int width = ((s->width * s->bpp) + 7) >> 3;
+ int width = ((s->width * s->bpp) + 7) >> 3;
+
+ if (s->planar)
+ width /= s->bppcount;
if (size <= 0)
return AVERROR_INVALIDDATA;
unsigned long outlen;
int ret;
outlen = width * lines;
- zbuf = av_malloc(outlen);
+ zbuf = av_malloc(outlen);
if (!zbuf)
return AVERROR(ENOMEM);
if (s->fill_order) {
- if ((ret = deinvert_buffer(s, src, size)) < 0)
+ if ((ret = deinvert_buffer(s, src, size)) < 0) {
+ av_free(zbuf);
return ret;
+ }
ssrc = src = s->deinvert_buf;
}
ret = tiff_uncompress(zbuf, &outlen, src, size);
}
src = zbuf;
for (line = 0; line < lines; line++) {
- if(s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8){
+ if(s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
horizontal_fill(s->bpp, dst, 1, src, 0, width, 0);
}else{
memcpy(dst, src, width);
return ret;
}
}
- if (s->compr == TIFF_CCITT_RLE || s->compr == TIFF_G3
- || s->compr == TIFF_G4) {
+ if (s->compr == TIFF_CCITT_RLE ||
+ s->compr == TIFF_G3 ||
+ s->compr == TIFF_G4) {
int i, ret = 0;
uint8_t *src2 = av_malloc((unsigned)size +
FF_INPUT_BUFFER_PADDING_SIZE);
}
horizontal_fill(s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
dst, 1, src, 0, code, pixels);
- src += code;
+ src += code;
pixels += code;
} else if (code != -128) { // -127..-1
code = (-code) + 1;
s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
break;
case 243:
- s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
+ s->avctx->pix_fmt = s->planar ? AV_PIX_FMT_GBRP : AV_PIX_FMT_RGB24;
break;
case 161:
s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE;
break;
case 162:
- s->avctx->pix_fmt = AV_PIX_FMT_GRAY8A;
+ s->avctx->pix_fmt = s->planar ? AV_PIX_FMT_NONE : AV_PIX_FMT_GRAY8A;
break;
case 324:
- s->avctx->pix_fmt = AV_PIX_FMT_RGBA;
+ s->avctx->pix_fmt = s->planar ? AV_PIX_FMT_GBRAP : AV_PIX_FMT_RGBA;
break;
case 483:
- s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE;
+ s->avctx->pix_fmt = s->planar ? (s->le ? AV_PIX_FMT_GBRP16LE : AV_PIX_FMT_GBRP16BE) :
+ (s->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE);
break;
case 644:
- s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGBA64BE;
+ s->avctx->pix_fmt = s->planar ? (s->le ? AV_PIX_FMT_GBRAP16LE : AV_PIX_FMT_GBRAP16BE) :
+ (s->le ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGBA64BE);
break;
default:
av_log(s->avctx, AV_LOG_ERROR,
bytestream2_seek(&s->gb, off, SEEK_SET);
}
} else {
- if (count <= 4 && type_sizes[type] * count <= 4) {
+ if (count <= 4 && type_sizes[type] * count <= 4)
bytestream2_seek(&s->gb, -4, SEEK_CUR);
- } else {
+ else
bytestream2_seek(&s->gb, off, SEEK_SET);
- }
}
switch (tag) {
s->bppcount = value;
break;
case TIFF_COMPR:
- s->compr = value;
+ s->compr = value;
s->predictor = 0;
switch (s->compr) {
case TIFF_RAW:
break;
case TIFF_STRIP_OFFS:
if (count == 1) {
- s->strippos = 0;
- s->stripoff = value;
+ s->strippos = 0;
+ s->stripoff = value;
} else
s->strippos = off;
s->strips = count;
case TIFF_STRIP_SIZE:
if (count == 1) {
s->stripsizesoff = 0;
- s->stripsize = value;
- s->strips = 1;
+ s->stripsize = value;
+ s->strips = 1;
} else {
s->stripsizesoff = off;
}
s->palette_is_set = 1;
break;
case TIFF_PLANAR:
- if (value == 2) {
- av_log(s->avctx, AV_LOG_ERROR, "Planar format is not supported\n");
- return AVERROR_PATCHWELCOME;
- }
+ s->planar = value == 2;
break;
case TIFF_T4OPTIONS:
if (s->compr == TIFF_G3)
TiffContext *const s = avctx->priv_data;
AVFrame *const p = data;
unsigned off;
- int id, le, ret;
- int i, j, entries;
- int stride;
+ int id, le, ret, plane, planes;
+ int i, j, entries, stride;
unsigned soff, ssize;
uint8_t *dst;
GetByteContext stripsizes;
bytestream2_init(&s->gb, avpkt->data, avpkt->size);
- //parse image header
+ // parse image header
if (avpkt->size < 8)
return AVERROR_INVALIDDATA;
- id = bytestream2_get_le16u(&s->gb);
+ id = bytestream2_get_le16u(&s->gb);
if (id == 0x4949)
le = 1;
else if (id == 0x4D4D)
av_log(avctx, AV_LOG_ERROR, "TIFF header not found\n");
return AVERROR_INVALIDDATA;
}
- s->le = le;
+ s->le = le;
// TIFF_BPP is not a required tag and defaults to 1
- s->bppcount = s->bpp = 1;
- s->invert = 0;
- s->compr = TIFF_RAW;
+ s->bppcount = s->bpp = 1;
+ s->invert = 0;
+ s->compr = TIFF_RAW;
s->fill_order = 0;
free_geotags(s);
av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
s->stripsize = avpkt->size - s->stripoff;
}
- stride = p->linesize[0];
- dst = p->data[0];
if (s->stripsizesoff) {
if (s->stripsizesoff >= (unsigned)avpkt->size)
return AVERROR_INVALIDDATA;
}
+ planes = s->planar ? s->bppcount : 1;
+ for (plane = 0; plane < planes; plane++) {
+ stride = p->linesize[plane];
+ dst = p->data[plane];
for (i = 0; i < s->height; i += s->rps) {
if (s->stripsizesoff)
ssize = tget(&stripsizes, s->sstype, s->le);
dst += s->rps * stride;
}
if (s->predictor == 2) {
- dst = p->data[0];
- soff = s->bpp >> 3;
+ dst = p->data[plane];
+ soff = s->bpp >> 3;
ssize = s->width * soff;
if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48LE ||
s->avctx->pix_fmt == AV_PIX_FMT_RGBA64LE) {
}
if (s->invert) {
- dst = p->data[0];
+ dst = p->data[plane];
for (i = 0; i < s->height; i++) {
- for (j = 0; j < p->linesize[0]; j++)
+ for (j = 0; j < p->linesize[plane]; j++)
dst[j] = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255) - dst[j];
- dst += p->linesize[0];
+ dst += p->linesize[plane];
}
}
+ }
+
+ if (s->planar && s->bppcount > 2) {
+ FFSWAP(uint8_t*, p->data[0], p->data[2]);
+ FFSWAP(int, p->linesize[0], p->linesize[2]);
+ FFSWAP(uint8_t*, p->data[0], p->data[1]);
+ FFSWAP(int, p->linesize[0], p->linesize[1]);
+ }
+
*got_frame = 1;
return avpkt->size;
{
TiffContext *s = avctx->priv_data;
- s->width = 0;
+ s->width = 0;
s->height = 0;
- s->avctx = avctx;
+ s->avctx = avctx;
ff_lzw_decode_open(&s->lzw);
ff_ccitt_unpack_init();
AVCodec ff_tiff_decoder = {
.name = "tiff",
+ .long_name = NULL_IF_CONFIG_SMALL("TIFF image"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_TIFF,
.priv_data_size = sizeof(TiffContext),
.close = tiff_end,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
- .long_name = NULL_IF_CONFIG_SMALL("TIFF image"),
};
#include <stdint.h>
/** abridged list of TIFF tags */
-enum TiffTags{
- TIFF_SUBFILE = 0xfe,
- TIFF_WIDTH = 0x100,
+enum TiffTags {
+ TIFF_SUBFILE = 0xfe,
+ TIFF_WIDTH = 0x100,
TIFF_HEIGHT,
TIFF_BPP,
TIFF_COMPR,
- TIFF_INVERT = 0x106,
- TIFF_FILL_ORDER = 0x10A,
- TIFF_DOCUMENT_NAME = 0x10D,
- TIFF_IMAGE_DESCRIPTION = 0x10E,
- TIFF_MAKE = 0x10F,
- TIFF_MODEL = 0x110,
- TIFF_STRIP_OFFS = 0x111,
- TIFF_SAMPLES_PER_PIXEL = 0x115,
- TIFF_ROWSPERSTRIP = 0x116,
+ TIFF_INVERT = 0x106,
+ TIFF_FILL_ORDER = 0x10A,
+ TIFF_DOCUMENT_NAME = 0x10D,
+ TIFF_IMAGE_DESCRIPTION = 0x10E,
+ TIFF_MAKE = 0x10F,
+ TIFF_MODEL = 0x110,
+ TIFF_STRIP_OFFS = 0x111,
+ TIFF_SAMPLES_PER_PIXEL = 0x115,
+ TIFF_ROWSPERSTRIP = 0x116,
TIFF_STRIP_SIZE,
- TIFF_XRES = 0x11A,
- TIFF_YRES = 0x11B,
- TIFF_PLANAR = 0x11C,
- TIFF_PAGE_NAME = 0x11D,
- TIFF_XPOS = 0x11E,
- TIFF_YPOS = 0x11F,
- TIFF_T4OPTIONS = 0x124,
+ TIFF_XRES = 0x11A,
+ TIFF_YRES = 0x11B,
+ TIFF_PLANAR = 0x11C,
+ TIFF_PAGE_NAME = 0x11D,
+ TIFF_XPOS = 0x11E,
+ TIFF_YPOS = 0x11F,
+ TIFF_T4OPTIONS = 0x124,
TIFF_T6OPTIONS,
- TIFF_RES_UNIT = 0x128,
- TIFF_PAGE_NUMBER = 0x129,
- TIFF_SOFTWARE_NAME = 0x131,
- TIFF_DATE = 0x132,
- TIFF_ARTIST = 0x13B,
- TIFF_HOST_COMPUTER = 0x13C,
- TIFF_PREDICTOR = 0x13D,
- TIFF_PAL = 0x140,
- TIFF_TILE_WIDTH = 0x142,
- TIFF_TILE_LENGTH = 0x143,
- TIFF_TILE_OFFSETS = 0x144,
- TIFF_TILE_BYTE_COUNTS = 0x145,
- TIFF_EXTRASAMPLES = 0x152,
+ TIFF_RES_UNIT = 0x128,
+ TIFF_PAGE_NUMBER = 0x129,
+ TIFF_SOFTWARE_NAME = 0x131,
+ TIFF_DATE = 0x132,
+ TIFF_ARTIST = 0x13B,
+ TIFF_HOST_COMPUTER = 0x13C,
+ TIFF_PREDICTOR = 0x13D,
+ TIFF_PAL = 0x140,
+ TIFF_TILE_WIDTH = 0x142,
+ TIFF_TILE_LENGTH = 0x143,
+ TIFF_TILE_OFFSETS = 0x144,
+ TIFF_TILE_BYTE_COUNTS = 0x145,
+ TIFF_EXTRASAMPLES = 0x152,
TIFF_YCBCR_COEFFICIENTS = 0x211,
- TIFF_YCBCR_SUBSAMPLING = 0x212,
- TIFF_YCBCR_POSITIONING = 0x213,
- TIFF_REFERENCE_BW = 0x214,
- TIFF_COPYRIGHT = 0x8298,
- TIFF_MODEL_TIEPOINT = 0x8482,
- TIFF_MODEL_PIXEL_SCALE = 0x830E,
- TIFF_MODEL_TRANSFORMATION = 0x8480,
- TIFF_GEO_KEY_DIRECTORY = 0x87AF,
- TIFF_GEO_DOUBLE_PARAMS = 0x87B0,
- TIFF_GEO_ASCII_PARAMS = 0x87B1
+ TIFF_YCBCR_SUBSAMPLING = 0x212,
+ TIFF_YCBCR_POSITIONING = 0x213,
+ TIFF_REFERENCE_BW = 0x214,
+ TIFF_COPYRIGHT = 0x8298,
+ TIFF_MODEL_TIEPOINT = 0x8482,
+ TIFF_MODEL_PIXEL_SCALE = 0x830E,
+ TIFF_MODEL_TRANSFORMATION= 0x8480,
+ TIFF_GEO_KEY_DIRECTORY = 0x87AF,
+ TIFF_GEO_DOUBLE_PARAMS = 0x87B0,
+ TIFF_GEO_ASCII_PARAMS = 0x87B1
};
/** list of TIFF compression types */
-enum TiffCompr{
+enum TiffCompr {
TIFF_RAW = 1,
TIFF_CCITT_RLE,
TIFF_G3,
TIFF_NEWJPEG,
TIFF_ADOBE_DEFLATE,
TIFF_PACKBITS = 0x8005,
- TIFF_DEFLATE = 0x80B2
+ TIFF_DEFLATE = 0x80B2
};
-enum TiffTypes{
+enum TiffTypes {
TIFF_BYTE = 1,
TIFF_STRING,
TIFF_SHORT,
* @author Bartlomiej Wolowiec
*/
+#include "config.h"
+#if CONFIG_ZLIB
+#include <zlib.h>
+#endif
+
#include "libavutil/imgutils.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
-
#include "avcodec.h"
-#include "config.h"
-#if CONFIG_ZLIB
-#include <zlib.h>
-#endif
#include "bytestream.h"
#include "internal.h"
-#include "tiff.h"
-#include "rle.h"
#include "lzw.h"
#include "put_bits.h"
+#include "rle.h"
+#include "tiff.h"
#define TIFF_MAX_ENTRY 32
};
typedef struct TiffEncoderContext {
- AVClass *class; ///< for private options
+ AVClass *class; ///< for private options
AVCodecContext *avctx;
AVFrame picture;
- int width; ///< picture width
- int height; ///< picture height
- unsigned int bpp; ///< bits per pixel
- int compr; ///< compression level
- int bpp_tab_size; ///< bpp_tab size
- int photometric_interpretation; ///< photometric interpretation
- int strips; ///< number of strips
+ int width; ///< picture width
+ int height; ///< picture height
+ unsigned int bpp; ///< bits per pixel
+ int compr; ///< compression level
+ int bpp_tab_size; ///< bpp_tab size
+ int photometric_interpretation; ///< photometric interpretation
+ int strips; ///< number of strips
uint32_t *strip_sizes;
unsigned int strip_sizes_size;
uint32_t *strip_offsets;
unsigned int strip_offsets_size;
uint8_t *yuv_line;
unsigned int yuv_line_size;
- int rps; ///< row per strip
- uint8_t entries[TIFF_MAX_ENTRY*12]; ///< entires in header
- int num_entries; ///< number of entires
- uint8_t **buf; ///< actual position in buffer
- uint8_t *buf_start; ///< pointer to first byte in buffer
- int buf_size; ///< buffer size
- uint16_t subsampling[2]; ///< YUV subsampling factors
- struct LZWEncodeState *lzws; ///< LZW Encode state
- uint32_t dpi; ///< image resolution in DPI
+ int rps; ///< row per strip
+ uint8_t entries[TIFF_MAX_ENTRY * 12]; ///< entries in header
+ int num_entries; ///< number of entries
+ uint8_t **buf; ///< actual position in buffer
+ uint8_t *buf_start; ///< pointer to first byte in buffer
+ int buf_size; ///< buffer size
+ uint16_t subsampling[2]; ///< YUV subsampling factors
+ struct LZWEncodeState *lzws; ///< LZW encode state
+ uint32_t dpi; ///< image resolution in DPI
} TiffEncoderContext;
-
/**
* Check free space in buffer.
*
* @param need Needed bytes
* @return 0 - ok, 1 - no free space
*/
-static inline int check_size(TiffEncoderContext * s, uint64_t need)
+static inline int check_size(TiffEncoderContext *s, uint64_t need)
{
if (s->buf_size < *s->buf - s->buf_start + need) {
*s->buf = s->buf_start + s->buf_size + 1;
* @param type type of values
* @param flip = 0 - normal copy, >0 - flip
*/
-static void tnput(uint8_t ** p, int n, const uint8_t * val, enum TiffTypes type,
+static void tnput(uint8_t **p, int n, const uint8_t *val, enum TiffTypes type,
int flip)
{
int i;
#if HAVE_BIGENDIAN
- flip ^= ((int[]) {0, 0, 0, 1, 3, 3})[type];
+ flip ^= ((int[]) { 0, 0, 0, 1, 3, 3 })[type];
#endif
for (i = 0; i < n * type_sizes2[type]; i++)
*(*p)++ = val[i ^ flip];
* @param count the number of values
* @param ptr_val pointer to values
*/
-static void add_entry(TiffEncoderContext * s,
- enum TiffTags tag, enum TiffTypes type, int count,
- const void *ptr_val)
+static void add_entry(TiffEncoderContext *s, enum TiffTags tag,
+ enum TiffTypes type, int count, const void *ptr_val)
{
uint8_t *entries_ptr = s->entries + 12 * s->num_entries;
s->num_entries++;
}
-static void add_entry1(TiffEncoderContext * s,
- enum TiffTags tag, enum TiffTypes type, int val){
- uint16_t w = val;
- uint32_t dw= val;
+static void add_entry1(TiffEncoderContext *s,
+ enum TiffTags tag, enum TiffTypes type, int val)
+{
+ uint16_t w = val;
+ uint32_t dw = val;
add_entry(s, tag, type, 1, type == TIFF_SHORT ? (void *)&w : (void *)&dw);
}
* @param compr compression method
* @return number of output bytes. If an output error is encountered, -1 is returned
*/
-static int encode_strip(TiffEncoderContext * s, const int8_t * src,
- uint8_t * dst, int n, int compr)
+static int encode_strip(TiffEncoderContext *s, const int8_t *src,
+ uint8_t *dst, int n, int compr)
{
-
switch (compr) {
#if CONFIG_ZLIB
case TIFF_DEFLATE:
case TIFF_ADOBE_DEFLATE:
- {
- unsigned long zlen = s->buf_size - (*s->buf - s->buf_start);
- if (compress(dst, &zlen, src, n) != Z_OK) {
- av_log(s->avctx, AV_LOG_ERROR, "Compressing failed\n");
- return -1;
- }
- return zlen;
+ {
+ unsigned long zlen = s->buf_size - (*s->buf - s->buf_start);
+ if (compress(dst, &zlen, src, n) != Z_OK) {
+ av_log(s->avctx, AV_LOG_ERROR, "Compressing failed\n");
+ return -1;
}
+ return zlen;
+ }
#endif
case TIFF_RAW:
if (check_size(s, n))
memcpy(dst, src, n);
return n;
case TIFF_PACKBITS:
- return ff_rle_encode(dst, s->buf_size - (*s->buf - s->buf_start), src, 1, n, 2, 0xff, -1, 0);
+ return ff_rle_encode(dst, s->buf_size - (*s->buf - s->buf_start),
+ src, 1, n, 2, 0xff, -1, 0);
case TIFF_LZW:
return ff_lzw_encode(s->lzws, src, n);
default:
}
}
-static void pack_yuv(TiffEncoderContext * s, uint8_t * dst, int lnum)
+static void pack_yuv(TiffEncoderContext *s, uint8_t *dst, int lnum)
{
AVFrame *p = &s->picture;
int i, j, k;
- int w = (s->width - 1) / s->subsampling[0] + 1;
+ int w = (s->width - 1) / s->subsampling[0] + 1;
uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
uint8_t *pv = &p->data[2][lnum / s->subsampling[1] * p->linesize[2]];
- if(s->width % s->subsampling[0] || s->height % s->subsampling[1]){
- for (i = 0; i < w; i++){
+ if (s->width % s->subsampling[0] || s->height % s->subsampling[1]) {
+ for (i = 0; i < w; i++) {
for (j = 0; j < s->subsampling[1]; j++)
for (k = 0; k < s->subsampling[0]; k++)
*dst++ = p->data[0][FFMIN(lnum + j, s->height-1) * p->linesize[0] +
*dst++ = *pv++;
}
}else{
- for (i = 0; i < w; i++){
+ for (i = 0; i < w; i++) {
for (j = 0; j < s->subsampling[1]; j++)
for (k = 0; k < s->subsampling[0]; k++)
*dst++ = p->data[0][(lnum + j) * p->linesize[0] +
{
TiffEncoderContext *s = avctx->priv_data;
- avctx->coded_frame= &s->picture;
+ avctx->coded_frame = &s->picture;
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1;
s->avctx = avctx;
return 0;
}
-static int encode_frame(AVCodecContext * avctx, AVPacket *pkt,
+static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
uint8_t *offset;
uint32_t strips;
int bytes_per_row;
- uint32_t res[2] = { s->dpi, 1 }; // image resolution (72/1)
+ uint32_t res[2] = { s->dpi, 1 }; // image resolution (72/1)
uint16_t bpp_tab[4];
int ret = -1;
int is_yuv = 0, alpha = 0;
*p = *pict;
- s->width = avctx->width;
- s->height = avctx->height;
+ s->width = avctx->width;
+ s->height = avctx->height;
s->subsampling[0] = 1;
s->subsampling[1] = 1;
avctx->bits_per_coded_sample =
- s->bpp = av_get_bits_per_pixel(desc);
+ s->bpp = av_get_bits_per_pixel(desc);
s->bpp_tab_size = desc->nb_components;
switch (avctx->pix_fmt) {
case AV_PIX_FMT_YUV444P:
case AV_PIX_FMT_YUV410P:
case AV_PIX_FMT_YUV411P:
+ av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &shift_h, &shift_v);
s->photometric_interpretation = 6;
- avcodec_get_chroma_sub_sample(avctx->pix_fmt, &shift_h, &shift_v);
- s->subsampling[0] = 1 << shift_h;
- s->subsampling[1] = 1 << shift_v;
- is_yuv = 1;
+ s->subsampling[0] = 1 << shift_h;
+ s->subsampling[1] = 1 << shift_v;
+ is_yuv = 1;
break;
default:
av_log(s->avctx, AV_LOG_ERROR,
for (i = 0; i < s->bpp_tab_size; i++)
bpp_tab[i] = desc->comp[i].depth_minus1 + 1;
- if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE || s->compr == TIFF_LZW)
- //best choose for DEFLATE
+ if (s->compr == TIFF_DEFLATE ||
+ s->compr == TIFF_ADOBE_DEFLATE ||
+ s->compr == TIFF_LZW)
+ // best choice for DEFLATE
s->rps = s->height;
else
- s->rps = FFMAX(8192 / (((s->width * s->bpp) >> 3) + 1), 1); // suggest size of strip
- s->rps = ((s->rps - 1) / s->subsampling[1] + 1) * s->subsampling[1]; // round rps up
+ // suggest size of strip
+ s->rps = FFMAX(8192 / (((s->width * s->bpp) >> 3) + 1), 1);
+ // round rps up
+ s->rps = ((s->rps - 1) / s->subsampling[1] + 1) * s->subsampling[1];
strips = (s->height - 1) / s->rps + 1;
- if ((ret = ff_alloc_packet2(avctx, pkt, avctx->width * avctx->height * s->bpp * 2 +
- avctx->height * 4 + FF_MIN_BUFFER_SIZE)) < 0)
+ if ((ret = ff_alloc_packet2(avctx, pkt,
+ avctx->width * avctx->height * s->bpp * 2 +
+ avctx->height * 4 + FF_MIN_BUFFER_SIZE)) < 0)
return ret;
ptr = pkt->data;
s->buf_start = pkt->data;
offset = ptr;
bytestream_put_le32(&ptr, 0);
- av_fast_padded_mallocz(&s->strip_sizes, &s->strip_sizes_size, sizeof(s->strip_sizes[0]) * strips);
+ av_fast_padded_mallocz(&s->strip_sizes , &s->strip_sizes_size , sizeof(s->strip_sizes [0]) * strips);
av_fast_padded_mallocz(&s->strip_offsets, &s->strip_offsets_size, sizeof(s->strip_offsets[0]) * strips);
if (!s->strip_sizes || !s->strip_offsets) {
goto fail;
}
- bytes_per_row = (((s->width - 1)/s->subsampling[0] + 1) * s->bpp
- * s->subsampling[0] * s->subsampling[1] + 7) >> 3;
- if (is_yuv){
+ bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp *
+ s->subsampling[0] * s->subsampling[1] + 7) >> 3;
+ if (is_yuv) {
av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, bytes_per_row);
- if (s->yuv_line == NULL){
+ if (s->yuv_line == NULL) {
av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
ret = AVERROR(ENOMEM);
goto fail;
goto fail;
}
s->strip_offsets[0] = ptr - pkt->data;
- zn = 0;
+ zn = 0;
for (j = 0; j < s->rps; j++) {
- if (is_yuv){
+ if (is_yuv) {
pack_yuv(s, s->yuv_line, j);
memcpy(zbuf + zn, s->yuv_line, bytes_per_row);
j += s->subsampling[1] - 1;
- }
- else
+ } else
memcpy(zbuf + j * bytes_per_row,
p->data[0] + j * p->linesize[0], bytes_per_row);
zn += bytes_per_row;
av_log(s->avctx, AV_LOG_ERROR, "Encode strip failed\n");
goto fail;
}
- ptr += ret;
+ ptr += ret;
s->strip_sizes[0] = ptr - pkt->data - s->strip_offsets[0];
} else
#endif
{
- if (s->compr == TIFF_LZW) {
- s->lzws = av_malloc(ff_lzw_encode_state_size);
- if (!s->lzws) {
- ret = AVERROR(ENOMEM);
- goto fail;
- }
+ if (s->compr == TIFF_LZW) {
+ s->lzws = av_malloc(ff_lzw_encode_state_size);
+ if (!s->lzws) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
}
- for (i = 0; i < s->height; i++) {
- if (s->strip_sizes[i / s->rps] == 0) {
- if(s->compr == TIFF_LZW){
- ff_lzw_encode_init(s->lzws, ptr, s->buf_size - (*s->buf - s->buf_start),
- 12, FF_LZW_TIFF, put_bits);
- }
- s->strip_offsets[i / s->rps] = ptr - pkt->data;
- }
- if (is_yuv){
- pack_yuv(s, s->yuv_line, i);
- ret = encode_strip(s, s->yuv_line, ptr, bytes_per_row, s->compr);
- i += s->subsampling[1] - 1;
- }
- else
- ret = encode_strip(s, p->data[0] + i * p->linesize[0],
- ptr, bytes_per_row, s->compr);
- if (ret < 0) {
- av_log(s->avctx, AV_LOG_ERROR, "Encode strip failed\n");
- goto fail;
- }
- s->strip_sizes[i / s->rps] += ret;
- ptr += ret;
- if(s->compr == TIFF_LZW && (i==s->height-1 || i%s->rps == s->rps-1)){
- ret = ff_lzw_encode_flush(s->lzws, flush_put_bits);
- s->strip_sizes[(i / s->rps )] += ret ;
- ptr += ret;
+ }
+ for (i = 0; i < s->height; i++) {
+ if (s->strip_sizes[i / s->rps] == 0) {
+ if (s->compr == TIFF_LZW) {
+ ff_lzw_encode_init(s->lzws, ptr,
+ s->buf_size - (*s->buf - s->buf_start),
+ 12, FF_LZW_TIFF, put_bits);
}
+ s->strip_offsets[i / s->rps] = ptr - pkt->data;
+ }
+ if (is_yuv) {
+ pack_yuv(s, s->yuv_line, i);
+ ret = encode_strip(s, s->yuv_line, ptr, bytes_per_row, s->compr);
+ i += s->subsampling[1] - 1;
+ } else
+ ret = encode_strip(s, p->data[0] + i * p->linesize[0],
+ ptr, bytes_per_row, s->compr);
+ if (ret < 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "Encode strip failed\n");
+ goto fail;
}
- if(s->compr == TIFF_LZW)
- av_free(s->lzws);
+ s->strip_sizes[i / s->rps] += ret;
+ ptr += ret;
+ if (s->compr == TIFF_LZW &&
+ (i == s->height - 1 || i % s->rps == s->rps - 1)) {
+ ret = ff_lzw_encode_flush(s->lzws, flush_put_bits);
+ s->strip_sizes[(i / s->rps)] += ret;
+ ptr += ret;
+ }
+ }
+ if (s->compr == TIFF_LZW)
+ av_free(s->lzws);
}
s->num_entries = 0;
- add_entry1(s,TIFF_SUBFILE, TIFF_LONG, 0);
- add_entry1(s,TIFF_WIDTH, TIFF_LONG, s->width);
- add_entry1(s,TIFF_HEIGHT, TIFF_LONG, s->height);
+ add_entry1(s, TIFF_SUBFILE, TIFF_LONG, 0);
+ add_entry1(s, TIFF_WIDTH, TIFF_LONG, s->width);
+ add_entry1(s, TIFF_HEIGHT, TIFF_LONG, s->height);
if (s->bpp_tab_size)
- add_entry(s, TIFF_BPP, TIFF_SHORT, s->bpp_tab_size, bpp_tab);
+ add_entry(s, TIFF_BPP, TIFF_SHORT, s->bpp_tab_size, bpp_tab);
- add_entry1(s,TIFF_COMPR, TIFF_SHORT, s->compr);
- add_entry1(s,TIFF_INVERT, TIFF_SHORT, s->photometric_interpretation);
- add_entry(s, TIFF_STRIP_OFFS, TIFF_LONG, strips, s->strip_offsets);
+ add_entry1(s, TIFF_COMPR, TIFF_SHORT, s->compr);
+ add_entry1(s, TIFF_INVERT, TIFF_SHORT, s->photometric_interpretation);
+ add_entry(s, TIFF_STRIP_OFFS, TIFF_LONG, strips, s->strip_offsets);
if (s->bpp_tab_size)
- add_entry1(s,TIFF_SAMPLES_PER_PIXEL, TIFF_SHORT, s->bpp_tab_size);
+ add_entry1(s, TIFF_SAMPLES_PER_PIXEL, TIFF_SHORT, s->bpp_tab_size);
- add_entry1(s,TIFF_ROWSPERSTRIP, TIFF_LONG, s->rps);
- add_entry(s, TIFF_STRIP_SIZE, TIFF_LONG, strips, s->strip_sizes);
- add_entry(s, TIFF_XRES, TIFF_RATIONAL, 1, res);
- add_entry(s, TIFF_YRES, TIFF_RATIONAL, 1, res);
- add_entry1(s,TIFF_RES_UNIT, TIFF_SHORT, 2);
+ add_entry1(s, TIFF_ROWSPERSTRIP, TIFF_LONG, s->rps);
+ add_entry(s, TIFF_STRIP_SIZE, TIFF_LONG, strips, s->strip_sizes);
+ add_entry(s, TIFF_XRES, TIFF_RATIONAL, 1, res);
+ add_entry(s, TIFF_YRES, TIFF_RATIONAL, 1, res);
+ add_entry1(s, TIFF_RES_UNIT, TIFF_SHORT, 2);
- if(!(avctx->flags & CODEC_FLAG_BITEXACT))
- add_entry(s, TIFF_SOFTWARE_NAME, TIFF_STRING,
- strlen(LIBAVCODEC_IDENT) + 1, LIBAVCODEC_IDENT);
+ if (!(avctx->flags & CODEC_FLAG_BITEXACT))
+ add_entry(s, TIFF_SOFTWARE_NAME, TIFF_STRING,
+ strlen(LIBAVCODEC_IDENT) + 1, LIBAVCODEC_IDENT);
if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
uint16_t pal[256 * 3];
for (i = 0; i < 256; i++) {
uint32_t rgb = *(uint32_t *) (p->data[1] + i * 4);
pal[i] = ((rgb >> 16) & 0xff) * 257;
- pal[i + 256] = ((rgb >> 8 ) & 0xff) * 257;
- pal[i + 512] = ( rgb & 0xff) * 257;
+ pal[i + 256] = ((rgb >> 8) & 0xff) * 257;
+ pal[i + 512] = (rgb & 0xff) * 257;
}
add_entry(s, TIFF_PAL, TIFF_SHORT, 256 * 3, pal);
}
if (alpha)
add_entry1(s,TIFF_EXTRASAMPLES, TIFF_SHORT, 2);
- if (is_yuv){
+ if (is_yuv) {
/** according to CCIR Recommendation 601.1 */
- uint32_t refbw[12] = {15, 1, 235, 1, 128, 1, 240, 1, 128, 1, 240, 1};
+ uint32_t refbw[12] = { 15, 1, 235, 1, 128, 1, 240, 1, 128, 1, 240, 1 };
add_entry(s, TIFF_YCBCR_SUBSAMPLING, TIFF_SHORT, 2, s->subsampling);
if (avctx->chroma_sample_location == AVCHROMA_LOC_TOPLEFT)
add_entry1(s, TIFF_YCBCR_POSITIONING, TIFF_SHORT, 2);
add_entry(s, TIFF_REFERENCE_BW, TIFF_RATIONAL, 6, refbw);
}
- bytestream_put_le32(&offset, ptr - pkt->data); // write offset to dir
+ // write offset to dir
+ bytestream_put_le32(&offset, ptr - pkt->data);
if (check_size(s, 6 + s->num_entries * 12)) {
ret = AVERROR(EINVAL);
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
{"dpi", "set the image resolution (in dpi)", OFFSET(dpi), AV_OPT_TYPE_INT, {.i64 = 72}, 1, 0x10000, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_ENCODING_PARAM},
- { "compression_algo", NULL, OFFSET(compr), AV_OPT_TYPE_INT, {.i64 = TIFF_PACKBITS}, TIFF_RAW, TIFF_DEFLATE, VE, "compression_algo" },
- { "packbits", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = TIFF_PACKBITS}, 0, 0, VE, "compression_algo" },
- { "raw", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = TIFF_RAW}, 0, 0, VE, "compression_algo" },
- { "lzw", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = TIFF_LZW}, 0, 0, VE, "compression_algo" },
+ { "compression_algo", NULL, OFFSET(compr), AV_OPT_TYPE_INT, { .i64 = TIFF_PACKBITS }, TIFF_RAW, TIFF_DEFLATE, VE, "compression_algo" },
+ { "packbits", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = TIFF_PACKBITS }, 0, 0, VE, "compression_algo" },
+ { "raw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = TIFF_RAW }, 0, 0, VE, "compression_algo" },
+ { "lzw", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = TIFF_LZW }, 0, 0, VE, "compression_algo" },
#if CONFIG_ZLIB
- { "deflate", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = TIFF_DEFLATE}, 0, 0, VE, "compression_algo" },
+ { "deflate", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = TIFF_DEFLATE }, 0, 0, VE, "compression_algo" },
#endif
{ NULL },
};
AVCodec ff_tiff_encoder = {
.name = "tiff",
+ .long_name = NULL_IF_CONFIG_SMALL("TIFF image"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_TIFF,
.priv_data_size = sizeof(TiffEncoderContext),
AV_PIX_FMT_RGBA, AV_PIX_FMT_RGBA64LE,
AV_PIX_FMT_NONE
},
- .long_name = NULL_IF_CONFIG_SMALL("TIFF image"),
.priv_class = &tiffenc_class,
};
#define BITSTREAM_READER_LE
#include <limits.h>
+#include "ttadata.h"
#include "avcodec.h"
#include "get_bits.h"
+#include "unary.h"
#include "internal.h"
#include "libavutil/crc.h"
#include "libavutil/intreadwrite.h"
#define FORMAT_SIMPLE 1
#define FORMAT_ENCRYPTED 2
-#define MAX_ORDER 16
-typedef struct TTAFilter {
- int32_t shift, round, error;
- int32_t qm[MAX_ORDER];
- int32_t dx[MAX_ORDER];
- int32_t dl[MAX_ORDER];
-} TTAFilter;
-
-typedef struct TTARice {
- uint32_t k0, k1, sum0, sum1;
-} TTARice;
-
-typedef struct TTAChannel {
- int32_t predictor;
- TTAFilter filter;
- TTARice rice;
-} TTAChannel;
-
typedef struct TTAContext {
AVClass *class;
AVCodecContext *avctx;
- GetBitContext gb;
const AVCRC *crc_table;
int format, channels, bps;
TTAChannel *ch_ctx;
} TTAContext;
-static const uint32_t shift_1[] = {
- 0x00000001, 0x00000002, 0x00000004, 0x00000008,
- 0x00000010, 0x00000020, 0x00000040, 0x00000080,
- 0x00000100, 0x00000200, 0x00000400, 0x00000800,
- 0x00001000, 0x00002000, 0x00004000, 0x00008000,
- 0x00010000, 0x00020000, 0x00040000, 0x00080000,
- 0x00100000, 0x00200000, 0x00400000, 0x00800000,
- 0x01000000, 0x02000000, 0x04000000, 0x08000000,
- 0x10000000, 0x20000000, 0x40000000, 0x80000000,
- 0x80000000, 0x80000000, 0x80000000, 0x80000000,
- 0x80000000, 0x80000000, 0x80000000, 0x80000000
-};
-
-static const uint32_t * const shift_16 = shift_1 + 4;
-
-static const int32_t ttafilter_configs[4] = {
- 10,
- 9,
- 10,
- 12
-};
-
-static void ttafilter_init(TTAContext *s, TTAFilter *c, int32_t shift) {
- memset(c, 0, sizeof(TTAFilter));
- if (s->format == FORMAT_ENCRYPTED) {
- int i;
- for (i = 0; i < 8; i++)
- c->qm[i] = sign_extend(s->crc_pass[i], 8);
- }
- c->shift = shift;
- c->round = shift_1[shift-1];
-// c->round = 1 << (shift - 1);
-}
-
static inline void ttafilter_process(TTAFilter *c, int32_t *in)
{
register int32_t *dl = c->dl, *qm = c->qm, *dx = c->dx, sum = c->round;
dl[5] += dl[6]; dl[4] += dl[5];
}
-static void rice_init(TTARice *c, uint32_t k0, uint32_t k1)
-{
- c->k0 = k0;
- c->k1 = k1;
- c->sum0 = shift_16[k0];
- c->sum1 = shift_16[k1];
-}
-
-static int tta_get_unary(GetBitContext *gb)
-{
- int ret = 0;
-
- // count ones
- while (get_bits_left(gb) > 0 && get_bits1(gb))
- ret++;
- return ret;
-}
-
static const int64_t tta_channel_layouts[7] = {
AV_CH_LAYOUT_STEREO,
AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY,
static av_cold int tta_decode_init(AVCodecContext * avctx)
{
TTAContext *s = avctx->priv_data;
+ GetBitContext gb;
int total_frames;
s->avctx = avctx;
return AVERROR_INVALIDDATA;
s->crc_table = av_crc_get_table(AV_CRC_32_IEEE_LE);
- init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8);
- if (show_bits_long(&s->gb, 32) == AV_RL32("TTA1"))
- {
+ init_get_bits(&gb, avctx->extradata, avctx->extradata_size * 8);
+ if (show_bits_long(&gb, 32) == AV_RL32("TTA1")) {
/* signature */
- skip_bits_long(&s->gb, 32);
+ skip_bits_long(&gb, 32);
- s->format = get_bits(&s->gb, 16);
+ s->format = get_bits(&gb, 16);
if (s->format > 2) {
av_log(avctx, AV_LOG_ERROR, "Invalid format\n");
return AVERROR_INVALIDDATA;
}
AV_WL64(s->crc_pass, tta_check_crc64(s->pass));
}
- avctx->channels = s->channels = get_bits(&s->gb, 16);
+ avctx->channels = s->channels = get_bits(&gb, 16);
if (s->channels > 1 && s->channels < 9)
avctx->channel_layout = tta_channel_layouts[s->channels-2];
- avctx->bits_per_raw_sample = get_bits(&s->gb, 16);
+ avctx->bits_per_raw_sample = get_bits(&gb, 16);
s->bps = (avctx->bits_per_raw_sample + 7) / 8;
- avctx->sample_rate = get_bits_long(&s->gb, 32);
- s->data_length = get_bits_long(&s->gb, 32);
- skip_bits_long(&s->gb, 32); // CRC32 of header
+ avctx->sample_rate = get_bits_long(&gb, 32);
+ s->data_length = get_bits_long(&gb, 32);
+ skip_bits_long(&gb, 32); // CRC32 of header
if (s->channels == 0) {
av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
TTAContext *s = avctx->priv_data;
+ GetBitContext gb;
int i, ret;
int cur_chan = 0, framelen = s->frame_length;
int32_t *p;
return AVERROR_INVALIDDATA;
}
- if ((ret = init_get_bits8(&s->gb, avpkt->data, avpkt->size)) < 0)
+ if ((ret = init_get_bits8(&gb, avpkt->data, avpkt->size)) < 0)
return ret;
/* get output buffer */
// init per channel states
for (i = 0; i < s->channels; i++) {
+ TTAFilter *filter = &s->ch_ctx[i].filter;
s->ch_ctx[i].predictor = 0;
- ttafilter_init(s, &s->ch_ctx[i].filter, ttafilter_configs[s->bps-1]);
- rice_init(&s->ch_ctx[i].rice, 10, 10);
+ ff_tta_filter_init(filter, ff_tta_filter_configs[s->bps-1]);
+ if (s->format == FORMAT_ENCRYPTED) {
+ int i;
+ for (i = 0; i < 8; i++)
+ filter->qm[i] = sign_extend(s->crc_pass[i], 8);
+ }
+ ff_tta_rice_init(&s->ch_ctx[i].rice, 10, 10);
}
i = 0;
uint32_t unary, depth, k;
int32_t value;
- unary = tta_get_unary(&s->gb);
+ unary = get_unary(&gb, 0, get_bits_left(&gb));
if (unary == 0) {
depth = 0;
unary--;
}
- if (get_bits_left(&s->gb) < k) {
+ if (get_bits_left(&gb) < k) {
ret = AVERROR_INVALIDDATA;
goto error;
}
ret = AVERROR_INVALIDDATA;
goto error;
}
- value = (unary << k) + get_bits(&s->gb, k);
+ value = (unary << k) + get_bits(&gb, k);
} else
value = unary;
switch (depth) {
case 1:
rice->sum1 += value - (rice->sum1 >> 4);
- if (rice->k1 > 0 && rice->sum1 < shift_16[rice->k1])
+ if (rice->k1 > 0 && rice->sum1 < ff_tta_shift_16[rice->k1])
rice->k1--;
- else if(rice->sum1 > shift_16[rice->k1 + 1])
+ else if(rice->sum1 > ff_tta_shift_16[rice->k1 + 1])
rice->k1++;
- value += shift_1[rice->k0];
+ value += ff_tta_shift_1[rice->k0];
default:
rice->sum0 += value - (rice->sum0 >> 4);
- if (rice->k0 > 0 && rice->sum0 < shift_16[rice->k0])
+ if (rice->k0 > 0 && rice->sum0 < ff_tta_shift_16[rice->k0])
rice->k0--;
- else if(rice->sum0 > shift_16[rice->k0 + 1])
+ else if(rice->sum0 > ff_tta_shift_16[rice->k0 + 1])
rice->k0++;
}
cur_chan = 0;
i++;
// check for last frame
- if (i == s->last_frame_length && get_bits_left(&s->gb) / 8 == 4) {
+ if (i == s->last_frame_length && get_bits_left(&gb) / 8 == 4) {
frame->nb_samples = framelen = s->last_frame_length;
break;
}
}
}
- align_get_bits(&s->gb);
- if (get_bits_left(&s->gb) < 32) {
+ align_get_bits(&gb);
+ if (get_bits_left(&gb) < 32) {
ret = AVERROR_INVALIDDATA;
goto error;
}
- skip_bits_long(&s->gb, 32); // frame crc
+ skip_bits_long(&gb, 32); // frame crc
// convert to output buffer
switch (s->bps) {
case 3: {
// shift samples for 24-bit sample format
int32_t *samples = (int32_t *)frame->data[0];
- for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++)
+ for (i = 0; i < framelen * s->channels; i++)
*samples++ <<= 8;
// reset decode buffer
s->decode_buffer = NULL;
--- /dev/null
+/*
+ * TTA (The Lossless True Audio) data
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "ttadata.h"
+
+const uint32_t ff_tta_shift_1[] = {
+ 0x00000001, 0x00000002, 0x00000004, 0x00000008,
+ 0x00000010, 0x00000020, 0x00000040, 0x00000080,
+ 0x00000100, 0x00000200, 0x00000400, 0x00000800,
+ 0x00001000, 0x00002000, 0x00004000, 0x00008000,
+ 0x00010000, 0x00020000, 0x00040000, 0x00080000,
+ 0x00100000, 0x00200000, 0x00400000, 0x00800000,
+ 0x01000000, 0x02000000, 0x04000000, 0x08000000,
+ 0x10000000, 0x20000000, 0x40000000, 0x80000000,
+ 0x80000000, 0x80000000, 0x80000000, 0x80000000,
+ 0x80000000, 0x80000000, 0x80000000, 0x80000000
+};
+
+const uint32_t * const ff_tta_shift_16 = ff_tta_shift_1 + 4;
+
+const uint8_t ff_tta_filter_configs[] = { 10, 9, 10, 12 };
+
+void ff_tta_rice_init(TTARice *c, uint32_t k0, uint32_t k1)
+{
+ c->k0 = k0;
+ c->k1 = k1;
+ c->sum0 = ff_tta_shift_16[k0];
+ c->sum1 = ff_tta_shift_16[k1];
+}
+
+void ff_tta_filter_init(TTAFilter *c, int32_t shift) {
+ memset(c, 0, sizeof(TTAFilter));
+ c->shift = shift;
+ c->round = ff_tta_shift_1[shift-1];
+}
--- /dev/null
+/*
+ * TTA (The Lossless True Audio) data
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_TTADATA_H
+#define AVCODEC_TTADATA_H
+
+#include "internal.h"
+
+#define MAX_ORDER 16
+typedef struct TTAFilter {
+ int32_t shift, round, error;
+ int32_t qm[MAX_ORDER];
+ int32_t dx[MAX_ORDER];
+ int32_t dl[MAX_ORDER];
+} TTAFilter;
+
+typedef struct TTARice {
+ uint32_t k0, k1, sum0, sum1;
+} TTARice;
+
+typedef struct TTAChannel {
+ int32_t predictor;
+ TTAFilter filter;
+ TTARice rice;
+} TTAChannel;
+
+extern const uint32_t ff_tta_shift_1[];
+extern const uint32_t * const ff_tta_shift_16;
+extern const uint8_t ff_tta_filter_configs[];
+
+void ff_tta_rice_init(TTARice *c, uint32_t k0, uint32_t k1);
+void ff_tta_filter_init(TTAFilter *c, int32_t shift);
+#endif /* AVCODEC_TTADATA_H */
--- /dev/null
+/*
+ * TTA (The Lossless True Audio) encoder
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define BITSTREAM_WRITER_LE
+#include "ttadata.h"
+#include "avcodec.h"
+#include "put_bits.h"
+#include "internal.h"
+#include "libavutil/crc.h"
+
+typedef struct TTAEncContext {
+ const AVCRC *crc_table;
+ int bps;
+ TTAChannel *ch_ctx;
+} TTAEncContext;
+
+static av_cold int tta_encode_init(AVCodecContext *avctx)
+{
+ TTAEncContext *s = avctx->priv_data;
+
+ s->crc_table = av_crc_get_table(AV_CRC_32_IEEE_LE);
+
+ switch (avctx->sample_fmt) {
+ case AV_SAMPLE_FMT_U8:
+ avctx->bits_per_raw_sample = 8;
+ break;
+ case AV_SAMPLE_FMT_S16:
+ avctx->bits_per_raw_sample = 16;
+ break;
+ case AV_SAMPLE_FMT_S32:
+ if (avctx->bits_per_raw_sample > 24)
+ av_log(avctx, AV_LOG_WARNING, "encoding as 24 bits-per-sample\n");
+ avctx->bits_per_raw_sample = 24;
+ }
+
+ s->bps = avctx->bits_per_raw_sample >> 3;
+ avctx->frame_size = 256 * avctx->sample_rate / 245;
+
+ s->ch_ctx = av_malloc(avctx->channels * sizeof(*s->ch_ctx));
+ if (!s->ch_ctx)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+static inline void ttafilter_process(TTAFilter *c, int32_t *in)
+{
+ register int32_t *dl = c->dl, *qm = c->qm, *dx = c->dx, sum = c->round;
+
+ if (c->error < 0) {
+ qm[0] -= dx[0]; qm[1] -= dx[1]; qm[2] -= dx[2]; qm[3] -= dx[3];
+ qm[4] -= dx[4]; qm[5] -= dx[5]; qm[6] -= dx[6]; qm[7] -= dx[7];
+ } else if (c->error > 0) {
+ qm[0] += dx[0]; qm[1] += dx[1]; qm[2] += dx[2]; qm[3] += dx[3];
+ qm[4] += dx[4]; qm[5] += dx[5]; qm[6] += dx[6]; qm[7] += dx[7];
+ }
+
+ sum += dl[0] * qm[0] + dl[1] * qm[1] + dl[2] * qm[2] + dl[3] * qm[3] +
+ dl[4] * qm[4] + dl[5] * qm[5] + dl[6] * qm[6] + dl[7] * qm[7];
+
+ dx[0] = dx[1]; dx[1] = dx[2]; dx[2] = dx[3]; dx[3] = dx[4];
+ dl[0] = dl[1]; dl[1] = dl[2]; dl[2] = dl[3]; dl[3] = dl[4];
+
+ dx[4] = ((dl[4] >> 30) | 1);
+ dx[5] = ((dl[5] >> 30) | 2) & ~1;
+ dx[6] = ((dl[6] >> 30) | 2) & ~1;
+ dx[7] = ((dl[7] >> 30) | 4) & ~3;
+
+ dl[4] = -dl[5]; dl[5] = -dl[6];
+ dl[6] = *in - dl[7]; dl[7] = *in;
+ dl[5] += dl[6]; dl[4] += dl[5];
+
+ *in -= (sum >> c->shift);
+ c->error = *in;
+}
+
+static int32_t get_sample(const AVFrame *frame, int sample,
+ enum AVSampleFormat format)
+{
+ int32_t ret;
+
+ if (format == AV_SAMPLE_FMT_U8) {
+ ret = frame->data[0][sample] - 0x80;
+ } else if (format == AV_SAMPLE_FMT_S16) {
+ const int16_t *ptr = (const int16_t *)frame->data[0];
+ ret = ptr[sample];
+ } else {
+ const int32_t *ptr = (const int32_t *)frame->data[0];
+ ret = ptr[sample] >> 8;
+ }
+
+ return ret;
+}
+
+static int tta_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
+ const AVFrame *frame, int *got_packet_ptr)
+{
+ TTAEncContext *s = avctx->priv_data;
+ PutBitContext pb;
+ int ret, i, out_bytes, cur_chan = 0, res = 0, samples = 0;
+
+ if ((ret = ff_alloc_packet2(avctx, avpkt, frame->nb_samples * 2 * s->bps)) < 0)
+ return ret;
+ init_put_bits(&pb, avpkt->data, avpkt->size);
+
+ // init per channel states
+ for (i = 0; i < avctx->channels; i++) {
+ s->ch_ctx[i].predictor = 0;
+ ff_tta_filter_init(&s->ch_ctx[i].filter, ff_tta_filter_configs[s->bps - 1]);
+ ff_tta_rice_init(&s->ch_ctx[i].rice, 10, 10);
+ }
+
+ for (i = 0; i < frame->nb_samples * avctx->channels; i++) {
+ TTAChannel *c = &s->ch_ctx[cur_chan];
+ TTAFilter *filter = &c->filter;
+ TTARice *rice = &c->rice;
+ uint32_t k, unary, outval;
+ int32_t value, temp;
+
+ value = get_sample(frame, samples++, avctx->sample_fmt);
+
+ if (avctx->channels > 1) {
+ if (cur_chan < avctx->channels - 1)
+ value = res = get_sample(frame, samples, avctx->sample_fmt) - value;
+ else
+ value -= res / 2;
+ }
+
+ temp = value;
+#define PRED(x, k) ((int32_t)((((uint64_t)(x) << (k)) - (x)) >> (k)))
+ switch (s->bps) {
+ case 1: value -= PRED(c->predictor, 4); break;
+ case 2:
+ case 3: value -= PRED(c->predictor, 5); break;
+ }
+ c->predictor = temp;
+
+ ttafilter_process(filter, &value);
+        outval = (value > 0) ? (value << 1) - 1 : -value << 1;
+
+ k = rice->k0;
+
+ rice->sum0 += outval - (rice->sum0 >> 4);
+ if (rice->k0 > 0 && rice->sum0 < ff_tta_shift_16[rice->k0])
+ rice->k0--;
+ else if (rice->sum0 > ff_tta_shift_16[rice->k0 + 1])
+ rice->k0++;
+
+ if (outval >= ff_tta_shift_1[k]) {
+ outval -= ff_tta_shift_1[k];
+ k = rice->k1;
+
+ rice->sum1 += outval - (rice->sum1 >> 4);
+ if (rice->k1 > 0 && rice->sum1 < ff_tta_shift_16[rice->k1])
+ rice->k1--;
+ else if (rice->sum1 > ff_tta_shift_16[rice->k1 + 1])
+ rice->k1++;
+
+ unary = 1 + (outval >> k);
+ do {
+ if (unary > 31) {
+ put_bits(&pb, 31, 0x7FFFFFFF);
+ unary -= 31;
+ } else {
+                    put_bits(&pb, unary, (1U << unary) - 1);
+ unary = 0;
+ }
+ } while (unary);
+ }
+
+ put_bits(&pb, 1, 0);
+
+ if (k)
+ put_bits(&pb, k, outval & (ff_tta_shift_1[k] - 1));
+
+ if (cur_chan < avctx->channels - 1)
+ cur_chan++;
+ else
+ cur_chan = 0;
+ }
+
+ flush_put_bits(&pb);
+ out_bytes = put_bits_count(&pb) >> 3;
+ put_bits32(&pb, av_crc(s->crc_table, UINT32_MAX, avpkt->data, out_bytes) ^ UINT32_MAX);
+ flush_put_bits(&pb);
+
+ avpkt->pts = frame->pts;
+ avpkt->size = out_bytes + 4;
+ avpkt->duration = ff_samples_to_time_base(avctx, frame->nb_samples);
+ *got_packet_ptr = 1;
+ return 0;
+}
+
+static av_cold int tta_encode_close(AVCodecContext *avctx)
+{
+ TTAEncContext *s = avctx->priv_data;
+ av_freep(&s->ch_ctx);
+ return 0;
+}
+
+AVCodec ff_tta_encoder = {
+ .name = "tta",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_TTA,
+ .priv_data_size = sizeof(TTAEncContext),
+ .init = tta_encode_init,
+ .close = tta_encode_close,
+ .encode2 = tta_encode_frame,
+ .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_LOSSLESS,
+ .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_U8,
+ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_S32,
+ AV_SAMPLE_FMT_NONE },
+ .long_name = NULL_IF_CONFIG_SMALL("TTA (True Audio)"),
+};
case AV_CODEC_ID_OPUS_DEPRECATED: return AV_CODEC_ID_OPUS;
case AV_CODEC_ID_TAK_DEPRECATED : return AV_CODEC_ID_TAK;
case AV_CODEC_ID_ESCAPE130_DEPRECATED : return AV_CODEC_ID_ESCAPE130;
+ case AV_CODEC_ID_G2M_DEPRECATED : return AV_CODEC_ID_G2M;
default : return id;
}
}
#include "libavutil/avutil.h"
#define LIBAVCODEC_VERSION_MAJOR 55
-#define LIBAVCODEC_VERSION_MINOR 12
-#define LIBAVCODEC_VERSION_MICRO 102
+#define LIBAVCODEC_VERSION_MINOR 15
+#define LIBAVCODEC_VERSION_MICRO 100
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
LIBAVCODEC_VERSION_MINOR, \
int src_count, int src_size, int dest_len)
{
unsigned char *pd;
- int i, j, l;
+ int i, l, used = 0;
unsigned char *dest_end = dest + dest_len;
GetByteContext gb;
+ uint16_t run_val;
bytestream2_init(&gb, src, src_size);
pd = dest;
if (bytestream2_get_bytes_left(&gb) < 1)
return 0;
*pd++ = bytestream2_get_byteu(&gb);
+ used++;
}
- src_count >>= 1;
- i = 0;
do {
if (bytestream2_get_bytes_left(&gb) < 1)
break;
bytestream2_get_bufferu(&gb, pd, l);
pd += l;
} else {
- int ps[2];
if (dest_end - pd < 2*l || bytestream2_get_bytes_left(&gb) < 2)
return bytestream2_tell(&gb);
- ps[0] = bytestream2_get_byteu(&gb);
- ps[1] = bytestream2_get_byteu(&gb);
- for (j = 0; j < l; j++) {
- *pd++ = ps[0];
- *pd++ = ps[1];
+ run_val = bytestream2_get_ne16(&gb);
+ for (i = 0; i < l; i++) {
+ AV_WN16(pd, run_val);
+ pd += 2;
}
+ l *= 2;
}
- i += l;
- } while (i < src_count);
+ used += l;
+ } while (used < src_count);
return bytestream2_tell(&gb);
}
return AVERROR_INVALIDDATA;
meth = bytestream2_get_byteu(&gb);
if (meth & 0x80) {
+ if (!s->unpack_buffer_size) {
+ av_log(s->avctx, AV_LOG_ERROR,
+ "Trying to unpack LZ-compressed frame with no LZ buffer\n");
+ return AVERROR_INVALIDDATA;
+ }
lz_unpack(gb.buffer, bytestream2_get_bytes_left(&gb),
s->unpack_buffer, s->unpack_buffer_size);
meth &= 0x7F;
len = bytestream2_get_byte(&gb);
if (len & 0x80) {
len = (len & 0x7F) + 1;
- if (bytestream2_get_byte(&gb) == 0xFF)
+ if (bytestream2_peek_byte(&gb) == 0xFF) {
+ int slen = len;
+ bytestream2_get_byte(&gb);
len = rle_unpack(gb.buffer, &dp[ofs],
len, bytestream2_get_bytes_left(&gb),
frame_width - ofs);
- else
+ ofs += slen;
+ bytestream2_skip(&gb, len);
+ } else {
bytestream2_get_buffer(&gb, &dp[ofs], len);
- bytestream2_skip(&gb, len);
+ ofs += len;
+ }
} else {
/* interframe pixel copy */
if (ofs + len + 1 > frame_width || !s->prev_frame.data[0])
vmd_header = (unsigned char *)avctx->extradata;
s->unpack_buffer_size = AV_RL32(&vmd_header[800]);
- s->unpack_buffer = av_malloc(s->unpack_buffer_size);
- if (!s->unpack_buffer)
- return AVERROR(ENOMEM);
+ if (s->unpack_buffer_size) {
+ s->unpack_buffer = av_malloc(s->unpack_buffer_size);
+ if (!s->unpack_buffer)
+ return AVERROR(ENOMEM);
+ }
/* load up the initial palette */
raw_palette = &vmd_header[28];
/* get output buffer */
frame->nb_samples = s->samples + 1;
- if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
- av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
return ret;
- }
frame->nb_samples = s->samples;
}
FFLIBS-$(CONFIG_ATEMPO_FILTER) += avcodec
FFLIBS-$(CONFIG_DECIMATE_FILTER) += avcodec
FFLIBS-$(CONFIG_DESHAKE_FILTER) += avcodec
+FFLIBS-$(CONFIG_MCDEINT_FILTER) += avcodec
FFLIBS-$(CONFIG_MOVIE_FILTER) += avformat avcodec
FFLIBS-$(CONFIG_MP_FILTER) += avcodec
FFLIBS-$(CONFIG_PAN_FILTER) += swresample
OBJS-$(CONFIG_LUT_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUTRGB_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUTYUV_FILTER) += vf_lut.o
+OBJS-$(CONFIG_MCDEINT_FILTER) += vf_mcdeint.o
OBJS-$(CONFIG_MP_FILTER) += vf_mp.o
OBJS-$(CONFIG_MPDECIMATE_FILTER) += vf_mpdecimate.o
OBJS-$(CONFIG_NEGATE_FILTER) += vf_lut.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_fil.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_fspp.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_ilpack.o
-OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_mcdeint.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_perspective.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_phase.o
OBJS-$(CONFIG_MP_FILTER) += libmpcodecs/vf_pp7.o
return 0;
}
-static inline void stat(AudioStatsContext *s, ChannelStats *p, double d)
+static inline void update_stat(AudioStatsContext *s, ChannelStats *p, double d)
{
if (d < p->min) {
p->min = d;
src = (const double *)buf->extended_data[c];
for (i = 0; i < buf->nb_samples; i++, src++)
- stat(s, p, *src);
+ update_stat(s, p, *src);
}
break;
case AV_SAMPLE_FMT_DBL:
for (i = 0; i < buf->nb_samples; i++) {
for (c = 0; c < channels; c++, src++)
- stat(s, &s->chstats[c], *src);
+ update_stat(s, &s->chstats[c], *src);
}
break;
}
REGISTER_FILTER(LUT, lut, vf);
REGISTER_FILTER(LUTRGB, lutrgb, vf);
REGISTER_FILTER(LUTYUV, lutyuv, vf);
+ REGISTER_FILTER(MCDEINT, mcdeint, vf);
REGISTER_FILTER(MP, mp, vf);
REGISTER_FILTER(MPDECIMATE, mpdecimate, vf);
REGISTER_FILTER(NEGATE, negate, vf);
if (av_opt_find(ctx, key, NULL, 0, 0)) {
ret = av_opt_set(ctx, key, value, 0);
- if (ret < 0)
+ if (ret < 0) {
+ av_free(value);
+ av_free(parsed_key);
return ret;
+ }
} else {
av_dict_set(options, key, value, 0);
if ((ret = av_opt_set(ctx->priv, key, value, 0)) < 0) {
+++ /dev/null
-/*
- * Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
- *
- * This file is part of MPlayer.
- *
- * MPlayer is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * MPlayer is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with MPlayer; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-
-/*
-Known Issues:
-* The motion estimation is somewhat at the mercy of the input, if the input
- frames are created purely based on spatial interpolation then for example
- a thin black line or another random and not interpolateable pattern
- will cause problems
- Note: completly ignoring the "unavailable" lines during motion estimation
- didnt look any better, so the most obvious solution would be to improve
- tfields or penalize problematic motion vectors ...
-
-* If non iterative ME is used then snow currently ignores the OBMC window
- and as a result sometimes creates artifacts
-
-* only past frames are used, we should ideally use future frames too, something
- like filtering the whole movie in forward and then backward direction seems
- like a interresting idea but the current filter framework is FAR from
- supporting such things
-
-* combining the motion compensated image with the input image also isnt
- as trivial as it seems, simple blindly taking even lines from one and
- odd ones from the other doesnt work at all as ME/MC sometimes simple
- has nothing in the previous frames which matches the current, the current
- algo has been found by trial and error and almost certainly can be
- improved ...
-*/
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <inttypes.h>
-#include <math.h>
-
-#include "mp_msg.h"
-#include "cpudetect.h"
-
-#include "libavutil/common.h"
-#include "libavutil/internal.h"
-#include "libavutil/intreadwrite.h"
-#include "libavcodec/avcodec.h"
-#include "libavcodec/dsputil.h"
-
-#undef fprintf
-#undef free
-#undef malloc
-
-#include "img_format.h"
-#include "mp_image.h"
-#include "vf.h"
-#include "av_helpers.h"
-
-#define MIN(a,b) ((a) > (b) ? (b) : (a))
-#define MAX(a,b) ((a) < (b) ? (b) : (a))
-#define ABS(a) ((a) > 0 ? (a) : (-(a)))
-
-//===========================================================================//
-
-struct vf_priv_s {
- int mode;
- int qp;
- int parity;
-#if 0
- int temp_stride[3];
- uint8_t *src[3];
- int16_t *temp[3];
-#endif
- int outbuf_size;
- uint8_t *outbuf;
- AVCodecContext *avctx_enc;
- AVFrame *frame;
- AVFrame *frame_dec;
-};
-
-static void filter(struct vf_priv_s *p, uint8_t *dst[3], uint8_t *src[3], int dst_stride[3], int src_stride[3], int width, int height){
- int x, y, i;
-
- for(i=0; i<3; i++){
- p->frame->data[i]= src[i];
- p->frame->linesize[i]= src_stride[i];
- }
-
- p->avctx_enc->me_cmp=
- p->avctx_enc->me_sub_cmp= FF_CMP_SAD /*| (p->parity ? FF_CMP_ODD : FF_CMP_EVEN)*/;
- p->frame->quality= p->qp*FF_QP2LAMBDA;
- avcodec_encode_video(p->avctx_enc, p->outbuf, p->outbuf_size, p->frame);
- p->frame_dec = p->avctx_enc->coded_frame;
-
- for(i=0; i<3; i++){
- int is_chroma= !!i;
- int w= width >>is_chroma;
- int h= height>>is_chroma;
- int fils= p->frame_dec->linesize[i];
- int srcs= src_stride[i];
-
- for(y=0; y<h; y++){
- if((y ^ p->parity) & 1){
- for(x=0; x<w; x++){
- if((x-2)+(y-1)*w>=0 && (x+2)+(y+1)*w<w*h){ //FIXME either alloc larger images or optimize this
- uint8_t *filp= &p->frame_dec->data[i][x + y*fils];
- uint8_t *srcp= &src[i][x + y*srcs];
- int diff0= filp[-fils] - srcp[-srcs];
- int diff1= filp[+fils] - srcp[+srcs];
- int spatial_score= ABS(srcp[-srcs-1] - srcp[+srcs-1])
- +ABS(srcp[-srcs ] - srcp[+srcs ])
- +ABS(srcp[-srcs+1] - srcp[+srcs+1]) - 1;
- int temp= filp[0];
-
-#define CHECK(j)\
- { int score= ABS(srcp[-srcs-1+(j)] - srcp[+srcs-1-(j)])\
- + ABS(srcp[-srcs +(j)] - srcp[+srcs -(j)])\
- + ABS(srcp[-srcs+1+(j)] - srcp[+srcs+1-(j)]);\
- if(score < spatial_score){\
- spatial_score= score;\
- diff0= filp[-fils+(j)] - srcp[-srcs+(j)];\
- diff1= filp[+fils-(j)] - srcp[+srcs-(j)];
-
- CHECK(-1) CHECK(-2) }} }}
- CHECK( 1) CHECK( 2) }} }}
-#if 0
- if((diff0 ^ diff1) > 0){
- int mindiff= ABS(diff0) > ABS(diff1) ? diff1 : diff0;
- temp-= mindiff;
- }
-#elif 1
- if(diff0 + diff1 > 0)
- temp-= (diff0 + diff1 - ABS( ABS(diff0) - ABS(diff1) )/2)/2;
- else
- temp-= (diff0 + diff1 + ABS( ABS(diff0) - ABS(diff1) )/2)/2;
-#else
- temp-= (diff0 + diff1)/2;
-#endif
-#if 1
- filp[0]=
- dst[i][x + y*dst_stride[i]]= temp > 255U ? ~(temp>>31) : temp;
-#else
- dst[i][x + y*dst_stride[i]]= filp[0];
- filp[0]= temp > 255U ? ~(temp>>31) : temp;
-#endif
- }else
- dst[i][x + y*dst_stride[i]]= p->frame_dec->data[i][x + y*fils];
- }
- }
- }
- for(y=0; y<h; y++){
- if(!((y ^ p->parity) & 1)){
- for(x=0; x<w; x++){
-#if 1
- p->frame_dec->data[i][x + y*fils]=
- dst[i][x + y*dst_stride[i]]= src[i][x + y*srcs];
-#else
- dst[i][x + y*dst_stride[i]]= p->frame_dec->data[i][x + y*fils];
- p->frame_dec->data[i][x + y*fils]= src[i][x + y*srcs];
-#endif
- }
- }
- }
- }
- p->parity ^= 1;
-
-}
-
-static int config(struct vf_instance *vf,
- int width, int height, int d_width, int d_height,
- unsigned int flags, unsigned int outfmt){
- int i;
- AVCodec *enc= avcodec_find_encoder(AV_CODEC_ID_SNOW);
-
- for(i=0; i<3; i++){
- AVCodecContext *avctx_enc;
- AVDictionary *opts = NULL;
-#if 0
- int is_chroma= !!i;
- int w= ((width + 31) & (~31))>>is_chroma;
- int h= ((height + 31) & (~31))>>is_chroma;
-
- vf->priv->temp_stride[i]= w;
- vf->priv->temp[i]= malloc(vf->priv->temp_stride[i]*h*sizeof(int16_t));
- vf->priv->src [i]= malloc(vf->priv->temp_stride[i]*h*sizeof(uint8_t));
-#endif
- avctx_enc=
- vf->priv->avctx_enc= avcodec_alloc_context3(enc);
- avctx_enc->width = width;
- avctx_enc->height = height;
- avctx_enc->time_base= (AVRational){1,25}; // meaningless
- avctx_enc->gop_size = 300;
- avctx_enc->max_b_frames= 0;
- avctx_enc->pix_fmt = AV_PIX_FMT_YUV420P;
- avctx_enc->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
- avctx_enc->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
- avctx_enc->global_quality= 1;
- av_dict_set(&opts, "memc_only", "1", 0);
- avctx_enc->me_cmp=
- avctx_enc->me_sub_cmp= FF_CMP_SAD; //SSE;
- avctx_enc->mb_cmp= FF_CMP_SSE;
-
- switch(vf->priv->mode){
- case 3:
- avctx_enc->refs= 3;
- case 2:
- avctx_enc->me_method= ME_ITER;
- case 1:
- avctx_enc->flags |= CODEC_FLAG_4MV;
- avctx_enc->dia_size=2;
-// avctx_enc->mb_decision = MB_DECISION_RD;
- case 0:
- avctx_enc->flags |= CODEC_FLAG_QPEL;
- }
-
- avcodec_open2(avctx_enc, enc, &opts);
- av_dict_free(&opts);
-
- }
- vf->priv->frame= avcodec_alloc_frame();
-
- vf->priv->outbuf_size= width*height*10;
- vf->priv->outbuf= malloc(vf->priv->outbuf_size);
-
- return ff_vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
-}
-
-static void get_image(struct vf_instance *vf, mp_image_t *mpi){
- if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change
-return; //caused problems, dunno why
- // ok, we can do pp in-place (or pp disabled):
- vf->dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
- mpi->type, mpi->flags | MP_IMGFLAG_READABLE, mpi->width, mpi->height);
- mpi->planes[0]=vf->dmpi->planes[0];
- mpi->stride[0]=vf->dmpi->stride[0];
- mpi->width=vf->dmpi->width;
- if(mpi->flags&MP_IMGFLAG_PLANAR){
- mpi->planes[1]=vf->dmpi->planes[1];
- mpi->planes[2]=vf->dmpi->planes[2];
- mpi->stride[1]=vf->dmpi->stride[1];
- mpi->stride[2]=vf->dmpi->stride[2];
- }
- mpi->flags|=MP_IMGFLAG_DIRECT;
-}
-
-static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts){
- mp_image_t *dmpi;
-
- if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
- // no DR, so get a new image! hope we'll get DR buffer:
- dmpi=ff_vf_get_image(vf->next,mpi->imgfmt,
- MP_IMGTYPE_TEMP,
- MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE,
- mpi->width,mpi->height);
- ff_vf_clone_mpi_attributes(dmpi, mpi);
- }else{
- dmpi=vf->dmpi;
- }
-
- filter(vf->priv, dmpi->planes, mpi->planes, dmpi->stride, mpi->stride, mpi->w, mpi->h);
-
- return ff_vf_next_put_image(vf,dmpi, pts);
-}
-
-static void uninit(struct vf_instance *vf){
- if(!vf->priv) return;
-
-#if 0
- for(i=0; i<3; i++){
- free(vf->priv->temp[i]);
- vf->priv->temp[i]= NULL;
- free(vf->priv->src[i]);
- vf->priv->src[i]= NULL;
- }
-#endif
- if (vf->priv->avctx_enc) {
- avcodec_close(vf->priv->avctx_enc);
- av_freep(&vf->priv->avctx_enc);
- }
-
- free(vf->priv->outbuf);
- free(vf->priv);
- vf->priv=NULL;
-}
-
-//===========================================================================//
-static int query_format(struct vf_instance *vf, unsigned int fmt){
- switch(fmt){
- case IMGFMT_YV12:
- case IMGFMT_I420:
- case IMGFMT_IYUV:
- case IMGFMT_Y800:
- case IMGFMT_Y8:
- return ff_vf_next_query_format(vf,fmt);
- }
- return 0;
-}
-
-static int vf_open(vf_instance_t *vf, char *args){
-
- vf->config=config;
- vf->put_image=put_image;
- vf->get_image=get_image;
- vf->query_format=query_format;
- vf->uninit=uninit;
- vf->priv=malloc(sizeof(struct vf_priv_s));
- memset(vf->priv, 0, sizeof(struct vf_priv_s));
-
- ff_init_avcodec();
-
- vf->priv->mode=0;
- vf->priv->parity= -1;
- vf->priv->qp=1;
-
- if (args) sscanf(args, "%d:%d:%d", &vf->priv->mode, &vf->priv->parity, &vf->priv->qp);
-
- return 1;
-}
-
-const vf_info_t ff_vf_info_mcdeint = {
- "motion compensating deinterlacer",
- "mcdeint",
- "Michael Niedermayer",
- "",
- vf_open,
- NULL
-};
drop = 0;
if (drop) {
- s->eof = 1;
+ s->eof = inlink->closed = 1;
goto drop;
}
}
}
if (drop) {
- s->eof = 1;
+ s->eof = inlink->closed = 1;
goto drop;
}
}
#include "libavutil/avutil.h"
#define LIBAVFILTER_VERSION_MAJOR 3
-#define LIBAVFILTER_VERSION_MINOR 73
-#define LIBAVFILTER_VERSION_MICRO 100
+#define LIBAVFILTER_VERSION_MINOR 74
+#define LIBAVFILTER_VERSION_MICRO 101
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
LIBAVFILTER_VERSION_MINOR, \
--- /dev/null
+/*
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Motion Compensation Deinterlacer
+ * Ported from MPlayer libmpcodecs/vf_mcdeint.c.
+ *
+ * Known Issues:
+ *
+ * The motion estimation is somewhat at the mercy of the input, if the
+ * input frames are created purely based on spatial interpolation then
+ * for example a thin black line or another random and not
+ * interpolateable pattern will cause problems.
+ * Note: completely ignoring the "unavailable" lines during motion
+ * estimation did not look any better, so the most obvious solution
+ * would be to improve tfields or penalize problematic motion vectors.
+ *
+ * If non iterative ME is used then snow currently ignores the OBMC
+ * window and as a result sometimes creates artifacts.
+ *
+ * Only past frames are used, we should ideally use future frames too,
+ * something like filtering the whole movie in forward and then
+ * backward direction seems like an interesting idea but the current
+ * filter framework is FAR from supporting such things.
+ *
+ * Combining the motion compensated image with the input image also is
+ * not as trivial as it seems, simply blindly taking even lines from
+ * one and odd ones from the other does not work at all as ME/MC
+ * sometimes has nothing in the previous frames which matches the
+ * current. The current algorithm has been found by trial and error
+ * and almost certainly can be improved...
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavcodec/avcodec.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+
+enum MCDeintMode {
+ MODE_FAST = 0,
+ MODE_MEDIUM,
+ MODE_SLOW,
+ MODE_EXTRA_SLOW,
+ MODE_NB,
+};
+
+enum MCDeintParity {
+ PARITY_TFF = 0, ///< top field first
+ PARITY_BFF = 1, ///< bottom field first
+};
+
+typedef struct {
+ const AVClass *class;
+ enum MCDeintMode mode;
+ enum MCDeintParity parity;
+ int qp;
+ AVCodecContext *enc_ctx;
+} MCDeintContext;
+
+#define OFFSET(x) offsetof(MCDeintContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }
+
+static const AVOption mcdeint_options[] = {
+ { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_FAST}, 0, MODE_NB-1, FLAGS, .unit="mode" },
+ CONST("fast", NULL, MODE_FAST, "mode"),
+ CONST("medium", NULL, MODE_MEDIUM, "mode"),
+ CONST("slow", NULL, MODE_SLOW, "mode"),
+ CONST("extra_slow", NULL, MODE_EXTRA_SLOW, "mode"),
+
+ { "parity", "set the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=PARITY_BFF}, -1, 1, FLAGS, "parity" },
+ CONST("tff", "assume top field first", PARITY_TFF, "parity"),
+ CONST("bff", "assume bottom field first", PARITY_BFF, "parity"),
+
+ { "qp", "set qp", OFFSET(qp), AV_OPT_TYPE_INT, {.i64=1}, INT_MIN, INT_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(mcdeint);
+
+static int config_props(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ MCDeintContext *mcdeint = ctx->priv;
+ AVCodec *enc;
+ AVCodecContext *enc_ctx;
+ AVDictionary *opts = NULL;
+ int ret;
+
+ if (!(enc = avcodec_find_encoder(AV_CODEC_ID_SNOW))) {
+ av_log(ctx, AV_LOG_ERROR, "Snow encoder is not enabled in libavcodec\n");
+ return AVERROR(EINVAL);
+ }
+
+ mcdeint->enc_ctx = avcodec_alloc_context3(enc);
+ if (!mcdeint->enc_ctx)
+ return AVERROR(ENOMEM);
+ enc_ctx = mcdeint->enc_ctx;
+ enc_ctx->width = inlink->w;
+ enc_ctx->height = inlink->h;
+ enc_ctx->time_base = (AVRational){1,25}; // meaningless
+ enc_ctx->gop_size = 300;
+ enc_ctx->max_b_frames = 0;
+ enc_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
+ enc_ctx->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
+ enc_ctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
+ enc_ctx->global_quality = 1;
+ enc_ctx->me_cmp = enc_ctx->me_sub_cmp = FF_CMP_SAD;
+ enc_ctx->mb_cmp = FF_CMP_SSE;
+ av_dict_set(&opts, "memc_only", "1", 0);
+
+ switch (mcdeint->mode) {
+ case MODE_EXTRA_SLOW:
+ enc_ctx->refs = 3;
+ case MODE_SLOW:
+ enc_ctx->me_method = ME_ITER;
+ case MODE_MEDIUM:
+ enc_ctx->flags |= CODEC_FLAG_4MV;
+ enc_ctx->dia_size = 2;
+ case MODE_FAST:
+ enc_ctx->flags |= CODEC_FLAG_QPEL;
+ }
+
+ ret = avcodec_open2(enc_ctx, enc, &opts);
+ av_dict_free(&opts);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ MCDeintContext *mcdeint = ctx->priv;
+
+ if (mcdeint->enc_ctx) {
+ avcodec_close(mcdeint->enc_ctx);
+ av_freep(&mcdeint->enc_ctx);
+ }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum PixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
+{
+ MCDeintContext *mcdeint = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *outpic, *frame_dec;
+ AVPacket pkt;
+ int x, y, i, ret, got_frame = 0;
+
+ outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpic) {
+ av_frame_free(&inpic);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(outpic, inpic);
+ inpic->quality = mcdeint->qp * FF_QP2LAMBDA;
+
+ av_init_packet(&pkt);
+ pkt.data = NULL; // packet data will be allocated by the encoder
+ pkt.size = 0;
+
+ ret = avcodec_encode_video2(mcdeint->enc_ctx, &pkt, inpic, &got_frame);
+ if (ret < 0)
+ goto end;
+
+ frame_dec = mcdeint->enc_ctx->coded_frame;
+
+ for (i = 0; i < 3; i++) {
+ int is_chroma = !!i;
+ int w = FF_CEIL_RSHIFT(inlink->w, is_chroma);
+ int h = FF_CEIL_RSHIFT(inlink->h, is_chroma);
+ int fils = frame_dec->linesize[i];
+ int srcs = inpic ->linesize[i];
+ int dsts = outpic ->linesize[i];
+
+ for (y = 0; y < h; y++) {
+ if ((y ^ mcdeint->parity) & 1) {
+ for (x = 0; x < w; x++) {
+ uint8_t *filp = &frame_dec->data[i][x + y*fils];
+ uint8_t *srcp = &inpic ->data[i][x + y*srcs];
+ uint8_t *dstp = &outpic ->data[i][x + y*dsts];
+
+ if (y > 0 && y < h-1){
+ int is_edge = x < 3 || x > w-4;
+ int diff0 = filp[-fils] - srcp[-srcs];
+ int diff1 = filp[+fils] - srcp[+srcs];
+ int temp = filp[0];
+
+#define DELTA(j) av_clip(j, -x, w-1-x)
+
+#define GET_SCORE_EDGE(j)\
+ FFABS(srcp[-srcs+DELTA(-1+(j))] - srcp[+srcs+DELTA(-1-(j))])+\
+ FFABS(srcp[-srcs+DELTA(j) ] - srcp[+srcs+DELTA( -(j))])+\
+ FFABS(srcp[-srcs+DELTA(1+(j)) ] - srcp[+srcs+DELTA( 1-(j))])
+
+#define GET_SCORE(j)\
+ FFABS(srcp[-srcs-1+(j)] - srcp[+srcs-1-(j)])+\
+ FFABS(srcp[-srcs +(j)] - srcp[+srcs -(j)])+\
+ FFABS(srcp[-srcs+1+(j)] - srcp[+srcs+1-(j)])
+
+#define CHECK_EDGE(j)\
+ { int score = GET_SCORE_EDGE(j);\
+ if (score < spatial_score){\
+ spatial_score = score;\
+ diff0 = filp[-fils+DELTA(j)] - srcp[-srcs+DELTA(j)];\
+ diff1 = filp[+fils+DELTA(-(j))] - srcp[+srcs+DELTA(-(j))];\
+
+#define CHECK(j)\
+ { int score = GET_SCORE(j);\
+ if (score < spatial_score){\
+ spatial_score= score;\
+ diff0 = filp[-fils+(j)] - srcp[-srcs+(j)];\
+ diff1 = filp[+fils-(j)] - srcp[+srcs-(j)];\
+
+ if (is_edge) {
+ int spatial_score = GET_SCORE_EDGE(0) - 1;
+ CHECK_EDGE(-1) CHECK_EDGE(-2) }} }}
+ CHECK_EDGE( 1) CHECK_EDGE( 2) }} }}
+ } else {
+ int spatial_score = GET_SCORE(0) - 1;
+ CHECK(-1) CHECK(-2) }} }}
+ CHECK( 1) CHECK( 2) }} }}
+ }
+
+
+ if (diff0 + diff1 > 0)
+ temp -= (diff0 + diff1 - FFABS(FFABS(diff0) - FFABS(diff1)) / 2) / 2;
+ else
+ temp -= (diff0 + diff1 + FFABS(FFABS(diff0) - FFABS(diff1)) / 2) / 2;
+ *filp = *dstp = temp > 255U ? ~(temp>>31) : temp;
+ } else {
+ *dstp = *filp;
+ }
+ }
+ }
+ }
+
+ for (y = 0; y < h; y++) {
+ if (!((y ^ mcdeint->parity) & 1)) {
+ for (x = 0; x < w; x++) {
+ frame_dec->data[i][x + y*fils] =
+ outpic ->data[i][x + y*dsts] = inpic->data[i][x + y*srcs];
+ }
+ }
+ }
+ }
+ mcdeint->parity ^= 1;
+
+end:
+ av_free_packet(&pkt);
+ av_frame_free(&inpic);
+ if (ret < 0) {
+ av_frame_free(&outpic);
+ return ret;
+ }
+ return ff_filter_frame(outlink, outpic);
+}
+
+static const AVFilterPad mcdeint_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+static const AVFilterPad mcdeint_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter avfilter_vf_mcdeint = {
+ .name = "mcdeint",
+ .description = NULL_IF_CONFIG_SMALL("Apply motion compensating deinterlacing."),
+ .priv_size = sizeof(MCDeintContext),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = mcdeint_inputs,
+ .outputs = mcdeint_outputs,
+ .priv_class = &mcdeint_class,
+};
extern const vf_info_t ff_vf_info_fil;
extern const vf_info_t ff_vf_info_fspp;
extern const vf_info_t ff_vf_info_ilpack;
-extern const vf_info_t ff_vf_info_mcdeint;
extern const vf_info_t ff_vf_info_perspective;
extern const vf_info_t ff_vf_info_phase;
extern const vf_info_t ff_vf_info_pp7;
&ff_vf_info_fil,
&ff_vf_info_fspp,
&ff_vf_info_ilpack,
- &ff_vf_info_mcdeint,
&ff_vf_info_perspective,
&ff_vf_info_phase,
&ff_vf_info_pp7,
/* XXX: prevent data copy... */
if (av_new_packet(pkt, pkt_size + offset) < 0) {
av_free(pkt);
- return AVERROR(ENOMEM);
+ res = AVERROR(ENOMEM);
+ goto fail;
}
if (st->codec->codec_id == AV_CODEC_ID_PRORES) {
memcpy(pkt->data + offset, pkt_data, pkt_size);
if (pkt_data != data)
- av_free(pkt_data);
+ av_freep(&pkt_data);
pkt->flags = is_keyframe;
pkt->stream_index = st->index;
int64_t tfrf_offset;
} MOVFragmentInfo;
-typedef struct MOVIndex {
+typedef struct MOVTrack {
int mode;
int entry;
unsigned timescale;
#include "libavutil/avutil.h"
#include "network.h"
+#include "url.h"
#include "libavcodec/internal.h"
#include "libavutil/mem.h"
#include "url.h"
return 0;
}
+
+int ff_listen_bind(int fd, const struct sockaddr *addr,
+ socklen_t addrlen, int timeout)
+{
+ int ret;
+ int reuse = 1;
+ struct pollfd lp = { fd, POLLIN, 0 };
+ if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse))) {
+ av_log(NULL, AV_LOG_WARNING, "setsockopt(SO_REUSEADDR) failed\n");
+ }
+ ret = bind(fd, addr, addrlen);
+ if (ret)
+ return ff_neterrno();
+
+ ret = listen(fd, 1);
+ if (ret)
+ return ff_neterrno();
+
+ ret = poll(&lp, 1, timeout >= 0 ? timeout : -1);
+ if (ret <= 0)
+ return AVERROR(ETIMEDOUT);
+
+ ret = accept(fd, NULL, NULL);
+ if (ret < 0)
+ return ff_neterrno();
+
+ closesocket(fd);
+
+ ff_socket_nonblock(ret, 1);
+ return ret;
+}
+
+int ff_listen_connect(int fd, const struct sockaddr *addr,
+ socklen_t addrlen, int rw_timeout, URLContext *h)
+{
+ struct pollfd p = {fd, POLLOUT, 0};
+ int64_t wait_started;
+ int ret;
+ socklen_t optlen;
+
+ ff_socket_nonblock(fd, 1);
+
+ while ((ret = connect(fd, addr, addrlen))) {
+ ret = ff_neterrno();
+ switch (ret) {
+ case AVERROR(EINTR):
+ if (ff_check_interrupt(&h->interrupt_callback))
+ return AVERROR_EXIT;
+ continue;
+ case AVERROR(EINPROGRESS):
+ case AVERROR(EAGAIN):
+ wait_started = av_gettime();
+ do {
+ if (ff_check_interrupt(&h->interrupt_callback))
+ return AVERROR_EXIT;
+ ret = poll(&p, 1, 100);
+ if (ret > 0)
+ break;
+ } while (!rw_timeout || (av_gettime() - wait_started < rw_timeout));
+ if (ret <= 0)
+ return AVERROR(ETIMEDOUT);
+ optlen = sizeof(ret);
+ if (getsockopt (fd, SOL_SOCKET, SO_ERROR, &ret, &optlen))
+ ret = AVUNERROR(ff_neterrno());
+ if (ret != 0) {
+ char errbuf[100];
+ ret = AVERROR(ret);
+ av_strerror(ret, errbuf, sizeof(errbuf));
+ av_log(h, AV_LOG_ERROR,
+ "Connection to %s failed: %s\n",
+ h->filename, errbuf);
+ }
+ default:
+ return ret;
+ }
+ }
+ return ret;
+}
#include "libavutil/error.h"
#include "os_support.h"
#include "avio.h"
+#include "url.h"
#if HAVE_UNISTD_H
#include <unistd.h>
int ff_is_multicast_address(struct sockaddr *addr);
+int ff_listen_bind(int fd, const struct sockaddr *addr,
+ socklen_t addrlen, int timeout);
+int ff_listen_connect(int fd, const struct sockaddr *addr,
+ socklen_t addrlen, int timeout,
+ URLContext *h);
#endif /* AVFORMAT_NETWORK_H */
#include "libavutil/dict.h"
#include "libavutil/intreadwrite.h"
-//#define DEBUG_DUMP_INDEX // XXX dumbdriving-271.nsv breaks with it commented!!
-#define CHECK_SUBSEQUENT_NSVS
-//#define DISABLE_AUDIO
-
/* max bytes to crawl for trying to resync
* stupid streaming servers don't start at chunk boundaries...
*/
av_dlog(s, "NSV got index; filepos %"PRId64"\n", avio_tell(pb));
-#ifdef DEBUG_DUMP_INDEX
-#define V(v) ((v<0x20 || v > 127)?'.':v)
- /* dump index */
- av_dlog(s, "NSV %d INDEX ENTRIES:\n", table_entries);
- av_dlog(s, "NSV [dataoffset][fileoffset]\n", table_entries);
- for (i = 0; i < table_entries; i++) {
- unsigned char b[8];
- avio_seek(pb, size + nsv->nsvs_file_offset[i], SEEK_SET);
- avio_read(pb, b, 8);
- av_dlog(s, "NSV [0x%08lx][0x%08lx]: %02x %02x %02x %02x %02x %02x %02x %02x"
- "%c%c%c%c%c%c%c%c\n",
- nsv->nsvs_file_offset[i], size + nsv->nsvs_file_offset[i],
- b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
- V(b[0]), V(b[1]), V(b[2]), V(b[3]), V(b[4]), V(b[5]), V(b[6]), V(b[7]) );
- }
- //avio_seek(pb, size, SEEK_SET); /* go back to end of header */
-#undef V
-#endif
-
avio_seek(pb, nsv->base_offset + size, SEEK_SET); /* required for dumbdriving-271.nsv (2 extra bytes) */
if (url_feof(pb))
}
}
if (atag != T_NONE) {
-#ifndef DISABLE_AUDIO
st = avformat_new_stream(s, NULL);
if (!st)
goto fail;
avpriv_set_pts_info(st, 64, 1, framerate.num*1000);
st->start_time = 0;
st->duration = (int64_t)nsv->duration * framerate.num;
-#endif
}
-#ifdef CHECK_SUBSEQUENT_NSVS
} else {
if (nsv->vtag != vtag || nsv->atag != atag || nsv->vwidth != vwidth || nsv->vheight != vwidth) {
av_dlog(s, "NSV NSVs header values differ from the first one!!!\n");
//return -1;
}
-#endif /* CHECK_SUBSEQUENT_NSVS */
}
nsv->state = NSV_HAS_READ_NSVS;
{ AV_CODEC_ID_DPX, MKTAG('d', 'p', 'x', ' ') },
{ AV_CODEC_ID_KGV1, MKTAG('K', 'G', 'V', '1') },
{ AV_CODEC_ID_LAGARITH, MKTAG('L', 'A', 'G', 'S') },
- { AV_CODEC_ID_G2M, MKTAG('G', '2', 'M', '2') },
- { AV_CODEC_ID_G2M, MKTAG('G', '2', 'M', '3') },
- { AV_CODEC_ID_G2M, MKTAG('G', '2', 'M', '4') },
{ AV_CODEC_ID_AMV, MKTAG('A', 'M', 'V', 'F') },
{ AV_CODEC_ID_UTVIDEO, MKTAG('U', 'L', 'R', 'A') },
{ AV_CODEC_ID_UTVIDEO, MKTAG('U', 'L', 'R', 'G') },
{ AV_CODEC_ID_SVQ3, MKTAG('S', 'V', 'Q', '3') },
{ AV_CODEC_ID_012V, MKTAG('0', '1', '2', 'v') },
{ AV_CODEC_ID_012V, MKTAG('a', '1', '2', 'v') },
+ { AV_CODEC_ID_G2M, MKTAG('G', '2', 'M', '2') },
+ { AV_CODEC_ID_G2M, MKTAG('G', '2', 'M', '3') },
+ { AV_CODEC_ID_G2M, MKTAG('G', '2', 'M', '4') },
{ AV_CODEC_ID_NONE, 0 }
};
/*
* RTMP definitions
- * Copyright (c) 2009 Kostya Shishkov
+ * Copyright (c) 2009 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
/*
* RTMP input format
- * Copyright (c) 2009 Kostya Shishkov
+ * Copyright (c) 2009 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
/*
* RTMP packet utilities
- * Copyright (c) 2009 Kostya Shishkov
+ * Copyright (c) 2009 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
/*
* RTMP network protocol
- * Copyright (c) 2009 Kostya Shishkov
+ * Copyright (c) 2009 Konstantin Shishkov
*
* This file is part of FFmpeg.
*
const char *p;
char buf[256];
int ret;
- socklen_t optlen;
char hostname[1024],proto[1024],path[1024];
char portstr[10];
h->rw_timeout = 5000000;
cur_ai = ai;
restart:
- ret = AVERROR(EIO);
fd = socket(cur_ai->ai_family, cur_ai->ai_socktype, cur_ai->ai_protocol);
- if (fd < 0)
+ if (fd < 0) {
+ ret = ff_neterrno();
goto fail;
+ }
if (s->listen) {
- int fd1;
- int reuse = 1;
- struct pollfd lp = { fd, POLLIN, 0 };
- setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse));
- ret = bind(fd, cur_ai->ai_addr, cur_ai->ai_addrlen);
- if (ret) {
- ret = ff_neterrno();
- goto fail1;
- }
- ret = listen(fd, 1);
- if (ret) {
- ret = ff_neterrno();
- goto fail1;
- }
- ret = poll(&lp, 1, s->listen_timeout >= 0 ? s->listen_timeout : -1);
- if (ret <= 0) {
- ret = AVERROR(ETIMEDOUT);
- goto fail1;
- }
- fd1 = accept(fd, NULL, NULL);
- if (fd1 < 0) {
- ret = ff_neterrno();
+ if ((fd = ff_listen_bind(fd, cur_ai->ai_addr, cur_ai->ai_addrlen,
+ s->listen_timeout)) < 0) {
+ ret = fd;
goto fail1;
}
- closesocket(fd);
- fd = fd1;
- ff_socket_nonblock(fd, 1);
} else {
- redo:
- ff_socket_nonblock(fd, 1);
- ret = connect(fd, cur_ai->ai_addr, cur_ai->ai_addrlen);
- }
-
- if (ret < 0) {
- struct pollfd p = {fd, POLLOUT, 0};
- int64_t wait_started;
- ret = ff_neterrno();
- if (ret == AVERROR(EINTR)) {
- if (ff_check_interrupt(&h->interrupt_callback)) {
- ret = AVERROR_EXIT;
- goto fail1;
- }
- goto redo;
- }
- if (ret != AVERROR(EINPROGRESS) &&
- ret != AVERROR(EAGAIN))
- goto fail;
+ if ((ret = ff_listen_connect(fd, cur_ai->ai_addr, cur_ai->ai_addrlen,
+ h->rw_timeout, h)) < 0) {
- /* wait until we are connected or until abort */
- wait_started = av_gettime();
- do {
- if (ff_check_interrupt(&h->interrupt_callback)) {
- ret = AVERROR_EXIT;
+ if (ret == AVERROR_EXIT)
goto fail1;
- }
- ret = poll(&p, 1, 100);
- if (ret > 0)
- break;
- } while (!h->rw_timeout || (av_gettime() - wait_started < h->rw_timeout));
- if (ret <= 0) {
- ret = AVERROR(ETIMEDOUT);
- goto fail;
- }
- /* test error */
- optlen = sizeof(ret);
- if (getsockopt (fd, SOL_SOCKET, SO_ERROR, &ret, &optlen))
- ret = AVUNERROR(ff_neterrno());
- if (ret != 0) {
- char errbuf[100];
- ret = AVERROR(ret);
- av_strerror(ret, errbuf, sizeof(errbuf));
- av_log(h, AV_LOG_ERROR,
- "TCP connection to %s:%d failed: %s\n",
- hostname, port, errbuf);
- goto fail;
+ else
+ goto fail;
}
}
+
h->is_streamed = 1;
s->fd = fd;
freeaddrinfo(ai);
cur_ai = cur_ai->ai_next;
if (fd >= 0)
closesocket(fd);
+ ret = 0;
goto restart;
}
fail1:
int i;
int64_t pos;
AVPacket *sub;
- const char *p, *identifier;
- //const char *settings = NULL;
+ const char *p, *identifier, *settings;
+ int identifier_len, settings_len;
int64_t ts_start, ts_end;
ff_subtitles_read_chunk(s->pb, &cue);
continue;
/* optional cue identifier (can be a number like in SRT or some kind of
- * chaptering id), silently skip it */
- for (i = 0; p[i] && p[i] != '\n'; i++) {
+ * chaptering id) */
+ for (i = 0; p[i] && p[i] != '\n' && p[i] != '\r'; i++) {
if (!strncmp(p + i, "-->", 3)) {
identifier = NULL;
break;
}
}
- if (identifier)
- p += strcspn(p, "\n");
+ if (!identifier)
+ identifier_len = 0;
+ else {
+ identifier_len = strcspn(p, "\r\n");
+ p += identifier_len;
+ if (*p == '\r')
+ p++;
+ if (*p == '\n')
+ p++;
+ }
/* cue timestamps */
if ((ts_start = read_ts(p)) == AV_NOPTS_VALUE)
if ((ts_end = read_ts(p)) == AV_NOPTS_VALUE)
break;
- /* optional cue settings, TODO: store in side_data */
+ /* optional cue settings */
p += strcspn(p, "\n\t ");
while (*p == '\t' || *p == ' ')
p++;
- if (*p != '\n') {
- //settings = p;
- p += strcspn(p, "\n");
- }
+ settings = p;
+ settings_len = strcspn(p, "\r\n");
+ p += settings_len;
+ if (*p == '\r')
+ p++;
if (*p == '\n')
p++;
sub->pos = pos;
sub->pts = ts_start;
sub->duration = ts_end - ts_start;
+
+#define SET_SIDE_DATA(name, type) do { \
+ if (name##_len) { \
+ uint8_t *buf = av_packet_new_side_data(sub, type, name##_len); \
+ if (!buf) { \
+ res = AVERROR(ENOMEM); \
+ goto end; \
+ } \
+ memcpy(buf, name, name##_len); \
+ } \
+} while (0)
+
+ SET_SIDE_DATA(identifier, AV_PKT_DATA_WEBVTT_IDENTIFIER);
+ SET_SIDE_DATA(settings, AV_PKT_DATA_WEBVTT_SETTINGS);
}
ff_subtitles_queue_finalize(&webvtt->q);
/*
* WavPack muxer
- * Copyright (c) 2013 Kostya Shishkov <kostya.shishkov@gmail.com>
+ * Copyright (c) 2013 Konstantin Shishkov <kostya.shishkov@gmail.com>
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
rational.h \
samplefmt.h \
sha.h \
+ sha512.h \
time.h \
timecode.h \
timestamp.h \
rc4.o \
samplefmt.o \
sha.o \
+ sha512.o \
time.o \
timecode.o \
tree.o \
random_seed \
rational \
sha \
+ sha512 \
tree \
xtea \
{
static volatile int printed;
- int ret, nb_cpus = 1;
+ int nb_cpus = 1;
#if HAVE_SCHED_GETAFFINITY && defined(CPU_COUNT)
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
- ret = sched_getaffinity(0, sizeof(cpuset), &cpuset);
- if (!ret) {
+ if (!sched_getaffinity(0, sizeof(cpuset), &cpuset))
nb_cpus = CPU_COUNT(&cpuset);
- }
#elif HAVE_GETPROCESSAFFINITYMASK
DWORD_PTR proc_aff, sys_aff;
- ret = GetProcessAffinityMask(GetCurrentProcess(), &proc_aff, &sys_aff);
- if (ret)
+ if (GetProcessAffinityMask(GetCurrentProcess(), &proc_aff, &sys_aff))
nb_cpus = av_popcount64(proc_aff);
#elif HAVE_SYSCTL && defined(HW_NCPU)
int mib[2] = { CTL_HW, HW_NCPU };
size_t len = sizeof(nb_cpus);
- ret = sysctl(mib, 2, &nb_cpus, &len, NULL, 0);
- if (ret == -1)
+ if (sysctl(mib, 2, &nb_cpus, &len, NULL, 0) == -1)
nb_cpus = 0;
#elif HAVE_SYSCONF && defined(_SC_NPROC_ONLN)
nb_cpus = sysconf(_SC_NPROC_ONLN);
#define Ch(x,y,z) (((x) & ((y) ^ (z))) ^ (z))
-#define Maj(x,y,z) ((((x) | (y)) & (z)) | ((x) & (y)))
+#define Maj(z,y,x) ((((x) | (y)) & (z)) | ((x) & (y)))
#define Sigma0_256(x) (rol((x), 30) ^ rol((x), 19) ^ rol((x), 10))
#define Sigma1_256(x) (rol((x), 26) ^ rol((x), 21) ^ rol((x), 7))
--- /dev/null
+/*
+ * Copyright (C) 2007 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (C) 2009 Konstantin Shishkov
+ * Copyright (C) 2013 James Almer
+ * based on BSD-licensed SHA-2 code by Aaron D. Gifford
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <string.h>
+
+#include "attributes.h"
+#include "avutil.h"
+#include "bswap.h"
+#include "sha512.h"
+#include "intreadwrite.h"
+#include "mem.h"
+
+/** hash context */
+typedef struct AVSHA512 {
+ uint8_t digest_len; ///< digest length in 64-bit words
+ uint64_t count; ///< number of bytes in buffer
+ uint8_t buffer[128]; ///< 1024-bit buffer of input values used in hash updating
+ uint64_t state[8]; ///< current hash value
+} AVSHA512;
+
+const int av_sha512_size = sizeof(AVSHA512);
+
+struct AVSHA512 *av_sha512_alloc(void)
+{
+ return av_mallocz(sizeof(struct AVSHA512));
+}
+
+static const uint64_t K512[80] = {
+ UINT64_C(0x428a2f98d728ae22), UINT64_C(0x7137449123ef65cd),
+ UINT64_C(0xb5c0fbcfec4d3b2f), UINT64_C(0xe9b5dba58189dbbc),
+ UINT64_C(0x3956c25bf348b538), UINT64_C(0x59f111f1b605d019),
+ UINT64_C(0x923f82a4af194f9b), UINT64_C(0xab1c5ed5da6d8118),
+ UINT64_C(0xd807aa98a3030242), UINT64_C(0x12835b0145706fbe),
+ UINT64_C(0x243185be4ee4b28c), UINT64_C(0x550c7dc3d5ffb4e2),
+ UINT64_C(0x72be5d74f27b896f), UINT64_C(0x80deb1fe3b1696b1),
+ UINT64_C(0x9bdc06a725c71235), UINT64_C(0xc19bf174cf692694),
+ UINT64_C(0xe49b69c19ef14ad2), UINT64_C(0xefbe4786384f25e3),
+ UINT64_C(0x0fc19dc68b8cd5b5), UINT64_C(0x240ca1cc77ac9c65),
+ UINT64_C(0x2de92c6f592b0275), UINT64_C(0x4a7484aa6ea6e483),
+ UINT64_C(0x5cb0a9dcbd41fbd4), UINT64_C(0x76f988da831153b5),
+ UINT64_C(0x983e5152ee66dfab), UINT64_C(0xa831c66d2db43210),
+ UINT64_C(0xb00327c898fb213f), UINT64_C(0xbf597fc7beef0ee4),
+ UINT64_C(0xc6e00bf33da88fc2), UINT64_C(0xd5a79147930aa725),
+ UINT64_C(0x06ca6351e003826f), UINT64_C(0x142929670a0e6e70),
+ UINT64_C(0x27b70a8546d22ffc), UINT64_C(0x2e1b21385c26c926),
+ UINT64_C(0x4d2c6dfc5ac42aed), UINT64_C(0x53380d139d95b3df),
+ UINT64_C(0x650a73548baf63de), UINT64_C(0x766a0abb3c77b2a8),
+ UINT64_C(0x81c2c92e47edaee6), UINT64_C(0x92722c851482353b),
+ UINT64_C(0xa2bfe8a14cf10364), UINT64_C(0xa81a664bbc423001),
+ UINT64_C(0xc24b8b70d0f89791), UINT64_C(0xc76c51a30654be30),
+ UINT64_C(0xd192e819d6ef5218), UINT64_C(0xd69906245565a910),
+ UINT64_C(0xf40e35855771202a), UINT64_C(0x106aa07032bbd1b8),
+ UINT64_C(0x19a4c116b8d2d0c8), UINT64_C(0x1e376c085141ab53),
+ UINT64_C(0x2748774cdf8eeb99), UINT64_C(0x34b0bcb5e19b48a8),
+ UINT64_C(0x391c0cb3c5c95a63), UINT64_C(0x4ed8aa4ae3418acb),
+ UINT64_C(0x5b9cca4f7763e373), UINT64_C(0x682e6ff3d6b2b8a3),
+ UINT64_C(0x748f82ee5defb2fc), UINT64_C(0x78a5636f43172f60),
+ UINT64_C(0x84c87814a1f0ab72), UINT64_C(0x8cc702081a6439ec),
+ UINT64_C(0x90befffa23631e28), UINT64_C(0xa4506cebde82bde9),
+ UINT64_C(0xbef9a3f7b2c67915), UINT64_C(0xc67178f2e372532b),
+ UINT64_C(0xca273eceea26619c), UINT64_C(0xd186b8c721c0c207),
+ UINT64_C(0xeada7dd6cde0eb1e), UINT64_C(0xf57d4f7fee6ed178),
+ UINT64_C(0x06f067aa72176fba), UINT64_C(0x0a637dc5a2c898a6),
+ UINT64_C(0x113f9804bef90dae), UINT64_C(0x1b710b35131c471b),
+ UINT64_C(0x28db77f523047d84), UINT64_C(0x32caab7b40c72493),
+ UINT64_C(0x3c9ebe0a15c9bebc), UINT64_C(0x431d67c49c100d4c),
+ UINT64_C(0x4cc5d4becb3e42b6), UINT64_C(0x597f299cfc657e2a),
+ UINT64_C(0x5fcb6fab3ad6faec), UINT64_C(0x6c44198c4a475817),
+};
+
+#define ror(value, bits) (((value) >> (bits)) | ((value) << (64 - (bits))))
+
+#define Ch(x,y,z) (((x) & ((y) ^ (z))) ^ (z))
+#define Maj(z,y,x) ((((x) | (y)) & (z)) | ((x) & (y)))
+
+#define Sigma0_512(x) (ror((x), 28) ^ ror((x), 34) ^ ror((x), 39))
+#define Sigma1_512(x) (ror((x), 14) ^ ror((x), 18) ^ ror((x), 41))
+#define sigma0_512(x) (ror((x), 1) ^ ror((x), 8) ^ ((x) >> 7))
+#define sigma1_512(x) (ror((x), 19) ^ ror((x), 61) ^ ((x) >> 6))
+
+#define blk0(i) (block[i] = AV_RB64(buffer + 8 * (i)))
+#define blk(i) (block[i] = block[i - 16] + sigma0_512(block[i - 15]) + \
+ sigma1_512(block[i - 2]) + block[i - 7])
+
+#define ROUND512(a,b,c,d,e,f,g,h) \
+ T1 += (h) + Sigma1_512(e) + Ch((e), (f), (g)) + K512[i]; \
+ (d) += T1; \
+ (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \
+ i++
+
+#define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \
+ T1 = blk0(i); \
+ ROUND512(a,b,c,d,e,f,g,h)
+
+#define ROUND512_16_TO_80(a,b,c,d,e,f,g,h) \
+ T1 = blk(i); \
+ ROUND512(a,b,c,d,e,f,g,h)
+
+static void sha512_transform(uint64_t *state, const uint8_t buffer[128])
+{
+ uint64_t a, b, c, d, e, f, g, h;
+ uint64_t block[80];
+ uint64_t T1;
+ int i;
+
+ a = state[0];
+ b = state[1];
+ c = state[2];
+ d = state[3];
+ e = state[4];
+ f = state[5];
+ g = state[6];
+ h = state[7];
+#if CONFIG_SMALL
+ for (i = 0; i < 80; i++) {
+ uint64_t T2;
+ if (i < 16)
+ T1 = blk0(i);
+ else
+ T1 = blk(i);
+ T1 += h + Sigma1_512(e) + Ch(e, f, g) + K512[i];
+ T2 = Sigma0_512(a) + Maj(a, b, c);
+ h = g;
+ g = f;
+ f = e;
+ e = d + T1;
+ d = c;
+ c = b;
+ b = a;
+ a = T1 + T2;
+ }
+#else
+ for (i = 0; i < 16 - 7;) {
+ ROUND512_0_TO_15(a, b, c, d, e, f, g, h);
+ ROUND512_0_TO_15(h, a, b, c, d, e, f, g);
+ ROUND512_0_TO_15(g, h, a, b, c, d, e, f);
+ ROUND512_0_TO_15(f, g, h, a, b, c, d, e);
+ ROUND512_0_TO_15(e, f, g, h, a, b, c, d);
+ ROUND512_0_TO_15(d, e, f, g, h, a, b, c);
+ ROUND512_0_TO_15(c, d, e, f, g, h, a, b);
+ ROUND512_0_TO_15(b, c, d, e, f, g, h, a);
+ }
+
+ for (; i < 80 - 7;) {
+ ROUND512_16_TO_80(a, b, c, d, e, f, g, h);
+ ROUND512_16_TO_80(h, a, b, c, d, e, f, g);
+ ROUND512_16_TO_80(g, h, a, b, c, d, e, f);
+ ROUND512_16_TO_80(f, g, h, a, b, c, d, e);
+ ROUND512_16_TO_80(e, f, g, h, a, b, c, d);
+ ROUND512_16_TO_80(d, e, f, g, h, a, b, c);
+ ROUND512_16_TO_80(c, d, e, f, g, h, a, b);
+ ROUND512_16_TO_80(b, c, d, e, f, g, h, a);
+ }
+#endif
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+ state[4] += e;
+ state[5] += f;
+ state[6] += g;
+ state[7] += h;
+}
+
+
+av_cold int av_sha512_init(AVSHA512 *ctx, int bits)
+{
+ ctx->digest_len = bits >> 6;
+ switch (bits) {
+ case 224: // SHA-512/224
+ ctx->state[0] = UINT64_C(0x8C3D37C819544DA2);
+ ctx->state[1] = UINT64_C(0x73E1996689DCD4D6);
+ ctx->state[2] = UINT64_C(0x1DFAB7AE32FF9C82);
+ ctx->state[3] = UINT64_C(0x679DD514582F9FCF);
+ ctx->state[4] = UINT64_C(0x0F6D2B697BD44DA8);
+ ctx->state[5] = UINT64_C(0x77E36F7304C48942);
+ ctx->state[6] = UINT64_C(0x3F9D85A86A1D36C8);
+ ctx->state[7] = UINT64_C(0x1112E6AD91D692A1);
+ break;
+ case 256: // SHA-512/256
+ ctx->state[0] = UINT64_C(0x22312194FC2BF72C);
+ ctx->state[1] = UINT64_C(0x9F555FA3C84C64C2);
+ ctx->state[2] = UINT64_C(0x2393B86B6F53B151);
+ ctx->state[3] = UINT64_C(0x963877195940EABD);
+ ctx->state[4] = UINT64_C(0x96283EE2A88EFFE3);
+ ctx->state[5] = UINT64_C(0xBE5E1E2553863992);
+ ctx->state[6] = UINT64_C(0x2B0199FC2C85B8AA);
+ ctx->state[7] = UINT64_C(0x0EB72DDC81C52CA2);
+ break;
+ case 384: // SHA-384
+ ctx->state[0] = UINT64_C(0xCBBB9D5DC1059ED8);
+ ctx->state[1] = UINT64_C(0x629A292A367CD507);
+ ctx->state[2] = UINT64_C(0x9159015A3070DD17);
+ ctx->state[3] = UINT64_C(0x152FECD8F70E5939);
+ ctx->state[4] = UINT64_C(0x67332667FFC00B31);
+ ctx->state[5] = UINT64_C(0x8EB44A8768581511);
+ ctx->state[6] = UINT64_C(0xDB0C2E0D64F98FA7);
+ ctx->state[7] = UINT64_C(0x47B5481DBEFA4FA4);
+ break;
+ case 512: // SHA-512
+ ctx->state[0] = UINT64_C(0x6A09E667F3BCC908);
+ ctx->state[1] = UINT64_C(0xBB67AE8584CAA73B);
+ ctx->state[2] = UINT64_C(0x3C6EF372FE94F82B);
+ ctx->state[3] = UINT64_C(0xA54FF53A5F1D36F1);
+ ctx->state[4] = UINT64_C(0x510E527FADE682D1);
+ ctx->state[5] = UINT64_C(0x9B05688C2B3E6C1F);
+ ctx->state[6] = UINT64_C(0x1F83D9ABFB41BD6B);
+ ctx->state[7] = UINT64_C(0x5BE0CD19137E2179);
+ break;
+ default:
+ return -1;
+ }
+ ctx->count = 0;
+ return 0;
+}
+
+void av_sha512_update(AVSHA512* ctx, const uint8_t* data, unsigned int len)
+{
+ unsigned int i, j;
+
+ j = ctx->count & 127;
+ ctx->count += len;
+#if CONFIG_SMALL
+ for (i = 0; i < len; i++) {
+ ctx->buffer[j++] = data[i];
+ if (128 == j) {
+ sha512_transform(ctx->state, ctx->buffer);
+ j = 0;
+ }
+ }
+#else
+ if ((j + len) > 127) {
+ memcpy(&ctx->buffer[j], data, (i = 128 - j));
+ sha512_transform(ctx->state, ctx->buffer);
+ for (; i + 127 < len; i += 128)
+ sha512_transform(ctx->state, &data[i]);
+ j = 0;
+ } else
+ i = 0;
+ memcpy(&ctx->buffer[j], &data[i], len - i);
+#endif
+}
+
+void av_sha512_final(AVSHA512* ctx, uint8_t *digest)
+{
+ uint64_t i = 0;
+ uint64_t finalcount = av_be2ne64(ctx->count << 3);
+
+ av_sha512_update(ctx, "\200", 1);
+ while ((ctx->count & 127) != 112)
+ av_sha512_update(ctx, "", 1);
+ av_sha512_update(ctx, (uint8_t *)&i, 8);
+ av_sha512_update(ctx, (uint8_t *)&finalcount, 8); /* Should cause a transform() */
+ for (i = 0; i < ctx->digest_len; i++)
+ AV_WB64(digest + i*8, ctx->state[i]);
+ if (ctx->digest_len & 1) /* SHA512/224 is 28 bytes, and is not divisible by 8. */
+ AV_WB32(digest + i*8, ctx->state[i] >> 32);
+}
+
+#ifdef TEST
+#include <stdio.h>
+
+int main(void)
+{
+ int i, j, k;
+ AVSHA512 ctx;
+ unsigned char digest[64];
+ const int lengths[4] = { 224, 256, 384, 512 };
+
+ for (j = 0; j < 4; j++) {
+ if (j < 2) printf("Testing SHA-512/%d\n", lengths[j]);
+ else printf("Testing SHA-%d\n", lengths[j]);
+ for (k = 0; k < 3; k++) {
+ av_sha512_init(&ctx, lengths[j]);
+ if (k == 0)
+ av_sha512_update(&ctx, "abc", 3);
+ else if (k == 1)
+ av_sha512_update(&ctx, "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"
+ "hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu", 112);
+ else
+ for (i = 0; i < 1000*1000; i++)
+ av_sha512_update(&ctx, "a", 1);
+ av_sha512_final(&ctx, digest);
+ for (i = 0; i < lengths[j] >> 3; i++)
+ printf("%02X", digest[i]);
+ putchar('\n');
+ }
+        switch (j) { //test vectors (from FIPS PUB 180-4 Appendix A)
+ case 0:
+ printf("4634270f 707b6a54 daae7530 460842e2 0e37ed26 5ceee9a4 3e8924aa\n"
+ "23fec5bb 94d60b23 30819264 0b0c4533 35d66473 4fe40e72 68674af9\n"
+ "37ab331d 76f0d36d e422bd0e deb22a28 accd487b 7a8453ae 965dd287\n");
+ break;
+ case 1:
+ printf("53048e26 81941ef9 9b2e29b7 6b4c7dab e4c2d0c6 34fc6d46 e0e2f131 07e7af23\n"
+ "3928e184 fb8690f8 40da3988 121d31be 65cb9d3e f83ee614 6feac861 e19b563a\n"
+ "9a59a052 930187a9 7038cae6 92f30708 aa649192 3ef51943 94dc68d5 6c74fb21\n");
+ break;
+ case 2:
+ printf("cb00753f 45a35e8b b5a03d69 9ac65007 272c32ab 0eded163 "
+ "1a8b605a 43ff5bed 8086072b a1e7cc23 58baeca1 34c825a7\n"
+ "09330c33 f71147e8 3d192fc7 82cd1b47 53111b17 3b3b05d2 "
+ "2fa08086 e3b0f712 fcc7c71a 557e2db9 66c3e9fa 91746039\n"
+ "9d0e1809 716474cb 086e834e 310a4a1c ed149e9c 00f24852 "
+ "7972cec5 704c2a5b 07b8b3dc 38ecc4eb ae97ddd8 7f3d8985\n");
+ break;
+ case 3:
+ printf("ddaf35a1 93617aba cc417349 ae204131 12e6fa4e 89a97ea2 0a9eeee6 4b55d39a "
+ "2192992a 274fc1a8 36ba3c23 a3feebbd 454d4423 643ce80e 2a9ac94f a54ca49f\n"
+ "8e959b75 dae313da 8cf4f728 14fc143f 8f7779c6 eb9f7fa1 7299aead b6889018 "
+ "501d289e 4900f7e4 331b99de c4b5433a c7d329ee b6dd2654 5e96e55b 874be909\n"
+ "e718483d 0ce76964 4e2e42c7 bc15b463 8e1f98b1 3b204428 5632a803 afa973eb "
+ "de0ff244 877ea60a 4cb0432c e577c31b eb009c5c 2c49aa2e 4eadb217 ad8cc09b\n");
+ break;
+ }
+ }
+
+ return 0;
+}
+#endif
--- /dev/null
+/*
+ * Copyright (C) 2007 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (C) 2013 James Almer <jamrial@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_SHA512_H
+#define AVUTIL_SHA512_H
+
+#include <stdint.h>
+
+#include "attributes.h"
+#include "version.h"
+
+/**
+ * @defgroup lavu_sha512 SHA512
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+extern const int av_sha512_size;
+
+struct AVSHA512;
+
+/**
+ * Allocate an AVSHA512 context.
+ */
+struct AVSHA512 *av_sha512_alloc(void);
+
+/**
+ * Initialize SHA-2 512 hashing.
+ *
+ * @param context pointer to the function context (of size av_sha512_size)
+ * @param bits number of bits in digest (224, 256, 384 or 512 bits)
+ * @return zero if initialization succeeded, -1 otherwise
+ */
+int av_sha512_init(struct AVSHA512* context, int bits);
+
+/**
+ * Update hash value.
+ *
+ * @param context hash function context
+ * @param data input data to update hash with
+ * @param len input data length
+ */
+void av_sha512_update(struct AVSHA512* context, const uint8_t* data, unsigned int len);
+
+/**
+ * Finish hashing and output digest value.
+ *
+ * @param context hash function context
+ * @param digest buffer where output digest value is stored
+ */
+void av_sha512_final(struct AVSHA512* context, uint8_t *digest);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_SHA512_H */
(vector unsigned short) \
vec_max(y, ((vector signed short) { 0 })))
-//#define out_pixels(a, b, c, ptr) vec_mstrgb32(__typeof__(a), ((__typeof__(a)) { 255 }), a, a, a, ptr)
-
static inline void cvtyuvtoRGB(SwsContext *c, vector signed short Y,
vector signed short U, vector signed short V,
vector signed short *R, vector signed short *G,
FATE_FILTER-$(call FILTERDEMDEC, YADIF, MPEGTS, MPEG2VIDEO) += $(FATE_YADIF)
-FATE_SAMPLES_AVCONV += $(FATE_FILTER-yes)
+FATE_MCDEINT += fate-filter-mcdeint-fast
+fate-filter-mcdeint-fast: CMD = framecrc -flags bitexact -idct simple -i $(TARGET_SAMPLES)/mpeg2/mpeg2_field_encoding.ts -vframes 30 -vf mcdeint=fast
+
+FATE_MCDEINT += fate-filter-mcdeint-medium
+fate-filter-mcdeint-medium: CMD = framecrc -flags bitexact -idct simple -i $(TARGET_SAMPLES)/mpeg2/mpeg2_field_encoding.ts -vframes 30 -vf mcdeint=mode=medium
+FATE_FILTER-$(call ALLYES, MCDEINT_FILTER MPEGTS_DEMUXER MPEG2VIDEO_DECODER SNOW_ENCODER) += $(FATE_MCDEINT)
+
+FATE_SAMPLES_AVCONV += $(FATE_FILTER-yes)
FATE_FILTER-$(call ALLYES, AVDEVICE LIFE_FILTER) += fate-filter-lavd-life
fate-filter-lavd-life: CMD = framecrc -f lavfi -i life=s=40x40:r=5:seed=42:mold=64:ratio=0.1:death_color=red:life_color=green -t 2
fate-sha: libavutil/sha-test$(EXESUF)
fate-sha: CMD = run libavutil/sha-test
+FATE_LIBAVUTIL += fate-sha512
+fate-sha512: libavutil/sha512-test$(EXESUF)
+fate-sha512: CMD = run libavutil/sha512-test
+
FATE_LIBAVUTIL += fate-xtea
fate-xtea: libavutil/xtea-test$(EXESUF)
fate-xtea: CMD = run libavutil/xtea-test
--- /dev/null
+#tb 0: 1/25
+0, 9, 9, 1, 622080, 0xb3b66c5c
+0, 10, 10, 1, 622080, 0xc6568bd7
+0, 11, 11, 1, 622080, 0xa5b543c3
+0, 12, 12, 1, 622080, 0x4095ac51
+0, 13, 13, 1, 622080, 0xccd8c1d9
+0, 14, 14, 1, 622080, 0x84a88f22
+0, 15, 15, 1, 622080, 0x7273c26b
+0, 16, 16, 1, 622080, 0xac188c41
+0, 17, 17, 1, 622080, 0xf32f6fb4
+0, 18, 18, 1, 622080, 0xd696ccce
+0, 19, 19, 1, 622080, 0x9778a418
+0, 20, 20, 1, 622080, 0xf2b5be2e
+0, 21, 21, 1, 622080, 0x653ee12a
+0, 22, 22, 1, 622080, 0xe7fce188
+0, 23, 23, 1, 622080, 0x6e9f1deb
+0, 24, 24, 1, 622080, 0x33090aac
+0, 25, 25, 1, 622080, 0x840a57f1
+0, 26, 26, 1, 622080, 0x635e430a
+0, 27, 27, 1, 622080, 0x52f98809
+0, 28, 28, 1, 622080, 0xc567b6a5
+0, 29, 29, 1, 622080, 0x4134f583
+0, 30, 30, 1, 622080, 0xd02a73bc
+0, 31, 31, 1, 622080, 0x763085d6
+0, 32, 32, 1, 622080, 0x77fdc7a6
+0, 33, 33, 1, 622080, 0x77f71b9f
+0, 34, 34, 1, 622080, 0x71c91244
+0, 35, 35, 1, 622080, 0xc7b86da5
+0, 36, 36, 1, 622080, 0x1edf8890
+0, 37, 37, 1, 622080, 0x03c82bec
+0, 38, 38, 1, 622080, 0x148b6a04
--- /dev/null
+#tb 0: 1/25
+0, 9, 9, 1, 622080, 0xb3b66c5c
+0, 10, 10, 1, 622080, 0xc69368eb
+0, 11, 11, 1, 622080, 0x76bdde33
+0, 12, 12, 1, 622080, 0x5a04d7a6
+0, 13, 13, 1, 622080, 0x68eacaec
+0, 14, 14, 1, 622080, 0x1e888865
+0, 15, 15, 1, 622080, 0x188ad805
+0, 16, 16, 1, 622080, 0x268e94ce
+0, 17, 17, 1, 622080, 0x89da806c
+0, 18, 18, 1, 622080, 0x507ec6c9
+0, 19, 19, 1, 622080, 0xf20ba69b
+0, 20, 20, 1, 622080, 0x5786a96e
+0, 21, 21, 1, 622080, 0xf9d2fd6e
+0, 22, 22, 1, 622080, 0x4b69ef51
+0, 23, 23, 1, 622080, 0x19a22b06
+0, 24, 24, 1, 622080, 0x13a30e94
+0, 25, 25, 1, 622080, 0x02435f86
+0, 26, 26, 1, 622080, 0x06794a00
+0, 27, 27, 1, 622080, 0x289e8aea
+0, 28, 28, 1, 622080, 0x494ab1a4
+0, 29, 29, 1, 622080, 0xc1c6f5da
+0, 30, 30, 1, 622080, 0x1f6d6764
+0, 31, 31, 1, 622080, 0xaa898832
+0, 32, 32, 1, 622080, 0x6935c412
+0, 33, 33, 1, 622080, 0x825e2a67
+0, 34, 34, 1, 622080, 0xd8ee113a
+0, 35, 35, 1, 622080, 0x9ffc7f17
+0, 36, 36, 1, 622080, 0xa7819ac8
+0, 37, 37, 1, 622080, 0xa412377d
+0, 38, 38, 1, 622080, 0x538376bc
--- /dev/null
+Testing SHA-512/224
+4634270F707B6A54DAAE7530460842E20E37ED265CEEE9A43E8924AA
+23FEC5BB94D60B23308192640B0C453335D664734FE40E7268674AF9
+37AB331D76F0D36DE422BD0EDEB22A28ACCD487B7A8453AE965DD287
+4634270f 707b6a54 daae7530 460842e2 0e37ed26 5ceee9a4 3e8924aa
+23fec5bb 94d60b23 30819264 0b0c4533 35d66473 4fe40e72 68674af9
+37ab331d 76f0d36d e422bd0e deb22a28 accd487b 7a8453ae 965dd287
+Testing SHA-512/256
+53048E2681941EF99B2E29B76B4C7DABE4C2D0C634FC6D46E0E2F13107E7AF23
+3928E184FB8690F840DA3988121D31BE65CB9D3EF83EE6146FEAC861E19B563A
+9A59A052930187A97038CAE692F30708AA6491923EF5194394DC68D56C74FB21
+53048e26 81941ef9 9b2e29b7 6b4c7dab e4c2d0c6 34fc6d46 e0e2f131 07e7af23
+3928e184 fb8690f8 40da3988 121d31be 65cb9d3e f83ee614 6feac861 e19b563a
+9a59a052 930187a9 7038cae6 92f30708 aa649192 3ef51943 94dc68d5 6c74fb21
+Testing SHA-384
+CB00753F45A35E8BB5A03D699AC65007272C32AB0EDED1631A8B605A43FF5BED8086072BA1E7CC2358BAECA134C825A7
+09330C33F71147E83D192FC782CD1B4753111B173B3B05D22FA08086E3B0F712FCC7C71A557E2DB966C3E9FA91746039
+9D0E1809716474CB086E834E310A4A1CED149E9C00F248527972CEC5704C2A5B07B8B3DC38ECC4EBAE97DDD87F3D8985
+cb00753f 45a35e8b b5a03d69 9ac65007 272c32ab 0eded163 1a8b605a 43ff5bed 8086072b a1e7cc23 58baeca1 34c825a7
+09330c33 f71147e8 3d192fc7 82cd1b47 53111b17 3b3b05d2 2fa08086 e3b0f712 fcc7c71a 557e2db9 66c3e9fa 91746039
+9d0e1809 716474cb 086e834e 310a4a1c ed149e9c 00f24852 7972cec5 704c2a5b 07b8b3dc 38ecc4eb ae97ddd8 7f3d8985
+Testing SHA-512
+DDAF35A193617ABACC417349AE20413112E6FA4E89A97EA20A9EEEE64B55D39A2192992A274FC1A836BA3C23A3FEEBBD454D4423643CE80E2A9AC94FA54CA49F
+8E959B75DAE313DA8CF4F72814FC143F8F7779C6EB9F7FA17299AEADB6889018501D289E4900F7E4331B99DEC4B5433AC7D329EEB6DD26545E96E55B874BE909
+E718483D0CE769644E2E42C7BC15B4638E1F98B13B2044285632A803AFA973EBDE0FF244877EA60A4CB0432CE577C31BEB009C5C2C49AA2E4EADB217AD8CC09B
+ddaf35a1 93617aba cc417349 ae204131 12e6fa4e 89a97ea2 0a9eeee6 4b55d39a 2192992a 274fc1a8 36ba3c23 a3feebbd 454d4423 643ce80e 2a9ac94f a54ca49f
+8e959b75 dae313da 8cf4f728 14fc143f 8f7779c6 eb9f7fa1 7299aead b6889018 501d289e 4900f7e4 331b99de c4b5433a c7d329ee b6dd2654 5e96e55b 874be909
+e718483d 0ce76964 4e2e42c7 bc15b463 8e1f98b1 3b204428 5632a803 afa973eb de0ff244 877ea60a 4cb0432c e577c31b eb009c5c 2c49aa2e 4eadb217 ad8cc09b
c135eb14e9f219242180270c2a242634 *tests/data/fate/vsynth1-jpeg2000-97.avi
2243132 tests/data/fate/vsynth1-jpeg2000-97.avi
-30a9c13e18fe4acaf28062b5003bb671 *tests/data/fate/vsynth1-jpeg2000-97.out.rawvideo
-stddev: 6.41 PSNR: 31.99 MAXDIFF: 75 bytes: 7603200/ 7603200
+e1a095b40d7f6440f6c46f2995c4759c *tests/data/fate/vsynth1-jpeg2000-97.out.rawvideo
+stddev: 6.23 PSNR: 32.23 MAXDIFF: 75 bytes: 7603200/ 7603200
3ac3e49a89136bddde9e44bac3e5b4ed *tests/data/fate/vsynth2-jpeg2000-97.avi
1118952 tests/data/fate/vsynth2-jpeg2000-97.avi
-9d69ac6d46152ed2d6dd6a90d5793c80 *tests/data/fate/vsynth2-jpeg2000-97.out.rawvideo
-stddev: 5.32 PSNR: 33.61 MAXDIFF: 60 bytes: 7603200/ 7603200
+8ac8b9ee81fa73c873668e9f6b78764d *tests/data/fate/vsynth2-jpeg2000-97.out.rawvideo
+stddev: 4.95 PSNR: 34.23 MAXDIFF: 60 bytes: 7603200/ 7603200
buf[count] = 0;
if (buf[0] != '#') {
- av_expr_parse_and_eval(&d, buf,
- NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, 0, NULL);
+ int ret = av_expr_parse_and_eval(&d, buf,
+ NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, 0, NULL);
if (echo)
fprintf(outfile, "%s ", buf);
- fprintf(outfile, "%s%f\n", prompt, d);
+ if (ret >= 0) fprintf(outfile, "%s%f\n", prompt, d);
+ else fprintf(outfile, "%s%s\n", prompt, av_err2str(ret));
}
count = 0;
} else {
for (;;) {
ssize_t size = read(fd, buffer, SIZE);
if (size < 0) {
+ close(fd);
finish();
printf("+READ-FAILED: %s", strerror(errno));
ret = 2;
atom_type = BE_32(&moov_atom[i]);
if (atom_type == STCO_ATOM) {
printf(" patching stco atom...\n");
- atom_size = BE_32(&moov_atom[i - 4]);
+ atom_size = (uint32_t)BE_32(&moov_atom[i - 4]);
if (i + atom_size - 4 > moov_atom_size) {
printf(" bad atom size\n");
goto error_out;
goto error_out;
}
for (j = 0; j < offset_count; j++) {
- current_offset = BE_32(&moov_atom[i + 12 + j * 4]);
+ current_offset = (uint32_t)BE_32(&moov_atom[i + 12 + j * 4]);
current_offset += moov_atom_size;
moov_atom[i + 12 + j * 4 + 0] = (current_offset >> 24) & 0xFF;
moov_atom[i + 12 + j * 4 + 1] = (current_offset >> 16) & 0xFF;
i += atom_size - 4;
} else if (atom_type == CO64_ATOM) {
printf(" patching co64 atom...\n");
- atom_size = BE_32(&moov_atom[i - 4]);
+ atom_size = (uint32_t)BE_32(&moov_atom[i - 4]);
if (i + atom_size - 4 > moov_atom_size) {
printf(" bad atom size\n");
goto error_out;