/*
 * Copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2014 Arwa Arif <arwaarif1994@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * Postprocessing filter - 7
 *
 * Originally written by Michael Niedermayer for the MPlayer
 * project, and ported by Arwa Arif for FFmpeg.
 */
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/mem_internal.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "internal.h"
#include "vf_pp7.h"
44 #define OFFSET(x) offsetof(PP7Context, x)
45 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
46 static const AVOption pp7_options[] = {
47 { "qp", "force a constant quantizer parameter", OFFSET(qp), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 64, FLAGS },
48 { "mode", "set thresholding mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MODE_MEDIUM}, 0, 2, FLAGS, "mode" },
49 { "hard", "hard thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_HARD}, INT_MIN, INT_MAX, FLAGS, "mode" },
50 { "soft", "soft thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_SOFT}, INT_MIN, INT_MAX, FLAGS, "mode" },
51 { "medium", "medium thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_MEDIUM}, INT_MIN, INT_MAX, FLAGS, "mode" },
55 AVFILTER_DEFINE_CLASS(pp7);
57 DECLARE_ALIGNED(8, static const uint8_t, dither)[8][8] = {
58 { 0, 48, 12, 60, 3, 51, 15, 63, },
59 { 32, 16, 44, 28, 35, 19, 47, 31, },
60 { 8, 56, 4, 52, 11, 59, 7, 55, },
61 { 40, 24, 36, 20, 43, 27, 39, 23, },
62 { 2, 50, 14, 62, 1, 49, 13, 61, },
63 { 34, 18, 46, 30, 33, 17, 45, 29, },
64 { 10, 58, 6, 54, 9, 57, 5, 53, },
65 { 42, 26, 38, 22, 41, 25, 37, 21, },
#define N0 4
#define N1 5
#define N2 10
#define SN0 2
#define SN1 2.2360679775   /* sqrt(5)  */
#define SN2 3.16227766017  /* sqrt(10) */
#define N (1 << 16)

/* Fixed-point (16-bit) normalization factors for the 4x4 transform
 * coefficients: factor[i] = N / (norm_col * norm_row), where the norms
 * cycle through N0/N1/N0/N2 per row and column. */
static const int factor[16] = {
    N / (N0 * N0), N / (N0 * N1), N / (N0 * N0), N / (N0 * N2),
    N / (N1 * N0), N / (N1 * N1), N / (N1 * N0), N / (N1 * N2),
    N / (N0 * N0), N / (N0 * N1), N / (N0 * N0), N / (N0 * N2),
    N / (N2 * N0), N / (N2 * N1), N / (N2 * N0), N / (N2 * N2),
};
83 static void init_thres2(PP7Context *p)
88 for (qp = 0; qp < 99; qp++) {
89 for (i = 0; i < 16; i++) {
90 p->thres2[qp][i] = ((i&1) ? SN2 : SN0) * ((i&4) ? SN2 : SN0) * FFMAX(1, qp) * (1<<2) - 1 - bias;
/**
 * Vertical transform pass: for each of 4 columns, fold the 7-tap window
 * (rows 0..6 of src, symmetric pairs summed) into 4 output coefficients.
 *
 * @param dst    output, 4 coefficients per column, column-major (dst += 4)
 * @param src    top-left of the 4x7 input window
 * @param stride row stride of src in bytes
 */
static inline void dctA_c(int16_t *dst, uint8_t *src, int stride)
{
    int i;

    for (i = 0; i < 4; i++) {
        /* symmetric row pairs of the 7-tap window */
        int s0 = src[0 * stride] + src[6 * stride];
        int s1 = src[1 * stride] + src[5 * stride];
        int s2 = src[2 * stride] + src[4 * stride];
        int s3 = src[3 * stride];
        int s = s3 + s3;

        s3 = s  - s0;
        s0 = s  + s0;
        s  = s2 + s1;
        s2 = s2 - s1;
        dst[0] = s0 + s;
        dst[2] = s0 - s;
        dst[1] = 2 * s3 + s2;
        dst[3] = s3 - 2 * s2;
        src++;
        dst += 4;
    }
}
/**
 * Horizontal transform pass: same 7-tap folding as dctA_c but applied to
 * int16_t coefficients laid out with a fixed stride of 4.
 *
 * @param dst output 4x4 coefficient block
 * @param src input from the vertical pass (dctA_c)
 */
static void dctB_c(int16_t *dst, int16_t *src)
{
    int i;

    for (i = 0; i < 4; i++) {
        /* symmetric pairs of the 7-tap window, stride is 4 */
        int s0 = src[0 * 4] + src[6 * 4];
        int s1 = src[1 * 4] + src[5 * 4];
        int s2 = src[2 * 4] + src[4 * 4];
        int s3 = src[3 * 4];
        int s = s3 + s3;

        s3 = s  - s0;
        s0 = s  + s0;
        s  = s2 + s1;
        s2 = s2 - s1;
        dst[0 * 4] = s0 + s;
        dst[2 * 4] = s0 - s;
        dst[1 * 4] = 2 * s3 + s2;
        dst[3 * 4] = s3 - 2 * s2;
        src++;
        dst++;
    }
}
141 static int hardthresh_c(PP7Context *p, int16_t *src, int qp)
146 a = src[0] * factor[0];
147 for (i = 1; i < 16; i++) {
148 unsigned int threshold1 = p->thres2[qp][i];
149 unsigned int threshold2 = threshold1 << 1;
151 if (((unsigned)(level + threshold1)) > threshold2)
152 a += level * factor[i];
154 return (a + (1 << 11)) >> 12;
157 static int mediumthresh_c(PP7Context *p, int16_t *src, int qp)
162 a = src[0] * factor[0];
163 for (i = 1; i < 16; i++) {
164 unsigned int threshold1 = p->thres2[qp][i];
165 unsigned int threshold2 = threshold1 << 1;
167 if (((unsigned)(level + threshold1)) > threshold2) {
168 if (((unsigned)(level + 2 * threshold1)) > 2 * threshold2)
169 a += level * factor[i];
172 a += 2 * (level - (int)threshold1) * factor[i];
174 a += 2 * (level + (int)threshold1) * factor[i];
178 return (a + (1 << 11)) >> 12;
181 static int softthresh_c(PP7Context *p, int16_t *src, int qp)
186 a = src[0] * factor[0];
187 for (i = 1; i < 16; i++) {
188 unsigned int threshold1 = p->thres2[qp][i];
189 unsigned int threshold2 = threshold1 << 1;
191 if (((unsigned)(level + threshold1)) > threshold2) {
193 a += (level - (int)threshold1) * factor[i];
195 a += (level + (int)threshold1) * factor[i];
198 return (a + (1 << 11)) >> 12;
201 static void filter(PP7Context *p, uint8_t *dst, uint8_t *src,
202 int dst_stride, int src_stride,
203 int width, int height,
204 uint8_t *qp_store, int qp_stride, int is_luma)
207 const int stride = is_luma ? p->temp_stride : ((width + 16 + 15) & (~15));
208 uint8_t *p_src = p->src + 8 * stride;
209 int16_t *block = (int16_t *)p->src;
210 int16_t *temp = (int16_t *)(p->src + 32);
212 if (!src || !dst) return;
213 for (y = 0; y < height; y++) {
214 int index = 8 + 8 * stride + y * stride;
215 memcpy(p_src + index, src + y * src_stride, width);
216 for (x = 0; x < 8; x++) {
217 p_src[index - x - 1]= p_src[index + x ];
218 p_src[index + width + x ]= p_src[index + width - x - 1];
221 for (y = 0; y < 8; y++) {
222 memcpy(p_src + ( 7 - y ) * stride, p_src + ( y + 8 ) * stride, stride);
223 memcpy(p_src + (height + 8 + y) * stride, p_src + (height - y + 7) * stride, stride);
225 //FIXME (try edge emu)
227 for (y = 0; y < height; y++) {
228 for (x = -8; x < 0; x += 4) {
229 const int index = x + y * stride + (8 - 3) * (1 + stride) + 8; //FIXME silly offset
230 uint8_t *src = p_src + index;
231 int16_t *tp = temp + 4 * x;
233 dctA_c(tp + 4 * 8, src, stride);
235 for (x = 0; x < width; ) {
236 const int qps = 3 + is_luma;
238 int end = FFMIN(x + 8, width);
243 qp = qp_store[ (FFMIN(x, width - 1) >> qps) + (FFMIN(y, height - 1) >> qps) * qp_stride];
244 qp = ff_norm_qscale(qp, p->qscale_type);
246 for (; x < end; x++) {
247 const int index = x + y * stride + (8 - 3) * (1 + stride) + 8; //FIXME silly offset
248 uint8_t *src = p_src + index;
249 int16_t *tp = temp + 4 * x;
253 dctA_c(tp + 4 * 8, src, stride);
257 v = p->requantize(p, block, qp);
258 v = (v + dither[y & 7][x & 7]) >> 6;
259 if ((unsigned)v > 255)
261 dst[x + y * dst_stride] = v;
267 static int query_formats(AVFilterContext *ctx)
269 static const enum AVPixelFormat pix_fmts[] = {
270 AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
271 AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
272 AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
273 AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
274 AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
276 AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
279 AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
281 return AVERROR(ENOMEM);
282 return ff_set_common_formats(ctx, fmts_list);
285 static int config_input(AVFilterLink *inlink)
287 AVFilterContext *ctx = inlink->dst;
288 PP7Context *pp7 = ctx->priv;
289 const int h = FFALIGN(inlink->h + 16, 16);
290 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
292 pp7->hsub = desc->log2_chroma_w;
293 pp7->vsub = desc->log2_chroma_h;
295 pp7->temp_stride = FFALIGN(inlink->w + 16, 16);
296 pp7->src = av_malloc_array(pp7->temp_stride, (h + 8) * sizeof(uint8_t));
299 return AVERROR(ENOMEM);
304 case 0: pp7->requantize = hardthresh_c; break;
305 case 1: pp7->requantize = softthresh_c; break;
307 case 2: pp7->requantize = mediumthresh_c; break;
313 ff_pp7_init_x86(pp7);
318 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
320 AVFilterContext *ctx = inlink->dst;
321 PP7Context *pp7 = ctx->priv;
322 AVFilterLink *outlink = ctx->outputs[0];
326 uint8_t *qp_table = NULL;
329 qp_table = av_frame_get_qp_table(in, &qp_stride, &pp7->qscale_type);
331 if (!ctx->is_disabled) {
332 const int cw = AV_CEIL_RSHIFT(inlink->w, pp7->hsub);
333 const int ch = AV_CEIL_RSHIFT(inlink->h, pp7->vsub);
335 /* get a new frame if in-place is not possible or if the dimensions
336 * are not multiple of 8 */
337 if (!av_frame_is_writable(in) || (inlink->w & 7) || (inlink->h & 7)) {
338 const int aligned_w = FFALIGN(inlink->w, 8);
339 const int aligned_h = FFALIGN(inlink->h, 8);
341 out = ff_get_video_buffer(outlink, aligned_w, aligned_h);
344 return AVERROR(ENOMEM);
346 av_frame_copy_props(out, in);
347 out->width = in->width;
348 out->height = in->height;
351 if (qp_table || pp7->qp) {
353 filter(pp7, out->data[0], in->data[0], out->linesize[0], in->linesize[0],
354 inlink->w, inlink->h, qp_table, qp_stride, 1);
355 filter(pp7, out->data[1], in->data[1], out->linesize[1], in->linesize[1],
356 cw, ch, qp_table, qp_stride, 0);
357 filter(pp7, out->data[2], in->data[2], out->linesize[2], in->linesize[2],
358 cw, ch, qp_table, qp_stride, 0);
365 av_image_copy_plane(out->data[3], out->linesize[3],
366 in ->data[3], in ->linesize[3],
367 inlink->w, inlink->h);
370 return ff_filter_frame(outlink, out);
373 static av_cold void uninit(AVFilterContext *ctx)
375 PP7Context *pp7 = ctx->priv;
379 static const AVFilterPad pp7_inputs[] = {
382 .type = AVMEDIA_TYPE_VIDEO,
383 .config_props = config_input,
384 .filter_frame = filter_frame,
389 static const AVFilterPad pp7_outputs[] = {
392 .type = AVMEDIA_TYPE_VIDEO,
397 AVFilter ff_vf_pp7 = {
399 .description = NULL_IF_CONFIG_SMALL("Apply Postprocessing 7 filter."),
400 .priv_size = sizeof(PP7Context),
402 .query_formats = query_formats,
403 .inputs = pp7_inputs,
404 .outputs = pp7_outputs,
405 .priv_class = &pp7_class,
406 .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,