2 * Copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
3 * Copyright (c) 2014 Arwa Arif <arwaarif1994@gmail.com>
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License along
18 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * Postprocessing filter - 7
26 * Originally written by Michael Niedermayer for the MPlayer
27 * project, and ported by Arwa Arif for FFmpeg.
30 #include "libavutil/avassert.h"
31 #include "libavutil/imgutils.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/pixdesc.h"
36 #include "libavcodec/avcodec.h"
44 #define OFFSET(x) offsetof(PP7Context, x)
45 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
46 static const AVOption pp7_options[] = {
47 { "qp", "force a constant quantizer parameter", OFFSET(qp), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 64, FLAGS },
48 { "mode", "set thresholding mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MODE_MEDIUM}, 0, 2, FLAGS, "mode" },
49 { "hard", "hard thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_HARD}, INT_MIN, INT_MAX, FLAGS, "mode" },
50 { "soft", "soft thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_SOFT}, INT_MIN, INT_MAX, FLAGS, "mode" },
51 { "medium", "medium thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_MEDIUM}, INT_MIN, INT_MAX, FLAGS, "mode" },
55 AVFILTER_DEFINE_CLASS(pp7);
57 DECLARE_ALIGNED(8, static const uint8_t, dither)[8][8] = {
58 { 0, 48, 12, 60, 3, 51, 15, 63, },
59 { 32, 16, 44, 28, 35, 19, 47, 31, },
60 { 8, 56, 4, 52, 11, 59, 7, 55, },
61 { 40, 24, 36, 20, 43, 27, 39, 23, },
62 { 2, 50, 14, 62, 1, 49, 13, 61, },
63 { 34, 18, 46, 30, 33, 17, 45, 29, },
64 { 10, 58, 6, 54, 9, 57, 5, 53, },
65 { 42, 26, 38, 22, 41, 25, 37, 21, },
/* Normalization constants for the 7-point dctA/dctB transform:
 * N0/N1/N2 are the squared norms of the three distinct basis vectors,
 * SN0..SN2 their square roots, and N the fixed-point scale (2^16). */
#define N0 4
#define N1 5
#define N2 10
#define SN0 2
#define SN1 2.2360679775
#define SN2 3.16227766017
#define N (1 << 16)

/* Per-coefficient gain applied after thresholding (fixed-point, scale N). */
static const int factor[16] = {
    N / (N0 * N0), N / (N0 * N1), N / (N0 * N0), N / (N0 * N2),
    N / (N1 * N0), N / (N1 * N1), N / (N1 * N0), N / (N1 * N2),
    N / (N0 * N0), N / (N0 * N1), N / (N0 * N0), N / (N0 * N2),
    N / (N2 * N0), N / (N2 * N1), N / (N2 * N0), N / (N2 * N2),
};

/* Per-coefficient base threshold (fixed-point, scale N). */
static const int thres[16] = {
    N / (SN0 * SN0), N / (SN0 * SN2), N / (SN0 * SN0), N / (SN0 * SN2),
    N / (SN2 * SN0), N / (SN2 * SN2), N / (SN2 * SN0), N / (SN2 * SN2),
    N / (SN0 * SN0), N / (SN0 * SN2), N / (SN0 * SN0), N / (SN0 * SN2),
    N / (SN2 * SN0), N / (SN2 * SN2), N / (SN2 * SN0), N / (SN2 * SN2),
};
90 static inline int norm_qscale(int qscale, int type)
93 case FF_QSCALE_TYPE_MPEG1: return qscale;
94 case FF_QSCALE_TYPE_MPEG2: return qscale >> 1;
95 case FF_QSCALE_TYPE_H264: return qscale >> 2;
96 case FF_QSCALE_TYPE_VP56: return (63 - qscale + 2) >> 2;
101 static void init_thres2(PP7Context *p)
104 int bias = 0; //FIXME
106 for (qp = 0; qp < 99; qp++) {
107 for (i = 0; i < 16; i++) {
108 p->thres2[qp][i] = ((i&1) ? SN2 : SN0) * ((i&4) ? SN2 : SN0) * FFMAX(1, qp) * (1<<2) - 1 - bias;
/**
 * Vertical 7-point transform of 4 adjacent pixel columns.
 * Reads 7 rows (src[0..6*stride]) per column, folds symmetric rows and
 * writes 4 coefficients per column into dst (4 int16 per column).
 */
static inline void dctA_c(int16_t *dst, uint8_t *src, int stride)
{
    int i;

    for (i = 0; i < 4; i++) {
        int s0 = src[0 * stride] + src[6 * stride];
        int s1 = src[1 * stride] + src[5 * stride];
        int s2 = src[2 * stride] + src[4 * stride];
        int s3 = src[3 * stride];
        int s4 = s3 + s0;

        s3 = s3 - s0;
        s0 = s1 + s2;
        s2 = s1 - s2;
        dst[0] = s4 + s0;
        dst[2] = s4 - s0;
        dst[1] = 2 * s3 + s2;
        dst[3] = s3 - 2 * s2;

        src++;
        dst += 4;
    }
}
/**
 * Horizontal 7-point transform over the vertically transformed columns.
 * src holds 4 coefficients per column (stride 4); reads 7 columns and
 * writes the 4x4 coefficient block used by the requantizer.
 */
static void dctB_c(int16_t *dst, int16_t *src)
{
    int i;

    for (i = 0; i < 4; i++) {
        int s0 = src[0 * 4] + src[6 * 4];
        int s1 = src[1 * 4] + src[5 * 4];
        int s2 = src[2 * 4] + src[4 * 4];
        int s3 = src[3 * 4];
        int s4 = s3 + s0;

        s3 = s3 - s0;
        s0 = s1 + s2;
        s2 = s1 - s2;
        dst[0 * 4] = s4 + s0;
        dst[2 * 4] = s4 - s0;
        dst[1 * 4] = 2 * s3 + s2;
        dst[3 * 4] = s3 - 2 * s2;

        src++;
        dst++;
    }
}
159 static int hardthresh_c(PP7Context *p, int16_t *src, int qp)
164 a = src[0] * factor[0];
165 for (i = 1; i < 16; i++) {
166 unsigned int threshold1 = p->thres2[qp][i];
167 unsigned int threshold2 = threshold1 << 1;
169 if (((unsigned)(level + threshold1)) > threshold2)
170 a += level * factor[i];
172 return (a + (1 << 11)) >> 12;
175 static int mediumthresh_c(PP7Context *p, int16_t *src, int qp)
180 a = src[0] * factor[0];
181 for (i = 1; i < 16; i++) {
182 unsigned int threshold1 = p->thres2[qp][i];
183 unsigned int threshold2 = threshold1 << 1;
185 if (((unsigned)(level + threshold1)) > threshold2) {
186 if (((unsigned)(level + 2 * threshold1)) > 2 * threshold2)
187 a += level * factor[i];
190 a += 2 * (level - (int)threshold1) * factor[i];
192 a += 2 * (level + (int)threshold1) * factor[i];
196 return (a + (1 << 11)) >> 12;
199 static int softthresh_c(PP7Context *p, int16_t *src, int qp)
204 a = src[0] * factor[0];
205 for (i = 1; i < 16; i++) {
206 unsigned int threshold1 = p->thres2[qp][i];
207 unsigned int threshold2 = threshold1 << 1;
209 if (((unsigned)(level + threshold1)) > threshold2) {
211 a += (level - (int)threshold1) * factor[i];
213 a += (level + (int)threshold1) * factor[i];
216 return (a + (1 << 11)) >> 12;
219 static void filter(PP7Context *p, uint8_t *dst, uint8_t *src,
220 int dst_stride, int src_stride,
221 int width, int height,
222 uint8_t *qp_store, int qp_stride, int is_luma)
225 const int stride = is_luma ? p->temp_stride : ((width + 16 + 15) & (~15));
226 uint8_t *p_src = p->src + 8 * stride;
227 int16_t *block = (int16_t *)p->src;
228 int16_t *temp = (int16_t *)(p->src + 32);
230 if (!src || !dst) return;
231 for (y = 0; y < height; y++) {
232 int index = 8 + 8 * stride + y * stride;
233 memcpy(p_src + index, src + y * src_stride, width);
234 for (x = 0; x < 8; x++) {
235 p_src[index - x - 1]= p_src[index + x ];
236 p_src[index + width + x ]= p_src[index + width - x - 1];
239 for (y = 0; y < 8; y++) {
240 memcpy(p_src + ( 7 - y ) * stride, p_src + ( y + 8 ) * stride, stride);
241 memcpy(p_src + (height + 8 + y) * stride, p_src + (height - y + 7) * stride, stride);
243 //FIXME (try edge emu)
245 for (y = 0; y < height; y++) {
246 for (x = -8; x < 0; x += 4) {
247 const int index = x + y * stride + (8 - 3) * (1 + stride) + 8; //FIXME silly offset
248 uint8_t *src = p_src + index;
249 int16_t *tp = temp + 4 * x;
251 dctA_c(tp + 4 * 8, src, stride);
253 for (x = 0; x < width; ) {
254 const int qps = 3 + is_luma;
256 int end = FFMIN(x + 8, width);
261 qp = qp_store[ (FFMIN(x, width - 1) >> qps) + (FFMIN(y, height - 1) >> qps) * qp_stride];
262 qp = norm_qscale(qp, p->qscale_type);
264 for (; x < end; x++) {
265 const int index = x + y * stride + (8 - 3) * (1 + stride) + 8; //FIXME silly offset
266 uint8_t *src = p_src + index;
267 int16_t *tp = temp + 4 * x;
271 dctA_c(tp + 4 * 8, src, stride);
275 v = p->requantize(p, block, qp);
276 v = (v + dither[y & 7][x & 7]) >> 6;
277 if ((unsigned)v > 255)
279 dst[x + y * dst_stride] = v;
285 static int query_formats(AVFilterContext *ctx)
287 static const enum PixelFormat pix_fmts[] = {
288 AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
289 AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P,
290 AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
291 AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
292 AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
293 AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
295 ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
299 static int config_input(AVFilterLink *inlink)
301 AVFilterContext *ctx = inlink->dst;
302 PP7Context *pp7 = ctx->priv;
303 const int h = FFALIGN(inlink->h + 16, 16);
304 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
306 pp7->hsub = desc->log2_chroma_w;
307 pp7->vsub = desc->log2_chroma_h;
309 pp7->temp_stride = FFALIGN(inlink->w + 16, 16);
310 pp7->src = av_malloc(pp7->temp_stride * (h + 8) * sizeof(uint8_t));
313 return AVERROR(ENOMEM);
318 case 0: pp7->requantize = hardthresh_c; break;
319 case 1: pp7->requantize = softthresh_c; break;
321 case 2: pp7->requantize = mediumthresh_c; break;
327 ff_pp7_init_x86(pp7);
332 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
334 AVFilterContext *ctx = inlink->dst;
335 PP7Context *pp7 = ctx->priv;
336 AVFilterLink *outlink = ctx->outputs[0];
340 uint8_t *qp_table = NULL;
343 qp_table = av_frame_get_qp_table(in, &qp_stride, &pp7->qscale_type);
345 if (!ctx->is_disabled) {
346 const int cw = FF_CEIL_RSHIFT(inlink->w, pp7->hsub);
347 const int ch = FF_CEIL_RSHIFT(inlink->h, pp7->vsub);
349 /* get a new frame if in-place is not possible or if the dimensions
350 * are not multiple of 8 */
351 if (!av_frame_is_writable(in) || (inlink->w & 7) || (inlink->h & 7)) {
352 const int aligned_w = FFALIGN(inlink->w, 8);
353 const int aligned_h = FFALIGN(inlink->h, 8);
355 out = ff_get_video_buffer(outlink, aligned_w, aligned_h);
358 return AVERROR(ENOMEM);
360 av_frame_copy_props(out, in);
363 if (qp_table || pp7->qp) {
365 filter(pp7, out->data[0], in->data[0], out->linesize[0], in->linesize[0],
366 inlink->w, inlink->h, qp_table, qp_stride, 1);
367 filter(pp7, out->data[1], in->data[1], out->linesize[1], in->linesize[1],
368 cw, ch, qp_table, qp_stride, 0);
369 filter(pp7, out->data[2], in->data[2], out->linesize[2], in->linesize[2],
370 cw, ch, qp_table, qp_stride, 0);
377 av_image_copy_plane(out->data[3], out->linesize[3],
378 in ->data[3], in ->linesize[3],
379 inlink->w, inlink->h);
382 return ff_filter_frame(outlink, out);
385 static av_cold void uninit(AVFilterContext *ctx)
387 PP7Context *pp7 = ctx->priv;
391 static const AVFilterPad pp7_inputs[] = {
394 .type = AVMEDIA_TYPE_VIDEO,
395 .config_props = config_input,
396 .filter_frame = filter_frame,
401 static const AVFilterPad pp7_outputs[] = {
404 .type = AVMEDIA_TYPE_VIDEO,
409 AVFilter ff_vf_pp7 = {
411 .description = NULL_IF_CONFIG_SMALL("Apply Postprocessing 7 filter."),
412 .priv_size = sizeof(PP7Context),
414 .query_formats = query_formats,
415 .inputs = pp7_inputs,
416 .outputs = pp7_outputs,
417 .priv_class = &pp7_class,
418 .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,