* Watermark Hook
* Copyright (c) 2005 Marcus Engene myfirstname(at)mylastname.se
*
- * flags to watermark:
+ * parameters for watermark:
* -m nbr = nbr is 0..1. 0 is the default mode, see below.
* -t nbr = nbr is six digit hex. Threshold.
- * -f file = File is the filename of watermark image. You must specify this!
+ * -f file = file is the watermark image filename. You must specify this!
*
* MODE 0:
- * The watermarkpicture works like this. (Assuming colorintencities 0..0xff)
+ * The watermark picture works like this (assuming color intensities 0..0xff):
* Per color do this:
- * If mask color is 0x80, no change to original frame.
- * If mask color is < 0x80 the abs difference is subtracted from frame. If
+ * If mask color is 0x80, no change to the original frame.
+ * If mask color is < 0x80 the abs difference is subtracted from the frame. If
* result < 0, result = 0
- * If mask color is > 0x80 the abs difference is added to frame. If result
+ * If mask color is > 0x80 the abs difference is added to the frame. If result
* > 0xff, result = 0xff
*
- * You can override the 0x80 level with the -t flag. Eg if threshold is 000000
- * the color values of watermark is added to destination.
+ * You can override the 0x80 level with the -t flag. E.g. if threshold is
+ * 000000 the color value of watermark is added to the destination.
*
* This way a mask that is visible both in light pictures and in dark can be
- * made (fex by using a picture generated by gimp and the bump map tool).
+ * made (e.g. by using a picture generated by Gimp and the bump map tool).
*
* An example watermark file is at
* http://engene.se/ffmpeg_watermark.gif
*
* MODE 1:
* Per color do this:
- * If mask color > threshold color, watermark pixel is going to be used.
+ * If mask color > threshold color then the watermark pixel is used.
*
* Example usage:
* ffmpeg -i infile -vhook '/path/watermark.so -f wm.gif' -an out.mov
* ffmpeg -i infile -vhook '/path/watermark.so -f wm.gif -m 1 -t 222222' -an out.mov
*
* Note that the entire vhook argument is encapsulated in ''. This
- * way, arguments to the vhook won't be mixed up with those to ffmpeg.
+ * way, arguments to the vhook won't be mixed up with those for ffmpeg.
*
* This file is part of FFmpeg.
*
#include <unistd.h>
#include <stdarg.h>
-#include "common.h"
-#include "avformat.h"
+#include "libavutil/common.h"
+#include "libavformat/avformat.h"
+#include "libavformat/framehook.h"
+#include "libswscale/swscale.h"
-#include "framehook.h"
-#include "cmdutils.h"
+static int sws_flags = SWS_BICUBIC;
typedef struct {
char filename[2000];
int thrG;
int thrB;
int mode;
+
+ + // This vhook first converts the frame to RGB ...
+ struct SwsContext *toRGB_convert_ctx;
+ // ... then converts a watermark and applies it to the RGB frame ...
+ struct SwsContext *watermark_convert_ctx;
+ + // ... and finally converts the frame back from RGB to the initial format
+ struct SwsContext *fromRGB_convert_ctx;
} ContextInfo;
int get_watermark_picture(ContextInfo *ci, int cleanup);
ContextInfo *ci;
ci = (ContextInfo *) ctx;
- if (ci) get_watermark_picture(ci, 1);
-
+ if (ci) {
+ get_watermark_picture(ci, 1);
+ sws_freeContext(ci->toRGB_convert_ctx);
+ sws_freeContext(ci->watermark_convert_ctx);
+ sws_freeContext(ci->fromRGB_convert_ctx);
+ }
av_free(ctx);
}
int thrG = ci->thrG;
int thrB = ci->thrB;
- if (pix_fmt != PIX_FMT_RGBA32) {
+ if (pix_fmt != PIX_FMT_RGB32) {
int size;
- size = avpicture_get_size(PIX_FMT_RGBA32, src_width, src_height);
+ size = avpicture_get_size(PIX_FMT_RGB32, src_width, src_height);
buf = av_malloc(size);
- avpicture_fill(&picture1, buf, PIX_FMT_RGBA32, src_width, src_height);
- if (img_convert(&picture1, PIX_FMT_RGBA32,
- picture, pix_fmt, src_width, src_height) < 0) {
- av_free(buf);
+ avpicture_fill(&picture1, buf, PIX_FMT_RGB32, src_width, src_height);
+
+ + // if we already have an SWS context, reallocate it if it is not re-usable
+ ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx,
+ src_width, src_height, pix_fmt,
+ src_width, src_height, PIX_FMT_RGB32,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->toRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the toRGB conversion context\n");
return;
}
+
+// img_convert's parameters are: first the 2 destination, then the 4 source
+// sws_scale's parameters are: the context, the 4 source, then the 2 destination
+ sws_scale(ci->toRGB_convert_ctx,
+ picture->data, picture->linesize, 0, src_height,
+ picture1.data, picture1.linesize);
+
pict = &picture1;
}
ym_size = ci->y_size;
// I'll do the *4 => <<2 crap later. Most compilers understand that anyway.
- // According to avcodec.h PIX_FMT_RGBA32 is handled in endian specific manner.
+ // According to avcodec.h PIX_FMT_RGB32 is handled in endian specific manner.
for (y=0; y<src_height; y++) {
offs = y * (src_width * 4);
offsm = (((y * ym_size) / src_height) * 4) * xm_size; // offsm first in maskline. byteoffs!
- if (pix_fmt != PIX_FMT_RGBA32) {
- if (img_convert(picture, pix_fmt,
- &picture1, PIX_FMT_RGBA32, src_width, src_height) < 0) {
+ if (pix_fmt != PIX_FMT_RGB32) {
+ ci->fromRGB_convert_ctx = sws_getCachedContext(ci->fromRGB_convert_ctx,
+ src_width, src_height, PIX_FMT_RGB32,
+ src_width, src_height, pix_fmt,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->fromRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the fromRGB conversion context\n");
+ return;
}
+// img_convert's parameters are: first the 2 destination, then the 4 source
+// sws_scale's parameters are: the context, the 4 source, then the 2 destination
+ sws_scale(ci->fromRGB_convert_ctx,
+ picture1.data, picture1.linesize, 0, src_height,
+ picture->data, picture->linesize);
}
av_free(buf);
uint32_t pixel;
uint32_t pixelm;
- if (pix_fmt != PIX_FMT_RGBA32) {
+ if (pix_fmt != PIX_FMT_RGB32) {
int size;
- size = avpicture_get_size(PIX_FMT_RGBA32, src_width, src_height);
+ size = avpicture_get_size(PIX_FMT_RGB32, src_width, src_height);
buf = av_malloc(size);
- avpicture_fill(&picture1, buf, PIX_FMT_RGBA32, src_width, src_height);
- if (img_convert(&picture1, PIX_FMT_RGBA32,
- picture, pix_fmt, src_width, src_height) < 0) {
- av_free(buf);
+ avpicture_fill(&picture1, buf, PIX_FMT_RGB32, src_width, src_height);
+
+ + // if we already have an SWS context, reallocate it if it is not re-usable
+ ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx,
+ src_width, src_height, pix_fmt,
+ src_width, src_height, PIX_FMT_RGB32,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->toRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the toRGB conversion context\n");
return;
}
+
+// img_convert's parameters are: first the 2 destination, then the 4 source
+// sws_scale's parameters are: the context, the 4 source, then the 2 destination
+ sws_scale(ci->toRGB_convert_ctx,
+ picture->data, picture->linesize, 0, src_height,
+ picture1.data, picture1.linesize);
+
pict = &picture1;
}
ym_size = ci->y_size;
// I'll do the *4 => <<2 crap later. Most compilers understand that anyway.
- // According to avcodec.h PIX_FMT_RGBA32 is handled in endian specific manner.
+ // According to avcodec.h PIX_FMT_RGB32 is handled in endian specific manner.
for (y=0; y<src_height; y++) {
offs = y * (src_width * 4);
offsm = (((y * ym_size) / src_height) * 4) * xm_size; // offsm first in maskline. byteoffs!
} // foreach X
} // foreach Y
- if (pix_fmt != PIX_FMT_RGBA32) {
- if (img_convert(picture, pix_fmt,
- &picture1, PIX_FMT_RGBA32, src_width, src_height) < 0) {
+ if (pix_fmt != PIX_FMT_RGB32) {
+ ci->fromRGB_convert_ctx = sws_getCachedContext(ci->fromRGB_convert_ctx,
+ src_width, src_height, PIX_FMT_RGB32,
+ src_width, src_height, pix_fmt,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->fromRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the fromRGB conversion context\n");
+ return;
}
+// img_convert's parameters are: first the 2 destination, then the 4 source
+// sws_scale's parameters are: the context, the 4 source, then the 2 destination
+ sws_scale(ci->fromRGB_convert_ctx,
+ picture1.data, picture1.linesize, 0, src_height,
+ picture->data, picture->linesize);
}
av_free(buf);
{
ContextInfo *ci = (ContextInfo *) ctx;
if (1 == ci->mode) {
- return Process1(ctx, picture, pix_fmt, src_width, src_height, pts);
+ Process1(ctx, picture, pix_fmt, src_width, src_height, pts);
} else {
- return Process0(ctx, picture, pix_fmt, src_width, src_height, pts);
+ Process0(ctx, picture, pix_fmt, src_width, src_height, pts);
}
}
return -1;
}
- // Inform the codec that we can handle truncated bitstreams -- i.e.,
- // bitstreams where frame boundaries can fall in the middle of packets
- if (ci->pCodec->capabilities & CODEC_CAP_TRUNCATED)
- ci->pCodecCtx->flags|=CODEC_FLAG_TRUNCATED;
// Open codec
if(avcodec_open(ci->pCodecCtx, ci->pCodec)<0) {
}
// Determine required buffer size and allocate buffer
- ci->numBytes = avpicture_get_size(PIX_FMT_RGBA32, ci->pCodecCtx->width,
+ ci->numBytes = avpicture_get_size(PIX_FMT_RGB32, ci->pCodecCtx->width,
ci->pCodecCtx->height);
ci->buffer = av_malloc(ci->numBytes);
// Assign appropriate parts of buffer to image planes in pFrameRGB
- avpicture_fill((AVPicture *)ci->pFrameRGB, ci->buffer, PIX_FMT_RGBA32,
+ avpicture_fill((AVPicture *)ci->pFrameRGB, ci->buffer, PIX_FMT_RGB32,
ci->pCodecCtx->width, ci->pCodecCtx->height);
}
// TODO loop, pingpong etc?
// Did we get a video frame?
if(ci->frameFinished)
{
- // Convert the image from its native format to RGBA32
- img_convert((AVPicture *)ci->pFrameRGB, PIX_FMT_RGBA32,
- (AVPicture*)(ci->pFrame), ci->pCodecCtx->pix_fmt, ci->pCodecCtx->width,
- ci->pCodecCtx->height);
+ // Convert the image from its native format to RGB32
+ ci->watermark_convert_ctx =
+ sws_getCachedContext(ci->watermark_convert_ctx,
+ ci->pCodecCtx->width, ci->pCodecCtx->height, ci->pCodecCtx->pix_fmt,
+ ci->pCodecCtx->width, ci->pCodecCtx->height, PIX_FMT_RGB32,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->watermark_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the watermark conversion context\n");
+ return -1;
+ }
+// img_convert's parameters are: first the 2 destination, then the 4 source
+// sws_scale's parameters are: the context, the 4 source, then the 2 destination
+ sws_scale(ci->watermark_convert_ctx,
+ ci->pFrame->data, ci->pFrame->linesize, 0, ci->pCodecCtx->height,
+ ci->pFrameRGB->data, ci->pFrameRGB->linesize);
// Process the video frame (save to disk etc.)
//fprintf(stderr,"banan() New frame!\n");