diff --git a/libavcodec/libmp3lame.c b/libavcodec/libmp3lame.c
index 365b6395f8f4f5ef3250e234a0926ffb201647c5..7a76d1fe37957ed3d6b65ecde8b75839c31e3ed2 100644
--- a/libavcodec/libmp3lame.c
+++ b/libavcodec/libmp3lame.c
 
 #include <lame/lame.h>
 
+#include "libavutil/audioconvert.h"
+#include "libavutil/common.h"
 #include "libavutil/intreadwrite.h"
 #include "libavutil/log.h"
 #include "libavutil/opt.h"
 #include "avcodec.h"
+#include "audio_frame_queue.h"
 #include "internal.h"
 #include "mpegaudio.h"
 #include "mpegaudiodecheader.h"
 
 typedef struct LAMEContext {
     AVClass *class;
+    AVCodecContext *avctx;
     lame_global_flags *gfp;
     uint8_t buffer[BUFFER_SIZE];
     int buffer_index;
     int reservoir;
+    void *planar_samples[2];
+    AudioFrameQueue afq;
 } LAMEContext;
 
 
@@ -49,7 +55,13 @@ static av_cold int mp3lame_encode_close(AVCodecContext *avctx)
 {
     LAMEContext *s = avctx->priv_data;
 
+#if FF_API_OLD_ENCODE_AUDIO
     av_freep(&avctx->coded_frame);
+#endif
+    av_freep(&s->planar_samples[0]);
+    av_freep(&s->planar_samples[1]);
+
+    ff_af_queue_close(&s->afq);
 
     lame_close(s->gfp);
     return 0;
@@ -60,15 +72,12 @@ static av_cold int mp3lame_encode_init(AVCodecContext *avctx)
     LAMEContext *s = avctx->priv_data;
     int ret;
 
+    s->avctx = avctx;
+
     /* initialize LAME and get defaults */
     if ((s->gfp = lame_init()) == NULL)
         return AVERROR(ENOMEM);
 
-    /* channels */
-    if (avctx->channels > 2) {
-        ret =  AVERROR(EINVAL);
-        goto error;
-    }
     lame_set_num_channels(s->gfp, avctx->channels);
     lame_set_mode(s->gfp, avctx->channels > 1 ? JOINT_STEREO : MONO);
 
@@ -103,12 +112,33 @@ static av_cold int mp3lame_encode_init(AVCodecContext *avctx)
         goto error;
     }
 
+    /* get encoder delay */
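+    /* The extra 528 + 1 samples account for the MP3 decoder's filterbank/MDCT
+     * startup delay, so avctx->delay covers the full encoder+decoder delay. */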
+    avctx->delay = lame_get_encoder_delay(s->gfp) + 528 + 1;
+    ff_af_queue_init(avctx, &s->afq);
+
     avctx->frame_size  = lame_get_framesize(s->gfp);
+
+#if FF_API_OLD_ENCODE_AUDIO
     avctx->coded_frame = avcodec_alloc_frame();
     if (!avctx->coded_frame) {
         ret = AVERROR(ENOMEM);
         goto error;
     }
+#endif
+
+    /* sample format */
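+    /* S16 is passed to LAME interleaved as-is; S32 and float go through
+     * lame_encode_buffer_int()/_float(), which take planar input, so
+     * per-channel scratch buffers are needed for deinterleaving. */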
+    if (avctx->sample_fmt == AV_SAMPLE_FMT_S32 ||
+        avctx->sample_fmt == AV_SAMPLE_FMT_FLT) {
+        int ch;
+        for (ch = 0; ch < avctx->channels; ch++) {
+            s->planar_samples[ch] = av_malloc(avctx->frame_size *
+                                              av_get_bytes_per_sample(avctx->sample_fmt));
+            if (!s->planar_samples[ch]) {
+                ret = AVERROR(ENOMEM);
+                goto error;
+            }
+        }
+    }
 
     return 0;
 error:
@@ -116,25 +146,76 @@ error:
     return ret;
 }
 
-static int mp3lame_encode_frame(AVCodecContext *avctx, unsigned char *frame,
-                                int buf_size, void *data)
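+/* Deinterleave packed input into the per-channel scratch buffers allocated
+ * at init time; 'scale' maps float samples from (-1.0, 1.0) to the +/-32768.0
+ * range expected by lame_encode_buffer_float(). */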
+#define DEINTERLEAVE(type, scale) do {                  \
+    int ch, i;                                          \
+    for (ch = 0; ch < s->avctx->channels; ch++) {       \
+        const type *input = samples;                    \
+        type      *output = s->planar_samples[ch];      \
+        input += ch;                                    \
+        for (i = 0; i < nb_samples; i++) {              \
+            output[i] = *input * scale;                 \
+            input += s->avctx->channels;                \
+        }                                               \
+    }                                                   \
+} while (0)
+
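+/* S16 input needs no conversion: stereo is fed through LAME's interleaved
+ * entry point, mono through the planar one with no right channel. */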
+static int encode_frame_int16(LAMEContext *s, void *samples, int nb_samples)
+{
+    if (s->avctx->channels > 1) {
+        return lame_encode_buffer_interleaved(s->gfp, samples,
+                                              nb_samples,
+                                              s->buffer + s->buffer_index,
+                                              BUFFER_SIZE - s->buffer_index);
+    } else {
+        return lame_encode_buffer(s->gfp, samples, NULL, nb_samples,
+                                  s->buffer + s->buffer_index,
+                                  BUFFER_SIZE - s->buffer_index);
+    }
+}
+
+static int encode_frame_int32(LAMEContext *s, void *samples, int nb_samples)
+{
+    DEINTERLEAVE(int32_t, 1);
+
+    return lame_encode_buffer_int(s->gfp,
+                                  s->planar_samples[0], s->planar_samples[1],
+                                  nb_samples,
+                                  s->buffer + s->buffer_index,
+                                  BUFFER_SIZE - s->buffer_index);
+}
+
+static int encode_frame_float(LAMEContext *s, void *samples, int nb_samples)
+{
+    DEINTERLEAVE(float, 32768.0f);
+
+    return lame_encode_buffer_float(s->gfp,
+                                    s->planar_samples[0], s->planar_samples[1],
+                                    nb_samples,
+                                    s->buffer + s->buffer_index,
+                                    BUFFER_SIZE - s->buffer_index);
+}
+
+static int mp3lame_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
+                                const AVFrame *frame, int *got_packet_ptr)
 {
     LAMEContext *s = avctx->priv_data;
     MPADecodeHeader hdr;
-    int len;
+    int len, ret;
     int lame_result;
 
-    if (data) {
-        if (avctx->channels > 1) {
-            lame_result = lame_encode_buffer_interleaved(s->gfp, data,
-                                                         avctx->frame_size,
-                                                         s->buffer + s->buffer_index,
-                                                         BUFFER_SIZE - s->buffer_index);
-        } else {
-            lame_result = lame_encode_buffer(s->gfp, data, data,
-                                             avctx->frame_size, s->buffer +
-                                             s->buffer_index, BUFFER_SIZE -
-                                             s->buffer_index);
+    if (frame) {
+        switch (avctx->sample_fmt) {
+        case AV_SAMPLE_FMT_S16:
+            lame_result = encode_frame_int16(s, frame->data[0], frame->nb_samples);
+            break;
+        case AV_SAMPLE_FMT_S32:
+            lame_result = encode_frame_int32(s, frame->data[0], frame->nb_samples);
+            break;
+        case AV_SAMPLE_FMT_FLT:
+            lame_result = encode_frame_float(s, frame->data[0], frame->nb_samples);
+            break;
+        default:
+            return AVERROR_BUG;
         }
     } else {
         lame_result = lame_encode_flush(s->gfp, s->buffer + s->buffer_index,
@@ -150,6 +231,12 @@ static int mp3lame_encode_frame(AVCodecContext *avctx, unsigned char *frame,
     }
     s->buffer_index += lame_result;
 
+    /* add current frame to the queue */
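+    /* Its pts/duration will be pulled back out of the queue when the
+     * corresponding (delayed) packet is emitted below. */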
+    if (frame) {
+        if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
+            return ret;
+    }
+
     /* Move 1 frame from the LAME buffer to the output packet, if available.
        We have to parse the first frame header in the output buffer to
        determine the frame size. */
@@ -163,18 +250,28 @@ static int mp3lame_encode_frame(AVCodecContext *avctx, unsigned char *frame,
     av_dlog(avctx, "in:%d packet-len:%d index:%d\n", avctx->frame_size, len,
             s->buffer_index);
     if (len <= s->buffer_index) {
-        memcpy(frame, s->buffer, len);
+        if ((ret = ff_alloc_packet(avpkt, len))) {
+            av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
+            return ret;
+        }
+        memcpy(avpkt->data, s->buffer, len);
         s->buffer_index -= len;
         memmove(s->buffer, s->buffer + len, s->buffer_index);
-        return len;
-    } else
-        return 0;
+
+        /* Get the next frame pts/duration */
+        ff_af_queue_remove(&s->afq, avctx->frame_size, &avpkt->pts,
+                           &avpkt->duration);
+
+        avpkt->size = len;
+        *got_packet_ptr = 1;
+    }
+    return 0;
 }
 
 #define OFFSET(x) offsetof(LAMEContext, x)
 #define AE AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
 static const AVOption options[] = {
-    { "reservoir", "Use bit reservoir.", OFFSET(reservoir), AV_OPT_TYPE_INT, { 1 }, 0, 1, AE },
+    { "reservoir", "Use bit reservoir.", OFFSET(reservoir), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, AE },
     { NULL },
 };
 
@@ -197,15 +294,20 @@ static const int libmp3lame_sample_rates[] = {
 AVCodec ff_libmp3lame_encoder = {
     .name                  = "libmp3lame",
     .type                  = AVMEDIA_TYPE_AUDIO,
-    .id                    = CODEC_ID_MP3,
+    .id                    = AV_CODEC_ID_MP3,
     .priv_data_size        = sizeof(LAMEContext),
     .init                  = mp3lame_encode_init,
-    .encode                = mp3lame_encode_frame,
+    .encode2               = mp3lame_encode_frame,
     .close                 = mp3lame_encode_close,
-    .capabilities          = CODEC_CAP_DELAY,
-    .sample_fmts           = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16,
+    .capabilities          = CODEC_CAP_DELAY | CODEC_CAP_SMALL_LAST_FRAME,
+    .sample_fmts           = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S32,
+                                                             AV_SAMPLE_FMT_FLT,
+                                                             AV_SAMPLE_FMT_S16,
                                                              AV_SAMPLE_FMT_NONE },
     .supported_samplerates = libmp3lame_sample_rates,
+    .channel_layouts       = (const uint64_t[]) { AV_CH_LAYOUT_MONO,
+                                                  AV_CH_LAYOUT_STEREO,
+                                                  0 },
     .long_name             = NULL_IF_CONFIG_SMALL("libmp3lame MP3 (MPEG audio layer 3)"),
     .priv_class            = &libmp3lame_class,
     .defaults              = libmp3lame_defaults,