/*
 * Copyright (c) 2004-2007 Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intreadwrite.h"
#include "libavutil/tree.h"
#include "libavcodec/mpegaudiodata.h"
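/* Predict the first few bytes of a packet for the given codec and packet size
 * (MPEG-4/MPEG-1/2/H.264 start codes, MP2/MP3 frame headers), so that a
 * matching elision header can be chosen and those bytes dropped from the
 * frame payload and signalled via header_idx instead. */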
static int find_expected_header(AVCodecContext *c, int size, int key_frame, uint8_t out[64]){
    int sample_rate= c->sample_rate;

    if(c->codec_id == CODEC_ID_MPEG4){
    }else if(c->codec_id == CODEC_ID_MPEG1VIDEO || c->codec_id == CODEC_ID_MPEG2VIDEO){
    }else if(c->codec_id == CODEC_ID_H264){
    }else if(c->codec_id == CODEC_ID_MP3 || c->codec_id == CODEC_ID_MP2){
        int lsf, mpeg25, sample_rate_index, bitrate_index, frame_size;
        int layer= c->codec_id == CODEC_ID_MP3 ? 3 : 2;
        unsigned int header= 0xFFF00000;

        lsf    = sample_rate < (24000+32000)/2;
        mpeg25 = sample_rate < (12000+16000)/2;
        sample_rate <<= lsf + mpeg25;
        if     (sample_rate < (32000 + 44100)/2) sample_rate_index=2;
        else if(sample_rate < (44100 + 48000)/2) sample_rate_index=0;
        else                                     sample_rate_index=1;

        sample_rate= ff_mpa_freq_tab[sample_rate_index] >> (lsf + mpeg25);

        for(bitrate_index=2; bitrate_index<30; bitrate_index++){
            frame_size = ff_mpa_bitrate_tab[lsf][layer-1][bitrate_index>>1];
            frame_size = (frame_size * 144000) / (sample_rate << lsf) + (bitrate_index&1);

            if(frame_size == size)

        header |= (4-layer)<<17;
        header |= 1<<16; //no crc
            return 2; //we guess there is no CRC; if there is one, the user clearly does not care about overhead
        if(bitrate_index == 30)
            return -1; //something is wrong ...

        header |= (bitrate_index>>1)<<12;
        header |= sample_rate_index<<10;
        header |= (bitrate_index&1)<<9;

        return 2; //FIXME actually put the needed ones in build_elision_headers()
        return 3; //we guess that the private bit is not set
        //FIXME the above assumptions should be checked; if they turn out to be false too often, something should be done
static int find_header_idx(AVFormatContext *s, AVCodecContext *c, int size, int frame_type){
    NUTContext *nut = s->priv_data;
    int len= find_expected_header(c, size, frame_type, out);

//av_log(NULL, AV_LOG_ERROR, "expected_h len=%d size=%d codec_id=%d\n", len, size, c->codec_id);

    for(i=1; i<nut->header_count; i++){
        if(   len == nut->header_len[i]
           && !memcmp(out, nut->header[i], len)){
//            av_log(NULL, AV_LOG_ERROR, "found %d\n", i);

//    av_log(NULL, AV_LOG_ERROR, "nothing found\n");
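/* Fill nut->header[] with a small fixed set of common packet prefixes
 * (MPEG start codes and MP2/MP3 sync headers); index 0 is reserved for
 * "no elision", hence header_count = 7 for 6 table entries. */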
static void build_elision_headers(AVFormatContext *s){
    NUTContext *nut = s->priv_data;
    //FIXME write a 2pass mode to find the maximal headers
    static const uint8_t headers[][5]={
        {3, 0x00, 0x00, 0x01},
        {4, 0x00, 0x00, 0x01, 0xB6},
        {2, 0xFF, 0xFA}, //mp3+crc
        {2, 0xFF, 0xFB}, //mp3
        {2, 0xFF, 0xFC}, //mp2+crc
        {2, 0xFF, 0xFD}, //mp2

    nut->header_count= 7;
    for(i=1; i<nut->header_count; i++){
        nut->header_len[i]= headers[i-1][0];
        nut->header    [i]= &headers[i-1][1];
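/* Populate the 256-entry frame_code table. Each one-byte frame code implies a
 * combination of flags, stream_id, size_lsb/size_mul, pts_delta and header_idx,
 * so common frame parameters cost a single byte. 0x00, 0xFF and 'N' (the first
 * byte of NUT startcodes) are marked invalid below. */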
static void build_frame_code(AVFormatContext *s){
    NUTContext *nut = s->priv_data;
    int key_frame, index, pred, stream_id;
    int keyframe_0_esc= s->nb_streams > 2;

    ft= &nut->frame_code[start];
    ft->flags= FLAG_CODED;

        /* keyframe = 0 escape */
        FrameCode *ft= &nut->frame_code[start];
        ft->flags= FLAG_STREAM_ID | FLAG_SIZE_MSB | FLAG_CODED_PTS;

    for(stream_id= 0; stream_id<s->nb_streams; stream_id++){
        int start2= start + (end-start)*stream_id / s->nb_streams;
        int end2  = start + (end-start)*(stream_id+1) / s->nb_streams;
        AVCodecContext *codec = s->streams[stream_id]->codec;
        int is_audio= codec->codec_type == AVMEDIA_TYPE_AUDIO;
        int intra_only= /*codec->intra_only || */is_audio;

        for(key_frame=0; key_frame<2; key_frame++){
            if(intra_only && keyframe_0_esc && key_frame==0)

                FrameCode *ft= &nut->frame_code[start2];
                ft->flags= FLAG_KEY*key_frame;
                ft->flags|= FLAG_SIZE_MSB | FLAG_CODED_PTS;
                ft->stream_id= stream_id;

                ft->header_idx= find_header_idx(s, codec, -1, key_frame);

        key_frame= intra_only;

            int frame_bytes= codec->frame_size*(int64_t)codec->bit_rate / (8*codec->sample_rate);

            for(pts=0; pts<2; pts++){
                for(pred=0; pred<2; pred++){
                    FrameCode *ft= &nut->frame_code[start2];
                    ft->flags= FLAG_KEY*key_frame;
                    ft->stream_id= stream_id;
                    ft->size_mul=frame_bytes + 2;
                    ft->size_lsb=frame_bytes + pred;

                    ft->header_idx= find_header_idx(s, codec, frame_bytes + pred, key_frame);

            FrameCode *ft= &nut->frame_code[start2];
            ft->flags= FLAG_KEY | FLAG_SIZE_MSB;
            ft->stream_id= stream_id;

        if(codec->has_b_frames){
        }else if(codec->codec_id == CODEC_ID_VORBIS){

        for(pred=0; pred<pred_count; pred++){
            int start3= start2 + (end2-start2)*pred / pred_count;
            int end3  = start2 + (end2-start2)*(pred+1) / pred_count;

            for(index=start3; index<end3; index++){
                FrameCode *ft= &nut->frame_code[index];
                ft->flags= FLAG_KEY*key_frame;
                ft->flags|= FLAG_SIZE_MSB;
                ft->stream_id= stream_id;
                //FIXME use single byte size and pred from last
                ft->size_mul= end3-start3;
                ft->size_lsb= index - start3;
                ft->pts_delta= pred_table[pred];

                ft->header_idx= find_header_idx(s, codec, -1, key_frame);

    memmove(&nut->frame_code['N'+1], &nut->frame_code['N'], sizeof(FrameCode)*(255-'N'));
    nut->frame_code[ 0].flags=
    nut->frame_code[255].flags=
    nut->frame_code['N'].flags= FLAG_INVALID;
/**
 * Gets the length in bytes which is needed to store val as v.
 */
static int get_length(uint64_t val){
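/* NUT "v" coding: the value is written base 128, most significant bits first;
 * every byte except the last has its top bit set as a continuation marker,
 * and the final byte carries the low 7 bits. */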
static void put_v(ByteIOContext *bc, uint64_t val){
    int i= get_length(val);

        put_byte(bc, 128 | (val>>(7*i)));

    put_byte(bc, val&127);
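/* Universal timestamp: the timestamp and its time base index are multiplexed
 * into a single value as t * time_base_count + time_base_index, so one v-coded
 * number identifies both. */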
static void put_tt(NUTContext *nut, StreamContext *nus, ByteIOContext *bc, uint64_t val){
    val *= nut->time_base_count;
    val += nus->time_base - nut->time_base;
/**
 * Stores a string as vb.
 */
static void put_str(ByteIOContext *bc, const char *string){
    int len= strlen(string);

    put_buffer(bc, string, len);
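/* Signed values are mapped to unsigned ones before v coding:
 * 0 -> 0, 1 -> 1, -1 -> 2, 2 -> 3, -2 -> 4, ... */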
static void put_s(ByteIOContext *bc, int64_t val){
    put_v(bc, 2*FFABS(val) - (val>0));
static inline void put_v_trace(ByteIOContext *bc, uint64_t v, char *file, char *func, int line){
    av_log(NULL, AV_LOG_DEBUG, "put_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);

static inline void put_s_trace(ByteIOContext *bc, int64_t v, char *file, char *func, int line){
    av_log(NULL, AV_LOG_DEBUG, "put_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);

#define put_v(bc, v) put_v_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#define put_s(bc, v) put_s_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
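/* Write one NUT packet: the 64-bit startcode, a v-coded forward pointer (the
 * payload size, plus 4 when a payload checksum follows), a CRC of the packet
 * header where the spec requires one, then the payload and, if
 * calculate_checksum is set, a CRC-32 (generator 0x04C11DB7) of the payload. */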
//FIXME remove calculate_checksum
static void put_packet(NUTContext *nut, ByteIOContext *bc, ByteIOContext *dyn_bc, int calculate_checksum, uint64_t startcode){
    uint8_t *dyn_buf=NULL;
    int dyn_size= url_close_dyn_buf(dyn_bc, &dyn_buf);
    int forw_ptr= dyn_size + 4*calculate_checksum;

        init_checksum(bc, ff_crc04C11DB7_update, 0);
    put_be64(bc, startcode);

        put_le32(bc, get_checksum(bc));

    if(calculate_checksum)
        init_checksum(bc, ff_crc04C11DB7_update, 0);
    put_buffer(bc, dyn_buf, dyn_size);
    if(calculate_checksum)
        put_le32(bc, get_checksum(bc));
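/* The main header carries the format version, stream count, max_distance, the
 * time base list and the frame_code table. The 256 table entries are written
 * run-length coded: tmp_fields says how many of the per-entry fields follow,
 * and only runs whose fields differ from the previous run are spelled out. */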
static void write_mainheader(NUTContext *nut, ByteIOContext *bc){
    int i, j, tmp_pts, tmp_flags, tmp_stream, tmp_mul, tmp_size, tmp_fields, tmp_head_idx;

    put_v(bc, 3); /* version */
    put_v(bc, nut->avf->nb_streams);
    put_v(bc, nut->max_distance);
    put_v(bc, nut->time_base_count);

    for(i=0; i<nut->time_base_count; i++){
        put_v(bc, nut->time_base[i].num);
        put_v(bc, nut->time_base[i].den);

    tmp_match= 1-(1LL<<62);

        if(tmp_pts      != nut->frame_code[i].pts_delta) tmp_fields=1;
        if(tmp_mul      != nut->frame_code[i].size_mul ) tmp_fields=2;
        if(tmp_stream   != nut->frame_code[i].stream_id) tmp_fields=3;
        if(tmp_size     != nut->frame_code[i].size_lsb ) tmp_fields=4;
//        if(tmp_res      != nut->frame_code[i].res      ) tmp_fields=5;
        if(tmp_head_idx != nut->frame_code[i].header_idx) tmp_fields=8;

        tmp_pts     = nut->frame_code[i].pts_delta;
        tmp_flags   = nut->frame_code[i].flags;
        tmp_stream  = nut->frame_code[i].stream_id;
        tmp_mul     = nut->frame_code[i].size_mul;
        tmp_size    = nut->frame_code[i].size_lsb;
//        tmp_res     = nut->frame_code[i].res;
        tmp_head_idx= nut->frame_code[i].header_idx;

        for(j=0; i<256; j++,i++){
            if(nut->frame_code[i].pts_delta  != tmp_pts   ) break;
            if(nut->frame_code[i].flags      != tmp_flags ) break;
            if(nut->frame_code[i].stream_id  != tmp_stream) break;
            if(nut->frame_code[i].size_mul   != tmp_mul   ) break;
            if(nut->frame_code[i].size_lsb   != tmp_size+j) break;
//            if(nut->frame_code[i].res        != tmp_res   ) break;
            if(nut->frame_code[i].header_idx != tmp_head_idx) break;

        if(j != tmp_mul - tmp_size) tmp_fields=6;

        put_v(bc, tmp_flags);
        put_v(bc, tmp_fields);
        if(tmp_fields>0) put_s(bc, tmp_pts);
        if(tmp_fields>1) put_v(bc, tmp_mul);
        if(tmp_fields>2) put_v(bc, tmp_stream);
        if(tmp_fields>3) put_v(bc, tmp_size);
        if(tmp_fields>4) put_v(bc, 0 /*tmp_res*/);
        if(tmp_fields>5) put_v(bc, j);
        if(tmp_fields>6) put_v(bc, tmp_match);
        if(tmp_fields>7) put_v(bc, tmp_head_idx);

    put_v(bc, nut->header_count-1);
    for(i=1; i<nut->header_count; i++){
        put_v(bc, nut->header_len[i]);
        put_buffer(bc, nut->header[i], nut->header_len[i]);
static int write_streamheader(AVFormatContext *avctx, ByteIOContext *bc, AVStream *st, int i){
    NUTContext *nut = avctx->priv_data;
    AVCodecContext *codec = st->codec;

    switch(codec->codec_type){
        case AVMEDIA_TYPE_VIDEO:    put_v(bc, 0); break;
        case AVMEDIA_TYPE_AUDIO:    put_v(bc, 1); break;
        case AVMEDIA_TYPE_SUBTITLE: put_v(bc, 2); break;
        default:                    put_v(bc, 3); break;

    if (codec->codec_tag){
        put_le32(bc, codec->codec_tag);

        av_log(avctx, AV_LOG_ERROR, "No codec tag defined for stream %d\n", i);
        return AVERROR(EINVAL);

    put_v(bc, nut->stream[i].time_base - nut->time_base);
    put_v(bc, nut->stream[i].msb_pts_shift);
    put_v(bc, nut->stream[i].max_pts_distance);
    put_v(bc, codec->has_b_frames);
    put_byte(bc, 0); /* flags: 0x1 - fixed_fps, 0x2 - index_present */

    put_v(bc, codec->extradata_size);
    put_buffer(bc, codec->extradata, codec->extradata_size);

    switch(codec->codec_type){
        case AVMEDIA_TYPE_AUDIO:
            put_v(bc, codec->sample_rate);
            put_v(bc, codec->channels);
        case AVMEDIA_TYPE_VIDEO:
            put_v(bc, codec->width);
            put_v(bc, codec->height);

            if(st->sample_aspect_ratio.num<=0 || st->sample_aspect_ratio.den<=0){

                put_v(bc, st->sample_aspect_ratio.num);
                put_v(bc, st->sample_aspect_ratio.den);

            put_v(bc, 0); /* csp type -- unknown */
static int add_info(ByteIOContext *bc, const char *type, const char *value){

static int write_globalinfo(NUTContext *nut, ByteIOContext *bc){
    AVFormatContext *s= nut->avf;
    AVMetadataTag *t = NULL;
    ByteIOContext *dyn_bc;
    uint8_t *dyn_buf=NULL;
    int count=0, dyn_size;
    int ret = url_open_dyn_buf(&dyn_bc);

    while ((t = av_metadata_get(s->metadata, "", t, AV_METADATA_IGNORE_SUFFIX)))
        count += add_info(dyn_bc, t->key, t->value);
    put_v(bc, 0); //stream_id_plus1
    put_v(bc, 0); //chapter_id
    put_v(bc, 0); //timestamp_start
    put_v(bc, 0); //length

    dyn_size= url_close_dyn_buf(dyn_bc, &dyn_buf);
    put_buffer(bc, dyn_buf, dyn_size);
static int write_streaminfo(NUTContext *nut, ByteIOContext *bc, int stream_id){
    AVFormatContext *s= nut->avf;
    AVStream* st = s->streams[stream_id];
    ByteIOContext *dyn_bc;
    uint8_t *dyn_buf=NULL;
    int count=0, dyn_size, i;
    int ret = url_open_dyn_buf(&dyn_bc);

    for (i=0; ff_nut_dispositions[i].flag; ++i) {
        if (st->disposition & ff_nut_dispositions[i].flag)
            count += add_info(dyn_bc, "Disposition", ff_nut_dispositions[i].str);

    dyn_size = url_close_dyn_buf(dyn_bc, &dyn_buf);

        put_v(bc, stream_id + 1); //stream_id_plus1
        put_v(bc, 0); //chapter_id
        put_v(bc, 0); //timestamp_start
        put_v(bc, 0); //length

        put_buffer(bc, dyn_buf, dyn_size);
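/* (Re)write the main header, one stream header per stream and the info
 * packets; write_packet() repeats this once the file grows past
 * 1<<(20+3*header_count) bytes, so damaged headers can be recovered from a
 * later copy. Resetting last_syncpoint_pos forces a syncpoint right after. */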
static int write_headers(AVFormatContext *avctx, ByteIOContext *bc){
    NUTContext *nut = avctx->priv_data;
    ByteIOContext *dyn_bc;

    ret = url_open_dyn_buf(&dyn_bc);

    write_mainheader(nut, dyn_bc);
    put_packet(nut, bc, dyn_bc, 1, MAIN_STARTCODE);

    for (i=0; i < nut->avf->nb_streams; i++){
        ret = url_open_dyn_buf(&dyn_bc);

        if ((ret = write_streamheader(avctx, dyn_bc, nut->avf->streams[i], i)) < 0)

        put_packet(nut, bc, dyn_bc, 1, STREAM_STARTCODE);

    ret = url_open_dyn_buf(&dyn_bc);

    write_globalinfo(nut, dyn_bc);
    put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);

    for (i = 0; i < nut->avf->nb_streams; i++) {
        ret = url_open_dyn_buf(&dyn_bc);

        ret = write_streaminfo(nut, dyn_bc, i);

            put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);

            url_close_dyn_buf(dyn_bc, &buf);

    nut->last_syncpoint_pos= INT_MIN;
static int write_header(AVFormatContext *s){
    NUTContext *nut = s->priv_data;
    ByteIOContext *bc = s->pb;

    nut->stream   = av_mallocz(sizeof(StreamContext)*s->nb_streams);
    nut->time_base= av_mallocz(sizeof(AVRational   )*s->nb_streams);

    for(i=0; i<s->nb_streams; i++){
        AVStream *st= s->streams[i];

        AVRational time_base;
        ff_parse_specific_params(st->codec, &time_base.den, &ssize, &time_base.num);

        av_set_pts_info(st, 64, time_base.num, time_base.den);

        for(j=0; j<nut->time_base_count; j++){
            if(!memcmp(&time_base, &nut->time_base[j], sizeof(AVRational))){

        nut->time_base[j]= time_base;
        nut->stream[i].time_base= &nut->time_base[j];
        if(j==nut->time_base_count)
            nut->time_base_count++;

        if(av_q2d(time_base) >= 0.001)
            nut->stream[i].msb_pts_shift = 7;
        else
            nut->stream[i].msb_pts_shift = 14;
        nut->stream[i].max_pts_distance= FFMAX(1/av_q2d(time_base), 1);

    nut->max_distance = MAX_DISTANCE;
    build_elision_headers(s);

    assert(nut->frame_code['N'].flags == FLAG_INVALID);

    put_buffer(bc, ID_STRING, strlen(ID_STRING));

    if ((ret = write_headers(s, bc)) < 0)

    put_flush_packet(bc);
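/* Compute the flags a frame code must provide to describe this packet: the
 * keyframe bit, an explicit stream_id, an MSB size part, an explicitly coded
 * pts, a checksum for oversized packets or large pts jumps, and an explicit
 * header_idx when the code's elision header does not match the packet data. */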
static int get_needed_flags(NUTContext *nut, StreamContext *nus, FrameCode *fc, AVPacket *pkt){

    if(pkt->flags & AV_PKT_FLAG_KEY             ) flags |= FLAG_KEY;
    if(pkt->stream_index != fc->stream_id       ) flags |= FLAG_STREAM_ID;
    if(pkt->size / fc->size_mul                 ) flags |= FLAG_SIZE_MSB;
    if(pkt->pts - nus->last_pts != fc->pts_delta) flags |= FLAG_CODED_PTS;
    if(pkt->size > 2*nut->max_distance          ) flags |= FLAG_CHECKSUM;
    if(FFABS(pkt->pts - nus->last_pts)
                         > nus->max_pts_distance) flags |= FLAG_CHECKSUM;
    if(   pkt->size < nut->header_len[fc->header_idx]
       || (pkt->size > 4096 && fc->header_idx)
       || memcmp(pkt->data, nut->header[fc->header_idx], nut->header_len[fc->header_idx]))
        flags |= FLAG_HEADER_IDX;

    return flags | (fc->flags & FLAG_CODED);
static int find_best_header_idx(NUTContext *nut, AVPacket *pkt){

    for(i=1; i<nut->header_count; i++){
        if(   pkt->size >= nut->header_len[i]
           && nut->header_len[i] > best_len
           && !memcmp(pkt->data, nut->header[i], nut->header_len[i])){
            best_len= nut->header_len[i];
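/* Write one frame: after possibly repeating the headers and emitting a
 * syncpoint, all 256 frame codes are scored and the one that encodes this
 * packet in the fewest bytes wins; any fields not implied by that code are
 * then written explicitly after the frame code byte. */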
static int write_packet(AVFormatContext *s, AVPacket *pkt){
    NUTContext *nut = s->priv_data;
    StreamContext *nus= &nut->stream[pkt->stream_index];
    ByteIOContext *bc = s->pb, *dyn_bc;
    int best_length, frame_code, flags, needed_flags, i, header_idx, best_header_idx;
    int key_frame = !!(pkt->flags & AV_PKT_FLAG_KEY);

    if(1LL<<(20+3*nut->header_count) <= url_ftell(bc))
        write_headers(s, bc);

    if(key_frame && !(nus->last_flags & FLAG_KEY))

    if(pkt->size + 30/*FIXME check*/ + url_ftell(bc) >= nut->last_syncpoint_pos + nut->max_distance)

    //FIXME: Ensure store_sp is 1 in the first place.

        Syncpoint *sp, dummy= {.pos= INT64_MAX};

        ff_nut_reset_ts(nut, *nus->time_base, pkt->dts);
        for(i=0; i<s->nb_streams; i++){
            AVStream *st= s->streams[i];
            int64_t dts_tb = av_rescale_rnd(pkt->dts,
                nus->time_base->num * (int64_t)nut->stream[i].time_base->den,
                nus->time_base->den * (int64_t)nut->stream[i].time_base->num,
            int index= av_index_search_timestamp(st, dts_tb, AVSEEK_FLAG_BACKWARD);
            if(index>=0) dummy.pos= FFMIN(dummy.pos, st->index_entries[index].pos);

        if(dummy.pos == INT64_MAX)

        sp= av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pos_cmp,

        nut->last_syncpoint_pos= url_ftell(bc);
        ret = url_open_dyn_buf(&dyn_bc);

        put_tt(nut, nus, dyn_bc, pkt->dts);
        put_v(dyn_bc, sp ? (nut->last_syncpoint_pos - sp->pos)>>4 : 0);
        put_packet(nut, bc, dyn_bc, 1, SYNCPOINT_STARTCODE);

        ff_nut_add_sp(nut, nut->last_syncpoint_pos, 0/*unused*/, pkt->dts);

    assert(nus->last_pts != AV_NOPTS_VALUE);

    coded_pts = pkt->pts & ((1<<nus->msb_pts_shift)-1);
    if(ff_lsb2full(nus, coded_pts) != pkt->pts)
        coded_pts= pkt->pts + (1<<nus->msb_pts_shift);

    best_header_idx= find_best_header_idx(nut, pkt);

    for(i=0; i<256; i++){
        FrameCode *fc= &nut->frame_code[i];
        int flags= fc->flags;

        if(flags & FLAG_INVALID)

        needed_flags= get_needed_flags(nut, nus, fc, pkt);

        if(flags & FLAG_CODED){
            flags = needed_flags;

        if((flags & needed_flags) != needed_flags)

        if((flags ^ needed_flags) & FLAG_KEY)

        if(flags & FLAG_STREAM_ID)
            length+= get_length(pkt->stream_index);

        if(pkt->size % fc->size_mul != fc->size_lsb)

        if(flags & FLAG_SIZE_MSB)
            length += get_length(pkt->size / fc->size_mul);

        if(flags & FLAG_CHECKSUM)

        if(flags & FLAG_CODED_PTS)
            length += get_length(coded_pts);

        if(   (flags & FLAG_CODED)
           && nut->header_len[best_header_idx] > nut->header_len[fc->header_idx]+1){
            flags |= FLAG_HEADER_IDX;

        if(flags & FLAG_HEADER_IDX){
            length += 1 - nut->header_len[best_header_idx];
            length -= nut->header_len[fc->header_idx];

        length+= !(flags & FLAG_CODED_PTS);
        length+= !(flags & FLAG_CHECKSUM);

        if(length < best_length){

    assert(frame_code != -1);
    fc= &nut->frame_code[frame_code];

    needed_flags= get_needed_flags(nut, nus, fc, pkt);
    header_idx= fc->header_idx;

    init_checksum(bc, ff_crc04C11DB7_update, 0);
    put_byte(bc, frame_code);
    if(flags & FLAG_CODED){
        put_v(bc, (flags^needed_flags) & ~(FLAG_CODED));
        flags = needed_flags;

    if(flags & FLAG_STREAM_ID)  put_v(bc, pkt->stream_index);
    if(flags & FLAG_CODED_PTS)  put_v(bc, coded_pts);
    if(flags & FLAG_SIZE_MSB)   put_v(bc, pkt->size / fc->size_mul);
    if(flags & FLAG_HEADER_IDX) put_v(bc, header_idx= best_header_idx);

    if(flags & FLAG_CHECKSUM)   put_le32(bc, get_checksum(bc));
    else                        get_checksum(bc);

    put_buffer(bc, pkt->data + nut->header_len[header_idx], pkt->size - nut->header_len[header_idx]);
    nus->last_flags= flags;
    nus->last_pts= pkt->pts;

    //FIXME just store one per syncpoint
            s->streams[pkt->stream_index],
            nut->last_syncpoint_pos,
static int write_trailer(AVFormatContext *s){
    NUTContext *nut= s->priv_data;
    ByteIOContext *bc= s->pb;

    while(nut->header_count<3)
        write_headers(s, bc);
    put_flush_packet(bc);

    av_freep(&nut->stream);
    av_freep(&nut->time_base);
AVOutputFormat nut_muxer = {
    NULL_IF_CONFIG_SMALL("NUT format"),
#elif CONFIG_LIBMP3LAME
    .flags = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS,
    .codec_tag = (const AVCodecTag * const []){ ff_codec_bmp_tags, ff_nut_video_tags, ff_codec_wav_tags, ff_nut_subtitle_tags, 0 },
    .metadata_conv = ff_nut_metadata_conv,