From 0f1eb23a8e3f46b8c2f17cd37afd07cb77561386 Mon Sep 17 00:00:00 2001 From: turly221 Date: Mon, 9 Dec 2024 16:05:50 +0000 Subject: [PATCH 01/13] commit patch 19381892 --- libavcodec/apedec.c | 8 +- libavcodec/apedec.c.orig | 1592 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 1597 insertions(+), 3 deletions(-) create mode 100644 libavcodec/apedec.c.orig diff --git a/libavcodec/apedec.c b/libavcodec/apedec.c index b99598b4ee74e..072e3b42cff06 100644 --- a/libavcodec/apedec.c +++ b/libavcodec/apedec.c @@ -1412,6 +1412,7 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data, int32_t *sample24; int i, ch, ret; int blockstodecode; + uint64_t decoded_buffer_size; /* this should never be negative, but bad things will happen if it is, so check it just to make sure. */ @@ -1467,7 +1468,7 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data, skip_bits_long(&s->gb, offset); } - if (!nblocks || nblocks > INT_MAX) { + if (!nblocks || nblocks > INT_MAX / 2 / sizeof(*s->decoded_buffer) - 8) { av_log(avctx, AV_LOG_ERROR, "Invalid sample count: %"PRIu32".\n", nblocks); return AVERROR_INVALIDDATA; @@ -1493,8 +1494,9 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data, blockstodecode = s->samples; /* reallocate decoded sample buffer if needed */ - av_fast_malloc(&s->decoded_buffer, &s->decoded_size, - 2 * FFALIGN(blockstodecode, 8) * sizeof(*s->decoded_buffer)); + decoded_buffer_size = 2LL * FFALIGN(blockstodecode, 8) * sizeof(*s->decoded_buffer); + av_assert0(decoded_buffer_size <= INT_MAX); + av_fast_malloc(&s->decoded_buffer, &s->decoded_size, decoded_buffer_size); if (!s->decoded_buffer) return AVERROR(ENOMEM); memset(s->decoded_buffer, 0, s->decoded_size); diff --git a/libavcodec/apedec.c.orig b/libavcodec/apedec.c.orig new file mode 100644 index 0000000000000..b99598b4ee74e --- /dev/null +++ b/libavcodec/apedec.c.orig @@ -0,0 +1,1592 @@ +/* + * Monkey's Audio lossless audio decoder + * Copyright (c) 2007 Benjamin Zores + * based 
upon libdemac from Dave Chapman. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +#include "libavutil/avassert.h" +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" +#include "lossless_audiodsp.h" +#include "avcodec.h" +#include "bswapdsp.h" +#include "bytestream.h" +#include "internal.h" +#include "get_bits.h" +#include "unary.h" + +/** + * @file + * Monkey's Audio lossless audio decoder + */ + +#define MAX_CHANNELS 2 +#define MAX_BYTESPERSAMPLE 3 + +#define APE_FRAMECODE_MONO_SILENCE 1 +#define APE_FRAMECODE_STEREO_SILENCE 3 +#define APE_FRAMECODE_PSEUDO_STEREO 4 + +#define HISTORY_SIZE 512 +#define PREDICTOR_ORDER 8 +/** Total size of all predictor histories */ +#define PREDICTOR_SIZE 50 + +#define YDELAYA (18 + PREDICTOR_ORDER*4) +#define YDELAYB (18 + PREDICTOR_ORDER*3) +#define XDELAYA (18 + PREDICTOR_ORDER*2) +#define XDELAYB (18 + PREDICTOR_ORDER) + +#define YADAPTCOEFFSA 18 +#define XADAPTCOEFFSA 14 +#define YADAPTCOEFFSB 10 +#define XADAPTCOEFFSB 5 + +/** + * Possible compression levels + * @{ + */ +enum APECompressionLevel { + COMPRESSION_LEVEL_FAST = 1000, + COMPRESSION_LEVEL_NORMAL = 2000, + COMPRESSION_LEVEL_HIGH = 3000, + COMPRESSION_LEVEL_EXTRA_HIGH = 4000, + COMPRESSION_LEVEL_INSANE = 5000 +}; +/** @} 
*/ + +#define APE_FILTER_LEVELS 3 + +/** Filter orders depending on compression level */ +static const uint16_t ape_filter_orders[5][APE_FILTER_LEVELS] = { + { 0, 0, 0 }, + { 16, 0, 0 }, + { 64, 0, 0 }, + { 32, 256, 0 }, + { 16, 256, 1280 } +}; + +/** Filter fraction bits depending on compression level */ +static const uint8_t ape_filter_fracbits[5][APE_FILTER_LEVELS] = { + { 0, 0, 0 }, + { 11, 0, 0 }, + { 11, 0, 0 }, + { 10, 13, 0 }, + { 11, 13, 15 } +}; + + +/** Filters applied to the decoded data */ +typedef struct APEFilter { + int16_t *coeffs; ///< actual coefficients used in filtering + int16_t *adaptcoeffs; ///< adaptive filter coefficients used for correcting of actual filter coefficients + int16_t *historybuffer; ///< filter memory + int16_t *delay; ///< filtered values + + int avg; +} APEFilter; + +typedef struct APERice { + uint32_t k; + uint32_t ksum; +} APERice; + +typedef struct APERangecoder { + uint32_t low; ///< low end of interval + uint32_t range; ///< length of interval + uint32_t help; ///< bytes_to_follow resp. 
intermediate value + unsigned int buffer; ///< buffer for input/output +} APERangecoder; + +/** Filter histories */ +typedef struct APEPredictor { + int32_t *buf; + + int32_t lastA[2]; + + int32_t filterA[2]; + int32_t filterB[2]; + + int32_t coeffsA[2][4]; ///< adaption coefficients + int32_t coeffsB[2][5]; ///< adaption coefficients + int32_t historybuffer[HISTORY_SIZE + PREDICTOR_SIZE]; + + unsigned int sample_pos; +} APEPredictor; + +/** Decoder context */ +typedef struct APEContext { + AVClass *class; ///< class for AVOptions + AVCodecContext *avctx; + BswapDSPContext bdsp; + LLAudDSPContext adsp; + int channels; + int samples; ///< samples left to decode in current frame + int bps; + + int fileversion; ///< codec version, very important in decoding process + int compression_level; ///< compression levels + int fset; ///< which filter set to use (calculated from compression level) + int flags; ///< global decoder flags + + uint32_t CRC; ///< frame CRC + int frameflags; ///< frame flags + APEPredictor predictor; ///< predictor used for final reconstruction + + int32_t *decoded_buffer; + int decoded_size; + int32_t *decoded[MAX_CHANNELS]; ///< decoded data for each channel + int blocks_per_loop; ///< maximum number of samples to decode for each call + + int16_t* filterbuf[APE_FILTER_LEVELS]; ///< filter memory + + APERangecoder rc; ///< rangecoder used to decode actual values + APERice riceX; ///< rice code parameters for the second channel + APERice riceY; ///< rice code parameters for the first channel + APEFilter filters[APE_FILTER_LEVELS][2]; ///< filters used for reconstruction + GetBitContext gb; + + uint8_t *data; ///< current frame data + uint8_t *data_end; ///< frame data end + int data_size; ///< frame data allocated size + const uint8_t *ptr; ///< current position in frame data + + int error; + + void (*entropy_decode_mono)(struct APEContext *ctx, int blockstodecode); + void (*entropy_decode_stereo)(struct APEContext *ctx, int blockstodecode); + void 
(*predictor_decode_mono)(struct APEContext *ctx, int count); + void (*predictor_decode_stereo)(struct APEContext *ctx, int count); +} APEContext; + +static void ape_apply_filters(APEContext *ctx, int32_t *decoded0, + int32_t *decoded1, int count); + +static void entropy_decode_mono_0000(APEContext *ctx, int blockstodecode); +static void entropy_decode_stereo_0000(APEContext *ctx, int blockstodecode); +static void entropy_decode_mono_3860(APEContext *ctx, int blockstodecode); +static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode); +static void entropy_decode_mono_3900(APEContext *ctx, int blockstodecode); +static void entropy_decode_stereo_3900(APEContext *ctx, int blockstodecode); +static void entropy_decode_stereo_3930(APEContext *ctx, int blockstodecode); +static void entropy_decode_mono_3990(APEContext *ctx, int blockstodecode); +static void entropy_decode_stereo_3990(APEContext *ctx, int blockstodecode); + +static void predictor_decode_mono_3800(APEContext *ctx, int count); +static void predictor_decode_stereo_3800(APEContext *ctx, int count); +static void predictor_decode_mono_3930(APEContext *ctx, int count); +static void predictor_decode_stereo_3930(APEContext *ctx, int count); +static void predictor_decode_mono_3950(APEContext *ctx, int count); +static void predictor_decode_stereo_3950(APEContext *ctx, int count); + +static av_cold int ape_decode_close(AVCodecContext *avctx) +{ + APEContext *s = avctx->priv_data; + int i; + + for (i = 0; i < APE_FILTER_LEVELS; i++) + av_freep(&s->filterbuf[i]); + + av_freep(&s->decoded_buffer); + av_freep(&s->data); + s->decoded_size = s->data_size = 0; + + return 0; +} + +static av_cold int ape_decode_init(AVCodecContext *avctx) +{ + APEContext *s = avctx->priv_data; + int i; + + if (avctx->extradata_size != 6) { + av_log(avctx, AV_LOG_ERROR, "Incorrect extradata\n"); + return AVERROR(EINVAL); + } + if (avctx->channels > 2) { + av_log(avctx, AV_LOG_ERROR, "Only mono and stereo is supported\n"); + 
return AVERROR(EINVAL); + } + s->bps = avctx->bits_per_coded_sample; + switch (s->bps) { + case 8: + avctx->sample_fmt = AV_SAMPLE_FMT_U8P; + break; + case 16: + avctx->sample_fmt = AV_SAMPLE_FMT_S16P; + break; + case 24: + avctx->sample_fmt = AV_SAMPLE_FMT_S32P; + break; + default: + avpriv_request_sample(avctx, + "%d bits per coded sample", s->bps); + return AVERROR_PATCHWELCOME; + } + s->avctx = avctx; + s->channels = avctx->channels; + s->fileversion = AV_RL16(avctx->extradata); + s->compression_level = AV_RL16(avctx->extradata + 2); + s->flags = AV_RL16(avctx->extradata + 4); + + av_log(avctx, AV_LOG_VERBOSE, "Compression Level: %d - Flags: %d\n", + s->compression_level, s->flags); + if (s->compression_level % 1000 || s->compression_level > COMPRESSION_LEVEL_INSANE || + !s->compression_level || + (s->fileversion < 3930 && s->compression_level == COMPRESSION_LEVEL_INSANE)) { + av_log(avctx, AV_LOG_ERROR, "Incorrect compression level %d\n", + s->compression_level); + return AVERROR_INVALIDDATA; + } + s->fset = s->compression_level / 1000 - 1; + for (i = 0; i < APE_FILTER_LEVELS; i++) { + if (!ape_filter_orders[s->fset][i]) + break; + FF_ALLOC_OR_GOTO(avctx, s->filterbuf[i], + (ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4, + filter_alloc_fail); + } + + if (s->fileversion < 3860) { + s->entropy_decode_mono = entropy_decode_mono_0000; + s->entropy_decode_stereo = entropy_decode_stereo_0000; + } else if (s->fileversion < 3900) { + s->entropy_decode_mono = entropy_decode_mono_3860; + s->entropy_decode_stereo = entropy_decode_stereo_3860; + } else if (s->fileversion < 3930) { + s->entropy_decode_mono = entropy_decode_mono_3900; + s->entropy_decode_stereo = entropy_decode_stereo_3900; + } else if (s->fileversion < 3990) { + s->entropy_decode_mono = entropy_decode_mono_3900; + s->entropy_decode_stereo = entropy_decode_stereo_3930; + } else { + s->entropy_decode_mono = entropy_decode_mono_3990; + s->entropy_decode_stereo = entropy_decode_stereo_3990; + } + + if 
(s->fileversion < 3930) { + s->predictor_decode_mono = predictor_decode_mono_3800; + s->predictor_decode_stereo = predictor_decode_stereo_3800; + } else if (s->fileversion < 3950) { + s->predictor_decode_mono = predictor_decode_mono_3930; + s->predictor_decode_stereo = predictor_decode_stereo_3930; + } else { + s->predictor_decode_mono = predictor_decode_mono_3950; + s->predictor_decode_stereo = predictor_decode_stereo_3950; + } + + ff_bswapdsp_init(&s->bdsp); + ff_llauddsp_init(&s->adsp); + avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO; + + return 0; +filter_alloc_fail: + ape_decode_close(avctx); + return AVERROR(ENOMEM); +} + +/** + * @name APE range decoding functions + * @{ + */ + +#define CODE_BITS 32 +#define TOP_VALUE ((unsigned int)1 << (CODE_BITS-1)) +#define SHIFT_BITS (CODE_BITS - 9) +#define EXTRA_BITS ((CODE_BITS-2) % 8 + 1) +#define BOTTOM_VALUE (TOP_VALUE >> 8) + +/** Start the decoder */ +static inline void range_start_decoding(APEContext *ctx) +{ + ctx->rc.buffer = bytestream_get_byte(&ctx->ptr); + ctx->rc.low = ctx->rc.buffer >> (8 - EXTRA_BITS); + ctx->rc.range = (uint32_t) 1 << EXTRA_BITS; +} + +/** Perform normalization */ +static inline void range_dec_normalize(APEContext *ctx) +{ + while (ctx->rc.range <= BOTTOM_VALUE) { + ctx->rc.buffer <<= 8; + if(ctx->ptr < ctx->data_end) { + ctx->rc.buffer += *ctx->ptr; + ctx->ptr++; + } else { + ctx->error = 1; + } + ctx->rc.low = (ctx->rc.low << 8) | ((ctx->rc.buffer >> 1) & 0xFF); + ctx->rc.range <<= 8; + } +} + +/** + * Calculate cumulative frequency for next symbol. Does NO update! 
+ * @param ctx decoder context + * @param tot_f is the total frequency or (code_value)1<rc.help = ctx->rc.range / tot_f; + return ctx->rc.low / ctx->rc.help; +} + +/** + * Decode value with given size in bits + * @param ctx decoder context + * @param shift number of bits to decode + */ +static inline int range_decode_culshift(APEContext *ctx, int shift) +{ + range_dec_normalize(ctx); + ctx->rc.help = ctx->rc.range >> shift; + return ctx->rc.low / ctx->rc.help; +} + + +/** + * Update decoding state + * @param ctx decoder context + * @param sy_f the interval length (frequency of the symbol) + * @param lt_f the lower end (frequency sum of < symbols) + */ +static inline void range_decode_update(APEContext *ctx, int sy_f, int lt_f) +{ + ctx->rc.low -= ctx->rc.help * lt_f; + ctx->rc.range = ctx->rc.help * sy_f; +} + +/** Decode n bits (n <= 16) without modelling */ +static inline int range_decode_bits(APEContext *ctx, int n) +{ + int sym = range_decode_culshift(ctx, n); + range_decode_update(ctx, 1, sym); + return sym; +} + + +#define MODEL_ELEMENTS 64 + +/** + * Fixed probabilities for symbols in Monkey Audio version 3.97 + */ +static const uint16_t counts_3970[22] = { + 0, 14824, 28224, 39348, 47855, 53994, 58171, 60926, + 62682, 63786, 64463, 64878, 65126, 65276, 65365, 65419, + 65450, 65469, 65480, 65487, 65491, 65493, +}; + +/** + * Probability ranges for symbols in Monkey Audio version 3.97 + */ +static const uint16_t counts_diff_3970[21] = { + 14824, 13400, 11124, 8507, 6139, 4177, 2755, 1756, + 1104, 677, 415, 248, 150, 89, 54, 31, + 19, 11, 7, 4, 2, +}; + +/** + * Fixed probabilities for symbols in Monkey Audio version 3.98 + */ +static const uint16_t counts_3980[22] = { + 0, 19578, 36160, 48417, 56323, 60899, 63265, 64435, + 64971, 65232, 65351, 65416, 65447, 65466, 65476, 65482, + 65485, 65488, 65490, 65491, 65492, 65493, +}; + +/** + * Probability ranges for symbols in Monkey Audio version 3.98 + */ +static const uint16_t counts_diff_3980[21] = { + 19578, 
16582, 12257, 7906, 4576, 2366, 1170, 536, + 261, 119, 65, 31, 19, 10, 6, 3, + 3, 2, 1, 1, 1, +}; + +/** + * Decode symbol + * @param ctx decoder context + * @param counts probability range start position + * @param counts_diff probability range widths + */ +static inline int range_get_symbol(APEContext *ctx, + const uint16_t counts[], + const uint16_t counts_diff[]) +{ + int symbol, cf; + + cf = range_decode_culshift(ctx, 16); + + if(cf > 65492){ + symbol= cf - 65535 + 63; + range_decode_update(ctx, 1, cf); + if(cf > 65535) + ctx->error=1; + return symbol; + } + /* figure out the symbol inefficiently; a binary search would be much better */ + for (symbol = 0; counts[symbol + 1] <= cf; symbol++); + + range_decode_update(ctx, counts_diff[symbol], counts[symbol]); + + return symbol; +} +/** @} */ // group rangecoder + +static inline void update_rice(APERice *rice, unsigned int x) +{ + int lim = rice->k ? (1 << (rice->k + 4)) : 0; + rice->ksum += ((x + 1) / 2) - ((rice->ksum + 16) >> 5); + + if (rice->ksum < lim) + rice->k--; + else if (rice->ksum >= (1 << (rice->k + 5))) + rice->k++; +} + +static inline int get_rice_ook(GetBitContext *gb, int k) +{ + unsigned int x; + + x = get_unary(gb, 1, get_bits_left(gb)); + + if (k) + x = (x << k) | get_bits(gb, k); + + return x; +} + +static inline int ape_decode_value_3860(APEContext *ctx, GetBitContext *gb, + APERice *rice) +{ + unsigned int x, overflow; + + overflow = get_unary(gb, 1, get_bits_left(gb)); + + if (ctx->fileversion > 3880) { + while (overflow >= 16) { + overflow -= 16; + rice->k += 4; + } + } + + if (!rice->k) + x = overflow; + else if(rice->k <= MIN_CACHE_BITS) { + x = (overflow << rice->k) + get_bits(gb, rice->k); + } else { + av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", rice->k); + return AVERROR_INVALIDDATA; + } + rice->ksum += x - (rice->ksum + 8 >> 4); + if (rice->ksum < (rice->k ? 
1 << (rice->k + 4) : 0)) + rice->k--; + else if (rice->ksum >= (1 << (rice->k + 5)) && rice->k < 24) + rice->k++; + + /* Convert to signed */ + return ((x >> 1) ^ ((x & 1) - 1)) + 1; +} + +static inline int ape_decode_value_3900(APEContext *ctx, APERice *rice) +{ + unsigned int x, overflow; + int tmpk; + + overflow = range_get_symbol(ctx, counts_3970, counts_diff_3970); + + if (overflow == (MODEL_ELEMENTS - 1)) { + tmpk = range_decode_bits(ctx, 5); + overflow = 0; + } else + tmpk = (rice->k < 1) ? 0 : rice->k - 1; + + if (tmpk <= 16 || ctx->fileversion < 3910) { + if (tmpk > 23) { + av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk); + return AVERROR_INVALIDDATA; + } + x = range_decode_bits(ctx, tmpk); + } else if (tmpk <= 31) { + x = range_decode_bits(ctx, 16); + x |= (range_decode_bits(ctx, tmpk - 16) << 16); + } else { + av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %d\n", tmpk); + return AVERROR_INVALIDDATA; + } + x += overflow << tmpk; + + update_rice(rice, x); + + /* Convert to signed */ + return ((x >> 1) ^ ((x & 1) - 1)) + 1; +} + +static inline int ape_decode_value_3990(APEContext *ctx, APERice *rice) +{ + unsigned int x, overflow; + int base, pivot; + + pivot = rice->ksum >> 5; + if (pivot == 0) + pivot = 1; + + overflow = range_get_symbol(ctx, counts_3980, counts_diff_3980); + + if (overflow == (MODEL_ELEMENTS - 1)) { + overflow = range_decode_bits(ctx, 16) << 16; + overflow |= range_decode_bits(ctx, 16); + } + + if (pivot < 0x10000) { + base = range_decode_culfreq(ctx, pivot); + range_decode_update(ctx, 1, base); + } else { + int base_hi = pivot, base_lo; + int bbits = 0; + + while (base_hi & ~0xFFFF) { + base_hi >>= 1; + bbits++; + } + base_hi = range_decode_culfreq(ctx, base_hi + 1); + range_decode_update(ctx, 1, base_hi); + base_lo = range_decode_culfreq(ctx, 1 << bbits); + range_decode_update(ctx, 1, base_lo); + + base = (base_hi << bbits) + base_lo; + } + + x = base + overflow * pivot; + + update_rice(rice, x); + + /* Convert to 
signed */ + return ((x >> 1) ^ ((x & 1) - 1)) + 1; +} + +static void decode_array_0000(APEContext *ctx, GetBitContext *gb, + int32_t *out, APERice *rice, int blockstodecode) +{ + int i; + int ksummax, ksummin; + + rice->ksum = 0; + for (i = 0; i < FFMIN(blockstodecode, 5); i++) { + out[i] = get_rice_ook(&ctx->gb, 10); + rice->ksum += out[i]; + } + rice->k = av_log2(rice->ksum / 10) + 1; + if (rice->k >= 24) + return; + for (; i < FFMIN(blockstodecode, 64); i++) { + out[i] = get_rice_ook(&ctx->gb, rice->k); + rice->ksum += out[i]; + rice->k = av_log2(rice->ksum / ((i + 1) * 2)) + 1; + if (rice->k >= 24) + return; + } + ksummax = 1 << rice->k + 7; + ksummin = rice->k ? (1 << rice->k + 6) : 0; + for (; i < blockstodecode; i++) { + out[i] = get_rice_ook(&ctx->gb, rice->k); + rice->ksum += out[i] - out[i - 64]; + while (rice->ksum < ksummin) { + rice->k--; + ksummin = rice->k ? ksummin >> 1 : 0; + ksummax >>= 1; + } + while (rice->ksum >= ksummax) { + rice->k++; + if (rice->k > 24) + return; + ksummax <<= 1; + ksummin = ksummin ? 
ksummin << 1 : 128; + } + } + + for (i = 0; i < blockstodecode; i++) + out[i] = ((out[i] >> 1) ^ ((out[i] & 1) - 1)) + 1; +} + +static void entropy_decode_mono_0000(APEContext *ctx, int blockstodecode) +{ + decode_array_0000(ctx, &ctx->gb, ctx->decoded[0], &ctx->riceY, + blockstodecode); +} + +static void entropy_decode_stereo_0000(APEContext *ctx, int blockstodecode) +{ + decode_array_0000(ctx, &ctx->gb, ctx->decoded[0], &ctx->riceY, + blockstodecode); + decode_array_0000(ctx, &ctx->gb, ctx->decoded[1], &ctx->riceX, + blockstodecode); +} + +static void entropy_decode_mono_3860(APEContext *ctx, int blockstodecode) +{ + int32_t *decoded0 = ctx->decoded[0]; + + while (blockstodecode--) + *decoded0++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceY); +} + +static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode) +{ + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; + int blocks = blockstodecode; + + while (blockstodecode--) + *decoded0++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceY); + while (blocks--) + *decoded1++ = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceX); +} + +static void entropy_decode_mono_3900(APEContext *ctx, int blockstodecode) +{ + int32_t *decoded0 = ctx->decoded[0]; + + while (blockstodecode--) + *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY); +} + +static void entropy_decode_stereo_3900(APEContext *ctx, int blockstodecode) +{ + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; + int blocks = blockstodecode; + + while (blockstodecode--) + *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY); + range_dec_normalize(ctx); + // because of some implementation peculiarities we need to backpedal here + ctx->ptr -= 1; + range_start_decoding(ctx); + while (blocks--) + *decoded1++ = ape_decode_value_3900(ctx, &ctx->riceX); +} + +static void entropy_decode_stereo_3930(APEContext *ctx, int blockstodecode) +{ + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = 
ctx->decoded[1]; + + while (blockstodecode--) { + *decoded0++ = ape_decode_value_3900(ctx, &ctx->riceY); + *decoded1++ = ape_decode_value_3900(ctx, &ctx->riceX); + } +} + +static void entropy_decode_mono_3990(APEContext *ctx, int blockstodecode) +{ + int32_t *decoded0 = ctx->decoded[0]; + + while (blockstodecode--) + *decoded0++ = ape_decode_value_3990(ctx, &ctx->riceY); +} + +static void entropy_decode_stereo_3990(APEContext *ctx, int blockstodecode) +{ + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; + + while (blockstodecode--) { + *decoded0++ = ape_decode_value_3990(ctx, &ctx->riceY); + *decoded1++ = ape_decode_value_3990(ctx, &ctx->riceX); + } +} + +static int init_entropy_decoder(APEContext *ctx) +{ + /* Read the CRC */ + if (ctx->fileversion >= 3900) { + if (ctx->data_end - ctx->ptr < 6) + return AVERROR_INVALIDDATA; + ctx->CRC = bytestream_get_be32(&ctx->ptr); + } else { + ctx->CRC = get_bits_long(&ctx->gb, 32); + } + + /* Read the frame flags if they exist */ + ctx->frameflags = 0; + if ((ctx->fileversion > 3820) && (ctx->CRC & 0x80000000)) { + ctx->CRC &= ~0x80000000; + + if (ctx->data_end - ctx->ptr < 6) + return AVERROR_INVALIDDATA; + ctx->frameflags = bytestream_get_be32(&ctx->ptr); + } + + /* Initialize the rice structs */ + ctx->riceX.k = 10; + ctx->riceX.ksum = (1 << ctx->riceX.k) * 16; + ctx->riceY.k = 10; + ctx->riceY.ksum = (1 << ctx->riceY.k) * 16; + + if (ctx->fileversion >= 3900) { + /* The first 8 bits of input are ignored. 
*/ + ctx->ptr++; + + range_start_decoding(ctx); + } + + return 0; +} + +static const int32_t initial_coeffs_fast_3320[1] = { + 375, +}; + +static const int32_t initial_coeffs_a_3800[3] = { + 64, 115, 64, +}; + +static const int32_t initial_coeffs_b_3800[2] = { + 740, 0 +}; + +static const int32_t initial_coeffs_3930[4] = { + 360, 317, -109, 98 +}; + +static void init_predictor_decoder(APEContext *ctx) +{ + APEPredictor *p = &ctx->predictor; + + /* Zero the history buffers */ + memset(p->historybuffer, 0, PREDICTOR_SIZE * sizeof(*p->historybuffer)); + p->buf = p->historybuffer; + + /* Initialize and zero the coefficients */ + if (ctx->fileversion < 3930) { + if (ctx->compression_level == COMPRESSION_LEVEL_FAST) { + memcpy(p->coeffsA[0], initial_coeffs_fast_3320, + sizeof(initial_coeffs_fast_3320)); + memcpy(p->coeffsA[1], initial_coeffs_fast_3320, + sizeof(initial_coeffs_fast_3320)); + } else { + memcpy(p->coeffsA[0], initial_coeffs_a_3800, + sizeof(initial_coeffs_a_3800)); + memcpy(p->coeffsA[1], initial_coeffs_a_3800, + sizeof(initial_coeffs_a_3800)); + } + } else { + memcpy(p->coeffsA[0], initial_coeffs_3930, sizeof(initial_coeffs_3930)); + memcpy(p->coeffsA[1], initial_coeffs_3930, sizeof(initial_coeffs_3930)); + } + memset(p->coeffsB, 0, sizeof(p->coeffsB)); + if (ctx->fileversion < 3930) { + memcpy(p->coeffsB[0], initial_coeffs_b_3800, + sizeof(initial_coeffs_b_3800)); + memcpy(p->coeffsB[1], initial_coeffs_b_3800, + sizeof(initial_coeffs_b_3800)); + } + + p->filterA[0] = p->filterA[1] = 0; + p->filterB[0] = p->filterB[1] = 0; + p->lastA[0] = p->lastA[1] = 0; + + p->sample_pos = 0; +} + +/** Get inverse sign of integer (-1 for positive, 1 for negative and 0 for zero) */ +static inline int APESIGN(int32_t x) { + return (x < 0) - (x > 0); +} + +static av_always_inline int filter_fast_3320(APEPredictor *p, + const int decoded, const int filter, + const int delayA) +{ + int32_t predictionA; + + p->buf[delayA] = p->lastA[filter]; + if (p->sample_pos < 3) { + 
p->lastA[filter] = decoded; + p->filterA[filter] = decoded; + return decoded; + } + + predictionA = p->buf[delayA] * 2 - p->buf[delayA - 1]; + p->lastA[filter] = decoded + (predictionA * p->coeffsA[filter][0] >> 9); + + if ((decoded ^ predictionA) > 0) + p->coeffsA[filter][0]++; + else + p->coeffsA[filter][0]--; + + p->filterA[filter] += p->lastA[filter]; + + return p->filterA[filter]; +} + +static av_always_inline int filter_3800(APEPredictor *p, + const int decoded, const int filter, + const int delayA, const int delayB, + const int start, const int shift) +{ + int32_t predictionA, predictionB, sign; + int32_t d0, d1, d2, d3, d4; + + p->buf[delayA] = p->lastA[filter]; + p->buf[delayB] = p->filterB[filter]; + if (p->sample_pos < start) { + predictionA = decoded + p->filterA[filter]; + p->lastA[filter] = decoded; + p->filterB[filter] = decoded; + p->filterA[filter] = predictionA; + return predictionA; + } + d2 = p->buf[delayA]; + d1 = (p->buf[delayA] - p->buf[delayA - 1]) << 1; + d0 = p->buf[delayA] + ((p->buf[delayA - 2] - p->buf[delayA - 1]) << 3); + d3 = p->buf[delayB] * 2 - p->buf[delayB - 1]; + d4 = p->buf[delayB]; + + predictionA = d0 * p->coeffsA[filter][0] + + d1 * p->coeffsA[filter][1] + + d2 * p->coeffsA[filter][2]; + + sign = APESIGN(decoded); + p->coeffsA[filter][0] += (((d0 >> 30) & 2) - 1) * sign; + p->coeffsA[filter][1] += (((d1 >> 28) & 8) - 4) * sign; + p->coeffsA[filter][2] += (((d2 >> 28) & 8) - 4) * sign; + + predictionB = d3 * p->coeffsB[filter][0] - + d4 * p->coeffsB[filter][1]; + p->lastA[filter] = decoded + (predictionA >> 11); + sign = APESIGN(p->lastA[filter]); + p->coeffsB[filter][0] += (((d3 >> 29) & 4) - 2) * sign; + p->coeffsB[filter][1] -= (((d4 >> 30) & 2) - 1) * sign; + + p->filterB[filter] = p->lastA[filter] + (predictionB >> shift); + p->filterA[filter] = p->filterB[filter] + ((p->filterA[filter] * 31) >> 5); + + return p->filterA[filter]; +} + +static void long_filter_high_3800(int32_t *buffer, int order, int shift, int length) 
+{ + int i, j; + int32_t dotprod, sign; + int32_t coeffs[256], delay[256]; + + if (order >= length) + return; + + memset(coeffs, 0, order * sizeof(*coeffs)); + for (i = 0; i < order; i++) + delay[i] = buffer[i]; + for (i = order; i < length; i++) { + dotprod = 0; + sign = APESIGN(buffer[i]); + for (j = 0; j < order; j++) { + dotprod += delay[j] * coeffs[j]; + coeffs[j] += ((delay[j] >> 31) | 1) * sign; + } + buffer[i] -= dotprod >> shift; + for (j = 0; j < order - 1; j++) + delay[j] = delay[j + 1]; + delay[order - 1] = buffer[i]; + } +} + +static void long_filter_ehigh_3830(int32_t *buffer, int length) +{ + int i, j; + int32_t dotprod, sign; + int32_t coeffs[8] = { 0 }, delay[8] = { 0 }; + + for (i = 0; i < length; i++) { + dotprod = 0; + sign = APESIGN(buffer[i]); + for (j = 7; j >= 0; j--) { + dotprod += delay[j] * coeffs[j]; + coeffs[j] += ((delay[j] >> 31) | 1) * sign; + } + for (j = 7; j > 0; j--) + delay[j] = delay[j - 1]; + delay[0] = buffer[i]; + buffer[i] -= dotprod >> 9; + } +} + +static void predictor_decode_stereo_3800(APEContext *ctx, int count) +{ + APEPredictor *p = &ctx->predictor; + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; + int start = 4, shift = 10; + + if (ctx->compression_level == COMPRESSION_LEVEL_HIGH) { + start = 16; + long_filter_high_3800(decoded0, 16, 9, count); + long_filter_high_3800(decoded1, 16, 9, count); + } else if (ctx->compression_level == COMPRESSION_LEVEL_EXTRA_HIGH) { + int order = 128, shift2 = 11; + + if (ctx->fileversion >= 3830) { + order <<= 1; + shift++; + shift2++; + long_filter_ehigh_3830(decoded0 + order, count - order); + long_filter_ehigh_3830(decoded1 + order, count - order); + } + start = order; + long_filter_high_3800(decoded0, order, shift2, count); + long_filter_high_3800(decoded1, order, shift2, count); + } + + while (count--) { + int X = *decoded0, Y = *decoded1; + if (ctx->compression_level == COMPRESSION_LEVEL_FAST) { + *decoded0 = filter_fast_3320(p, Y, 0, YDELAYA); + 
decoded0++; + *decoded1 = filter_fast_3320(p, X, 1, XDELAYA); + decoded1++; + } else { + *decoded0 = filter_3800(p, Y, 0, YDELAYA, YDELAYB, + start, shift); + decoded0++; + *decoded1 = filter_3800(p, X, 1, XDELAYA, XDELAYB, + start, shift); + decoded1++; + } + + /* Combined */ + p->buf++; + p->sample_pos++; + + /* Have we filled the history buffer? */ + if (p->buf == p->historybuffer + HISTORY_SIZE) { + memmove(p->historybuffer, p->buf, + PREDICTOR_SIZE * sizeof(*p->historybuffer)); + p->buf = p->historybuffer; + } + } +} + +static void predictor_decode_mono_3800(APEContext *ctx, int count) +{ + APEPredictor *p = &ctx->predictor; + int32_t *decoded0 = ctx->decoded[0]; + int start = 4, shift = 10; + + if (ctx->compression_level == COMPRESSION_LEVEL_HIGH) { + start = 16; + long_filter_high_3800(decoded0, 16, 9, count); + } else if (ctx->compression_level == COMPRESSION_LEVEL_EXTRA_HIGH) { + int order = 128, shift2 = 11; + + if (ctx->fileversion >= 3830) { + order <<= 1; + shift++; + shift2++; + long_filter_ehigh_3830(decoded0 + order, count - order); + } + start = order; + long_filter_high_3800(decoded0, order, shift2, count); + } + + while (count--) { + if (ctx->compression_level == COMPRESSION_LEVEL_FAST) { + *decoded0 = filter_fast_3320(p, *decoded0, 0, YDELAYA); + decoded0++; + } else { + *decoded0 = filter_3800(p, *decoded0, 0, YDELAYA, YDELAYB, + start, shift); + decoded0++; + } + + /* Combined */ + p->buf++; + p->sample_pos++; + + /* Have we filled the history buffer? 
*/ + if (p->buf == p->historybuffer + HISTORY_SIZE) { + memmove(p->historybuffer, p->buf, + PREDICTOR_SIZE * sizeof(*p->historybuffer)); + p->buf = p->historybuffer; + } + } +} + +static av_always_inline int predictor_update_3930(APEPredictor *p, + const int decoded, const int filter, + const int delayA) +{ + int32_t predictionA, sign; + int32_t d0, d1, d2, d3; + + p->buf[delayA] = p->lastA[filter]; + d0 = p->buf[delayA ]; + d1 = p->buf[delayA ] - p->buf[delayA - 1]; + d2 = p->buf[delayA - 1] - p->buf[delayA - 2]; + d3 = p->buf[delayA - 2] - p->buf[delayA - 3]; + + predictionA = d0 * p->coeffsA[filter][0] + + d1 * p->coeffsA[filter][1] + + d2 * p->coeffsA[filter][2] + + d3 * p->coeffsA[filter][3]; + + p->lastA[filter] = decoded + (predictionA >> 9); + p->filterA[filter] = p->lastA[filter] + ((p->filterA[filter] * 31) >> 5); + + sign = APESIGN(decoded); + p->coeffsA[filter][0] += ((d0 < 0) * 2 - 1) * sign; + p->coeffsA[filter][1] += ((d1 < 0) * 2 - 1) * sign; + p->coeffsA[filter][2] += ((d2 < 0) * 2 - 1) * sign; + p->coeffsA[filter][3] += ((d3 < 0) * 2 - 1) * sign; + + return p->filterA[filter]; +} + +static void predictor_decode_stereo_3930(APEContext *ctx, int count) +{ + APEPredictor *p = &ctx->predictor; + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; + + ape_apply_filters(ctx, ctx->decoded[0], ctx->decoded[1], count); + + while (count--) { + /* Predictor Y */ + int Y = *decoded1, X = *decoded0; + *decoded0 = predictor_update_3930(p, Y, 0, YDELAYA); + decoded0++; + *decoded1 = predictor_update_3930(p, X, 1, XDELAYA); + decoded1++; + + /* Combined */ + p->buf++; + + /* Have we filled the history buffer? 
*/ + if (p->buf == p->historybuffer + HISTORY_SIZE) { + memmove(p->historybuffer, p->buf, + PREDICTOR_SIZE * sizeof(*p->historybuffer)); + p->buf = p->historybuffer; + } + } +} + +static void predictor_decode_mono_3930(APEContext *ctx, int count) +{ + APEPredictor *p = &ctx->predictor; + int32_t *decoded0 = ctx->decoded[0]; + + ape_apply_filters(ctx, ctx->decoded[0], NULL, count); + + while (count--) { + *decoded0 = predictor_update_3930(p, *decoded0, 0, YDELAYA); + decoded0++; + + p->buf++; + + /* Have we filled the history buffer? */ + if (p->buf == p->historybuffer + HISTORY_SIZE) { + memmove(p->historybuffer, p->buf, + PREDICTOR_SIZE * sizeof(*p->historybuffer)); + p->buf = p->historybuffer; + } + } +} + +static av_always_inline int predictor_update_filter(APEPredictor *p, + const int decoded, const int filter, + const int delayA, const int delayB, + const int adaptA, const int adaptB) +{ + int32_t predictionA, predictionB, sign; + + p->buf[delayA] = p->lastA[filter]; + p->buf[adaptA] = APESIGN(p->buf[delayA]); + p->buf[delayA - 1] = p->buf[delayA] - p->buf[delayA - 1]; + p->buf[adaptA - 1] = APESIGN(p->buf[delayA - 1]); + + predictionA = p->buf[delayA ] * p->coeffsA[filter][0] + + p->buf[delayA - 1] * p->coeffsA[filter][1] + + p->buf[delayA - 2] * p->coeffsA[filter][2] + + p->buf[delayA - 3] * p->coeffsA[filter][3]; + + /* Apply a scaled first-order filter compression */ + p->buf[delayB] = p->filterA[filter ^ 1] - ((p->filterB[filter] * 31) >> 5); + p->buf[adaptB] = APESIGN(p->buf[delayB]); + p->buf[delayB - 1] = p->buf[delayB] - p->buf[delayB - 1]; + p->buf[adaptB - 1] = APESIGN(p->buf[delayB - 1]); + p->filterB[filter] = p->filterA[filter ^ 1]; + + predictionB = p->buf[delayB ] * p->coeffsB[filter][0] + + p->buf[delayB - 1] * p->coeffsB[filter][1] + + p->buf[delayB - 2] * p->coeffsB[filter][2] + + p->buf[delayB - 3] * p->coeffsB[filter][3] + + p->buf[delayB - 4] * p->coeffsB[filter][4]; + + p->lastA[filter] = decoded + ((predictionA + (predictionB >> 1)) >> 
10); + p->filterA[filter] = p->lastA[filter] + ((p->filterA[filter] * 31) >> 5); + + sign = APESIGN(decoded); + p->coeffsA[filter][0] += p->buf[adaptA ] * sign; + p->coeffsA[filter][1] += p->buf[adaptA - 1] * sign; + p->coeffsA[filter][2] += p->buf[adaptA - 2] * sign; + p->coeffsA[filter][3] += p->buf[adaptA - 3] * sign; + p->coeffsB[filter][0] += p->buf[adaptB ] * sign; + p->coeffsB[filter][1] += p->buf[adaptB - 1] * sign; + p->coeffsB[filter][2] += p->buf[adaptB - 2] * sign; + p->coeffsB[filter][3] += p->buf[adaptB - 3] * sign; + p->coeffsB[filter][4] += p->buf[adaptB - 4] * sign; + + return p->filterA[filter]; +} + +static void predictor_decode_stereo_3950(APEContext *ctx, int count) +{ + APEPredictor *p = &ctx->predictor; + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; + + ape_apply_filters(ctx, ctx->decoded[0], ctx->decoded[1], count); + + while (count--) { + /* Predictor Y */ + *decoded0 = predictor_update_filter(p, *decoded0, 0, YDELAYA, YDELAYB, + YADAPTCOEFFSA, YADAPTCOEFFSB); + decoded0++; + *decoded1 = predictor_update_filter(p, *decoded1, 1, XDELAYA, XDELAYB, + XADAPTCOEFFSA, XADAPTCOEFFSB); + decoded1++; + + /* Combined */ + p->buf++; + + /* Have we filled the history buffer? 
*/ + if (p->buf == p->historybuffer + HISTORY_SIZE) { + memmove(p->historybuffer, p->buf, + PREDICTOR_SIZE * sizeof(*p->historybuffer)); + p->buf = p->historybuffer; + } + } +} + +static void predictor_decode_mono_3950(APEContext *ctx, int count) +{ + APEPredictor *p = &ctx->predictor; + int32_t *decoded0 = ctx->decoded[0]; + int32_t predictionA, currentA, A, sign; + + ape_apply_filters(ctx, ctx->decoded[0], NULL, count); + + currentA = p->lastA[0]; + + while (count--) { + A = *decoded0; + + p->buf[YDELAYA] = currentA; + p->buf[YDELAYA - 1] = p->buf[YDELAYA] - p->buf[YDELAYA - 1]; + + predictionA = p->buf[YDELAYA ] * p->coeffsA[0][0] + + p->buf[YDELAYA - 1] * p->coeffsA[0][1] + + p->buf[YDELAYA - 2] * p->coeffsA[0][2] + + p->buf[YDELAYA - 3] * p->coeffsA[0][3]; + + currentA = A + (predictionA >> 10); + + p->buf[YADAPTCOEFFSA] = APESIGN(p->buf[YDELAYA ]); + p->buf[YADAPTCOEFFSA - 1] = APESIGN(p->buf[YDELAYA - 1]); + + sign = APESIGN(A); + p->coeffsA[0][0] += p->buf[YADAPTCOEFFSA ] * sign; + p->coeffsA[0][1] += p->buf[YADAPTCOEFFSA - 1] * sign; + p->coeffsA[0][2] += p->buf[YADAPTCOEFFSA - 2] * sign; + p->coeffsA[0][3] += p->buf[YADAPTCOEFFSA - 3] * sign; + + p->buf++; + + /* Have we filled the history buffer? 
*/ + if (p->buf == p->historybuffer + HISTORY_SIZE) { + memmove(p->historybuffer, p->buf, + PREDICTOR_SIZE * sizeof(*p->historybuffer)); + p->buf = p->historybuffer; + } + + p->filterA[0] = currentA + ((p->filterA[0] * 31) >> 5); + *(decoded0++) = p->filterA[0]; + } + + p->lastA[0] = currentA; +} + +static void do_init_filter(APEFilter *f, int16_t *buf, int order) +{ + f->coeffs = buf; + f->historybuffer = buf + order; + f->delay = f->historybuffer + order * 2; + f->adaptcoeffs = f->historybuffer + order; + + memset(f->historybuffer, 0, (order * 2) * sizeof(*f->historybuffer)); + memset(f->coeffs, 0, order * sizeof(*f->coeffs)); + f->avg = 0; +} + +static void init_filter(APEContext *ctx, APEFilter *f, int16_t *buf, int order) +{ + do_init_filter(&f[0], buf, order); + do_init_filter(&f[1], buf + order * 3 + HISTORY_SIZE, order); +} + +static void do_apply_filter(APEContext *ctx, int version, APEFilter *f, + int32_t *data, int count, int order, int fracbits) +{ + int res; + int absres; + + while (count--) { + /* round fixedpoint scalar product */ + res = ctx->adsp.scalarproduct_and_madd_int16(f->coeffs, + f->delay - order, + f->adaptcoeffs - order, + order, APESIGN(*data)); + res = (res + (1 << (fracbits - 1))) >> fracbits; + res += *data; + *data++ = res; + + /* Update the output history */ + *f->delay++ = av_clip_int16(res); + + if (version < 3980) { + /* Version ??? to < 3.98 files (untested) */ + f->adaptcoeffs[0] = (res == 0) ? 
0 : ((res >> 28) & 8) - 4; + f->adaptcoeffs[-4] >>= 1; + f->adaptcoeffs[-8] >>= 1; + } else { + /* Version 3.98 and later files */ + + /* Update the adaption coefficients */ + absres = FFABS(res); + if (absres) + *f->adaptcoeffs = APESIGN(res) * + (8 << ((absres > f->avg * 3) + (absres > f->avg * 4 / 3))); + /* equivalent to the following code + if (absres <= f->avg * 4 / 3) + *f->adaptcoeffs = APESIGN(res) * 8; + else if (absres <= f->avg * 3) + *f->adaptcoeffs = APESIGN(res) * 16; + else + *f->adaptcoeffs = APESIGN(res) * 32; + */ + else + *f->adaptcoeffs = 0; + + f->avg += (absres - f->avg) / 16; + + f->adaptcoeffs[-1] >>= 1; + f->adaptcoeffs[-2] >>= 1; + f->adaptcoeffs[-8] >>= 1; + } + + f->adaptcoeffs++; + + /* Have we filled the history buffer? */ + if (f->delay == f->historybuffer + HISTORY_SIZE + (order * 2)) { + memmove(f->historybuffer, f->delay - (order * 2), + (order * 2) * sizeof(*f->historybuffer)); + f->delay = f->historybuffer + order * 2; + f->adaptcoeffs = f->historybuffer + order; + } + } +} + +static void apply_filter(APEContext *ctx, APEFilter *f, + int32_t *data0, int32_t *data1, + int count, int order, int fracbits) +{ + do_apply_filter(ctx, ctx->fileversion, &f[0], data0, count, order, fracbits); + if (data1) + do_apply_filter(ctx, ctx->fileversion, &f[1], data1, count, order, fracbits); +} + +static void ape_apply_filters(APEContext *ctx, int32_t *decoded0, + int32_t *decoded1, int count) +{ + int i; + + for (i = 0; i < APE_FILTER_LEVELS; i++) { + if (!ape_filter_orders[ctx->fset][i]) + break; + apply_filter(ctx, ctx->filters[i], decoded0, decoded1, count, + ape_filter_orders[ctx->fset][i], + ape_filter_fracbits[ctx->fset][i]); + } +} + +static int init_frame_decoder(APEContext *ctx) +{ + int i, ret; + if ((ret = init_entropy_decoder(ctx)) < 0) + return ret; + init_predictor_decoder(ctx); + + for (i = 0; i < APE_FILTER_LEVELS; i++) { + if (!ape_filter_orders[ctx->fset][i]) + break; + init_filter(ctx, ctx->filters[i], ctx->filterbuf[i], + 
ape_filter_orders[ctx->fset][i]); + } + return 0; +} + +static void ape_unpack_mono(APEContext *ctx, int count) +{ + if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) { + /* We are pure silence, so we're done. */ + av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence mono\n"); + return; + } + + ctx->entropy_decode_mono(ctx, count); + + /* Now apply the predictor decoding */ + ctx->predictor_decode_mono(ctx, count); + + /* Pseudo-stereo - just copy left channel to right channel */ + if (ctx->channels == 2) { + memcpy(ctx->decoded[1], ctx->decoded[0], count * sizeof(*ctx->decoded[1])); + } +} + +static void ape_unpack_stereo(APEContext *ctx, int count) +{ + int32_t left, right; + int32_t *decoded0 = ctx->decoded[0]; + int32_t *decoded1 = ctx->decoded[1]; + + if ((ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) == APE_FRAMECODE_STEREO_SILENCE) { + /* We are pure silence, so we're done. */ + av_log(ctx->avctx, AV_LOG_DEBUG, "pure silence stereo\n"); + return; + } + + ctx->entropy_decode_stereo(ctx, count); + + /* Now apply the predictor decoding */ + ctx->predictor_decode_stereo(ctx, count); + + /* Decorrelate and scale to output depth */ + while (count--) { + left = *decoded1 - (*decoded0 / 2); + right = left + *decoded0; + + *(decoded0++) = left; + *(decoded1++) = right; + } +} + +static int ape_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame_ptr, AVPacket *avpkt) +{ + AVFrame *frame = data; + const uint8_t *buf = avpkt->data; + APEContext *s = avctx->priv_data; + uint8_t *sample8; + int16_t *sample16; + int32_t *sample24; + int i, ch, ret; + int blockstodecode; + + /* this should never be negative, but bad things will happen if it is, so + check it just to make sure. 
*/ + av_assert0(s->samples >= 0); + + if(!s->samples){ + uint32_t nblocks, offset; + int buf_size; + + if (!avpkt->size) { + *got_frame_ptr = 0; + return 0; + } + if (avpkt->size < 8) { + av_log(avctx, AV_LOG_ERROR, "Packet is too small\n"); + return AVERROR_INVALIDDATA; + } + buf_size = avpkt->size & ~3; + if (buf_size != avpkt->size) { + av_log(avctx, AV_LOG_WARNING, "packet size is not a multiple of 4. " + "extra bytes at the end will be skipped.\n"); + } + if (s->fileversion < 3950) // previous versions overread two bytes + buf_size += 2; + av_fast_padded_malloc(&s->data, &s->data_size, buf_size); + if (!s->data) + return AVERROR(ENOMEM); + s->bdsp.bswap_buf((uint32_t *) s->data, (const uint32_t *) buf, + buf_size >> 2); + memset(s->data + (buf_size & ~3), 0, buf_size & 3); + s->ptr = s->data; + s->data_end = s->data + buf_size; + + nblocks = bytestream_get_be32(&s->ptr); + offset = bytestream_get_be32(&s->ptr); + if (s->fileversion >= 3900) { + if (offset > 3) { + av_log(avctx, AV_LOG_ERROR, "Incorrect offset passed\n"); + s->data = NULL; + return AVERROR_INVALIDDATA; + } + if (s->data_end - s->ptr < offset) { + av_log(avctx, AV_LOG_ERROR, "Packet is too small\n"); + return AVERROR_INVALIDDATA; + } + s->ptr += offset; + } else { + if ((ret = init_get_bits8(&s->gb, s->ptr, s->data_end - s->ptr)) < 0) + return ret; + if (s->fileversion > 3800) + skip_bits_long(&s->gb, offset * 8); + else + skip_bits_long(&s->gb, offset); + } + + if (!nblocks || nblocks > INT_MAX) { + av_log(avctx, AV_LOG_ERROR, "Invalid sample count: %"PRIu32".\n", + nblocks); + return AVERROR_INVALIDDATA; + } + + /* Initialize the frame decoder */ + if (init_frame_decoder(s) < 0) { + av_log(avctx, AV_LOG_ERROR, "Error reading frame header\n"); + return AVERROR_INVALIDDATA; + } + s->samples = nblocks; + } + + if (!s->data) { + *got_frame_ptr = 0; + return avpkt->size; + } + + blockstodecode = FFMIN(s->blocks_per_loop, s->samples); + // for old files coefficients were not interleaved, + // so we 
need to decode all of them at once + if (s->fileversion < 3930) + blockstodecode = s->samples; + + /* reallocate decoded sample buffer if needed */ + av_fast_malloc(&s->decoded_buffer, &s->decoded_size, + 2 * FFALIGN(blockstodecode, 8) * sizeof(*s->decoded_buffer)); + if (!s->decoded_buffer) + return AVERROR(ENOMEM); + memset(s->decoded_buffer, 0, s->decoded_size); + s->decoded[0] = s->decoded_buffer; + s->decoded[1] = s->decoded_buffer + FFALIGN(blockstodecode, 8); + + /* get output buffer */ + frame->nb_samples = blockstodecode; + if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) + return ret; + + s->error=0; + + if ((s->channels == 1) || (s->frameflags & APE_FRAMECODE_PSEUDO_STEREO)) + ape_unpack_mono(s, blockstodecode); + else + ape_unpack_stereo(s, blockstodecode); + emms_c(); + + if (s->error) { + s->samples=0; + av_log(avctx, AV_LOG_ERROR, "Error decoding frame\n"); + return AVERROR_INVALIDDATA; + } + + switch (s->bps) { + case 8: + for (ch = 0; ch < s->channels; ch++) { + sample8 = (uint8_t *)frame->data[ch]; + for (i = 0; i < blockstodecode; i++) + *sample8++ = (s->decoded[ch][i] + 0x80) & 0xff; + } + break; + case 16: + for (ch = 0; ch < s->channels; ch++) { + sample16 = (int16_t *)frame->data[ch]; + for (i = 0; i < blockstodecode; i++) + *sample16++ = s->decoded[ch][i]; + } + break; + case 24: + for (ch = 0; ch < s->channels; ch++) { + sample24 = (int32_t *)frame->data[ch]; + for (i = 0; i < blockstodecode; i++) + *sample24++ = s->decoded[ch][i] << 8; + } + break; + } + + s->samples -= blockstodecode; + + *got_frame_ptr = 1; + + return !s->samples ? 
avpkt->size : 0; +} + +static void ape_flush(AVCodecContext *avctx) +{ + APEContext *s = avctx->priv_data; + s->samples= 0; +} + +#define OFFSET(x) offsetof(APEContext, x) +#define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM) +static const AVOption options[] = { + { "max_samples", "maximum number of samples decoded per call", OFFSET(blocks_per_loop), AV_OPT_TYPE_INT, { .i64 = 4608 }, 1, INT_MAX, PAR, "max_samples" }, + { "all", "no maximum. decode all samples for each packet at once", 0, AV_OPT_TYPE_CONST, { .i64 = INT_MAX }, INT_MIN, INT_MAX, PAR, "max_samples" }, + { NULL}, +}; + +static const AVClass ape_decoder_class = { + .class_name = "APE decoder", + .item_name = av_default_item_name, + .option = options, + .version = LIBAVUTIL_VERSION_INT, +}; + +AVCodec ff_ape_decoder = { + .name = "ape", + .long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"), + .type = AVMEDIA_TYPE_AUDIO, + .id = AV_CODEC_ID_APE, + .priv_data_size = sizeof(APEContext), + .init = ape_decode_init, + .close = ape_decode_close, + .decode = ape_decode_frame, + .capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DELAY | + AV_CODEC_CAP_DR1, + .flush = ape_flush, + .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P, + AV_SAMPLE_FMT_S16P, + AV_SAMPLE_FMT_S32P, + AV_SAMPLE_FMT_NONE }, + .priv_class = &ape_decoder_class, +}; From c2a89584e657151f3eafb0d96f836ba183930675 Mon Sep 17 00:00:00 2001 From: turly221 Date: Mon, 9 Dec 2024 16:05:52 +0000 Subject: [PATCH 02/13] commit patch 18622210 --- libavcodec/dnxhddec.c | 8 +- libavcodec/dnxhddec.c.orig | 712 +++++++++++++++++++++++++++++++++++++ 2 files changed, 718 insertions(+), 2 deletions(-) create mode 100644 libavcodec/dnxhddec.c.orig diff --git a/libavcodec/dnxhddec.c b/libavcodec/dnxhddec.c index 4d1b006bb50b5..66a0de2e627af 100644 --- a/libavcodec/dnxhddec.c +++ b/libavcodec/dnxhddec.c @@ -294,14 +294,18 @@ static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame, if (ctx->mb_height > 68 && 
ff_dnxhd_check_header_prefix_hr(header_prefix)) { ctx->data_offset = 0x170 + (ctx->mb_height << 2); } else { - if (ctx->mb_height > 68 || - (ctx->mb_height << frame->interlaced_frame) > (ctx->height + 15) >> 4) { + if (ctx->mb_height > 68) { av_log(ctx->avctx, AV_LOG_ERROR, "mb height too big: %d\n", ctx->mb_height); return AVERROR_INVALIDDATA; } ctx->data_offset = 0x280; } + if ((ctx->mb_height << frame->interlaced_frame) > (ctx->height + 15) >> 4) { + av_log(ctx->avctx, AV_LOG_ERROR, + "mb height too big: %d\n", ctx->mb_height); + return AVERROR_INVALIDDATA; + } if (buf_size < ctx->data_offset) { av_log(ctx->avctx, AV_LOG_ERROR, diff --git a/libavcodec/dnxhddec.c.orig b/libavcodec/dnxhddec.c.orig new file mode 100644 index 0000000000000..4d1b006bb50b5 --- /dev/null +++ b/libavcodec/dnxhddec.c.orig @@ -0,0 +1,712 @@ +/* + * VC3/DNxHD decoder. + * Copyright (c) 2007 SmartJog S.A., Baptiste Coudurier + * Copyright (c) 2011 MirriAd Ltd + * Copyright (c) 2015 Christophe Gisquet + * + * 10 bit support added by MirriAd Ltd, Joseph Artsimovich + * Slice multithreading and MB interlaced support added by Christophe Gisquet + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "libavutil/imgutils.h" +#include "libavutil/timer.h" +#include "avcodec.h" +#include "blockdsp.h" +#define UNCHECKED_BITSTREAM_READER 1 +#include "get_bits.h" +#include "dnxhddata.h" +#include "idctdsp.h" +#include "internal.h" +#include "profiles.h" +#include "thread.h" + +typedef struct RowContext { + DECLARE_ALIGNED(16, int16_t, blocks)[12][64]; + int luma_scale[64]; + int chroma_scale[64]; + GetBitContext gb; + int last_dc[3]; + int last_qscale; + int errors; + /** -1:not set yet 0:off=RGB 1:on=YUV 2:variable */ + int format; +} RowContext; + +typedef struct DNXHDContext { + AVCodecContext *avctx; + RowContext *rows; + BlockDSPContext bdsp; + const uint8_t* buf; + int buf_size; + int64_t cid; ///< compression id + unsigned int width, height; + enum AVPixelFormat pix_fmt; + unsigned int mb_width, mb_height; + uint32_t mb_scan_index[256]; + int data_offset; // End of mb_scan_index, where macroblocks start + int cur_field; ///< current interlaced field + VLC ac_vlc, dc_vlc, run_vlc; + IDCTDSPContext idsp; + ScanTable scantable; + const CIDEntry *cid_table; + int bit_depth; // 8, 10, 12 or 0 if not initialized at all. 
+ int is_444; + int mbaff; + int act; + int (*decode_dct_block)(const struct DNXHDContext *ctx, + RowContext *row, int n); +} DNXHDContext; + +#define DNXHD_VLC_BITS 9 +#define DNXHD_DC_VLC_BITS 7 + +static int dnxhd_decode_dct_block_8(const DNXHDContext *ctx, + RowContext *row, int n); +static int dnxhd_decode_dct_block_10(const DNXHDContext *ctx, + RowContext *row, int n); +static int dnxhd_decode_dct_block_10_444(const DNXHDContext *ctx, + RowContext *row, int n); +static int dnxhd_decode_dct_block_12(const DNXHDContext *ctx, + RowContext *row, int n); +static int dnxhd_decode_dct_block_12_444(const DNXHDContext *ctx, + RowContext *row, int n); + +static av_cold int dnxhd_decode_init(AVCodecContext *avctx) +{ + DNXHDContext *ctx = avctx->priv_data; + + ctx->avctx = avctx; + ctx->cid = -1; + avctx->colorspace = AVCOL_SPC_BT709; + + avctx->coded_width = FFALIGN(avctx->width, 16); + avctx->coded_height = FFALIGN(avctx->height, 16); + + ctx->rows = av_mallocz_array(avctx->thread_count, sizeof(RowContext)); + if (!ctx->rows) + return AVERROR(ENOMEM); + + return 0; +} + +static int dnxhd_init_vlc(DNXHDContext *ctx, uint32_t cid, int bitdepth) +{ + if (cid != ctx->cid) { + int index; + + if ((index = ff_dnxhd_get_cid_table(cid)) < 0) { + av_log(ctx->avctx, AV_LOG_ERROR, "unsupported cid %d\n", cid); + return AVERROR(ENOSYS); + } + if (ff_dnxhd_cid_table[index].bit_depth != bitdepth && + ff_dnxhd_cid_table[index].bit_depth != DNXHD_VARIABLE) { + av_log(ctx->avctx, AV_LOG_ERROR, "bit depth mismatches %d %d\n", ff_dnxhd_cid_table[index].bit_depth, bitdepth); + return AVERROR_INVALIDDATA; + } + ctx->cid_table = &ff_dnxhd_cid_table[index]; + av_log(ctx->avctx, AV_LOG_VERBOSE, "Profile cid %d.\n", cid); + + ff_free_vlc(&ctx->ac_vlc); + ff_free_vlc(&ctx->dc_vlc); + ff_free_vlc(&ctx->run_vlc); + + init_vlc(&ctx->ac_vlc, DNXHD_VLC_BITS, 257, + ctx->cid_table->ac_bits, 1, 1, + ctx->cid_table->ac_codes, 2, 2, 0); + init_vlc(&ctx->dc_vlc, DNXHD_DC_VLC_BITS, bitdepth > 8 ? 
14 : 12, + ctx->cid_table->dc_bits, 1, 1, + ctx->cid_table->dc_codes, 1, 1, 0); + init_vlc(&ctx->run_vlc, DNXHD_VLC_BITS, 62, + ctx->cid_table->run_bits, 1, 1, + ctx->cid_table->run_codes, 2, 2, 0); + + ctx->cid = cid; + } + return 0; +} + +static av_cold int dnxhd_decode_init_thread_copy(AVCodecContext *avctx) +{ + DNXHDContext *ctx = avctx->priv_data; + + // make sure VLC tables will be loaded when cid is parsed + ctx->cid = -1; + + ctx->rows = av_mallocz_array(avctx->thread_count, sizeof(RowContext)); + if (!ctx->rows) + return AVERROR(ENOMEM); + + return 0; +} + +static int dnxhd_get_profile(int cid) +{ + switch(cid) { + case 1270: + return FF_PROFILE_DNXHR_444; + case 1271: + return FF_PROFILE_DNXHR_HQX; + case 1272: + return FF_PROFILE_DNXHR_HQ; + case 1273: + return FF_PROFILE_DNXHR_SQ; + case 1274: + return FF_PROFILE_DNXHR_LB; + } + return FF_PROFILE_DNXHD; +} + +static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame, + const uint8_t *buf, int buf_size, + int first_field) +{ + int i, cid, ret; + int old_bit_depth = ctx->bit_depth, bitdepth; + uint64_t header_prefix; + if (buf_size < 0x280) { + av_log(ctx->avctx, AV_LOG_ERROR, + "buffer too small (%d < 640).\n", buf_size); + return AVERROR_INVALIDDATA; + } + + header_prefix = ff_dnxhd_parse_header_prefix(buf); + if (header_prefix == 0) { + av_log(ctx->avctx, AV_LOG_ERROR, + "unknown header 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X\n", + buf[0], buf[1], buf[2], buf[3], buf[4]); + return AVERROR_INVALIDDATA; + } + if (buf[5] & 2) { /* interlaced */ + ctx->cur_field = buf[5] & 1; + frame->interlaced_frame = 1; + frame->top_field_first = first_field ^ ctx->cur_field; + av_log(ctx->avctx, AV_LOG_DEBUG, + "interlaced %d, cur field %d\n", buf[5] & 3, ctx->cur_field); + } else { + ctx->cur_field = 0; + } + ctx->mbaff = (buf[0x6] >> 5) & 1; + + ctx->height = AV_RB16(buf + 0x18); + ctx->width = AV_RB16(buf + 0x1a); + + switch(buf[0x21] >> 5) { + case 1: bitdepth = 8; break; + case 2: bitdepth = 10; break; + case 
3: bitdepth = 12; break; + default: + av_log(ctx->avctx, AV_LOG_ERROR, + "Unknown bitdepth indicator (%d)\n", buf[0x21] >> 5); + return AVERROR_INVALIDDATA; + } + + cid = AV_RB32(buf + 0x28); + + ctx->avctx->profile = dnxhd_get_profile(cid); + + if ((ret = dnxhd_init_vlc(ctx, cid, bitdepth)) < 0) + return ret; + if (ctx->mbaff && ctx->cid_table->cid != 1260) + av_log(ctx->avctx, AV_LOG_WARNING, + "Adaptive MB interlace flag in an unsupported profile.\n"); + + ctx->act = buf[0x2C] & 7; + if (ctx->act && ctx->cid_table->cid != 1256 && ctx->cid_table->cid != 1270) + av_log(ctx->avctx, AV_LOG_WARNING, + "Adaptive color transform in an unsupported profile.\n"); + + ctx->is_444 = (buf[0x2C] >> 6) & 1; + if (ctx->is_444) { + if (bitdepth == 8) { + avpriv_request_sample(ctx->avctx, "4:4:4 8 bits\n"); + return AVERROR_INVALIDDATA; + } else if (bitdepth == 10) { + ctx->decode_dct_block = dnxhd_decode_dct_block_10_444; + ctx->pix_fmt = ctx->act ? AV_PIX_FMT_YUV444P10 + : AV_PIX_FMT_GBRP10; + } else { + ctx->decode_dct_block = dnxhd_decode_dct_block_12_444; + ctx->pix_fmt = ctx->act ? 
AV_PIX_FMT_YUV444P12 + : AV_PIX_FMT_GBRP12; + } + } else if (bitdepth == 12) { + ctx->decode_dct_block = dnxhd_decode_dct_block_12; + ctx->pix_fmt = AV_PIX_FMT_YUV422P12; + } else if (bitdepth == 10) { + ctx->decode_dct_block = dnxhd_decode_dct_block_10; + ctx->pix_fmt = AV_PIX_FMT_YUV422P10; + } else { + ctx->decode_dct_block = dnxhd_decode_dct_block_8; + ctx->pix_fmt = AV_PIX_FMT_YUV422P; + } + + ctx->avctx->bits_per_raw_sample = ctx->bit_depth = bitdepth; + if (ctx->bit_depth != old_bit_depth) { + ff_blockdsp_init(&ctx->bdsp, ctx->avctx); + ff_idctdsp_init(&ctx->idsp, ctx->avctx); + ff_init_scantable(ctx->idsp.idct_permutation, &ctx->scantable, + ff_zigzag_direct); + } + + // make sure profile size constraints are respected + // DNx100 allows 1920->1440 and 1280->960 subsampling + if (ctx->width != ctx->cid_table->width && + ctx->cid_table->width != DNXHD_VARIABLE) { + av_reduce(&ctx->avctx->sample_aspect_ratio.num, + &ctx->avctx->sample_aspect_ratio.den, + ctx->width, ctx->cid_table->width, 255); + ctx->width = ctx->cid_table->width; + } + + if (buf_size < ctx->cid_table->coding_unit_size) { + av_log(ctx->avctx, AV_LOG_ERROR, "incorrect frame size (%d < %d).\n", + buf_size, ctx->cid_table->coding_unit_size); + return AVERROR_INVALIDDATA; + } + + ctx->mb_width = (ctx->width + 15)>> 4; + ctx->mb_height = buf[0x16d]; + + if ((ctx->height + 15) >> 4 == ctx->mb_height && frame->interlaced_frame) + ctx->height <<= 1; + + av_log(ctx->avctx, AV_LOG_VERBOSE, "%dx%d, 4:%s %d bits, MBAFF=%d ACT=%d\n", + ctx->width, ctx->height, ctx->is_444 ? 
"4:4" : "2:2", + ctx->bit_depth, ctx->mbaff, ctx->act); + + // Newer format supports variable mb_scan_index sizes + if (ctx->mb_height > 68 && ff_dnxhd_check_header_prefix_hr(header_prefix)) { + ctx->data_offset = 0x170 + (ctx->mb_height << 2); + } else { + if (ctx->mb_height > 68 || + (ctx->mb_height << frame->interlaced_frame) > (ctx->height + 15) >> 4) { + av_log(ctx->avctx, AV_LOG_ERROR, + "mb height too big: %d\n", ctx->mb_height); + return AVERROR_INVALIDDATA; + } + ctx->data_offset = 0x280; + } + + if (buf_size < ctx->data_offset) { + av_log(ctx->avctx, AV_LOG_ERROR, + "buffer too small (%d < %d).\n", buf_size, ctx->data_offset); + return AVERROR_INVALIDDATA; + } + + av_assert0((unsigned)ctx->mb_height <= FF_ARRAY_ELEMS(ctx->mb_scan_index)); + + for (i = 0; i < ctx->mb_height; i++) { + ctx->mb_scan_index[i] = AV_RB32(buf + 0x170 + (i << 2)); + ff_dlog(ctx->avctx, "mb scan index %d, pos %d: %u\n", i, 0x170 + (i << 2), ctx->mb_scan_index[i]); + if (buf_size - ctx->data_offset < ctx->mb_scan_index[i]) { + av_log(ctx->avctx, AV_LOG_ERROR, + "invalid mb scan index (%u vs %u).\n", + ctx->mb_scan_index[i], buf_size - ctx->data_offset); + return AVERROR_INVALIDDATA; + } + } + + return 0; +} + +static av_always_inline int dnxhd_decode_dct_block(const DNXHDContext *ctx, + RowContext *row, + int n, + int index_bits, + int level_bias, + int level_shift, + int dc_shift) +{ + int i, j, index1, index2, len, flags; + int level, component, sign; + const int *scale; + const uint8_t *weight_matrix; + const uint8_t *ac_info = ctx->cid_table->ac_info; + int16_t *block = row->blocks[n]; + const int eob_index = ctx->cid_table->eob_index; + int ret = 0; + OPEN_READER(bs, &row->gb); + + ctx->bdsp.clear_block(block); + + if (!ctx->is_444) { + if (n & 2) { + component = 1 + (n & 1); + scale = row->chroma_scale; + weight_matrix = ctx->cid_table->chroma_weight; + } else { + component = 0; + scale = row->luma_scale; + weight_matrix = ctx->cid_table->luma_weight; + } + } else { + 
component = (n >> 1) % 3; + if (component) { + scale = row->chroma_scale; + weight_matrix = ctx->cid_table->chroma_weight; + } else { + scale = row->luma_scale; + weight_matrix = ctx->cid_table->luma_weight; + } + } + + UPDATE_CACHE(bs, &row->gb); + GET_VLC(len, bs, &row->gb, ctx->dc_vlc.table, DNXHD_DC_VLC_BITS, 1); + if (len) { + level = GET_CACHE(bs, &row->gb); + LAST_SKIP_BITS(bs, &row->gb, len); + sign = ~level >> 31; + level = (NEG_USR32(sign ^ level, len) ^ sign) - sign; + row->last_dc[component] += level * (1 << dc_shift); + } + block[0] = row->last_dc[component]; + + i = 0; + + UPDATE_CACHE(bs, &row->gb); + GET_VLC(index1, bs, &row->gb, ctx->ac_vlc.table, + DNXHD_VLC_BITS, 2); + + while (index1 != eob_index) { + level = ac_info[2*index1+0]; + flags = ac_info[2*index1+1]; + + sign = SHOW_SBITS(bs, &row->gb, 1); + SKIP_BITS(bs, &row->gb, 1); + + if (flags & 1) { + level += SHOW_UBITS(bs, &row->gb, index_bits) << 7; + SKIP_BITS(bs, &row->gb, index_bits); + } + + if (flags & 2) { + UPDATE_CACHE(bs, &row->gb); + GET_VLC(index2, bs, &row->gb, ctx->run_vlc.table, + DNXHD_VLC_BITS, 2); + i += ctx->cid_table->run[index2]; + } + + if (++i > 63) { + av_log(ctx->avctx, AV_LOG_ERROR, "ac tex damaged %d, %d\n", n, i); + ret = -1; + break; + } + + j = ctx->scantable.permutated[i]; + level *= scale[i]; + level += scale[i] >> 1; + if (level_bias < 32 || weight_matrix[i] != level_bias) + level += level_bias; // 1<<(level_shift-1) + level >>= level_shift; + + block[j] = (level ^ sign) - sign; + + UPDATE_CACHE(bs, &row->gb); + GET_VLC(index1, bs, &row->gb, ctx->ac_vlc.table, + DNXHD_VLC_BITS, 2); + } + + CLOSE_READER(bs, &row->gb); + return ret; +} + +static int dnxhd_decode_dct_block_8(const DNXHDContext *ctx, + RowContext *row, int n) +{ + return dnxhd_decode_dct_block(ctx, row, n, 4, 32, 6, 0); +} + +static int dnxhd_decode_dct_block_10(const DNXHDContext *ctx, + RowContext *row, int n) +{ + return dnxhd_decode_dct_block(ctx, row, n, 6, 8, 4, 0); +} + +static int 
dnxhd_decode_dct_block_10_444(const DNXHDContext *ctx, + RowContext *row, int n) +{ + return dnxhd_decode_dct_block(ctx, row, n, 6, 32, 6, 0); +} + +static int dnxhd_decode_dct_block_12(const DNXHDContext *ctx, + RowContext *row, int n) +{ + return dnxhd_decode_dct_block(ctx, row, n, 6, 8, 4, 2); +} + +static int dnxhd_decode_dct_block_12_444(const DNXHDContext *ctx, + RowContext *row, int n) +{ + return dnxhd_decode_dct_block(ctx, row, n, 6, 32, 4, 2); +} + +static int dnxhd_decode_macroblock(const DNXHDContext *ctx, RowContext *row, + AVFrame *frame, int x, int y) +{ + int shift1 = ctx->bit_depth >= 10; + int dct_linesize_luma = frame->linesize[0]; + int dct_linesize_chroma = frame->linesize[1]; + uint8_t *dest_y, *dest_u, *dest_v; + int dct_y_offset, dct_x_offset; + int qscale, i, act; + int interlaced_mb = 0; + + if (ctx->mbaff) { + interlaced_mb = get_bits1(&row->gb); + qscale = get_bits(&row->gb, 10); + } else { + qscale = get_bits(&row->gb, 11); + } + act = get_bits1(&row->gb); + if (act) { + if (!ctx->act) { + static int act_warned; + if (!act_warned) { + act_warned = 1; + av_log(ctx->avctx, AV_LOG_ERROR, + "ACT flag set, in violation of frame header.\n"); + } + } else if (row->format == -1) { + row->format = act; + } else if (row->format != act) { + row->format = 2; // Variable + } + } + + if (qscale != row->last_qscale) { + for (i = 0; i < 64; i++) { + row->luma_scale[i] = qscale * ctx->cid_table->luma_weight[i]; + row->chroma_scale[i] = qscale * ctx->cid_table->chroma_weight[i]; + } + row->last_qscale = qscale; + } + + for (i = 0; i < 8 + 4 * ctx->is_444; i++) { + if (ctx->decode_dct_block(ctx, row, i) < 0) + return AVERROR_INVALIDDATA; + } + + if (frame->interlaced_frame) { + dct_linesize_luma <<= 1; + dct_linesize_chroma <<= 1; + } + + dest_y = frame->data[0] + ((y * dct_linesize_luma) << 4) + (x << (4 + shift1)); + dest_u = frame->data[1] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1 + ctx->is_444)); + dest_v = frame->data[2] + ((y * 
dct_linesize_chroma) << 4) + (x << (3 + shift1 + ctx->is_444)); + + if (frame->interlaced_frame && ctx->cur_field) { + dest_y += frame->linesize[0]; + dest_u += frame->linesize[1]; + dest_v += frame->linesize[2]; + } + if (interlaced_mb) { + dct_linesize_luma <<= 1; + dct_linesize_chroma <<= 1; + } + + dct_y_offset = interlaced_mb ? frame->linesize[0] : (dct_linesize_luma << 3); + dct_x_offset = 8 << shift1; + if (!ctx->is_444) { + ctx->idsp.idct_put(dest_y, dct_linesize_luma, row->blocks[0]); + ctx->idsp.idct_put(dest_y + dct_x_offset, dct_linesize_luma, row->blocks[1]); + ctx->idsp.idct_put(dest_y + dct_y_offset, dct_linesize_luma, row->blocks[4]); + ctx->idsp.idct_put(dest_y + dct_y_offset + dct_x_offset, dct_linesize_luma, row->blocks[5]); + + if (!(ctx->avctx->flags & AV_CODEC_FLAG_GRAY)) { + dct_y_offset = interlaced_mb ? frame->linesize[1] : (dct_linesize_chroma << 3); + ctx->idsp.idct_put(dest_u, dct_linesize_chroma, row->blocks[2]); + ctx->idsp.idct_put(dest_v, dct_linesize_chroma, row->blocks[3]); + ctx->idsp.idct_put(dest_u + dct_y_offset, dct_linesize_chroma, row->blocks[6]); + ctx->idsp.idct_put(dest_v + dct_y_offset, dct_linesize_chroma, row->blocks[7]); + } + } else { + ctx->idsp.idct_put(dest_y, dct_linesize_luma, row->blocks[0]); + ctx->idsp.idct_put(dest_y + dct_x_offset, dct_linesize_luma, row->blocks[1]); + ctx->idsp.idct_put(dest_y + dct_y_offset, dct_linesize_luma, row->blocks[6]); + ctx->idsp.idct_put(dest_y + dct_y_offset + dct_x_offset, dct_linesize_luma, row->blocks[7]); + + if (!(ctx->avctx->flags & AV_CODEC_FLAG_GRAY)) { + dct_y_offset = interlaced_mb ? 
frame->linesize[1] : (dct_linesize_chroma << 3); + ctx->idsp.idct_put(dest_u, dct_linesize_chroma, row->blocks[2]); + ctx->idsp.idct_put(dest_u + dct_x_offset, dct_linesize_chroma, row->blocks[3]); + ctx->idsp.idct_put(dest_u + dct_y_offset, dct_linesize_chroma, row->blocks[8]); + ctx->idsp.idct_put(dest_u + dct_y_offset + dct_x_offset, dct_linesize_chroma, row->blocks[9]); + ctx->idsp.idct_put(dest_v, dct_linesize_chroma, row->blocks[4]); + ctx->idsp.idct_put(dest_v + dct_x_offset, dct_linesize_chroma, row->blocks[5]); + ctx->idsp.idct_put(dest_v + dct_y_offset, dct_linesize_chroma, row->blocks[10]); + ctx->idsp.idct_put(dest_v + dct_y_offset + dct_x_offset, dct_linesize_chroma, row->blocks[11]); + } + } + + return 0; +} + +static int dnxhd_decode_row(AVCodecContext *avctx, void *data, + int rownb, int threadnb) +{ + const DNXHDContext *ctx = avctx->priv_data; + uint32_t offset = ctx->mb_scan_index[rownb]; + RowContext *row = ctx->rows + threadnb; + int x; + + row->last_dc[0] = + row->last_dc[1] = + row->last_dc[2] = 1 << (ctx->bit_depth + 2); // for levels +2^(bitdepth-1) + init_get_bits(&row->gb, ctx->buf + offset, (ctx->buf_size - offset) << 3); + for (x = 0; x < ctx->mb_width; x++) { + //START_TIMER; + int ret = dnxhd_decode_macroblock(ctx, row, data, x, rownb); + if (ret < 0) { + row->errors++; + return ret; + } + //STOP_TIMER("decode macroblock"); + } + + return 0; +} + +static int dnxhd_decode_frame(AVCodecContext *avctx, void *data, + int *got_frame, AVPacket *avpkt) +{ + const uint8_t *buf = avpkt->data; + int buf_size = avpkt->size; + DNXHDContext *ctx = avctx->priv_data; + ThreadFrame frame = { .f = data }; + AVFrame *picture = data; + int first_field = 1; + int ret, i; + + ff_dlog(avctx, "frame size %d\n", buf_size); + + for (i = 0; i < avctx->thread_count; i++) + ctx->rows[i].format = -1; + +decode_coding_unit: + if ((ret = dnxhd_decode_header(ctx, picture, buf, buf_size, first_field)) < 0) + return ret; + + if ((avctx->width || avctx->height) && + 
(ctx->width != avctx->width || ctx->height != avctx->height)) { + av_log(avctx, AV_LOG_WARNING, "frame size changed: %dx%d -> %dx%d\n", + avctx->width, avctx->height, ctx->width, ctx->height); + first_field = 1; + } + if (avctx->pix_fmt != AV_PIX_FMT_NONE && avctx->pix_fmt != ctx->pix_fmt) { + av_log(avctx, AV_LOG_WARNING, "pix_fmt changed: %s -> %s\n", + av_get_pix_fmt_name(avctx->pix_fmt), av_get_pix_fmt_name(ctx->pix_fmt)); + first_field = 1; + } + + avctx->pix_fmt = ctx->pix_fmt; + ret = ff_set_dimensions(avctx, ctx->width, ctx->height); + if (ret < 0) + return ret; + + if (first_field) { + if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0) + return ret; + picture->pict_type = AV_PICTURE_TYPE_I; + picture->key_frame = 1; + } + + ctx->buf_size = buf_size - ctx->data_offset; + ctx->buf = buf + ctx->data_offset; + avctx->execute2(avctx, dnxhd_decode_row, picture, NULL, ctx->mb_height); + + if (first_field && picture->interlaced_frame) { + buf += ctx->cid_table->coding_unit_size; + buf_size -= ctx->cid_table->coding_unit_size; + first_field = 0; + goto decode_coding_unit; + } + + ret = 0; + for (i = 0; i < avctx->thread_count; i++) { + ret += ctx->rows[i].errors; + ctx->rows[i].errors = 0; + } + + if (ctx->act) { + static int act_warned; + int format = ctx->rows[0].format; + for (i = 1; i < avctx->thread_count; i++) { + if (ctx->rows[i].format != format && + ctx->rows[i].format != -1 /* not run */) { + format = 2; + break; + } + } + switch (format) { + case -1: + case 2: + if (!act_warned) { + act_warned = 1; + av_log(ctx->avctx, AV_LOG_ERROR, + "Unsupported: variable ACT flag.\n"); + } + break; + case 0: + ctx->pix_fmt = ctx->bit_depth==10 + ? AV_PIX_FMT_GBRP10 : AV_PIX_FMT_GBRP12; + break; + case 1: + ctx->pix_fmt = ctx->bit_depth==10 + ? 
AV_PIX_FMT_YUV444P10 : AV_PIX_FMT_YUV444P12; + break; + } + } + avctx->pix_fmt = ctx->pix_fmt; + if (ret) { + av_log(ctx->avctx, AV_LOG_ERROR, "%d lines with errors\n", ret); + return AVERROR_INVALIDDATA; + } + + *got_frame = 1; + return avpkt->size; +} + +static av_cold int dnxhd_decode_close(AVCodecContext *avctx) +{ + DNXHDContext *ctx = avctx->priv_data; + + ff_free_vlc(&ctx->ac_vlc); + ff_free_vlc(&ctx->dc_vlc); + ff_free_vlc(&ctx->run_vlc); + + av_freep(&ctx->rows); + + return 0; +} + +AVCodec ff_dnxhd_decoder = { + .name = "dnxhd", + .long_name = NULL_IF_CONFIG_SMALL("VC3/DNxHD"), + .type = AVMEDIA_TYPE_VIDEO, + .id = AV_CODEC_ID_DNXHD, + .priv_data_size = sizeof(DNXHDContext), + .init = dnxhd_decode_init, + .close = dnxhd_decode_close, + .decode = dnxhd_decode_frame, + .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS | + AV_CODEC_CAP_SLICE_THREADS, + .init_thread_copy = ONLY_IF_THREADS_ENABLED(dnxhd_decode_init_thread_copy), + .profiles = NULL_IF_CONFIG_SMALL(ff_dnxhd_profiles), +}; From dafc656b8f0e6f8eafb7b455a82ccf2ba9843c84 Mon Sep 17 00:00:00 2001 From: turly221 Date: Mon, 9 Dec 2024 16:05:54 +0000 Subject: [PATCH 03/13] commit patch 23035798 --- libavformat/rtpdec_h264.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libavformat/rtpdec_h264.c b/libavformat/rtpdec_h264.c index 8dd56a549e444..6f8148ab6d5db 100644 --- a/libavformat/rtpdec_h264.c +++ b/libavformat/rtpdec_h264.c @@ -166,7 +166,7 @@ static int sdp_parse_fmtp_config_h264(AVFormatContext *s, parse_profile_level_id(s, h264_data, value); } else if (!strcmp(attr, "sprop-parameter-sets")) { int ret; - if (value[strlen(value) - 1] == ',') { + if (*value == 0 || value[strlen(value) - 1] == ',') { av_log(s, AV_LOG_WARNING, "Missing PPS in sprop-parameter-sets, ignoring\n"); return 0; } From 656eb99bf448a099cc88293c4ee8f2ecf7e1e9f7 Mon Sep 17 00:00:00 2001 From: turly221 Date: Mon, 9 Dec 2024 16:05:56 +0000 Subject: [PATCH 04/13] commit patch 18622211 --- 
libavcodec/ffv1dec.c | 2 +- libavcodec/ffv1dec.c.orig | 1040 +++++++++++++++++++++++++++++++++++++ 2 files changed, 1041 insertions(+), 1 deletion(-) create mode 100644 libavcodec/ffv1dec.c.orig diff --git a/libavcodec/ffv1dec.c b/libavcodec/ffv1dec.c index a57ec5363c490..6f6f4ae09c471 100644 --- a/libavcodec/ffv1dec.c +++ b/libavcodec/ffv1dec.c @@ -684,7 +684,7 @@ static int read_header(FFV1Context *f) } else { const uint8_t *p = c->bytestream_end; for (f->slice_count = 0; - f->slice_count < MAX_SLICES && 3 < p - c->bytestream_start; + f->slice_count < MAX_SLICES && 3 + 5*!!f->ec < p - c->bytestream_start; f->slice_count++) { int trailer = 3 + 5*!!f->ec; int size = AV_RB24(p-trailer); diff --git a/libavcodec/ffv1dec.c.orig b/libavcodec/ffv1dec.c.orig new file mode 100644 index 0000000000000..a57ec5363c490 --- /dev/null +++ b/libavcodec/ffv1dec.c.orig @@ -0,0 +1,1040 @@ +/* + * FFV1 decoder + * + * Copyright (c) 2003-2013 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * FF Video Codec 1 (a lossless codec) decoder + */ + +#include "libavutil/avassert.h" +#include "libavutil/crc.h" +#include "libavutil/opt.h" +#include "libavutil/imgutils.h" +#include "libavutil/pixdesc.h" +#include "libavutil/timer.h" +#include "avcodec.h" +#include "internal.h" +#include "get_bits.h" +#include "rangecoder.h" +#include "golomb.h" +#include "mathops.h" +#include "ffv1.h" + +static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state, + int is_signed) +{ + if (get_rac(c, state + 0)) + return 0; + else { + int i, e; + unsigned a; + e = 0; + while (get_rac(c, state + 1 + FFMIN(e, 9))) { // 1..10 + e++; + if (e > 31) + return AVERROR_INVALIDDATA; + } + + a = 1; + for (i = e - 1; i >= 0; i--) + a += a + get_rac(c, state + 22 + FFMIN(i, 9)); // 22..31 + + e = -(is_signed && get_rac(c, state + 11 + FFMIN(e, 10))); // 11..21 + return (a ^ e) - e; + } +} + +static av_noinline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed) +{ + return get_symbol_inline(c, state, is_signed); +} + +static inline int get_vlc_symbol(GetBitContext *gb, VlcState *const state, + int bits) +{ + int k, i, v, ret; + + i = state->count; + k = 0; + while (i < state->error_sum) { // FIXME: optimize + k++; + i += i; + } + + v = get_sr_golomb(gb, k, 12, bits); + ff_dlog(NULL, "v:%d bias:%d error:%d drift:%d count:%d k:%d", + v, state->bias, state->error_sum, state->drift, state->count, k); + +#if 0 // JPEG LS + if (k == 0 && 2 * state->drift <= -state->count) + v ^= (-1); +#else + v ^= ((2 * state->drift + state->count) >> 31); +#endif + + ret = fold(v + state->bias, bits); + + update_vlc_state(state, v); + + return ret; +} + +#define TYPE int16_t +#define RENAME(name) name +#include "ffv1dec_template.c" +#undef TYPE 
+#undef RENAME + +#define TYPE int32_t +#define RENAME(name) name ## 32 +#include "ffv1dec_template.c" + +static void decode_plane(FFV1Context *s, uint8_t *src, + int w, int h, int stride, int plane_index, + int pixel_stride) +{ + int x, y; + int16_t *sample[2]; + sample[0] = s->sample_buffer + 3; + sample[1] = s->sample_buffer + w + 6 + 3; + + s->run_index = 0; + + memset(s->sample_buffer, 0, 2 * (w + 6) * sizeof(*s->sample_buffer)); + + for (y = 0; y < h; y++) { + int16_t *temp = sample[0]; // FIXME: try a normal buffer + + sample[0] = sample[1]; + sample[1] = temp; + + sample[1][-1] = sample[0][0]; + sample[0][w] = sample[0][w - 1]; + +// { START_TIMER + if (s->avctx->bits_per_raw_sample <= 8) { + decode_line(s, w, sample, plane_index, 8); + for (x = 0; x < w; x++) + src[x*pixel_stride + stride * y] = sample[1][x]; + } else { + decode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample); + if (s->packed_at_lsb) { + for (x = 0; x < w; x++) { + ((uint16_t*)(src + stride*y))[x*pixel_stride] = sample[1][x]; + } + } else { + for (x = 0; x < w; x++) { + ((uint16_t*)(src + stride*y))[x*pixel_stride] = sample[1][x] << (16 - s->avctx->bits_per_raw_sample); + } + } + } +// STOP_TIMER("decode-line") } + } +} + +static int decode_slice_header(FFV1Context *f, FFV1Context *fs) +{ + RangeCoder *c = &fs->c; + uint8_t state[CONTEXT_SIZE]; + unsigned ps, i, context_count; + memset(state, 128, sizeof(state)); + + av_assert0(f->version > 2); + + fs->slice_x = get_symbol(c, state, 0) * f->width ; + fs->slice_y = get_symbol(c, state, 0) * f->height; + fs->slice_width = (get_symbol(c, state, 0) + 1) * f->width + fs->slice_x; + fs->slice_height = (get_symbol(c, state, 0) + 1) * f->height + fs->slice_y; + + fs->slice_x /= f->num_h_slices; + fs->slice_y /= f->num_v_slices; + fs->slice_width = fs->slice_width /f->num_h_slices - fs->slice_x; + fs->slice_height = fs->slice_height/f->num_v_slices - fs->slice_y; + if ((unsigned)fs->slice_width > f->width || 
(unsigned)fs->slice_height > f->height) + return -1; + if ( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width + || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height) + return -1; + + for (i = 0; i < f->plane_count; i++) { + PlaneContext * const p = &fs->plane[i]; + int idx = get_symbol(c, state, 0); + if (idx >= (unsigned)f->quant_table_count) { + av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n"); + return -1; + } + p->quant_table_index = idx; + memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_table)); + context_count = f->context_count[idx]; + + if (p->context_count < context_count) { + av_freep(&p->state); + av_freep(&p->vlc_state); + } + p->context_count = context_count; + } + + ps = get_symbol(c, state, 0); + if (ps == 1) { + f->cur->interlaced_frame = 1; + f->cur->top_field_first = 1; + } else if (ps == 2) { + f->cur->interlaced_frame = 1; + f->cur->top_field_first = 0; + } else if (ps == 3) { + f->cur->interlaced_frame = 0; + } + f->cur->sample_aspect_ratio.num = get_symbol(c, state, 0); + f->cur->sample_aspect_ratio.den = get_symbol(c, state, 0); + + if (av_image_check_sar(f->width, f->height, + f->cur->sample_aspect_ratio) < 0) { + av_log(f->avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n", + f->cur->sample_aspect_ratio.num, + f->cur->sample_aspect_ratio.den); + f->cur->sample_aspect_ratio = (AVRational){ 0, 1 }; + } + + if (fs->version > 3) { + fs->slice_reset_contexts = get_rac(c, state); + fs->slice_coding_mode = get_symbol(c, state, 0); + if (fs->slice_coding_mode != 1) { + fs->slice_rct_by_coef = get_symbol(c, state, 0); + fs->slice_rct_ry_coef = get_symbol(c, state, 0); + if ((uint64_t)fs->slice_rct_by_coef + (uint64_t)fs->slice_rct_ry_coef > 4) { + av_log(f->avctx, AV_LOG_ERROR, "slice_rct_y_coef out of range\n"); + return AVERROR_INVALIDDATA; + } + } + } + + return 0; +} + +static int decode_slice(AVCodecContext *c, void *arg) +{ + FFV1Context *fs = *(void **)arg; + FFV1Context *f = 
fs->avctx->priv_data; + int width, height, x, y, ret; + const int ps = av_pix_fmt_desc_get(c->pix_fmt)->comp[0].step; + AVFrame * const p = f->cur; + int i, si; + + for( si=0; fs != f->slice_context[si]; si ++) + ; + + if(f->fsrc && !p->key_frame) + ff_thread_await_progress(&f->last_picture, si, 0); + + if(f->fsrc && !p->key_frame) { + FFV1Context *fssrc = f->fsrc->slice_context[si]; + FFV1Context *fsdst = f->slice_context[si]; + av_assert1(fsdst->plane_count == fssrc->plane_count); + av_assert1(fsdst == fs); + + if (!p->key_frame) + fsdst->slice_damaged |= fssrc->slice_damaged; + + for (i = 0; i < f->plane_count; i++) { + PlaneContext *psrc = &fssrc->plane[i]; + PlaneContext *pdst = &fsdst->plane[i]; + + av_free(pdst->state); + av_free(pdst->vlc_state); + memcpy(pdst, psrc, sizeof(*pdst)); + pdst->state = NULL; + pdst->vlc_state = NULL; + + if (fssrc->ac) { + pdst->state = av_malloc_array(CONTEXT_SIZE, psrc->context_count); + memcpy(pdst->state, psrc->state, CONTEXT_SIZE * psrc->context_count); + } else { + pdst->vlc_state = av_malloc_array(sizeof(*pdst->vlc_state), psrc->context_count); + memcpy(pdst->vlc_state, psrc->vlc_state, sizeof(*pdst->vlc_state) * psrc->context_count); + } + } + } + + fs->slice_rct_by_coef = 1; + fs->slice_rct_ry_coef = 1; + + if (f->version > 2) { + if (ff_ffv1_init_slice_state(f, fs) < 0) + return AVERROR(ENOMEM); + if (decode_slice_header(f, fs) < 0) { + fs->slice_x = fs->slice_y = fs->slice_height = fs->slice_width = 0; + fs->slice_damaged = 1; + return AVERROR_INVALIDDATA; + } + } + if ((ret = ff_ffv1_init_slice_state(f, fs)) < 0) + return ret; + if (f->cur->key_frame || fs->slice_reset_contexts) + ff_ffv1_clear_slice_state(f, fs); + + width = fs->slice_width; + height = fs->slice_height; + x = fs->slice_x; + y = fs->slice_y; + + if (fs->ac == AC_GOLOMB_RICE) { + if (f->version == 3 && f->micro_version > 1 || f->version > 3) + get_rac(&fs->c, (uint8_t[]) { 129 }); + fs->ac_byte_count = f->version > 2 || (!x && !y) ? 
fs->c.bytestream - fs->c.bytestream_start - 1 : 0; + init_get_bits(&fs->gb, + fs->c.bytestream_start + fs->ac_byte_count, + (fs->c.bytestream_end - fs->c.bytestream_start - fs->ac_byte_count) * 8); + } + + av_assert1(width && height); + if (f->colorspace == 0 && (f->chroma_planes || !fs->transparency)) { + const int chroma_width = AV_CEIL_RSHIFT(width, f->chroma_h_shift); + const int chroma_height = AV_CEIL_RSHIFT(height, f->chroma_v_shift); + const int cx = x >> f->chroma_h_shift; + const int cy = y >> f->chroma_v_shift; + decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0, 1); + + if (f->chroma_planes) { + decode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1, 1); + decode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1, 1); + } + if (fs->transparency) + decode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], (f->version >= 4 && !f->chroma_planes) ? 
1 : 2, 1); + } else if (f->colorspace == 0) { + decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0] , width, height, p->linesize[0], 0, 2); + decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0] + 1, width, height, p->linesize[0], 1, 2); + } else if (f->use32bit) { + uint8_t *planes[3] = { p->data[0] + ps * x + y * p->linesize[0], + p->data[1] + ps * x + y * p->linesize[1], + p->data[2] + ps * x + y * p->linesize[2] }; + decode_rgb_frame32(fs, planes, width, height, p->linesize); + } else { + uint8_t *planes[3] = { p->data[0] + ps * x + y * p->linesize[0], + p->data[1] + ps * x + y * p->linesize[1], + p->data[2] + ps * x + y * p->linesize[2] }; + decode_rgb_frame(fs, planes, width, height, p->linesize); + } + if (fs->ac != AC_GOLOMB_RICE && f->version > 2) { + int v; + get_rac(&fs->c, (uint8_t[]) { 129 }); + v = fs->c.bytestream_end - fs->c.bytestream - 2 - 5*f->ec; + if (v) { + av_log(f->avctx, AV_LOG_ERROR, "bytestream end mismatching by %d\n", v); + fs->slice_damaged = 1; + } + } + + emms_c(); + + ff_thread_report_progress(&f->picture, si, 0); + + return 0; +} + +static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale) +{ + int v; + int i = 0; + uint8_t state[CONTEXT_SIZE]; + + memset(state, 128, sizeof(state)); + + for (v = 0; i < 128; v++) { + unsigned len = get_symbol(c, state, 0) + 1; + + if (len > 128 - i || !len) + return AVERROR_INVALIDDATA; + + while (len--) { + quant_table[i] = scale * v; + i++; + } + } + + for (i = 1; i < 128; i++) + quant_table[256 - i] = -quant_table[i]; + quant_table[128] = -quant_table[127]; + + return 2 * v - 1; +} + +static int read_quant_tables(RangeCoder *c, + int16_t quant_table[MAX_CONTEXT_INPUTS][256]) +{ + int i; + int context_count = 1; + + for (i = 0; i < 5; i++) { + int ret = read_quant_table(c, quant_table[i], context_count); + if (ret < 0) + return ret; + context_count *= ret; + if (context_count > 32768U) { + return AVERROR_INVALIDDATA; + } + } + return (context_count + 1) / 2; +} + +static int 
read_extra_header(FFV1Context *f) +{ + RangeCoder *const c = &f->c; + uint8_t state[CONTEXT_SIZE]; + int i, j, k, ret; + uint8_t state2[32][CONTEXT_SIZE]; + unsigned crc = 0; + + memset(state2, 128, sizeof(state2)); + memset(state, 128, sizeof(state)); + + ff_init_range_decoder(c, f->avctx->extradata, f->avctx->extradata_size); + ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8); + + f->version = get_symbol(c, state, 0); + if (f->version < 2) { + av_log(f->avctx, AV_LOG_ERROR, "Invalid version in global header\n"); + return AVERROR_INVALIDDATA; + } + if (f->version > 2) { + c->bytestream_end -= 4; + f->micro_version = get_symbol(c, state, 0); + if (f->micro_version < 0) + return AVERROR_INVALIDDATA; + } + f->ac = get_symbol(c, state, 0); + + if (f->ac == AC_RANGE_CUSTOM_TAB) { + for (i = 1; i < 256; i++) + f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i]; + } + + f->colorspace = get_symbol(c, state, 0); //YUV cs type + f->avctx->bits_per_raw_sample = get_symbol(c, state, 0); + f->chroma_planes = get_rac(c, state); + f->chroma_h_shift = get_symbol(c, state, 0); + f->chroma_v_shift = get_symbol(c, state, 0); + f->transparency = get_rac(c, state); + f->plane_count = 1 + (f->chroma_planes || f->version<4) + f->transparency; + f->num_h_slices = 1 + get_symbol(c, state, 0); + f->num_v_slices = 1 + get_symbol(c, state, 0); + + if (f->chroma_h_shift > 4U || f->chroma_v_shift > 4U) { + av_log(f->avctx, AV_LOG_ERROR, "chroma shift parameters %d %d are invalid\n", + f->chroma_h_shift, f->chroma_v_shift); + return AVERROR_INVALIDDATA; + } + + if (f->num_h_slices > (unsigned)f->width || !f->num_h_slices || + f->num_v_slices > (unsigned)f->height || !f->num_v_slices + ) { + av_log(f->avctx, AV_LOG_ERROR, "slice count invalid\n"); + return AVERROR_INVALIDDATA; + } + + f->quant_table_count = get_symbol(c, state, 0); + if (f->quant_table_count > (unsigned)MAX_QUANT_TABLES || !f->quant_table_count) { + av_log(f->avctx, AV_LOG_ERROR, "quant table count %d is 
invalid\n", f->quant_table_count); + f->quant_table_count = 0; + return AVERROR_INVALIDDATA; + } + + for (i = 0; i < f->quant_table_count; i++) { + f->context_count[i] = read_quant_tables(c, f->quant_tables[i]); + if (f->context_count[i] < 0) { + av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n"); + return AVERROR_INVALIDDATA; + } + } + if ((ret = ff_ffv1_allocate_initial_states(f)) < 0) + return ret; + + for (i = 0; i < f->quant_table_count; i++) + if (get_rac(c, state)) { + for (j = 0; j < f->context_count[i]; j++) + for (k = 0; k < CONTEXT_SIZE; k++) { + int pred = j ? f->initial_states[i][j - 1][k] : 128; + f->initial_states[i][j][k] = + (pred + get_symbol(c, state2[k], 1)) & 0xFF; + } + } + + if (f->version > 2) { + f->ec = get_symbol(c, state, 0); + if (f->micro_version > 2) + f->intra = get_symbol(c, state, 0); + } + + if (f->version > 2) { + unsigned v; + v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, + f->avctx->extradata, f->avctx->extradata_size); + if (v || f->avctx->extradata_size < 4) { + av_log(f->avctx, AV_LOG_ERROR, "CRC mismatch %X!\n", v); + return AVERROR_INVALIDDATA; + } + crc = AV_RB32(f->avctx->extradata + f->avctx->extradata_size - 4); + } + + if (f->avctx->debug & FF_DEBUG_PICT_INFO) + av_log(f->avctx, AV_LOG_DEBUG, + "global: ver:%d.%d, coder:%d, colorspace: %d bpr:%d chroma:%d(%d:%d), alpha:%d slices:%dx%d qtabs:%d ec:%d intra:%d CRC:0x%08X\n", + f->version, f->micro_version, + f->ac, + f->colorspace, + f->avctx->bits_per_raw_sample, + f->chroma_planes, f->chroma_h_shift, f->chroma_v_shift, + f->transparency, + f->num_h_slices, f->num_v_slices, + f->quant_table_count, + f->ec, + f->intra, + crc + ); + return 0; +} + +static int read_header(FFV1Context *f) +{ + uint8_t state[CONTEXT_SIZE]; + int i, j, context_count = -1; //-1 to avoid warning + RangeCoder *const c = &f->slice_context[0]->c; + + memset(state, 128, sizeof(state)); + + if (f->version < 2) { + int chroma_planes, chroma_h_shift, chroma_v_shift, transparency, 
colorspace, bits_per_raw_sample; + unsigned v= get_symbol(c, state, 0); + if (v >= 2) { + av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v); + return AVERROR_INVALIDDATA; + } + f->version = v; + f->ac = get_symbol(c, state, 0); + + if (f->ac == AC_RANGE_CUSTOM_TAB) { + for (i = 1; i < 256; i++) + f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i]; + } + + colorspace = get_symbol(c, state, 0); //YUV cs type + bits_per_raw_sample = f->version > 0 ? get_symbol(c, state, 0) : f->avctx->bits_per_raw_sample; + chroma_planes = get_rac(c, state); + chroma_h_shift = get_symbol(c, state, 0); + chroma_v_shift = get_symbol(c, state, 0); + transparency = get_rac(c, state); + if (colorspace == 0 && f->avctx->skip_alpha) + transparency = 0; + + if (f->plane_count) { + if (colorspace != f->colorspace || + bits_per_raw_sample != f->avctx->bits_per_raw_sample || + chroma_planes != f->chroma_planes || + chroma_h_shift != f->chroma_h_shift || + chroma_v_shift != f->chroma_v_shift || + transparency != f->transparency) { + av_log(f->avctx, AV_LOG_ERROR, "Invalid change of global parameters\n"); + return AVERROR_INVALIDDATA; + } + } + + if (chroma_h_shift > 4U || chroma_v_shift > 4U) { + av_log(f->avctx, AV_LOG_ERROR, "chroma shift parameters %d %d are invalid\n", + chroma_h_shift, chroma_v_shift); + return AVERROR_INVALIDDATA; + } + + f->colorspace = colorspace; + f->avctx->bits_per_raw_sample = bits_per_raw_sample; + f->chroma_planes = chroma_planes; + f->chroma_h_shift = chroma_h_shift; + f->chroma_v_shift = chroma_v_shift; + f->transparency = transparency; + + f->plane_count = 2 + f->transparency; + } + + if (f->colorspace == 0) { + if (!f->transparency && !f->chroma_planes) { + if (f->avctx->bits_per_raw_sample <= 8) + f->avctx->pix_fmt = AV_PIX_FMT_GRAY8; + else + f->avctx->pix_fmt = AV_PIX_FMT_GRAY16; + } else if (f->transparency && !f->chroma_planes) { + if (f->avctx->bits_per_raw_sample <= 8) + f->avctx->pix_fmt = AV_PIX_FMT_YA8; + else + 
return AVERROR(ENOSYS); + } else if (f->avctx->bits_per_raw_sample<=8 && !f->transparency) { + switch(16 * f->chroma_h_shift + f->chroma_v_shift) { + case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P; break; + case 0x01: f->avctx->pix_fmt = AV_PIX_FMT_YUV440P; break; + case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P; break; + case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P; break; + case 0x20: f->avctx->pix_fmt = AV_PIX_FMT_YUV411P; break; + case 0x22: f->avctx->pix_fmt = AV_PIX_FMT_YUV410P; break; + } + } else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency) { + switch(16*f->chroma_h_shift + f->chroma_v_shift) { + case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P; break; + case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P; break; + case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P; break; + } + } else if (f->avctx->bits_per_raw_sample == 9 && !f->transparency) { + f->packed_at_lsb = 1; + switch(16 * f->chroma_h_shift + f->chroma_v_shift) { + case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P9; break; + case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P9; break; + case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P9; break; + } + } else if (f->avctx->bits_per_raw_sample == 9 && f->transparency) { + f->packed_at_lsb = 1; + switch(16 * f->chroma_h_shift + f->chroma_v_shift) { + case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P9; break; + case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P9; break; + case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P9; break; + } + } else if (f->avctx->bits_per_raw_sample == 10 && !f->transparency) { + f->packed_at_lsb = 1; + switch(16 * f->chroma_h_shift + f->chroma_v_shift) { + case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P10; break; + case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P10; break; + case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P10; break; + } + } else if (f->avctx->bits_per_raw_sample == 10 && f->transparency) { + f->packed_at_lsb = 1; + switch(16 * f->chroma_h_shift + f->chroma_v_shift) { + case 
0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P10; break; + case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P10; break; + case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P10; break; + } + } else if (f->avctx->bits_per_raw_sample == 16 && !f->transparency){ + switch(16 * f->chroma_h_shift + f->chroma_v_shift) { + case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P16; break; + case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P16; break; + case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P16; break; + } + } else if (f->avctx->bits_per_raw_sample == 16 && f->transparency){ + switch(16 * f->chroma_h_shift + f->chroma_v_shift) { + case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P16; break; + case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P16; break; + case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16; break; + } + } + } else if (f->colorspace == 1) { + if (f->chroma_h_shift || f->chroma_v_shift) { + av_log(f->avctx, AV_LOG_ERROR, + "chroma subsampling not supported in this colorspace\n"); + return AVERROR(ENOSYS); + } + if ( f->avctx->bits_per_raw_sample <= 8 && !f->transparency) + f->avctx->pix_fmt = AV_PIX_FMT_0RGB32; + else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency) + f->avctx->pix_fmt = AV_PIX_FMT_RGB32; + else if (f->avctx->bits_per_raw_sample == 9 && !f->transparency) + f->avctx->pix_fmt = AV_PIX_FMT_GBRP9; + else if (f->avctx->bits_per_raw_sample == 10 && !f->transparency) + f->avctx->pix_fmt = AV_PIX_FMT_GBRP10; + else if (f->avctx->bits_per_raw_sample == 12 && !f->transparency) + f->avctx->pix_fmt = AV_PIX_FMT_GBRP12; + else if (f->avctx->bits_per_raw_sample == 14 && !f->transparency) + f->avctx->pix_fmt = AV_PIX_FMT_GBRP14; + else if (f->avctx->bits_per_raw_sample == 16 && !f->transparency) { + f->avctx->pix_fmt = AV_PIX_FMT_GBRP16; + f->use32bit = 1; + } + } else { + av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n"); + return AVERROR(ENOSYS); + } + if (f->avctx->pix_fmt == AV_PIX_FMT_NONE) { + av_log(f->avctx, AV_LOG_ERROR, "format 
not supported\n"); + return AVERROR(ENOSYS); + } + + ff_dlog(f->avctx, "%d %d %d\n", + f->chroma_h_shift, f->chroma_v_shift, f->avctx->pix_fmt); + if (f->version < 2) { + context_count = read_quant_tables(c, f->quant_table); + if (context_count < 0) { + av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n"); + return AVERROR_INVALIDDATA; + } + f->slice_count = f->max_slice_count; + } else if (f->version < 3) { + f->slice_count = get_symbol(c, state, 0); + } else { + const uint8_t *p = c->bytestream_end; + for (f->slice_count = 0; + f->slice_count < MAX_SLICES && 3 < p - c->bytestream_start; + f->slice_count++) { + int trailer = 3 + 5*!!f->ec; + int size = AV_RB24(p-trailer); + if (size + trailer > p - c->bytestream_start) + break; + p -= size + trailer; + } + } + if (f->slice_count > (unsigned)MAX_SLICES || f->slice_count <= 0 || f->slice_count > f->max_slice_count) { + av_log(f->avctx, AV_LOG_ERROR, "slice count %d is invalid (max=%d)\n", f->slice_count, f->max_slice_count); + return AVERROR_INVALIDDATA; + } + + for (j = 0; j < f->slice_count; j++) { + FFV1Context *fs = f->slice_context[j]; + fs->ac = f->ac; + fs->packed_at_lsb = f->packed_at_lsb; + + fs->slice_damaged = 0; + + if (f->version == 2) { + fs->slice_x = get_symbol(c, state, 0) * f->width ; + fs->slice_y = get_symbol(c, state, 0) * f->height; + fs->slice_width = (get_symbol(c, state, 0) + 1) * f->width + fs->slice_x; + fs->slice_height = (get_symbol(c, state, 0) + 1) * f->height + fs->slice_y; + + fs->slice_x /= f->num_h_slices; + fs->slice_y /= f->num_v_slices; + fs->slice_width = fs->slice_width / f->num_h_slices - fs->slice_x; + fs->slice_height = fs->slice_height / f->num_v_slices - fs->slice_y; + if ((unsigned)fs->slice_width > f->width || + (unsigned)fs->slice_height > f->height) + return AVERROR_INVALIDDATA; + if ( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width + || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height) + return AVERROR_INVALIDDATA; + } + + for (i = 0; 
i < f->plane_count; i++) { + PlaneContext *const p = &fs->plane[i]; + + if (f->version == 2) { + int idx = get_symbol(c, state, 0); + if (idx > (unsigned)f->quant_table_count) { + av_log(f->avctx, AV_LOG_ERROR, + "quant_table_index out of range\n"); + return AVERROR_INVALIDDATA; + } + p->quant_table_index = idx; + memcpy(p->quant_table, f->quant_tables[idx], + sizeof(p->quant_table)); + context_count = f->context_count[idx]; + } else { + memcpy(p->quant_table, f->quant_table, sizeof(p->quant_table)); + } + + if (f->version <= 2) { + av_assert0(context_count >= 0); + if (p->context_count < context_count) { + av_freep(&p->state); + av_freep(&p->vlc_state); + } + p->context_count = context_count; + } + } + } + return 0; +} + +static av_cold int decode_init(AVCodecContext *avctx) +{ + FFV1Context *f = avctx->priv_data; + int ret; + + if ((ret = ff_ffv1_common_init(avctx)) < 0) + return ret; + + if (avctx->extradata_size > 0 && (ret = read_extra_header(f)) < 0) + return ret; + + if ((ret = ff_ffv1_init_slice_contexts(f)) < 0) + return ret; + + avctx->internal->allocate_progress = 1; + + return 0; +} + +static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) +{ + uint8_t *buf = avpkt->data; + int buf_size = avpkt->size; + FFV1Context *f = avctx->priv_data; + RangeCoder *const c = &f->slice_context[0]->c; + int i, ret; + uint8_t keystate = 128; + uint8_t *buf_p; + AVFrame *p; + + if (f->last_picture.f) + ff_thread_release_buffer(avctx, &f->last_picture); + FFSWAP(ThreadFrame, f->picture, f->last_picture); + + f->cur = p = f->picture.f; + + if (f->version < 3 && avctx->field_order > AV_FIELD_PROGRESSIVE) { + /* we have interlaced material flagged in container */ + p->interlaced_frame = 1; + if (avctx->field_order == AV_FIELD_TT || avctx->field_order == AV_FIELD_TB) + p->top_field_first = 1; + } + + f->avctx = avctx; + ff_init_range_decoder(c, buf, buf_size); + ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8); + + p->pict_type = 
AV_PICTURE_TYPE_I; //FIXME I vs. P + if (get_rac(c, &keystate)) { + p->key_frame = 1; + f->key_frame_ok = 0; + if ((ret = read_header(f)) < 0) + return ret; + f->key_frame_ok = 1; + } else { + if (!f->key_frame_ok) { + av_log(avctx, AV_LOG_ERROR, + "Cannot decode non-keyframe without valid keyframe\n"); + return AVERROR_INVALIDDATA; + } + p->key_frame = 0; + } + + if ((ret = ff_thread_get_buffer(avctx, &f->picture, AV_GET_BUFFER_FLAG_REF)) < 0) + return ret; + + if (avctx->debug & FF_DEBUG_PICT_INFO) + av_log(avctx, AV_LOG_DEBUG, "ver:%d keyframe:%d coder:%d ec:%d slices:%d bps:%d\n", + f->version, p->key_frame, f->ac, f->ec, f->slice_count, f->avctx->bits_per_raw_sample); + + ff_thread_finish_setup(avctx); + + buf_p = buf + buf_size; + for (i = f->slice_count - 1; i >= 0; i--) { + FFV1Context *fs = f->slice_context[i]; + int trailer = 3 + 5*!!f->ec; + int v; + + if (i || f->version > 2) v = AV_RB24(buf_p-trailer) + trailer; + else v = buf_p - c->bytestream_start; + if (buf_p - c->bytestream_start < v) { + av_log(avctx, AV_LOG_ERROR, "Slice pointer chain broken\n"); + ff_thread_report_progress(&f->picture, INT_MAX, 0); + return AVERROR_INVALIDDATA; + } + buf_p -= v; + + if (f->ec) { + unsigned crc = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, buf_p, v); + if (crc) { + int64_t ts = avpkt->pts != AV_NOPTS_VALUE ? 
avpkt->pts : avpkt->dts; + av_log(f->avctx, AV_LOG_ERROR, "CRC mismatch %X!", crc); + if (ts != AV_NOPTS_VALUE && avctx->pkt_timebase.num) { + av_log(f->avctx, AV_LOG_ERROR, "at %f seconds\n", ts*av_q2d(avctx->pkt_timebase)); + } else if (ts != AV_NOPTS_VALUE) { + av_log(f->avctx, AV_LOG_ERROR, "at %"PRId64"\n", ts); + } else { + av_log(f->avctx, AV_LOG_ERROR, "\n"); + } + fs->slice_damaged = 1; + } + if (avctx->debug & FF_DEBUG_PICT_INFO) { + av_log(avctx, AV_LOG_DEBUG, "slice %d, CRC: 0x%08X\n", i, AV_RB32(buf_p + v - 4)); + } + } + + if (i) { + ff_init_range_decoder(&fs->c, buf_p, v); + } else + fs->c.bytestream_end = buf_p + v; + + fs->avctx = avctx; + fs->cur = p; + } + + avctx->execute(avctx, + decode_slice, + &f->slice_context[0], + NULL, + f->slice_count, + sizeof(void*)); + + for (i = f->slice_count - 1; i >= 0; i--) { + FFV1Context *fs = f->slice_context[i]; + int j; + if (fs->slice_damaged && f->last_picture.f->data[0]) { + const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt); + const uint8_t *src[4]; + uint8_t *dst[4]; + ff_thread_await_progress(&f->last_picture, INT_MAX, 0); + for (j = 0; j < desc->nb_components; j++) { + int pixshift = desc->comp[j].depth > 8; + int sh = (j == 1 || j == 2) ? f->chroma_h_shift : 0; + int sv = (j == 1 || j == 2) ? 
f->chroma_v_shift : 0; + dst[j] = p->data[j] + p->linesize[j] * + (fs->slice_y >> sv) + ((fs->slice_x >> sh) << pixshift); + src[j] = f->last_picture.f->data[j] + f->last_picture.f->linesize[j] * + (fs->slice_y >> sv) + ((fs->slice_x >> sh) << pixshift); + + } + if (desc->flags & AV_PIX_FMT_FLAG_PAL || + desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL) { + dst[1] = p->data[1]; + src[1] = f->last_picture.f->data[1]; + } + av_image_copy(dst, p->linesize, src, + f->last_picture.f->linesize, + avctx->pix_fmt, + fs->slice_width, + fs->slice_height); + } + } + ff_thread_report_progress(&f->picture, INT_MAX, 0); + + f->picture_number++; + + if (f->last_picture.f) + ff_thread_release_buffer(avctx, &f->last_picture); + f->cur = NULL; + if ((ret = av_frame_ref(data, f->picture.f)) < 0) + return ret; + + *got_frame = 1; + + return buf_size; +} + +#if HAVE_THREADS +static int init_thread_copy(AVCodecContext *avctx) +{ + FFV1Context *f = avctx->priv_data; + int i, ret; + + f->picture.f = NULL; + f->last_picture.f = NULL; + f->sample_buffer = NULL; + f->max_slice_count = 0; + f->slice_count = 0; + + for (i = 0; i < f->quant_table_count; i++) { + av_assert0(f->version > 1); + f->initial_states[i] = av_memdup(f->initial_states[i], + f->context_count[i] * sizeof(*f->initial_states[i])); + } + + f->picture.f = av_frame_alloc(); + f->last_picture.f = av_frame_alloc(); + + if ((ret = ff_ffv1_init_slice_contexts(f)) < 0) + return ret; + + return 0; +} +#endif + +static void copy_fields(FFV1Context *fsdst, FFV1Context *fssrc, FFV1Context *fsrc) +{ + fsdst->version = fsrc->version; + fsdst->micro_version = fsrc->micro_version; + fsdst->chroma_planes = fsrc->chroma_planes; + fsdst->chroma_h_shift = fsrc->chroma_h_shift; + fsdst->chroma_v_shift = fsrc->chroma_v_shift; + fsdst->transparency = fsrc->transparency; + fsdst->plane_count = fsrc->plane_count; + fsdst->ac = fsrc->ac; + fsdst->colorspace = fsrc->colorspace; + + fsdst->ec = fsrc->ec; + fsdst->intra = fsrc->intra; + fsdst->slice_damaged = 
fssrc->slice_damaged; + fsdst->key_frame_ok = fsrc->key_frame_ok; + + fsdst->bits_per_raw_sample = fsrc->bits_per_raw_sample; + fsdst->packed_at_lsb = fsrc->packed_at_lsb; + fsdst->slice_count = fsrc->slice_count; + if (fsrc->version<3){ + fsdst->slice_x = fssrc->slice_x; + fsdst->slice_y = fssrc->slice_y; + fsdst->slice_width = fssrc->slice_width; + fsdst->slice_height = fssrc->slice_height; + } +} + +#if HAVE_THREADS +static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src) +{ + FFV1Context *fsrc = src->priv_data; + FFV1Context *fdst = dst->priv_data; + int i, ret; + + if (dst == src) + return 0; + + { + ThreadFrame picture = fdst->picture, last_picture = fdst->last_picture; + uint8_t (*initial_states[MAX_QUANT_TABLES])[32]; + struct FFV1Context *slice_context[MAX_SLICES]; + memcpy(initial_states, fdst->initial_states, sizeof(fdst->initial_states)); + memcpy(slice_context, fdst->slice_context , sizeof(fdst->slice_context)); + + memcpy(fdst, fsrc, sizeof(*fdst)); + memcpy(fdst->initial_states, initial_states, sizeof(fdst->initial_states)); + memcpy(fdst->slice_context, slice_context , sizeof(fdst->slice_context)); + fdst->picture = picture; + fdst->last_picture = last_picture; + for (i = 0; inum_h_slices * fdst->num_v_slices; i++) { + FFV1Context *fssrc = fsrc->slice_context[i]; + FFV1Context *fsdst = fdst->slice_context[i]; + copy_fields(fsdst, fssrc, fsrc); + } + av_assert0(!fdst->plane[0].state); + av_assert0(!fdst->sample_buffer); + } + + av_assert1(fdst->max_slice_count == fsrc->max_slice_count); + + + ff_thread_release_buffer(dst, &fdst->picture); + if (fsrc->picture.f->data[0]) { + if ((ret = ff_thread_ref_frame(&fdst->picture, &fsrc->picture)) < 0) + return ret; + } + + fdst->fsrc = fsrc; + + return 0; +} +#endif + +AVCodec ff_ffv1_decoder = { + .name = "ffv1", + .long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"), + .type = AVMEDIA_TYPE_VIDEO, + .id = AV_CODEC_ID_FFV1, + .priv_data_size = sizeof(FFV1Context), + .init = 
decode_init, + .close = ff_ffv1_close, + .decode = decode_frame, + .init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy), + .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context), + .capabilities = AV_CODEC_CAP_DR1 /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/ | + AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS, + .caps_internal = FF_CODEC_CAP_INIT_CLEANUP +}; From dad4fa292cd5b18dd564eb7cec77e0dddba6cf31 Mon Sep 17 00:00:00 2001 From: turly221 Date: Mon, 9 Dec 2024 16:05:58 +0000 Subject: [PATCH 05/13] commit patch 22783140 --- libavcodec/utvideodec.c | 9 + libavcodec/utvideodec.c.orig | 840 +++++++++++++++++++++++++++++++++++ 2 files changed, 849 insertions(+) create mode 100644 libavcodec/utvideodec.c.orig diff --git a/libavcodec/utvideodec.c b/libavcodec/utvideodec.c index 650c0ec67db38..556d0fa707695 100644 --- a/libavcodec/utvideodec.c +++ b/libavcodec/utvideodec.c @@ -28,6 +28,7 @@ #include #include "libavutil/intreadwrite.h" +#include "libavutil/pixdesc.h" #include "avcodec.h" #include "bswapdsp.h" #include "bytestream.h" @@ -720,6 +721,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, static av_cold int decode_init(AVCodecContext *avctx) { UtvideoContext * const c = avctx->priv_data; + int h_shift, v_shift; c->avctx = avctx; @@ -815,6 +817,13 @@ static av_cold int decode_init(AVCodecContext *avctx) return AVERROR_INVALIDDATA; } + av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &h_shift, &v_shift); + if ((avctx->width & ((1<height & ((1< +#include + +#include "libavutil/intreadwrite.h" +#include "avcodec.h" +#include "bswapdsp.h" +#include "bytestream.h" +#include "get_bits.h" +#include "thread.h" +#include "utvideo.h" + +static int build_huff10(const uint8_t *src, VLC *vlc, int *fsym) +{ + int i; + HuffEntry he[1024]; + int last; + uint32_t codes[1024]; + uint8_t bits[1024]; + uint16_t syms[1024]; + uint32_t code; + + *fsym = -1; + for (i = 0; i < 1024; i++) { + he[i].sym = i; + he[i].len = *src++; + } + 
qsort(he, 1024, sizeof(*he), ff_ut10_huff_cmp_len); + + if (!he[0].len) { + *fsym = he[0].sym; + return 0; + } + + last = 1023; + while (he[last].len == 255 && last) + last--; + + if (he[last].len > 32) { + return -1; + } + + code = 1; + for (i = last; i >= 0; i--) { + codes[i] = code >> (32 - he[i].len); + bits[i] = he[i].len; + syms[i] = he[i].sym; + code += 0x80000000u >> (he[i].len - 1); + } + + return ff_init_vlc_sparse(vlc, FFMIN(he[last].len, 11), last + 1, + bits, sizeof(*bits), sizeof(*bits), + codes, sizeof(*codes), sizeof(*codes), + syms, sizeof(*syms), sizeof(*syms), 0); +} + +static int build_huff(const uint8_t *src, VLC *vlc, int *fsym) +{ + int i; + HuffEntry he[256]; + int last; + uint32_t codes[256]; + uint8_t bits[256]; + uint8_t syms[256]; + uint32_t code; + + *fsym = -1; + for (i = 0; i < 256; i++) { + he[i].sym = i; + he[i].len = *src++; + } + qsort(he, 256, sizeof(*he), ff_ut_huff_cmp_len); + + if (!he[0].len) { + *fsym = he[0].sym; + return 0; + } + + last = 255; + while (he[last].len == 255 && last) + last--; + + if (he[last].len > 32) + return -1; + + code = 1; + for (i = last; i >= 0; i--) { + codes[i] = code >> (32 - he[i].len); + bits[i] = he[i].len; + syms[i] = he[i].sym; + code += 0x80000000u >> (he[i].len - 1); + } + + return ff_init_vlc_sparse(vlc, FFMIN(he[last].len, 11), last + 1, + bits, sizeof(*bits), sizeof(*bits), + codes, sizeof(*codes), sizeof(*codes), + syms, sizeof(*syms), sizeof(*syms), 0); +} + +static int decode_plane10(UtvideoContext *c, int plane_no, + uint16_t *dst, int step, int stride, + int width, int height, + const uint8_t *src, const uint8_t *huff, + int use_pred) +{ + int i, j, slice, pix, ret; + int sstart, send; + VLC vlc; + GetBitContext gb; + int prev, fsym; + + if ((ret = build_huff10(huff, &vlc, &fsym)) < 0) { + av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n"); + return ret; + } + if (fsym >= 0) { // build_huff reported a symbol to fill slices with + send = 0; + for (slice = 0; slice < 
c->slices; slice++) { + uint16_t *dest; + + sstart = send; + send = (height * (slice + 1) / c->slices); + dest = dst + sstart * stride; + + prev = 0x200; + for (j = sstart; j < send; j++) { + for (i = 0; i < width * step; i += step) { + pix = fsym; + if (use_pred) { + prev += pix; + prev &= 0x3FF; + pix = prev; + } + dest[i] = pix; + } + dest += stride; + } + } + return 0; + } + + send = 0; + for (slice = 0; slice < c->slices; slice++) { + uint16_t *dest; + int slice_data_start, slice_data_end, slice_size; + + sstart = send; + send = (height * (slice + 1) / c->slices); + dest = dst + sstart * stride; + + // slice offset and size validation was done earlier + slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0; + slice_data_end = AV_RL32(src + slice * 4); + slice_size = slice_data_end - slice_data_start; + + if (!slice_size) { + av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol " + "yet a slice has a length of zero.\n"); + goto fail; + } + + memcpy(c->slice_bits, src + slice_data_start + c->slices * 4, + slice_size); + memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE); + c->bdsp.bswap_buf((uint32_t *) c->slice_bits, + (uint32_t *) c->slice_bits, + (slice_data_end - slice_data_start + 3) >> 2); + init_get_bits(&gb, c->slice_bits, slice_size * 8); + + prev = 0x200; + for (j = sstart; j < send; j++) { + for (i = 0; i < width * step; i += step) { + if (get_bits_left(&gb) <= 0) { + av_log(c->avctx, AV_LOG_ERROR, + "Slice decoding ran out of bits\n"); + goto fail; + } + pix = get_vlc2(&gb, vlc.table, vlc.bits, 3); + if (pix < 0) { + av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n"); + goto fail; + } + if (use_pred) { + prev += pix; + prev &= 0x3FF; + pix = prev; + } + dest[i] = pix; + } + dest += stride; + } + if (get_bits_left(&gb) > 32) + av_log(c->avctx, AV_LOG_WARNING, + "%d bits left after decoding slice\n", get_bits_left(&gb)); + } + + ff_free_vlc(&vlc); + + return 0; +fail: + ff_free_vlc(&vlc); + return AVERROR_INVALIDDATA; 
+} + +static int decode_plane(UtvideoContext *c, int plane_no, + uint8_t *dst, int step, int stride, + int width, int height, + const uint8_t *src, int use_pred) +{ + int i, j, slice, pix; + int sstart, send; + VLC vlc; + GetBitContext gb; + int prev, fsym; + const int cmask = ~(!plane_no && c->avctx->pix_fmt == AV_PIX_FMT_YUV420P); + + if (build_huff(src, &vlc, &fsym)) { + av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n"); + return AVERROR_INVALIDDATA; + } + if (fsym >= 0) { // build_huff reported a symbol to fill slices with + send = 0; + for (slice = 0; slice < c->slices; slice++) { + uint8_t *dest; + + sstart = send; + send = (height * (slice + 1) / c->slices) & cmask; + dest = dst + sstart * stride; + + prev = 0x80; + for (j = sstart; j < send; j++) { + for (i = 0; i < width * step; i += step) { + pix = fsym; + if (use_pred) { + prev += pix; + pix = prev; + } + dest[i] = pix; + } + dest += stride; + } + } + return 0; + } + + src += 256; + + send = 0; + for (slice = 0; slice < c->slices; slice++) { + uint8_t *dest; + int slice_data_start, slice_data_end, slice_size; + + sstart = send; + send = (height * (slice + 1) / c->slices) & cmask; + dest = dst + sstart * stride; + + // slice offset and size validation was done earlier + slice_data_start = slice ? 
AV_RL32(src + slice * 4 - 4) : 0; + slice_data_end = AV_RL32(src + slice * 4); + slice_size = slice_data_end - slice_data_start; + + if (!slice_size) { + av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol " + "yet a slice has a length of zero.\n"); + goto fail; + } + + memcpy(c->slice_bits, src + slice_data_start + c->slices * 4, + slice_size); + memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE); + c->bdsp.bswap_buf((uint32_t *) c->slice_bits, + (uint32_t *) c->slice_bits, + (slice_data_end - slice_data_start + 3) >> 2); + init_get_bits(&gb, c->slice_bits, slice_size * 8); + + prev = 0x80; + for (j = sstart; j < send; j++) { + for (i = 0; i < width * step; i += step) { + if (get_bits_left(&gb) <= 0) { + av_log(c->avctx, AV_LOG_ERROR, + "Slice decoding ran out of bits\n"); + goto fail; + } + pix = get_vlc2(&gb, vlc.table, vlc.bits, 3); + if (pix < 0) { + av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n"); + goto fail; + } + if (use_pred) { + prev += pix; + pix = prev; + } + dest[i] = pix; + } + dest += stride; + } + if (get_bits_left(&gb) > 32) + av_log(c->avctx, AV_LOG_WARNING, + "%d bits left after decoding slice\n", get_bits_left(&gb)); + } + + ff_free_vlc(&vlc); + + return 0; +fail: + ff_free_vlc(&vlc); + return AVERROR_INVALIDDATA; +} + +static void restore_rgb_planes(uint8_t *src, int step, int stride, int width, + int height) +{ + int i, j; + uint8_t r, g, b; + + for (j = 0; j < height; j++) { + for (i = 0; i < width * step; i += step) { + r = src[i]; + g = src[i + 1]; + b = src[i + 2]; + src[i] = r + g - 0x80; + src[i + 2] = b + g - 0x80; + } + src += stride; + } +} + +static void restore_rgb_planes10(AVFrame *frame, int width, int height) +{ + uint16_t *src_r = (uint16_t *)frame->data[2]; + uint16_t *src_g = (uint16_t *)frame->data[0]; + uint16_t *src_b = (uint16_t *)frame->data[1]; + int r, g, b; + int i, j; + + for (j = 0; j < height; j++) { + for (i = 0; i < width; i++) { + r = src_r[i]; + g = src_g[i]; + b = src_b[i]; + 
src_r[i] = (r + g - 0x200) & 0x3FF; + src_b[i] = (b + g - 0x200) & 0x3FF; + } + src_r += frame->linesize[2] / 2; + src_g += frame->linesize[0] / 2; + src_b += frame->linesize[1] / 2; + } +} + +static void restore_median(uint8_t *src, int step, int stride, + int width, int height, int slices, int rmode) +{ + int i, j, slice; + int A, B, C; + uint8_t *bsrc; + int slice_start, slice_height; + const int cmask = ~rmode; + + for (slice = 0; slice < slices; slice++) { + slice_start = ((slice * height) / slices) & cmask; + slice_height = ((((slice + 1) * height) / slices) & cmask) - + slice_start; + + if (!slice_height) + continue; + bsrc = src + slice_start * stride; + + // first line - left neighbour prediction + bsrc[0] += 0x80; + A = bsrc[0]; + for (i = step; i < width * step; i += step) { + bsrc[i] += A; + A = bsrc[i]; + } + bsrc += stride; + if (slice_height <= 1) + continue; + // second line - first element has top prediction, the rest uses median + C = bsrc[-stride]; + bsrc[0] += C; + A = bsrc[0]; + for (i = step; i < width * step; i += step) { + B = bsrc[i - stride]; + bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C)); + C = B; + A = bsrc[i]; + } + bsrc += stride; + // the rest of lines use continuous median prediction + for (j = 2; j < slice_height; j++) { + for (i = 0; i < width * step; i += step) { + B = bsrc[i - stride]; + bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C)); + C = B; + A = bsrc[i]; + } + bsrc += stride; + } + } +} + +/* UtVideo interlaced mode treats every two lines as a single one, + * so restoring function should take care of possible padding between + * two parts of the same "line". + */ +static void restore_median_il(uint8_t *src, int step, int stride, + int width, int height, int slices, int rmode) +{ + int i, j, slice; + int A, B, C; + uint8_t *bsrc; + int slice_start, slice_height; + const int cmask = ~(rmode ? 
3 : 1); + const int stride2 = stride << 1; + + for (slice = 0; slice < slices; slice++) { + slice_start = ((slice * height) / slices) & cmask; + slice_height = ((((slice + 1) * height) / slices) & cmask) - + slice_start; + slice_height >>= 1; + if (!slice_height) + continue; + + bsrc = src + slice_start * stride; + + // first line - left neighbour prediction + bsrc[0] += 0x80; + A = bsrc[0]; + for (i = step; i < width * step; i += step) { + bsrc[i] += A; + A = bsrc[i]; + } + for (i = 0; i < width * step; i += step) { + bsrc[stride + i] += A; + A = bsrc[stride + i]; + } + bsrc += stride2; + if (slice_height <= 1) + continue; + // second line - first element has top prediction, the rest uses median + C = bsrc[-stride2]; + bsrc[0] += C; + A = bsrc[0]; + for (i = step; i < width * step; i += step) { + B = bsrc[i - stride2]; + bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C)); + C = B; + A = bsrc[i]; + } + for (i = 0; i < width * step; i += step) { + B = bsrc[i - stride]; + bsrc[stride + i] += mid_pred(A, B, (uint8_t)(A + B - C)); + C = B; + A = bsrc[stride + i]; + } + bsrc += stride2; + // the rest of lines use continuous median prediction + for (j = 2; j < slice_height; j++) { + for (i = 0; i < width * step; i += step) { + B = bsrc[i - stride2]; + bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C)); + C = B; + A = bsrc[i]; + } + for (i = 0; i < width * step; i += step) { + B = bsrc[i - stride]; + bsrc[i + stride] += mid_pred(A, B, (uint8_t)(A + B - C)); + C = B; + A = bsrc[i + stride]; + } + bsrc += stride2; + } + } +} + +static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, + AVPacket *avpkt) +{ + const uint8_t *buf = avpkt->data; + int buf_size = avpkt->size; + UtvideoContext *c = avctx->priv_data; + int i, j; + const uint8_t *plane_start[5]; + int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size; + int ret; + GetByteContext gb; + ThreadFrame frame = { .f = data }; + + if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0) + return 
ret; + + /* parse plane structure to get frame flags and validate slice offsets */ + bytestream2_init(&gb, buf, buf_size); + if (c->pro) { + if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) { + av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n"); + return AVERROR_INVALIDDATA; + } + c->frame_info = bytestream2_get_le32u(&gb); + c->slices = ((c->frame_info >> 16) & 0xff) + 1; + for (i = 0; i < c->planes; i++) { + plane_start[i] = gb.buffer; + if (bytestream2_get_bytes_left(&gb) < 1024 + 4 * c->slices) { + av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n"); + return AVERROR_INVALIDDATA; + } + slice_start = 0; + slice_end = 0; + for (j = 0; j < c->slices; j++) { + slice_end = bytestream2_get_le32u(&gb); + if (slice_end < 0 || slice_end < slice_start || + bytestream2_get_bytes_left(&gb) < slice_end) { + av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n"); + return AVERROR_INVALIDDATA; + } + slice_size = slice_end - slice_start; + slice_start = slice_end; + max_slice_size = FFMAX(max_slice_size, slice_size); + } + plane_size = slice_end; + bytestream2_skipu(&gb, plane_size); + bytestream2_skipu(&gb, 1024); + } + plane_start[c->planes] = gb.buffer; + } else { + for (i = 0; i < c->planes; i++) { + plane_start[i] = gb.buffer; + if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) { + av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n"); + return AVERROR_INVALIDDATA; + } + bytestream2_skipu(&gb, 256); + slice_start = 0; + slice_end = 0; + for (j = 0; j < c->slices; j++) { + slice_end = bytestream2_get_le32u(&gb); + if (slice_end < 0 || slice_end < slice_start || + bytestream2_get_bytes_left(&gb) < slice_end) { + av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n"); + return AVERROR_INVALIDDATA; + } + slice_size = slice_end - slice_start; + slice_start = slice_end; + max_slice_size = FFMAX(max_slice_size, slice_size); + } + plane_size = slice_end; + bytestream2_skipu(&gb, plane_size); + } + plane_start[c->planes] = 
gb.buffer; + if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) { + av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n"); + return AVERROR_INVALIDDATA; + } + c->frame_info = bytestream2_get_le32u(&gb); + } + av_log(avctx, AV_LOG_DEBUG, "frame information flags %"PRIX32"\n", + c->frame_info); + + c->frame_pred = (c->frame_info >> 8) & 3; + + if (c->frame_pred == PRED_GRADIENT) { + avpriv_request_sample(avctx, "Frame with gradient prediction"); + return AVERROR_PATCHWELCOME; + } + + av_fast_malloc(&c->slice_bits, &c->slice_bits_size, + max_slice_size + AV_INPUT_BUFFER_PADDING_SIZE); + + if (!c->slice_bits) { + av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n"); + return AVERROR(ENOMEM); + } + + switch (c->avctx->pix_fmt) { + case AV_PIX_FMT_RGB24: + case AV_PIX_FMT_RGBA: + for (i = 0; i < c->planes; i++) { + ret = decode_plane(c, i, frame.f->data[0] + ff_ut_rgb_order[i], + c->planes, frame.f->linesize[0], avctx->width, + avctx->height, plane_start[i], + c->frame_pred == PRED_LEFT); + if (ret) + return ret; + if (c->frame_pred == PRED_MEDIAN) { + if (!c->interlaced) { + restore_median(frame.f->data[0] + ff_ut_rgb_order[i], + c->planes, frame.f->linesize[0], avctx->width, + avctx->height, c->slices, 0); + } else { + restore_median_il(frame.f->data[0] + ff_ut_rgb_order[i], + c->planes, frame.f->linesize[0], + avctx->width, avctx->height, c->slices, + 0); + } + } + } + restore_rgb_planes(frame.f->data[0], c->planes, frame.f->linesize[0], + avctx->width, avctx->height); + break; + case AV_PIX_FMT_GBRAP10: + case AV_PIX_FMT_GBRP10: + for (i = 0; i < c->planes; i++) { + ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], 1, + frame.f->linesize[i] / 2, avctx->width, + avctx->height, plane_start[i], + plane_start[i + 1] - 1024, + c->frame_pred == PRED_LEFT); + if (ret) + return ret; + } + restore_rgb_planes10(frame.f, avctx->width, avctx->height); + break; + case AV_PIX_FMT_YUV420P: + for (i = 0; i < 3; i++) { + ret = 
decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i], + avctx->width >> !!i, avctx->height >> !!i, + plane_start[i], c->frame_pred == PRED_LEFT); + if (ret) + return ret; + if (c->frame_pred == PRED_MEDIAN) { + if (!c->interlaced) { + restore_median(frame.f->data[i], 1, frame.f->linesize[i], + avctx->width >> !!i, avctx->height >> !!i, + c->slices, !i); + } else { + restore_median_il(frame.f->data[i], 1, frame.f->linesize[i], + avctx->width >> !!i, + avctx->height >> !!i, + c->slices, !i); + } + } + } + break; + case AV_PIX_FMT_YUV422P: + for (i = 0; i < 3; i++) { + ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i], + avctx->width >> !!i, avctx->height, + plane_start[i], c->frame_pred == PRED_LEFT); + if (ret) + return ret; + if (c->frame_pred == PRED_MEDIAN) { + if (!c->interlaced) { + restore_median(frame.f->data[i], 1, frame.f->linesize[i], + avctx->width >> !!i, avctx->height, + c->slices, 0); + } else { + restore_median_il(frame.f->data[i], 1, frame.f->linesize[i], + avctx->width >> !!i, avctx->height, + c->slices, 0); + } + } + } + break; + case AV_PIX_FMT_YUV444P: + for (i = 0; i < 3; i++) { + ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i], + avctx->width, avctx->height, + plane_start[i], c->frame_pred == PRED_LEFT); + if (ret) + return ret; + if (c->frame_pred == PRED_MEDIAN) { + if (!c->interlaced) { + restore_median(frame.f->data[i], 1, frame.f->linesize[i], + avctx->width, avctx->height, + c->slices, 0); + } else { + restore_median_il(frame.f->data[i], 1, frame.f->linesize[i], + avctx->width, avctx->height, + c->slices, 0); + } + } + } + break; + case AV_PIX_FMT_YUV422P10: + for (i = 0; i < 3; i++) { + ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], 1, frame.f->linesize[i] / 2, + avctx->width >> !!i, avctx->height, + plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT); + if (ret) + return ret; + } + break; + } + + frame.f->key_frame = 1; + frame.f->pict_type = AV_PICTURE_TYPE_I; + 
frame.f->interlaced_frame = !!c->interlaced; + + *got_frame = 1; + + /* always report that the buffer was completely consumed */ + return buf_size; +} + +static av_cold int decode_init(AVCodecContext *avctx) +{ + UtvideoContext * const c = avctx->priv_data; + + c->avctx = avctx; + + ff_bswapdsp_init(&c->bdsp); + + if (avctx->extradata_size >= 16) { + av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n", + avctx->extradata[3], avctx->extradata[2], + avctx->extradata[1], avctx->extradata[0]); + av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n", + AV_RB32(avctx->extradata + 4)); + c->frame_info_size = AV_RL32(avctx->extradata + 8); + c->flags = AV_RL32(avctx->extradata + 12); + + if (c->frame_info_size != 4) + avpriv_request_sample(avctx, "Frame info not 4 bytes"); + av_log(avctx, AV_LOG_DEBUG, "Encoding parameters %08"PRIX32"\n", c->flags); + c->slices = (c->flags >> 24) + 1; + c->compression = c->flags & 1; + c->interlaced = c->flags & 0x800; + } else if (avctx->extradata_size == 8) { + av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n", + avctx->extradata[3], avctx->extradata[2], + avctx->extradata[1], avctx->extradata[0]); + av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n", + AV_RB32(avctx->extradata + 4)); + c->interlaced = 0; + c->pro = 1; + c->frame_info_size = 4; + } else { + av_log(avctx, AV_LOG_ERROR, + "Insufficient extradata size %d, should be at least 16\n", + avctx->extradata_size); + return AVERROR_INVALIDDATA; + } + + c->slice_bits_size = 0; + + switch (avctx->codec_tag) { + case MKTAG('U', 'L', 'R', 'G'): + c->planes = 3; + avctx->pix_fmt = AV_PIX_FMT_RGB24; + break; + case MKTAG('U', 'L', 'R', 'A'): + c->planes = 4; + avctx->pix_fmt = AV_PIX_FMT_RGBA; + break; + case MKTAG('U', 'L', 'Y', '0'): + c->planes = 3; + avctx->pix_fmt = AV_PIX_FMT_YUV420P; + avctx->colorspace = AVCOL_SPC_BT470BG; + break; + case MKTAG('U', 'L', 'Y', '2'): + c->planes = 3; + avctx->pix_fmt = AV_PIX_FMT_YUV422P; + avctx->colorspace = 
AVCOL_SPC_BT470BG; + break; + case MKTAG('U', 'L', 'Y', '4'): + c->planes = 3; + avctx->pix_fmt = AV_PIX_FMT_YUV444P; + avctx->colorspace = AVCOL_SPC_BT470BG; + break; + case MKTAG('U', 'Q', 'Y', '2'): + c->planes = 3; + avctx->pix_fmt = AV_PIX_FMT_YUV422P10; + break; + case MKTAG('U', 'Q', 'R', 'G'): + c->planes = 3; + avctx->pix_fmt = AV_PIX_FMT_GBRP10; + break; + case MKTAG('U', 'Q', 'R', 'A'): + c->planes = 4; + avctx->pix_fmt = AV_PIX_FMT_GBRAP10; + break; + case MKTAG('U', 'L', 'H', '0'): + c->planes = 3; + avctx->pix_fmt = AV_PIX_FMT_YUV420P; + avctx->colorspace = AVCOL_SPC_BT709; + break; + case MKTAG('U', 'L', 'H', '2'): + c->planes = 3; + avctx->pix_fmt = AV_PIX_FMT_YUV422P; + avctx->colorspace = AVCOL_SPC_BT709; + break; + case MKTAG('U', 'L', 'H', '4'): + c->planes = 3; + avctx->pix_fmt = AV_PIX_FMT_YUV444P; + avctx->colorspace = AVCOL_SPC_BT709; + break; + default: + av_log(avctx, AV_LOG_ERROR, "Unknown Ut Video FOURCC provided (%08X)\n", + avctx->codec_tag); + return AVERROR_INVALIDDATA; + } + + return 0; +} + +static av_cold int decode_end(AVCodecContext *avctx) +{ + UtvideoContext * const c = avctx->priv_data; + + av_freep(&c->slice_bits); + + return 0; +} + +AVCodec ff_utvideo_decoder = { + .name = "utvideo", + .long_name = NULL_IF_CONFIG_SMALL("Ut Video"), + .type = AVMEDIA_TYPE_VIDEO, + .id = AV_CODEC_ID_UTVIDEO, + .priv_data_size = sizeof(UtvideoContext), + .init = decode_init, + .close = decode_end, + .decode = decode_frame, + .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS, +}; From aaae734f3496c78259da09f2736d571ee7b39194 Mon Sep 17 00:00:00 2001 From: turly221 Date: Mon, 9 Dec 2024 16:06:00 +0000 Subject: [PATCH 06/13] commit patch 26397029 --- libavformat/mms.c | 44 ++++++++++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/libavformat/mms.c b/libavformat/mms.c index 17fa76a8d44d6..768fda652543b 100644 --- a/libavformat/mms.c +++ b/libavformat/mms.c @@ -94,24 +94,26 @@ int 
ff_mms_asf_header_parser(MMSContext *mms) } } } else if (!memcmp(p, ff_asf_stream_header, sizeof(ff_asf_guid))) { - flags = AV_RL16(p + sizeof(ff_asf_guid)*3 + 24); - stream_id = flags & 0x7F; - //The second condition is for checking CS_PKT_STREAM_ID_REQUEST packet size, - //we can calculate the packet size by stream_num. - //Please see function send_stream_selection_request(). - if (mms->stream_num < MMS_MAX_STREAMS && - 46 + mms->stream_num * 6 < sizeof(mms->out_buffer)) { - mms->streams = av_fast_realloc(mms->streams, - &mms->nb_streams_allocated, - (mms->stream_num + 1) * sizeof(MMSStream)); - if (!mms->streams) - return AVERROR(ENOMEM); - mms->streams[mms->stream_num].id = stream_id; - mms->stream_num++; - } else { - av_log(NULL, AV_LOG_ERROR, - "Corrupt stream (too many A/V streams)\n"); - return AVERROR_INVALIDDATA; + if (end - p >= (sizeof(ff_asf_guid) * 3 + 26)) { + flags = AV_RL16(p + sizeof(ff_asf_guid)*3 + 24); + stream_id = flags & 0x7F; + //The second condition is for checking CS_PKT_STREAM_ID_REQUEST packet size, + //we can calculate the packet size by stream_num. + //Please see function send_stream_selection_request(). + if (mms->stream_num < MMS_MAX_STREAMS && + 46 + mms->stream_num * 6 < sizeof(mms->out_buffer)) { + mms->streams = av_fast_realloc(mms->streams, + &mms->nb_streams_allocated, + (mms->stream_num + 1) * sizeof(MMSStream)); + if (!mms->streams) + return AVERROR(ENOMEM); + mms->streams[mms->stream_num].id = stream_id; + mms->stream_num++; + } else { + av_log(NULL, AV_LOG_ERROR, + "Corrupt stream (too many A/V streams)\n"); + return AVERROR_INVALIDDATA; + } } } else if (!memcmp(p, ff_asf_ext_stream_header, sizeof(ff_asf_guid))) { if (end - p >= 88) { @@ -143,6 +145,12 @@ int ff_mms_asf_header_parser(MMSContext *mms) } } else if (!memcmp(p, ff_asf_head1_guid, sizeof(ff_asf_guid))) { chunksize = 46; // see references [2] section 3.4. This should be set 46. 
+ if (chunksize > end - p) { + av_log(NULL, AV_LOG_ERROR, + "Corrupt stream (header chunksize %"PRId64" is invalid)\n", + chunksize); + return AVERROR_INVALIDDATA; + } } p += chunksize; } From d07946953164feaa6da210119897b6553335b610 Mon Sep 17 00:00:00 2001 From: turly221 Date: Mon, 9 Dec 2024 16:06:01 +0000 Subject: [PATCH 07/13] commit patch 24557399 --- libavformat/rmdec.c | 2 +- libavformat/rmdec.c.orig | 1411 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 1412 insertions(+), 1 deletion(-) create mode 100644 libavformat/rmdec.c.orig diff --git a/libavformat/rmdec.c b/libavformat/rmdec.c index 4d565291af250..55fb3084f2e89 100644 --- a/libavformat/rmdec.c +++ b/libavformat/rmdec.c @@ -524,7 +524,7 @@ static int rm_read_multi(AVFormatContext *s, AVIOContext *pb, size2 = avio_rb32(pb); ret = ff_rm_read_mdpr_codecdata(s, s->pb, st2, st2->priv_data, - size2, mime); + size2, NULL); if (ret < 0) return ret; } diff --git a/libavformat/rmdec.c.orig b/libavformat/rmdec.c.orig new file mode 100644 index 0000000000000..4d565291af250 --- /dev/null +++ b/libavformat/rmdec.c.orig @@ -0,0 +1,1411 @@ +/* + * "Real" compatible demuxer. + * Copyright (c) 2000, 2001 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +#include "libavutil/avassert.h" +#include "libavutil/avstring.h" +#include "libavutil/channel_layout.h" +#include "libavutil/internal.h" +#include "libavutil/intreadwrite.h" +#include "libavutil/dict.h" +#include "avformat.h" +#include "avio_internal.h" +#include "internal.h" +#include "rmsipr.h" +#include "rm.h" + +#define DEINT_ID_GENR MKTAG('g', 'e', 'n', 'r') ///< interleaving for Cooker/ATRAC +#define DEINT_ID_INT0 MKTAG('I', 'n', 't', '0') ///< no interleaving needed +#define DEINT_ID_INT4 MKTAG('I', 'n', 't', '4') ///< interleaving for 28.8 +#define DEINT_ID_SIPR MKTAG('s', 'i', 'p', 'r') ///< interleaving for Sipro +#define DEINT_ID_VBRF MKTAG('v', 'b', 'r', 'f') ///< VBR case for AAC +#define DEINT_ID_VBRS MKTAG('v', 'b', 'r', 's') ///< VBR case for AAC + +struct RMStream { + AVPacket pkt; ///< place to store merged video frame / reordered audio data + int videobufsize; ///< current assembled frame size + int videobufpos; ///< position for the next slice in the video buffer + int curpic_num; ///< picture number of current frame + int cur_slice, slices; + int64_t pktpos; ///< first slice position in file + /// Audio descrambling matrix parameters + int64_t audiotimestamp; ///< Audio packet timestamp + int sub_packet_cnt; // Subpacket counter, used while reading + int sub_packet_size, sub_packet_h, coded_framesize; ///< Descrambling parameters from container + int audio_framesize; /// Audio frame size from container + int sub_packet_lengths[16]; /// Length of each subpacket + int32_t deint_id; ///< deinterleaver used in audio stream +}; + +typedef struct RMDemuxContext { + int nb_packets; + int old_format; + int current_stream; + int remaining_len; + int audio_stream_num; ///< Stream number for audio packets + int 
audio_pkt_cnt; ///< Output packet counter + int data_end; +} RMDemuxContext; + +static int rm_read_close(AVFormatContext *s); + +static inline void get_strl(AVIOContext *pb, char *buf, int buf_size, int len) +{ + int i; + char *q, r; + + q = buf; + for(i=0;i 0) *q = '\0'; +} + +static void get_str8(AVIOContext *pb, char *buf, int buf_size) +{ + get_strl(pb, buf, buf_size, avio_r8(pb)); +} + +static int rm_read_extradata(AVFormatContext *s, AVIOContext *pb, AVCodecParameters *par, unsigned size) +{ + if (size >= 1<<24) { + av_log(s, AV_LOG_ERROR, "extradata size %u too large\n", size); + return -1; + } + if (ff_get_extradata(s, par, pb, size) < 0) + return AVERROR(ENOMEM); + return 0; +} + +static void rm_read_metadata(AVFormatContext *s, AVIOContext *pb, int wide) +{ + char buf[1024]; + int i; + + for (i=0; imetadata, ff_rm_metadata[i], buf, 0); + } +} + +RMStream *ff_rm_alloc_rmstream (void) +{ + RMStream *rms = av_mallocz(sizeof(RMStream)); + if (!rms) + return NULL; + rms->curpic_num = -1; + return rms; +} + +void ff_rm_free_rmstream (RMStream *rms) +{ + av_packet_unref(&rms->pkt); +} + +static int rm_read_audio_stream_info(AVFormatContext *s, AVIOContext *pb, + AVStream *st, RMStream *ast, int read_all) +{ + char buf[256]; + uint32_t version; + int ret; + + /* ra type header */ + version = avio_rb16(pb); /* version */ + if (version == 3) { + unsigned bytes_per_minute; + int header_size = avio_rb16(pb); + int64_t startpos = avio_tell(pb); + avio_skip(pb, 8); + bytes_per_minute = avio_rb16(pb); + avio_skip(pb, 4); + rm_read_metadata(s, pb, 0); + if ((startpos + header_size) >= avio_tell(pb) + 2) { + // fourcc (should always be "lpcJ") + avio_r8(pb); + get_str8(pb, buf, sizeof(buf)); + } + // Skip extra header crap (this should never happen) + if ((startpos + header_size) > avio_tell(pb)) + avio_skip(pb, header_size + startpos - avio_tell(pb)); + if (bytes_per_minute) + st->codecpar->bit_rate = 8LL * bytes_per_minute / 60; + st->codecpar->sample_rate = 8000; + 
st->codecpar->channels = 1; + st->codecpar->channel_layout = AV_CH_LAYOUT_MONO; + st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; + st->codecpar->codec_id = AV_CODEC_ID_RA_144; + ast->deint_id = DEINT_ID_INT0; + } else { + int flavor, sub_packet_h, coded_framesize, sub_packet_size; + int codecdata_length; + unsigned bytes_per_minute; + /* old version (4) */ + avio_skip(pb, 2); /* unused */ + avio_rb32(pb); /* .ra4 */ + avio_rb32(pb); /* data size */ + avio_rb16(pb); /* version2 */ + avio_rb32(pb); /* header size */ + flavor= avio_rb16(pb); /* add codec info / flavor */ + ast->coded_framesize = coded_framesize = avio_rb32(pb); /* coded frame size */ + avio_rb32(pb); /* ??? */ + bytes_per_minute = avio_rb32(pb); + if (version == 4) { + if (bytes_per_minute) + st->codecpar->bit_rate = 8LL * bytes_per_minute / 60; + } + avio_rb32(pb); /* ??? */ + ast->sub_packet_h = sub_packet_h = avio_rb16(pb); /* 1 */ + st->codecpar->block_align= avio_rb16(pb); /* frame size */ + ast->sub_packet_size = sub_packet_size = avio_rb16(pb); /* sub packet size */ + avio_rb16(pb); /* ??? 
*/ + if (version == 5) { + avio_rb16(pb); avio_rb16(pb); avio_rb16(pb); + } + st->codecpar->sample_rate = avio_rb16(pb); + avio_rb32(pb); + st->codecpar->channels = avio_rb16(pb); + if (version == 5) { + ast->deint_id = avio_rl32(pb); + avio_read(pb, buf, 4); + buf[4] = 0; + } else { + AV_WL32(buf, 0); + get_str8(pb, buf, sizeof(buf)); /* desc */ + ast->deint_id = AV_RL32(buf); + get_str8(pb, buf, sizeof(buf)); /* desc */ + } + st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; + st->codecpar->codec_tag = AV_RL32(buf); + st->codecpar->codec_id = ff_codec_get_id(ff_rm_codec_tags, + st->codecpar->codec_tag); + + switch (st->codecpar->codec_id) { + case AV_CODEC_ID_AC3: + st->need_parsing = AVSTREAM_PARSE_FULL; + break; + case AV_CODEC_ID_RA_288: + st->codecpar->extradata_size= 0; + av_freep(&st->codecpar->extradata); + ast->audio_framesize = st->codecpar->block_align; + st->codecpar->block_align = coded_framesize; + break; + case AV_CODEC_ID_COOK: + st->need_parsing = AVSTREAM_PARSE_HEADERS; + case AV_CODEC_ID_ATRAC3: + case AV_CODEC_ID_SIPR: + if (read_all) { + codecdata_length = 0; + } else { + avio_rb16(pb); avio_r8(pb); + if (version == 5) + avio_r8(pb); + codecdata_length = avio_rb32(pb); + if(codecdata_length + AV_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){ + av_log(s, AV_LOG_ERROR, "codecdata_length too large\n"); + return -1; + } + } + + ast->audio_framesize = st->codecpar->block_align; + if (st->codecpar->codec_id == AV_CODEC_ID_SIPR) { + if (flavor > 3) { + av_log(s, AV_LOG_ERROR, "bad SIPR file flavor %d\n", + flavor); + return -1; + } + st->codecpar->block_align = ff_sipr_subpk_size[flavor]; + } else { + if(sub_packet_size <= 0){ + av_log(s, AV_LOG_ERROR, "sub_packet_size is invalid\n"); + return -1; + } + st->codecpar->block_align = ast->sub_packet_size; + } + if ((ret = rm_read_extradata(s, pb, st->codecpar, codecdata_length)) < 0) + return ret; + + break; + case AV_CODEC_ID_AAC: + avio_rb16(pb); avio_r8(pb); + if (version == 5) + avio_r8(pb); 
+ codecdata_length = avio_rb32(pb); + if(codecdata_length + AV_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){ + av_log(s, AV_LOG_ERROR, "codecdata_length too large\n"); + return -1; + } + if (codecdata_length >= 1) { + avio_r8(pb); + if ((ret = rm_read_extradata(s, pb, st->codecpar, codecdata_length - 1)) < 0) + return ret; + } + break; + } + switch (ast->deint_id) { + case DEINT_ID_INT4: + if (ast->coded_framesize > ast->audio_framesize || + sub_packet_h <= 1 || + ast->coded_framesize * sub_packet_h > (2 + (sub_packet_h & 1)) * ast->audio_framesize) + return AVERROR_INVALIDDATA; + if (ast->coded_framesize * sub_packet_h != 2*ast->audio_framesize) { + avpriv_request_sample(s, "mismatching interleaver parameters"); + return AVERROR_INVALIDDATA; + } + break; + case DEINT_ID_GENR: + if (ast->sub_packet_size <= 0 || + ast->sub_packet_size > ast->audio_framesize) + return AVERROR_INVALIDDATA; + if (ast->audio_framesize % ast->sub_packet_size) + return AVERROR_INVALIDDATA; + break; + case DEINT_ID_SIPR: + case DEINT_ID_INT0: + case DEINT_ID_VBRS: + case DEINT_ID_VBRF: + break; + default: + av_log(s, AV_LOG_ERROR ,"Unknown interleaver %"PRIX32"\n", ast->deint_id); + return AVERROR_INVALIDDATA; + } + if (ast->deint_id == DEINT_ID_INT4 || + ast->deint_id == DEINT_ID_GENR || + ast->deint_id == DEINT_ID_SIPR) { + if (st->codecpar->block_align <= 0 || + ast->audio_framesize * sub_packet_h > (unsigned)INT_MAX || + ast->audio_framesize * sub_packet_h < st->codecpar->block_align) + return AVERROR_INVALIDDATA; + if (av_new_packet(&ast->pkt, ast->audio_framesize * sub_packet_h) < 0) + return AVERROR(ENOMEM); + } + + if (read_all) { + avio_r8(pb); + avio_r8(pb); + avio_r8(pb); + rm_read_metadata(s, pb, 0); + } + } + return 0; +} + +int ff_rm_read_mdpr_codecdata(AVFormatContext *s, AVIOContext *pb, + AVStream *st, RMStream *rst, + unsigned int codec_data_size, const uint8_t *mime) +{ + unsigned int v; + int size; + int64_t codec_pos; + int ret; + + if (codec_data_size > 
INT_MAX) + return AVERROR_INVALIDDATA; + if (codec_data_size == 0) + return 0; + + avpriv_set_pts_info(st, 64, 1, 1000); + codec_pos = avio_tell(pb); + v = avio_rb32(pb); + + if (v == MKTAG(0xfd, 'a', 'r', '.')) { + /* ra type header */ + if (rm_read_audio_stream_info(s, pb, st, rst, 0)) + return -1; + } else if (v == MKBETAG('L', 'S', 'D', ':')) { + avio_seek(pb, -4, SEEK_CUR); + if ((ret = rm_read_extradata(s, pb, st->codecpar, codec_data_size)) < 0) + return ret; + + st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; + st->codecpar->codec_tag = AV_RL32(st->codecpar->extradata); + st->codecpar->codec_id = ff_codec_get_id(ff_rm_codec_tags, + st->codecpar->codec_tag); + } else if(mime && !strcmp(mime, "logical-fileinfo")){ + int stream_count, rule_count, property_count, i; + ff_free_stream(s, st); + if (avio_rb16(pb) != 0) { + av_log(s, AV_LOG_WARNING, "Unsupported version\n"); + goto skip; + } + stream_count = avio_rb16(pb); + avio_skip(pb, 6*stream_count); + rule_count = avio_rb16(pb); + avio_skip(pb, 2*rule_count); + property_count = avio_rb16(pb); + for(i=0; imetadata, name, val, 0); + break; + default: avio_skip(pb, avio_rb16(pb)); + } + } + } else { + int fps; + if (avio_rl32(pb) != MKTAG('V', 'I', 'D', 'O')) { + fail1: + av_log(s, AV_LOG_WARNING, "Unsupported stream type %08x\n", v); + goto skip; + } + st->codecpar->codec_tag = avio_rl32(pb); + st->codecpar->codec_id = ff_codec_get_id(ff_rm_codec_tags, + st->codecpar->codec_tag); + av_log(s, AV_LOG_TRACE, "%X %X\n", st->codecpar->codec_tag, MKTAG('R', 'V', '2', '0')); + if (st->codecpar->codec_id == AV_CODEC_ID_NONE) + goto fail1; + st->codecpar->width = avio_rb16(pb); + st->codecpar->height = avio_rb16(pb); + avio_skip(pb, 2); // looks like bits per sample + avio_skip(pb, 4); // always zero? 
+ st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; + st->need_parsing = AVSTREAM_PARSE_TIMESTAMPS; + fps = avio_rb32(pb); + + if ((ret = rm_read_extradata(s, pb, st->codecpar, codec_data_size - (avio_tell(pb) - codec_pos))) < 0) + return ret; + + if (fps > 0) { + av_reduce(&st->avg_frame_rate.den, &st->avg_frame_rate.num, + 0x10000, fps, (1 << 30) - 1); +#if FF_API_R_FRAME_RATE + st->r_frame_rate = st->avg_frame_rate; +#endif + } else if (s->error_recognition & AV_EF_EXPLODE) { + av_log(s, AV_LOG_ERROR, "Invalid framerate\n"); + return AVERROR_INVALIDDATA; + } + } + +skip: + /* skip codec info */ + size = avio_tell(pb) - codec_pos; + if (codec_data_size >= size) { + avio_skip(pb, codec_data_size - size); + } else { + av_log(s, AV_LOG_WARNING, "codec_data_size %u < size %d\n", codec_data_size, size); + } + + return 0; +} + +/** this function assumes that the demuxer has already seeked to the start + * of the INDX chunk, and will bail out if not. */ +static int rm_read_index(AVFormatContext *s) +{ + AVIOContext *pb = s->pb; + unsigned int size, n_pkts, str_id, next_off, n, pos, pts; + AVStream *st; + + do { + if (avio_rl32(pb) != MKTAG('I','N','D','X')) + return -1; + size = avio_rb32(pb); + if (size < 20) + return -1; + avio_skip(pb, 2); + n_pkts = avio_rb32(pb); + str_id = avio_rb16(pb); + next_off = avio_rb32(pb); + for (n = 0; n < s->nb_streams; n++) + if (s->streams[n]->id == str_id) { + st = s->streams[n]; + break; + } + if (n == s->nb_streams) { + av_log(s, AV_LOG_ERROR, + "Invalid stream index %d for index at pos %"PRId64"\n", + str_id, avio_tell(pb)); + goto skip; + } else if ((avio_size(pb) - avio_tell(pb)) / 14 < n_pkts) { + av_log(s, AV_LOG_ERROR, + "Nr. 
of packets in packet index for stream index %d " + "exceeds filesize (%"PRId64" at %"PRId64" = %"PRId64")\n", + str_id, avio_size(pb), avio_tell(pb), + (avio_size(pb) - avio_tell(pb)) / 14); + goto skip; + } + + for (n = 0; n < n_pkts; n++) { + avio_skip(pb, 2); + pts = avio_rb32(pb); + pos = avio_rb32(pb); + avio_skip(pb, 4); /* packet no. */ + + av_add_index_entry(st, pos, pts, 0, 0, AVINDEX_KEYFRAME); + } + +skip: + if (next_off && avio_tell(pb) < next_off && + avio_seek(pb, next_off, SEEK_SET) < 0) { + av_log(s, AV_LOG_ERROR, + "Non-linear index detected, not supported\n"); + return -1; + } + } while (next_off); + + return 0; +} + +static int rm_read_header_old(AVFormatContext *s) +{ + RMDemuxContext *rm = s->priv_data; + AVStream *st; + + rm->old_format = 1; + st = avformat_new_stream(s, NULL); + if (!st) + return -1; + st->priv_data = ff_rm_alloc_rmstream(); + if (!st->priv_data) + return AVERROR(ENOMEM); + return rm_read_audio_stream_info(s, s->pb, st, st->priv_data, 1); +} + +static int rm_read_multi(AVFormatContext *s, AVIOContext *pb, + AVStream *st, char *mime) +{ + int number_of_streams = avio_rb16(pb); + int number_of_mdpr; + int i, ret; + unsigned size2; + for (i = 0; i 0) { + st2 = avformat_new_stream(s, NULL); + if (!st2) { + ret = AVERROR(ENOMEM); + return ret; + } + st2->id = st->id + (i<<16); + st2->codecpar->bit_rate = st->codecpar->bit_rate; + st2->start_time = st->start_time; + st2->duration = st->duration; + st2->codecpar->codec_type = AVMEDIA_TYPE_DATA; + st2->priv_data = ff_rm_alloc_rmstream(); + if (!st2->priv_data) + return AVERROR(ENOMEM); + } else + st2 = st; + + size2 = avio_rb32(pb); + ret = ff_rm_read_mdpr_codecdata(s, s->pb, st2, st2->priv_data, + size2, mime); + if (ret < 0) + return ret; + } + return 0; +} + +static int rm_read_header(AVFormatContext *s) +{ + RMDemuxContext *rm = s->priv_data; + AVStream *st; + AVIOContext *pb = s->pb; + unsigned int tag; + int tag_size; + unsigned int start_time, duration; + unsigned int data_off 
= 0, indx_off = 0; + char buf[128], mime[128]; + int flags = 0; + int ret = -1; + unsigned size, v; + int64_t codec_pos; + + tag = avio_rl32(pb); + if (tag == MKTAG('.', 'r', 'a', 0xfd)) { + /* very old .ra format */ + return rm_read_header_old(s); + } else if (tag != MKTAG('.', 'R', 'M', 'F')) { + return AVERROR(EIO); + } + + tag_size = avio_rb32(pb); + avio_skip(pb, tag_size - 8); + + for(;;) { + if (avio_feof(pb)) + goto fail; + tag = avio_rl32(pb); + tag_size = avio_rb32(pb); + avio_rb16(pb); + av_log(s, AV_LOG_TRACE, "tag=%c%c%c%c (%08x) size=%d\n", + (tag ) & 0xff, + (tag >> 8) & 0xff, + (tag >> 16) & 0xff, + (tag >> 24) & 0xff, + tag, + tag_size); + if (tag_size < 10 && tag != MKTAG('D', 'A', 'T', 'A')) + goto fail; + switch(tag) { + case MKTAG('P', 'R', 'O', 'P'): + /* file header */ + avio_rb32(pb); /* max bit rate */ + avio_rb32(pb); /* avg bit rate */ + avio_rb32(pb); /* max packet size */ + avio_rb32(pb); /* avg packet size */ + avio_rb32(pb); /* nb packets */ + duration = avio_rb32(pb); /* duration */ + s->duration = av_rescale(duration, AV_TIME_BASE, 1000); + avio_rb32(pb); /* preroll */ + indx_off = avio_rb32(pb); /* index offset */ + data_off = avio_rb32(pb); /* data offset */ + avio_rb16(pb); /* nb streams */ + flags = avio_rb16(pb); /* flags */ + break; + case MKTAG('C', 'O', 'N', 'T'): + rm_read_metadata(s, pb, 1); + break; + case MKTAG('M', 'D', 'P', 'R'): + st = avformat_new_stream(s, NULL); + if (!st) { + ret = AVERROR(ENOMEM); + goto fail; + } + st->id = avio_rb16(pb); + avio_rb32(pb); /* max bit rate */ + st->codecpar->bit_rate = avio_rb32(pb); /* bit rate */ + avio_rb32(pb); /* max packet size */ + avio_rb32(pb); /* avg packet size */ + start_time = avio_rb32(pb); /* start time */ + avio_rb32(pb); /* preroll */ + duration = avio_rb32(pb); /* duration */ + st->start_time = start_time; + st->duration = duration; + if(duration>0) + s->duration = AV_NOPTS_VALUE; + get_str8(pb, buf, sizeof(buf)); /* desc */ + get_str8(pb, mime, sizeof(mime)); /* 
mimetype */ + st->codecpar->codec_type = AVMEDIA_TYPE_DATA; + st->priv_data = ff_rm_alloc_rmstream(); + if (!st->priv_data) + return AVERROR(ENOMEM); + + size = avio_rb32(pb); + codec_pos = avio_tell(pb); + + ffio_ensure_seekback(pb, 4); + v = avio_rb32(pb); + if (v == MKBETAG('M', 'L', 'T', 'I')) { + ret = rm_read_multi(s, s->pb, st, mime); + if (ret < 0) + goto fail; + avio_seek(pb, codec_pos + size, SEEK_SET); + } else { + avio_skip(pb, -4); + if (ff_rm_read_mdpr_codecdata(s, s->pb, st, st->priv_data, + size, mime) < 0) + goto fail; + } + + break; + case MKTAG('D', 'A', 'T', 'A'): + goto header_end; + default: + /* unknown tag: skip it */ + avio_skip(pb, tag_size - 10); + break; + } + } + header_end: + rm->nb_packets = avio_rb32(pb); /* number of packets */ + if (!rm->nb_packets && (flags & 4)) + rm->nb_packets = 3600 * 25; + avio_rb32(pb); /* next data header */ + + if (!data_off) + data_off = avio_tell(pb) - 18; + if (indx_off && pb->seekable && !(s->flags & AVFMT_FLAG_IGNIDX) && + avio_seek(pb, indx_off, SEEK_SET) >= 0) { + rm_read_index(s); + avio_seek(pb, data_off + 18, SEEK_SET); + } + + return 0; + +fail: + rm_read_close(s); + return ret; +} + +static int get_num(AVIOContext *pb, int *len) +{ + int n, n1; + + n = avio_rb16(pb); + (*len)-=2; + n &= 0x7FFF; + if (n >= 0x4000) { + return n - 0x4000; + } else { + n1 = avio_rb16(pb); + (*len)-=2; + return (n << 16) | n1; + } +} + +/* multiple of 20 bytes for ra144 (ugly) */ +#define RAW_PACKET_SIZE 1000 + +static int rm_sync(AVFormatContext *s, int64_t *timestamp, int *flags, int *stream_index, int64_t *pos){ + RMDemuxContext *rm = s->priv_data; + AVIOContext *pb = s->pb; + AVStream *st; + uint32_t state=0xFFFFFFFF; + + while(!avio_feof(pb)){ + int len, num, i; + int mlti_id; + *pos= avio_tell(pb) - 3; + if(rm->remaining_len > 0){ + num= rm->current_stream; + mlti_id = 0; + len= rm->remaining_len; + *timestamp = AV_NOPTS_VALUE; + *flags= 0; + }else{ + state= (state<<8) + avio_r8(pb); + + if(state == 
MKBETAG('I', 'N', 'D', 'X')){ + int n_pkts, expected_len; + len = avio_rb32(pb); + avio_skip(pb, 2); + n_pkts = avio_rb32(pb); + expected_len = 20 + n_pkts * 14; + if (len == 20) + /* some files don't add index entries to chunk size... */ + len = expected_len; + else if (len != expected_len) + av_log(s, AV_LOG_WARNING, + "Index size %d (%d pkts) is wrong, should be %d.\n", + len, n_pkts, expected_len); + len -= 14; // we already read part of the index header + if(len<0) + continue; + goto skip; + } else if (state == MKBETAG('D','A','T','A')) { + av_log(s, AV_LOG_WARNING, + "DATA tag in middle of chunk, file may be broken.\n"); + } + + if(state > (unsigned)0xFFFF || state <= 12) + continue; + len=state - 12; + state= 0xFFFFFFFF; + + num = avio_rb16(pb); + *timestamp = avio_rb32(pb); + mlti_id = (avio_r8(pb)>>1)-1<<16; + mlti_id = FFMAX(mlti_id, 0); + *flags = avio_r8(pb); /* flags */ + } + for(i=0;inb_streams;i++) { + st = s->streams[i]; + if (mlti_id + num == st->id) + break; + } + if (i == s->nb_streams) { +skip: + /* skip packet if unknown number */ + avio_skip(pb, len); + rm->remaining_len = 0; + continue; + } + *stream_index= i; + + return len; + } + return -1; +} + +static int rm_assemble_video_frame(AVFormatContext *s, AVIOContext *pb, + RMDemuxContext *rm, RMStream *vst, + AVPacket *pkt, int len, int *pseq, + int64_t *timestamp) +{ + int hdr; + int seq = 0, pic_num = 0, len2 = 0, pos = 0; //init to silence compiler warning + int type; + int ret; + + hdr = avio_r8(pb); len--; + type = hdr >> 6; + + if(type != 3){ // not frame as a part of packet + seq = avio_r8(pb); len--; + } + if(type != 1){ // not whole frame + len2 = get_num(pb, &len); + pos = get_num(pb, &len); + pic_num = avio_r8(pb); len--; + } + if(len<0) { + av_log(s, AV_LOG_ERROR, "Insufficient data\n"); + return -1; + } + rm->remaining_len = len; + if(type&1){ // frame, not slice + if(type == 3){ // frame as a part of packet + len= len2; + *timestamp = pos; + } + if(rm->remaining_len < len) { + 
av_log(s, AV_LOG_ERROR, "Insufficient remaining len\n"); + return -1; + } + rm->remaining_len -= len; + if(av_new_packet(pkt, len + 9) < 0) + return AVERROR(EIO); + pkt->data[0] = 0; + AV_WL32(pkt->data + 1, 1); + AV_WL32(pkt->data + 5, 0); + if ((ret = avio_read(pb, pkt->data + 9, len)) != len) { + av_packet_unref(pkt); + av_log(s, AV_LOG_ERROR, "Failed to read %d bytes\n", len); + return ret < 0 ? ret : AVERROR(EIO); + } + return 0; + } + //now we have to deal with single slice + + *pseq = seq; + if((seq & 0x7F) == 1 || vst->curpic_num != pic_num){ + if (len2 > ffio_limit(pb, len2)) { + av_log(s, AV_LOG_ERROR, "Impossibly sized packet\n"); + return AVERROR_INVALIDDATA; + } + vst->slices = ((hdr & 0x3F) << 1) + 1; + vst->videobufsize = len2 + 8*vst->slices + 1; + av_packet_unref(&vst->pkt); //FIXME this should be output. + if(av_new_packet(&vst->pkt, vst->videobufsize) < 0) + return AVERROR(ENOMEM); + memset(vst->pkt.data, 0, vst->pkt.size); + vst->videobufpos = 8*vst->slices + 1; + vst->cur_slice = 0; + vst->curpic_num = pic_num; + vst->pktpos = avio_tell(pb); + } + if(type == 2) + len = FFMIN(len, pos); + + if(++vst->cur_slice > vst->slices) { + av_log(s, AV_LOG_ERROR, "cur slice %d, too large\n", vst->cur_slice); + return 1; + } + if(!vst->pkt.data) + return AVERROR(ENOMEM); + AV_WL32(vst->pkt.data - 7 + 8*vst->cur_slice, 1); + AV_WL32(vst->pkt.data - 3 + 8*vst->cur_slice, vst->videobufpos - 8*vst->slices - 1); + if(vst->videobufpos + len > vst->videobufsize) { + av_log(s, AV_LOG_ERROR, "outside videobufsize\n"); + return 1; + } + if (avio_read(pb, vst->pkt.data + vst->videobufpos, len) != len) + return AVERROR(EIO); + vst->videobufpos += len; + rm->remaining_len-= len; + + if (type == 2 || vst->videobufpos == vst->videobufsize) { + vst->pkt.data[0] = vst->cur_slice-1; + *pkt= vst->pkt; + vst->pkt.data= NULL; + vst->pkt.size= 0; + vst->pkt.buf = NULL; + if(vst->slices != vst->cur_slice) //FIXME find out how to set slices correct from the begin + 
memmove(pkt->data + 1 + 8*vst->cur_slice, pkt->data + 1 + 8*vst->slices, + vst->videobufpos - 1 - 8*vst->slices); + pkt->size = vst->videobufpos + 8*(vst->cur_slice - vst->slices); + pkt->pts = AV_NOPTS_VALUE; + pkt->pos = vst->pktpos; + vst->slices = 0; + return 0; + } + + return 1; +} + +static inline void +rm_ac3_swap_bytes (AVStream *st, AVPacket *pkt) +{ + uint8_t *ptr; + int j; + + if (st->codecpar->codec_id == AV_CODEC_ID_AC3) { + ptr = pkt->data; + for (j=0;jsize;j+=2) { + FFSWAP(int, ptr[0], ptr[1]); + ptr += 2; + } + } +} + +static int readfull(AVFormatContext *s, AVIOContext *pb, uint8_t *dst, int n) { + int ret = avio_read(pb, dst, n); + if (ret != n) { + if (ret >= 0) memset(dst + ret, 0, n - ret); + else memset(dst , 0, n); + av_log(s, AV_LOG_ERROR, "Failed to fully read block\n"); + } + return ret; +} + +int +ff_rm_parse_packet (AVFormatContext *s, AVIOContext *pb, + AVStream *st, RMStream *ast, int len, AVPacket *pkt, + int *seq, int flags, int64_t timestamp) +{ + RMDemuxContext *rm = s->priv_data; + int ret; + + if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { + rm->current_stream= st->id; + ret = rm_assemble_video_frame(s, pb, rm, ast, pkt, len, seq, ×tamp); + if(ret) + return ret < 0 ? 
ret : -1; //got partial frame or error + } else if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { + if ((ast->deint_id == DEINT_ID_GENR) || + (ast->deint_id == DEINT_ID_INT4) || + (ast->deint_id == DEINT_ID_SIPR)) { + int x; + int sps = ast->sub_packet_size; + int cfs = ast->coded_framesize; + int h = ast->sub_packet_h; + int y = ast->sub_packet_cnt; + int w = ast->audio_framesize; + + if (flags & 2) + y = ast->sub_packet_cnt = 0; + if (!y) + ast->audiotimestamp = timestamp; + + switch (ast->deint_id) { + case DEINT_ID_INT4: + for (x = 0; x < h/2; x++) + readfull(s, pb, ast->pkt.data+x*2*w+y*cfs, cfs); + break; + case DEINT_ID_GENR: + for (x = 0; x < w/sps; x++) + readfull(s, pb, ast->pkt.data+sps*(h*x+((h+1)/2)*(y&1)+(y>>1)), sps); + break; + case DEINT_ID_SIPR: + readfull(s, pb, ast->pkt.data + y * w, w); + break; + } + + if (++(ast->sub_packet_cnt) < h) + return -1; + if (ast->deint_id == DEINT_ID_SIPR) + ff_rm_reorder_sipr_data(ast->pkt.data, h, w); + + ast->sub_packet_cnt = 0; + rm->audio_stream_num = st->index; + if (st->codecpar->block_align <= 0) { + av_log(s, AV_LOG_ERROR, "Invalid block alignment %d\n", st->codecpar->block_align); + return AVERROR_INVALIDDATA; + } + rm->audio_pkt_cnt = h * w / st->codecpar->block_align; + } else if ((ast->deint_id == DEINT_ID_VBRF) || + (ast->deint_id == DEINT_ID_VBRS)) { + int x; + rm->audio_stream_num = st->index; + ast->sub_packet_cnt = (avio_rb16(pb) & 0xf0) >> 4; + if (ast->sub_packet_cnt) { + for (x = 0; x < ast->sub_packet_cnt; x++) + ast->sub_packet_lengths[x] = avio_rb16(pb); + rm->audio_pkt_cnt = ast->sub_packet_cnt; + ast->audiotimestamp = timestamp; + } else + return -1; + } else { + if ((ret = av_get_packet(pb, pkt, len)) < 0) + return ret; + rm_ac3_swap_bytes(st, pkt); + } + } else { + if ((ret = av_get_packet(pb, pkt, len)) < 0) + return ret; + } + + pkt->stream_index = st->index; + +#if 0 + if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { + if(st->codecpar->codec_id == AV_CODEC_ID_RV20){ + int 
seq= 128*(pkt->data[2]&0x7F) + (pkt->data[3]>>1); + av_log(s, AV_LOG_DEBUG, "%d %"PRId64" %d\n", *timestamp, *timestamp*512LL/25, seq); + + seq |= (timestamp&~0x3FFF); + if(seq - timestamp > 0x2000) seq -= 0x4000; + if(seq - timestamp < -0x2000) seq += 0x4000; + } + } +#endif + + pkt->pts = timestamp; + if (flags & 2) + pkt->flags |= AV_PKT_FLAG_KEY; + + return st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ? rm->audio_pkt_cnt : 0; +} + +int +ff_rm_retrieve_cache (AVFormatContext *s, AVIOContext *pb, + AVStream *st, RMStream *ast, AVPacket *pkt) +{ + RMDemuxContext *rm = s->priv_data; + + av_assert0 (rm->audio_pkt_cnt > 0); + + if (ast->deint_id == DEINT_ID_VBRF || + ast->deint_id == DEINT_ID_VBRS) { + int ret = av_get_packet(pb, pkt, ast->sub_packet_lengths[ast->sub_packet_cnt - rm->audio_pkt_cnt]); + if (ret < 0) + return ret; + } else { + int ret = av_new_packet(pkt, st->codecpar->block_align); + if (ret < 0) + return ret; + memcpy(pkt->data, ast->pkt.data + st->codecpar->block_align * //FIXME avoid this + (ast->sub_packet_h * ast->audio_framesize / st->codecpar->block_align - rm->audio_pkt_cnt), + st->codecpar->block_align); + } + rm->audio_pkt_cnt--; + if ((pkt->pts = ast->audiotimestamp) != AV_NOPTS_VALUE) { + ast->audiotimestamp = AV_NOPTS_VALUE; + pkt->flags = AV_PKT_FLAG_KEY; + } else + pkt->flags = 0; + pkt->stream_index = st->index; + + return rm->audio_pkt_cnt; +} + +static int rm_read_packet(AVFormatContext *s, AVPacket *pkt) +{ + RMDemuxContext *rm = s->priv_data; + AVStream *st = NULL; // init to silence compiler warning + int i, len, res, seq = 1; + int64_t timestamp, pos; + int flags; + + for (;;) { + if (rm->audio_pkt_cnt) { + // If there are queued audio packet return them first + st = s->streams[rm->audio_stream_num]; + res = ff_rm_retrieve_cache(s, s->pb, st, st->priv_data, pkt); + if(res < 0) + return res; + flags = 0; + } else { + if (rm->old_format) { + RMStream *ast; + + st = s->streams[0]; + ast = st->priv_data; + timestamp = 
AV_NOPTS_VALUE; + len = !ast->audio_framesize ? RAW_PACKET_SIZE : + ast->coded_framesize * ast->sub_packet_h / 2; + flags = (seq++ == 1) ? 2 : 0; + pos = avio_tell(s->pb); + } else { + len = rm_sync(s, ×tamp, &flags, &i, &pos); + if (len > 0) + st = s->streams[i]; + } + + if (avio_feof(s->pb)) + return AVERROR_EOF; + if (len <= 0) + return AVERROR(EIO); + + res = ff_rm_parse_packet (s, s->pb, st, st->priv_data, len, pkt, + &seq, flags, timestamp); + if (res < -1) + return res; + if((flags&2) && (seq&0x7F) == 1) + av_add_index_entry(st, pos, timestamp, 0, 0, AVINDEX_KEYFRAME); + if (res) + continue; + } + + if( (st->discard >= AVDISCARD_NONKEY && !(flags&2)) + || st->discard >= AVDISCARD_ALL){ + av_packet_unref(pkt); + } else + break; + } + + return 0; +} + +static int rm_read_close(AVFormatContext *s) +{ + int i; + + for (i=0;inb_streams;i++) + ff_rm_free_rmstream(s->streams[i]->priv_data); + + return 0; +} + +static int rm_probe(AVProbeData *p) +{ + /* check file header */ + if ((p->buf[0] == '.' && p->buf[1] == 'R' && + p->buf[2] == 'M' && p->buf[3] == 'F' && + p->buf[4] == 0 && p->buf[5] == 0) || + (p->buf[0] == '.' 
&& p->buf[1] == 'r' && + p->buf[2] == 'a' && p->buf[3] == 0xfd)) + return AVPROBE_SCORE_MAX; + else + return 0; +} + +static int64_t rm_read_dts(AVFormatContext *s, int stream_index, + int64_t *ppos, int64_t pos_limit) +{ + RMDemuxContext *rm = s->priv_data; + int64_t pos, dts; + int stream_index2, flags, len, h; + + pos = *ppos; + + if(rm->old_format) + return AV_NOPTS_VALUE; + + if (avio_seek(s->pb, pos, SEEK_SET) < 0) + return AV_NOPTS_VALUE; + + rm->remaining_len=0; + for(;;){ + int seq=1; + AVStream *st; + + len = rm_sync(s, &dts, &flags, &stream_index2, &pos); + if(len<0) + return AV_NOPTS_VALUE; + + st = s->streams[stream_index2]; + if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { + h= avio_r8(s->pb); len--; + if(!(h & 0x40)){ + seq = avio_r8(s->pb); len--; + } + } + + if((flags&2) && (seq&0x7F) == 1){ + av_log(s, AV_LOG_TRACE, "%d %d-%d %"PRId64" %d\n", + flags, stream_index2, stream_index, dts, seq); + av_add_index_entry(st, pos, dts, 0, 0, AVINDEX_KEYFRAME); + if(stream_index2 == stream_index) + break; + } + + avio_skip(s->pb, len); + } + *ppos = pos; + return dts; +} + +static int rm_read_seek(AVFormatContext *s, int stream_index, + int64_t pts, int flags) +{ + RMDemuxContext *rm = s->priv_data; + + if (ff_seek_frame_binary(s, stream_index, pts, flags) < 0) + return -1; + rm->audio_pkt_cnt = 0; + return 0; +} + + +AVInputFormat ff_rm_demuxer = { + .name = "rm", + .long_name = NULL_IF_CONFIG_SMALL("RealMedia"), + .priv_data_size = sizeof(RMDemuxContext), + .read_probe = rm_probe, + .read_header = rm_read_header, + .read_packet = rm_read_packet, + .read_close = rm_read_close, + .read_timestamp = rm_read_dts, + .read_seek = rm_read_seek, +}; + +AVInputFormat ff_rdt_demuxer = { + .name = "rdt", + .long_name = NULL_IF_CONFIG_SMALL("RDT demuxer"), + .priv_data_size = sizeof(RMDemuxContext), + .read_close = rm_read_close, + .flags = AVFMT_NOFILE, +}; + +static int ivr_probe(AVProbeData *p) +{ + if (memcmp(p->buf, ".R1M\x0\x1\x1", 7) && + memcmp(p->buf, 
".REC", 4)) + return 0; + + return AVPROBE_SCORE_MAX; +} + +static int ivr_read_header(AVFormatContext *s) +{ + unsigned tag, type, len, tlen, value; + int i, j, n, count, nb_streams = 0, ret; + uint8_t key[256], val[256]; + AVIOContext *pb = s->pb; + AVStream *st; + int64_t pos, offset, temp; + + pos = avio_tell(pb); + tag = avio_rl32(pb); + if (tag == MKTAG('.','R','1','M')) { + if (avio_rb16(pb) != 1) + return AVERROR_INVALIDDATA; + if (avio_r8(pb) != 1) + return AVERROR_INVALIDDATA; + len = avio_rb32(pb); + avio_skip(pb, len); + avio_skip(pb, 5); + temp = avio_rb64(pb); + while (!avio_feof(pb) && temp) { + offset = temp; + temp = avio_rb64(pb); + } + avio_skip(pb, offset - avio_tell(pb)); + if (avio_r8(pb) != 1) + return AVERROR_INVALIDDATA; + len = avio_rb32(pb); + avio_skip(pb, len); + if (avio_r8(pb) != 2) + return AVERROR_INVALIDDATA; + avio_skip(pb, 16); + pos = avio_tell(pb); + tag = avio_rl32(pb); + } + + if (tag != MKTAG('.','R','E','C')) + return AVERROR_INVALIDDATA; + + if (avio_r8(pb) != 0) + return AVERROR_INVALIDDATA; + count = avio_rb32(pb); + for (i = 0; i < count; i++) { + if (avio_feof(pb)) + return AVERROR_INVALIDDATA; + + type = avio_r8(pb); + tlen = avio_rb32(pb); + avio_get_str(pb, tlen, key, sizeof(key)); + len = avio_rb32(pb); + if (type == 5) { + avio_get_str(pb, len, val, sizeof(val)); + av_log(s, AV_LOG_DEBUG, "%s = '%s'\n", key, val); + } else if (type == 4) { + av_log(s, AV_LOG_DEBUG, "%s = '0x", key); + for (j = 0; j < len; j++) + av_log(s, AV_LOG_DEBUG, "%X", avio_r8(pb)); + av_log(s, AV_LOG_DEBUG, "'\n"); + } else if (len == 4 && type == 3 && !strncmp(key, "StreamCount", tlen)) { + nb_streams = value = avio_rb32(pb); + } else if (len == 4 && type == 3) { + value = avio_rb32(pb); + av_log(s, AV_LOG_DEBUG, "%s = %d\n", key, value); + } else { + av_log(s, AV_LOG_DEBUG, "Skipping unsupported key: %s\n", key); + avio_skip(pb, len); + } + } + + for (n = 0; n < nb_streams; n++) { + st = avformat_new_stream(s, NULL); + if (!st) + return 
AVERROR(ENOMEM); + st->priv_data = ff_rm_alloc_rmstream(); + if (!st->priv_data) + return AVERROR(ENOMEM); + + if (avio_r8(pb) != 1) + return AVERROR_INVALIDDATA; + + count = avio_rb32(pb); + for (i = 0; i < count; i++) { + if (avio_feof(pb)) + return AVERROR_INVALIDDATA; + + type = avio_r8(pb); + tlen = avio_rb32(pb); + avio_get_str(pb, tlen, key, sizeof(key)); + len = avio_rb32(pb); + if (type == 5) { + avio_get_str(pb, len, val, sizeof(val)); + av_log(s, AV_LOG_DEBUG, "%s = '%s'\n", key, val); + } else if (type == 4 && !strncmp(key, "OpaqueData", tlen)) { + ret = ffio_ensure_seekback(pb, 4); + if (ret < 0) + return ret; + if (avio_rb32(pb) == MKBETAG('M', 'L', 'T', 'I')) { + ret = rm_read_multi(s, pb, st, NULL); + } else { + avio_seek(pb, -4, SEEK_CUR); + ret = ff_rm_read_mdpr_codecdata(s, pb, st, st->priv_data, len, NULL); + } + + if (ret < 0) + return ret; + } else if (type == 4) { + int j; + + av_log(s, AV_LOG_DEBUG, "%s = '0x", key); + for (j = 0; j < len; j++) + av_log(s, AV_LOG_DEBUG, "%X", avio_r8(pb)); + av_log(s, AV_LOG_DEBUG, "'\n"); + } else if (len == 4 && type == 3 && !strncmp(key, "Duration", tlen)) { + st->duration = avio_rb32(pb); + } else if (len == 4 && type == 3) { + value = avio_rb32(pb); + av_log(s, AV_LOG_DEBUG, "%s = %d\n", key, value); + } else { + av_log(s, AV_LOG_DEBUG, "Skipping unsupported key: %s\n", key); + avio_skip(pb, len); + } + } + } + + if (avio_r8(pb) != 6) + return AVERROR_INVALIDDATA; + avio_skip(pb, 12); + avio_skip(pb, avio_rb64(pb) + pos - avio_tell(s->pb)); + if (avio_r8(pb) != 8) + return AVERROR_INVALIDDATA; + avio_skip(pb, 8); + + return 0; +} + +static int ivr_read_packet(AVFormatContext *s, AVPacket *pkt) +{ + RMDemuxContext *rm = s->priv_data; + int ret = AVERROR_EOF, opcode; + AVIOContext *pb = s->pb; + unsigned size, index; + int64_t pos, pts; + + if (avio_feof(pb) || rm->data_end) + return AVERROR_EOF; + + pos = avio_tell(pb); + + for (;;) { + if (rm->audio_pkt_cnt) { + // If there are queued audio packet 
return them first + AVStream *st; + + st = s->streams[rm->audio_stream_num]; + ret = ff_rm_retrieve_cache(s, pb, st, st->priv_data, pkt); + if (ret < 0) { + return ret; + } + } else { + if (rm->remaining_len) { + avio_skip(pb, rm->remaining_len); + rm->remaining_len = 0; + } + + if (avio_feof(pb)) + return AVERROR_EOF; + + opcode = avio_r8(pb); + if (opcode == 2) { + AVStream *st; + int seq = 1; + + pts = avio_rb32(pb); + index = avio_rb16(pb); + if (index >= s->nb_streams) + return AVERROR_INVALIDDATA; + + avio_skip(pb, 4); + size = avio_rb32(pb); + avio_skip(pb, 4); + + if (size < 1 || size > INT_MAX/4) { + av_log(s, AV_LOG_ERROR, "size %u is invalid\n", size); + return AVERROR_INVALIDDATA; + } + + st = s->streams[index]; + ret = ff_rm_parse_packet(s, pb, st, st->priv_data, size, pkt, + &seq, 0, pts); + if (ret < -1) { + return ret; + } else if (ret) { + continue; + } + + pkt->pos = pos; + pkt->pts = pts; + pkt->stream_index = index; + } else if (opcode == 7) { + pos = avio_rb64(pb); + if (!pos) { + rm->data_end = 1; + return AVERROR_EOF; + } + } else { + av_log(s, AV_LOG_ERROR, "Unsupported opcode=%d at %"PRIX64"\n", opcode, avio_tell(pb) - 1); + return AVERROR(EIO); + } + } + + break; + } + + return ret; +} + +AVInputFormat ff_ivr_demuxer = { + .name = "ivr", + .long_name = NULL_IF_CONFIG_SMALL("IVR (Internet Video Recording)"), + .priv_data_size = sizeof(RMDemuxContext), + .read_probe = ivr_probe, + .read_header = ivr_read_header, + .read_packet = ivr_read_packet, + .read_close = rm_read_close, + .extensions = "ivr", +}; From f86a94e3116626801eae9498d1d5e55ab8208e1c Mon Sep 17 00:00:00 2001 From: turly221 Date: Mon, 9 Dec 2024 16:06:03 +0000 Subject: [PATCH 08/13] commit patch 18558903 --- libavformat/pva.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/libavformat/pva.c b/libavformat/pva.c index 16381db9054f4..04ae8e2800cce 100644 --- a/libavformat/pva.c +++ b/libavformat/pva.c @@ -134,6 +134,10 @@ static int read_part_of_packet(AVFormatContext *s, 
int64_t *pts, pes_flags = avio_rb16(pb); pes_header_data_length = avio_r8(pb); + if (avio_feof(pb)) { + return AVERROR_EOF; + } + if (pes_signal != 1 || pes_header_data_length == 0) { pva_log(s, AV_LOG_WARNING, "expected non empty signaled PES packet, " "trying to recover\n"); From fa2e1c178be8be088b35e4edae5c532fdd330cad Mon Sep 17 00:00:00 2001 From: turly221 Date: Mon, 9 Dec 2024 16:06:05 +0000 Subject: [PATCH 09/13] commit patch 27983284 --- libavformat/movenc.c | 5 + libavformat/movenc.c.orig | 6344 +++++++++++++++++++++++++++++++++++++ 2 files changed, 6349 insertions(+) create mode 100644 libavformat/movenc.c.orig diff --git a/libavformat/movenc.c b/libavformat/movenc.c index 6228192f98906..5f40f2622492b 100644 --- a/libavformat/movenc.c +++ b/libavformat/movenc.c @@ -4780,6 +4780,11 @@ int ff_mov_write_packet(AVFormatContext *s, AVPacket *pkt) else samples_in_chunk = 1; + if (samples_in_chunk < 1) { + av_log(s, AV_LOG_ERROR, "fatal error, input packet contains no samples\n"); + return AVERROR_PATCHWELCOME; + } + /* copy extradata if it exists */ if (trk->vos_len == 0 && par->extradata_size > 0 && !TAG_IS_AVCI(trk->tag) && diff --git a/libavformat/movenc.c.orig b/libavformat/movenc.c.orig new file mode 100644 index 0000000000000..6228192f98906 --- /dev/null +++ b/libavformat/movenc.c.orig @@ -0,0 +1,6344 @@ +/* + * MOV, 3GP, MP4 muxer + * Copyright (c) 2003 Thomas Raivio + * Copyright (c) 2004 Gildas Bazin + * Copyright (c) 2009 Baptiste Coudurier + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include + +#include "movenc.h" +#include "avformat.h" +#include "avio_internal.h" +#include "riff.h" +#include "avio.h" +#include "isom.h" +#include "avc.h" +#include "libavcodec/ac3_parser.h" +#include "libavcodec/dnxhddata.h" +#include "libavcodec/get_bits.h" +#include "libavcodec/put_bits.h" +#include "libavcodec/vc1_common.h" +#include "libavcodec/raw.h" +#include "internal.h" +#include "libavutil/avstring.h" +#include "libavutil/intfloat.h" +#include "libavutil/mathematics.h" +#include "libavutil/libm.h" +#include "libavutil/opt.h" +#include "libavutil/dict.h" +#include "libavutil/pixdesc.h" +#include "libavutil/timecode.h" +#include "libavutil/color_utils.h" +#include "hevc.h" +#include "rtpenc.h" +#include "mov_chan.h" +#include "vpcc.h" + +static const AVOption options[] = { + { "movflags", "MOV muxer flags", offsetof(MOVMuxContext, flags), AV_OPT_TYPE_FLAGS, {.i64 = 0}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, + { "rtphint", "Add RTP hint tracks", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_RTP_HINT}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, + { "moov_size", "maximum moov size so it can be placed at the begin", offsetof(MOVMuxContext, reserved_moov_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, 0 }, + { "empty_moov", "Make the initial moov atom empty", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_EMPTY_MOOV}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, + { "frag_keyframe", "Fragment at video keyframes", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_FRAG_KEYFRAME}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, + { "separate_moof", "Write separate moof/mdat atoms for each 
track", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_SEPARATE_MOOF}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, + { "frag_custom", "Flush fragments on caller requests", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_FRAG_CUSTOM}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, + { "isml", "Create a live smooth streaming feed (for pushing to a publishing point)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_ISML}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, + { "faststart", "Run a second pass to put the index (moov atom) at the beginning of the file", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_FASTSTART}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, + { "omit_tfhd_offset", "Omit the base data offset in tfhd atoms", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_OMIT_TFHD_OFFSET}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, + { "disable_chpl", "Disable Nero chapter atom", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_DISABLE_CHPL}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, + { "default_base_moof", "Set the default-base-is-moof flag in tfhd atoms", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_DEFAULT_BASE_MOOF}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, + { "dash", "Write DASH compatible fragmented MP4", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_DASH}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, + { "frag_discont", "Signal that the next fragment is discontinuous from earlier ones", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_FRAG_DISCONT}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, + { "delay_moov", "Delay writing the initial moov until the first fragment is cut, or until the first fragment flush", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_DELAY_MOOV}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, + { "global_sidx", "Write a global sidx index at the start of the file", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_GLOBAL_SIDX}, INT_MIN, 
INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, + { "write_colr", "Write colr atom (Experimental, may be renamed or changed, do not use from scripts)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_WRITE_COLR}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, + { "write_gama", "Write deprecated gama atom", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_WRITE_GAMA}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, + { "use_metadata_tags", "Use mdta atom for metadata.", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_USE_MDTA}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, + FF_RTP_FLAG_OPTS(MOVMuxContext, rtp_flags), + { "skip_iods", "Skip writing iods atom.", offsetof(MOVMuxContext, iods_skip), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM}, + { "iods_audio_profile", "iods audio profile atom.", offsetof(MOVMuxContext, iods_audio_profile), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 255, AV_OPT_FLAG_ENCODING_PARAM}, + { "iods_video_profile", "iods video profile atom.", offsetof(MOVMuxContext, iods_video_profile), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 255, AV_OPT_FLAG_ENCODING_PARAM}, + { "frag_duration", "Maximum fragment duration", offsetof(MOVMuxContext, max_fragment_duration), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM}, + { "min_frag_duration", "Minimum fragment duration", offsetof(MOVMuxContext, min_fragment_duration), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM}, + { "frag_size", "Maximum fragment size", offsetof(MOVMuxContext, max_fragment_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM}, + { "ism_lookahead", "Number of lookahead entries for ISM files", offsetof(MOVMuxContext, ism_lookahead), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM}, + { "video_track_timescale", "set timescale of all video tracks", offsetof(MOVMuxContext, video_track_timescale), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM}, + { "brand", 
"Override major brand", offsetof(MOVMuxContext, major_brand), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = AV_OPT_FLAG_ENCODING_PARAM }, + { "use_editlist", "use edit list", offsetof(MOVMuxContext, use_editlist), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, AV_OPT_FLAG_ENCODING_PARAM}, + { "fragment_index", "Fragment number of the next fragment", offsetof(MOVMuxContext, fragments), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM}, + { "mov_gamma", "gamma value for gama atom", offsetof(MOVMuxContext, gamma), AV_OPT_TYPE_FLOAT, {.dbl = 0.0 }, 0.0, 10, AV_OPT_FLAG_ENCODING_PARAM}, + { "frag_interleave", "Interleave samples within fragments (max number of consecutive samples, lower is tighter interleaving, but with more overhead)", offsetof(MOVMuxContext, frag_interleave), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM }, + { "encryption_scheme", "Configures the encryption scheme, allowed values are none, cenc-aes-ctr", offsetof(MOVMuxContext, encryption_scheme_str), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = AV_OPT_FLAG_ENCODING_PARAM }, + { "encryption_key", "The media encryption key (hex)", offsetof(MOVMuxContext, encryption_key), AV_OPT_TYPE_BINARY, .flags = AV_OPT_FLAG_ENCODING_PARAM }, + { "encryption_kid", "The media encryption key identifier (hex)", offsetof(MOVMuxContext, encryption_kid), AV_OPT_TYPE_BINARY, .flags = AV_OPT_FLAG_ENCODING_PARAM }, + { "use_stream_ids_as_track_ids", "use stream ids as track ids", offsetof(MOVMuxContext, use_stream_ids_as_track_ids), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM}, + { "write_tmcd", "force or disable writing tmcd", offsetof(MOVMuxContext, write_tmcd), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, AV_OPT_FLAG_ENCODING_PARAM}, + { NULL }, +}; + +#define MOV_CLASS(flavor)\ +static const AVClass flavor ## _muxer_class = {\ + .class_name = #flavor " muxer",\ + .item_name = av_default_item_name,\ + .option = options,\ + .version = LIBAVUTIL_VERSION_INT,\ +}; + +static int 
get_moov_size(AVFormatContext *s); + +static int utf8len(const uint8_t *b) +{ + int len = 0; + int val; + while (*b) { + GET_UTF8(val, *b++, return -1;) + len++; + } + return len; +} + +//FIXME support 64 bit variant with wide placeholders +static int64_t update_size(AVIOContext *pb, int64_t pos) +{ + int64_t curpos = avio_tell(pb); + avio_seek(pb, pos, SEEK_SET); + avio_wb32(pb, curpos - pos); /* rewrite size */ + avio_seek(pb, curpos, SEEK_SET); + + return curpos - pos; +} + +static int co64_required(const MOVTrack *track) +{ + if (track->entry > 0 && track->cluster[track->entry - 1].pos + track->data_offset > UINT32_MAX) + return 1; + return 0; +} + +/* Chunk offset atom */ +static int mov_write_stco_tag(AVIOContext *pb, MOVTrack *track) +{ + int i; + int mode64 = co64_required(track); // use 32 bit size variant if possible + int64_t pos = avio_tell(pb); + avio_wb32(pb, 0); /* size */ + if (mode64) + ffio_wfourcc(pb, "co64"); + else + ffio_wfourcc(pb, "stco"); + avio_wb32(pb, 0); /* version & flags */ + avio_wb32(pb, track->chunkCount); /* entry count */ + for (i = 0; i < track->entry; i++) { + if (!track->cluster[i].chunkNum) + continue; + if (mode64 == 1) + avio_wb64(pb, track->cluster[i].pos + track->data_offset); + else + avio_wb32(pb, track->cluster[i].pos + track->data_offset); + } + return update_size(pb, pos); +} + +/* Sample size atom */ +static int mov_write_stsz_tag(AVIOContext *pb, MOVTrack *track) +{ + int equalChunks = 1; + int i, j, entries = 0, tst = -1, oldtst = -1; + + int64_t pos = avio_tell(pb); + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "stsz"); + avio_wb32(pb, 0); /* version & flags */ + + for (i = 0; i < track->entry; i++) { + tst = track->cluster[i].size / track->cluster[i].entries; + if (oldtst != -1 && tst != oldtst) + equalChunks = 0; + oldtst = tst; + entries += track->cluster[i].entries; + } + if (equalChunks && track->entry) { + int sSize = track->entry ? 
track->cluster[0].size / track->cluster[0].entries : 0; + sSize = FFMAX(1, sSize); // adpcm mono case could make sSize == 0 + avio_wb32(pb, sSize); // sample size + avio_wb32(pb, entries); // sample count + } else { + avio_wb32(pb, 0); // sample size + avio_wb32(pb, entries); // sample count + for (i = 0; i < track->entry; i++) { + for (j = 0; j < track->cluster[i].entries; j++) { + avio_wb32(pb, track->cluster[i].size / + track->cluster[i].entries); + } + } + } + return update_size(pb, pos); +} + +/* Sample to chunk atom */ +static int mov_write_stsc_tag(AVIOContext *pb, MOVTrack *track) +{ + int index = 0, oldval = -1, i; + int64_t entryPos, curpos; + + int64_t pos = avio_tell(pb); + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "stsc"); + avio_wb32(pb, 0); // version & flags + entryPos = avio_tell(pb); + avio_wb32(pb, track->chunkCount); // entry count + for (i = 0; i < track->entry; i++) { + if (oldval != track->cluster[i].samples_in_chunk && track->cluster[i].chunkNum) { + avio_wb32(pb, track->cluster[i].chunkNum); // first chunk + avio_wb32(pb, track->cluster[i].samples_in_chunk); // samples per chunk + avio_wb32(pb, 0x1); // sample description index + oldval = track->cluster[i].samples_in_chunk; + index++; + } + } + curpos = avio_tell(pb); + avio_seek(pb, entryPos, SEEK_SET); + avio_wb32(pb, index); // rewrite size + avio_seek(pb, curpos, SEEK_SET); + + return update_size(pb, pos); +} + +/* Sync sample atom */ +static int mov_write_stss_tag(AVIOContext *pb, MOVTrack *track, uint32_t flag) +{ + int64_t curpos, entryPos; + int i, index = 0; + int64_t pos = avio_tell(pb); + avio_wb32(pb, 0); // size + ffio_wfourcc(pb, flag == MOV_SYNC_SAMPLE ? 
"stss" : "stps"); + avio_wb32(pb, 0); // version & flags + entryPos = avio_tell(pb); + avio_wb32(pb, track->entry); // entry count + for (i = 0; i < track->entry; i++) { + if (track->cluster[i].flags & flag) { + avio_wb32(pb, i + 1); + index++; + } + } + curpos = avio_tell(pb); + avio_seek(pb, entryPos, SEEK_SET); + avio_wb32(pb, index); // rewrite size + avio_seek(pb, curpos, SEEK_SET); + return update_size(pb, pos); +} + +static int mov_write_amr_tag(AVIOContext *pb, MOVTrack *track) +{ + avio_wb32(pb, 0x11); /* size */ + if (track->mode == MODE_MOV) ffio_wfourcc(pb, "samr"); + else ffio_wfourcc(pb, "damr"); + ffio_wfourcc(pb, "FFMP"); + avio_w8(pb, 0); /* decoder version */ + + avio_wb16(pb, 0x81FF); /* Mode set (all modes for AMR_NB) */ + avio_w8(pb, 0x00); /* Mode change period (no restriction) */ + avio_w8(pb, 0x01); /* Frames per sample */ + return 0x11; +} + +static int mov_write_ac3_tag(AVIOContext *pb, MOVTrack *track) +{ + GetBitContext gbc; + PutBitContext pbc; + uint8_t buf[3]; + int fscod, bsid, bsmod, acmod, lfeon, frmsizecod; + + if (track->vos_len < 7) + return -1; + + avio_wb32(pb, 11); + ffio_wfourcc(pb, "dac3"); + + init_get_bits(&gbc, track->vos_data + 4, (track->vos_len - 4) * 8); + fscod = get_bits(&gbc, 2); + frmsizecod = get_bits(&gbc, 6); + bsid = get_bits(&gbc, 5); + bsmod = get_bits(&gbc, 3); + acmod = get_bits(&gbc, 3); + if (acmod == 2) { + skip_bits(&gbc, 2); // dsurmod + } else { + if ((acmod & 1) && acmod != 1) + skip_bits(&gbc, 2); // cmixlev + if (acmod & 4) + skip_bits(&gbc, 2); // surmixlev + } + lfeon = get_bits1(&gbc); + + init_put_bits(&pbc, buf, sizeof(buf)); + put_bits(&pbc, 2, fscod); + put_bits(&pbc, 5, bsid); + put_bits(&pbc, 3, bsmod); + put_bits(&pbc, 3, acmod); + put_bits(&pbc, 1, lfeon); + put_bits(&pbc, 5, frmsizecod >> 1); // bit_rate_code + put_bits(&pbc, 5, 0); // reserved + + flush_put_bits(&pbc); + avio_write(pb, buf, sizeof(buf)); + + return 11; +} + +struct eac3_info { + AVPacket pkt; + uint8_t ec3_done; + 
uint8_t num_blocks; + + /* Layout of the EC3SpecificBox */ + /* maximum bitrate */ + uint16_t data_rate; + /* number of independent substreams */ + uint8_t num_ind_sub; + struct { + /* sample rate code (see ff_ac3_sample_rate_tab) 2 bits */ + uint8_t fscod; + /* bit stream identification 5 bits */ + uint8_t bsid; + /* one bit reserved */ + /* audio service mixing (not supported yet) 1 bit */ + /* bit stream mode 3 bits */ + uint8_t bsmod; + /* audio coding mode 3 bits */ + uint8_t acmod; + /* sub woofer on 1 bit */ + uint8_t lfeon; + /* 3 bits reserved */ + /* number of dependent substreams associated with this substream 4 bits */ + uint8_t num_dep_sub; + /* channel locations of the dependent substream(s), if any, 9 bits */ + uint16_t chan_loc; + /* if there is no dependent substream, then one bit reserved instead */ + } substream[1]; /* TODO: support 8 independent substreams */ +}; + +#if CONFIG_AC3_PARSER +static int handle_eac3(MOVMuxContext *mov, AVPacket *pkt, MOVTrack *track) +{ + GetBitContext gbc; + AC3HeaderInfo tmp, *hdr = &tmp; + struct eac3_info *info; + int num_blocks; + + if (!track->eac3_priv && !(track->eac3_priv = av_mallocz(sizeof(*info)))) + return AVERROR(ENOMEM); + info = track->eac3_priv; + + init_get_bits(&gbc, pkt->data, pkt->size * 8); + if (avpriv_ac3_parse_header(&gbc, &hdr) < 0) { + /* drop the packets until we see a good one */ + if (!track->entry) { + av_log(mov, AV_LOG_WARNING, "Dropping invalid packet from start of the stream\n"); + return 0; + } + return AVERROR_INVALIDDATA; + } + + info->data_rate = FFMAX(info->data_rate, hdr->bit_rate / 1000); + num_blocks = hdr->num_blocks; + + if (!info->ec3_done) { + /* AC-3 substream must be the first one */ + if (hdr->bitstream_id <= 10 && hdr->substreamid != 0) + return AVERROR(EINVAL); + + /* this should always be the case, given that our AC-3 parser + * concatenates dependent frames to their independent parent */ + if (hdr->frame_type == EAC3_FRAME_TYPE_INDEPENDENT) { + /* substream ids 
must be incremental */ + if (hdr->substreamid > info->num_ind_sub + 1) + return AVERROR(EINVAL); + + if (hdr->substreamid == info->num_ind_sub + 1) { + //info->num_ind_sub++; + avpriv_request_sample(track->par, "Multiple independent substreams"); + return AVERROR_PATCHWELCOME; + } else if (hdr->substreamid < info->num_ind_sub || + hdr->substreamid == 0 && info->substream[0].bsid) { + info->ec3_done = 1; + goto concatenate; + } + } + + /* fill the info needed for the "dec3" atom */ + info->substream[hdr->substreamid].fscod = hdr->sr_code; + info->substream[hdr->substreamid].bsid = hdr->bitstream_id; + info->substream[hdr->substreamid].bsmod = hdr->bitstream_mode; + info->substream[hdr->substreamid].acmod = hdr->channel_mode; + info->substream[hdr->substreamid].lfeon = hdr->lfe_on; + + /* Parse dependent substream(s), if any */ + if (pkt->size != hdr->frame_size) { + int cumul_size = hdr->frame_size; + int parent = hdr->substreamid; + + while (cumul_size != pkt->size) { + int i; + init_get_bits(&gbc, pkt->data + cumul_size, (pkt->size - cumul_size) * 8); + if (avpriv_ac3_parse_header(&gbc, &hdr) < 0) + return AVERROR_INVALIDDATA; + if (hdr->frame_type != EAC3_FRAME_TYPE_DEPENDENT) + return AVERROR(EINVAL); + cumul_size += hdr->frame_size; + info->substream[parent].num_dep_sub++; + + /* header is parsed up to lfeon, but custom channel map may be needed */ + /* skip bsid */ + skip_bits(&gbc, 5); + /* skip volume control params */ + for (i = 0; i < (hdr->channel_mode ? 
1 : 2); i++) { + skip_bits(&gbc, 5); // skip dialog normalization + if (get_bits1(&gbc)) { + skip_bits(&gbc, 8); // skip compression gain word + } + } + /* get the dependent stream channel map, if exists */ + if (get_bits1(&gbc)) + info->substream[parent].chan_loc |= (get_bits(&gbc, 16) >> 5) & 0x1f; + else + info->substream[parent].chan_loc |= hdr->channel_mode; + } + } + } + +concatenate: + if (!info->num_blocks && num_blocks == 6) + return pkt->size; + else if (info->num_blocks + num_blocks > 6) + return AVERROR_INVALIDDATA; + + if (!info->num_blocks) { + int ret; + if ((ret = av_copy_packet(&info->pkt, pkt)) < 0) + return ret; + info->num_blocks = num_blocks; + return 0; + } else { + int ret; + if ((ret = av_grow_packet(&info->pkt, pkt->size)) < 0) + return ret; + memcpy(info->pkt.data + info->pkt.size - pkt->size, pkt->data, pkt->size); + info->num_blocks += num_blocks; + info->pkt.duration += pkt->duration; + if ((ret = av_copy_packet_side_data(&info->pkt, pkt)) < 0) + return ret; + if (info->num_blocks != 6) + return 0; + av_packet_unref(pkt); + if ((ret = av_copy_packet(pkt, &info->pkt)) < 0) + return ret; + av_packet_unref(&info->pkt); + info->num_blocks = 0; + } + + return pkt->size; +} +#endif + +static int mov_write_eac3_tag(AVIOContext *pb, MOVTrack *track) +{ + PutBitContext pbc; + uint8_t *buf; + struct eac3_info *info; + int size, i; + + if (!track->eac3_priv) + return AVERROR(EINVAL); + + info = track->eac3_priv; + size = 2 + 4 * (info->num_ind_sub + 1); + buf = av_malloc(size); + if (!buf) { + size = AVERROR(ENOMEM); + goto end; + } + + init_put_bits(&pbc, buf, size); + put_bits(&pbc, 13, info->data_rate); + put_bits(&pbc, 3, info->num_ind_sub); + for (i = 0; i <= info->num_ind_sub; i++) { + put_bits(&pbc, 2, info->substream[i].fscod); + put_bits(&pbc, 5, info->substream[i].bsid); + put_bits(&pbc, 1, 0); /* reserved */ + put_bits(&pbc, 1, 0); /* asvc */ + put_bits(&pbc, 3, info->substream[i].bsmod); + put_bits(&pbc, 3, info->substream[i].acmod); + 
put_bits(&pbc, 1, info->substream[i].lfeon); + put_bits(&pbc, 5, 0); /* reserved */ + put_bits(&pbc, 4, info->substream[i].num_dep_sub); + if (!info->substream[i].num_dep_sub) { + put_bits(&pbc, 1, 0); /* reserved */ + size--; + } else { + put_bits(&pbc, 9, info->substream[i].chan_loc); + } + } + flush_put_bits(&pbc); + + avio_wb32(pb, size + 8); + ffio_wfourcc(pb, "dec3"); + avio_write(pb, buf, size); + + av_free(buf); + +end: + av_packet_unref(&info->pkt); + av_freep(&track->eac3_priv); + + return size; +} + +/** + * This function writes extradata "as is". + * Extradata must be formatted like a valid atom (with size and tag). + */ +static int mov_write_extradata_tag(AVIOContext *pb, MOVTrack *track) +{ + avio_write(pb, track->par->extradata, track->par->extradata_size); + return track->par->extradata_size; +} + +static int mov_write_enda_tag(AVIOContext *pb) +{ + avio_wb32(pb, 10); + ffio_wfourcc(pb, "enda"); + avio_wb16(pb, 1); /* little endian */ + return 10; +} + +static int mov_write_enda_tag_be(AVIOContext *pb) +{ + avio_wb32(pb, 10); + ffio_wfourcc(pb, "enda"); + avio_wb16(pb, 0); /* big endian */ + return 10; +} + +static void put_descr(AVIOContext *pb, int tag, unsigned int size) +{ + int i = 3; + avio_w8(pb, tag); + for (; i > 0; i--) + avio_w8(pb, (size >> (7 * i)) | 0x80); + avio_w8(pb, size & 0x7F); +} + +static unsigned compute_avg_bitrate(MOVTrack *track) +{ + uint64_t size = 0; + int i; + if (!track->track_duration) + return 0; + for (i = 0; i < track->entry; i++) + size += track->cluster[i].size; + return size * 8 * track->timescale / track->track_duration; +} + +static int mov_write_esds_tag(AVIOContext *pb, MOVTrack *track) // Basic +{ + AVCPBProperties *props; + int64_t pos = avio_tell(pb); + int decoder_specific_info_len = track->vos_len ? 
5 + track->vos_len : 0; + unsigned avg_bitrate; + + avio_wb32(pb, 0); // size + ffio_wfourcc(pb, "esds"); + avio_wb32(pb, 0); // Version + + // ES descriptor + put_descr(pb, 0x03, 3 + 5+13 + decoder_specific_info_len + 5+1); + avio_wb16(pb, track->track_id); + avio_w8(pb, 0x00); // flags (= no flags) + + // DecoderConfig descriptor + put_descr(pb, 0x04, 13 + decoder_specific_info_len); + + // Object type indication + if ((track->par->codec_id == AV_CODEC_ID_MP2 || + track->par->codec_id == AV_CODEC_ID_MP3) && + track->par->sample_rate > 24000) + avio_w8(pb, 0x6B); // 11172-3 + else + avio_w8(pb, ff_codec_get_tag(ff_mp4_obj_type, track->par->codec_id)); + + // the following fields is made of 6 bits to identify the streamtype (4 for video, 5 for audio) + // plus 1 bit to indicate upstream and 1 bit set to 1 (reserved) + if (track->par->codec_id == AV_CODEC_ID_DVD_SUBTITLE) + avio_w8(pb, (0x38 << 2) | 1); // flags (= NeroSubpicStream) + else if (track->par->codec_type == AVMEDIA_TYPE_AUDIO) + avio_w8(pb, 0x15); // flags (= Audiostream) + else + avio_w8(pb, 0x11); // flags (= Visualstream) + + props = (AVCPBProperties*)av_stream_get_side_data(track->st, AV_PKT_DATA_CPB_PROPERTIES, + NULL); + + avio_wb24(pb, props ? props->buffer_size / 8 : 0); // Buffersize DB + + avg_bitrate = compute_avg_bitrate(track); + avio_wb32(pb, props ? 
FFMAX3(props->max_bitrate, props->avg_bitrate, avg_bitrate) : FFMAX(track->par->bit_rate, avg_bitrate)); // maxbitrate (FIXME should be max rate in any 1 sec window) + avio_wb32(pb, avg_bitrate); + + if (track->vos_len) { + // DecoderSpecific info descriptor + put_descr(pb, 0x05, track->vos_len); + avio_write(pb, track->vos_data, track->vos_len); + } + + // SL descriptor + put_descr(pb, 0x06, 1); + avio_w8(pb, 0x02); + return update_size(pb, pos); +} + +static int mov_pcm_le_gt16(enum AVCodecID codec_id) +{ + return codec_id == AV_CODEC_ID_PCM_S24LE || + codec_id == AV_CODEC_ID_PCM_S32LE || + codec_id == AV_CODEC_ID_PCM_F32LE || + codec_id == AV_CODEC_ID_PCM_F64LE; +} + +static int mov_pcm_be_gt16(enum AVCodecID codec_id) +{ + return codec_id == AV_CODEC_ID_PCM_S24BE || + codec_id == AV_CODEC_ID_PCM_S32BE || + codec_id == AV_CODEC_ID_PCM_F32BE || + codec_id == AV_CODEC_ID_PCM_F64BE; +} + +static int mov_write_ms_tag(AVFormatContext *s, AVIOContext *pb, MOVTrack *track) +{ + int ret; + int64_t pos = avio_tell(pb); + avio_wb32(pb, 0); + avio_wl32(pb, track->tag); // store it byteswapped + track->par->codec_tag = av_bswap16(track->tag >> 16); + if ((ret = ff_put_wav_header(s, pb, track->par, 0)) < 0) + return ret; + return update_size(pb, pos); +} + +static int mov_write_wfex_tag(AVFormatContext *s, AVIOContext *pb, MOVTrack *track) +{ + int ret; + int64_t pos = avio_tell(pb); + avio_wb32(pb, 0); + ffio_wfourcc(pb, "wfex"); + if ((ret = ff_put_wav_header(s, pb, track->st->codecpar, FF_PUT_WAV_HEADER_FORCE_WAVEFORMATEX)) < 0) + return ret; + return update_size(pb, pos); +} + +static int mov_write_chan_tag(AVFormatContext *s, AVIOContext *pb, MOVTrack *track) +{ + uint32_t layout_tag, bitmap; + int64_t pos = avio_tell(pb); + + layout_tag = ff_mov_get_channel_layout_tag(track->par->codec_id, + track->par->channel_layout, + &bitmap); + if (!layout_tag) { + av_log(s, AV_LOG_WARNING, "not writing 'chan' tag due to " + "lack of channel information\n"); + return 0; + } + + if 
(track->multichannel_as_mono) + return 0; + + avio_wb32(pb, 0); // Size + ffio_wfourcc(pb, "chan"); // Type + avio_w8(pb, 0); // Version + avio_wb24(pb, 0); // Flags + avio_wb32(pb, layout_tag); // mChannelLayoutTag + avio_wb32(pb, bitmap); // mChannelBitmap + avio_wb32(pb, 0); // mNumberChannelDescriptions + + return update_size(pb, pos); +} + +static int mov_write_wave_tag(AVFormatContext *s, AVIOContext *pb, MOVTrack *track) +{ + int64_t pos = avio_tell(pb); + + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "wave"); + + if (track->par->codec_id != AV_CODEC_ID_QDM2) { + avio_wb32(pb, 12); /* size */ + ffio_wfourcc(pb, "frma"); + avio_wl32(pb, track->tag); + } + + if (track->par->codec_id == AV_CODEC_ID_AAC) { + /* useless atom needed by mplayer, ipod, not needed by quicktime */ + avio_wb32(pb, 12); /* size */ + ffio_wfourcc(pb, "mp4a"); + avio_wb32(pb, 0); + mov_write_esds_tag(pb, track); + } else if (mov_pcm_le_gt16(track->par->codec_id)) { + mov_write_enda_tag(pb); + } else if (mov_pcm_be_gt16(track->par->codec_id)) { + mov_write_enda_tag_be(pb); + } else if (track->par->codec_id == AV_CODEC_ID_AMR_NB) { + mov_write_amr_tag(pb, track); + } else if (track->par->codec_id == AV_CODEC_ID_AC3) { + mov_write_ac3_tag(pb, track); + } else if (track->par->codec_id == AV_CODEC_ID_EAC3) { + mov_write_eac3_tag(pb, track); + } else if (track->par->codec_id == AV_CODEC_ID_ALAC || + track->par->codec_id == AV_CODEC_ID_QDM2) { + mov_write_extradata_tag(pb, track); + } else if (track->par->codec_id == AV_CODEC_ID_ADPCM_MS || + track->par->codec_id == AV_CODEC_ID_ADPCM_IMA_WAV) { + mov_write_ms_tag(s, pb, track); + } + + avio_wb32(pb, 8); /* size */ + avio_wb32(pb, 0); /* null tag */ + + return update_size(pb, pos); +} + +static int mov_write_dvc1_structs(MOVTrack *track, uint8_t *buf) +{ + uint8_t *unescaped; + const uint8_t *start, *next, *end = track->vos_data + track->vos_len; + int unescaped_size, seq_found = 0; + int level = 0, interlace = 0; + int packet_seq = 
track->vc1_info.packet_seq; + int packet_entry = track->vc1_info.packet_entry; + int slices = track->vc1_info.slices; + PutBitContext pbc; + + if (track->start_dts == AV_NOPTS_VALUE) { + /* No packets written yet, vc1_info isn't authoritative yet. */ + /* Assume inline sequence and entry headers. */ + packet_seq = packet_entry = 1; + av_log(NULL, AV_LOG_WARNING, + "moov atom written before any packets, unable to write correct " + "dvc1 atom. Set the delay_moov flag to fix this.\n"); + } + + unescaped = av_mallocz(track->vos_len + AV_INPUT_BUFFER_PADDING_SIZE); + if (!unescaped) + return AVERROR(ENOMEM); + start = find_next_marker(track->vos_data, end); + for (next = start; next < end; start = next) { + GetBitContext gb; + int size; + next = find_next_marker(start + 4, end); + size = next - start - 4; + if (size <= 0) + continue; + unescaped_size = vc1_unescape_buffer(start + 4, size, unescaped); + init_get_bits(&gb, unescaped, 8 * unescaped_size); + if (AV_RB32(start) == VC1_CODE_SEQHDR) { + int profile = get_bits(&gb, 2); + if (profile != PROFILE_ADVANCED) { + av_free(unescaped); + return AVERROR(ENOSYS); + } + seq_found = 1; + level = get_bits(&gb, 3); + /* chromaformat, frmrtq_postproc, bitrtq_postproc, postprocflag, + * width, height */ + skip_bits_long(&gb, 2 + 3 + 5 + 1 + 2*12); + skip_bits(&gb, 1); /* broadcast */ + interlace = get_bits1(&gb); + skip_bits(&gb, 4); /* tfcntrflag, finterpflag, reserved, psf */ + } + } + if (!seq_found) { + av_free(unescaped); + return AVERROR(ENOSYS); + } + + init_put_bits(&pbc, buf, 7); + /* VC1DecSpecStruc */ + put_bits(&pbc, 4, 12); /* profile - advanced */ + put_bits(&pbc, 3, level); + put_bits(&pbc, 1, 0); /* reserved */ + /* VC1AdvDecSpecStruc */ + put_bits(&pbc, 3, level); + put_bits(&pbc, 1, 0); /* cbr */ + put_bits(&pbc, 6, 0); /* reserved */ + put_bits(&pbc, 1, !interlace); /* no interlace */ + put_bits(&pbc, 1, !packet_seq); /* no multiple seq */ + put_bits(&pbc, 1, !packet_entry); /* no multiple entry */ + 
put_bits(&pbc, 1, !slices); /* no slice code */ + put_bits(&pbc, 1, 0); /* no bframe */ + put_bits(&pbc, 1, 0); /* reserved */ + + /* framerate */ + if (track->st->avg_frame_rate.num > 0 && track->st->avg_frame_rate.den > 0) + put_bits32(&pbc, track->st->avg_frame_rate.num / track->st->avg_frame_rate.den); + else + put_bits32(&pbc, 0xffffffff); + + flush_put_bits(&pbc); + + av_free(unescaped); + + return 0; +} + +static int mov_write_dvc1_tag(AVIOContext *pb, MOVTrack *track) +{ + uint8_t buf[7] = { 0 }; + int ret; + + if ((ret = mov_write_dvc1_structs(track, buf)) < 0) + return ret; + + avio_wb32(pb, track->vos_len + 8 + sizeof(buf)); + ffio_wfourcc(pb, "dvc1"); + avio_write(pb, buf, sizeof(buf)); + avio_write(pb, track->vos_data, track->vos_len); + + return 0; +} + +static int mov_write_glbl_tag(AVIOContext *pb, MOVTrack *track) +{ + avio_wb32(pb, track->vos_len + 8); + ffio_wfourcc(pb, "glbl"); + avio_write(pb, track->vos_data, track->vos_len); + return 8 + track->vos_len; +} + +/** + * Compute flags for 'lpcm' tag. + * See CoreAudioTypes and AudioStreamBasicDescription at Apple. 
+ */ +static int mov_get_lpcm_flags(enum AVCodecID codec_id) +{ + switch (codec_id) { + case AV_CODEC_ID_PCM_F32BE: + case AV_CODEC_ID_PCM_F64BE: + return 11; + case AV_CODEC_ID_PCM_F32LE: + case AV_CODEC_ID_PCM_F64LE: + return 9; + case AV_CODEC_ID_PCM_U8: + return 10; + case AV_CODEC_ID_PCM_S16BE: + case AV_CODEC_ID_PCM_S24BE: + case AV_CODEC_ID_PCM_S32BE: + return 14; + case AV_CODEC_ID_PCM_S8: + case AV_CODEC_ID_PCM_S16LE: + case AV_CODEC_ID_PCM_S24LE: + case AV_CODEC_ID_PCM_S32LE: + return 12; + default: + return 0; + } +} + +static int get_cluster_duration(MOVTrack *track, int cluster_idx) +{ + int64_t next_dts; + + if (cluster_idx >= track->entry) + return 0; + + if (cluster_idx + 1 == track->entry) + next_dts = track->track_duration + track->start_dts; + else + next_dts = track->cluster[cluster_idx + 1].dts; + + next_dts -= track->cluster[cluster_idx].dts; + + av_assert0(next_dts >= 0); + av_assert0(next_dts <= INT_MAX); + + return next_dts; +} + +static int get_samples_per_packet(MOVTrack *track) +{ + int i, first_duration; + +// return track->par->frame_size; + + /* use 1 for raw PCM */ + if (!track->audio_vbr) + return 1; + + /* check to see if duration is constant for all clusters */ + if (!track->entry) + return 0; + first_duration = get_cluster_duration(track, 0); + for (i = 1; i < track->entry; i++) { + if (get_cluster_duration(track, i) != first_duration) + return 0; + } + return first_duration; +} + +static int mov_write_audio_tag(AVFormatContext *s, AVIOContext *pb, MOVMuxContext *mov, MOVTrack *track) +{ + int64_t pos = avio_tell(pb); + int version = 0; + uint32_t tag = track->tag; + + if (track->mode == MODE_MOV) { + if (track->timescale > UINT16_MAX) { + if (mov_get_lpcm_flags(track->par->codec_id)) + tag = AV_RL32("lpcm"); + version = 2; + } else if (track->audio_vbr || mov_pcm_le_gt16(track->par->codec_id) || + mov_pcm_be_gt16(track->par->codec_id) || + track->par->codec_id == AV_CODEC_ID_ADPCM_MS || + track->par->codec_id == 
AV_CODEC_ID_ADPCM_IMA_WAV || + track->par->codec_id == AV_CODEC_ID_QDM2) { + version = 1; + } + } + + avio_wb32(pb, 0); /* size */ + if (mov->encryption_scheme != MOV_ENC_NONE) { + ffio_wfourcc(pb, "enca"); + } else { + avio_wl32(pb, tag); // store it byteswapped + } + avio_wb32(pb, 0); /* Reserved */ + avio_wb16(pb, 0); /* Reserved */ + avio_wb16(pb, 1); /* Data-reference index, XXX == 1 */ + + /* SoundDescription */ + avio_wb16(pb, version); /* Version */ + avio_wb16(pb, 0); /* Revision level */ + avio_wb32(pb, 0); /* Reserved */ + + if (version == 2) { + avio_wb16(pb, 3); + avio_wb16(pb, 16); + avio_wb16(pb, 0xfffe); + avio_wb16(pb, 0); + avio_wb32(pb, 0x00010000); + avio_wb32(pb, 72); + avio_wb64(pb, av_double2int(track->par->sample_rate)); + avio_wb32(pb, track->par->channels); + avio_wb32(pb, 0x7F000000); + avio_wb32(pb, av_get_bits_per_sample(track->par->codec_id)); + avio_wb32(pb, mov_get_lpcm_flags(track->par->codec_id)); + avio_wb32(pb, track->sample_size); + avio_wb32(pb, get_samples_per_packet(track)); + } else { + if (track->mode == MODE_MOV) { + avio_wb16(pb, track->par->channels); + if (track->par->codec_id == AV_CODEC_ID_PCM_U8 || + track->par->codec_id == AV_CODEC_ID_PCM_S8) + avio_wb16(pb, 8); /* bits per sample */ + else if (track->par->codec_id == AV_CODEC_ID_ADPCM_G726) + avio_wb16(pb, track->par->bits_per_coded_sample); + else + avio_wb16(pb, 16); + avio_wb16(pb, track->audio_vbr ? -2 : 0); /* compression ID */ + } else { /* reserved for mp4/3gp */ + avio_wb16(pb, 2); + avio_wb16(pb, 16); + avio_wb16(pb, 0); + } + + avio_wb16(pb, 0); /* packet size (= 0) */ + avio_wb16(pb, track->par->sample_rate <= UINT16_MAX ? 
+ track->par->sample_rate : 0); + avio_wb16(pb, 0); /* Reserved */ + } + + if (version == 1) { /* SoundDescription V1 extended info */ + if (mov_pcm_le_gt16(track->par->codec_id) || + mov_pcm_be_gt16(track->par->codec_id)) + avio_wb32(pb, 1); /* must be 1 for uncompressed formats */ + else + avio_wb32(pb, track->par->frame_size); /* Samples per packet */ + avio_wb32(pb, track->sample_size / track->par->channels); /* Bytes per packet */ + avio_wb32(pb, track->sample_size); /* Bytes per frame */ + avio_wb32(pb, 2); /* Bytes per sample */ + } + + if (track->mode == MODE_MOV && + (track->par->codec_id == AV_CODEC_ID_AAC || + track->par->codec_id == AV_CODEC_ID_AC3 || + track->par->codec_id == AV_CODEC_ID_EAC3 || + track->par->codec_id == AV_CODEC_ID_AMR_NB || + track->par->codec_id == AV_CODEC_ID_ALAC || + track->par->codec_id == AV_CODEC_ID_ADPCM_MS || + track->par->codec_id == AV_CODEC_ID_ADPCM_IMA_WAV || + track->par->codec_id == AV_CODEC_ID_QDM2 || + (mov_pcm_le_gt16(track->par->codec_id) && version==1) || + (mov_pcm_be_gt16(track->par->codec_id) && version==1))) + mov_write_wave_tag(s, pb, track); + else if (track->tag == MKTAG('m','p','4','a')) + mov_write_esds_tag(pb, track); + else if (track->par->codec_id == AV_CODEC_ID_AMR_NB) + mov_write_amr_tag(pb, track); + else if (track->par->codec_id == AV_CODEC_ID_AC3) + mov_write_ac3_tag(pb, track); + else if (track->par->codec_id == AV_CODEC_ID_EAC3) + mov_write_eac3_tag(pb, track); + else if (track->par->codec_id == AV_CODEC_ID_ALAC) + mov_write_extradata_tag(pb, track); + else if (track->par->codec_id == AV_CODEC_ID_WMAPRO) + mov_write_wfex_tag(s, pb, track); + else if (track->vos_len > 0) + mov_write_glbl_tag(pb, track); + + if (track->mode == MODE_MOV && track->par->codec_type == AVMEDIA_TYPE_AUDIO) + mov_write_chan_tag(s, pb, track); + + if (mov->encryption_scheme != MOV_ENC_NONE) { + ff_mov_cenc_write_sinf_tag(track, pb, mov->encryption_kid); + } + + return update_size(pb, pos); +} + +static int 
mov_write_d263_tag(AVIOContext *pb) +{ + avio_wb32(pb, 0xf); /* size */ + ffio_wfourcc(pb, "d263"); + ffio_wfourcc(pb, "FFMP"); + avio_w8(pb, 0); /* decoder version */ + /* FIXME use AVCodecContext level/profile, when encoder will set values */ + avio_w8(pb, 0xa); /* level */ + avio_w8(pb, 0); /* profile */ + return 0xf; +} + +static int mov_write_avcc_tag(AVIOContext *pb, MOVTrack *track) +{ + int64_t pos = avio_tell(pb); + + avio_wb32(pb, 0); + ffio_wfourcc(pb, "avcC"); + ff_isom_write_avcc(pb, track->vos_data, track->vos_len); + return update_size(pb, pos); +} + +static int mov_write_vpcc_tag(AVFormatContext *s, AVIOContext *pb, MOVTrack *track) +{ + int64_t pos = avio_tell(pb); + + avio_wb32(pb, 0); + ffio_wfourcc(pb, "vpcC"); + avio_wb32(pb, 0); /* version & flags */ + ff_isom_write_vpcc(s, pb, track->par); + return update_size(pb, pos); +} + +static int mov_write_hvcc_tag(AVIOContext *pb, MOVTrack *track) +{ + int64_t pos = avio_tell(pb); + + avio_wb32(pb, 0); + ffio_wfourcc(pb, "hvcC"); + ff_isom_write_hvcc(pb, track->vos_data, track->vos_len, 0); + return update_size(pb, pos); +} + +/* also used by all avid codecs (dv, imx, meridien) and their variants */ +static int mov_write_avid_tag(AVIOContext *pb, MOVTrack *track) +{ + int i; + int interlaced; + int cid; + int display_width = track->par->width; + + if (track->vos_data && track->vos_len > 0x29) { + if (ff_dnxhd_parse_header_prefix(track->vos_data) != 0) { + /* looks like a DNxHD bit stream */ + interlaced = (track->vos_data[5] & 2); + cid = AV_RB32(track->vos_data + 0x28); + } else { + av_log(NULL, AV_LOG_WARNING, "Could not locate DNxHD bit stream in vos_data\n"); + return 0; + } + } else { + av_log(NULL, AV_LOG_WARNING, "Could not locate DNxHD bit stream, vos_data too small\n"); + return 0; + } + + avio_wb32(pb, 24); /* size */ + ffio_wfourcc(pb, "ACLR"); + ffio_wfourcc(pb, "ACLR"); + ffio_wfourcc(pb, "0001"); + if (track->par->color_range == AVCOL_RANGE_MPEG || /* Legal range (16-235) */ + 
track->par->color_range == AVCOL_RANGE_UNSPECIFIED) { + avio_wb32(pb, 1); /* Corresponds to 709 in official encoder */ + } else { /* Full range (0-255) */ + avio_wb32(pb, 2); /* Corresponds to RGB in official encoder */ + } + avio_wb32(pb, 0); /* unknown */ + + if (track->tag == MKTAG('A','V','d','h')) { + avio_wb32(pb, 32); + ffio_wfourcc(pb, "ADHR"); + ffio_wfourcc(pb, "0001"); + avio_wb32(pb, cid); + avio_wb32(pb, 0); /* unknown */ + avio_wb32(pb, 1); /* unknown */ + avio_wb32(pb, 0); /* unknown */ + avio_wb32(pb, 0); /* unknown */ + return 0; + } + + avio_wb32(pb, 24); /* size */ + ffio_wfourcc(pb, "APRG"); + ffio_wfourcc(pb, "APRG"); + ffio_wfourcc(pb, "0001"); + avio_wb32(pb, 1); /* unknown */ + avio_wb32(pb, 0); /* unknown */ + + avio_wb32(pb, 120); /* size */ + ffio_wfourcc(pb, "ARES"); + ffio_wfourcc(pb, "ARES"); + ffio_wfourcc(pb, "0001"); + avio_wb32(pb, cid); /* dnxhd cid, some id ? */ + if ( track->par->sample_aspect_ratio.num > 0 + && track->par->sample_aspect_ratio.den > 0) + display_width = display_width * track->par->sample_aspect_ratio.num / track->par->sample_aspect_ratio.den; + avio_wb32(pb, display_width); + /* values below are based on samples created with quicktime and avid codecs */ + if (interlaced) { + avio_wb32(pb, track->par->height / 2); + avio_wb32(pb, 2); /* unknown */ + avio_wb32(pb, 0); /* unknown */ + avio_wb32(pb, 4); /* unknown */ + } else { + avio_wb32(pb, track->par->height); + avio_wb32(pb, 1); /* unknown */ + avio_wb32(pb, 0); /* unknown */ + if (track->par->height == 1080) + avio_wb32(pb, 5); /* unknown */ + else + avio_wb32(pb, 6); /* unknown */ + } + /* padding */ + for (i = 0; i < 10; i++) + avio_wb64(pb, 0); + + return 0; +} + +static int mov_write_dpxe_tag(AVIOContext *pb, MOVTrack *track) +{ + avio_wb32(pb, 12); + ffio_wfourcc(pb, "DpxE"); + if (track->par->extradata_size >= 12 && + !memcmp(&track->par->extradata[4], "DpxE", 4)) { + avio_wb32(pb, track->par->extradata[11]); + } else { + avio_wb32(pb, 1); + } + return 
0; +} + +static int mp4_get_codec_tag(AVFormatContext *s, MOVTrack *track) +{ + int tag = track->par->codec_tag; + + if (!ff_codec_get_tag(ff_mp4_obj_type, track->par->codec_id)) + return 0; + + if (track->par->codec_id == AV_CODEC_ID_H264) tag = MKTAG('a','v','c','1'); + else if (track->par->codec_id == AV_CODEC_ID_HEVC) tag = MKTAG('h','e','v','1'); + else if (track->par->codec_id == AV_CODEC_ID_VP9) tag = MKTAG('v','p','0','9'); + else if (track->par->codec_id == AV_CODEC_ID_AC3) tag = MKTAG('a','c','-','3'); + else if (track->par->codec_id == AV_CODEC_ID_EAC3) tag = MKTAG('e','c','-','3'); + else if (track->par->codec_id == AV_CODEC_ID_DIRAC) tag = MKTAG('d','r','a','c'); + else if (track->par->codec_id == AV_CODEC_ID_MOV_TEXT) tag = MKTAG('t','x','3','g'); + else if (track->par->codec_id == AV_CODEC_ID_VC1) tag = MKTAG('v','c','-','1'); + else if (track->par->codec_type == AVMEDIA_TYPE_VIDEO) tag = MKTAG('m','p','4','v'); + else if (track->par->codec_type == AVMEDIA_TYPE_AUDIO) tag = MKTAG('m','p','4','a'); + else if (track->par->codec_id == AV_CODEC_ID_DVD_SUBTITLE) tag = MKTAG('m','p','4','s'); + + return tag; +} + +static const AVCodecTag codec_ipod_tags[] = { + { AV_CODEC_ID_H264, MKTAG('a','v','c','1') }, + { AV_CODEC_ID_MPEG4, MKTAG('m','p','4','v') }, + { AV_CODEC_ID_AAC, MKTAG('m','p','4','a') }, + { AV_CODEC_ID_ALAC, MKTAG('a','l','a','c') }, + { AV_CODEC_ID_AC3, MKTAG('a','c','-','3') }, + { AV_CODEC_ID_MOV_TEXT, MKTAG('t','x','3','g') }, + { AV_CODEC_ID_MOV_TEXT, MKTAG('t','e','x','t') }, + { AV_CODEC_ID_NONE, 0 }, +}; + +static int ipod_get_codec_tag(AVFormatContext *s, MOVTrack *track) +{ + int tag = track->par->codec_tag; + + // keep original tag for subs, ipod supports both formats + if (!(track->par->codec_type == AVMEDIA_TYPE_SUBTITLE && + (tag == MKTAG('t', 'x', '3', 'g') || + tag == MKTAG('t', 'e', 'x', 't')))) + tag = ff_codec_get_tag(codec_ipod_tags, track->par->codec_id); + + if (!av_match_ext(s->filename, "m4a") && + 
!av_match_ext(s->filename, "m4b") && + !av_match_ext(s->filename, "m4v")) + av_log(s, AV_LOG_WARNING, "Warning, extension is not .m4a, .m4v nor .m4b " + "Quicktime/Ipod might not play the file\n"); + + return tag; +} + +static int mov_get_dv_codec_tag(AVFormatContext *s, MOVTrack *track) +{ + int tag; + + if (track->par->width == 720) { /* SD */ + if (track->par->height == 480) { /* NTSC */ + if (track->par->format == AV_PIX_FMT_YUV422P) tag = MKTAG('d','v','5','n'); + else tag = MKTAG('d','v','c',' '); + }else if (track->par->format == AV_PIX_FMT_YUV422P) tag = MKTAG('d','v','5','p'); + else if (track->par->format == AV_PIX_FMT_YUV420P) tag = MKTAG('d','v','c','p'); + else tag = MKTAG('d','v','p','p'); + } else if (track->par->height == 720) { /* HD 720 line */ + if (track->st->time_base.den == 50) tag = MKTAG('d','v','h','q'); + else tag = MKTAG('d','v','h','p'); + } else if (track->par->height == 1080) { /* HD 1080 line */ + if (track->st->time_base.den == 25) tag = MKTAG('d','v','h','5'); + else tag = MKTAG('d','v','h','6'); + } else { + av_log(s, AV_LOG_ERROR, "unsupported height for dv codec\n"); + return 0; + } + + return tag; +} + +static AVRational find_fps(AVFormatContext *s, AVStream *st) +{ + AVRational rate = st->avg_frame_rate; + +#if FF_API_LAVF_AVCTX + FF_DISABLE_DEPRECATION_WARNINGS + rate = av_inv_q(st->codec->time_base); + if (av_timecode_check_frame_rate(rate) < 0) { + av_log(s, AV_LOG_DEBUG, "timecode: tbc=%d/%d invalid, fallback on %d/%d\n", + rate.num, rate.den, st->avg_frame_rate.num, st->avg_frame_rate.den); + rate = st->avg_frame_rate; + } + FF_ENABLE_DEPRECATION_WARNINGS +#endif + + return rate; +} + +static int mov_get_mpeg2_xdcam_codec_tag(AVFormatContext *s, MOVTrack *track) +{ + int tag = track->par->codec_tag; + int interlaced = track->par->field_order > AV_FIELD_PROGRESSIVE; + AVStream *st = track->st; + int rate = av_q2d(find_fps(s, st)); + + if (!tag) + tag = MKTAG('m', '2', 'v', '1'); //fallback tag + + if (track->par->format == 
AV_PIX_FMT_YUV420P) { + if (track->par->width == 1280 && track->par->height == 720) { + if (!interlaced) { + if (rate == 24) tag = MKTAG('x','d','v','4'); + else if (rate == 25) tag = MKTAG('x','d','v','5'); + else if (rate == 30) tag = MKTAG('x','d','v','1'); + else if (rate == 50) tag = MKTAG('x','d','v','a'); + else if (rate == 60) tag = MKTAG('x','d','v','9'); + } + } else if (track->par->width == 1440 && track->par->height == 1080) { + if (!interlaced) { + if (rate == 24) tag = MKTAG('x','d','v','6'); + else if (rate == 25) tag = MKTAG('x','d','v','7'); + else if (rate == 30) tag = MKTAG('x','d','v','8'); + } else { + if (rate == 25) tag = MKTAG('x','d','v','3'); + else if (rate == 30) tag = MKTAG('x','d','v','2'); + } + } else if (track->par->width == 1920 && track->par->height == 1080) { + if (!interlaced) { + if (rate == 24) tag = MKTAG('x','d','v','d'); + else if (rate == 25) tag = MKTAG('x','d','v','e'); + else if (rate == 30) tag = MKTAG('x','d','v','f'); + } else { + if (rate == 25) tag = MKTAG('x','d','v','c'); + else if (rate == 30) tag = MKTAG('x','d','v','b'); + } + } + } else if (track->par->format == AV_PIX_FMT_YUV422P) { + if (track->par->width == 1280 && track->par->height == 720) { + if (!interlaced) { + if (rate == 24) tag = MKTAG('x','d','5','4'); + else if (rate == 25) tag = MKTAG('x','d','5','5'); + else if (rate == 30) tag = MKTAG('x','d','5','1'); + else if (rate == 50) tag = MKTAG('x','d','5','a'); + else if (rate == 60) tag = MKTAG('x','d','5','9'); + } + } else if (track->par->width == 1920 && track->par->height == 1080) { + if (!interlaced) { + if (rate == 24) tag = MKTAG('x','d','5','d'); + else if (rate == 25) tag = MKTAG('x','d','5','e'); + else if (rate == 30) tag = MKTAG('x','d','5','f'); + } else { + if (rate == 25) tag = MKTAG('x','d','5','c'); + else if (rate == 30) tag = MKTAG('x','d','5','b'); + } + } + } + + return tag; +} + +static int mov_get_h264_codec_tag(AVFormatContext *s, MOVTrack *track) +{ + int tag = 
track->par->codec_tag; + int interlaced = track->par->field_order > AV_FIELD_PROGRESSIVE; + AVStream *st = track->st; + int rate = av_q2d(find_fps(s, st)); + + if (!tag) + tag = MKTAG('a', 'v', 'c', 'i'); //fallback tag + + if (track->par->format == AV_PIX_FMT_YUV420P10) { + if (track->par->width == 960 && track->par->height == 720) { + if (!interlaced) { + if (rate == 24) tag = MKTAG('a','i','5','p'); + else if (rate == 25) tag = MKTAG('a','i','5','q'); + else if (rate == 30) tag = MKTAG('a','i','5','p'); + else if (rate == 50) tag = MKTAG('a','i','5','q'); + else if (rate == 60) tag = MKTAG('a','i','5','p'); + } + } else if (track->par->width == 1440 && track->par->height == 1080) { + if (!interlaced) { + if (rate == 24) tag = MKTAG('a','i','5','3'); + else if (rate == 25) tag = MKTAG('a','i','5','2'); + else if (rate == 30) tag = MKTAG('a','i','5','3'); + } else { + if (rate == 50) tag = MKTAG('a','i','5','5'); + else if (rate == 60) tag = MKTAG('a','i','5','6'); + } + } + } else if (track->par->format == AV_PIX_FMT_YUV422P10) { + if (track->par->width == 1280 && track->par->height == 720) { + if (!interlaced) { + if (rate == 24) tag = MKTAG('a','i','1','p'); + else if (rate == 25) tag = MKTAG('a','i','1','q'); + else if (rate == 30) tag = MKTAG('a','i','1','p'); + else if (rate == 50) tag = MKTAG('a','i','1','q'); + else if (rate == 60) tag = MKTAG('a','i','1','p'); + } + } else if (track->par->width == 1920 && track->par->height == 1080) { + if (!interlaced) { + if (rate == 24) tag = MKTAG('a','i','1','3'); + else if (rate == 25) tag = MKTAG('a','i','1','2'); + else if (rate == 30) tag = MKTAG('a','i','1','3'); + } else { + if (rate == 25) tag = MKTAG('a','i','1','5'); + else if (rate == 50) tag = MKTAG('a','i','1','5'); + else if (rate == 60) tag = MKTAG('a','i','1','6'); + } + } else if ( track->par->width == 4096 && track->par->height == 2160 + || track->par->width == 3840 && track->par->height == 2160 + || track->par->width == 2048 && track->par->height == 
1080) { + tag = MKTAG('a','i','v','x'); + } + } + + return tag; +} + +static const struct { + enum AVPixelFormat pix_fmt; + uint32_t tag; + unsigned bps; +} mov_pix_fmt_tags[] = { + { AV_PIX_FMT_YUYV422, MKTAG('y','u','v','2'), 0 }, + { AV_PIX_FMT_YUYV422, MKTAG('y','u','v','s'), 0 }, + { AV_PIX_FMT_UYVY422, MKTAG('2','v','u','y'), 0 }, + { AV_PIX_FMT_RGB555BE,MKTAG('r','a','w',' '), 16 }, + { AV_PIX_FMT_RGB555LE,MKTAG('L','5','5','5'), 16 }, + { AV_PIX_FMT_RGB565LE,MKTAG('L','5','6','5'), 16 }, + { AV_PIX_FMT_RGB565BE,MKTAG('B','5','6','5'), 16 }, + { AV_PIX_FMT_GRAY16BE,MKTAG('b','1','6','g'), 16 }, + { AV_PIX_FMT_RGB24, MKTAG('r','a','w',' '), 24 }, + { AV_PIX_FMT_BGR24, MKTAG('2','4','B','G'), 24 }, + { AV_PIX_FMT_ARGB, MKTAG('r','a','w',' '), 32 }, + { AV_PIX_FMT_BGRA, MKTAG('B','G','R','A'), 32 }, + { AV_PIX_FMT_RGBA, MKTAG('R','G','B','A'), 32 }, + { AV_PIX_FMT_ABGR, MKTAG('A','B','G','R'), 32 }, + { AV_PIX_FMT_RGB48BE, MKTAG('b','4','8','r'), 48 }, +}; + +static int mov_get_dnxhd_codec_tag(AVFormatContext *s, MOVTrack *track) +{ + int tag = MKTAG('A','V','d','n'); + if (track->par->profile != FF_PROFILE_UNKNOWN && + track->par->profile != FF_PROFILE_DNXHD) + tag = MKTAG('A','V','d','h'); + return tag; +} + +static int mov_get_rawvideo_codec_tag(AVFormatContext *s, MOVTrack *track) +{ + int tag = track->par->codec_tag; + int i; + enum AVPixelFormat pix_fmt; + + for (i = 0; i < FF_ARRAY_ELEMS(mov_pix_fmt_tags); i++) { + if (track->par->format == mov_pix_fmt_tags[i].pix_fmt) { + tag = mov_pix_fmt_tags[i].tag; + track->par->bits_per_coded_sample = mov_pix_fmt_tags[i].bps; + if (track->par->codec_tag == mov_pix_fmt_tags[i].tag) + break; + } + } + + pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_mov, + track->par->bits_per_coded_sample); + if (tag == MKTAG('r','a','w',' ') && + track->par->format != pix_fmt && + track->par->format != AV_PIX_FMT_NONE) + av_log(s, AV_LOG_ERROR, "%s rawvideo cannot be written to mov, output file will be unreadable\n", + 
av_get_pix_fmt_name(track->par->format)); + return tag; +} + +static int mov_get_codec_tag(AVFormatContext *s, MOVTrack *track) +{ + int tag = track->par->codec_tag; + + if (!tag || (s->strict_std_compliance >= FF_COMPLIANCE_NORMAL && + (track->par->codec_id == AV_CODEC_ID_DVVIDEO || + track->par->codec_id == AV_CODEC_ID_RAWVIDEO || + track->par->codec_id == AV_CODEC_ID_H263 || + track->par->codec_id == AV_CODEC_ID_H264 || + track->par->codec_id == AV_CODEC_ID_DNXHD || + track->par->codec_id == AV_CODEC_ID_MPEG2VIDEO || + av_get_bits_per_sample(track->par->codec_id)))) { // pcm audio + if (track->par->codec_id == AV_CODEC_ID_DVVIDEO) + tag = mov_get_dv_codec_tag(s, track); + else if (track->par->codec_id == AV_CODEC_ID_RAWVIDEO) + tag = mov_get_rawvideo_codec_tag(s, track); + else if (track->par->codec_id == AV_CODEC_ID_MPEG2VIDEO) + tag = mov_get_mpeg2_xdcam_codec_tag(s, track); + else if (track->par->codec_id == AV_CODEC_ID_H264) + tag = mov_get_h264_codec_tag(s, track); + else if (track->par->codec_id == AV_CODEC_ID_DNXHD) + tag = mov_get_dnxhd_codec_tag(s, track); + else if (track->par->codec_type == AVMEDIA_TYPE_VIDEO) { + tag = ff_codec_get_tag(ff_codec_movvideo_tags, track->par->codec_id); + if (!tag) { // if no mac fcc found, try with Microsoft tags + tag = ff_codec_get_tag(ff_codec_bmp_tags, track->par->codec_id); + if (tag) + av_log(s, AV_LOG_WARNING, "Using MS style video codec tag, " + "the file may be unplayable!\n"); + } + } else if (track->par->codec_type == AVMEDIA_TYPE_AUDIO) { + tag = ff_codec_get_tag(ff_codec_movaudio_tags, track->par->codec_id); + if (!tag) { // if no mac fcc found, try with Microsoft tags + int ms_tag = ff_codec_get_tag(ff_codec_wav_tags, track->par->codec_id); + if (ms_tag) { + tag = MKTAG('m', 's', ((ms_tag >> 8) & 0xff), (ms_tag & 0xff)); + av_log(s, AV_LOG_WARNING, "Using MS style audio codec tag, " + "the file may be unplayable!\n"); + } + } + } else if (track->par->codec_type == AVMEDIA_TYPE_SUBTITLE) + tag = 
ff_codec_get_tag(ff_codec_movsubtitle_tags, track->par->codec_id); + } + + return tag; +} + +static const AVCodecTag codec_3gp_tags[] = { + { AV_CODEC_ID_H263, MKTAG('s','2','6','3') }, + { AV_CODEC_ID_H264, MKTAG('a','v','c','1') }, + { AV_CODEC_ID_MPEG4, MKTAG('m','p','4','v') }, + { AV_CODEC_ID_AAC, MKTAG('m','p','4','a') }, + { AV_CODEC_ID_AMR_NB, MKTAG('s','a','m','r') }, + { AV_CODEC_ID_AMR_WB, MKTAG('s','a','w','b') }, + { AV_CODEC_ID_MOV_TEXT, MKTAG('t','x','3','g') }, + { AV_CODEC_ID_NONE, 0 }, +}; + +static const AVCodecTag codec_f4v_tags[] = { // XXX: add GIF/PNG/JPEG? + { AV_CODEC_ID_MP3, MKTAG('.','m','p','3') }, + { AV_CODEC_ID_AAC, MKTAG('m','p','4','a') }, + { AV_CODEC_ID_H264, MKTAG('a','v','c','1') }, + { AV_CODEC_ID_VP6A, MKTAG('V','P','6','A') }, + { AV_CODEC_ID_VP6F, MKTAG('V','P','6','F') }, + { AV_CODEC_ID_NONE, 0 }, +}; + +static int mov_find_codec_tag(AVFormatContext *s, MOVTrack *track) +{ + int tag; + + if (track->mode == MODE_MP4 || track->mode == MODE_PSP) + tag = mp4_get_codec_tag(s, track); + else if (track->mode == MODE_ISM) { + tag = mp4_get_codec_tag(s, track); + if (!tag && track->par->codec_id == AV_CODEC_ID_WMAPRO) + tag = MKTAG('w', 'm', 'a', ' '); + } else if (track->mode == MODE_IPOD) + tag = ipod_get_codec_tag(s, track); + else if (track->mode & MODE_3GP) + tag = ff_codec_get_tag(codec_3gp_tags, track->par->codec_id); + else if (track->mode == MODE_F4V) + tag = ff_codec_get_tag(codec_f4v_tags, track->par->codec_id); + else + tag = mov_get_codec_tag(s, track); + + return tag; +} + +/** Write uuid atom. 
+ * Needed to make file play in iPods running newest firmware + * goes after avcC atom in moov.trak.mdia.minf.stbl.stsd.avc1 + */ +static int mov_write_uuid_tag_ipod(AVIOContext *pb) +{ + avio_wb32(pb, 28); + ffio_wfourcc(pb, "uuid"); + avio_wb32(pb, 0x6b6840f2); + avio_wb32(pb, 0x5f244fc5); + avio_wb32(pb, 0xba39a51b); + avio_wb32(pb, 0xcf0323f3); + avio_wb32(pb, 0x0); + return 28; +} + +static const uint16_t fiel_data[] = { + 0x0000, 0x0100, 0x0201, 0x0206, 0x0209, 0x020e +}; + +static int mov_write_fiel_tag(AVIOContext *pb, MOVTrack *track, int field_order) +{ + unsigned mov_field_order = 0; + if (field_order < FF_ARRAY_ELEMS(fiel_data)) + mov_field_order = fiel_data[field_order]; + else + return 0; + avio_wb32(pb, 10); + ffio_wfourcc(pb, "fiel"); + avio_wb16(pb, mov_field_order); + return 10; +} + +static int mov_write_subtitle_tag(AVIOContext *pb, MOVTrack *track) +{ + int64_t pos = avio_tell(pb); + avio_wb32(pb, 0); /* size */ + avio_wl32(pb, track->tag); // store it byteswapped + avio_wb32(pb, 0); /* Reserved */ + avio_wb16(pb, 0); /* Reserved */ + avio_wb16(pb, 1); /* Data-reference index */ + + if (track->par->codec_id == AV_CODEC_ID_DVD_SUBTITLE) + mov_write_esds_tag(pb, track); + else if (track->par->extradata_size) + avio_write(pb, track->par->extradata, track->par->extradata_size); + + return update_size(pb, pos); +} + +static int mov_write_pasp_tag(AVIOContext *pb, MOVTrack *track) +{ + AVRational sar; + av_reduce(&sar.num, &sar.den, track->par->sample_aspect_ratio.num, + track->par->sample_aspect_ratio.den, INT_MAX); + + avio_wb32(pb, 16); + ffio_wfourcc(pb, "pasp"); + avio_wb32(pb, sar.num); + avio_wb32(pb, sar.den); + return 16; +} + +static int mov_write_gama_tag(AVIOContext *pb, MOVTrack *track, double gamma) +{ + uint32_t gama = 0; + if (gamma <= 0.0) + { + gamma = avpriv_get_gamma_from_trc(track->par->color_trc); + } + av_log(pb, AV_LOG_DEBUG, "gamma value %g\n", gamma); + + if (gamma > 1e-6) { + gama = (uint32_t)lrint((double)(1<<16) * gamma); 
+ av_log(pb, AV_LOG_DEBUG, "writing gama value %d\n", gama); + + av_assert0(track->mode == MODE_MOV); + avio_wb32(pb, 12); + ffio_wfourcc(pb, "gama"); + avio_wb32(pb, gama); + return 12; + } + else { + av_log(pb, AV_LOG_WARNING, "gamma value unknown, unable to write gama atom\n"); + } + return 0; +} + +static int mov_write_colr_tag(AVIOContext *pb, MOVTrack *track) +{ + // Ref (MOV): https://developer.apple.com/library/mac/technotes/tn2162/_index.html#//apple_ref/doc/uid/DTS40013070-CH1-TNTAG9 + // Ref (MP4): ISO/IEC 14496-12:2012 + + if (track->par->color_primaries == AVCOL_PRI_UNSPECIFIED && + track->par->color_trc == AVCOL_TRC_UNSPECIFIED && + track->par->color_space == AVCOL_SPC_UNSPECIFIED) { + if ((track->par->width >= 1920 && track->par->height >= 1080) + || (track->par->width == 1280 && track->par->height == 720)) { + av_log(NULL, AV_LOG_WARNING, "color primaries unspecified, assuming bt709\n"); + track->par->color_primaries = AVCOL_PRI_BT709; + } else if (track->par->width == 720 && track->height == 576) { + av_log(NULL, AV_LOG_WARNING, "color primaries unspecified, assuming bt470bg\n"); + track->par->color_primaries = AVCOL_PRI_BT470BG; + } else if (track->par->width == 720 && + (track->height == 486 || track->height == 480)) { + av_log(NULL, AV_LOG_WARNING, "color primaries unspecified, assuming smpte170\n"); + track->par->color_primaries = AVCOL_PRI_SMPTE170M; + } else { + av_log(NULL, AV_LOG_WARNING, "color primaries unspecified, unable to assume anything\n"); + } + switch (track->par->color_primaries) { + case AVCOL_PRI_BT709: + track->par->color_trc = AVCOL_TRC_BT709; + track->par->color_space = AVCOL_SPC_BT709; + break; + case AVCOL_PRI_SMPTE170M: + case AVCOL_PRI_BT470BG: + track->par->color_trc = AVCOL_TRC_BT709; + track->par->color_space = AVCOL_SPC_SMPTE170M; + break; + } + } + + /* We should only ever be called by MOV or MP4. 
*/ + av_assert0(track->mode == MODE_MOV || track->mode == MODE_MP4); + + avio_wb32(pb, 18 + (track->mode == MODE_MP4)); + ffio_wfourcc(pb, "colr"); + if (track->mode == MODE_MP4) + ffio_wfourcc(pb, "nclx"); + else + ffio_wfourcc(pb, "nclc"); + switch (track->par->color_primaries) { + case AVCOL_PRI_BT709: avio_wb16(pb, 1); break; + case AVCOL_PRI_SMPTE170M: + case AVCOL_PRI_SMPTE240M: avio_wb16(pb, 6); break; + case AVCOL_PRI_BT470BG: avio_wb16(pb, 5); break; + default: avio_wb16(pb, 2); + } + switch (track->par->color_trc) { + case AVCOL_TRC_BT709: avio_wb16(pb, 1); break; + case AVCOL_TRC_SMPTE170M: avio_wb16(pb, 1); break; // remapped + case AVCOL_TRC_SMPTE240M: avio_wb16(pb, 7); break; + default: avio_wb16(pb, 2); + } + switch (track->par->color_space) { + case AVCOL_SPC_BT709: avio_wb16(pb, 1); break; + case AVCOL_SPC_BT470BG: + case AVCOL_SPC_SMPTE170M: avio_wb16(pb, 6); break; + case AVCOL_SPC_SMPTE240M: avio_wb16(pb, 7); break; + default: avio_wb16(pb, 2); + } + + if (track->mode == MODE_MP4) { + int full_range = track->par->color_range == AVCOL_RANGE_JPEG; + avio_w8(pb, full_range << 7); + return 19; + } else { + return 18; + } +} + +static void find_compressor(char * compressor_name, int len, MOVTrack *track) +{ + AVDictionaryEntry *encoder; + int xdcam_res = (track->par->width == 1280 && track->par->height == 720) + || (track->par->width == 1440 && track->par->height == 1080) + || (track->par->width == 1920 && track->par->height == 1080); + + if (track->mode == MODE_MOV && + (encoder = av_dict_get(track->st->metadata, "encoder", NULL, 0))) { + av_strlcpy(compressor_name, encoder->value, 32); + } else if (track->par->codec_id == AV_CODEC_ID_MPEG2VIDEO && xdcam_res) { + int interlaced = track->par->field_order > AV_FIELD_PROGRESSIVE; + AVStream *st = track->st; + int rate = av_q2d(find_fps(NULL, st)); + av_strlcatf(compressor_name, len, "XDCAM"); + if (track->par->format == AV_PIX_FMT_YUV422P) { + av_strlcatf(compressor_name, len, " HD422"); + } else 
if(track->par->width == 1440) { + av_strlcatf(compressor_name, len, " HD"); + } else + av_strlcatf(compressor_name, len, " EX"); + + av_strlcatf(compressor_name, len, " %d%c", track->par->height, interlaced ? 'i' : 'p'); + + av_strlcatf(compressor_name, len, "%d", rate * (interlaced + 1)); + } +} + +static int mov_write_video_tag(AVIOContext *pb, MOVMuxContext *mov, MOVTrack *track) +{ + int64_t pos = avio_tell(pb); + char compressor_name[32] = { 0 }; + int avid = 0; + + avio_wb32(pb, 0); /* size */ + if (mov->encryption_scheme != MOV_ENC_NONE) { + ffio_wfourcc(pb, "encv"); + } else { + avio_wl32(pb, track->tag); // store it byteswapped + } + avio_wb32(pb, 0); /* Reserved */ + avio_wb16(pb, 0); /* Reserved */ + avio_wb16(pb, 1); /* Data-reference index */ + + avio_wb16(pb, 0); /* Codec stream version */ + avio_wb16(pb, 0); /* Codec stream revision (=0) */ + if (track->mode == MODE_MOV) { + ffio_wfourcc(pb, "FFMP"); /* Vendor */ + if (track->par->codec_id == AV_CODEC_ID_RAWVIDEO) { + avio_wb32(pb, 0); /* Temporal Quality */ + avio_wb32(pb, 0x400); /* Spatial Quality = lossless*/ + } else { + avio_wb32(pb, 0x200); /* Temporal Quality = normal */ + avio_wb32(pb, 0x200); /* Spatial Quality = normal */ + } + } else { + avio_wb32(pb, 0); /* Reserved */ + avio_wb32(pb, 0); /* Reserved */ + avio_wb32(pb, 0); /* Reserved */ + } + avio_wb16(pb, track->par->width); /* Video width */ + avio_wb16(pb, track->height); /* Video height */ + avio_wb32(pb, 0x00480000); /* Horizontal resolution 72dpi */ + avio_wb32(pb, 0x00480000); /* Vertical resolution 72dpi */ + avio_wb32(pb, 0); /* Data size (= 0) */ + avio_wb16(pb, 1); /* Frame count (= 1) */ + + /* FIXME not sure, ISO 14496-1 draft where it shall be set to 0 */ + find_compressor(compressor_name, 32, track); + avio_w8(pb, strlen(compressor_name)); + avio_write(pb, compressor_name, 31); + + if (track->mode == MODE_MOV && track->par->bits_per_coded_sample) + avio_wb16(pb, track->par->bits_per_coded_sample | + (track->par->format == 
AV_PIX_FMT_GRAY8 ? 0x20 : 0)); + else + avio_wb16(pb, 0x18); /* Reserved */ + + if (track->mode == MODE_MOV && track->par->format == AV_PIX_FMT_PAL8) { + int pal_size = 1 << track->par->bits_per_coded_sample; + int i; + avio_wb16(pb, 0); /* Color table ID */ + avio_wb32(pb, 0); /* Color table seed */ + avio_wb16(pb, 0x8000); /* Color table flags */ + avio_wb16(pb, pal_size - 1); /* Color table size (zero-relative) */ + for (i = 0; i < pal_size; i++) { + uint32_t rgb = track->palette[i]; + uint16_t r = (rgb >> 16) & 0xff; + uint16_t g = (rgb >> 8) & 0xff; + uint16_t b = rgb & 0xff; + avio_wb16(pb, 0); + avio_wb16(pb, (r << 8) | r); + avio_wb16(pb, (g << 8) | g); + avio_wb16(pb, (b << 8) | b); + } + } else + avio_wb16(pb, 0xffff); /* Reserved */ + + if (track->tag == MKTAG('m','p','4','v')) + mov_write_esds_tag(pb, track); + else if (track->par->codec_id == AV_CODEC_ID_H263) + mov_write_d263_tag(pb); + else if (track->par->codec_id == AV_CODEC_ID_AVUI || + track->par->codec_id == AV_CODEC_ID_SVQ3) { + mov_write_extradata_tag(pb, track); + avio_wb32(pb, 0); + } else if (track->par->codec_id == AV_CODEC_ID_DNXHD) { + mov_write_avid_tag(pb, track); + avid = 1; + } else if (track->par->codec_id == AV_CODEC_ID_HEVC) + mov_write_hvcc_tag(pb, track); + else if (track->par->codec_id == AV_CODEC_ID_H264 && !TAG_IS_AVCI(track->tag)) { + mov_write_avcc_tag(pb, track); + if (track->mode == MODE_IPOD) + mov_write_uuid_tag_ipod(pb); + } else if (track->par->codec_id == AV_CODEC_ID_VP9) { + mov_write_vpcc_tag(mov->fc, pb, track); + } else if (track->par->codec_id == AV_CODEC_ID_VC1 && track->vos_len > 0) + mov_write_dvc1_tag(pb, track); + else if (track->par->codec_id == AV_CODEC_ID_VP6F || + track->par->codec_id == AV_CODEC_ID_VP6A) { + /* Don't write any potential extradata here - the cropping + * is signalled via the normal width/height fields. 
*/ + } else if (track->par->codec_id == AV_CODEC_ID_R10K) { + if (track->par->codec_tag == MKTAG('R','1','0','k')) + mov_write_dpxe_tag(pb, track); + } else if (track->vos_len > 0) + mov_write_glbl_tag(pb, track); + + if (track->par->codec_id != AV_CODEC_ID_H264 && + track->par->codec_id != AV_CODEC_ID_MPEG4 && + track->par->codec_id != AV_CODEC_ID_DNXHD) { + int field_order = track->par->field_order; + +#if FF_API_LAVF_AVCTX + FF_DISABLE_DEPRECATION_WARNINGS + if (field_order != track->st->codec->field_order && track->st->codec->field_order != AV_FIELD_UNKNOWN) + field_order = track->st->codec->field_order; + FF_ENABLE_DEPRECATION_WARNINGS +#endif + + if (field_order != AV_FIELD_UNKNOWN) + mov_write_fiel_tag(pb, track, field_order); + } + + if (mov->flags & FF_MOV_FLAG_WRITE_GAMA) { + if (track->mode == MODE_MOV) + mov_write_gama_tag(pb, track, mov->gamma); + else + av_log(mov->fc, AV_LOG_WARNING, "Not writing 'gama' atom. Format is not MOV.\n"); + } + if (mov->flags & FF_MOV_FLAG_WRITE_COLR) { + if (track->mode == MODE_MOV || track->mode == MODE_MP4) + mov_write_colr_tag(pb, track); + else + av_log(mov->fc, AV_LOG_WARNING, "Not writing 'colr' atom. 
Format is not MOV or MP4.\n"); + } + + if (track->par->sample_aspect_ratio.den && track->par->sample_aspect_ratio.num) { + mov_write_pasp_tag(pb, track); + } + + if (mov->encryption_scheme != MOV_ENC_NONE) { + ff_mov_cenc_write_sinf_tag(track, pb, mov->encryption_kid); + } + + /* extra padding for avid stsd */ + /* https://developer.apple.com/library/mac/documentation/QuickTime/QTFF/QTFFChap2/qtff2.html#//apple_ref/doc/uid/TP40000939-CH204-61112 */ + if (avid) + avio_wb32(pb, 0); + + return update_size(pb, pos); +} + +static int mov_write_rtp_tag(AVIOContext *pb, MOVTrack *track) +{ + int64_t pos = avio_tell(pb); + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "rtp "); + avio_wb32(pb, 0); /* Reserved */ + avio_wb16(pb, 0); /* Reserved */ + avio_wb16(pb, 1); /* Data-reference index */ + + avio_wb16(pb, 1); /* Hint track version */ + avio_wb16(pb, 1); /* Highest compatible version */ + avio_wb32(pb, track->max_packet_size); /* Max packet size */ + + avio_wb32(pb, 12); /* size */ + ffio_wfourcc(pb, "tims"); + avio_wb32(pb, track->timescale); + + return update_size(pb, pos); +} + +static int mov_write_source_reference_tag(AVIOContext *pb, MOVTrack *track, const char *reel_name) +{ + uint64_t str_size =strlen(reel_name); + int64_t pos = avio_tell(pb); + + if (str_size >= UINT16_MAX){ + av_log(NULL, AV_LOG_ERROR, "reel_name length %"PRIu64" is too large\n", str_size); + avio_wb16(pb, 0); + return AVERROR(EINVAL); + } + + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "name"); /* Data format */ + avio_wb16(pb, str_size); /* string size */ + avio_wb16(pb, track->language); /* langcode */ + avio_write(pb, reel_name, str_size); /* reel name */ + return update_size(pb,pos); +} + +static int mov_write_tmcd_tag(AVIOContext *pb, MOVTrack *track) +{ + int64_t pos = avio_tell(pb); +#if 1 + int frame_duration; + int nb_frames; + AVDictionaryEntry *t = NULL; + + if (!track->st->avg_frame_rate.num || !track->st->avg_frame_rate.den) { +#if FF_API_LAVF_AVCTX + 
FF_DISABLE_DEPRECATION_WARNINGS + frame_duration = av_rescale(track->timescale, track->st->codec->time_base.num, track->st->codec->time_base.den); + nb_frames = ROUNDED_DIV(track->st->codec->time_base.den, track->st->codec->time_base.num); + FF_ENABLE_DEPRECATION_WARNINGS +#else + av_log(NULL, AV_LOG_ERROR, "avg_frame_rate not set for tmcd track.\n"); + return AVERROR(EINVAL); +#endif + } else { + frame_duration = av_rescale(track->timescale, track->st->avg_frame_rate.num, track->st->avg_frame_rate.den); + nb_frames = ROUNDED_DIV(track->st->avg_frame_rate.den, track->st->avg_frame_rate.num); + } + + if (nb_frames > 255) { + av_log(NULL, AV_LOG_ERROR, "fps %d is too large\n", nb_frames); + return AVERROR(EINVAL); + } + + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "tmcd"); /* Data format */ + avio_wb32(pb, 0); /* Reserved */ + avio_wb32(pb, 1); /* Data reference index */ + avio_wb32(pb, 0); /* Flags */ + avio_wb32(pb, track->timecode_flags); /* Flags (timecode) */ + avio_wb32(pb, track->timescale); /* Timescale */ + avio_wb32(pb, frame_duration); /* Frame duration */ + avio_w8(pb, nb_frames); /* Number of frames */ + avio_w8(pb, 0); /* Reserved */ + + t = av_dict_get(track->st->metadata, "reel_name", NULL, 0); + if (t && utf8len(t->value) && track->mode != MODE_MP4) + mov_write_source_reference_tag(pb, track, t->value); + else + avio_wb16(pb, 0); /* zero size */ +#else + + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "tmcd"); /* Data format */ + avio_wb32(pb, 0); /* Reserved */ + avio_wb32(pb, 1); /* Data reference index */ + if (track->par->extradata_size) + avio_write(pb, track->par->extradata, track->par->extradata_size); +#endif + return update_size(pb, pos); +} + +static int mov_write_stsd_tag(AVFormatContext *s, AVIOContext *pb, MOVMuxContext *mov, MOVTrack *track) +{ + int64_t pos = avio_tell(pb); + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "stsd"); + avio_wb32(pb, 0); /* version & flags */ + avio_wb32(pb, 1); /* entry count */ + if 
(track->par->codec_type == AVMEDIA_TYPE_VIDEO)
        mov_write_video_tag(pb, mov, track);
    else if (track->par->codec_type == AVMEDIA_TYPE_AUDIO)
        mov_write_audio_tag(s, pb, mov, track);
    else if (track->par->codec_type == AVMEDIA_TYPE_SUBTITLE)
        mov_write_subtitle_tag(pb, track);
    else if (track->par->codec_tag == MKTAG('r','t','p',' '))
        mov_write_rtp_tag(pb, track);
    else if (track->par->codec_tag == MKTAG('t','m','c','d'))
        mov_write_tmcd_tag(pb, track);
    return update_size(pb, pos);
}

/* Write the 'ctts' (composition time to sample) atom: a run-length encoded
 * list of per-sample cts (pts-dts) offsets.
 * NOTE(review): reads track->cluster[0] unconditionally, so this must only
 * be called when track->entry > 0 — the stbl writer guards the call with
 * (track->flags & MOV_TRACK_CTTS && track->entry). */
static int mov_write_ctts_tag(AVIOContext *pb, MOVTrack *track)
{
    MOVStts *ctts_entries;
    uint32_t entries = 0;
    uint32_t atom_size;
    int i;

    /* worst case: every sample has a distinct cts offset, plus one spare */
    ctts_entries = av_malloc_array((track->entry + 1), sizeof(*ctts_entries)); /* worst case */
    if (!ctts_entries)
        return AVERROR(ENOMEM);
    ctts_entries[0].count = 1;
    ctts_entries[0].duration = track->cluster[0].cts;
    for (i = 1; i < track->entry; i++) {
        if (track->cluster[i].cts == ctts_entries[entries].duration) {
            ctts_entries[entries].count++; /* compress */
        } else {
            entries++;
            ctts_entries[entries].duration = track->cluster[i].cts;
            ctts_entries[entries].count = 1;
        }
    }
    entries++; /* last one */
    atom_size = 16 + (entries * 8);
    avio_wb32(pb, atom_size); /* size */
    ffio_wfourcc(pb, "ctts");
    avio_wb32(pb, 0); /* version & flags */
    avio_wb32(pb, entries); /* entry count */
    for (i = 0; i < entries; i++) {
        avio_wb32(pb, ctts_entries[i].count);
        avio_wb32(pb, ctts_entries[i].duration);
    }
    av_free(ctts_entries);
    return atom_size;
}

/* Time to sample atom */
static int mov_write_stts_tag(AVIOContext *pb, MOVTrack *track)
{
    MOVStts *stts_entries = NULL;
    /* -1 on purpose: the VBR path below pre-increments before first write */
    uint32_t entries = -1;
    uint32_t atom_size;
    int i;

    if (track->par->codec_type == AVMEDIA_TYPE_AUDIO && !track->audio_vbr) {
        /* CBR audio: one entry of duration 1 covers every sample */
        stts_entries = av_malloc(sizeof(*stts_entries)); /* one entry */
        if (!stts_entries)
            return AVERROR(ENOMEM);
        stts_entries[0].count = track->sample_count;
        stts_entries[0].duration = 1;
entries = 1; + } else { + if (track->entry) { + stts_entries = av_malloc_array(track->entry, sizeof(*stts_entries)); /* worst case */ + if (!stts_entries) + return AVERROR(ENOMEM); + } + for (i = 0; i < track->entry; i++) { + int duration = get_cluster_duration(track, i); + if (i && duration == stts_entries[entries].duration) { + stts_entries[entries].count++; /* compress */ + } else { + entries++; + stts_entries[entries].duration = duration; + stts_entries[entries].count = 1; + } + } + entries++; /* last one */ + } + atom_size = 16 + (entries * 8); + avio_wb32(pb, atom_size); /* size */ + ffio_wfourcc(pb, "stts"); + avio_wb32(pb, 0); /* version & flags */ + avio_wb32(pb, entries); /* entry count */ + for (i = 0; i < entries; i++) { + avio_wb32(pb, stts_entries[i].count); + avio_wb32(pb, stts_entries[i].duration); + } + av_free(stts_entries); + return atom_size; +} + +static int mov_write_dref_tag(AVIOContext *pb) +{ + avio_wb32(pb, 28); /* size */ + ffio_wfourcc(pb, "dref"); + avio_wb32(pb, 0); /* version & flags */ + avio_wb32(pb, 1); /* entry count */ + + avio_wb32(pb, 0xc); /* size */ + //FIXME add the alis and rsrc atom + ffio_wfourcc(pb, "url "); + avio_wb32(pb, 1); /* version & flags */ + + return 28; +} + +static int mov_write_stbl_tag(AVFormatContext *s, AVIOContext *pb, MOVMuxContext *mov, MOVTrack *track) +{ + int64_t pos = avio_tell(pb); + int ret; + + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "stbl"); + mov_write_stsd_tag(s, pb, mov, track); + mov_write_stts_tag(pb, track); + if ((track->par->codec_type == AVMEDIA_TYPE_VIDEO || + track->par->codec_tag == MKTAG('r','t','p',' ')) && + track->has_keyframes && track->has_keyframes < track->entry) + mov_write_stss_tag(pb, track, MOV_SYNC_SAMPLE); + if (track->mode == MODE_MOV && track->flags & MOV_TRACK_STPS) + mov_write_stss_tag(pb, track, MOV_PARTIAL_SYNC_SAMPLE); + if (track->par->codec_type == AVMEDIA_TYPE_VIDEO && + track->flags & MOV_TRACK_CTTS && track->entry) { + + if ((ret = 
mov_write_ctts_tag(pb, track)) < 0)
            return ret;
    }
    mov_write_stsc_tag(pb, track);
    mov_write_stsz_tag(pb, track);
    mov_write_stco_tag(pb, track);
    if (mov->encryption_scheme == MOV_ENC_CENC_AES_CTR) {
        ff_mov_cenc_write_stbl_atoms(&track->cenc, pb);
    }
    return update_size(pb, pos);
}

/* Write the 'dinf' (data information) container around the 'dref' atom. */
static int mov_write_dinf_tag(AVIOContext *pb)
{
    int64_t pos = avio_tell(pb);
    avio_wb32(pb, 0); /* size */
    ffio_wfourcc(pb, "dinf");
    mov_write_dref_tag(pb);
    return update_size(pb, pos);
}

/* Write an empty 'nmhd' (null media header); returns the fixed size (12). */
static int mov_write_nmhd_tag(AVIOContext *pb)
{
    avio_wb32(pb, 12);
    ffio_wfourcc(pb, "nmhd");
    avio_wb32(pb, 0);
    return 12;
}

/* Write the 'tcmi' (timecode media information) atom: text styling used by
 * QuickTime when rendering the timecode (black text, white background). */
static int mov_write_tcmi_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
    const char *font = "Lucida Grande";
    avio_wb32(pb, 0); /* size */
    ffio_wfourcc(pb, "tcmi"); /* timecode media information atom */
    avio_wb32(pb, 0); /* version & flags */
    avio_wb16(pb, 0); /* text font */
    avio_wb16(pb, 0); /* text face */
    avio_wb16(pb, 12); /* text size */
    avio_wb16(pb, 0); /* (unknown, not in the QT specs...) */
    avio_wb16(pb, 0x0000); /* text color (red) */
    avio_wb16(pb, 0x0000); /* text color (green) */
    avio_wb16(pb, 0x0000); /* text color (blue) */
    avio_wb16(pb, 0xffff); /* background color (red) */
    avio_wb16(pb, 0xffff); /* background color (green) */
    avio_wb16(pb, 0xffff); /* background color (blue) */
    avio_w8(pb, strlen(font)); /* font len (part of the pascal string) */
    avio_write(pb, font, strlen(font)); /* font name */
    return update_size(pb, pos);
}

/* Write the 'gmhd' (generic media header) atom with its 'gmin' sub-atom,
 * plus the verbatim 'text' sub-atom required for QuickTime chapter tracks
 * and a 'tmcd'/'tcmi' sub-atom for timecode tracks. */
static int mov_write_gmhd_tag(AVIOContext *pb, MOVTrack *track)
{
    int64_t pos = avio_tell(pb);
    avio_wb32(pb, 0); /* size */
    ffio_wfourcc(pb, "gmhd");
    avio_wb32(pb, 0x18); /* gmin size */
    ffio_wfourcc(pb, "gmin");/* generic media info */
    avio_wb32(pb, 0); /* version & flags */
    avio_wb16(pb, 0x40); /* graphics mode = */
    avio_wb16(pb, 0x8000); /* opColor (r?) */
    avio_wb16(pb, 0x8000); /* opColor (g?)
*/ + avio_wb16(pb, 0x8000); /* opColor (b?) */ + avio_wb16(pb, 0); /* balance */ + avio_wb16(pb, 0); /* reserved */ + + /* + * This special text atom is required for + * Apple Quicktime chapters. The contents + * don't appear to be documented, so the + * bytes are copied verbatim. + */ + if (track->tag != MKTAG('c','6','0','8')) { + avio_wb32(pb, 0x2C); /* size */ + ffio_wfourcc(pb, "text"); + avio_wb16(pb, 0x01); + avio_wb32(pb, 0x00); + avio_wb32(pb, 0x00); + avio_wb32(pb, 0x00); + avio_wb32(pb, 0x01); + avio_wb32(pb, 0x00); + avio_wb32(pb, 0x00); + avio_wb32(pb, 0x00); + avio_wb32(pb, 0x00004000); + avio_wb16(pb, 0x0000); + } + + if (track->par->codec_tag == MKTAG('t','m','c','d')) { + int64_t tmcd_pos = avio_tell(pb); + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "tmcd"); + mov_write_tcmi_tag(pb, track); + update_size(pb, tmcd_pos); + } + return update_size(pb, pos); +} + +static int mov_write_smhd_tag(AVIOContext *pb) +{ + avio_wb32(pb, 16); /* size */ + ffio_wfourcc(pb, "smhd"); + avio_wb32(pb, 0); /* version & flags */ + avio_wb16(pb, 0); /* reserved (balance, normally = 0) */ + avio_wb16(pb, 0); /* reserved */ + return 16; +} + +static int mov_write_vmhd_tag(AVIOContext *pb) +{ + avio_wb32(pb, 0x14); /* size (always 0x14) */ + ffio_wfourcc(pb, "vmhd"); + avio_wb32(pb, 0x01); /* version & flags */ + avio_wb64(pb, 0); /* reserved (graphics mode = copy) */ + return 0x14; +} + +static int is_clcp_track(MOVTrack *track) +{ + return track->tag == MKTAG('c','7','0','8') || + track->tag == MKTAG('c','6','0','8'); +} + +static int mov_write_hdlr_tag(AVFormatContext *s, AVIOContext *pb, MOVTrack *track) +{ + const char *hdlr, *descr = NULL, *hdlr_type = NULL; + int64_t pos = avio_tell(pb); + + hdlr = "dhlr"; + hdlr_type = "url "; + descr = "DataHandler"; + + if (track) { + hdlr = (track->mode == MODE_MOV) ? 
"mhlr" : "\0\0\0\0"; + if (track->par->codec_type == AVMEDIA_TYPE_VIDEO) { + hdlr_type = "vide"; + descr = "VideoHandler"; + } else if (track->par->codec_type == AVMEDIA_TYPE_AUDIO) { + hdlr_type = "soun"; + descr = "SoundHandler"; + } else if (track->par->codec_type == AVMEDIA_TYPE_SUBTITLE) { + if (is_clcp_track(track)) { + hdlr_type = "clcp"; + descr = "ClosedCaptionHandler"; + } else { + if (track->tag == MKTAG('t','x','3','g')) { + hdlr_type = "sbtl"; + } else if (track->tag == MKTAG('m','p','4','s')) { + hdlr_type = "subp"; + } else { + hdlr_type = "text"; + } + descr = "SubtitleHandler"; + } + } else if (track->par->codec_tag == MKTAG('r','t','p',' ')) { + hdlr_type = "hint"; + descr = "HintHandler"; + } else if (track->par->codec_tag == MKTAG('t','m','c','d')) { + hdlr_type = "tmcd"; + descr = "TimeCodeHandler"; + } else { + char tag_buf[32]; + av_get_codec_tag_string(tag_buf, sizeof(tag_buf), + track->par->codec_tag); + + av_log(s, AV_LOG_WARNING, + "Unknown hldr_type for %s / 0x%04X, writing dummy values\n", + tag_buf, track->par->codec_tag); + } + if (track->st) { + // hdlr.name is used by some players to identify the content title + // of the track. So if an alternate handler description is + // specified, use it. 
+ AVDictionaryEntry *t; + t = av_dict_get(track->st->metadata, "handler", NULL, 0); + if (t && utf8len(t->value)) + descr = t->value; + } + } + + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "hdlr"); + avio_wb32(pb, 0); /* Version & flags */ + avio_write(pb, hdlr, 4); /* handler */ + ffio_wfourcc(pb, hdlr_type); /* handler type */ + avio_wb32(pb, 0); /* reserved */ + avio_wb32(pb, 0); /* reserved */ + avio_wb32(pb, 0); /* reserved */ + if (!track || track->mode == MODE_MOV) + avio_w8(pb, strlen(descr)); /* pascal string */ + avio_write(pb, descr, strlen(descr)); /* handler description */ + if (track && track->mode != MODE_MOV) + avio_w8(pb, 0); /* c string */ + return update_size(pb, pos); +} + +static int mov_write_hmhd_tag(AVIOContext *pb) +{ + /* This atom must be present, but leaving the values at zero + * seems harmless. */ + avio_wb32(pb, 28); /* size */ + ffio_wfourcc(pb, "hmhd"); + avio_wb32(pb, 0); /* version, flags */ + avio_wb16(pb, 0); /* maxPDUsize */ + avio_wb16(pb, 0); /* avgPDUsize */ + avio_wb32(pb, 0); /* maxbitrate */ + avio_wb32(pb, 0); /* avgbitrate */ + avio_wb32(pb, 0); /* reserved */ + return 28; +} + +static int mov_write_minf_tag(AVFormatContext *s, AVIOContext *pb, MOVMuxContext *mov, MOVTrack *track) +{ + int64_t pos = avio_tell(pb); + int ret; + + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "minf"); + if (track->par->codec_type == AVMEDIA_TYPE_VIDEO) + mov_write_vmhd_tag(pb); + else if (track->par->codec_type == AVMEDIA_TYPE_AUDIO) + mov_write_smhd_tag(pb); + else if (track->par->codec_type == AVMEDIA_TYPE_SUBTITLE) { + if (track->tag == MKTAG('t','e','x','t') || is_clcp_track(track)) { + mov_write_gmhd_tag(pb, track); + } else { + mov_write_nmhd_tag(pb); + } + } else if (track->tag == MKTAG('r','t','p',' ')) { + mov_write_hmhd_tag(pb); + } else if (track->tag == MKTAG('t','m','c','d')) { + if (track->mode != MODE_MOV) + mov_write_nmhd_tag(pb); + else + mov_write_gmhd_tag(pb, track); + } + if (track->mode == MODE_MOV) /* FIXME: 
Why do it for MODE_MOV only ? */ + mov_write_hdlr_tag(s, pb, NULL); + mov_write_dinf_tag(pb); + if ((ret = mov_write_stbl_tag(s, pb, mov, track)) < 0) + return ret; + return update_size(pb, pos); +} + +static int mov_write_mdhd_tag(AVIOContext *pb, MOVMuxContext *mov, + MOVTrack *track) +{ + int version = track->track_duration < INT32_MAX ? 0 : 1; + + if (track->mode == MODE_ISM) + version = 1; + + (version == 1) ? avio_wb32(pb, 44) : avio_wb32(pb, 32); /* size */ + ffio_wfourcc(pb, "mdhd"); + avio_w8(pb, version); + avio_wb24(pb, 0); /* flags */ + if (version == 1) { + avio_wb64(pb, track->time); + avio_wb64(pb, track->time); + } else { + avio_wb32(pb, track->time); /* creation time */ + avio_wb32(pb, track->time); /* modification time */ + } + avio_wb32(pb, track->timescale); /* time scale (sample rate for audio) */ + if (!track->entry && mov->mode == MODE_ISM) + (version == 1) ? avio_wb64(pb, UINT64_C(0xffffffffffffffff)) : avio_wb32(pb, 0xffffffff); + else if (!track->entry) + (version == 1) ? avio_wb64(pb, 0) : avio_wb32(pb, 0); + else + (version == 1) ? avio_wb64(pb, track->track_duration) : avio_wb32(pb, track->track_duration); /* duration */ + avio_wb16(pb, track->language); /* language */ + avio_wb16(pb, 0); /* reserved (quality) */ + + if (version != 0 && track->mode == MODE_MOV) { + av_log(NULL, AV_LOG_ERROR, + "FATAL error, file duration too long for timebase, this file will not be\n" + "playable with quicktime. 
Choose a different timebase or a different\n" + "container format\n"); + } + + return 32; +} + +static int mov_write_mdia_tag(AVFormatContext *s, AVIOContext *pb, + MOVMuxContext *mov, MOVTrack *track) +{ + int64_t pos = avio_tell(pb); + int ret; + + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "mdia"); + mov_write_mdhd_tag(pb, mov, track); + mov_write_hdlr_tag(s, pb, track); + if ((ret = mov_write_minf_tag(s, pb, mov, track)) < 0) + return ret; + return update_size(pb, pos); +} + +/* transformation matrix + |a b u| + |c d v| + |tx ty w| */ +static void write_matrix(AVIOContext *pb, int16_t a, int16_t b, int16_t c, + int16_t d, int16_t tx, int16_t ty) +{ + avio_wb32(pb, a << 16); /* 16.16 format */ + avio_wb32(pb, b << 16); /* 16.16 format */ + avio_wb32(pb, 0); /* u in 2.30 format */ + avio_wb32(pb, c << 16); /* 16.16 format */ + avio_wb32(pb, d << 16); /* 16.16 format */ + avio_wb32(pb, 0); /* v in 2.30 format */ + avio_wb32(pb, tx << 16); /* 16.16 format */ + avio_wb32(pb, ty << 16); /* 16.16 format */ + avio_wb32(pb, 1 << 30); /* w in 2.30 format */ +} + +static int mov_write_tkhd_tag(AVIOContext *pb, MOVMuxContext *mov, + MOVTrack *track, AVStream *st) +{ + int64_t duration = av_rescale_rnd(track->track_duration, MOV_TIMESCALE, + track->timescale, AV_ROUND_UP); + int version = duration < INT32_MAX ? 0 : 1; + int flags = MOV_TKHD_FLAG_IN_MOVIE; + int rotation = 0; + int group = 0; + + uint32_t *display_matrix = NULL; + int display_matrix_size, i; + + if (st) { + if (mov->per_stream_grouping) + group = st->index; + else + group = st->codecpar->codec_type; + + display_matrix = (uint32_t*)av_stream_get_side_data(st, AV_PKT_DATA_DISPLAYMATRIX, + &display_matrix_size); + if (display_matrix && display_matrix_size < 9 * sizeof(*display_matrix)) + display_matrix = NULL; + } + + if (track->flags & MOV_TRACK_ENABLED) + flags |= MOV_TKHD_FLAG_ENABLED; + + if (track->mode == MODE_ISM) + version = 1; + + (version == 1) ? 
avio_wb32(pb, 104) : avio_wb32(pb, 92); /* size */ + ffio_wfourcc(pb, "tkhd"); + avio_w8(pb, version); + avio_wb24(pb, flags); + if (version == 1) { + avio_wb64(pb, track->time); + avio_wb64(pb, track->time); + } else { + avio_wb32(pb, track->time); /* creation time */ + avio_wb32(pb, track->time); /* modification time */ + } + avio_wb32(pb, track->track_id); /* track-id */ + avio_wb32(pb, 0); /* reserved */ + if (!track->entry && mov->mode == MODE_ISM) + (version == 1) ? avio_wb64(pb, UINT64_C(0xffffffffffffffff)) : avio_wb32(pb, 0xffffffff); + else if (!track->entry) + (version == 1) ? avio_wb64(pb, 0) : avio_wb32(pb, 0); + else + (version == 1) ? avio_wb64(pb, duration) : avio_wb32(pb, duration); + + avio_wb32(pb, 0); /* reserved */ + avio_wb32(pb, 0); /* reserved */ + avio_wb16(pb, 0); /* layer */ + avio_wb16(pb, group); /* alternate group) */ + /* Volume, only for audio */ + if (track->par->codec_type == AVMEDIA_TYPE_AUDIO) + avio_wb16(pb, 0x0100); + else + avio_wb16(pb, 0); + avio_wb16(pb, 0); /* reserved */ + + /* Matrix structure */ + if (st && st->metadata) { + AVDictionaryEntry *rot = av_dict_get(st->metadata, "rotate", NULL, 0); + rotation = (rot && rot->value) ? 
atoi(rot->value) : 0; + } + if (display_matrix) { + for (i = 0; i < 9; i++) + avio_wb32(pb, display_matrix[i]); + } else if (rotation == 90) { + write_matrix(pb, 0, 1, -1, 0, track->par->height, 0); + } else if (rotation == 180) { + write_matrix(pb, -1, 0, 0, -1, track->par->width, track->par->height); + } else if (rotation == 270) { + write_matrix(pb, 0, -1, 1, 0, 0, track->par->width); + } else { + write_matrix(pb, 1, 0, 0, 1, 0, 0); + } + /* Track width and height, for visual only */ + if (st && (track->par->codec_type == AVMEDIA_TYPE_VIDEO || + track->par->codec_type == AVMEDIA_TYPE_SUBTITLE)) { + int64_t track_width_1616; + if (track->mode == MODE_MOV) { + track_width_1616 = track->par->width * 0x10000ULL; + } else { + track_width_1616 = av_rescale(st->sample_aspect_ratio.num, + track->par->width * 0x10000LL, + st->sample_aspect_ratio.den); + if (!track_width_1616 || + track->height != track->par->height || + track_width_1616 > UINT32_MAX) + track_width_1616 = track->par->width * 0x10000ULL; + } + if (track_width_1616 > UINT32_MAX) { + av_log(mov->fc, AV_LOG_WARNING, "track width is too large\n"); + track_width_1616 = 0; + } + avio_wb32(pb, track_width_1616); + if (track->height > 0xFFFF) { + av_log(mov->fc, AV_LOG_WARNING, "track height is too large\n"); + avio_wb32(pb, 0); + } else + avio_wb32(pb, track->height * 0x10000U); + } else { + avio_wb32(pb, 0); + avio_wb32(pb, 0); + } + return 0x5c; +} + +static int mov_write_tapt_tag(AVIOContext *pb, MOVTrack *track) +{ + int32_t width = av_rescale(track->par->sample_aspect_ratio.num, track->par->width, + track->par->sample_aspect_ratio.den); + + int64_t pos = avio_tell(pb); + + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "tapt"); + + avio_wb32(pb, 20); + ffio_wfourcc(pb, "clef"); + avio_wb32(pb, 0); + avio_wb32(pb, width << 16); + avio_wb32(pb, track->par->height << 16); + + avio_wb32(pb, 20); + ffio_wfourcc(pb, "prof"); + avio_wb32(pb, 0); + avio_wb32(pb, width << 16); + avio_wb32(pb, track->par->height << 
16); + + avio_wb32(pb, 20); + ffio_wfourcc(pb, "enof"); + avio_wb32(pb, 0); + avio_wb32(pb, track->par->width << 16); + avio_wb32(pb, track->par->height << 16); + + return update_size(pb, pos); +} + +// This box seems important for the psp playback ... without it the movie seems to hang +static int mov_write_edts_tag(AVIOContext *pb, MOVMuxContext *mov, + MOVTrack *track) +{ + int64_t duration = av_rescale_rnd(track->track_duration, MOV_TIMESCALE, + track->timescale, AV_ROUND_UP); + int version = duration < INT32_MAX ? 0 : 1; + int entry_size, entry_count, size; + int64_t delay, start_ct = track->start_cts; + int64_t start_dts = track->start_dts; + + if (track->entry) { + if (start_dts != track->cluster[0].dts || start_ct != track->cluster[0].cts) { + + av_log(mov->fc, AV_LOG_DEBUG, + "EDTS using dts:%"PRId64" cts:%d instead of dts:%"PRId64" cts:%"PRId64" tid:%d\n", + track->cluster[0].dts, track->cluster[0].cts, + start_dts, start_ct, track->track_id); + start_dts = track->cluster[0].dts; + start_ct = track->cluster[0].cts; + } + } + + delay = av_rescale_rnd(start_dts + start_ct, MOV_TIMESCALE, + track->timescale, AV_ROUND_DOWN); + version |= delay < INT32_MAX ? 0 : 1; + + entry_size = (version == 1) ? 20 : 12; + entry_count = 1 + (delay > 0); + size = 24 + entry_count * entry_size; + + /* write the atom data */ + avio_wb32(pb, size); + ffio_wfourcc(pb, "edts"); + avio_wb32(pb, size - 8); + ffio_wfourcc(pb, "elst"); + avio_w8(pb, version); + avio_wb24(pb, 0); /* flags */ + + avio_wb32(pb, entry_count); + if (delay > 0) { /* add an empty edit to delay presentation */ + /* In the positive delay case, the delay includes the cts + * offset, and the second edit list entry below trims out + * the same amount from the actual content. This makes sure + * that the offset last sample is included in the edit + * list duration as well. 
*/ + if (version == 1) { + avio_wb64(pb, delay); + avio_wb64(pb, -1); + } else { + avio_wb32(pb, delay); + avio_wb32(pb, -1); + } + avio_wb32(pb, 0x00010000); + } else { + /* Avoid accidentally ending up with start_ct = -1 which has got a + * special meaning. Normally start_ct should end up positive or zero + * here, but use FFMIN in case dts is a small positive integer + * rounded to 0 when represented in MOV_TIMESCALE units. */ + av_assert0(av_rescale_rnd(start_dts, MOV_TIMESCALE, track->timescale, AV_ROUND_DOWN) <= 0); + start_ct = -FFMIN(start_dts, 0); + /* Note, this delay is calculated from the pts of the first sample, + * ensuring that we don't reduce the duration for cases with + * dts<0 pts=0. */ + duration += delay; + } + + /* For fragmented files, we don't know the full length yet. Setting + * duration to 0 allows us to only specify the offset, including + * the rest of the content (from all future fragments) without specifying + * an explicit duration. */ + if (mov->flags & FF_MOV_FLAG_FRAGMENT) + duration = 0; + + /* duration */ + if (version == 1) { + avio_wb64(pb, duration); + avio_wb64(pb, start_ct); + } else { + avio_wb32(pb, duration); + avio_wb32(pb, start_ct); + } + avio_wb32(pb, 0x00010000); + return size; +} + +static int mov_write_tref_tag(AVIOContext *pb, MOVTrack *track) +{ + avio_wb32(pb, 20); // size + ffio_wfourcc(pb, "tref"); + avio_wb32(pb, 12); // size (subatom) + avio_wl32(pb, track->tref_tag); + avio_wb32(pb, track->tref_id); + return 20; +} + +// goes at the end of each track! ... Critical for PSP playback ("Incompatible data" without it) +static int mov_write_uuid_tag_psp(AVIOContext *pb, MOVTrack *mov) +{ + avio_wb32(pb, 0x34); /* size ... reports as 28 in mp4box! */ + ffio_wfourcc(pb, "uuid"); + ffio_wfourcc(pb, "USMT"); + avio_wb32(pb, 0x21d24fce); + avio_wb32(pb, 0xbb88695c); + avio_wb32(pb, 0xfac9c740); + avio_wb32(pb, 0x1c); // another size here! 
+ ffio_wfourcc(pb, "MTDT"); + avio_wb32(pb, 0x00010012); + avio_wb32(pb, 0x0a); + avio_wb32(pb, 0x55c40000); + avio_wb32(pb, 0x1); + avio_wb32(pb, 0x0); + return 0x34; +} + +static int mov_write_udta_sdp(AVIOContext *pb, MOVTrack *track) +{ + AVFormatContext *ctx = track->rtp_ctx; + char buf[1000] = ""; + int len; + + ff_sdp_write_media(buf, sizeof(buf), ctx->streams[0], track->src_track, + NULL, NULL, 0, 0, ctx); + av_strlcatf(buf, sizeof(buf), "a=control:streamid=%d\r\n", track->track_id); + len = strlen(buf); + + avio_wb32(pb, len + 24); + ffio_wfourcc(pb, "udta"); + avio_wb32(pb, len + 16); + ffio_wfourcc(pb, "hnti"); + avio_wb32(pb, len + 8); + ffio_wfourcc(pb, "sdp "); + avio_write(pb, buf, len); + return len + 24; +} + +static int mov_write_track_metadata(AVIOContext *pb, AVStream *st, + const char *tag, const char *str) +{ + int64_t pos = avio_tell(pb); + AVDictionaryEntry *t = av_dict_get(st->metadata, str, NULL, 0); + if (!t || !utf8len(t->value)) + return 0; + + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, tag); /* type */ + avio_write(pb, t->value, strlen(t->value)); /* UTF8 string value */ + return update_size(pb, pos); +} + +static int mov_write_track_udta_tag(AVIOContext *pb, MOVMuxContext *mov, + AVStream *st) +{ + AVIOContext *pb_buf; + int ret, size; + uint8_t *buf; + + if (!st) + return 0; + + ret = avio_open_dyn_buf(&pb_buf); + if (ret < 0) + return ret; + + if (mov->mode & MODE_MP4) + mov_write_track_metadata(pb_buf, st, "name", "title"); + + if ((size = avio_close_dyn_buf(pb_buf, &buf)) > 0) { + avio_wb32(pb, size + 8); + ffio_wfourcc(pb, "udta"); + avio_write(pb, buf, size); + } + av_free(buf); + + return 0; +} + +static int mov_write_trak_tag(AVFormatContext *s, AVIOContext *pb, MOVMuxContext *mov, + MOVTrack *track, AVStream *st) +{ + int64_t pos = avio_tell(pb); + int entry_backup = track->entry; + int chunk_backup = track->chunkCount; + int ret; + + /* If we want to have an empty moov, but some samples already have been + * buffered 
(delay_moov), pretend that no samples have been written yet. */ + if (mov->flags & FF_MOV_FLAG_EMPTY_MOOV) + track->chunkCount = track->entry = 0; + + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "trak"); + mov_write_tkhd_tag(pb, mov, track, st); + + av_assert2(mov->use_editlist >= 0); + + if (track->start_dts != AV_NOPTS_VALUE) { + if (mov->use_editlist) + mov_write_edts_tag(pb, mov, track); // PSP Movies and several other cases require edts box + else if ((track->entry && track->cluster[0].dts) || track->mode == MODE_PSP || is_clcp_track(track)) + av_log(mov->fc, AV_LOG_WARNING, + "Not writing any edit list even though one would have been required\n"); + } + + if (track->tref_tag) + mov_write_tref_tag(pb, track); + + if ((ret = mov_write_mdia_tag(s, pb, mov, track)) < 0) + return ret; + if (track->mode == MODE_PSP) + mov_write_uuid_tag_psp(pb, track); // PSP Movies require this uuid box + if (track->tag == MKTAG('r','t','p',' ')) + mov_write_udta_sdp(pb, track); + if (track->mode == MODE_MOV) { + if (track->par->codec_type == AVMEDIA_TYPE_VIDEO) { + double sample_aspect_ratio = av_q2d(st->sample_aspect_ratio); + if (st->sample_aspect_ratio.num && 1.0 != sample_aspect_ratio) { + mov_write_tapt_tag(pb, track); + } + } + if (is_clcp_track(track) && st->sample_aspect_ratio.num) { + mov_write_tapt_tag(pb, track); + } + } + mov_write_track_udta_tag(pb, mov, st); + track->entry = entry_backup; + track->chunkCount = chunk_backup; + return update_size(pb, pos); +} + +static int mov_write_iods_tag(AVIOContext *pb, MOVMuxContext *mov) +{ + int i, has_audio = 0, has_video = 0; + int64_t pos = avio_tell(pb); + int audio_profile = mov->iods_audio_profile; + int video_profile = mov->iods_video_profile; + for (i = 0; i < mov->nb_streams; i++) { + if (mov->tracks[i].entry > 0 || mov->flags & FF_MOV_FLAG_EMPTY_MOOV) { + has_audio |= mov->tracks[i].par->codec_type == AVMEDIA_TYPE_AUDIO; + has_video |= mov->tracks[i].par->codec_type == AVMEDIA_TYPE_VIDEO; + } + } + if 
(audio_profile < 0) + audio_profile = 0xFF - has_audio; + if (video_profile < 0) + video_profile = 0xFF - has_video; + avio_wb32(pb, 0x0); /* size */ + ffio_wfourcc(pb, "iods"); + avio_wb32(pb, 0); /* version & flags */ + put_descr(pb, 0x10, 7); + avio_wb16(pb, 0x004f); + avio_w8(pb, 0xff); + avio_w8(pb, 0xff); + avio_w8(pb, audio_profile); + avio_w8(pb, video_profile); + avio_w8(pb, 0xff); + return update_size(pb, pos); +} + +static int mov_write_trex_tag(AVIOContext *pb, MOVTrack *track) +{ + avio_wb32(pb, 0x20); /* size */ + ffio_wfourcc(pb, "trex"); + avio_wb32(pb, 0); /* version & flags */ + avio_wb32(pb, track->track_id); /* track ID */ + avio_wb32(pb, 1); /* default sample description index */ + avio_wb32(pb, 0); /* default sample duration */ + avio_wb32(pb, 0); /* default sample size */ + avio_wb32(pb, 0); /* default sample flags */ + return 0; +} + +static int mov_write_mvex_tag(AVIOContext *pb, MOVMuxContext *mov) +{ + int64_t pos = avio_tell(pb); + int i; + avio_wb32(pb, 0x0); /* size */ + ffio_wfourcc(pb, "mvex"); + for (i = 0; i < mov->nb_streams; i++) + mov_write_trex_tag(pb, &mov->tracks[i]); + return update_size(pb, pos); +} + +static int mov_write_mvhd_tag(AVIOContext *pb, MOVMuxContext *mov) +{ + int max_track_id = 1, i; + int64_t max_track_len = 0; + int version; + + for (i = 0; i < mov->nb_streams; i++) { + if (mov->tracks[i].entry > 0 && mov->tracks[i].timescale) { + int64_t max_track_len_temp = av_rescale_rnd(mov->tracks[i].track_duration, + MOV_TIMESCALE, + mov->tracks[i].timescale, + AV_ROUND_UP); + if (max_track_len < max_track_len_temp) + max_track_len = max_track_len_temp; + if (max_track_id < mov->tracks[i].track_id) + max_track_id = mov->tracks[i].track_id; + } + } + /* If using delay_moov, make sure the output is the same as if no + * samples had been written yet. */ + if (mov->flags & FF_MOV_FLAG_EMPTY_MOOV) { + max_track_len = 0; + max_track_id = 1; + } + + version = max_track_len < UINT32_MAX ? 0 : 1; + avio_wb32(pb, version == 1 ? 
120 : 108); /* size */ + + ffio_wfourcc(pb, "mvhd"); + avio_w8(pb, version); + avio_wb24(pb, 0); /* flags */ + if (version == 1) { + avio_wb64(pb, mov->time); + avio_wb64(pb, mov->time); + } else { + avio_wb32(pb, mov->time); /* creation time */ + avio_wb32(pb, mov->time); /* modification time */ + } + avio_wb32(pb, MOV_TIMESCALE); + (version == 1) ? avio_wb64(pb, max_track_len) : avio_wb32(pb, max_track_len); /* duration of longest track */ + + avio_wb32(pb, 0x00010000); /* reserved (preferred rate) 1.0 = normal */ + avio_wb16(pb, 0x0100); /* reserved (preferred volume) 1.0 = normal */ + avio_wb16(pb, 0); /* reserved */ + avio_wb32(pb, 0); /* reserved */ + avio_wb32(pb, 0); /* reserved */ + + /* Matrix structure */ + write_matrix(pb, 1, 0, 0, 1, 0, 0); + + avio_wb32(pb, 0); /* reserved (preview time) */ + avio_wb32(pb, 0); /* reserved (preview duration) */ + avio_wb32(pb, 0); /* reserved (poster time) */ + avio_wb32(pb, 0); /* reserved (selection time) */ + avio_wb32(pb, 0); /* reserved (selection duration) */ + avio_wb32(pb, 0); /* reserved (current time) */ + avio_wb32(pb, max_track_id + 1); /* Next track id */ + return 0x6c; +} + +static int mov_write_itunes_hdlr_tag(AVIOContext *pb, MOVMuxContext *mov, + AVFormatContext *s) +{ + avio_wb32(pb, 33); /* size */ + ffio_wfourcc(pb, "hdlr"); + avio_wb32(pb, 0); + avio_wb32(pb, 0); + ffio_wfourcc(pb, "mdir"); + ffio_wfourcc(pb, "appl"); + avio_wb32(pb, 0); + avio_wb32(pb, 0); + avio_w8(pb, 0); + return 33; +} + +/* helper function to write a data tag with the specified string as data */ +static int mov_write_string_data_tag(AVIOContext *pb, const char *data, int lang, int long_style) +{ + if (long_style) { + int size = 16 + strlen(data); + avio_wb32(pb, size); /* size */ + ffio_wfourcc(pb, "data"); + avio_wb32(pb, 1); + avio_wb32(pb, 0); + avio_write(pb, data, strlen(data)); + return size; + } else { + if (!lang) + lang = ff_mov_iso639_to_lang("und", 1); + avio_wb16(pb, strlen(data)); /* string length */ + 
avio_wb16(pb, lang); + avio_write(pb, data, strlen(data)); + return strlen(data) + 4; + } +} + +static int mov_write_string_tag(AVIOContext *pb, const char *name, + const char *value, int lang, int long_style) +{ + int size = 0; + if (value && value[0]) { + int64_t pos = avio_tell(pb); + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, name); + mov_write_string_data_tag(pb, value, lang, long_style); + size = update_size(pb, pos); + } + return size; +} + +static AVDictionaryEntry *get_metadata_lang(AVFormatContext *s, + const char *tag, int *lang) +{ + int l, len, len2; + AVDictionaryEntry *t, *t2 = NULL; + char tag2[16]; + + *lang = 0; + + if (!(t = av_dict_get(s->metadata, tag, NULL, 0))) + return NULL; + + len = strlen(t->key); + snprintf(tag2, sizeof(tag2), "%s-", tag); + while ((t2 = av_dict_get(s->metadata, tag2, t2, AV_DICT_IGNORE_SUFFIX))) { + len2 = strlen(t2->key); + if (len2 == len + 4 && !strcmp(t->value, t2->value) + && (l = ff_mov_iso639_to_lang(&t2->key[len2 - 3], 1)) >= 0) { + *lang = l; + return t; + } + } + return t; +} + +static int mov_write_string_metadata(AVFormatContext *s, AVIOContext *pb, + const char *name, const char *tag, + int long_style) +{ + int lang; + AVDictionaryEntry *t = get_metadata_lang(s, tag, &lang); + if (!t) + return 0; + return mov_write_string_tag(pb, name, t->value, lang, long_style); +} + +/* iTunes bpm number */ +static int mov_write_tmpo_tag(AVIOContext *pb, AVFormatContext *s) +{ + AVDictionaryEntry *t = av_dict_get(s->metadata, "tmpo", NULL, 0); + int size = 0, tmpo = t ? 
atoi(t->value) : 0; + if (tmpo) { + size = 26; + avio_wb32(pb, size); + ffio_wfourcc(pb, "tmpo"); + avio_wb32(pb, size-8); /* size */ + ffio_wfourcc(pb, "data"); + avio_wb32(pb, 0x15); //type specifier + avio_wb32(pb, 0); + avio_wb16(pb, tmpo); // data + } + return size; +} + +/* 3GPP TS 26.244 */ +static int mov_write_loci_tag(AVFormatContext *s, AVIOContext *pb) +{ + int lang; + int64_t pos = avio_tell(pb); + double latitude, longitude, altitude; + int32_t latitude_fix, longitude_fix, altitude_fix; + AVDictionaryEntry *t = get_metadata_lang(s, "location", &lang); + const char *ptr, *place = ""; + char *end; + static const char *astronomical_body = "earth"; + if (!t) + return 0; + + ptr = t->value; + longitude = strtod(ptr, &end); + if (end == ptr) { + av_log(s, AV_LOG_WARNING, "malformed location metadata\n"); + return 0; + } + ptr = end; + latitude = strtod(ptr, &end); + if (end == ptr) { + av_log(s, AV_LOG_WARNING, "malformed location metadata\n"); + return 0; + } + ptr = end; + altitude = strtod(ptr, &end); + /* If no altitude was present, the default 0 should be fine */ + if (*end == '/') + place = end + 1; + + latitude_fix = (int32_t) ((1 << 16) * latitude); + longitude_fix = (int32_t) ((1 << 16) * longitude); + altitude_fix = (int32_t) ((1 << 16) * altitude); + + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "loci"); /* type */ + avio_wb32(pb, 0); /* version + flags */ + avio_wb16(pb, lang); + avio_write(pb, place, strlen(place) + 1); + avio_w8(pb, 0); /* role of place (0 == shooting location, 1 == real location, 2 == fictional location) */ + avio_wb32(pb, latitude_fix); + avio_wb32(pb, longitude_fix); + avio_wb32(pb, altitude_fix); + avio_write(pb, astronomical_body, strlen(astronomical_body) + 1); + avio_w8(pb, 0); /* additional notes, null terminated string */ + + return update_size(pb, pos); +} + +/* iTunes track or disc number */ +static int mov_write_trkn_tag(AVIOContext *pb, MOVMuxContext *mov, + AVFormatContext *s, int disc) +{ + AVDictionaryEntry 
*t = av_dict_get(s->metadata, + disc ? "disc" : "track", + NULL, 0); + int size = 0, track = t ? atoi(t->value) : 0; + if (track) { + int tracks = 0; + char *slash = strchr(t->value, '/'); + if (slash) + tracks = atoi(slash + 1); + avio_wb32(pb, 32); /* size */ + ffio_wfourcc(pb, disc ? "disk" : "trkn"); + avio_wb32(pb, 24); /* size */ + ffio_wfourcc(pb, "data"); + avio_wb32(pb, 0); // 8 bytes empty + avio_wb32(pb, 0); + avio_wb16(pb, 0); // empty + avio_wb16(pb, track); // track / disc number + avio_wb16(pb, tracks); // total track / disc number + avio_wb16(pb, 0); // empty + size = 32; + } + return size; +} + +static int mov_write_int8_metadata(AVFormatContext *s, AVIOContext *pb, + const char *name, const char *tag, + int len) +{ + AVDictionaryEntry *t = NULL; + uint8_t num; + int size = 24 + len; + + if (len != 1 && len != 4) + return -1; + + if (!(t = av_dict_get(s->metadata, tag, NULL, 0))) + return 0; + num = atoi(t->value); + + avio_wb32(pb, size); + ffio_wfourcc(pb, name); + avio_wb32(pb, size - 8); + ffio_wfourcc(pb, "data"); + avio_wb32(pb, 0x15); + avio_wb32(pb, 0); + if (len==4) avio_wb32(pb, num); + else avio_w8 (pb, num); + + return size; +} + +/* iTunes meta data list */ +static int mov_write_ilst_tag(AVIOContext *pb, MOVMuxContext *mov, + AVFormatContext *s) +{ + int64_t pos = avio_tell(pb); + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "ilst"); + mov_write_string_metadata(s, pb, "\251nam", "title" , 1); + mov_write_string_metadata(s, pb, "\251ART", "artist" , 1); + mov_write_string_metadata(s, pb, "aART", "album_artist", 1); + mov_write_string_metadata(s, pb, "\251wrt", "composer" , 1); + mov_write_string_metadata(s, pb, "\251alb", "album" , 1); + mov_write_string_metadata(s, pb, "\251day", "date" , 1); + if (!mov_write_string_metadata(s, pb, "\251too", "encoding_tool", 1)) { + if (!(s->flags & AVFMT_FLAG_BITEXACT)) + mov_write_string_tag(pb, "\251too", LIBAVFORMAT_IDENT, 0, 1); + } + mov_write_string_metadata(s, pb, "\251cmt", "comment" , 1); 
+ mov_write_string_metadata(s, pb, "\251gen", "genre" , 1); + mov_write_string_metadata(s, pb, "\251cpy", "copyright", 1); + mov_write_string_metadata(s, pb, "\251grp", "grouping" , 1); + mov_write_string_metadata(s, pb, "\251lyr", "lyrics" , 1); + mov_write_string_metadata(s, pb, "desc", "description",1); + mov_write_string_metadata(s, pb, "ldes", "synopsis" , 1); + mov_write_string_metadata(s, pb, "tvsh", "show" , 1); + mov_write_string_metadata(s, pb, "tven", "episode_id",1); + mov_write_string_metadata(s, pb, "tvnn", "network" , 1); + mov_write_int8_metadata (s, pb, "tves", "episode_sort",4); + mov_write_int8_metadata (s, pb, "tvsn", "season_number",4); + mov_write_int8_metadata (s, pb, "stik", "media_type",1); + mov_write_int8_metadata (s, pb, "hdvd", "hd_video", 1); + mov_write_int8_metadata (s, pb, "pgap", "gapless_playback",1); + mov_write_int8_metadata (s, pb, "cpil", "compilation", 1); + mov_write_trkn_tag(pb, mov, s, 0); // track number + mov_write_trkn_tag(pb, mov, s, 1); // disc number + mov_write_tmpo_tag(pb, s); + return update_size(pb, pos); +} + +static int mov_write_mdta_hdlr_tag(AVIOContext *pb, MOVMuxContext *mov, + AVFormatContext *s) +{ + avio_wb32(pb, 33); /* size */ + ffio_wfourcc(pb, "hdlr"); + avio_wb32(pb, 0); + avio_wb32(pb, 0); + ffio_wfourcc(pb, "mdta"); + avio_wb32(pb, 0); + avio_wb32(pb, 0); + avio_wb32(pb, 0); + avio_w8(pb, 0); + return 33; +} + +static int mov_write_mdta_keys_tag(AVIOContext *pb, MOVMuxContext *mov, + AVFormatContext *s) +{ + AVDictionaryEntry *t = NULL; + int64_t pos = avio_tell(pb); + int64_t curpos, entry_pos; + int count = 0; + + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "keys"); + avio_wb32(pb, 0); + entry_pos = avio_tell(pb); + avio_wb32(pb, 0); /* entry count */ + + while (t = av_dict_get(s->metadata, "", t, AV_DICT_IGNORE_SUFFIX)) { + avio_wb32(pb, strlen(t->key) + 8); + ffio_wfourcc(pb, "mdta"); + avio_write(pb, t->key, strlen(t->key)); + count += 1; + } + curpos = avio_tell(pb); + avio_seek(pb, 
entry_pos, SEEK_SET); + avio_wb32(pb, count); // rewrite entry count + avio_seek(pb, curpos, SEEK_SET); + + return update_size(pb, pos); +} + +static int mov_write_mdta_ilst_tag(AVIOContext *pb, MOVMuxContext *mov, + AVFormatContext *s) +{ + AVDictionaryEntry *t = NULL; + int64_t pos = avio_tell(pb); + int count = 1; /* keys are 1-index based */ + + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "ilst"); + + while (t = av_dict_get(s->metadata, "", t, AV_DICT_IGNORE_SUFFIX)) { + int64_t entry_pos = avio_tell(pb); + avio_wb32(pb, 0); /* size */ + avio_wb32(pb, count); /* key */ + mov_write_string_data_tag(pb, t->value, 0, 1); + update_size(pb, entry_pos); + count += 1; + } + return update_size(pb, pos); +} + +/* meta data tags */ +static int mov_write_meta_tag(AVIOContext *pb, MOVMuxContext *mov, + AVFormatContext *s) +{ + int size = 0; + int64_t pos = avio_tell(pb); + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "meta"); + avio_wb32(pb, 0); + if (mov->flags & FF_MOV_FLAG_USE_MDTA) { + mov_write_mdta_hdlr_tag(pb, mov, s); + mov_write_mdta_keys_tag(pb, mov, s); + mov_write_mdta_ilst_tag(pb, mov, s); + } + else { + /* iTunes metadata tag */ + mov_write_itunes_hdlr_tag(pb, mov, s); + mov_write_ilst_tag(pb, mov, s); + } + size = update_size(pb, pos); + return size; +} + +static int mov_write_raw_metadata_tag(AVFormatContext *s, AVIOContext *pb, + const char *name, const char *key) +{ + int len; + AVDictionaryEntry *t; + + if (!(t = av_dict_get(s->metadata, key, NULL, 0))) + return 0; + + len = strlen(t->value); + if (len > 0) { + int size = len + 8; + avio_wb32(pb, size); + ffio_wfourcc(pb, name); + avio_write(pb, t->value, len); + return size; + } + return 0; +} + +static int ascii_to_wc(AVIOContext *pb, const uint8_t *b) +{ + int val; + while (*b) { + GET_UTF8(val, *b++, return -1;) + avio_wb16(pb, val); + } + avio_wb16(pb, 0x00); + return 0; +} + +static uint16_t language_code(const char *str) +{ + return (((str[0] - 0x60) & 0x1F) << 10) + + (((str[1] - 0x60) & 
0x1F) << 5) + + (( str[2] - 0x60) & 0x1F); +} + +static int mov_write_3gp_udta_tag(AVIOContext *pb, AVFormatContext *s, + const char *tag, const char *str) +{ + int64_t pos = avio_tell(pb); + AVDictionaryEntry *t = av_dict_get(s->metadata, str, NULL, 0); + if (!t || !utf8len(t->value)) + return 0; + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, tag); /* type */ + avio_wb32(pb, 0); /* version + flags */ + if (!strcmp(tag, "yrrc")) + avio_wb16(pb, atoi(t->value)); + else { + avio_wb16(pb, language_code("eng")); /* language */ + avio_write(pb, t->value, strlen(t->value) + 1); /* UTF8 string value */ + if (!strcmp(tag, "albm") && + (t = av_dict_get(s->metadata, "track", NULL, 0))) + avio_w8(pb, atoi(t->value)); + } + return update_size(pb, pos); +} + +static int mov_write_chpl_tag(AVIOContext *pb, AVFormatContext *s) +{ + int64_t pos = avio_tell(pb); + int i, nb_chapters = FFMIN(s->nb_chapters, 255); + + avio_wb32(pb, 0); // size + ffio_wfourcc(pb, "chpl"); + avio_wb32(pb, 0x01000000); // version + flags + avio_wb32(pb, 0); // unknown + avio_w8(pb, nb_chapters); + + for (i = 0; i < nb_chapters; i++) { + AVChapter *c = s->chapters[i]; + AVDictionaryEntry *t; + avio_wb64(pb, av_rescale_q(c->start, c->time_base, (AVRational){1,10000000})); + + if ((t = av_dict_get(c->metadata, "title", NULL, 0))) { + int len = FFMIN(strlen(t->value), 255); + avio_w8(pb, len); + avio_write(pb, t->value, len); + } else + avio_w8(pb, 0); + } + return update_size(pb, pos); +} + +static int mov_write_udta_tag(AVIOContext *pb, MOVMuxContext *mov, + AVFormatContext *s) +{ + AVIOContext *pb_buf; + int ret, size; + uint8_t *buf; + + ret = avio_open_dyn_buf(&pb_buf); + if (ret < 0) + return ret; + + if (mov->mode & MODE_3GP) { + mov_write_3gp_udta_tag(pb_buf, s, "perf", "artist"); + mov_write_3gp_udta_tag(pb_buf, s, "titl", "title"); + mov_write_3gp_udta_tag(pb_buf, s, "auth", "author"); + mov_write_3gp_udta_tag(pb_buf, s, "gnre", "genre"); + mov_write_3gp_udta_tag(pb_buf, s, "dscp", "comment"); 
+ mov_write_3gp_udta_tag(pb_buf, s, "albm", "album"); + mov_write_3gp_udta_tag(pb_buf, s, "cprt", "copyright"); + mov_write_3gp_udta_tag(pb_buf, s, "yrrc", "date"); + mov_write_loci_tag(s, pb_buf); + } else if (mov->mode == MODE_MOV && !(mov->flags & FF_MOV_FLAG_USE_MDTA)) { // the title field breaks gtkpod with mp4 and my suspicion is that stuff is not valid in mp4 + mov_write_string_metadata(s, pb_buf, "\251ART", "artist", 0); + mov_write_string_metadata(s, pb_buf, "\251nam", "title", 0); + mov_write_string_metadata(s, pb_buf, "\251aut", "author", 0); + mov_write_string_metadata(s, pb_buf, "\251alb", "album", 0); + mov_write_string_metadata(s, pb_buf, "\251day", "date", 0); + mov_write_string_metadata(s, pb_buf, "\251swr", "encoder", 0); + // currently ignored by mov.c + mov_write_string_metadata(s, pb_buf, "\251des", "comment", 0); + // add support for libquicktime, this atom is also actually read by mov.c + mov_write_string_metadata(s, pb_buf, "\251cmt", "comment", 0); + mov_write_string_metadata(s, pb_buf, "\251gen", "genre", 0); + mov_write_string_metadata(s, pb_buf, "\251cpy", "copyright", 0); + mov_write_string_metadata(s, pb_buf, "\251mak", "make", 0); + mov_write_string_metadata(s, pb_buf, "\251mod", "model", 0); + mov_write_string_metadata(s, pb_buf, "\251xyz", "location", 0); + mov_write_raw_metadata_tag(s, pb_buf, "XMP_", "xmp"); + } else { + /* iTunes meta data */ + mov_write_meta_tag(pb_buf, mov, s); + mov_write_loci_tag(s, pb_buf); + } + + if (s->nb_chapters && !(mov->flags & FF_MOV_FLAG_DISABLE_CHPL)) + mov_write_chpl_tag(pb_buf, s); + + if ((size = avio_close_dyn_buf(pb_buf, &buf)) > 0) { + avio_wb32(pb, size + 8); + ffio_wfourcc(pb, "udta"); + avio_write(pb, buf, size); + } + av_free(buf); + + return 0; +} + +static void mov_write_psp_udta_tag(AVIOContext *pb, + const char *str, const char *lang, int type) +{ + int len = utf8len(str) + 1; + if (len <= 0) + return; + avio_wb16(pb, len * 2 + 10); /* size */ + avio_wb32(pb, type); /* type */ + 
avio_wb16(pb, language_code(lang)); /* language */ + avio_wb16(pb, 0x01); /* ? */ + ascii_to_wc(pb, str); +} + +static int mov_write_uuidusmt_tag(AVIOContext *pb, AVFormatContext *s) +{ + AVDictionaryEntry *title = av_dict_get(s->metadata, "title", NULL, 0); + int64_t pos, pos2; + + if (title) { + pos = avio_tell(pb); + avio_wb32(pb, 0); /* size placeholder*/ + ffio_wfourcc(pb, "uuid"); + ffio_wfourcc(pb, "USMT"); + avio_wb32(pb, 0x21d24fce); /* 96 bit UUID */ + avio_wb32(pb, 0xbb88695c); + avio_wb32(pb, 0xfac9c740); + + pos2 = avio_tell(pb); + avio_wb32(pb, 0); /* size placeholder*/ + ffio_wfourcc(pb, "MTDT"); + avio_wb16(pb, 4); + + // ? + avio_wb16(pb, 0x0C); /* size */ + avio_wb32(pb, 0x0B); /* type */ + avio_wb16(pb, language_code("und")); /* language */ + avio_wb16(pb, 0x0); /* ? */ + avio_wb16(pb, 0x021C); /* data */ + + if (!(s->flags & AVFMT_FLAG_BITEXACT)) + mov_write_psp_udta_tag(pb, LIBAVCODEC_IDENT, "eng", 0x04); + mov_write_psp_udta_tag(pb, title->value, "eng", 0x01); + mov_write_psp_udta_tag(pb, "2006/04/01 11:11:11", "und", 0x03); + + update_size(pb, pos2); + return update_size(pb, pos); + } + + return 0; +} + +static void build_chunks(MOVTrack *trk) +{ + int i; + MOVIentry *chunk = &trk->cluster[0]; + uint64_t chunkSize = chunk->size; + chunk->chunkNum = 1; + if (trk->chunkCount) + return; + trk->chunkCount = 1; + for (i = 1; ientry; i++){ + if (chunk->pos + chunkSize == trk->cluster[i].pos && + chunkSize + trk->cluster[i].size < (1<<20)){ + chunkSize += trk->cluster[i].size; + chunk->samples_in_chunk += trk->cluster[i].entries; + } else { + trk->cluster[i].chunkNum = chunk->chunkNum+1; + chunk=&trk->cluster[i]; + chunkSize = chunk->size; + trk->chunkCount++; + } + } +} + +/** + * Assign track ids. If option "use_stream_ids_as_track_ids" is set, + * the stream ids are used as track ids. + * + * This assumes mov->tracks and s->streams are in the same order and + * there are no gaps in either of them (so mov->tracks[n] refers to + * s->streams[n]). 
+ * + * As an exception, there can be more entries in + * s->streams than in mov->tracks, in which case new track ids are + * generated (starting after the largest found stream id). + */ +static int mov_setup_track_ids(MOVMuxContext *mov, AVFormatContext *s) +{ + int i; + + if (mov->track_ids_ok) + return 0; + + if (mov->use_stream_ids_as_track_ids) { + int next_generated_track_id = 0; + for (i = 0; i < s->nb_streams; i++) { + if (s->streams[i]->id > next_generated_track_id) + next_generated_track_id = s->streams[i]->id; + } + + for (i = 0; i < mov->nb_streams; i++) { + if (mov->tracks[i].entry <= 0 && !(mov->flags & FF_MOV_FLAG_FRAGMENT)) + continue; + + mov->tracks[i].track_id = i >= s->nb_streams ? ++next_generated_track_id : s->streams[i]->id; + } + } else { + for (i = 0; i < mov->nb_streams; i++) { + if (mov->tracks[i].entry <= 0 && !(mov->flags & FF_MOV_FLAG_FRAGMENT)) + continue; + + mov->tracks[i].track_id = i + 1; + } + } + + mov->track_ids_ok = 1; + + return 0; +} + +static int mov_write_moov_tag(AVIOContext *pb, MOVMuxContext *mov, + AVFormatContext *s) +{ + int i; + int64_t pos = avio_tell(pb); + avio_wb32(pb, 0); /* size placeholder*/ + ffio_wfourcc(pb, "moov"); + + mov_setup_track_ids(mov, s); + + for (i = 0; i < mov->nb_streams; i++) { + if (mov->tracks[i].entry <= 0 && !(mov->flags & FF_MOV_FLAG_FRAGMENT)) + continue; + + mov->tracks[i].time = mov->time; + + if (mov->tracks[i].entry) + build_chunks(&mov->tracks[i]); + } + + if (mov->chapter_track) + for (i = 0; i < s->nb_streams; i++) { + mov->tracks[i].tref_tag = MKTAG('c','h','a','p'); + mov->tracks[i].tref_id = mov->tracks[mov->chapter_track].track_id; + } + for (i = 0; i < mov->nb_streams; i++) { + MOVTrack *track = &mov->tracks[i]; + if (track->tag == MKTAG('r','t','p',' ')) { + track->tref_tag = MKTAG('h','i','n','t'); + track->tref_id = mov->tracks[track->src_track].track_id; + } else if (track->par->codec_type == AVMEDIA_TYPE_AUDIO) { + int * fallback, size; + fallback = 
(int*)av_stream_get_side_data(track->st, + AV_PKT_DATA_FALLBACK_TRACK, + &size); + if (fallback != NULL && size == sizeof(int)) { + if (*fallback >= 0 && *fallback < mov->nb_streams) { + track->tref_tag = MKTAG('f','a','l','l'); + track->tref_id = mov->tracks[*fallback].track_id; + } + } + } + } + for (i = 0; i < mov->nb_streams; i++) { + if (mov->tracks[i].tag == MKTAG('t','m','c','d')) { + int src_trk = mov->tracks[i].src_track; + mov->tracks[src_trk].tref_tag = mov->tracks[i].tag; + mov->tracks[src_trk].tref_id = mov->tracks[i].track_id; + //src_trk may have a different timescale than the tmcd track + mov->tracks[i].track_duration = av_rescale(mov->tracks[src_trk].track_duration, + mov->tracks[i].timescale, + mov->tracks[src_trk].timescale); + } + } + + mov_write_mvhd_tag(pb, mov); + if (mov->mode != MODE_MOV && !mov->iods_skip) + mov_write_iods_tag(pb, mov); + for (i = 0; i < mov->nb_streams; i++) { + if (mov->tracks[i].entry > 0 || mov->flags & FF_MOV_FLAG_FRAGMENT) { + int ret = mov_write_trak_tag(s, pb, mov, &(mov->tracks[i]), i < s->nb_streams ? 
s->streams[i] : NULL); + if (ret < 0) + return ret; + } + } + if (mov->flags & FF_MOV_FLAG_FRAGMENT) + mov_write_mvex_tag(pb, mov); /* QuickTime requires trak to precede this */ + + if (mov->mode == MODE_PSP) + mov_write_uuidusmt_tag(pb, s); + else + mov_write_udta_tag(pb, mov, s); + + return update_size(pb, pos); +} + +static void param_write_int(AVIOContext *pb, const char *name, int value) +{ + avio_printf(pb, "\n", name, value); +} + +static void param_write_string(AVIOContext *pb, const char *name, const char *value) +{ + avio_printf(pb, "\n", name, value); +} + +static void param_write_hex(AVIOContext *pb, const char *name, const uint8_t *value, int len) +{ + char buf[150]; + len = FFMIN(sizeof(buf) / 2 - 1, len); + ff_data_to_hex(buf, value, len, 0); + buf[2 * len] = '\0'; + avio_printf(pb, "\n", name, buf); +} + +static int mov_write_isml_manifest(AVIOContext *pb, MOVMuxContext *mov, AVFormatContext *s) +{ + int64_t pos = avio_tell(pb); + int i; + int64_t manifest_bit_rate = 0; + AVCPBProperties *props = NULL; + + static const uint8_t uuid[] = { + 0xa5, 0xd4, 0x0b, 0x30, 0xe8, 0x14, 0x11, 0xdd, + 0xba, 0x2f, 0x08, 0x00, 0x20, 0x0c, 0x9a, 0x66 + }; + + avio_wb32(pb, 0); + ffio_wfourcc(pb, "uuid"); + avio_write(pb, uuid, sizeof(uuid)); + avio_wb32(pb, 0); + + avio_printf(pb, "\n"); + avio_printf(pb, "\n"); + avio_printf(pb, "\n"); + if (!(mov->fc->flags & AVFMT_FLAG_BITEXACT)) + avio_printf(pb, "\n", + LIBAVFORMAT_IDENT); + avio_printf(pb, "\n"); + avio_printf(pb, "\n"); + avio_printf(pb, "\n"); + + mov_setup_track_ids(mov, s); + + for (i = 0; i < mov->nb_streams; i++) { + MOVTrack *track = &mov->tracks[i]; + const char *type; + int track_id = track->track_id; + + AVStream *st = track->st; + AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL,0); + + if (track->par->codec_type == AVMEDIA_TYPE_VIDEO) { + type = "video"; + } else if (track->par->codec_type == AVMEDIA_TYPE_AUDIO) { + type = "audio"; + } else { + continue; + } + + props = 
(AVCPBProperties*)av_stream_get_side_data(track->st, AV_PKT_DATA_CPB_PROPERTIES, NULL); + + if (track->par->bit_rate) { + manifest_bit_rate = track->par->bit_rate; + } else if (props) { + manifest_bit_rate = props->max_bitrate; + } + + avio_printf(pb, "<%s systemBitrate=\"%"PRId64"\">\n", type, + manifest_bit_rate); + param_write_int(pb, "systemBitrate", manifest_bit_rate); + param_write_int(pb, "trackID", track_id); + param_write_string(pb, "systemLanguage", lang ? lang->value : "und"); + if (track->par->codec_type == AVMEDIA_TYPE_VIDEO) { + if (track->par->codec_id == AV_CODEC_ID_H264) { + uint8_t *ptr; + int size = track->par->extradata_size; + if (!ff_avc_write_annexb_extradata(track->par->extradata, &ptr, + &size)) { + param_write_hex(pb, "CodecPrivateData", + ptr ? ptr : track->par->extradata, + size); + av_free(ptr); + } + param_write_string(pb, "FourCC", "H264"); + } else if (track->par->codec_id == AV_CODEC_ID_VC1) { + param_write_string(pb, "FourCC", "WVC1"); + param_write_hex(pb, "CodecPrivateData", track->par->extradata, + track->par->extradata_size); + } + param_write_int(pb, "MaxWidth", track->par->width); + param_write_int(pb, "MaxHeight", track->par->height); + param_write_int(pb, "DisplayWidth", track->par->width); + param_write_int(pb, "DisplayHeight", track->par->height); + } else { + if (track->par->codec_id == AV_CODEC_ID_AAC) { + switch (track->par->profile) + { + case FF_PROFILE_AAC_HE_V2: + param_write_string(pb, "FourCC", "AACP"); + break; + case FF_PROFILE_AAC_HE: + param_write_string(pb, "FourCC", "AACH"); + break; + default: + param_write_string(pb, "FourCC", "AACL"); + } + } else if (track->par->codec_id == AV_CODEC_ID_WMAPRO) { + param_write_string(pb, "FourCC", "WMAP"); + } + param_write_hex(pb, "CodecPrivateData", track->par->extradata, + track->par->extradata_size); + param_write_int(pb, "AudioTag", ff_codec_get_tag(ff_codec_wav_tags, + track->par->codec_id)); + param_write_int(pb, "Channels", track->par->channels); + 
param_write_int(pb, "SamplingRate", track->par->sample_rate); + param_write_int(pb, "BitsPerSample", 16); + param_write_int(pb, "PacketSize", track->par->block_align ? + track->par->block_align : 4); + } + avio_printf(pb, "\n", type); + } + avio_printf(pb, "\n"); + avio_printf(pb, "\n"); + avio_printf(pb, "\n"); + + return update_size(pb, pos); +} + +static int mov_write_mfhd_tag(AVIOContext *pb, MOVMuxContext *mov) +{ + avio_wb32(pb, 16); + ffio_wfourcc(pb, "mfhd"); + avio_wb32(pb, 0); + avio_wb32(pb, mov->fragments); + return 0; +} + +static uint32_t get_sample_flags(MOVTrack *track, MOVIentry *entry) +{ + return entry->flags & MOV_SYNC_SAMPLE ? MOV_FRAG_SAMPLE_FLAG_DEPENDS_NO : + (MOV_FRAG_SAMPLE_FLAG_DEPENDS_YES | MOV_FRAG_SAMPLE_FLAG_IS_NON_SYNC); +} + +static int mov_write_tfhd_tag(AVIOContext *pb, MOVMuxContext *mov, + MOVTrack *track, int64_t moof_offset) +{ + int64_t pos = avio_tell(pb); + uint32_t flags = MOV_TFHD_DEFAULT_SIZE | MOV_TFHD_DEFAULT_DURATION | + MOV_TFHD_BASE_DATA_OFFSET; + if (!track->entry) { + flags |= MOV_TFHD_DURATION_IS_EMPTY; + } else { + flags |= MOV_TFHD_DEFAULT_FLAGS; + } + if (mov->flags & FF_MOV_FLAG_OMIT_TFHD_OFFSET) + flags &= ~MOV_TFHD_BASE_DATA_OFFSET; + if (mov->flags & FF_MOV_FLAG_DEFAULT_BASE_MOOF) { + flags &= ~MOV_TFHD_BASE_DATA_OFFSET; + flags |= MOV_TFHD_DEFAULT_BASE_IS_MOOF; + } + + /* Don't set a default sample size, the silverlight player refuses + * to play files with that set. Don't set a default sample duration, + * WMP freaks out if it is set. Don't set a base data offset, PIFF + * file format says it MUST NOT be set. 
*/ + if (track->mode == MODE_ISM) + flags &= ~(MOV_TFHD_DEFAULT_SIZE | MOV_TFHD_DEFAULT_DURATION | + MOV_TFHD_BASE_DATA_OFFSET); + + avio_wb32(pb, 0); /* size placeholder */ + ffio_wfourcc(pb, "tfhd"); + avio_w8(pb, 0); /* version */ + avio_wb24(pb, flags); + + avio_wb32(pb, track->track_id); /* track-id */ + if (flags & MOV_TFHD_BASE_DATA_OFFSET) + avio_wb64(pb, moof_offset); + if (flags & MOV_TFHD_DEFAULT_DURATION) { + track->default_duration = get_cluster_duration(track, 0); + avio_wb32(pb, track->default_duration); + } + if (flags & MOV_TFHD_DEFAULT_SIZE) { + track->default_size = track->entry ? track->cluster[0].size : 1; + avio_wb32(pb, track->default_size); + } else + track->default_size = -1; + + if (flags & MOV_TFHD_DEFAULT_FLAGS) { + /* Set the default flags based on the second sample, if available. + * If the first sample is different, that can be signaled via a separate field. */ + if (track->entry > 1) + track->default_sample_flags = get_sample_flags(track, &track->cluster[1]); + else + track->default_sample_flags = + track->par->codec_type == AVMEDIA_TYPE_VIDEO ? 
+ (MOV_FRAG_SAMPLE_FLAG_DEPENDS_YES | MOV_FRAG_SAMPLE_FLAG_IS_NON_SYNC) : + MOV_FRAG_SAMPLE_FLAG_DEPENDS_NO; + avio_wb32(pb, track->default_sample_flags); + } + + return update_size(pb, pos); +} + +static int mov_write_trun_tag(AVIOContext *pb, MOVMuxContext *mov, + MOVTrack *track, int moof_size, + int first, int end) +{ + int64_t pos = avio_tell(pb); + uint32_t flags = MOV_TRUN_DATA_OFFSET; + int i; + + for (i = first; i < end; i++) { + if (get_cluster_duration(track, i) != track->default_duration) + flags |= MOV_TRUN_SAMPLE_DURATION; + if (track->cluster[i].size != track->default_size) + flags |= MOV_TRUN_SAMPLE_SIZE; + if (i > first && get_sample_flags(track, &track->cluster[i]) != track->default_sample_flags) + flags |= MOV_TRUN_SAMPLE_FLAGS; + } + if (!(flags & MOV_TRUN_SAMPLE_FLAGS) && track->entry > 0 && + get_sample_flags(track, &track->cluster[0]) != track->default_sample_flags) + flags |= MOV_TRUN_FIRST_SAMPLE_FLAGS; + if (track->flags & MOV_TRACK_CTTS) + flags |= MOV_TRUN_SAMPLE_CTS; + + avio_wb32(pb, 0); /* size placeholder */ + ffio_wfourcc(pb, "trun"); + avio_w8(pb, 0); /* version */ + avio_wb24(pb, flags); + + avio_wb32(pb, end - first); /* sample count */ + if (mov->flags & FF_MOV_FLAG_OMIT_TFHD_OFFSET && + !(mov->flags & FF_MOV_FLAG_DEFAULT_BASE_MOOF) && + !mov->first_trun) + avio_wb32(pb, 0); /* Later tracks follow immediately after the previous one */ + else + avio_wb32(pb, moof_size + 8 + track->data_offset + + track->cluster[first].pos); /* data offset */ + if (flags & MOV_TRUN_FIRST_SAMPLE_FLAGS) + avio_wb32(pb, get_sample_flags(track, &track->cluster[first])); + + for (i = first; i < end; i++) { + if (flags & MOV_TRUN_SAMPLE_DURATION) + avio_wb32(pb, get_cluster_duration(track, i)); + if (flags & MOV_TRUN_SAMPLE_SIZE) + avio_wb32(pb, track->cluster[i].size); + if (flags & MOV_TRUN_SAMPLE_FLAGS) + avio_wb32(pb, get_sample_flags(track, &track->cluster[i])); + if (flags & MOV_TRUN_SAMPLE_CTS) + avio_wb32(pb, track->cluster[i].cts); + } + + 
mov->first_trun = 0; + return update_size(pb, pos); +} + +static int mov_write_tfxd_tag(AVIOContext *pb, MOVTrack *track) +{ + int64_t pos = avio_tell(pb); + static const uint8_t uuid[] = { + 0x6d, 0x1d, 0x9b, 0x05, 0x42, 0xd5, 0x44, 0xe6, + 0x80, 0xe2, 0x14, 0x1d, 0xaf, 0xf7, 0x57, 0xb2 + }; + + avio_wb32(pb, 0); /* size placeholder */ + ffio_wfourcc(pb, "uuid"); + avio_write(pb, uuid, sizeof(uuid)); + avio_w8(pb, 1); + avio_wb24(pb, 0); + avio_wb64(pb, track->start_dts + track->frag_start + + track->cluster[0].cts); + avio_wb64(pb, track->end_pts - + (track->cluster[0].dts + track->cluster[0].cts)); + + return update_size(pb, pos); +} + +static int mov_write_tfrf_tag(AVIOContext *pb, MOVMuxContext *mov, + MOVTrack *track, int entry) +{ + int n = track->nb_frag_info - 1 - entry, i; + int size = 8 + 16 + 4 + 1 + 16*n; + static const uint8_t uuid[] = { + 0xd4, 0x80, 0x7e, 0xf2, 0xca, 0x39, 0x46, 0x95, + 0x8e, 0x54, 0x26, 0xcb, 0x9e, 0x46, 0xa7, 0x9f + }; + + if (entry < 0) + return 0; + + avio_seek(pb, track->frag_info[entry].tfrf_offset, SEEK_SET); + avio_wb32(pb, size); + ffio_wfourcc(pb, "uuid"); + avio_write(pb, uuid, sizeof(uuid)); + avio_w8(pb, 1); + avio_wb24(pb, 0); + avio_w8(pb, n); + for (i = 0; i < n; i++) { + int index = entry + 1 + i; + avio_wb64(pb, track->frag_info[index].time); + avio_wb64(pb, track->frag_info[index].duration); + } + if (n < mov->ism_lookahead) { + int free_size = 16 * (mov->ism_lookahead - n); + avio_wb32(pb, free_size); + ffio_wfourcc(pb, "free"); + ffio_fill(pb, 0, free_size - 8); + } + + return 0; +} + +static int mov_write_tfrf_tags(AVIOContext *pb, MOVMuxContext *mov, + MOVTrack *track) +{ + int64_t pos = avio_tell(pb); + int i; + for (i = 0; i < mov->ism_lookahead; i++) { + /* Update the tfrf tag for the last ism_lookahead fragments, + * nb_frag_info - 1 is the next fragment to be written. 
*/ + mov_write_tfrf_tag(pb, mov, track, track->nb_frag_info - 2 - i); + } + avio_seek(pb, pos, SEEK_SET); + return 0; +} + +static int mov_add_tfra_entries(AVIOContext *pb, MOVMuxContext *mov, int tracks, + int size) +{ + int i; + for (i = 0; i < mov->nb_streams; i++) { + MOVTrack *track = &mov->tracks[i]; + MOVFragmentInfo *info; + if ((tracks >= 0 && i != tracks) || !track->entry) + continue; + track->nb_frag_info++; + if (track->nb_frag_info >= track->frag_info_capacity) { + unsigned new_capacity = track->nb_frag_info + MOV_FRAG_INFO_ALLOC_INCREMENT; + if (av_reallocp_array(&track->frag_info, + new_capacity, + sizeof(*track->frag_info))) + return AVERROR(ENOMEM); + track->frag_info_capacity = new_capacity; + } + info = &track->frag_info[track->nb_frag_info - 1]; + info->offset = avio_tell(pb); + info->size = size; + // Try to recreate the original pts for the first packet + // from the fields we have stored + info->time = track->start_dts + track->frag_start + + track->cluster[0].cts; + info->duration = track->end_pts - + (track->cluster[0].dts + track->cluster[0].cts); + // If the pts is less than zero, we will have trimmed + // away parts of the media track using an edit list, + // and the corresponding start presentation time is zero. 
+ if (info->time < 0) { + info->duration += info->time; + info->time = 0; + } + info->tfrf_offset = 0; + mov_write_tfrf_tags(pb, mov, track); + } + return 0; +} + +static int mov_write_tfdt_tag(AVIOContext *pb, MOVTrack *track) +{ + int64_t pos = avio_tell(pb); + + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "tfdt"); + avio_w8(pb, 1); /* version */ + avio_wb24(pb, 0); + avio_wb64(pb, track->frag_start); + return update_size(pb, pos); +} + +static int mov_write_traf_tag(AVIOContext *pb, MOVMuxContext *mov, + MOVTrack *track, int64_t moof_offset, + int moof_size) +{ + int64_t pos = avio_tell(pb); + int i, start = 0; + avio_wb32(pb, 0); /* size placeholder */ + ffio_wfourcc(pb, "traf"); + + mov_write_tfhd_tag(pb, mov, track, moof_offset); + if (mov->mode != MODE_ISM) + mov_write_tfdt_tag(pb, track); + for (i = 1; i < track->entry; i++) { + if (track->cluster[i].pos != track->cluster[i - 1].pos + track->cluster[i - 1].size) { + mov_write_trun_tag(pb, mov, track, moof_size, start, i); + start = i; + } + } + mov_write_trun_tag(pb, mov, track, moof_size, start, track->entry); + if (mov->mode == MODE_ISM) { + mov_write_tfxd_tag(pb, track); + + if (mov->ism_lookahead) { + int i, size = 16 + 4 + 1 + 16 * mov->ism_lookahead; + + if (track->nb_frag_info > 0) { + MOVFragmentInfo *info = &track->frag_info[track->nb_frag_info - 1]; + if (!info->tfrf_offset) + info->tfrf_offset = avio_tell(pb); + } + avio_wb32(pb, 8 + size); + ffio_wfourcc(pb, "free"); + for (i = 0; i < size; i++) + avio_w8(pb, 0); + } + } + + return update_size(pb, pos); +} + +static int mov_write_moof_tag_internal(AVIOContext *pb, MOVMuxContext *mov, + int tracks, int moof_size) +{ + int64_t pos = avio_tell(pb); + int i; + + avio_wb32(pb, 0); /* size placeholder */ + ffio_wfourcc(pb, "moof"); + mov->first_trun = 1; + + mov_write_mfhd_tag(pb, mov); + for (i = 0; i < mov->nb_streams; i++) { + MOVTrack *track = &mov->tracks[i]; + if (tracks >= 0 && i != tracks) + continue; + if (!track->entry) + continue; + 
mov_write_traf_tag(pb, mov, track, pos, moof_size); + } + + return update_size(pb, pos); +} + +static int mov_write_sidx_tag(AVIOContext *pb, + MOVTrack *track, int ref_size, int total_sidx_size) +{ + int64_t pos = avio_tell(pb), offset_pos, end_pos; + int64_t presentation_time, duration, offset; + int starts_with_SAP, i, entries; + + if (track->entry) { + entries = 1; + presentation_time = track->start_dts + track->frag_start + + track->cluster[0].cts; + duration = track->end_pts - + (track->cluster[0].dts + track->cluster[0].cts); + starts_with_SAP = track->cluster[0].flags & MOV_SYNC_SAMPLE; + + // pts<0 should be cut away using edts + if (presentation_time < 0) { + duration += presentation_time; + presentation_time = 0; + } + } else { + entries = track->nb_frag_info; + if (entries <= 0) + return 0; + presentation_time = track->frag_info[0].time; + } + + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "sidx"); + avio_w8(pb, 1); /* version */ + avio_wb24(pb, 0); + avio_wb32(pb, track->track_id); /* reference_ID */ + avio_wb32(pb, track->timescale); /* timescale */ + avio_wb64(pb, presentation_time); /* earliest_presentation_time */ + offset_pos = avio_tell(pb); + avio_wb64(pb, 0); /* first_offset (offset to referenced moof) */ + avio_wb16(pb, 0); /* reserved */ + + avio_wb16(pb, entries); /* reference_count */ + for (i = 0; i < entries; i++) { + if (!track->entry) { + if (i > 1 && track->frag_info[i].offset != track->frag_info[i - 1].offset + track->frag_info[i - 1].size) { + av_log(NULL, AV_LOG_ERROR, "Non-consecutive fragments, writing incorrect sidx\n"); + } + duration = track->frag_info[i].duration; + ref_size = track->frag_info[i].size; + starts_with_SAP = 1; + } + avio_wb32(pb, (0 << 31) | (ref_size & 0x7fffffff)); /* reference_type (0 = media) | referenced_size */ + avio_wb32(pb, duration); /* subsegment_duration */ + avio_wb32(pb, (starts_with_SAP << 31) | (0 << 28) | 0); /* starts_with_SAP | SAP_type | SAP_delta_time */ + } + + end_pos = avio_tell(pb); + 
offset = pos + total_sidx_size - end_pos; + avio_seek(pb, offset_pos, SEEK_SET); + avio_wb64(pb, offset); + avio_seek(pb, end_pos, SEEK_SET); + return update_size(pb, pos); +} + +static int mov_write_sidx_tags(AVIOContext *pb, MOVMuxContext *mov, + int tracks, int ref_size) +{ + int i, round, ret; + AVIOContext *avio_buf; + int total_size = 0; + for (round = 0; round < 2; round++) { + // First run one round to calculate the total size of all + // sidx atoms. + // This would be much simpler if we'd only write one sidx + // atom, for the first track in the moof. + if (round == 0) { + if ((ret = ffio_open_null_buf(&avio_buf)) < 0) + return ret; + } else { + avio_buf = pb; + } + for (i = 0; i < mov->nb_streams; i++) { + MOVTrack *track = &mov->tracks[i]; + if (tracks >= 0 && i != tracks) + continue; + // When writing a sidx for the full file, entry is 0, but + // we want to include all tracks. ref_size is 0 in this case, + // since we read it from frag_info instead. + if (!track->entry && ref_size > 0) + continue; + total_size -= mov_write_sidx_tag(avio_buf, track, ref_size, + total_size); + } + if (round == 0) + total_size = ffio_close_null_buf(avio_buf); + } + return 0; +} + +static int mov_write_moof_tag(AVIOContext *pb, MOVMuxContext *mov, int tracks, + int64_t mdat_size) +{ + AVIOContext *avio_buf; + int ret, moof_size; + + if ((ret = ffio_open_null_buf(&avio_buf)) < 0) + return ret; + mov_write_moof_tag_internal(avio_buf, mov, tracks, 0); + moof_size = ffio_close_null_buf(avio_buf); + + if (mov->flags & FF_MOV_FLAG_DASH && !(mov->flags & FF_MOV_FLAG_GLOBAL_SIDX)) + mov_write_sidx_tags(pb, mov, tracks, moof_size + 8 + mdat_size); + + if ((ret = mov_add_tfra_entries(pb, mov, tracks, moof_size + 8 + mdat_size)) < 0) + return ret; + + return mov_write_moof_tag_internal(pb, mov, tracks, moof_size); +} + +static int mov_write_tfra_tag(AVIOContext *pb, MOVTrack *track) +{ + int64_t pos = avio_tell(pb); + int i; + + avio_wb32(pb, 0); /* size placeholder */ + 
ffio_wfourcc(pb, "tfra"); + avio_w8(pb, 1); /* version */ + avio_wb24(pb, 0); + + avio_wb32(pb, track->track_id); + avio_wb32(pb, 0); /* length of traf/trun/sample num */ + avio_wb32(pb, track->nb_frag_info); + for (i = 0; i < track->nb_frag_info; i++) { + avio_wb64(pb, track->frag_info[i].time); + avio_wb64(pb, track->frag_info[i].offset + track->data_offset); + avio_w8(pb, 1); /* traf number */ + avio_w8(pb, 1); /* trun number */ + avio_w8(pb, 1); /* sample number */ + } + + return update_size(pb, pos); +} + +static int mov_write_mfra_tag(AVIOContext *pb, MOVMuxContext *mov) +{ + int64_t pos = avio_tell(pb); + int i; + + avio_wb32(pb, 0); /* size placeholder */ + ffio_wfourcc(pb, "mfra"); + /* An empty mfra atom is enough to indicate to the publishing point that + * the stream has ended. */ + if (mov->flags & FF_MOV_FLAG_ISML) + return update_size(pb, pos); + + for (i = 0; i < mov->nb_streams; i++) { + MOVTrack *track = &mov->tracks[i]; + if (track->nb_frag_info) + mov_write_tfra_tag(pb, track); + } + + avio_wb32(pb, 16); + ffio_wfourcc(pb, "mfro"); + avio_wb32(pb, 0); /* version + flags */ + avio_wb32(pb, avio_tell(pb) + 4 - pos); + + return update_size(pb, pos); +} + +static int mov_write_mdat_tag(AVIOContext *pb, MOVMuxContext *mov) +{ + avio_wb32(pb, 8); // placeholder for extended size field (64 bit) + ffio_wfourcc(pb, mov->mode == MODE_MOV ? 
"wide" : "free"); + + mov->mdat_pos = avio_tell(pb); + avio_wb32(pb, 0); /* size placeholder*/ + ffio_wfourcc(pb, "mdat"); + return 0; +} + +/* TODO: This needs to be more general */ +static int mov_write_ftyp_tag(AVIOContext *pb, AVFormatContext *s) +{ + MOVMuxContext *mov = s->priv_data; + int64_t pos = avio_tell(pb); + int has_h264 = 0, has_video = 0; + int minor = 0x200; + int i; + + for (i = 0; i < s->nb_streams; i++) { + AVStream *st = s->streams[i]; + if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) + has_video = 1; + if (st->codecpar->codec_id == AV_CODEC_ID_H264) + has_h264 = 1; + } + + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "ftyp"); + + if (mov->major_brand && strlen(mov->major_brand) >= 4) + ffio_wfourcc(pb, mov->major_brand); + else if (mov->mode == MODE_3GP) { + ffio_wfourcc(pb, has_h264 ? "3gp6" : "3gp4"); + minor = has_h264 ? 0x100 : 0x200; + } else if (mov->mode & MODE_3G2) { + ffio_wfourcc(pb, has_h264 ? "3g2b" : "3g2a"); + minor = has_h264 ? 0x20000 : 0x10000; + } else if (mov->mode == MODE_PSP) + ffio_wfourcc(pb, "MSNV"); + else if (mov->mode == MODE_MP4 && mov->flags & FF_MOV_FLAG_DEFAULT_BASE_MOOF) + ffio_wfourcc(pb, "iso5"); // Required when using default-base-is-moof + else if (mov->mode == MODE_MP4) + ffio_wfourcc(pb, "isom"); + else if (mov->mode == MODE_IPOD) + ffio_wfourcc(pb, has_video ? "M4V ":"M4A "); + else if (mov->mode == MODE_ISM) + ffio_wfourcc(pb, "isml"); + else if (mov->mode == MODE_F4V) + ffio_wfourcc(pb, "f4v "); + else + ffio_wfourcc(pb, "qt "); + + avio_wb32(pb, minor); + + if (mov->mode == MODE_MOV) + ffio_wfourcc(pb, "qt "); + else if (mov->mode == MODE_ISM) { + ffio_wfourcc(pb, "piff"); + } else if (!(mov->flags & FF_MOV_FLAG_DEFAULT_BASE_MOOF)) { + ffio_wfourcc(pb, "isom"); + ffio_wfourcc(pb, "iso2"); + if (has_h264) + ffio_wfourcc(pb, "avc1"); + } + + // We add tfdt atoms when fragmenting, signal this with the iso6 compatible + // brand. This is compatible with users that don't understand tfdt. 
+ if (mov->flags & FF_MOV_FLAG_FRAGMENT && mov->mode != MODE_ISM) + ffio_wfourcc(pb, "iso6"); + + if (mov->mode == MODE_3GP) + ffio_wfourcc(pb, has_h264 ? "3gp6":"3gp4"); + else if (mov->mode & MODE_3G2) + ffio_wfourcc(pb, has_h264 ? "3g2b":"3g2a"); + else if (mov->mode == MODE_PSP) + ffio_wfourcc(pb, "MSNV"); + else if (mov->mode == MODE_MP4) + ffio_wfourcc(pb, "mp41"); + + if (mov->flags & FF_MOV_FLAG_DASH && mov->flags & FF_MOV_FLAG_GLOBAL_SIDX) + ffio_wfourcc(pb, "dash"); + + return update_size(pb, pos); +} + +static int mov_write_uuidprof_tag(AVIOContext *pb, AVFormatContext *s) +{ + AVStream *video_st = s->streams[0]; + AVCodecParameters *video_par = s->streams[0]->codecpar; + AVCodecParameters *audio_par = s->streams[1]->codecpar; + int audio_rate = audio_par->sample_rate; + int64_t frame_rate = (video_st->avg_frame_rate.num * 0x10000LL) / video_st->avg_frame_rate.den; + int audio_kbitrate = audio_par->bit_rate / 1000; + int video_kbitrate = FFMIN(video_par->bit_rate / 1000, 800 - audio_kbitrate); + + if (frame_rate < 0 || frame_rate > INT32_MAX) { + av_log(s, AV_LOG_ERROR, "Frame rate %f outside supported range\n", frame_rate / (double)0x10000); + return AVERROR(EINVAL); + } + + avio_wb32(pb, 0x94); /* size */ + ffio_wfourcc(pb, "uuid"); + ffio_wfourcc(pb, "PROF"); + + avio_wb32(pb, 0x21d24fce); /* 96 bit UUID */ + avio_wb32(pb, 0xbb88695c); + avio_wb32(pb, 0xfac9c740); + + avio_wb32(pb, 0x0); /* ? */ + avio_wb32(pb, 0x3); /* 3 sections ? */ + + avio_wb32(pb, 0x14); /* size */ + ffio_wfourcc(pb, "FPRF"); + avio_wb32(pb, 0x0); /* ? */ + avio_wb32(pb, 0x0); /* ? */ + avio_wb32(pb, 0x0); /* ? 
*/ + + avio_wb32(pb, 0x2c); /* size */ + ffio_wfourcc(pb, "APRF"); /* audio */ + avio_wb32(pb, 0x0); + avio_wb32(pb, 0x2); /* TrackID */ + ffio_wfourcc(pb, "mp4a"); + avio_wb32(pb, 0x20f); + avio_wb32(pb, 0x0); + avio_wb32(pb, audio_kbitrate); + avio_wb32(pb, audio_kbitrate); + avio_wb32(pb, audio_rate); + avio_wb32(pb, audio_par->channels); + + avio_wb32(pb, 0x34); /* size */ + ffio_wfourcc(pb, "VPRF"); /* video */ + avio_wb32(pb, 0x0); + avio_wb32(pb, 0x1); /* TrackID */ + if (video_par->codec_id == AV_CODEC_ID_H264) { + ffio_wfourcc(pb, "avc1"); + avio_wb16(pb, 0x014D); + avio_wb16(pb, 0x0015); + } else { + ffio_wfourcc(pb, "mp4v"); + avio_wb16(pb, 0x0000); + avio_wb16(pb, 0x0103); + } + avio_wb32(pb, 0x0); + avio_wb32(pb, video_kbitrate); + avio_wb32(pb, video_kbitrate); + avio_wb32(pb, frame_rate); + avio_wb32(pb, frame_rate); + avio_wb16(pb, video_par->width); + avio_wb16(pb, video_par->height); + avio_wb32(pb, 0x010001); /* ? */ + + return 0; +} + +static int mov_write_identification(AVIOContext *pb, AVFormatContext *s) +{ + MOVMuxContext *mov = s->priv_data; + int i; + + mov_write_ftyp_tag(pb,s); + if (mov->mode == MODE_PSP) { + int video_streams_nb = 0, audio_streams_nb = 0, other_streams_nb = 0; + for (i = 0; i < s->nb_streams; i++) { + AVStream *st = s->streams[i]; + if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) + video_streams_nb++; + else if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) + audio_streams_nb++; + else + other_streams_nb++; + } + + if (video_streams_nb != 1 || audio_streams_nb != 1 || other_streams_nb) { + av_log(s, AV_LOG_ERROR, "PSP mode need one video and one audio stream\n"); + return AVERROR(EINVAL); + } + return mov_write_uuidprof_tag(pb, s); + } + return 0; +} + +static int mov_parse_mpeg2_frame(AVPacket *pkt, uint32_t *flags) +{ + uint32_t c = -1; + int i, closed_gop = 0; + + for (i = 0; i < pkt->size - 4; i++) { + c = (c << 8) + pkt->data[i]; + if (c == 0x1b8) { // gop + closed_gop = pkt->data[i + 4] >> 6 & 0x01; + } else 
if (c == 0x100) { // pic + int temp_ref = (pkt->data[i + 1] << 2) | (pkt->data[i + 2] >> 6); + if (!temp_ref || closed_gop) // I picture is not reordered + *flags = MOV_SYNC_SAMPLE; + else + *flags = MOV_PARTIAL_SYNC_SAMPLE; + break; + } + } + return 0; +} + +static void mov_parse_vc1_frame(AVPacket *pkt, MOVTrack *trk) +{ + const uint8_t *start, *next, *end = pkt->data + pkt->size; + int seq = 0, entry = 0; + int key = pkt->flags & AV_PKT_FLAG_KEY; + start = find_next_marker(pkt->data, end); + for (next = start; next < end; start = next) { + next = find_next_marker(start + 4, end); + switch (AV_RB32(start)) { + case VC1_CODE_SEQHDR: + seq = 1; + break; + case VC1_CODE_ENTRYPOINT: + entry = 1; + break; + case VC1_CODE_SLICE: + trk->vc1_info.slices = 1; + break; + } + } + if (!trk->entry && trk->vc1_info.first_packet_seen) + trk->vc1_info.first_frag_written = 1; + if (!trk->entry && !trk->vc1_info.first_frag_written) { + /* First packet in first fragment */ + trk->vc1_info.first_packet_seq = seq; + trk->vc1_info.first_packet_entry = entry; + trk->vc1_info.first_packet_seen = 1; + } else if ((seq && !trk->vc1_info.packet_seq) || + (entry && !trk->vc1_info.packet_entry)) { + int i; + for (i = 0; i < trk->entry; i++) + trk->cluster[i].flags &= ~MOV_SYNC_SAMPLE; + trk->has_keyframes = 0; + if (seq) + trk->vc1_info.packet_seq = 1; + if (entry) + trk->vc1_info.packet_entry = 1; + if (!trk->vc1_info.first_frag_written) { + /* First fragment */ + if ((!seq || trk->vc1_info.first_packet_seq) && + (!entry || trk->vc1_info.first_packet_entry)) { + /* First packet had the same headers as this one, readd the + * sync sample flag. 
*/ + trk->cluster[0].flags |= MOV_SYNC_SAMPLE; + trk->has_keyframes = 1; + } + } + } + if (trk->vc1_info.packet_seq && trk->vc1_info.packet_entry) + key = seq && entry; + else if (trk->vc1_info.packet_seq) + key = seq; + else if (trk->vc1_info.packet_entry) + key = entry; + if (key) { + trk->cluster[trk->entry].flags |= MOV_SYNC_SAMPLE; + trk->has_keyframes++; + } +} + +static int mov_flush_fragment_interleaving(AVFormatContext *s, MOVTrack *track) +{ + MOVMuxContext *mov = s->priv_data; + int ret, buf_size; + uint8_t *buf; + int i, offset; + + if (!track->mdat_buf) + return 0; + if (!mov->mdat_buf) { + if ((ret = avio_open_dyn_buf(&mov->mdat_buf)) < 0) + return ret; + } + buf_size = avio_close_dyn_buf(track->mdat_buf, &buf); + track->mdat_buf = NULL; + + offset = avio_tell(mov->mdat_buf); + avio_write(mov->mdat_buf, buf, buf_size); + av_free(buf); + + for (i = track->entries_flushed; i < track->entry; i++) + track->cluster[i].pos += offset; + track->entries_flushed = track->entry; + return 0; +} + +static int mov_flush_fragment(AVFormatContext *s, int force) +{ + MOVMuxContext *mov = s->priv_data; + int i, first_track = -1; + int64_t mdat_size = 0; + int ret; + int has_video = 0, starts_with_key = 0, first_video_track = 1; + + if (!(mov->flags & FF_MOV_FLAG_FRAGMENT)) + return 0; + + // Try to fill in the duration of the last packet in each stream + // from queued packets in the interleave queues. If the flushing + // of fragments was triggered automatically by an AVPacket, we + // already have reliable info for the end of that track, but other + // tracks may need to be filled in. 
+ for (i = 0; i < s->nb_streams; i++) { + MOVTrack *track = &mov->tracks[i]; + if (!track->end_reliable) { + int64_t ts_offset; + const AVPacket *next = ff_interleaved_peek(s, i, &ts_offset); + if (next) { + track->track_duration = next->dts - track->start_dts + ts_offset; + if (next->pts != AV_NOPTS_VALUE) + track->end_pts = next->pts; + else + track->end_pts = next->dts; + track->end_pts += ts_offset; + } + } + } + + for (i = 0; i < mov->nb_streams; i++) { + MOVTrack *track = &mov->tracks[i]; + if (track->entry <= 1) + continue; + // Sample durations are calculated as the diff of dts values, + // but for the last sample in a fragment, we don't know the dts + // of the first sample in the next fragment, so we have to rely + // on what was set as duration in the AVPacket. Not all callers + // set this though, so we might want to replace it with an + // estimate if it currently is zero. + if (get_cluster_duration(track, track->entry - 1) != 0) + continue; + // Use the duration (i.e. dts diff) of the second last sample for + // the last one. This is a wild guess (and fatal if it turns out + // to be too long), but probably the best we can do - having a zero + // duration is bad as well. 
+ track->track_duration += get_cluster_duration(track, track->entry - 2); + track->end_pts += get_cluster_duration(track, track->entry - 2); + if (!mov->missing_duration_warned) { + av_log(s, AV_LOG_WARNING, + "Estimating the duration of the last packet in a " + "fragment, consider setting the duration field in " + "AVPacket instead.\n"); + mov->missing_duration_warned = 1; + } + } + + if (!mov->moov_written) { + int64_t pos = avio_tell(s->pb); + uint8_t *buf; + int buf_size, moov_size; + + for (i = 0; i < mov->nb_streams; i++) + if (!mov->tracks[i].entry) + break; + /* Don't write the initial moov unless all tracks have data */ + if (i < mov->nb_streams && !force) + return 0; + + moov_size = get_moov_size(s); + for (i = 0; i < mov->nb_streams; i++) + mov->tracks[i].data_offset = pos + moov_size + 8; + + avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_HEADER); + if (mov->flags & FF_MOV_FLAG_DELAY_MOOV) + mov_write_identification(s->pb, s); + if ((ret = mov_write_moov_tag(s->pb, mov, s)) < 0) + return ret; + + if (mov->flags & FF_MOV_FLAG_DELAY_MOOV) { + if (mov->flags & FF_MOV_FLAG_GLOBAL_SIDX) + mov->reserved_header_pos = avio_tell(s->pb); + avio_flush(s->pb); + mov->moov_written = 1; + return 0; + } + + buf_size = avio_close_dyn_buf(mov->mdat_buf, &buf); + mov->mdat_buf = NULL; + avio_wb32(s->pb, buf_size + 8); + ffio_wfourcc(s->pb, "mdat"); + avio_write(s->pb, buf, buf_size); + av_free(buf); + + if (mov->flags & FF_MOV_FLAG_GLOBAL_SIDX) + mov->reserved_header_pos = avio_tell(s->pb); + + mov->moov_written = 1; + mov->mdat_size = 0; + for (i = 0; i < mov->nb_streams; i++) { + if (mov->tracks[i].entry) + mov->tracks[i].frag_start += mov->tracks[i].start_dts + + mov->tracks[i].track_duration - + mov->tracks[i].cluster[0].dts; + mov->tracks[i].entry = 0; + mov->tracks[i].end_reliable = 0; + } + avio_flush(s->pb); + return 0; + } + + if (mov->frag_interleave) { + for (i = 0; i < mov->nb_streams; i++) { + MOVTrack *track = &mov->tracks[i]; + int ret; + if 
((ret = mov_flush_fragment_interleaving(s, track)) < 0) + return ret; + } + + if (!mov->mdat_buf) + return 0; + mdat_size = avio_tell(mov->mdat_buf); + } + + for (i = 0; i < mov->nb_streams; i++) { + MOVTrack *track = &mov->tracks[i]; + if (mov->flags & FF_MOV_FLAG_SEPARATE_MOOF || mov->frag_interleave) + track->data_offset = 0; + else + track->data_offset = mdat_size; + if (track->par->codec_type == AVMEDIA_TYPE_VIDEO) { + has_video = 1; + if (first_video_track) { + if (track->entry) + starts_with_key = track->cluster[0].flags & MOV_SYNC_SAMPLE; + first_video_track = 0; + } + } + if (!track->entry) + continue; + if (track->mdat_buf) + mdat_size += avio_tell(track->mdat_buf); + if (first_track < 0) + first_track = i; + } + + if (!mdat_size) + return 0; + + avio_write_marker(s->pb, + av_rescale(mov->tracks[first_track].cluster[0].dts, AV_TIME_BASE, mov->tracks[first_track].timescale), + (has_video ? starts_with_key : mov->tracks[first_track].cluster[0].flags & MOV_SYNC_SAMPLE) ? AVIO_DATA_MARKER_SYNC_POINT : AVIO_DATA_MARKER_BOUNDARY_POINT); + + for (i = 0; i < mov->nb_streams; i++) { + MOVTrack *track = &mov->tracks[i]; + int buf_size, write_moof = 1, moof_tracks = -1; + uint8_t *buf; + int64_t duration = 0; + + if (track->entry) + duration = track->start_dts + track->track_duration - + track->cluster[0].dts; + if (mov->flags & FF_MOV_FLAG_SEPARATE_MOOF) { + if (!track->mdat_buf) + continue; + mdat_size = avio_tell(track->mdat_buf); + moof_tracks = i; + } else { + write_moof = i == first_track; + } + + if (write_moof) { + avio_flush(s->pb); + + mov_write_moof_tag(s->pb, mov, moof_tracks, mdat_size); + mov->fragments++; + + avio_wb32(s->pb, mdat_size + 8); + ffio_wfourcc(s->pb, "mdat"); + } + + if (track->entry) + track->frag_start += duration; + track->entry = 0; + track->entries_flushed = 0; + track->end_reliable = 0; + if (!mov->frag_interleave) { + if (!track->mdat_buf) + continue; + buf_size = avio_close_dyn_buf(track->mdat_buf, &buf); + track->mdat_buf = NULL; 
+ } else { + if (!mov->mdat_buf) + continue; + buf_size = avio_close_dyn_buf(mov->mdat_buf, &buf); + mov->mdat_buf = NULL; + } + + avio_write(s->pb, buf, buf_size); + av_free(buf); + } + + mov->mdat_size = 0; + + avio_flush(s->pb); + return 0; +} + +static int mov_auto_flush_fragment(AVFormatContext *s, int force) +{ + MOVMuxContext *mov = s->priv_data; + int had_moov = mov->moov_written; + int ret = mov_flush_fragment(s, force); + if (ret < 0) + return ret; + // If using delay_moov, the first flush only wrote the moov, + // not the actual moof+mdat pair, thus flush once again. + if (!had_moov && mov->flags & FF_MOV_FLAG_DELAY_MOOV) + ret = mov_flush_fragment(s, force); + return ret; +} + +static int check_pkt(AVFormatContext *s, AVPacket *pkt) +{ + MOVMuxContext *mov = s->priv_data; + MOVTrack *trk = &mov->tracks[pkt->stream_index]; + int64_t ref; + uint64_t duration; + + if (trk->entry) { + ref = trk->cluster[trk->entry - 1].dts; + } else if ( trk->start_dts != AV_NOPTS_VALUE + && !trk->frag_discont) { + ref = trk->start_dts + trk->track_duration; + } else + ref = pkt->dts; // Skip tests for the first packet + + duration = pkt->dts - ref; + if (pkt->dts < ref || duration >= INT_MAX) { + av_log(s, AV_LOG_ERROR, "Application provided duration: %"PRId64" / timestamp: %"PRId64" is out of range for mov/mp4 format\n", + duration, pkt->dts + ); + + pkt->dts = ref + 1; + pkt->pts = AV_NOPTS_VALUE; + } + + if (pkt->duration < 0 || pkt->duration > INT_MAX) { + av_log(s, AV_LOG_ERROR, "Application provided duration: %"PRId64" is invalid\n", pkt->duration); + return AVERROR(EINVAL); + } + return 0; +} + +int ff_mov_write_packet(AVFormatContext *s, AVPacket *pkt) +{ + MOVMuxContext *mov = s->priv_data; + AVIOContext *pb = s->pb; + MOVTrack *trk = &mov->tracks[pkt->stream_index]; + AVCodecParameters *par = trk->par; + unsigned int samples_in_chunk = 0; + int size = pkt->size, ret = 0; + uint8_t *reformatted_data = NULL; + + ret = check_pkt(s, pkt); + if (ret < 0) + return ret; 
+ + if (mov->flags & FF_MOV_FLAG_FRAGMENT) { + int ret; + if (mov->moov_written || mov->flags & FF_MOV_FLAG_EMPTY_MOOV) { + if (mov->frag_interleave && mov->fragments > 0) { + if (trk->entry - trk->entries_flushed >= mov->frag_interleave) { + if ((ret = mov_flush_fragment_interleaving(s, trk)) < 0) + return ret; + } + } + + if (!trk->mdat_buf) { + if ((ret = avio_open_dyn_buf(&trk->mdat_buf)) < 0) + return ret; + } + pb = trk->mdat_buf; + } else { + if (!mov->mdat_buf) { + if ((ret = avio_open_dyn_buf(&mov->mdat_buf)) < 0) + return ret; + } + pb = mov->mdat_buf; + } + } + + if (par->codec_id == AV_CODEC_ID_AMR_NB) { + /* We must find out how many AMR blocks there are in one packet */ + static const uint16_t packed_size[16] = + {13, 14, 16, 18, 20, 21, 27, 32, 6, 0, 0, 0, 0, 0, 0, 1}; + int len = 0; + + while (len < size && samples_in_chunk < 100) { + len += packed_size[(pkt->data[len] >> 3) & 0x0F]; + samples_in_chunk++; + } + if (samples_in_chunk > 1) { + av_log(s, AV_LOG_ERROR, "fatal error, input is not a single packet, implement a AVParser for it\n"); + return -1; + } + } else if (par->codec_id == AV_CODEC_ID_ADPCM_MS || + par->codec_id == AV_CODEC_ID_ADPCM_IMA_WAV) { + samples_in_chunk = trk->par->frame_size; + } else if (trk->sample_size) + samples_in_chunk = size / trk->sample_size; + else + samples_in_chunk = 1; + + /* copy extradata if it exists */ + if (trk->vos_len == 0 && par->extradata_size > 0 && + !TAG_IS_AVCI(trk->tag) && + (par->codec_id != AV_CODEC_ID_DNXHD)) { + trk->vos_len = par->extradata_size; + trk->vos_data = av_malloc(trk->vos_len); + if (!trk->vos_data) { + ret = AVERROR(ENOMEM); + goto err; + } + memcpy(trk->vos_data, par->extradata, trk->vos_len); + } + + if (par->codec_id == AV_CODEC_ID_AAC && pkt->size > 2 && + (AV_RB16(pkt->data) & 0xfff0) == 0xfff0) { + if (!s->streams[pkt->stream_index]->nb_frames) { + av_log(s, AV_LOG_ERROR, "Malformed AAC bitstream detected: " + "use the audio bitstream filter 'aac_adtstoasc' to fix it " + 
"('-bsf:a aac_adtstoasc' option with ffmpeg)\n"); + return -1; + } + av_log(s, AV_LOG_WARNING, "aac bitstream error\n"); + } + if (par->codec_id == AV_CODEC_ID_H264 && trk->vos_len > 0 && *(uint8_t *)trk->vos_data != 1 && !TAG_IS_AVCI(trk->tag)) { + /* from x264 or from bytestream H.264 */ + /* NAL reformatting needed */ + if (trk->hint_track >= 0 && trk->hint_track < mov->nb_streams) { + ff_avc_parse_nal_units_buf(pkt->data, &reformatted_data, + &size); + avio_write(pb, reformatted_data, size); + } else { + if (mov->encryption_scheme == MOV_ENC_CENC_AES_CTR) { + size = ff_mov_cenc_avc_parse_nal_units(&trk->cenc, pb, pkt->data, size); + if (size < 0) { + ret = size; + goto err; + } + } else { + size = ff_avc_parse_nal_units(pb, pkt->data, pkt->size); + } + } + } else if (par->codec_id == AV_CODEC_ID_HEVC && trk->vos_len > 6 && + (AV_RB24(trk->vos_data) == 1 || AV_RB32(trk->vos_data) == 1)) { + /* extradata is Annex B, assume the bitstream is too and convert it */ + if (trk->hint_track >= 0 && trk->hint_track < mov->nb_streams) { + ff_hevc_annexb2mp4_buf(pkt->data, &reformatted_data, &size, 0, NULL); + avio_write(pb, reformatted_data, size); + } else { + size = ff_hevc_annexb2mp4(pb, pkt->data, pkt->size, 0, NULL); + } +#if CONFIG_AC3_PARSER + } else if (par->codec_id == AV_CODEC_ID_EAC3) { + size = handle_eac3(mov, pkt, trk); + if (size < 0) + return size; + else if (!size) + goto end; + avio_write(pb, pkt->data, size); +#endif + } else { + if (mov->encryption_scheme == MOV_ENC_CENC_AES_CTR) { + if (par->codec_id == AV_CODEC_ID_H264 && par->extradata_size > 4) { + int nal_size_length = (par->extradata[4] & 0x3) + 1; + ret = ff_mov_cenc_avc_write_nal_units(s, &trk->cenc, nal_size_length, pb, pkt->data, size); + } else { + ret = ff_mov_cenc_write_packet(&trk->cenc, pb, pkt->data, size); + } + + if (ret) { + goto err; + } + } else { + avio_write(pb, pkt->data, size); + } + } + + if ((par->codec_id == AV_CODEC_ID_DNXHD || + par->codec_id == AV_CODEC_ID_AC3) && 
!trk->vos_len) { + /* copy frame to create needed atoms */ + trk->vos_len = size; + trk->vos_data = av_malloc(size); + if (!trk->vos_data) { + ret = AVERROR(ENOMEM); + goto err; + } + memcpy(trk->vos_data, pkt->data, size); + } + + if (trk->entry >= trk->cluster_capacity) { + unsigned new_capacity = 2 * (trk->entry + MOV_INDEX_CLUSTER_SIZE); + if (av_reallocp_array(&trk->cluster, new_capacity, + sizeof(*trk->cluster))) { + ret = AVERROR(ENOMEM); + goto err; + } + trk->cluster_capacity = new_capacity; + } + + trk->cluster[trk->entry].pos = avio_tell(pb) - size; + trk->cluster[trk->entry].samples_in_chunk = samples_in_chunk; + trk->cluster[trk->entry].chunkNum = 0; + trk->cluster[trk->entry].size = size; + trk->cluster[trk->entry].entries = samples_in_chunk; + trk->cluster[trk->entry].dts = pkt->dts; + if (!trk->entry && trk->start_dts != AV_NOPTS_VALUE) { + if (!trk->frag_discont) { + /* First packet of a new fragment. We already wrote the duration + * of the last packet of the previous fragment based on track_duration, + * which might not exactly match our dts. Therefore adjust the dts + * of this packet to be what the previous packets duration implies. */ + trk->cluster[trk->entry].dts = trk->start_dts + trk->track_duration; + /* We also may have written the pts and the corresponding duration + * in sidx/tfrf/tfxd tags; make sure the sidx pts and duration match up with + * the next fragment. This means the cts of the first sample must + * be the same in all fragments, unless end_pts was updated by + * the packet causing the fragment to be written. */ + if ((mov->flags & FF_MOV_FLAG_DASH && !(mov->flags & FF_MOV_FLAG_GLOBAL_SIDX)) || + mov->mode == MODE_ISM) + pkt->pts = pkt->dts + trk->end_pts - trk->cluster[trk->entry].dts; + } else { + /* New fragment, but discontinuous from previous fragments. + * Pretend the duration sum of the earlier fragments is + * pkt->dts - trk->start_dts. 
*/ + trk->frag_start = pkt->dts - trk->start_dts; + trk->end_pts = AV_NOPTS_VALUE; + trk->frag_discont = 0; + } + } + + if (!trk->entry && trk->start_dts == AV_NOPTS_VALUE && !mov->use_editlist && + s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_MAKE_ZERO) { + /* Not using edit lists and shifting the first track to start from zero. + * If the other streams start from a later timestamp, we won't be able + * to signal the difference in starting time without an edit list. + * Thus move the timestamp for this first sample to 0, increasing + * its duration instead. */ + trk->cluster[trk->entry].dts = trk->start_dts = 0; + } + if (trk->start_dts == AV_NOPTS_VALUE) { + trk->start_dts = pkt->dts; + if (trk->frag_discont) { + if (mov->use_editlist) { + /* Pretend the whole stream started at pts=0, with earlier fragments + * already written. If the stream started at pts=0, the duration sum + * of earlier fragments would have been pkt->pts. */ + trk->frag_start = pkt->pts; + trk->start_dts = pkt->dts - pkt->pts; + } else { + /* Pretend the whole stream started at dts=0, with earlier fragments + * already written, with a duration summing up to pkt->dts. */ + trk->frag_start = pkt->dts; + trk->start_dts = 0; + } + trk->frag_discont = 0; + } else if (pkt->dts && mov->moov_written) + av_log(s, AV_LOG_WARNING, + "Track %d starts with a nonzero dts %"PRId64", while the moov " + "already has been written. 
Set the delay_moov flag to handle " + "this case.\n", + pkt->stream_index, pkt->dts); + } + trk->track_duration = pkt->dts - trk->start_dts + pkt->duration; + trk->last_sample_is_subtitle_end = 0; + + if (pkt->pts == AV_NOPTS_VALUE) { + av_log(s, AV_LOG_WARNING, "pts has no value\n"); + pkt->pts = pkt->dts; + } + if (pkt->dts != pkt->pts) + trk->flags |= MOV_TRACK_CTTS; + trk->cluster[trk->entry].cts = pkt->pts - pkt->dts; + trk->cluster[trk->entry].flags = 0; + if (trk->start_cts == AV_NOPTS_VALUE) + trk->start_cts = pkt->pts - pkt->dts; + if (trk->end_pts == AV_NOPTS_VALUE) + trk->end_pts = trk->cluster[trk->entry].dts + + trk->cluster[trk->entry].cts + pkt->duration; + else + trk->end_pts = FFMAX(trk->end_pts, trk->cluster[trk->entry].dts + + trk->cluster[trk->entry].cts + + pkt->duration); + + if (par->codec_id == AV_CODEC_ID_VC1) { + mov_parse_vc1_frame(pkt, trk); + } else if (pkt->flags & AV_PKT_FLAG_KEY) { + if (mov->mode == MODE_MOV && par->codec_id == AV_CODEC_ID_MPEG2VIDEO && + trk->entry > 0) { // force sync sample for the first key frame + mov_parse_mpeg2_frame(pkt, &trk->cluster[trk->entry].flags); + if (trk->cluster[trk->entry].flags & MOV_PARTIAL_SYNC_SAMPLE) + trk->flags |= MOV_TRACK_STPS; + } else { + trk->cluster[trk->entry].flags = MOV_SYNC_SAMPLE; + } + if (trk->cluster[trk->entry].flags & MOV_SYNC_SAMPLE) + trk->has_keyframes++; + } + trk->entry++; + trk->sample_count += samples_in_chunk; + mov->mdat_size += size; + + if (trk->hint_track >= 0 && trk->hint_track < mov->nb_streams) + ff_mov_add_hinted_packet(s, pkt, trk->hint_track, trk->entry, + reformatted_data, size); + +end: +err: + + av_free(reformatted_data); + return ret; +} + +static int mov_write_single_packet(AVFormatContext *s, AVPacket *pkt) +{ + MOVMuxContext *mov = s->priv_data; + MOVTrack *trk = &mov->tracks[pkt->stream_index]; + AVCodecParameters *par = trk->par; + int64_t frag_duration = 0; + int size = pkt->size; + + int ret = check_pkt(s, pkt); + if (ret < 0) + return ret; + + 
if (mov->flags & FF_MOV_FLAG_FRAG_DISCONT) { + int i; + for (i = 0; i < s->nb_streams; i++) + mov->tracks[i].frag_discont = 1; + mov->flags &= ~FF_MOV_FLAG_FRAG_DISCONT; + } + + if (!pkt->size) { + if (trk->start_dts == AV_NOPTS_VALUE && trk->frag_discont) { + trk->start_dts = pkt->dts; + if (pkt->pts != AV_NOPTS_VALUE) + trk->start_cts = pkt->pts - pkt->dts; + else + trk->start_cts = 0; + } + + if (trk->par->codec_id == AV_CODEC_ID_MP4ALS) { + int side_size = 0; + uint8_t *side = av_packet_get_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size); + if (side && side_size > 0 && (side_size != par->extradata_size || memcmp(side, par->extradata, side_size))) { + void *newextra = av_mallocz(side_size + AV_INPUT_BUFFER_PADDING_SIZE); + if (!newextra) + return AVERROR(ENOMEM); + av_free(par->extradata); + par->extradata = newextra; + memcpy(par->extradata, side, side_size); + par->extradata_size = side_size; + mov->need_rewrite_extradata = 1; + } + } + + return 0; /* Discard 0 sized packets */ + } + + if (trk->entry && pkt->stream_index < s->nb_streams) + frag_duration = av_rescale_q(pkt->dts - trk->cluster[0].dts, + s->streams[pkt->stream_index]->time_base, + AV_TIME_BASE_Q); + if ((mov->max_fragment_duration && + frag_duration >= mov->max_fragment_duration) || + (mov->max_fragment_size && mov->mdat_size + size >= mov->max_fragment_size) || + (mov->flags & FF_MOV_FLAG_FRAG_KEYFRAME && + par->codec_type == AVMEDIA_TYPE_VIDEO && + trk->entry && pkt->flags & AV_PKT_FLAG_KEY)) { + if (frag_duration >= mov->min_fragment_duration) { + // Set the duration of this track to line up with the next + // sample in this track. This avoids relying on AVPacket + // duration, but only helps for this particular track, not + // for the other ones that are flushed at the same time. 
+ trk->track_duration = pkt->dts - trk->start_dts; + if (pkt->pts != AV_NOPTS_VALUE) + trk->end_pts = pkt->pts; + else + trk->end_pts = pkt->dts; + trk->end_reliable = 1; + mov_auto_flush_fragment(s, 0); + } + } + + return ff_mov_write_packet(s, pkt); +} + +static int mov_write_subtitle_end_packet(AVFormatContext *s, + int stream_index, + int64_t dts) { + AVPacket end; + uint8_t data[2] = {0}; + int ret; + + av_init_packet(&end); + end.size = sizeof(data); + end.data = data; + end.pts = dts; + end.dts = dts; + end.duration = 0; + end.stream_index = stream_index; + + ret = mov_write_single_packet(s, &end); + av_packet_unref(&end); + + return ret; +} + +static int mov_write_packet(AVFormatContext *s, AVPacket *pkt) +{ + if (!pkt) { + mov_flush_fragment(s, 1); + return 1; + } else { + int i; + MOVMuxContext *mov = s->priv_data; + MOVTrack *trk = &mov->tracks[pkt->stream_index]; + + if (!pkt->size) + return mov_write_single_packet(s, pkt); /* Passthrough. */ + + /* + * Subtitles require special handling. + * + * 1) For full complaince, every track must have a sample at + * dts == 0, which is rarely true for subtitles. So, as soon + * as we see any packet with dts > 0, write an empty subtitle + * at dts == 0 for any subtitle track with no samples in it. + * + * 2) For each subtitle track, check if the current packet's + * dts is past the duration of the last subtitle sample. If + * so, we now need to write an end sample for that subtitle. + * + * This must be done conditionally to allow for subtitles that + * immediately replace each other, in which case an end sample + * is not needed, and is, in fact, actively harmful. + * + * 3) See mov_write_trailer for how the final end sample is + * handled. 
+ */ + for (i = 0; i < mov->nb_streams; i++) { + MOVTrack *trk = &mov->tracks[i]; + int ret; + + if (trk->par->codec_id == AV_CODEC_ID_MOV_TEXT && + trk->track_duration < pkt->dts && + (trk->entry == 0 || !trk->last_sample_is_subtitle_end)) { + ret = mov_write_subtitle_end_packet(s, i, trk->track_duration); + if (ret < 0) return ret; + trk->last_sample_is_subtitle_end = 1; + } + } + + if (trk->mode == MODE_MOV && trk->par->codec_type == AVMEDIA_TYPE_VIDEO) { + AVPacket *opkt = pkt; + int reshuffle_ret, ret; + if (trk->is_unaligned_qt_rgb) { + int64_t bpc = trk->par->bits_per_coded_sample != 15 ? trk->par->bits_per_coded_sample : 16; + int expected_stride = ((trk->par->width * bpc + 15) >> 4)*2; + reshuffle_ret = ff_reshuffle_raw_rgb(s, &pkt, trk->par, expected_stride); + if (reshuffle_ret < 0) + return reshuffle_ret; + } else + reshuffle_ret = 0; + if (trk->par->format == AV_PIX_FMT_PAL8 && !trk->pal_done) { + ret = ff_get_packet_palette(s, opkt, reshuffle_ret, trk->palette); + if (ret < 0) + goto fail; + if (ret) + trk->pal_done++; + } else if (trk->par->codec_id == AV_CODEC_ID_RAWVIDEO && + (trk->par->format == AV_PIX_FMT_GRAY8 || + trk->par->format == AV_PIX_FMT_MONOBLACK)) { + for (i = 0; i < pkt->size; i++) + pkt->data[i] = ~pkt->data[i]; + } + if (reshuffle_ret) { + ret = mov_write_single_packet(s, pkt); +fail: + if (reshuffle_ret) + av_packet_free(&pkt); + return ret; + } + } + + return mov_write_single_packet(s, pkt); + } +} + +// QuickTime chapters involve an additional text track with the chapter names +// as samples, and a tref pointing from the other tracks to the chapter one. 
+static int mov_create_chapter_track(AVFormatContext *s, int tracknum) +{ + AVIOContext *pb; + + MOVMuxContext *mov = s->priv_data; + MOVTrack *track = &mov->tracks[tracknum]; + AVPacket pkt = { .stream_index = tracknum, .flags = AV_PKT_FLAG_KEY }; + int i, len; + + track->mode = mov->mode; + track->tag = MKTAG('t','e','x','t'); + track->timescale = MOV_TIMESCALE; + track->par = avcodec_parameters_alloc(); + if (!track->par) + return AVERROR(ENOMEM); + track->par->codec_type = AVMEDIA_TYPE_SUBTITLE; +#if 0 + // These properties are required to make QT recognize the chapter track + uint8_t chapter_properties[43] = { 0, 0, 0, 0, 0, 0, 0, 1, }; + if (ff_alloc_extradata(track->par, sizeof(chapter_properties))) + return AVERROR(ENOMEM); + memcpy(track->par->extradata, chapter_properties, sizeof(chapter_properties)); +#else + if (avio_open_dyn_buf(&pb) >= 0) { + int size; + uint8_t *buf; + + /* Stub header (usually for Quicktime chapter track) */ + // TextSampleEntry + avio_wb32(pb, 0x01); // displayFlags + avio_w8(pb, 0x00); // horizontal justification + avio_w8(pb, 0x00); // vertical justification + avio_w8(pb, 0x00); // bgColourRed + avio_w8(pb, 0x00); // bgColourGreen + avio_w8(pb, 0x00); // bgColourBlue + avio_w8(pb, 0x00); // bgColourAlpha + // BoxRecord + avio_wb16(pb, 0x00); // defTextBoxTop + avio_wb16(pb, 0x00); // defTextBoxLeft + avio_wb16(pb, 0x00); // defTextBoxBottom + avio_wb16(pb, 0x00); // defTextBoxRight + // StyleRecord + avio_wb16(pb, 0x00); // startChar + avio_wb16(pb, 0x00); // endChar + avio_wb16(pb, 0x01); // fontID + avio_w8(pb, 0x00); // fontStyleFlags + avio_w8(pb, 0x00); // fontSize + avio_w8(pb, 0x00); // fgColourRed + avio_w8(pb, 0x00); // fgColourGreen + avio_w8(pb, 0x00); // fgColourBlue + avio_w8(pb, 0x00); // fgColourAlpha + // FontTableBox + avio_wb32(pb, 0x0D); // box size + ffio_wfourcc(pb, "ftab"); // box atom name + avio_wb16(pb, 0x01); // entry count + // FontRecord + avio_wb16(pb, 0x01); // font ID + avio_w8(pb, 0x00); // font 
name length + + if ((size = avio_close_dyn_buf(pb, &buf)) > 0) { + track->par->extradata = buf; + track->par->extradata_size = size; + } else { + av_freep(&buf); + } + } +#endif + + for (i = 0; i < s->nb_chapters; i++) { + AVChapter *c = s->chapters[i]; + AVDictionaryEntry *t; + + int64_t end = av_rescale_q(c->end, c->time_base, (AVRational){1,MOV_TIMESCALE}); + pkt.pts = pkt.dts = av_rescale_q(c->start, c->time_base, (AVRational){1,MOV_TIMESCALE}); + pkt.duration = end - pkt.dts; + + if ((t = av_dict_get(c->metadata, "title", NULL, 0))) { + static const char encd[12] = { + 0x00, 0x00, 0x00, 0x0C, + 'e', 'n', 'c', 'd', + 0x00, 0x00, 0x01, 0x00 }; + len = strlen(t->value); + pkt.size = len + 2 + 12; + pkt.data = av_malloc(pkt.size); + if (!pkt.data) + return AVERROR(ENOMEM); + AV_WB16(pkt.data, len); + memcpy(pkt.data + 2, t->value, len); + memcpy(pkt.data + len + 2, encd, sizeof(encd)); + ff_mov_write_packet(s, &pkt); + av_freep(&pkt.data); + } + } + + return 0; +} + + +static int mov_check_timecode_track(AVFormatContext *s, AVTimecode *tc, int src_index, const char *tcstr) +{ + int ret; + + /* compute the frame number */ + ret = av_timecode_init_from_string(tc, find_fps(s, s->streams[src_index]), tcstr, s); + return ret; +} + +static int mov_create_timecode_track(AVFormatContext *s, int index, int src_index, AVTimecode tc) +{ + int ret; + MOVMuxContext *mov = s->priv_data; + MOVTrack *track = &mov->tracks[index]; + AVStream *src_st = s->streams[src_index]; + AVPacket pkt = {.stream_index = index, .flags = AV_PKT_FLAG_KEY, .size = 4}; + AVRational rate = find_fps(s, src_st); + + /* tmcd track based on video stream */ + track->mode = mov->mode; + track->tag = MKTAG('t','m','c','d'); + track->src_track = src_index; + track->timescale = mov->tracks[src_index].timescale; + if (tc.flags & AV_TIMECODE_FLAG_DROPFRAME) + track->timecode_flags |= MOV_TIMECODE_FLAG_DROPFRAME; + + /* set st to src_st for metadata access*/ + track->st = src_st; + + /* encode context: tmcd data 
stream */ + track->par = avcodec_parameters_alloc(); + if (!track->par) + return AVERROR(ENOMEM); + track->par->codec_type = AVMEDIA_TYPE_DATA; + track->par->codec_tag = track->tag; + track->st->avg_frame_rate = av_inv_q(rate); + + /* the tmcd track just contains one packet with the frame number */ + pkt.data = av_malloc(pkt.size); + if (!pkt.data) + return AVERROR(ENOMEM); + AV_WB32(pkt.data, tc.start); + ret = ff_mov_write_packet(s, &pkt); + av_free(pkt.data); + return ret; +} + +/* + * st->disposition controls the "enabled" flag in the tkhd tag. + * QuickTime will not play a track if it is not enabled. So make sure + * that one track of each type (audio, video, subtitle) is enabled. + * + * Subtitles are special. For audio and video, setting "enabled" also + * makes the track "default" (i.e. it is rendered when played). For + * subtitles, an "enabled" subtitle is not rendered by default, but + * if no subtitle is enabled, the subtitle menu in QuickTime will be + * empty! + */ +static void enable_tracks(AVFormatContext *s) +{ + MOVMuxContext *mov = s->priv_data; + int i; + int enabled[AVMEDIA_TYPE_NB]; + int first[AVMEDIA_TYPE_NB]; + + for (i = 0; i < AVMEDIA_TYPE_NB; i++) { + enabled[i] = 0; + first[i] = -1; + } + + for (i = 0; i < s->nb_streams; i++) { + AVStream *st = s->streams[i]; + + if (st->codecpar->codec_type <= AVMEDIA_TYPE_UNKNOWN || + st->codecpar->codec_type >= AVMEDIA_TYPE_NB) + continue; + + if (first[st->codecpar->codec_type] < 0) + first[st->codecpar->codec_type] = i; + if (st->disposition & AV_DISPOSITION_DEFAULT) { + mov->tracks[i].flags |= MOV_TRACK_ENABLED; + enabled[st->codecpar->codec_type]++; + } + } + + for (i = 0; i < AVMEDIA_TYPE_NB; i++) { + switch (i) { + case AVMEDIA_TYPE_VIDEO: + case AVMEDIA_TYPE_AUDIO: + case AVMEDIA_TYPE_SUBTITLE: + if (enabled[i] > 1) + mov->per_stream_grouping = 1; + if (!enabled[i] && first[i] >= 0) + mov->tracks[first[i]].flags |= MOV_TRACK_ENABLED; + break; + } + } +} + +static void mov_free(AVFormatContext 
*s) +{ + MOVMuxContext *mov = s->priv_data; + int i; + + if (mov->chapter_track) { + if (mov->tracks[mov->chapter_track].par) + av_freep(&mov->tracks[mov->chapter_track].par->extradata); + av_freep(&mov->tracks[mov->chapter_track].par); + } + + for (i = 0; i < mov->nb_streams; i++) { + if (mov->tracks[i].tag == MKTAG('r','t','p',' ')) + ff_mov_close_hinting(&mov->tracks[i]); + else if (mov->tracks[i].tag == MKTAG('t','m','c','d') && mov->nb_meta_tmcd) + av_freep(&mov->tracks[i].par); + av_freep(&mov->tracks[i].cluster); + av_freep(&mov->tracks[i].frag_info); + + if (mov->tracks[i].vos_len) + av_freep(&mov->tracks[i].vos_data); + + ff_mov_cenc_free(&mov->tracks[i].cenc); + } + + av_freep(&mov->tracks); +} + +static uint32_t rgb_to_yuv(uint32_t rgb) +{ + uint8_t r, g, b; + int y, cb, cr; + + r = (rgb >> 16) & 0xFF; + g = (rgb >> 8) & 0xFF; + b = (rgb ) & 0xFF; + + y = av_clip_uint8(( 16000 + 257 * r + 504 * g + 98 * b)/1000); + cb = av_clip_uint8((128000 - 148 * r - 291 * g + 439 * b)/1000); + cr = av_clip_uint8((128000 + 439 * r - 368 * g - 71 * b)/1000); + + return (y << 16) | (cr << 8) | cb; +} + +static int mov_create_dvd_sub_decoder_specific_info(MOVTrack *track, + AVStream *st) +{ + int i, width = 720, height = 480; + int have_palette = 0, have_size = 0; + uint32_t palette[16]; + char *cur = st->codecpar->extradata; + + while (cur && *cur) { + if (strncmp("palette:", cur, 8) == 0) { + int i, count; + count = sscanf(cur + 8, + "%06"PRIx32", %06"PRIx32", %06"PRIx32", %06"PRIx32", " + "%06"PRIx32", %06"PRIx32", %06"PRIx32", %06"PRIx32", " + "%06"PRIx32", %06"PRIx32", %06"PRIx32", %06"PRIx32", " + "%06"PRIx32", %06"PRIx32", %06"PRIx32", %06"PRIx32"", + &palette[ 0], &palette[ 1], &palette[ 2], &palette[ 3], + &palette[ 4], &palette[ 5], &palette[ 6], &palette[ 7], + &palette[ 8], &palette[ 9], &palette[10], &palette[11], + &palette[12], &palette[13], &palette[14], &palette[15]); + + for (i = 0; i < count; i++) { + palette[i] = rgb_to_yuv(palette[i]); + } + 
have_palette = 1; + } else if (!strncmp("size:", cur, 5)) { + sscanf(cur + 5, "%dx%d", &width, &height); + have_size = 1; + } + if (have_palette && have_size) + break; + cur += strcspn(cur, "\n\r"); + cur += strspn(cur, "\n\r"); + } + if (have_palette) { + track->vos_data = av_malloc(16*4); + if (!track->vos_data) + return AVERROR(ENOMEM); + for (i = 0; i < 16; i++) { + AV_WB32(track->vos_data + i * 4, palette[i]); + } + track->vos_len = 16 * 4; + } + st->codecpar->width = width; + st->codecpar->height = track->height = height; + + return 0; +} + +static int mov_init(AVFormatContext *s) +{ + MOVMuxContext *mov = s->priv_data; + AVDictionaryEntry *global_tcr = av_dict_get(s->metadata, "timecode", NULL, 0); + int i, ret, hint_track = 0, tmcd_track = 0; + + mov->fc = s; + + /* Default mode == MP4 */ + mov->mode = MODE_MP4; + + if (s->oformat) { + if (!strcmp("3gp", s->oformat->name)) mov->mode = MODE_3GP; + else if (!strcmp("3g2", s->oformat->name)) mov->mode = MODE_3GP|MODE_3G2; + else if (!strcmp("mov", s->oformat->name)) mov->mode = MODE_MOV; + else if (!strcmp("psp", s->oformat->name)) mov->mode = MODE_PSP; + else if (!strcmp("ipod",s->oformat->name)) mov->mode = MODE_IPOD; + else if (!strcmp("ismv",s->oformat->name)) mov->mode = MODE_ISM; + else if (!strcmp("f4v", s->oformat->name)) mov->mode = MODE_F4V; + } + + if (mov->flags & FF_MOV_FLAG_DELAY_MOOV) + mov->flags |= FF_MOV_FLAG_EMPTY_MOOV; + + /* Set the FRAGMENT flag if any of the fragmentation methods are + * enabled. 
*/ + if (mov->max_fragment_duration || mov->max_fragment_size || + mov->flags & (FF_MOV_FLAG_EMPTY_MOOV | + FF_MOV_FLAG_FRAG_KEYFRAME | + FF_MOV_FLAG_FRAG_CUSTOM)) + mov->flags |= FF_MOV_FLAG_FRAGMENT; + + /* Set other implicit flags immediately */ + if (mov->mode == MODE_ISM) + mov->flags |= FF_MOV_FLAG_EMPTY_MOOV | FF_MOV_FLAG_SEPARATE_MOOF | + FF_MOV_FLAG_FRAGMENT; + if (mov->flags & FF_MOV_FLAG_DASH) + mov->flags |= FF_MOV_FLAG_FRAGMENT | FF_MOV_FLAG_EMPTY_MOOV | + FF_MOV_FLAG_DEFAULT_BASE_MOOF; + + if (mov->flags & FF_MOV_FLAG_EMPTY_MOOV && s->flags & AVFMT_FLAG_AUTO_BSF) { + av_log(s, AV_LOG_VERBOSE, "Empty MOOV enabled; disabling automatic bitstream filtering\n"); + s->flags &= ~AVFMT_FLAG_AUTO_BSF; + } + + if (mov->flags & FF_MOV_FLAG_FASTSTART) { + mov->reserved_moov_size = -1; + } + + if (mov->use_editlist < 0) { + mov->use_editlist = 1; + if (mov->flags & FF_MOV_FLAG_FRAGMENT && + !(mov->flags & FF_MOV_FLAG_DELAY_MOOV)) { + // If we can avoid needing an edit list by shifting the + // tracks, prefer that over (trying to) write edit lists + // in fragmented output. + if (s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_AUTO || + s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_MAKE_ZERO) + mov->use_editlist = 0; + } + } + if (mov->flags & FF_MOV_FLAG_EMPTY_MOOV && + !(mov->flags & FF_MOV_FLAG_DELAY_MOOV) && mov->use_editlist) + av_log(s, AV_LOG_WARNING, "No meaningful edit list will be written when using empty_moov without delay_moov\n"); + + if (!mov->use_editlist && s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_AUTO) + s->avoid_negative_ts = AVFMT_AVOID_NEG_TS_MAKE_ZERO; + + /* Clear the omit_tfhd_offset flag if default_base_moof is set; + * if the latter is set that's enough and omit_tfhd_offset doesn't + * add anything extra on top of that. 
*/ + if (mov->flags & FF_MOV_FLAG_OMIT_TFHD_OFFSET && + mov->flags & FF_MOV_FLAG_DEFAULT_BASE_MOOF) + mov->flags &= ~FF_MOV_FLAG_OMIT_TFHD_OFFSET; + + if (mov->frag_interleave && + mov->flags & (FF_MOV_FLAG_OMIT_TFHD_OFFSET | FF_MOV_FLAG_SEPARATE_MOOF)) { + av_log(s, AV_LOG_ERROR, + "Sample interleaving in fragments is mutually exclusive with " + "omit_tfhd_offset and separate_moof\n"); + return AVERROR(EINVAL); + } + + /* Non-seekable output is ok if using fragmentation. If ism_lookahead + * is enabled, we don't support non-seekable output at all. */ + if (!s->pb->seekable && + (!(mov->flags & FF_MOV_FLAG_FRAGMENT) || mov->ism_lookahead)) { + av_log(s, AV_LOG_ERROR, "muxer does not support non seekable output\n"); + return AVERROR(EINVAL); + } + + mov->nb_streams = s->nb_streams; + if (mov->mode & (MODE_MP4|MODE_MOV|MODE_IPOD) && s->nb_chapters) + mov->chapter_track = mov->nb_streams++; + + if (mov->flags & FF_MOV_FLAG_RTP_HINT) { + /* Add hint tracks for each audio and video stream */ + hint_track = mov->nb_streams; + for (i = 0; i < s->nb_streams; i++) { + AVStream *st = s->streams[i]; + if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || + st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { + mov->nb_streams++; + } + } + } + + if ( mov->write_tmcd == -1 && (mov->mode == MODE_MOV || mov->mode == MODE_MP4) + || mov->write_tmcd == 1) { + tmcd_track = mov->nb_streams; + + /* +1 tmcd track for each video stream with a timecode */ + for (i = 0; i < s->nb_streams; i++) { + AVStream *st = s->streams[i]; + AVDictionaryEntry *t = global_tcr; + if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && + (t || (t=av_dict_get(st->metadata, "timecode", NULL, 0)))) { + AVTimecode tc; + ret = mov_check_timecode_track(s, &tc, i, t->value); + if (ret >= 0) + mov->nb_meta_tmcd++; + } + } + + /* check if there is already a tmcd track to remux */ + if (mov->nb_meta_tmcd) { + for (i = 0; i < s->nb_streams; i++) { + AVStream *st = s->streams[i]; + if (st->codecpar->codec_tag == 
MKTAG('t','m','c','d')) { + av_log(s, AV_LOG_WARNING, "You requested a copy of the original timecode track " + "so timecode metadata are now ignored\n"); + mov->nb_meta_tmcd = 0; + } + } + } + + mov->nb_streams += mov->nb_meta_tmcd; + } + + // Reserve an extra stream for chapters for the case where chapters + // are written in the trailer + mov->tracks = av_mallocz_array((mov->nb_streams + 1), sizeof(*mov->tracks)); + if (!mov->tracks) + return AVERROR(ENOMEM); + + if (mov->encryption_scheme_str != NULL && strcmp(mov->encryption_scheme_str, "none") != 0) { + if (strcmp(mov->encryption_scheme_str, "cenc-aes-ctr") == 0) { + mov->encryption_scheme = MOV_ENC_CENC_AES_CTR; + + if (mov->encryption_key_len != AES_CTR_KEY_SIZE) { + av_log(s, AV_LOG_ERROR, "Invalid encryption key len %d expected %d\n", + mov->encryption_key_len, AES_CTR_KEY_SIZE); + return AVERROR(EINVAL); + } + + if (mov->encryption_kid_len != CENC_KID_SIZE) { + av_log(s, AV_LOG_ERROR, "Invalid encryption kid len %d expected %d\n", + mov->encryption_kid_len, CENC_KID_SIZE); + return AVERROR(EINVAL); + } + } else { + av_log(s, AV_LOG_ERROR, "unsupported encryption scheme %s\n", + mov->encryption_scheme_str); + return AVERROR(EINVAL); + } + } + + for (i = 0; i < s->nb_streams; i++) { + AVStream *st= s->streams[i]; + MOVTrack *track= &mov->tracks[i]; + AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL,0); + + track->st = st; + track->par = st->codecpar; + track->language = ff_mov_iso639_to_lang(lang?lang->value:"und", mov->mode!=MODE_MOV); + if (track->language < 0) + track->language = 0; + track->mode = mov->mode; + track->tag = mov_find_codec_tag(s, track); + if (!track->tag) { + av_log(s, AV_LOG_ERROR, "Could not find tag for codec %s in stream #%d, " + "codec not currently supported in container\n", + avcodec_get_name(st->codecpar->codec_id), i); + return AVERROR(EINVAL); + } + /* If hinting of this track is enabled by a later hint track, + * this is updated. 
*/ + track->hint_track = -1; + track->start_dts = AV_NOPTS_VALUE; + track->start_cts = AV_NOPTS_VALUE; + track->end_pts = AV_NOPTS_VALUE; + if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { + if (track->tag == MKTAG('m','x','3','p') || track->tag == MKTAG('m','x','3','n') || + track->tag == MKTAG('m','x','4','p') || track->tag == MKTAG('m','x','4','n') || + track->tag == MKTAG('m','x','5','p') || track->tag == MKTAG('m','x','5','n')) { + if (st->codecpar->width != 720 || (st->codecpar->height != 608 && st->codecpar->height != 512)) { + av_log(s, AV_LOG_ERROR, "D-10/IMX must use 720x608 or 720x512 video resolution\n"); + return AVERROR(EINVAL); + } + track->height = track->tag >> 24 == 'n' ? 486 : 576; + } + if (mov->video_track_timescale) { + track->timescale = mov->video_track_timescale; + } else { + track->timescale = st->time_base.den; + while(track->timescale < 10000) + track->timescale *= 2; + } + if (st->codecpar->width > 65535 || st->codecpar->height > 65535) { + av_log(s, AV_LOG_ERROR, "Resolution %dx%d too large for mov/mp4\n", st->codecpar->width, st->codecpar->height); + return AVERROR(EINVAL); + } + if (track->mode == MODE_MOV && track->timescale > 100000) + av_log(s, AV_LOG_WARNING, + "WARNING codec timebase is very high. If duration is too long,\n" + "file may not be playable by quicktime. 
Specify a shorter timebase\n" + "or choose different container.\n"); + if (track->mode == MODE_MOV && + track->par->codec_id == AV_CODEC_ID_RAWVIDEO && + track->tag == MKTAG('r','a','w',' ')) { + enum AVPixelFormat pix_fmt = track->par->format; + if (pix_fmt == AV_PIX_FMT_NONE && track->par->bits_per_coded_sample == 1) + pix_fmt = AV_PIX_FMT_MONOWHITE; + track->is_unaligned_qt_rgb = + pix_fmt == AV_PIX_FMT_RGB24 || + pix_fmt == AV_PIX_FMT_BGR24 || + pix_fmt == AV_PIX_FMT_PAL8 || + pix_fmt == AV_PIX_FMT_GRAY8 || + pix_fmt == AV_PIX_FMT_MONOWHITE || + pix_fmt == AV_PIX_FMT_MONOBLACK; + } + if (track->mode == MODE_MP4 && + track->par->codec_id == AV_CODEC_ID_VP9) { + if (s->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) { + av_log(s, AV_LOG_ERROR, + "VP9 in MP4 support is experimental, add " + "'-strict %d' if you want to use it.\n", + FF_COMPLIANCE_EXPERIMENTAL); + return AVERROR_EXPERIMENTAL; + } + } + } else if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { + track->timescale = st->codecpar->sample_rate; + if (!st->codecpar->frame_size && !av_get_bits_per_sample(st->codecpar->codec_id)) { + av_log(s, AV_LOG_WARNING, "track %d: codec frame size is not set\n", i); + track->audio_vbr = 1; + }else if (st->codecpar->codec_id == AV_CODEC_ID_ADPCM_MS || + st->codecpar->codec_id == AV_CODEC_ID_ADPCM_IMA_WAV || + st->codecpar->codec_id == AV_CODEC_ID_ILBC){ + if (!st->codecpar->block_align) { + av_log(s, AV_LOG_ERROR, "track %d: codec block align is not set for adpcm\n", i); + return AVERROR(EINVAL); + } + track->sample_size = st->codecpar->block_align; + }else if (st->codecpar->frame_size > 1){ /* assume compressed audio */ + track->audio_vbr = 1; + }else{ + track->sample_size = (av_get_bits_per_sample(st->codecpar->codec_id) >> 3) * st->codecpar->channels; + } + if (st->codecpar->codec_id == AV_CODEC_ID_ILBC || + st->codecpar->codec_id == AV_CODEC_ID_ADPCM_IMA_QT) { + track->audio_vbr = 1; + } + if (track->mode != MODE_MOV && + track->par->codec_id == 
AV_CODEC_ID_MP3 && track->timescale < 16000) { + if (s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) { + av_log(s, AV_LOG_ERROR, "track %d: muxing mp3 at %dhz is not standard, to mux anyway set strict to -1\n", + i, track->par->sample_rate); + return AVERROR(EINVAL); + } else { + av_log(s, AV_LOG_WARNING, "track %d: muxing mp3 at %dhz is not standard in MP4\n", + i, track->par->sample_rate); + } + } + } else if (st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) { + track->timescale = st->time_base.den; + } else if (st->codecpar->codec_type == AVMEDIA_TYPE_DATA) { + track->timescale = st->time_base.den; + } else { + track->timescale = MOV_TIMESCALE; + } + if (!track->height) + track->height = st->codecpar->height; + /* The ism specific timescale isn't mandatory, but is assumed by + * some tools, such as mp4split. */ + if (mov->mode == MODE_ISM) + track->timescale = 10000000; + + avpriv_set_pts_info(st, 64, 1, track->timescale); + + if (mov->encryption_scheme == MOV_ENC_CENC_AES_CTR) { + ret = ff_mov_cenc_init(&track->cenc, mov->encryption_key, + track->par->codec_id == AV_CODEC_ID_H264, s->flags & AVFMT_FLAG_BITEXACT); + if (ret) + return ret; + } + } + + enable_tracks(s); + return 0; +} + +static int mov_write_header(AVFormatContext *s) +{ + AVIOContext *pb = s->pb; + MOVMuxContext *mov = s->priv_data; + AVDictionaryEntry *t, *global_tcr = av_dict_get(s->metadata, "timecode", NULL, 0); + int i, ret, hint_track = 0, tmcd_track = 0, nb_tracks = s->nb_streams; + + if (mov->mode & (MODE_MP4|MODE_MOV|MODE_IPOD) && s->nb_chapters) + nb_tracks++; + + if (mov->flags & FF_MOV_FLAG_RTP_HINT) { + /* Add hint tracks for each audio and video stream */ + hint_track = nb_tracks; + for (i = 0; i < s->nb_streams; i++) { + AVStream *st = s->streams[i]; + if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || + st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { + nb_tracks++; + } + } + } + + if (mov->mode == MODE_MOV || mov->mode == MODE_MP4) + tmcd_track = nb_tracks; + + for (i = 
0; i < s->nb_streams; i++) { + int j; + AVStream *st= s->streams[i]; + MOVTrack *track= &mov->tracks[i]; + + /* copy extradata if it exists */ + if (st->codecpar->extradata_size) { + if (st->codecpar->codec_id == AV_CODEC_ID_DVD_SUBTITLE) + mov_create_dvd_sub_decoder_specific_info(track, st); + else if (!TAG_IS_AVCI(track->tag) && st->codecpar->codec_id != AV_CODEC_ID_DNXHD) { + track->vos_len = st->codecpar->extradata_size; + track->vos_data = av_malloc(track->vos_len); + if (!track->vos_data) { + return AVERROR(ENOMEM); + } + memcpy(track->vos_data, st->codecpar->extradata, track->vos_len); + } + } + + if (st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO || + track->par->channel_layout != AV_CH_LAYOUT_MONO) + continue; + + for (j = 0; j < s->nb_streams; j++) { + AVStream *stj= s->streams[j]; + MOVTrack *trackj= &mov->tracks[j]; + if (j == i) + continue; + + if (stj->codecpar->codec_type != AVMEDIA_TYPE_AUDIO || + trackj->par->channel_layout != AV_CH_LAYOUT_MONO || + trackj->language != track->language || + trackj->tag != track->tag + ) + continue; + track->multichannel_as_mono++; + } + } + + if (!(mov->flags & FF_MOV_FLAG_DELAY_MOOV)) { + if ((ret = mov_write_identification(pb, s)) < 0) + return ret; + } + + if (mov->reserved_moov_size){ + mov->reserved_header_pos = avio_tell(pb); + if (mov->reserved_moov_size > 0) + avio_skip(pb, mov->reserved_moov_size); + } + + if (mov->flags & FF_MOV_FLAG_FRAGMENT) { + /* If no fragmentation options have been set, set a default. 
*/ + if (!(mov->flags & (FF_MOV_FLAG_FRAG_KEYFRAME | + FF_MOV_FLAG_FRAG_CUSTOM)) && + !mov->max_fragment_duration && !mov->max_fragment_size) + mov->flags |= FF_MOV_FLAG_FRAG_KEYFRAME; + } else { + if (mov->flags & FF_MOV_FLAG_FASTSTART) + mov->reserved_header_pos = avio_tell(pb); + mov_write_mdat_tag(pb, mov); + } + + ff_parse_creation_time_metadata(s, &mov->time, 1); + if (mov->time) + mov->time += 0x7C25B080; // 1970 based -> 1904 based + + if (mov->chapter_track) + if ((ret = mov_create_chapter_track(s, mov->chapter_track)) < 0) + return ret; + + if (mov->flags & FF_MOV_FLAG_RTP_HINT) { + /* Initialize the hint tracks for each audio and video stream */ + for (i = 0; i < s->nb_streams; i++) { + AVStream *st = s->streams[i]; + if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || + st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { + if ((ret = ff_mov_init_hinting(s, hint_track, i)) < 0) + return ret; + hint_track++; + } + } + } + + if (mov->nb_meta_tmcd) { + /* Initialize the tmcd tracks */ + for (i = 0; i < s->nb_streams; i++) { + AVStream *st = s->streams[i]; + t = global_tcr; + + if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { + AVTimecode tc; + if (!t) + t = av_dict_get(st->metadata, "timecode", NULL, 0); + if (!t) + continue; + if (mov_check_timecode_track(s, &tc, i, t->value) < 0) + continue; + if ((ret = mov_create_timecode_track(s, tmcd_track, i, tc)) < 0) + return ret; + tmcd_track++; + } + } + } + + avio_flush(pb); + + if (mov->flags & FF_MOV_FLAG_ISML) + mov_write_isml_manifest(pb, mov, s); + + if (mov->flags & FF_MOV_FLAG_EMPTY_MOOV && + !(mov->flags & FF_MOV_FLAG_DELAY_MOOV)) { + if ((ret = mov_write_moov_tag(pb, mov, s)) < 0) + return ret; + avio_flush(pb); + mov->moov_written = 1; + if (mov->flags & FF_MOV_FLAG_GLOBAL_SIDX) + mov->reserved_header_pos = avio_tell(pb); + } + + return 0; +} + +static int get_moov_size(AVFormatContext *s) +{ + int ret; + AVIOContext *moov_buf; + MOVMuxContext *mov = s->priv_data; + + if ((ret = 
ffio_open_null_buf(&moov_buf)) < 0) + return ret; + if ((ret = mov_write_moov_tag(moov_buf, mov, s)) < 0) + return ret; + return ffio_close_null_buf(moov_buf); +} + +static int get_sidx_size(AVFormatContext *s) +{ + int ret; + AVIOContext *buf; + MOVMuxContext *mov = s->priv_data; + + if ((ret = ffio_open_null_buf(&buf)) < 0) + return ret; + mov_write_sidx_tags(buf, mov, -1, 0); + return ffio_close_null_buf(buf); +} + +/* + * This function gets the moov size if moved to the top of the file: the chunk + * offset table can switch between stco (32-bit entries) to co64 (64-bit + * entries) when the moov is moved to the beginning, so the size of the moov + * would change. It also updates the chunk offset tables. + */ +static int compute_moov_size(AVFormatContext *s) +{ + int i, moov_size, moov_size2; + MOVMuxContext *mov = s->priv_data; + + moov_size = get_moov_size(s); + if (moov_size < 0) + return moov_size; + + for (i = 0; i < mov->nb_streams; i++) + mov->tracks[i].data_offset += moov_size; + + moov_size2 = get_moov_size(s); + if (moov_size2 < 0) + return moov_size2; + + /* if the size changed, we just switched from stco to co64 and need to + * update the offsets */ + if (moov_size2 != moov_size) + for (i = 0; i < mov->nb_streams; i++) + mov->tracks[i].data_offset += moov_size2 - moov_size; + + return moov_size2; +} + +static int compute_sidx_size(AVFormatContext *s) +{ + int i, sidx_size; + MOVMuxContext *mov = s->priv_data; + + sidx_size = get_sidx_size(s); + if (sidx_size < 0) + return sidx_size; + + for (i = 0; i < mov->nb_streams; i++) + mov->tracks[i].data_offset += sidx_size; + + return sidx_size; +} + +static int shift_data(AVFormatContext *s) +{ + int ret = 0, moov_size; + MOVMuxContext *mov = s->priv_data; + int64_t pos, pos_end = avio_tell(s->pb); + uint8_t *buf, *read_buf[2]; + int read_buf_id = 0; + int read_size[2]; + AVIOContext *read_pb; + + if (mov->flags & FF_MOV_FLAG_FRAGMENT) + moov_size = compute_sidx_size(s); + else + moov_size = 
compute_moov_size(s); + if (moov_size < 0) + return moov_size; + + buf = av_malloc(moov_size * 2); + if (!buf) + return AVERROR(ENOMEM); + read_buf[0] = buf; + read_buf[1] = buf + moov_size; + + /* Shift the data: the AVIO context of the output can only be used for + * writing, so we re-open the same output, but for reading. It also avoids + * a read/seek/write/seek back and forth. */ + avio_flush(s->pb); + ret = s->io_open(s, &read_pb, s->filename, AVIO_FLAG_READ, NULL); + if (ret < 0) { + av_log(s, AV_LOG_ERROR, "Unable to re-open %s output file for " + "the second pass (faststart)\n", s->filename); + goto end; + } + + /* mark the end of the shift to up to the last data we wrote, and get ready + * for writing */ + pos_end = avio_tell(s->pb); + avio_seek(s->pb, mov->reserved_header_pos + moov_size, SEEK_SET); + + /* start reading at where the new moov will be placed */ + avio_seek(read_pb, mov->reserved_header_pos, SEEK_SET); + pos = avio_tell(read_pb); + +#define READ_BLOCK do { \ + read_size[read_buf_id] = avio_read(read_pb, read_buf[read_buf_id], moov_size); \ + read_buf_id ^= 1; \ +} while (0) + + /* shift data by chunk of at most moov_size */ + READ_BLOCK; + do { + int n; + READ_BLOCK; + n = read_size[read_buf_id]; + if (n <= 0) + break; + avio_write(s->pb, read_buf[read_buf_id], n); + pos += n; + } while (pos < pos_end); + ff_format_io_close(s, &read_pb); + +end: + av_free(buf); + return ret; +} + +static int mov_write_trailer(AVFormatContext *s) +{ + MOVMuxContext *mov = s->priv_data; + AVIOContext *pb = s->pb; + int res = 0; + int i; + int64_t moov_pos; + + if (mov->need_rewrite_extradata) { + for (i = 0; i < s->nb_streams; i++) { + MOVTrack *track = &mov->tracks[i]; + AVCodecParameters *par = track->par; + + track->vos_len = par->extradata_size; + track->vos_data = av_malloc(track->vos_len); + if (!track->vos_data) + return AVERROR(ENOMEM); + memcpy(track->vos_data, par->extradata, track->vos_len); + } + mov->need_rewrite_extradata = 0; + } + + /* + * 
Before actually writing the trailer, make sure that there are no + * dangling subtitles, that need a terminating sample. + */ + for (i = 0; i < mov->nb_streams; i++) { + MOVTrack *trk = &mov->tracks[i]; + if (trk->par->codec_id == AV_CODEC_ID_MOV_TEXT && + !trk->last_sample_is_subtitle_end) { + mov_write_subtitle_end_packet(s, i, trk->track_duration); + trk->last_sample_is_subtitle_end = 1; + } + } + + // If there were no chapters when the header was written, but there + // are chapters now, write them in the trailer. This only works + // when we are not doing fragments. + if (!mov->chapter_track && !(mov->flags & FF_MOV_FLAG_FRAGMENT)) { + if (mov->mode & (MODE_MP4|MODE_MOV|MODE_IPOD) && s->nb_chapters) { + mov->chapter_track = mov->nb_streams++; + if ((res = mov_create_chapter_track(s, mov->chapter_track)) < 0) + return res; + } + } + + if (!(mov->flags & FF_MOV_FLAG_FRAGMENT)) { + moov_pos = avio_tell(pb); + + /* Write size of mdat tag */ + if (mov->mdat_size + 8 <= UINT32_MAX) { + avio_seek(pb, mov->mdat_pos, SEEK_SET); + avio_wb32(pb, mov->mdat_size + 8); + } else { + /* overwrite 'wide' placeholder atom */ + avio_seek(pb, mov->mdat_pos - 8, SEEK_SET); + /* special value: real atom size will be 64 bit value after + * tag field */ + avio_wb32(pb, 1); + ffio_wfourcc(pb, "mdat"); + avio_wb64(pb, mov->mdat_size + 16); + } + avio_seek(pb, mov->reserved_moov_size > 0 ? 
mov->reserved_header_pos : moov_pos, SEEK_SET); + + if (mov->flags & FF_MOV_FLAG_FASTSTART) { + av_log(s, AV_LOG_INFO, "Starting second pass: moving the moov atom to the beginning of the file\n"); + res = shift_data(s); + if (res < 0) + return res; + avio_seek(pb, mov->reserved_header_pos, SEEK_SET); + if ((res = mov_write_moov_tag(pb, mov, s)) < 0) + return res; + } else if (mov->reserved_moov_size > 0) { + int64_t size; + if ((res = mov_write_moov_tag(pb, mov, s)) < 0) + return res; + size = mov->reserved_moov_size - (avio_tell(pb) - mov->reserved_header_pos); + if (size < 8){ + av_log(s, AV_LOG_ERROR, "reserved_moov_size is too small, needed %"PRId64" additional\n", 8-size); + return AVERROR(EINVAL); + } + avio_wb32(pb, size); + ffio_wfourcc(pb, "free"); + ffio_fill(pb, 0, size - 8); + avio_seek(pb, moov_pos, SEEK_SET); + } else { + if ((res = mov_write_moov_tag(pb, mov, s)) < 0) + return res; + } + res = 0; + } else { + mov_auto_flush_fragment(s, 1); + for (i = 0; i < mov->nb_streams; i++) + mov->tracks[i].data_offset = 0; + if (mov->flags & FF_MOV_FLAG_GLOBAL_SIDX) { + int64_t end; + av_log(s, AV_LOG_INFO, "Starting second pass: inserting sidx atoms\n"); + res = shift_data(s); + if (res < 0) + return res; + end = avio_tell(pb); + avio_seek(pb, mov->reserved_header_pos, SEEK_SET); + mov_write_sidx_tags(pb, mov, -1, 0); + avio_seek(pb, end, SEEK_SET); + avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_TRAILER); + mov_write_mfra_tag(pb, mov); + } else { + avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_TRAILER); + mov_write_mfra_tag(pb, mov); + } + } + + return res; +} + +static int mov_check_bitstream(struct AVFormatContext *s, const AVPacket *pkt) +{ + int ret = 1; + AVStream *st = s->streams[pkt->stream_index]; + + if (st->codecpar->codec_id == AV_CODEC_ID_AAC) { + if (pkt->size > 2 && (AV_RB16(pkt->data) & 0xfff0) == 0xfff0) + ret = ff_stream_add_bitstream_filter(st, "aac_adtstoasc", NULL); + } + + return ret; +} + +#if CONFIG_MOV_MUXER 
+MOV_CLASS(mov) +AVOutputFormat ff_mov_muxer = { + .name = "mov", + .long_name = NULL_IF_CONFIG_SMALL("QuickTime / MOV"), + .extensions = "mov", + .priv_data_size = sizeof(MOVMuxContext), + .audio_codec = AV_CODEC_ID_AAC, + .video_codec = CONFIG_LIBX264_ENCODER ? + AV_CODEC_ID_H264 : AV_CODEC_ID_MPEG4, + .init = mov_init, + .write_header = mov_write_header, + .write_packet = mov_write_packet, + .write_trailer = mov_write_trailer, + .deinit = mov_free, + .flags = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH | AVFMT_TS_NEGATIVE, + .codec_tag = (const AVCodecTag* const []){ + ff_codec_movvideo_tags, ff_codec_movaudio_tags, 0 + }, + .check_bitstream = mov_check_bitstream, + .priv_class = &mov_muxer_class, +}; +#endif +#if CONFIG_TGP_MUXER +MOV_CLASS(tgp) +AVOutputFormat ff_tgp_muxer = { + .name = "3gp", + .long_name = NULL_IF_CONFIG_SMALL("3GP (3GPP file format)"), + .extensions = "3gp", + .priv_data_size = sizeof(MOVMuxContext), + .audio_codec = AV_CODEC_ID_AMR_NB, + .video_codec = AV_CODEC_ID_H263, + .init = mov_init, + .write_header = mov_write_header, + .write_packet = mov_write_packet, + .write_trailer = mov_write_trailer, + .deinit = mov_free, + .flags = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH | AVFMT_TS_NEGATIVE, + .codec_tag = (const AVCodecTag* const []){ codec_3gp_tags, 0 }, + .check_bitstream = mov_check_bitstream, + .priv_class = &tgp_muxer_class, +}; +#endif +#if CONFIG_MP4_MUXER +MOV_CLASS(mp4) +AVOutputFormat ff_mp4_muxer = { + .name = "mp4", + .long_name = NULL_IF_CONFIG_SMALL("MP4 (MPEG-4 Part 14)"), + .mime_type = "video/mp4", + .extensions = "mp4", + .priv_data_size = sizeof(MOVMuxContext), + .audio_codec = AV_CODEC_ID_AAC, + .video_codec = CONFIG_LIBX264_ENCODER ? 
+ AV_CODEC_ID_H264 : AV_CODEC_ID_MPEG4, + .init = mov_init, + .write_header = mov_write_header, + .write_packet = mov_write_packet, + .write_trailer = mov_write_trailer, + .deinit = mov_free, + .flags = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH | AVFMT_TS_NEGATIVE, + .codec_tag = (const AVCodecTag* const []){ ff_mp4_obj_type, 0 }, + .check_bitstream = mov_check_bitstream, + .priv_class = &mp4_muxer_class, +}; +#endif +#if CONFIG_PSP_MUXER +MOV_CLASS(psp) +AVOutputFormat ff_psp_muxer = { + .name = "psp", + .long_name = NULL_IF_CONFIG_SMALL("PSP MP4 (MPEG-4 Part 14)"), + .extensions = "mp4,psp", + .priv_data_size = sizeof(MOVMuxContext), + .audio_codec = AV_CODEC_ID_AAC, + .video_codec = CONFIG_LIBX264_ENCODER ? + AV_CODEC_ID_H264 : AV_CODEC_ID_MPEG4, + .init = mov_init, + .write_header = mov_write_header, + .write_packet = mov_write_packet, + .write_trailer = mov_write_trailer, + .deinit = mov_free, + .flags = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH | AVFMT_TS_NEGATIVE, + .codec_tag = (const AVCodecTag* const []){ ff_mp4_obj_type, 0 }, + .check_bitstream = mov_check_bitstream, + .priv_class = &psp_muxer_class, +}; +#endif +#if CONFIG_TG2_MUXER +MOV_CLASS(tg2) +AVOutputFormat ff_tg2_muxer = { + .name = "3g2", + .long_name = NULL_IF_CONFIG_SMALL("3GP2 (3GPP2 file format)"), + .extensions = "3g2", + .priv_data_size = sizeof(MOVMuxContext), + .audio_codec = AV_CODEC_ID_AMR_NB, + .video_codec = AV_CODEC_ID_H263, + .init = mov_init, + .write_header = mov_write_header, + .write_packet = mov_write_packet, + .write_trailer = mov_write_trailer, + .deinit = mov_free, + .flags = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH | AVFMT_TS_NEGATIVE, + .codec_tag = (const AVCodecTag* const []){ codec_3gp_tags, 0 }, + .check_bitstream = mov_check_bitstream, + .priv_class = &tg2_muxer_class, +}; +#endif +#if CONFIG_IPOD_MUXER +MOV_CLASS(ipod) +AVOutputFormat ff_ipod_muxer = { + .name = "ipod", + .long_name = NULL_IF_CONFIG_SMALL("iPod H.264 MP4 (MPEG-4 Part 14)"), + .mime_type = "video/mp4", + 
.extensions = "m4v,m4a", + .priv_data_size = sizeof(MOVMuxContext), + .audio_codec = AV_CODEC_ID_AAC, + .video_codec = AV_CODEC_ID_H264, + .init = mov_init, + .write_header = mov_write_header, + .write_packet = mov_write_packet, + .write_trailer = mov_write_trailer, + .deinit = mov_free, + .flags = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH | AVFMT_TS_NEGATIVE, + .codec_tag = (const AVCodecTag* const []){ codec_ipod_tags, 0 }, + .check_bitstream = mov_check_bitstream, + .priv_class = &ipod_muxer_class, +}; +#endif +#if CONFIG_ISMV_MUXER +MOV_CLASS(ismv) +AVOutputFormat ff_ismv_muxer = { + .name = "ismv", + .long_name = NULL_IF_CONFIG_SMALL("ISMV/ISMA (Smooth Streaming)"), + .mime_type = "video/mp4", + .extensions = "ismv,isma", + .priv_data_size = sizeof(MOVMuxContext), + .audio_codec = AV_CODEC_ID_AAC, + .video_codec = AV_CODEC_ID_H264, + .init = mov_init, + .write_header = mov_write_header, + .write_packet = mov_write_packet, + .write_trailer = mov_write_trailer, + .deinit = mov_free, + .flags = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH | AVFMT_TS_NEGATIVE, + .codec_tag = (const AVCodecTag* const []){ ff_mp4_obj_type, 0 }, + .check_bitstream = mov_check_bitstream, + .priv_class = &ismv_muxer_class, +}; +#endif +#if CONFIG_F4V_MUXER +MOV_CLASS(f4v) +AVOutputFormat ff_f4v_muxer = { + .name = "f4v", + .long_name = NULL_IF_CONFIG_SMALL("F4V Adobe Flash Video"), + .mime_type = "application/f4v", + .extensions = "f4v", + .priv_data_size = sizeof(MOVMuxContext), + .audio_codec = AV_CODEC_ID_AAC, + .video_codec = AV_CODEC_ID_H264, + .init = mov_init, + .write_header = mov_write_header, + .write_packet = mov_write_packet, + .write_trailer = mov_write_trailer, + .deinit = mov_free, + .flags = AVFMT_GLOBALHEADER | AVFMT_ALLOW_FLUSH, + .codec_tag = (const AVCodecTag* const []){ codec_f4v_tags, 0 }, + .check_bitstream = mov_check_bitstream, + .priv_class = &f4v_muxer_class, +}; +#endif From a915fd9a8cbac5bb94a373bfc29bb6c0331bae6d Mon Sep 17 00:00:00 2001 From: turly221 Date: Mon, 9 
Dec 2024 16:06:07 +0000 Subject: [PATCH 10/13] commit patch 22530026 --- libavcodec/vqavideo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libavcodec/vqavideo.c b/libavcodec/vqavideo.c index 81d50bb5a4f8d..00229b622892a 100644 --- a/libavcodec/vqavideo.c +++ b/libavcodec/vqavideo.c @@ -147,7 +147,7 @@ static av_cold int vqa_decode_init(AVCodecContext *avctx) } s->width = AV_RL16(&s->avctx->extradata[6]); s->height = AV_RL16(&s->avctx->extradata[8]); - if ((ret = av_image_check_size(s->width, s->height, 0, avctx)) < 0) { + if ((ret = ff_set_dimensions(avctx, s->width, s->height)) < 0) { s->width= s->height= 0; return ret; } From 3e4d77266e3a7feeb4baa4a90d8e17703f3f2cfa Mon Sep 17 00:00:00 2001 From: turly221 Date: Mon, 9 Dec 2024 16:06:09 +0000 Subject: [PATCH 11/13] commit patch 20711574 --- libavformat/tty.c | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/libavformat/tty.c b/libavformat/tty.c index b407645ee4b95..6228762e4ab0e 100644 --- a/libavformat/tty.c +++ b/libavformat/tty.c @@ -34,6 +34,13 @@ #include "internal.h" #include "sauce.h" +static int isansicode(int x) +{ + return x == 0x1B || x == 0x0A || x == 0x0D || (x >= 0x20 && x < 0x7f); +} + +static const char tty_extensions[31] = "ans,art,asc,diz,ice,nfo,txt,vt"; + typedef struct TtyDemuxContext { AVClass *class; int chars_per_frame; @@ -42,6 +49,17 @@ typedef struct TtyDemuxContext { AVRational framerate; /**< Set by a private option. 
*/ } TtyDemuxContext; +static int read_probe(const AVProbeData *p) +{ + int cnt = 0; + + for (int i = 0; i < p->buf_size; i++) + cnt += !!isansicode(p->buf[i]); + + return (cnt * 100LL / p->buf_size) * (cnt > 400) * + !!av_match_ext(p->filename, tty_extensions); +} + /** * Parse EFI header */ @@ -153,8 +171,9 @@ AVInputFormat ff_tty_demuxer = { .name = "tty", .long_name = NULL_IF_CONFIG_SMALL("Tele-typewriter"), .priv_data_size = sizeof(TtyDemuxContext), + .read_probe = read_probe, .read_header = read_header, .read_packet = read_packet, - .extensions = "ans,art,asc,diz,ice,nfo,txt,vt", + .extensions = tty_extensions, .priv_class = &tty_demuxer_class, }; From c34b016304017c4ed8995b8ac133621639648547 Mon Sep 17 00:00:00 2001 From: turly221 Date: Mon, 9 Dec 2024 16:06:10 +0000 Subject: [PATCH 12/13] commit patch 22001740 --- libavcodec/vp3.c | 7 +- libavcodec/vp3.c.orig | 2594 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 2600 insertions(+), 1 deletion(-) create mode 100644 libavcodec/vp3.c.orig diff --git a/libavcodec/vp3.c b/libavcodec/vp3.c index b10cb39f8ae20..1596ff34ef2b4 100644 --- a/libavcodec/vp3.c +++ b/libavcodec/vp3.c @@ -2093,8 +2093,13 @@ static int vp3_decode_frame(AVCodecContext *avctx, if (ff_thread_get_buffer(avctx, &s->current_frame, AV_GET_BUFFER_FLAG_REF) < 0) goto error; - if (!s->edge_emu_buffer) + if (!s->edge_emu_buffer) { s->edge_emu_buffer = av_malloc(9 * FFABS(s->current_frame.f->linesize[0])); + if (!s->edge_emu_buffer) { + ret = AVERROR(ENOMEM); + goto error; + } + } if (s->keyframe) { if (!s->theora) { diff --git a/libavcodec/vp3.c.orig b/libavcodec/vp3.c.orig new file mode 100644 index 0000000000000..b10cb39f8ae20 --- /dev/null +++ b/libavcodec/vp3.c.orig @@ -0,0 +1,2594 @@ +/* + * Copyright (C) 2003-2004 The FFmpeg project + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * On2 VP3 Video Decoder + * + * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx) + * For more information about the VP3 coding process, visit: + * http://wiki.multimedia.cx/index.php?title=On2_VP3 + * + * Theora decoder by Alex Beregszaszi + */ + +#include +#include +#include + +#include "libavutil/imgutils.h" + +#include "avcodec.h" +#include "get_bits.h" +#include "hpeldsp.h" +#include "internal.h" +#include "mathops.h" +#include "thread.h" +#include "videodsp.h" +#include "vp3data.h" +#include "vp3dsp.h" +#include "xiph.h" + +#define FRAGMENT_PIXELS 8 + +// FIXME split things out into their own arrays +typedef struct Vp3Fragment { + int16_t dc; + uint8_t coding_method; + uint8_t qpi; +} Vp3Fragment; + +#define SB_NOT_CODED 0 +#define SB_PARTIALLY_CODED 1 +#define SB_FULLY_CODED 2 + +// This is the maximum length of a single long bit run that can be encoded +// for superblock coding or block qps. Theora special-cases this to read a +// bit instead of flipping the current bit to allow for runs longer than 4129. 
+#define MAXIMUM_LONG_BIT_RUN 4129 + +#define MODE_INTER_NO_MV 0 +#define MODE_INTRA 1 +#define MODE_INTER_PLUS_MV 2 +#define MODE_INTER_LAST_MV 3 +#define MODE_INTER_PRIOR_LAST 4 +#define MODE_USING_GOLDEN 5 +#define MODE_GOLDEN_MV 6 +#define MODE_INTER_FOURMV 7 +#define CODING_MODE_COUNT 8 + +/* special internal mode */ +#define MODE_COPY 8 + +static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb); +static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb); + + +/* There are 6 preset schemes, plus a free-form scheme */ +static const int ModeAlphabet[6][CODING_MODE_COUNT] = { + /* scheme 1: Last motion vector dominates */ + { MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST, + MODE_INTER_PLUS_MV, MODE_INTER_NO_MV, + MODE_INTRA, MODE_USING_GOLDEN, + MODE_GOLDEN_MV, MODE_INTER_FOURMV }, + + /* scheme 2 */ + { MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST, + MODE_INTER_NO_MV, MODE_INTER_PLUS_MV, + MODE_INTRA, MODE_USING_GOLDEN, + MODE_GOLDEN_MV, MODE_INTER_FOURMV }, + + /* scheme 3 */ + { MODE_INTER_LAST_MV, MODE_INTER_PLUS_MV, + MODE_INTER_PRIOR_LAST, MODE_INTER_NO_MV, + MODE_INTRA, MODE_USING_GOLDEN, + MODE_GOLDEN_MV, MODE_INTER_FOURMV }, + + /* scheme 4 */ + { MODE_INTER_LAST_MV, MODE_INTER_PLUS_MV, + MODE_INTER_NO_MV, MODE_INTER_PRIOR_LAST, + MODE_INTRA, MODE_USING_GOLDEN, + MODE_GOLDEN_MV, MODE_INTER_FOURMV }, + + /* scheme 5: No motion vector dominates */ + { MODE_INTER_NO_MV, MODE_INTER_LAST_MV, + MODE_INTER_PRIOR_LAST, MODE_INTER_PLUS_MV, + MODE_INTRA, MODE_USING_GOLDEN, + MODE_GOLDEN_MV, MODE_INTER_FOURMV }, + + /* scheme 6 */ + { MODE_INTER_NO_MV, MODE_USING_GOLDEN, + MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST, + MODE_INTER_PLUS_MV, MODE_INTRA, + MODE_GOLDEN_MV, MODE_INTER_FOURMV }, +}; + +static const uint8_t hilbert_offset[16][2] = { + { 0, 0 }, { 1, 0 }, { 1, 1 }, { 0, 1 }, + { 0, 2 }, { 0, 3 }, { 1, 3 }, { 1, 2 }, + { 2, 2 }, { 2, 3 }, { 3, 3 }, { 3, 2 }, + { 3, 1 }, { 2, 1 }, { 2, 0 }, { 3, 0 } +}; + +#define MIN_DEQUANT_VAL 2 + 
+typedef struct Vp3DecodeContext { + AVCodecContext *avctx; + int theora, theora_tables, theora_header; + int version; + int width, height; + int chroma_x_shift, chroma_y_shift; + ThreadFrame golden_frame; + ThreadFrame last_frame; + ThreadFrame current_frame; + int keyframe; + uint8_t idct_permutation[64]; + uint8_t idct_scantable[64]; + HpelDSPContext hdsp; + VideoDSPContext vdsp; + VP3DSPContext vp3dsp; + DECLARE_ALIGNED(16, int16_t, block)[64]; + int flipped_image; + int last_slice_end; + int skip_loop_filter; + + int qps[3]; + int nqps; + int last_qps[3]; + + int superblock_count; + int y_superblock_width; + int y_superblock_height; + int y_superblock_count; + int c_superblock_width; + int c_superblock_height; + int c_superblock_count; + int u_superblock_start; + int v_superblock_start; + unsigned char *superblock_coding; + + int macroblock_count; + int macroblock_width; + int macroblock_height; + + int fragment_count; + int fragment_width[2]; + int fragment_height[2]; + + Vp3Fragment *all_fragments; + int fragment_start[3]; + int data_offset[3]; + uint8_t offset_x; + uint8_t offset_y; + int offset_x_warned; + + int8_t (*motion_val[2])[2]; + + /* tables */ + uint16_t coded_dc_scale_factor[64]; + uint32_t coded_ac_scale_factor[64]; + uint8_t base_matrix[384][64]; + uint8_t qr_count[2][3]; + uint8_t qr_size[2][3][64]; + uint16_t qr_base[2][3][64]; + + /** + * This is a list of all tokens in bitstream order. Reordering takes place + * by pulling from each level during IDCT. As a consequence, IDCT must be + * in Hilbert order, making the minimum slice height 64 for 4:2:0 and 32 + * otherwise. 
The 32 different tokens with up to 12 bits of extradata are + * collapsed into 3 types, packed as follows: + * (from the low to high bits) + * + * 2 bits: type (0,1,2) + * 0: EOB run, 14 bits for run length (12 needed) + * 1: zero run, 7 bits for run length + * 7 bits for the next coefficient (3 needed) + * 2: coefficient, 14 bits (11 needed) + * + * Coefficients are signed, so are packed in the highest bits for automatic + * sign extension. + */ + int16_t *dct_tokens[3][64]; + int16_t *dct_tokens_base; +#define TOKEN_EOB(eob_run) ((eob_run) << 2) +#define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) * 512) + ((zero_run) << 2) + 1) +#define TOKEN_COEFF(coeff) (((coeff) * 4) + 2) + + /** + * number of blocks that contain DCT coefficients at + * the given level or higher + */ + int num_coded_frags[3][64]; + int total_num_coded_frags; + + /* this is a list of indexes into the all_fragments array indicating + * which of the fragments are coded */ + int *coded_fragment_list[3]; + + VLC dc_vlc[16]; + VLC ac_vlc_1[16]; + VLC ac_vlc_2[16]; + VLC ac_vlc_3[16]; + VLC ac_vlc_4[16]; + + VLC superblock_run_length_vlc; + VLC fragment_run_length_vlc; + VLC mode_code_vlc; + VLC motion_vector_vlc; + + /* these arrays need to be on 16-byte boundaries since SSE2 operations + * index into them */ + DECLARE_ALIGNED(16, int16_t, qmat)[3][2][3][64]; ///< qmat[qpi][is_inter][plane] + + /* This table contains superblock_count * 16 entries. Each set of 16 + * numbers corresponds to the fragment indexes 0..15 of the superblock. + * An entry will be -1 to indicate that no entry corresponds to that + * index. */ + int *superblock_fragments; + + /* This is an array that indicates how a particular macroblock + * is coded. 
*/ + unsigned char *macroblock_coding; + + uint8_t *edge_emu_buffer; + + /* Huffman decode */ + int hti; + unsigned int hbits; + int entries; + int huff_code_size; + uint32_t huffman_table[80][32][2]; + + uint8_t filter_limit_values[64]; + DECLARE_ALIGNED(8, int, bounding_values_array)[256 + 2]; +} Vp3DecodeContext; + +/************************************************************************ + * VP3 specific functions + ************************************************************************/ + +static av_cold void free_tables(AVCodecContext *avctx) +{ + Vp3DecodeContext *s = avctx->priv_data; + + av_freep(&s->superblock_coding); + av_freep(&s->all_fragments); + av_freep(&s->coded_fragment_list[0]); + av_freep(&s->dct_tokens_base); + av_freep(&s->superblock_fragments); + av_freep(&s->macroblock_coding); + av_freep(&s->motion_val[0]); + av_freep(&s->motion_val[1]); +} + +static void vp3_decode_flush(AVCodecContext *avctx) +{ + Vp3DecodeContext *s = avctx->priv_data; + + if (s->golden_frame.f) + ff_thread_release_buffer(avctx, &s->golden_frame); + if (s->last_frame.f) + ff_thread_release_buffer(avctx, &s->last_frame); + if (s->current_frame.f) + ff_thread_release_buffer(avctx, &s->current_frame); +} + +static av_cold int vp3_decode_end(AVCodecContext *avctx) +{ + Vp3DecodeContext *s = avctx->priv_data; + int i; + + free_tables(avctx); + av_freep(&s->edge_emu_buffer); + + s->theora_tables = 0; + + /* release all frames */ + vp3_decode_flush(avctx); + av_frame_free(&s->current_frame.f); + av_frame_free(&s->last_frame.f); + av_frame_free(&s->golden_frame.f); + + if (avctx->internal->is_copy) + return 0; + + for (i = 0; i < 16; i++) { + ff_free_vlc(&s->dc_vlc[i]); + ff_free_vlc(&s->ac_vlc_1[i]); + ff_free_vlc(&s->ac_vlc_2[i]); + ff_free_vlc(&s->ac_vlc_3[i]); + ff_free_vlc(&s->ac_vlc_4[i]); + } + + ff_free_vlc(&s->superblock_run_length_vlc); + ff_free_vlc(&s->fragment_run_length_vlc); + ff_free_vlc(&s->mode_code_vlc); + ff_free_vlc(&s->motion_vector_vlc); + + return 0; +} 
+ +/** + * This function sets up all of the various blocks mappings: + * superblocks <-> fragments, macroblocks <-> fragments, + * superblocks <-> macroblocks + * + * @return 0 is successful; returns 1 if *anything* went wrong. + */ +static int init_block_mapping(Vp3DecodeContext *s) +{ + int sb_x, sb_y, plane; + int x, y, i, j = 0; + + for (plane = 0; plane < 3; plane++) { + int sb_width = plane ? s->c_superblock_width + : s->y_superblock_width; + int sb_height = plane ? s->c_superblock_height + : s->y_superblock_height; + int frag_width = s->fragment_width[!!plane]; + int frag_height = s->fragment_height[!!plane]; + + for (sb_y = 0; sb_y < sb_height; sb_y++) + for (sb_x = 0; sb_x < sb_width; sb_x++) + for (i = 0; i < 16; i++) { + x = 4 * sb_x + hilbert_offset[i][0]; + y = 4 * sb_y + hilbert_offset[i][1]; + + if (x < frag_width && y < frag_height) + s->superblock_fragments[j++] = s->fragment_start[plane] + + y * frag_width + x; + else + s->superblock_fragments[j++] = -1; + } + } + + return 0; /* successful path out */ +} + +/* + * This function sets up the dequantization tables used for a particular + * frame. 
+ */ +static void init_dequantizer(Vp3DecodeContext *s, int qpi) +{ + int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]]; + int dc_scale_factor = s->coded_dc_scale_factor[s->qps[qpi]]; + int i, plane, inter, qri, bmi, bmj, qistart; + + for (inter = 0; inter < 2; inter++) { + for (plane = 0; plane < 3; plane++) { + int sum = 0; + for (qri = 0; qri < s->qr_count[inter][plane]; qri++) { + sum += s->qr_size[inter][plane][qri]; + if (s->qps[qpi] <= sum) + break; + } + qistart = sum - s->qr_size[inter][plane][qri]; + bmi = s->qr_base[inter][plane][qri]; + bmj = s->qr_base[inter][plane][qri + 1]; + for (i = 0; i < 64; i++) { + int coeff = (2 * (sum - s->qps[qpi]) * s->base_matrix[bmi][i] - + 2 * (qistart - s->qps[qpi]) * s->base_matrix[bmj][i] + + s->qr_size[inter][plane][qri]) / + (2 * s->qr_size[inter][plane][qri]); + + int qmin = 8 << (inter + !i); + int qscale = i ? ac_scale_factor : dc_scale_factor; + + s->qmat[qpi][inter][plane][s->idct_permutation[i]] = + av_clip((qscale * coeff) / 100 * 4, qmin, 4096); + } + /* all DC coefficients use the same quant so as not to interfere + * with DC prediction */ + s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0]; + } + } +} + +/* + * This function initializes the loop filter boundary limits if the frame's + * quality index is different from the previous frame's. + * + * The filter_limit_values may not be larger than 127. 
+ */ +static void init_loop_filter(Vp3DecodeContext *s) +{ + int *bounding_values = s->bounding_values_array + 127; + int filter_limit; + int x; + int value; + + filter_limit = s->filter_limit_values[s->qps[0]]; + av_assert0(filter_limit < 128U); + + /* set up the bounding values */ + memset(s->bounding_values_array, 0, 256 * sizeof(int)); + for (x = 0; x < filter_limit; x++) { + bounding_values[-x] = -x; + bounding_values[x] = x; + } + for (x = value = filter_limit; x < 128 && value; x++, value--) { + bounding_values[ x] = value; + bounding_values[-x] = -value; + } + if (value) + bounding_values[128] = value; + bounding_values[129] = bounding_values[130] = filter_limit * 0x02020202; +} + +/* + * This function unpacks all of the superblock/macroblock/fragment coding + * information from the bitstream. + */ +static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb) +{ + int superblock_starts[3] = { + 0, s->u_superblock_start, s->v_superblock_start + }; + int bit = 0; + int current_superblock = 0; + int current_run = 0; + int num_partial_superblocks = 0; + + int i, j; + int current_fragment; + int plane; + + if (s->keyframe) { + memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count); + } else { + /* unpack the list of partially-coded superblocks */ + bit = get_bits1(gb) ^ 1; + current_run = 0; + + while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) { + if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN) + bit = get_bits1(gb); + else + bit ^= 1; + + current_run = get_vlc2(gb, s->superblock_run_length_vlc.table, + 6, 2) + 1; + if (current_run == 34) + current_run += get_bits(gb, 12); + + if (current_run > s->superblock_count - current_superblock) { + av_log(s->avctx, AV_LOG_ERROR, + "Invalid partially coded superblock run length\n"); + return -1; + } + + memset(s->superblock_coding + current_superblock, bit, current_run); + + current_superblock += current_run; + if (bit) + num_partial_superblocks += current_run; + } + + /* 
unpack the list of fully coded superblocks if any of the blocks were + * not marked as partially coded in the previous step */ + if (num_partial_superblocks < s->superblock_count) { + int superblocks_decoded = 0; + + current_superblock = 0; + bit = get_bits1(gb) ^ 1; + current_run = 0; + + while (superblocks_decoded < s->superblock_count - num_partial_superblocks && + get_bits_left(gb) > 0) { + if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN) + bit = get_bits1(gb); + else + bit ^= 1; + + current_run = get_vlc2(gb, s->superblock_run_length_vlc.table, + 6, 2) + 1; + if (current_run == 34) + current_run += get_bits(gb, 12); + + for (j = 0; j < current_run; current_superblock++) { + if (current_superblock >= s->superblock_count) { + av_log(s->avctx, AV_LOG_ERROR, + "Invalid fully coded superblock run length\n"); + return -1; + } + + /* skip any superblocks already marked as partially coded */ + if (s->superblock_coding[current_superblock] == SB_NOT_CODED) { + s->superblock_coding[current_superblock] = 2 * bit; + j++; + } + } + superblocks_decoded += current_run; + } + } + + /* if there were partial blocks, initialize bitstream for + * unpacking fragment codings */ + if (num_partial_superblocks) { + current_run = 0; + bit = get_bits1(gb); + /* toggle the bit because as soon as the first run length is + * fetched the bit will be toggled again */ + bit ^= 1; + } + } + + /* figure out which fragments are coded; iterate through each + * superblock (all planes) */ + s->total_num_coded_frags = 0; + memset(s->macroblock_coding, MODE_COPY, s->macroblock_count); + + for (plane = 0; plane < 3; plane++) { + int sb_start = superblock_starts[plane]; + int sb_end = sb_start + (plane ? 
s->c_superblock_count + : s->y_superblock_count); + int num_coded_frags = 0; + + for (i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) { + /* iterate through all 16 fragments in a superblock */ + for (j = 0; j < 16; j++) { + /* if the fragment is in bounds, check its coding status */ + current_fragment = s->superblock_fragments[i * 16 + j]; + if (current_fragment != -1) { + int coded = s->superblock_coding[i]; + + if (s->superblock_coding[i] == SB_PARTIALLY_CODED) { + /* fragment may or may not be coded; this is the case + * that cares about the fragment coding runs */ + if (current_run-- == 0) { + bit ^= 1; + current_run = get_vlc2(gb, s->fragment_run_length_vlc.table, 5, 2); + } + coded = bit; + } + + if (coded) { + /* default mode; actual mode will be decoded in + * the next phase */ + s->all_fragments[current_fragment].coding_method = + MODE_INTER_NO_MV; + s->coded_fragment_list[plane][num_coded_frags++] = + current_fragment; + } else { + /* not coded; copy this fragment from the prior frame */ + s->all_fragments[current_fragment].coding_method = + MODE_COPY; + } + } + } + } + s->total_num_coded_frags += num_coded_frags; + for (i = 0; i < 64; i++) + s->num_coded_frags[plane][i] = num_coded_frags; + if (plane < 2) + s->coded_fragment_list[plane + 1] = s->coded_fragment_list[plane] + + num_coded_frags; + } + return 0; +} + +/* + * This function unpacks all the coding mode data for individual macroblocks + * from the bitstream. + */ +static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb) +{ + int i, j, k, sb_x, sb_y; + int scheme; + int current_macroblock; + int current_fragment; + int coding_mode; + int custom_mode_alphabet[CODING_MODE_COUNT]; + const int *alphabet; + Vp3Fragment *frag; + + if (s->keyframe) { + for (i = 0; i < s->fragment_count; i++) + s->all_fragments[i].coding_method = MODE_INTRA; + } else { + /* fetch the mode coding scheme for this frame */ + scheme = get_bits(gb, 3); + + /* is it a custom coding scheme? 
*/ + if (scheme == 0) { + for (i = 0; i < 8; i++) + custom_mode_alphabet[i] = MODE_INTER_NO_MV; + for (i = 0; i < 8; i++) + custom_mode_alphabet[get_bits(gb, 3)] = i; + alphabet = custom_mode_alphabet; + } else + alphabet = ModeAlphabet[scheme - 1]; + + /* iterate through all of the macroblocks that contain 1 or more + * coded fragments */ + for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) { + for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) { + if (get_bits_left(gb) <= 0) + return -1; + + for (j = 0; j < 4; j++) { + int mb_x = 2 * sb_x + (j >> 1); + int mb_y = 2 * sb_y + (((j >> 1) + j) & 1); + current_macroblock = mb_y * s->macroblock_width + mb_x; + + if (mb_x >= s->macroblock_width || + mb_y >= s->macroblock_height) + continue; + +#define BLOCK_X (2 * mb_x + (k & 1)) +#define BLOCK_Y (2 * mb_y + (k >> 1)) + /* coding modes are only stored if the macroblock has + * at least one luma block coded, otherwise it must be + * INTER_NO_MV */ + for (k = 0; k < 4; k++) { + current_fragment = BLOCK_Y * + s->fragment_width[0] + BLOCK_X; + if (s->all_fragments[current_fragment].coding_method != MODE_COPY) + break; + } + if (k == 4) { + s->macroblock_coding[current_macroblock] = MODE_INTER_NO_MV; + continue; + } + + /* mode 7 means get 3 bits for each coding mode */ + if (scheme == 7) + coding_mode = get_bits(gb, 3); + else + coding_mode = alphabet[get_vlc2(gb, s->mode_code_vlc.table, 3, 3)]; + + s->macroblock_coding[current_macroblock] = coding_mode; + for (k = 0; k < 4; k++) { + frag = s->all_fragments + BLOCK_Y * s->fragment_width[0] + BLOCK_X; + if (frag->coding_method != MODE_COPY) + frag->coding_method = coding_mode; + } + +#define SET_CHROMA_MODES \ + if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \ + frag[s->fragment_start[1]].coding_method = coding_mode; \ + if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \ + frag[s->fragment_start[2]].coding_method = coding_mode; + + if (s->chroma_y_shift) { + frag = s->all_fragments + mb_y * + 
s->fragment_width[1] + mb_x; + SET_CHROMA_MODES + } else if (s->chroma_x_shift) { + frag = s->all_fragments + + 2 * mb_y * s->fragment_width[1] + mb_x; + for (k = 0; k < 2; k++) { + SET_CHROMA_MODES + frag += s->fragment_width[1]; + } + } else { + for (k = 0; k < 4; k++) { + frag = s->all_fragments + + BLOCK_Y * s->fragment_width[1] + BLOCK_X; + SET_CHROMA_MODES + } + } + } + } + } + } + + return 0; +} + +/* + * This function unpacks all the motion vectors for the individual + * macroblocks from the bitstream. + */ +static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb) +{ + int j, k, sb_x, sb_y; + int coding_mode; + int motion_x[4]; + int motion_y[4]; + int last_motion_x = 0; + int last_motion_y = 0; + int prior_last_motion_x = 0; + int prior_last_motion_y = 0; + int current_macroblock; + int current_fragment; + int frag; + + if (s->keyframe) + return 0; + + /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */ + coding_mode = get_bits1(gb); + + /* iterate through all of the macroblocks that contain 1 or more + * coded fragments */ + for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) { + for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) { + if (get_bits_left(gb) <= 0) + return -1; + + for (j = 0; j < 4; j++) { + int mb_x = 2 * sb_x + (j >> 1); + int mb_y = 2 * sb_y + (((j >> 1) + j) & 1); + current_macroblock = mb_y * s->macroblock_width + mb_x; + + if (mb_x >= s->macroblock_width || + mb_y >= s->macroblock_height || + s->macroblock_coding[current_macroblock] == MODE_COPY) + continue; + + switch (s->macroblock_coding[current_macroblock]) { + case MODE_INTER_PLUS_MV: + case MODE_GOLDEN_MV: + /* all 6 fragments use the same motion vector */ + if (coding_mode == 0) { + motion_x[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)]; + motion_y[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)]; + } else { + motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)]; + motion_y[0] = 
fixed_motion_vector_table[get_bits(gb, 6)]; + } + + /* vector maintenance, only on MODE_INTER_PLUS_MV */ + if (s->macroblock_coding[current_macroblock] == MODE_INTER_PLUS_MV) { + prior_last_motion_x = last_motion_x; + prior_last_motion_y = last_motion_y; + last_motion_x = motion_x[0]; + last_motion_y = motion_y[0]; + } + break; + + case MODE_INTER_FOURMV: + /* vector maintenance */ + prior_last_motion_x = last_motion_x; + prior_last_motion_y = last_motion_y; + + /* fetch 4 vectors from the bitstream, one for each + * Y fragment, then average for the C fragment vectors */ + for (k = 0; k < 4; k++) { + current_fragment = BLOCK_Y * s->fragment_width[0] + BLOCK_X; + if (s->all_fragments[current_fragment].coding_method != MODE_COPY) { + if (coding_mode == 0) { + motion_x[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)]; + motion_y[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)]; + } else { + motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)]; + motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)]; + } + last_motion_x = motion_x[k]; + last_motion_y = motion_y[k]; + } else { + motion_x[k] = 0; + motion_y[k] = 0; + } + } + break; + + case MODE_INTER_LAST_MV: + /* all 6 fragments use the last motion vector */ + motion_x[0] = last_motion_x; + motion_y[0] = last_motion_y; + + /* no vector maintenance (last vector remains the + * last vector) */ + break; + + case MODE_INTER_PRIOR_LAST: + /* all 6 fragments use the motion vector prior to the + * last motion vector */ + motion_x[0] = prior_last_motion_x; + motion_y[0] = prior_last_motion_y; + + /* vector maintenance */ + prior_last_motion_x = last_motion_x; + prior_last_motion_y = last_motion_y; + last_motion_x = motion_x[0]; + last_motion_y = motion_y[0]; + break; + + default: + /* covers intra, inter without MV, golden without MV */ + motion_x[0] = 0; + motion_y[0] = 0; + + /* no vector maintenance */ + break; + } + + /* assign the motion vectors to the correct 
fragments */ + for (k = 0; k < 4; k++) { + current_fragment = + BLOCK_Y * s->fragment_width[0] + BLOCK_X; + if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) { + s->motion_val[0][current_fragment][0] = motion_x[k]; + s->motion_val[0][current_fragment][1] = motion_y[k]; + } else { + s->motion_val[0][current_fragment][0] = motion_x[0]; + s->motion_val[0][current_fragment][1] = motion_y[0]; + } + } + + if (s->chroma_y_shift) { + if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) { + motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] + + motion_x[2] + motion_x[3], 2); + motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] + + motion_y[2] + motion_y[3], 2); + } + motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1); + motion_y[0] = (motion_y[0] >> 1) | (motion_y[0] & 1); + frag = mb_y * s->fragment_width[1] + mb_x; + s->motion_val[1][frag][0] = motion_x[0]; + s->motion_val[1][frag][1] = motion_y[0]; + } else if (s->chroma_x_shift) { + if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) { + motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1); + motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1); + motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1); + motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1); + } else { + motion_x[1] = motion_x[0]; + motion_y[1] = motion_y[0]; + } + motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1); + motion_x[1] = (motion_x[1] >> 1) | (motion_x[1] & 1); + + frag = 2 * mb_y * s->fragment_width[1] + mb_x; + for (k = 0; k < 2; k++) { + s->motion_val[1][frag][0] = motion_x[k]; + s->motion_val[1][frag][1] = motion_y[k]; + frag += s->fragment_width[1]; + } + } else { + for (k = 0; k < 4; k++) { + frag = BLOCK_Y * s->fragment_width[1] + BLOCK_X; + if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) { + s->motion_val[1][frag][0] = motion_x[k]; + s->motion_val[1][frag][1] = motion_y[k]; + } else { + s->motion_val[1][frag][0] = motion_x[0]; + s->motion_val[1][frag][1] = motion_y[0]; + } + } + } + } + 
        }
    }

    return 0;
}

/*
 * Unpack the per-block quantizer-index (qpi) selection.  For each extra
 * qp signalled for this frame (s->nqps - 1 passes), a run-length coded
 * bit-list decides which still-undecided coded fragments move up to the
 * next qpi.
 */
static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
{
    int qpi, i, j, bit, run_length, blocks_decoded, num_blocks_at_qpi;
    int num_blocks = s->total_num_coded_frags;

    for (qpi = 0; qpi < s->nqps - 1 && num_blocks > 0; qpi++) {
        i = blocks_decoded = num_blocks_at_qpi = 0;

        /* the run bit toggles each run; the xor undoes the first toggle
         * inside the loop so the stream bit is used as-is for run 1 */
        bit        = get_bits1(gb) ^ 1;
        run_length = 0;

        do {
            if (run_length == MAXIMUM_LONG_BIT_RUN)
                bit = get_bits1(gb); /* after a maximal run the polarity is resent */
            else
                bit ^= 1;

            run_length = get_vlc2(gb, s->superblock_run_length_vlc.table, 6, 2) + 1;
            if (run_length == 34)
                run_length += get_bits(gb, 12);
            blocks_decoded += run_length;

            /* a clear bit means the fragment keeps the current qpi */
            if (!bit)
                num_blocks_at_qpi += run_length;

            /* apply this run to the next run_length fragments still at qpi */
            for (j = 0; j < run_length; i++) {
                if (i >= s->total_num_coded_frags)
                    return -1; /* run overruns the coded-fragment list */

                if (s->all_fragments[s->coded_fragment_list[0][i]].qpi == qpi) {
                    s->all_fragments[s->coded_fragment_list[0][i]].qpi += bit;
                    j++;
                }
            }
        } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0);

        /* fragments that kept this qpi are settled; the rest go to the
         * next pass */
        num_blocks -= num_blocks_at_qpi;
    }

    return 0;
}

/*
 * This function is called by unpack_dct_coeffs() to extract the VLCs from
 * the bitstream. The VLCs encode tokens which are used to unpack DCT
 * data. This function unpacks all the VLCs for either the Y plane or both
 * C planes, and is called for DC coefficients or different AC coefficient
 * levels (since different coefficient types require different VLC tables.
 *
 * This function returns a residual eob run. E.g, if a particular token gave
 * instructions to EOB the next 5 fragments and there were only 2 fragments
 * left in the current fragment range, 3 would be returned so that it could
 * be passed into the next call to this same function.
+ */ +static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb, + VLC *table, int coeff_index, + int plane, + int eob_run) +{ + int i, j = 0; + int token; + int zero_run = 0; + int16_t coeff = 0; + int bits_to_get; + int blocks_ended; + int coeff_i = 0; + int num_coeffs = s->num_coded_frags[plane][coeff_index]; + int16_t *dct_tokens = s->dct_tokens[plane][coeff_index]; + + /* local references to structure members to avoid repeated dereferences */ + int *coded_fragment_list = s->coded_fragment_list[plane]; + Vp3Fragment *all_fragments = s->all_fragments; + VLC_TYPE(*vlc_table)[2] = table->table; + + if (num_coeffs < 0) + av_log(s->avctx, AV_LOG_ERROR, + "Invalid number of coefficients at level %d\n", coeff_index); + + if (eob_run > num_coeffs) { + coeff_i = + blocks_ended = num_coeffs; + eob_run -= num_coeffs; + } else { + coeff_i = + blocks_ended = eob_run; + eob_run = 0; + } + + // insert fake EOB token to cover the split between planes or zzi + if (blocks_ended) + dct_tokens[j++] = blocks_ended << 2; + + while (coeff_i < num_coeffs && get_bits_left(gb) > 0) { + /* decode a VLC into a token */ + token = get_vlc2(gb, vlc_table, 11, 3); + /* use the token to get a zero run, a coefficient, and an eob run */ + if ((unsigned) token <= 6U) { + eob_run = eob_run_base[token]; + if (eob_run_get_bits[token]) + eob_run += get_bits(gb, eob_run_get_bits[token]); + + // record only the number of blocks ended in this plane, + // any spill will be recorded in the next plane. 
+ if (eob_run > num_coeffs - coeff_i) { + dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i); + blocks_ended += num_coeffs - coeff_i; + eob_run -= num_coeffs - coeff_i; + coeff_i = num_coeffs; + } else { + dct_tokens[j++] = TOKEN_EOB(eob_run); + blocks_ended += eob_run; + coeff_i += eob_run; + eob_run = 0; + } + } else if (token >= 0) { + bits_to_get = coeff_get_bits[token]; + if (bits_to_get) + bits_to_get = get_bits(gb, bits_to_get); + coeff = coeff_tables[token][bits_to_get]; + + zero_run = zero_run_base[token]; + if (zero_run_get_bits[token]) + zero_run += get_bits(gb, zero_run_get_bits[token]); + + if (zero_run) { + dct_tokens[j++] = TOKEN_ZERO_RUN(coeff, zero_run); + } else { + // Save DC into the fragment structure. DC prediction is + // done in raster order, so the actual DC can't be in with + // other tokens. We still need the token in dct_tokens[] + // however, or else the structure collapses on itself. + if (!coeff_index) + all_fragments[coded_fragment_list[coeff_i]].dc = coeff; + + dct_tokens[j++] = TOKEN_COEFF(coeff); + } + + if (coeff_index + zero_run > 64) { + av_log(s->avctx, AV_LOG_DEBUG, + "Invalid zero run of %d with %d coeffs left\n", + zero_run, 64 - coeff_index); + zero_run = 64 - coeff_index; + } + + // zero runs code multiple coefficients, + // so don't try to decode coeffs for those higher levels + for (i = coeff_index + 1; i <= coeff_index + zero_run; i++) + s->num_coded_frags[plane][i]--; + coeff_i++; + } else { + av_log(s->avctx, AV_LOG_ERROR, "Invalid token %d\n", token); + return -1; + } + } + + if (blocks_ended > s->num_coded_frags[plane][coeff_index]) + av_log(s->avctx, AV_LOG_ERROR, "More blocks ended than coded!\n"); + + // decrement the number of blocks that have higher coefficients for each + // EOB run at this level + if (blocks_ended) + for (i = coeff_index + 1; i < 64; i++) + s->num_coded_frags[plane][i] -= blocks_ended; + + // setup the next buffer + if (plane < 2) + s->dct_tokens[plane + 1][coeff_index] = dct_tokens + j; + 
    else if (coeff_index < 63)
        s->dct_tokens[0][coeff_index + 1] = dct_tokens + j;

    return eob_run;
}

static void reverse_dc_prediction(Vp3DecodeContext *s,
                                  int first_fragment,
                                  int fragment_width,
                                  int fragment_height);
/*
 * This function unpacks all of the DCT coefficient data from the
 * bitstream: first the Y then C plane DC coefficients (with raster-order
 * DC prediction reversed in between), then all 63 AC levels, threading
 * the residual eob run through every unpack_vlcs() call.
 */
static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
{
    int i;
    int dc_y_table;
    int dc_c_table;
    int ac_y_table;
    int ac_c_table;
    int residual_eob_run = 0;
    VLC *y_tables[64];
    VLC *c_tables[64];

    s->dct_tokens[0][0] = s->dct_tokens_base;

    if (get_bits_left(gb) < 16)
        return AVERROR_INVALIDDATA;

    /* fetch the DC table indexes */
    dc_y_table = get_bits(gb, 4);
    dc_c_table = get_bits(gb, 4);

    /* unpack the Y plane DC coefficients */
    residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0,
                                   0, residual_eob_run);
    if (residual_eob_run < 0)
        return residual_eob_run;
    if (get_bits_left(gb) < 8)
        return AVERROR_INVALIDDATA;

    /* reverse prediction of the Y-plane DC coefficients */
    reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]);

    /* unpack the C plane DC coefficients */
    residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
                                   1, residual_eob_run);
    if (residual_eob_run < 0)
        return residual_eob_run;
    residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
                                   2, residual_eob_run);
    if (residual_eob_run < 0)
        return residual_eob_run;

    /* reverse prediction of the C-plane DC coefficients */
    if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        reverse_dc_prediction(s, s->fragment_start[1],
                              s->fragment_width[1], s->fragment_height[1]);
        reverse_dc_prediction(s, s->fragment_start[2],
                              s->fragment_width[1], s->fragment_height[1]);
    }

    if (get_bits_left(gb) < 8)
        return AVERROR_INVALIDDATA;
    /* fetch the AC table indexes */
    ac_y_table = get_bits(gb, 4);
    ac_c_table = get_bits(gb, 4);

    /* build tables of AC VLC tables: one VLC group covers each band of
     * coefficient levels (1-5, 6-14, 15-27, 28-63) */
    for (i = 1; i <= 5; i++) {
        y_tables[i] = &s->ac_vlc_1[ac_y_table];
        c_tables[i] = &s->ac_vlc_1[ac_c_table];
    }
    for (i = 6; i <= 14; i++) {
        y_tables[i] = &s->ac_vlc_2[ac_y_table];
        c_tables[i] = &s->ac_vlc_2[ac_c_table];
    }
    for (i = 15; i <= 27; i++) {
        y_tables[i] = &s->ac_vlc_3[ac_y_table];
        c_tables[i] = &s->ac_vlc_3[ac_c_table];
    }
    for (i = 28; i <= 63; i++) {
        y_tables[i] = &s->ac_vlc_4[ac_y_table];
        c_tables[i] = &s->ac_vlc_4[ac_c_table];
    }

    /* decode all AC coefficients */
    for (i = 1; i <= 63; i++) {
        residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
                                       0, residual_eob_run);
        if (residual_eob_run < 0)
            return residual_eob_run;

        residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
                                       1, residual_eob_run);
        if (residual_eob_run < 0)
            return residual_eob_run;
        residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
                                       2, residual_eob_run);
        if (residual_eob_run < 0)
            return residual_eob_run;
    }

    return 0;
}

/*
 * This function reverses the DC prediction for each coded fragment in
 * the frame. Much of this function is adapted directly from the original
 * VP3 source code.
+ */ +#define COMPATIBLE_FRAME(x) \ + (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type) +#define DC_COEFF(u) s->all_fragments[u].dc + +static void reverse_dc_prediction(Vp3DecodeContext *s, + int first_fragment, + int fragment_width, + int fragment_height) +{ +#define PUL 8 +#define PU 4 +#define PUR 2 +#define PL 1 + + int x, y; + int i = first_fragment; + + int predicted_dc; + + /* DC values for the left, up-left, up, and up-right fragments */ + int vl, vul, vu, vur; + + /* indexes for the left, up-left, up, and up-right fragments */ + int l, ul, u, ur; + + /* + * The 6 fields mean: + * 0: up-left multiplier + * 1: up multiplier + * 2: up-right multiplier + * 3: left multiplier + */ + static const int predictor_transform[16][4] = { + { 0, 0, 0, 0 }, + { 0, 0, 0, 128 }, // PL + { 0, 0, 128, 0 }, // PUR + { 0, 0, 53, 75 }, // PUR|PL + { 0, 128, 0, 0 }, // PU + { 0, 64, 0, 64 }, // PU |PL + { 0, 128, 0, 0 }, // PU |PUR + { 0, 0, 53, 75 }, // PU |PUR|PL + { 128, 0, 0, 0 }, // PUL + { 0, 0, 0, 128 }, // PUL|PL + { 64, 0, 64, 0 }, // PUL|PUR + { 0, 0, 53, 75 }, // PUL|PUR|PL + { 0, 128, 0, 0 }, // PUL|PU + { -104, 116, 0, 116 }, // PUL|PU |PL + { 24, 80, 24, 0 }, // PUL|PU |PUR + { -104, 116, 0, 116 } // PUL|PU |PUR|PL + }; + + /* This table shows which types of blocks can use other blocks for + * prediction. For example, INTRA is the only mode in this table to + * have a frame number of 0. That means INTRA blocks can only predict + * from other INTRA blocks. There are 2 golden frame coding types; + * blocks encoding in these modes can only predict from other blocks + * that were encoded with these 1 of these 2 modes. 
*/ + static const unsigned char compatible_frame[9] = { + 1, /* MODE_INTER_NO_MV */ + 0, /* MODE_INTRA */ + 1, /* MODE_INTER_PLUS_MV */ + 1, /* MODE_INTER_LAST_MV */ + 1, /* MODE_INTER_PRIOR_MV */ + 2, /* MODE_USING_GOLDEN */ + 2, /* MODE_GOLDEN_MV */ + 1, /* MODE_INTER_FOUR_MV */ + 3 /* MODE_COPY */ + }; + int current_frame_type; + + /* there is a last DC predictor for each of the 3 frame types */ + short last_dc[3]; + + int transform = 0; + + vul = + vu = + vur = + vl = 0; + last_dc[0] = + last_dc[1] = + last_dc[2] = 0; + + /* for each fragment row... */ + for (y = 0; y < fragment_height; y++) { + /* for each fragment in a row... */ + for (x = 0; x < fragment_width; x++, i++) { + + /* reverse prediction if this block was coded */ + if (s->all_fragments[i].coding_method != MODE_COPY) { + current_frame_type = + compatible_frame[s->all_fragments[i].coding_method]; + + transform = 0; + if (x) { + l = i - 1; + vl = DC_COEFF(l); + if (COMPATIBLE_FRAME(l)) + transform |= PL; + } + if (y) { + u = i - fragment_width; + vu = DC_COEFF(u); + if (COMPATIBLE_FRAME(u)) + transform |= PU; + if (x) { + ul = i - fragment_width - 1; + vul = DC_COEFF(ul); + if (COMPATIBLE_FRAME(ul)) + transform |= PUL; + } + if (x + 1 < fragment_width) { + ur = i - fragment_width + 1; + vur = DC_COEFF(ur); + if (COMPATIBLE_FRAME(ur)) + transform |= PUR; + } + } + + if (transform == 0) { + /* if there were no fragments to predict from, use last + * DC saved */ + predicted_dc = last_dc[current_frame_type]; + } else { + /* apply the appropriate predictor transform */ + predicted_dc = + (predictor_transform[transform][0] * vul) + + (predictor_transform[transform][1] * vu) + + (predictor_transform[transform][2] * vur) + + (predictor_transform[transform][3] * vl); + + predicted_dc /= 128; + + /* check for outranging on the [ul u l] and + * [ul u ur l] predictors */ + if ((transform == 15) || (transform == 13)) { + if (FFABS(predicted_dc - vu) > 128) + predicted_dc = vu; + else if (FFABS(predicted_dc - vl) 
> 128) + predicted_dc = vl; + else if (FFABS(predicted_dc - vul) > 128) + predicted_dc = vul; + } + } + + /* at long last, apply the predictor */ + DC_COEFF(i) += predicted_dc; + /* save the DC */ + last_dc[current_frame_type] = DC_COEFF(i); + } + } + } +} + +static void apply_loop_filter(Vp3DecodeContext *s, int plane, + int ystart, int yend) +{ + int x, y; + int *bounding_values = s->bounding_values_array + 127; + + int width = s->fragment_width[!!plane]; + int height = s->fragment_height[!!plane]; + int fragment = s->fragment_start[plane] + ystart * width; + ptrdiff_t stride = s->current_frame.f->linesize[plane]; + uint8_t *plane_data = s->current_frame.f->data[plane]; + if (!s->flipped_image) + stride = -stride; + plane_data += s->data_offset[plane] + 8 * ystart * stride; + + for (y = ystart; y < yend; y++) { + for (x = 0; x < width; x++) { + /* This code basically just deblocks on the edges of coded blocks. + * However, it has to be much more complicated because of the + * brain damaged deblock ordering used in VP3/Theora. Order matters + * because some pixels get filtered twice. 
*/ + if (s->all_fragments[fragment].coding_method != MODE_COPY) { + /* do not perform left edge filter for left columns frags */ + if (x > 0) { + s->vp3dsp.h_loop_filter( + plane_data + 8 * x, + stride, bounding_values); + } + + /* do not perform top edge filter for top row fragments */ + if (y > 0) { + s->vp3dsp.v_loop_filter( + plane_data + 8 * x, + stride, bounding_values); + } + + /* do not perform right edge filter for right column + * fragments or if right fragment neighbor is also coded + * in this frame (it will be filtered in next iteration) */ + if ((x < width - 1) && + (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) { + s->vp3dsp.h_loop_filter( + plane_data + 8 * x + 8, + stride, bounding_values); + } + + /* do not perform bottom edge filter for bottom row + * fragments or if bottom fragment neighbor is also coded + * in this frame (it will be filtered in the next row) */ + if ((y < height - 1) && + (s->all_fragments[fragment + width].coding_method == MODE_COPY)) { + s->vp3dsp.v_loop_filter( + plane_data + 8 * x + 8 * stride, + stride, bounding_values); + } + } + + fragment++; + } + plane_data += 8 * stride; + } +} + +/** + * Pull DCT tokens from the 64 levels to decode and dequant the coefficients + * for the next block in coding order + */ +static inline int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag, + int plane, int inter, int16_t block[64]) +{ + int16_t *dequantizer = s->qmat[frag->qpi][inter][plane]; + uint8_t *perm = s->idct_scantable; + int i = 0; + + do { + int token = *s->dct_tokens[plane][i]; + switch (token & 3) { + case 0: // EOB + if (--token < 4) // 0-3 are token types so the EOB run must now be 0 + s->dct_tokens[plane][i]++; + else + *s->dct_tokens[plane][i] = token & ~3; + goto end; + case 1: // zero run + s->dct_tokens[plane][i]++; + i += (token >> 2) & 0x7f; + if (i > 63) { + av_log(s->avctx, AV_LOG_ERROR, "Coefficient index overflow\n"); + return i; + } + block[perm[i]] = (token >> 9) * dequantizer[perm[i]]; + 
            i++;
            break;
        case 2: // coeff
            block[perm[i]] = (token >> 2) * dequantizer[perm[i]];
            s->dct_tokens[plane][i++]++;
            break;
        default: // shouldn't happen
            return i;
        }
    } while (i < 64);
    // return value is expected to be a valid level
    i--;
end:
    // the actual DC+prediction is in the fragment structure
    block[0] = frag->dc * s->qmat[0][inter][plane][0];
    return i;
}

/**
 * called when all pixels up to row y are complete
 */
static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
{
    int h, cy, i;
    int offset[AV_NUM_DATA_POINTERS];

    if (HAVE_THREADS && s->avctx->active_thread_type & FF_THREAD_FRAME) {
        int y_flipped = s->flipped_image ? s->height - y : y;

        /* At the end of the frame, report INT_MAX instead of the height of
         * the frame. This makes the other threads' ff_thread_await_progress()
         * calls cheaper, because they don't have to clip their values. */
        ff_thread_report_progress(&s->current_frame,
                                  y_flipped == s->height ? INT_MAX
                                                         : y_flipped - 1,
                                  0);
    }

    if (!s->avctx->draw_horiz_band)
        return;

    /* convert the completed-row position into the band [y, y+h) that is
     * newly finished since the last call */
    h = y - s->last_slice_end;
    s->last_slice_end = y;
    y -= h;

    if (!s->flipped_image)
        y = s->height - y - h;

    /* chroma rows advance at the subsampled rate */
    cy        = y >> s->chroma_y_shift;
    offset[0] = s->current_frame.f->linesize[0] * y;
    offset[1] = s->current_frame.f->linesize[1] * cy;
    offset[2] = s->current_frame.f->linesize[2] * cy;
    for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
        offset[i] = 0;

    emms_c();
    s->avctx->draw_horiz_band(s->avctx, s->current_frame.f, offset, y, 3, h);
}

/**
 * Wait for the reference frame of the current fragment.
 * The progress value is in luma pixel rows.
+ */ +static void await_reference_row(Vp3DecodeContext *s, Vp3Fragment *fragment, + int motion_y, int y) +{ + ThreadFrame *ref_frame; + int ref_row; + int border = motion_y & 1; + + if (fragment->coding_method == MODE_USING_GOLDEN || + fragment->coding_method == MODE_GOLDEN_MV) + ref_frame = &s->golden_frame; + else + ref_frame = &s->last_frame; + + ref_row = y + (motion_y >> 1); + ref_row = FFMAX(FFABS(ref_row), ref_row + 8 + border); + + ff_thread_await_progress(ref_frame, ref_row, 0); +} + +/* + * Perform the final rendering for a particular slice of data. + * The slice number ranges from 0..(c_superblock_height - 1). + */ +static void render_slice(Vp3DecodeContext *s, int slice) +{ + int x, y, i, j, fragment; + int16_t *block = s->block; + int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef; + int motion_halfpel_index; + uint8_t *motion_source; + int plane, first_pixel; + + if (slice >= s->c_superblock_height) + return; + + for (plane = 0; plane < 3; plane++) { + uint8_t *output_plane = s->current_frame.f->data[plane] + + s->data_offset[plane]; + uint8_t *last_plane = s->last_frame.f->data[plane] + + s->data_offset[plane]; + uint8_t *golden_plane = s->golden_frame.f->data[plane] + + s->data_offset[plane]; + ptrdiff_t stride = s->current_frame.f->linesize[plane]; + int plane_width = s->width >> (plane && s->chroma_x_shift); + int plane_height = s->height >> (plane && s->chroma_y_shift); + int8_t(*motion_val)[2] = s->motion_val[!!plane]; + + int sb_x, sb_y = slice << (!plane && s->chroma_y_shift); + int slice_height = sb_y + 1 + (!plane && s->chroma_y_shift); + int slice_width = plane ? 
s->c_superblock_width + : s->y_superblock_width; + + int fragment_width = s->fragment_width[!!plane]; + int fragment_height = s->fragment_height[!!plane]; + int fragment_start = s->fragment_start[plane]; + + int do_await = !plane && HAVE_THREADS && + (s->avctx->active_thread_type & FF_THREAD_FRAME); + + if (!s->flipped_image) + stride = -stride; + if (CONFIG_GRAY && plane && (s->avctx->flags & AV_CODEC_FLAG_GRAY)) + continue; + + /* for each superblock row in the slice (both of them)... */ + for (; sb_y < slice_height; sb_y++) { + /* for each superblock in a row... */ + for (sb_x = 0; sb_x < slice_width; sb_x++) { + /* for each block in a superblock... */ + for (j = 0; j < 16; j++) { + x = 4 * sb_x + hilbert_offset[j][0]; + y = 4 * sb_y + hilbert_offset[j][1]; + fragment = y * fragment_width + x; + + i = fragment_start + fragment; + + // bounds check + if (x >= fragment_width || y >= fragment_height) + continue; + + first_pixel = 8 * y * stride + 8 * x; + + if (do_await && + s->all_fragments[i].coding_method != MODE_INTRA) + await_reference_row(s, &s->all_fragments[i], + motion_val[fragment][1], + (16 * y) >> s->chroma_y_shift); + + /* transform if this block was coded */ + if (s->all_fragments[i].coding_method != MODE_COPY) { + if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) || + (s->all_fragments[i].coding_method == MODE_GOLDEN_MV)) + motion_source = golden_plane; + else + motion_source = last_plane; + + motion_source += first_pixel; + motion_halfpel_index = 0; + + /* sort out the motion vector if this fragment is coded + * using a motion vector method */ + if ((s->all_fragments[i].coding_method > MODE_INTRA) && + (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) { + int src_x, src_y; + motion_x = motion_val[fragment][0]; + motion_y = motion_val[fragment][1]; + + src_x = (motion_x >> 1) + 8 * x; + src_y = (motion_y >> 1) + 8 * y; + + motion_halfpel_index = motion_x & 0x01; + motion_source += (motion_x >> 1); + + motion_halfpel_index |= 
(motion_y & 0x01) << 1; + motion_source += ((motion_y >> 1) * stride); + + if (src_x < 0 || src_y < 0 || + src_x + 9 >= plane_width || + src_y + 9 >= plane_height) { + uint8_t *temp = s->edge_emu_buffer; + if (stride < 0) + temp -= 8 * stride; + + s->vdsp.emulated_edge_mc(temp, motion_source, + stride, stride, + 9, 9, src_x, src_y, + plane_width, + plane_height); + motion_source = temp; + } + } + + /* first, take care of copying a block from either the + * previous or the golden frame */ + if (s->all_fragments[i].coding_method != MODE_INTRA) { + /* Note, it is possible to implement all MC cases + * with put_no_rnd_pixels_l2 which would look more + * like the VP3 source but this would be slower as + * put_no_rnd_pixels_tab is better optimized */ + if (motion_halfpel_index != 3) { + s->hdsp.put_no_rnd_pixels_tab[1][motion_halfpel_index]( + output_plane + first_pixel, + motion_source, stride, 8); + } else { + /* d is 0 if motion_x and _y have the same sign, + * else -1 */ + int d = (motion_x ^ motion_y) >> 31; + s->vp3dsp.put_no_rnd_pixels_l2(output_plane + first_pixel, + motion_source - d, + motion_source + stride + 1 + d, + stride, 8); + } + } + + /* invert DCT and place (or add) in final output */ + + if (s->all_fragments[i].coding_method == MODE_INTRA) { + vp3_dequant(s, s->all_fragments + i, + plane, 0, block); + s->vp3dsp.idct_put(output_plane + first_pixel, + stride, + block); + } else { + if (vp3_dequant(s, s->all_fragments + i, + plane, 1, block)) { + s->vp3dsp.idct_add(output_plane + first_pixel, + stride, + block); + } else { + s->vp3dsp.idct_dc_add(output_plane + first_pixel, + stride, block); + } + } + } else { + /* copy directly from the previous frame */ + s->hdsp.put_pixels_tab[1][0]( + output_plane + first_pixel, + last_plane + first_pixel, + stride, 8); + } + } + } + + // Filter up to the last row in the superblock row + if (!s->skip_loop_filter) + apply_loop_filter(s, plane, 4 * sb_y - !!sb_y, + FFMIN(4 * sb_y + 3, fragment_height - 1)); + } + } + + 
    /* this looks like a good place for slice dispatch... */
    /* algorithm:
     * if (slice == s->macroblock_height - 1)
     *     dispatch (both last slice & 2nd-to-last slice);
     * else if (slice > 0)
     *     dispatch (slice - 1);
     */

    vp3_draw_horiz_band(s, FFMIN((32 << s->chroma_y_shift) * (slice + 1) - 16,
                                 s->height - 16));
}

/// Allocate tables for per-frame data in Vp3DecodeContext
static av_cold int allocate_tables(AVCodecContext *avctx)
{
    Vp3DecodeContext *s = avctx->priv_data;
    int y_fragment_count, c_fragment_count;

    /* drop any previously allocated tables first (safe on first call) */
    free_tables(avctx);

    y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
    c_fragment_count = s->fragment_width[1] * s->fragment_height[1];

    s->superblock_coding = av_mallocz(s->superblock_count);
    s->all_fragments     = av_mallocz_array(s->fragment_count, sizeof(Vp3Fragment));

    s->coded_fragment_list[0] = av_mallocz_array(s->fragment_count, sizeof(int));

    /* 64 token slots per fragment (one per coefficient level) */
    s->dct_tokens_base = av_mallocz_array(s->fragment_count,
                                          64 * sizeof(*s->dct_tokens_base));
    s->motion_val[0] = av_mallocz_array(y_fragment_count, sizeof(*s->motion_val[0]));
    s->motion_val[1] = av_mallocz_array(c_fragment_count, sizeof(*s->motion_val[1]));

    /* work out the block mapping tables */
    s->superblock_fragments = av_mallocz_array(s->superblock_count, 16 * sizeof(int));
    s->macroblock_coding    = av_mallocz(s->macroblock_count + 1);

    /* any single failure tears everything down via vp3_decode_end() */
    if (!s->superblock_coding    || !s->all_fragments ||
        !s->dct_tokens_base      || !s->coded_fragment_list[0] ||
        !s->superblock_fragments || !s->macroblock_coding ||
        !s->motion_val[0]        || !s->motion_val[1]) {
        vp3_decode_end(avctx);
        return -1;
    }

    init_block_mapping(s);

    return 0;
}

static av_cold int init_frames(Vp3DecodeContext *s)
{
    s->current_frame.f = av_frame_alloc();
    s->last_frame.f    = av_frame_alloc();
    s->golden_frame.f  = av_frame_alloc();

    /* on partial failure free all three (av_frame_free(NULL ptr) is a no-op) */
    if (!s->current_frame.f || !s->last_frame.f || !s->golden_frame.f) {
        av_frame_free(&s->current_frame.f);
        av_frame_free(&s->last_frame.f);
av_frame_free(&s->golden_frame.f); + return AVERROR(ENOMEM); + } + + return 0; +} + +static av_cold int vp3_decode_init(AVCodecContext *avctx) +{ + Vp3DecodeContext *s = avctx->priv_data; + int i, inter, plane, ret; + int c_width; + int c_height; + int y_fragment_count, c_fragment_count; + + ret = init_frames(s); + if (ret < 0) + return ret; + + avctx->internal->allocate_progress = 1; + + if (avctx->codec_tag == MKTAG('V', 'P', '3', '0')) + s->version = 0; + else + s->version = 1; + + s->avctx = avctx; + s->width = FFALIGN(avctx->coded_width, 16); + s->height = FFALIGN(avctx->coded_height, 16); + if (avctx->codec_id != AV_CODEC_ID_THEORA) + avctx->pix_fmt = AV_PIX_FMT_YUV420P; + avctx->chroma_sample_location = AVCHROMA_LOC_CENTER; + ff_hpeldsp_init(&s->hdsp, avctx->flags | AV_CODEC_FLAG_BITEXACT); + ff_videodsp_init(&s->vdsp, 8); + ff_vp3dsp_init(&s->vp3dsp, avctx->flags); + + for (i = 0; i < 64; i++) { +#define TRANSPOSE(x) (((x) >> 3) | (((x) & 7) << 3)) + s->idct_permutation[i] = TRANSPOSE(i); + s->idct_scantable[i] = TRANSPOSE(ff_zigzag_direct[i]); +#undef TRANSPOSE + } + + /* initialize to an impossible value which will force a recalculation + * in the first frame decode */ + for (i = 0; i < 3; i++) + s->qps[i] = -1; + + avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift); + + s->y_superblock_width = (s->width + 31) / 32; + s->y_superblock_height = (s->height + 31) / 32; + s->y_superblock_count = s->y_superblock_width * s->y_superblock_height; + + /* work out the dimensions for the C planes */ + c_width = s->width >> s->chroma_x_shift; + c_height = s->height >> s->chroma_y_shift; + s->c_superblock_width = (c_width + 31) / 32; + s->c_superblock_height = (c_height + 31) / 32; + s->c_superblock_count = s->c_superblock_width * s->c_superblock_height; + + s->superblock_count = s->y_superblock_count + (s->c_superblock_count * 2); + s->u_superblock_start = s->y_superblock_count; + s->v_superblock_start = s->u_superblock_start + 
s->c_superblock_count; + + s->macroblock_width = (s->width + 15) / 16; + s->macroblock_height = (s->height + 15) / 16; + s->macroblock_count = s->macroblock_width * s->macroblock_height; + + s->fragment_width[0] = s->width / FRAGMENT_PIXELS; + s->fragment_height[0] = s->height / FRAGMENT_PIXELS; + s->fragment_width[1] = s->fragment_width[0] >> s->chroma_x_shift; + s->fragment_height[1] = s->fragment_height[0] >> s->chroma_y_shift; + + /* fragment count covers all 8x8 blocks for all 3 planes */ + y_fragment_count = s->fragment_width[0] * s->fragment_height[0]; + c_fragment_count = s->fragment_width[1] * s->fragment_height[1]; + s->fragment_count = y_fragment_count + 2 * c_fragment_count; + s->fragment_start[1] = y_fragment_count; + s->fragment_start[2] = y_fragment_count + c_fragment_count; + + if (!s->theora_tables) { + for (i = 0; i < 64; i++) { + s->coded_dc_scale_factor[i] = vp31_dc_scale_factor[i]; + s->coded_ac_scale_factor[i] = vp31_ac_scale_factor[i]; + s->base_matrix[0][i] = vp31_intra_y_dequant[i]; + s->base_matrix[1][i] = vp31_intra_c_dequant[i]; + s->base_matrix[2][i] = vp31_inter_dequant[i]; + s->filter_limit_values[i] = vp31_filter_limit_values[i]; + } + + for (inter = 0; inter < 2; inter++) { + for (plane = 0; plane < 3; plane++) { + s->qr_count[inter][plane] = 1; + s->qr_size[inter][plane][0] = 63; + s->qr_base[inter][plane][0] = + s->qr_base[inter][plane][1] = 2 * inter + (!!plane) * !inter; + } + } + + /* init VLC tables */ + for (i = 0; i < 16; i++) { + /* DC histograms */ + init_vlc(&s->dc_vlc[i], 11, 32, + &dc_bias[i][0][1], 4, 2, + &dc_bias[i][0][0], 4, 2, 0); + + /* group 1 AC histograms */ + init_vlc(&s->ac_vlc_1[i], 11, 32, + &ac_bias_0[i][0][1], 4, 2, + &ac_bias_0[i][0][0], 4, 2, 0); + + /* group 2 AC histograms */ + init_vlc(&s->ac_vlc_2[i], 11, 32, + &ac_bias_1[i][0][1], 4, 2, + &ac_bias_1[i][0][0], 4, 2, 0); + + /* group 3 AC histograms */ + init_vlc(&s->ac_vlc_3[i], 11, 32, + &ac_bias_2[i][0][1], 4, 2, + &ac_bias_2[i][0][0], 4, 2, 0); + 
+ /* group 4 AC histograms */ + init_vlc(&s->ac_vlc_4[i], 11, 32, + &ac_bias_3[i][0][1], 4, 2, + &ac_bias_3[i][0][0], 4, 2, 0); + } + } else { + for (i = 0; i < 16; i++) { + /* DC histograms */ + if (init_vlc(&s->dc_vlc[i], 11, 32, + &s->huffman_table[i][0][1], 8, 4, + &s->huffman_table[i][0][0], 8, 4, 0) < 0) + goto vlc_fail; + + /* group 1 AC histograms */ + if (init_vlc(&s->ac_vlc_1[i], 11, 32, + &s->huffman_table[i + 16][0][1], 8, 4, + &s->huffman_table[i + 16][0][0], 8, 4, 0) < 0) + goto vlc_fail; + + /* group 2 AC histograms */ + if (init_vlc(&s->ac_vlc_2[i], 11, 32, + &s->huffman_table[i + 16 * 2][0][1], 8, 4, + &s->huffman_table[i + 16 * 2][0][0], 8, 4, 0) < 0) + goto vlc_fail; + + /* group 3 AC histograms */ + if (init_vlc(&s->ac_vlc_3[i], 11, 32, + &s->huffman_table[i + 16 * 3][0][1], 8, 4, + &s->huffman_table[i + 16 * 3][0][0], 8, 4, 0) < 0) + goto vlc_fail; + + /* group 4 AC histograms */ + if (init_vlc(&s->ac_vlc_4[i], 11, 32, + &s->huffman_table[i + 16 * 4][0][1], 8, 4, + &s->huffman_table[i + 16 * 4][0][0], 8, 4, 0) < 0) + goto vlc_fail; + } + } + + init_vlc(&s->superblock_run_length_vlc, 6, 34, + &superblock_run_length_vlc_table[0][1], 4, 2, + &superblock_run_length_vlc_table[0][0], 4, 2, 0); + + init_vlc(&s->fragment_run_length_vlc, 5, 30, + &fragment_run_length_vlc_table[0][1], 4, 2, + &fragment_run_length_vlc_table[0][0], 4, 2, 0); + + init_vlc(&s->mode_code_vlc, 3, 8, + &mode_code_vlc_table[0][1], 2, 1, + &mode_code_vlc_table[0][0], 2, 1, 0); + + init_vlc(&s->motion_vector_vlc, 6, 63, + &motion_vector_vlc_table[0][1], 2, 1, + &motion_vector_vlc_table[0][0], 2, 1, 0); + + return allocate_tables(avctx); + +vlc_fail: + av_log(avctx, AV_LOG_FATAL, "Invalid huffman table\n"); + return -1; +} + +/// Release and shuffle frames after decode finishes +static int update_frames(AVCodecContext *avctx) +{ + Vp3DecodeContext *s = avctx->priv_data; + int ret = 0; + + /* shuffle frames (last = current) */ + ff_thread_release_buffer(avctx, &s->last_frame); + ret 
= ff_thread_ref_frame(&s->last_frame, &s->current_frame); + if (ret < 0) + goto fail; + + if (s->keyframe) { + ff_thread_release_buffer(avctx, &s->golden_frame); + ret = ff_thread_ref_frame(&s->golden_frame, &s->current_frame); + } + +fail: + ff_thread_release_buffer(avctx, &s->current_frame); + return ret; +} + +static int ref_frame(Vp3DecodeContext *s, ThreadFrame *dst, ThreadFrame *src) +{ + ff_thread_release_buffer(s->avctx, dst); + if (src->f->data[0]) + return ff_thread_ref_frame(dst, src); + return 0; +} + +static int ref_frames(Vp3DecodeContext *dst, Vp3DecodeContext *src) +{ + int ret; + if ((ret = ref_frame(dst, &dst->current_frame, &src->current_frame)) < 0 || + (ret = ref_frame(dst, &dst->golden_frame, &src->golden_frame)) < 0 || + (ret = ref_frame(dst, &dst->last_frame, &src->last_frame)) < 0) + return ret; + return 0; +} + +#if HAVE_THREADS +static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *src) +{ + Vp3DecodeContext *s = dst->priv_data, *s1 = src->priv_data; + int qps_changed = 0, i, err; + +#define copy_fields(to, from, start_field, end_field) \ + memcpy(&to->start_field, &from->start_field, \ + (char *) &to->end_field - (char *) &to->start_field) + + if (!s1->current_frame.f->data[0] || + s->width != s1->width || s->height != s1->height) { + if (s != s1) + ref_frames(s, s1); + return -1; + } + + if (s != s1) { + if (!s->current_frame.f) + return AVERROR(ENOMEM); + // init tables if the first frame hasn't been decoded + if (!s->current_frame.f->data[0]) { + int y_fragment_count, c_fragment_count; + s->avctx = dst; + err = allocate_tables(dst); + if (err) + return err; + y_fragment_count = s->fragment_width[0] * s->fragment_height[0]; + c_fragment_count = s->fragment_width[1] * s->fragment_height[1]; + memcpy(s->motion_val[0], s1->motion_val[0], + y_fragment_count * sizeof(*s->motion_val[0])); + memcpy(s->motion_val[1], s1->motion_val[1], + c_fragment_count * sizeof(*s->motion_val[1])); + } + + // copy previous frame 
data + if ((err = ref_frames(s, s1)) < 0) + return err; + + s->keyframe = s1->keyframe; + + // copy qscale data if necessary + for (i = 0; i < 3; i++) { + if (s->qps[i] != s1->qps[1]) { + qps_changed = 1; + memcpy(&s->qmat[i], &s1->qmat[i], sizeof(s->qmat[i])); + } + } + + if (s->qps[0] != s1->qps[0]) + memcpy(&s->bounding_values_array, &s1->bounding_values_array, + sizeof(s->bounding_values_array)); + + if (qps_changed) + copy_fields(s, s1, qps, superblock_count); +#undef copy_fields + } + + return update_frames(dst); +} +#endif + +static int vp3_decode_frame(AVCodecContext *avctx, + void *data, int *got_frame, + AVPacket *avpkt) +{ + const uint8_t *buf = avpkt->data; + int buf_size = avpkt->size; + Vp3DecodeContext *s = avctx->priv_data; + GetBitContext gb; + int i, ret; + + if ((ret = init_get_bits8(&gb, buf, buf_size)) < 0) + return ret; + +#if CONFIG_THEORA_DECODER + if (s->theora && get_bits1(&gb)) { + int type = get_bits(&gb, 7); + skip_bits_long(&gb, 6*8); /* "theora" */ + + if (s->avctx->active_thread_type&FF_THREAD_FRAME) { + av_log(avctx, AV_LOG_ERROR, "midstream reconfiguration with multithreading is unsupported, try -threads 1\n"); + return AVERROR_PATCHWELCOME; + } + if (type == 0) { + vp3_decode_end(avctx); + ret = theora_decode_header(avctx, &gb); + + if (ret >= 0) + ret = vp3_decode_init(avctx); + if (ret < 0) { + vp3_decode_end(avctx); + return ret; + } + return buf_size; + } else if (type == 2) { + vp3_decode_end(avctx); + ret = theora_decode_tables(avctx, &gb); + if (ret >= 0) + ret = vp3_decode_init(avctx); + if (ret < 0) { + vp3_decode_end(avctx); + return ret; + } + return buf_size; + } + + av_log(avctx, AV_LOG_ERROR, + "Header packet passed to frame decoder, skipping\n"); + return -1; + } +#endif + + s->keyframe = !get_bits1(&gb); + if (!s->all_fragments) { + av_log(avctx, AV_LOG_ERROR, "Data packet without prior valid headers\n"); + return -1; + } + if (!s->theora) + skip_bits(&gb, 1); + for (i = 0; i < 3; i++) + s->last_qps[i] = s->qps[i]; 
+ + s->nqps = 0; + do { + s->qps[s->nqps++] = get_bits(&gb, 6); + } while (s->theora >= 0x030200 && s->nqps < 3 && get_bits1(&gb)); + for (i = s->nqps; i < 3; i++) + s->qps[i] = -1; + + if (s->avctx->debug & FF_DEBUG_PICT_INFO) + av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n", + s->keyframe ? "key" : "", avctx->frame_number + 1, s->qps[0]); + + s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] || + avctx->skip_loop_filter >= (s->keyframe ? AVDISCARD_ALL + : AVDISCARD_NONKEY); + + if (s->qps[0] != s->last_qps[0]) + init_loop_filter(s); + + for (i = 0; i < s->nqps; i++) + // reinit all dequantizers if the first one changed, because + // the DC of the first quantizer must be used for all matrices + if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0]) + init_dequantizer(s, i); + + if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe) + return buf_size; + + s->current_frame.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I + : AV_PICTURE_TYPE_P; + s->current_frame.f->key_frame = s->keyframe; + if (ff_thread_get_buffer(avctx, &s->current_frame, AV_GET_BUFFER_FLAG_REF) < 0) + goto error; + + if (!s->edge_emu_buffer) + s->edge_emu_buffer = av_malloc(9 * FFABS(s->current_frame.f->linesize[0])); + + if (s->keyframe) { + if (!s->theora) { + skip_bits(&gb, 4); /* width code */ + skip_bits(&gb, 4); /* height code */ + if (s->version) { + s->version = get_bits(&gb, 5); + if (avctx->frame_number == 0) + av_log(s->avctx, AV_LOG_DEBUG, + "VP version: %d\n", s->version); + } + } + if (s->version || s->theora) { + if (get_bits1(&gb)) + av_log(s->avctx, AV_LOG_ERROR, + "Warning, unsupported keyframe coding type?!\n"); + skip_bits(&gb, 2); /* reserved? 
*/ + } + } else { + if (!s->golden_frame.f->data[0]) { + av_log(s->avctx, AV_LOG_WARNING, + "vp3: first frame not a keyframe\n"); + + s->golden_frame.f->pict_type = AV_PICTURE_TYPE_I; + if (ff_thread_get_buffer(avctx, &s->golden_frame, + AV_GET_BUFFER_FLAG_REF) < 0) + goto error; + ff_thread_release_buffer(avctx, &s->last_frame); + if ((ret = ff_thread_ref_frame(&s->last_frame, + &s->golden_frame)) < 0) + goto error; + ff_thread_report_progress(&s->last_frame, INT_MAX, 0); + } + } + + memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment)); + ff_thread_finish_setup(avctx); + + if (unpack_superblocks(s, &gb)) { + av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n"); + goto error; + } + if (unpack_modes(s, &gb)) { + av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n"); + goto error; + } + if (unpack_vectors(s, &gb)) { + av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n"); + goto error; + } + if (unpack_block_qpis(s, &gb)) { + av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n"); + goto error; + } + if (unpack_dct_coeffs(s, &gb)) { + av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n"); + goto error; + } + + for (i = 0; i < 3; i++) { + int height = s->height >> (i && s->chroma_y_shift); + if (s->flipped_image) + s->data_offset[i] = 0; + else + s->data_offset[i] = (height - 1) * s->current_frame.f->linesize[i]; + } + + s->last_slice_end = 0; + for (i = 0; i < s->c_superblock_height; i++) + render_slice(s, i); + + // filter the last row + for (i = 0; i < 3; i++) { + int row = (s->height >> (3 + (i && s->chroma_y_shift))) - 1; + apply_loop_filter(s, i, row, row + 1); + } + vp3_draw_horiz_band(s, s->height); + + /* output frame, offset as needed */ + if ((ret = av_frame_ref(data, s->current_frame.f)) < 0) + return ret; + for (i = 0; i < 3; i++) { + AVFrame *dst = data; + int off = (s->offset_x >> (i && s->chroma_y_shift)) + + (s->offset_y >> (i && s->chroma_y_shift)) * dst->linesize[i]; + dst->data[i] += off; + } + 
*got_frame = 1; + + if (!HAVE_THREADS || !(s->avctx->active_thread_type & FF_THREAD_FRAME)) { + ret = update_frames(avctx); + if (ret < 0) + return ret; + } + + return buf_size; + +error: + ff_thread_report_progress(&s->current_frame, INT_MAX, 0); + + if (!HAVE_THREADS || !(s->avctx->active_thread_type & FF_THREAD_FRAME)) + av_frame_unref(s->current_frame.f); + + return -1; +} + +static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb) +{ + Vp3DecodeContext *s = avctx->priv_data; + + if (get_bits1(gb)) { + int token; + if (s->entries >= 32) { /* overflow */ + av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n"); + return -1; + } + token = get_bits(gb, 5); + ff_dlog(avctx, "hti %d hbits %x token %d entry : %d size %d\n", + s->hti, s->hbits, token, s->entries, s->huff_code_size); + s->huffman_table[s->hti][token][0] = s->hbits; + s->huffman_table[s->hti][token][1] = s->huff_code_size; + s->entries++; + } else { + if (s->huff_code_size >= 32) { /* overflow */ + av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n"); + return -1; + } + s->huff_code_size++; + s->hbits <<= 1; + if (read_huffman_tree(avctx, gb)) + return -1; + s->hbits |= 1; + if (read_huffman_tree(avctx, gb)) + return -1; + s->hbits >>= 1; + s->huff_code_size--; + } + return 0; +} + +#if HAVE_THREADS +static int vp3_init_thread_copy(AVCodecContext *avctx) +{ + Vp3DecodeContext *s = avctx->priv_data; + + s->superblock_coding = NULL; + s->all_fragments = NULL; + s->coded_fragment_list[0] = NULL; + s->dct_tokens_base = NULL; + s->superblock_fragments = NULL; + s->macroblock_coding = NULL; + s->motion_val[0] = NULL; + s->motion_val[1] = NULL; + s->edge_emu_buffer = NULL; + + return init_frames(s); +} +#endif + +#if CONFIG_THEORA_DECODER +static const enum AVPixelFormat theora_pix_fmts[4] = { + AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P +}; + +static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb) +{ + Vp3DecodeContext *s = avctx->priv_data; 
+ int visible_width, visible_height, colorspace; + uint8_t offset_x = 0, offset_y = 0; + int ret; + AVRational fps, aspect; + + s->theora_header = 0; + s->theora = get_bits_long(gb, 24); + av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora); + + /* 3.2.0 aka alpha3 has the same frame orientation as original vp3 + * but previous versions have the image flipped relative to vp3 */ + if (s->theora < 0x030200) { + s->flipped_image = 1; + av_log(avctx, AV_LOG_DEBUG, + "Old (width = get_bits(gb, 16) << 4; + visible_height = + s->height = get_bits(gb, 16) << 4; + + if (s->theora >= 0x030200) { + visible_width = get_bits_long(gb, 24); + visible_height = get_bits_long(gb, 24); + + offset_x = get_bits(gb, 8); /* offset x */ + offset_y = get_bits(gb, 8); /* offset y, from bottom */ + } + + /* sanity check */ + if (av_image_check_size(visible_width, visible_height, 0, avctx) < 0 || + visible_width + offset_x > s->width || + visible_height + offset_y > s->height) { + av_log(avctx, AV_LOG_ERROR, + "Invalid frame dimensions - w:%d h:%d x:%d y:%d (%dx%d).\n", + visible_width, visible_height, offset_x, offset_y, + s->width, s->height); + return AVERROR_INVALIDDATA; + } + + fps.num = get_bits_long(gb, 32); + fps.den = get_bits_long(gb, 32); + if (fps.num && fps.den) { + if (fps.num < 0 || fps.den < 0) { + av_log(avctx, AV_LOG_ERROR, "Invalid framerate\n"); + return AVERROR_INVALIDDATA; + } + av_reduce(&avctx->framerate.den, &avctx->framerate.num, + fps.den, fps.num, 1 << 30); + } + + aspect.num = get_bits_long(gb, 24); + aspect.den = get_bits_long(gb, 24); + if (aspect.num && aspect.den) { + av_reduce(&avctx->sample_aspect_ratio.num, + &avctx->sample_aspect_ratio.den, + aspect.num, aspect.den, 1 << 30); + ff_set_sar(avctx, avctx->sample_aspect_ratio); + } + + if (s->theora < 0x030200) + skip_bits(gb, 5); /* keyframe frequency force */ + colorspace = get_bits(gb, 8); + skip_bits(gb, 24); /* bitrate */ + + skip_bits(gb, 6); /* quality hint */ + + if (s->theora >= 
0x030200) { + skip_bits(gb, 5); /* keyframe frequency force */ + avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)]; + if (avctx->pix_fmt == AV_PIX_FMT_NONE) { + av_log(avctx, AV_LOG_ERROR, "Invalid pixel format\n"); + return AVERROR_INVALIDDATA; + } + skip_bits(gb, 3); /* reserved */ + } else + avctx->pix_fmt = AV_PIX_FMT_YUV420P; + + ret = ff_set_dimensions(avctx, s->width, s->height); + if (ret < 0) + return ret; + if (!(avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP)) { + avctx->width = visible_width; + avctx->height = visible_height; + // translate offsets from theora axis ([0,0] lower left) + // to normal axis ([0,0] upper left) + s->offset_x = offset_x; + s->offset_y = s->height - visible_height - offset_y; + + if ((s->offset_x & 0x1F) && !(avctx->flags & AV_CODEC_FLAG_UNALIGNED)) { + s->offset_x &= ~0x1F; + if (!s->offset_x_warned) { + s->offset_x_warned = 1; + av_log(avctx, AV_LOG_WARNING, "Reducing offset_x from %d to %d" + "chroma samples to preserve alignment.\n", + offset_x, s->offset_x); + } + } + } + + if (colorspace == 1) + avctx->color_primaries = AVCOL_PRI_BT470M; + else if (colorspace == 2) + avctx->color_primaries = AVCOL_PRI_BT470BG; + + if (colorspace == 1 || colorspace == 2) { + avctx->colorspace = AVCOL_SPC_BT470BG; + avctx->color_trc = AVCOL_TRC_BT709; + } + + s->theora_header = 1; + return 0; +} + +static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb) +{ + Vp3DecodeContext *s = avctx->priv_data; + int i, n, matrices, inter, plane; + + if (!s->theora_header) + return AVERROR_INVALIDDATA; + + if (s->theora >= 0x030200) { + n = get_bits(gb, 3); + /* loop filter limit values table */ + if (n) + for (i = 0; i < 64; i++) + s->filter_limit_values[i] = get_bits(gb, n); + } + + if (s->theora >= 0x030200) + n = get_bits(gb, 4) + 1; + else + n = 16; + /* quality threshold table */ + for (i = 0; i < 64; i++) + s->coded_ac_scale_factor[i] = get_bits(gb, n); + + if (s->theora >= 0x030200) + n = get_bits(gb, 4) + 1; + else + n = 16; + /* 
dc scale factor table */ + for (i = 0; i < 64; i++) + s->coded_dc_scale_factor[i] = get_bits(gb, n); + + if (s->theora >= 0x030200) + matrices = get_bits(gb, 9) + 1; + else + matrices = 3; + + if (matrices > 384) { + av_log(avctx, AV_LOG_ERROR, "invalid number of base matrixes\n"); + return -1; + } + + for (n = 0; n < matrices; n++) + for (i = 0; i < 64; i++) + s->base_matrix[n][i] = get_bits(gb, 8); + + for (inter = 0; inter <= 1; inter++) { + for (plane = 0; plane <= 2; plane++) { + int newqr = 1; + if (inter || plane > 0) + newqr = get_bits1(gb); + if (!newqr) { + int qtj, plj; + if (inter && get_bits1(gb)) { + qtj = 0; + plj = plane; + } else { + qtj = (3 * inter + plane - 1) / 3; + plj = (plane + 2) % 3; + } + s->qr_count[inter][plane] = s->qr_count[qtj][plj]; + memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj], + sizeof(s->qr_size[0][0])); + memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj], + sizeof(s->qr_base[0][0])); + } else { + int qri = 0; + int qi = 0; + + for (;;) { + i = get_bits(gb, av_log2(matrices - 1) + 1); + if (i >= matrices) { + av_log(avctx, AV_LOG_ERROR, + "invalid base matrix index\n"); + return -1; + } + s->qr_base[inter][plane][qri] = i; + if (qi >= 63) + break; + i = get_bits(gb, av_log2(63 - qi) + 1) + 1; + s->qr_size[inter][plane][qri++] = i; + qi += i; + } + + if (qi > 63) { + av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi); + return -1; + } + s->qr_count[inter][plane] = qri; + } + } + } + + /* Huffman tables */ + for (s->hti = 0; s->hti < 80; s->hti++) { + s->entries = 0; + s->huff_code_size = 1; + if (!get_bits1(gb)) { + s->hbits = 0; + if (read_huffman_tree(avctx, gb)) + return -1; + s->hbits = 1; + if (read_huffman_tree(avctx, gb)) + return -1; + } + } + + s->theora_tables = 1; + + return 0; +} + +static av_cold int theora_decode_init(AVCodecContext *avctx) +{ + Vp3DecodeContext *s = avctx->priv_data; + GetBitContext gb; + int ptype; + const uint8_t *header_start[3]; + int header_len[3]; + int i; + int ret; + + 
avctx->pix_fmt = AV_PIX_FMT_YUV420P; + + s->theora = 1; + + if (!avctx->extradata_size) { + av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n"); + return -1; + } + + if (avpriv_split_xiph_headers(avctx->extradata, avctx->extradata_size, + 42, header_start, header_len) < 0) { + av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n"); + return -1; + } + + for (i = 0; i < 3; i++) { + if (header_len[i] <= 0) + continue; + ret = init_get_bits8(&gb, header_start[i], header_len[i]); + if (ret < 0) + return ret; + + ptype = get_bits(&gb, 8); + + if (!(ptype & 0x80)) { + av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n"); +// return -1; + } + + // FIXME: Check for this as well. + skip_bits_long(&gb, 6 * 8); /* "theora" */ + + switch (ptype) { + case 0x80: + if (theora_decode_header(avctx, &gb) < 0) + return -1; + break; + case 0x81: +// FIXME: is this needed? it breaks sometimes +// theora_decode_comments(avctx, gb); + break; + case 0x82: + if (theora_decode_tables(avctx, &gb)) + return -1; + break; + default: + av_log(avctx, AV_LOG_ERROR, + "Unknown Theora config packet: %d\n", ptype & ~0x80); + break; + } + if (ptype != 0x81 && 8 * header_len[i] != get_bits_count(&gb)) + av_log(avctx, AV_LOG_WARNING, + "%d bits left in packet %X\n", + 8 * header_len[i] - get_bits_count(&gb), ptype); + if (s->theora < 0x030200) + break; + } + + return vp3_decode_init(avctx); +} + +AVCodec ff_theora_decoder = { + .name = "theora", + .long_name = NULL_IF_CONFIG_SMALL("Theora"), + .type = AVMEDIA_TYPE_VIDEO, + .id = AV_CODEC_ID_THEORA, + .priv_data_size = sizeof(Vp3DecodeContext), + .init = theora_decode_init, + .close = vp3_decode_end, + .decode = vp3_decode_frame, + .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND | + AV_CODEC_CAP_FRAME_THREADS, + .flush = vp3_decode_flush, + .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy), + .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context) +}; +#endif + +AVCodec ff_vp3_decoder = { + .name = "vp3", + 
.long_name = NULL_IF_CONFIG_SMALL("On2 VP3"), + .type = AVMEDIA_TYPE_VIDEO, + .id = AV_CODEC_ID_VP3, + .priv_data_size = sizeof(Vp3DecodeContext), + .init = vp3_decode_init, + .close = vp3_decode_end, + .decode = vp3_decode_frame, + .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND | + AV_CODEC_CAP_FRAME_THREADS, + .flush = vp3_decode_flush, + .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp3_init_thread_copy), + .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context), +}; From 391fa80380523df9444569f42f41c8d8f097492e Mon Sep 17 00:00:00 2001 From: turly221 Date: Mon, 9 Dec 2024 16:06:12 +0000 Subject: [PATCH 13/13] commit patch 25381687 --- libavformat/nutdec.c | 16 +- libavformat/nutdec.c.orig | 1341 +++++++++++++++++++++++++++++++++++++ 2 files changed, 1353 insertions(+), 4 deletions(-) create mode 100644 libavformat/nutdec.c.orig diff --git a/libavformat/nutdec.c b/libavformat/nutdec.c index 2e309bcad7fd2..92dfb9b162cc6 100644 --- a/libavformat/nutdec.c +++ b/libavformat/nutdec.c @@ -382,8 +382,12 @@ static int decode_main_header(NUTContext *nut) ret = AVERROR(ENOMEM); goto fail; } - for (i = 0; i < stream_count; i++) - avformat_new_stream(s, NULL); + for (i = 0; i < stream_count; i++) { + if (!avformat_new_stream(s, NULL)) { + ret = AVERROR(ENOMEM); + goto fail; + } + } return 0; fail: @@ -829,19 +833,23 @@ static int nut_read_header(AVFormatContext *s) NUTContext *nut = s->priv_data; AVIOContext *bc = s->pb; int64_t pos; - int initialized_stream_count; + int initialized_stream_count, ret; nut->avf = s; /* main header */ pos = 0; + ret = 0; do { + if (ret == AVERROR(ENOMEM)) + return ret; + pos = find_startcode(bc, MAIN_STARTCODE, pos) + 1; if (pos < 0 + 1) { av_log(s, AV_LOG_ERROR, "No main startcode found.\n"); goto fail; } - } while (decode_main_header(nut) < 0); + } while ((ret = decode_main_header(nut)) < 0); /* stream headers */ pos = 0; diff --git a/libavformat/nutdec.c.orig b/libavformat/nutdec.c.orig new file mode 
100644 index 0000000000000..2e309bcad7fd2 --- /dev/null +++ b/libavformat/nutdec.c.orig @@ -0,0 +1,1341 @@ +/* + * "NUT" Container Format demuxer + * Copyright (c) 2004-2006 Michael Niedermayer + * Copyright (c) 2003 Alex Beregszaszi + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "libavutil/avstring.h" +#include "libavutil/avassert.h" +#include "libavutil/bswap.h" +#include "libavutil/dict.h" +#include "libavutil/intreadwrite.h" +#include "libavutil/mathematics.h" +#include "libavutil/tree.h" +#include "libavcodec/bytestream.h" +#include "avio_internal.h" +#include "isom.h" +#include "nut.h" +#include "riff.h" + +#define NUT_MAX_STREAMS 256 /* arbitrary sanity check value */ + +static int64_t nut_read_timestamp(AVFormatContext *s, int stream_index, + int64_t *pos_arg, int64_t pos_limit); + +static int get_str(AVIOContext *bc, char *string, unsigned int maxlen) +{ + unsigned int len = ffio_read_varlen(bc); + + if (len && maxlen) + avio_read(bc, string, FFMIN(len, maxlen)); + while (len > maxlen) { + avio_r8(bc); + len--; + if (bc->eof_reached) + len = maxlen; + } + + if (maxlen) + string[FFMIN(len, maxlen - 1)] = 0; + + if (bc->eof_reached) + return AVERROR_EOF; + if (maxlen == len) + return -1; + else + return 0; +} + +static int64_t 
get_s(AVIOContext *bc) +{ + int64_t v = ffio_read_varlen(bc) + 1; + + if (v & 1) + return -(v >> 1); + else + return (v >> 1); +} + +static uint64_t get_fourcc(AVIOContext *bc) +{ + unsigned int len = ffio_read_varlen(bc); + + if (len == 2) + return avio_rl16(bc); + else if (len == 4) + return avio_rl32(bc); + else { + av_log(NULL, AV_LOG_ERROR, "Unsupported fourcc length %d\n", len); + return -1; + } +} + +#ifdef TRACE +static inline uint64_t get_v_trace(AVIOContext *bc, const char *file, + const char *func, int line) +{ + uint64_t v = ffio_read_varlen(bc); + + av_log(NULL, AV_LOG_DEBUG, "get_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", + v, v, file, func, line); + return v; +} + +static inline int64_t get_s_trace(AVIOContext *bc, const char *file, + const char *func, int line) +{ + int64_t v = get_s(bc); + + av_log(NULL, AV_LOG_DEBUG, "get_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", + v, v, file, func, line); + return v; +} + +static inline uint64_t get_4cc_trace(AVIOContext *bc, char *file, + char *func, int line) +{ + uint64_t v = get_fourcc(bc); + + av_log(NULL, AV_LOG_DEBUG, "get_fourcc %5"PRId64" / %"PRIX64" in %s %s:%d\n", + v, v, file, func, line); + return v; +} +#define ffio_read_varlen(bc) get_v_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__) +#define get_s(bc) get_s_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__) +#define get_fourcc(bc) get_4cc_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__) +#endif + +static int get_packetheader(NUTContext *nut, AVIOContext *bc, + int calculate_checksum, uint64_t startcode) +{ + int64_t size; +// start = avio_tell(bc) - 8; + + startcode = av_be2ne64(startcode); + startcode = ff_crc04C11DB7_update(0, (uint8_t*) &startcode, 8); + + ffio_init_checksum(bc, ff_crc04C11DB7_update, startcode); + size = ffio_read_varlen(bc); + if (size > 4096) + avio_rb32(bc); + if (ffio_get_checksum(bc) && size > 4096) + return -1; + + ffio_init_checksum(bc, calculate_checksum ? 
ff_crc04C11DB7_update : NULL, 0); + + return size; +} + +static uint64_t find_any_startcode(AVIOContext *bc, int64_t pos) +{ + uint64_t state = 0; + + if (pos >= 0) + /* Note, this may fail if the stream is not seekable, but that should + * not matter, as in this case we simply start where we currently are */ + avio_seek(bc, pos, SEEK_SET); + while (!avio_feof(bc)) { + state = (state << 8) | avio_r8(bc); + if ((state >> 56) != 'N') + continue; + switch (state) { + case MAIN_STARTCODE: + case STREAM_STARTCODE: + case SYNCPOINT_STARTCODE: + case INFO_STARTCODE: + case INDEX_STARTCODE: + return state; + } + } + + return 0; +} + +/** + * Find the given startcode. + * @param code the startcode + * @param pos the start position of the search, or -1 if the current position + * @return the position of the startcode or -1 if not found + */ +static int64_t find_startcode(AVIOContext *bc, uint64_t code, int64_t pos) +{ + for (;;) { + uint64_t startcode = find_any_startcode(bc, pos); + if (startcode == code) + return avio_tell(bc) - 8; + else if (startcode == 0) + return -1; + pos = -1; + } +} + +static int nut_probe(AVProbeData *p) +{ + int i; + + for (i = 0; i < p->buf_size-8; i++) { + if (AV_RB32(p->buf+i) != MAIN_STARTCODE>>32) + continue; + if (AV_RB32(p->buf+i+4) == (MAIN_STARTCODE & 0xFFFFFFFF)) + return AVPROBE_SCORE_MAX; + } + return 0; +} + +#define GET_V(dst, check) \ + do { \ + tmp = ffio_read_varlen(bc); \ + if (!(check)) { \ + av_log(s, AV_LOG_ERROR, "Error " #dst " is (%"PRId64")\n", tmp); \ + ret = AVERROR_INVALIDDATA; \ + goto fail; \ + } \ + dst = tmp; \ + } while (0) + +static int skip_reserved(AVIOContext *bc, int64_t pos) +{ + pos -= avio_tell(bc); + if (pos < 0) { + avio_seek(bc, pos, SEEK_CUR); + return AVERROR_INVALIDDATA; + } else { + while (pos--) { + if (bc->eof_reached) + return AVERROR_INVALIDDATA; + avio_r8(bc); + } + return 0; + } +} + +static int decode_main_header(NUTContext *nut) +{ + AVFormatContext *s = nut->avf; + AVIOContext *bc = s->pb; + 
uint64_t tmp, end; + unsigned int stream_count; + int i, j, count, ret; + int tmp_stream, tmp_mul, tmp_pts, tmp_size, tmp_res, tmp_head_idx; + + end = get_packetheader(nut, bc, 1, MAIN_STARTCODE); + end += avio_tell(bc); + + nut->version = ffio_read_varlen(bc); + if (nut->version < NUT_MIN_VERSION && + nut->version > NUT_MAX_VERSION) { + av_log(s, AV_LOG_ERROR, "Version %d not supported.\n", + nut->version); + return AVERROR(ENOSYS); + } + if (nut->version > 3) + nut->minor_version = ffio_read_varlen(bc); + + GET_V(stream_count, tmp > 0 && tmp <= NUT_MAX_STREAMS); + + nut->max_distance = ffio_read_varlen(bc); + if (nut->max_distance > 65536) { + av_log(s, AV_LOG_DEBUG, "max_distance %d\n", nut->max_distance); + nut->max_distance = 65536; + } + + GET_V(nut->time_base_count, tmp > 0 && tmp < INT_MAX / sizeof(AVRational)); + nut->time_base = av_malloc_array(nut->time_base_count, sizeof(AVRational)); + if (!nut->time_base) + return AVERROR(ENOMEM); + + for (i = 0; i < nut->time_base_count; i++) { + GET_V(nut->time_base[i].num, tmp > 0 && tmp < (1ULL << 31)); + GET_V(nut->time_base[i].den, tmp > 0 && tmp < (1ULL << 31)); + if (av_gcd(nut->time_base[i].num, nut->time_base[i].den) != 1) { + av_log(s, AV_LOG_ERROR, "invalid time base %d/%d\n", + nut->time_base[i].num, + nut->time_base[i].den); + ret = AVERROR_INVALIDDATA; + goto fail; + } + } + tmp_pts = 0; + tmp_mul = 1; + tmp_stream = 0; + tmp_head_idx = 0; + for (i = 0; i < 256;) { + int tmp_flags = ffio_read_varlen(bc); + int tmp_fields = ffio_read_varlen(bc); + + if (tmp_fields > 0) + tmp_pts = get_s(bc); + if (tmp_fields > 1) + tmp_mul = ffio_read_varlen(bc); + if (tmp_fields > 2) + tmp_stream = ffio_read_varlen(bc); + if (tmp_fields > 3) + tmp_size = ffio_read_varlen(bc); + else + tmp_size = 0; + if (tmp_fields > 4) + tmp_res = ffio_read_varlen(bc); + else + tmp_res = 0; + if (tmp_fields > 5) + count = ffio_read_varlen(bc); + else + count = tmp_mul - tmp_size; + if (tmp_fields > 6) + get_s(bc); + if (tmp_fields > 7) 
+ tmp_head_idx = ffio_read_varlen(bc); + + while (tmp_fields-- > 8) { + if (bc->eof_reached) { + av_log(s, AV_LOG_ERROR, "reached EOF while decoding main header\n"); + ret = AVERROR_INVALIDDATA; + goto fail; + } + ffio_read_varlen(bc); + } + + if (count <= 0 || count > 256 - (i <= 'N') - i) { + av_log(s, AV_LOG_ERROR, "illegal count %d at %d\n", count, i); + ret = AVERROR_INVALIDDATA; + goto fail; + } + if (tmp_stream >= stream_count) { + av_log(s, AV_LOG_ERROR, "illegal stream number %d >= %d\n", + tmp_stream, stream_count); + ret = AVERROR_INVALIDDATA; + goto fail; + } + + for (j = 0; j < count; j++, i++) { + if (i == 'N') { + nut->frame_code[i].flags = FLAG_INVALID; + j--; + continue; + } + nut->frame_code[i].flags = tmp_flags; + nut->frame_code[i].pts_delta = tmp_pts; + nut->frame_code[i].stream_id = tmp_stream; + nut->frame_code[i].size_mul = tmp_mul; + nut->frame_code[i].size_lsb = tmp_size + j; + nut->frame_code[i].reserved_count = tmp_res; + nut->frame_code[i].header_idx = tmp_head_idx; + } + } + av_assert0(nut->frame_code['N'].flags == FLAG_INVALID); + + if (end > avio_tell(bc) + 4) { + int rem = 1024; + GET_V(nut->header_count, tmp < 128U); + nut->header_count++; + for (i = 1; i < nut->header_count; i++) { + uint8_t *hdr; + GET_V(nut->header_len[i], tmp > 0 && tmp < 256); + if (rem < nut->header_len[i]) { + av_log(s, AV_LOG_ERROR, + "invalid elision header %d : %d > %d\n", + i, nut->header_len[i], rem); + ret = AVERROR_INVALIDDATA; + goto fail; + } + rem -= nut->header_len[i]; + hdr = av_malloc(nut->header_len[i]); + if (!hdr) { + ret = AVERROR(ENOMEM); + goto fail; + } + avio_read(bc, hdr, nut->header_len[i]); + nut->header[i] = hdr; + } + av_assert0(nut->header_len[0] == 0); + } + + // flags had been effectively introduced in version 4 + if (nut->version > 3 && end > avio_tell(bc) + 4) { + nut->flags = ffio_read_varlen(bc); + } + + if (skip_reserved(bc, end) || ffio_get_checksum(bc)) { + av_log(s, AV_LOG_ERROR, "main header checksum mismatch\n"); + ret 
= AVERROR_INVALIDDATA; + goto fail; + } + + nut->stream = av_calloc(stream_count, sizeof(StreamContext)); + if (!nut->stream) { + ret = AVERROR(ENOMEM); + goto fail; + } + for (i = 0; i < stream_count; i++) + avformat_new_stream(s, NULL); + + return 0; +fail: + av_freep(&nut->time_base); + for (i = 1; i < nut->header_count; i++) { + av_freep(&nut->header[i]); + } + nut->header_count = 0; + return ret; +} + +static int decode_stream_header(NUTContext *nut) +{ + AVFormatContext *s = nut->avf; + AVIOContext *bc = s->pb; + StreamContext *stc; + int class, stream_id, ret; + uint64_t tmp, end; + AVStream *st = NULL; + + end = get_packetheader(nut, bc, 1, STREAM_STARTCODE); + end += avio_tell(bc); + + GET_V(stream_id, tmp < s->nb_streams && !nut->stream[tmp].time_base); + stc = &nut->stream[stream_id]; + st = s->streams[stream_id]; + if (!st) + return AVERROR(ENOMEM); + + class = ffio_read_varlen(bc); + tmp = get_fourcc(bc); + st->codecpar->codec_tag = tmp; + switch (class) { + case 0: + st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; + st->codecpar->codec_id = av_codec_get_id((const AVCodecTag * const []) { + ff_nut_video_tags, + ff_codec_bmp_tags, + ff_codec_movvideo_tags, + 0 + }, + tmp); + break; + case 1: + st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; + st->codecpar->codec_id = av_codec_get_id((const AVCodecTag * const []) { + ff_nut_audio_tags, + ff_codec_wav_tags, + ff_nut_audio_extra_tags, + 0 + }, + tmp); + break; + case 2: + st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE; + st->codecpar->codec_id = ff_codec_get_id(ff_nut_subtitle_tags, tmp); + break; + case 3: + st->codecpar->codec_type = AVMEDIA_TYPE_DATA; + st->codecpar->codec_id = ff_codec_get_id(ff_nut_data_tags, tmp); + break; + default: + av_log(s, AV_LOG_ERROR, "unknown stream class (%d)\n", class); + return AVERROR(ENOSYS); + } + if (class < 3 && st->codecpar->codec_id == AV_CODEC_ID_NONE) + av_log(s, AV_LOG_ERROR, + "Unknown codec tag '0x%04x' for stream number %d\n", + (unsigned int) tmp, 
stream_id); + + GET_V(stc->time_base_id, tmp < nut->time_base_count); + GET_V(stc->msb_pts_shift, tmp < 16); + stc->max_pts_distance = ffio_read_varlen(bc); + GET_V(stc->decode_delay, tmp < 1000); // sanity limit, raise this if Moore's law is true + st->codecpar->video_delay = stc->decode_delay; + ffio_read_varlen(bc); // stream flags + + GET_V(st->codecpar->extradata_size, tmp < (1 << 30)); + if (st->codecpar->extradata_size) { + if (ff_get_extradata(s, st->codecpar, bc, st->codecpar->extradata_size) < 0) + return AVERROR(ENOMEM); + } + + if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { + GET_V(st->codecpar->width, tmp > 0); + GET_V(st->codecpar->height, tmp > 0); + st->sample_aspect_ratio.num = ffio_read_varlen(bc); + st->sample_aspect_ratio.den = ffio_read_varlen(bc); + if ((!st->sample_aspect_ratio.num) != (!st->sample_aspect_ratio.den)) { + av_log(s, AV_LOG_ERROR, "invalid aspect ratio %d/%d\n", + st->sample_aspect_ratio.num, st->sample_aspect_ratio.den); + ret = AVERROR_INVALIDDATA; + goto fail; + } + ffio_read_varlen(bc); /* csp type */ + } else if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) { + GET_V(st->codecpar->sample_rate, tmp > 0); + ffio_read_varlen(bc); // samplerate_den + GET_V(st->codecpar->channels, tmp > 0); + } + if (skip_reserved(bc, end) || ffio_get_checksum(bc)) { + av_log(s, AV_LOG_ERROR, + "stream header %d checksum mismatch\n", stream_id); + ret = AVERROR_INVALIDDATA; + goto fail; + } + stc->time_base = &nut->time_base[stc->time_base_id]; + avpriv_set_pts_info(s->streams[stream_id], 63, stc->time_base->num, + stc->time_base->den); + return 0; +fail: + if (st && st->codecpar) { + av_freep(&st->codecpar->extradata); + st->codecpar->extradata_size = 0; + } + return ret; +} + +static void set_disposition_bits(AVFormatContext *avf, char *value, + int stream_id) +{ + int flag = 0, i; + + for (i = 0; ff_nut_dispositions[i].flag; ++i) + if (!strcmp(ff_nut_dispositions[i].str, value)) + flag = ff_nut_dispositions[i].flag; + if (!flag) + 
av_log(avf, AV_LOG_INFO, "unknown disposition type '%s'\n", value); + for (i = 0; i < avf->nb_streams; ++i) + if (stream_id == i || stream_id == -1) + avf->streams[i]->disposition |= flag; +} + +static int decode_info_header(NUTContext *nut) +{ + AVFormatContext *s = nut->avf; + AVIOContext *bc = s->pb; + uint64_t tmp, chapter_start, chapter_len; + unsigned int stream_id_plus1, count; + int chapter_id, i, ret = 0; + int64_t value, end; + char name[256], str_value[1024], type_str[256]; + const char *type; + int *event_flags = NULL; + AVChapter *chapter = NULL; + AVStream *st = NULL; + AVDictionary **metadata = NULL; + int metadata_flag = 0; + + end = get_packetheader(nut, bc, 1, INFO_STARTCODE); + end += avio_tell(bc); + + GET_V(stream_id_plus1, tmp <= s->nb_streams); + chapter_id = get_s(bc); + chapter_start = ffio_read_varlen(bc); + chapter_len = ffio_read_varlen(bc); + count = ffio_read_varlen(bc); + + if (chapter_id && !stream_id_plus1) { + int64_t start = chapter_start / nut->time_base_count; + chapter = avpriv_new_chapter(s, chapter_id, + nut->time_base[chapter_start % + nut->time_base_count], + start, start + chapter_len, NULL); + if (!chapter) { + av_log(s, AV_LOG_ERROR, "Could not create chapter.\n"); + return AVERROR(ENOMEM); + } + metadata = &chapter->metadata; + } else if (stream_id_plus1) { + st = s->streams[stream_id_plus1 - 1]; + metadata = &st->metadata; + event_flags = &st->event_flags; + metadata_flag = AVSTREAM_EVENT_FLAG_METADATA_UPDATED; + } else { + metadata = &s->metadata; + event_flags = &s->event_flags; + metadata_flag = AVFMT_EVENT_FLAG_METADATA_UPDATED; + } + + for (i = 0; i < count; i++) { + ret = get_str(bc, name, sizeof(name)); + if (ret < 0) { + av_log(s, AV_LOG_ERROR, "get_str failed while decoding info header\n"); + return ret; + } + value = get_s(bc); + str_value[0] = 0; + + if (value == -1) { + type = "UTF-8"; + ret = get_str(bc, str_value, sizeof(str_value)); + } else if (value == -2) { + ret = get_str(bc, type_str, 
sizeof(type_str)); + if (ret < 0) { + av_log(s, AV_LOG_ERROR, "get_str failed while decoding info header\n"); + return ret; + } + type = type_str; + ret = get_str(bc, str_value, sizeof(str_value)); + } else if (value == -3) { + type = "s"; + value = get_s(bc); + } else if (value == -4) { + type = "t"; + value = ffio_read_varlen(bc); + } else if (value < -4) { + type = "r"; + get_s(bc); + } else { + type = "v"; + } + + if (ret < 0) { + av_log(s, AV_LOG_ERROR, "get_str failed while decoding info header\n"); + return ret; + } + + if (stream_id_plus1 > s->nb_streams) { + av_log(s, AV_LOG_WARNING, + "invalid stream id %d for info packet\n", + stream_id_plus1); + continue; + } + + if (!strcmp(type, "UTF-8")) { + if (chapter_id == 0 && !strcmp(name, "Disposition")) { + set_disposition_bits(s, str_value, stream_id_plus1 - 1); + continue; + } + + if (stream_id_plus1 && !strcmp(name, "r_frame_rate")) { + sscanf(str_value, "%d/%d", &st->r_frame_rate.num, &st->r_frame_rate.den); + if (st->r_frame_rate.num >= 1000LL*st->r_frame_rate.den || + st->r_frame_rate.num < 0 || st->r_frame_rate.num < 0) + st->r_frame_rate.num = st->r_frame_rate.den = 0; + continue; + } + + if (metadata && av_strcasecmp(name, "Uses") && + av_strcasecmp(name, "Depends") && av_strcasecmp(name, "Replaces")) { + if (event_flags) + *event_flags |= metadata_flag; + av_dict_set(metadata, name, str_value, 0); + } + } + } + + if (skip_reserved(bc, end) || ffio_get_checksum(bc)) { + av_log(s, AV_LOG_ERROR, "info header checksum mismatch\n"); + return AVERROR_INVALIDDATA; + } +fail: + return FFMIN(ret, 0); +} + +static int decode_syncpoint(NUTContext *nut, int64_t *ts, int64_t *back_ptr) +{ + AVFormatContext *s = nut->avf; + AVIOContext *bc = s->pb; + int64_t end; + uint64_t tmp; + int ret; + + nut->last_syncpoint_pos = avio_tell(bc) - 8; + + end = get_packetheader(nut, bc, 1, SYNCPOINT_STARTCODE); + end += avio_tell(bc); + + tmp = ffio_read_varlen(bc); + *back_ptr = nut->last_syncpoint_pos - 16 * 
ffio_read_varlen(bc); + if (*back_ptr < 0) + return AVERROR_INVALIDDATA; + + ff_nut_reset_ts(nut, nut->time_base[tmp % nut->time_base_count], + tmp / nut->time_base_count); + + if (nut->flags & NUT_BROADCAST) { + tmp = ffio_read_varlen(bc); + av_log(s, AV_LOG_VERBOSE, "Syncpoint wallclock %"PRId64"\n", + av_rescale_q(tmp / nut->time_base_count, + nut->time_base[tmp % nut->time_base_count], + AV_TIME_BASE_Q)); + } + + if (skip_reserved(bc, end) || ffio_get_checksum(bc)) { + av_log(s, AV_LOG_ERROR, "sync point checksum mismatch\n"); + return AVERROR_INVALIDDATA; + } + + *ts = tmp / nut->time_base_count * + av_q2d(nut->time_base[tmp % nut->time_base_count]) * AV_TIME_BASE; + + if ((ret = ff_nut_add_sp(nut, nut->last_syncpoint_pos, *back_ptr, *ts)) < 0) + return ret; + + return 0; +} + +//FIXME calculate exactly, this is just a good approximation. +static int64_t find_duration(NUTContext *nut, int64_t filesize) +{ + AVFormatContext *s = nut->avf; + int64_t duration = 0; + + ff_find_last_ts(s, -1, &duration, NULL, nut_read_timestamp); + + if(duration > 0) + s->duration_estimation_method = AVFMT_DURATION_FROM_PTS; + return duration; +} + +static int find_and_decode_index(NUTContext *nut) +{ + AVFormatContext *s = nut->avf; + AVIOContext *bc = s->pb; + uint64_t tmp, end; + int i, j, syncpoint_count; + int64_t filesize = avio_size(bc); + int64_t *syncpoints = NULL; + uint64_t max_pts; + int8_t *has_keyframe = NULL; + int ret = AVERROR_INVALIDDATA; + + if(filesize <= 0) + return -1; + + avio_seek(bc, filesize - 12, SEEK_SET); + avio_seek(bc, filesize - avio_rb64(bc), SEEK_SET); + if (avio_rb64(bc) != INDEX_STARTCODE) { + av_log(s, AV_LOG_WARNING, "no index at the end\n"); + + if(s->duration<=0) + s->duration = find_duration(nut, filesize); + return ret; + } + + end = get_packetheader(nut, bc, 1, INDEX_STARTCODE); + end += avio_tell(bc); + + max_pts = ffio_read_varlen(bc); + s->duration = av_rescale_q(max_pts / nut->time_base_count, + nut->time_base[max_pts % 
nut->time_base_count], + AV_TIME_BASE_Q); + s->duration_estimation_method = AVFMT_DURATION_FROM_PTS; + + GET_V(syncpoint_count, tmp < INT_MAX / 8 && tmp > 0); + syncpoints = av_malloc_array(syncpoint_count, sizeof(int64_t)); + has_keyframe = av_malloc_array(syncpoint_count + 1, sizeof(int8_t)); + if (!syncpoints || !has_keyframe) { + ret = AVERROR(ENOMEM); + goto fail; + } + for (i = 0; i < syncpoint_count; i++) { + syncpoints[i] = ffio_read_varlen(bc); + if (syncpoints[i] <= 0) + goto fail; + if (i) + syncpoints[i] += syncpoints[i - 1]; + } + + for (i = 0; i < s->nb_streams; i++) { + int64_t last_pts = -1; + for (j = 0; j < syncpoint_count;) { + uint64_t x = ffio_read_varlen(bc); + int type = x & 1; + int n = j; + x >>= 1; + if (type) { + int flag = x & 1; + x >>= 1; + if (n + x >= syncpoint_count + 1) { + av_log(s, AV_LOG_ERROR, "index overflow A %d + %"PRIu64" >= %d\n", n, x, syncpoint_count + 1); + goto fail; + } + while (x--) + has_keyframe[n++] = flag; + has_keyframe[n++] = !flag; + } else { + if (x <= 1) { + av_log(s, AV_LOG_ERROR, "index: x %"PRIu64" is invalid\n", x); + goto fail; + } + while (x != 1) { + if (n >= syncpoint_count + 1) { + av_log(s, AV_LOG_ERROR, "index overflow B\n"); + goto fail; + } + has_keyframe[n++] = x & 1; + x >>= 1; + } + } + if (has_keyframe[0]) { + av_log(s, AV_LOG_ERROR, "keyframe before first syncpoint in index\n"); + goto fail; + } + av_assert0(n <= syncpoint_count + 1); + for (; j < n && j < syncpoint_count; j++) { + if (has_keyframe[j]) { + uint64_t B, A = ffio_read_varlen(bc); + if (!A) { + A = ffio_read_varlen(bc); + B = ffio_read_varlen(bc); + // eor_pts[j][i] = last_pts + A + B + } else + B = 0; + av_add_index_entry(s->streams[i], 16 * syncpoints[j - 1], + last_pts + A, 0, 0, AVINDEX_KEYFRAME); + last_pts += A + B; + } + } + } + } + + if (skip_reserved(bc, end) || ffio_get_checksum(bc)) { + av_log(s, AV_LOG_ERROR, "index checksum mismatch\n"); + goto fail; + } + ret = 0; + +fail: + av_free(syncpoints); + 
av_free(has_keyframe); + return ret; +} + +static int nut_read_close(AVFormatContext *s) +{ + NUTContext *nut = s->priv_data; + int i; + + av_freep(&nut->time_base); + av_freep(&nut->stream); + ff_nut_free_sp(nut); + for (i = 1; i < nut->header_count; i++) + av_freep(&nut->header[i]); + + return 0; +} + +static int nut_read_header(AVFormatContext *s) +{ + NUTContext *nut = s->priv_data; + AVIOContext *bc = s->pb; + int64_t pos; + int initialized_stream_count; + + nut->avf = s; + + /* main header */ + pos = 0; + do { + pos = find_startcode(bc, MAIN_STARTCODE, pos) + 1; + if (pos < 0 + 1) { + av_log(s, AV_LOG_ERROR, "No main startcode found.\n"); + goto fail; + } + } while (decode_main_header(nut) < 0); + + /* stream headers */ + pos = 0; + for (initialized_stream_count = 0; initialized_stream_count < s->nb_streams;) { + pos = find_startcode(bc, STREAM_STARTCODE, pos) + 1; + if (pos < 0 + 1) { + av_log(s, AV_LOG_ERROR, "Not all stream headers found.\n"); + goto fail; + } + if (decode_stream_header(nut) >= 0) + initialized_stream_count++; + } + + /* info headers */ + pos = 0; + for (;;) { + uint64_t startcode = find_any_startcode(bc, pos); + pos = avio_tell(bc); + + if (startcode == 0) { + av_log(s, AV_LOG_ERROR, "EOF before video frames\n"); + goto fail; + } else if (startcode == SYNCPOINT_STARTCODE) { + nut->next_startcode = startcode; + break; + } else if (startcode != INFO_STARTCODE) { + continue; + } + + decode_info_header(nut); + } + + s->internal->data_offset = pos - 8; + + if (bc->seekable) { + int64_t orig_pos = avio_tell(bc); + find_and_decode_index(nut); + avio_seek(bc, orig_pos, SEEK_SET); + } + av_assert0(nut->next_startcode == SYNCPOINT_STARTCODE); + + ff_metadata_conv_ctx(s, NULL, ff_nut_metadata_conv); + + return 0; + +fail: + nut_read_close(s); + + return AVERROR_INVALIDDATA; +} + +static int read_sm_data(AVFormatContext *s, AVIOContext *bc, AVPacket *pkt, int is_meta, int64_t maxpos) +{ + int count = ffio_read_varlen(bc); + int skip_start = 0; + int 
skip_end = 0; + int channels = 0; + int64_t channel_layout = 0; + int sample_rate = 0; + int width = 0; + int height = 0; + int i, ret; + + for (i=0; i= maxpos) + return AVERROR_INVALIDDATA; + ret = get_str(bc, name, sizeof(name)); + if (ret < 0) { + av_log(s, AV_LOG_ERROR, "get_str failed while reading sm data\n"); + return ret; + } + value = get_s(bc); + + if (value == -1) { + ret = get_str(bc, str_value, sizeof(str_value)); + if (ret < 0) { + av_log(s, AV_LOG_ERROR, "get_str failed while reading sm data\n"); + return ret; + } + av_log(s, AV_LOG_WARNING, "Unknown string %s / %s\n", name, str_value); + } else if (value == -2) { + uint8_t *dst = NULL; + int64_t v64, value_len; + + ret = get_str(bc, type_str, sizeof(type_str)); + if (ret < 0) { + av_log(s, AV_LOG_ERROR, "get_str failed while reading sm data\n"); + return ret; + } + value_len = ffio_read_varlen(bc); + if (value_len < 0 || value_len >= maxpos - avio_tell(bc)) + return AVERROR_INVALIDDATA; + if (!strcmp(name, "Palette")) { + dst = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, value_len); + } else if (!strcmp(name, "Extradata")) { + dst = av_packet_new_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA, value_len); + } else if (sscanf(name, "CodecSpecificSide%"SCNd64"", &v64) == 1) { + dst = av_packet_new_side_data(pkt, AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL, value_len + 8); + if(!dst) + return AVERROR(ENOMEM); + AV_WB64(dst, v64); + dst += 8; + } else if (!strcmp(name, "ChannelLayout") && value_len == 8) { + channel_layout = avio_rl64(bc); + continue; + } else { + av_log(s, AV_LOG_WARNING, "Unknown data %s / %s\n", name, type_str); + avio_skip(bc, value_len); + continue; + } + if(!dst) + return AVERROR(ENOMEM); + avio_read(bc, dst, value_len); + } else if (value == -3) { + value = get_s(bc); + } else if (value == -4) { + value = ffio_read_varlen(bc); + } else if (value < -4) { + get_s(bc); + } else { + if (!strcmp(name, "SkipStart")) { + skip_start = value; + } else if (!strcmp(name, "SkipEnd")) { + skip_end = 
value; + } else if (!strcmp(name, "Channels")) { + channels = value; + } else if (!strcmp(name, "SampleRate")) { + sample_rate = value; + } else if (!strcmp(name, "Width")) { + width = value; + } else if (!strcmp(name, "Height")) { + height = value; + } else { + av_log(s, AV_LOG_WARNING, "Unknown integer %s\n", name); + } + } + } + + if (channels || channel_layout || sample_rate || width || height) { + uint8_t *dst = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, 28); + if (!dst) + return AVERROR(ENOMEM); + bytestream_put_le32(&dst, + AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT*(!!channels) + + AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT*(!!channel_layout) + + AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE*(!!sample_rate) + + AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS*(!!(width|height)) + ); + if (channels) + bytestream_put_le32(&dst, channels); + if (channel_layout) + bytestream_put_le64(&dst, channel_layout); + if (sample_rate) + bytestream_put_le32(&dst, sample_rate); + if (width || height){ + bytestream_put_le32(&dst, width); + bytestream_put_le32(&dst, height); + } + } + + if (skip_start || skip_end) { + uint8_t *dst = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10); + if (!dst) + return AVERROR(ENOMEM); + AV_WL32(dst, skip_start); + AV_WL32(dst+4, skip_end); + } + + if (avio_tell(bc) >= maxpos) + return AVERROR_INVALIDDATA; + + return 0; +} + +static int decode_frame_header(NUTContext *nut, int64_t *pts, int *stream_id, + uint8_t *header_idx, int frame_code) +{ + AVFormatContext *s = nut->avf; + AVIOContext *bc = s->pb; + StreamContext *stc; + int size, flags, size_mul, pts_delta, i, reserved_count, ret; + uint64_t tmp; + + if (!(nut->flags & NUT_PIPE) && + avio_tell(bc) > nut->last_syncpoint_pos + nut->max_distance) { + av_log(s, AV_LOG_ERROR, + "Last frame must have been damaged %"PRId64" > %"PRId64" + %d\n", + avio_tell(bc), nut->last_syncpoint_pos, nut->max_distance); + return AVERROR_INVALIDDATA; + } + + flags = nut->frame_code[frame_code].flags; + size_mul 
= nut->frame_code[frame_code].size_mul; + size = nut->frame_code[frame_code].size_lsb; + *stream_id = nut->frame_code[frame_code].stream_id; + pts_delta = nut->frame_code[frame_code].pts_delta; + reserved_count = nut->frame_code[frame_code].reserved_count; + *header_idx = nut->frame_code[frame_code].header_idx; + + if (flags & FLAG_INVALID) + return AVERROR_INVALIDDATA; + if (flags & FLAG_CODED) + flags ^= ffio_read_varlen(bc); + if (flags & FLAG_STREAM_ID) { + GET_V(*stream_id, tmp < s->nb_streams); + } + stc = &nut->stream[*stream_id]; + if (flags & FLAG_CODED_PTS) { + int coded_pts = ffio_read_varlen(bc); + // FIXME check last_pts validity? + if (coded_pts < (1 << stc->msb_pts_shift)) { + *pts = ff_lsb2full(stc, coded_pts); + } else + *pts = coded_pts - (1LL << stc->msb_pts_shift); + } else + *pts = stc->last_pts + pts_delta; + if (flags & FLAG_SIZE_MSB) + size += size_mul * ffio_read_varlen(bc); + if (flags & FLAG_MATCH_TIME) + get_s(bc); + if (flags & FLAG_HEADER_IDX) + *header_idx = ffio_read_varlen(bc); + if (flags & FLAG_RESERVED) + reserved_count = ffio_read_varlen(bc); + for (i = 0; i < reserved_count; i++) { + if (bc->eof_reached) { + av_log(s, AV_LOG_ERROR, "reached EOF while decoding frame header\n"); + return AVERROR_INVALIDDATA; + } + ffio_read_varlen(bc); + } + + if (*header_idx >= (unsigned)nut->header_count) { + av_log(s, AV_LOG_ERROR, "header_idx invalid\n"); + return AVERROR_INVALIDDATA; + } + if (size > 4096) + *header_idx = 0; + size -= nut->header_len[*header_idx]; + + if (flags & FLAG_CHECKSUM) { + avio_rb32(bc); // FIXME check this + } else if (!(nut->flags & NUT_PIPE) && + size > 2 * nut->max_distance || + FFABS(stc->last_pts - *pts) > stc->max_pts_distance) { + av_log(s, AV_LOG_ERROR, "frame size > 2max_distance and no checksum\n"); + return AVERROR_INVALIDDATA; + } + + stc->last_pts = *pts; + stc->last_flags = flags; + + return size; +fail: + return ret; +} + +static int decode_frame(NUTContext *nut, AVPacket *pkt, int frame_code) +{ + 
AVFormatContext *s = nut->avf; + AVIOContext *bc = s->pb; + int size, stream_id, discard, ret; + int64_t pts, last_IP_pts; + StreamContext *stc; + uint8_t header_idx; + + size = decode_frame_header(nut, &pts, &stream_id, &header_idx, frame_code); + if (size < 0) + return size; + + stc = &nut->stream[stream_id]; + + if (stc->last_flags & FLAG_KEY) + stc->skip_until_key_frame = 0; + + discard = s->streams[stream_id]->discard; + last_IP_pts = s->streams[stream_id]->last_IP_pts; + if ((discard >= AVDISCARD_NONKEY && !(stc->last_flags & FLAG_KEY)) || + (discard >= AVDISCARD_BIDIR && last_IP_pts != AV_NOPTS_VALUE && + last_IP_pts > pts) || + discard >= AVDISCARD_ALL || + stc->skip_until_key_frame) { + avio_skip(bc, size); + return 1; + } + + ret = av_new_packet(pkt, size + nut->header_len[header_idx]); + if (ret < 0) + return ret; + if (nut->header[header_idx]) + memcpy(pkt->data, nut->header[header_idx], nut->header_len[header_idx]); + pkt->pos = avio_tell(bc); // FIXME + if (stc->last_flags & FLAG_SM_DATA) { + int sm_size; + if (read_sm_data(s, bc, pkt, 0, pkt->pos + size) < 0) { + ret = AVERROR_INVALIDDATA; + goto fail; + } + if (read_sm_data(s, bc, pkt, 1, pkt->pos + size) < 0) { + ret = AVERROR_INVALIDDATA; + goto fail; + } + sm_size = avio_tell(bc) - pkt->pos; + size -= sm_size; + pkt->size -= sm_size; + } + + ret = avio_read(bc, pkt->data + nut->header_len[header_idx], size); + if (ret != size) { + if (ret < 0) + goto fail; + } + av_shrink_packet(pkt, nut->header_len[header_idx] + ret); + + pkt->stream_index = stream_id; + if (stc->last_flags & FLAG_KEY) + pkt->flags |= AV_PKT_FLAG_KEY; + pkt->pts = pts; + + return 0; +fail: + av_packet_unref(pkt); + return ret; +} + +static int nut_read_packet(AVFormatContext *s, AVPacket *pkt) +{ + NUTContext *nut = s->priv_data; + AVIOContext *bc = s->pb; + int i, frame_code = 0, ret, skip; + int64_t ts, back_ptr; + + for (;;) { + int64_t pos = avio_tell(bc); + uint64_t tmp = nut->next_startcode; + nut->next_startcode = 0; + + 
if (tmp) { + pos -= 8; + } else { + frame_code = avio_r8(bc); + if (avio_feof(bc)) + return AVERROR_EOF; + if (frame_code == 'N') { + tmp = frame_code; + for (i = 1; i < 8; i++) + tmp = (tmp << 8) + avio_r8(bc); + } + } + switch (tmp) { + case MAIN_STARTCODE: + case STREAM_STARTCODE: + case INDEX_STARTCODE: + skip = get_packetheader(nut, bc, 0, tmp); + avio_skip(bc, skip); + break; + case INFO_STARTCODE: + if (decode_info_header(nut) < 0) + goto resync; + break; + case SYNCPOINT_STARTCODE: + if (decode_syncpoint(nut, &ts, &back_ptr) < 0) + goto resync; + frame_code = avio_r8(bc); + case 0: + ret = decode_frame(nut, pkt, frame_code); + if (ret == 0) + return 0; + else if (ret == 1) // OK but discard packet + break; + default: +resync: + av_log(s, AV_LOG_DEBUG, "syncing from %"PRId64"\n", pos); + tmp = find_any_startcode(bc, FFMAX(nut->last_syncpoint_pos, nut->last_resync_pos) + 1); + nut->last_resync_pos = avio_tell(bc); + if (tmp == 0) + return AVERROR_INVALIDDATA; + av_log(s, AV_LOG_DEBUG, "sync\n"); + nut->next_startcode = tmp; + } + } +} + +static int64_t nut_read_timestamp(AVFormatContext *s, int stream_index, + int64_t *pos_arg, int64_t pos_limit) +{ + NUTContext *nut = s->priv_data; + AVIOContext *bc = s->pb; + int64_t pos, pts, back_ptr; + av_log(s, AV_LOG_DEBUG, "read_timestamp(X,%d,%"PRId64",%"PRId64")\n", + stream_index, *pos_arg, pos_limit); + + pos = *pos_arg; + do { + pos = find_startcode(bc, SYNCPOINT_STARTCODE, pos) + 1; + if (pos < 1) { + av_log(s, AV_LOG_ERROR, "read_timestamp failed.\n"); + return AV_NOPTS_VALUE; + } + } while (decode_syncpoint(nut, &pts, &back_ptr) < 0); + *pos_arg = pos - 1; + av_assert0(nut->last_syncpoint_pos == *pos_arg); + + av_log(s, AV_LOG_DEBUG, "return %"PRId64" %"PRId64"\n", pts, back_ptr); + if (stream_index == -2) + return back_ptr; + av_assert0(stream_index == -1); + return pts; +} + +static int read_seek(AVFormatContext *s, int stream_index, + int64_t pts, int flags) +{ + NUTContext *nut = s->priv_data; + AVStream 
*st = s->streams[stream_index]; + Syncpoint dummy = { .ts = pts * av_q2d(st->time_base) * AV_TIME_BASE }; + Syncpoint nopts_sp = { .ts = AV_NOPTS_VALUE, .back_ptr = AV_NOPTS_VALUE }; + Syncpoint *sp, *next_node[2] = { &nopts_sp, &nopts_sp }; + int64_t pos, pos2, ts; + int i; + + if (nut->flags & NUT_PIPE) { + return AVERROR(ENOSYS); + } + + if (st->index_entries) { + int index = av_index_search_timestamp(st, pts, flags); + if (index < 0) + index = av_index_search_timestamp(st, pts, flags ^ AVSEEK_FLAG_BACKWARD); + if (index < 0) + return -1; + + pos2 = st->index_entries[index].pos; + ts = st->index_entries[index].timestamp; + } else { + av_tree_find(nut->syncpoints, &dummy, ff_nut_sp_pts_cmp, + (void **) next_node); + av_log(s, AV_LOG_DEBUG, "%"PRIu64"-%"PRIu64" %"PRId64"-%"PRId64"\n", + next_node[0]->pos, next_node[1]->pos, next_node[0]->ts, + next_node[1]->ts); + pos = ff_gen_search(s, -1, dummy.ts, next_node[0]->pos, + next_node[1]->pos, next_node[1]->pos, + next_node[0]->ts, next_node[1]->ts, + AVSEEK_FLAG_BACKWARD, &ts, nut_read_timestamp); + if (pos < 0) + return pos; + + if (!(flags & AVSEEK_FLAG_BACKWARD)) { + dummy.pos = pos + 16; + next_node[1] = &nopts_sp; + av_tree_find(nut->syncpoints, &dummy, ff_nut_sp_pos_cmp, + (void **) next_node); + pos2 = ff_gen_search(s, -2, dummy.pos, next_node[0]->pos, + next_node[1]->pos, next_node[1]->pos, + next_node[0]->back_ptr, next_node[1]->back_ptr, + flags, &ts, nut_read_timestamp); + if (pos2 >= 0) + pos = pos2; + // FIXME dir but I think it does not matter + } + dummy.pos = pos; + sp = av_tree_find(nut->syncpoints, &dummy, ff_nut_sp_pos_cmp, + NULL); + + av_assert0(sp); + pos2 = sp->back_ptr - 15; + } + av_log(NULL, AV_LOG_DEBUG, "SEEKTO: %"PRId64"\n", pos2); + pos = find_startcode(s->pb, SYNCPOINT_STARTCODE, pos2); + avio_seek(s->pb, pos, SEEK_SET); + nut->last_syncpoint_pos = pos; + av_log(NULL, AV_LOG_DEBUG, "SP: %"PRId64"\n", pos); + if (pos2 > pos || pos2 + 15 < pos) + av_log(NULL, AV_LOG_ERROR, "no syncpoint 
at backptr pos\n"); + for (i = 0; i < s->nb_streams; i++) + nut->stream[i].skip_until_key_frame = 1; + + nut->last_resync_pos = 0; + + return 0; +} + +AVInputFormat ff_nut_demuxer = { + .name = "nut", + .long_name = NULL_IF_CONFIG_SMALL("NUT"), + .flags = AVFMT_SEEK_TO_PTS, + .priv_data_size = sizeof(NUTContext), + .read_probe = nut_probe, + .read_header = nut_read_header, + .read_packet = nut_read_packet, + .read_close = nut_read_close, + .read_seek = read_seek, + .extensions = "nut", + .codec_tag = ff_nut_codec_tags, +};