60 #define FREEZE_INTERVAL 128
80 int frontier = 1 << avctx->trellis;
83 max_paths * sizeof(*s->paths), error);
85 2 * frontier * sizeof(*s->node_buf), error);
87 2 * frontier * sizeof(*s->nodep_buf), error);
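The buffer sizes above grow exponentially with the trellis setting: with avctx->trellis = 8, for example, frontier = 1 << 8 = 256 candidate nodes are kept per sample, and roughly FREEZE_INTERVAL * frontier = 128 * 256 = 32768 TrellisPath entries are allocated, which is also the bound checked by the assert at line 376.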
118 bytestream_put_le16(&extradata, avctx->frame_size);
119 bytestream_put_le16(&extradata, 7);
120 for (i = 0; i < 7; i++) {
145 #if FF_API_OLD_ENCODE_AUDIO
162 #if FF_API_OLD_ENCODE_AUDIO
178 int nibble = FFMIN(7, abs(delta) * 4 /
192 int diff = step >> 3;
200 for (mask = 4; mask;) {
224 int predictor, nibble, bias;
229 nibble = sample - predictor;
235 nibble = (nibble + bias) / c->idelta;
236 nibble = av_clip(nibble, -8, 7) & 0x0F;
238 predictor += ((nibble & 0x08) ? (nibble - 0x10) : nibble) * c->idelta;
241 c->sample1 = av_clip_int16(predictor);
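As an aside, the bias applied at line 235 turns the division by c->idelta into round-to-nearest quantization of the prediction error rather than truncation toward zero. A minimal standalone sketch of that step, assuming the usual half-step bias (hypothetical helper, not part of adpcmenc.c):

    static int ms_quantize_error(int error, int idelta)
    {
        int bias   = (error >= 0) ? idelta / 2 : -idelta / 2; /* half a quantizer step */
        int nibble = (error + bias) / idelta;                 /* nearest multiple of idelta */
        if (nibble < -8) nibble = -8;                         /* same clamp as av_clip(nibble, -8, 7) */
        if (nibble >  7) nibble =  7;
        return nibble & 0x0F;                                 /* signed value stored as a 4-bit code */
    }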
262 nibble = FFMIN(7, abs(delta) * 4 / c->step) + (delta < 0) * 8;
267 c->step = av_clip(c->step, 127, 24567);
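Both the IMA-style compressor (line 178) and the Yamaha one (line 262) map the prediction error to a sign-and-magnitude nibble: bit 3 carries the sign, bits 0-2 carry the error measured in quarter steps, saturated at 7. A small self-contained sketch of that mapping (illustrative helper, not taken from the file):

    #include <stdlib.h>  /* abs() */

    static int sign_magnitude_nibble(int delta, int step)
    {
        int mag = abs(delta) * 4 / step;   /* error in units of step/4 */
        if (mag > 7)
            mag = 7;                       /* saturate the 3-bit magnitude, as FFMIN(7, ...) does */
        return mag | (delta < 0 ? 8 : 0);  /* bit 3 flags a negative error */
    }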
278 const int frontier = 1 << avctx->trellis;
285 int pathn = 0, froze = -1, i, j, k, generation = 0;
287 memset(hash, 0xff, 65536 * sizeof(*hash));
289 memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf));
290 nodes[0] = node_buf + frontier;
304 nodes[0]->step = 127;
312 for (i = 0; i < n; i++) {
317 memset(nodes_next, 0, frontier * sizeof(TrellisNode*));
318 for (j = 0; j < frontier && nodes[j]; j++) {
321 const int range = (j < frontier / 2) ? 1 : 0;
322 const int step = nodes[j]->step;
325 const int predictor = ((nodes[j]->sample1 * c->coeff1) +
326 (nodes[j]->sample2 * c->coeff2)) / 64;
327 const int div = (sample - predictor) / step;
328 const int nmin = av_clip(div-range, -8, 6);
329 const int nmax = av_clip(div+range, -7, 7);
330 for (nidx = nmin; nidx <= nmax; nidx++) {
331 const int nibble = nidx & 0xf;
332 int dec_sample = predictor + nidx * step;
333 #define STORE_NODE(NAME, STEP_INDEX)\
339 dec_sample = av_clip_int16(dec_sample);\
340 d = sample - dec_sample;\
341 ssd = nodes[j]->ssd + d*d;\
346 if (ssd < nodes[j]->ssd)\
359 h = &hash[(uint16_t) dec_sample];\
360 if (*h == generation)\
362 if (heap_pos < frontier) {\
367 pos = (frontier >> 1) +\
368 (heap_pos & ((frontier >> 1) - 1));\
369 if (ssd > nodes_next[pos]->ssd)\
374 u = nodes_next[pos];\
376 assert(pathn < FREEZE_INTERVAL << avctx->trellis);\
378 nodes_next[pos] = u;\
382 u->step = STEP_INDEX;\
383 u->sample2 = nodes[j]->sample1;\
384 u->sample1 = dec_sample;\
385 paths[u->path].nibble = nibble;\
386 paths[u->path].prev = nodes[j]->path;\
390 int parent = (pos - 1) >> 1;\
391 if (nodes_next[parent]->ssd <= ssd)\
393 FFSWAP(TrellisNode*, nodes_next[parent], nodes_next[pos]);\
403 #define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
404 const int predictor = nodes[j]->sample1;\
405 const int div = (sample - predictor) * 4 / STEP_TABLE;\
406 int nmin = av_clip(div - range, -7, 6);\
407 int nmax = av_clip(div + range, -6, 7);\
412 for (nidx = nmin; nidx <= nmax; nidx++) {\
413 const int nibble = nidx < 0 ? 7 - nidx : nidx;\
414 int dec_sample = predictor +\
416 ff_adpcm_yamaha_difflookup[nibble]) / 8;\
417 STORE_NODE(NAME, STEP_INDEX);\
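STORE_NODE keeps the surviving candidates in nodes_next ordered as an (approximate) binary min-heap keyed on the accumulated squared error, so the lowest-error node ends up at index 0; lines 390-393 are the usual sift-up. Written out as a plain function, this is a sketch of the same logic using the file's TrellisNode type:

    static void sift_up(TrellisNode **heap, int pos)
    {
        while (pos > 0) {
            int parent = (pos - 1) >> 1;               /* parent slot in the implicit binary tree */
            if (heap[parent]->ssd <= heap[pos]->ssd)
                break;                                 /* heap property already satisfied */
            TrellisNode *tmp = heap[parent];           /* otherwise swap, as FFSWAP does */
            heap[parent]     = heap[pos];
            heap[pos]        = tmp;
            pos = parent;
        }
    }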
435 if (generation == 255) {
436 memset(hash, 0xff, 65536 * sizeof(*hash));
441 if (nodes[0]->ssd > (1 << 28)) {
442 for (j = 1; j < frontier && nodes[j]; j++)
443 nodes[j]->ssd -= nodes[0]->ssd;
449 p = &paths[nodes[0]->path];
450 for (k = i; k > froze; k--) {
459 memset(nodes + 1, 0, (frontier - 1) * sizeof(TrellisNode*));
463 p = &paths[nodes[0]->path];
464 for (i = n - 1; i > froze; i--) {
470 c->sample1 = nodes[0]->sample1;
471 c->sample2 = nodes[0]->sample2;
473 c->step = nodes[0]->step;
474 c->idelta = nodes[0]->step;
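Once a block (or a FREEZE_INTERVAL chunk) is finished, the winning node's decisions are recovered by walking the TrellisPath prev links backwards, as in lines 463-464. The same idea as a standalone sketch (field names as in this file, the helper itself hypothetical):

    static void backtrack(const TrellisPath *paths, int head,
                          uint8_t *dst, int last, int froze)
    {
        const TrellisPath *p = &paths[head];   /* path of the lowest-ssd node */
        for (int i = last; i > froze; i--) {
            dst[i] = p->nibble;                /* nibble chosen for sample i */
            p      = &paths[p->prev];          /* step back one sample */
        }
    }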
478 const AVFrame *frame, int *got_packet_ptr)
480 int n, i, ch, st, pkt_size, ret;
487 samples = (const int16_t *)frame->data[0];
508 for (ch = 0; ch < avctx->channels; ch++) {
521 for (ch = 0; ch < avctx->channels; ch++) {
523 buf + ch * blocks * 8, &c->status[ch],
526 for (i = 0; i < blocks; i++) {
527 for (ch = 0; ch < avctx->channels; ch++) {
528 uint8_t *buf1 = buf + ch * blocks * 8 + i * 8;
529 for (j = 0; j < 8; j += 2)
530 *dst++ = buf1[j] | (buf1[j + 1] << 4);
535 for (i = 0; i < blocks; i++) {
536 for (ch = 0; ch < avctx->channels; ch++) {
538 const int16_t *smp = &samples_p[ch][1 + i * 8];
539 for (j = 0; j < 8; j += 2) {
554 for (ch = 0; ch < avctx->channels; ch++) {
562 for (i = 0; i < 64; i++)
566 for (i = 0; i < 64; i += 2) {
590 for (i = 0; i < avctx->channels; i++) {
604 buf + n, &c->status[1], n,
606 for (i = 0; i < n; i++) {
618 samples[2 * i + 1]));
625 for (i = 0; i < avctx->channels; i++) {
631 for (i = 0; i < avctx->channels; i++) {
636 for (i = 0; i < avctx->channels; i++)
642 for (i = 0; i < avctx->channels; i++)
651 for (i = 0; i < n; i += 2)
652 *dst++ = (buf[i] << 4) | buf[i + 1];
658 for (i = 0; i < n; i++)
659 *dst++ = (buf[i] << 4) | buf[n + i];
663 for (i = 7 * avctx->channels; i < avctx->block_align; i++) {
679 for (i = 0; i < n; i += 2)
680 *dst++ = buf[i] | (buf[i + 1] << 4);
686 for (i = 0; i < n; i++)
687 *dst++ = buf[i] | (buf[n + i] << 4);
691 for (n *= avctx->channels; n > 0; n--) {
702 avpkt->size = pkt_size;
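The per-codec packing loops above differ mainly in nibble order: some store the first sample of a pair in the low four bits of the output byte (e.g. lines 530 and 680), others in the high four bits (e.g. lines 652 and 659). A trivial illustration of the two layouts (hypothetical helpers, not from the file):

    #include <stdint.h>

    static uint8_t pack_low_first(int first, int second)   /* as in lines 530, 680 */
    {
        return (first & 0x0F) | ((second & 0x0F) << 4);
    }

    static uint8_t pack_high_first(int first, int second)  /* as in lines 652, 659 */
    {
        return ((first & 0x0F) << 4) | (second & 0x0F);
    }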
717 #define ADPCM_ENCODER(id_, name_, sample_fmts_, long_name_) \
718 AVCodec ff_ ## name_ ## _encoder = { \
720 .type = AVMEDIA_TYPE_AUDIO, \
722 .priv_data_size = sizeof(ADPCMEncodeContext), \
723 .init = adpcm_encode_init, \
724 .encode2 = adpcm_encode_frame, \
725 .close = adpcm_encode_close, \
726 .sample_fmts = sample_fmts_, \
727 .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
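The macro bundles everything needed to register one encoder; each supported variant is then declared with a single invocation further down in the file, along these lines (illustrative only, the exact codec list is not shown in this excerpt):

    ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, sample_fmts_p,
                  "ADPCM IMA WAV");
    ADPCM_ENCODER(AV_CODEC_ID_ADPCM_YAMAHA,  adpcm_yamaha,  sample_fmts,
                  "ADPCM Yamaha");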