h264.c
Go to the documentation of this file.
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
28 #include "libavutil/imgutils.h"
29 #include "internal.h"
30 #include "cabac.h"
31 #include "cabac_functions.h"
32 #include "dsputil.h"
33 #include "avcodec.h"
34 #include "mpegvideo.h"
35 #include "h264.h"
36 #include "h264data.h"
37 #include "h264_mvpred.h"
38 #include "golomb.h"
39 #include "mathops.h"
40 #include "rectangle.h"
41 #include "thread.h"
42 #include "vdpau_internal.h"
43 #include "libavutil/avassert.h"
44 
45 // #undef NDEBUG
46 #include <assert.h>
47 
/* Total coefficient/sample counts per macroblock.
 * 256 = 16x16 luma only; 384/512/768 add chroma for increasing chroma
 * formats — presumably indexed by chroma_format_idc (TODO confirm). */
48 const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
49 
/* Lookup table for qp % 6, for all QP values up to QP_MAX_NUM
 * (the values visibly repeat the cycle 0..5). */
50 static const uint8_t rem6[QP_MAX_NUM + 1] = {
51  0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2,
52  3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
53  0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
54 };
55 
/* Lookup table for qp / 6, for all QP values up to QP_MAX_NUM
 * (each value visibly repeats six times). */
56 static const uint8_t div6[QP_MAX_NUM + 1] = {
57  0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3,
58  3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6,
59  7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10,
60 };
61 
63 #if CONFIG_H264_DXVA2_HWACCEL
65 #endif
66 #if CONFIG_H264_VAAPI_HWACCEL
68 #endif
69 #if CONFIG_H264_VDA_HWACCEL
71 #endif
74 };
75 
/* Callback for the shared error-resilience code: reconstruct one macroblock
 * at (mb_x, mb_y) using a single forward motion vector and reference index.
 * NOTE(review): doc line 99 of the body (between the assert and the closing
 * brace, presumably the actual MB reconstruction call) is missing from this
 * extract — verify against the upstream file. */
76 static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
77  int (*mv)[2][4][2],
78  int mb_x, int mb_y, int mb_intra, int mb_skipped)
79 {
80  H264Context *h = opaque;
81 
82  h->mb_x = mb_x;
83  h->mb_y = mb_y;
84  h->mb_xy = mb_x + mb_y * h->mb_stride;
85  memset(h->non_zero_count_cache, 0, sizeof(h->non_zero_count_cache));
86  assert(ref >= 0);
87  /* FIXME: It is possible albeit uncommon that slice references
88  * differ between slices. We take the easy approach and ignore
89  * it for now. If this turns out to have any relevance in
90  * practice then correct remapping should be added. */
91  if (ref >= h->ref_count[0])
92  ref = 0;
 /* Propagate the (clamped) reference index and the packed MV to the
  * whole 16x16 macroblock in both the frame arrays and the caches. */
93  fill_rectangle(&h->cur_pic.f.ref_index[0][4 * h->mb_xy],
94  2, 2, 2, ref, 1);
95  fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
96  fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8,
97  pack16to32((*mv)[0][0][0], (*mv)[0][0][1]), 4);
98  assert(!FRAME_MBAFF);
100 }
101 
/* Pass a freshly decoded horizontal band of the current picture to the
 * draw_horiz_band machinery. NOTE(review): the signature (doc line 102)
 * is missing from this extract — presumably
 * static void decode_finish_row-style wrapper taking (h, y, height);
 * confirm against the upstream file. */
103 {
104  ff_draw_horiz_band(h->avctx, &h->dsp, &h->cur_pic,
105  h->ref_list[0][0].f.data[0] ? &h->ref_list[0][0] : NULL,
106  y, height, h->picture_structure, h->first_field, 1,
107  h->low_delay, h->mb_height * 16, h->mb_width * 16);
108 }
109 
/* Return a picture's frame buffer to the threading layer.
 * NOTE(review): doc line 113 is missing from this extract (likely a free of
 * hwaccel private data) — confirm against the upstream file. */
110 static void free_frame_buffer(H264Context *h, Picture *pic)
111 {
112  ff_thread_release_buffer(h->avctx, &pic->f);
114 }
115 
/* Fully free a Picture: release its frame buffer (if any) and all the
 * per-picture side tables (mb_type, motion vectors, ref indices).
 * NOTE(review): doc line 123 is missing from this extract (presumably the
 * av_freep of qscale_table_base) — confirm against the upstream file. */
116 static void free_picture(H264Context *h, Picture *pic)
117 {
118  int i;
119 
120  if (pic->f.data[0])
121  free_frame_buffer(h, pic);
122 
124  pic->f.qscale_table = NULL;
125  av_freep(&pic->mb_type_base);
126  pic->f.mb_type = NULL;
 /* One motion-val/ref-index pair per prediction list. */
127  for (i = 0; i < 2; i++) {
128  av_freep(&pic->motion_val_base[i]);
129  av_freep(&pic->f.ref_index[i]);
130  pic->f.motion_val[i] = NULL;
131  }
132 }
133 
134 static void release_unused_pictures(H264Context *h, int remove_current)
135 {
136  int i;
137 
138  /* release non reference frames */
139  for (i = 0; i < h->picture_count; i++) {
140  if (h->DPB[i].f.data[0] && !h->DPB[i].f.reference &&
141  (!h->DPB[i].owner2 || h->DPB[i].owner2 == h) &&
142  (remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
143  free_frame_buffer(h, &h->DPB[i]);
144  }
145  }
146 }
147 
/* Allocate the per-context scratch buffers used during reconstruction:
 * bi-prediction scratchpad, edge-emulation buffer and ME scratchpad.
 * Sizes are derived from the picture linesize; a no-op if already allocated.
 * Returns 0 on success, AVERROR(ENOMEM) on failure.
 * NOTE(review): doc lines 162-163 of the failure path are missing from this
 * extract (presumably the av_freep of bipred_scratchpad and edge_emu_buffer);
 * as shown only me.scratchpad is freed — confirm against the upstream file. */
148 static int alloc_scratch_buffers(H264Context *h, int linesize)
149 {
150  int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
151 
 /* bipred_scratchpad doubles as the "already allocated" marker. */
152  if (h->bipred_scratchpad)
153  return 0;
154 
155  h->bipred_scratchpad = av_malloc(16 * 6 * alloc_size);
156  // edge emu needs blocksize + filter length - 1
157  // (= 21x21 for h264)
158  h->edge_emu_buffer = av_mallocz(alloc_size * 2 * 21);
159  h->me.scratchpad = av_mallocz(alloc_size * 2 * 16 * 2);
160 
161  if (!h->bipred_scratchpad || !h->edge_emu_buffer || !h->me.scratchpad) {
164  av_freep(&h->me.scratchpad);
165  return AVERROR(ENOMEM);
166  }
167 
168  h->me.temp = h->me.scratchpad;
169 
170  return 0;
171 }
172 
/* Allocate a Picture: get a frame buffer from the threading layer and, on
 * first use, the per-picture side tables (qscale, mb_type, motion vectors,
 * ref indices).  Returns 0 on success, a negative AVERROR on failure.
 * NOTE(review): several lines are missing from this extract — the hwaccel
 * private-data allocation (doc 185/187) and the heads of the
 * FF_ALLOCZ_OR_GOTO-style allocation macros (doc 200, 203, 210) — so the
 * allocation targets on those lines must be confirmed upstream. */
173 static int alloc_picture(H264Context *h, Picture *pic)
174 {
175  const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
176  const int mb_array_size = h->mb_stride * h->mb_height;
177  const int b4_stride = h->mb_width * 4 + 1;
178  const int b4_array_size = b4_stride * h->mb_height * 4;
179  int i, ret = 0;
180 
181  av_assert0(!pic->f.data[0]);
182 
183  if (h->avctx->hwaccel) {
184  const AVHWAccel *hwaccel = h->avctx->hwaccel;
186  if (hwaccel->priv_data_size) {
188  if (!pic->f.hwaccel_picture_private)
189  return AVERROR(ENOMEM);
190  }
191  }
192  ret = ff_thread_get_buffer(h->avctx, &pic->f);
193  if (ret < 0)
194  goto fail;
195 
 /* Cache the line sizes of the newly acquired buffer. */
196  h->linesize = pic->f.linesize[0];
197  h->uvlinesize = pic->f.linesize[1];
198 
 /* Side tables are allocated lazily and kept across reallocations. */
199  if (pic->f.qscale_table == NULL) {
201  (big_mb_num + h->mb_stride) * sizeof(uint8_t),
202  fail)
204  (big_mb_num + h->mb_stride) * sizeof(uint32_t),
205  fail)
206  pic->f.mb_type = pic->mb_type_base + 2 * h->mb_stride + 1;
207  pic->f.qscale_table = pic->qscale_table_base + 2 * h->mb_stride + 1;
208 
209  for (i = 0; i < 2; i++) {
211  2 * (b4_array_size + 4) * sizeof(int16_t),
212  fail)
213  pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
214  FF_ALLOCZ_OR_GOTO(h->avctx, pic->f.ref_index[i],
215  4 * mb_array_size * sizeof(uint8_t), fail)
216  }
217  pic->f.motion_subsample_log2 = 2;
218 
219  pic->f.qstride = h->mb_stride;
220  }
221 
222  pic->owner2 = h;
223 
224  return 0;
225 fail:
226  free_frame_buffer(h, pic);
227  return (ret < 0) ? ret : AVERROR(ENOMEM);
228 }
229 
230 static inline int pic_is_unused(H264Context *h, Picture *pic)
231 {
232  if (pic->f.data[0] == NULL)
233  return 1;
234  if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF))
235  if (!pic->owner2 || pic->owner2 == h)
236  return 1;
237  return 0;
238 }
239 
/* Scan the context's picture range for a reusable DPB slot and return its
 * index, or AVERROR_INVALIDDATA when none is free.  A slot flagged
 * needs_realloc is fully freed before being handed out.
 * NOTE(review): the signature (doc line 240) and doc line 254 are missing
 * from this extract — confirm against the upstream file. */
241 {
242  int i;
243 
244  for (i = h->picture_range_start; i < h->picture_range_end; i++) {
245  if (pic_is_unused(h, &h->DPB[i]))
246  break;
247  }
248  if (i == h->picture_range_end)
249  return AVERROR_INVALIDDATA;
250 
251  if (h->DPB[i].needs_realloc) {
252  h->DPB[i].needs_realloc = 0;
253  free_picture(h, &h->DPB[i]);
255  }
256 
257  return i;
258 }
259 
/* Validate (and where needed remap) the cached intra4x4 prediction modes
 * against the availability of top/left neighbour samples; returns 0 on
 * success, -1 when a mode requires unavailable samples.
 * NOTE(review): the signature (doc line 264) and the av_log call heads
 * (doc lines 278, 294) are missing from this extract — confirm upstream. */
265 {
 /* Remap tables: -1 = invalid without that neighbour, 0 = unchanged,
  * otherwise a substitute DC mode. */
266  static const int8_t top[12] = {
267  -1, 0, LEFT_DC_PRED, -1, -1, -1, -1, -1, 0
268  };
269  static const int8_t left[12] = {
270  0, -1, TOP_DC_PRED, 0, -1, -1, -1, 0, -1, DC_128_PRED
271  };
272  int i;
273 
274  if (!(h->top_samples_available & 0x8000)) {
275  for (i = 0; i < 4; i++) {
276  int status = top[h->intra4x4_pred_mode_cache[scan8[0] + i]];
277  if (status < 0) {
279  "top block unavailable for requested intra4x4 mode %d at %d %d\n",
280  status, h->mb_x, h->mb_y);
281  return -1;
282  } else if (status) {
283  h->intra4x4_pred_mode_cache[scan8[0] + i] = status;
284  }
285  }
286  }
287 
288  if ((h->left_samples_available & 0x8888) != 0x8888) {
289  static const int mask[4] = { 0x8000, 0x2000, 0x80, 0x20 };
290  for (i = 0; i < 4; i++)
291  if (!(h->left_samples_available & mask[i])) {
292  int status = left[h->intra4x4_pred_mode_cache[scan8[0] + 8 * i]];
293  if (status < 0) {
295  "left block unavailable for requested intra4x4 mode %d at %d %d\n",
296  status, h->mb_x, h->mb_y);
297  return -1;
298  } else if (status) {
299  h->intra4x4_pred_mode_cache[scan8[0] + 8 * i] = status;
300  }
301  }
302  }
303 
304  return 0;
305 } // FIXME cleanup like ff_h264_check_intra_pred_mode
306 
/* Validate the 16x16/chroma intra prediction mode against neighbour sample
 * availability; returns the (possibly remapped) mode, or -1 on error.
 * NOTE(review): the av_log call heads (doc lines 317, 326, 342) are missing
 * from this extract — confirm against the upstream file. */
311 int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
312 {
 /* Remap tables: -1 = invalid without that neighbour, otherwise a
  * substitute DC-style mode. */
313  static const int8_t top[4] = { LEFT_DC_PRED8x8, 1, -1, -1 };
314  static const int8_t left[5] = { TOP_DC_PRED8x8, -1, 2, -1, DC_128_PRED8x8 };
315 
 /* Unsigned compare also rejects negative modes. */
316  if (mode > 3U) {
318  "out of range intra chroma pred mode at %d %d\n",
319  h->mb_x, h->mb_y);
320  return -1;
321  }
322 
323  if (!(h->top_samples_available & 0x8000)) {
324  mode = top[mode];
325  if (mode < 0) {
327  "top block unavailable for requested intra mode at %d %d\n",
328  h->mb_x, h->mb_y);
329  return -1;
330  }
331  }
332 
333  if ((h->left_samples_available & 0x8080) != 0x8080) {
334  mode = left[mode];
335  if (is_chroma && (h->left_samples_available & 0x8080)) {
336  // mad cow disease mode, aka MBAFF + constrained_intra_pred
337  mode = ALZHEIMER_DC_L0T_PRED8x8 +
338  (!(h->left_samples_available & 0x8000)) +
339  2 * (mode == DC_128_PRED8x8);
340  }
341  if (mode < 0) {
343  "left block unavailable for requested intra mode at %d %d\n",
344  h->mb_x, h->mb_y);
345  return -1;
346  }
347  }
348 
349  return mode;
350 }
351 
/* Parse a NAL unit header and unescape the RBSP payload (removing
 * 00 00 03 emulation-prevention sequences).  Returns a pointer either into
 * the source (no escapes present) or into an internal rbsp buffer, and
 * reports the payload length and bytes consumed.
 * NOTE(review): the signature head (doc line 352) and several interior
 * lines (doc 387, 397, 407, 420 — loop-adjustment/break statements and the
 * av_fast_malloc size argument) are missing from this extract — confirm
 * against the upstream file. */
353  int *dst_length, int *consumed, int length)
354 {
355  int i, si, di;
356  uint8_t *dst;
357  int bufidx;
358 
 /* NAL header byte: 1 forbidden bit, 2 ref-idc bits, 5 type bits. */
359  // src[0]&0x80; // forbidden bit
360  h->nal_ref_idc = src[0] >> 5;
361  h->nal_unit_type = src[0] & 0x1F;
362 
363  src++;
364  length--;
365 
 /* Fast scan for the first 00 00 0x pattern (escape or next startcode),
  * using word-at-a-time reads where unaligned access is cheap. */
366 #define STARTCODE_TEST \
367  if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \
368  if (src[i + 2] != 3) { \
369  /* startcode, so we must be past the end */ \
370  length = i; \
371  } \
372  break; \
373  }
374 #if HAVE_FAST_UNALIGNED
375 #define FIND_FIRST_ZERO \
376  if (i > 0 && !src[i]) \
377  i--; \
378  while (src[i]) \
379  i++
380 #if HAVE_FAST_64BIT
381  for (i = 0; i + 1 < length; i += 9) {
382  if (!((~AV_RN64A(src + i) &
383  (AV_RN64A(src + i) - 0x0100010001000101ULL)) &
384  0x8000800080008080ULL))
385  continue;
386  FIND_FIRST_ZERO;
388  i -= 7;
389  }
390 #else
391  for (i = 0; i + 1 < length; i += 5) {
392  if (!((~AV_RN32A(src + i) &
393  (AV_RN32A(src + i) - 0x01000101U)) &
394  0x80008080U))
395  continue;
396  FIND_FIRST_ZERO;
398  i -= 3;
399  }
400 #endif
401 #else
402  for (i = 0; i + 1 < length; i += 2) {
403  if (src[i])
404  continue;
405  if (i > 0 && src[i - 1] == 0)
406  i--;
407  }
408 #endif
409 
410  if (i >= length - 1) { // no escaped 0
411  *dst_length = length;
412  *consumed = length + 1; // +1 for the header
413  return src;
414  }
415 
416  // use second escape buffer for inter data
417  bufidx = h->nal_unit_type == NAL_DPC ? 1 : 0;
418  av_fast_malloc(&h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx],
421  dst = h->rbsp_buffer[bufidx];
422 
423  if (dst == NULL)
424  return NULL;
425 
 /* Copy the escape-free prefix, then filter out 00 00 03 sequences. */
426  memcpy(dst, src, i);
427  si = di = i;
428  while (si + 2 < length) {
429  // remove escapes (very rare 1:2^22)
430  if (src[si + 2] > 3) {
431  dst[di++] = src[si++];
432  dst[di++] = src[si++];
433  } else if (src[si] == 0 && src[si + 1] == 0) {
434  if (src[si + 2] == 3) { // escape
435  dst[di++] = 0;
436  dst[di++] = 0;
437  si += 3;
438  continue;
439  } else // next start code
440  goto nsc;
441  }
442 
443  dst[di++] = src[si++];
444  }
445  while (si < length)
446  dst[di++] = src[si++];
447 nsc:
448 
449  memset(dst + di, 0, FF_INPUT_BUFFER_PADDING_SIZE);
450 
451  *dst_length = di;
452  *consumed = si + 1; // +1 for the header
453  /* FIXME store exact number of bits in the getbitcontext
454  * (it is needed for decoding) */
455  return dst;
456 }
457 
462 static int decode_rbsp_trailing(H264Context *h, const uint8_t *src)
463 {
464  int v = *src;
465  int r;
466 
467  tprintf(h->avctx, "rbsp trailing %X\n", v);
468 
469  for (r = 1; r < 9; r++) {
470  if (v & 1)
471  return r;
472  v >>= 1;
473  }
474  return 0;
475 }
476 
477 static inline int get_lowest_part_list_y(H264Context *h, Picture *pic, int n,
478  int height, int y_offset, int list)
479 {
480  int raw_my = h->mv_cache[list][scan8[n]][1];
481  int filter_height_up = (raw_my & 3) ? 2 : 0;
482  int filter_height_down = (raw_my & 3) ? 3 : 0;
483  int full_my = (raw_my >> 2) + y_offset;
484  int top = full_my - filter_height_up;
485  int bottom = full_my + filter_height_down + height;
486 
487  return FFMAX(abs(top), bottom);
488 }
489 
490 static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n,
491  int height, int y_offset, int list0,
492  int list1, int *nrefs)
493 {
494  int my;
495 
496  y_offset += 16 * (h->mb_y >> MB_FIELD);
497 
498  if (list0) {
499  int ref_n = h->ref_cache[0][scan8[n]];
500  Picture *ref = &h->ref_list[0][ref_n];
501 
502  // Error resilience puts the current picture in the ref list.
503  // Don't try to wait on these as it will cause a deadlock.
504  // Fields can wait on each other, though.
505  if (ref->f.thread_opaque != h->cur_pic.f.thread_opaque ||
506  (ref->f.reference & 3) != h->picture_structure) {
507  my = get_lowest_part_list_y(h, ref, n, height, y_offset, 0);
508  if (refs[0][ref_n] < 0)
509  nrefs[0] += 1;
510  refs[0][ref_n] = FFMAX(refs[0][ref_n], my);
511  }
512  }
513 
514  if (list1) {
515  int ref_n = h->ref_cache[1][scan8[n]];
516  Picture *ref = &h->ref_list[1][ref_n];
517 
518  if (ref->f.thread_opaque != h->cur_pic.f.thread_opaque ||
519  (ref->f.reference & 3) != h->picture_structure) {
520  my = get_lowest_part_list_y(h, ref, n, height, y_offset, 1);
521  if (refs[1][ref_n] < 0)
522  nrefs[1] += 1;
523  refs[1][ref_n] = FFMAX(refs[1][ref_n], my);
524  }
525  }
526 }
527 
/* Wait until every reference row this macroblock's motion compensation
 * will read has been decoded by the owning frame thread.  Collects the
 * lowest row needed per reference/list for the current partition layout,
 * then calls ff_thread_await_progress for each, handling frame/field
 * reference mismatches.
 * NOTE(review): the signature (doc line 533, presumably
 * static void await_references(H264Context *h)) is missing from this
 * extract — confirm against the upstream file. */
534 {
535  const int mb_xy = h->mb_xy;
536  const int mb_type = h->cur_pic.f.mb_type[mb_xy];
537  int refs[2][48];
538  int nrefs[2] = { 0 };
539  int ref, list;
540 
541  memset(refs, -1, sizeof(refs));
542 
 /* Gather the lowest needed row for every partition shape. */
543  if (IS_16X16(mb_type)) {
544  get_lowest_part_y(h, refs, 0, 16, 0,
545  IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
546  } else if (IS_16X8(mb_type)) {
547  get_lowest_part_y(h, refs, 0, 8, 0,
548  IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
549  get_lowest_part_y(h, refs, 8, 8, 8,
550  IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
551  } else if (IS_8X16(mb_type)) {
552  get_lowest_part_y(h, refs, 0, 16, 0,
553  IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
554  get_lowest_part_y(h, refs, 4, 16, 0,
555  IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
556  } else {
557  int i;
558 
559  assert(IS_8X8(mb_type));
560 
561  for (i = 0; i < 4; i++) {
562  const int sub_mb_type = h->sub_mb_type[i];
563  const int n = 4 * i;
564  int y_offset = (i & 2) << 2;
565 
566  if (IS_SUB_8X8(sub_mb_type)) {
567  get_lowest_part_y(h, refs, n, 8, y_offset,
568  IS_DIR(sub_mb_type, 0, 0),
569  IS_DIR(sub_mb_type, 0, 1),
570  nrefs);
571  } else if (IS_SUB_8X4(sub_mb_type)) {
572  get_lowest_part_y(h, refs, n, 4, y_offset,
573  IS_DIR(sub_mb_type, 0, 0),
574  IS_DIR(sub_mb_type, 0, 1),
575  nrefs);
576  get_lowest_part_y(h, refs, n + 2, 4, y_offset + 4,
577  IS_DIR(sub_mb_type, 0, 0),
578  IS_DIR(sub_mb_type, 0, 1),
579  nrefs);
580  } else if (IS_SUB_4X8(sub_mb_type)) {
581  get_lowest_part_y(h, refs, n, 8, y_offset,
582  IS_DIR(sub_mb_type, 0, 0),
583  IS_DIR(sub_mb_type, 0, 1),
584  nrefs);
585  get_lowest_part_y(h, refs, n + 1, 8, y_offset,
586  IS_DIR(sub_mb_type, 0, 0),
587  IS_DIR(sub_mb_type, 0, 1),
588  nrefs);
589  } else {
590  int j;
591  assert(IS_SUB_4X4(sub_mb_type));
592  for (j = 0; j < 4; j++) {
593  int sub_y_offset = y_offset + 2 * (j & 2);
594  get_lowest_part_y(h, refs, n + j, 4, sub_y_offset,
595  IS_DIR(sub_mb_type, 0, 0),
596  IS_DIR(sub_mb_type, 0, 1),
597  nrefs);
598  }
599  }
600  }
601  }
602 
 /* Now wait on each referenced picture, translating the needed row into
  * the reference's own geometry (frame vs field). */
603  for (list = h->list_count - 1; list >= 0; list--)
604  for (ref = 0; ref < 48 && nrefs[list]; ref++) {
605  int row = refs[list][ref];
606  if (row >= 0) {
607  Picture *ref_pic = &h->ref_list[list][ref];
608  int ref_field = ref_pic->f.reference - 1;
609  int ref_field_picture = ref_pic->field_picture;
610  int pic_height = 16 * h->mb_height >> ref_field_picture;
611 
612  row <<= MB_MBAFF;
613  nrefs[list]--;
614 
615  if (!FIELD_PICTURE && ref_field_picture) { // frame referencing two fields
616  ff_thread_await_progress(&ref_pic->f,
617  FFMIN((row >> 1) - !(row & 1),
618  pic_height - 1),
619  1);
620  ff_thread_await_progress(&ref_pic->f,
621  FFMIN((row >> 1), pic_height - 1),
622  0);
623  } else if (FIELD_PICTURE && !ref_field_picture) { // field referencing one field of a frame
624  ff_thread_await_progress(&ref_pic->f,
625  FFMIN(row * 2 + ref_field,
626  pic_height - 1),
627  0);
628  } else if (FIELD_PICTURE) {
629  ff_thread_await_progress(&ref_pic->f,
630  FFMIN(row, pic_height - 1),
631  ref_field);
632  } else {
633  ff_thread_await_progress(&ref_pic->f,
634  FFMIN(row, pic_height - 1),
635  0);
636  }
637  }
638  }
639 }
640 
/* Motion-compensate one prediction direction of one partition: fetch the
 * (possibly sub-pel) luma and chroma source blocks from @pic, falling back
 * to the edge-emulation buffer when the MV points outside the picture, and
 * run the qpel/chroma MC functions into the destination planes.
 * NOTE(review): the signature head (doc line 641, presumably
 * static av_always_inline void mc_dir_part(H264Context *h, Picture *pic,)
 * and the heads of the emulated-edge calls (doc 675, 694, 708, 735, 745)
 * are missing from this extract — confirm against the upstream file. */
642  int n, int square, int height,
643  int delta, int list,
644  uint8_t *dest_y, uint8_t *dest_cb,
645  uint8_t *dest_cr,
646  int src_x_offset, int src_y_offset,
647  qpel_mc_func *qpix_op,
648  h264_chroma_mc_func chroma_op,
649  int pixel_shift, int chroma_idc)
650 {
 /* MVs are in quarter-pel units; the low 2 bits select the qpel filter. */
651  const int mx = h->mv_cache[list][scan8[n]][0] + src_x_offset * 8;
652  int my = h->mv_cache[list][scan8[n]][1] + src_y_offset * 8;
653  const int luma_xy = (mx & 3) + ((my & 3) << 2);
654  int offset = ((mx >> 2) << pixel_shift) + (my >> 2) * h->mb_linesize;
655  uint8_t *src_y = pic->f.data[0] + offset;
656  uint8_t *src_cb, *src_cr;
657  int extra_width = h->emu_edge_width;
658  int extra_height = h->emu_edge_height;
659  int emu = 0;
660  const int full_mx = mx >> 2;
661  const int full_my = my >> 2;
662  const int pic_width = 16 * h->mb_width;
663  const int pic_height = 16 * h->mb_height >> MB_FIELD;
664  int ysh;
665 
 /* Sub-pel interpolation reads beyond the block; shrink the allowed
  * margin accordingly. */
666  if (mx & 7)
667  extra_width -= 3;
668  if (my & 7)
669  extra_height -= 3;
670 
671  if (full_mx < 0 - extra_width ||
672  full_my < 0 - extra_height ||
673  full_mx + 16 /*FIXME*/ > pic_width + extra_width ||
674  full_my + 16 /*FIXME*/ > pic_height + extra_height) {
676  src_y - (2 << pixel_shift) - 2 * h->mb_linesize,
677  h->mb_linesize,
678  16 + 5, 16 + 5 /*FIXME*/, full_mx - 2,
679  full_my - 2, pic_width, pic_height);
680  src_y = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize;
681  emu = 1;
682  }
683 
684  qpix_op[luma_xy](dest_y, src_y, h->mb_linesize); // FIXME try variable height perhaps?
685  if (!square)
686  qpix_op[luma_xy](dest_y + delta, src_y + delta, h->mb_linesize);
687 
688  if (CONFIG_GRAY && h->flags & CODEC_FLAG_GRAY)
689  return;
690 
 /* 4:4:4 chroma uses the luma MC path (same geometry). */
691  if (chroma_idc == 3 /* yuv444 */) {
692  src_cb = pic->f.data[1] + offset;
693  if (emu) {
695  src_cb - (2 << pixel_shift) - 2 * h->mb_linesize,
696  h->mb_linesize,
697  16 + 5, 16 + 5 /*FIXME*/,
698  full_mx - 2, full_my - 2,
699  pic_width, pic_height);
700  src_cb = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize;
701  }
702  qpix_op[luma_xy](dest_cb, src_cb, h->mb_linesize); // FIXME try variable height perhaps?
703  if (!square)
704  qpix_op[luma_xy](dest_cb + delta, src_cb + delta, h->mb_linesize);
705 
706  src_cr = pic->f.data[2] + offset;
707  if (emu) {
709  src_cr - (2 << pixel_shift) - 2 * h->mb_linesize,
710  h->mb_linesize,
711  16 + 5, 16 + 5 /*FIXME*/,
712  full_mx - 2, full_my - 2,
713  pic_width, pic_height);
714  src_cr = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize;
715  }
716  qpix_op[luma_xy](dest_cr, src_cr, h->mb_linesize); // FIXME try variable height perhaps?
717  if (!square)
718  qpix_op[luma_xy](dest_cr + delta, src_cr + delta, h->mb_linesize);
719  return;
720  }
721 
 /* Subsampled chroma: vertical shift is 3 for 4:2:0, 2 for 4:2:2. */
722  ysh = 3 - (chroma_idc == 2 /* yuv422 */);
723  if (chroma_idc == 1 /* yuv420 */ && MB_FIELD) {
724  // chroma offset when predicting from a field of opposite parity
725  my += 2 * ((h->mb_y & 1) - (pic->f.reference - 1));
726  emu |= (my >> 3) < 0 || (my >> 3) + 8 >= (pic_height >> 1);
727  }
728 
729  src_cb = pic->f.data[1] + ((mx >> 3) << pixel_shift) +
730  (my >> ysh) * h->mb_uvlinesize;
731  src_cr = pic->f.data[2] + ((mx >> 3) << pixel_shift) +
732  (my >> ysh) * h->mb_uvlinesize;
733 
734  if (emu) {
736  9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
737  pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
738  src_cb = h->edge_emu_buffer;
739  }
740  chroma_op(dest_cb, src_cb, h->mb_uvlinesize,
741  height >> (chroma_idc == 1 /* yuv420 */),
742  mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7);
743 
744  if (emu) {
746  9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
747  pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
748  src_cr = h->edge_emu_buffer;
749  }
750  chroma_op(dest_cr, src_cr, h->mb_uvlinesize, height >> (chroma_idc == 1 /* yuv420 */),
751  mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7);
752 }
753 
754 static av_always_inline void mc_part_std(H264Context *h, int n, int square,
755  int height, int delta,
756  uint8_t *dest_y, uint8_t *dest_cb,
757  uint8_t *dest_cr,
758  int x_offset, int y_offset,
759  qpel_mc_func *qpix_put,
760  h264_chroma_mc_func chroma_put,
761  qpel_mc_func *qpix_avg,
762  h264_chroma_mc_func chroma_avg,
763  int list0, int list1,
764  int pixel_shift, int chroma_idc)
765 {
766  qpel_mc_func *qpix_op = qpix_put;
767  h264_chroma_mc_func chroma_op = chroma_put;
768 
769  dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
770  if (chroma_idc == 3 /* yuv444 */) {
771  dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
772  dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
773  } else if (chroma_idc == 2 /* yuv422 */) {
774  dest_cb += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
775  dest_cr += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
776  } else { /* yuv420 */
777  dest_cb += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
778  dest_cr += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
779  }
780  x_offset += 8 * h->mb_x;
781  y_offset += 8 * (h->mb_y >> MB_FIELD);
782 
783  if (list0) {
784  Picture *ref = &h->ref_list[0][h->ref_cache[0][scan8[n]]];
785  mc_dir_part(h, ref, n, square, height, delta, 0,
786  dest_y, dest_cb, dest_cr, x_offset, y_offset,
787  qpix_op, chroma_op, pixel_shift, chroma_idc);
788 
789  qpix_op = qpix_avg;
790  chroma_op = chroma_avg;
791  }
792 
793  if (list1) {
794  Picture *ref = &h->ref_list[1][h->ref_cache[1][scan8[n]]];
795  mc_dir_part(h, ref, n, square, height, delta, 1,
796  dest_y, dest_cb, dest_cr, x_offset, y_offset,
797  qpix_op, chroma_op, pixel_shift, chroma_idc);
798  }
799 }
800 
/* Weighted-prediction motion compensation for one partition.  For
 * bidirectional blocks both directions are fetched (the second into the
 * bipred scratchpad) and blended with implicit or explicit biweights;
 * unidirectional blocks are fetched then scaled in place.
 * NOTE(review): the signature head (doc line 801) and the weight-denominator
 * argument lines of the weighting calls (doc 865, 871, 877, 892, 897, 901)
 * are missing from this extract — confirm against the upstream file. */
802  int height, int delta,
803  uint8_t *dest_y, uint8_t *dest_cb,
804  uint8_t *dest_cr,
805  int x_offset, int y_offset,
806  qpel_mc_func *qpix_put,
807  h264_chroma_mc_func chroma_put,
808  h264_weight_func luma_weight_op,
809  h264_weight_func chroma_weight_op,
810  h264_biweight_func luma_weight_avg,
811  h264_biweight_func chroma_weight_avg,
812  int list0, int list1,
813  int pixel_shift, int chroma_idc)
814 {
815  int chroma_height;
816 
817  dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
818  if (chroma_idc == 3 /* yuv444 */) {
 /* 4:4:4: chroma uses the luma geometry and luma weighting ops. */
819  chroma_height = height;
820  chroma_weight_avg = luma_weight_avg;
821  chroma_weight_op = luma_weight_op;
822  dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
823  dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
824  } else if (chroma_idc == 2 /* yuv422 */) {
825  chroma_height = height;
826  dest_cb += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
827  dest_cr += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
828  } else { /* yuv420 */
829  chroma_height = height >> 1;
830  dest_cb += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
831  dest_cr += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
832  }
833  x_offset += 8 * h->mb_x;
834  y_offset += 8 * (h->mb_y >> MB_FIELD);
835 
836  if (list0 && list1) {
837  /* don't optimize for luma-only case, since B-frames usually
838  * use implicit weights => chroma too. */
839  uint8_t *tmp_cb = h->bipred_scratchpad;
840  uint8_t *tmp_cr = h->bipred_scratchpad + (16 << pixel_shift);
841  uint8_t *tmp_y = h->bipred_scratchpad + 16 * h->mb_uvlinesize;
842  int refn0 = h->ref_cache[0][scan8[n]];
843  int refn1 = h->ref_cache[1][scan8[n]];
844 
845  mc_dir_part(h, &h->ref_list[0][refn0], n, square, height, delta, 0,
846  dest_y, dest_cb, dest_cr,
847  x_offset, y_offset, qpix_put, chroma_put,
848  pixel_shift, chroma_idc);
849  mc_dir_part(h, &h->ref_list[1][refn1], n, square, height, delta, 1,
850  tmp_y, tmp_cb, tmp_cr,
851  x_offset, y_offset, qpix_put, chroma_put,
852  pixel_shift, chroma_idc);
853 
 /* use_weight == 2 selects implicit weighting (weights sum to 64). */
854  if (h->use_weight == 2) {
855  int weight0 = h->implicit_weight[refn0][refn1][h->mb_y & 1];
856  int weight1 = 64 - weight0;
857  luma_weight_avg(dest_y, tmp_y, h->mb_linesize,
858  height, 5, weight0, weight1, 0);
859  chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize,
860  chroma_height, 5, weight0, weight1, 0);
861  chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize,
862  chroma_height, 5, weight0, weight1, 0);
863  } else {
864  luma_weight_avg(dest_y, tmp_y, h->mb_linesize, height,
866  h->luma_weight[refn0][0][0],
867  h->luma_weight[refn1][1][0],
868  h->luma_weight[refn0][0][1] +
869  h->luma_weight[refn1][1][1]);
870  chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize, chroma_height,
872  h->chroma_weight[refn0][0][0][0],
873  h->chroma_weight[refn1][1][0][0],
874  h->chroma_weight[refn0][0][0][1] +
875  h->chroma_weight[refn1][1][0][1]);
876  chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize, chroma_height,
878  h->chroma_weight[refn0][0][1][0],
879  h->chroma_weight[refn1][1][1][0],
880  h->chroma_weight[refn0][0][1][1] +
881  h->chroma_weight[refn1][1][1][1]);
882  }
883  } else {
 /* Unidirectional: fetch, then apply explicit weights in place. */
884  int list = list1 ? 1 : 0;
885  int refn = h->ref_cache[list][scan8[n]];
886  Picture *ref = &h->ref_list[list][refn];
887  mc_dir_part(h, ref, n, square, height, delta, list,
888  dest_y, dest_cb, dest_cr, x_offset, y_offset,
889  qpix_put, chroma_put, pixel_shift, chroma_idc);
890 
891  luma_weight_op(dest_y, h->mb_linesize, height,
893  h->luma_weight[refn][list][0],
894  h->luma_weight[refn][list][1]);
895  if (h->use_weight_chroma) {
896  chroma_weight_op(dest_cb, h->mb_uvlinesize, chroma_height,
898  h->chroma_weight[refn][list][0][0],
899  h->chroma_weight[refn][list][0][1]);
900  chroma_weight_op(dest_cr, h->mb_uvlinesize, chroma_height,
902  h->chroma_weight[refn][list][1][0],
903  h->chroma_weight[refn][list][1][1]);
904  }
905  }
906 }
907 
/* Prefetch the reference pixels that upcoming macroblocks are likely to
 * read, to warm the cache ahead of the actual motion compensation.
 * NOTE(review): the signature head (doc line 908, presumably
 * static void prefetch_motion(H264Context *h, int list,) is missing from
 * this extract — confirm against the upstream file. */
909  int pixel_shift, int chroma_idc)
910 {
911  /* fetch pixels for estimated mv 4 macroblocks ahead
912  * optimized for 64byte cache lines */
913  const int refn = h->ref_cache[list][scan8[0]];
914  if (refn >= 0) {
915  const int mx = (h->mv_cache[list][scan8[0]][0] >> 2) + 16 * h->mb_x + 8;
916  const int my = (h->mv_cache[list][scan8[0]][1] >> 2) + 16 * h->mb_y;
917  uint8_t **src = h->ref_list[list][refn].f.data;
918  int off = (mx << pixel_shift) +
919  (my + (h->mb_x & 3) * 4) * h->mb_linesize +
920  (64 << pixel_shift);
921  h->vdsp.prefetch(src[0] + off, h->linesize, 4);
 /* 4:4:4 prefetches all three full-size planes; otherwise the two
  * subsampled chroma planes are prefetched together. */
922  if (chroma_idc == 3 /* yuv444 */) {
923  h->vdsp.prefetch(src[1] + off, h->linesize, 4);
924  h->vdsp.prefetch(src[2] + off, h->linesize, 4);
925  } else {
926  off = ((mx >> 1) << pixel_shift) +
927  ((my >> 1) + (h->mb_x & 7)) * h->uvlinesize +
928  (64 << pixel_shift);
929  h->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
930  }
931  }
932 }
933 
/* Free the per-context decoding tables and, depending on free_rbsp, either
 * free the DPB outright or mark every entry for lazy reallocation; also
 * tears down per-thread scratch state for all slice-thread contexts.
 * NOTE(review): several free lines are missing from this extract
 * (doc 939-940, 945-946, 971-972, 976) — confirm the exact set of freed
 * members against the upstream file. */
934 static void free_tables(H264Context *h, int free_rbsp)
935 {
936  int i;
937  H264Context *hx;
938 
941  av_freep(&h->cbp_table);
942  av_freep(&h->mvd_table[0]);
943  av_freep(&h->mvd_table[1]);
944  av_freep(&h->direct_table);
947  h->slice_table = NULL;
948  av_freep(&h->list_counts);
949 
950  av_freep(&h->mb2b_xy);
951  av_freep(&h->mb2br_xy);
952 
953  if (free_rbsp) {
954  for (i = 0; i < h->picture_count && !h->avctx->internal->is_copy; i++)
955  free_picture(h, &h->DPB[i]);
956  av_freep(&h->DPB);
957  h->picture_count = 0;
958  } else if (h->DPB) {
 /* Keep the DPB but force buffers to be reallocated on next use. */
959  for (i = 0; i < h->picture_count; i++)
960  h->DPB[i].needs_realloc = 1;
961  }
962 
963  h->cur_pic_ptr = NULL;
964 
965  for (i = 0; i < MAX_THREADS; i++) {
966  hx = h->thread_context[i];
967  if (!hx)
968  continue;
969  av_freep(&hx->top_borders[1]);
970  av_freep(&hx->top_borders[0]);
973  av_freep(&hx->dc_val_base);
974  av_freep(&hx->me.scratchpad);
975  av_freep(&hx->er.mb_index2xy);
977  av_freep(&hx->er.er_temp_buffer);
978  av_freep(&hx->er.mbintra_table);
979  av_freep(&hx->er.mbskip_table);
980 
981  if (free_rbsp) {
982  av_freep(&hx->rbsp_buffer[1]);
983  av_freep(&hx->rbsp_buffer[0]);
984  hx->rbsp_buffer_size[0] = 0;
985  hx->rbsp_buffer_size[1] = 0;
986  }
 /* Thread context 0 is the main context itself — never freed here. */
987  if (i)
988  av_freep(&h->thread_context[i]);
989  }
990 }
991 
/* Build the 8x8 dequantization tables for every QP, one table per scaling
 * matrix; identical matrices share a single table to save memory.
 * NOTE(review): the signature (doc line 992) is missing from this
 * extract — confirm against the upstream file. */
993 {
994  int i, j, q, x;
995  const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8);
996 
997  for (i = 0; i < 6; i++) {
998  h->dequant8_coeff[i] = h->dequant8_buffer[i];
 /* Reuse an earlier table when the scaling matrices are identical. */
999  for (j = 0; j < i; j++)
1000  if (!memcmp(h->pps.scaling_matrix8[j], h->pps.scaling_matrix8[i],
1001  64 * sizeof(uint8_t))) {
1002  h->dequant8_coeff[i] = h->dequant8_buffer[j];
1003  break;
1004  }
1005  if (j < i)
1006  continue;
1007 
 /* QP is decomposed as q = 6*div6[q] + rem6[q]; div6 gives the shift,
  * rem6 selects the base coefficient row. */
1008  for (q = 0; q < max_qp + 1; q++) {
1009  int shift = div6[q];
1010  int idx = rem6[q];
1011  for (x = 0; x < 64; x++)
1012  h->dequant8_coeff[i][q][(x >> 3) | ((x & 7) << 3)] =
1013  ((uint32_t)dequant8_coeff_init[idx][dequant8_coeff_init_scan[((x >> 1) & 12) | (x & 3)]] *
1014  h->pps.scaling_matrix8[i][x]) << shift;
1015  }
1016  }
1017 }
1018 
/* Build the 4x4 dequantization tables for every QP, one table per scaling
 * matrix; identical matrices share a single table to save memory.
 * NOTE(review): the signature (doc line 1019) is missing from this
 * extract — confirm against the upstream file. */
1020 {
1021  int i, j, q, x;
1022  const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8);
1023  for (i = 0; i < 6; i++) {
1024  h->dequant4_coeff[i] = h->dequant4_buffer[i];
 /* Reuse an earlier table when the scaling matrices are identical. */
1025  for (j = 0; j < i; j++)
1026  if (!memcmp(h->pps.scaling_matrix4[j], h->pps.scaling_matrix4[i],
1027  16 * sizeof(uint8_t))) {
1028  h->dequant4_coeff[i] = h->dequant4_buffer[j];
1029  break;
1030  }
1031  if (j < i)
1032  continue;
1033 
1034  for (q = 0; q < max_qp + 1; q++) {
1035  int shift = div6[q] + 2;
1036  int idx = rem6[q];
1037  for (x = 0; x < 16; x++)
1038  h->dequant4_coeff[i][q][(x >> 2) | ((x << 2) & 0xF)] =
1039  ((uint32_t)dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] *
1040  h->pps.scaling_matrix4[i][x]) << shift;
1041  }
1042  }
1043 }
1044 
/* Initialize all dequantization tables (4x4 and, when 8x8 transform mode
 * is enabled, 8x8), and force identity scaling (1 << 6) for lossless
 * transform-bypass mode.
 * NOTE(review): the signature (doc 1045) and doc lines 1048, 1050, 1055
 * (presumably the calls to the two init functions and part of the bypass
 * branch) are missing from this extract — confirm upstream. */
1046 {
1047  int i, x;
1049  if (h->pps.transform_8x8_mode)
1051  if (h->sps.transform_bypass) {
1052  for (i = 0; i < 6; i++)
1053  for (x = 0; x < 16; x++)
1054  h->dequant4_coeff[i][0][x] = 1 << 6;
1056  for (i = 0; i < 6; i++)
1057  for (x = 0; x < 64; x++)
1058  h->dequant8_coeff[i][0][x] = 1 << 6;
1059  }
1060 }
1061 
/* Allocate all per-context decoding tables (pred-mode caches, slice table,
 * cbp/mvd/direct tables, mb-to-block index maps) and, on first call, the
 * DPB itself.  Returns 0 on success, negative on failure.
 * NOTE(review): the signature (doc 1062) and the heads of several
 * FF_ALLOCZ_OR_GOTO-style macro invocations (doc 1068, 1070, 1072, 1074,
 * 1076, 1082, 1084, 1091, 1093) plus doc 1105, 1108, 1113-1114 are missing
 * from this extract, so the allocation targets on those lines must be
 * confirmed against the upstream file. */
1063 {
1064  const int big_mb_num = h->mb_stride * (h->mb_height + 1);
1065  const int row_mb_num = h->mb_stride * 2 * h->avctx->thread_count;
1066  int x, y, i;
1067 
1069  row_mb_num * 8 * sizeof(uint8_t), fail)
1071  big_mb_num * 48 * sizeof(uint8_t), fail)
1073  (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base), fail)
1075  big_mb_num * sizeof(uint16_t), fail)
1077  big_mb_num * sizeof(uint8_t), fail)
1078  FF_ALLOCZ_OR_GOTO(h->avctx, h->mvd_table[0],
1079  16 * row_mb_num * sizeof(uint8_t), fail);
1080  FF_ALLOCZ_OR_GOTO(h->avctx, h->mvd_table[1],
1081  16 * row_mb_num * sizeof(uint8_t), fail);
1083  4 * big_mb_num * sizeof(uint8_t), fail);
1085  big_mb_num * sizeof(uint8_t), fail)
1086 
 /* -1 marks "no slice" in the slice table. */
1087  memset(h->slice_table_base, -1,
1088  (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base));
1089  h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;
1090 
1092  big_mb_num * sizeof(uint32_t), fail);
1094  big_mb_num * sizeof(uint32_t), fail);
 /* Precompute macroblock-index -> block-index mappings. */
1095  for (y = 0; y < h->mb_height; y++)
1096  for (x = 0; x < h->mb_width; x++) {
1097  const int mb_xy = x + y * h->mb_stride;
1098  const int b_xy = 4 * x + 4 * y * h->b_stride;
1099 
1100  h->mb2b_xy[mb_xy] = b_xy;
1101  h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
1102  }
1103 
1104  if (!h->dequant4_coeff[0])
1106 
1107  if (!h->DPB) {
1109  h->DPB = av_mallocz_array(h->picture_count, sizeof(*h->DPB));
1110  if (!h->DPB)
1111  return AVERROR(ENOMEM);
1112  for (i = 0; i < h->picture_count; i++)
1115  }
1116 
1117  return 0;
1118 
1119 fail:
1120  free_tables(h, 1);
1121  return -1;
1122 }
1123 
1127 static void clone_tables(H264Context *dst, H264Context *src, int i)
1128 {
1129  dst->intra4x4_pred_mode = src->intra4x4_pred_mode + i * 8 * 2 * src->mb_stride;
1130  dst->non_zero_count = src->non_zero_count;
1131  dst->slice_table = src->slice_table;
1132  dst->cbp_table = src->cbp_table;
1133  dst->mb2b_xy = src->mb2b_xy;
1134  dst->mb2br_xy = src->mb2br_xy;
1136  dst->mvd_table[0] = src->mvd_table[0] + i * 8 * 2 * src->mb_stride;
1137  dst->mvd_table[1] = src->mvd_table[1] + i * 8 * 2 * src->mb_stride;
1138  dst->direct_table = src->direct_table;
1139  dst->list_counts = src->list_counts;
1140  dst->DPB = src->DPB;
1141  dst->cur_pic_ptr = src->cur_pic_ptr;
1142  dst->cur_pic = src->cur_pic;
1143  dst->bipred_scratchpad = NULL;
1144  dst->edge_emu_buffer = NULL;
1145  dst->me.scratchpad = NULL;
1147  src->sps.chroma_format_idc);
1148 }
1149 
1155 {
1156  ERContext *er = &h->er;
1157  int mb_array_size = h->mb_height * h->mb_stride;
1158  int y_size = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
1159  int c_size = h->mb_stride * (h->mb_height + 1);
1160  int yc_size = y_size + 2 * c_size;
1161  int x, y, i;
1162 
1164  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2, fail)
1166  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2, fail)
1167 
1168  h->ref_cache[0][scan8[5] + 1] =
1169  h->ref_cache[0][scan8[7] + 1] =
1170  h->ref_cache[0][scan8[13] + 1] =
1171  h->ref_cache[1][scan8[5] + 1] =
1172  h->ref_cache[1][scan8[7] + 1] =
1173  h->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;
1174 
1175  /* init ER */
1176  er->avctx = h->avctx;
1177  er->dsp = &h->dsp;
1179  er->opaque = h;
1180  er->quarter_sample = 1;
1181 
1182  er->mb_num = h->mb_num;
1183  er->mb_width = h->mb_width;
1184  er->mb_height = h->mb_height;
1185  er->mb_stride = h->mb_stride;
1186  er->b8_stride = h->mb_width * 2 + 1;
1187 
1188  FF_ALLOCZ_OR_GOTO(h->avctx, er->mb_index2xy, (h->mb_num + 1) * sizeof(int),
1189  fail); // error ressilience code looks cleaner with this
1190  for (y = 0; y < h->mb_height; y++)
1191  for (x = 0; x < h->mb_width; x++)
1192  er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride;
1193 
1194  er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
1195  h->mb_stride + h->mb_width;
1196 
1198  mb_array_size * sizeof(uint8_t), fail);
1199 
1200  FF_ALLOC_OR_GOTO(h->avctx, er->mbintra_table, mb_array_size, fail);
1201  memset(er->mbintra_table, 1, mb_array_size);
1202 
1203  FF_ALLOCZ_OR_GOTO(h->avctx, er->mbskip_table, mb_array_size + 2, fail);
1204 
1206  fail);
1207 
1208  FF_ALLOCZ_OR_GOTO(h->avctx, h->dc_val_base, yc_size * sizeof(int16_t), fail);
1209  er->dc_val[0] = h->dc_val_base + h->mb_width * 2 + 2;
1210  er->dc_val[1] = h->dc_val_base + y_size + h->mb_stride + 1;
1211  er->dc_val[2] = er->dc_val[1] + c_size;
1212  for (i = 0; i < yc_size; i++)
1213  h->dc_val_base[i] = 1024;
1214 
1215  return 0;
1216 
1217 fail:
1218  return -1; // free_tables will clean up for us
1219 }
1220 
1221 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
1222  int parse_extradata);
1223 
1225 {
1226 
1227  h->width = h->avctx->width;
1228  h->height = h->avctx->height;
1229 
1230  h->bit_depth_luma = 8;
1231  h->chroma_format_idc = 1;
1232 
1233  ff_h264dsp_init(&h->h264dsp, 8, 1);
1234  ff_h264_pred_init(&h->hpc, h->avctx->codec_id, 8, 1);
1235 
1236  h->dequant_coeff_pps = -1;
1237 
1238  /* needed so that IDCT permutation is known early */
1239  ff_dsputil_init(&h->dsp, h->avctx);
1240  ff_videodsp_init(&h->vdsp, 8);
1241 
1242  memset(h->pps.scaling_matrix4, 16, 6 * 16 * sizeof(uint8_t));
1243  memset(h->pps.scaling_matrix8, 16, 2 * 64 * sizeof(uint8_t));
1244 }
1245 
1247 {
1248  AVCodecContext *avctx = h->avctx;
1249 
1250  if (avctx->extradata[0] == 1) {
1251  int i, cnt, nalsize;
1252  unsigned char *p = avctx->extradata;
1253 
1254  h->is_avc = 1;
1255 
1256  if (avctx->extradata_size < 7) {
1257  av_log(avctx, AV_LOG_ERROR, "avcC too short\n");
1258  return -1;
1259  }
1260  /* sps and pps in the avcC always have length coded with 2 bytes,
1261  * so put a fake nal_length_size = 2 while parsing them */
1262  h->nal_length_size = 2;
1263  // Decode sps from avcC
1264  cnt = *(p + 5) & 0x1f; // Number of sps
1265  p += 6;
1266  for (i = 0; i < cnt; i++) {
1267  nalsize = AV_RB16(p) + 2;
1268  if (p - avctx->extradata + nalsize > avctx->extradata_size)
1269  return -1;
1270  if (decode_nal_units(h, p, nalsize, 1) < 0) {
1271  av_log(avctx, AV_LOG_ERROR,
1272  "Decoding sps %d from avcC failed\n", i);
1273  return -1;
1274  }
1275  p += nalsize;
1276  }
1277  // Decode pps from avcC
1278  cnt = *(p++); // Number of pps
1279  for (i = 0; i < cnt; i++) {
1280  nalsize = AV_RB16(p) + 2;
1281  if (p - avctx->extradata + nalsize > avctx->extradata_size)
1282  return -1;
1283  if (decode_nal_units(h, p, nalsize, 1) < 0) {
1284  av_log(avctx, AV_LOG_ERROR,
1285  "Decoding pps %d from avcC failed\n", i);
1286  return -1;
1287  }
1288  p += nalsize;
1289  }
1290  // Now store right nal length size, that will be used to parse all other nals
1291  h->nal_length_size = (avctx->extradata[4] & 0x03) + 1;
1292  } else {
1293  h->is_avc = 0;
1294  if (decode_nal_units(h, avctx->extradata, avctx->extradata_size, 1) < 0)
1295  return -1;
1296  }
1297  return 0;
1298 }
1299 
1301 {
1302  H264Context *h = avctx->priv_data;
1303  int i;
1304 
1305  h->avctx = avctx;
1306  common_init(h);
1307 
1309  h->picture_range_start = 0;
1311  h->slice_context_count = 1;
1312  h->workaround_bugs = avctx->workaround_bugs;
1313  h->flags = avctx->flags;
1314 
1315  /* set defaults */
1316  // s->decode_mb = ff_h263_decode_mb;
1317  if (!avctx->has_b_frames)
1318  h->low_delay = 1;
1319 
1321 
1323 
1324  h->pixel_shift = 0;
1325  h->sps.bit_depth_luma = avctx->bits_per_raw_sample = 8;
1326 
1327  h->thread_context[0] = h;
1328  h->outputed_poc = h->next_outputed_poc = INT_MIN;
1329  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
1330  h->last_pocs[i] = INT_MIN;
1331  h->prev_poc_msb = 1 << 16;
1332  h->x264_build = -1;
1333  ff_h264_reset_sei(h);
1334  if (avctx->codec_id == AV_CODEC_ID_H264) {
1335  if (avctx->ticks_per_frame == 1)
1336  h->avctx->time_base.den *= 2;
1337  avctx->ticks_per_frame = 2;
1338  }
1339 
1340  if (avctx->extradata_size > 0 && avctx->extradata &&
1342  return -1;
1343 
1347  h->low_delay = 0;
1348  }
1349 
1350  return 0;
1351 }
1352 
1353 #define IN_RANGE(a, b, size) (((a) >= (b)) && ((a) < ((b) + (size))))
1354 #undef REBASE_PICTURE
1355 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
1356  ((pic && pic >= old_ctx->DPB && \
1357  pic < old_ctx->DPB + old_ctx->picture_count) ? \
1358  &new_ctx->DPB[pic - old_ctx->DPB] : NULL)
1359 
1360 static void copy_picture_range(Picture **to, Picture **from, int count,
1361  H264Context *new_base,
1362  H264Context *old_base)
1363 {
1364  int i;
1365 
1366  for (i = 0; i < count; i++) {
1367  assert((IN_RANGE(from[i], old_base, sizeof(*old_base)) ||
1368  IN_RANGE(from[i], old_base->DPB,
1369  sizeof(Picture) * old_base->picture_count) ||
1370  !from[i]));
1371  to[i] = REBASE_PICTURE(from[i], new_base, old_base);
1372  }
1373 }
1374 
/**
 * Synchronize an array of dynamically allocated parameter sets
 * (SPS/PPS buffers) between two contexts.
 *
 * For each slot: if the source has no entry, the destination entry is
 * freed; if the source has an entry and the destination does not, a new
 * buffer is allocated; finally the source contents are copied in.
 *
 * @param to    destination pointer array
 * @param from  source pointer array
 * @param count number of slots in both arrays
 * @param size  size in bytes of one parameter set
 */
static void copy_parameter_set(void **to, void **from, int count, int size)
{
    int i;

    for (i = 0; i < count; i++) {
        if (to[i] && !from[i]) {
            av_freep(&to[i]);
        } else if (from[i] && !to[i]) {
            to[i] = av_malloc(size);
            if (!to[i])
                continue; /* allocation failed: leave the slot empty rather
                           * than passing NULL to memcpy() below */
        }

        if (from[i])
            memcpy(to[i], from[i], size);
    }
}
1389 
1391 {
1392  H264Context *h = avctx->priv_data;
1393 
1394  if (!avctx->internal->is_copy)
1395  return 0;
1396  memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
1397  memset(h->pps_buffers, 0, sizeof(h->pps_buffers));
1398 
1399  h->context_initialized = 0;
1400 
1401  return 0;
1402 }
1403 
1404 #define copy_fields(to, from, start_field, end_field) \
1405  memcpy(&to->start_field, &from->start_field, \
1406  (char *)&to->end_field - (char *)&to->start_field)
1407 
1408 static int h264_slice_header_init(H264Context *, int);
1409 
1411 
1413  const AVCodecContext *src)
1414 {
1415  H264Context *h = dst->priv_data, *h1 = src->priv_data;
1416  int inited = h->context_initialized, err = 0;
1417  int context_reinitialized = 0;
1418  int i;
1419 
1420  if (dst == src || !h1->context_initialized)
1421  return 0;
1422 
1423  if (inited &&
1424  (h->width != h1->width ||
1425  h->height != h1->height ||
1426  h->mb_width != h1->mb_width ||
1427  h->mb_height != h1->mb_height ||
1428  h->sps.bit_depth_luma != h1->sps.bit_depth_luma ||
1429  h->sps.chroma_format_idc != h1->sps.chroma_format_idc ||
1430  h->sps.colorspace != h1->sps.colorspace)) {
1431 
1433 
1434  h->width = h1->width;
1435  h->height = h1->height;
1436  h->mb_height = h1->mb_height;
1437  h->mb_width = h1->mb_width;
1438  h->mb_num = h1->mb_num;
1439  h->mb_stride = h1->mb_stride;
1440  h->b_stride = h1->b_stride;
1441 
1442  if ((err = h264_slice_header_init(h, 1)) < 0) {
1443  av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed");
1444  return err;
1445  }
1446  context_reinitialized = 1;
1447 
1448  /* update linesize on resize. The decoder doesn't
1449  * necessarily call ff_h264_frame_start in the new thread */
1450  h->linesize = h1->linesize;
1451  h->uvlinesize = h1->uvlinesize;
1452 
1453  /* copy block_offset since frame_start may not be called */
1454  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
1455  }
1456 
1457  if (!inited) {
1458  for (i = 0; i < MAX_SPS_COUNT; i++)
1459  av_freep(h->sps_buffers + i);
1460 
1461  for (i = 0; i < MAX_PPS_COUNT; i++)
1462  av_freep(h->pps_buffers + i);
1463 
1464  memcpy(h, h1, sizeof(*h1));
1465  memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
1466  memset(h->pps_buffers, 0, sizeof(h->pps_buffers));
1467  memset(&h->er, 0, sizeof(h->er));
1468  memset(&h->me, 0, sizeof(h->me));
1469  h->context_initialized = 0;
1470 
1473 
1474  h->avctx = dst;
1475  h->DPB = NULL;
1476  h->cur_pic.f.extended_data = h->cur_pic.f.data;
1477 
1478  if (ff_h264_alloc_tables(h) < 0) {
1479  av_log(dst, AV_LOG_ERROR, "Could not allocate memory for h264\n");
1480  return AVERROR(ENOMEM);
1481  }
1482  context_init(h);
1483 
1484  for (i = 0; i < 2; i++) {
1485  h->rbsp_buffer[i] = NULL;
1486  h->rbsp_buffer_size[i] = 0;
1487  }
1488  h->bipred_scratchpad = NULL;
1489  h->edge_emu_buffer = NULL;
1490 
1491  h->thread_context[0] = h;
1492 
1493  h->dsp.clear_blocks(h->mb);
1494  h->dsp.clear_blocks(h->mb + (24 * 16 << h->pixel_shift));
1495  h->context_initialized = 1;
1496  }
1497 
1498  h->avctx->coded_height = h1->avctx->coded_height;
1499  h->avctx->coded_width = h1->avctx->coded_width;
1500  h->avctx->width = h1->avctx->width;
1501  h->avctx->height = h1->avctx->height;
1502  h->coded_picture_number = h1->coded_picture_number;
1503  h->first_field = h1->first_field;
1504  h->picture_structure = h1->picture_structure;
1505  h->qscale = h1->qscale;
1506  h->droppable = h1->droppable;
1507  h->data_partitioning = h1->data_partitioning;
1508  h->low_delay = h1->low_delay;
1509 
1510  memcpy(h->DPB, h1->DPB, h1->picture_count * sizeof(*h1->DPB));
1511 
1512  // reset s->picture[].f.extended_data to s->picture[].f.data
1513  for (i = 0; i < h->picture_count; i++)
1514  h->DPB[i].f.extended_data = h->DPB[i].f.data;
1515 
1516  h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
1517  h->cur_pic = h1->cur_pic;
1518  h->cur_pic.f.extended_data = h->cur_pic.f.data;
1519 
1520  h->workaround_bugs = h1->workaround_bugs;
1521  h->low_delay = h1->low_delay;
1522  h->droppable = h1->droppable;
1523 
1524  /* frame_start may not be called for the next thread (if it's decoding
1525  * a bottom field) so this has to be allocated here */
1526  err = alloc_scratch_buffers(h, h1->linesize);
1527  if (err < 0)
1528  return err;
1529 
1530  // extradata/NAL handling
1531  h->is_avc = h1->is_avc;
1532 
1533  // SPS/PPS
1534  copy_parameter_set((void **)h->sps_buffers, (void **)h1->sps_buffers,
1535  MAX_SPS_COUNT, sizeof(SPS));
1536  h->sps = h1->sps;
1537  copy_parameter_set((void **)h->pps_buffers, (void **)h1->pps_buffers,
1538  MAX_PPS_COUNT, sizeof(PPS));
1539  h->pps = h1->pps;
1540 
1541  // Dequantization matrices
1542  // FIXME these are big - can they be only copied when PPS changes?
1543  copy_fields(h, h1, dequant4_buffer, dequant4_coeff);
1544 
1545  for (i = 0; i < 6; i++)
1546  h->dequant4_coeff[i] = h->dequant4_buffer[0] +
1547  (h1->dequant4_coeff[i] - h1->dequant4_buffer[0]);
1548 
1549  for (i = 0; i < 6; i++)
1550  h->dequant8_coeff[i] = h->dequant8_buffer[0] +
1551  (h1->dequant8_coeff[i] - h1->dequant8_buffer[0]);
1552 
1553  h->dequant_coeff_pps = h1->dequant_coeff_pps;
1554 
1555  // POC timing
1556  copy_fields(h, h1, poc_lsb, redundant_pic_count);
1557 
1558  // reference lists
1559  copy_fields(h, h1, ref_count, list_count);
1560  copy_fields(h, h1, ref_list, intra_gb);
1561  copy_fields(h, h1, short_ref, cabac_init_idc);
1562 
1563  copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1);
1564  copy_picture_range(h->long_ref, h1->long_ref, 32, h, h1);
1565  copy_picture_range(h->delayed_pic, h1->delayed_pic,
1566  MAX_DELAYED_PIC_COUNT + 2, h, h1);
1567 
1568  h->last_slice_type = h1->last_slice_type;
1569 
1570  if (context_reinitialized)
1572 
1573  if (!h->cur_pic_ptr)
1574  return 0;
1575 
1576  if (!h->droppable) {
1578  h->prev_poc_msb = h->poc_msb;
1579  h->prev_poc_lsb = h->poc_lsb;
1580  }
1582  h->prev_frame_num = h->frame_num;
1584 
1585  return err;
1586 }
1587 
1589 {
1590  Picture *pic;
1591  int i, ret;
1592  const int pixel_shift = h->pixel_shift;
1593 
1594  h->next_output_pic = NULL;
1595 
1597  h->cur_pic_ptr = NULL;
1598 
1599  i = find_unused_picture(h);
1600  if (i < 0) {
1601  av_log(h->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1602  return i;
1603  }
1604  pic = &h->DPB[i];
1605 
1606  pic->f.reference = h->droppable ? 0 : h->picture_structure;
1609  /*
1610  * Zero key_frame here; IDR markings per slice in frame or fields are ORed
1611  * in later.
1612  * See decode_nal_units().
1613  */
1614  pic->f.key_frame = 0;
1615  pic->mmco_reset = 0;
1616 
1617  if ((ret = alloc_picture(h, pic)) < 0)
1618  return ret;
1619 
1620  h->cur_pic_ptr = pic;
1621  h->cur_pic = *h->cur_pic_ptr;
1622  h->cur_pic.f.extended_data = h->cur_pic.f.data;
1623 
1624  ff_er_frame_start(&h->er);
1625 
1626  assert(h->linesize && h->uvlinesize);
1627 
1628  for (i = 0; i < 16; i++) {
1629  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1630  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1631  }
1632  for (i = 0; i < 16; i++) {
1633  h->block_offset[16 + i] =
1634  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1635  h->block_offset[48 + 16 + i] =
1636  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1637  }
1638 
1639  /* can't be in alloc_tables because linesize isn't known there.
1640  * FIXME: redo bipred weight to not require extra buffer? */
1641  for (i = 0; i < h->slice_context_count; i++)
1642  if (h->thread_context[i]) {
1644  if (ret < 0)
1645  return ret;
1646  }
1647 
1648  /* Some macroblocks can be accessed before they're available in case
1649  * of lost slices, MBAFF or threading. */
1650  memset(h->slice_table, -1,
1651  (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));
1652 
1653  // s->decode = (s->flags & CODEC_FLAG_PSNR) || !s->encoding ||
1654  // s->current_picture.f.reference /* || h->contains_intra */ || 1;
1655 
1656  /* We mark the current picture as non-reference after allocating it, so
1657  * that if we break out due to an error it can be released automatically
1658  * in the next ff_MPV_frame_start().
1659  * SVQ3 as well as most other codecs have only last/next/current and thus
1660  * get released even with set reference, besides SVQ3 and others do not
1661  * mark frames as reference later "naturally". */
1662  if (h->avctx->codec_id != AV_CODEC_ID_SVQ3)
1663  h->cur_pic_ptr->f.reference = 0;
1664 
1665  h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;
1666 
1667  assert(h->cur_pic_ptr->long_ref == 0);
1668 
1669  return 0;
1670 }
1671 
1680 static void decode_postinit(H264Context *h, int setup_finished)
1681 {
1682  Picture *out = h->cur_pic_ptr;
1683  Picture *cur = h->cur_pic_ptr;
1684  int i, pics, out_of_order, out_idx;
1685  int invalid = 0, cnt = 0;
1686 
1688  h->cur_pic_ptr->f.pict_type = h->pict_type;
1689 
1690  if (h->next_output_pic)
1691  return;
1692 
1693  if (cur->field_poc[0] == INT_MAX || cur->field_poc[1] == INT_MAX) {
1694  /* FIXME: if we have two PAFF fields in one packet, we can't start
1695  * the next thread here. If we have one field per packet, we can.
1696  * The check in decode_nal_units() is not good enough to find this
1697  * yet, so we assume the worst for now. */
1698  // if (setup_finished)
1699  // ff_thread_finish_setup(h->avctx);
1700  return;
1701  }
1702 
1703  cur->f.interlaced_frame = 0;
1704  cur->f.repeat_pict = 0;
1705 
1706  /* Signal interlacing information externally. */
1707  /* Prioritize picture timing SEI information over used
1708  * decoding process if it exists. */
1709 
1710  if (h->sps.pic_struct_present_flag) {
1711  switch (h->sei_pic_struct) {
1712  case SEI_PIC_STRUCT_FRAME:
1713  break;
1716  cur->f.interlaced_frame = 1;
1717  break;
1721  cur->f.interlaced_frame = 1;
1722  else
1723  // try to flag soft telecine progressive
1725  break;
1728  /* Signal the possibility of telecined film externally
1729  * (pic_struct 5,6). From these hints, let the applications
1730  * decide if they apply deinterlacing. */
1731  cur->f.repeat_pict = 1;
1732  break;
1734  // Force progressive here, doubling interlaced frame is a bad idea.
1735  cur->f.repeat_pict = 2;
1736  break;
1738  cur->f.repeat_pict = 4;
1739  break;
1740  }
1741 
1742  if ((h->sei_ct_type & 3) &&
1744  cur->f.interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0;
1745  } else {
1746  /* Derive interlacing flag from used decoding process. */
1748  }
1750 
1751  if (cur->field_poc[0] != cur->field_poc[1]) {
1752  /* Derive top_field_first from field pocs. */
1753  cur->f.top_field_first = cur->field_poc[0] < cur->field_poc[1];
1754  } else {
1755  if (cur->f.interlaced_frame || h->sps.pic_struct_present_flag) {
1756  /* Use picture timing SEI information. Even if it is a
1757  * information of a past frame, better than nothing. */
1760  cur->f.top_field_first = 1;
1761  else
1762  cur->f.top_field_first = 0;
1763  } else {
1764  /* Most likely progressive */
1765  cur->f.top_field_first = 0;
1766  }
1767  }
1768 
1769  // FIXME do something with unavailable reference frames
1770 
1771  /* Sort B-frames into display order */
1772 
1776  h->low_delay = 0;
1777  }
1778 
1782  h->low_delay = 0;
1783  }
1784 
1785  pics = 0;
1786  while (h->delayed_pic[pics])
1787  pics++;
1788 
1789  assert(pics <= MAX_DELAYED_PIC_COUNT);
1790 
1791  h->delayed_pic[pics++] = cur;
1792  if (cur->f.reference == 0)
1793  cur->f.reference = DELAYED_PIC_REF;
1794 
1795  /* Frame reordering. This code takes pictures from coding order and sorts
1796  * them by their incremental POC value into display order. It supports POC
1797  * gaps, MMCO reset codes and random resets.
1798  * A "display group" can start either with a IDR frame (f.key_frame = 1),
1799  * and/or can be closed down with a MMCO reset code. In sequences where
1800  * there is no delay, we can't detect that (since the frame was already
1801  * output to the user), so we also set h->mmco_reset to detect the MMCO
1802  * reset code.
1803  * FIXME: if we detect insufficient delays (as per h->avctx->has_b_frames),
1804  * we increase the delay between input and output. All frames affected by
1805  * the lag (e.g. those that should have been output before another frame
1806  * that we already returned to the user) will be dropped. This is a bug
1807  * that we will fix later. */
1808  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
1809  cnt += out->poc < h->last_pocs[i];
1810  invalid += out->poc == INT_MIN;
1811  }
1812  if (!h->mmco_reset && !cur->f.key_frame &&
1813  cnt + invalid == MAX_DELAYED_PIC_COUNT && cnt > 0) {
1814  h->mmco_reset = 2;
1815  if (pics > 1)
1816  h->delayed_pic[pics - 2]->mmco_reset = 2;
1817  }
1818  if (h->mmco_reset || cur->f.key_frame) {
1819  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
1820  h->last_pocs[i] = INT_MIN;
1821  cnt = 0;
1822  invalid = MAX_DELAYED_PIC_COUNT;
1823  }
1824  out = h->delayed_pic[0];
1825  out_idx = 0;
1826  for (i = 1; i < MAX_DELAYED_PIC_COUNT &&
1827  h->delayed_pic[i] &&
1828  !h->delayed_pic[i - 1]->mmco_reset &&
1829  !h->delayed_pic[i]->f.key_frame;
1830  i++)
1831  if (h->delayed_pic[i]->poc < out->poc) {
1832  out = h->delayed_pic[i];
1833  out_idx = i;
1834  }
1835  if (h->avctx->has_b_frames == 0 &&
1836  (h->delayed_pic[0]->f.key_frame || h->mmco_reset))
1837  h->next_outputed_poc = INT_MIN;
1838  out_of_order = !out->f.key_frame && !h->mmco_reset &&
1839  (out->poc < h->next_outputed_poc);
1840 
1843  } else if (out_of_order && pics - 1 == h->avctx->has_b_frames &&
1844  h->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT) {
1845  if (invalid + cnt < MAX_DELAYED_PIC_COUNT) {
1846  h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, cnt);
1847  }
1848  h->low_delay = 0;
1849  } else if (h->low_delay &&
1850  ((h->next_outputed_poc != INT_MIN &&
1851  out->poc > h->next_outputed_poc + 2) ||
1852  cur->f.pict_type == AV_PICTURE_TYPE_B)) {
1853  h->low_delay = 0;
1854  h->avctx->has_b_frames++;
1855  }
1856 
1857  if (pics > h->avctx->has_b_frames) {
1858  out->f.reference &= ~DELAYED_PIC_REF;
1859  // for frame threading, the owner must be the second field's thread or
1860  // else the first thread can release the picture and reuse it unsafely
1861  out->owner2 = h;
1862  for (i = out_idx; h->delayed_pic[i]; i++)
1863  h->delayed_pic[i] = h->delayed_pic[i + 1];
1864  }
1865  memmove(h->last_pocs, &h->last_pocs[1],
1866  sizeof(*h->last_pocs) * (MAX_DELAYED_PIC_COUNT - 1));
1867  h->last_pocs[MAX_DELAYED_PIC_COUNT - 1] = cur->poc;
1868  if (!out_of_order && pics > h->avctx->has_b_frames) {
1869  h->next_output_pic = out;
1870  if (out->mmco_reset) {
1871  if (out_idx > 0) {
1872  h->next_outputed_poc = out->poc;
1873  h->delayed_pic[out_idx - 1]->mmco_reset = out->mmco_reset;
1874  } else {
1875  h->next_outputed_poc = INT_MIN;
1876  }
1877  } else {
1878  if (out_idx == 0 && pics > 1 && h->delayed_pic[0]->f.key_frame) {
1879  h->next_outputed_poc = INT_MIN;
1880  } else {
1881  h->next_outputed_poc = out->poc;
1882  }
1883  }
1884  h->mmco_reset = 0;
1885  } else {
1886  av_log(h->avctx, AV_LOG_DEBUG, "no picture\n");
1887  }
1888 
1889  if (setup_finished)
1891 }
1892 
1894  uint8_t *src_cb, uint8_t *src_cr,
1895  int linesize, int uvlinesize,
1896  int simple)
1897 {
1898  uint8_t *top_border;
1899  int top_idx = 1;
1900  const int pixel_shift = h->pixel_shift;
1901  int chroma444 = CHROMA444;
1902  int chroma422 = CHROMA422;
1903 
1904  src_y -= linesize;
1905  src_cb -= uvlinesize;
1906  src_cr -= uvlinesize;
1907 
1908  if (!simple && FRAME_MBAFF) {
1909  if (h->mb_y & 1) {
1910  if (!MB_MBAFF) {
1911  top_border = h->top_borders[0][h->mb_x];
1912  AV_COPY128(top_border, src_y + 15 * linesize);
1913  if (pixel_shift)
1914  AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
1915  if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
1916  if (chroma444) {
1917  if (pixel_shift) {
1918  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
1919  AV_COPY128(top_border + 48, src_cb + 15 * uvlinesize + 16);
1920  AV_COPY128(top_border + 64, src_cr + 15 * uvlinesize);
1921  AV_COPY128(top_border + 80, src_cr + 15 * uvlinesize + 16);
1922  } else {
1923  AV_COPY128(top_border + 16, src_cb + 15 * uvlinesize);
1924  AV_COPY128(top_border + 32, src_cr + 15 * uvlinesize);
1925  }
1926  } else if (chroma422) {
1927  if (pixel_shift) {
1928  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
1929  AV_COPY128(top_border + 48, src_cr + 15 * uvlinesize);
1930  } else {
1931  AV_COPY64(top_border + 16, src_cb + 15 * uvlinesize);
1932  AV_COPY64(top_border + 24, src_cr + 15 * uvlinesize);
1933  }
1934  } else {
1935  if (pixel_shift) {
1936  AV_COPY128(top_border + 32, src_cb + 7 * uvlinesize);
1937  AV_COPY128(top_border + 48, src_cr + 7 * uvlinesize);
1938  } else {
1939  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
1940  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
1941  }
1942  }
1943  }
1944  }
1945  } else if (MB_MBAFF) {
1946  top_idx = 0;
1947  } else
1948  return;
1949  }
1950 
1951  top_border = h->top_borders[top_idx][h->mb_x];
1952  /* There are two lines saved, the line above the top macroblock
1953  * of a pair, and the line above the bottom macroblock. */
1954  AV_COPY128(top_border, src_y + 16 * linesize);
1955  if (pixel_shift)
1956  AV_COPY128(top_border + 16, src_y + 16 * linesize + 16);
1957 
1958  if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
1959  if (chroma444) {
1960  if (pixel_shift) {
1961  AV_COPY128(top_border + 32, src_cb + 16 * linesize);
1962  AV_COPY128(top_border + 48, src_cb + 16 * linesize + 16);
1963  AV_COPY128(top_border + 64, src_cr + 16 * linesize);
1964  AV_COPY128(top_border + 80, src_cr + 16 * linesize + 16);
1965  } else {
1966  AV_COPY128(top_border + 16, src_cb + 16 * linesize);
1967  AV_COPY128(top_border + 32, src_cr + 16 * linesize);
1968  }
1969  } else if (chroma422) {
1970  if (pixel_shift) {
1971  AV_COPY128(top_border + 32, src_cb + 16 * uvlinesize);
1972  AV_COPY128(top_border + 48, src_cr + 16 * uvlinesize);
1973  } else {
1974  AV_COPY64(top_border + 16, src_cb + 16 * uvlinesize);
1975  AV_COPY64(top_border + 24, src_cr + 16 * uvlinesize);
1976  }
1977  } else {
1978  if (pixel_shift) {
1979  AV_COPY128(top_border + 32, src_cb + 8 * uvlinesize);
1980  AV_COPY128(top_border + 48, src_cr + 8 * uvlinesize);
1981  } else {
1982  AV_COPY64(top_border + 16, src_cb + 8 * uvlinesize);
1983  AV_COPY64(top_border + 24, src_cr + 8 * uvlinesize);
1984  }
1985  }
1986  }
1987 }
1988 
1990  uint8_t *src_cb, uint8_t *src_cr,
1991  int linesize, int uvlinesize,
1992  int xchg, int chroma444,
1993  int simple, int pixel_shift)
1994 {
1995  int deblock_topleft;
1996  int deblock_top;
1997  int top_idx = 1;
1998  uint8_t *top_border_m1;
1999  uint8_t *top_border;
2000 
2001  if (!simple && FRAME_MBAFF) {
2002  if (h->mb_y & 1) {
2003  if (!MB_MBAFF)
2004  return;
2005  } else {
2006  top_idx = MB_MBAFF ? 0 : 1;
2007  }
2008  }
2009 
2010  if (h->deblocking_filter == 2) {
2011  deblock_topleft = h->slice_table[h->mb_xy - 1 - h->mb_stride] == h->slice_num;
2012  deblock_top = h->top_type;
2013  } else {
2014  deblock_topleft = (h->mb_x > 0);
2015  deblock_top = (h->mb_y > !!MB_FIELD);
2016  }
2017 
2018  src_y -= linesize + 1 + pixel_shift;
2019  src_cb -= uvlinesize + 1 + pixel_shift;
2020  src_cr -= uvlinesize + 1 + pixel_shift;
2021 
2022  top_border_m1 = h->top_borders[top_idx][h->mb_x - 1];
2023  top_border = h->top_borders[top_idx][h->mb_x];
2024 
2025 #define XCHG(a, b, xchg) \
2026  if (pixel_shift) { \
2027  if (xchg) { \
2028  AV_SWAP64(b + 0, a + 0); \
2029  AV_SWAP64(b + 8, a + 8); \
2030  } else { \
2031  AV_COPY128(b, a); \
2032  } \
2033  } else if (xchg) \
2034  AV_SWAP64(b, a); \
2035  else \
2036  AV_COPY64(b, a);
2037 
2038  if (deblock_top) {
2039  if (deblock_topleft) {
2040  XCHG(top_border_m1 + (8 << pixel_shift),
2041  src_y - (7 << pixel_shift), 1);
2042  }
2043  XCHG(top_border + (0 << pixel_shift), src_y + (1 << pixel_shift), xchg);
2044  XCHG(top_border + (8 << pixel_shift), src_y + (9 << pixel_shift), 1);
2045  if (h->mb_x + 1 < h->mb_width) {
2046  XCHG(h->top_borders[top_idx][h->mb_x + 1],
2047  src_y + (17 << pixel_shift), 1);
2048  }
2049  }
2050  if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
2051  if (chroma444) {
2052  if (deblock_topleft) {
2053  XCHG(top_border_m1 + (24 << pixel_shift), src_cb - (7 << pixel_shift), 1);
2054  XCHG(top_border_m1 + (40 << pixel_shift), src_cr - (7 << pixel_shift), 1);
2055  }
2056  XCHG(top_border + (16 << pixel_shift), src_cb + (1 << pixel_shift), xchg);
2057  XCHG(top_border + (24 << pixel_shift), src_cb + (9 << pixel_shift), 1);
2058  XCHG(top_border + (32 << pixel_shift), src_cr + (1 << pixel_shift), xchg);
2059  XCHG(top_border + (40 << pixel_shift), src_cr + (9 << pixel_shift), 1);
2060  if (h->mb_x + 1 < h->mb_width) {
2061  XCHG(h->top_borders[top_idx][h->mb_x + 1] + (16 << pixel_shift), src_cb + (17 << pixel_shift), 1);
2062  XCHG(h->top_borders[top_idx][h->mb_x + 1] + (32 << pixel_shift), src_cr + (17 << pixel_shift), 1);
2063  }
2064  } else {
2065  if (deblock_top) {
2066  if (deblock_topleft) {
2067  XCHG(top_border_m1 + (16 << pixel_shift), src_cb - (7 << pixel_shift), 1);
2068  XCHG(top_border_m1 + (24 << pixel_shift), src_cr - (7 << pixel_shift), 1);
2069  }
2070  XCHG(top_border + (16 << pixel_shift), src_cb + 1 + pixel_shift, 1);
2071  XCHG(top_border + (24 << pixel_shift), src_cr + 1 + pixel_shift, 1);
2072  }
2073  }
2074  }
2075 }
2076 
2077 static av_always_inline int dctcoef_get(DCTELEM *mb, int high_bit_depth,
2078  int index)
2079 {
2080  if (high_bit_depth) {
2081  return AV_RN32A(((int32_t *)mb) + index);
2082  } else
2083  return AV_RN16A(mb + index);
2084 }
2085 
2086 static av_always_inline void dctcoef_set(DCTELEM *mb, int high_bit_depth,
2087  int index, int value)
2088 {
2089  if (high_bit_depth) {
2090  AV_WN32A(((int32_t *)mb) + index, value);
2091  } else
2092  AV_WN16A(mb + index, value);
2093 }
2094 
/*
 * Intra prediction + residual reconstruction for one luma plane of a
 * macroblock (4x4, 8x8 and 16x16 intra modes). NOTE(review): this listing
 * is a doxygen export; the line carrying the function name (2095) and
 * lines 2116, 2141 and 2186 are omitted, so the text below is incomplete —
 * presumably this is hl_decode_mb_predict_luma(); confirm against the
 * original h264.c.
 */
2096  int mb_type, int is_h264,
2097  int simple,
2098  int transform_bypass,
2099  int pixel_shift,
2100  int *block_offset,
2101  int linesize,
2102  uint8_t *dest_y, int p)
2103 {
2104  void (*idct_add)(uint8_t *dst, DCTELEM *block, int stride);
2105  void (*idct_dc_add)(uint8_t *dst, DCTELEM *block, int stride);
2106  int i;
2107  int qscale = p == 0 ? h->qscale : h->chroma_qp[p - 1];
2108  block_offset += 16 * p;
2109  if (IS_INTRA4x4(mb_type)) {
2110  if (IS_8x8DCT(mb_type)) {
2111  if (transform_bypass) {
2112  idct_dc_add =
2113  idct_add = h->dsp.add_pixels8;
2114  } else {
2115  idct_dc_add = h->h264dsp.h264_idct8_dc_add;
/* NOTE(review): line 2116 (the matching idct_add assignment) is missing here. */
2117  }
2118  for (i = 0; i < 16; i += 4) {
2119  uint8_t *const ptr = dest_y + block_offset[i];
2120  const int dir = h->intra4x4_pred_mode_cache[scan8[i]];
2121  if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
2122  h->hpc.pred8x8l_add[dir](ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
2123  } else {
2124  const int nnz = h->non_zero_count_cache[scan8[i + p * 16]];
2125  h->hpc.pred8x8l[dir](ptr, (h->topleft_samples_available << i) & 0x8000,
2126  (h->topright_samples_available << i) & 0x4000, linesize);
2127  if (nnz) {
/* nnz == 1 with a DC-only coefficient takes the cheaper DC-add path. */
2128  if (nnz == 1 && dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
2129  idct_dc_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
2130  else
2131  idct_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
2132  }
2133  }
2134  }
2135  } else {
2136  if (transform_bypass) {
2137  idct_dc_add =
2138  idct_add = h->dsp.add_pixels4;
2139  } else {
2140  idct_dc_add = h->h264dsp.h264_idct_dc_add;
/* NOTE(review): line 2141 (the matching idct_add assignment) is missing here. */
2142  }
2143  for (i = 0; i < 16; i++) {
2144  uint8_t *const ptr = dest_y + block_offset[i];
2145  const int dir = h->intra4x4_pred_mode_cache[scan8[i]];
2146 
2147  if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
2148  h->hpc.pred4x4_add[dir](ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
2149  } else {
2150  uint8_t *topright;
2151  int nnz, tr;
2152  uint64_t tr_high;
/* Down-left / vert-left prediction needs the top-right neighbours; if they
 * are unavailable, replicate the last available top sample instead. */
2153  if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
2154  const int topright_avail = (h->topright_samples_available << i) & 0x8000;
2155  assert(h->mb_y || linesize <= block_offset[i]);
2156  if (!topright_avail) {
2157  if (pixel_shift) {
2158  tr_high = ((uint16_t *)ptr)[3 - linesize / 2] * 0x0001000100010001ULL;
2159  topright = (uint8_t *)&tr_high;
2160  } else {
2161  tr = ptr[3 - linesize] * 0x01010101u;
2162  topright = (uint8_t *)&tr;
2163  }
2164  } else
2165  topright = ptr + (4 << pixel_shift) - linesize;
2166  } else
2167  topright = NULL;
2168 
2169  h->hpc.pred4x4[dir](ptr, topright, linesize);
2170  nnz = h->non_zero_count_cache[scan8[i + p * 16]];
2171  if (nnz) {
2172  if (is_h264) {
2173  if (nnz == 1 && dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
2174  idct_dc_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
2175  else
2176  idct_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
2177  } else if (CONFIG_SVQ3_DECODER)
2178  ff_svq3_add_idct_c(ptr, h->mb + i * 16 + p * 256, linesize, qscale, 0);
2179  }
2180  }
2181  }
2182  }
2183  } else {
/* Intra 16x16: full-plane prediction, then luma DC dequant/transform. */
2184  h->hpc.pred16x16[h->intra16x16_pred_mode](dest_y, linesize);
2185  if (is_h264) {
/* NOTE(review): line 2186 (a condition guarding this branch) is missing here. */
2187  if (!transform_bypass)
2188  h->h264dsp.h264_luma_dc_dequant_idct(h->mb + (p * 256 << pixel_shift),
2189  h->mb_luma_dc[p],
2190  h->dequant4_coeff[p][qscale][0]);
2191  else {
/* Bypass mode: scatter the 16 DC coefficients back to their block slots. */
2192  static const uint8_t dc_mapping[16] = {
2193  0 * 16, 1 * 16, 4 * 16, 5 * 16,
2194  2 * 16, 3 * 16, 6 * 16, 7 * 16,
2195  8 * 16, 9 * 16, 12 * 16, 13 * 16,
2196  10 * 16, 11 * 16, 14 * 16, 15 * 16 };
2197  for (i = 0; i < 16; i++)
2198  dctcoef_set(h->mb + (p * 256 << pixel_shift),
2199  pixel_shift, dc_mapping[i],
2200  dctcoef_get(h->mb_luma_dc[p],
2201  pixel_shift, i));
2202  }
2203  }
2204  } else if (CONFIG_SVQ3_DECODER)
2205  ff_svq3_luma_dc_dequant_idct_c(h->mb + p * 256,
2206  h->mb_luma_dc[p], qscale);
2207  }
2208 }
2209 
/*
 * Residual reconstruction (inverse transform + add) for one luma plane of a
 * non-intra-4x4 macroblock. NOTE(review): doxygen export — the line with the
 * function name (2210) and lines 2226-2227 are omitted, so this view is
 * incomplete; presumably hl_decode_mb_idct_luma(), confirm against h264.c.
 */
2211  int is_h264, int simple,
2212  int transform_bypass,
2213  int pixel_shift,
2214  int *block_offset,
2215  int linesize,
2216  uint8_t *dest_y, int p)
2217 {
2218  void (*idct_add)(uint8_t *dst, DCTELEM *block, int stride);
2219  int i;
2220  block_offset += 16 * p;
2221  if (!IS_INTRA4x4(mb_type)) {
2222  if (is_h264) {
2223  if (IS_INTRA16x16(mb_type)) {
2224  if (transform_bypass) {
2225  if (h->sps.profile_idc == 244 &&
/* NOTE(review): lines 2226-2227 (rest of this condition) are missing here. */
2228  h->hpc.pred16x16_add[h->intra16x16_pred_mode](dest_y, block_offset,
2229  h->mb + (p * 256 << pixel_shift),
2230  linesize);
2231  } else {
2232  for (i = 0; i < 16; i++)
2233  if (h->non_zero_count_cache[scan8[i + p * 16]] ||
2234  dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
2235  h->dsp.add_pixels4(dest_y + block_offset[i],
2236  h->mb + (i * 16 + p * 256 << pixel_shift),
2237  linesize);
2238  }
2239  } else {
2240  h->h264dsp.h264_idct_add16intra(dest_y, block_offset,
2241  h->mb + (p * 256 << pixel_shift),
2242  linesize,
2243  h->non_zero_count_cache + p * 5 * 8);
2244  }
2245  } else if (h->cbp & 15) {
/* Inter (or non-16x16-intra) MB with coded luma blocks. */
2246  if (transform_bypass) {
/* Step 4 for 8x8 transform (one add per 8x8 block), 1 for 4x4. */
2247  const int di = IS_8x8DCT(mb_type) ? 4 : 1;
2248  idct_add = IS_8x8DCT(mb_type) ? h->dsp.add_pixels8
2249  : h->dsp.add_pixels4;
2250  for (i = 0; i < 16; i += di)
2251  if (h->non_zero_count_cache[scan8[i + p * 16]])
2252  idct_add(dest_y + block_offset[i],
2253  h->mb + (i * 16 + p * 256 << pixel_shift),
2254  linesize);
2255  } else {
2256  if (IS_8x8DCT(mb_type))
2257  h->h264dsp.h264_idct8_add4(dest_y, block_offset,
2258  h->mb + (p * 256 << pixel_shift),
2259  linesize,
2260  h->non_zero_count_cache + p * 5 * 8);
2261  else
2262  h->h264dsp.h264_idct_add16(dest_y, block_offset,
2263  h->mb + (p * 256 << pixel_shift),
2264  linesize,
2265  h->non_zero_count_cache + p * 5 * 8);
2266  }
2267  }
2268  } else if (CONFIG_SVQ3_DECODER) {
2269  for (i = 0; i < 16; i++)
2270  if (h->non_zero_count_cache[scan8[i + p * 16]] || h->mb[i * 16 + p * 256]) {
2271  // FIXME benchmark weird rule, & below
2272  uint8_t *const ptr = dest_y + block_offset[i];
2273  ff_svq3_add_idct_c(ptr, h->mb + i * 16 + p * 256, linesize,
2274  h->qscale, IS_INTRA(mb_type) ? 1 : 0);
2275  }
2276  }
2277  }
2278 }
2279 
2280 #define BITS 8
2281 #define SIMPLE 1
2282 #include "h264_mb_template.c"
2283 
2284 #undef BITS
2285 #define BITS 16
2286 #include "h264_mb_template.c"
2287 
2288 #undef SIMPLE
2289 #define SIMPLE 0
2290 #include "h264_mb_template.c"
2291 
/*
 * Top-level macroblock reconstruction dispatcher: picks the specialized
 * template instantiation (444 vs 420/422, 8- vs 16-bit, simple vs complex)
 * generated by the h264_mb_template.c includes above. NOTE(review): the
 * doxygen export omits line 2292 with the function name — presumably
 * ff_h264_hl_decode_mb(H264Context *h); confirm against h264.c.
 */
2293 {
2294  const int mb_xy = h->mb_xy;
2295  const int mb_type = h->cur_pic.f.mb_type[mb_xy];
/* The "complex" slow path handles PCM macroblocks, lossless (qscale == 0)
 * and other non-fast cases; CONFIG_SMALL builds only compile that path. */
2296  int is_complex = CONFIG_SMALL || h->is_complex || IS_INTRA_PCM(mb_type) || h->qscale == 0;
2297 
2298  if (CHROMA444) {
2299  if (is_complex || h->pixel_shift)
2300  hl_decode_mb_444_complex(h);
2301  else
2302  hl_decode_mb_444_simple_8(h);
2303  } else if (is_complex) {
2304  hl_decode_mb_complex(h);
2305  } else if (h->pixel_shift) {
2306  hl_decode_mb_simple_16(h);
2307  } else
2308  hl_decode_mb_simple_8(h);
2309 }
2310 
/*
 * Parse the explicit weighted-prediction table (pred_weight_table) from the
 * slice header bitstream into h->luma_weight / h->chroma_weight.
 * NOTE(review): doxygen export — the function name line (2311) and lines
 * 2318/2320 (log2 weight denom parsing) and 2366 (the condition that breaks
 * out after list 0 for non-B slices) are omitted; this view is incomplete.
 */
2312 {
2313  int list, i;
2314  int luma_def, chroma_def;
2315 
2316  h->use_weight = 0;
2317  h->use_weight_chroma = 0;
2319  if (h->sps.chroma_format_idc)
2321  luma_def = 1 << h->luma_log2_weight_denom;
2322  chroma_def = 1 << h->chroma_log2_weight_denom;
2323 
2324  for (list = 0; list < 2; list++) {
2325  h->luma_weight_flag[list] = 0;
2326  h->chroma_weight_flag[list] = 0;
2327  for (i = 0; i < h->ref_count[list]; i++) {
2328  int luma_weight_flag, chroma_weight_flag;
2329 
2330  luma_weight_flag = get_bits1(&h->gb);
2331  if (luma_weight_flag) {
2332  h->luma_weight[i][list][0] = get_se_golomb(&h->gb);
2333  h->luma_weight[i][list][1] = get_se_golomb(&h->gb);
/* Weighting is only "used" if it differs from the implicit default
 * (weight == 1 << denom, offset == 0). */
2334  if (h->luma_weight[i][list][0] != luma_def ||
2335  h->luma_weight[i][list][1] != 0) {
2336  h->use_weight = 1;
2337  h->luma_weight_flag[list] = 1;
2338  }
2339  } else {
2340  h->luma_weight[i][list][0] = luma_def;
2341  h->luma_weight[i][list][1] = 0;
2342  }
2343 
2344  if (h->sps.chroma_format_idc) {
2345  chroma_weight_flag = get_bits1(&h->gb);
2346  if (chroma_weight_flag) {
2347  int j;
2348  for (j = 0; j < 2; j++) {
2349  h->chroma_weight[i][list][j][0] = get_se_golomb(&h->gb);
2350  h->chroma_weight[i][list][j][1] = get_se_golomb(&h->gb);
2351  if (h->chroma_weight[i][list][j][0] != chroma_def ||
2352  h->chroma_weight[i][list][j][1] != 0) {
2353  h->use_weight_chroma = 1;
2354  h->chroma_weight_flag[list] = 1;
2355  }
2356  }
2357  } else {
2358  int j;
2359  for (j = 0; j < 2; j++) {
2360  h->chroma_weight[i][list][j][0] = chroma_def;
2361  h->chroma_weight[i][list][j][1] = 0;
2362  }
2363  }
2364  }
2365  }
2367  break;
2368  }
2369  h->use_weight = h->use_weight || h->use_weight_chroma;
2370  return 0;
2371 }
2372 
2378 static void implicit_weight_table(H264Context *h, int field)
2379 {
2380  int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1;
2381 
2382  for (i = 0; i < 2; i++) {
2383  h->luma_weight_flag[i] = 0;
2384  h->chroma_weight_flag[i] = 0;
2385  }
2386 
2387  if (field < 0) {
2388  if (h->picture_structure == PICT_FRAME) {
2389  cur_poc = h->cur_pic_ptr->poc;
2390  } else {
2391  cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure - 1];
2392  }
2393  if (h->ref_count[0] == 1 && h->ref_count[1] == 1 && !FRAME_MBAFF &&
2394  h->ref_list[0][0].poc + h->ref_list[1][0].poc == 2 * cur_poc) {
2395  h->use_weight = 0;
2396  h->use_weight_chroma = 0;
2397  return;
2398  }
2399  ref_start = 0;
2400  ref_count0 = h->ref_count[0];
2401  ref_count1 = h->ref_count[1];
2402  } else {
2403  cur_poc = h->cur_pic_ptr->field_poc[field];
2404  ref_start = 16;
2405  ref_count0 = 16 + 2 * h->ref_count[0];
2406  ref_count1 = 16 + 2 * h->ref_count[1];
2407  }
2408 
2409  h->use_weight = 2;
2410  h->use_weight_chroma = 2;
2411  h->luma_log2_weight_denom = 5;
2412  h->chroma_log2_weight_denom = 5;
2413 
2414  for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
2415  int poc0 = h->ref_list[0][ref0].poc;
2416  for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
2417  int w = 32;
2418  if (!h->ref_list[0][ref0].long_ref && !h->ref_list[1][ref1].long_ref) {
2419  int poc1 = h->ref_list[1][ref1].poc;
2420  int td = av_clip(poc1 - poc0, -128, 127);
2421  if (td) {
2422  int tb = av_clip(cur_poc - poc0, -128, 127);
2423  int tx = (16384 + (FFABS(td) >> 1)) / td;
2424  int dist_scale_factor = (tb * tx + 32) >> 8;
2425  if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
2426  w = 64 - dist_scale_factor;
2427  }
2428  }
2429  if (field < 0) {
2430  h->implicit_weight[ref0][ref1][0] =
2431  h->implicit_weight[ref0][ref1][1] = w;
2432  } else {
2433  h->implicit_weight[ref0][ref1][field] = w;
2434  }
2435  }
2436  }
2437 }
2438 
/*
 * Reset the decoder's previous frame_num/POC state after an IDR picture.
 * NOTE(review): the doxygen export omits line 2444 here (presumably the
 * call that drops all reference frames); this view is incomplete.
 */
2442 static void idr(H264Context *h)
2443 {
2445  h->prev_frame_num = 0;
2446  h->prev_frame_num_offset = 0;
2447  h->prev_poc_msb =
2448  h->prev_poc_lsb = 0;
2449 }
2450 
2451 /* forget old pics after a seek */
2452 static void flush_change(H264Context *h)
2453 {
2454  int i;
2455  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
2456  h->last_pocs[i] = INT_MIN;
2457  h->outputed_poc = h->next_outputed_poc = INT_MIN;
2458  h->prev_interlaced_frame = 1;
2459  idr(h);
2460  if (h->cur_pic_ptr)
2461  h->cur_pic_ptr->f.reference = 0;
2462  h->first_field = 0;
2463  memset(h->ref_list[0], 0, sizeof(h->ref_list[0]));
2464  memset(h->ref_list[1], 0, sizeof(h->ref_list[1]));
2465  memset(h->default_ref_list[0], 0, sizeof(h->default_ref_list[0]));
2466  memset(h->default_ref_list[1], 0, sizeof(h->default_ref_list[1]));
2467  ff_h264_reset_sei(h);
2468 }
2469 
2470 /* forget old pics after a seek */
/*
 * AVCodec.flush callback: drop delayed pictures, reset per-stream state via
 * flush_change(), release all DPB frame buffers and reset the parser.
 * NOTE(review): doxygen export — lines 2493 and 2495 (additional
 * parse_context field resets) are omitted; this view is incomplete.
 */
2471 static void flush_dpb(AVCodecContext *avctx)
2472 {
2473  H264Context *h = avctx->priv_data;
2474  int i;
2475 
2476  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
2477  if (h->delayed_pic[i])
2478  h->delayed_pic[i]->f.reference = 0;
2479  h->delayed_pic[i] = NULL;
2480  }
2481 
2482  flush_change(h);
2483 
2484  for (i = 0; i < h->picture_count; i++) {
2485  if (h->DPB[i].f.data[0])
2486  free_frame_buffer(h, &h->DPB[i]);
2487  }
2488  h->cur_pic_ptr = NULL;
2489 
2490  h->mb_x = h->mb_y = 0;
2491 
2492  h->parse_context.state = -1;
2494  h->parse_context.overread = 0;
2496  h->parse_context.index = 0;
2497  h->parse_context.last_index = 0;
2498 }
2499 
/*
 * Derive the picture order count (POC) of the current picture for all three
 * poc_type modes of the SPS, storing the per-field POCs and their minimum
 * as the frame POC. NOTE(review): doxygen export — lines 2506, 2568 and
 * 2570 are omitted (presumably the frame_num_offset carry-over and the
 * field-structure guards around the field_poc stores); incomplete view.
 */
2500 static int init_poc(H264Context *h)
2501 {
2502  const int max_frame_num = 1 << h->sps.log2_max_frame_num;
2503  int field_poc[2];
2504  Picture *cur = h->cur_pic_ptr;
2505 
2507  if (h->frame_num < h->prev_frame_num)
2508  h->frame_num_offset += max_frame_num;
2509 
2510  if (h->sps.poc_type == 0) {
/* Type 0: poc_lsb is coded, MSB is inferred from wrap-around vs prev. */
2511  const int max_poc_lsb = 1 << h->sps.log2_max_poc_lsb;
2512 
2513  if (h->poc_lsb < h->prev_poc_lsb && h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb / 2)
2514  h->poc_msb = h->prev_poc_msb + max_poc_lsb;
2515  else if (h->poc_lsb > h->prev_poc_lsb && h->prev_poc_lsb - h->poc_lsb < -max_poc_lsb / 2)
2516  h->poc_msb = h->prev_poc_msb - max_poc_lsb;
2517  else
2518  h->poc_msb = h->prev_poc_msb;
2519  field_poc[0] =
2520  field_poc[1] = h->poc_msb + h->poc_lsb;
2521  if (h->picture_structure == PICT_FRAME)
2522  field_poc[1] += h->delta_poc_bottom;
2523  } else if (h->sps.poc_type == 1) {
/* Type 1: POC derived from frame_num and the SPS offset cycle. */
2524  int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
2525  int i;
2526 
2527  if (h->sps.poc_cycle_length != 0)
2528  abs_frame_num = h->frame_num_offset + h->frame_num;
2529  else
2530  abs_frame_num = 0;
2531 
2532  if (h->nal_ref_idc == 0 && abs_frame_num > 0)
2533  abs_frame_num--;
2534 
2535  expected_delta_per_poc_cycle = 0;
2536  for (i = 0; i < h->sps.poc_cycle_length; i++)
2537  // FIXME integrate during sps parse
2538  expected_delta_per_poc_cycle += h->sps.offset_for_ref_frame[i];
2539 
2540  if (abs_frame_num > 0) {
2541  int poc_cycle_cnt = (abs_frame_num - 1) / h->sps.poc_cycle_length;
2542  int frame_num_in_poc_cycle = (abs_frame_num - 1) % h->sps.poc_cycle_length;
2543 
2544  expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
2545  for (i = 0; i <= frame_num_in_poc_cycle; i++)
2546  expectedpoc = expectedpoc + h->sps.offset_for_ref_frame[i];
2547  } else
2548  expectedpoc = 0;
2549 
2550  if (h->nal_ref_idc == 0)
2551  expectedpoc = expectedpoc + h->sps.offset_for_non_ref_pic;
2552 
2553  field_poc[0] = expectedpoc + h->delta_poc[0];
2554  field_poc[1] = field_poc[0] + h->sps.offset_for_top_to_bottom_field;
2555 
2556  if (h->picture_structure == PICT_FRAME)
2557  field_poc[1] += h->delta_poc[1];
2558  } else {
/* Type 2: POC follows decoding order (2 * frame_num, non-refs one less). */
2559  int poc = 2 * (h->frame_num_offset + h->frame_num);
2560 
2561  if (!h->nal_ref_idc)
2562  poc--;
2563 
2564  field_poc[0] = poc;
2565  field_poc[1] = poc;
2566  }
2567 
2569  h->cur_pic_ptr->field_poc[0] = field_poc[0];
2571  h->cur_pic_ptr->field_poc[1] = field_poc[1];
2572  cur->poc = FFMIN(cur->field_poc[0], cur->field_poc[1]);
2573 
2574  return 0;
2575 }
2576 
/*
 * Build the decoder's permuted zigzag/field scan tables (4x4 and 8x8) from
 * the canonical scan orders; the T() macros transpose the scan positions to
 * match the coefficient memory layout. NOTE(review): doxygen export — the
 * function name line (2580, presumably init_scan_tables) and lines
 * 2592/2594 and 2598-2603/2606-2607/2609-2610 (the transform-bypass _q0
 * table setup) are omitted; this view is incomplete.
 */
2581 {
2582  int i;
2583  for (i = 0; i < 16; i++) {
2584 #define T(x) (x >> 2) | ((x << 2) & 0xF)
2585  h->zigzag_scan[i] = T(zigzag_scan[i]);
2586  h->field_scan[i] = T(field_scan[i]);
2587 #undef T
2588  }
2589  for (i = 0; i < 64; i++) {
2590 #define T(x) (x >> 3) | ((x & 7) << 3)
2591  h->zigzag_scan8x8[i] = T(ff_zigzag_direct[i]);
2593  h->field_scan8x8[i] = T(field_scan8x8[i]);
2595 #undef T
2596  }
2597  if (h->sps.transform_bypass) { // FIXME same ugly
2604  } else {
2605  h->zigzag_scan_q0 = h->zigzag_scan;
2608  h->field_scan_q0 = h->field_scan;
2611  }
2612 }
2613 
/*
 * Finish decoding of the current field/frame: report threading progress,
 * latch prev_* POC/frame_num state, close the hwaccel frame, run error
 * concealment for frames, and redraw edges if decoding was incomplete.
 * NOTE(review): doxygen export — several lines are omitted (2622, 2624-2626,
 * 2629-2630, 2634, 2636, 2645-2647, 2671, 2674, 2679), so conditions and
 * calls are partially missing from this view.
 */
2614 static int field_end(H264Context *h, int in_setup)
2615 {
2616  AVCodecContext *const avctx = h->avctx;
2617  int err = 0;
2618  h->mb_y = 0;
2619 
2620  if (!in_setup && !h->droppable)
2621  ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX,
2623 
2627 
2628  if (in_setup || !(avctx->active_thread_type & FF_THREAD_FRAME)) {
2629  if (!h->droppable) {
2631  h->prev_poc_msb = h->poc_msb;
2632  h->prev_poc_lsb = h->poc_lsb;
2633  }
2635  h->prev_frame_num = h->frame_num;
2637  }
2638 
2639  if (avctx->hwaccel) {
2640  if (avctx->hwaccel->end_frame(avctx) < 0)
2641  av_log(avctx, AV_LOG_ERROR,
2642  "hardware accelerator failed to decode picture\n");
2643  }
2644 
2648 
2649  /*
2650  * FIXME: Error handling code does not seem to support interlaced
2651  * when slices span multiple rows
2652  * The ff_er_add_slice calls don't work right for bottom
2653  * fields; they cause massive erroneous error concealing
2654  * Error marking covers both fields (top and bottom).
2655  * This causes a mismatched s->error_count
2656  * and a bad error table. Further, the error count goes to
2657  * INT_MAX when called for bottom field, because mb_y is
2658  * past end by one (callers fault) and resync_mb_y != 0
2659  * causes problems for the first MB line, too.
2660  */
2661  if (!FIELD_PICTURE) {
2662  h->er.cur_pic = h->cur_pic_ptr;
2663  h->er.last_pic = h->ref_count[0] ? &h->ref_list[0][0] : NULL;
2664  h->er.next_pic = h->ref_count[1] ? &h->ref_list[1][0] : NULL;
2665  ff_er_frame_end(&h->er);
2666  }
2667 
2668  /* redraw edges for the frame if decoding didn't complete */
2669  if (h->er.error_count &&
2670  !h->avctx->hwaccel &&
2672  h->cur_pic_ptr->f.reference &&
2673  !(h->flags & CODEC_FLAG_EMU_EDGE)) {
2675  int hshift = desc->log2_chroma_w;
2676  int vshift = desc->log2_chroma_h;
2677  h->dsp.draw_edges(h->cur_pic.f.data[0], h->linesize,
2678  h->mb_width * 16, h->mb_height * 16,
2680  EDGE_TOP | EDGE_BOTTOM);
2681  h->dsp.draw_edges(h->cur_pic.f.data[1], h->uvlinesize,
2682  (h->mb_width * 16) >> hshift, (h->mb_height * 16) >> vshift,
2683  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
2684  EDGE_TOP | EDGE_BOTTOM);
2685  h->dsp.draw_edges(h->cur_pic.f.data[2], h->uvlinesize,
2686  (h->mb_width * 16) >> hshift, (h->mb_height * 16) >> vshift,
2687  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
2688  EDGE_TOP | EDGE_BOTTOM);
2689  }
2690  emms_c();
2691 
2692  h->current_slice = 0;
2693 
2694  return err;
2695 }
2696 
/*
 * Replicate the per-picture decoding state from the master context into a
 * per-slice-thread context (shallow copies; both contexts then reference
 * the same picture buffers). NOTE(review): doxygen export — line 2711
 * (presumably the prev_frame_num_offset copy) is omitted; incomplete view.
 */
2700 static int clone_slice(H264Context *dst, H264Context *src)
2701 {
2702  memcpy(dst->block_offset, src->block_offset, sizeof(dst->block_offset));
2703  dst->cur_pic_ptr = src->cur_pic_ptr;
2704  dst->cur_pic = src->cur_pic;
2705  dst->linesize = src->linesize;
2706  dst->uvlinesize = src->uvlinesize;
2707  dst->first_field = src->first_field;
2708 
2709  dst->prev_poc_msb = src->prev_poc_msb;
2710  dst->prev_poc_lsb = src->prev_poc_lsb;
2712  dst->prev_frame_num = src->prev_frame_num;
2713  dst->short_ref_count = src->short_ref_count;
2714 
2715  memcpy(dst->short_ref, src->short_ref, sizeof(dst->short_ref));
2716  memcpy(dst->long_ref, src->long_ref, sizeof(dst->long_ref));
2717  memcpy(dst->default_ref_list, src->default_ref_list, sizeof(dst->default_ref_list));
2718  memcpy(dst->ref_list, src->ref_list, sizeof(dst->ref_list));
2719 
2720  memcpy(dst->dequant4_coeff, src->dequant4_coeff, sizeof(src->dequant4_coeff));
2721  memcpy(dst->dequant8_coeff, src->dequant8_coeff, sizeof(src->dequant8_coeff));
2722 
2723  return 0;
2724 }
2725 
/*
 * Map the SPS profile_idc plus constraint flags to an FF_PROFILE_* value
 * (adds CONSTRAINED / INTRA modifiers). NOTE(review): doxygen export — the
 * function name line (2733, presumably ff_h264_get_profile(SPS *sps)) and
 * the case labels at 2738 and 2742-2744 are omitted; incomplete view.
 */
2734 {
2735  int profile = sps->profile_idc;
2736 
2737  switch (sps->profile_idc) {
2739  // constraint_set1_flag set to 1
2740  profile |= (sps->constraint_set_flags & 1 << 1) ? FF_PROFILE_H264_CONSTRAINED : 0;
2741  break;
2745  // constraint_set3_flag set to 1
2746  profile |= (sps->constraint_set_flags & 1 << 3) ? FF_PROFILE_H264_INTRA : 0;
2747  break;
2748  }
2749 
2750  return profile;
2751 }
2752 
/*
 * Apply SPS-derived parameters to the decoder/AVCodecContext: low-delay
 * detection, bit depth / pixel shift, and (re)initialization of the DSP
 * function tables for the stream's bit depth and chroma format.
 * NOTE(review): doxygen export — the function name line (2753) and many
 * interior lines (2756, 2769, 2771, 2773, 2778-2779, 2782, 2784, 2788) are
 * omitted, so several conditions and calls are only partially visible.
 */
2754 {
2755  if (h->flags & CODEC_FLAG_LOW_DELAY ||
2757  !h->sps.num_reorder_frames)) {
2758  if (h->avctx->has_b_frames > 1 || h->delayed_pic[0])
2759  av_log(h->avctx, AV_LOG_WARNING, "Delayed frames seen. "
2760  "Reenabling low delay requires a codec flush.\n");
2761  else
2762  h->low_delay = 1;
2763  }
2764 
2765  if (h->avctx->has_b_frames < 2)
2766  h->avctx->has_b_frames = !h->low_delay;
2767 
2768  if (h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma ||
2770  if (h->avctx->codec &&
2772  (h->sps.bit_depth_luma != 8 || h->sps.chroma_format_idc > 1)) {
2774  "VDPAU decoding does not support video colorspace.\n");
2775  return AVERROR_INVALIDDATA;
2776  }
/* Only 8-10 bit luma is supported by this decoder build. */
2777  if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 10) {
2780  h->pixel_shift = h->sps.bit_depth_luma > 8;
2781 
2783  h->sps.chroma_format_idc);
2785  h->sps.chroma_format_idc);
2786  h->dsp.dct_bits = h->sps.bit_depth_luma > 8 ? 32 : 16;
2787  ff_dsputil_init(&h->dsp, h->avctx);
2789  } else {
2790  av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth: %d\n",
2791  h->sps.bit_depth_luma);
2792  return AVERROR_INVALIDDATA;
2793  }
2794  }
2795  return 0;
2796 }
2797 
/*
 * Choose the output pixel format from the SPS bit depth, chroma format and
 * colorspace (GBR planar for RGB-coded 4:4:4 streams). NOTE(review):
 * doxygen export — the function name line (2798, presumably
 * get_pixel_format(H264Context *h)) and lines 2828-2838/2842 are omitted,
 * so the 8-bit 4:4:4/4:2:2 returns and the default-case av_log are only
 * partially visible.
 */
2799 {
2800  switch (h->sps.bit_depth_luma) {
2801  case 9:
2802  if (CHROMA444) {
2803  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
2804  return AV_PIX_FMT_GBRP9;
2805  } else
2806  return AV_PIX_FMT_YUV444P9;
2807  } else if (CHROMA422)
2808  return AV_PIX_FMT_YUV422P9;
2809  else
2810  return AV_PIX_FMT_YUV420P9;
2811  break;
2812  case 10:
2813  if (CHROMA444) {
2814  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
2815  return AV_PIX_FMT_GBRP10;
2816  } else
2817  return AV_PIX_FMT_YUV444P10;
2818  } else if (CHROMA422)
2819  return AV_PIX_FMT_YUV422P10;
2820  else
2821  return AV_PIX_FMT_YUV420P10;
2822  break;
2823  case 8:
2824  if (CHROMA444) {
2825  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
2826  return AV_PIX_FMT_GBRP;
2827  } else
2830  } else if (CHROMA422) {
2833  } else {
/* 8-bit 4:2:0: let the user/hwaccel pick via the get_format callback. */
2834  return h->avctx->get_format(h->avctx, h->avctx->codec->pix_fmts ?
2835  h->avctx->codec->pix_fmts :
2839  }
2840  break;
2841  default:
2843  "Unsupported bit depth: %d\n", h->sps.bit_depth_luma);
2844  return AVERROR_INVALIDDATA;
2845  }
2846 }
2847 
/*
 * (Re)initialize context state that depends on the active SPS: picture
 * dimensions, timing, scan tables, macroblock tables, and — for
 * slice-threaded decoding — clone per-thread contexts. Returns 0 on
 * success, a negative error code on failure. NOTE(review): doxygen
 * export — several interior lines are omitted (2851, 2855, 2857-2858,
 * 2865, 2869, 2878, 2895, 2915-2916, 2919, 2926), so some conditions,
 * calls and field copies are only partially visible.
 */
2848 static int h264_slice_header_init(H264Context *h, int reinit)
2849 {
2850  int nb_slices = (HAVE_THREADS &&
2852  h->avctx->thread_count : 1;
2853  int i;
2854 
2856  h->avctx->sample_aspect_ratio = h->sps.sar;
2859  &h->chroma_x_shift, &h->chroma_y_shift);
2860 
2861  if (h->sps.timing_info_present_flag) {
2862  int64_t den = h->sps.time_scale;
/* Old x264 builds (< 44) wrote half the correct time_scale. */
2863  if (h->x264_build < 44U)
2864  den *= 2;
2866  h->sps.num_units_in_tick, den, 1 << 30);
2867  }
2868 
2870 
2871  if (reinit)
2872  free_tables(h, 0);
2873  h->first_field = 0;
2874  h->prev_interlaced_frame = 1;
2875 
2876  init_scan_tables(h);
2877  if (ff_h264_alloc_tables(h) < 0) {
2879  "Could not allocate memory for h264\n");
2880  return AVERROR(ENOMEM);
2881  }
2882 
/* Cap the slice-thread count at MAX_THREADS and the MB-row count. */
2883  if (nb_slices > MAX_THREADS || (nb_slices > h->mb_height && h->mb_height)) {
2884  int max_slices;
2885  if (h->mb_height)
2886  max_slices = FFMIN(MAX_THREADS, h->mb_height);
2887  else
2888  max_slices = MAX_THREADS;
2889  av_log(h->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
2890  " reducing to %d\n", nb_slices, max_slices);
2891  nb_slices = max_slices;
2892  }
2893  h->slice_context_count = nb_slices;
2894 
2896  if (context_init(h) < 0) {
2897  av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
2898  return -1;
2899  }
2900  } else {
/* Slice threading: clone the master context for each worker thread. */
2901  for (i = 1; i < h->slice_context_count; i++) {
2902  H264Context *c;
2903  c = h->thread_context[i] = av_mallocz(sizeof(H264Context));
2904  c->avctx = h->avctx;
2905  c->dsp = h->dsp;
2906  c->vdsp = h->vdsp;
2907  c->h264dsp = h->h264dsp;
2908  c->sps = h->sps;
2909  c->pps = h->pps;
2910  c->pixel_shift = h->pixel_shift;
2911  c->width = h->width;
2912  c->height = h->height;
2913  c->linesize = h->linesize;
2914  c->uvlinesize = h->uvlinesize;
2917  c->qscale = h->qscale;
2918  c->droppable = h->droppable;
2920  c->low_delay = h->low_delay;
2921  c->mb_width = h->mb_width;
2922  c->mb_height = h->mb_height;
2923  c->mb_stride = h->mb_stride;
2924  c->mb_num = h->mb_num;
2925  c->flags = h->flags;
2927  c->pict_type = h->pict_type;
2928 
2929  init_scan_tables(c);
2930  clone_tables(c, h, i);
2931  c->context_initialized = 1;
2932  }
2933 
2934  for (i = 0; i < h->slice_context_count; i++)
2935  if (context_init(h->thread_context[i]) < 0) {
2936  av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
2937  return -1;
2938  }
2939  }
2940 
2941  h->context_initialized = 1;
2942 
2943  return 0;
2944 }
2945 
2957 {
2958  unsigned int first_mb_in_slice;
2959  unsigned int pps_id;
2960  int num_ref_idx_active_override_flag, max_refs, ret;
2961  unsigned int slice_type, tmp, i, j;
2962  int default_ref_list_done = 0;
2963  int last_pic_structure, last_pic_droppable;
2964  int needs_reinit = 0;
2965 
2966  /* FIXME: 2tap qpel isn't implemented for high bit depth. */
2967  if ((h->avctx->flags2 & CODEC_FLAG2_FAST) &&
2968  !h->nal_ref_idc && !h->pixel_shift) {
2971  } else {
2974  }
2975 
2976  first_mb_in_slice = get_ue_golomb(&h->gb);
2977 
2978  if (first_mb_in_slice == 0) { // FIXME better field boundary detection
2979  if (h0->current_slice && FIELD_PICTURE) {
2980  field_end(h, 1);
2981  }
2982 
2983  h0->current_slice = 0;
2984  if (!h0->first_field) {
2985  if (h->cur_pic_ptr && !h->droppable &&
2986  h->cur_pic_ptr->owner2 == h) {
2987  ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX,
2989  }
2990  h->cur_pic_ptr = NULL;
2991  }
2992  }
2993 
2994  slice_type = get_ue_golomb_31(&h->gb);
2995  if (slice_type > 9) {
2997  "slice type too large (%d) at %d %d\n",
2998  h->slice_type, h->mb_x, h->mb_y);
2999  return -1;
3000  }
3001  if (slice_type > 4) {
3002  slice_type -= 5;
3003  h->slice_type_fixed = 1;
3004  } else
3005  h->slice_type_fixed = 0;
3006 
3007  slice_type = golomb_to_pict_type[slice_type];
3008  if (slice_type == AV_PICTURE_TYPE_I ||
3009  (h0->current_slice != 0 && slice_type == h0->last_slice_type)) {
3010  default_ref_list_done = 1;
3011  }
3012  h->slice_type = slice_type;
3013  h->slice_type_nos = slice_type & 3;
3014 
3015  if (h->nal_unit_type == NAL_IDR_SLICE &&
3017  av_log(h->avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
3018  return AVERROR_INVALIDDATA;
3019  }
3020 
3021  // to make a few old functions happy, it's wrong though
3022  h->pict_type = h->slice_type;
3023 
3024  pps_id = get_ue_golomb(&h->gb);
3025  if (pps_id >= MAX_PPS_COUNT) {
3026  av_log(h->avctx, AV_LOG_ERROR, "pps_id out of range\n");
3027  return -1;
3028  }
3029  if (!h0->pps_buffers[pps_id]) {
3031  "non-existing PPS %u referenced\n",
3032  pps_id);
3033  return -1;
3034  }
3035  h->pps = *h0->pps_buffers[pps_id];
3036 
3037  if (!h0->sps_buffers[h->pps.sps_id]) {
3039  "non-existing SPS %u referenced\n",
3040  h->pps.sps_id);
3041  return -1;
3042  }
3043 
3044  if (h->pps.sps_id != h->current_sps_id ||
3045  h0->sps_buffers[h->pps.sps_id]->new) {
3046  h0->sps_buffers[h->pps.sps_id]->new = 0;
3047 
3048  h->current_sps_id = h->pps.sps_id;
3049  h->sps = *h0->sps_buffers[h->pps.sps_id];
3050 
3051  if (h->bit_depth_luma != h->sps.bit_depth_luma ||
3055  needs_reinit = 1;
3056  }
3057  if ((ret = h264_set_parameter_from_sps(h)) < 0)
3058  return ret;
3059  }
3060 
3061  h->avctx->profile = ff_h264_get_profile(&h->sps);
3062  h->avctx->level = h->sps.level_idc;
3063  h->avctx->refs = h->sps.ref_frame_count;
3064 
3065  if (h->mb_width != h->sps.mb_width ||
3066  h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag))
3067  needs_reinit = 1;
3068 
3069  h->mb_width = h->sps.mb_width;
3070  h->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
3071  h->mb_num = h->mb_width * h->mb_height;
3072  h->mb_stride = h->mb_width + 1;
3073 
3074  h->b_stride = h->mb_width * 4;
3075 
3076  h->chroma_y_shift = h->sps.chroma_format_idc <= 1; // 400 uses yuv420p
3077 
3078  h->width = 16 * h->mb_width - (2 >> CHROMA444) * FFMIN(h->sps.crop_right, (8 << CHROMA444) - 1);
3079  if (h->sps.frame_mbs_only_flag)
3080  h->height = 16 * h->mb_height - (1 << h->chroma_y_shift) * FFMIN(h->sps.crop_bottom, (16 >> h->chroma_y_shift) - 1);
3081  else
3082  h->height = 16 * h->mb_height - (2 << h->chroma_y_shift) * FFMIN(h->sps.crop_bottom, (16 >> h->chroma_y_shift) - 1);
3083 
3084  if (FFALIGN(h->avctx->width, 16) == h->width &&
3085  FFALIGN(h->avctx->height, 16) == h->height) {
3086  h->width = h->avctx->width;
3087  h->height = h->avctx->height;
3088  }
3089 
3092  : AVCOL_RANGE_MPEG;
3094  if (h->avctx->colorspace != h->sps.colorspace)
3095  needs_reinit = 1;
3097  h->avctx->color_trc = h->sps.color_trc;
3098  h->avctx->colorspace = h->sps.colorspace;
3099  }
3100  }
3101 
3102  if (h->context_initialized &&
3103  (h->width != h->avctx->width ||
3104  h->height != h->avctx->height ||
3105  needs_reinit)) {
3106  h->context_initialized = 0;
3107  if (h != h0) {
3108  av_log(h->avctx, AV_LOG_ERROR, "changing width/height on "
3109  "slice %d\n", h0->current_slice + 1);
3110  return AVERROR_INVALIDDATA;
3111  }
3112 
3113  flush_change(h);
3114 
3115  if ((ret = get_pixel_format(h)) < 0)
3116  return ret;
3117  h->avctx->pix_fmt = ret;
3118 
3119  av_log(h->avctx, AV_LOG_INFO, "Reinit context to %dx%d, "
3120  "pix_fmt: %d\n", h->width, h->height, h->avctx->pix_fmt);
3121 
3122  if ((ret = h264_slice_header_init(h, 1)) < 0) {
3124  "h264_slice_header_init() failed\n");
3125  return ret;
3126  }
3127  }
3128  if (!h->context_initialized) {
3129  if (h != h0) {
3131  "Cannot (re-)initialize context during parallel decoding.\n");
3132  return -1;
3133  }
3134 
3135  if ((ret = get_pixel_format(h)) < 0)
3136  return ret;
3137  h->avctx->pix_fmt = ret;
3138 
3139  if ((ret = h264_slice_header_init(h, 0)) < 0) {
3141  "h264_slice_header_init() failed\n");
3142  return ret;
3143  }
3144  }
3145 
3146  if (h == h0 && h->dequant_coeff_pps != pps_id) {
3147  h->dequant_coeff_pps = pps_id;
3149  }
3150 
3151  h->frame_num = get_bits(&h->gb, h->sps.log2_max_frame_num);
3152 
3153  h->mb_mbaff = 0;
3154  h->mb_aff_frame = 0;
3155  last_pic_structure = h0->picture_structure;
3156  last_pic_droppable = h0->droppable;
3157  h->droppable = h->nal_ref_idc == 0;
3158  if (h->sps.frame_mbs_only_flag) {
3160  } else {
3161  if (get_bits1(&h->gb)) { // field_pic_flag
3162  h->picture_structure = PICT_TOP_FIELD + get_bits1(&h->gb); // bottom_field_flag
3163  } else {
3165  h->mb_aff_frame = h->sps.mb_aff;
3166  }
3167  }
3169 
3170  if (h0->current_slice != 0) {
3171  if (last_pic_structure != h->picture_structure ||
3172  last_pic_droppable != h->droppable) {
3174  "Changing field mode (%d -> %d) between slices is not allowed\n",
3175  last_pic_structure, h->picture_structure);
3176  h->picture_structure = last_pic_structure;
3177  h->droppable = last_pic_droppable;
3178  return AVERROR_INVALIDDATA;
3179  } else if (!h0->cur_pic_ptr) {
3181  "unset cur_pic_ptr on %d. slice\n",
3182  h0->current_slice + 1);
3183  return AVERROR_INVALIDDATA;
3184  }
3185  } else {
3186  /* Shorten frame num gaps so we don't have to allocate reference
3187  * frames just to throw them away */
3188  if (h->frame_num != h->prev_frame_num) {
3189  int unwrap_prev_frame_num = h->prev_frame_num;
3190  int max_frame_num = 1 << h->sps.log2_max_frame_num;
3191 
3192  if (unwrap_prev_frame_num > h->frame_num)
3193  unwrap_prev_frame_num -= max_frame_num;
3194 
3195  if ((h->frame_num - unwrap_prev_frame_num) > h->sps.ref_frame_count) {
3196  unwrap_prev_frame_num = (h->frame_num - h->sps.ref_frame_count) - 1;
3197  if (unwrap_prev_frame_num < 0)
3198  unwrap_prev_frame_num += max_frame_num;
3199 
3200  h->prev_frame_num = unwrap_prev_frame_num;
3201  }
3202  }
3203 
3204  /* See if we have a decoded first field looking for a pair...
3205  * Here, we're using that to see if we should mark previously
3206  * decode frames as "finished".
3207  * We have to do that before the "dummy" in-between frame allocation,
3208  * since that can modify s->current_picture_ptr. */
3209  if (h0->first_field) {
3210  assert(h0->cur_pic_ptr);
3211  assert(h0->cur_pic_ptr->f.data[0]);
3212  assert(h0->cur_pic_ptr->f.reference != DELAYED_PIC_REF);
3213 
3214  /* Mark old field/frame as completed */
3215  if (!last_pic_droppable && h0->cur_pic_ptr->owner2 == h0) {
3216  ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
3217  last_pic_structure == PICT_BOTTOM_FIELD);
3218  }
3219 
3220  /* figure out if we have a complementary field pair */
3221  if (!FIELD_PICTURE || h->picture_structure == last_pic_structure) {
3222  /* Previous field is unmatched. Don't display it, but let it
3223  * remain for reference if marked as such. */
3224  if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
3225  ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
3226  last_pic_structure == PICT_TOP_FIELD);
3227  }
3228  } else {
3229  if (h0->cur_pic_ptr->frame_num != h->frame_num) {
3230  /* This and previous field were reference, but had
3231  * different frame_nums. Consider this field first in
3232  * pair. Throw away previous field except for reference
3233  * purposes. */
3234  if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
3235  ff_thread_report_progress(&h0->cur_pic_ptr->f, INT_MAX,
3236  last_pic_structure == PICT_TOP_FIELD);
3237  }
3238  } else {
3239  /* Second field in complementary pair */
3240  if (!((last_pic_structure == PICT_TOP_FIELD &&
3242  (last_pic_structure == PICT_BOTTOM_FIELD &&
3245  "Invalid field mode combination %d/%d\n",
3246  last_pic_structure, h->picture_structure);
3247  h->picture_structure = last_pic_structure;
3248  h->droppable = last_pic_droppable;
3249  return AVERROR_INVALIDDATA;
3250  } else if (last_pic_droppable != h->droppable) {
3252  "Cannot combine reference and non-reference fields in the same frame\n");
3254  h->picture_structure = last_pic_structure;
3255  h->droppable = last_pic_droppable;
3256  return AVERROR_PATCHWELCOME;
3257  }
3258 
3259  /* Take ownership of this buffer. Note that if another thread owned
3260  * the first field of this buffer, we're not operating on that pointer,
3261  * so the original thread is still responsible for reporting progress
3262  * on that first field (or if that was us, we just did that above).
3263  * By taking ownership, we assign responsibility to ourselves to
3264  * report progress on the second field. */
3265  h0->cur_pic_ptr->owner2 = h0;
3266  }
3267  }
3268  }
3269 
3270  while (h->frame_num != h->prev_frame_num &&
3271  h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
3272  Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
3273  av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
3274  h->frame_num, h->prev_frame_num);
3275  if (ff_h264_frame_start(h) < 0) {
3276  h0->first_field = 0;
3277  return -1;
3278  }
3279  h->prev_frame_num++;
3280  h->prev_frame_num %= 1 << h->sps.log2_max_frame_num;
3282  ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX, 0);
3283  ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX, 1);
3284  if ((ret = ff_generate_sliding_window_mmcos(h, 1)) < 0 &&
3286  return ret;
3287  if (ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index) < 0 &&
3289  return AVERROR_INVALIDDATA;
3290  /* Error concealment: if a ref is missing, copy the previous ref in its place.
3291  * FIXME: avoiding a memcpy would be nice, but ref handling makes many assumptions
3292  * about there being no actual duplicates.
3293  * FIXME: this doesn't copy padding for out-of-frame motion vectors. Given we're
3294  * concealing a lost frame, this probably isn't noticeable by comparison, but it should
3295  * be fixed. */
3296  if (h->short_ref_count) {
3297  if (prev) {
3298  av_image_copy(h->short_ref[0]->f.data, h->short_ref[0]->f.linesize,
3299  (const uint8_t **)prev->f.data, prev->f.linesize,
3300  h->avctx->pix_fmt, h->mb_width * 16, h->mb_height * 16);
3301  h->short_ref[0]->poc = prev->poc + 2;
3302  }
3303  h->short_ref[0]->frame_num = h->prev_frame_num;
3304  }
3305  }
3306 
3307  /* See if we have a decoded first field looking for a pair...
3308  * We're using that to see whether to continue decoding in that
3309  * frame, or to allocate a new one. */
3310  if (h0->first_field) {
3311  assert(h0->cur_pic_ptr);
3312  assert(h0->cur_pic_ptr->f.data[0]);
3313  assert(h0->cur_pic_ptr->f.reference != DELAYED_PIC_REF);
3314 
3315  /* figure out if we have a complementary field pair */
3316  if (!FIELD_PICTURE || h->picture_structure == last_pic_structure) {
3317  /* Previous field is unmatched. Don't display it, but let it
3318  * remain for reference if marked as such. */
3319  h0->cur_pic_ptr = NULL;
3320  h0->first_field = FIELD_PICTURE;
3321  } else {
3322  if (h0->cur_pic_ptr->frame_num != h->frame_num) {
3323  /* This and the previous field had different frame_nums.
3324  * Consider this field first in pair. Throw away previous
3325  * one except for reference purposes. */
3326  h0->first_field = 1;
3327  h0->cur_pic_ptr = NULL;
3328  } else {
3329  /* Second field in complementary pair */
3330  h0->first_field = 0;
3331  }
3332  }
3333  } else {
3334  /* Frame or first field in a potentially complementary pair */
3335  h0->first_field = FIELD_PICTURE;
3336  }
3337 
3338  if (!FIELD_PICTURE || h0->first_field) {
3339  if (ff_h264_frame_start(h) < 0) {
3340  h0->first_field = 0;
3341  return -1;
3342  }
3343  } else {
3345  }
3346  }
3347  if (h != h0 && (ret = clone_slice(h, h0)) < 0)
3348  return ret;
3349 
3350  h->cur_pic_ptr->frame_num = h->frame_num; // FIXME frame_num cleanup
3351 
3352  assert(h->mb_num == h->mb_width * h->mb_height);
3353  if (first_mb_in_slice << FIELD_OR_MBAFF_PICTURE >= h->mb_num ||
3354  first_mb_in_slice >= h->mb_num) {
3355  av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
3356  return -1;
3357  }
3358  h->resync_mb_x = h->mb_x = first_mb_in_slice % h->mb_width;
3359  h->resync_mb_y = h->mb_y = (first_mb_in_slice / h->mb_width) << FIELD_OR_MBAFF_PICTURE;
3361  h->resync_mb_y = h->mb_y = h->mb_y + 1;
3362  assert(h->mb_y < h->mb_height);
3363 
3364  if (h->picture_structure == PICT_FRAME) {
3365  h->curr_pic_num = h->frame_num;
3366  h->max_pic_num = 1 << h->sps.log2_max_frame_num;
3367  } else {
3368  h->curr_pic_num = 2 * h->frame_num + 1;
3369  h->max_pic_num = 1 << (h->sps.log2_max_frame_num + 1);
3370  }
3371 
3372  if (h->nal_unit_type == NAL_IDR_SLICE)
3373  get_ue_golomb(&h->gb); /* idr_pic_id */
3374 
3375  if (h->sps.poc_type == 0) {
3376  h->poc_lsb = get_bits(&h->gb, h->sps.log2_max_poc_lsb);
3377 
3378  if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME)
3379  h->delta_poc_bottom = get_se_golomb(&h->gb);
3380  }
3381 
3382  if (h->sps.poc_type == 1 && !h->sps.delta_pic_order_always_zero_flag) {
3383  h->delta_poc[0] = get_se_golomb(&h->gb);
3384 
3385  if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME)
3386  h->delta_poc[1] = get_se_golomb(&h->gb);
3387  }
3388 
3389  init_poc(h);
3390 
3393 
3394  // set defaults, might be overridden a few lines later
3395  h->ref_count[0] = h->pps.ref_count[0];
3396  h->ref_count[1] = h->pps.ref_count[1];
3397 
3398  if (h->slice_type_nos != AV_PICTURE_TYPE_I) {
3401  num_ref_idx_active_override_flag = get_bits1(&h->gb);
3402 
3403  if (num_ref_idx_active_override_flag) {
3404  h->ref_count[0] = get_ue_golomb(&h->gb) + 1;
3405  if (h->ref_count[0] < 1)
3406  return AVERROR_INVALIDDATA;
3407  if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
3408  h->ref_count[1] = get_ue_golomb(&h->gb) + 1;
3409  if (h->ref_count[1] < 1)
3410  return AVERROR_INVALIDDATA;
3411  }
3412  }
3413 
3415  h->list_count = 2;
3416  else
3417  h->list_count = 1;
3418  } else {
3419  h->list_count = 0;
3420  h->ref_count[0] = h->ref_count[1] = 0;
3421  }
3422 
3423 
3424  max_refs = h->picture_structure == PICT_FRAME ? 16 : 32;
3425 
3426  if (h->ref_count[0] > max_refs || h->ref_count[1] > max_refs) {
3427  av_log(h->avctx, AV_LOG_ERROR, "reference overflow\n");
3428  h->ref_count[0] = h->ref_count[1] = 0;
3429  return AVERROR_INVALIDDATA;
3430  }
3431 
3432  if (!default_ref_list_done)
3434 
3435  if (h->slice_type_nos != AV_PICTURE_TYPE_I &&
3437  h->ref_count[1] = h->ref_count[0] = 0;
3438  return -1;
3439  }
3440 
3441  if ((h->pps.weighted_pred && h->slice_type_nos == AV_PICTURE_TYPE_P) ||
3442  (h->pps.weighted_bipred_idc == 1 &&
3444  pred_weight_table(h);
3445  else if (h->pps.weighted_bipred_idc == 2 &&
3447  implicit_weight_table(h, -1);
3448  } else {
3449  h->use_weight = 0;
3450  for (i = 0; i < 2; i++) {
3451  h->luma_weight_flag[i] = 0;
3452  h->chroma_weight_flag[i] = 0;
3453  }
3454  }
3455 
3456  // If frame-mt is enabled, only update mmco tables for the first slice
3457  // in a field. Subsequent slices can temporarily clobber h->mmco_index
3458  // or h->mmco, which will cause ref list mix-ups and decoding errors
3459  // further down the line. This may break decoding if the first slice is
3460  // corrupt, thus we only do this if frame-mt is enabled.
3461  if (h->nal_ref_idc &&
3464  h0->current_slice == 0) < 0 &&
3466  return AVERROR_INVALIDDATA;
3467 
3468  if (FRAME_MBAFF) {
3470 
3472  implicit_weight_table(h, 0);
3473  implicit_weight_table(h, 1);
3474  }
3475  }
3476 
3480 
3481  if (h->slice_type_nos != AV_PICTURE_TYPE_I && h->pps.cabac) {
3482  tmp = get_ue_golomb_31(&h->gb);
3483  if (tmp > 2) {
3484  av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc overflow\n");
3485  return -1;
3486  }
3487  h->cabac_init_idc = tmp;
3488  }
3489 
3490  h->last_qscale_diff = 0;
3491  tmp = h->pps.init_qp + get_se_golomb(&h->gb);
3492  if (tmp > 51 + 6 * (h->sps.bit_depth_luma - 8)) {
3493  av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
3494  return -1;
3495  }
3496  h->qscale = tmp;
3497  h->chroma_qp[0] = get_chroma_qp(h, 0, h->qscale);
3498  h->chroma_qp[1] = get_chroma_qp(h, 1, h->qscale);
3499  // FIXME qscale / qp ... stuff
3500  if (h->slice_type == AV_PICTURE_TYPE_SP)
3501  get_bits1(&h->gb); /* sp_for_switch_flag */
3502  if (h->slice_type == AV_PICTURE_TYPE_SP ||
3504  get_se_golomb(&h->gb); /* slice_qs_delta */
3505 
3506  h->deblocking_filter = 1;
3507  h->slice_alpha_c0_offset = 0;
3508  h->slice_beta_offset = 0;
3510  tmp = get_ue_golomb_31(&h->gb);
3511  if (tmp > 2) {
3513  "deblocking_filter_idc %u out of range\n", tmp);
3514  return -1;
3515  }
3516  h->deblocking_filter = tmp;
3517  if (h->deblocking_filter < 2)
3518  h->deblocking_filter ^= 1; // 1<->0
3519 
3520  if (h->deblocking_filter) {
3521  h->slice_alpha_c0_offset = get_se_golomb(&h->gb) * 2;
3522  h->slice_beta_offset = get_se_golomb(&h->gb) * 2;
3523  if (h->slice_alpha_c0_offset > 12 ||
3524  h->slice_alpha_c0_offset < -12 ||
3525  h->slice_beta_offset > 12 ||
3526  h->slice_beta_offset < -12) {
3528  "deblocking filter parameters %d %d out of range\n",
3530  return -1;
3531  }
3532  }
3533  }
3534 
3535  if (h->avctx->skip_loop_filter >= AVDISCARD_ALL ||
3541  h->nal_ref_idc == 0))
3542  h->deblocking_filter = 0;
3543 
3544  if (h->deblocking_filter == 1 && h0->max_contexts > 1) {
3545  if (h->avctx->flags2 & CODEC_FLAG2_FAST) {
3546  /* Cheat slightly for speed:
3547  * Do not bother to deblock across slices. */
3548  h->deblocking_filter = 2;
3549  } else {
3550  h0->max_contexts = 1;
3551  if (!h0->single_decode_warning) {
3552  av_log(h->avctx, AV_LOG_INFO,
3553  "Cannot parallelize deblocking type 1, decoding such frames in sequential order\n");
3554  h0->single_decode_warning = 1;
3555  }
3556  if (h != h0) {
3558  "Deblocking switched inside frame.\n");
3559  return 1;
3560  }
3561  }
3562  }
3563  h->qp_thresh = 15 -
3565  FFMAX3(0,
3567  h->pps.chroma_qp_index_offset[1]) +
3568  6 * (h->sps.bit_depth_luma - 8);
3569 
3570  h0->last_slice_type = slice_type;
3571  h->slice_num = ++h0->current_slice;
3572  if (h->slice_num >= MAX_SLICES) {
3574  "Too many slices, increase MAX_SLICES and recompile\n");
3575  }
3576 
3577  for (j = 0; j < 2; j++) {
3578  int id_list[16];
3579  int *ref2frm = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][j];
3580  for (i = 0; i < 16; i++) {
3581  id_list[i] = 60;
3582  if (h->ref_list[j][i].f.data[0]) {
3583  int k;
3584  uint8_t *base = h->ref_list[j][i].f.base[0];
3585  for (k = 0; k < h->short_ref_count; k++)
3586  if (h->short_ref[k]->f.base[0] == base) {
3587  id_list[i] = k;
3588  break;
3589  }
3590  for (k = 0; k < h->long_ref_count; k++)
3591  if (h->long_ref[k] && h->long_ref[k]->f.base[0] == base) {
3592  id_list[i] = h->short_ref_count + k;
3593  break;
3594  }
3595  }
3596  }
3597 
3598  ref2frm[0] =
3599  ref2frm[1] = -1;
3600  for (i = 0; i < 16; i++)
3601  ref2frm[i + 2] = 4 * id_list[i] +
3602  (h->ref_list[j][i].f.reference & 3);
3603  ref2frm[18 + 0] =
3604  ref2frm[18 + 1] = -1;
3605  for (i = 16; i < 48; i++)
3606  ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
3607  (h->ref_list[j][i].f.reference & 3);
3608  }
3609 
3610  // FIXME: fix draw_edges + PAFF + frame threads
3612  (!h->sps.frame_mbs_only_flag &&
3614  ? 0 : 16;
3616 
3617  if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
3619  "slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
3620  h->slice_num,
3621  (h->picture_structure == PICT_FRAME ? "F" : h->picture_structure == PICT_TOP_FIELD ? "T" : "B"),
3622  first_mb_in_slice,
3624  h->slice_type_fixed ? " fix" : "",
3625  h->nal_unit_type == NAL_IDR_SLICE ? " IDR" : "",
3626  pps_id, h->frame_num,
3627  h->cur_pic_ptr->field_poc[0],
3628  h->cur_pic_ptr->field_poc[1],
3629  h->ref_count[0], h->ref_count[1],
3630  h->qscale,
3631  h->deblocking_filter,
3633  h->use_weight,
3634  h->use_weight == 1 && h->use_weight_chroma ? "c" : "",
3635  h->slice_type == AV_PICTURE_TYPE_B ? (h->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
3636  }
3637 
3638  return 0;
3639 }
3640 
/* Map the decoder's AVPicture slice type back to the H.264 bitstream
 * slice_type code (P=0, B=1, I=2, SP=3, SI=4), or -1 for anything else.
 * NOTE(review): the signature line was lost in extraction; presumably
 * `static int get_slice_type(const H264Context *h)` — confirm upstream. */
3642 {
3643  switch (h->slice_type) {
3644  case AV_PICTURE_TYPE_P:
3645  return 0;
3646  case AV_PICTURE_TYPE_B:
3647  return 1;
3648  case AV_PICTURE_TYPE_I:
3649  return 2;
3650  case AV_PICTURE_TYPE_SP:
3651  return 3;
3652  case AV_PICTURE_TYPE_SI:
3653  return 4;
3654  default:
3655  return -1;
3656  }
3657 }
3658 
/* Fill the mv_cache / ref_cache rows used by the loop filter for one
 * reference list of an inter macroblock: top neighbour, left neighbour,
 * then the current MB itself. References are remapped through ref2frm so
 * that indices are comparable across slices.
 * NOTE(review): the first line of the signature was lost in extraction;
 * only the trailing parameters are visible below. */
3660  int mb_type, int top_xy,
3661  int left_xy[LEFT_MBS],
3662  int top_type,
3663  int left_type[LEFT_MBS],
3664  int mb_xy, int list)
3665 {
3666  int b_stride = h->b_stride;
3667  int16_t(*mv_dst)[2] = &h->mv_cache[list][scan8[0]];
3668  int8_t *ref_cache = &h->ref_cache[list][scan8[0]];
/* Neighbour rows are only needed when the current MB is inter/direct. */
3669  if (IS_INTER(mb_type) || IS_DIRECT(mb_type)) {
3670  if (USES_LIST(top_type, list)) {
3671  const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
3672  const int b8_xy = 4 * top_xy + 2;
/* ref2frm of the *neighbour's* slice, offset for MBAFF vs. non-MBAFF. */
3673  int (*ref2frm)[64] = h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2);
3674  AV_COPY128(mv_dst - 1 * 8, h->cur_pic.f.motion_val[list][b_xy + 0]);
3675  ref_cache[0 - 1 * 8] =
3676  ref_cache[1 - 1 * 8] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 0]];
3677  ref_cache[2 - 1 * 8] =
3678  ref_cache[3 - 1 * 8] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 1]];
3679  } else {
/* Top neighbour does not use this list: zero MVs, mark refs unused. */
3680  AV_ZERO128(mv_dst - 1 * 8);
3681  AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
3682  }
3683 
/* Left neighbour only when it has the same field/frame coding as us. */
3684  if (!IS_INTERLACED(mb_type ^ left_type[LTOP])) {
3685  if (USES_LIST(left_type[LTOP], list)) {
3686  const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3;
3687  const int b8_xy = 4 * left_xy[LTOP] + 1;
3688  int (*ref2frm)[64] = h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2);
3689  AV_COPY32(mv_dst - 1 + 0, h->cur_pic.f.motion_val[list][b_xy + b_stride * 0]);
3690  AV_COPY32(mv_dst - 1 + 8, h->cur_pic.f.motion_val[list][b_xy + b_stride * 1]);
3691  AV_COPY32(mv_dst - 1 + 16, h->cur_pic.f.motion_val[list][b_xy + b_stride * 2]);
3692  AV_COPY32(mv_dst - 1 + 24, h->cur_pic.f.motion_val[list][b_xy + b_stride * 3]);
3693  ref_cache[-1 + 0] =
3694  ref_cache[-1 + 8] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 2 * 0]];
3695  ref_cache[-1 + 16] =
3696  ref_cache[-1 + 24] = ref2frm[list][h->cur_pic.f.ref_index[list][b8_xy + 2 * 1]];
3697  } else {
3698  AV_ZERO32(mv_dst - 1 + 0);
3699  AV_ZERO32(mv_dst - 1 + 8);
3700  AV_ZERO32(mv_dst - 1 + 16);
3701  AV_ZERO32(mv_dst - 1 + 24);
3702  ref_cache[-1 + 0] =
3703  ref_cache[-1 + 8] =
3704  ref_cache[-1 + 16] =
3705  ref_cache[-1 + 24] = LIST_NOT_USED;
3706  }
3707  }
3708  }
3709 
/* Current MB does not use this list at all: clear the whole 4x4 cache. */
3710  if (!USES_LIST(mb_type, list)) {
3711  fill_rectangle(mv_dst, 4, 4, 8, pack16to32(0, 0), 4);
3712  AV_WN32A(&ref_cache[0 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
3713  AV_WN32A(&ref_cache[1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
3714  AV_WN32A(&ref_cache[2 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
3715  AV_WN32A(&ref_cache[3 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
3716  return;
3717  }
3718 
/* Replicate each 8x8-block reference into its four 4x4 cache cells. */
3719  {
3720  int8_t *ref = &h->cur_pic.f.ref_index[list][4 * mb_xy];
3721  int (*ref2frm)[64] = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][0] + (MB_MBAFF ? 20 : 2);
3722  uint32_t ref01 = (pack16to32(ref2frm[list][ref[0]], ref2frm[list][ref[1]]) & 0x00FF00FF) * 0x0101;
3723  uint32_t ref23 = (pack16to32(ref2frm[list][ref[2]], ref2frm[list][ref[3]]) & 0x00FF00FF) * 0x0101;
3724  AV_WN32A(&ref_cache[0 * 8], ref01);
3725  AV_WN32A(&ref_cache[1 * 8], ref01);
3726  AV_WN32A(&ref_cache[2 * 8], ref23);
3727  AV_WN32A(&ref_cache[3 * 8], ref23);
3728  }
3729 
/* Copy the MB's four rows of motion vectors into the cache. */
3730  {
3731  int16_t(*mv_src)[2] = &h->cur_pic.f.motion_val[list][4 * h->mb_x + 4 * h->mb_y * b_stride];
3732  AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
3733  AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
3734  AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
3735  AV_COPY128(mv_dst + 8 * 3, mv_src + 3 * b_stride);
3736  }
3737 }
3738 
/* Prepare all per-MB caches the deblocking filter reads (neighbour
 * indices/types, mv/ref caches, non-zero-count cache, cbp).
 * Returns 1 when filtering can be skipped for this MB (qp below the
 * conservative threshold), 0 otherwise. */
3743 static int fill_filter_caches(H264Context *h, int mb_type)
3744 {
3745  const int mb_xy = h->mb_xy;
3746  int top_xy, left_xy[LEFT_MBS];
3747  int top_type, left_type[LEFT_MBS];
3748  uint8_t *nnz;
3749  uint8_t *nnz_cache;
3750 
3751  top_xy = mb_xy - (h->mb_stride << MB_FIELD);
3752 
3753  /* Wow, what a mess, why didn't they simplify the interlacing & intra
3754  * stuff, I can't imagine that these complex rules are worth it. */
3755 
3756  left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
/* With MBAFF, neighbour addresses depend on the field/frame flag of both
 * the current MB and its neighbours. */
3757  if (FRAME_MBAFF) {
3758  const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.f.mb_type[mb_xy - 1]);
3759  const int curr_mb_field_flag = IS_INTERLACED(mb_type);
3760  if (h->mb_y & 1) {
3761  if (left_mb_field_flag != curr_mb_field_flag)
3762  left_xy[LTOP] -= h->mb_stride;
3763  } else {
3764  if (curr_mb_field_flag)
3765  top_xy += h->mb_stride &
3766  (((h->cur_pic.f.mb_type[top_xy] >> 7) & 1) - 1);
3767  if (left_mb_field_flag != curr_mb_field_flag)
3768  left_xy[LBOT] += h->mb_stride;
3769  }
3770  }
3771 
3772  h->top_mb_xy = top_xy;
3773  h->left_mb_xy[LTOP] = left_xy[LTOP];
3774  h->left_mb_xy[LBOT] = left_xy[LBOT];
3775  {
3776  /* For sufficiently low qp, filtering wouldn't do anything.
3777  * This is a conservative estimate: could also check beta_offset
3778  * and more accurate chroma_qp. */
3779  int qp_thresh = h->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
3780  int qp = h->cur_pic.f.qscale_table[mb_xy];
3781  if (qp <= qp_thresh &&
3782  (left_xy[LTOP] < 0 ||
3783  ((qp + h->cur_pic.f.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
3784  (top_xy < 0 ||
3785  ((qp + h->cur_pic.f.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
3786  if (!FRAME_MBAFF)
3787  return 1;
3788  if ((left_xy[LTOP] < 0 ||
3789  ((qp + h->cur_pic.f.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
3790  (top_xy < h->mb_stride ||
3791  ((qp + h->cur_pic.f.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
3792  return 1;
3793  }
3794  }
3795 
3796  top_type = h->cur_pic.f.mb_type[top_xy];
3797  left_type[LTOP] = h->cur_pic.f.mb_type[left_xy[LTOP]];
3798  left_type[LBOT] = h->cur_pic.f.mb_type[left_xy[LBOT]];
/* deblocking_filter==2: do not filter across slice boundaries; otherwise
 * only suppress neighbours that belong to no slice at all (0xFFFF). */
3799  if (h->deblocking_filter == 2) {
3800  if (h->slice_table[top_xy] != h->slice_num)
3801  top_type = 0;
3802  if (h->slice_table[left_xy[LBOT]] != h->slice_num)
3803  left_type[LTOP] = left_type[LBOT] = 0;
3804  } else {
3805  if (h->slice_table[top_xy] == 0xFFFF)
3806  top_type = 0;
3807  if (h->slice_table[left_xy[LBOT]] == 0xFFFF)
3808  left_type[LTOP] = left_type[LBOT] = 0;
3809  }
3810  h->top_type = top_type;
3811  h->left_type[LTOP] = left_type[LTOP];
3812  h->left_type[LBOT] = left_type[LBOT];
3813 
/* Intra MBs need no mv/ref caches for filtering. */
3814  if (IS_INTRA(mb_type))
3815  return 0;
3816 
3817  fill_filter_caches_inter(h, mb_type, top_xy, left_xy,
3818  top_type, left_type, mb_xy, 0);
3819  if (h->list_count == 2)
3820  fill_filter_caches_inter(h, mb_type, top_xy, left_xy,
3821  top_type, left_type, mb_xy, 1);
3822 
/* Load the non-zero-count cache for the current MB and its neighbours. */
3823  nnz = h->non_zero_count[mb_xy];
3824  nnz_cache = h->non_zero_count_cache;
3825  AV_COPY32(&nnz_cache[4 + 8 * 1], &nnz[0]);
3826  AV_COPY32(&nnz_cache[4 + 8 * 2], &nnz[4]);
3827  AV_COPY32(&nnz_cache[4 + 8 * 3], &nnz[8]);
3828  AV_COPY32(&nnz_cache[4 + 8 * 4], &nnz[12]);
3829  h->cbp = h->cbp_table[mb_xy];
3830 
3831  if (top_type) {
3832  nnz = h->non_zero_count[top_xy];
3833  AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[3 * 4]);
3834  }
3835 
3836  if (left_type[LTOP]) {
3837  nnz = h->non_zero_count[left_xy[LTOP]];
3838  nnz_cache[3 + 8 * 1] = nnz[3 + 0 * 4];
3839  nnz_cache[3 + 8 * 2] = nnz[3 + 1 * 4];
3840  nnz_cache[3 + 8 * 3] = nnz[3 + 2 * 4];
3841  nnz_cache[3 + 8 * 4] = nnz[3 + 3 * 4];
3842  }
3843 
3844  /* CAVLC 8x8dct requires NNZ values for residual decoding that differ
3845  * from what the loop filter needs */
3846  if (!CABAC && h->pps.transform_8x8_mode) {
3847  if (IS_8x8DCT(top_type)) {
3848  nnz_cache[4 + 8 * 0] =
3849  nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
3850  nnz_cache[6 + 8 * 0] =
3851  nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12;
3852  }
3853  if (IS_8x8DCT(left_type[LTOP])) {
3854  nnz_cache[3 + 8 * 1] =
3855  nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF
3856  }
3857  if (IS_8x8DCT(left_type[LBOT])) {
3858  nnz_cache[3 + 8 * 3] =
3859  nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF
3860  }
3861 
/* For an 8x8-transform MB, each 8x8 block's cbp bit is spread over its
 * four 4x4 cache entries. */
3862  if (IS_8x8DCT(mb_type)) {
3863  nnz_cache[scan8[0]] =
3864  nnz_cache[scan8[1]] =
3865  nnz_cache[scan8[2]] =
3866  nnz_cache[scan8[3]] = (h->cbp & 0x1000) >> 12;
3867 
3868  nnz_cache[scan8[0 + 4]] =
3869  nnz_cache[scan8[1 + 4]] =
3870  nnz_cache[scan8[2 + 4]] =
3871  nnz_cache[scan8[3 + 4]] = (h->cbp & 0x2000) >> 12;
3872 
3873  nnz_cache[scan8[0 + 8]] =
3874  nnz_cache[scan8[1 + 8]] =
3875  nnz_cache[scan8[2 + 8]] =
3876  nnz_cache[scan8[3 + 8]] = (h->cbp & 0x4000) >> 12;
3877 
3878  nnz_cache[scan8[0 + 12]] =
3879  nnz_cache[scan8[1 + 12]] =
3880  nnz_cache[scan8[2 + 12]] =
3881  nnz_cache[scan8[3 + 12]] = (h->cbp & 0x8000) >> 12;
3882  }
3883  }
3884 
3885  return 0;
3886 }
3887 
/* Run the deblocking filter over macroblock columns [start_x, end_x) of
 * the current row (two rows when MBAFF). Temporarily clobbers several
 * per-MB fields of the context (slice_num, list_count, mb_x/mb_y,
 * chroma_qp, ...) and restores/repositions them at the end. */
3888 static void loop_filter(H264Context *h, int start_x, int end_x)
3889 {
3890  uint8_t *dest_y, *dest_cb, *dest_cr;
3891  int linesize, uvlinesize, mb_x, mb_y;
3892  const int end_mb_y = h->mb_y + FRAME_MBAFF;
3893  const int old_slice_type = h->slice_type;
3894  const int pixel_shift = h->pixel_shift;
3895  const int block_h = 16 >> h->chroma_y_shift;
3896 
3897  if (h->deblocking_filter) {
3898  for (mb_x = start_x; mb_x < end_x; mb_x++)
/* With MBAFF the loop covers the MB pair (two vertical MBs). */
3899  for (mb_y = end_mb_y - FRAME_MBAFF; mb_y <= end_mb_y; mb_y++) {
3900  int mb_xy, mb_type;
3901  mb_xy = h->mb_xy = mb_x + mb_y * h->mb_stride;
3902  h->slice_num = h->slice_table[mb_xy];
3903  mb_type = h->cur_pic.f.mb_type[mb_xy];
3904  h->list_count = h->list_counts[mb_xy];
3905 
3906  if (FRAME_MBAFF)
3907  h->mb_mbaff =
3908  h->mb_field_decoding_flag = !!IS_INTERLACED(mb_type);
3909 
3910  h->mb_x = mb_x;
3911  h->mb_y = mb_y;
/* Compute the plane pointers for this MB. */
3912  dest_y = h->cur_pic.f.data[0] +
3913  ((mb_x << pixel_shift) + mb_y * h->linesize) * 16;
3914  dest_cb = h->cur_pic.f.data[1] +
3915  (mb_x << pixel_shift) * (8 << CHROMA444) +
3916  mb_y * h->uvlinesize * block_h;
3917  dest_cr = h->cur_pic.f.data[2] +
3918  (mb_x << pixel_shift) * (8 << CHROMA444) +
3919  mb_y * h->uvlinesize * block_h;
3920  // FIXME simplify above
3921 
/* Field MBs: double the stride and, for the bottom field, rewind the
 * pointers to the field's first line. */
3922  if (MB_FIELD) {
3923  linesize = h->mb_linesize = h->linesize * 2;
3924  uvlinesize = h->mb_uvlinesize = h->uvlinesize * 2;
3925  if (mb_y & 1) { // FIXME move out of this function?
3926  dest_y -= h->linesize * 15;
3927  dest_cb -= h->uvlinesize * (block_h - 1);
3928  dest_cr -= h->uvlinesize * (block_h - 1);
3929  }
3930  } else {
3931  linesize = h->mb_linesize = h->linesize;
3932  uvlinesize = h->mb_uvlinesize = h->uvlinesize;
3933  }
3934  backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize,
3935  uvlinesize, 0);
/* fill_filter_caches() returning nonzero means this MB can be skipped. */
3936  if (fill_filter_caches(h, mb_type))
3937  continue;
3938  h->chroma_qp[0] = get_chroma_qp(h, 0, h->cur_pic.f.qscale_table[mb_xy]);
3939  h->chroma_qp[1] = get_chroma_qp(h, 1, h->cur_pic.f.qscale_table[mb_xy]);
3940 
3941  if (FRAME_MBAFF) {
3942  ff_h264_filter_mb(h, mb_x, mb_y, dest_y, dest_cb, dest_cr,
3943  linesize, uvlinesize);
3944  } else {
3945  ff_h264_filter_mb_fast(h, mb_x, mb_y, dest_y, dest_cb,
3946  dest_cr, linesize, uvlinesize);
3947  }
3948  }
3949  }
/* Restore the context state the filtering loop clobbered. */
3950  h->slice_type = old_slice_type;
3951  h->mb_x = end_x;
3952  h->mb_y = end_mb_y - FRAME_MBAFF;
3953  h->chroma_qp[0] = get_chroma_qp(h, 0, h->qscale);
3954  h->chroma_qp[1] = get_chroma_qp(h, 1, h->qscale);
3955 }
3956 
/* Predict the field decoding flag of the current MB from the left (or,
 * failing that, the top) neighbour of the same slice; 0 when neither is
 * available. NOTE(review): the signature line was lost in extraction;
 * presumably `static void predict_field_decoding_flag(H264Context *h)`. */
3958 {
3959  const int mb_xy = h->mb_x + h->mb_y * h->mb_stride;
3960  int mb_type = (h->slice_table[mb_xy - 1] == h->slice_num) ?
3961  h->cur_pic.f.mb_type[mb_xy - 1] :
3962  (h->slice_table[mb_xy - h->mb_stride] == h->slice_num) ?
3963  h->cur_pic.f.mb_type[mb_xy - h->mb_stride] : 0;
3964  h->mb_mbaff = h->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
3965 }
3966 
/* After finishing a row of MBs: draw the horizontal band for display and
 * report decode progress to other frame threads. The band is extended by
 * a deblocking border because filtered rows lag one border behind.
 * NOTE(review): the signature line and the last argument of the final
 * ff_thread_report_progress() call were lost in extraction — presumably
 * `static void decode_finish_row(H264Context *h)` and the field parity
 * argument; confirm against upstream. */
3971 {
3972  int top = 16 * (h->mb_y >> FIELD_PICTURE);
3973  int pic_height = 16 * h->mb_height >> FIELD_PICTURE;
3974  int height = 16 << FRAME_MBAFF;
3975  int deblock_border = (16 + 4) << FRAME_MBAFF;
3976 
3977  if (h->deblocking_filter) {
3978  if ((top + height) >= pic_height)
3979  height += deblock_border;
3980  top -= deblock_border;
3981  }
3982 
/* Nothing visible yet (or band entirely above the edge-emulation area). */
3983  if (top >= pic_height || (top + height) < h->emu_edge_height)
3984  return;
3985 
3986  height = FFMIN(height, pic_height - top);
3987  if (top < h->emu_edge_height) {
3988  height = top + height;
3989  top = 0;
3990  }
3991 
3992  ff_h264_draw_horiz_band(h, top, height);
3993 
/* Droppable frames are never waited on, so no progress report needed. */
3994  if (h->droppable)
3995  return;
3996 
3997  ff_thread_report_progress(&h->cur_pic_ptr->f, top + height - 1,
3999 }
4000 
4001 static void er_add_slice(H264Context *h, int startx, int starty,
4002  int endx, int endy, int status)
4003 {
4004  ERContext *er = &h->er;
4005 
4006  er->ref_count = h->ref_count[0];
4007  ff_er_add_slice(er, startx, starty, endx, endy, status);
4008 }
4009 
/* Slice decoding worker (also the avctx->execute() callback): decodes all
 * macroblocks of one slice, either through the CABAC or the CAVLC path,
 * running the loop filter and error-resilience bookkeeping as rows
 * complete. Returns 0 on success, negative on a macroblock decode error.
 * NOTE(review): several lines in this function lost tokens during
 * extraction (e.g. the is_complex assignment at 4017, the CABAC init
 * calls around 4026-4031, prediction/hwaccel calls at 4040/4049/4080,
 * av_log prefixes at 4063/4112, and er_add_slice prefixes at
 * 4134/4140/4153) — restore from upstream before building. */
4010 static int decode_slice(struct AVCodecContext *avctx, void *arg)
4011 {
4012  H264Context *h = *(void **)arg;
4013  int lf_x_start = h->mb_x;
4014 
4015  h->mb_skip_run = -1;
4016 
4018  avctx->codec_id != AV_CODEC_ID_H264 ||
4019  (CONFIG_GRAY && (h->flags & CODEC_FLAG_GRAY));
4020 
4021  if (h->pps.cabac) {
4022  /* realign */
4023  align_get_bits(&h->gb);
4024 
4025  /* init cabac */
4028  h->gb.buffer + get_bits_count(&h->gb) / 8,
4029  (get_bits_left(&h->gb) + 7) / 8);
4030 
4032 
4033  for (;;) {
4034  // START_TIMER
4035  int ret = ff_h264_decode_mb_cabac(h);
4036  int eos;
4037  // STOP_TIMER("decode_mb_cabac")
4038 
4039  if (ret >= 0)
4041 
4042  // FIXME optimal? or let mb_decode decode 16x32 ?
/* MBAFF: decode the bottom MB of the pair right after the top one. */
4043  if (ret >= 0 && FRAME_MBAFF) {
4044  h->mb_y++;
4045 
4046  ret = ff_h264_decode_mb_cabac(h);
4047 
4048  if (ret >= 0)
4050  h->mb_y--;
4051  }
4052  eos = get_cabac_terminate(&h->cabac);
4053 
/* Workaround for streams with truncated final MBs: treat a small
 * bytestream overrun as a normal slice end. */
4054  if ((h->workaround_bugs & FF_BUG_TRUNCATED) &&
4055  h->cabac.bytestream > h->cabac.bytestream_end + 2) {
4056  er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x - 1,
4057  h->mb_y, ER_MB_END);
4058  if (h->mb_x >= lf_x_start)
4059  loop_filter(h, lf_x_start, h->mb_x + 1);
4060  return 0;
4061  }
4062  if (ret < 0 || h->cabac.bytestream > h->cabac.bytestream_end + 2) {
4064  "error while decoding MB %d %d, bytestream (%td)\n",
4065  h->mb_x, h->mb_y,
4067  er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x,
4068  h->mb_y, ER_MB_ERROR);
4069  return -1;
4070  }
4071 
/* End of MB row: filter it, then advance to the next row. */
4072  if (++h->mb_x >= h->mb_width) {
4073  loop_filter(h, lf_x_start, h->mb_x);
4074  h->mb_x = lf_x_start = 0;
4075  decode_finish_row(h);
4076  ++h->mb_y;
4077  if (FIELD_OR_MBAFF_PICTURE) {
4078  ++h->mb_y;
4079  if (FRAME_MBAFF && h->mb_y < h->mb_height)
4081  }
4082  }
4083 
4084  if (eos || h->mb_y >= h->mb_height) {
4085  tprintf(h->avctx, "slice end %d %d\n",
4086  get_bits_count(&h->gb), h->gb.size_in_bits);
4087  er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x - 1,
4088  h->mb_y, ER_MB_END);
4089  if (h->mb_x > lf_x_start)
4090  loop_filter(h, lf_x_start, h->mb_x);
4091  return 0;
4092  }
4093  }
4094  } else {
/* CAVLC path: same row/loop-filter structure, bitstream-based end test. */
4095  for (;;) {
4096  int ret = ff_h264_decode_mb_cavlc(h);
4097 
4098  if (ret >= 0)
4100 
4101  // FIXME optimal? or let mb_decode decode 16x32 ?
4102  if (ret >= 0 && FRAME_MBAFF) {
4103  h->mb_y++;
4104  ret = ff_h264_decode_mb_cavlc(h);
4105 
4106  if (ret >= 0)
4108  h->mb_y--;
4109  }
4110 
4111  if (ret < 0) {
4113  "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
4114  er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x,
4115  h->mb_y, ER_MB_ERROR);
4116  return -1;
4117  }
4118 
4119  if (++h->mb_x >= h->mb_width) {
4120  loop_filter(h, lf_x_start, h->mb_x);
4121  h->mb_x = lf_x_start = 0;
4122  decode_finish_row(h);
4123  ++h->mb_y;
4124  if (FIELD_OR_MBAFF_PICTURE) {
4125  ++h->mb_y;
4126  if (FRAME_MBAFF && h->mb_y < h->mb_height)
4128  }
4129  if (h->mb_y >= h->mb_height) {
4130  tprintf(h->avctx, "slice end %d %d\n",
4131  get_bits_count(&h->gb), h->gb.size_in_bits);
4132 
/* Exactly consumed bitstream => clean end; leftover bits => error. */
4133  if (get_bits_left(&h->gb) == 0) {
4135  h->mb_x - 1, h->mb_y,
4136  ER_MB_END);
4137 
4138  return 0;
4139  } else {
4141  h->mb_x - 1, h->mb_y,
4142  ER_MB_END);
4143 
4144  return -1;
4145  }
4146  }
4147  }
4148 
4149  if (get_bits_left(&h->gb) <= 0 && h->mb_skip_run <= 0) {
4150  tprintf(h->avctx, "slice end %d %d\n",
4151  get_bits_count(&h->gb), h->gb.size_in_bits);
4152  if (get_bits_left(&h->gb) == 0) {
4154  h->mb_x - 1, h->mb_y,
4155  ER_MB_END);
4156  if (h->mb_x > lf_x_start)
4157  loop_filter(h, lf_x_start, h->mb_x);
4158 
4159  return 0;
4160  } else {
4161  er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x,
4162  h->mb_y, ER_MB_ERROR);
4163 
4164  return -1;
4165  }
4166  }
4167  }
4168  }
4169 }
4170 
/* Run decode_slice() over all queued slice contexts: directly for a
 * single context, otherwise via avctx->execute() for slice threading,
 * afterwards pulling position/error state back into the master context.
 * NOTE(review): lines 4184 (av_log prefix), 4190 (second half of the
 * hwaccel/vdpau condition) and 4208-4210 (error-count accumulation in
 * the pull-back loop) lost tokens in extraction — restore from upstream. */
4177 static int execute_decode_slices(H264Context *h, int context_count)
4178 {
4179  AVCodecContext *const avctx = h->avctx;
4180  H264Context *hx;
4181  int i;
4182 
4183  if (h->mb_y >= h->mb_height) {
4185  "Input contains more MB rows than the frame height.\n");
4186  return AVERROR_INVALIDDATA;
4187  }
4188 
/* Hardware acceleration paths decode elsewhere; nothing to do here. */
4189  if (h->avctx->hwaccel ||
4191  return 0;
4192  if (context_count == 1) {
4193  return decode_slice(avctx, &h);
4194  } else {
4195  for (i = 1; i < context_count; i++) {
4196  hx = h->thread_context[i];
4197  hx->er.error_count = 0;
4198  }
4199 
4200  avctx->execute(avctx, decode_slice, h->thread_context,
4201  NULL, context_count, sizeof(void *));
4202 
4203  /* pull back stuff from slices to master context */
4204  hx = h->thread_context[context_count - 1];
4205  h->mb_x = hx->mb_x;
4206  h->mb_y = hx->mb_y;
4207  h->droppable = hx->droppable;
4209  for (i = 1; i < context_count; i++)
4211  }
4212 
4213  return 0;
4214 }
4215 
4216 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
4217  int parse_extradata)
4218 {
4219  AVCodecContext *const avctx = h->avctx;
4220  H264Context *hx;
4221  int buf_index;
4222  int context_count;
4223  int next_avc;
4224  int pass = !(avctx->active_thread_type & FF_THREAD_FRAME);
4225  int nals_needed = 0;
4226  int nal_index;
4227 
4229  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS)) {
4230  h->current_slice = 0;
4231  if (!h->first_field)
4232  h->cur_pic_ptr = NULL;
4233  ff_h264_reset_sei(h);
4234  }
4235 
4236  for (; pass <= 1; pass++) {
4237  buf_index = 0;
4238  context_count = 0;
4239  next_avc = h->is_avc ? 0 : buf_size;
4240  nal_index = 0;
4241  for (;;) {
4242  int consumed;
4243  int dst_length;
4244  int bit_length;
4245  const uint8_t *ptr;
4246  int i, nalsize = 0;
4247  int err;
4248 
4249  if (buf_index >= next_avc) {
4250  if (buf_index >= buf_size - h->nal_length_size)
4251  break;
4252  nalsize = 0;
4253  for (i = 0; i < h->nal_length_size; i++)
4254  nalsize = (nalsize << 8) | buf[buf_index++];
4255  if (nalsize <= 0 || nalsize > buf_size - buf_index) {
4257  "AVC: nal size %d\n", nalsize);
4258  break;
4259  }
4260  next_avc = buf_index + nalsize;
4261  } else {
4262  // start code prefix search
4263  for (; buf_index + 3 < next_avc; buf_index++)
4264  // This should always succeed in the first iteration.
4265  if (buf[buf_index] == 0 &&
4266  buf[buf_index + 1] == 0 &&
4267  buf[buf_index + 2] == 1)
4268  break;
4269 
4270  if (buf_index + 3 >= buf_size) {
4271  buf_index = buf_size;
4272  break;
4273  }
4274 
4275  buf_index += 3;
4276  if (buf_index >= next_avc)
4277  continue;
4278  }
4279 
4280  hx = h->thread_context[context_count];
4281 
4282  ptr = ff_h264_decode_nal(hx, buf + buf_index, &dst_length,
4283  &consumed, next_avc - buf_index);
4284  if (ptr == NULL || dst_length < 0) {
4285  buf_index = -1;
4286  goto end;
4287  }
4288  i = buf_index + consumed;
4289  if ((h->workaround_bugs & FF_BUG_AUTODETECT) && i + 3 < next_avc &&
4290  buf[i] == 0x00 && buf[i + 1] == 0x00 &&
4291  buf[i + 2] == 0x01 && buf[i + 3] == 0xE0)
4293 
4294  if (!(h->workaround_bugs & FF_BUG_TRUNCATED))
4295  while (dst_length > 0 && ptr[dst_length - 1] == 0)
4296  dst_length--;
4297  bit_length = !dst_length ? 0
4298  : (8 * dst_length -
4299  decode_rbsp_trailing(h, ptr + dst_length - 1));
4300 
4301  if (h->avctx->debug & FF_DEBUG_STARTCODE)
4303  "NAL %d at %d/%d length %d\n",
4304  hx->nal_unit_type, buf_index, buf_size, dst_length);
4305 
4306  if (h->is_avc && (nalsize != consumed) && nalsize)
4308  "AVC: Consumed only %d bytes instead of %d\n",
4309  consumed, nalsize);
4310 
4311  buf_index += consumed;
4312  nal_index++;
4313 
4314  if (pass == 0) {
4315  /* packets can sometimes contain multiple PPS/SPS,
4316  * e.g. two PAFF field pictures in one packet, or a demuxer
4317  * which splits NALs strangely if so, when frame threading we
4318  * can't start the next thread until we've read all of them */
4319  switch (hx->nal_unit_type) {
4320  case NAL_SPS:
4321  case NAL_PPS:
4322  nals_needed = nal_index;
4323  break;
4324  case NAL_DPA:
4325  case NAL_IDR_SLICE:
4326  case NAL_SLICE:
4327  init_get_bits(&hx->gb, ptr, bit_length);
4328  if (!get_ue_golomb(&hx->gb))
4329  nals_needed = nal_index;
4330  }
4331  continue;
4332  }
4333 
4334  // FIXME do not discard SEI id
4335  if (avctx->skip_frame >= AVDISCARD_NONREF && h->nal_ref_idc == 0)
4336  continue;
4337 
4338 again:
4339  /* Ignore every NAL unit type except PPS and SPS during extradata
4340  * parsing. Decoding slices is not possible in codec init
4341  * with frame-mt */
4342  if (parse_extradata && HAVE_THREADS &&
4344  (hx->nal_unit_type != NAL_PPS &&
4345  hx->nal_unit_type != NAL_SPS)) {
4346  av_log(avctx, AV_LOG_INFO, "Ignoring NAL unit %d during "
4347  "extradata parsing\n", hx->nal_unit_type);
4349  }
4350  err = 0;
4351  switch (hx->nal_unit_type) {
4352  case NAL_IDR_SLICE:
4353  if (h->nal_unit_type != NAL_IDR_SLICE) {
4355  "Invalid mix of idr and non-idr slices\n");
4356  buf_index = -1;
4357  goto end;
4358  }
4359  idr(h); // FIXME ensure we don't lose some frames if there is reordering
4360  case NAL_SLICE:
4361  init_get_bits(&hx->gb, ptr, bit_length);
4362  hx->intra_gb_ptr =
4363  hx->inter_gb_ptr = &hx->gb;
4364  hx->data_partitioning = 0;
4365 
4366  if ((err = decode_slice_header(hx, h)))
4367  break;
4368 
4369  h->cur_pic_ptr->f.key_frame |=
4370  (hx->nal_unit_type == NAL_IDR_SLICE) ||
4371  (h->sei_recovery_frame_cnt >= 0);
4372 
4373  if (h->current_slice == 1) {
4374  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS))
4375  decode_postinit(h, nal_index >= nals_needed);
4376 
4377  if (h->avctx->hwaccel &&
4378  h->avctx->hwaccel->start_frame(h->avctx, NULL, 0) < 0)
4379  return -1;
4383  }
4384 
4385  if (hx->redundant_pic_count == 0 &&
4386  (avctx->skip_frame < AVDISCARD_NONREF ||
4387  hx->nal_ref_idc) &&
4388  (avctx->skip_frame < AVDISCARD_BIDIR ||
4390  (avctx->skip_frame < AVDISCARD_NONKEY ||
4392  avctx->skip_frame < AVDISCARD_ALL) {
4393  if (avctx->hwaccel) {
4394  if (avctx->hwaccel->decode_slice(avctx,
4395  &buf[buf_index - consumed],
4396  consumed) < 0)
4397  return -1;
4398  } else if (CONFIG_H264_VDPAU_DECODER &&
4400  static const uint8_t start_code[] = {
4401  0x00, 0x00, 0x01 };
4402  ff_vdpau_add_data_chunk(h->cur_pic_ptr->f.data[0], start_code,
4403  sizeof(start_code));
4404  ff_vdpau_add_data_chunk(h->cur_pic_ptr->f.data[0], &buf[buf_index - consumed],
4405  consumed);
4406  } else
4407  context_count++;
4408  }
4409  break;
4410  case NAL_DPA:
4411  if (h->avctx->flags2 & CODEC_FLAG2_CHUNKS) {
4413  "Decoding in chunks is not supported for "
4414  "partitioned slices.\n");
4415  return AVERROR(ENOSYS);
4416  }
4417 
4418  init_get_bits(&hx->gb, ptr, bit_length);
4419  hx->intra_gb_ptr =
4420  hx->inter_gb_ptr = NULL;
4421 
4422  if ((err = decode_slice_header(hx, h)) < 0) {
4423  /* make sure data_partitioning is cleared if it was set
4424  * before, so we don't try decoding a slice without a valid
4425  * slice header later */
4426  h->data_partitioning = 0;
4427  break;
4428  }
4429 
4430  hx->data_partitioning = 1;
4431  break;
4432  case NAL_DPB:
4433  init_get_bits(&hx->intra_gb, ptr, bit_length);
4434  hx->intra_gb_ptr = &hx->intra_gb;
4435  break;
4436  case NAL_DPC:
4437  init_get_bits(&hx->inter_gb, ptr, bit_length);
4438  hx->inter_gb_ptr = &hx->inter_gb;
4439 
4440  if (hx->redundant_pic_count == 0 &&
4441  hx->intra_gb_ptr &&
4442  hx->data_partitioning &&
4443  h->cur_pic_ptr && h->context_initialized &&
4444  (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc) &&
4445  (avctx->skip_frame < AVDISCARD_BIDIR ||
4447  (avctx->skip_frame < AVDISCARD_NONKEY ||
4449  avctx->skip_frame < AVDISCARD_ALL)
4450  context_count++;
4451  break;
4452  case NAL_SEI:
4453  init_get_bits(&h->gb, ptr, bit_length);
4454  ff_h264_decode_sei(h);
4455  break;
4456  case NAL_SPS:
4457  init_get_bits(&h->gb, ptr, bit_length);
4458  if (ff_h264_decode_seq_parameter_set(h) < 0 &&
4459  h->is_avc && (nalsize != consumed) && nalsize) {
4461  "SPS decoding failure, trying again with the complete NAL\n");
4462  init_get_bits(&h->gb, buf + buf_index + 1 - consumed,
4463  8 * (nalsize - 1));
4465  }
4466 
4467  if (h264_set_parameter_from_sps(h) < 0) {
4468  buf_index = -1;
4469  goto end;
4470  }
4471  break;
4472  case NAL_PPS:
4473  init_get_bits(&h->gb, ptr, bit_length);
4474  ff_h264_decode_picture_parameter_set(h, bit_length);
4475  break;
4476  case NAL_AUD:
4477  case NAL_END_SEQUENCE:
4478  case NAL_END_STREAM:
4479  case NAL_FILLER_DATA:
4480  case NAL_SPS_EXT:
4481  case NAL_AUXILIARY_SLICE:
4482  break;
4483  case NAL_FF_IGNORE:
4484  break;
4485  default:
4486  av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
4487  hx->nal_unit_type, bit_length);
4488  }
4489 
4490  if (context_count == h->max_contexts) {
4491  execute_decode_slices(h, context_count);
4492  context_count = 0;
4493  }
4494 
4495  if (err < 0) {
4496  av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
4497  h->ref_count[0] = h->ref_count[1] = h->list_count = 0;
4498  } else if (err == 1) {
4499  /* Slice could not be decoded in parallel mode, copy down
4500  * NAL unit stuff to context 0 and restart. Note that
4501  * rbsp_buffer is not transferred, but since we no longer
4502  * run in parallel mode this should not be an issue. */
4503  h->nal_unit_type = hx->nal_unit_type;
4504  h->nal_ref_idc = hx->nal_ref_idc;
4505  hx = h;
4506  goto again;
4507  }
4508  }
4509  }
4510  if (context_count)
4511  execute_decode_slices(h, context_count);
4512 
4513 end:
4514  /* clean up */
4515  if (h->cur_pic_ptr && h->cur_pic_ptr->owner2 == h &&
4516  !h->droppable) {
4517  ff_thread_report_progress(&h->cur_pic_ptr->f, INT_MAX,
4519  }
4520 
4521  return buf_index;
4522 }
4523 
/**
 * Return the number of bytes consumed for building the current frame.
 *
 * @param pos      number of bytes actually consumed by the NAL parser
 * @param buf_size total size of the input packet
 * @return a sanitized consumed-byte count in the range [1, buf_size]
 */
static int get_consumed_bytes(int pos, int buf_size)
{
    /* Never report zero progress, or the caller could spin on the
     * same packet forever. */
    if (!pos)
        pos = 1;
    /* If we are within 10 bytes of the end, treat the whole packet
     * as consumed (trailing padding / stuffing is not worth keeping). */
    return pos + 10 > buf_size ? buf_size : pos;
}
4536 
/**
 * AVCodec.decode callback: decode one AVPacket worth of H.264 data.
 *
 * A zero-sized packet signals end of stream and drains the delayed-picture
 * (reordering) buffer instead of decoding.
 *
 * @param avctx     codec context (priv_data is the H264Context)
 * @param data      output AVFrame
 * @param got_frame set to 1 when *data holds a complete output picture
 * @param avpkt     input packet; avpkt->size == 0 means flush
 * @return number of bytes consumed, 0, or -1 on error
 */
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    H264Context *h = avctx->priv_data;
    AVFrame *pict = data;
    int buf_index = 0;

    h->flags = avctx->flags;
    /* reset data partitioning here, to ensure GetBitContexts from previous
     * packets do not get used. */
    h->data_partitioning = 0;

    /* end of stream, output what is still in the buffers */
out:
    if (buf_size == 0) {
        Picture *out;
        int i, out_idx;

        h->cur_pic_ptr = NULL;

        // FIXME factorize this with the output code below
        /* Pick the lowest-POC delayed picture, but stop scanning at the
         * first key frame / MMCO reset, which starts a new output epoch. */
        out = h->delayed_pic[0];
        out_idx = 0;
        for (i = 1;
             h->delayed_pic[i] &&
             !h->delayed_pic[i]->f.key_frame &&
             !h->delayed_pic[i]->mmco_reset;
             i++)
            if (h->delayed_pic[i]->poc < out->poc) {
                out = h->delayed_pic[i];
                out_idx = i;
            }

        /* Compact the delayed-picture list over the emitted slot. */
        for (i = out_idx; h->delayed_pic[i]; i++)
            h->delayed_pic[i] = h->delayed_pic[i + 1];

        if (out) {
            *got_frame = 1;
            *pict = out->f;
        }

        return buf_index;
    }

    buf_index = decode_nal_units(h, buf, buf_size, 0);
    if (buf_index < 0)
        return -1;

    /* An end-of-sequence NAL with no current picture: re-enter the flush
     * path above to drain the reorder buffer. */
    if (!h->cur_pic_ptr && h->nal_unit_type == NAL_END_SEQUENCE) {
        buf_size = 0;
        goto out;
    }

    if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
        /* No picture was started; only an error unless frames are being
         * deliberately skipped. */
        if (avctx->skip_frame >= AVDISCARD_NONREF)
            return 0;
        av_log(avctx, AV_LOG_ERROR, "no frame!\n");
        return -1;
    }

    /* Finish the field/frame either when decoding whole frames, or in
     * chunked mode once all macroblock rows have been decoded. */
    if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) ||
        (h->mb_y >= h->mb_height && h->mb_height)) {
        if (avctx->flags2 & CODEC_FLAG2_CHUNKS)
            decode_postinit(h, 1);

        field_end(h, 0);

        if (!h->next_output_pic) {
            /* Wait for second field. */
            *got_frame = 0;
        } else {
            *got_frame = 1;
            *pict = h->next_output_pic->f;
        }
    }

    assert(pict->data[0] || !*got_frame);

    return get_consumed_bytes(buf_index, buf_size);
}
4619 
/* NOTE(review): the function signature line (original line 4620) was lost
 * in extraction. Judging by the body (frees SPS/PPS parameter-set buffers
 * and the shared tables), this is presumably
 * `av_cold void ff_h264_free_context(H264Context *h)` — confirm against
 * upstream h264.c before relying on this. */
{
    int i;

    free_tables(h, 1); // FIXME cleanup init stuff perhaps

    /* Release every cached sequence parameter set. */
    for (i = 0; i < MAX_SPS_COUNT; i++)
        av_freep(h->sps_buffers + i);

    /* Release every cached picture parameter set. */
    for (i = 0; i < MAX_PPS_COUNT; i++)
        av_freep(h->pps_buffers + i);
}
4632 
/* NOTE(review): the function signature line (original line 4633) was lost
 * in extraction. The body matches an AVCodec.close callback, presumably
 * `static av_cold int h264_decode_end(AVCodecContext *avctx)` — confirm
 * against upstream h264.c. */
{
    H264Context *h = avctx->priv_data;
    int i;

    /* NOTE(review): a statement at original line 4638 was lost in
     * extraction — likely `ff_h264_free_context(h);` given the function
     * above; verify before use. */

    /* Free the DPB pictures, but only in the context that owns them
     * (thread copies share the parent's DPB). */
    if (h->DPB && !h->avctx->internal->is_copy) {
        for (i = 0; i < h->picture_count; i++) {
            free_picture(h, &h->DPB[i]);
        }
    }
    av_freep(&h->DPB);

    return 0;
}
4649 
/* Human-readable names for the FF_PROFILE_H264_* profile ids, exposed to
 * callers via AVCodec.profiles; the list is terminated by the
 * FF_PROFILE_UNKNOWN sentinel entry. */
static const AVProfile profiles[] = {
    { FF_PROFILE_H264_BASELINE,             "Baseline"              },
    { FF_PROFILE_H264_CONSTRAINED_BASELINE, "Constrained Baseline"  },
    { FF_PROFILE_H264_MAIN,                 "Main"                  },
    { FF_PROFILE_H264_EXTENDED,             "Extended"              },
    { FF_PROFILE_H264_HIGH,                 "High"                  },
    { FF_PROFILE_H264_HIGH_10,              "High 10"               },
    { FF_PROFILE_H264_HIGH_10_INTRA,        "High 10 Intra"         },
    { FF_PROFILE_H264_HIGH_422,             "High 4:2:2"            },
    { FF_PROFILE_H264_HIGH_422_INTRA,       "High 4:2:2 Intra"      },
    { FF_PROFILE_H264_HIGH_444,             "High 4:4:4"            },
    { FF_PROFILE_H264_HIGH_444_PREDICTIVE,  "High 4:4:4 Predictive" },
    { FF_PROFILE_H264_HIGH_444_INTRA,       "High 4:4:4 Intra"      },
    { FF_PROFILE_H264_CAVLC_444,            "CAVLC 4:4:4"           },
    { FF_PROFILE_UNKNOWN },
};
4666 
4668  .name = "h264",
4669  .type = AVMEDIA_TYPE_VIDEO,
4670  .id = AV_CODEC_ID_H264,
4671  .priv_data_size = sizeof(H264Context),
4674  .decode = decode_frame,
4675  .capabilities = /*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 |
4678  .flush = flush_dpb,
4679  .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
4680  .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
4681  .update_thread_context = ONLY_IF_THREADS_ENABLED(decode_update_thread_context),
4682  .profiles = NULL_IF_CONFIG_SMALL(profiles),
4683 };
4684 
#if CONFIG_H264_VDPAU_DECODER
/* VDPAU hardware-accelerated variant of the H.264 decoder; registered only
 * when VDPAU support is configured in.
 * NOTE(review): several designated initializers (original lines 4691-4694,
 * presumably .init, .close and .capabilities) were lost in extraction —
 * restore them from upstream h264.c before building. */
AVCodec ff_h264_vdpau_decoder = {
    .name           = "h264_vdpau",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .priv_data_size = sizeof(H264Context),
    .decode         = decode_frame,
    .flush          = flush_dpb,
    .long_name      = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"),
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_H264,
                                                     AV_PIX_FMT_NONE},
    .profiles       = NULL_IF_CONFIG_SMALL(profiles),
};
#endif
int chroma_format_idc
Definition: h264.h:148
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free
Definition: mpegvideo.h:161
Picture default_ref_list[2][32]
base reference list for all slices of a coded picture
Definition: h264.h:521
#define PICT_BOTTOM_FIELD
Definition: mpegvideo.h:628
#define CONFIG_H264_VDPAU_DECODER
Definition: config.h:420
void ff_h264_direct_dist_scale_factor(H264Context *const h)
Definition: h264_direct.c:52
enum AVPixelFormat ff_hwaccel_pixfmt_list_420[]
Definition: mpegvideo.c:133
GetBitContext inter_gb
Definition: h264.h:412
#define XCHG(a, b, xchg)
int video_signal_type_present_flag
Definition: h264.h:171
#define VERT_PRED8x8
Definition: h264pred.h:70
int last_slice_type
Definition: h264.h:569
int ff_h264_decode_mb_cabac(H264Context *h)
Decode a CABAC coded macroblock.
Definition: h264_cabac.c:1857
static void clone_tables(H264Context *dst, H264Context *src, int i)
Mimic alloc_tables(), but for every context thread.
Definition: h264.c:1127
const struct AVCodec * codec
Definition: avcodec.h:1348
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:61
int ff_h264_decode_seq_parameter_set(H264Context *h)
Decode SPS.
Definition: h264_ps.c:301
#define PICT_TOP_FIELD
Definition: mpegvideo.h:627
discard all frames except keyframes
Definition: avcodec.h:535
uint8_t * edge_emu_buffer
Definition: h264.h:620
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:3106
int workaround_bugs
Definition: h264.h:285
av_cold void ff_dsputil_init(DSPContext *c, AVCodecContext *avctx)
Definition: dsputil.c:2656
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
Definition: videodsp.h:61
#define MAX_PICTURE_COUNT
Definition: mpegvideo.h:64
unsigned int top_samples_available
Definition: h264.h:311
static enum PixelFormat get_pixel_format(H264Context *h)
Definition: h264.c:2798
static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, int parse_extradata)
Definition: h264.c:4216
unsigned int topleft_samples_available
Definition: h264.h:310
#define DC_128_PRED8x8
Definition: h264pred.h:76
int single_decode_warning
1 if the single thread fallback warning has already been displayed, 0 otherwise.
Definition: h264.h:565
GetBitContext gb
Definition: h264.h:259
5: top field, bottom field, top field repeated, in that order
Definition: h264.h:136
static av_always_inline void hl_decode_mb_predict_luma(H264Context *h, int mb_type, int is_h264, int simple, int transform_bypass, int pixel_shift, int *block_offset, int linesize, uint8_t *dest_y, int p)
Definition: h264.c:2095
#define VERT_LEFT_PRED
Definition: h264pred.h:45
#define HAVE_THREADS
Definition: config.h:236
int low_delay
Definition: h264.h:281
const uint8_t ff_zigzag_direct[64]
Definition: dsputil.c:59
int mb_num
Definition: h264.h:460
int size
GetBitContext * intra_gb_ptr
Definition: h264.h:413
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:1435
This structure describes decoded (raw) audio or video data.
Definition: avcodec.h:989
int qstride
QP store stride.
Definition: avcodec.h:1145
int mb_aff_frame
Definition: h264.h:373
static void copy_parameter_set(void **to, void **from, int count, int size)
Definition: h264.c:1375
int delta_poc[2]
Definition: h264.h:499
#define IS_SUB_4X4(a)
Definition: mpegvideo.h:123
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:171
int last_qscale_diff
Definition: h264.h:433
#define LEFT_MBS
Definition: h264.h:64
mpeg2/4, h264 default
Definition: avcodec.h:585
int coded_width
Bitstream width / height, may be different from width/height.
Definition: avcodec.h:1515
int cbp
Definition: h264.h:428
3: top field, bottom field, in that order
Definition: h264.h:134
const uint8_t * ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length)
Decode a network abstraction layer unit.
Definition: h264.c:352
int first_field
Definition: h264.h:377
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:70
misc image utilities
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:237
#define ER_MB_END
int weighted_bipred_idc
Definition: h264.h:213
int chroma_qp_index_offset[2]
Definition: h264.h:216
const uint8_t * bytestream_end
Definition: cabac.h:48
int left_type[LEFT_MBS]
Definition: h264.h:302
#define EDGE_TOP
Definition: dsputil.h:441
#define CHROMA422
Definition: h264.h:86
uint16_t * cbp_table
Definition: h264.h:427
int qscale_type
Definition: avcodec.h:1150
av_cold int ff_h264_decode_init(AVCodecContext *avctx)
Definition: h264.c:1300
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
Definition: h264.h:611
MMCO mmco[MAX_MMCO_COUNT]
memory management control operations buffer.
Definition: h264.h:531
static void align_get_bits(GetBitContext *s)
Definition: get_bits.h:412
void * hwaccel_picture_private
hardware accelerator private data (Libav-allocated)
Definition: avcodec.h:1280
7: frame doubling
Definition: h264.h:138
void ff_er_frame_end(ERContext *s)
static av_always_inline void mc_part_weighted(H264Context *h, int n, int square, int height, int delta, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int x_offset, int y_offset, qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put, h264_weight_func luma_weight_op, h264_weight_func chroma_weight_op, h264_biweight_func luma_weight_avg, h264_biweight_func chroma_weight_avg, int list0, int list1, int pixel_shift, int chroma_idc)
Definition: h264.c:801
#define MAX_PPS_COUNT
Definition: h264.h:41
Sequence parameter set.
Definition: h264.h:145
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2079
static const uint8_t field_scan8x8[64]
Definition: h264data.h:115
static void init_dequant_tables(H264Context *h)
Definition: h264.c:1045
int coded_picture_number
Definition: h264.h:280
int mb_y
Definition: h264.h:454
void(* draw_edges)(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides)
Definition: dsputil.h:439
int bitstream_restriction_flag
Definition: h264.h:182
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:154
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264.h:211
#define FMO
Definition: h264.h:51
int num
numerator
Definition: rational.h:44
void(* h264_idct_add16intra)(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[15 *8])
Definition: h264dsp.h:100
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: avcodec.h:1225
int needs_realloc
Picture needs to be reallocated (eg due to a frame size change)
Definition: mpegvideo.h:150
void avcodec_set_dimensions(AVCodecContext *s, int width, int height)
Definition: utils.c:149
int size
Definition: avcodec.h:916
int16_t(*[2] motion_val_base)[2]
Definition: mpegvideo.h:103
int outputed_poc
Definition: h264.h:525
int chroma_x_shift
Definition: h264.h:275
HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the b...
Definition: pixfmt.h:123
#define CONFIG_SVQ3_DECODER
Definition: config.h:492
const uint8_t * buffer
Definition: get_bits.h:53
Picture parameter set.
Definition: h264.h:205
void * thread_opaque
used by multithreading to store frame-specific info
Definition: avcodec.h:1294
static av_always_inline int dctcoef_get(DCTELEM *mb, int high_bit_depth, int index)
Definition: h264.c:2077
int field_picture
whether or not the picture was encoded in separate fields
Definition: mpegvideo.h:140
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1724
#define pass
Definition: fft.c:334
static void release_unused_pictures(H264Context *h, int remove_current)
Definition: h264.c:134
int flags
Definition: h264.h:284
const uint8_t * field_scan8x8_q0
Definition: h264.h:449
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1533
int frame_mbs_only_flag
Definition: h264.h:161
int mb_height
Definition: h264.h:458
int16_t * dc_val_base
Definition: h264.h:621
int is_avc
Used to parse AVC variant of h264.
Definition: h264.h:481
static av_always_inline void fill_filter_caches_inter(H264Context *h, int mb_type, int top_xy, int left_xy[LEFT_MBS], int top_type, int left_type[LEFT_MBS], int mb_xy, int list)
Definition: h264.c:3659
int mmco_index
Definition: h264.h:532
static int decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264.c:1412
uint8_t zigzag_scan8x8_cavlc[64]
Definition: h264.h:441
DSPContext dsp
Definition: h264.h:254
mpegvideo header.
int ff_h264_get_profile(SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
Definition: h264.c:2733
uint32_t dequant8_buffer[6][QP_MAX_NUM+1][64]
Definition: h264.h:362
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264.h:327
H264Context.
Definition: h264.h:252
discard all
Definition: avcodec.h:536
static int context_init(H264Context *h)
Init context Allocate buffers which are not shared amongst multiple threads.
Definition: h264.c:1154
int mmco_reset
h264 MMCO_RESET set this 1. Reordering code must not mix pictures before and after MMCO_RESET...
Definition: mpegvideo.h:133
#define IS_INTRA4x4(a)
Definition: mpegvideo.h:106
int prev_poc_msb
poc_msb of the last reference pic for POC type 0
Definition: h264.h:501
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:2711
uint32_t num_units_in_tick
Definition: h264.h:178
struct H264Context H264Context
H264Context.
4: bottom field, top field, in that order
Definition: h264.h:135
static int get_lowest_part_list_y(H264Context *h, Picture *pic, int n, int height, int y_offset, int list)
Definition: h264.c:477
int profile
profile
Definition: avcodec.h:2815
#define HOR_PRED8x8
Definition: h264pred.h:69
int stride
Definition: mace.c:144
AVCodec.
Definition: avcodec.h:2960
int frame_start_found
Definition: parser.h:34
int picture_structure
Definition: h264.h:376
void ff_svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp, int dc)
Definition: svq3.c:183
#define AV_WN32A(p, v)
Definition: intreadwrite.h:458
int slice_type_nos
S free slice type (SI/SP are remapped to I/P)
Definition: h264.h:369
#define AV_COPY32(d, s)
Definition: intreadwrite.h:506
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:66
static const uint8_t rem6[QP_MAX_NUM+1]
Definition: h264.c:50
#define IS_INTRA_PCM(a)
Definition: mpegvideo.h:112
int profile_idc
Definition: h264.h:146
unsigned current_sps_id
id of the current SPS
Definition: h264.h:353
static av_always_inline void mc_dir_part(H264Context *h, Picture *pic, int n, int square, int height, int delta, int list, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int src_x_offset, int src_y_offset, qpel_mc_func *qpix_op, h264_chroma_mc_func chroma_op, int pixel_shift, int chroma_idc)
Definition: h264.c:641
static av_always_inline uint32_t pack16to32(int a, int b)
Definition: h264.h:794
static const uint8_t zigzag_scan[16]
Definition: h264data.h:55
int mb_skip_run
Definition: h264.h:457
void ff_h264_init_cabac_states(H264Context *h)
Definition: h264_cabac.c:1262
#define USES_LIST(a, list)
does this mb use listX, note does not work if subMBs
Definition: mpegvideo.h:127
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1465
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:151
#define CONFIG_GRAY
Definition: config.h:277
Switching Intra.
Definition: avutil.h:249
uint8_t * chroma_pred_mode_table
Definition: h264.h:432
#define IS_DIR(a, part, list)
Definition: mpegvideo.h:126
static const uint8_t div6[QP_MAX_NUM+1]
Definition: h264.c:56
enum AVDiscard skip_frame
Definition: avcodec.h:2907
int ff_h264_decode_ref_pic_list_reordering(H264Context *h)
Definition: h264_refs.c:192
#define MAX_THREADS
Definition: mpegvideo.h:62
static const uint8_t golomb_to_pict_type[5]
Definition: h264data.h:38
#define AV_RN32A(p)
Definition: intreadwrite.h:446
struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2622
int long_ref
1->long term reference 0->short term reference
Definition: mpegvideo.h:136
static av_always_inline void hl_decode_mb_idct_luma(H264Context *h, int mb_type, int is_h264, int simple, int transform_bypass, int pixel_shift, int *block_offset, int linesize, uint8_t *dest_y, int p)
Definition: h264.c:2210
static int decode_init_thread_copy(AVCodecContext *avctx)
Definition: h264.c:1390
int resync_mb_y
Definition: h264.h:456
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: utils.c:72
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
#define IS_8x8DCT(a)
Definition: h264.h:94
uint8_t scaling_matrix4[6][16]
Definition: h264.h:221
const uint8_t * bytestream
Definition: cabac.h:47
int ref2frm[MAX_SLICES][2][64]
reference to frame number lists, used in the loop filter, the first 2 are for -2,-1 ...
Definition: h264.h:408
void(* emulated_edge_mc)(uint8_t *buf, const uint8_t *src, ptrdiff_t linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:50
int deblocking_filter_parameters_present
deblocking_filter_parameters_present_flag
Definition: h264.h:217
static int decode(MimicContext *ctx, int quality, int num_coeffs, int is_iframe)
Definition: mimic.c:228
#define IS_INTER(a)
Definition: mpegvideo.h:110
DCTELEM mb_luma_dc[3][16 *2]
Definition: h264.h:417
uint32_t(*[6] dequant4_coeff)[16]
Definition: h264.h:363
uint8_t
int prev_frame_num_offset
for POC type 2
Definition: h264.h:504
int use_weight
Definition: h264.h:382
int mb_uvlinesize
Definition: h264.h:348
int full_range
Definition: h264.h:172
void(* h264_luma_dc_dequant_idct)(DCTELEM *output, DCTELEM *input, int qmul)
Definition: h264dsp.h:103
#define IS_8X16(a)
Definition: mpegvideo.h:118
int offset_for_non_ref_pic
Definition: h264.h:154
float delta
#define PICT_FRAME
Definition: mpegvideo.h:629
void ff_h264_reset_sei(H264Context *h)
Reset SEI values at the beginning of the frame.
Definition: h264_sei.c:40
Definition: h264.h:108
int data_partitioning
Definition: h264.h:279
int luma_weight[48][2][2]
Definition: h264.h:387
void(* qpel_mc_func)(uint8_t *dst, uint8_t *src, int stride)
Definition: dsputil.h:144
enum AVColorPrimaries color_primaries
Definition: h264.h:174
av_cold void ff_h264_decode_init_vlc(void)
Definition: h264_cavlc.c:326
void(* h264_idct_add)(uint8_t *dst, DCTELEM *block, int stride)
Definition: h264dsp.h:82
DCTELEM mb[16 *48 *2]
as a dct coefficient is int32_t in high depth, we need to reserve twice the space.
Definition: h264.h:416
static int find_unused_picture(H264Context *h)
Definition: h264.c:240
AVCodec ff_h264_decoder
Definition: h264.c:4667
Picture ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264.h:405
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
#define ER_MB_ERROR
int cabac
entropy_coding_mode_flag
Definition: h264.h:207
int mb_xy
Definition: h264.h:461
Definition: h264.h:106
qpel_mc_func(* qpel_put)[16]
Definition: mpegvideo.h:199
#define LUMA_DC_BLOCK_INDEX
Definition: h264.h:774
int picture_range_end
Definition: h264.h:266
#define DIAG_DOWN_LEFT_PRED
Definition: h264pred.h:41
#define emms_c()
Definition: internal.h:145
uint8_t motion_subsample_log2
log2 of the size of the block which a single vector in motion_val represents: (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2)
Definition: avcodec.h:1302
static const uint8_t dequant8_coeff_init[6][6]
Definition: h264data.h:263
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264.h:402
unsigned int crop_right
frame_cropping_rect_right_offset
Definition: h264.h:166
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1454
void ff_h264_fill_mbaff_ref_list(H264Context *h)
Definition: h264_refs.c:305
int uvlinesize
Definition: h264.h:274
#define TOP_DC_PRED
Definition: h264pred.h:50
const char data[16]
Definition: mxf.c:66
int height
Definition: h264.h:273
int mb_x
Definition: h264.h:454
int transform_bypass
qpprime_y_zero_transform_bypass_flag
Definition: h264.h:149
int picture_count
Definition: h264.h:265
uint8_t * data
Definition: avcodec.h:915
static int init_poc(H264Context *h)
Definition: h264.c:2500
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:192
static int decode_rbsp_trailing(H264Context *h, const uint8_t *src)
Identify the exact end of the bitstream.
Definition: h264.c:462
int left_mb_xy[LEFT_MBS]
Definition: h264.h:297
int top_mb_xy
Definition: h264.h:295
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:43
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_...
Definition: pixfmt.h:78
int ff_h264_get_slice_type(const H264Context *h)
Reconstruct bitstream slice_type.
Definition: h264.c:3641
int redundant_pic_cnt_present
redundant_pic_cnt_present_flag
Definition: h264.h:219
int chroma_y_shift
Definition: h264.h:275
static const uint8_t dequant8_coeff_init_scan[16]
Definition: h264data.h:259
int interlaced_frame
The content of the picture is interlaced.
Definition: avcodec.h:1232
#define MAX_DELAYED_PIC_COUNT
Definition: h264.h:45
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
#define MB_MBAFF
Definition: h264.h:60
Picture * next_output_pic
Definition: h264.h:524
static av_cold void common_init(H264Context *h)
Definition: h264.c:1224
#define AV_COPY64(d, s)
Definition: intreadwrite.h:510
void * owner2
pointer to the context that allocated this picture
Definition: mpegvideo.h:149
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2086
int luma_log2_weight_denom
Definition: h264.h:384
#define IS_INTERLACED(a)
Definition: mpegvideo.h:113
static int h264_set_parameter_from_sps(H264Context *h)
Definition: h264.c:2753
int chroma_weight[48][2][2][2]
Definition: h264.h:388
static int init(AVCodecParserContext *s)
Definition: h264_parser.c:335
int last_pocs[MAX_DELAYED_PIC_COUNT]
Definition: h264.h:523
static void init_dequant4_coeff_table(H264Context *h)
Definition: h264.c:1019
#define r
Definition: input.c:51
static int pic_is_unused(H264Context *h, Picture *pic)
Definition: h264.c:230
int width
Definition: h264.h:273
void(* pred8x8l_add[2])(uint8_t *pix, const DCTELEM *block, ptrdiff_t stride)
Definition: h264pred.h:102
const uint8_t * zigzag_scan8x8_cavlc_q0
Definition: h264.h:447
H.264 / AVC / MPEG4 part10 codec.
int frame_num
Definition: h264.h:500
static void free_picture(H264Context *h, Picture *pic)
Definition: h264.c:116
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:547
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
Definition: pthread.c:702
enum AVCodecID id
Definition: avcodec.h:2974
int mb_aff
mb_adaptive_frame_field_flag
Definition: h264.h:162
enum AVColorTransferCharacteristic color_trc
Definition: h264.h:175
H264PredContext hpc
Definition: h264.h:309
void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, const int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:399
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:311
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1634
static int get_ue_golomb(GetBitContext *gb)
read unsigned exp golomb code.
Definition: golomb.h:53
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:75
static int decode_slice_header(H264Context *h, H264Context *h0)
Decode a slice header.
Definition: h264.c:2956
static int get_consumed_bytes(int pos, int buf_size)
Return the number of bytes consumed for building the current frame.
Definition: h264.c:4527
const uint8_t * zigzag_scan_q0
Definition: h264.h:445
MotionEstContext me
Definition: h264.h:257
int poc_type
pic_order_cnt_type
Definition: h264.h:151
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
static void free_frame_buffer(H264Context *h, Picture *pic)
Definition: h264.c:110
int context_initialized
Definition: h264.h:283
Multithreading support functions.
void av_log_ask_for_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message asking for a sample.
void(* h264_biweight_func)(uint8_t *dst, uint8_t *src, int stride, int height, int log2_denom, int weightd, int weights, int offset)
Definition: h264dsp.h:36
static const uint16_t mask[17]
Definition: lzw.c:38
void ff_h264_hl_decode_mb(H264Context *h)
Definition: h264.c:2292
int reference
is this picture used as reference The values for this are the same as the MpegEncContext.picture_structure variable, that is 1->top field, 2->bottom field, 3->frame/both fields.
Definition: avcodec.h:1132
ParseContext parse_context
Definition: h264.h:258
int nal_unit_type
Definition: h264.h:474
int use_weight_chroma
Definition: h264.h:383
int num_reorder_frames
Definition: h264.h:183
#define AV_RB16
Definition: intreadwrite.h:53
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it...
Definition: internal.h:63
discard all bidirectional frames
Definition: avcodec.h:534
void ff_h264_direct_ref_list_init(H264Context *const h)
Definition: h264_direct.c:104
#define DC_128_PRED
Definition: h264pred.h:51
#define LEFT_DC_PRED
Definition: h264pred.h:49
static av_always_inline int get_chroma_qp(H264Context *h, int t, int qscale)
Get the chroma qp.
Definition: h264.h:815
GetBitContext * inter_gb_ptr
Definition: h264.h:414
void ff_svq3_luma_dc_dequant_idct_c(DCTELEM *output, DCTELEM *input, int qp)
Definition: svq3.c:148
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:1460
#define ALZHEIMER_DC_L0T_PRED8x8
Definition: h264pred.h:79
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:88
Picture * delayed_pic[MAX_DELAYED_PIC_COUNT+2]
Definition: h264.h:522
#define IS_SUB_8X4(a)
Definition: mpegvideo.h:121
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:2752
int mb_field_decoding_flag
Definition: h264.h:374
static void flush_dpb(AVCodecContext *avctx)
Definition: h264.c:2471
static int h264_slice_header_init(H264Context *, int)
Definition: h264.c:2848
int capabilities
Codec capabilities.
Definition: avcodec.h:2979
int emu_edge_width
Definition: h264.h:350
uint8_t * base[AV_NUM_DATA_POINTERS]
pointer to the first allocated byte of the picture.
Definition: avcodec.h:1073
void(* decode_mb)(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
PPS pps
current pps
Definition: h264.h:359
uint8_t(*[2] mvd_table)[2]
Definition: h264.h:434
int prev_interlaced_frame
Complement sei_pic_struct SEI_PIC_STRUCT_TOP_BOTTOM and SEI_PIC_STRUCT_BOTTOM_TOP indicate interlaced...
Definition: h264.h:583
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1434
struct Picture * next_pic
int direct_spatial_mv_pred
Definition: h264.h:391
0: frame
Definition: h264.h:131
simple assert() macros that are a bit more flexible than ISO C assert().
void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:36
int weighted_pred
weighted_pred_flag
Definition: h264.h:212
int overread_index
the index into ParseContext.buffer of the overread bytes
Definition: parser.h:36
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:146
const char * name
Name of the codec implementation.
Definition: avcodec.h:2967
#define T(x)
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, int srcStride, int h, int x, int y)
Definition: dsputil.h:145
ERContext er
Definition: h264.h:260
static void predict_field_decoding_flag(H264Context *h)
Definition: h264.c:3957
#define IS_INTRA(a)
Definition: mpegvideo.h:109
int ff_h264_decode_mb_cavlc(H264Context *h)
Decode a macroblock.
Definition: h264_cavlc.c:693
static int square(int x)
Definition: roqvideoenc.c:111
uint8_t * list_counts
Array of list_count per MB specifying the slice type.
Definition: h264.h:404
static void er_add_slice(H264Context *h, int startx, int starty, int endx, int endy, int status)
Definition: h264.c:4001
int delta_pic_order_always_zero_flag
Definition: h264.h:153
uint8_t * mbintra_table
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:244
int new
flag to keep track if the decoder context needs re-init due to changed SPS
Definition: h264.h:199
int * mb_index2xy
int dct_bits
Size of DCT coefficients.
Definition: dsputil.h:198
int offset_for_top_to_bottom_field
Definition: h264.h:155
#define IN_RANGE(a, b, size)
Definition: h264.c:1353
int priv_data_size
Size of HW accelerator private data.
Definition: avcodec.h:3139
int off
Definition: dsputil_bfin.c:28
uint8_t zigzag_scan8x8[64]
Definition: h264.h:440
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:69
static const uint8_t scan8[16 *3+3]
Definition: h264.h:778
void(* add_pixels8)(uint8_t *pixels, DCTELEM *block, int line_size)
Definition: dsputil.h:206
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:128
the normal 2^n-1 "JPEG" YUV ranges
Definition: avcodec.h:574
uint8_t * error_status_table
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:36
uint8_t * direct_table
Definition: h264.h:436
static av_always_inline void xchg_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int xchg, int chroma444, int simple, int pixel_shift)
Definition: h264.c:1989
uint8_t scaling_matrix8[6][64]
Definition: h264.h:222
static void copy_picture_range(Picture **to, Picture **from, int count, H264Context *new_base, H264Context *old_base)
Definition: h264.c:1360
int nal_length_size
Number of bytes used for nal length (1, 2 or 4)
Definition: h264.h:482
useful rectangle filling function
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: avcodec.h:573
int refs
number of reference frames
Definition: avcodec.h:2022
CABACContext cabac
Cabac.
Definition: h264.h:423
unsigned int left_samples_available
Definition: h264.h:313
#define IS_8X8(a)
Definition: mpegvideo.h:119
#define FRAME_MBAFF
Definition: h264.h:62
static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
Definition: h264.c:76
int ref_frame_count
num_ref_frames
Definition: h264.h:157
Picture * long_ref[32]
Definition: h264.h:520
enum AVPixelFormat * pix_fmts
array of supported pixel formats, or NULL if unknown, array is terminated by -1
Definition: avcodec.h:2981
static DCTELEM block[64]
Definition: dct-test.c:169
enum AVPictureType pict_type
Picture type of the frame, see ?_TYPE below.
Definition: avcodec.h:1065
void ff_h264_filter_mb(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
struct Picture * last_pic
int frame_num_offset
for POC type 2
Definition: h264.h:503
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2602
int x264_build
Definition: h264.h:452
uint32_t * mb2br_xy
Definition: h264.h:344
uint8_t * er_temp_buffer
int overread
the number of bytes which where irreversibly read from the next frame
Definition: parser.h:35
uint16_t * slice_table
slice_table_base + 2*mb_stride + 1
Definition: h264.h:367
int last_index
Definition: parser.h:31
uint8_t field_scan8x8_cavlc[64]
Definition: h264.h:444
int poc_cycle_length
num_ref_frames_in_pic_order_cnt_cycle
Definition: h264.h:156
int colour_description_present_flag
Definition: h264.h:173
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_...
Definition: pixfmt.h:77
void ff_vdpau_h264_picture_complete(H264Context *h)
Definition: vdpau.c:144
int poc
h264 frame POC
Definition: mpegvideo.h:131
AVRational sar
Definition: h264.h:170
int redundant_pic_count
Definition: h264.h:517
static const uint8_t field_scan8x8_cavlc[64]
Definition: h264data.h:134
int width
picture width / height.
Definition: avcodec.h:1508
int long_ref_count
number of actual long term references
Definition: h264.h:535
Picture.
Definition: mpegvideo.h:95
qpel_mc_func avg_2tap_qpel_pixels_tab[4][16]
Definition: dsputil.h:328
void(* pred4x4_add[2])(uint8_t *pix, const DCTELEM *block, ptrdiff_t stride)
Definition: h264pred.h:100
int cabac_init_idc
Definition: h264.h:538
static void implicit_weight_table(H264Context *h, int field)
Initialize implicit_weight table.
Definition: h264.c:2378
int size_in_bits
Definition: get_bits.h:55
SPS sps
current sps
Definition: h264.h:354
int32_t
PPS * pps_buffers[MAX_PPS_COUNT]
Definition: h264.h:489
static av_always_inline void prefetch_motion(H264Context *h, int list, int pixel_shift, int chroma_idc)
Definition: h264.c:908
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:2058
DSPContext * dsp
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color)
Definition: avplay.c:401
#define MB_FIELD
Definition: h264.h:61
#define MAX_SPS_COUNT
Definition: h264.h:40
int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length)
Decode PPS.
Definition: h264_ps.c:490
int emu_edge_height
Definition: h264.h:351
Context Adaptive Binary Arithmetic Coder inline functions.
int level
level
Definition: avcodec.h:2885
int init_qp
pic_init_qp_minus26 + 26
Definition: h264.h:214
int frame_num
h264 frame_num (raw frame_num from slice header)
Definition: mpegvideo.h:132
int mmco_reset
Definition: h264.h:533
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: h264.h:307
uint8_t * bipred_scratchpad
Definition: h264.h:619
int poc_lsb
Definition: h264.h:496
int max_pic_num
max_frame_num or 2 * max_frame_num for field pics.
Definition: h264.h:515
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1474
static const uint8_t field_scan[16]
Definition: h264data.h:62
int ff_generate_sliding_window_mmcos(H264Context *h, int first_slice)
Definition: h264_refs.c:493
#define EDGE_BOTTOM
Definition: dsputil.h:442
static int decode_slice(struct AVCodecContext *avctx, void *arg)
Definition: h264.c:4010
unsigned int topright_samples_available
Definition: h264.h:312
#define AV_WN16A(p, v)
Definition: intreadwrite.h:454
const uint8_t * zigzag_scan8x8_q0
Definition: h264.h:446
int curr_pic_num
frame_num for frames or 2 * frame_num + 1 for field pics.
Definition: h264.h:510
int slice_type
Definition: h264.h:368
static void init_scan_tables(H264Context *h)
initialize scan tables
Definition: h264.c:2580
Definition: h264.h:103
static int av_unused get_cabac_terminate(CABACContext *c)
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output...
Definition: mpegvideo.h:88
void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur, Picture *last, int y, int h, int picture_structure, int first_field, int draw_edges, int low_delay, int v_edge_pos, int h_edge_pos)
Definition: mpegvideo.c:2458
int top_type
Definition: h264.h:300
static void loop_filter(H264Context *h, int start_x, int end_x)
Definition: h264.c:3888
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
uint32_t dequant4_buffer[6][QP_MAX_NUM+1][16]
Definition: h264.h:361
void(* h264_idct8_dc_add)(uint8_t *dst, DCTELEM *block, int stride)
Definition: h264dsp.h:88
void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:45
#define PART_NOT_AVAILABLE
Definition: h264.h:330
unsigned int list_count
Definition: h264.h:403
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:2733
#define IS_16X8(a)
Definition: mpegvideo.h:117
GetBitContext intra_gb
Definition: h264.h:411
int dequant_coeff_pps
reinit tables when pps changes
Definition: h264.h:491
int pic_order_present
pic_order_present_flag
Definition: h264.h:208
static void idct_add(uint8_t *dest, int line_size, DCTELEM *block)
Definition: dsputil_sh4.c:73
Picture cur_pic
Definition: h264.h:264
SPS * sps_buffers[MAX_SPS_COUNT]
Definition: h264.h:488
void(* h264_idct8_add)(uint8_t *dst, DCTELEM *block, int stride)
Definition: h264dsp.h:84
static const int8_t mv[256][2]
Definition: 4xm.c:73
struct H264Context * thread_context[MAX_THREADS]
Definition: h264.h:544
int chroma_log2_weight_denom
Definition: h264.h:385
int bit_depth_luma
luma bit depth from sps to detect changes
Definition: h264.h:485
uint32_t * mb_type
macroblock type table mb_type_base + mb_width + 2
Definition: avcodec.h:1180
static void flush_change(H264Context *h)
Definition: h264.c:2452
short offset_for_ref_frame[256]
Definition: h264.h:181
int chroma_format_idc
chroma format from sps to detect changes
Definition: h264.h:486
VideoDSPContext vdsp
Definition: h264.h:255
int timing_info_present_flag
Definition: h264.h:177
NULL
Definition: eval.c:52
static void decode_finish_row(H264Context *h)
Draw edges and report progress for the last MB row.
Definition: h264.c:3970
struct Picture * cur_pic
int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
Execute the reference picture marking (memory management control operations).
Definition: h264_refs.c:527
int coded_picture_number
picture number in bitstream order
Definition: avcodec.h:1109
int mb_stride
Definition: h264.h:459
AVCodecContext * avctx
Definition: h264.h:253
external API header
H264 / AVC / MPEG4 part10 codec data table
int ff_h264_frame_start(H264Context *h)
Definition: h264.c:1588
Definition: h264.h:107
void ff_thread_await_progress(AVFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
Definition: pthread.c:684
int8_t * qscale_table_base
Definition: mpegvideo.h:102
int slice_alpha_c0_offset
Definition: h264.h:467
1: top field
Definition: h264.h:132
enum AVCodecID codec_id
Definition: avcodec.h:1350
static int get_ue_golomb_31(GetBitContext *gb)
read unsigned exp golomb code, constraint to a max of 31.
Definition: golomb.h:96
AVHWAccel.
Definition: avcodec.h:3055
void ff_h264_remove_all_refs(H264Context *h)
Definition: h264_refs.c:434
int prev_frame_num
frame_num of the last pic for POC type 1/2
Definition: h264.h:505
uint32_t * mb_type_base
Definition: mpegvideo.h:104
int linesize[AV_NUM_DATA_POINTERS]
Size, in bytes, of the data for each picture/channel plane.
Definition: avcodec.h:1008
static void get_lowest_part_y(H264Context *h, int refs[2][48], int n, int height, int y_offset, int list0, int list1, int *nrefs)
Definition: h264.c:490
void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, int buf_size)
Definition: vdpau.c:109
int next_outputed_poc
Definition: h264.h:526
#define LTOP
Definition: h264.h:65
int ff_h264_decode_sei(H264Context *h)
Decode SEI.
Definition: h264_sei.c:159
int poc_msb
Definition: h264.h:497
int debug
debug
Definition: avcodec.h:2568
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:55
int implicit_weight[48][48][2]
Definition: h264.h:389
int max_contexts
Max number of threads / contexts.
Definition: h264.h:557
main external API structure.
Definition: avcodec.h:1339
static void(WINAPI *cond_broadcast)(pthread_cond_t *cond)
static void close(AVCodecParserContext *s)
Definition: h264_parser.c:326
int ff_h264_check_intra4x4_pred_mode(H264Context *h)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:264
static void decode_postinit(H264Context *h, int setup_finished)
Run setup operations that must be run after slice header decoding.
Definition: h264.c:1680
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264.c:1062
2: bottom field
Definition: h264.h:133
#define QP_MAX_NUM
Definition: h264.h:96
int resync_mb_x
Definition: h264.h:455
static enum AVPixelFormat hwaccel_pixfmt_list_h264_jpeg_420[]
Definition: h264.c:62
int16_t(*[2] motion_val)[2]
motion vector table
Definition: avcodec.h:1172
static void init_dequant8_coeff_table(H264Context *h)
Definition: h264.c:992
qpel_mc_func put_2tap_qpel_pixels_tab[4][16]
Definition: dsputil.h:327
uint32_t state
contains the last few bytes in MSB order
Definition: parser.h:33
int extradata_size
Definition: avcodec.h:1455
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstre...
Definition: pixfmt.h:103
int constraint_set_flags
constraint_set[0-3]_flag
Definition: h264.h:198
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:268
SEI_PicStructType sei_pic_struct
pic_struct in picture timing SEI message
Definition: h264.h:575
Picture * short_ref[32]
Definition: h264.h:519
void ff_vdpau_h264_set_reference_frames(H264Context *h)
Definition: vdpau.c:41
void(* h264_idct_dc_add)(uint8_t *dst, DCTELEM *block, int stride)
Definition: h264dsp.h:86
int coded_height
Definition: avcodec.h:1515
void avcodec_get_frame_defaults(AVFrame *frame)
Set the fields of the given AVFrame to default values.
Definition: utils.c:604
Switching Predicted.
Definition: avutil.h:250
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:1580
int slice_beta_offset
Definition: h264.h:468
const uint8_t * field_scan8x8_cavlc_q0
Definition: h264.h:450
int index
Definition: gxfenc.c:72
uint32_t(*[6] dequant8_coeff)[64]
Definition: h264.h:364
static av_cold int h264_decode_end(AVCodecContext *avctx)
Definition: h264.c:4633
int qp_thresh
QP threshold to skip loopfilter.
Definition: h264.h:271
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2072
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:2065
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:372
int pixel_shift
0 for 8-bit H264, 1 for high-bit-depth H264
Definition: h264.h:268
int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb, int first_slice)
Definition: h264_refs.c:685
static void await_references(H264Context *h)
Wait until all reference frames are available for MC operations.
Definition: h264.c:533
AVHWAccel * ff_find_hwaccel(enum AVCodecID codec_id, enum AVPixelFormat pix_fmt)
Return the hardware accelerated codec for codec codec_id and pixel format pix_fmt.
Definition: utils.c:2048
void(* h264_weight_func)(uint8_t *block, int stride, int height, int log2_denom, int weight, int offset)
Definition: h264dsp.h:34
int8_t * ref_index[2]
motion reference frame index the order in which these are stored can depend on the codec...
Definition: avcodec.h:1195
unsigned int sps_id
Definition: h264.h:206
#define CABAC
Definition: h264.h:83
int log2_max_poc_lsb
log2_max_pic_order_cnt_lsb_minus4
Definition: h264.h:152
6: bottom field, top field, bottom field repeated, in that order
Definition: h264.h:137
short DCTELEM
Definition: dsputil.h:39
AVCodecContext * avctx
void(* h264_idct8_add4)(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[15 *8])
Definition: h264dsp.h:94
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer. ...
Definition: pixfmt.h:132
void ff_h264_draw_horiz_band(H264Context *h, int y, int height)
Definition: h264.c:102
int block_offset[2 *(16 *3)]
block_offset[ 0..23] for frame macroblocks block_offset[24..47] for field macroblocks ...
Definition: h264.h:341
uint32_t time_scale
Definition: h264.h:179
int field_poc[2]
h264 top/bottom POC
Definition: mpegvideo.h:130
int transform_8x8_mode
transform_8x8_mode_flag
Definition: h264.h:220
static int pred_weight_table(H264Context *h)
Definition: h264.c:2311
int pic_struct_present_flag
Definition: h264.h:189
Definition: h264.h:101
uint8_t zigzag_scan[16]
Definition: h264.h:439
av_cold void ff_h264_free_context(H264Context *h)
Free any data that may have been allocated in the H264 context like SPS, PPS etc. ...
Definition: h264.c:4620
qpel_mc_func put_h264_qpel_pixels_tab[4][16]
Definition: dsputil.h:324
#define FIELD_OR_MBAFF_PICTURE
Definition: h264.h:80
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: avcodec.h:997
int8_t * qscale_table
QP table.
Definition: avcodec.h:1139
#define IS_INTRA16x16(a)
Definition: mpegvideo.h:107
qpel_mc_func(* qpel_avg)[16]
Definition: mpegvideo.h:200
void ff_h264_filter_mb_fast(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
#define LBOT
Definition: h264.h:66
Definition: vf_drawbox.c:36
void(* clear_blocks)(DCTELEM *blocks)
Definition: dsputil.h:219
#define AV_ZERO128(d)
Definition: intreadwrite.h:542
int height
Definition: gxfenc.c:72
static void idr(H264Context *h)
instantaneous decoder refresh.
Definition: h264.c:2442
static int field_end(H264Context *h, int in_setup)
Definition: h264.c:2614
hardware decoding through VDA
Definition: pixfmt.h:153
discard all non reference
Definition: avcodec.h:533
int is_complex
Definition: h264.h:463
int slice_context_count
Definition: h264.h:559
int mb_height
pic_height_in_map_units_minus1 + 1
Definition: h264.h:160
qpel_mc_func avg_h264_qpel_pixels_tab[4][16]
Definition: dsputil.h:325
Picture * DPB
Definition: h264.h:262
uint8_t * rbsp_buffer[2]
Definition: h264.h:475
int qscale
Definition: h264.h:277
static const uint8_t dequant4_coeff_init[6][3]
Definition: h264data.h:250
#define tprintf(p,...)
Definition: get_bits.h:613
common internal api header.
#define AV_COPY128(d, s)
Definition: intreadwrite.h:514
#define FIELD_PICTURE
Definition: h264.h:63
#define MAX_SLICES
Definition: dxva2_mpeg2.c:25
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
Definition: internal.h:60
static av_cold void flush(AVCodecContext *avctx)
Flush (reset) the frame ID after seeking.
Definition: alsdec.c:1772
uint16_t * slice_table_base
Definition: h264.h:493
int log2_max_frame_num
log2_max_frame_num_minus4 + 4
Definition: h264.h:150
void ff_vdpau_h264_picture_start(H264Context *h)
Definition: vdpau.c:126
int16_t * dc_val[3]
H.264 / AVC / MPEG4 part10 motion vector predicion.
Bi-dir predicted.
Definition: avutil.h:247
AVProfile.
Definition: avcodec.h:2948
int index
Definition: parser.h:30
static int execute_decode_slices(H264Context *h, int context_count)
Call decode_slice() for each context.
Definition: h264.c:4177
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_...
Definition: pixfmt.h:79
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:2517
int cur_chroma_format_idc
Definition: h264.h:618
enum AVDiscard skip_loop_filter
Definition: avcodec.h:2893
int den
denominator
Definition: rational.h:45
#define CONFIG_SMALL
Definition: config.h:316
int chroma_qp[2]
Definition: h264.h:269
int sei_ct_type
Bit set of clock types for fields/frames in picture timing SEI message.
Definition: h264.h:590
uint16_t sub_mb_type[4]
Definition: h264.h:379
int bit_depth_luma
bit_depth_luma_minus8 + 8
Definition: h264.h:195
DSP utils.
int intra16x16_pred_mode
Definition: h264.h:292
void * priv_data
Definition: avcodec.h:1382
int prev_poc_lsb
poc_lsb of the last reference pic for POC type 0
Definition: h264.h:502
#define IS_SUB_4X8(a)
Definition: mpegvideo.h:122
int linesize
Definition: h264.h:274
Definition: h264.h:102
void ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Definition: cabac.c:124
static av_always_inline void backup_mb_border(H264Context *h, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple)
Definition: h264.c:1893
Picture * cur_pic_ptr
Definition: h264.h:263
Definition: h264.h:105
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:2773
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: avcodec.h:1239
#define IS_SUB_8X8(a)
Definition: mpegvideo.h:120
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: h264.c:4537
const uint16_t ff_h264_mb_sizes[4]
Definition: h264.c:48
#define IS_DIRECT(a)
Definition: mpegvideo.h:114
void(* h264_idct_add16)(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[15 *8])
Definition: h264dsp.h:91
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264.h:320
int ff_h264_decode_extradata(H264Context *h)
Definition: h264.c:1246
uint8_t(*[2] top_borders)[(16 *3)*2]
Definition: h264.h:314
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1390
void ff_init_cabac_states(CABACContext *c)
Definition: cabac.c:139
static int fill_filter_caches(H264Context *h, int mb_type)
Definition: h264.c:3743
void(* add_pixels4)(uint8_t *pixels, DCTELEM *block, int line_size)
Definition: dsputil.h:207
#define EDGE_WIDTH
Definition: dsputil.h:440
int key_frame
1 -> keyframe, 0-> not
Definition: avcodec.h:1058
#define AV_ZERO32(d)
Definition: intreadwrite.h:534
int mb_width
Definition: h264.h:458
enum AVPictureType pict_type
Definition: h264.h:567
int current_slice
current slice number, used to initalize slice_num of each thread/context
Definition: h264.h:549
int mb_width
pic_width_in_mbs_minus1 + 1
Definition: h264.h:159
int flags2
CODEC_FLAG2_*.
Definition: avcodec.h:1441
#define IS_16X16(a)
Definition: mpegvideo.h:116
#define AV_RN16A(p)
Definition: intreadwrite.h:442
uint32_t * mb2b_xy
Definition: h264.h:343
int slice_type_fixed
Definition: h264.h:370
struct AVFrame f
Definition: mpegvideo.h:96
int delta_poc_bottom
Definition: h264.h:498
const uint8_t * field_scan_q0
Definition: h264.h:448
static void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.h:144
static void free_tables(H264Context *h, int free_rbsp)
Definition: h264.c:934
int ff_h264_fill_default_ref_list(H264Context *h)
Fill the default_ref_list.
Definition: h264_refs.c:110
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
Definition: h264.c:1355
H264DSPContext h264dsp
Definition: h264.h:256
void ff_er_frame_start(ERContext *s)
uint8_t field_scan8x8[64]
Definition: h264.h:443
#define copy_fields(to, from, start_field, end_field)
Definition: h264.c:1404
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
Definition: h264.h:612
uint8_t * temp
Definition: mpegvideo.h:164
static av_always_inline void mc_part_std(H264Context *h, int n, int square, int height, int delta, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int x_offset, int y_offset, qpel_mc_func *qpix_put, h264_chroma_mc_func chroma_put, qpel_mc_func *qpix_avg, h264_chroma_mc_func chroma_avg, int list0, int list1, int pixel_shift, int chroma_idc)
Definition: h264.c:754
int8_t * intra4x4_pred_mode
Definition: h264.h:308
static av_always_inline void dctcoef_set(DCTELEM *mb, int high_bit_depth, int index, int value)
Definition: h264.c:2086
void ff_thread_report_progress(AVFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread.c:666
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread.c:921
int mb_linesize
may be equal to s->linesize or s->linesize * 2, for mbaff
Definition: h264.h:347
static int clone_slice(H264Context *dst, H264Context *src)
Replicate H264 "master" context to thread contexts.
Definition: h264.c:2700
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:3119
8: frame tripling
Definition: h264.h:139
int deblocking_filter
disable_deblocking_filter_idc with 1 <-> 0
Definition: h264.h:466
static int alloc_scratch_buffers(H264Context *h, int linesize)
Definition: h264.c:148
#define AV_RN64A(p)
Definition: intreadwrite.h:450
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: avcodec.h:1028
#define LIST_NOT_USED
Definition: h264.h:329
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:3130
static int alloc_picture(H264Context *h, Picture *pic)
Definition: h264.c:173
uint8_t(* non_zero_count)[48]
Definition: h264.h:322
unsigned int crop_bottom
frame_cropping_rect_bottom_offset
Definition: h264.h:168
exp golomb vlc stuff
uint8_t * mbskip_table
int slice_num
Definition: h264.h:366
AVPixelFormat
Pixel format.
Definition: pixfmt.h:63
This structure stores compressed data.
Definition: avcodec.h:898
int sei_recovery_frame_cnt
recovery_frame_cnt from SEI message
Definition: h264.h:609
static const uint8_t zigzag_scan8x8_cavlc[64]
Definition: h264data.h:96
int droppable
Definition: h264.h:278
int level_idc
Definition: h264.h:147
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:158
int strict_std_compliance
strictly follow the standard (MPEG4, ...).
Definition: avcodec.h:2547
void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
Wrapper around release_buffer() frame-for multithreaded codecs.
Definition: pthread.c:979
void(* pred8x8l[9+3])(uint8_t *src, int topleft, int topright, ptrdiff_t stride)
Definition: h264pred.h:95
#define STARTCODE_TEST
int nal_ref_idc
Definition: h264.h:473
void(* pred16x16_add[3])(uint8_t *pix, const int *block_offset, const DCTELEM *block, ptrdiff_t stride)
Definition: h264pred.h:107
uint8_t field_scan[16]
Definition: h264.h:442
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:69
int picture_range_start
Definition: h264.h:266
int b_stride
Definition: h264.h:345
Predicted.
Definition: avutil.h:246
unsigned int rbsp_buffer_size[2]
Definition: h264.h:476
#define CHROMA444
Definition: h264.h:87
Context Adaptive Binary Arithmetic Coder.
int8_t ref_cache[2][5 *8]
Definition: h264.h:328
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264.h:375
int short_ref_count
number of actual short term references
Definition: h264.h:536
static const AVProfile profiles[]
Definition: h264.c:4650
if(!(ptr_align%ac->ptr_align)&&samples_align >=aligned_len)
enum AVColorSpace colorspace
Definition: h264.h:176