svq3.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2003 The Libav Project
3  *
4  * This file is part of Libav.
5  *
6  * Libav is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * Libav is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with Libav; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * How to use this decoder:
23  * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24  * have stsd atoms to describe media trak properties. A stsd atom for a
25  * video trak contains 1 or more ImageDescription atoms. These atoms begin
26  * with the 4-byte length of the atom followed by the codec fourcc. Some
27  * decoders need information in this atom to operate correctly. Such
28  * is the case with SVQ3. In order to get the best use out of this decoder,
29  * the calling app must make the SVQ3 ImageDescription atom available
30  * via the AVCodecContext's extradata[_size] field:
31  *
32  * AVCodecContext.extradata = pointer to ImageDescription, first characters
33  * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34  * AVCodecContext.extradata_size = size of ImageDescription atom memory
35  * buffer (which will be the same as the ImageDescription atom size field
36  * from the QT file, minus 4 bytes since the length is missing)
37  *
38  * You will know you have these parameters passed correctly when the decoder
39  * correctly decodes this file:
40  * http://samples.libav.org/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41  */
42 #include "internal.h"
43 #include "dsputil.h"
44 #include "avcodec.h"
45 #include "mpegvideo.h"
46 #include "h264.h"
47 
48 #include "h264data.h" // FIXME FIXME FIXME
49 
50 #include "h264_mvpred.h"
51 #include "golomb.h"
52 #include "rectangle.h"
53 #include "vdpau_internal.h"
54 
55 #if CONFIG_ZLIB
56 #include <zlib.h>
57 #endif
58 
59 #include "svq1.h"
60 
/* Decoder private context.
 * NOTE(review): all other members of this struct (the embedded H264Context,
 * picture pointers, mode flags, edge positions, ...) were lost when this
 * listing was extracted — only watermark_key survived.  Restore the missing
 * fields from the original file; the functions below reference many of
 * them through `s->...`. */
typedef struct {
    uint32_t watermark_key;  /* per-file XOR key used to de-watermark slice data */
} SVQ3Context;
82 
/* Motion-compensation precision / prediction modes signalled per inter MB.
 * PREDICT_MODE derives motion from the co-located MVs of the next picture
 * (see the PREDICT_MODE branch in svq3_mc_dir). */
#define FULLPEL_MODE 1
#define HALFPEL_MODE 2
#define THIRDPEL_MODE 3
#define PREDICT_MODE 4
87 
/* dual scan (from some older h264 draft)
 *  o-->o-->o   o
 *          |  /|
 *  o   o   o / o
 *  | / |   |/  |
 *  o   o   o   o
 *    /
 *  o-->o-->o-->o
 */
/*
 * SVQ3-specific coefficient scan order for 4x4 residual blocks: maps the
 * coefficient index to its position inside the block, encoded as x + 4 * y.
 */
static const uint8_t svq3_scan[16] = {
     0,  1,  2,  6,     /* 0+0*4, 1+0*4, 2+0*4, 2+1*4 */
    10,  3,  7, 11,     /* 2+2*4, 3+0*4, 3+1*4, 3+2*4 */
     4,  8,  5,  9,     /* 0+1*4, 0+2*4, 1+1*4, 1+2*4 */
    12, 13, 14, 15,     /* 0+3*4, 1+3*4, 2+3*4, 3+3*4 */
};
103 
/*
 * {x, y} lookup pairs for the luma intra-prediction VLC: each decoded code
 * (0..24) selects two indices into svq3_pred_1 (see the luma prediction
 * decode loop in svq3_decode_mb).  The pairs enumerate a 5x5 grid along
 * its anti-diagonals.
 */
static const uint8_t svq3_pred_0[25][2] = {
    { 0, 0 }, { 1, 0 }, { 0, 1 }, { 0, 2 }, { 1, 1 },
    { 2, 0 }, { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 }, { 2, 4 },
    { 3, 3 }, { 4, 2 }, { 4, 3 }, { 3, 4 }, { 4, 4 }
};
115 
/* Intra 4x4 prediction-mode candidates, indexed as
 * svq3_pred_1[top_mode + 1][left_mode + 1][code], where `code` comes from
 * svq3_pred_0 and -1 marks an unused/invalid slot.  Used by the luma
 * prediction decode loop in svq3_decode_mb (a decoded -1 there is treated
 * as a bitstream error). */
static const int8_t svq3_pred_1[6][6][5] = {
    { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
      { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
};
130 
/* (run, level) pairs for the 16 shortest golomb codes of the two DCT
 * coefficient VLC tables; the first index is the `intra` selector computed
 * in svq3_decode_block, which also derives longer codes arithmetically.
 * NOTE(review): the struct member declarations (the run and level fields)
 * were lost when this listing was extracted — restore them from the
 * original file. */
static const struct {
} svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};
140 
/*
 * Dequantization multipliers indexed by qp (0..31).  The IDCT routines
 * multiply coefficients by these values in 20-bit fixed point and round
 * with (+ 0x80000) >> 20.
 */
static const uint32_t svq3_dequant_coeff[32] = {
      3881,   4351,   4890,   5481,  /* qp  0 -  3 */
      6154,   6914,   7761,   8718,  /* qp  4 -  7 */
      9781,  10987,  12339,  13828,  /* qp  8 - 11 */
     15523,  17435,  19561,  21873,  /* qp 12 - 15 */
     24552,  27656,  30847,  34870,  /* qp 16 - 19 */
     38807,  43747,  49103,  54683,  /* qp 20 - 23 */
     61694,  68745,  77615,  89113,  /* qp 24 - 27 */
    100253, 109366, 126635, 141533   /* qp 28 - 31 */
};
147 
148 void ff_svq3_luma_dc_dequant_idct_c(DCTELEM *output, DCTELEM *input, int qp)
149 {
150  const int qmul = svq3_dequant_coeff[qp];
151 #define stride 16
152  int i;
153  int temp[16];
154  static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
155 
156  for (i = 0; i < 4; i++) {
157  const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
158  const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
159  const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
160  const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
161 
162  temp[4 * i + 0] = z0 + z3;
163  temp[4 * i + 1] = z1 + z2;
164  temp[4 * i + 2] = z1 - z2;
165  temp[4 * i + 3] = z0 - z3;
166  }
167 
168  for (i = 0; i < 4; i++) {
169  const int offset = x_offset[i];
170  const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
171  const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
172  const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
173  const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
174 
175  output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
176  output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
177  output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
178  output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
179  }
180 }
181 #undef stride
182 
/*
 * Dequantize, inverse-transform and add one 4x4 residual block onto the
 * destination pixels (with clipping to 0..255), optionally folding a
 * separately transmitted DC value into the rounding term.
 * NOTE(review): the first line of this function's signature (return type,
 * name, and the dst/block parameters) was lost when this listing was
 * extracted — restore it from the original file.
 */
                             int stride, int qp, int dc)
{
    const int qmul = svq3_dequant_coeff[qp];
    int i;

    if (dc) {
        /* dc == 1 uses the fixed 1538 scale; otherwise the transmitted,
         * pre-shifted DC is dequantized.  Either way the result bypasses
         * the transform and is merged into `rr` below. */
        dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
                                : qmul * (block[0] >> 3) / 2);
        block[0] = 0;
    }

    /* horizontal pass of the 13/7/17 fixed-point butterfly, in place */
    for (i = 0; i < 4; i++) {
        const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
        const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
        const int z2 =  7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
        const int z3 = 17 * block[1 + 4 * i] +  7 * block[3 + 4 * i];

        block[0 + 4 * i] = z0 + z3;
        block[1 + 4 * i] = z1 + z2;
        block[2 + 4 * i] = z1 - z2;
        block[3 + 4 * i] = z0 - z3;
    }

    /* vertical pass: dequantize, add DC + rounding, clip onto dst */
    for (i = 0; i < 4; i++) {
        const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
        const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
        const int z2 =  7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
        const int z3 = 17 * block[i + 4 * 1] +  7 * block[i + 4 * 3];
        const int rr = (dc + 0x80000);  /* rounding bias + optional DC contribution */

        dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
        dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
        dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
        dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
    }
}
220 
/*
 * Decode run/level-coded residual coefficients for one block and scatter
 * them along the scan pattern selected by `type`.  `index` is the first
 * coefficient position (callers pass 1 to skip the DC of intra16x16 AC
 * blocks).  For type == 2 two 8-coefficient groups are chained.
 * Returns 0 on success, -1 when a run overshoots the group limit.
 * NOTE(review): the first signature line (return type, name, GetBitContext
 * and coefficient-block parameters) and the scan_patterns initializer
 * (four scan tables) were lost when this listing was extracted — restore
 * them from the original file.
 */
                              int index, const int type)
{
    static const uint8_t *const scan_patterns[4] =

    int run, level, limit;
    unsigned vlc;
    /* table/limit selector: type 0,1 -> 0; type 2 -> 1; type 3 -> 2
     * (the latter yields limit 16 >> 2 = 4, i.e. the chroma DC block) */
    const int intra = 3 * type >> 2;
    const uint8_t *const scan = scan_patterns[type];

    for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
        /* a zero VLC terminates the current coefficient group */
        for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
            int sign = (vlc & 1) ? 0 : -1;  /* even codes encode negative levels */
            vlc = vlc + 1 >> 1;

            if (type == 3) {
                /* chroma DC: small hard-coded run/level table + escape */
                if (vlc < 3) {
                    run = 0;
                    level = vlc;
                } else if (vlc < 4) {
                    run = 1;
                    level = 1;
                } else {
                    run = vlc & 0x3;
                    level = (vlc + 9 >> 2) - run;
                }
            } else {
                /* the 16 shortest codes come from the table; longer codes
                 * are split into run/level fields with per-run level bias */
                if (vlc < 16) {
                    run = svq3_dct_tables[intra][vlc].run;
                    level = svq3_dct_tables[intra][vlc].level;
                } else if (intra) {
                    run = vlc & 0x7;
                    level = (vlc >> 3) +
                            ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
                } else {
                    run = vlc & 0xF;
                    level = (vlc >> 4) +
                            ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
                }
            }

            if ((index += run) >= limit)
                return -1;

            block[scan[index]] = (level ^ sign) - sign;  /* apply the sign */
        }

        if (type != 2) {
            break;
        }
    }

    return 0;
}
276 
/*
 * Motion-compensate one luma partition and, unless CODEC_FLAG_GRAY is set,
 * both chroma planes at half resolution, reading from the forward
 * (dir == 0 -> last_pic) or backward (next_pic) reference and either
 * storing or averaging (avg) into the current picture.  `thirdpel` selects
 * the third-pel interpolation filters; dxy is the sub-pel phase.
 * NOTE(review): the two emulated_edge_mc(...) call-opening lines were lost
 * when this listing was extracted — restore them from the original file.
 */
static inline void svq3_mc_dir_part(SVQ3Context *s,
                                    int x, int y, int width, int height,
                                    int mx, int my, int dxy,
                                    int thirdpel, int dir, int avg)
{
    H264Context *h = &s->h;
    const Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
    uint8_t *src, *dest;
    int i, emu = 0;
    int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2

    mx += x;
    my += y;

    /* source block (partially) outside the picture: clamp the position and
     * remember to go through the edge-emulation buffer */
    if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
        my < 0 || my >= s->v_edge_pos - height - 1) {
        if ((h->flags & CODEC_FLAG_EMU_EDGE))
            emu = 1;

        mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
        my = av_clip(my, -16, s->v_edge_pos - height + 15);
    }

    /* form component predictions */
    dest = h->cur_pic.f.data[0] + x + y * h->linesize;
    src = pic->f.data[0] + mx + my * h->linesize;

    if (emu) {
        /* NOTE(review): the emulated_edge_mc(...) opening line filling
         * h->edge_emu_buffer was lost in extraction. */
                                 width + 1, height + 1,
                                 mx, my, s->h_edge_pos, s->v_edge_pos);
        src = h->edge_emu_buffer;
    }
    if (thirdpel)
        (avg ? h->dsp.avg_tpel_pixels_tab
             : h->dsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
                                                width, height);
    else
        (avg ? h->dsp.avg_pixels_tab
             : h->dsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
                                                      height);

    if (!(h->flags & CODEC_FLAG_GRAY)) {
        /* chroma at half resolution; note '+' binds before '>>', so the
         * halved MV is rounded toward the block position */
        mx = mx + (mx < (int) x) >> 1;
        my = my + (my < (int) y) >> 1;
        width = width >> 1;
        height = height >> 1;
        blocksize++;

        for (i = 1; i < 3; i++) {
            dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
            src = pic->f.data[i] + mx + my * h->uvlinesize;

            if (emu) {
                /* NOTE(review): the emulated_edge_mc(...) opening line was
                 * lost in extraction. */
                                         width + 1, height + 1,
                                         mx, my, (s->h_edge_pos >> 1),
                                         s->v_edge_pos >> 1);
                src = h->edge_emu_buffer;
            }
            if (thirdpel)
                (avg ? h->dsp.avg_tpel_pixels_tab
                     : h->dsp.put_tpel_pixels_tab)[dxy](dest, src,
                                                        h->uvlinesize,
                                                        width, height);
            else
                (avg ? h->dsp.avg_pixels_tab
                     : h->dsp.put_pixels_tab)[blocksize][dxy](dest, src,
                                                              h->uvlinesize,
                                                              height);
        }
    }
}
350 
/*
 * Decode the motion vector for every partition of the current macroblock
 * and run motion compensation on each.  `size` selects the partition
 * geometry, `mode` the MV precision (FULLPEL/HALFPEL/THIRDPEL) or
 * PREDICT_MODE (motion derived from the co-located next-picture MVs),
 * `dir` the reference list (0 forward, 1 backward) and `avg` put vs.
 * average MC.  MV predictions are kept in sixth-pel units (hence the
 * factors of 6 in the clipping bounds).
 * Returns 0 on success, -1 on an invalid MV VLC.
 */
static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
                              int dir, int avg)
{
    int i, j, k, mx, my, dx, dy, x, y;
    H264Context *h = &s->h;
    const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
    const int part_height = 16 >> ((unsigned)(size + 1) / 3);
    const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
    const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
    const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;

    for (i = 0; i < 16; i += part_height)
        for (j = 0; j < 16; j += part_width) {
            const int b_xy = (4 * h->mb_x + (j >> 2)) +
                             (4 * h->mb_y + (i >> 2)) * h->b_stride;
            int dxy;
            x = 16 * h->mb_x + j;
            y = 16 * h->mb_y + i;
            /* scan8-style index of this 4x4 block inside the MB */
            k = (j >> 2 & 1) + (i >> 1 & 2) +
                (j >> 1 & 4) + (i & 8);

            if (mode != PREDICT_MODE) {
                pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
            } else {
                /* direct mode: scale the co-located MV of the next picture
                 * by the temporal distance, with round-to-nearest */
                mx = s->next_pic->f.motion_val[0][b_xy][0] << 1;
                my = s->next_pic->f.motion_val[0][b_xy][1] << 1;

                if (dir == 0) {
                    mx = mx * h->frame_num_offset /
                         h->prev_frame_num_offset + 1 >> 1;
                    my = my * h->frame_num_offset /
                         h->prev_frame_num_offset + 1 >> 1;
                } else {
                    mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
                         h->prev_frame_num_offset + 1 >> 1;
                    my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
                         h->prev_frame_num_offset + 1 >> 1;
                }
            }

            /* clip motion vector prediction to frame border */
            mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
            my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);

            /* get (optional) motion vector differential */
            if (mode == PREDICT_MODE) {
                dx = dy = 0;
            } else {
                dy = svq3_get_se_golomb(&h->gb);
                dx = svq3_get_se_golomb(&h->gb);

                if (dx == INVALID_VLC || dy == INVALID_VLC) {
                    av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
                    return -1;
                }
            }

            /* compute motion vector */
            if (mode == THIRDPEL_MODE) {
                int fx, fy;
                /* sixth-pel -> third-pel, then split into full-pel part
                 * (fx, fy) and third-pel phase dxy */
                mx = (mx + 1 >> 1) + dx;
                my = (my + 1 >> 1) + dy;
                fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
                fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
                dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 fx, fy, dxy, 1, dir, avg);
                mx += mx;   /* back to sixth-pel for the caches below */
                my += my;
            } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
                /* sixth-pel -> half-pel; dxy is the half-pel phase */
                mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
                my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
                dxy = (mx & 1) + 2 * (my & 1);

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx >> 1, my >> 1, dxy, 0, dir, avg);
                mx *= 3;
                my *= 3;
            } else {
                /* full-pel */
                mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
                my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx, my, 0, 0, dir, avg);
                mx *= 6;
                my *= 6;
            }

            /* update mv_cache */
            if (mode != PREDICT_MODE) {
                int32_t mv = pack16to32(mx, my);

                if (part_height == 8 && i < 8) {
                    AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);

                    if (part_width == 8 && j < 8)
                        AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
                }
                if (part_width == 8 && j < 8)
                    AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
                if (part_width == 4 || part_height == 4)
                    AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
            }

            /* write back motion vectors */
            fill_rectangle(h->cur_pic.f.motion_val[dir][b_xy],
                           part_width >> 2, part_height >> 2, h->b_stride,
                           pack16to32(mx, my), 4);
        }

    return 0;
}
464 
/*
 * Decode one macroblock of SVQ3 type mb_type: fill the MV / intra
 * prediction caches, perform motion compensation or record prediction
 * modes, then read CBP, the optional qscale delta and all residual blocks.
 * mb_type: 0 = skip (P) / direct (B), 1..7 = inter, 8 and 33 = intra 4x4,
 * higher values = intra 16x16.  Returns 0 on success, negative on error.
 * NOTE(review): several source lines were lost when this listing was
 * extracted; each gap is marked with a NOTE below — restore those lines
 * from the original file before compiling.
 */
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
{
    H264Context *h = &s->h;
    int i, j, k, m, dir, mode;
    int cbp = 0;
    uint32_t vlc;
    int8_t *top, *left;
    const int mb_xy = h->mb_xy;
    const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;

    h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
    h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
    h->topright_samples_available = 0xFFFF;

    if (mb_type == 0) { /* SKIP */
        if (h->pict_type == AV_PICTURE_TYPE_P ||
            s->next_pic->f.mb_type[mb_xy] == -1) {
            /* plain skip: zero-MV motion compensation (both dirs for B) */
            svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
                             0, 0, 0, 0, 0, 0);

            if (h->pict_type == AV_PICTURE_TYPE_B)
                svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
                                 0, 0, 0, 0, 1, 1);

            mb_type = MB_TYPE_SKIP;
        } else {
            /* B direct: reuse the co-located MB's partitioning */
            mb_type = FFMIN(s->next_pic->f.mb_type[mb_xy], 6);
            if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
                return -1;
            if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
                return -1;

            mb_type = MB_TYPE_16x16;
        }
    } else if (mb_type < 8) { /* INTER */
        /* MV precision flags are coded relative to the stream's global
         * halfpel/thirdpel capability flags */
        if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
            mode = THIRDPEL_MODE;
        else if (s->halfpel_flag &&
                 s->thirdpel_flag == !get_bits1(&h->gb))
            mode = HALFPEL_MODE;
        else
            mode = FULLPEL_MODE;

        /* fill caches */
        /* note ref_cache should contain here:
         * ????????
         * ???11111
         * N??11111
         * N??11111
         * N??11111
         */

        for (m = 0; m < 2; m++) {
            /* left-neighbor MVs (valid only when the left MB is inter,
             * signalled by a non -1 intra4x4 pred mode entry) */
            if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
                for (i = 0; i < 4; i++)
                    AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
                              h->cur_pic.f.motion_val[m][b_xy - 1 + i * h->b_stride]);
            } else {
                for (i = 0; i < 4; i++)
                    AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
            }
            if (h->mb_y > 0) {
                /* top row of neighbor MVs plus availability flags */
                memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
                       h->cur_pic.f.motion_val[m][b_xy - h->b_stride],
                       4 * 2 * sizeof(int16_t));
                memset(&h->ref_cache[m][scan8[0] - 1 * 8],
                       (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);

                if (h->mb_x < h->mb_width - 1) {
                    /* top-right neighbor */
                    AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
                              h->cur_pic.f.motion_val[m][b_xy - h->b_stride + 4]);
                    h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
                        (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
                         h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
                } else
                    h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
                if (h->mb_x > 0) {
                    /* top-left neighbor */
                    AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
                              h->cur_pic.f.motion_val[m][b_xy - h->b_stride - 1]);
                    h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
                        (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
                } else
                    h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
            } else
                memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
                       PART_NOT_AVAILABLE, 8);

            /* only B pictures need the second (backward) list */
            if (h->pict_type != AV_PICTURE_TYPE_B)
                break;
        }

        /* decode motion vector(s) and form prediction(s) */
        if (h->pict_type == AV_PICTURE_TYPE_P) {
            if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
                return -1;
        } else { /* AV_PICTURE_TYPE_B */
            if (mb_type != 2) {
                if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++)
                    memset(h->cur_pic.f.motion_val[0][b_xy + i * h->b_stride],
                           0, 4 * 2 * sizeof(int16_t));
            }
            if (mb_type != 1) {
                if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++)
                    memset(h->cur_pic.f.motion_val[1][b_xy + i * h->b_stride],
                           0, 4 * 2 * sizeof(int16_t));
            }
        }

        mb_type = MB_TYPE_16x16;
    } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
        memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));

        if (mb_type == 8) {
            /* import the neighboring 4x4 prediction modes into the cache */
            if (h->mb_x > 0) {
                for (i = 0; i < 4; i++)
                    h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
                if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
                    h->left_samples_available = 0x5F5F;
            }
            if (h->mb_y > 0) {
                h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
                h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
                h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
                h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];

                if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
                    h->top_samples_available = 0x33FF;
            }

            /* decode prediction codes for luma blocks */
            for (i = 0; i < 16; i += 2) {
                vlc = svq3_get_ue_golomb(&h->gb);

                if (vlc >= 25) {
                    av_log(h->avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
                    return -1;
                }

                left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
                top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];

                /* one VLC codes the modes of two consecutive 4x4 blocks,
                 * looked up via the (top, left) context tables */
                left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
                left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];

                if (left[1] == -1 || left[2] == -1) {
                    av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
                    return -1;
                }
            }
        } else { /* mb_type == 33, DC_128_PRED block type */
            for (i = 0; i < 4; i++)
                memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
        }

        /* NOTE(review): one or more statement lines were lost in extraction
         * here. */

        if (mb_type == 8) {
            /* NOTE(review): a statement line was lost in extraction here. */

            h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
            h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
        } else {
            for (i = 0; i < 4; i++)
                memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);

            h->top_samples_available = 0x33FF;
            h->left_samples_available = 0x5F5F;
        }

        mb_type = MB_TYPE_INTRA4x4;
    } else { /* INTRA16x16 */
        dir = i_mb_type_info[mb_type - 8].pred_mode;
        dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;

        if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) < 0) {
            av_log(h->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
            return h->intra16x16_pred_mode;
        }

        cbp = i_mb_type_info[mb_type - 8].cbp;
        mb_type = MB_TYPE_INTRA16x16;
    }

    /* intra MBs in P/B pictures invalidate their MVs for later prediction */
    if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
        for (i = 0; i < 4; i++)
            memset(h->cur_pic.f.motion_val[0][b_xy + i * h->b_stride],
                   0, 4 * 2 * sizeof(int16_t));
        if (h->pict_type == AV_PICTURE_TYPE_B) {
            for (i = 0; i < 4; i++)
                memset(h->cur_pic.f.motion_val[1][b_xy + i * h->b_stride],
                       0, 4 * 2 * sizeof(int16_t));
        }
    }
    if (!IS_INTRA4x4(mb_type)) {
        memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
    }
    if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
        memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
        h->dsp.clear_blocks(h->mb + 0);
        h->dsp.clear_blocks(h->mb + 384);
    }

    if (!IS_INTRA16x16(mb_type) &&
        (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
        if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48) {
            av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
            return -1;
        }

        cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
                                : golomb_to_inter_cbp[vlc];
    }
    if (IS_INTRA16x16(mb_type) ||
        (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
        h->qscale += svq3_get_se_golomb(&h->gb);

        /* unsigned compare also rejects qscale gone negative */
        if (h->qscale > 31u) {
            av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale);
            return -1;
        }
    }
    if (IS_INTRA16x16(mb_type)) {
        AV_ZERO128(h->mb_luma_dc[0] + 0);
        AV_ZERO128(h->mb_luma_dc[0] + 8);
        if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
            /* NOTE(review): the av_log(...) opening line of this call was
             * lost in extraction. */
                   "error while decoding intra luma dc\n");
            return -1;
        }
    }

    if (cbp) {
        const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
        const int type = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);

        /* luma residual: one 8x8 group per CBP bit, four 4x4 blocks each */
        for (i = 0; i < 4; i++)
            if ((cbp & (1 << i))) {
                for (j = 0; j < 4; j++) {
                    k = index ? (1 * (j & 1) + 2 * (i & 1) +
                                 2 * (j & 2) + 4 * (i & 2))
                              : (4 * i + j);
                    h->non_zero_count_cache[scan8[k]] = 1;

                    if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
                        /* NOTE(review): the av_log(...) opening line was
                         * lost in extraction. */
                               "error while decoding block\n");
                        return -1;
                    }
                }
            }

        /* chroma residual: DC blocks, then optionally AC blocks */
        if ((cbp & 0x30)) {
            for (i = 1; i < 3; ++i)
                if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
                    /* NOTE(review): the av_log(...) opening line was lost
                     * in extraction. */
                           "error while decoding chroma dc block\n");
                    return -1;
                }

            if ((cbp & 0x20)) {
                for (i = 1; i < 3; i++) {
                    for (j = 0; j < 4; j++) {
                        k = 16 * i + j;
                        h->non_zero_count_cache[scan8[k]] = 1;

                        if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
                            /* NOTE(review): the av_log(...) opening line
                             * was lost in extraction. */
                                   "error while decoding chroma ac block\n");
                            return -1;
                        }
                    }
                }
            }
        }
    }

    h->cbp = cbp;
    h->cur_pic.f.mb_type[mb_xy] = mb_type;

    if (IS_INTRA(mb_type))
        /* NOTE(review): the body of this if statement was lost in
         * extraction — restore it from the original file. */

    return 0;
}
755 
/*
 * Parse one SVQ3 slice header: validate the header byte, locate the next
 * slice, optionally de-watermark the slice payload in place, then read
 * slice type, skip-run, slice number, qscale and flags, and finally reset
 * the intra predictors along the slice boundary.
 * Returns 0 on success, -1 on an unsupported/invalid header.
 * NOTE(review): the function's signature line was lost when this listing
 * was extracted — restore it from the original file.
 */
{
    SVQ3Context *s = avctx->priv_data;
    H264Context *h = &s->h;
    const int mb_xy = h->mb_xy;
    int i, header;
    unsigned slice_id;

    header = get_bits(&h->gb, 8);

    /* header & 0x9F must be 1 or 2, and bits 5-6 (size of the length
     * field in bytes) must be nonzero */
    if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
        /* TODO: what? */
        av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
        return -1;
    } else {
        int length = header >> 5 & 3;

        /* NOTE(review): the opening of this statement (assigning
         * s->next_slice_index from the current bit position) was lost in
         * extraction. */
                              8 * show_bits(&h->gb, 8 * length) +
                              8 * length;

        if (s->next_slice_index > h->gb.size_in_bits) {
            av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
            return -1;
        }

        h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
        skip_bits(&h->gb, 8);

        /* de-watermark the upcoming slice data in place */
        if (s->watermark_key) {
            uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
            AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
                    header ^ s->watermark_key);
        }
        if (length > 0) {
            memcpy((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
                   &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
        }
        skip_bits_long(&h->gb, 0);
    }

    if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
        av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %d \n", slice_id);
        return -1;
    }

    h->slice_type = golomb_to_pict_type[slice_id];

    if ((header & 0x9F) == 2) {
        /* header type 2: an absolute MB index is coded; convert it to a
         * skip run relative to the current position */
        i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
        h->mb_skip_run = get_bits(&h->gb, i) -
                         (h->mb_y * h->mb_width + h->mb_x);
    } else {
        skip_bits1(&h->gb);
        h->mb_skip_run = 0;
    }

    h->slice_num = get_bits(&h->gb, 8);
    h->qscale = get_bits(&h->gb, 5);
    s->adaptive_quant = get_bits1(&h->gb);

    /* unknown fields */
    skip_bits1(&h->gb);

    if (s->unknown_flag)
        skip_bits1(&h->gb);

    skip_bits1(&h->gb);
    skip_bits(&h->gb, 2);

    /* variable-length trailer: flagged bytes are skipped */
    while (get_bits1(&h->gb))
        skip_bits(&h->gb, 8);

    /* reset intra predictors and invalidate motion vector references */
    if (h->mb_x > 0) {
        memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
               -1, 4 * sizeof(int8_t));
        memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
               -1, 8 * sizeof(int8_t) * h->mb_x);
    }
    if (h->mb_y > 0) {
        memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
               -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));

        if (h->mb_x > 0)
            h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
    }

    return 0;
}
846 
/*
 * Decoder init: allocate the three picture slots, run the generic H.264
 * init, then (once) parse the "SEQH" atom from extradata for frame size,
 * pel-precision flags, low-delay flag and the optional zlib-compressed
 * watermark, and finally size the macroblock tables.
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): the function's signature line was lost when this listing
 * was extracted — restore it from the original file.
 */
{
    SVQ3Context *s = avctx->priv_data;
    H264Context *h = &s->h;
    int m;
    unsigned char *extradata;
    unsigned char *extradata_end;
    unsigned int size;
    int marker_found = 0;

    /* allocate the three picture slots; free all of them on any failure */
    s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
    s->last_pic = av_mallocz(sizeof(*s->last_pic));
    s->next_pic = av_mallocz(sizeof(*s->next_pic));
    if (!s->next_pic || !s->last_pic || !s->cur_pic) {
        av_freep(&s->cur_pic);
        av_freep(&s->last_pic);
        av_freep(&s->next_pic);
        return AVERROR(ENOMEM);
    }

    if (ff_h264_decode_init(avctx) < 0)
        return -1;

    h->flags = avctx->flags;
    h->is_complex = 1;
    /* NOTE(review): one statement line was lost in extraction here. */
    avctx->pix_fmt = avctx->codec->pix_fmts[0];

    if (!h->context_initialized) {
        h->chroma_qp[0] = h->chroma_qp[1] = 4;
        h->chroma_x_shift = h->chroma_y_shift = 1;

        s->halfpel_flag = 1;
        s->thirdpel_flag = 1;
        s->unknown_flag = 0;

        /* prowl for the "SEQH" marker in the extradata */
        extradata = (unsigned char *)avctx->extradata;
        extradata_end = avctx->extradata + avctx->extradata_size;
        if (extradata) {
            for (m = 0; m + 8 < avctx->extradata_size; m++) {
                if (!memcmp(extradata, "SEQH", 4)) {
                    marker_found = 1;
                    break;
                }
                extradata++;
            }
        }

        /* if a match was found, parse the extra data */
        if (marker_found) {
            GetBitContext gb;
            int frame_size_code;

            size = AV_RB32(&extradata[4]);
            if (size > extradata_end - extradata - 8)
                return AVERROR_INVALIDDATA;
            init_get_bits(&gb, extradata + 8, size * 8);

            /* 'frame size code' and optional 'width, height' */
            frame_size_code = get_bits(&gb, 3);
            switch (frame_size_code) {
            case 0:
                avctx->width = 160;
                avctx->height = 120;
                break;
            case 1:
                avctx->width = 128;
                avctx->height = 96;
                break;
            case 2:
                avctx->width = 176;
                avctx->height = 144;
                break;
            case 3:
                avctx->width = 352;
                avctx->height = 288;
                break;
            case 4:
                avctx->width = 704;
                avctx->height = 576;
                break;
            case 5:
                avctx->width = 240;
                avctx->height = 180;
                break;
            case 6:
                avctx->width = 320;
                avctx->height = 240;
                break;
            case 7:
                /* explicit dimensions */
                avctx->width = get_bits(&gb, 12);
                avctx->height = get_bits(&gb, 12);
                break;
            }

            s->halfpel_flag = get_bits1(&gb);
            s->thirdpel_flag = get_bits1(&gb);

            /* unknown fields */
            skip_bits1(&gb);
            skip_bits1(&gb);
            skip_bits1(&gb);
            skip_bits1(&gb);

            h->low_delay = get_bits1(&gb);

            /* unknown field */
            skip_bits1(&gb);

            while (get_bits1(&gb))
                skip_bits(&gb, 8);

            s->unknown_flag = get_bits1(&gb);
            avctx->has_b_frames = !h->low_delay;
            if (s->unknown_flag) {
#if CONFIG_ZLIB
                unsigned watermark_width = svq3_get_ue_golomb(&gb);
                unsigned watermark_height = svq3_get_ue_golomb(&gb);
                int u1 = svq3_get_ue_golomb(&gb);
                int u2 = get_bits(&gb, 8);
                int u3 = get_bits(&gb, 2);
                int u4 = svq3_get_ue_golomb(&gb);
                unsigned long buf_len = watermark_width *
                                        watermark_height * 4;
                int offset = get_bits_count(&gb) + 7 >> 3;
                uint8_t *buf;

                /* guard the width * height * 4 product computed above */
                if (watermark_height > 0 &&
                    (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
                    return -1;

                buf = av_malloc(buf_len);
                /* NOTE(review): buf is not NULL-checked before being
                 * passed to uncompress() below. */
                av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n",
                       watermark_width, watermark_height);
                av_log(avctx, AV_LOG_DEBUG,
                       "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
                       u1, u2, u3, u4, offset);
                if (uncompress(buf, &buf_len, extradata + 8 + offset,
                               size - offset) != Z_OK) {
                    av_log(avctx, AV_LOG_ERROR,
                           "could not uncompress watermark logo\n");
                    av_free(buf);
                    return -1;
                }
                /* the de-watermark key is the checksum of the logo,
                 * replicated into both 16-bit halves */
                s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
                s->watermark_key = s->watermark_key << 16 | s->watermark_key;
                av_log(avctx, AV_LOG_DEBUG,
                       "watermark key %#x\n", s->watermark_key);
                av_free(buf);
#else
                av_log(avctx, AV_LOG_ERROR,
                       "this svq3 file contains watermark which need zlib support compiled in\n");
                return -1;
#endif
            }
        }

        /* derive macroblock geometry from the (possibly updated) size */
        h->width = avctx->width;
        h->height = avctx->height;
        h->mb_width = (h->width + 15) / 16;
        h->mb_height = (h->height + 15) / 16;
        h->mb_stride = h->mb_width + 1;
        h->mb_num = h->mb_width * h->mb_height;
        h->b_stride = 4 * h->mb_width;
        s->h_edge_pos = h->mb_width * 16;
        s->v_edge_pos = h->mb_height * 16;

        if (ff_h264_alloc_tables(h) < 0) {
            av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
            return AVERROR(ENOMEM);
        }
    }

    return 0;
}
1023 
1024 static int get_buffer(AVCodecContext *avctx, Picture *pic)
1025 {
1026  SVQ3Context *s = avctx->priv_data;
1027  H264Context *h = &s->h;
1028  const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1029  const int mb_array_size = h->mb_stride * h->mb_height;
1030  const int b4_stride = h->mb_width * 4 + 1;
1031  const int b4_array_size = b4_stride * h->mb_height * 4;
1032  int ret;
1033 
1034  if (!pic->motion_val_base[0]) {
1035  int i;
1036 
1037  pic->mb_type_base = av_mallocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1038  if (!pic->mb_type_base)
1039  return AVERROR(ENOMEM);
1040  pic->f.mb_type = pic->mb_type_base + 2 * h->mb_stride + 1;
1041 
1042  for (i = 0; i < 2; i++) {
1043  pic->motion_val_base[i] = av_mallocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1044  pic->f.ref_index[i] = av_mallocz(4 * mb_array_size);
1045  if (!pic->motion_val_base[i] || !pic->f.ref_index[i])
1046  return AVERROR(ENOMEM);
1047 
1048  pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
1049  }
1050  }
1051  pic->f.motion_subsample_log2 = 2;
1052  pic->f.reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1053 
1054  ret = ff_get_buffer(avctx, &pic->f);
1055 
1056  h->linesize = pic->f.linesize[0];
1057  h->uvlinesize = pic->f.linesize[1];
1058 
1059  return ret;
1060 }
1061 
1062 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1063  int *got_frame, AVPacket *avpkt)
1064 {
1065  const uint8_t *buf = avpkt->data;
1066  SVQ3Context *s = avctx->priv_data;
1067  H264Context *h = &s->h;
1068  int buf_size = avpkt->size;
1069  int ret, m, i;
1070 
1071  /* special case for last picture */
1072  if (buf_size == 0) {
1073  if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1074  *(AVFrame *) data = s->next_pic->f;
1075  s->last_frame_output = 1;
1076  *got_frame = 1;
1077  }
1078  return 0;
1079  }
1080 
1081  init_get_bits(&h->gb, buf, 8 * buf_size);
1082 
1083  h->mb_x = h->mb_y = h->mb_xy = 0;
1084 
1085  if (svq3_decode_slice_header(avctx))
1086  return -1;
1087 
1088  h->pict_type = h->slice_type;
1089 
1090  if (h->pict_type != AV_PICTURE_TYPE_B)
1091  FFSWAP(Picture*, s->next_pic, s->last_pic);
1092 
1093  if (s->cur_pic->f.data[0])
1094  avctx->release_buffer(avctx, &s->cur_pic->f);
1095 
1096  /* for skipping the frame */
1097  s->cur_pic->f.pict_type = h->pict_type;
1099 
1100  ret = get_buffer(avctx, s->cur_pic);
1101  if (ret < 0)
1102  return ret;
1103 
1104  h->cur_pic_ptr = s->cur_pic;
1105  h->cur_pic = *s->cur_pic;
1106 
1107  for (i = 0; i < 16; i++) {
1108  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1109  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1110  }
1111  for (i = 0; i < 16; i++) {
1112  h->block_offset[16 + i] =
1113  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1114  h->block_offset[48 + 16 + i] =
1115  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1116  }
1117 
1118  if (h->pict_type != AV_PICTURE_TYPE_I) {
1119  if (!s->last_pic->f.data[0]) {
1120  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1121  ret = get_buffer(avctx, s->last_pic);
1122  if (ret < 0)
1123  return ret;
1124  memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1125  memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1126  s->last_pic->f.linesize[1]);
1127  memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1128  s->last_pic->f.linesize[2]);
1129  }
1130 
1131  if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1132  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1133  ret = get_buffer(avctx, s->next_pic);
1134  if (ret < 0)
1135  return ret;
1136  memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1137  memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1138  s->next_pic->f.linesize[1]);
1139  memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1140  s->next_pic->f.linesize[2]);
1141  }
1142  }
1143 
1144  if (avctx->debug & FF_DEBUG_PICT_INFO)
1146  "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1148  s->halfpel_flag, s->thirdpel_flag,
1149  s->adaptive_quant, h->qscale, h->slice_num);
1150 
1151  if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1153  avctx->skip_frame >= AVDISCARD_ALL)
1154  return 0;
1155 
1156  if (s->next_p_frame_damaged) {
1157  if (h->pict_type == AV_PICTURE_TYPE_B)
1158  return 0;
1159  else
1160  s->next_p_frame_damaged = 0;
1161  }
1162 
1163  if (h->pict_type == AV_PICTURE_TYPE_B) {
1165 
1166  if (h->frame_num_offset < 0)
1167  h->frame_num_offset += 256;
1168  if (h->frame_num_offset == 0 ||
1170  av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1171  return -1;
1172  }
1173  } else {
1174  h->prev_frame_num = h->frame_num;
1175  h->frame_num = h->slice_num;
1177 
1178  if (h->prev_frame_num_offset < 0)
1179  h->prev_frame_num_offset += 256;
1180  }
1181 
1182  for (m = 0; m < 2; m++) {
1183  int i;
1184  for (i = 0; i < 4; i++) {
1185  int j;
1186  for (j = -1; j < 4; j++)
1187  h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1188  if (i < 3)
1189  h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1190  }
1191  }
1192 
1193  for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
1194  for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
1195  unsigned mb_type;
1196  h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;
1197 
1198  if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1199  ((get_bits_count(&h->gb) & 7) == 0 ||
1200  show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1201  skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1202  h->gb.size_in_bits = 8 * buf_size;
1203 
1204  if (svq3_decode_slice_header(avctx))
1205  return -1;
1206 
1207  /* TODO: support s->mb_skip_run */
1208  }
1209 
1210  mb_type = svq3_get_ue_golomb(&h->gb);
1211 
1212  if (h->pict_type == AV_PICTURE_TYPE_I)
1213  mb_type += 8;
1214  else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1215  mb_type += 4;
1216  if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1218  "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
1219  return -1;
1220  }
1221 
1222  if (mb_type != 0)
1224 
1225  if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1226  h->cur_pic.f.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
1227  (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1228  }
1229 
1230  ff_draw_horiz_band(avctx, &h->dsp, s->cur_pic, s->last_pic->f.data[0] ? s->last_pic : NULL,
1231  16 * h->mb_y, 16, h->picture_structure, 0, 1,
1232  h->low_delay, h->mb_height * 16, h->mb_width * 16);
1233  }
1234 
1235  if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1236  *(AVFrame *)data = s->cur_pic->f;
1237  else
1238  *(AVFrame *)data = s->last_pic->f;
1239 
1240  /* Do not output the last pic after seeking. */
1241  if (s->last_pic->f.data[0] || h->low_delay)
1242  *got_frame = 1;
1243 
1244  if (h->pict_type != AV_PICTURE_TYPE_B) {
1245  FFSWAP(Picture*, s->cur_pic, s->next_pic);
1246  }
1247 
1248  return buf_size;
1249 }
1250 
1251 static void free_picture(AVCodecContext *avctx, Picture *pic)
1252 {
1253  int i;
1254  for (i = 0; i < 2; i++) {
1255  av_freep(&pic->motion_val_base[i]);
1256  av_freep(&pic->f.ref_index[i]);
1257  }
1258  av_freep(&pic->mb_type_base);
1259 
1260  if (pic->f.data[0])
1261  avctx->release_buffer(avctx, &pic->f);
1262  av_freep(&pic);
1263 }
1264 
1266 {
1267  SVQ3Context *s = avctx->priv_data;
1268  H264Context *h = &s->h;
1269 
1270  free_picture(avctx, s->cur_pic);
1271  free_picture(avctx, s->next_pic);
1272  free_picture(avctx, s->last_pic);
1273 
1275 
1276  return 0;
1277 }
1278 
1280  .name = "svq3",
1281  .type = AVMEDIA_TYPE_VIDEO,
1282  .id = AV_CODEC_ID_SVQ3,
1283  .priv_data_size = sizeof(SVQ3Context),
1287  .capabilities = CODEC_CAP_DRAW_HORIZ_BAND |
1288  CODEC_CAP_DR1 |
1290  .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1291  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
1292  AV_PIX_FMT_NONE},
1293 };
uint8_t pred_mode
Definition: h264data.h:155
const struct AVCodec * codec
Definition: avcodec.h:1348
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:61
discard all frames except keyframes
Definition: avcodec.h:535
uint8_t * edge_emu_buffer
Definition: h264.h:620
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: dsputil.h:259
unsigned int top_samples_available
Definition: h264.h:311
GetBitContext gb
Definition: h264.h:259
int low_delay
Definition: h264.h:281
int mb_num
Definition: h264.h:460
int size
This structure describes decoded (raw) audio or video data.
Definition: avcodec.h:989
#define IS_SKIP(a)
Definition: mpegvideo.h:111
Picture * last_pic
Definition: svq3.c:70
int cbp
Definition: h264.h:428
void(* release_buffer)(struct AVCodecContext *c, AVFrame *pic)
Called to release buffers which were allocated with get_buffer.
Definition: avcodec.h:2259
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:237
uint16_t ff_svq1_packet_checksum(const uint8_t *data, const int length, int value)
Definition: svq13.c:60
static void skip_bits_long(GetBitContext *s, int n)
Definition: get_bits.h:197
av_cold int ff_h264_decode_init(AVCodecContext *avctx)
Definition: h264.c:1300
void ff_svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp, int dc)
Definition: svq3.c:183
int mb_y
Definition: h264.h:454
int size
Definition: avcodec.h:916
int16_t(*[2] motion_val_base)[2]
Definition: mpegvideo.h:103
int chroma_x_shift
Definition: h264.h:275
const uint8_t * buffer
Definition: get_bits.h:53
static unsigned svq3_get_ue_golomb(GetBitContext *gb)
Definition: golomb.h:110
#define INVALID_VLC
Definition: golomb.h:37
int flags
Definition: h264.h:284
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1533
int mb_height
Definition: h264.h:458
static void free_picture(AVCodecContext *avctx, Picture *pic)
Definition: svq3.c:1251
DSPContext dsp
Definition: h264.h:254
mpegvideo header.
int v_edge_pos
Definition: svq3.c:79
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264.h:327
H264Context.
Definition: h264.h:252
discard all
Definition: avcodec.h:536
#define IS_INTRA4x4(a)
Definition: mpegvideo.h:106
uint8_t run
Definition: svq3.c:132
#define FULLPEL_MODE
Definition: svq3.c:83
AVCodec.
Definition: avcodec.h:2960
int picture_structure
Definition: h264.h:376
#define AV_WN32A(p, v)
Definition: intreadwrite.h:458
#define AV_COPY32(d, s)
Definition: intreadwrite.h:506
static av_always_inline uint32_t pack16to32(int a, int b)
Definition: h264.h:794
static const uint8_t zigzag_scan[16]
Definition: h264data.h:55
int mb_skip_run
Definition: h264.h:457
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:151
static int svq3_mc_dir(SVQ3Context *s, int size, int mode, int dir, int avg)
Definition: svq3.c:351
static int svq3_decode_block(GetBitContext *gb, DCTELEM *block, int index, const int type)
Definition: svq3.c:221
enum AVDiscard skip_frame
Definition: avcodec.h:2907
static const uint8_t golomb_to_pict_type[5]
Definition: h264data.h:38
int thirdpel_flag
Definition: svq3.c:72
void(* emulated_edge_mc)(uint8_t *buf, const uint8_t *src, ptrdiff_t linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:50
static int decode(MimicContext *ctx, int quality, int num_coeffs, int is_iframe)
Definition: mimic.c:228
#define IS_INTER(a)
Definition: mpegvideo.h:110
DCTELEM mb_luma_dc[3][16 *2]
Definition: h264.h:417
uint8_t
int prev_frame_num_offset
for POC type 2
Definition: h264.h:504
#define DC_PRED8x8
Definition: h264pred.h:68
#define PICT_FRAME
Definition: mpegvideo.h:629
#define AV_RB32
Definition: intreadwrite.h:130
static int get_buffer(AVCodecContext *avctx, Picture *pic)
Definition: svq3.c:1024
DCTELEM mb[16 *48 *2]
as a dct coeffecient is int32_t in high depth, we need to reserve twice the space.
Definition: h264.h:416
int mb_xy
Definition: h264.h:461
#define AV_WL32(p, d)
Definition: intreadwrite.h:255
static const uint8_t luma_dc_zigzag_scan[16]
Definition: h264data.h:69
uint8_t motion_subsample_log2
log2 of the size of the block which a single vector in motion_val represents: (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2)
Definition: avcodec.h:1302
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1454
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: dsputil.h:271
int uvlinesize
Definition: h264.h:274
const char data[16]
Definition: mxf.c:66
int height
Definition: h264.h:273
int mb_x
Definition: h264.h:454
static const IMbInfo i_mb_type_info[26]
Definition: h264data.h:159
uint8_t * data
Definition: avcodec.h:915
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:192
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:43
int chroma_y_shift
Definition: h264.h:275
static int init(AVCodecParserContext *s)
Definition: h264_parser.c:335
int width
Definition: h264.h:273
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
Definition: svq3.c:465
H.264 / AVC / MPEG4 part10 codec.
int frame_num
Definition: h264.h:500
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:311
int next_slice_index
Definition: svq3.c:74
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1634
void av_free(void *ptr)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc(). ...
Definition: mem.c:139
#define HALFPEL_MODE
Definition: svq3.c:84
tpel_mc_func avg_tpel_pixels_tab[11]
Definition: dsputil.h:310
int context_initialized
Definition: h264.h:283
void ff_h264_hl_decode_mb(H264Context *h)
Definition: h264.c:2292
int reference
is this picture used as reference The values for this are the same as the MpegEncContext.picture_structure variable, that is 1->top field, 2->bottom field, 3->frame/both fields.
Definition: avcodec.h:1132
#define DC_128_PRED
Definition: h264pred.h:51
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:88
Picture * next_pic
Definition: svq3.c:69
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1434
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:146
const char * name
Name of the codec implementation.
Definition: avcodec.h:2967
#define IS_INTRA(a)
Definition: mpegvideo.h:109
#define PREDICT_MODE
Definition: svq3.c:86
Sorenson Vector Quantizer #1 (SVQ1) video codec.
static const uint8_t scan8[16 *3+3]
Definition: h264.h:778
static av_always_inline void pred_motion(H264Context *const h, int n, int part_width, int list, int ref, int *const mx, int *const my)
Get the predicted MV.
Definition: h264_mvpred.h:94
static int svq3_get_se_golomb(GetBitContext *gb)
Definition: golomb.h:199
int chroma_pred_mode
Definition: h264.h:291
useful rectangle filling function
unsigned int left_samples_available
Definition: h264.h:313
enum AVPixelFormat * pix_fmts
array of supported pixel formats, or NULL if unknown, array is terminated by -1
Definition: avcodec.h:2981
static DCTELEM block[64]
Definition: dct-test.c:169
enum AVPictureType pict_type
Picture type of the frame, see ?_TYPE below.
Definition: avcodec.h:1065
int frame_num_offset
for POC type 2
Definition: h264.h:503
uint32_t * mb2br_xy
Definition: h264.h:344
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_...
Definition: pixfmt.h:77
H264Context h
Definition: svq3.c:67
static int svq3_decode_end(AVCodecContext *avctx)
Definition: svq3.c:1265
int width
picture width / height.
Definition: avcodec.h:1508
Picture.
Definition: mpegvideo.h:95
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame)
Get a buffer for a frame.
Definition: utils.c:464
int size_in_bits
Definition: get_bits.h:55
int32_t
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:251
static av_cold int svq3_decode_init(AVCodecContext *avctx)
Definition: svq3.c:847
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color)
Definition: avplay.c:401
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: h264.h:307
#define AV_RL32
Definition: intreadwrite.h:146
unsigned int topright_samples_available
Definition: h264.h:312
int slice_type
Definition: h264.h:368
static const uint8_t golomb_to_intra4x4_cbp[48]
Definition: h264data.h:43
void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur, Picture *last, int y, int h, int picture_structure, int first_field, int draw_edges, int low_delay, int v_edge_pos, int h_edge_pos)
Definition: mpegvideo.c:2458
int last_frame_output
Definition: svq3.c:80
#define PART_NOT_AVAILABLE
Definition: h264.h:330
int next_p_frame_damaged
Definition: svq3.c:77
Picture cur_pic
Definition: h264.h:264
static const int8_t mv[256][2]
Definition: 4xm.c:73
uint32_t * mb_type
macroblock type table mb_type_base + mb_width + 2
Definition: avcodec.h:1180
VideoDSPContext vdsp
Definition: h264.h:255
NULL
Definition: eval.c:52
AVCodec ff_svq3_decoder
Definition: svq3.c:1279
static int width
Definition: utils.c:156
int mb_stride
Definition: h264.h:459
AVCodecContext * avctx
Definition: h264.h:253
external API header
H264 / AVC / MPEG4 part10 codec data table
int prev_frame_num
frame_num of the last pic for POC type 1/2
Definition: h264.h:505
uint32_t * mb_type_base
Definition: mpegvideo.h:104
int linesize[AV_NUM_DATA_POINTERS]
Size, in bytes, of the data for each picture/channel plane.
Definition: avcodec.h:1008
int debug
debug
Definition: avcodec.h:2568
main external API structure.
Definition: avcodec.h:1339
static void close(AVCodecParserContext *s)
Definition: h264_parser.c:326
int ff_h264_check_intra4x4_pred_mode(H264Context *h)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:264
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264.c:1062
int16_t(*[2] motion_val)[2]
motion vector table
Definition: avcodec.h:1172
int extradata_size
Definition: avcodec.h:1455
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:268
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:293
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:260
int index
Definition: gxfenc.c:72
static const uint8_t chroma_dc_scan[4]
Definition: h264data.h:83
static void svq3_mc_dir_part(SVQ3Context *s, int x, int y, int width, int height, int mx, int my, int dxy, int thirdpel, int dir, int avg)
Definition: svq3.c:277
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:372
static const uint8_t svq3_pred_0[25][2]
Definition: svq3.c:104
int8_t * ref_index[2]
motion reference frame index the order in which these are stored can depend on the codec...
Definition: avcodec.h:1195
int unknown_flag
Definition: svq3.c:73
short DCTELEM
Definition: dsputil.h:39
int block_offset[2 *(16 *3)]
block_offset[ 0..23] for frame macroblocks block_offset[24..47] for field macroblocks ...
Definition: h264.h:341
av_cold void ff_h264_free_context(H264Context *h)
Free any data that may have been allocated in the H264 context like SPS, PPS etc. ...
Definition: h264.c:4620
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: avcodec.h:997
uint8_t level
Definition: svq3.c:133
#define IS_INTRA16x16(a)
Definition: mpegvideo.h:107
void(* clear_blocks)(DCTELEM *blocks)
Definition: dsputil.h:219
#define AV_ZERO128(d)
Definition: intreadwrite.h:542
int height
Definition: gxfenc.c:72
tpel_mc_func put_tpel_pixels_tab[11]
Thirdpel motion compensation with rounding (a+b+1)>>1.
Definition: dsputil.h:309
discard all non reference
Definition: avcodec.h:533
int is_complex
Definition: h264.h:463
int qscale
Definition: h264.h:277
uint8_t cbp
Definition: h264data.h:156
common internal api header.
int h_edge_pos
Definition: svq3.c:78
H.264 / AVC / MPEG4 part10 motion vector predicion.
Bi-dir predicted.
Definition: avutil.h:247
#define stride
int chroma_qp[2]
Definition: h264.h:269
static const uint8_t golomb_to_inter_cbp[48]
Definition: h264data.h:49
static av_always_inline void write_back_intra_pred_mode(H264Context *h)
Definition: h264.h:838
DSP utils.
int intra16x16_pred_mode
Definition: h264.h:292
static const uint32_t svq3_dequant_coeff[32]
Definition: svq3.c:141
void * priv_data
Definition: avcodec.h:1382
#define THIRDPEL_MODE
Definition: svq3.c:85
int linesize
Definition: h264.h:274
Picture * cur_pic_ptr
Definition: h264.h:263
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264.h:320
#define avg(d, s)
Definition: dsputil_align.c:51
#define av_log2
Definition: intmath.h:85
static int svq3_decode_slice_header(AVCodecContext *avctx)
Definition: svq3.c:756
int key_frame
1 -> keyframe, 0-> not
Definition: avcodec.h:1058
#define AV_ZERO32(d)
Definition: intreadwrite.h:534
int mb_width
Definition: h264.h:458
void ff_svq3_luma_dc_dequant_idct_c(DCTELEM *output, DCTELEM *input, int qp)
Definition: svq3.c:148
enum AVPictureType pict_type
Definition: h264.h:567
static const uint8_t svq3_scan[16]
Definition: svq3.c:97
Picture * cur_pic
Definition: svq3.c:68
struct AVFrame f
Definition: mpegvideo.h:96
static const int8_t svq3_pred_1[6][6][5]
Definition: svq3.c:116
uint32_t watermark_key
Definition: svq3.c:75
int8_t * intra4x4_pred_mode
Definition: h264.h:308
#define DC_PRED
Definition: h264pred.h:40
exp golomb vlc stuff
int slice_num
Definition: h264.h:366
AVPixelFormat
Pixel format.
Definition: pixfmt.h:63
This structure stores compressed data.
Definition: avcodec.h:898
static int svq3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: svq3.c:1062
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:158
static const struct @54 svq3_dct_tables[2][16]
int b_stride
Definition: h264.h:345
Predicted.
Definition: avutil.h:246
int halfpel_flag
Definition: svq3.c:71
int adaptive_quant
Definition: svq3.c:76
int8_t ref_cache[2][5 *8]
Definition: h264.h:328
if(!(ptr_align%ac->ptr_align)&&samples_align >=aligned_len)