huffyuv.c
1 /*
2  * huffyuv codec for libavcodec
3  *
4  * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
7  * the algorithm used
8  *
9  * This file is part of Libav.
10  *
11  * Libav is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * Libav is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with Libav; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  */
25 
26 /**
27  * @file
28  * huffyuv codec for libavcodec.
29  */
30 
31 #include "avcodec.h"
32 #include "get_bits.h"
33 #include "put_bits.h"
34 #include "dsputil.h"
35 #include "thread.h"
36 
37 #define VLC_BITS 11
38 
39 #if HAVE_BIGENDIAN
40 #define B 3
41 #define G 2
42 #define R 1
43 #define A 0
44 #else
45 #define B 0
46 #define G 1
47 #define R 2
48 #define A 3
49 #endif
50 
51 typedef enum Predictor{
52  LEFT= 0,
53  PLANE,
54  MEDIAN,
55 } Predictor;
56 
57 typedef struct HYuvContext{
58  AVCodecContext *avctx;
59  Predictor predictor;
60  GetBitContext gb;
61  PutBitContext pb;
62  int interlaced;
63  int decorrelate;
64  int bitstream_bpp;
65  int version;
66  int yuy2; //use yuy2 instead of 422P
67  int bgr32; //use bgr32 instead of bgr24
68  int width, height;
69  int flags;
70  int context;
71  int picture_number;
72  int last_slice_end;
73  uint8_t *temp[3];
74  uint64_t stats[3][256];
75  uint8_t len[3][256];
76  uint32_t bits[3][256];
77  uint32_t pix_bgr_map[1<<VLC_BITS];
78  VLC vlc[6]; //Y,U,V,YY,YU,YV
79  AVFrame picture;
80  uint8_t *bitstream_buffer;
81  unsigned int bitstream_buffer_size;
82  DSPContext dsp;
83 } HYuvContext;
84 
85 #define classic_shift_luma_table_size 42
86 static const unsigned char classic_shift_luma[] = {
87  34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
88  16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
89  69,68, 0
90 };
91 
92 #define classic_shift_chroma_table_size 59
93 static const unsigned char classic_shift_chroma[] = {
94  66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
95  56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
96  214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
97 };
98 
99 static const unsigned char classic_add_luma[256] = {
100  3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
101  73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
102  68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
103  35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
104  37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
105  35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
106  27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
107  15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
108  12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
109  12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
110  18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
111  28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
112  28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
113  62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
114  54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
115  46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
116 };
117 
118 static const unsigned char classic_add_chroma[256] = {
119  3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
120  7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
121  11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
122  43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
123  143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
124  80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
125  17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
126  112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
127  0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
128  135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
129  52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
130  19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
131  7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
132  83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
133  14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
134  6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
135 };
136 
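/* Left-prediction helpers used by the encoder: each output byte is the
 * difference to the previous pixel. For widths >= 32 only the first 16
 * pixels are handled here and the rest is passed to dsp.diff_bytes(); the
 * return value is the last source pixel, carried into the next call. The
 * bgr32 variant below does the same per colour channel. */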
137 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
138  int i;
139  if(w<32){
140  for(i=0; i<w; i++){
141  const int temp= src[i];
142  dst[i]= temp - left;
143  left= temp;
144  }
145  return left;
146  }else{
147  for(i=0; i<16; i++){
148  const int temp= src[i];
149  dst[i]= temp - left;
150  left= temp;
151  }
152  s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
153  return src[w-1];
154  }
155 }
156 
157 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
158  int i;
159  int r,g,b;
160  r= *red;
161  g= *green;
162  b= *blue;
163  for(i=0; i<FFMIN(w,4); i++){
164  const int rt= src[i*4+R];
165  const int gt= src[i*4+G];
166  const int bt= src[i*4+B];
167  dst[i*4+R]= rt - r;
168  dst[i*4+G]= gt - g;
169  dst[i*4+B]= bt - b;
170  r = rt;
171  g = gt;
172  b = bt;
173  }
174  s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
175  *red= src[(w-1)*4+R];
176  *green= src[(w-1)*4+G];
177  *blue= src[(w-1)*4+B];
178 }
179 
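/* Reads a run-length coded table of Huffman code lengths: each group is a
 * 3-bit repeat count followed by a 5-bit length, and repeat==0 means an
 * explicit 8-bit repeat count follows (e.g. the bits 011 00101 encode three
 * symbols of length 5). */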
180 static int read_len_table(uint8_t *dst, GetBitContext *gb){
181  int i, val, repeat;
182 
183  for(i=0; i<256;){
184  repeat= get_bits(gb, 3);
185  val = get_bits(gb, 5);
186  if(repeat==0)
187  repeat= get_bits(gb, 8);
188 //printf("%d %d\n", val, repeat);
189  if(i+repeat > 256 || get_bits_left(gb) < 0) {
190  av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
191  return -1;
192  }
193  while (repeat--)
194  dst[i++] = val;
195  }
196  return 0;
197 }
198 
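/* Assigns canonical codewords from the code lengths: symbols are numbered
 * consecutively starting with the longest codes, and the running value is
 * halved when moving to the next shorter length. An odd value at that point
 * means the lengths do not describe a valid prefix code. */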
199 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
200  int len, index;
201  uint32_t bits=0;
202 
203  for(len=32; len>0; len--){
204  for(index=0; index<256; index++){
205  if(len_table[index]==len)
206  dst[index]= bits++;
207  }
208  if(bits & 1){
209  av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
210  return -1;
211  }
212  bits >>= 1;
213  }
214  return 0;
215 }
216 
217 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
218 typedef struct {
219  uint64_t val;
220  int name;
221 } HeapElem;
222 
223 static void heap_sift(HeapElem *h, int root, int size)
224 {
225  while(root*2+1 < size) {
226  int child = root*2+1;
227  if(child < size-1 && h[child].val > h[child+1].val)
228  child++;
229  if(h[root].val > h[child].val) {
230  FFSWAP(HeapElem, h[root], h[child]);
231  root = child;
232  } else
233  break;
234  }
235 }
236 
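/* Builds Huffman code lengths from the symbol statistics using a small
 * min-heap (heap_sift above restores the heap property). If any resulting
 * length reaches 32 bits, the build is retried with a larger bias added to
 * the scaled counts, which flattens the tree until all lengths fit. */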
237 static void generate_len_table(uint8_t *dst, const uint64_t *stats){
238  HeapElem h[256];
239  int up[2*256];
240  int len[2*256];
241  int offset, i, next;
242  int size = 256;
243 
244  for(offset=1; ; offset<<=1){
245  for(i=0; i<size; i++){
246  h[i].name = i;
247  h[i].val = (stats[i] << 8) + offset;
248  }
249  for(i=size/2-1; i>=0; i--)
250  heap_sift(h, i, size);
251 
252  for(next=size; next<size*2-1; next++){
253  // merge the two smallest entries, and put it back in the heap
254  uint64_t min1v = h[0].val;
255  up[h[0].name] = next;
256  h[0].val = INT64_MAX;
257  heap_sift(h, 0, size);
258  up[h[0].name] = next;
259  h[0].name = next;
260  h[0].val += min1v;
261  heap_sift(h, 0, size);
262  }
263 
264  len[2*size-2] = 0;
265  for(i=2*size-3; i>=size; i--)
266  len[i] = len[up[i]] + 1;
267  for(i=0; i<size; i++) {
268  dst[i] = len[up[i]] + 1;
269  if(dst[i] >= 32) break;
270  }
271  if(i==size) break;
272  }
273 }
274 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
275 
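/* Builds the joint VLC tables used by the decoder: whenever two (YUV) or
 * three (RGB) consecutive symbols have a combined code length that fits in
 * VLC_BITS, they can be decoded with a single table lookup. For RGB only the
 * +/-16 range around zero is covered, as explained in the comment below. */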
276 static void generate_joint_tables(HYuvContext *s){
277  uint16_t symbols[1<<VLC_BITS];
278  uint16_t bits[1<<VLC_BITS];
279  uint8_t len[1<<VLC_BITS];
280  if(s->bitstream_bpp < 24){
281  int p, i, y, u;
282  for(p=0; p<3; p++){
283  for(i=y=0; y<256; y++){
284  int len0 = s->len[0][y];
285  int limit = VLC_BITS - len0;
286  if(limit <= 0)
287  continue;
288  for(u=0; u<256; u++){
289  int len1 = s->len[p][u];
290  if(len1 > limit)
291  continue;
292  len[i] = len0 + len1;
293  bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
294  symbols[i] = (y<<8) + u;
295  if(symbols[i] != 0xffff) // reserved to mean "invalid"
296  i++;
297  }
298  }
299  free_vlc(&s->vlc[3+p]);
300  init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
301  }
302  }else{
303  uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
304  int i, b, g, r, code;
305  int p0 = s->decorrelate;
306  int p1 = !s->decorrelate;
307 // restrict the range to +/-16 because that's pretty much guaranteed to
308  // cover all the combinations that fit in 11 bits total, and it doesn't
309  // matter if we miss a few rare codes.
310  for(i=0, g=-16; g<16; g++){
311  int len0 = s->len[p0][g&255];
312  int limit0 = VLC_BITS - len0;
313  if(limit0 < 2)
314  continue;
315  for(b=-16; b<16; b++){
316  int len1 = s->len[p1][b&255];
317  int limit1 = limit0 - len1;
318  if(limit1 < 1)
319  continue;
320  code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
321  for(r=-16; r<16; r++){
322  int len2 = s->len[2][r&255];
323  if(len2 > limit1)
324  continue;
325  len[i] = len0 + len1 + len2;
326  bits[i] = (code << len2) + s->bits[2][r&255];
327  if(s->decorrelate){
328  map[i][G] = g;
329  map[i][B] = g+b;
330  map[i][R] = g+r;
331  }else{
332  map[i][B] = g;
333  map[i][G] = b;
334  map[i][R] = r;
335  }
336  i++;
337  }
338  }
339  }
340  free_vlc(&s->vlc[3]);
341  init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
342  }
343 }
344 
345 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
346  GetBitContext gb;
347  int i;
348 
349  init_get_bits(&gb, src, length*8);
350 
351  for(i=0; i<3; i++){
352  if(read_len_table(s->len[i], &gb)<0)
353  return -1;
354  if(generate_bits_table(s->bits[i], s->len[i])<0){
355  return -1;
356  }
357  free_vlc(&s->vlc[i]);
358  init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
359  }
360 
361  generate_joint_tables(s);
362 
363  return (get_bits_count(&gb)+7)/8;
364 }
365 
366 static int read_old_huffman_tables(HYuvContext *s){
367 #if 1
368  GetBitContext gb;
369  int i;
370 
371  init_get_bits(&gb, classic_shift_luma, classic_shift_luma_table_size*8);
372  if(read_len_table(s->len[0], &gb)<0)
373  return -1;
374  init_get_bits(&gb, classic_shift_chroma, classic_shift_chroma_table_size*8);
375  if(read_len_table(s->len[1], &gb)<0)
376  return -1;
377 
378  for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
379  for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
380 
381  if(s->bitstream_bpp >= 24){
382  memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
383  memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
384  }
385  memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
386  memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
387 
388  for(i=0; i<3; i++){
389  free_vlc(&s->vlc[i]);
390  init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
391  }
392 
393  generate_joint_tables(s);
394 
395  return 0;
396 #else
397  av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
398  return -1;
399 #endif
400 }
401 
402 static av_cold void alloc_temp(HYuvContext *s){
403  int i;
404 
405  if(s->bitstream_bpp<24){
406  for(i=0; i<3; i++){
407  s->temp[i]= av_malloc(s->width + 16);
408  }
409  }else{
410  s->temp[0]= av_mallocz(4*s->width + 16);
411  }
412 }
413 
414 static av_cold int common_init(AVCodecContext *avctx){
415  HYuvContext *s = avctx->priv_data;
416 
417  s->avctx= avctx;
418  s->flags= avctx->flags;
419 
420  dsputil_init(&s->dsp, avctx);
421 
422  s->width= avctx->width;
423  s->height= avctx->height;
424  assert(s->width>0 && s->height>0);
425 
426  return 0;
427 }
428 
429 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
430 static av_cold int decode_init(AVCodecContext *avctx)
431 {
432  HYuvContext *s = avctx->priv_data;
433 
434  common_init(avctx);
435  memset(s->vlc, 0, 3*sizeof(VLC));
436 
437  avctx->coded_frame= &s->picture;
438  s->interlaced= s->height > 288;
439 
440 s->bgr32=1;
441 //if(avctx->extradata)
442 // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
443  if(avctx->extradata_size){
444  if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
445  s->version=1; // do such files exist at all?
446  else
447  s->version=2;
448  }else
449  s->version=0;
450 
451  if(s->version==2){
452  int method, interlace;
453 
454  if (avctx->extradata_size < 4)
455  return -1;
456 
457  method= ((uint8_t*)avctx->extradata)[0];
458  s->decorrelate= method&64 ? 1 : 0;
459  s->predictor= method&63;
460  s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
461  if(s->bitstream_bpp==0)
462  s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
463  interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
464  s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
465  s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
466 
467  if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
468  return -1;
469  }else{
470  switch(avctx->bits_per_coded_sample&7){
471  case 1:
472  s->predictor= LEFT;
473  s->decorrelate= 0;
474  break;
475  case 2:
476  s->predictor= LEFT;
477  s->decorrelate= 1;
478  break;
479  case 3:
480  s->predictor= PLANE;
481  s->decorrelate= avctx->bits_per_coded_sample >= 24;
482  break;
483  case 4:
484  s->predictor= MEDIAN;
485  s->decorrelate= 0;
486  break;
487  default:
488  s->predictor= LEFT; //OLD
489  s->decorrelate= 0;
490  break;
491  }
492  s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
493  s->context= 0;
494 
495  if(read_old_huffman_tables(s) < 0)
496  return -1;
497  }
498 
499  switch(s->bitstream_bpp){
500  case 12:
501  avctx->pix_fmt = PIX_FMT_YUV420P;
502  break;
503  case 16:
504  if(s->yuy2){
505  avctx->pix_fmt = PIX_FMT_YUYV422;
506  }else{
507  avctx->pix_fmt = PIX_FMT_YUV422P;
508  }
509  break;
510  case 24:
511  case 32:
512  if(s->bgr32){
513  avctx->pix_fmt = PIX_FMT_RGB32;
514  }else{
515  avctx->pix_fmt = PIX_FMT_BGR24;
516  }
517  break;
518  default:
519  return AVERROR_INVALIDDATA;
520  }
521 
522  alloc_temp(s);
523 
524 // av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
525 
526  return 0;
527 }
528 
529 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
530 {
531  HYuvContext *s = avctx->priv_data;
532  int i;
533 
534  avctx->coded_frame= &s->picture;
535  alloc_temp(s);
536 
537  for (i = 0; i < 6; i++)
538  s->vlc[i].table = NULL;
539 
540  if(s->version==2){
541  if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
542  return -1;
543  }else{
544  if(read_old_huffman_tables(s) < 0)
545  return -1;
546  }
547 
548  return 0;
549 }
550 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
551 
552 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
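/* Writes one 256-entry code length table in the run-length format that
 * read_len_table() parses: short runs as a single byte (repeat count in the
 * top 3 bits, length in the low 5), longer runs as a length byte with a zero
 * repeat field followed by an 8-bit repeat count. */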
553 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
554  int i;
555  int index= 0;
556 
557  for(i=0; i<256;){
558  int val= len[i];
559  int repeat=0;
560 
561  for(; i<256 && len[i]==val && repeat<255; i++)
562  repeat++;
563 
564  assert(val < 32 && val >0 && repeat<256 && repeat>0);
565  if(repeat>7){
566  buf[index++]= val;
567  buf[index++]= repeat;
568  }else{
569  buf[index++]= val | (repeat<<5);
570  }
571  }
572 
573  return index;
574 }
575 
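/* Encoder setup. The 4-byte global header written into extradata is:
 * byte 0 = predictor with the decorrelate flag in bit 6, byte 1 = bitstream
 * bpp, byte 2 = interlacing (0x10/0x20) and context-model (0x40) flags,
 * byte 3 = 0; it is followed by the three stored code length tables. */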
576 static av_cold int encode_init(AVCodecContext *avctx)
577 {
578  HYuvContext *s = avctx->priv_data;
579  int i, j;
580 
581  common_init(avctx);
582 
583  avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
584  avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
585  s->version=2;
586 
587  avctx->coded_frame= &s->picture;
588 
589  switch(avctx->pix_fmt){
590  case PIX_FMT_YUV420P:
591  s->bitstream_bpp= 12;
592  break;
593  case PIX_FMT_YUV422P:
594  s->bitstream_bpp= 16;
595  break;
596  case PIX_FMT_RGB32:
597  s->bitstream_bpp= 24;
598  break;
599  default:
600  av_log(avctx, AV_LOG_ERROR, "format not supported\n");
601  return -1;
602  }
604  s->decorrelate= s->bitstream_bpp >= 24;
605  s->predictor= avctx->prediction_method;
606  s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
607  if(avctx->context_model==1){
608  s->context= avctx->context_model;
609  if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
610  av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
611  return -1;
612  }
613  }else s->context= 0;
614 
615  if(avctx->codec->id==CODEC_ID_HUFFYUV){
616  if(avctx->pix_fmt==PIX_FMT_YUV420P){
617  av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
618  return -1;
619  }
620  if(avctx->context_model){
621  av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
622  return -1;
623  }
624  if(s->interlaced != ( s->height > 288 ))
625  av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
626  }
627 
628  if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
629  av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
630  return -1;
631  }
632 
633  ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
634  ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
635  ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
636  if(s->context)
637  ((uint8_t*)avctx->extradata)[2]|= 0x40;
638  ((uint8_t*)avctx->extradata)[3]= 0;
639  s->avctx->extradata_size= 4;
640 
641  if(avctx->stats_in){
642  char *p= avctx->stats_in;
643 
644  for(i=0; i<3; i++)
645  for(j=0; j<256; j++)
646  s->stats[i][j]= 1;
647 
648  for(;;){
649  for(i=0; i<3; i++){
650  char *next;
651 
652  for(j=0; j<256; j++){
653  s->stats[i][j]+= strtol(p, &next, 0);
654  if(next==p) return -1;
655  p=next;
656  }
657  }
658  if(p[0]==0 || p[1]==0 || p[2]==0) break;
659  }
660  }else{
661  for(i=0; i<3; i++)
662  for(j=0; j<256; j++){
663  int d= FFMIN(j, 256-j);
664 
665  s->stats[i][j]= 100000000/(d+1);
666  }
667  }
668 
669  for(i=0; i<3; i++){
670  generate_len_table(s->len[i], s->stats[i]);
671 
672  if(generate_bits_table(s->bits[i], s->len[i])<0){
673  return -1;
674  }
675 
676  s->avctx->extradata_size+=
677  store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
678  }
679 
680  if(s->context){
681  for(i=0; i<3; i++){
682  int pels = s->width*s->height / (i?40:10);
683  for(j=0; j<256; j++){
684  int d= FFMIN(j, 256-j);
685  s->stats[i][j]= pels/(d+1);
686  }
687  }
688  }else{
689  for(i=0; i<3; i++)
690  for(j=0; j<256; j++)
691  s->stats[i][j]= 0;
692  }
693 
694 // printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
695 
696  alloc_temp(s);
697 
698  s->picture_number=0;
699 
700  return 0;
701 }
702 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
703 
704 /* TODO instead of restarting the read when the code isn't in the first level
705  * of the joint table, jump into the 2nd level of the individual table. */
706 #define READ_2PIX(dst0, dst1, plane1){\
707  uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
708  if(code != 0xffff){\
709  dst0 = code>>8;\
710  dst1 = code;\
711  }else{\
712  dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
713  dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
714  }\
715 }
716 
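/* Bitstream readers for 4:2:2 and gray data. The count >= bits_left/(31*N)
 * test is a worst-case estimate (up to four codes of at most 31 bits per
 * 4:2:2 iteration, two for gray); only when the buffer might run out does
 * the slower loop with a per-iteration get_bits_left() check get used. */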
717 static void decode_422_bitstream(HYuvContext *s, int count){
718  int i;
719 
720  count/=2;
721 
722  if(count >= (get_bits_left(&s->gb))/(31*4)){
723  for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
724  READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
725  READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
726  }
727  }else{
728  for(i=0; i<count; i++){
729  READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
730  READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
731  }
732  }
733 }
734 
735 static void decode_gray_bitstream(HYuvContext *s, int count){
736  int i;
737 
738  count/=2;
739 
740  if(count >= (get_bits_left(&s->gb))/(31*2)){
741  for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
742  READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
743  }
744  }else{
745  for(i=0; i<count; i++){
746  READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
747  }
748  }
749 }
750 
751 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
752 static int encode_422_bitstream(HYuvContext *s, int offset, int count){
753  int i;
754  const uint8_t *y = s->temp[0] + offset;
755  const uint8_t *u = s->temp[1] + offset/2;
756  const uint8_t *v = s->temp[2] + offset/2;
757 
758  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
759  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
760  return -1;
761  }
762 
763 #define LOAD4\
764  int y0 = y[2*i];\
765  int y1 = y[2*i+1];\
766  int u0 = u[i];\
767  int v0 = v[i];
768 
769  count/=2;
770  if(s->flags&CODEC_FLAG_PASS1){
771  for(i=0; i<count; i++){
772  LOAD4;
773  s->stats[0][y0]++;
774  s->stats[1][u0]++;
775  s->stats[0][y1]++;
776  s->stats[2][v0]++;
777  }
778  }
779  if(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
780  return 0;
781  if(s->context){
782  for(i=0; i<count; i++){
783  LOAD4;
784  s->stats[0][y0]++;
785  put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
786  s->stats[1][u0]++;
787  put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
788  s->stats[0][y1]++;
789  put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
790  s->stats[2][v0]++;
791  put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
792  }
793  }else{
794  for(i=0; i<count; i++){
795  LOAD4;
796  put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
797  put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
798  put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
799  put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
800  }
801  }
802  return 0;
803 }
804 
805 static int encode_gray_bitstream(HYuvContext *s, int count){
806  int i;
807 
808  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
809  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
810  return -1;
811  }
812 
813 #define LOAD2\
814  int y0 = s->temp[0][2*i];\
815  int y1 = s->temp[0][2*i+1];
816 #define STAT2\
817  s->stats[0][y0]++;\
818  s->stats[0][y1]++;
819 #define WRITE2\
820  put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
821  put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
822 
823  count/=2;
824  if(s->flags&CODEC_FLAG_PASS1){
825  for(i=0; i<count; i++){
826  LOAD2;
827  STAT2;
828  }
829  }
830  if(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
831  return 0;
832 
833  if(s->context){
834  for(i=0; i<count; i++){
835  LOAD2;
836  STAT2;
837  WRITE2;
838  }
839  }else{
840  for(i=0; i<count; i++){
841  LOAD2;
842  WRITE2;
843  }
844  }
845  return 0;
846 }
847 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
848 
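/* Decodes count RGB32/BGR24 pixels. A hit in the joint table stores a
 * precomputed packed pixel from pix_bgr_map; otherwise B, G and R are read
 * individually, with G added back to B and R when decorrelation is enabled.
 * For 32 bpp input an alpha byte is read with the same table as R. */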
849 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
850  int i;
851  for(i=0; i<count; i++){
852  int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
853  if(code != -1){
854  *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
855  }else if(decorrelate){
856  s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
857  s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
858  s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
859  }else{
860  s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
861  s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
862  s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
863  }
864  if(alpha)
865  s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
866  }
867 }
868 
869 static void decode_bgr_bitstream(HYuvContext *s, int count){
870  if(s->decorrelate){
871  if(s->bitstream_bpp==24)
872  decode_bgr_1(s, count, 1, 0);
873  else
874  decode_bgr_1(s, count, 1, 1);
875  }else{
876  if(s->bitstream_bpp==24)
877  decode_bgr_1(s, count, 0, 0);
878  else
879  decode_bgr_1(s, count, 0, 1);
880  }
881 }
882 
883 static int encode_bgr_bitstream(HYuvContext *s, int count){
884  int i;
885 
886  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
887  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
888  return -1;
889  }
890 
891 #define LOAD3\
892  int g= s->temp[0][4*i+G];\
893  int b= (s->temp[0][4*i+B] - g) & 0xff;\
894  int r= (s->temp[0][4*i+R] - g) & 0xff;
895 #define STAT3\
896  s->stats[0][b]++;\
897  s->stats[1][g]++;\
898  s->stats[2][r]++;
899 #define WRITE3\
900  put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
901  put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
902  put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
903 
904  if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
905  for(i=0; i<count; i++){
906  LOAD3;
907  STAT3;
908  }
909  }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
910  for(i=0; i<count; i++){
911  LOAD3;
912  STAT3;
913  WRITE3;
914  }
915  }else{
916  for(i=0; i<count; i++){
917  LOAD3;
918  WRITE3;
919  }
920  }
921  return 0;
922 }
923 
924 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
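/* Passes the rows finished since the last call to the application through
 * draw_horiz_band(); for 4:2:0 (12 bpp) the chroma offset uses half the
 * luma row index. */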
925 static void draw_slice(HYuvContext *s, int y){
926  int h, cy, i;
927  int offset[AV_NUM_DATA_POINTERS];
928 
929  if(s->avctx->draw_horiz_band==NULL)
930  return;
931 
932  h= y - s->last_slice_end;
933  y -= h;
934 
935  if(s->bitstream_bpp==12){
936  cy= y>>1;
937  }else{
938  cy= y;
939  }
940 
941  offset[0] = s->picture.linesize[0]*y;
942  offset[1] = s->picture.linesize[1]*cy;
943  offset[2] = s->picture.linesize[2]*cy;
944  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
945  offset[i] = 0;
946  emms_c();
947 
948  s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
949 
950  s->last_slice_end= y + h;
951 }
952 
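/* Decodes one frame: the packet is byteswapped into a padded buffer, the
 * per-frame Huffman tables are read first when adaptive context is enabled,
 * the first pixel(s) are coded raw, and the remaining lines are reconstructed
 * with the selected predictor (left, plane or median). RGB frames are stored
 * bottom-up, so they are decoded from the last line upwards. */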
953 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
954  const uint8_t *buf = avpkt->data;
955  int buf_size = avpkt->size;
956  HYuvContext *s = avctx->priv_data;
957  const int width= s->width;
958  const int width2= s->width>>1;
959  const int height= s->height;
960  int fake_ystride, fake_ustride, fake_vstride;
961  AVFrame * const p= &s->picture;
962  int table_size= 0;
963 
964  AVFrame *picture = data;
965 
966  av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
967  if (!s->bitstream_buffer)
968  return AVERROR(ENOMEM);
969 
970  memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
971  s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
972 
973  if(p->data[0])
974  ff_thread_release_buffer(avctx, p);
975 
976  p->reference= 0;
977  if(ff_thread_get_buffer(avctx, p) < 0){
978  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
979  return -1;
980  }
981 
982  if(s->context){
983  table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
984  if(table_size < 0)
985  return -1;
986  }
987 
988  if((unsigned)(buf_size-table_size) >= INT_MAX/8)
989  return -1;
990 
991  init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
992 
993  fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
994  fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
995  fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
996 
997  s->last_slice_end= 0;
998 
999  if(s->bitstream_bpp<24){
1000  int y, cy;
1001  int lefty, leftu, leftv;
1002  int lefttopy, lefttopu, lefttopv;
1003 
1004  if(s->yuy2){
1005  p->data[0][3]= get_bits(&s->gb, 8);
1006  p->data[0][2]= get_bits(&s->gb, 8);
1007  p->data[0][1]= get_bits(&s->gb, 8);
1008  p->data[0][0]= get_bits(&s->gb, 8);
1009 
1010  av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
1011  return -1;
1012  }else{
1013 
1014  leftv= p->data[2][0]= get_bits(&s->gb, 8);
1015  lefty= p->data[0][1]= get_bits(&s->gb, 8);
1016  leftu= p->data[1][0]= get_bits(&s->gb, 8);
1017  p->data[0][0]= get_bits(&s->gb, 8);
1018 
1019  switch(s->predictor){
1020  case LEFT:
1021  case PLANE:
1022  decode_422_bitstream(s, width-2);
1023  lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1024  if(!(s->flags&CODEC_FLAG_GRAY)){
1025  leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1026  leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1027  }
1028 
1029  for(cy=y=1; y<s->height; y++,cy++){
1030  uint8_t *ydst, *udst, *vdst;
1031 
1032  if(s->bitstream_bpp==12){
1033  decode_gray_bitstream(s, width);
1034 
1035  ydst= p->data[0] + p->linesize[0]*y;
1036 
1037  lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1038  if(s->predictor == PLANE){
1039  if(y>s->interlaced)
1040  s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1041  }
1042  y++;
1043  if(y>=s->height) break;
1044  }
1045 
1046  draw_slice(s, y);
1047 
1048  ydst= p->data[0] + p->linesize[0]*y;
1049  udst= p->data[1] + p->linesize[1]*cy;
1050  vdst= p->data[2] + p->linesize[2]*cy;
1051 
1052  decode_422_bitstream(s, width);
1053  lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1054  if(!(s->flags&CODEC_FLAG_GRAY)){
1055  leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1056  leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
1057  }
1058  if(s->predictor == PLANE){
1059  if(cy>s->interlaced){
1060  s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1061  if(!(s->flags&CODEC_FLAG_GRAY)){
1062  s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1063  s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1064  }
1065  }
1066  }
1067  }
1068  draw_slice(s, height);
1069 
1070  break;
1071  case MEDIAN:
1072  /* first line except first 2 pixels is left predicted */
1073  decode_422_bitstream(s, width-2);
1074  lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1075  if(!(s->flags&CODEC_FLAG_GRAY)){
1076  leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1077  leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1078  }
1079 
1080  cy=y=1;
1081 
1082  /* second line is left predicted for interlaced case */
1083  if(s->interlaced){
1084  decode_422_bitstream(s, width);
1085  lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1086  if(!(s->flags&CODEC_FLAG_GRAY)){
1087  leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1088  leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1089  }
1090  y++; cy++;
1091  }
1092 
1093  /* next 4 pixels are left predicted too */
1094  decode_422_bitstream(s, 4);
1095  lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1096  if(!(s->flags&CODEC_FLAG_GRAY)){
1097  leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1098  leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1099  }
1100 
1101  /* next line except the first 4 pixels is median predicted */
1102  lefttopy= p->data[0][3];
1103  decode_422_bitstream(s, width-4);
1104  s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1105  if(!(s->flags&CODEC_FLAG_GRAY)){
1106  lefttopu= p->data[1][1];
1107  lefttopv= p->data[2][1];
1108  s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
1109  s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
1110  }
1111  y++; cy++;
1112 
1113  for(; y<height; y++,cy++){
1114  uint8_t *ydst, *udst, *vdst;
1115 
1116  if(s->bitstream_bpp==12){
1117  while(2*cy > y){
1118  decode_gray_bitstream(s, width);
1119  ydst= p->data[0] + p->linesize[0]*y;
1120  s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1121  y++;
1122  }
1123  if(y>=height) break;
1124  }
1125  draw_slice(s, y);
1126 
1127  decode_422_bitstream(s, width);
1128 
1129  ydst= p->data[0] + p->linesize[0]*y;
1130  udst= p->data[1] + p->linesize[1]*cy;
1131  vdst= p->data[2] + p->linesize[2]*cy;
1132 
1133  s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1134  if(!(s->flags&CODEC_FLAG_GRAY)){
1135  s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1136  s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1137  }
1138  }
1139 
1140  draw_slice(s, height);
1141  break;
1142  }
1143  }
1144  }else{
1145  int y;
1146  int leftr, leftg, leftb, lefta;
1147  const int last_line= (height-1)*p->linesize[0];
1148 
1149  if(s->bitstream_bpp==32){
1150  lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
1151  leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1152  leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1153  leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1154  }else{
1155  leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1156  leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1157  leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1158  lefta= p->data[0][last_line+A]= 255;
1159  skip_bits(&s->gb, 8);
1160  }
1161 
1162  if(s->bgr32){
1163  switch(s->predictor){
1164  case LEFT:
1165  case PLANE:
1166  decode_bgr_bitstream(s, width-1);
1167  s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);
1168 
1169  for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
1170  decode_bgr_bitstream(s, width);
1171 
1172  s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
1173  if(s->predictor == PLANE){
1174  if(s->bitstream_bpp!=32) lefta=0;
1175  if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1176  s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1177  p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1178  }
1179  }
1180  }
1181  draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
1182  break;
1183  default:
1184  av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1185  }
1186  }else{
1187 
1188  av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1189  return -1;
1190  }
1191  }
1192  emms_c();
1193 
1194  *picture= *p;
1195  *data_size = sizeof(AVFrame);
1196 
1197  return (get_bits_count(&s->gb)+31)/32*4 + table_size;
1198 }
1199 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1200 
1201 static int common_end(HYuvContext *s){
1202  int i;
1203 
1204  for(i=0; i<3; i++){
1205  av_freep(&s->temp[i]);
1206  }
1207  return 0;
1208 }
1209 
1210 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
1211 static av_cold int decode_end(AVCodecContext *avctx)
1212 {
1213  HYuvContext *s = avctx->priv_data;
1214  int i;
1215 
1216  if (s->picture.data[0])
1217  avctx->release_buffer(avctx, &s->picture);
1218 
1219  common_end(s);
1220  av_freep(&s->bitstream_buffer);
1221 
1222  for(i=0; i<6; i++){
1223  free_vlc(&s->vlc[i]);
1224  }
1225 
1226  return 0;
1227 }
1228 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1229 
1230 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
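/* Encodes one frame: with adaptive context the tables derived from the
 * running statistics are stored in the packet first and the statistics are
 * halved; each line is then turned into prediction residuals (left, plane or
 * median) and entropy coded. In pass 1 the statistics are appended to
 * avctx->stats_out every 32 frames. */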
1231 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1232  HYuvContext *s = avctx->priv_data;
1233  AVFrame *pict = data;
1234  const int width= s->width;
1235  const int width2= s->width>>1;
1236  const int height= s->height;
1237  const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1238  const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1239  const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1240  AVFrame * const p= &s->picture;
1241  int i, j, size=0;
1242 
1243  *p = *pict;
1244  p->pict_type= AV_PICTURE_TYPE_I;
1245  p->key_frame= 1;
1246 
1247  if(s->context){
1248  for(i=0; i<3; i++){
1249  generate_len_table(s->len[i], s->stats[i]);
1250  if(generate_bits_table(s->bits[i], s->len[i])<0)
1251  return -1;
1252  size+= store_table(s, s->len[i], &buf[size]);
1253  }
1254 
1255  for(i=0; i<3; i++)
1256  for(j=0; j<256; j++)
1257  s->stats[i][j] >>= 1;
1258  }
1259 
1260  init_put_bits(&s->pb, buf+size, buf_size-size);
1261 
1262  if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1263  int lefty, leftu, leftv, y, cy;
1264 
1265  put_bits(&s->pb, 8, leftv= p->data[2][0]);
1266  put_bits(&s->pb, 8, lefty= p->data[0][1]);
1267  put_bits(&s->pb, 8, leftu= p->data[1][0]);
1268  put_bits(&s->pb, 8, p->data[0][0]);
1269 
1270  lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
1271  leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
1272  leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
1273 
1274  encode_422_bitstream(s, 2, width-2);
1275 
1276  if(s->predictor==MEDIAN){
1277  int lefttopy, lefttopu, lefttopv;
1278  cy=y=1;
1279  if(s->interlaced){
1280  lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1281  leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1282  leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1283 
1284  encode_422_bitstream(s, 0, width);
1285  y++; cy++;
1286  }
1287 
1288  lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1289  leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1290  leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1291 
1292  encode_422_bitstream(s, 0, 4);
1293 
1294  lefttopy= p->data[0][3];
1295  lefttopu= p->data[1][1];
1296  lefttopv= p->data[2][1];
1297  s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1298  s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1299  s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1300  encode_422_bitstream(s, 0, width-4);
1301  y++; cy++;
1302 
1303  for(; y<height; y++,cy++){
1304  uint8_t *ydst, *udst, *vdst;
1305 
1306  if(s->bitstream_bpp==12){
1307  while(2*cy > y){
1308  ydst= p->data[0] + p->linesize[0]*y;
1309  s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1310  encode_gray_bitstream(s, width);
1311  y++;
1312  }
1313  if(y>=height) break;
1314  }
1315  ydst= p->data[0] + p->linesize[0]*y;
1316  udst= p->data[1] + p->linesize[1]*cy;
1317  vdst= p->data[2] + p->linesize[2]*cy;
1318 
1319  s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1320  s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1321  s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1322 
1323  encode_422_bitstream(s, 0, width);
1324  }
1325  }else{
1326  for(cy=y=1; y<height; y++,cy++){
1327  uint8_t *ydst, *udst, *vdst;
1328 
1329  /* encode a luma only line & y++ */
1330  if(s->bitstream_bpp==12){
1331  ydst= p->data[0] + p->linesize[0]*y;
1332 
1333  if(s->predictor == PLANE && s->interlaced < y){
1334  s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1335 
1336  lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1337  }else{
1338  lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1339  }
1340  encode_gray_bitstream(s, width);
1341  y++;
1342  if(y>=height) break;
1343  }
1344 
1345  ydst= p->data[0] + p->linesize[0]*y;
1346  udst= p->data[1] + p->linesize[1]*cy;
1347  vdst= p->data[2] + p->linesize[2]*cy;
1348 
1349  if(s->predictor == PLANE && s->interlaced < cy){
1350  s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1351  s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1352  s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1353 
1354  lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1355  leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1356  leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1357  }else{
1358  lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1359  leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1360  leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1361  }
1362 
1363  encode_422_bitstream(s, 0, width);
1364  }
1365  }
1366  }else if(avctx->pix_fmt == PIX_FMT_RGB32){
1367  uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1368  const int stride = -p->linesize[0];
1369  const int fake_stride = -fake_ystride;
1370  int y;
1371  int leftr, leftg, leftb;
1372 
1373  put_bits(&s->pb, 8, leftr= data[R]);
1374  put_bits(&s->pb, 8, leftg= data[G]);
1375  put_bits(&s->pb, 8, leftb= data[B]);
1376  put_bits(&s->pb, 8, 0);
1377 
1378  sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
1379  encode_bgr_bitstream(s, width-1);
1380 
1381  for(y=1; y<s->height; y++){
1382  uint8_t *dst = data + y*stride;
1383  if(s->predictor == PLANE && s->interlaced < y){
1384  s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
1385  sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1386  }else{
1387  sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1388  }
1389  encode_bgr_bitstream(s, width);
1390  }
1391  }else{
1392  av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
1393  }
1394  emms_c();
1395 
1396  size+= (put_bits_count(&s->pb)+31)/8;
1397  put_bits(&s->pb, 16, 0);
1398  put_bits(&s->pb, 15, 0);
1399  size/= 4;
1400 
1401  if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1402  int j;
1403  char *p= avctx->stats_out;
1404  char *end= p + 1024*30;
1405  for(i=0; i<3; i++){
1406  for(j=0; j<256; j++){
1407  snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1408  p+= strlen(p);
1409  s->stats[i][j]= 0;
1410  }
1411  snprintf(p, end-p, "\n");
1412  p++;
1413  }
1414  } else
1415  avctx->stats_out[0] = '\0';
1416  if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
1417  flush_put_bits(&s->pb);
1418  s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
1419  }
1420 
1421  s->picture_number++;
1422 
1423  return size*4;
1424 }
1425 
1426 static av_cold int encode_end(AVCodecContext *avctx)
1427 {
1428  HYuvContext *s = avctx->priv_data;
1429 
1430  common_end(s);
1431 
1432  av_freep(&avctx->extradata);
1433  av_freep(&avctx->stats_out);
1434 
1435  return 0;
1436 }
1437 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1438 
1439 #if CONFIG_HUFFYUV_DECODER
1440 AVCodec ff_huffyuv_decoder = {
1441  .name = "huffyuv",
1442  .type = AVMEDIA_TYPE_VIDEO,
1443  .id = CODEC_ID_HUFFYUV,
1444  .priv_data_size = sizeof(HYuvContext),
1445  .init = decode_init,
1446  .close = decode_end,
1447  .decode = decode_frame,
1448  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
1449  .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1450  .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1451 };
1452 #endif
1453 
1454 #if CONFIG_FFVHUFF_DECODER
1455 AVCodec ff_ffvhuff_decoder = {
1456  .name = "ffvhuff",
1457  .type = AVMEDIA_TYPE_VIDEO,
1458  .id = CODEC_ID_FFVHUFF,
1459  .priv_data_size = sizeof(HYuvContext),
1460  .init = decode_init,
1461  .close = decode_end,
1462  .decode = decode_frame,
1463  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
1464  .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1465  .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1466 };
1467 #endif
1468 
1469 #if CONFIG_HUFFYUV_ENCODER
1470 AVCodec ff_huffyuv_encoder = {
1471  .name = "huffyuv",
1472  .type = AVMEDIA_TYPE_VIDEO,
1473  .id = CODEC_ID_HUFFYUV,
1474  .priv_data_size = sizeof(HYuvContext),
1475  .init = encode_init,
1476  .encode = encode_frame,
1477  .close = encode_end,
1478  .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1479  .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1480 };
1481 #endif
1482 
1483 #if CONFIG_FFVHUFF_ENCODER
1484 AVCodec ff_ffvhuff_encoder = {
1485  .name = "ffvhuff",
1486  .type = AVMEDIA_TYPE_VIDEO,
1487  .id = CODEC_ID_FFVHUFF,
1488  .priv_data_size = sizeof(HYuvContext),
1489  .init = encode_init,
1490  .encode = encode_frame,
1491  .close = encode_end,
1492  .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1493  .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1494 };
1495 #endif