FFmpeg
aes.c
/*
 * copyright (c) 2007 Michael Niedermayer <michaelni@gmx.at>
 *
 * some optimization ideas from aes128.c by Reimar Doeffinger
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "common.h"
#include "aes.h"
#include "intreadwrite.h"

typedef union {
    uint64_t u64[2];
    uint32_t u32[4];
    uint8_t u8x4[4][4];
    uint8_t u8[16];
} av_aes_block;

typedef struct AVAES {
    // Note: round_key[16] is accessed in the init code, but this only
    // overwrites state, which does not matter (see also commit ba554c0).
    av_aes_block round_key[15];
    av_aes_block state[2];
    int rounds;
} AVAES;

const int av_aes_size = sizeof(AVAES);

struct AVAES *av_aes_alloc(void)
{
    return av_mallocz(sizeof(struct AVAES));
}
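
/*
 * Illustrative sketch (not part of the original file): a minimal example of
 * the public API from aes.h as it is typically used, assuming av_free() from
 * libavutil's mem.h is available to release the context.  Kept out of the
 * build with #if 0; key handling and error reporting are deliberately bare.
 */
#if 0
static int example_encrypt_ecb(uint8_t *buf, int blocks, const uint8_t key[16])
{
    struct AVAES *aes = av_aes_alloc();

    if (!aes)
        return -1;
    if (av_aes_init(aes, key, 128, 0) < 0) {        // 0 = set up for encryption
        av_free(aes);
        return -1;
    }
    av_aes_crypt(aes, buf, buf, blocks, NULL, 0);   // in-place ECB, no IV
    av_free(aes);
    return 0;
}
#endif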

static const uint8_t rcon[10] = {
    0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36
};

static uint8_t     sbox[256];
static uint8_t inv_sbox[256];
#if CONFIG_SMALL
static uint32_t enc_multbl[1][256];
static uint32_t dec_multbl[1][256];
#else
static uint32_t enc_multbl[4][256];
static uint32_t dec_multbl[4][256];
#endif

#if HAVE_BIGENDIAN
#   define ROT(x, s) ((x >> s) | (x << (32 - s)))
#else
#   define ROT(x, s) ((x << s) | (x >> (32 - s)))
#endif

static inline void addkey(av_aes_block *dst, const av_aes_block *src,
                          const av_aes_block *round_key)
{
    dst->u64[0] = src->u64[0] ^ round_key->u64[0];
    dst->u64[1] = src->u64[1] ^ round_key->u64[1];
}

static inline void addkey_s(av_aes_block *dst, const uint8_t *src,
                            const av_aes_block *round_key)
{
    dst->u64[0] = AV_RN64(src)     ^ round_key->u64[0];
    dst->u64[1] = AV_RN64(src + 8) ^ round_key->u64[1];
}

static inline void addkey_d(uint8_t *dst, const av_aes_block *src,
                            const av_aes_block *round_key)
{
    AV_WN64(dst,     src->u64[0] ^ round_key->u64[0]);
    AV_WN64(dst + 8, src->u64[1] ^ round_key->u64[1]);
}

// Combined SubBytes and row shift: bytes are read from the second block
// (index [1]) through box[] and written, permuted, into the first block
// (index [0]); the byte-offset aliases s1/s3 select the shift direction.
static void subshift(av_aes_block s0[2], int s, const uint8_t *box)
{
    av_aes_block *s1 = (av_aes_block *) (s0[0].u8 - s);
    av_aes_block *s3 = (av_aes_block *) (s0[0].u8 + s);

    s0[0].u8[ 0] = box[s0[1].u8[ 0]];
    s0[0].u8[ 4] = box[s0[1].u8[ 4]];
    s0[0].u8[ 8] = box[s0[1].u8[ 8]];
    s0[0].u8[12] = box[s0[1].u8[12]];
    s1[0].u8[ 3] = box[s1[1].u8[ 7]];
    s1[0].u8[ 7] = box[s1[1].u8[11]];
    s1[0].u8[11] = box[s1[1].u8[15]];
    s1[0].u8[15] = box[s1[1].u8[ 3]];
    s0[0].u8[ 2] = box[s0[1].u8[10]];
    s0[0].u8[10] = box[s0[1].u8[ 2]];
    s0[0].u8[ 6] = box[s0[1].u8[14]];
    s0[0].u8[14] = box[s0[1].u8[ 6]];
    s3[0].u8[ 1] = box[s3[1].u8[13]];
    s3[0].u8[13] = box[s3[1].u8[ 9]];
    s3[0].u8[ 9] = box[s3[1].u8[ 5]];
    s3[0].u8[ 5] = box[s3[1].u8[ 1]];
}

static inline int mix_core(uint32_t multbl[][256], int a, int b, int c, int d)
{
#if CONFIG_SMALL
    return multbl[0][a] ^ ROT(multbl[0][b], 8) ^ ROT(multbl[0][c], 16) ^ ROT(multbl[0][d], 24);
#else
    return multbl[0][a] ^ multbl[1][b] ^ multbl[2][c] ^ multbl[3][d];
#endif
}

static inline void mix(av_aes_block state[2], uint32_t multbl[][256], int s1, int s3)
{
    uint8_t (*src)[4] = state[1].u8x4;

    state[0].u32[0] = mix_core(multbl, src[0][0], src[s1    ][1], src[2][2], src[s3    ][3]);
    state[0].u32[1] = mix_core(multbl, src[1][0], src[s3 - 1][1], src[3][2], src[s1 - 1][3]);
    state[0].u32[2] = mix_core(multbl, src[2][0], src[s3    ][1], src[0][2], src[s1    ][3]);
    state[0].u32[3] = mix_core(multbl, src[3][0], src[s1 - 1][1], src[1][2], src[s3 - 1][3]);
}

static inline void crypt(AVAES *a, int s, const uint8_t *sbox,
                         uint32_t multbl[][256])
{
    int r;

    for (r = a->rounds - 1; r > 0; r--) {
        mix(a->state, multbl, 3 - s, 1 + s);
        addkey(&a->state[1], &a->state[0], &a->round_key[r]);
    }

    subshift(&a->state[0], s, sbox);
}

void av_aes_crypt(AVAES *a, uint8_t *dst, const uint8_t *src,
                  int count, uint8_t *iv, int decrypt)
{
    while (count--) {
        addkey_s(&a->state[1], src, &a->round_key[a->rounds]);
        if (decrypt) {
            crypt(a, 0, inv_sbox, dec_multbl);
            if (iv) {
                addkey_s(&a->state[0], iv, &a->state[0]);
                memcpy(iv, src, 16);
            }
            addkey_d(dst, &a->state[0], &a->round_key[0]);
        } else {
            if (iv)
                addkey_s(&a->state[1], iv, &a->state[1]);
            crypt(a, 2, sbox, enc_multbl);
            addkey_d(dst, &a->state[0], &a->round_key[0]);
            if (iv)
                memcpy(iv, dst, 16);
        }
        src += 16;
        dst += 16;
    }
}
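
/*
 * Illustrative sketch (not part of the original file): CBC use of
 * av_aes_crypt().  A non-NULL iv selects CBC chaining, and the iv buffer is
 * updated in place so consecutive calls continue the chain; the key and iv
 * values below are arbitrary placeholders.  Kept out of the build with #if 0;
 * memcmp() and av_free() from the usual libavutil environment are assumed.
 */
#if 0
static int example_cbc_roundtrip(void)
{
    struct AVAES *enc = av_aes_alloc();
    struct AVAES *dec = av_aes_alloc();
    uint8_t key[16]  = { 0x01 };
    uint8_t iv_e[16] = { 0x02 };     // separate copies: av_aes_crypt() rewrites the iv
    uint8_t iv_d[16] = { 0x02 };
    uint8_t pt[32], ct[32], out[32];
    int i, ok;

    if (!enc || !dec)
        return -1;
    for (i = 0; i < 32; i++)
        pt[i] = i;

    av_aes_init(enc, key, 128, 0);
    av_aes_init(dec, key, 128, 1);
    av_aes_crypt(enc, ct,  pt, 2, iv_e, 0);   // encrypt two 16-byte blocks
    av_aes_crypt(dec, out, ct, 2, iv_d, 1);   // decrypt with a fresh iv copy

    ok = !memcmp(pt, out, sizeof(pt));
    av_free(enc);
    av_free(dec);
    return ok ? 0 : -1;
}
#endif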

static void init_multbl2(uint32_t tbl[][256], const int c[4],
                         const uint8_t *log8, const uint8_t *alog8,
                         const uint8_t *sbox)
{
    int i;

    for (i = 0; i < 256; i++) {
        int x = sbox[i];
        if (x) {
            int k, l, m, n;
            x = log8[x];
            k = alog8[x + log8[c[0]]];
            l = alog8[x + log8[c[1]]];
            m = alog8[x + log8[c[2]]];
            n = alog8[x + log8[c[3]]];
            tbl[0][i] = AV_NE(MKBETAG(k, l, m, n), MKTAG(k, l, m, n));
#if !CONFIG_SMALL
            tbl[1][i] = ROT(tbl[0][i], 8);
            tbl[2][i] = ROT(tbl[0][i], 16);
            tbl[3][i] = ROT(tbl[0][i], 24);
#endif
        }
    }
}
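
/*
 * Illustrative sketch (not part of the original file): the alog8[]/log8[]
 * tables built in av_aes_init() turn GF(2^8) multiplication into a table
 * lookup, which is what the alog8[x + log8[c[..]]] lines above rely on.
 * gmul_slow() is a hypothetical reference multiply over the AES polynomial
 * 0x11B for comparison.  Kept out of the build with #if 0.
 */
#if 0
static uint8_t gmul_slow(uint8_t a, uint8_t b)
{
    uint8_t r = 0;

    while (b) {
        if (b & 1)
            r ^= a;
        a = (a << 1) ^ ((a & 0x80) ? 0x1B : 0);   // xtime: multiply by x, reduce mod 0x11B
        b >>= 1;
    }
    return r;
}

// assumes log8[]/alog8[] filled as in av_aes_init(); alog8[] has 512 entries,
// so log8[a] + log8[b] (at most 508) needs no modular reduction
static uint8_t gmul_log(const uint8_t *log8, const uint8_t *alog8,
                        uint8_t a, uint8_t b)
{
    if (!a || !b)
        return 0;
    return alog8[log8[a] + log8[b]];
}
#endif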

// this is based on the reference AES code by Paulo Barreto and Vincent Rijmen
int av_aes_init(AVAES *a, const uint8_t *key, int key_bits, int decrypt)
{
    int i, j, t, rconpointer = 0;
    uint8_t tk[8][4];
    int KC = key_bits >> 5;
    int rounds = KC + 6;
    uint8_t log8[256];
    uint8_t alog8[512];

    // build the static lookup tables only once
    if (!enc_multbl[FF_ARRAY_ELEMS(enc_multbl) - 1][FF_ARRAY_ELEMS(enc_multbl[0]) - 1]) {
        j = 1;
        for (i = 0; i < 255; i++) {
            alog8[i] = alog8[i + 255] = j;
            log8[j] = i;
            j ^= j + j;
            if (j > 255)
                j ^= 0x11B;
        }
        for (i = 0; i < 256; i++) {
            j = i ? alog8[255 - log8[i]] : 0;
            j ^= (j << 1) ^ (j << 2) ^ (j << 3) ^ (j << 4);
            j = (j ^ (j >> 8) ^ 99) & 255;
            inv_sbox[j] = i;
            sbox[i] = j;
        }
        init_multbl2(dec_multbl, (const int[4]) { 0xe, 0x9, 0xd, 0xb },
                     log8, alog8, inv_sbox);
        init_multbl2(enc_multbl, (const int[4]) { 0x2, 0x1, 0x1, 0x3 },
                     log8, alog8, sbox);
    }

    if (key_bits != 128 && key_bits != 192 && key_bits != 256)
        return -1;

    a->rounds = rounds;

    memcpy(tk, key, KC * 4);
    memcpy(a->round_key[0].u8, key, KC * 4);

    for (t = KC * 4; t < (rounds + 1) * 16; t += KC * 4) {
        for (i = 0; i < 4; i++)
            tk[0][i] ^= sbox[tk[KC - 1][(i + 1) & 3]];
        tk[0][0] ^= rcon[rconpointer++];

        for (j = 1; j < KC; j++) {
            if (KC != 8 || j != KC >> 1)
                for (i = 0; i < 4; i++)
                    tk[j][i] ^= tk[j - 1][i];
            else
                for (i = 0; i < 4; i++)
                    tk[j][i] ^= sbox[tk[j - 1][i]];
        }

        memcpy(a->round_key[0].u8 + t, tk, KC * 4);
    }

    if (decrypt) {
        for (i = 1; i < rounds; i++) {
            av_aes_block tmp[3];
            tmp[2] = a->round_key[i];
            subshift(&tmp[1], 0, sbox);
            mix(tmp, dec_multbl, 1, 3);
            a->round_key[i] = tmp[0];
        }
    } else {
        for (i = 0; i < (rounds + 1) >> 1; i++) {
            FFSWAP(av_aes_block, a->round_key[i], a->round_key[rounds - i]);
        }
    }

    return 0;
}
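
/*
 * Illustrative sketch (not part of the original file): a minimal
 * encrypt-then-decrypt consistency check across the three key sizes accepted
 * by av_aes_init().  Key and plaintext bytes are arbitrary patterns, and
 * memcmp() from <string.h> is assumed.  Kept out of the build with #if 0.
 */
#if 0
static int example_roundtrip_all_key_sizes(void)
{
    static const int bits[3] = { 128, 192, 256 };
    uint8_t key[32], pt[16], ct[16], out[16];
    int i, k;

    for (i = 0; i < 32; i++)
        key[i] = i;
    for (i = 0; i < 16; i++)
        pt[i] = 0xA5 ^ i;

    for (k = 0; k < 3; k++) {
        AVAES enc, dec;                      // struct AVAES is complete in this file

        if (av_aes_init(&enc, key, bits[k], 0) < 0 ||
            av_aes_init(&dec, key, bits[k], 1) < 0)
            return -1;
        av_aes_crypt(&enc, ct,  pt, 1, NULL, 0);
        av_aes_crypt(&dec, out, ct, 1, NULL, 1);
        if (memcmp(pt, out, 16))
            return -1;
    }
    return 0;
}
#endif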

#ifdef TEST
// LCOV_EXCL_START
#include <string.h>
#include "lfg.h"
#include "log.h"
#include "timer.h"   // for START_TIMER / STOP_TIMER used below

int main(int argc, char **argv)
{
    int i, j;
    AVAES b;
    uint8_t rkey[2][16] = {
        { 0 },
        { 0x10, 0xa5, 0x88, 0x69, 0xd7, 0x4b, 0xe5, 0xa3,
          0x74, 0xcf, 0x86, 0x7c, 0xfb, 0x47, 0x38, 0x59 }
    };
    uint8_t pt[16], rpt[2][16] = {
        { 0x6a, 0x84, 0x86, 0x7c, 0xd7, 0x7e, 0x12, 0xad,
          0x07, 0xea, 0x1b, 0xe8, 0x95, 0xc5, 0x3f, 0xa3 },
        { 0 }
    };
    uint8_t rct[2][16] = {
        { 0x73, 0x22, 0x81, 0xc0, 0xa0, 0xaa, 0xb8, 0xf7,
          0xa5, 0x4a, 0x0c, 0x67, 0xa0, 0xc4, 0x5e, 0xcf },
        { 0x6d, 0x25, 0x1e, 0x69, 0x44, 0xb0, 0x51, 0xe0,
          0x4e, 0xaa, 0x6f, 0xb4, 0xdb, 0xf7, 0x84, 0x65 }
    };
    uint8_t temp[16];
    int err = 0;

    for (i = 0; i < 2; i++) {
        av_aes_init(&b, rkey[i], 128, 1);
        av_aes_crypt(&b, temp, rct[i], 1, NULL, 1);
        for (j = 0; j < 16; j++) {
            if (rpt[i][j] != temp[j]) {
                av_log(NULL, AV_LOG_ERROR, "%d %02X %02X\n",
                       j, rpt[i][j], temp[j]);
                err = 1;
            }
        }
    }

    if (argc > 1 && !strcmp(argv[1], "-t")) {
        AVAES ae, ad;
        AVLFG prng;

        av_aes_init(&ae, "PI=3.141592654..", 128, 0);
        av_aes_init(&ad, "PI=3.141592654..", 128, 1);
        av_lfg_init(&prng, 1);

        for (i = 0; i < 10000; i++) {
            for (j = 0; j < 16; j++) {
                pt[j] = av_lfg_get(&prng);
            }
            {
                START_TIMER;
                av_aes_crypt(&ae, temp, pt, 1, NULL, 0);
                if (!(i & (i - 1)))
                    av_log(NULL, AV_LOG_ERROR, "%02X %02X %02X %02X\n",
                           temp[0], temp[5], temp[10], temp[15]);
                av_aes_crypt(&ad, temp, temp, 1, NULL, 1);
                STOP_TIMER("aes");
            }
            for (j = 0; j < 16; j++) {
                if (pt[j] != temp[j]) {
                    av_log(NULL, AV_LOG_ERROR, "%d %d %02X %02X\n",
                           i, j, pt[j], temp[j]);
                }
            }
        }
    }
    return err;
}
// LCOV_EXCL_STOP
#endif