Line data Source code
1 : /* cipher-ocb.c - OCB cipher mode
2 : * Copyright (C) 2015, 2016 g10 Code GmbH
3 : *
4 : * This file is part of Libgcrypt.
5 : *
6 : * Libgcrypt is free software; you can redistribute it and/or modify
7 : * it under the terms of the GNU Lesser general Public License as
8 : * published by the Free Software Foundation; either version 2.1 of
9 : * the License, or (at your option) any later version.
10 : *
11 : * Libgcrypt is distributed in the hope that it will be useful,
12 : * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 : * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 : * GNU Lesser General Public License for more details.
15 : *
16 : * You should have received a copy of the GNU Lesser General Public
17 : * License along with this program; if not, see <http://www.gnu.org/licenses/>.
18 : *
19 : *
20 : * OCB is covered by several patents but may be used freely by most
21 : * software. See http://web.cs.ucdavis.edu/~rogaway/ocb/license.htm .
22 : * In particular license 1 is suitable for Libgcrypt: See
23 : * http://web.cs.ucdavis.edu/~rogaway/ocb/license1.pdf for the full
24 : * license document; it basically says:
25 : *
26 : * License 1 — License for Open-Source Software Implementations of OCB
27 : * (Jan 9, 2013)
28 : *
29 : * Under this license, you are authorized to make, use, and
30 : * distribute open-source software implementations of OCB. This
31 : * license terminates for you if you sue someone over their
32 : * open-source software implementation of OCB claiming that you have
33 : * a patent covering their implementation.
34 : */
35 :
36 :
37 : #include <config.h>
38 : #include <stdio.h>
39 : #include <stdlib.h>
40 : #include <string.h>
41 : #include <errno.h>
42 :
43 : #include "g10lib.h"
44 : #include "cipher.h"
45 : #include "bufhelp.h"
46 : #include "./cipher-internal.h"
47 :
48 :
49 : /* Double the OCB_BLOCK_LEN sized block B in-place. */
50 : static inline void
51 949292 : double_block (unsigned char *b)
52 : {
53 : #if OCB_BLOCK_LEN != 16
54 : unsigned char b_0 = b[0];
55 : int i;
56 :
57 : for (i=0; i < OCB_BLOCK_LEN - 1; i++)
58 : b[i] = (b[i] << 1) | (b[i+1] >> 7);
59 :
60 : b[OCB_BLOCK_LEN-1] = (b[OCB_BLOCK_LEN-1] << 1) ^ ((b_0 >> 7) * 135);
61 : #else
62 : /* This is the generic code for 16 byte blocks. However it is not
63 : faster than the straight byte by byte implementation. */
64 : u64 l_0, l, r;
65 :
66 949292 : l = buf_get_be64 (b);
67 949292 : r = buf_get_be64 (b + 8);
68 :
69 949292 : l_0 = -(l >> 63);
70 949292 : l = (l + l) ^ (r >> 63);
71 949292 : r = (r + r) ^ (l_0 & 135);
72 :
73 949292 : buf_put_be64 (b, l);
74 949292 : buf_put_be64 (b+8, r);
75 : #endif
76 949292 : }
77 :
78 :
79 : /* Double the OCB_BLOCK_LEN sized block S and store it at D. S and D
80 : may point to the same memory location but they may not overlap. */
81 : static void
82 948812 : double_block_cpy (unsigned char *d, const unsigned char *s)
83 : {
84 948812 : if (d != s)
85 948812 : buf_cpy (d, s, OCB_BLOCK_LEN);
86 948812 : double_block (d);
87 948812 : }
88 :
89 :
/* Copy NBYTES from buffer S, starting at bit offset BITOFF, to buffer
   D.  When BITOFF is not byte aligned, one byte beyond the last
   copied source byte is read, so S must provide that extra byte. */
static void
bit_copy (unsigned char *d, const unsigned char *s,
          unsigned int bitoff, unsigned int nbytes)
{
  unsigned int shift = bitoff % 8;

  s += bitoff / 8;
  if (!shift)
    {
      /* Byte aligned: plain copy. */
      while (nbytes--)
        *d++ = *s++;
    }
  else
    {
      /* Stitch each output byte from two adjacent source bytes. */
      while (nbytes--)
        {
          *d++ = (s[0] << shift) | (s[1] >> (8 - shift));
          s++;
        }
    }
}
110 :
111 :
112 : /* Get L_big value for block N, where N is multiple of 65536. */
113 : static void
114 960 : ocb_get_L_big (gcry_cipher_hd_t c, u64 n, unsigned char *l_buf)
115 : {
116 960 : int ntz = _gcry_ctz64 (n);
117 :
118 960 : gcry_assert(ntz >= OCB_L_TABLE_SIZE);
119 :
120 960 : double_block_cpy (l_buf, c->u_mode.ocb.L[OCB_L_TABLE_SIZE - 1]);
121 1440 : for (ntz -= OCB_L_TABLE_SIZE; ntz; ntz--)
122 480 : double_block (l_buf);
123 960 : }
124 :
125 :
/* Set the nonce for OCB.  This requires that the key has been set.
   Calling it again starts a new encryption cycle using the same
   key.  Returns an error code on invalid state or arguments. */
gcry_err_code_t
_gcry_cipher_ocb_set_nonce (gcry_cipher_hd_t c, const unsigned char *nonce,
                            size_t noncelen)
{
  unsigned char ktop[OCB_BLOCK_LEN];
  unsigned char stretch[OCB_BLOCK_LEN + 8];
  unsigned int bottom;
  int i;
  unsigned int burn = 0;
  unsigned int nburn;

  /* Check args. */
  if (!c->marks.key)
    return GPG_ERR_INV_STATE;  /* Key must have been set first.  */
  switch (c->u_mode.ocb.taglen)
    {
    case 8:
    case 12:
    case 16:
      break;
    default:
      return GPG_ERR_BUG; /* Invalid tag length. */
    }

  if (c->spec->blocksize != OCB_BLOCK_LEN)
    return GPG_ERR_CIPHER_ALGO;
  if (!nonce)
    return GPG_ERR_INV_ARG;
  /* 120 bit is the allowed maximum.  In addition we impose a minimum
     of 64 bit.  */
  if (noncelen > (120/8) || noncelen < (64/8) || noncelen >= OCB_BLOCK_LEN)
    return GPG_ERR_INV_LENGTH;

  /* Set up the L table. */
  /* L_star = E(zero_128) */
  memset (ktop, 0, OCB_BLOCK_LEN);
  nburn = c->spec->encrypt (&c->context.c, c->u_mode.ocb.L_star, ktop);
  burn = nburn > burn ? nburn : burn;
  /* L_dollar = double(L_star)  */
  double_block_cpy (c->u_mode.ocb.L_dollar, c->u_mode.ocb.L_star);
  /* L_0 = double(L_dollar), ...  */
  double_block_cpy (c->u_mode.ocb.L[0], c->u_mode.ocb.L_dollar);
  for (i = 1; i < OCB_L_TABLE_SIZE; i++)
    double_block_cpy (c->u_mode.ocb.L[i], c->u_mode.ocb.L[i-1]);

  /* Prepare the nonce:
     Nonce = num2str(TAGLEN mod 128, 7) || zeros || 1 || N
     (cf. RFC 7253, section 4.2 — to be confirmed against the spec). */
  memset (ktop, 0, (OCB_BLOCK_LEN - noncelen));
  buf_cpy (ktop + (OCB_BLOCK_LEN - noncelen), nonce, noncelen);
  /* Encode the tag length in bits into the top 7 bits. */
  ktop[0] = ((c->u_mode.ocb.taglen * 8) % 128) << 1;
  /* Separator bit directly before the nonce proper. */
  ktop[OCB_BLOCK_LEN - noncelen - 1] |= 1;
  /* bottom = str2num(Nonce[123..128]) — the low 6 bits. */
  bottom = ktop[OCB_BLOCK_LEN - 1] & 0x3f;
  ktop[OCB_BLOCK_LEN - 1] &= 0xc0; /* Zero the bottom bits.  */
  /* Ktop = ENCIPHER(K, Nonce with bottom bits zeroed). */
  nburn = c->spec->encrypt (&c->context.c, ktop, ktop);
  burn = nburn > burn ? nburn : burn;
  /* Stretch = Ktop || (Ktop[1..64] xor Ktop[9..72]) */
  buf_cpy (stretch, ktop, OCB_BLOCK_LEN);
  buf_xor (stretch + OCB_BLOCK_LEN, ktop, ktop + 1, 8);
  /* Offset_0 = Stretch[1+bottom..128+bottom]
     (We use the IV field to store the offset) */
  bit_copy (c->u_iv.iv, stretch, bottom, OCB_BLOCK_LEN);
  c->marks.iv = 1;

  /* Checksum_0 = zeros(128)
     (We use the CTR field to store the checksum) */
  memset (c->u_ctr.ctr, 0, OCB_BLOCK_LEN);

  /* Clear AAD buffer. */
  memset (c->u_mode.ocb.aad_offset, 0, OCB_BLOCK_LEN);
  memset (c->u_mode.ocb.aad_sum, 0, OCB_BLOCK_LEN);

  /* Setup other values.  */
  memset (c->lastiv, 0, sizeof(c->lastiv));
  c->unused = 0;
  c->marks.tag = 0;
  c->marks.finalize = 0;
  c->u_mode.ocb.data_nblocks = 0;
  c->u_mode.ocb.aad_nblocks = 0;
  c->u_mode.ocb.aad_nleftover = 0;
  c->u_mode.ocb.data_finalized = 0;
  c->u_mode.ocb.aad_finalized = 0;

  /* log_printhex ("L_*       ", c->u_mode.ocb.L_star, OCB_BLOCK_LEN); */
  /* log_printhex ("L_$       ", c->u_mode.ocb.L_dollar, OCB_BLOCK_LEN); */
  /* log_printhex ("L_0       ", c->u_mode.ocb.L[0], OCB_BLOCK_LEN); */
  /* log_printhex ("L_1       ", c->u_mode.ocb.L[1], OCB_BLOCK_LEN); */
  /* log_debug (   "bottom    : %u (decimal)\n", bottom); */
  /* log_printhex ("Ktop      ", ktop, OCB_BLOCK_LEN); */
  /* log_printhex ("Stretch   ", stretch, sizeof stretch); */
  /* log_printhex ("Offset_0  ", c->u_iv.iv, OCB_BLOCK_LEN); */

  /* Cleanup */
  wipememory (ktop, sizeof ktop);
  wipememory (stretch, sizeof stretch);
  if (burn > 0)
    _gcry_burn_stack (burn + 4*sizeof(void*));

  return 0;
}
227 :
228 :
229 : /* Process additional authentication data. This implementation allows
230 : to add additional authentication data at any time before the final
231 : gcry_cipher_gettag. */
232 : gcry_err_code_t
233 9052 : _gcry_cipher_ocb_authenticate (gcry_cipher_hd_t c, const unsigned char *abuf,
234 : size_t abuflen)
235 : {
236 9052 : const size_t table_maxblks = 1 << OCB_L_TABLE_SIZE;
237 9052 : const u32 table_size_mask = ((1 << OCB_L_TABLE_SIZE) - 1);
238 : unsigned char l_tmp[OCB_BLOCK_LEN];
239 9052 : unsigned int burn = 0;
240 : unsigned int nburn;
241 :
242 : /* Check that a nonce and thus a key has been set and that we have
243 : not yet computed the tag. We also return an error if the aad has
244 : been finalized (i.e. a short block has been processed). */
245 9052 : if (!c->marks.iv || c->marks.tag || c->u_mode.ocb.aad_finalized)
246 0 : return GPG_ERR_INV_STATE;
247 :
248 : /* Check correct usage and arguments. */
249 9052 : if (c->spec->blocksize != OCB_BLOCK_LEN)
250 0 : return GPG_ERR_CIPHER_ALGO;
251 :
252 : /* Process remaining data from the last call first. */
253 9052 : if (c->u_mode.ocb.aad_nleftover)
254 : {
255 290 : for (; abuflen && c->u_mode.ocb.aad_nleftover < OCB_BLOCK_LEN;
256 218 : abuf++, abuflen--)
257 218 : c->u_mode.ocb.aad_leftover[c->u_mode.ocb.aad_nleftover++] = *abuf;
258 :
259 36 : if (c->u_mode.ocb.aad_nleftover == OCB_BLOCK_LEN)
260 : {
261 22 : c->u_mode.ocb.aad_nblocks++;
262 :
263 22 : if ((c->u_mode.ocb.aad_nblocks % table_maxblks) == 0)
264 : {
265 : /* Table overflow, L needs to be generated. */
266 0 : ocb_get_L_big(c, c->u_mode.ocb.aad_nblocks + 1, l_tmp);
267 : }
268 : else
269 : {
270 22 : buf_cpy (l_tmp, ocb_get_l (c, c->u_mode.ocb.aad_nblocks),
271 : OCB_BLOCK_LEN);
272 : }
273 :
274 : /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
275 22 : buf_xor_1 (c->u_mode.ocb.aad_offset, l_tmp, OCB_BLOCK_LEN);
276 : /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */
277 22 : buf_xor (l_tmp, c->u_mode.ocb.aad_offset,
278 22 : c->u_mode.ocb.aad_leftover, OCB_BLOCK_LEN);
279 22 : nburn = c->spec->encrypt (&c->context.c, l_tmp, l_tmp);
280 22 : burn = nburn > burn ? nburn : burn;
281 22 : buf_xor_1 (c->u_mode.ocb.aad_sum, l_tmp, OCB_BLOCK_LEN);
282 :
283 22 : c->u_mode.ocb.aad_nleftover = 0;
284 : }
285 : }
286 :
287 9052 : if (!abuflen)
288 : {
289 72 : if (burn > 0)
290 5 : _gcry_burn_stack (burn + 4*sizeof(void*));
291 :
292 72 : return 0;
293 : }
294 :
295 : /* Full blocks handling. */
296 27834 : while (abuflen >= OCB_BLOCK_LEN)
297 : {
298 9874 : size_t nblks = abuflen / OCB_BLOCK_LEN;
299 : size_t nmaxblks;
300 :
301 : /* Check how many blocks to process till table overflow. */
302 9874 : nmaxblks = (c->u_mode.ocb.aad_nblocks + 1) % table_maxblks;
303 9874 : nmaxblks = (table_maxblks - nmaxblks) % table_maxblks;
304 :
305 9874 : if (nmaxblks == 0)
306 : {
307 : /* Table overflow, generate L and process one block. */
308 480 : c->u_mode.ocb.aad_nblocks++;
309 480 : ocb_get_L_big(c, c->u_mode.ocb.aad_nblocks, l_tmp);
310 :
311 : /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
312 480 : buf_xor_1 (c->u_mode.ocb.aad_offset, l_tmp, OCB_BLOCK_LEN);
313 : /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */
314 480 : buf_xor (l_tmp, c->u_mode.ocb.aad_offset, abuf, OCB_BLOCK_LEN);
315 480 : nburn = c->spec->encrypt (&c->context.c, l_tmp, l_tmp);
316 480 : burn = nburn > burn ? nburn : burn;
317 480 : buf_xor_1 (c->u_mode.ocb.aad_sum, l_tmp, OCB_BLOCK_LEN);
318 :
319 480 : abuf += OCB_BLOCK_LEN;
320 480 : abuflen -= OCB_BLOCK_LEN;
321 480 : nblks--;
322 :
323 : /* With overflow handled, retry loop again. Next overflow will
324 : * happen after 65535 blocks. */
325 480 : continue;
326 : }
327 :
328 9394 : nblks = nblks < nmaxblks ? nblks : nmaxblks;
329 :
330 : /* Use a bulk method if available. */
331 9394 : if (nblks && c->bulk.ocb_auth)
332 : {
333 : size_t nleft;
334 : size_t ndone;
335 :
336 8854 : nleft = c->bulk.ocb_auth (c, abuf, nblks);
337 8854 : ndone = nblks - nleft;
338 :
339 8854 : abuf += ndone * OCB_BLOCK_LEN;
340 8854 : abuflen -= ndone * OCB_BLOCK_LEN;
341 8854 : nblks = nleft;
342 : }
343 :
344 : /* Hash all full blocks. */
345 4828376 : while (nblks)
346 : {
347 4809588 : c->u_mode.ocb.aad_nblocks++;
348 :
349 4809588 : gcry_assert(c->u_mode.ocb.aad_nblocks & table_size_mask);
350 :
351 : /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
352 4809588 : buf_xor_1 (c->u_mode.ocb.aad_offset,
353 4809588 : ocb_get_l (c, c->u_mode.ocb.aad_nblocks),
354 : OCB_BLOCK_LEN);
355 : /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */
356 4809588 : buf_xor (l_tmp, c->u_mode.ocb.aad_offset, abuf, OCB_BLOCK_LEN);
357 4809588 : nburn = c->spec->encrypt (&c->context.c, l_tmp, l_tmp);
358 4809588 : burn = nburn > burn ? nburn : burn;
359 4809588 : buf_xor_1 (c->u_mode.ocb.aad_sum, l_tmp, OCB_BLOCK_LEN);
360 :
361 4809588 : abuf += OCB_BLOCK_LEN;
362 4809588 : abuflen -= OCB_BLOCK_LEN;
363 4809588 : nblks--;
364 : }
365 : }
366 :
367 : /* Store away the remaining data. */
368 18798 : for (; abuflen && c->u_mode.ocb.aad_nleftover < OCB_BLOCK_LEN;
369 838 : abuf++, abuflen--)
370 838 : c->u_mode.ocb.aad_leftover[c->u_mode.ocb.aad_nleftover++] = *abuf;
371 8980 : gcry_assert (!abuflen);
372 :
373 8980 : if (burn > 0)
374 5726 : _gcry_burn_stack (burn + 4*sizeof(void*));
375 :
376 8980 : return 0;
377 : }
378 :
379 :
/* Hash the final partial AAD block (if any) into AAD_SUM and mark the
   AAD processing as finished.  Called from compute_tag_if_needed. */
static void
ocb_aad_finalize (gcry_cipher_hd_t c)
{
  unsigned char l_tmp[OCB_BLOCK_LEN];
  unsigned int burn = 0;
  unsigned int nburn;

  /* Check that a nonce and thus a key has been set and that we have
     not yet computed the tag.  We also skip this if the aad has been
     finalized.  */
  if (!c->marks.iv || c->marks.tag || c->u_mode.ocb.aad_finalized)
    return;
  if (c->spec->blocksize != OCB_BLOCK_LEN)
    return;  /* Ooops.  */

  /* Hash final partial block if any.  */
  if (c->u_mode.ocb.aad_nleftover)
    {
      /* Offset_* = Offset_m xor L_* */
      buf_xor_1 (c->u_mode.ocb.aad_offset,
                 c->u_mode.ocb.L_star, OCB_BLOCK_LEN);
      /* CipherInput = (A_* || 1 || zeros(127-bitlen(A_*))) xor Offset_* */
      buf_cpy (l_tmp, c->u_mode.ocb.aad_leftover, c->u_mode.ocb.aad_nleftover);
      memset (l_tmp + c->u_mode.ocb.aad_nleftover, 0,
              OCB_BLOCK_LEN - c->u_mode.ocb.aad_nleftover);
      /* The 0x80 byte is the "1" padding bit directly after the data. */
      l_tmp[c->u_mode.ocb.aad_nleftover] = 0x80;
      buf_xor_1 (l_tmp, c->u_mode.ocb.aad_offset, OCB_BLOCK_LEN);
      /* Sum = Sum_m xor ENCIPHER(K, CipherInput)  */
      nburn = c->spec->encrypt (&c->context.c, l_tmp, l_tmp);
      burn = nburn > burn ? nburn : burn;
      buf_xor_1 (c->u_mode.ocb.aad_sum, l_tmp, OCB_BLOCK_LEN);

      c->u_mode.ocb.aad_nleftover = 0;
    }

  /* Mark AAD as finalized so that gcry_cipher_ocb_authenticate can
   * return an error when called again.  */
  c->u_mode.ocb.aad_finalized = 1;

  if (burn > 0)
    _gcry_burn_stack (burn + 4*sizeof(void*));
}
423 :
424 :
425 :
426 : /* Checksumming for encrypt and decrypt. */
427 : static void
428 27564 : ocb_checksum (unsigned char *chksum, const unsigned char *plainbuf,
429 : size_t nblks)
430 : {
431 5178576 : while (nblks > 0)
432 : {
433 : /* Checksum_i = Checksum_{i-1} xor P_i */
434 5123448 : buf_xor_1(chksum, plainbuf, OCB_BLOCK_LEN);
435 :
436 5123448 : plainbuf += OCB_BLOCK_LEN;
437 5123448 : nblks--;
438 : }
439 27564 : }
440 :
441 :
442 : /* Common code for encrypt and decrypt. */
443 : static gcry_err_code_t
444 65172 : ocb_crypt (gcry_cipher_hd_t c, int encrypt,
445 : unsigned char *outbuf, size_t outbuflen,
446 : const unsigned char *inbuf, size_t inbuflen)
447 : {
448 65172 : const size_t table_maxblks = 1 << OCB_L_TABLE_SIZE;
449 65172 : const u32 table_size_mask = ((1 << OCB_L_TABLE_SIZE) - 1);
450 : unsigned char l_tmp[OCB_BLOCK_LEN];
451 65172 : unsigned int burn = 0;
452 : unsigned int nburn;
453 65172 : gcry_cipher_encrypt_t crypt_fn =
454 65172 : encrypt ? c->spec->encrypt : c->spec->decrypt;
455 :
456 : /* Check that a nonce and thus a key has been set and that we are
457 : not yet in end of data state. */
458 65172 : if (!c->marks.iv || c->u_mode.ocb.data_finalized)
459 0 : return GPG_ERR_INV_STATE;
460 :
461 : /* Check correct usage and arguments. */
462 65172 : if (c->spec->blocksize != OCB_BLOCK_LEN)
463 0 : return GPG_ERR_CIPHER_ALGO;
464 65172 : if (outbuflen < inbuflen)
465 0 : return GPG_ERR_BUFFER_TOO_SHORT;
466 65172 : if (c->marks.finalize)
467 : ; /* Allow arbitarty length. */
468 24488 : else if ((inbuflen % OCB_BLOCK_LEN))
469 0 : return GPG_ERR_INV_LENGTH; /* We support only full blocks for now. */
470 :
471 : /* Full blocks handling. */
472 187950 : while (inbuflen >= OCB_BLOCK_LEN)
473 : {
474 57606 : size_t nblks = inbuflen / OCB_BLOCK_LEN;
475 : size_t nmaxblks;
476 :
477 : /* Check how many blocks to process till table overflow. */
478 57606 : nmaxblks = (c->u_mode.ocb.data_nblocks + 1) % table_maxblks;
479 57606 : nmaxblks = (table_maxblks - nmaxblks) % table_maxblks;
480 :
481 57606 : if (nmaxblks == 0)
482 : {
483 : /* Table overflow, generate L and process one block. */
484 480 : c->u_mode.ocb.data_nblocks++;
485 480 : ocb_get_L_big(c, c->u_mode.ocb.data_nblocks, l_tmp);
486 :
487 480 : if (encrypt)
488 : {
489 : /* Checksum_i = Checksum_{i-1} xor P_i */
490 240 : ocb_checksum (c->u_ctr.ctr, inbuf, 1);
491 : }
492 :
493 : /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
494 480 : buf_xor_1 (c->u_iv.iv, l_tmp, OCB_BLOCK_LEN);
495 : /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
496 480 : buf_xor (outbuf, c->u_iv.iv, inbuf, OCB_BLOCK_LEN);
497 480 : nburn = crypt_fn (&c->context.c, outbuf, outbuf);
498 480 : burn = nburn > burn ? nburn : burn;
499 480 : buf_xor_1 (outbuf, c->u_iv.iv, OCB_BLOCK_LEN);
500 :
501 480 : if (!encrypt)
502 : {
503 : /* Checksum_i = Checksum_{i-1} xor P_i */
504 240 : ocb_checksum (c->u_ctr.ctr, outbuf, 1);
505 : }
506 :
507 480 : inbuf += OCB_BLOCK_LEN;
508 480 : inbuflen -= OCB_BLOCK_LEN;
509 480 : outbuf += OCB_BLOCK_LEN;
510 480 : outbuflen =- OCB_BLOCK_LEN;
511 480 : nblks--;
512 :
513 : /* With overflow handled, retry loop again. Next overflow will
514 : * happen after 65535 blocks. */
515 480 : continue;
516 : }
517 :
518 57126 : nblks = nblks < nmaxblks ? nblks : nmaxblks;
519 :
520 : /* Use a bulk method if available. */
521 57126 : if (nblks && c->bulk.ocb_crypt)
522 : {
523 : size_t nleft;
524 : size_t ndone;
525 :
526 52742 : nleft = c->bulk.ocb_crypt (c, outbuf, inbuf, nblks, encrypt);
527 52742 : ndone = nblks - nleft;
528 :
529 52742 : inbuf += ndone * OCB_BLOCK_LEN;
530 52742 : outbuf += ndone * OCB_BLOCK_LEN;
531 52742 : inbuflen -= ndone * OCB_BLOCK_LEN;
532 52742 : outbuflen -= ndone * OCB_BLOCK_LEN;
533 52742 : nblks = nleft;
534 : }
535 :
536 57126 : if (nblks)
537 : {
538 27084 : size_t nblks_chksum = nblks;
539 :
540 27084 : if (encrypt)
541 : {
542 : /* Checksum_i = Checksum_{i-1} xor P_i */
543 14052 : ocb_checksum (c->u_ctr.ctr, inbuf, nblks_chksum);
544 : }
545 :
546 : /* Encrypt all full blocks. */
547 5177136 : while (nblks)
548 : {
549 5122968 : c->u_mode.ocb.data_nblocks++;
550 :
551 5122968 : gcry_assert(c->u_mode.ocb.data_nblocks & table_size_mask);
552 :
553 : /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
554 5122968 : buf_xor_1 (c->u_iv.iv,
555 5122968 : ocb_get_l (c, c->u_mode.ocb.data_nblocks),
556 : OCB_BLOCK_LEN);
557 : /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
558 5122968 : buf_xor (outbuf, c->u_iv.iv, inbuf, OCB_BLOCK_LEN);
559 5122968 : nburn = crypt_fn (&c->context.c, outbuf, outbuf);
560 5122968 : burn = nburn > burn ? nburn : burn;
561 5122968 : buf_xor_1 (outbuf, c->u_iv.iv, OCB_BLOCK_LEN);
562 :
563 5122968 : inbuf += OCB_BLOCK_LEN;
564 5122968 : inbuflen -= OCB_BLOCK_LEN;
565 5122968 : outbuf += OCB_BLOCK_LEN;
566 5122968 : outbuflen =- OCB_BLOCK_LEN;
567 5122968 : nblks--;
568 : }
569 :
570 27084 : if (!encrypt)
571 : {
572 : /* Checksum_i = Checksum_{i-1} xor P_i */
573 13032 : ocb_checksum (c->u_ctr.ctr,
574 13032 : outbuf - nblks_chksum * OCB_BLOCK_LEN,
575 : nblks_chksum);
576 : }
577 : }
578 : }
579 :
580 : /* Encrypt final partial block. Note that we expect INBUFLEN to be
581 : shorter than OCB_BLOCK_LEN (see above). */
582 65172 : if (inbuflen)
583 : {
584 : unsigned char pad[OCB_BLOCK_LEN];
585 :
586 : /* Offset_* = Offset_m xor L_* */
587 8550 : buf_xor_1 (c->u_iv.iv, c->u_mode.ocb.L_star, OCB_BLOCK_LEN);
588 : /* Pad = ENCIPHER(K, Offset_*) */
589 8550 : nburn = c->spec->encrypt (&c->context.c, pad, c->u_iv.iv);
590 8550 : burn = nburn > burn ? nburn : burn;
591 :
592 8550 : if (encrypt)
593 : {
594 : /* Checksum_* = Checksum_m xor (P_* || 1 || zeros(127-bitlen(P_*))) */
595 : /* Note that INBUFLEN is less than OCB_BLOCK_LEN. */
596 8522 : buf_cpy (l_tmp, inbuf, inbuflen);
597 8522 : memset (l_tmp + inbuflen, 0, OCB_BLOCK_LEN - inbuflen);
598 8522 : l_tmp[inbuflen] = 0x80;
599 8522 : buf_xor_1 (c->u_ctr.ctr, l_tmp, OCB_BLOCK_LEN);
600 : /* C_* = P_* xor Pad[1..bitlen(P_*)] */
601 8522 : buf_xor (outbuf, inbuf, pad, inbuflen);
602 : }
603 : else
604 : {
605 : /* P_* = C_* xor Pad[1..bitlen(C_*)] */
606 : /* Checksum_* = Checksum_m xor (P_* || 1 || zeros(127-bitlen(P_*))) */
607 28 : buf_cpy (l_tmp, pad, OCB_BLOCK_LEN);
608 28 : buf_cpy (l_tmp, inbuf, inbuflen);
609 28 : buf_xor_1 (l_tmp, pad, OCB_BLOCK_LEN);
610 28 : l_tmp[inbuflen] = 0x80;
611 28 : buf_cpy (outbuf, l_tmp, inbuflen);
612 :
613 28 : buf_xor_1 (c->u_ctr.ctr, l_tmp, OCB_BLOCK_LEN);
614 : }
615 : }
616 :
617 : /* Compute the tag if the finalize flag has been set. */
618 65172 : if (c->marks.finalize)
619 : {
620 : /* Tag = ENCIPHER(K, Checksum xor Offset xor L_$) xor HASH(K,A) */
621 40684 : buf_xor (c->u_mode.ocb.tag, c->u_ctr.ctr, c->u_iv.iv, OCB_BLOCK_LEN);
622 40684 : buf_xor_1 (c->u_mode.ocb.tag, c->u_mode.ocb.L_dollar, OCB_BLOCK_LEN);
623 81368 : nburn = c->spec->encrypt (&c->context.c,
624 40684 : c->u_mode.ocb.tag, c->u_mode.ocb.tag);
625 40684 : burn = nburn > burn ? nburn : burn;
626 :
627 40684 : c->u_mode.ocb.data_finalized = 1;
628 : /* Note that the the final part of the tag computation is done
629 : by _gcry_cipher_ocb_get_tag. */
630 : }
631 :
632 65172 : if (burn > 0)
633 39830 : _gcry_burn_stack (burn + 4*sizeof(void*));
634 :
635 65172 : return 0;
636 : }
637 :
638 :
/* Encrypt (INBUF,INBUFLEN) in OCB mode to OUTBUF.  OUTBUFLEN gives
   the allocated size of OUTBUF.  This function accepts only multiples
   of a full block unless gcry_cipher_final has been called in which
   case the next block may have any length.  Thin wrapper around
   ocb_crypt with ENCRYPT set. */
gcry_err_code_t
_gcry_cipher_ocb_encrypt (gcry_cipher_hd_t c,
                          unsigned char *outbuf, size_t outbuflen,
                          const unsigned char *inbuf, size_t inbuflen)

{
  return ocb_crypt (c, 1, outbuf, outbuflen, inbuf, inbuflen);
}
651 :
652 :
653 : /* Decrypt (INBUF,INBUFLEN) in OCB mode to OUTBUF. OUTBUFLEN gives
654 : the allocated size of OUTBUF. This function accepts only multiples
655 : of a full block unless gcry_cipher_final has been called in which
656 : case the next block may have any length. */
657 : gcry_err_code_t
658 27028 : _gcry_cipher_ocb_decrypt (gcry_cipher_hd_t c,
659 : unsigned char *outbuf, size_t outbuflen,
660 : const unsigned char *inbuf, size_t inbuflen)
661 : {
662 27028 : return ocb_crypt (c, 0, outbuf, outbuflen, inbuf, inbuflen);
663 : }
664 :
665 :
666 : /* Compute the tag. The last data operation has already done some
667 : part of it. To allow adding AAD even after having done all data,
668 : we finish the tag computation only here. */
669 : static void
670 28872 : compute_tag_if_needed (gcry_cipher_hd_t c)
671 : {
672 28872 : if (!c->marks.tag)
673 : {
674 28684 : ocb_aad_finalize (c);
675 28684 : buf_xor_1 (c->u_mode.ocb.tag, c->u_mode.ocb.aad_sum, OCB_BLOCK_LEN);
676 28684 : c->marks.tag = 1;
677 : }
678 28872 : }
679 :
680 :
681 : /* Copy the already computed tag to OUTTAG. OUTTAGSIZE is the
682 : allocated size of OUTTAG; the function returns an error if that is
683 : too short to hold the tag. */
684 : gcry_err_code_t
685 20088 : _gcry_cipher_ocb_get_tag (gcry_cipher_hd_t c,
686 : unsigned char *outtag, size_t outtagsize)
687 : {
688 20088 : if (c->u_mode.ocb.taglen > outtagsize)
689 0 : return GPG_ERR_BUFFER_TOO_SHORT;
690 20088 : if (!c->u_mode.ocb.data_finalized)
691 0 : return GPG_ERR_INV_STATE; /* Data has not yet been finalized. */
692 :
693 20088 : compute_tag_if_needed (c);
694 :
695 20088 : memcpy (outtag, c->u_mode.ocb.tag, c->u_mode.ocb.taglen);
696 :
697 20088 : return 0;
698 : }
699 :
700 :
701 : /* Check that the tag (INTAG,TAGLEN) matches the computed tag for the
702 : handle C. */
703 : gcry_err_code_t
704 8784 : _gcry_cipher_ocb_check_tag (gcry_cipher_hd_t c, const unsigned char *intag,
705 : size_t taglen)
706 : {
707 : size_t n;
708 :
709 8784 : if (!c->u_mode.ocb.data_finalized)
710 0 : return GPG_ERR_INV_STATE; /* Data has not yet been finalized. */
711 :
712 8784 : compute_tag_if_needed (c);
713 :
714 8784 : n = c->u_mode.ocb.taglen;
715 8784 : if (taglen < n)
716 0 : n = taglen;
717 :
718 8784 : if (!buf_eq_const (intag, c->u_mode.ocb.tag, n)
719 188 : || c->u_mode.ocb.taglen != taglen)
720 8596 : return GPG_ERR_CHECKSUM;
721 :
722 188 : return 0;
723 : }
|