LCOV - code coverage report
Current view: top level - cipher - cipher-ocb.c (source / functions)
Test:         coverage.info
Date:         2016-12-15 12:59:22
                                     Hit    Total    Coverage
Lines:                                 0      298       0.0 %
Functions:                             0       14       0.0 %

          Line data    Source code
       1             : /* cipher-ocb.c -  OCB cipher mode
       2             :  * Copyright (C) 2015, 2016 g10 Code GmbH
       3             :  *
       4             :  * This file is part of Libgcrypt.
       5             :  *
       6             :  * Libgcrypt is free software; you can redistribute it and/or modify
       7             :  * it under the terms of the GNU Lesser General Public License as
       8             :  * published by the Free Software Foundation; either version 2.1 of
       9             :  * the License, or (at your option) any later version.
      10             :  *
      11             :  * Libgcrypt is distributed in the hope that it will be useful,
      12             :  * but WITHOUT ANY WARRANTY; without even the implied warranty of
      13             :  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
      14             :  * GNU Lesser General Public License for more details.
      15             :  *
      16             :  * You should have received a copy of the GNU Lesser General Public
      17             :  * License along with this program; if not, see <http://www.gnu.org/licenses/>.
      18             :  *
      19             :  *
      20             :  * OCB is covered by several patents but may be used freely by most
      21             :  * software.  See http://web.cs.ucdavis.edu/~rogaway/ocb/license.htm .
      22             :  * In particular license 1 is suitable for Libgcrypt: See
      23             :  * http://web.cs.ucdavis.edu/~rogaway/ocb/license1.pdf for the full
      24             :  * license document; it basically says:
      25             :  *
      26             :  *   License 1 — License for Open-Source Software Implementations of OCB
      27             :  *               (Jan 9, 2013)
      28             :  *
      29             :  *   Under this license, you are authorized to make, use, and
      30             :  *   distribute open-source software implementations of OCB. This
      31             :  *   license terminates for you if you sue someone over their
      32             :  *   open-source software implementation of OCB claiming that you have
      33             :  *   a patent covering their implementation.
      34             :  */
      35             : 
      36             : 
      37             : #include <config.h>
      38             : #include <stdio.h>
      39             : #include <stdlib.h>
      40             : #include <string.h>
      41             : #include <errno.h>
      42             : 
      43             : #include "g10lib.h"
      44             : #include "cipher.h"
      45             : #include "bufhelp.h"
      46             : #include "./cipher-internal.h"
      47             : 
      48             : 
      49             : /* Double the OCB_BLOCK_LEN sized block B in-place.  */
      50             : static inline void
      51           0 : double_block (unsigned char *b)
      52             : {
      53             : #if OCB_BLOCK_LEN != 16
      54             :   unsigned char b_0 = b[0];
      55             :   int i;
      56             : 
      57             :   for (i=0; i < OCB_BLOCK_LEN - 1; i++)
      58             :     b[i] = (b[i] << 1) | (b[i+1] >> 7);
      59             : 
      60             :   b[OCB_BLOCK_LEN-1] = (b[OCB_BLOCK_LEN-1] << 1) ^ ((b_0 >> 7) * 135);
      61             : #else
      62             :   /* This is the generic code for 16 byte blocks.  However it is not
      63             :      faster than the straight byte by byte implementation.  */
      64             :   u64 l_0, l, r;
      65             : 
      66           0 :   l = buf_get_be64 (b);
      67           0 :   r = buf_get_be64 (b + 8);
      68             : 
      69           0 :   l_0 = -(l >> 63);
      70           0 :   l = (l + l) ^ (r >> 63);
      71           0 :   r = (r + r) ^ (l_0 & 135);
      72             : 
      73           0 :   buf_put_be64 (b, l);
      74           0 :   buf_put_be64 (b+8, r);
      75             : #endif
      76           0 : }
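                     : 
                     : /* Editorial note: double_block computes multiplication by x in
                     :  * GF(2^128) with the reduction polynomial x^128 + x^7 + x^2 + x + 1
                     :  * used by OCB; the constant 135 is 0x87, the low byte of that
                     :  * polynomial.  As a small worked example, doubling the block
                     :  * 80 00 .. 00 shifts the set top bit out and folds it back in,
                     :  * giving 00 00 .. 87.  */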
      77             : 
      78             : 
      79             : /* Double the OCB_BLOCK_LEN sized block S and store it at D.  S and D
      80             :    may be the same buffer, but they must not partially overlap.  */
      81             : static void
      82           0 : double_block_cpy (unsigned char *d, const unsigned char *s)
      83             : {
      84           0 :   if (d != s)
      85           0 :     buf_cpy (d, s, OCB_BLOCK_LEN);
      86           0 :   double_block (d);
      87           0 : }
      88             : 
      89             : 
      90             : /* Copy NBYTES from buffer S starting at bit offset BITOFF to buffer D.  */
      91             : static void
      92           0 : bit_copy (unsigned char *d, const unsigned char *s,
      93             :           unsigned int bitoff, unsigned int nbytes)
      94             : {
      95             :   unsigned int shift;
      96             : 
      97           0 :   s += bitoff / 8;
      98           0 :   shift = bitoff % 8;
      99           0 :   if (shift)
     100             :     {
     101           0 :       for (; nbytes; nbytes--, d++, s++)
     102           0 :         *d = (s[0] << shift) | (s[1] >> (8 - shift));
     103             :     }
     104             :   else
     105             :     {
     106           0 :       for (; nbytes; nbytes--, d++, s++)
     107           0 :         *d = *s;
     108             :     }
     109           0 : }
     110             : 
     111             : 
     112             : /* Get L_big value for block N, where N is a multiple of 65536. */
     113             : static void
     114           0 : ocb_get_L_big (gcry_cipher_hd_t c, u64 n, unsigned char *l_buf)
     115             : {
     116           0 :   int ntz = _gcry_ctz64 (n);
     117             : 
     118           0 :   gcry_assert(ntz >= OCB_L_TABLE_SIZE);
     119             : 
     120           0 :   double_block_cpy (l_buf, c->u_mode.ocb.L[OCB_L_TABLE_SIZE - 1]);
     121           0 :   for (ntz -= OCB_L_TABLE_SIZE; ntz; ntz--)
     122           0 :     double_block (l_buf);
     123           0 : }
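                     : 
                     : /* Editorial note: c->u_mode.ocb.L caches L_0 .. L_{OCB_L_TABLE_SIZE-1}
                     :  * with L_i = double(L_{i-1}); block number i needs L_{ntz(i)}.  When i
                     :  * is a multiple of 2^OCB_L_TABLE_SIZE the needed value lies beyond the
                     :  * table, so ocb_get_L_big starts from the last cached entry and keeps
                     :  * doubling.  Assuming OCB_L_TABLE_SIZE is 16 (as the 65536 above
                     :  * suggests), block 65536 has ntz 16 and needs just one extra doubling
                     :  * of L_15 to obtain L_16.  */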
     124             : 
     125             : 
     126             : /* Set the nonce for OCB.  This requires that the key has been set.
     127             :    Using it again resets the state and starts a new encryption cycle
     128             :    using the same key.  */
     129             : gcry_err_code_t
     130           0 : _gcry_cipher_ocb_set_nonce (gcry_cipher_hd_t c, const unsigned char *nonce,
     131             :                             size_t noncelen)
     132             : {
     133             :   unsigned char ktop[OCB_BLOCK_LEN];
     134             :   unsigned char stretch[OCB_BLOCK_LEN + 8];
     135             :   unsigned int bottom;
     136             :   int i;
     137           0 :   unsigned int burn = 0;
     138             :   unsigned int nburn;
     139             : 
     140             :   /* Check args.  */
     141           0 :   if (!c->marks.key)
     142           0 :     return GPG_ERR_INV_STATE;  /* Key must have been set first.  */
     143           0 :   switch (c->u_mode.ocb.taglen)
     144             :     {
     145             :     case 8:
     146             :     case 12:
     147             :     case 16:
     148           0 :       break;
     149             :     default:
     150           0 :       return GPG_ERR_BUG; /* Invalid tag length. */
     151             :     }
     152             : 
     153           0 :   if (c->spec->blocksize != OCB_BLOCK_LEN)
     154           0 :     return GPG_ERR_CIPHER_ALGO;
     155           0 :   if (!nonce)
     156           0 :     return GPG_ERR_INV_ARG;
     157             :   /* 120 bits is the allowed maximum.  In addition we impose a minimum
     158             :      of 64 bits.  */
     159           0 :   if (noncelen > (120/8) || noncelen < (64/8) || noncelen >= OCB_BLOCK_LEN)
     160           0 :     return GPG_ERR_INV_LENGTH;
     161             : 
     162             :   /* Set up the L table.  */
     163             :   /* L_star = E(zero_128) */
     164           0 :   memset (ktop, 0, OCB_BLOCK_LEN);
     165           0 :   nburn = c->spec->encrypt (&c->context.c, c->u_mode.ocb.L_star, ktop);
     166           0 :   burn = nburn > burn ? nburn : burn;
     167             :   /* L_dollar = double(L_star)  */
     168           0 :   double_block_cpy (c->u_mode.ocb.L_dollar, c->u_mode.ocb.L_star);
     169             :   /* L_0 = double(L_dollar), ...  */
     170           0 :   double_block_cpy (c->u_mode.ocb.L[0], c->u_mode.ocb.L_dollar);
     171           0 :   for (i = 1; i < OCB_L_TABLE_SIZE; i++)
     172           0 :     double_block_cpy (c->u_mode.ocb.L[i], c->u_mode.ocb.L[i-1]);
     173             : 
     174             :   /* Prepare the nonce.  */
     175           0 :   memset (ktop, 0, (OCB_BLOCK_LEN - noncelen));
     176           0 :   buf_cpy (ktop + (OCB_BLOCK_LEN - noncelen), nonce, noncelen);
     177           0 :   ktop[0] = ((c->u_mode.ocb.taglen * 8) % 128) << 1;
     178           0 :   ktop[OCB_BLOCK_LEN - noncelen - 1] |= 1;
     179           0 :   bottom = ktop[OCB_BLOCK_LEN - 1] & 0x3f;
     180           0 :   ktop[OCB_BLOCK_LEN - 1] &= 0xc0; /* Zero the bottom bits.  */
     181           0 :   nburn = c->spec->encrypt (&c->context.c, ktop, ktop);
     182           0 :   burn = nburn > burn ? nburn : burn;
     183             :   /* Stretch = Ktop || (Ktop[1..64] xor Ktop[9..72]) */
     184           0 :   buf_cpy (stretch, ktop, OCB_BLOCK_LEN);
     185           0 :   buf_xor (stretch + OCB_BLOCK_LEN, ktop, ktop + 1, 8);
     186             :   /* Offset_0 = Stretch[1+bottom..128+bottom]
     187             :      (We use the IV field to store the offset) */
     188           0 :   bit_copy (c->u_iv.iv, stretch, bottom, OCB_BLOCK_LEN);
     189           0 :   c->marks.iv = 1;
     190             : 
     191             :   /* Checksum_0 = zeros(128)
     192             :      (We use the CTR field to store the checksum) */
     193           0 :   memset (c->u_ctr.ctr, 0, OCB_BLOCK_LEN);
     194             : 
     195             :   /* Clear AAD buffer.  */
     196           0 :   memset (c->u_mode.ocb.aad_offset, 0, OCB_BLOCK_LEN);
     197           0 :   memset (c->u_mode.ocb.aad_sum, 0, OCB_BLOCK_LEN);
     198             : 
     199             :   /* Setup other values.  */
     200           0 :   memset (c->lastiv, 0, sizeof(c->lastiv));
     201           0 :   c->unused = 0;
     202           0 :   c->marks.tag = 0;
     203           0 :   c->marks.finalize = 0;
     204           0 :   c->u_mode.ocb.data_nblocks = 0;
     205           0 :   c->u_mode.ocb.aad_nblocks = 0;
     206           0 :   c->u_mode.ocb.aad_nleftover = 0;
     207           0 :   c->u_mode.ocb.data_finalized = 0;
     208           0 :   c->u_mode.ocb.aad_finalized = 0;
     209             : 
     210             :   /* log_printhex ("L_*       ", c->u_mode.ocb.L_star, OCB_BLOCK_LEN); */
     211             :   /* log_printhex ("L_$       ", c->u_mode.ocb.L_dollar, OCB_BLOCK_LEN); */
     212             :   /* log_printhex ("L_0       ", c->u_mode.ocb.L[0], OCB_BLOCK_LEN); */
     213             :   /* log_printhex ("L_1       ", c->u_mode.ocb.L[1], OCB_BLOCK_LEN); */
     214             :   /* log_debug (   "bottom    : %u (decimal)\n", bottom); */
     215             :   /* log_printhex ("Ktop      ", ktop, OCB_BLOCK_LEN); */
     216             :   /* log_printhex ("Stretch   ", stretch, sizeof stretch); */
     217             :   /* log_printhex ("Offset_0  ", c->u_iv.iv, OCB_BLOCK_LEN); */
     218             : 
     219             :   /* Cleanup */
     220           0 :   wipememory (ktop, sizeof ktop);
     221           0 :   wipememory (stretch, sizeof stretch);
     222           0 :   if (burn > 0)
     223           0 :     _gcry_burn_stack (burn + 4*sizeof(void*));
     224             : 
     225           0 :   return 0;
     226             : }
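                     : 
                     : /* Editorial note: a minimal caller-side sketch of the nonce setup via
                     :  * the public API, assuming a handle HD opened with
                     :  * GCRY_CIPHER_MODE_OCB and an illustrative 256-bit key.  The key must
                     :  * be set before the nonce, and the nonce must be 8 to 15 bytes long,
                     :  * as enforced above:
                     :  *
                     :  *   gcry_error_t err;
                     :  *   unsigned char nonce[12];     // 96-bit nonce; must be unique per key
                     :  *
                     :  *   err = gcry_cipher_setkey (hd, key, 32);
                     :  *   if (!err)
                     :  *     err = gcry_cipher_setiv (hd, nonce, sizeof nonce); // ends up here
                     :  */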
     227             : 
     228             : 
     229             : /* Process additional authentication data.  This implementation allows
     230             :    additional authentication data to be added at any time before the final
     231             :    gcry_cipher_gettag.  */
     232             : gcry_err_code_t
     233           0 : _gcry_cipher_ocb_authenticate (gcry_cipher_hd_t c, const unsigned char *abuf,
     234             :                                size_t abuflen)
     235             : {
     236           0 :   const size_t table_maxblks = 1 << OCB_L_TABLE_SIZE;
     237           0 :   const u32 table_size_mask = ((1 << OCB_L_TABLE_SIZE) - 1);
     238             :   unsigned char l_tmp[OCB_BLOCK_LEN];
     239           0 :   unsigned int burn = 0;
     240             :   unsigned int nburn;
     241             : 
     242             :   /* Check that a nonce and thus a key has been set and that we have
     243             :      not yet computed the tag.  We also return an error if the aad has
     244             :      been finalized (i.e. a short block has been processed).  */
     245           0 :   if (!c->marks.iv || c->marks.tag || c->u_mode.ocb.aad_finalized)
     246           0 :     return GPG_ERR_INV_STATE;
     247             : 
     248             :   /* Check correct usage and arguments.  */
     249           0 :   if (c->spec->blocksize != OCB_BLOCK_LEN)
     250           0 :     return GPG_ERR_CIPHER_ALGO;
     251             : 
     252             :   /* Process remaining data from the last call first.  */
     253           0 :   if (c->u_mode.ocb.aad_nleftover)
     254             :     {
     255           0 :       for (; abuflen && c->u_mode.ocb.aad_nleftover < OCB_BLOCK_LEN;
     256           0 :            abuf++, abuflen--)
     257           0 :         c->u_mode.ocb.aad_leftover[c->u_mode.ocb.aad_nleftover++] = *abuf;
     258             : 
     259           0 :       if (c->u_mode.ocb.aad_nleftover == OCB_BLOCK_LEN)
     260             :         {
     261           0 :           c->u_mode.ocb.aad_nblocks++;
     262             : 
     263           0 :           if ((c->u_mode.ocb.aad_nblocks % table_maxblks) == 0)
     264             :             {
     265             :               /* Table overflow, L needs to be generated. */
     266           0 :               ocb_get_L_big(c, c->u_mode.ocb.aad_nblocks + 1, l_tmp);
     267             :             }
     268             :           else
     269             :             {
     270           0 :               buf_cpy (l_tmp, ocb_get_l (c, c->u_mode.ocb.aad_nblocks),
     271             :                        OCB_BLOCK_LEN);
     272             :             }
     273             : 
     274             :           /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
     275           0 :           buf_xor_1 (c->u_mode.ocb.aad_offset, l_tmp, OCB_BLOCK_LEN);
     276             :           /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i)  */
     277           0 :           buf_xor (l_tmp, c->u_mode.ocb.aad_offset,
     278           0 :                    c->u_mode.ocb.aad_leftover, OCB_BLOCK_LEN);
     279           0 :           nburn = c->spec->encrypt (&c->context.c, l_tmp, l_tmp);
     280           0 :           burn = nburn > burn ? nburn : burn;
     281           0 :           buf_xor_1 (c->u_mode.ocb.aad_sum, l_tmp, OCB_BLOCK_LEN);
     282             : 
     283           0 :           c->u_mode.ocb.aad_nleftover = 0;
     284             :         }
     285             :     }
     286             : 
     287           0 :   if (!abuflen)
     288             :     {
     289           0 :       if (burn > 0)
     290           0 :         _gcry_burn_stack (burn + 4*sizeof(void*));
     291             : 
     292           0 :       return 0;
     293             :     }
     294             : 
     295             :   /* Full blocks handling. */
     296           0 :   while (abuflen >= OCB_BLOCK_LEN)
     297             :     {
     298           0 :       size_t nblks = abuflen / OCB_BLOCK_LEN;
     299             :       size_t nmaxblks;
     300             : 
     301             :       /* Check how many blocks to process till table overflow. */
     302           0 :       nmaxblks = (c->u_mode.ocb.aad_nblocks + 1) % table_maxblks;
     303           0 :       nmaxblks = (table_maxblks - nmaxblks) % table_maxblks;
     304             : 
     305           0 :       if (nmaxblks == 0)
     306             :         {
     307             :           /* Table overflow, generate L and process one block. */
     308           0 :           c->u_mode.ocb.aad_nblocks++;
     309           0 :           ocb_get_L_big(c, c->u_mode.ocb.aad_nblocks, l_tmp);
     310             : 
     311             :           /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
     312           0 :           buf_xor_1 (c->u_mode.ocb.aad_offset, l_tmp, OCB_BLOCK_LEN);
     313             :           /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i)  */
     314           0 :           buf_xor (l_tmp, c->u_mode.ocb.aad_offset, abuf, OCB_BLOCK_LEN);
     315           0 :           nburn = c->spec->encrypt (&c->context.c, l_tmp, l_tmp);
     316           0 :           burn = nburn > burn ? nburn : burn;
     317           0 :           buf_xor_1 (c->u_mode.ocb.aad_sum, l_tmp, OCB_BLOCK_LEN);
     318             : 
     319           0 :           abuf += OCB_BLOCK_LEN;
     320           0 :           abuflen -= OCB_BLOCK_LEN;
     321           0 :           nblks--;
     322             : 
     323             :           /* With the overflow handled, continue with the loop.  The next
     324             :            * overflow will happen after 65535 further blocks. */
     325           0 :           continue;
     326             :         }
     327             : 
     328           0 :       nblks = nblks < nmaxblks ? nblks : nmaxblks;
     329             : 
     330             :       /* Use a bulk method if available.  */
     331           0 :       if (nblks && c->bulk.ocb_auth)
     332             :         {
     333             :           size_t nleft;
     334             :           size_t ndone;
     335             : 
     336           0 :           nleft = c->bulk.ocb_auth (c, abuf, nblks);
     337           0 :           ndone = nblks - nleft;
     338             : 
     339           0 :           abuf += ndone * OCB_BLOCK_LEN;
     340           0 :           abuflen -= ndone * OCB_BLOCK_LEN;
     341           0 :           nblks = nleft;
     342             :         }
     343             : 
     344             :       /* Hash all full blocks.  */
     345           0 :       while (nblks)
     346             :         {
     347           0 :           c->u_mode.ocb.aad_nblocks++;
     348             : 
     349           0 :           gcry_assert(c->u_mode.ocb.aad_nblocks & table_size_mask);
     350             : 
     351             :           /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
     352           0 :           buf_xor_1 (c->u_mode.ocb.aad_offset,
     353           0 :                      ocb_get_l (c, c->u_mode.ocb.aad_nblocks),
     354             :                      OCB_BLOCK_LEN);
     355             :           /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i)  */
     356           0 :           buf_xor (l_tmp, c->u_mode.ocb.aad_offset, abuf, OCB_BLOCK_LEN);
     357           0 :           nburn = c->spec->encrypt (&c->context.c, l_tmp, l_tmp);
     358           0 :           burn = nburn > burn ? nburn : burn;
     359           0 :           buf_xor_1 (c->u_mode.ocb.aad_sum, l_tmp, OCB_BLOCK_LEN);
     360             : 
     361           0 :           abuf += OCB_BLOCK_LEN;
     362           0 :           abuflen -= OCB_BLOCK_LEN;
     363           0 :           nblks--;
     364             :         }
     365             :     }
     366             : 
     367             :   /* Store away the remaining data.  */
     368           0 :   for (; abuflen && c->u_mode.ocb.aad_nleftover < OCB_BLOCK_LEN;
     369           0 :        abuf++, abuflen--)
     370           0 :     c->u_mode.ocb.aad_leftover[c->u_mode.ocb.aad_nleftover++] = *abuf;
     371           0 :   gcry_assert (!abuflen);
     372             : 
     373           0 :   if (burn > 0)
     374           0 :     _gcry_burn_stack (burn + 4*sizeof(void*));
     375             : 
     376           0 :   return 0;
     377             : }
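                     : 
                     : /* Editorial note: because of the aad_leftover buffer above, callers may
                     :  * split the AAD across calls at arbitrary, non-block-aligned
                     :  * boundaries.  A hedged sketch using the public wrapper, with HD,
                     :  * header and hdrlen as illustrative names:
                     :  *
                     :  *   err = gcry_cipher_authenticate (hd, header, 13);    // partial block
                     :  *   if (!err)
                     :  *     err = gcry_cipher_authenticate (hd, header + 13, hdrlen - 13);
                     :  */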
     378             : 
     379             : 
     380             : /* Hash final partial AAD block.  */
     381             : static void
     382           0 : ocb_aad_finalize (gcry_cipher_hd_t c)
     383             : {
     384             :   unsigned char l_tmp[OCB_BLOCK_LEN];
     385           0 :   unsigned int burn = 0;
     386             :   unsigned int nburn;
     387             : 
     388             :   /* Check that a nonce and thus a key has been set and that we have
     389             :      not yet computed the tag.  We also skip this if the aad has been
     390             :      finalized.  */
     391           0 :   if (!c->marks.iv || c->marks.tag || c->u_mode.ocb.aad_finalized)
     392           0 :     return;
     393           0 :   if (c->spec->blocksize != OCB_BLOCK_LEN)
     394           0 :     return;  /* Ooops.  */
     395             : 
     396             :   /* Hash final partial block if any.  */
     397           0 :   if (c->u_mode.ocb.aad_nleftover)
     398             :     {
     399             :       /* Offset_* = Offset_m xor L_*  */
     400           0 :       buf_xor_1 (c->u_mode.ocb.aad_offset,
     401           0 :                  c->u_mode.ocb.L_star, OCB_BLOCK_LEN);
     402             :       /* CipherInput = (A_* || 1 || zeros(127-bitlen(A_*))) xor Offset_*  */
     403           0 :       buf_cpy (l_tmp, c->u_mode.ocb.aad_leftover, c->u_mode.ocb.aad_nleftover);
     404           0 :       memset (l_tmp + c->u_mode.ocb.aad_nleftover, 0,
     405           0 :               OCB_BLOCK_LEN - c->u_mode.ocb.aad_nleftover);
     406           0 :       l_tmp[c->u_mode.ocb.aad_nleftover] = 0x80;
     407           0 :       buf_xor_1 (l_tmp, c->u_mode.ocb.aad_offset, OCB_BLOCK_LEN);
     408             :       /* Sum = Sum_m xor ENCIPHER(K, CipherInput)  */
     409           0 :       nburn = c->spec->encrypt (&c->context.c, l_tmp, l_tmp);
     410           0 :       burn = nburn > burn ? nburn : burn;
     411           0 :       buf_xor_1 (c->u_mode.ocb.aad_sum, l_tmp, OCB_BLOCK_LEN);
     412             : 
     413           0 :       c->u_mode.ocb.aad_nleftover = 0;
     414             :     }
     415             : 
     416             :   /* Mark AAD as finalized so that _gcry_cipher_ocb_authenticate can
     417             :    * return an error when called again.  */
     418           0 :   c->u_mode.ocb.aad_finalized = 1;
     419             : 
     420           0 :   if (burn > 0)
     421           0 :     _gcry_burn_stack (burn + 4*sizeof(void*));
     422             : }
     423             : 
     424             : 
     425             : 
     426             : /* Checksumming for encrypt and decrypt.  */
     427             : static void
     428           0 : ocb_checksum (unsigned char *chksum, const unsigned char *plainbuf,
     429             :               size_t nblks)
     430             : {
     431           0 :   while (nblks > 0)
     432             :     {
     433             :       /* Checksum_i = Checksum_{i-1} xor P_i  */
     434           0 :       buf_xor_1(chksum, plainbuf, OCB_BLOCK_LEN);
     435             : 
     436           0 :       plainbuf += OCB_BLOCK_LEN;
     437           0 :       nblks--;
     438             :     }
     439           0 : }
     440             : 
     441             : 
     442             : /* Common code for encrypt and decrypt.  */
     443             : static gcry_err_code_t
     444           0 : ocb_crypt (gcry_cipher_hd_t c, int encrypt,
     445             :            unsigned char *outbuf, size_t outbuflen,
     446             :            const unsigned char *inbuf, size_t inbuflen)
     447             : {
     448           0 :   const size_t table_maxblks = 1 << OCB_L_TABLE_SIZE;
     449           0 :   const u32 table_size_mask = ((1 << OCB_L_TABLE_SIZE) - 1);
     450             :   unsigned char l_tmp[OCB_BLOCK_LEN];
     451           0 :   unsigned int burn = 0;
     452             :   unsigned int nburn;
     453           0 :   gcry_cipher_encrypt_t crypt_fn =
     454           0 :       encrypt ? c->spec->encrypt : c->spec->decrypt;
     455             : 
     456             :   /* Check that a nonce and thus a key has been set and that we are
     457             :      not yet in end of data state. */
     458           0 :   if (!c->marks.iv || c->u_mode.ocb.data_finalized)
     459           0 :     return GPG_ERR_INV_STATE;
     460             : 
     461             :   /* Check correct usage and arguments.  */
     462           0 :   if (c->spec->blocksize != OCB_BLOCK_LEN)
     463           0 :     return GPG_ERR_CIPHER_ALGO;
     464           0 :   if (outbuflen < inbuflen)
     465           0 :     return GPG_ERR_BUFFER_TOO_SHORT;
     466           0 :   if (c->marks.finalize)
     467             :     ; /* Allow arbitrary length. */
     468           0 :   else if ((inbuflen % OCB_BLOCK_LEN))
     469           0 :     return GPG_ERR_INV_LENGTH;  /* We support only full blocks for now.  */
     470             : 
     471             :   /* Full blocks handling. */
     472           0 :   while (inbuflen >= OCB_BLOCK_LEN)
     473             :     {
     474           0 :       size_t nblks = inbuflen / OCB_BLOCK_LEN;
     475             :       size_t nmaxblks;
     476             : 
     477             :       /* Check how many blocks to process till table overflow. */
     478           0 :       nmaxblks = (c->u_mode.ocb.data_nblocks + 1) % table_maxblks;
     479           0 :       nmaxblks = (table_maxblks - nmaxblks) % table_maxblks;
     480             : 
     481           0 :       if (nmaxblks == 0)
     482             :         {
     483             :           /* Table overflow, generate L and process one block. */
     484           0 :           c->u_mode.ocb.data_nblocks++;
     485           0 :           ocb_get_L_big(c, c->u_mode.ocb.data_nblocks, l_tmp);
     486             : 
     487           0 :           if (encrypt)
     488             :             {
     489             :               /* Checksum_i = Checksum_{i-1} xor P_i  */
     490           0 :               ocb_checksum (c->u_ctr.ctr, inbuf, 1);
     491             :             }
     492             : 
     493             :           /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
     494           0 :           buf_xor_1 (c->u_iv.iv, l_tmp, OCB_BLOCK_LEN);
     495             :           /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i)  */
     496           0 :           buf_xor (outbuf, c->u_iv.iv, inbuf, OCB_BLOCK_LEN);
     497           0 :           nburn = crypt_fn (&c->context.c, outbuf, outbuf);
     498           0 :           burn = nburn > burn ? nburn : burn;
     499           0 :           buf_xor_1 (outbuf, c->u_iv.iv, OCB_BLOCK_LEN);
     500             : 
     501           0 :           if (!encrypt)
     502             :             {
     503             :               /* Checksum_i = Checksum_{i-1} xor P_i  */
     504           0 :               ocb_checksum (c->u_ctr.ctr, outbuf, 1);
     505             :             }
     506             : 
     507           0 :           inbuf += OCB_BLOCK_LEN;
     508           0 :           inbuflen -= OCB_BLOCK_LEN;
     509           0 :           outbuf += OCB_BLOCK_LEN;
     510           0 :           outbuflen -= OCB_BLOCK_LEN;
     511           0 :           nblks--;
     512             : 
     513             :           /* With the overflow handled, continue with the loop.  The next
     514             :            * overflow will happen after 65535 further blocks. */
     515           0 :           continue;
     516             :         }
     517             : 
     518           0 :       nblks = nblks < nmaxblks ? nblks : nmaxblks;
     519             : 
     520             :       /* Use a bulk method if available.  */
     521           0 :       if (nblks && c->bulk.ocb_crypt)
     522             :         {
     523             :           size_t nleft;
     524             :           size_t ndone;
     525             : 
     526           0 :           nleft = c->bulk.ocb_crypt (c, outbuf, inbuf, nblks, encrypt);
     527           0 :           ndone = nblks - nleft;
     528             : 
     529           0 :           inbuf += ndone * OCB_BLOCK_LEN;
     530           0 :           outbuf += ndone * OCB_BLOCK_LEN;
     531           0 :           inbuflen -= ndone * OCB_BLOCK_LEN;
     532           0 :           outbuflen -= ndone * OCB_BLOCK_LEN;
     533           0 :           nblks = nleft;
     534             :         }
     535             : 
     536           0 :       if (nblks)
     537             :         {
     538           0 :           size_t nblks_chksum = nblks;
     539             : 
     540           0 :           if (encrypt)
     541             :             {
     542             :               /* Checksum_i = Checksum_{i-1} xor P_i  */
     543           0 :               ocb_checksum (c->u_ctr.ctr, inbuf, nblks_chksum);
     544             :             }
     545             : 
     546             :           /* Encrypt all full blocks.  */
     547           0 :           while (nblks)
     548             :             {
     549           0 :               c->u_mode.ocb.data_nblocks++;
     550             : 
     551           0 :               gcry_assert(c->u_mode.ocb.data_nblocks & table_size_mask);
     552             : 
     553             :               /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
     554           0 :               buf_xor_1 (c->u_iv.iv,
     555           0 :                          ocb_get_l (c, c->u_mode.ocb.data_nblocks),
     556             :                          OCB_BLOCK_LEN);
     557             :               /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i)  */
     558           0 :               buf_xor (outbuf, c->u_iv.iv, inbuf, OCB_BLOCK_LEN);
     559           0 :               nburn = crypt_fn (&c->context.c, outbuf, outbuf);
     560           0 :               burn = nburn > burn ? nburn : burn;
     561           0 :               buf_xor_1 (outbuf, c->u_iv.iv, OCB_BLOCK_LEN);
     562             : 
     563           0 :               inbuf += OCB_BLOCK_LEN;
     564           0 :               inbuflen -= OCB_BLOCK_LEN;
     565           0 :               outbuf += OCB_BLOCK_LEN;
     566           0 :               outbuflen -= OCB_BLOCK_LEN;
     567           0 :               nblks--;
     568             :             }
     569             : 
     570           0 :           if (!encrypt)
     571             :             {
     572             :               /* Checksum_i = Checksum_{i-1} xor P_i  */
     573           0 :               ocb_checksum (c->u_ctr.ctr,
     574           0 :                             outbuf - nblks_chksum * OCB_BLOCK_LEN,
     575             :                             nblks_chksum);
     576             :             }
     577             :         }
     578             :     }
     579             : 
     580             :   /* Encrypt or decrypt the final partial block.  Note that we expect
     581             :      INBUFLEN to be shorter than OCB_BLOCK_LEN (see above).  */
     582           0 :   if (inbuflen)
     583             :     {
     584             :       unsigned char pad[OCB_BLOCK_LEN];
     585             : 
     586             :       /* Offset_* = Offset_m xor L_*  */
     587           0 :       buf_xor_1 (c->u_iv.iv, c->u_mode.ocb.L_star, OCB_BLOCK_LEN);
     588             :       /* Pad = ENCIPHER(K, Offset_*) */
     589           0 :       nburn = c->spec->encrypt (&c->context.c, pad, c->u_iv.iv);
     590           0 :       burn = nburn > burn ? nburn : burn;
     591             : 
     592           0 :       if (encrypt)
     593             :         {
     594             :           /* Checksum_* = Checksum_m xor (P_* || 1 || zeros(127-bitlen(P_*))) */
     595             :           /* Note that INBUFLEN is less than OCB_BLOCK_LEN.  */
     596           0 :           buf_cpy (l_tmp, inbuf, inbuflen);
     597           0 :           memset (l_tmp + inbuflen, 0, OCB_BLOCK_LEN - inbuflen);
     598           0 :           l_tmp[inbuflen] = 0x80;
     599           0 :           buf_xor_1 (c->u_ctr.ctr, l_tmp, OCB_BLOCK_LEN);
     600             :           /* C_* = P_* xor Pad[1..bitlen(P_*)] */
     601           0 :           buf_xor (outbuf, inbuf, pad, inbuflen);
     602             :         }
     603             :       else
     604             :         {
     605             :           /* P_* = C_* xor Pad[1..bitlen(C_*)] */
     606             :           /* Checksum_* = Checksum_m xor (P_* || 1 || zeros(127-bitlen(P_*))) */
     607           0 :           buf_cpy (l_tmp, pad, OCB_BLOCK_LEN);
     608           0 :           buf_cpy (l_tmp, inbuf, inbuflen);
     609           0 :           buf_xor_1 (l_tmp, pad, OCB_BLOCK_LEN);
     610           0 :           l_tmp[inbuflen] = 0x80;
     611           0 :           buf_cpy (outbuf, l_tmp, inbuflen);
     612             : 
     613           0 :           buf_xor_1 (c->u_ctr.ctr, l_tmp, OCB_BLOCK_LEN);
     614             :         }
     615             :     }
     616             : 
     617             :   /* Compute the tag if the finalize flag has been set.  */
     618           0 :   if (c->marks.finalize)
     619             :     {
     620             :       /* Tag = ENCIPHER(K, Checksum xor Offset xor L_$) xor HASH(K,A) */
     621           0 :       buf_xor (c->u_mode.ocb.tag, c->u_ctr.ctr, c->u_iv.iv, OCB_BLOCK_LEN);
     622           0 :       buf_xor_1 (c->u_mode.ocb.tag, c->u_mode.ocb.L_dollar, OCB_BLOCK_LEN);
     623           0 :       nburn = c->spec->encrypt (&c->context.c,
     624           0 :                                 c->u_mode.ocb.tag, c->u_mode.ocb.tag);
     625           0 :       burn = nburn > burn ? nburn : burn;
     626             : 
     627           0 :       c->u_mode.ocb.data_finalized = 1;
     628             :       /* Note that the final part of the tag computation is done
     629             :          by _gcry_cipher_ocb_get_tag.  */
     630             :     }
     631             : 
     632           0 :   if (burn > 0)
     633           0 :     _gcry_burn_stack (burn + 4*sizeof(void*));
     634             : 
     635           0 :   return 0;
     636             : }
     637             : 
     638             : 
     639             : /* Encrypt (INBUF,INBUFLEN) in OCB mode to OUTBUF.  OUTBUFLEN gives
     640             :    the allocated size of OUTBUF.  This function accepts only multiples
     641             :    of a full block unless gcry_cipher_final has been called in which
     642             :    case the next block may have any length.  */
     643             : gcry_err_code_t
     644           0 : _gcry_cipher_ocb_encrypt (gcry_cipher_hd_t c,
     645             :                           unsigned char *outbuf, size_t outbuflen,
     646             :                           const unsigned char *inbuf, size_t inbuflen)
     647             : 
     648             : {
     649           0 :   return ocb_crypt (c, 1, outbuf, outbuflen, inbuf, inbuflen);
     650             : }
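                     : 
                     : /* Editorial note: a minimal, hedged end-to-end encryption sketch using
                     :  * the public libgcrypt API on top of this mode; buffer names and
                     :  * lengths (key, nonce, aad, msg, ...) are illustrative only:
                     :  *
                     :  *   #include <gcrypt.h>
                     :  *
                     :  *   gcry_cipher_hd_t hd;
                     :  *   unsigned char tag[16];
                     :  *   gcry_error_t err;
                     :  *
                     :  *   err = gcry_cipher_open (&hd, GCRY_CIPHER_AES256,
                     :  *                           GCRY_CIPHER_MODE_OCB, 0);
                     :  *   if (!err) err = gcry_cipher_setkey (hd, key, 32);
                     :  *   if (!err) err = gcry_cipher_setiv (hd, nonce, 12);
                     :  *   if (!err) err = gcry_cipher_authenticate (hd, aad, aadlen);
                     :  *   if (!err) err = gcry_cipher_final (hd);  // permit a short last block
                     :  *   if (!err) err = gcry_cipher_encrypt (hd, out, msglen, msg, msglen);
                     :  *   if (!err) err = gcry_cipher_gettag (hd, tag, sizeof tag);
                     :  *   gcry_cipher_close (hd);
                     :  */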
     651             : 
     652             : 
     653             : /* Decrypt (INBUF,INBUFLEN) in OCB mode to OUTBUF.  OUTBUFLEN gives
     654             :    the allocated size of OUTBUF.  This function accepts only multiples
     655             :    of a full block unless gcry_cipher_final has been called in which
     656             :    case the next block may have any length.  */
     657             : gcry_err_code_t
     658           0 : _gcry_cipher_ocb_decrypt (gcry_cipher_hd_t c,
     659             :                           unsigned char *outbuf, size_t outbuflen,
     660             :                           const unsigned char *inbuf, size_t inbuflen)
     661             : {
     662           0 :   return ocb_crypt (c, 0, outbuf, outbuflen, inbuf, inbuflen);
     663             : }
     664             : 
     665             : 
     666             : /* Compute the tag.  The last data operation has already done some
     667             :    part of it.  To allow adding AAD even after all data has been processed,
     668             :    we finish the tag computation only here.  */
     669             : static void
     670           0 : compute_tag_if_needed (gcry_cipher_hd_t c)
     671             : {
     672           0 :   if (!c->marks.tag)
     673             :     {
     674           0 :       ocb_aad_finalize (c);
     675           0 :       buf_xor_1 (c->u_mode.ocb.tag, c->u_mode.ocb.aad_sum, OCB_BLOCK_LEN);
     676           0 :       c->marks.tag = 1;
     677             :     }
     678           0 : }
     679             : 
     680             : 
     681             : /* Copy the already computed tag to OUTTAG.  OUTTAGSIZE is the
     682             :    allocated size of OUTTAG; the function returns an error if that is
     683             :    too short to hold the tag.  */
     684             : gcry_err_code_t
     685           0 : _gcry_cipher_ocb_get_tag (gcry_cipher_hd_t c,
     686             :                           unsigned char *outtag, size_t outtagsize)
     687             : {
     688           0 :   if (c->u_mode.ocb.taglen > outtagsize)
     689           0 :     return GPG_ERR_BUFFER_TOO_SHORT;
     690           0 :   if (!c->u_mode.ocb.data_finalized)
     691           0 :     return GPG_ERR_INV_STATE; /* Data has not yet been finalized.  */
     692             : 
     693           0 :   compute_tag_if_needed (c);
     694             : 
     695           0 :   memcpy (outtag, c->u_mode.ocb.tag, c->u_mode.ocb.taglen);
     696             : 
     697           0 :   return 0;
     698             : }
     699             : 
     700             : 
     701             : /* Check that the tag (INTAG,TAGLEN) matches the computed tag for the
     702             :    handle C.  */
     703             : gcry_err_code_t
     704           0 : _gcry_cipher_ocb_check_tag (gcry_cipher_hd_t c, const unsigned char *intag,
     705             :                             size_t taglen)
     706             : {
     707             :   size_t n;
     708             : 
     709           0 :   if (!c->u_mode.ocb.data_finalized)
     710           0 :     return GPG_ERR_INV_STATE; /* Data has not yet been finalized.  */
     711             : 
     712           0 :   compute_tag_if_needed (c);
     713             : 
     714           0 :   n = c->u_mode.ocb.taglen;
     715           0 :   if (taglen < n)
     716           0 :     n = taglen;
     717             : 
     718           0 :   if (!buf_eq_const (intag, c->u_mode.ocb.tag, n)
     719           0 :       || c->u_mode.ocb.taglen != taglen)
     720           0 :     return GPG_ERR_CHECKSUM;
     721             : 
     722           0 :   return 0;
     723             : }
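                     : 
                     : /* Editorial note: the matching decryption side, again a hedged sketch
                     :  * with illustrative names (ct, ctlen, tag); a failed verification
                     :  * surfaces as GPG_ERR_CHECKSUM from gcry_cipher_checktag, as returned
                     :  * by the function above:
                     :  *
                     :  *   if (!err) err = gcry_cipher_setiv (hd, nonce, 12);
                     :  *   if (!err) err = gcry_cipher_authenticate (hd, aad, aadlen);
                     :  *   if (!err) err = gcry_cipher_final (hd);
                     :  *   if (!err) err = gcry_cipher_decrypt (hd, out, ctlen, ct, ctlen);
                     :  *   if (!err) err = gcry_cipher_checktag (hd, tag, 16);
                     :  *   if (gpg_err_code (err) == GPG_ERR_CHECKSUM)
                     :  *     ;  // authentication failed: the plaintext must be discarded
                     :  */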

Generated by: LCOV version 1.12