Commit 17e932b2 by Enrico Pozzobon

Merged rhys and rhys-avr into the same directory

parent 0f6d7890

Too many changes to show.

To preserve performance only 1000 of 1000+ files are displayed.

#define CRYPTO_KEYBYTES 16
#define CRYPTO_NSECBYTES 0
#define CRYPTO_NPUBBYTES 16
#define CRYPTO_ABYTES 16
#define CRYPTO_NOOVERLAP 1
#include "ace.h"
/* NIST/SUPERCOP AEAD entry point for encryption.  This is a thin
 * adapter: every argument is forwarded unchanged to the ACE
 * implementation, and its result code is returned as-is. */
int crypto_aead_encrypt(
    unsigned char *c, unsigned long long *clen,
    const unsigned char *m, unsigned long long mlen,
    const unsigned char *ad, unsigned long long adlen,
    const unsigned char *nsec,
    const unsigned char *npub,
    const unsigned char *k)
{
    return ace_aead_encrypt(c, clen, m, mlen, ad, adlen, nsec, npub, k);
}
/* NIST/SUPERCOP AEAD entry point for decryption.  Thin adapter that
 * forwards all arguments verbatim to the ACE implementation and
 * propagates its return value (0 on success, negative on failure). */
int crypto_aead_decrypt(
    unsigned char *m, unsigned long long *mlen,
    unsigned char *nsec,
    const unsigned char *c, unsigned long long clen,
    const unsigned char *ad, unsigned long long adlen,
    const unsigned char *npub,
    const unsigned char *k)
{
    return ace_aead_decrypt(m, mlen, nsec, c, clen, ad, adlen, npub, k);
}
......@@ -22,6 +22,8 @@
#include "internal-sliscp-light.h"
#if !defined(__AVR__)
/**
* \brief Performs one round of the Simeck-64 block cipher.
*
......@@ -173,11 +175,12 @@ void sliscp_light256_swap_spix(unsigned char block[32])
le_store_word32(block + 12, t2);
}
void sliscp_light256_permute_spoc(unsigned char block[32], unsigned rounds)
void sliscp_light256_permute_spoc(unsigned char block[32])
{
const unsigned char *rc = sliscp_light256_RC;
uint32_t x0, x1, x2, x3, x4, x5, x6, x7;
uint32_t t0, t1;
unsigned round;
/* Load the block into local state variables */
x0 = be_load_word32(block);
......@@ -190,7 +193,7 @@ void sliscp_light256_permute_spoc(unsigned char block[32], unsigned rounds)
x7 = be_load_word32(block + 28);
/* Perform all permutation rounds */
for (; rounds > 0; --rounds, rc += 4) {
for (round = 0; round < 18; ++round, rc += 4) {
/* Apply Simeck-64 to two of the 64-bit sub-blocks */
simeck64_box(x2, x3, rc[0]);
simeck64_box(x6, x7, rc[1]);
......@@ -406,3 +409,5 @@ void sliscp_light320_swap(unsigned char block[40])
le_store_word32(block + 16, t1);
le_store_word32(block + 4, t2);
}
#endif /* !__AVR__ */
......@@ -92,7 +92,6 @@ void sliscp_light256_swap_spix(unsigned char block[32]);
* \brief Performs the sLiSCP-light permutation on a 256-bit block.
*
* \param block Points to the block to be permuted.
* \param rounds Number of rounds to be performed, usually 9 or 18.
*
* The bytes of the block are assumed to be rearranged to match the
* requirements of the SpoC-128 cipher. SpoC-128 interleaves the
......@@ -112,7 +111,7 @@ void sliscp_light256_swap_spix(unsigned char block[32]);
*
* \sa sliscp_light256_swap_spoc()
*/
void sliscp_light256_permute_spoc(unsigned char block[32], unsigned rounds);
void sliscp_light256_permute_spoc(unsigned char block[32]);
/**
* \brief Swaps rate bytes in a sLiSCP-light 256-bit block for SpoC-128.
......
......@@ -238,6 +238,17 @@
} \
} while (0)
/* Rotation functions need to be optimised for best performance on AVR.
* The most efficient rotations are where the number of bits is 1 or a
* multiple of 8, so we compose the efficient rotations to produce all
* other rotation counts of interest. */
#if defined(__AVR__)
#define LW_CRYPTO_ROTATE32_COMPOSED 1
#else
#define LW_CRYPTO_ROTATE32_COMPOSED 0
#endif
/* Rotation macros for 32-bit arguments */
/* Generic left rotate */
......@@ -254,6 +265,8 @@
(_temp >> (bits)) | (_temp << (32 - (bits))); \
}))
#if !LW_CRYPTO_ROTATE32_COMPOSED
/* Left rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define leftRotate1(a) (leftRotate((a), 1))
......@@ -322,6 +335,138 @@
#define rightRotate30(a) (rightRotate((a), 30))
#define rightRotate31(a) (rightRotate((a), 31))
#else /* LW_CRYPTO_ROTATE32_COMPOSED */
/* Composed rotation macros where 1 and 8 are fast, but others are slow */
/* Left rotate by 1 */
#define leftRotate1(a) (leftRotate((a), 1))
/* Left rotate by 2 */
#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1))
/* Left rotate by 3 */
#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1))
/* Left rotate by 4 */
#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1))
/* Left rotate by 5: Rotate left by 8, then right by 3 */
#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1))
/* Left rotate by 6: Rotate left by 8, then right by 2 */
#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1))
/* Left rotate by 7: Rotate left by 8, then right by 1 */
#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1))
/* Left rotate by 8 */
#define leftRotate8(a) (leftRotate((a), 8))
/* Left rotate by 9: Rotate left by 8, then left by 1 */
#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1))
/* Left rotate by 10: Rotate left by 8, then left by 2 */
#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1))
/* Left rotate by 11: Rotate left by 8, then left by 3 */
#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1))
/* Left rotate by 12: Rotate left by 16, then right by 4 */
#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1))
/* Left rotate by 13: Rotate left by 16, then right by 3 */
#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1))
/* Left rotate by 14: Rotate left by 16, then right by 2 */
#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1))
/* Left rotate by 15: Rotate left by 16, then right by 1 */
#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1))
/* Left rotate by 16 */
#define leftRotate16(a) (leftRotate((a), 16))
/* Left rotate by 17: Rotate left by 16, then left by 1 */
#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1))
/* Left rotate by 18: Rotate left by 16, then left by 2 */
#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1))
/* Left rotate by 19: Rotate left by 16, then left by 3 */
#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1))
/* Left rotate by 20: Rotate left by 16, then left by 4 */
#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1))
/* Left rotate by 21: Rotate left by 24, then right by 3 */
#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1))
/* Left rotate by 22: Rotate left by 24, then right by 2 */
#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1))
/* Left rotate by 23: Rotate left by 24, then right by 1 */
#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1))
/* Left rotate by 24 */
#define leftRotate24(a) (leftRotate((a), 24))
/* Left rotate by 25: Rotate left by 24, then left by 1 */
#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1))
/* Left rotate by 26: Rotate left by 24, then left by 2 */
#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1))
/* Left rotate by 27: Rotate left by 24, then left by 3 */
#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1))
/* Left rotate by 28: Rotate right by 4 */
#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1))
/* Left rotate by 29: Rotate right by 3 */
#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1))
/* Left rotate by 30: Rotate right by 2 */
#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1))
/* Left rotate by 31: Rotate right by 1 */
#define leftRotate31(a) (rightRotate((a), 1))
/* Define the 32-bit right rotations in terms of left rotations */
#define rightRotate1(a) (leftRotate31((a)))
#define rightRotate2(a) (leftRotate30((a)))
#define rightRotate3(a) (leftRotate29((a)))
#define rightRotate4(a) (leftRotate28((a)))
#define rightRotate5(a) (leftRotate27((a)))
#define rightRotate6(a) (leftRotate26((a)))
#define rightRotate7(a) (leftRotate25((a)))
#define rightRotate8(a) (leftRotate24((a)))
#define rightRotate9(a) (leftRotate23((a)))
#define rightRotate10(a) (leftRotate22((a)))
#define rightRotate11(a) (leftRotate21((a)))
#define rightRotate12(a) (leftRotate20((a)))
#define rightRotate13(a) (leftRotate19((a)))
#define rightRotate14(a) (leftRotate18((a)))
#define rightRotate15(a) (leftRotate17((a)))
#define rightRotate16(a) (leftRotate16((a)))
#define rightRotate17(a) (leftRotate15((a)))
#define rightRotate18(a) (leftRotate14((a)))
#define rightRotate19(a) (leftRotate13((a)))
#define rightRotate20(a) (leftRotate12((a)))
#define rightRotate21(a) (leftRotate11((a)))
#define rightRotate22(a) (leftRotate10((a)))
#define rightRotate23(a) (leftRotate9((a)))
#define rightRotate24(a) (leftRotate8((a)))
#define rightRotate25(a) (leftRotate7((a)))
#define rightRotate26(a) (leftRotate6((a)))
#define rightRotate27(a) (leftRotate5((a)))
#define rightRotate28(a) (leftRotate4((a)))
#define rightRotate29(a) (leftRotate3((a)))
#define rightRotate30(a) (leftRotate2((a)))
#define rightRotate31(a) (leftRotate1((a)))
#endif /* LW_CRYPTO_ROTATE32_COMPOSED */
/* Rotation macros for 64-bit arguments */
/* Generic left rotate */
......
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef LW_INTERNAL_SLISCP_LIGHT_H
#define LW_INTERNAL_SLISCP_LIGHT_H
/**
 * \file internal-sliscp-light.h
 * \brief sLiSCP-light permutation
 *
 * There are three variants of sLiSCP-light in use in the NIST submissions:
 *
 * \li sLiSCP-light-256 with a 256-bit block size, used in SPIX and SpoC.
 * \li sLiSCP-light-192 with a 192-bit block size, used in SpoC.
 * \li sLiSCP-light-320 with a 320-bit block size, used in ACE.
 *
 * References: https://uwaterloo.ca/communications-security-lab/lwc/ace,
 * https://uwaterloo.ca/communications-security-lab/lwc/spix,
 * https://uwaterloo.ca/communications-security-lab/lwc/spoc
 */
#include "internal-util.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
 * \brief Size of the state for sLiSCP-light-256, in bytes (256 bits).
 */
#define SLISCP_LIGHT256_STATE_SIZE 32
/**
 * \brief Size of the state for sLiSCP-light-192, in bytes (192 bits).
 */
#define SLISCP_LIGHT192_STATE_SIZE 24
/**
 * \brief Size of the state for sLiSCP-light-320, in bytes (320 bits).
 */
#define SLISCP_LIGHT320_STATE_SIZE 40
/**
 * \brief Performs the sLiSCP-light permutation on a 256-bit block.
 *
 * \param block Points to the block to be permuted.
 * \param rounds Number of rounds to be performed, usually 9 or 18.
 *
 * The bytes of the block are assumed to be rearranged to match the
 * requirements of the SPIX cipher.  SPIX places the rate bytes at
 * positions 8, 9, 10, 11, 24, 25, 26, and 27.
 *
 * This function assumes that bytes 24-27 have been pre-swapped with
 * bytes 12-15 so that the rate portion of the state is contiguous.
 *
 * The sliscp_light256_swap_spix() function can be used to switch
 * between the canonical order and the pre-swapped order.
 *
 * \sa sliscp_light256_swap_spix()
 */
void sliscp_light256_permute_spix(unsigned char block[32], unsigned rounds);
/**
 * \brief Swaps rate bytes in a sLiSCP-light 256-bit block for SPIX.
 *
 * \param block Points to the block to be rate-swapped.
 *
 * \sa sliscp_light256_permute_spix()
 */
void sliscp_light256_swap_spix(unsigned char block[32]);
/**
 * \brief Performs the sLiSCP-light permutation on a 256-bit block.
 *
 * \param block Points to the block to be permuted.
 *
 * The bytes of the block are assumed to be rearranged to match the
 * requirements of the SpoC-128 cipher.  SpoC-128 interleaves the
 * rate bytes and the mask bytes.  This version assumes that the
 * rate and mask are in contiguous bytes of the state.
 *
 * SpoC-128 absorbs bytes using the mask bytes of the state at offsets
 * 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, and 31.
 * It squeezes bytes using the rate bytes of the state at offsets
 * 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, and 23.
 *
 * This function assumes that bytes 8-15 have been pre-swapped with 16-23
 * so that the rate and mask portions of the state are contiguous.
 *
 * The sliscp_light256_swap_spoc() function can be used to switch
 * between the canonical order and the pre-swapped order.
 *
 * \sa sliscp_light256_swap_spoc()
 */
void sliscp_light256_permute_spoc(unsigned char block[32]);
/**
 * \brief Swaps rate bytes in a sLiSCP-light 256-bit block for SpoC-128.
 *
 * \param block Points to the block to be rate-swapped.
 *
 * \sa sliscp_light256_permute_spoc()
 */
void sliscp_light256_swap_spoc(unsigned char block[32]);
/**
 * \brief Performs the sLiSCP-light permutation on a 192-bit block.
 *
 * \param block Points to the block to be permuted.
 */
void sliscp_light192_permute(unsigned char block[24]);
/**
 * \brief Performs the sLiSCP-light permutation on a 320-bit block.
 *
 * \param block Points to the block to be permuted.
 *
 * The ACE specification refers to this permutation as "ACE" but that
 * can be confused with the name of the AEAD mode so we call this
 * permutation "sLiSCP-light-320" instead.
 *
 * ACE absorbs and squeezes data at the rate bytes 0, 1, 2, 3, 16, 17, 18, 19.
 * Efficiency can suffer because of the discontinuity in rate byte positions.
 *
 * To counteract this, we assume that the input to the permutation has been
 * pre-swapped: bytes 4, 5, 6, 7 are swapped with bytes 16, 17, 18, 19 so
 * that the rate is contiguous at the start of the state.
 *
 * The sliscp_light320_swap() function can be used to switch between the
 * canonical order and the pre-swapped order.
 *
 * \sa sliscp_light320_swap()
 */
void sliscp_light320_permute(unsigned char block[40]);
/**
 * \brief Swaps rate bytes in a sLiSCP-light 320-bit block.
 *
 * \param block Points to the block to be rate-swapped.
 *
 * \sa sliscp_light320_permute()
 */
void sliscp_light320_swap(unsigned char block[40]);
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* LW_INTERNAL_SLISCP_LIGHT_H */
#define CRYPTO_KEYBYTES 16
#define CRYPTO_NSECBYTES 0
#define CRYPTO_NPUBBYTES 16
#define CRYPTO_ABYTES 16
#define CRYPTO_NOOVERLAP 1
#include "ascon128.h"
/* NIST/SUPERCOP AEAD entry point for encryption.  Thin adapter that
 * forwards every argument unchanged to the ASCON-128a implementation
 * and returns its result code directly. */
int crypto_aead_encrypt(
    unsigned char *c, unsigned long long *clen,
    const unsigned char *m, unsigned long long mlen,
    const unsigned char *ad, unsigned long long adlen,
    const unsigned char *nsec,
    const unsigned char *npub,
    const unsigned char *k)
{
    return ascon128a_aead_encrypt(c, clen, m, mlen, ad, adlen, nsec, npub, k);
}
/* NIST/SUPERCOP AEAD entry point for decryption.  Thin adapter that
 * forwards every argument unchanged to the ASCON-128a implementation
 * and propagates its return value (0 on success, negative on failure). */
int crypto_aead_decrypt(
    unsigned char *m, unsigned long long *mlen,
    unsigned char *nsec,
    const unsigned char *c, unsigned long long clen,
    const unsigned char *ad, unsigned long long adlen,
    const unsigned char *npub,
    const unsigned char *k)
{
    return ascon128a_aead_decrypt(m, mlen, nsec, c, clen, ad, adlen, npub, k);
}
......@@ -22,6 +22,8 @@
#include "internal-ascon.h"
#if !defined(__AVR__)
void ascon_permute(ascon_state_t *state, uint8_t first_round)
{
uint64_t t0, t1, t2, t3, t4;
......@@ -74,3 +76,5 @@ void ascon_permute(ascon_state_t *state, uint8_t first_round)
state->S[4] = x4;
#endif
}
#endif /* !__AVR__ */
......@@ -238,6 +238,17 @@
} \
} while (0)
/* Rotation functions need to be optimised for best performance on AVR.
* The most efficient rotations are where the number of bits is 1 or a
* multiple of 8, so we compose the efficient rotations to produce all
* other rotation counts of interest. */
#if defined(__AVR__)
#define LW_CRYPTO_ROTATE32_COMPOSED 1
#else
#define LW_CRYPTO_ROTATE32_COMPOSED 0
#endif
/* Rotation macros for 32-bit arguments */
/* Generic left rotate */
......@@ -254,6 +265,8 @@
(_temp >> (bits)) | (_temp << (32 - (bits))); \
}))
#if !LW_CRYPTO_ROTATE32_COMPOSED
/* Left rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define leftRotate1(a) (leftRotate((a), 1))
......@@ -322,6 +335,138 @@
#define rightRotate30(a) (rightRotate((a), 30))
#define rightRotate31(a) (rightRotate((a), 31))
#else /* LW_CRYPTO_ROTATE32_COMPOSED */
/* Composed rotation macros where 1 and 8 are fast, but others are slow */
/* Left rotate by 1 */
#define leftRotate1(a) (leftRotate((a), 1))
/* Left rotate by 2 */
#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1))
/* Left rotate by 3 */
#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1))
/* Left rotate by 4 */
#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1))
/* Left rotate by 5: Rotate left by 8, then right by 3 */
#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1))
/* Left rotate by 6: Rotate left by 8, then right by 2 */
#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1))
/* Left rotate by 7: Rotate left by 8, then right by 1 */
#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1))
/* Left rotate by 8 */
#define leftRotate8(a) (leftRotate((a), 8))
/* Left rotate by 9: Rotate left by 8, then left by 1 */
#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1))
/* Left rotate by 10: Rotate left by 8, then left by 2 */
#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1))
/* Left rotate by 11: Rotate left by 8, then left by 3 */
#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1))
/* Left rotate by 12: Rotate left by 16, then right by 4 */
#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1))
/* Left rotate by 13: Rotate left by 16, then right by 3 */
#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1))
/* Left rotate by 14: Rotate left by 16, then right by 2 */
#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1))
/* Left rotate by 15: Rotate left by 16, then right by 1 */
#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1))
/* Left rotate by 16 */
#define leftRotate16(a) (leftRotate((a), 16))
/* Left rotate by 17: Rotate left by 16, then left by 1 */
#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1))
/* Left rotate by 18: Rotate left by 16, then left by 2 */
#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1))
/* Left rotate by 19: Rotate left by 16, then left by 3 */
#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1))
/* Left rotate by 20: Rotate left by 16, then left by 4 */
#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1))
/* Left rotate by 21: Rotate left by 24, then right by 3 */
#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1))
/* Left rotate by 22: Rotate left by 24, then right by 2 */
#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1))
/* Left rotate by 23: Rotate left by 24, then right by 1 */
#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1))
/* Left rotate by 24 */
#define leftRotate24(a) (leftRotate((a), 24))
/* Left rotate by 25: Rotate left by 24, then left by 1 */
#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1))
/* Left rotate by 26: Rotate left by 24, then left by 2 */
#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1))
/* Left rotate by 27: Rotate left by 24, then left by 3 */
#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1))
/* Left rotate by 28: Rotate right by 4 */
#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1))
/* Left rotate by 29: Rotate right by 3 */
#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1))
/* Left rotate by 30: Rotate right by 2 */
#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1))
/* Left rotate by 31: Rotate right by 1 */
#define leftRotate31(a) (rightRotate((a), 1))
/* Define the 32-bit right rotations in terms of left rotations */
#define rightRotate1(a) (leftRotate31((a)))
#define rightRotate2(a) (leftRotate30((a)))
#define rightRotate3(a) (leftRotate29((a)))
#define rightRotate4(a) (leftRotate28((a)))
#define rightRotate5(a) (leftRotate27((a)))
#define rightRotate6(a) (leftRotate26((a)))
#define rightRotate7(a) (leftRotate25((a)))
#define rightRotate8(a) (leftRotate24((a)))
#define rightRotate9(a) (leftRotate23((a)))
#define rightRotate10(a) (leftRotate22((a)))
#define rightRotate11(a) (leftRotate21((a)))
#define rightRotate12(a) (leftRotate20((a)))
#define rightRotate13(a) (leftRotate19((a)))
#define rightRotate14(a) (leftRotate18((a)))
#define rightRotate15(a) (leftRotate17((a)))
#define rightRotate16(a) (leftRotate16((a)))
#define rightRotate17(a) (leftRotate15((a)))
#define rightRotate18(a) (leftRotate14((a)))
#define rightRotate19(a) (leftRotate13((a)))
#define rightRotate20(a) (leftRotate12((a)))
#define rightRotate21(a) (leftRotate11((a)))
#define rightRotate22(a) (leftRotate10((a)))
#define rightRotate23(a) (leftRotate9((a)))
#define rightRotate24(a) (leftRotate8((a)))
#define rightRotate25(a) (leftRotate7((a)))
#define rightRotate26(a) (leftRotate6((a)))
#define rightRotate27(a) (leftRotate5((a)))
#define rightRotate28(a) (leftRotate4((a)))
#define rightRotate29(a) (leftRotate3((a)))
#define rightRotate30(a) (leftRotate2((a)))
#define rightRotate31(a) (leftRotate1((a)))
#endif /* LW_CRYPTO_ROTATE32_COMPOSED */
/* Rotation macros for 64-bit arguments */
/* Generic left rotate */
......
#define CRYPTO_KEYBYTES 16
#define CRYPTO_NSECBYTES 0
#define CRYPTO_NPUBBYTES 16
#define CRYPTO_ABYTES 16
#define CRYPTO_NOOVERLAP 1
#include "ascon128.h"
/* NIST/SUPERCOP AEAD entry point for encryption.  Thin adapter that
 * forwards every argument unchanged to the ASCON-128 implementation
 * and returns its result code directly. */
int crypto_aead_encrypt(
    unsigned char *c, unsigned long long *clen,
    const unsigned char *m, unsigned long long mlen,
    const unsigned char *ad, unsigned long long adlen,
    const unsigned char *nsec,
    const unsigned char *npub,
    const unsigned char *k)
{
    return ascon128_aead_encrypt(c, clen, m, mlen, ad, adlen, nsec, npub, k);
}
/* NIST/SUPERCOP AEAD entry point for decryption.  Thin adapter that
 * forwards every argument unchanged to the ASCON-128 implementation
 * and propagates its return value (0 on success, negative on failure). */
int crypto_aead_decrypt(
    unsigned char *m, unsigned long long *mlen,
    unsigned char *nsec,
    const unsigned char *c, unsigned long long clen,
    const unsigned char *ad, unsigned long long adlen,
    const unsigned char *npub,
    const unsigned char *k)
{
    return ascon128_aead_decrypt(m, mlen, nsec, c, clen, ad, adlen, npub, k);
}
......@@ -22,6 +22,8 @@
#include "internal-ascon.h"
#if !defined(__AVR__)
void ascon_permute(ascon_state_t *state, uint8_t first_round)
{
uint64_t t0, t1, t2, t3, t4;
......@@ -74,3 +76,5 @@ void ascon_permute(ascon_state_t *state, uint8_t first_round)
state->S[4] = x4;
#endif
}
#endif /* !__AVR__ */
......@@ -238,6 +238,17 @@
} \
} while (0)
/* Rotation functions need to be optimised for best performance on AVR.
* The most efficient rotations are where the number of bits is 1 or a
* multiple of 8, so we compose the efficient rotations to produce all
* other rotation counts of interest. */
#if defined(__AVR__)
#define LW_CRYPTO_ROTATE32_COMPOSED 1
#else
#define LW_CRYPTO_ROTATE32_COMPOSED 0
#endif
/* Rotation macros for 32-bit arguments */
/* Generic left rotate */
......@@ -254,6 +265,8 @@
(_temp >> (bits)) | (_temp << (32 - (bits))); \
}))
#if !LW_CRYPTO_ROTATE32_COMPOSED
/* Left rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define leftRotate1(a) (leftRotate((a), 1))
......@@ -322,6 +335,138 @@
#define rightRotate30(a) (rightRotate((a), 30))
#define rightRotate31(a) (rightRotate((a), 31))
#else /* LW_CRYPTO_ROTATE32_COMPOSED */
/* Composed rotation macros where 1 and 8 are fast, but others are slow */
/* Left rotate by 1 */
#define leftRotate1(a) (leftRotate((a), 1))
/* Left rotate by 2 */
#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1))
/* Left rotate by 3 */
#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1))
/* Left rotate by 4 */
#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1))
/* Left rotate by 5: Rotate left by 8, then right by 3 */
#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1))
/* Left rotate by 6: Rotate left by 8, then right by 2 */
#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1))
/* Left rotate by 7: Rotate left by 8, then right by 1 */
#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1))
/* Left rotate by 8 */
#define leftRotate8(a) (leftRotate((a), 8))
/* Left rotate by 9: Rotate left by 8, then left by 1 */
#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1))
/* Left rotate by 10: Rotate left by 8, then left by 2 */
#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1))
/* Left rotate by 11: Rotate left by 8, then left by 3 */
#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1))
/* Left rotate by 12: Rotate left by 16, then right by 4 */
#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1))
/* Left rotate by 13: Rotate left by 16, then right by 3 */
#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1))
/* Left rotate by 14: Rotate left by 16, then right by 2 */
#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1))
/* Left rotate by 15: Rotate left by 16, then right by 1 */
#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1))
/* Left rotate by 16 */
#define leftRotate16(a) (leftRotate((a), 16))
/* Left rotate by 17: Rotate left by 16, then left by 1 */
#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1))
/* Left rotate by 18: Rotate left by 16, then left by 2 */
#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1))
/* Left rotate by 19: Rotate left by 16, then left by 3 */
#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1))
/* Left rotate by 20: Rotate left by 16, then left by 4 */
#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1))
/* Left rotate by 21: Rotate left by 24, then right by 3 */
#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1))
/* Left rotate by 22: Rotate left by 24, then right by 2 */
#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1))
/* Left rotate by 23: Rotate left by 24, then right by 1 */
#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1))
/* Left rotate by 24 */
#define leftRotate24(a) (leftRotate((a), 24))
/* Left rotate by 25: Rotate left by 24, then left by 1 */
#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1))
/* Left rotate by 26: Rotate left by 24, then left by 2 */
#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1))
/* Left rotate by 27: Rotate left by 24, then left by 3 */
#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1))
/* Left rotate by 28: Rotate right by 4 */
#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1))
/* Left rotate by 29: Rotate right by 3 */
#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1))
/* Left rotate by 30: Rotate right by 2 */
#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1))
/* Left rotate by 31: Rotate right by 1 */
#define leftRotate31(a) (rightRotate((a), 1))
/* Define the 32-bit right rotations in terms of left rotations */
#define rightRotate1(a) (leftRotate31((a)))
#define rightRotate2(a) (leftRotate30((a)))
#define rightRotate3(a) (leftRotate29((a)))
#define rightRotate4(a) (leftRotate28((a)))
#define rightRotate5(a) (leftRotate27((a)))
#define rightRotate6(a) (leftRotate26((a)))
#define rightRotate7(a) (leftRotate25((a)))
#define rightRotate8(a) (leftRotate24((a)))
#define rightRotate9(a) (leftRotate23((a)))
#define rightRotate10(a) (leftRotate22((a)))
#define rightRotate11(a) (leftRotate21((a)))
#define rightRotate12(a) (leftRotate20((a)))
#define rightRotate13(a) (leftRotate19((a)))
#define rightRotate14(a) (leftRotate18((a)))
#define rightRotate15(a) (leftRotate17((a)))
#define rightRotate16(a) (leftRotate16((a)))
#define rightRotate17(a) (leftRotate15((a)))
#define rightRotate18(a) (leftRotate14((a)))
#define rightRotate19(a) (leftRotate13((a)))
#define rightRotate20(a) (leftRotate12((a)))
#define rightRotate21(a) (leftRotate11((a)))
#define rightRotate22(a) (leftRotate10((a)))
#define rightRotate23(a) (leftRotate9((a)))
#define rightRotate24(a) (leftRotate8((a)))
#define rightRotate25(a) (leftRotate7((a)))
#define rightRotate26(a) (leftRotate6((a)))
#define rightRotate27(a) (leftRotate5((a)))
#define rightRotate28(a) (leftRotate4((a)))
#define rightRotate29(a) (leftRotate3((a)))
#define rightRotate30(a) (leftRotate2((a)))
#define rightRotate31(a) (leftRotate1((a)))
#endif /* LW_CRYPTO_ROTATE32_COMPOSED */
/* Rotation macros for 64-bit arguments */
/* Generic left rotate */
......
#define CRYPTO_KEYBYTES 20
#define CRYPTO_NSECBYTES 0
#define CRYPTO_NPUBBYTES 16
#define CRYPTO_ABYTES 16
#define CRYPTO_NOOVERLAP 1
#include "ascon128.h"
/* NIST/SUPERCOP AEAD entry point for encryption.  Thin adapter that
 * forwards every argument unchanged to the ASCON-80pq implementation
 * (note the 20-byte key, per CRYPTO_KEYBYTES above) and returns its
 * result code directly. */
int crypto_aead_encrypt(
    unsigned char *c, unsigned long long *clen,
    const unsigned char *m, unsigned long long mlen,
    const unsigned char *ad, unsigned long long adlen,
    const unsigned char *nsec,
    const unsigned char *npub,
    const unsigned char *k)
{
    return ascon80pq_aead_encrypt(c, clen, m, mlen, ad, adlen, nsec, npub, k);
}
/* NIST/SUPERCOP AEAD entry point for decryption.  Thin adapter that
 * forwards every argument unchanged to the ASCON-80pq implementation
 * and propagates its return value (0 on success, negative on failure). */
int crypto_aead_decrypt(
    unsigned char *m, unsigned long long *mlen,
    unsigned char *nsec,
    const unsigned char *c, unsigned long long clen,
    const unsigned char *ad, unsigned long long adlen,
    const unsigned char *npub,
    const unsigned char *k)
{
    return ascon80pq_aead_decrypt(m, mlen, nsec, c, clen, ad, adlen, npub, k);
}
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal-ascon.h"
#if !defined(__AVR__)
/**
 * \brief Applies rounds of the ASCON permutation to a 320-bit state.
 *
 * \param state The ASCON state, viewed as five 64-bit words, permuted
 *        in place.
 * \param first_round Index of the first round to apply, in the range
 *        0..11.  Rounds first_round through 11 are executed, so passing
 *        0 runs the full 12-round permutation and passing 6 runs the
 *        final 6 rounds.
 */
void ascon_permute(ascon_state_t *state, uint8_t first_round)
{
uint64_t t0, t1, t2, t3, t4;
#if defined(LW_UTIL_LITTLE_ENDIAN)
/* On little-endian hosts the state bytes appear to be kept in
 * big-endian order, so each word is loaded through a byte-swapping
 * accessor.  NOTE(review): be_load_word64() semantics assumed from its
 * name — confirm against internal-util.h. */
uint64_t x0 = be_load_word64(state->B);
uint64_t x1 = be_load_word64(state->B + 8);
uint64_t x2 = be_load_word64(state->B + 16);
uint64_t x3 = be_load_word64(state->B + 24);
uint64_t x4 = be_load_word64(state->B + 32);
#else
/* Big-endian hosts can use the 64-bit words of the state directly. */
uint64_t x0 = state->S[0];
uint64_t x1 = state->S[1];
uint64_t x2 = state->S[2];
uint64_t x3 = state->S[3];
uint64_t x4 = state->S[4];
#endif
while (first_round < 12) {
/* Add the round constant to the state; for round r the constant is
 * ((0x0F - r) << 4) | r, i.e. 0xF0 for round 0 down to 0x4B for
 * round 11, XORed into the middle word x2. */
x2 ^= ((0x0F - first_round) << 4) | first_round;
/* Substitution layer - apply the s-box using bit-slicing
 * according to the algorithm recommended in the specification */
x0 ^= x4; x4 ^= x3; x2 ^= x1;
t0 = ~x0; t1 = ~x1; t2 = ~x2; t3 = ~x3; t4 = ~x4;
t0 &= x1; t1 &= x2; t2 &= x3; t3 &= x4; t4 &= x0;
x0 ^= t1; x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t0;
x1 ^= x0; x0 ^= x4; x3 ^= x2; x2 = ~x2;
/* Linear diffusion layer: each word is mixed with two rotated
 * copies of itself, using a distinct pair of rotation amounts
 * per word. */
x0 ^= rightRotate19_64(x0) ^ rightRotate28_64(x0);
x1 ^= rightRotate61_64(x1) ^ rightRotate39_64(x1);
x2 ^= rightRotate1_64(x2) ^ rightRotate6_64(x2);
x3 ^= rightRotate10_64(x3) ^ rightRotate17_64(x3);
x4 ^= rightRotate7_64(x4) ^ rightRotate41_64(x4);
/* Move onto the next round */
++first_round;
}
/* Write the local words back to the state, mirroring the load above. */
#if defined(LW_UTIL_LITTLE_ENDIAN)
be_store_word64(state->B, x0);
be_store_word64(state->B + 8, x1);
be_store_word64(state->B + 16, x2);
be_store_word64(state->B + 24, x3);
be_store_word64(state->B + 32, x4);
#else
state->S[0] = x0;
state->S[1] = x1;
state->S[2] = x2;
state->S[3] = x3;
state->S[4] = x4;
#endif
}
#endif /* !__AVR__ */
......@@ -22,6 +22,8 @@
#include "internal-ascon.h"
#if !defined(__AVR__)
void ascon_permute(ascon_state_t *state, uint8_t first_round)
{
uint64_t t0, t1, t2, t3, t4;
......@@ -74,3 +76,5 @@ void ascon_permute(ascon_state_t *state, uint8_t first_round)
state->S[4] = x4;
#endif
}
#endif /* !__AVR__ */
......@@ -238,6 +238,17 @@
} \
} while (0)
/* Rotation functions need to be optimised for best performance on AVR.
* The most efficient rotations are where the number of bits is 1 or a
* multiple of 8, so we compose the efficient rotations to produce all
* other rotation counts of interest. */
#if defined(__AVR__)
#define LW_CRYPTO_ROTATE32_COMPOSED 1
#else
#define LW_CRYPTO_ROTATE32_COMPOSED 0
#endif
/* Rotation macros for 32-bit arguments */
/* Generic left rotate */
......@@ -254,6 +265,8 @@
(_temp >> (bits)) | (_temp << (32 - (bits))); \
}))
#if !LW_CRYPTO_ROTATE32_COMPOSED
/* Left rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define leftRotate1(a) (leftRotate((a), 1))
......@@ -322,6 +335,138 @@
#define rightRotate30(a) (rightRotate((a), 30))
#define rightRotate31(a) (rightRotate((a), 31))
#else /* LW_CRYPTO_ROTATE32_COMPOSED */
/* Composed rotation macros where 1 and 8 are fast, but others are slow */
/* Left rotate by 1 */
#define leftRotate1(a) (leftRotate((a), 1))
/* Left rotate by 2 */
#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1))
/* Left rotate by 3 */
#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1))
/* Left rotate by 4 */
#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1))
/* Left rotate by 5: Rotate left by 8, then right by 3 */
#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1))
/* Left rotate by 6: Rotate left by 8, then right by 2 */
#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1))
/* Left rotate by 7: Rotate left by 8, then right by 1 */
#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1))
/* Left rotate by 8 */
#define leftRotate8(a) (leftRotate((a), 8))
/* Left rotate by 9: Rotate left by 8, then left by 1 */
#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1))
/* Left rotate by 10: Rotate left by 8, then left by 2 */
#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1))
/* Left rotate by 11: Rotate left by 8, then left by 3 */
#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1))
/* Left rotate by 12: Rotate left by 16, then right by 4 */
#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1))
/* Left rotate by 13: Rotate left by 16, then right by 3 */
#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1))
/* Left rotate by 14: Rotate left by 16, then right by 2 */
#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1))
/* Left rotate by 15: Rotate left by 16, then right by 1 */
#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1))
/* Left rotate by 16 */
#define leftRotate16(a) (leftRotate((a), 16))
/* Left rotate by 17: Rotate left by 16, then left by 1 */
#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1))
/* Left rotate by 18: Rotate left by 16, then left by 2 */
#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1))
/* Left rotate by 19: Rotate left by 16, then left by 3 */
#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1))
/* Left rotate by 20: Rotate left by 16, then left by 4 */
#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1))
/* Left rotate by 21: Rotate left by 24, then right by 3 */
#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1))
/* Left rotate by 22: Rotate left by 24, then right by 2 */
#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1))
/* Left rotate by 23: Rotate left by 24, then right by 1 */
#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1))
/* Left rotate by 24 */
#define leftRotate24(a) (leftRotate((a), 24))
/* Left rotate by 25: Rotate left by 24, then left by 1 */
#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1))
/* Left rotate by 26: Rotate left by 24, then left by 2 */
#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1))
/* Left rotate by 27: Rotate left by 24, then left by 3 */
#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1))
/* Left rotate by 28: Rotate right by 4 */
#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1))
/* Left rotate by 29: Rotate right by 3 */
#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1))
/* Left rotate by 30: Rotate right by 2 */
#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1))
/* Left rotate by 31: Rotate right by 1 */
#define leftRotate31(a) (rightRotate((a), 1))
/* Define the 32-bit right rotations in terms of left rotations */
#define rightRotate1(a) (leftRotate31((a)))
#define rightRotate2(a) (leftRotate30((a)))
#define rightRotate3(a) (leftRotate29((a)))
#define rightRotate4(a) (leftRotate28((a)))
#define rightRotate5(a) (leftRotate27((a)))
#define rightRotate6(a) (leftRotate26((a)))
#define rightRotate7(a) (leftRotate25((a)))
#define rightRotate8(a) (leftRotate24((a)))
#define rightRotate9(a) (leftRotate23((a)))
#define rightRotate10(a) (leftRotate22((a)))
#define rightRotate11(a) (leftRotate21((a)))
#define rightRotate12(a) (leftRotate20((a)))
#define rightRotate13(a) (leftRotate19((a)))
#define rightRotate14(a) (leftRotate18((a)))
#define rightRotate15(a) (leftRotate17((a)))
#define rightRotate16(a) (leftRotate16((a)))
#define rightRotate17(a) (leftRotate15((a)))
#define rightRotate18(a) (leftRotate14((a)))
#define rightRotate19(a) (leftRotate13((a)))
#define rightRotate20(a) (leftRotate12((a)))
#define rightRotate21(a) (leftRotate11((a)))
#define rightRotate22(a) (leftRotate10((a)))
#define rightRotate23(a) (leftRotate9((a)))
#define rightRotate24(a) (leftRotate8((a)))
#define rightRotate25(a) (leftRotate7((a)))
#define rightRotate26(a) (leftRotate6((a)))
#define rightRotate27(a) (leftRotate5((a)))
#define rightRotate28(a) (leftRotate4((a)))
#define rightRotate29(a) (leftRotate3((a)))
#define rightRotate30(a) (leftRotate2((a)))
#define rightRotate31(a) (leftRotate1((a)))
#endif /* LW_CRYPTO_ROTATE32_COMPOSED */
/* Rotation macros for 64-bit arguments */
/* Generic left rotate */
......
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal-ascon.h"
#if !defined(__AVR__)
/**
 * \brief Permutes the 320-bit ASCON state.
 *
 * \param state The ASCON state, viewed as five 64-bit words.
 * \param first_round The first round to execute, between 0 and 11;
 * rounds first_round..11 are applied, so 0 gives the full 12-round
 * permutation and larger values give the reduced-round variants.
 */
void ascon_permute(ascon_state_t *state, uint8_t first_round)
{
    uint64_t t0, t1, t2, t3, t4;
#if defined(LW_UTIL_LITTLE_ENDIAN)
    /* The state bytes are big-endian, so byte-swap the words on load */
    uint64_t x0 = be_load_word64(state->B);
    uint64_t x1 = be_load_word64(state->B + 8);
    uint64_t x2 = be_load_word64(state->B + 16);
    uint64_t x3 = be_load_word64(state->B + 24);
    uint64_t x4 = be_load_word64(state->B + 32);
#else
    uint64_t x0 = state->S[0];
    uint64_t x1 = state->S[1];
    uint64_t x2 = state->S[2];
    uint64_t x3 = state->S[3];
    uint64_t x4 = state->S[4];
#endif
    while (first_round < 12) {
        /* Add the round constant to the state */
        x2 ^= ((0x0F - first_round) << 4) | first_round;
        /* Substitution layer - apply the s-box using bit-slicing
         * according to the algorithm recommended in the specification */
        x0 ^= x4; x4 ^= x3; x2 ^= x1;
        t0 = ~x0; t1 = ~x1; t2 = ~x2; t3 = ~x3; t4 = ~x4;
        t0 &= x1; t1 &= x2; t2 &= x3; t3 &= x4; t4 &= x0;
        x0 ^= t1; x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t0;
        x1 ^= x0; x0 ^= x4; x3 ^= x2; x2 = ~x2;
        /* Linear diffusion layer */
        x0 ^= rightRotate19_64(x0) ^ rightRotate28_64(x0);
        x1 ^= rightRotate61_64(x1) ^ rightRotate39_64(x1);
        x2 ^= rightRotate1_64(x2) ^ rightRotate6_64(x2);
        x3 ^= rightRotate10_64(x3) ^ rightRotate17_64(x3);
        x4 ^= rightRotate7_64(x4) ^ rightRotate41_64(x4);
        /* Move onto the next round */
        ++first_round;
    }
#if defined(LW_UTIL_LITTLE_ENDIAN)
    /* Store the words back in big-endian byte order */
    be_store_word64(state->B, x0);
    be_store_word64(state->B + 8, x1);
    be_store_word64(state->B + 16, x2);
    be_store_word64(state->B + 24, x3);
    be_store_word64(state->B + 32, x4);
#else
    state->S[0] = x0;
    state->S[1] = x1;
    state->S[2] = x2;
    state->S[3] = x3;
    state->S[4] = x4;
#endif
}
#endif /* !__AVR__ */
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal-ascon.h"
#if !defined(__AVR__)
/**
 * \brief Permutes the 320-bit ASCON state.
 *
 * \param state The ASCON state, viewed as five 64-bit words.
 * \param first_round The first round to execute, between 0 and 11;
 * rounds first_round..11 are applied, so 0 gives the full 12-round
 * permutation and larger values give the reduced-round variants.
 */
void ascon_permute(ascon_state_t *state, uint8_t first_round)
{
    uint64_t t0, t1, t2, t3, t4;
#if defined(LW_UTIL_LITTLE_ENDIAN)
    /* The state bytes are big-endian, so byte-swap the words on load */
    uint64_t x0 = be_load_word64(state->B);
    uint64_t x1 = be_load_word64(state->B + 8);
    uint64_t x2 = be_load_word64(state->B + 16);
    uint64_t x3 = be_load_word64(state->B + 24);
    uint64_t x4 = be_load_word64(state->B + 32);
#else
    uint64_t x0 = state->S[0];
    uint64_t x1 = state->S[1];
    uint64_t x2 = state->S[2];
    uint64_t x3 = state->S[3];
    uint64_t x4 = state->S[4];
#endif
    while (first_round < 12) {
        /* Add the round constant to the state */
        x2 ^= ((0x0F - first_round) << 4) | first_round;
        /* Substitution layer - apply the s-box using bit-slicing
         * according to the algorithm recommended in the specification */
        x0 ^= x4; x4 ^= x3; x2 ^= x1;
        t0 = ~x0; t1 = ~x1; t2 = ~x2; t3 = ~x3; t4 = ~x4;
        t0 &= x1; t1 &= x2; t2 &= x3; t3 &= x4; t4 &= x0;
        x0 ^= t1; x1 ^= t2; x2 ^= t3; x3 ^= t4; x4 ^= t0;
        x1 ^= x0; x0 ^= x4; x3 ^= x2; x2 = ~x2;
        /* Linear diffusion layer */
        x0 ^= rightRotate19_64(x0) ^ rightRotate28_64(x0);
        x1 ^= rightRotate61_64(x1) ^ rightRotate39_64(x1);
        x2 ^= rightRotate1_64(x2) ^ rightRotate6_64(x2);
        x3 ^= rightRotate10_64(x3) ^ rightRotate17_64(x3);
        x4 ^= rightRotate7_64(x4) ^ rightRotate41_64(x4);
        /* Move onto the next round */
        ++first_round;
    }
#if defined(LW_UTIL_LITTLE_ENDIAN)
    /* Store the words back in big-endian byte order */
    be_store_word64(state->B, x0);
    be_store_word64(state->B + 8, x1);
    be_store_word64(state->B + 16, x2);
    be_store_word64(state->B + 24, x3);
    be_store_word64(state->B + 32, x4);
#else
    state->S[0] = x0;
    state->S[1] = x1;
    state->S[2] = x2;
    state->S[3] = x3;
    state->S[4] = x4;
#endif
}
#endif /* !__AVR__ */
#define CRYPTO_KEYBYTES 16
#define CRYPTO_NSECBYTES 0
#define CRYPTO_NPUBBYTES 16
#define CRYPTO_ABYTES 16
#define CRYPTO_NOOVERLAP 1
#include "comet.h"
int crypto_aead_encrypt
    (unsigned char *c, unsigned long long *clen,
     const unsigned char *m, unsigned long long mlen,
     const unsigned char *ad, unsigned long long adlen,
     const unsigned char *nsec,
     const unsigned char *npub,
     const unsigned char *k)
{
    /* NIST LWC API shim: delegate to the COMET-128 CHAM-128/128 cipher */
    return comet_128_cham_aead_encrypt(c, clen, m, mlen, ad, adlen, nsec, npub, k);
}
int crypto_aead_decrypt
    (unsigned char *m, unsigned long long *mlen,
     unsigned char *nsec,
     const unsigned char *c, unsigned long long clen,
     const unsigned char *ad, unsigned long long adlen,
     const unsigned char *npub,
     const unsigned char *k)
{
    /* NIST LWC API shim: delegate to the COMET-128 CHAM-128/128 cipher */
    return comet_128_cham_aead_decrypt(m, mlen, nsec, c, clen, ad, adlen, npub, k);
}
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal-cham.h"
#include "internal-util.h"
#if !defined(__AVR__)
/*
 * Encrypts a single 16-byte block with CHAM-128/128.  The key schedule
 * is regenerated on every call, which keeps the function stateless.
 */
void cham128_128_encrypt
    (const unsigned char *key, unsigned char *output,
     const unsigned char *input)
{
    uint32_t s0, s1, s2, s3;
    uint32_t ks[8];
    uint8_t round;
    unsigned index;

    /* Load the 128-bit key little-endian */
    for (index = 0; index < 4; ++index)
        ks[index] = le_load_word32(key + index * 4);

    /* Derive the high round keys from the original low words; the
     * source word for ks[4 + i] is ks[i ^ 1] (pairwise swapped order) */
    for (index = 0; index < 4; ++index) {
        uint32_t w = ks[index ^ 1];
        ks[4 + index] = w ^ leftRotate1(w) ^ leftRotate11(w);
    }

    /* Then mix the low round keys in place */
    for (index = 0; index < 4; ++index)
        ks[index] ^= leftRotate1(ks[index]) ^ leftRotate8(ks[index]);

    /* Load the plaintext block little-endian */
    s0 = le_load_word32(input);
    s1 = le_load_word32(input + 4);
    s2 = le_load_word32(input + 8);
    s3 = le_load_word32(input + 12);

    /* 80 rounds, unrolled eight at a time so the round keys can be
     * referenced directly; even rounds rotate left by 8, odd by 1 */
    for (round = 0; round < 80; round += 8) {
        s0 = leftRotate8((s0 ^ round) + (leftRotate1(s1) ^ ks[0]));
        s1 = leftRotate1((s1 ^ (round + 1)) + (leftRotate8(s2) ^ ks[1]));
        s2 = leftRotate8((s2 ^ (round + 2)) + (leftRotate1(s3) ^ ks[2]));
        s3 = leftRotate1((s3 ^ (round + 3)) + (leftRotate8(s0) ^ ks[3]));
        s0 = leftRotate8((s0 ^ (round + 4)) + (leftRotate1(s1) ^ ks[4]));
        s1 = leftRotate1((s1 ^ (round + 5)) + (leftRotate8(s2) ^ ks[5]));
        s2 = leftRotate8((s2 ^ (round + 6)) + (leftRotate1(s3) ^ ks[6]));
        s3 = leftRotate1((s3 ^ (round + 7)) + (leftRotate8(s0) ^ ks[7]));
    }

    /* Store the ciphertext block little-endian */
    le_store_word32(output, s0);
    le_store_word32(output + 4, s1);
    le_store_word32(output + 8, s2);
    le_store_word32(output + 12, s3);
}
/*
 * Encrypts a single 8-byte block with CHAM-64/128.  The sixteen 16-bit
 * round keys are regenerated on every call, keeping the function
 * stateless.
 */
void cham64_128_encrypt
    (const unsigned char *key, unsigned char *output,
     const unsigned char *input)
{
    uint16_t s0, s1, s2, s3;
    uint16_t ks[16];
    uint8_t round;
    unsigned index;

    /* Load the 128-bit key as eight little-endian 16-bit words */
    for (index = 0; index < 8; ++index)
        ks[index] = le_load_word16(key + index * 2);

    /* Derive the high round keys from the original low words; the
     * source word for ks[8 + i] is ks[i ^ 1] (pairwise swapped order) */
    for (index = 0; index < 8; ++index) {
        uint16_t w = ks[index ^ 1];
        ks[8 + index] = w ^ leftRotate1_16(w) ^ leftRotate11_16(w);
    }

    /* Then mix the low round keys in place */
    for (index = 0; index < 8; ++index)
        ks[index] ^= leftRotate1_16(ks[index]) ^ leftRotate8_16(ks[index]);

    /* Load the plaintext block little-endian */
    s0 = le_load_word16(input);
    s1 = le_load_word16(input + 2);
    s2 = le_load_word16(input + 4);
    s3 = le_load_word16(input + 6);

    /* 80 rounds, unrolled four at a time; the round keys are consumed
     * cyclically, and even rounds rotate left by 8, odd by 1 */
    for (round = 0; round < 80; round += 4) {
        s0 = leftRotate8_16
            ((s0 ^ round) +
             (leftRotate1_16(s1) ^ ks[round % 16]));
        s1 = leftRotate1_16
            ((s1 ^ (round + 1)) +
             (leftRotate8_16(s2) ^ ks[(round + 1) % 16]));
        s2 = leftRotate8_16
            ((s2 ^ (round + 2)) +
             (leftRotate1_16(s3) ^ ks[(round + 2) % 16]));
        s3 = leftRotate1_16
            ((s3 ^ (round + 3)) +
             (leftRotate8_16(s0) ^ ks[(round + 3) % 16]));
    }

    /* Store the ciphertext block little-endian */
    le_store_word16(output, s0);
    le_store_word16(output + 2, s1);
    le_store_word16(output + 4, s2);
    le_store_word16(output + 6, s3);
}
#endif
......@@ -22,6 +22,7 @@
#include "comet.h"
#include "internal-cham.h"
#include "internal-speck64.h"
#include "internal-util.h"
#include <string.h>
......@@ -478,58 +479,6 @@ int comet_64_cham_aead_decrypt
return aead_check_tag(m, *mlen, Y, c + *mlen, COMET_64_TAG_SIZE);
}
/**
* \brief Encrypts a 64-bit block with SPECK-64-128 in COMET byte order.
*
* \param key Points to the 16 bytes of the key.
* \param output Output buffer which must be at least 8 bytes in length.
* \param input Input buffer which must be at least 8 bytes in length.
*
* The \a input and \a output buffers can be the same buffer for
* in-place encryption.
*
* \note This version differs from standard SPECK-64 in that it uses the
* little-endian byte order from the COMET specification which is different
* from the big-endian byte order from the original SPECK paper.
*/
static void speck64_128_comet_encrypt
    (const unsigned char *key, unsigned char *output,
     const unsigned char *input)
{
    uint32_t l[4];          /* ring buffer for the key schedule "l" words */
    uint32_t x, y, s;       /* block halves and current round key word */
    uint8_t round;
    uint8_t li_in = 0;      /* ring-buffer read index */
    uint8_t li_out = 3;     /* ring-buffer write index, 3 ahead of li_in */

    /* Unpack the key and the input block */
    s = le_load_word32(key);
    l[0] = le_load_word32(key + 4);
    l[1] = le_load_word32(key + 8);
    l[2] = le_load_word32(key + 12);
    y = le_load_word32(input);
    x = le_load_word32(input + 4);

    /* Perform all encryption rounds except the last.  The key schedule
     * is computed on the fly, interleaved with the data rounds, so no
     * expanded-key buffer is needed (27 rounds in total, so the final
     * data round below uses the 27th key word without deriving a 28th) */
    for (round = 0; round < 26; ++round) {
        /* Perform the round with the current key schedule word */
        x = (rightRotate8(x) + y) ^ s;
        y = leftRotate3(y) ^ x;

        /* Calculate the next key schedule word */
        l[li_out] = (s + rightRotate8(l[li_in])) ^ round;
        s = leftRotate3(s) ^ l[li_out];
        li_in = (li_in + 1) & 0x03;
        li_out = (li_out + 1) & 0x03;
    }

    /* Perform the last encryption round and write the result to the output */
    x = (rightRotate8(x) + y) ^ s;
    y = leftRotate3(y) ^ x;
    le_store_word32(output, y);
    le_store_word32(output + 4, x);
}
int comet_64_speck_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
......@@ -547,23 +496,23 @@ int comet_64_speck_aead_encrypt
/* Set up the initial state of Y and Z */
memset(Y, 0, 8);
speck64_128_comet_encrypt(k, Y, Y);
speck64_128_encrypt(k, Y, Y);
memcpy(Z, npub, 15);
Z[15] = 0;
lw_xor_block(Z, k, 16);
/* Process the associated data */
if (adlen > 0)
comet_process_ad(Y, Z, 8, speck64_128_comet_encrypt, ad, adlen);
comet_process_ad(Y, Z, 8, speck64_128_encrypt, ad, adlen);
/* Encrypt the plaintext to produce the ciphertext */
if (mlen > 0)
comet_encrypt_64(Y, Z, speck64_128_comet_encrypt, c, m, mlen);
comet_encrypt_64(Y, Z, speck64_128_encrypt, c, m, mlen);
/* Generate the authentication tag */
Z[15] ^= 0x80;
comet_adjust_block_key(Z);
speck64_128_comet_encrypt(Z, c + mlen, Y);
speck64_128_encrypt(Z, c + mlen, Y);
return 0;
}
......@@ -586,22 +535,22 @@ int comet_64_speck_aead_decrypt
/* Set up the initial state of Y and Z */
memset(Y, 0, 8);
speck64_128_comet_encrypt(k, Y, Y);
speck64_128_encrypt(k, Y, Y);
memcpy(Z, npub, 15);
Z[15] = 0;
lw_xor_block(Z, k, 16);
/* Process the associated data */
if (adlen > 0)
comet_process_ad(Y, Z, 8, speck64_128_comet_encrypt, ad, adlen);
comet_process_ad(Y, Z, 8, speck64_128_encrypt, ad, adlen);
/* Decrypt the ciphertext to produce the plaintext */
if (clen > COMET_64_TAG_SIZE)
comet_decrypt_64(Y, Z, speck64_128_comet_encrypt, m, c, *mlen);
comet_decrypt_64(Y, Z, speck64_128_encrypt, m, c, *mlen);
/* Check the authentication tag */
Z[15] ^= 0x80;
comet_adjust_block_key(Z);
speck64_128_comet_encrypt(Z, Y, Y);
speck64_128_encrypt(Z, Y, Y);
return aead_check_tag(m, *mlen, Y, c + *mlen, COMET_64_TAG_SIZE);
}
......@@ -23,6 +23,8 @@
#include "internal-cham.h"
#include "internal-util.h"
#if !defined(__AVR__)
void cham128_128_encrypt
(const unsigned char *key, unsigned char *output,
const unsigned char *input)
......@@ -132,3 +134,5 @@ void cham64_128_encrypt
le_store_word16(output + 4, x2);
le_store_word16(output + 6, x3);
}
#endif
......@@ -238,6 +238,17 @@
} \
} while (0)
/* Rotation functions need to be optimised for best performance on AVR.
* The most efficient rotations are where the number of bits is 1 or a
* multiple of 8, so we compose the efficient rotations to produce all
* other rotation counts of interest. */
#if defined(__AVR__)
#define LW_CRYPTO_ROTATE32_COMPOSED 1
#else
#define LW_CRYPTO_ROTATE32_COMPOSED 0
#endif
/* Rotation macros for 32-bit arguments */
/* Generic left rotate */
......@@ -254,6 +265,8 @@
(_temp >> (bits)) | (_temp << (32 - (bits))); \
}))
#if !LW_CRYPTO_ROTATE32_COMPOSED
/* Left rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define leftRotate1(a) (leftRotate((a), 1))
......@@ -322,6 +335,138 @@
#define rightRotate30(a) (rightRotate((a), 30))
#define rightRotate31(a) (rightRotate((a), 31))
#else /* LW_CRYPTO_ROTATE32_COMPOSED */
/* Composed rotation macros where 1 and 8 are fast, but others are slow */
/* Left rotate by 1 */
#define leftRotate1(a) (leftRotate((a), 1))
/* Left rotate by 2 */
#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1))
/* Left rotate by 3 */
#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1))
/* Left rotate by 4 */
#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1))
/* Left rotate by 5: Rotate left by 8, then right by 3 */
#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1))
/* Left rotate by 6: Rotate left by 8, then right by 2 */
#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1))
/* Left rotate by 7: Rotate left by 8, then right by 1 */
#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1))
/* Left rotate by 8 */
#define leftRotate8(a) (leftRotate((a), 8))
/* Left rotate by 9: Rotate left by 8, then left by 1 */
#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1))
/* Left rotate by 10: Rotate left by 8, then left by 2 */
#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1))
/* Left rotate by 11: Rotate left by 8, then left by 3 */
#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1))
/* Left rotate by 12: Rotate left by 16, then right by 4 */
#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1))
/* Left rotate by 13: Rotate left by 16, then right by 3 */
#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1))
/* Left rotate by 14: Rotate left by 16, then right by 2 */
#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1))
/* Left rotate by 15: Rotate left by 16, then right by 1 */
#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1))
/* Left rotate by 16 */
#define leftRotate16(a) (leftRotate((a), 16))
/* Left rotate by 17: Rotate left by 16, then left by 1 */
#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1))
/* Left rotate by 18: Rotate left by 16, then left by 2 */
#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1))
/* Left rotate by 19: Rotate left by 16, then left by 3 */
#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1))
/* Left rotate by 20: Rotate left by 16, then left by 4 */
#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1))
/* Left rotate by 21: Rotate left by 24, then right by 3 */
#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1))
/* Left rotate by 22: Rotate left by 24, then right by 2 */
#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1))
/* Left rotate by 23: Rotate left by 24, then right by 1 */
#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1))
/* Left rotate by 24 */
#define leftRotate24(a) (leftRotate((a), 24))
/* Left rotate by 25: Rotate left by 24, then left by 1 */
#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1))
/* Left rotate by 26: Rotate left by 24, then left by 2 */
#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1))
/* Left rotate by 27: Rotate left by 24, then left by 3 */
#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1))
/* Left rotate by 28: Rotate right by 4 */
#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1))
/* Left rotate by 29: Rotate right by 3 */
#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1))
/* Left rotate by 30: Rotate right by 2 */
#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1))
/* Left rotate by 31: Rotate right by 1 */
#define leftRotate31(a) (rightRotate((a), 1))
/* Define the 32-bit right rotations in terms of left rotations */
#define rightRotate1(a) (leftRotate31((a)))
#define rightRotate2(a) (leftRotate30((a)))
#define rightRotate3(a) (leftRotate29((a)))
#define rightRotate4(a) (leftRotate28((a)))
#define rightRotate5(a) (leftRotate27((a)))
#define rightRotate6(a) (leftRotate26((a)))
#define rightRotate7(a) (leftRotate25((a)))
#define rightRotate8(a) (leftRotate24((a)))
#define rightRotate9(a) (leftRotate23((a)))
#define rightRotate10(a) (leftRotate22((a)))
#define rightRotate11(a) (leftRotate21((a)))
#define rightRotate12(a) (leftRotate20((a)))
#define rightRotate13(a) (leftRotate19((a)))
#define rightRotate14(a) (leftRotate18((a)))
#define rightRotate15(a) (leftRotate17((a)))
#define rightRotate16(a) (leftRotate16((a)))
#define rightRotate17(a) (leftRotate15((a)))
#define rightRotate18(a) (leftRotate14((a)))
#define rightRotate19(a) (leftRotate13((a)))
#define rightRotate20(a) (leftRotate12((a)))
#define rightRotate21(a) (leftRotate11((a)))
#define rightRotate22(a) (leftRotate10((a)))
#define rightRotate23(a) (leftRotate9((a)))
#define rightRotate24(a) (leftRotate8((a)))
#define rightRotate25(a) (leftRotate7((a)))
#define rightRotate26(a) (leftRotate6((a)))
#define rightRotate27(a) (leftRotate5((a)))
#define rightRotate28(a) (leftRotate4((a)))
#define rightRotate29(a) (leftRotate3((a)))
#define rightRotate30(a) (leftRotate2((a)))
#define rightRotate31(a) (leftRotate1((a)))
#endif /* LW_CRYPTO_ROTATE32_COMPOSED */
/* Rotation macros for 64-bit arguments */
/* Generic left rotate */
......
#define CRYPTO_KEYBYTES 16
#define CRYPTO_NSECBYTES 0
#define CRYPTO_NPUBBYTES 15
#define CRYPTO_ABYTES 8
#define CRYPTO_NOOVERLAP 1
#include "comet.h"
int crypto_aead_encrypt
    (unsigned char *c, unsigned long long *clen,
     const unsigned char *m, unsigned long long mlen,
     const unsigned char *ad, unsigned long long adlen,
     const unsigned char *nsec,
     const unsigned char *npub,
     const unsigned char *k)
{
    /* NIST LWC API shim: delegate to the COMET-64 CHAM-64/128 cipher */
    return comet_64_cham_aead_encrypt(c, clen, m, mlen, ad, adlen, nsec, npub, k);
}
int crypto_aead_decrypt
    (unsigned char *m, unsigned long long *mlen,
     unsigned char *nsec,
     const unsigned char *c, unsigned long long clen,
     const unsigned char *ad, unsigned long long adlen,
     const unsigned char *npub,
     const unsigned char *k)
{
    /* NIST LWC API shim: delegate to the COMET-64 CHAM-64/128 cipher */
    return comet_64_cham_aead_decrypt(m, mlen, nsec, c, clen, ad, adlen, npub, k);
}
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal-cham.h"
#include "internal-util.h"
#if !defined(__AVR__)
/**
 * \brief Encrypts a 128-bit block with CHAM-128/128.
 *
 * \param key Points to the 16 bytes of the key (little-endian words).
 * \param output Output buffer for the 16-byte ciphertext block; may be
 * the same buffer as \a input for in-place encryption.
 * \param input Input buffer containing the 16-byte plaintext block.
 *
 * The key schedule is regenerated on every call.
 */
void cham128_128_encrypt
    (const unsigned char *key, unsigned char *output,
     const unsigned char *input)
{
    uint32_t x0, x1, x2, x3;
    uint32_t k[8];
    uint8_t round;

    /* Unpack the key and generate the key schedule */
    k[0] = le_load_word32(key);
    k[1] = le_load_word32(key + 4);
    k[2] = le_load_word32(key + 8);
    k[3] = le_load_word32(key + 12);
    /* The high round keys are derived from the original low words in
     * pairwise-swapped order, before the low words are mixed in place */
    k[4] = k[1] ^ leftRotate1(k[1]) ^ leftRotate11(k[1]);
    k[5] = k[0] ^ leftRotate1(k[0]) ^ leftRotate11(k[0]);
    k[6] = k[3] ^ leftRotate1(k[3]) ^ leftRotate11(k[3]);
    k[7] = k[2] ^ leftRotate1(k[2]) ^ leftRotate11(k[2]);
    k[0] ^= leftRotate1(k[0]) ^ leftRotate8(k[0]);
    k[1] ^= leftRotate1(k[1]) ^ leftRotate8(k[1]);
    k[2] ^= leftRotate1(k[2]) ^ leftRotate8(k[2]);
    k[3] ^= leftRotate1(k[3]) ^ leftRotate8(k[3]);

    /* Unpack the input block */
    x0 = le_load_word32(input);
    x1 = le_load_word32(input + 4);
    x2 = le_load_word32(input + 8);
    x3 = le_load_word32(input + 12);

    /* Perform the 80 rounds eight at a time; even-numbered rounds
     * rotate left by 8 and odd-numbered rounds rotate left by 1 */
    for (round = 0; round < 80; round += 8) {
        x0 = leftRotate8((x0 ^ round) + (leftRotate1(x1) ^ k[0]));
        x1 = leftRotate1((x1 ^ (round + 1)) + (leftRotate8(x2) ^ k[1]));
        x2 = leftRotate8((x2 ^ (round + 2)) + (leftRotate1(x3) ^ k[2]));
        x3 = leftRotate1((x3 ^ (round + 3)) + (leftRotate8(x0) ^ k[3]));
        x0 = leftRotate8((x0 ^ (round + 4)) + (leftRotate1(x1) ^ k[4]));
        x1 = leftRotate1((x1 ^ (round + 5)) + (leftRotate8(x2) ^ k[5]));
        x2 = leftRotate8((x2 ^ (round + 6)) + (leftRotate1(x3) ^ k[6]));
        x3 = leftRotate1((x3 ^ (round + 7)) + (leftRotate8(x0) ^ k[7]));
    }

    /* Pack the state into the output block */
    le_store_word32(output, x0);
    le_store_word32(output + 4, x1);
    le_store_word32(output + 8, x2);
    le_store_word32(output + 12, x3);
}
/**
 * \brief Encrypts a 64-bit block with the CHAM-64-128 block cipher.
 *
 * \param key Points to the 16 bytes of the key.
 * \param output Output buffer, at least 8 bytes in length.
 * \param input Input buffer, at least 8 bytes in length.
 *
 * \a input and \a output may be the same buffer for in-place encryption.
 * All words are loaded and stored in little-endian byte order.
 */
void cham64_128_encrypt
    (const unsigned char *key, unsigned char *output,
     const unsigned char *input)
{
    uint16_t x0, x1, x2, x3;
    uint16_t k[16];
    uint8_t round;
    /* Unpack the key and generate the 16-word key schedule.  The second
     * half k[8..15] is derived from the original key words taken in
     * swapped pairs (k1, k0, k3, k2, k5, k4, k7, k6); the first half is
     * then transformed in place, so k[8..15] must be computed first. */
    k[0] = le_load_word16(key);
    k[1] = le_load_word16(key + 2);
    k[2] = le_load_word16(key + 4);
    k[3] = le_load_word16(key + 6);
    k[4] = le_load_word16(key + 8);
    k[5] = le_load_word16(key + 10);
    k[6] = le_load_word16(key + 12);
    k[7] = le_load_word16(key + 14);
    k[8] = k[1] ^ leftRotate1_16(k[1]) ^ leftRotate11_16(k[1]);
    k[9] = k[0] ^ leftRotate1_16(k[0]) ^ leftRotate11_16(k[0]);
    k[10] = k[3] ^ leftRotate1_16(k[3]) ^ leftRotate11_16(k[3]);
    k[11] = k[2] ^ leftRotate1_16(k[2]) ^ leftRotate11_16(k[2]);
    k[12] = k[5] ^ leftRotate1_16(k[5]) ^ leftRotate11_16(k[5]);
    k[13] = k[4] ^ leftRotate1_16(k[4]) ^ leftRotate11_16(k[4]);
    k[14] = k[7] ^ leftRotate1_16(k[7]) ^ leftRotate11_16(k[7]);
    k[15] = k[6] ^ leftRotate1_16(k[6]) ^ leftRotate11_16(k[6]);
    k[0] ^= leftRotate1_16(k[0]) ^ leftRotate8_16(k[0]);
    k[1] ^= leftRotate1_16(k[1]) ^ leftRotate8_16(k[1]);
    k[2] ^= leftRotate1_16(k[2]) ^ leftRotate8_16(k[2]);
    k[3] ^= leftRotate1_16(k[3]) ^ leftRotate8_16(k[3]);
    k[4] ^= leftRotate1_16(k[4]) ^ leftRotate8_16(k[4]);
    k[5] ^= leftRotate1_16(k[5]) ^ leftRotate8_16(k[5]);
    k[6] ^= leftRotate1_16(k[6]) ^ leftRotate8_16(k[6]);
    k[7] ^= leftRotate1_16(k[7]) ^ leftRotate8_16(k[7]);
    /* Unpack the input block */
    x0 = le_load_word16(input);
    x1 = le_load_word16(input + 2);
    x2 = le_load_word16(input + 4);
    x3 = le_load_word16(input + 6);
    /* Perform the 80 rounds four at a time.  Unlike CHAM-128-128 the
     * schedule has 16 words, so the index cycles as (round + i) % 16.
     * Even rounds rotate left by 8, odd rounds rotate left by 1. */
    for (round = 0; round < 80; round += 4) {
        x0 = leftRotate8_16
            ((x0 ^ round) +
             (leftRotate1_16(x1) ^ k[round % 16]));
        x1 = leftRotate1_16
            ((x1 ^ (round + 1)) +
             (leftRotate8_16(x2) ^ k[(round + 1) % 16]));
        x2 = leftRotate8_16
            ((x2 ^ (round + 2)) +
             (leftRotate1_16(x3) ^ k[(round + 2) % 16]));
        x3 = leftRotate1_16
            ((x3 ^ (round + 3)) +
             (leftRotate8_16(x0) ^ k[(round + 3) % 16]));
    }
    /* Pack the state into the output block */
    le_store_word16(output, x0);
    le_store_word16(output + 2, x1);
    le_store_word16(output + 4, x2);
    le_store_word16(output + 6, x3);
}
#endif
......@@ -22,6 +22,7 @@
#include "comet.h"
#include "internal-cham.h"
#include "internal-speck64.h"
#include "internal-util.h"
#include <string.h>
......@@ -478,58 +479,6 @@ int comet_64_cham_aead_decrypt
return aead_check_tag(m, *mlen, Y, c + *mlen, COMET_64_TAG_SIZE);
}
/**
 * \brief Encrypts a 64-bit block with SPECK-64-128 in COMET byte order.
 *
 * \param key Points to the 16 bytes of the key.
 * \param output Output buffer which must be at least 8 bytes in length.
 * \param input Input buffer which must be at least 8 bytes in length.
 *
 * The \a input and \a output buffers can be the same buffer for
 * in-place encryption.
 *
 * \note This version differs from standard SPECK-64 in that it uses the
 * little-endian byte order from the COMET specification which is different
 * from the big-endian byte order from the original SPECK paper.
 */
static void speck64_128_comet_encrypt
    (const unsigned char *key, unsigned char *output,
     const unsigned char *input)
{
    uint32_t l[4];       /* Circular buffer of key schedule "l" words */
    uint32_t x, y, s;    /* x/y = block halves, s = current round key */
    uint8_t round;
    uint8_t li_in = 0;   /* Read index into l[] for the schedule update */
    uint8_t li_out = 3;  /* Write index into l[]; stays 3 ahead of li_in */
    /* Unpack the key and the input block (all little-endian) */
    s = le_load_word32(key);
    l[0] = le_load_word32(key + 4);
    l[1] = le_load_word32(key + 8);
    l[2] = le_load_word32(key + 12);
    y = le_load_word32(input);
    x = le_load_word32(input + 4);
    /* Perform all encryption rounds except the last.  The key schedule
     * is expanded on the fly, interleaved with the data rounds, so the
     * full 27-word schedule never needs to be stored. */
    for (round = 0; round < 26; ++round) {
        /* Perform the round with the current key schedule word */
        x = (rightRotate8(x) + y) ^ s;
        y = leftRotate3(y) ^ x;
        /* Calculate the next key schedule word */
        l[li_out] = (s + rightRotate8(l[li_in])) ^ round;
        s = leftRotate3(s) ^ l[li_out];
        li_in = (li_in + 1) & 0x03;
        li_out = (li_out + 1) & 0x03;
    }
    /* Perform the last (27th) encryption round; no schedule update is
     * needed since s will not be used again.  Write the result out. */
    x = (rightRotate8(x) + y) ^ s;
    y = leftRotate3(y) ^ x;
    le_store_word32(output, y);
    le_store_word32(output + 4, x);
}
int comet_64_speck_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
......@@ -547,23 +496,23 @@ int comet_64_speck_aead_encrypt
/* Set up the initial state of Y and Z */
memset(Y, 0, 8);
speck64_128_comet_encrypt(k, Y, Y);
speck64_128_encrypt(k, Y, Y);
memcpy(Z, npub, 15);
Z[15] = 0;
lw_xor_block(Z, k, 16);
/* Process the associated data */
if (adlen > 0)
comet_process_ad(Y, Z, 8, speck64_128_comet_encrypt, ad, adlen);
comet_process_ad(Y, Z, 8, speck64_128_encrypt, ad, adlen);
/* Encrypt the plaintext to produce the ciphertext */
if (mlen > 0)
comet_encrypt_64(Y, Z, speck64_128_comet_encrypt, c, m, mlen);
comet_encrypt_64(Y, Z, speck64_128_encrypt, c, m, mlen);
/* Generate the authentication tag */
Z[15] ^= 0x80;
comet_adjust_block_key(Z);
speck64_128_comet_encrypt(Z, c + mlen, Y);
speck64_128_encrypt(Z, c + mlen, Y);
return 0;
}
......@@ -586,22 +535,22 @@ int comet_64_speck_aead_decrypt
/* Set up the initial state of Y and Z */
memset(Y, 0, 8);
speck64_128_comet_encrypt(k, Y, Y);
speck64_128_encrypt(k, Y, Y);
memcpy(Z, npub, 15);
Z[15] = 0;
lw_xor_block(Z, k, 16);
/* Process the associated data */
if (adlen > 0)
comet_process_ad(Y, Z, 8, speck64_128_comet_encrypt, ad, adlen);
comet_process_ad(Y, Z, 8, speck64_128_encrypt, ad, adlen);
/* Decrypt the ciphertext to produce the plaintext */
if (clen > COMET_64_TAG_SIZE)
comet_decrypt_64(Y, Z, speck64_128_comet_encrypt, m, c, *mlen);
comet_decrypt_64(Y, Z, speck64_128_encrypt, m, c, *mlen);
/* Check the authentication tag */
Z[15] ^= 0x80;
comet_adjust_block_key(Z);
speck64_128_comet_encrypt(Z, Y, Y);
speck64_128_encrypt(Z, Y, Y);
return aead_check_tag(m, *mlen, Y, c + *mlen, COMET_64_TAG_SIZE);
}
......@@ -23,6 +23,8 @@
#include "internal-cham.h"
#include "internal-util.h"
#if !defined(__AVR__)
void cham128_128_encrypt
(const unsigned char *key, unsigned char *output,
const unsigned char *input)
......@@ -132,3 +134,5 @@ void cham64_128_encrypt
le_store_word16(output + 4, x2);
le_store_word16(output + 6, x3);
}
#endif
......@@ -238,6 +238,17 @@
} \
} while (0)
/* Rotation functions need to be optimised for best performance on AVR.
* The most efficient rotations are where the number of bits is 1 or a
* multiple of 8, so we compose the efficient rotations to produce all
* other rotation counts of interest. */
#if defined(__AVR__)
#define LW_CRYPTO_ROTATE32_COMPOSED 1
#else
#define LW_CRYPTO_ROTATE32_COMPOSED 0
#endif
/* Rotation macros for 32-bit arguments */
/* Generic left rotate */
......@@ -254,6 +265,8 @@
(_temp >> (bits)) | (_temp << (32 - (bits))); \
}))
#if !LW_CRYPTO_ROTATE32_COMPOSED
/* Left rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define leftRotate1(a) (leftRotate((a), 1))
......@@ -322,6 +335,138 @@
#define rightRotate30(a) (rightRotate((a), 30))
#define rightRotate31(a) (rightRotate((a), 31))
#else /* LW_CRYPTO_ROTATE32_COMPOSED */
/* Composed rotation macros where 1 and 8 are fast, but others are slow */
/* Left rotate by 1 */
#define leftRotate1(a) (leftRotate((a), 1))
/* Left rotate by 2 */
#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1))
/* Left rotate by 3 */
#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1))
/* Left rotate by 4 */
#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1))
/* Left rotate by 5: Rotate left by 8, then right by 3 */
#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1))
/* Left rotate by 6: Rotate left by 8, then right by 2 */
#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1))
/* Left rotate by 7: Rotate left by 8, then right by 1 */
#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1))
/* Left rotate by 8 */
#define leftRotate8(a) (leftRotate((a), 8))
/* Left rotate by 9: Rotate left by 8, then left by 1 */
#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1))
/* Left rotate by 10: Rotate left by 8, then left by 2 */
#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1))
/* Left rotate by 11: Rotate left by 8, then left by 3 */
#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1))
/* Left rotate by 12: Rotate left by 16, then right by 4 */
#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1))
/* Left rotate by 13: Rotate left by 16, then right by 3 */
#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1))
/* Left rotate by 14: Rotate left by 16, then right by 2 */
#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1))
/* Left rotate by 15: Rotate left by 16, then right by 1 */
#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1))
/* Left rotate by 16 */
#define leftRotate16(a) (leftRotate((a), 16))
/* Left rotate by 17: Rotate left by 16, then left by 1 */
#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1))
/* Left rotate by 18: Rotate left by 16, then left by 2 */
#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1))
/* Left rotate by 19: Rotate left by 16, then left by 3 */
#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1))
/* Left rotate by 20: Rotate left by 16, then left by 4 */
#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1))
/* Left rotate by 21: Rotate left by 24, then right by 3 */
#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1))
/* Left rotate by 22: Rotate left by 24, then right by 2 */
#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1))
/* Left rotate by 23: Rotate left by 24, then right by 1 */
#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1))
/* Left rotate by 24 */
#define leftRotate24(a) (leftRotate((a), 24))
/* Left rotate by 25: Rotate left by 24, then left by 1 */
#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1))
/* Left rotate by 26: Rotate left by 24, then left by 2 */
#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1))
/* Left rotate by 27: Rotate left by 24, then left by 3 */
#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1))
/* Left rotate by 28: Rotate right by 4 */
#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1))
/* Left rotate by 29: Rotate right by 3 */
#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1))
/* Left rotate by 30: Rotate right by 2 */
#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1))
/* Left rotate by 31: Rotate right by 1 */
#define leftRotate31(a) (rightRotate((a), 1))
/* Define the 32-bit right rotations in terms of left rotations */
#define rightRotate1(a) (leftRotate31((a)))
#define rightRotate2(a) (leftRotate30((a)))
#define rightRotate3(a) (leftRotate29((a)))
#define rightRotate4(a) (leftRotate28((a)))
#define rightRotate5(a) (leftRotate27((a)))
#define rightRotate6(a) (leftRotate26((a)))
#define rightRotate7(a) (leftRotate25((a)))
#define rightRotate8(a) (leftRotate24((a)))
#define rightRotate9(a) (leftRotate23((a)))
#define rightRotate10(a) (leftRotate22((a)))
#define rightRotate11(a) (leftRotate21((a)))
#define rightRotate12(a) (leftRotate20((a)))
#define rightRotate13(a) (leftRotate19((a)))
#define rightRotate14(a) (leftRotate18((a)))
#define rightRotate15(a) (leftRotate17((a)))
#define rightRotate16(a) (leftRotate16((a)))
#define rightRotate17(a) (leftRotate15((a)))
#define rightRotate18(a) (leftRotate14((a)))
#define rightRotate19(a) (leftRotate13((a)))
#define rightRotate20(a) (leftRotate12((a)))
#define rightRotate21(a) (leftRotate11((a)))
#define rightRotate22(a) (leftRotate10((a)))
#define rightRotate23(a) (leftRotate9((a)))
#define rightRotate24(a) (leftRotate8((a)))
#define rightRotate25(a) (leftRotate7((a)))
#define rightRotate26(a) (leftRotate6((a)))
#define rightRotate27(a) (leftRotate5((a)))
#define rightRotate28(a) (leftRotate4((a)))
#define rightRotate29(a) (leftRotate3((a)))
#define rightRotate30(a) (leftRotate2((a)))
#define rightRotate31(a) (leftRotate1((a)))
#endif /* LW_CRYPTO_ROTATE32_COMPOSED */
/* Rotation macros for 64-bit arguments */
/* Generic left rotate */
......
#define CRYPTO_KEYBYTES 16
#define CRYPTO_NSECBYTES 0
#define CRYPTO_NPUBBYTES 15
#define CRYPTO_ABYTES 8
#define CRYPTO_NOOVERLAP 1
#include "comet.h"
/**
 * \brief Standard crypto_aead encryption wrapper for COMET-64_SPECK.
 *
 * Forwards every argument unchanged to the library implementation and
 * returns its status code (0 on success).
 */
int crypto_aead_encrypt
    (unsigned char *c, unsigned long long *clen,
     const unsigned char *m, unsigned long long mlen,
     const unsigned char *ad, unsigned long long adlen,
     const unsigned char *nsec,
     const unsigned char *npub,
     const unsigned char *k)
{
    int status;
    /* Delegate directly to the COMET-64_SPECK AEAD implementation */
    status = comet_64_speck_aead_encrypt
        (c, clen, m, mlen, ad, adlen, nsec, npub, k);
    return status;
}
/**
 * \brief Standard crypto_aead decryption wrapper for COMET-64_SPECK.
 *
 * Forwards every argument unchanged to the library implementation and
 * returns its status code (0 on success, negative on failure).
 */
int crypto_aead_decrypt
    (unsigned char *m, unsigned long long *mlen,
     unsigned char *nsec,
     const unsigned char *c, unsigned long long clen,
     const unsigned char *ad, unsigned long long adlen,
     const unsigned char *npub,
     const unsigned char *k)
{
    int status;
    /* Delegate directly to the COMET-64_SPECK AEAD implementation */
    status = comet_64_speck_aead_decrypt
        (m, mlen, nsec, c, clen, ad, adlen, npub, k);
    return status;
}
/*
* Copyright (C) 2020 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "internal-cham.h"
#include "internal-util.h"
#if !defined(__AVR__)
/**
 * \brief Encrypts a 128-bit block with the CHAM-128-128 block cipher.
 *
 * \param key Points to the 16 bytes of the key.
 * \param output Output buffer, at least 16 bytes in length.
 * \param input Input buffer, at least 16 bytes in length.
 *
 * \a input and \a output may be the same buffer for in-place encryption.
 * Words are loaded and stored in little-endian byte order.
 */
void cham128_128_encrypt
    (const unsigned char *key, unsigned char *output,
     const unsigned char *input)
{
    uint32_t a, b, c, d;
    uint32_t ks[8];
    uint8_t i;
    uint8_t round;
    /* Load the four 32-bit key words */
    for (i = 0; i < 4; ++i)
        ks[i] = le_load_word32(key + i * 4);
    /* Derive the second half of the schedule from the key words taken in
     * swapped pairs (i ^ 1 maps 0,1,2,3 -> 1,0,3,2), then transform the
     * first half in place.  The derived half must be computed first
     * because it uses the untransformed key words. */
    for (i = 0; i < 4; ++i)
        ks[i + 4] = ks[i ^ 1] ^ leftRotate1(ks[i ^ 1]) ^ leftRotate11(ks[i ^ 1]);
    for (i = 0; i < 4; ++i)
        ks[i] ^= leftRotate1(ks[i]) ^ leftRotate8(ks[i]);
    /* Load the input block into the working state */
    a = le_load_word32(input);
    b = le_load_word32(input + 4);
    c = le_load_word32(input + 8);
    d = le_load_word32(input + 12);
    /* Run the 80 rounds, unrolled eight at a time so that each line uses
     * a fixed key-schedule word.  Even rounds rotate left by 8, odd
     * rounds rotate left by 1; the round number is mixed in by XOR. */
    for (round = 0; round < 80; round += 8) {
        a = leftRotate8((a ^ round) + (leftRotate1(b) ^ ks[0]));
        b = leftRotate1((b ^ (round + 1)) + (leftRotate8(c) ^ ks[1]));
        c = leftRotate8((c ^ (round + 2)) + (leftRotate1(d) ^ ks[2]));
        d = leftRotate1((d ^ (round + 3)) + (leftRotate8(a) ^ ks[3]));
        a = leftRotate8((a ^ (round + 4)) + (leftRotate1(b) ^ ks[4]));
        b = leftRotate1((b ^ (round + 5)) + (leftRotate8(c) ^ ks[5]));
        c = leftRotate8((c ^ (round + 6)) + (leftRotate1(d) ^ ks[6]));
        d = leftRotate1((d ^ (round + 7)) + (leftRotate8(a) ^ ks[7]));
    }
    /* Write the final state to the output block */
    le_store_word32(output, a);
    le_store_word32(output + 4, b);
    le_store_word32(output + 8, c);
    le_store_word32(output + 12, d);
}
/**
 * \brief Encrypts a 64-bit block with the CHAM-64-128 block cipher.
 *
 * \param key Points to the 16 bytes of the key.
 * \param output Output buffer, at least 8 bytes in length.
 * \param input Input buffer, at least 8 bytes in length.
 *
 * \a input and \a output may be the same buffer for in-place encryption.
 * Words are loaded and stored in little-endian byte order.
 */
void cham64_128_encrypt
    (const unsigned char *key, unsigned char *output,
     const unsigned char *input)
{
    uint16_t a, b, c, d;
    uint16_t ks[16];
    uint8_t i;
    uint8_t round;
    /* Load the eight 16-bit key words */
    for (i = 0; i < 8; ++i)
        ks[i] = le_load_word16(key + i * 2);
    /* Derive the second half of the schedule from the key words taken in
     * swapped pairs (i ^ 1 maps 0..7 -> 1,0,3,2,5,4,7,6), then transform
     * the first half in place.  The derived half must be computed first
     * because it uses the untransformed key words. */
    for (i = 0; i < 8; ++i)
        ks[i + 8] = ks[i ^ 1] ^ leftRotate1_16(ks[i ^ 1]) ^ leftRotate11_16(ks[i ^ 1]);
    for (i = 0; i < 8; ++i)
        ks[i] ^= leftRotate1_16(ks[i]) ^ leftRotate8_16(ks[i]);
    /* Load the input block into the working state */
    a = le_load_word16(input);
    b = le_load_word16(input + 2);
    c = le_load_word16(input + 4);
    d = le_load_word16(input + 6);
    /* Run the 80 rounds four at a time.  The 16-word schedule is indexed
     * cyclically as (round + i) % 16.  Even rounds rotate left by 8, odd
     * rounds rotate left by 1; the round number is mixed in by XOR. */
    for (round = 0; round < 80; round += 4) {
        a = leftRotate8_16
            ((a ^ round) +
             (leftRotate1_16(b) ^ ks[round % 16]));
        b = leftRotate1_16
            ((b ^ (round + 1)) +
             (leftRotate8_16(c) ^ ks[(round + 1) % 16]));
        c = leftRotate8_16
            ((c ^ (round + 2)) +
             (leftRotate1_16(d) ^ ks[(round + 2) % 16]));
        d = leftRotate1_16
            ((d ^ (round + 3)) +
             (leftRotate8_16(a) ^ ks[(round + 3) % 16]));
    }
    /* Write the final state to the output block */
    le_store_word16(output, a);
    le_store_word16(output + 2, b);
    le_store_word16(output + 4, c);
    le_store_word16(output + 6, d);
}
#endif
......@@ -22,6 +22,7 @@
#include "comet.h"
#include "internal-cham.h"
#include "internal-speck64.h"
#include "internal-util.h"
#include <string.h>
......@@ -478,58 +479,6 @@ int comet_64_cham_aead_decrypt
return aead_check_tag(m, *mlen, Y, c + *mlen, COMET_64_TAG_SIZE);
}
/**
* \brief Encrypts a 64-bit block with SPECK-64-128 in COMET byte order.
*
* \param key Points to the 16 bytes of the key.
* \param output Output buffer which must be at least 8 bytes in length.
* \param input Input buffer which must be at least 8 bytes in length.
*
* The \a input and \a output buffers can be the same buffer for
* in-place encryption.
*
* \note This version differs from standard SPECK-64 in that it uses the
* little-endian byte order from the COMET specification which is different
* from the big-endian byte order from the original SPECK paper.
*/
static void speck64_128_comet_encrypt
    (const unsigned char *key, unsigned char *output,
     const unsigned char *input)
{
    uint32_t sched[4];      /* Circular buffer of key schedule "l" words */
    uint32_t hi, lo, rk;    /* Block halves and the current round key */
    uint8_t round;
    uint8_t rd_idx = 0;     /* Next schedule slot to read */
    uint8_t wr_idx = 3;     /* Next schedule slot to write (3 ahead) */
    /* Unpack the key: first word is the initial round key, the rest seed
     * the schedule buffer.  All words are little-endian per COMET. */
    rk = le_load_word32(key);
    sched[0] = le_load_word32(key + 4);
    sched[1] = le_load_word32(key + 8);
    sched[2] = le_load_word32(key + 12);
    /* Unpack the plaintext block */
    lo = le_load_word32(input);
    hi = le_load_word32(input + 4);
    /* Run the first 26 rounds, expanding the key schedule on the fly so
     * the full schedule never needs to be stored. */
    for (round = 0; round < 26; ++round) {
        /* Data round with the current round key */
        hi = (rightRotate8(hi) + lo) ^ rk;
        lo = leftRotate3(lo) ^ hi;
        /* Derive the next round key */
        sched[wr_idx] = (rk + rightRotate8(sched[rd_idx])) ^ round;
        rk = leftRotate3(rk) ^ sched[wr_idx];
        rd_idx = (rd_idx + 1) & 0x03;
        wr_idx = (wr_idx + 1) & 0x03;
    }
    /* Final (27th) round — no further schedule update is required */
    hi = (rightRotate8(hi) + lo) ^ rk;
    lo = leftRotate3(lo) ^ hi;
    le_store_word32(output, lo);
    le_store_word32(output + 4, hi);
}
int comet_64_speck_aead_encrypt
(unsigned char *c, unsigned long long *clen,
const unsigned char *m, unsigned long long mlen,
......@@ -547,23 +496,23 @@ int comet_64_speck_aead_encrypt
/* Set up the initial state of Y and Z */
memset(Y, 0, 8);
speck64_128_comet_encrypt(k, Y, Y);
speck64_128_encrypt(k, Y, Y);
memcpy(Z, npub, 15);
Z[15] = 0;
lw_xor_block(Z, k, 16);
/* Process the associated data */
if (adlen > 0)
comet_process_ad(Y, Z, 8, speck64_128_comet_encrypt, ad, adlen);
comet_process_ad(Y, Z, 8, speck64_128_encrypt, ad, adlen);
/* Encrypt the plaintext to produce the ciphertext */
if (mlen > 0)
comet_encrypt_64(Y, Z, speck64_128_comet_encrypt, c, m, mlen);
comet_encrypt_64(Y, Z, speck64_128_encrypt, c, m, mlen);
/* Generate the authentication tag */
Z[15] ^= 0x80;
comet_adjust_block_key(Z);
speck64_128_comet_encrypt(Z, c + mlen, Y);
speck64_128_encrypt(Z, c + mlen, Y);
return 0;
}
......@@ -586,22 +535,22 @@ int comet_64_speck_aead_decrypt
/* Set up the initial state of Y and Z */
memset(Y, 0, 8);
speck64_128_comet_encrypt(k, Y, Y);
speck64_128_encrypt(k, Y, Y);
memcpy(Z, npub, 15);
Z[15] = 0;
lw_xor_block(Z, k, 16);
/* Process the associated data */
if (adlen > 0)
comet_process_ad(Y, Z, 8, speck64_128_comet_encrypt, ad, adlen);
comet_process_ad(Y, Z, 8, speck64_128_encrypt, ad, adlen);
/* Decrypt the ciphertext to produce the plaintext */
if (clen > COMET_64_TAG_SIZE)
comet_decrypt_64(Y, Z, speck64_128_comet_encrypt, m, c, *mlen);
comet_decrypt_64(Y, Z, speck64_128_encrypt, m, c, *mlen);
/* Check the authentication tag */
Z[15] ^= 0x80;
comet_adjust_block_key(Z);
speck64_128_comet_encrypt(Z, Y, Y);
speck64_128_encrypt(Z, Y, Y);
return aead_check_tag(m, *mlen, Y, c + *mlen, COMET_64_TAG_SIZE);
}
......@@ -23,6 +23,8 @@
#include "internal-cham.h"
#include "internal-util.h"
#if !defined(__AVR__)
void cham128_128_encrypt
(const unsigned char *key, unsigned char *output,
const unsigned char *input)
......@@ -132,3 +134,5 @@ void cham64_128_encrypt
le_store_word16(output + 4, x2);
le_store_word16(output + 6, x3);
}
#endif
......@@ -238,6 +238,17 @@
} \
} while (0)
/* Rotation functions need to be optimised for best performance on AVR.
* The most efficient rotations are where the number of bits is 1 or a
* multiple of 8, so we compose the efficient rotations to produce all
* other rotation counts of interest. */
#if defined(__AVR__)
#define LW_CRYPTO_ROTATE32_COMPOSED 1
#else
#define LW_CRYPTO_ROTATE32_COMPOSED 0
#endif
/* Rotation macros for 32-bit arguments */
/* Generic left rotate */
......@@ -254,6 +265,8 @@
(_temp >> (bits)) | (_temp << (32 - (bits))); \
}))
#if !LW_CRYPTO_ROTATE32_COMPOSED
/* Left rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define leftRotate1(a) (leftRotate((a), 1))
......@@ -322,6 +335,138 @@
#define rightRotate30(a) (rightRotate((a), 30))
#define rightRotate31(a) (rightRotate((a), 31))
#else /* LW_CRYPTO_ROTATE32_COMPOSED */
/* Composed rotation macros where 1 and 8 are fast, but others are slow */
/* Left rotate by 1 */
#define leftRotate1(a) (leftRotate((a), 1))
/* Left rotate by 2 */
#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1))
/* Left rotate by 3 */
#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1))
/* Left rotate by 4 */
#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1))
/* Left rotate by 5: Rotate left by 8, then right by 3 */
#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1))
/* Left rotate by 6: Rotate left by 8, then right by 2 */
#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1))
/* Left rotate by 7: Rotate left by 8, then right by 1 */
#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1))
/* Left rotate by 8 */
#define leftRotate8(a) (leftRotate((a), 8))
/* Left rotate by 9: Rotate left by 8, then left by 1 */
#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1))
/* Left rotate by 10: Rotate left by 8, then left by 2 */
#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1))
/* Left rotate by 11: Rotate left by 8, then left by 3 */
#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1))
/* Left rotate by 12: Rotate left by 16, then right by 4 */
#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1))
/* Left rotate by 13: Rotate left by 16, then right by 3 */
#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1))
/* Left rotate by 14: Rotate left by 16, then right by 2 */
#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1))
/* Left rotate by 15: Rotate left by 16, then right by 1 */
#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1))
/* Left rotate by 16 */
#define leftRotate16(a) (leftRotate((a), 16))
/* Left rotate by 17: Rotate left by 16, then left by 1 */
#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1))
/* Left rotate by 18: Rotate left by 16, then left by 2 */
#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1))
/* Left rotate by 19: Rotate left by 16, then left by 3 */
#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1))
/* Left rotate by 20: Rotate left by 16, then left by 4 */
#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1))
/* Left rotate by 21: Rotate left by 24, then right by 3 */
#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1))
/* Left rotate by 22: Rotate left by 24, then right by 2 */
#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1))
/* Left rotate by 23: Rotate left by 24, then right by 1 */
#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1))
/* Left rotate by 24 */
#define leftRotate24(a) (leftRotate((a), 24))
/* Left rotate by 25: Rotate left by 24, then left by 1 */
#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1))
/* Left rotate by 26: Rotate left by 24, then left by 2 */
#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1))
/* Left rotate by 27: Rotate left by 24, then left by 3 */
#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1))
/* Left rotate by 28: Rotate right by 4 */
#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1))
/* Left rotate by 29: Rotate right by 3 */
#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1))
/* Left rotate by 30: Rotate right by 2 */
#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1))
/* Left rotate by 31: Rotate right by 1 */
#define leftRotate31(a) (rightRotate((a), 1))
/* Define the 32-bit right rotations in terms of left rotations */
#define rightRotate1(a) (leftRotate31((a)))
#define rightRotate2(a) (leftRotate30((a)))
#define rightRotate3(a) (leftRotate29((a)))
#define rightRotate4(a) (leftRotate28((a)))
#define rightRotate5(a) (leftRotate27((a)))
#define rightRotate6(a) (leftRotate26((a)))
#define rightRotate7(a) (leftRotate25((a)))
#define rightRotate8(a) (leftRotate24((a)))
#define rightRotate9(a) (leftRotate23((a)))
#define rightRotate10(a) (leftRotate22((a)))
#define rightRotate11(a) (leftRotate21((a)))
#define rightRotate12(a) (leftRotate20((a)))
#define rightRotate13(a) (leftRotate19((a)))
#define rightRotate14(a) (leftRotate18((a)))
#define rightRotate15(a) (leftRotate17((a)))
#define rightRotate16(a) (leftRotate16((a)))
#define rightRotate17(a) (leftRotate15((a)))
#define rightRotate18(a) (leftRotate14((a)))
#define rightRotate19(a) (leftRotate13((a)))
#define rightRotate20(a) (leftRotate12((a)))
#define rightRotate21(a) (leftRotate11((a)))
#define rightRotate22(a) (leftRotate10((a)))
#define rightRotate23(a) (leftRotate9((a)))
#define rightRotate24(a) (leftRotate8((a)))
#define rightRotate25(a) (leftRotate7((a)))
#define rightRotate26(a) (leftRotate6((a)))
#define rightRotate27(a) (leftRotate5((a)))
#define rightRotate28(a) (leftRotate4((a)))
#define rightRotate29(a) (leftRotate3((a)))
#define rightRotate30(a) (leftRotate2((a)))
#define rightRotate31(a) (leftRotate1((a)))
#endif /* LW_CRYPTO_ROTATE32_COMPOSED */
/* Rotation macros for 64-bit arguments */
/* Generic left rotate */
......
#define CRYPTO_KEYBYTES 16
#define CRYPTO_NSECBYTES 0
#define CRYPTO_NPUBBYTES 16
#define CRYPTO_ABYTES 16
#define CRYPTO_NOOVERLAP 1
#include "drygascon.h"
/**
 * \brief Standard crypto_aead encryption wrapper for DryGASCON128.
 *
 * Forwards every argument unchanged to the library implementation and
 * returns its status code (0 on success).
 */
int crypto_aead_encrypt
    (unsigned char *c, unsigned long long *clen,
     const unsigned char *m, unsigned long long mlen,
     const unsigned char *ad, unsigned long long adlen,
     const unsigned char *nsec,
     const unsigned char *npub,
     const unsigned char *k)
{
    int status;
    /* Delegate directly to the DryGASCON128 AEAD implementation */
    status = drygascon128_aead_encrypt
        (c, clen, m, mlen, ad, adlen, nsec, npub, k);
    return status;
}
/**
 * \brief Standard crypto_aead decryption wrapper for DryGASCON128.
 *
 * Forwards every argument unchanged to the library implementation and
 * returns its status code (0 on success, negative on failure).
 */
int crypto_aead_decrypt
    (unsigned char *m, unsigned long long *mlen,
     unsigned char *nsec,
     const unsigned char *c, unsigned long long clen,
     const unsigned char *ad, unsigned long long adlen,
     const unsigned char *npub,
     const unsigned char *k)
{
    int status;
    /* Delegate directly to the DryGASCON128 AEAD implementation */
    status = drygascon128_aead_decrypt
        (m, mlen, nsec, c, clen, ad, adlen, npub, k);
    return status;
}
......@@ -23,6 +23,8 @@
#include "internal-drysponge.h"
#include <string.h>
#if !defined(__AVR__)
/* Right rotations in bit-interleaved format */
#define intRightRotateEven(x,bits) \
(__extension__ ({ \
......@@ -289,6 +291,8 @@ void drysponge256_g(drysponge256_state_t *state)
}
}
#endif /* !__AVR__ */
void drysponge128_g_core(drysponge128_state_t *state)
{
unsigned round;
......@@ -304,6 +308,7 @@ void drysponge256_g_core(drysponge256_state_t *state)
}
/**
* \fn uint32_t drysponge_select_x(const uint32_t x[4], uint8_t index)
* \brief Selects an element of x in constant time.
*
* \param x Points to the four elements of x.
......@@ -311,6 +316,7 @@ void drysponge256_g_core(drysponge256_state_t *state)
*
* \return The selected element of x.
*/
#if !defined(__AVR__)
STATIC_INLINE uint32_t drysponge_select_x(const uint32_t x[4], uint8_t index)
{
/* We need to be careful how we select each element of x because
......@@ -340,6 +346,11 @@ STATIC_INLINE uint32_t drysponge_select_x(const uint32_t x[4], uint8_t index)
mask = -((uint32_t)((0x04 - (index ^ 0x03)) >> 2));
return result ^ (x[3] & mask);
}
#else
/* AVR is more or less immune to cache timing issues because it doesn't
 * have anything like an L1 or L2 cache. Select the word directly */
/* NOTE(review): this index is data-dependent; it is acceptable only under
 * the flat-memory argument above, unlike the constant-time masked version
 * used on other platforms. */
#define drysponge_select_x(x, index) ((x)[(index)])
#endif
/**
* \brief Mixes a 32-bit value into the DrySPONGE128 state.
......
......@@ -238,6 +238,17 @@
} \
} while (0)
/* Rotation functions need to be optimised for best performance on AVR.
 * The most efficient rotations are where the number of bits is 1 or a
 * multiple of 8, so we compose the efficient rotations to produce all
 * other rotation counts of interest. */
/* This flag selects between the direct leftRotateN/rightRotateN macros
 * and the composed versions defined further down in this header. */
#if defined(__AVR__)
#define LW_CRYPTO_ROTATE32_COMPOSED 1
#else
#define LW_CRYPTO_ROTATE32_COMPOSED 0
#endif
/* Rotation macros for 32-bit arguments */
/* Generic left rotate */
......@@ -254,6 +265,8 @@
(_temp >> (bits)) | (_temp << (32 - (bits))); \
}))
#if !LW_CRYPTO_ROTATE32_COMPOSED
/* Left rotate by a specific number of bits. These macros may be replaced
* with more efficient ones on platforms that lack a barrel shifter */
#define leftRotate1(a) (leftRotate((a), 1))
......@@ -322,6 +335,138 @@
#define rightRotate30(a) (rightRotate((a), 30))
#define rightRotate31(a) (rightRotate((a), 31))
#else /* LW_CRYPTO_ROTATE32_COMPOSED */

/* Composed rotation macros where 1 and 8 are fast, but others are slow */
/*
 * Scheme: each rotation count N is reached by first rotating left by the
 * nearest multiple of 8 (0, 8, 16, or 24 -- cheap byte moves on AVR) and
 * then adjusting with at most four single-bit rotates in whichever
 * direction needs fewer steps.  Every macro below is an exact equivalent
 * of leftRotate((a), N) for its N; the composition only changes the cost,
 * not the result.  NOTE(review): this assumes leftRotate/rightRotate are
 * true 32-bit rotates, as defined earlier in this header.
 */

/* Left rotate by 1 */
#define leftRotate1(a) (leftRotate((a), 1))

/* Left rotate by 2 */
#define leftRotate2(a) (leftRotate(leftRotate((a), 1), 1))

/* Left rotate by 3 */
#define leftRotate3(a) (leftRotate(leftRotate(leftRotate((a), 1), 1), 1))

/* Left rotate by 4 */
#define leftRotate4(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 1), 1), 1), 1))

/* Left rotate by 5: Rotate left by 8, then right by 3 */
#define leftRotate5(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 8), 1), 1), 1))

/* Left rotate by 6: Rotate left by 8, then right by 2 */
#define leftRotate6(a) (rightRotate(rightRotate(leftRotate((a), 8), 1), 1))

/* Left rotate by 7: Rotate left by 8, then right by 1 */
#define leftRotate7(a) (rightRotate(leftRotate((a), 8), 1))

/* Left rotate by 8 */
#define leftRotate8(a) (leftRotate((a), 8))

/* Left rotate by 9: Rotate left by 8, then left by 1 */
#define leftRotate9(a) (leftRotate(leftRotate((a), 8), 1))

/* Left rotate by 10: Rotate left by 8, then left by 2 */
#define leftRotate10(a) (leftRotate(leftRotate(leftRotate((a), 8), 1), 1))

/* Left rotate by 11: Rotate left by 8, then left by 3 */
#define leftRotate11(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 8), 1), 1), 1))

/* Left rotate by 12: Rotate left by 16, then right by 4 */
#define leftRotate12(a) (rightRotate(rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1), 1))

/* Left rotate by 13: Rotate left by 16, then right by 3 */
#define leftRotate13(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 16), 1), 1), 1))

/* Left rotate by 14: Rotate left by 16, then right by 2 */
#define leftRotate14(a) (rightRotate(rightRotate(leftRotate((a), 16), 1), 1))

/* Left rotate by 15: Rotate left by 16, then right by 1 */
#define leftRotate15(a) (rightRotate(leftRotate((a), 16), 1))

/* Left rotate by 16 */
#define leftRotate16(a) (leftRotate((a), 16))

/* Left rotate by 17: Rotate left by 16, then left by 1 */
#define leftRotate17(a) (leftRotate(leftRotate((a), 16), 1))

/* Left rotate by 18: Rotate left by 16, then left by 2 */
#define leftRotate18(a) (leftRotate(leftRotate(leftRotate((a), 16), 1), 1))

/* Left rotate by 19: Rotate left by 16, then left by 3 */
#define leftRotate19(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1))

/* Left rotate by 20: Rotate left by 16, then left by 4 */
#define leftRotate20(a) (leftRotate(leftRotate(leftRotate(leftRotate(leftRotate((a), 16), 1), 1), 1), 1))

/* Left rotate by 21: Rotate left by 24, then right by 3 */
#define leftRotate21(a) (rightRotate(rightRotate(rightRotate(leftRotate((a), 24), 1), 1), 1))

/* Left rotate by 22: Rotate left by 24, then right by 2 */
#define leftRotate22(a) (rightRotate(rightRotate(leftRotate((a), 24), 1), 1))

/* Left rotate by 23: Rotate left by 24, then right by 1 */
#define leftRotate23(a) (rightRotate(leftRotate((a), 24), 1))

/* Left rotate by 24 */
#define leftRotate24(a) (leftRotate((a), 24))

/* Left rotate by 25: Rotate left by 24, then left by 1 */
#define leftRotate25(a) (leftRotate(leftRotate((a), 24), 1))

/* Left rotate by 26: Rotate left by 24, then left by 2 */
#define leftRotate26(a) (leftRotate(leftRotate(leftRotate((a), 24), 1), 1))

/* Left rotate by 27: Rotate left by 24, then left by 3 */
#define leftRotate27(a) (leftRotate(leftRotate(leftRotate(leftRotate((a), 24), 1), 1), 1))

/* Left rotate by 28: Rotate right by 4 */
#define leftRotate28(a) (rightRotate(rightRotate(rightRotate(rightRotate((a), 1), 1), 1), 1))

/* Left rotate by 29: Rotate right by 3 */
#define leftRotate29(a) (rightRotate(rightRotate(rightRotate((a), 1), 1), 1))

/* Left rotate by 30: Rotate right by 2 */
#define leftRotate30(a) (rightRotate(rightRotate((a), 1), 1))

/* Left rotate by 31: Rotate right by 1 */
#define leftRotate31(a) (rightRotate((a), 1))

/* Define the 32-bit right rotations in terms of left rotations */
/* (rightRotateN is leftRotate(32 - N), so no new code paths are added) */
#define rightRotate1(a)  (leftRotate31((a)))
#define rightRotate2(a)  (leftRotate30((a)))
#define rightRotate3(a)  (leftRotate29((a)))
#define rightRotate4(a)  (leftRotate28((a)))
#define rightRotate5(a)  (leftRotate27((a)))
#define rightRotate6(a)  (leftRotate26((a)))
#define rightRotate7(a)  (leftRotate25((a)))
#define rightRotate8(a)  (leftRotate24((a)))
#define rightRotate9(a)  (leftRotate23((a)))
#define rightRotate10(a) (leftRotate22((a)))
#define rightRotate11(a) (leftRotate21((a)))
#define rightRotate12(a) (leftRotate20((a)))
#define rightRotate13(a) (leftRotate19((a)))
#define rightRotate14(a) (leftRotate18((a)))
#define rightRotate15(a) (leftRotate17((a)))
#define rightRotate16(a) (leftRotate16((a)))
#define rightRotate17(a) (leftRotate15((a)))
#define rightRotate18(a) (leftRotate14((a)))
#define rightRotate19(a) (leftRotate13((a)))
#define rightRotate20(a) (leftRotate12((a)))
#define rightRotate21(a) (leftRotate11((a)))
#define rightRotate22(a) (leftRotate10((a)))
#define rightRotate23(a) (leftRotate9((a)))
#define rightRotate24(a) (leftRotate8((a)))
#define rightRotate25(a) (leftRotate7((a)))
#define rightRotate26(a) (leftRotate6((a)))
#define rightRotate27(a) (leftRotate5((a)))
#define rightRotate28(a) (leftRotate4((a)))
#define rightRotate29(a) (leftRotate3((a)))
#define rightRotate30(a) (leftRotate2((a)))
#define rightRotate31(a) (leftRotate1((a)))

#endif /* LW_CRYPTO_ROTATE32_COMPOSED */
/* Rotation macros for 64-bit arguments */
/* Generic left rotate */
......
/* NIST LWC / SUPERCOP AEAD parameter sizes (256-bit key variant).
 * NOTE(review): the matching #include lies beyond this view -- confirm
 * which cipher these sizes belong to before relying on this comment. */
#define CRYPTO_KEYBYTES 32      /* key length in bytes (256-bit key) */
#define CRYPTO_NSECBYTES 0      /* no secret message number is used */
#define CRYPTO_NPUBBYTES 16     /* public nonce length in bytes (128-bit) */
#define CRYPTO_ABYTES 32        /* maximum ciphertext expansion (tag) in bytes */
#define CRYPTO_NOOVERLAP 1      /* presumably the SUPERCOP no-overlap flag -- TODO confirm */